gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from hwt.code import If, Switch, Concat
from hwt.code_utils import rename_signal
from hwt.hdl.types.bits import Bits
from hwt.hdl.types.defs import BIT
from hwt.hdl.types.struct import HStruct
from hwt.interfaces.std import Signal, Handshaked, VectSignal, \
HandshakeSync
from hwt.interfaces.utils import propagateClkRstn
from hwt.math import log2ceil
from hwt.serializer.mode import serializeParamsUniq
from hwt.synthesizer.interfaceLevel.interfaceUtils.utils import NotSpecified
from hwt.synthesizer.param import Param
from hwt.synthesizer.rtlLevel.rtlSignal import RtlSignal
from hwtLib.amba.constants import RESP_OKAY
from hwtLib.amba.datapump.base import AxiDatapumpBase
from hwtLib.amba.datapump.intf import AxiWDatapumpIntf
from hwtLib.handshaked.fifo import HandshakedFifo
from hwtLib.handshaked.streamNode import StreamNode
from hwtSimApi.hdlSimulator import HdlSimulator
class WFifoIntf(Handshaked):
    """
    Item of :obj:`~.Axi_wDatapump.writeInfoFifo`: per-transaction metadata
    passed from the request/address side to the axi.w channel handler.

    .. hwt-autodoc::
    """

    def _config(self):
        # tuple of supported byte-shift amounts; (0,) means data is always
        # aligned and no shift field is declared
        self.SHIFT_OPTIONS = Param((0,))

    def _declr(self):
        if self.SHIFT_OPTIONS != (0,):
            # The encoded value of how many bytes should be the data from input write data be shifted
            # in order to fit the word on output write bus
            self.shift = VectSignal(log2ceil(len(self.SHIFT_OPTIONS)))
        # last word can be canceled because the address can have some offset which could
        # potentially spot new word but due to limited transaction size (using req.rem)
        # this should not happen, this flags provides this information
        self.drop_last_word = Signal()
        HandshakeSync._declr(self)

    def _initSimAgent(self, sim: HdlSimulator):
        # internal-only interface, no simulation agent is provided
        raise NotSpecified()
class BFifoIntf(Handshaked):
    """
    Item of :obj:`~.Axi_wDatapump.bInfoFifo`: propagates the end-of-frame flag
    from the axi.w side to the axi.b (write response) handler.

    .. hwt-autodoc::
    """

    def _config(self):
        pass

    def _declr(self):
        # 1 if the corresponding write burst is the last fragment of a frame
        self.isLast = Signal()
        HandshakeSync._declr(self)

    def _initSimAgent(self, sim: HdlSimulator):
        # internal-only interface, no simulation agent is provided
        raise NotSpecified()
@serializeParamsUniq
class Axi_wDatapump(AxiDatapumpBase):
    """
    Axi3/Axi3Lte/Axi4/Axi4Lite to axi write datapump,
    :see: :class:`hwtLib.amba.datapump.base.AxiDatapumpBase`

    .. hwt-autodoc::
    """

    def _declr(self):
        super()._declr()  # add clk, rst, axi addr channel and req channel
        # sticky error output, set when axi.b returns a non-OKAY response
        self.errorWrite = Signal()._m()
        if self.ALIGNAS != 8:
            # sticky error output, set when a request address violates ALIGNAS
            self.errorAlignment = Signal()._m()
        with self._paramsShared():
            self.axi.HAS_R = False
            d = self.driver = AxiWDatapumpIntf()
            # note: the original code assigned d.ID_WIDTH = 0 twice;
            # the redundant duplicate assignment was removed
            d.ID_WIDTH = 0
            d.MAX_BYTES = self.MAX_CHUNKS * (self.CHUNK_WIDTH // 8)
        # fifo for id propagation and frame splitting on axi.w channel
        wf = self.writeInfoFifo = HandshakedFifo(WFifoIntf)
        wf.DEPTH = self.MAX_TRANS_OVERLAP
        wf.SHIFT_OPTIONS = self.getShiftOptions()
        # fifo for propagation of end of frame from axi.b channel
        bf = self.bInfoFifo = HandshakedFifo(BFifoIntf)
        bf.DEPTH = self.MAX_TRANS_OVERLAP

    def storeTransInfo(self, transInfo: WFifoIntf, isLast: bool):
        """
        Drive the writeInfoFifo input item for the current request.

        :param transInfo: the WFifoIntf item to drive
        :param isLast: not used here; kept for interface compatibility with
            the base class
        :return: list of assignments (empty if data is always aligned)
        """
        if self.isAlwaysAligned():
            return []
        else:
            req = self.driver.req
            # byte offset of the request address inside a bus word
            offset = req.addr[self.getSizeAlignBits():]
            crossesWordBoundary = self.isCrossingWordBoundary(req.addr, req.rem)
            return [
                self.encodeShiftValue(transInfo.SHIFT_OPTIONS, offset, transInfo.shift),
                # last word can be canceled when the address is unaligned but
                # the transaction does not spill into an extra bus word
                transInfo.drop_last_word(~self.addrIsAligned(req.addr) & ~crossesWordBoundary)
            ]

    def axiWHandler(self, wErrFlag: RtlSignal):
        """
        Connect driver.w to axi.w: perform byte shifting for unaligned data
        and split over-long frames into multiple AXI bursts.

        :param wErrFlag: when set, data is consumed/discarded instead of sent
        """
        w = self.axi.w
        wIn = self.driver.w
        wInfo = self.writeInfoFifo.dataOut
        bInfo = self.bInfoFifo.dataIn
        dataAck = self._sig("dataAck")
        inLast = wIn.last
        if hasattr(w, "id"):
            # AXI3 has id signal, AXI4 does not
            w.id(self.ID_VAL)
        if self.isAlwaysAligned():
            # aligned case: pass data straight through
            w.data(wIn.data)
            w.strb(wIn.strb)
            if self.axi.LEN_WIDTH:
                doSplit = wIn.last
            else:
                # bus without bursts: every word is its own transaction
                doSplit = BIT.from_py(1)
            waitForShift = BIT.from_py(0)
        else:
            isFirst = self._reg("isFirstData", def_val=1)
            # keeps the previous input word so its upper bytes can be
            # prepended to the current word when the data is shifted
            prevData = self._reg("prevData", HStruct(
                (wIn.data._dtype, "data"),
                (wIn.strb._dtype, "strb"),
                (BIT, "waitingForShift"),
                ),
                def_val={"waitingForShift": 0})
            waitForShift = prevData.waitingForShift
            isShifted = (wInfo.shift != 0) | (wInfo.SHIFT_OPTIONS[0] != 0)
            wInWillWaitForShift = wIn.valid & wIn.last & isShifted & ~prevData.waitingForShift & ~wInfo.drop_last_word
            If(StreamNode([wIn, wInfo], [w, bInfo], skipWhen={wIn: waitForShift}).ack() & ~wErrFlag,
               # data feed in to prevData is stalled if we need to dispath
               # the remainder from previous word which was not yet dispatched due data shift
               # the last data from wIn is consumed on wIn.last, however there is 1 beat stall
               # for wIn i transaction was not aligned. wInfo and bInfo channels are activated
               # after last beat of wOut is send
               If(~prevData.waitingForShift,
                  prevData.data(wIn.data),
                  prevData.strb(wIn.strb),
               ),
               waitForShift(wInWillWaitForShift),
               isFirst((isShifted & waitForShift) | ((~isShifted | wInfo.drop_last_word) & wIn.last))
            )

            def applyShift(sh):
                # drive w.data/w.strb for a single shift option, sh = shift in bits
                if sh == 0 and wInfo.SHIFT_OPTIONS[0] == 0:
                    return [
                        w.data(wIn.data),
                        w.strb(wIn.strb),
                    ]
                else:
                    rem_w = self.DATA_WIDTH - sh
                    return [
                        # wIn.data starts on 0 we need to shift it sh bits
                        # in first word the prefix is invalid, in rest of the frames it is taken from
                        # previous data
                        If(waitForShift,
                           w.data(Concat(Bits(rem_w).from_py(None), prevData.data[:rem_w])),
                        ).Else(
                           w.data(Concat(wIn.data[rem_w:], prevData.data[:rem_w])),
                        ),
                        If(waitForShift,
                           # wait until remainder of previous data is send
                           w.strb(Concat(Bits(rem_w // 8).from_py(0), prevData.strb[:rem_w // 8])),
                        ).Elif(isFirst,
                           # ignore previous data
                           w.strb(Concat(wIn.strb[rem_w // 8:], Bits(sh // 8).from_py(0))),
                        ).Else(
                           # take what is left from prev data and append from wIn
                           w.strb(Concat(wIn.strb[rem_w // 8:], prevData.strb[:rem_w // 8])),
                        )
                    ]

            Switch(wInfo.shift).add_cases([
                (i, applyShift(sh))
                for i, sh in enumerate(wInfo.SHIFT_OPTIONS)
            ]).Default(
                w.data(None),
                w.strb(None),
            )
            inLast = rename_signal(self, isShifted._ternary(waitForShift | (wIn.last & wInfo.drop_last_word), wIn.last), "inLast")
            doSplit = inLast

        if self.useTransSplitting():
            # split frames longer than the maximal AXI burst length
            wordCntr = self._reg("wWordCntr", self.getLen_t(), 0)
            doSplit = rename_signal(self, wordCntr._eq(self.getAxiLenMax()) | doSplit, "doSplit1")
            If(StreamNode([wInfo, wIn], [bInfo, w]).ack() & ~wErrFlag,
               If(doSplit,
                  wordCntr(0)
               ).Else(
                  wordCntr(wordCntr + 1)
               )
            )

        if self.AXI_CLS.LEN_WIDTH != 0:
            w.last(doSplit)
        # if this frame was split into a multiple frames wIn.last will equal 0
        bInfo.isLast(inLast)
        dataNode = StreamNode(
            masters=[wIn, wInfo],
            slaves=[bInfo, w],
            skipWhen={
                wIn: waitForShift,
            },
            extraConds={
                wIn: ~waitForShift,
                wInfo: doSplit,
                bInfo: doSplit,
                w: ~wErrFlag})
        dataAck(dataNode.ack())
        dataNode.sync()

    def axiBHandler(self):
        """
        Handle write responses on axi.b: acknowledge to the driver only on the
        response which corresponds to the last fragment of an original frame.
        """
        b = self.axi.b
        ack = self.driver.ack
        lastFlags = self.bInfoFifo.dataOut
        StreamNode(
            masters=[b, lastFlags],
            slaves=[ack],
            extraConds={
                ack: lastFlags.isLast
            }
        ).sync()

    def _impl(self):
        propagateClkRstn(self)
        b = self.axi.b
        # sticky flag, set on the first non-OKAY write response
        wErrFlag = self._reg("wErrFlag", def_val=0)
        If(b.valid & (b.resp != RESP_OKAY),
           wErrFlag(1)
        )
        self.errorWrite(wErrFlag)

        if self.ALIGNAS != 8:
            # sticky flag for requests whose address violates ALIGNAS
            wErrAlignFlag = self._reg("wErrAlignFlag", def_val=0)
            req = self.driver.req
            If(req.vld & ~self.addrIsAligned(req.addr),
               wErrAlignFlag(1)
            )
            self.errorAlignment(wErrAlignFlag)
            wErrFlag = wErrFlag | wErrAlignFlag

        self.addrHandler(self.driver.req, self.axi.aw, self.writeInfoFifo.dataIn, wErrFlag)
        self.axiWHandler(wErrFlag)
        self.axiBHandler()
if __name__ == "__main__":
    from hwt.synthesizer.utils import to_rtl_str

    # build the datapump unit and dump its RTL for quick inspection
    unit = Axi_wDatapump()
    # unit.ALIGNAS = 8
    print(to_rtl_str(unit))
| |
"""Functions to construct sparse matrices
"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['spdiags', 'eye', 'identity', 'kron', 'kronsum',
'hstack', 'vstack', 'bmat', 'rand', 'random', 'diags', 'block_diag']
import numpy as np
from scipy._lib.six import xrange
from .sputils import upcast, get_index_dtype
from .csr import csr_matrix
from .csc import csc_matrix
from .bsr import bsr_matrix
from .coo import coo_matrix
from .dia import dia_matrix
from .base import issparse
def spdiags(data, diags, m, n, format=None):
"""
Return a sparse matrix from diagonals.
Parameters
----------
data : array_like
matrix diagonals stored row-wise
diags : diagonals to set
- k = 0 the main diagonal
- k > 0 the k-th upper diagonal
- k < 0 the k-th lower diagonal
m, n : int
shape of the result
format : str, optional
Format of the result. By default (format=None) an appropriate sparse
matrix format is returned. This choice is subject to change.
See Also
--------
diags : more convenient form of this function
dia_matrix : the sparse DIAgonal format.
Examples
--------
>>> data = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
>>> diags = np.array([0, -1, 2])
>>> spdiags(data, diags, 4, 4).toarray()
array([[1, 0, 3, 0],
[1, 2, 0, 4],
[0, 2, 3, 0],
[0, 0, 3, 4]])
"""
return dia_matrix((data, diags), shape=(m,n)).asformat(format)
def diags(diagonals, offsets, shape=None, format=None, dtype=None):
    """
    Construct a sparse matrix from diagonals.

    Parameters
    ----------
    diagonals : sequence of array_like
        Sequence of arrays containing the matrix diagonals,
        corresponding to `offsets`.
    offsets : sequence of int
        Diagonals to set:
          - k = 0  the main diagonal
          - k > 0  the k-th upper diagonal
          - k < 0  the k-th lower diagonal
    shape : tuple of int, optional
        Shape of the result. If omitted, a square matrix large enough
        to contain the diagonals is returned.
    format : {"dia", "csr", "csc", "lil", ...}, optional
        Matrix format of the result. By default (format=None) an
        appropriate sparse matrix format is returned. This choice is
        subject to change.
    dtype : dtype, optional
        Data type of the matrix.

    See Also
    --------
    spdiags : construct matrix from diagonals

    Notes
    -----
    Unlike `spdiags`, the result is the sparse equivalent of::

        np.diag(diagonals[0], offsets[0])
        + ...
        + np.diag(diagonals[k], offsets[k])

    Repeated diagonal offsets are disallowed.
    Scalars broadcast along a diagonal when `shape` is given.

    .. versionadded:: 0.11

    Examples
    --------
    >>> diags([[1, 2, 3, 4], [1, 2, 3], [1, 2]], [0, -1, 2]).toarray()
    array([[1, 0, 1, 0],
           [1, 2, 0, 2],
           [0, 2, 3, 0],
           [0, 0, 3, 4]])
    >>> diags([1, -2, 1], [-1, 0, 1], shape=(4, 4)).toarray()
    array([[-2.,  1.,  0.,  0.],
           [ 1., -2.,  1.,  0.],
           [ 0.,  1., -2.,  1.],
           [ 0.,  0.,  1., -2.]])
    """
    # if offsets is not a sequence, assume that there's only one diagonal
    try:
        iter(offsets)
    except TypeError:
        # a scalar offset is only valid with a single (1-D) diagonal
        try:
            iter(diagonals[0])
        except TypeError:
            diagonals = [np.atleast_1d(diagonals)]
        else:
            raise ValueError("Different number of diagonals and offsets.")
    else:
        diagonals = [np.atleast_1d(d) for d in diagonals]

    offsets = np.atleast_1d(offsets)
    if len(diagonals) != len(offsets):
        raise ValueError("Different number of diagonals and offsets.")

    # infer a square shape large enough for the first diagonal, if omitted
    if shape is None:
        size = len(diagonals[0]) + abs(int(offsets[0]))
        shape = (size, size)

    if dtype is None:
        dtype = np.common_type(*diagonals)

    m, n = shape
    # width of the DIA data array: widest diagonal plus its column start
    width = max(0, max(min(m + off, n - off) + max(0, off) for off in offsets))
    data_arr = np.zeros((len(offsets), width), dtype=dtype)

    for idx, (diagonal, offset) in enumerate(zip(diagonals, offsets)):
        start = max(0, offset)
        length = min(m + offset, n - offset)
        if length <= 0:
            raise ValueError("Offset %d (index %d) out of bounds" % (offset, idx))
        try:
            # scalars broadcast over the slice; arrays must match its length
            data_arr[idx, start:start + length] = diagonal
        except ValueError:
            if len(diagonal) != length and len(diagonal) != 1:
                raise ValueError(
                    "Diagonal length (index %d: %d at offset %d) does not "
                    "agree with matrix size (%d, %d)." % (
                        idx, len(diagonal), offset, m, n))
            raise

    return dia_matrix((data_arr, offsets), shape=(m, n)).asformat(format)
def identity(n, dtype='d', format=None):
    """Identity matrix in sparse format

    Builds the (n, n) identity matrix in the requested sparse format and
    dtype; equivalent to ``eye(n, n, dtype=dtype, format=format)``.

    Parameters
    ----------
    n : int
        Shape of the identity matrix.
    dtype : dtype, optional
        Data type of the matrix
    format : str, optional
        Sparse format of the result, e.g. format="csr", etc.

    Examples
    --------
    >>> identity(3).toarray()
    array([[ 1.,  0.,  0.],
           [ 0.,  1.,  0.],
           [ 0.,  0.,  1.]])
    >>> identity(3, dtype='int8', format='dia')
    <3x3 sparse matrix of type '<type 'numpy.int8'>'
            with 3 stored elements (1 diagonals) in DIAgonal format>
    """
    # delegate to the more general eye() with a square shape
    return eye(n, n, dtype=dtype, format=format)
def eye(m, n=None, k=0, dtype=float, format=None):
    """Sparse matrix with ones on diagonal

    Returns a sparse (m x n) matrix where the k-th diagonal
    is all ones and everything else is zeros.

    Parameters
    ----------
    m : int
        Number of rows in the matrix.
    n : int, optional
        Number of columns. Default: `m`.
    k : int, optional
        Diagonal to place ones on. Default: 0 (main diagonal).
    dtype : dtype, optional
        Data type of the matrix.
    format : str, optional
        Sparse format of the result, e.g. format="csr", etc.

    Examples
    --------
    >>> from scipy import sparse
    >>> sparse.eye(3).toarray()
    array([[ 1.,  0.,  0.],
           [ 0.,  1.,  0.],
           [ 0.,  0.,  1.]])
    """
    if n is None:
        n = m
    m, n = int(m), int(n)

    # fast paths: a square main-diagonal identity can be emitted directly in
    # CSR/CSC/COO without going through the DIA intermediate
    if m == n and k == 0:
        if format in ('csr', 'csc'):
            idx_dtype = get_index_dtype(maxval=n)
            indptr = np.arange(n + 1, dtype=idx_dtype)
            indices = np.arange(n, dtype=idx_dtype)
            data = np.ones(n, dtype=dtype)
            cls = csr_matrix if format == 'csr' else csc_matrix
            return cls((data, indices, indptr), (n, n))
        if format == 'coo':
            idx_dtype = get_index_dtype(maxval=n)
            row = np.arange(n, dtype=idx_dtype)
            col = np.arange(n, dtype=idx_dtype)
            data = np.ones(n, dtype=dtype)
            return coo_matrix((data, (row, col)), (n, n))

    # general case: one row of ones, as long as the k-th diagonal
    ones = np.ones((1, max(0, min(m + k, n))), dtype=dtype)
    return spdiags(ones, k, m, n).asformat(format)
def kron(A, B, format=None):
    """kronecker product of sparse matrices A and B

    Parameters
    ----------
    A : sparse or dense matrix
        first matrix of the product
    B : sparse or dense matrix
        second matrix of the product
    format : str, optional
        format of the result (e.g. "csr")

    Returns
    -------
    kronecker product in a sparse matrix format

    Examples
    --------
    >>> from scipy import sparse
    >>> A = sparse.csr_matrix(np.array([[0, 2], [5, 0]]))
    >>> B = sparse.csr_matrix(np.array([[1, 2], [3, 4]]))
    >>> sparse.kron(A, B).toarray()
    array([[ 0,  0,  2,  4],
           [ 0,  0,  6,  8],
           [ 5, 10,  0,  0],
           [15, 20,  0,  0]])
    """
    B = coo_matrix(B)
    dense_b = (format is None or format == "bsr") and \
        2 * B.nnz >= B.shape[0] * B.shape[1]

    if dense_b:
        # B is fairly dense: each entry of A becomes a dense B-sized block
        A = csr_matrix(A, copy=True)
        output_shape = (A.shape[0] * B.shape[0], A.shape[1] * B.shape[1])
        if A.nnz == 0 or B.nnz == 0:
            # kronecker product is the zero matrix
            return coo_matrix(output_shape)
        B = B.toarray()
        blocks = A.data.repeat(B.size).reshape(-1, B.shape[0], B.shape[1])
        blocks = blocks * B
        return bsr_matrix((blocks, A.indices, A.indptr), shape=output_shape)

    # sparse B: build the result entry-by-entry in COO form
    A = coo_matrix(A)
    output_shape = (A.shape[0] * B.shape[0], A.shape[1] * B.shape[1])
    if A.nnz == 0 or B.nnz == 0:
        # kronecker product is the zero matrix
        return coo_matrix(output_shape)

    # each entry of A expands into a block of B.nnz entries;
    # scale the block origin, then add B's coordinates inside the block
    row = A.row.repeat(B.nnz) * B.shape[0]
    col = A.col.repeat(B.nnz) * B.shape[1]
    row = (row.reshape(-1, B.nnz) + B.row).reshape(-1)
    col = (col.reshape(-1, B.nnz) + B.col).reshape(-1)

    # block entries are the products of the A entry with all B entries
    data = A.data.repeat(B.nnz).reshape(-1, B.nnz) * B.data
    data = data.reshape(-1)

    return coo_matrix((data, (row, col)), shape=output_shape).asformat(format)
def kronsum(A, B, format=None):
    """kronecker sum of sparse matrices A and B

    The Kronecker sum of two square sparse matrices is
    kron(I_n, A) + kron(B, I_m), where A has shape (m, m), B has shape
    (n, n), and I_m / I_n are identity matrices of matching shapes.

    Parameters
    ----------
    A
        square matrix
    B
        square matrix
    format : str
        format of the result (e.g. "csr")

    Returns
    -------
    kronecker sum in a sparse matrix format
    """
    A = coo_matrix(A)
    B = coo_matrix(B)

    if A.shape[0] != A.shape[1]:
        raise ValueError('A is not square')
    if B.shape[0] != B.shape[1]:
        raise ValueError('B is not square')

    dtype = upcast(A.dtype, B.dtype)

    # left term places copies of A along the block diagonal,
    # right term spreads B's entries over identity blocks
    left = kron(eye(B.shape[0], dtype=dtype), A, format=format)
    right = kron(B, eye(A.shape[0], dtype=dtype), format=format)

    return (left + right).asformat(format)  # since L + R is not always same format
def _compressed_sparse_stack(blocks, axis):
"""
Stacking fast path for CSR/CSC matrices
(i) vstack for CSR, (ii) hstack for CSC.
"""
other_axis = 1 if axis == 0 else 0
data = np.concatenate([b.data for b in blocks])
indices = np.concatenate([b.indices for b in blocks])
indptr = []
last_indptr = 0
constant_dim = blocks[0].shape[other_axis]
sum_dim = 0
for b in blocks:
if b.shape[other_axis] != constant_dim:
raise ValueError('incompatible dimensions for axis %d' % other_axis)
sum_dim += b.shape[axis]
indptr.append(b.indptr[:-1] + last_indptr)
last_indptr += b.indptr[-1]
indptr.append([last_indptr])
indptr = np.concatenate(indptr)
if axis == 0:
return csr_matrix((data, indices, indptr),
shape=(sum_dim, constant_dim))
else:
return csc_matrix((data, indices, indptr),
shape=(constant_dim, sum_dim))
def hstack(blocks, format=None, dtype=None):
    """
    Stack sparse matrices horizontally (column wise)

    Parameters
    ----------
    blocks
        sequence of sparse matrices with compatible shapes
    format : str
        sparse format of the result (e.g. "csr")
        by default an appropriate sparse matrix format is returned.
        This choice is subject to change.
    dtype : dtype, optional
        The data-type of the output matrix. If not given, the dtype is
        determined from that of `blocks`.

    See Also
    --------
    vstack : stack sparse matrices vertically (row wise)

    Examples
    --------
    >>> from scipy.sparse import coo_matrix, hstack
    >>> A = coo_matrix([[1, 2], [3, 4]])
    >>> B = coo_matrix([[5], [6]])
    >>> hstack([A,B]).toarray()
    array([[1, 2, 5],
           [3, 4, 6]])
    """
    # a horizontal stack is a block matrix with a single block row
    return bmat([blocks], format=format, dtype=dtype)
def vstack(blocks, format=None, dtype=None):
    """
    Stack sparse matrices vertically (row wise)

    Parameters
    ----------
    blocks
        sequence of sparse matrices with compatible shapes
    format : str, optional
        sparse format of the result (e.g. "csr")
        by default an appropriate sparse matrix format is returned.
        This choice is subject to change.
    dtype : dtype, optional
        The data-type of the output matrix. If not given, the dtype is
        determined from that of `blocks`.

    See Also
    --------
    hstack : stack sparse matrices horizontally (column wise)

    Examples
    --------
    >>> from scipy.sparse import coo_matrix, vstack
    >>> A = coo_matrix([[1, 2], [3, 4]])
    >>> B = coo_matrix([[5, 6]])
    >>> vstack([A, B]).toarray()
    array([[1, 2],
           [3, 4],
           [5, 6]])
    """
    # a vertical stack is a block matrix with a single block column
    rows = [[block] for block in blocks]
    return bmat(rows, format=format, dtype=dtype)
def bmat(blocks, format=None, dtype=None):
    """
    Build a sparse matrix from sparse sub-blocks

    Parameters
    ----------
    blocks : array_like
        Grid of sparse matrices with compatible shapes.
        An entry of None implies an all-zero matrix.
    format : {'bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'}, optional
        The sparse format of the result (e.g. "csr"). By default an
        appropriate sparse matrix format is returned.
        This choice is subject to change.
    dtype : dtype, optional
        The data-type of the output matrix. If not given, the dtype is
        determined from that of `blocks`.

    Returns
    -------
    bmat : sparse matrix

    See Also
    --------
    block_diag, diags

    Examples
    --------
    >>> from scipy.sparse import coo_matrix, bmat
    >>> A = coo_matrix([[1, 2], [3, 4]])
    >>> B = coo_matrix([[5], [6]])
    >>> C = coo_matrix([[7]])
    >>> bmat([[A, B], [None, C]]).toarray()
    array([[1, 2, 5],
           [3, 4, 6],
           [0, 0, 7]])
    >>> bmat([[A, None], [None, C]]).toarray()
    array([[1, 2, 0],
           [3, 4, 0],
           [0, 0, 7]])
    """
    blocks = np.asarray(blocks, dtype='object')
    if blocks.ndim != 2:
        raise ValueError('blocks must be 2-D')
    M,N = blocks.shape
    # check for fast path cases: a single column of CSR blocks (vstack) or a
    # single row of CSC blocks (hstack) can be stacked without a COO round-trip
    if (N == 1 and format in (None, 'csr') and all(isinstance(b, csr_matrix)
                                                   for b in blocks.flat)):
        A = _compressed_sparse_stack(blocks[:,0], 0)
        if dtype is not None:
            A = A.astype(dtype)
        return A
    elif (M == 1 and format in (None, 'csc')
          and all(isinstance(b, csc_matrix) for b in blocks.flat)):
        A = _compressed_sparse_stack(blocks[0,:], 1)
        if dtype is not None:
            A = A.astype(dtype)
        return A
    block_mask = np.zeros(blocks.shape, dtype=bool)
    brow_lengths = np.zeros(M, dtype=np.int64)
    bcol_lengths = np.zeros(N, dtype=np.int64)
    # convert everything to COO format, recording and validating the height
    # of each block row and the width of each block column along the way
    for i in range(M):
        for j in range(N):
            if blocks[i,j] is not None:
                A = coo_matrix(blocks[i,j])
                blocks[i,j] = A
                block_mask[i,j] = True
                if brow_lengths[i] == 0:
                    brow_lengths[i] = A.shape[0]
                else:
                    if brow_lengths[i] != A.shape[0]:
                        raise ValueError('blocks[%d,:] has incompatible row dimensions' % i)
                if bcol_lengths[j] == 0:
                    bcol_lengths[j] = A.shape[1]
                else:
                    if bcol_lengths[j] != A.shape[1]:
                        raise ValueError('blocks[:,%d] has incompatible column dimensions' % j)
    # ensure that at least one value in each row and col is not None
    if brow_lengths.min() == 0:
        raise ValueError('blocks[%d,:] is all None' % brow_lengths.argmin())
    if bcol_lengths.min() == 0:
        raise ValueError('blocks[:,%d] is all None' % bcol_lengths.argmin())
    nnz = sum([block.nnz for block in blocks[block_mask]])
    if dtype is None:
        dtype = upcast(*tuple([blk.dtype for blk in blocks[block_mask]]))
    # cumulative offset of each block row/column inside the assembled matrix
    row_offsets = np.concatenate(([0], np.cumsum(brow_lengths)))
    col_offsets = np.concatenate(([0], np.cumsum(bcol_lengths)))
    shape = (np.sum(brow_lengths), np.sum(bcol_lengths))
    data = np.empty(nnz, dtype=dtype)
    idx_dtype = get_index_dtype(maxval=max(shape))
    row = np.empty(nnz, dtype=idx_dtype)
    col = np.empty(nnz, dtype=idx_dtype)
    # copy each block's COO triples into the output arrays, shifting the
    # coordinates by the block's position in the grid; `nnz` is reused as
    # the running write position
    nnz = 0
    for i in range(M):
        for j in range(N):
            if blocks[i,j] is not None:
                B = blocks[i,j]
                data[nnz:nnz + B.nnz] = B.data
                row[nnz:nnz + B.nnz] = B.row
                col[nnz:nnz + B.nnz] = B.col
                row[nnz:nnz + B.nnz] += row_offsets[i]
                col[nnz:nnz + B.nnz] += col_offsets[j]
                nnz += B.nnz
    return coo_matrix((data, (row, col)), shape=shape).asformat(format)
def block_diag(mats, format=None, dtype=None):
    """
    Build a block diagonal sparse matrix from provided matrices.

    Parameters
    ----------
    mats : sequence of matrices
        Input matrices.
    format : str, optional
        The sparse format of the result (e.g. "csr"). If not given, the matrix
        is returned in "coo" format.
    dtype : dtype specifier, optional
        The data-type of the output matrix. If not given, the dtype is
        determined from that of `blocks`.

    Returns
    -------
    res : sparse matrix

    Notes
    -----
    .. versionadded:: 0.11.0

    See Also
    --------
    bmat, diags

    Examples
    --------
    >>> A = coo_matrix([[1, 2], [3, 4]])
    >>> B = coo_matrix([[5], [6]])
    >>> C = coo_matrix([[7]])
    >>> block_diag((A, B, C)).toarray()
    array([[1, 2, 0, 0],
           [3, 4, 0, 0],
           [0, 0, 5, 0],
           [0, 0, 6, 0],
           [0, 0, 0, 7]])
    """
    # build a grid where the i-th matrix sits at position (i, i) and all
    # off-diagonal cells are None (all-zero blocks), then assemble with bmat
    count = len(mats)
    grid = []
    for pos, mat in enumerate(mats):
        row = [None] * count
        row[pos] = mat if issparse(mat) else coo_matrix(mat)
        grid.append(row)
    return bmat(grid, format=format, dtype=dtype)
def random(m, n, density=0.01, format='coo', dtype=None,
           random_state=None, data_rvs=None):
    """Generate a sparse matrix of the given shape and density with randomly
    distributed values.

    Parameters
    ----------
    m, n : int
        shape of the matrix
    density : real, optional
        density of the generated matrix: density equal to one means a full
        matrix, density of 0 means a matrix with no non-zero items.
    format : str, optional
        sparse matrix format.
    dtype : dtype, optional
        type of the returned matrix values.
    random_state : {numpy.random.RandomState, int}, optional
        Random number generator or random seed. If not given, the singleton
        numpy.random will be used.  This random state will be used
        for sampling the sparsity structure, but not necessarily for sampling
        the values of the structurally nonzero entries of the matrix.
    data_rvs : callable, optional
        Samples a requested number of random values.
        This function should take a single argument specifying the length
        of the ndarray that it will return. The structurally nonzero entries
        of the sparse random matrix will be taken from the array sampled
        by this function. By default, uniform [0, 1) random values will be
        sampled using the same random state as is used for sampling
        the sparsity structure.

    Notes
    -----
    Only float types are supported for now.
    """
    if density < 0 or density > 1:
        raise ValueError("density expected to be 0 <= density <= 1")
    if dtype and (dtype not in [np.float32, np.float64, np.longdouble]):
        raise NotImplementedError("type %s not supported" % dtype)

    mn = m * n

    # pick the smallest index type that can address all mn flat positions
    tp = np.intc
    if mn > np.iinfo(tp).max:
        tp = np.int64
    if mn > np.iinfo(tp).max:
        msg = """\
Trying to generate a random sparse matrix such as the product of dimensions is
greater than %d - this is not supported on this machine
"""
        raise ValueError(msg % np.iinfo(tp).max)

    # Number of non zero values
    k = int(density * m * n)

    if random_state is None:
        random_state = np.random
    elif isinstance(random_state, (int, np.integer)):
        random_state = np.random.RandomState(random_state)
    if data_rvs is None:
        data_rvs = random_state.rand

    # Use the algorithm from python's random.sample for k < mn/3.
    if mn < 3*k:
        # dense-ish structure: take a prefix of a full permutation
        # (random_state.choice(mn, size=k, replace=False) would also work,
        # but was only available in numpy >= 1.7 when this was written)
        ind = random_state.permutation(mn)[:k]
    else:
        # sparse structure: rejection-sample distinct flat indices,
        # fast when k is much smaller than mn
        ind = np.empty(k, dtype=tp)
        selected = set()
        # `range` replaces the py2-only `xrange` previously imported from the
        # deprecated scipy._lib.six shim
        for i in range(k):
            j = random_state.randint(mn)
            while j in selected:
                j = random_state.randint(mn)
            selected.add(j)
            ind[i] = j

    # unravel flat column-major indices into (row, col); exact integer
    # division avoids the float-precision loss of np.floor(ind * 1. / m)
    # for very large mn
    j = np.floor_divide(ind, m).astype(tp)
    i = (ind - j * m).astype(tp)
    vals = data_rvs(k).astype(dtype)
    return coo_matrix((vals, (i, j)), shape=(m, n)).asformat(format)
def rand(m, n, density=0.01, format="coo", dtype=None, random_state=None):
    """Generate a sparse matrix of the given shape and density with uniformly
    distributed values.

    This is a thin wrapper around :func:`random` using its default
    uniform [0, 1) value sampler.

    Parameters
    ----------
    m, n : int
        shape of the matrix
    density : real, optional
        density of the generated matrix: density equal to one means a full
        matrix, density of 0 means a matrix with no non-zero items.
    format : str, optional
        sparse matrix format.
    dtype : dtype, optional
        type of the returned matrix values.
    random_state : {numpy.random.RandomState, int}, optional
        Random number generator or random seed. If not given, the singleton
        numpy.random will be used.

    Notes
    -----
    Only float types are supported for now.
    """
    return random(m, n, density=density, format=format, dtype=dtype,
                  random_state=random_state)
| |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helpers to convert variables to constants in TensorFlow 2.0."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.framework import variable_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.ops import array_ops
from tensorflow.python.util import object_identity
from tensorflow.python.training.saver import export_meta_graph
_CONDITIONAL_OPS = set(["If", "StatelessIf"])
_LOOP_OPS = set(["While", "StatelessWhile"])
_CONTROL_FLOW_OPS = _CONDITIONAL_OPS.union(_LOOP_OPS)
def disable_lower_using_switch_merge(graph_def):
  """Set '_lower_using_switch_merge' attributes to False.

  Returns a copy of `graph_def` in which every control flow NodeDef — in the
  main graph and in each function of the graph's library — has the attribute
  set to False.

  Args:
    graph_def: GraphDef proto.

  Returns:
    GraphDef
  """
  result = graph_pb2.GraphDef()
  result.CopyFrom(graph_def)

  def _disable(node):
    # only If/StatelessIf/While/StatelessWhile nodes carry this attribute
    if node.op in _CONTROL_FLOW_OPS:
      node.attr["_lower_using_switch_merge"].b = False

  for node in result.node:
    _disable(node)
  if result.library:
    for func in result.library.function:
      for node in func.node_def:
        _disable(node)
  return result
def _run_inline_graph_optimization(func, lower_control_flow):
  """Apply function inline optimization to the graph.

  Returns the GraphDef after Grappler's function inlining optimization is
  applied. This optimization does not work on models with control flow.

  Args:
    func: ConcreteFunction.
    lower_control_flow: Boolean indicating whether or not to lower control flow
      ops such as If and While. (default True)

  Returns:
    GraphDef
  """
  graph_def = func.graph.as_graph_def()
  if not lower_control_flow:
    graph_def = disable_lower_using_switch_merge(graph_def)
  meta_graph = export_meta_graph(graph_def=graph_def, graph=func.graph)

  # Clear the initializer_name for the variables collections, since they are
  # not needed after saved to saved_model.
  for name in [
      "variables", "model_variables", "trainable_variables", "local_variables"
  ]:
    raw_list = []
    # BUG FIX: iterate the collection currently being rewritten (`name`);
    # the previous code always read collection_def["variables"], which
    # overwrote every one of the four collections with the contents of
    # the "variables" collection.
    for raw in meta_graph.collection_def[name].bytes_list.value:
      variable = variable_pb2.VariableDef()
      variable.ParseFromString(raw)
      variable.ClearField("initializer_name")
      raw_list.append(variable.SerializeToString())
    meta_graph.collection_def[name].bytes_list.value[:] = raw_list

  # Add a collection 'train_op' so that Grappler knows the outputs.
  fetch_collection = meta_graph_pb2.CollectionDef()
  for array in func.inputs + func.outputs:
    fetch_collection.node_list.value.append(array.name)
  meta_graph.collection_def["train_op"].CopyFrom(fetch_collection)

  # Initialize RewriterConfig with everything disabled except function
  # inlining.
  config = config_pb2.ConfigProto()
  rewrite_options = config.graph_options.rewrite_options
  rewrite_options.min_graph_nodes = -1  # do not skip small graphs
  rewrite_options.optimizers.append("function")
  return tf_optimizer.OptimizeGraph(config, meta_graph)
def _get_tensor_name(name):
"""Returns the name of the input tensor.
Args:
name: str
Returns:
str
"""
return name.split(":")[0]
def _get_new_function_name(name):
"""Returns the function name with '_frozen' appended.
Args:
name: str
Returns:
str
"""
return name + "_frozen"
def _get_node_defs_list(graph_def):
  """Returns a list of NodeDefs in the GraphDef.

  The list contains every NodeDef of the main graph plus only the control
  flow NodeDefs of the library functions. The other function NodeDefs are
  excluded because their op names are not unique and their variables are
  handled differently than in the main graph; the control flow ops are kept
  because their attributes need the same updates as those in the main graph.

  Args:
    graph_def: GraphDef proto.

  Returns:
    [NodeDef]
  """
  node_defs = list(graph_def.node)
  if graph_def.library:
    for func in graph_def.library.function:
      node_defs.extend(node for node in func.node_def
                       if node.op in _CONTROL_FLOW_OPS)
  return node_defs
def _get_tensor_data(func):
  """Gets the tensor data for all Placeholders in the model.

  Returns a dictionary that maps the tensor name to a dictionary containing:
    data: numpy data
    index: int index in func.graph.captures
    is_variable: bool indicating whether the tensor is a variable or not

  Args:
    func: ConcreteFunction.

  Returns:
    Dict
  """
  # Map capture index -> variable, matched by object identity of the
  # variable's resource handle against the captured inputs.
  var_by_capture_index = {}
  for variable in func.graph.variables:
    handle = variable.handle
    for index, captured in enumerate(func.captured_inputs):
      if handle is captured:  # pylint: disable=protected-access
        var_by_capture_index[index] = variable
        break

  # Every capture is represented as a (value tensor, name tensor) pair.
  tensor_data = {}
  for index, (value_tensor, name_tensor) in enumerate(func.graph.captures):
    variable = var_by_capture_index.get(index)
    tensor_data[_get_tensor_name(name_tensor.name)] = {
        "data": value_tensor.numpy() if variable is None else variable.numpy(),
        "index": index,
        "is_variable": variable is not None,
    }
  return tensor_data
def _get_control_flow_function_data(node_defs, tensor_data):
  """Gets the types and shapes for the parameters to the function.

  Creates a map from function name to a list of types and a list of shapes that
  correspond with the function arguments. The data is primarily determined from
  the corresponding "If" or "While" op. If the argument is a resource variable,
  then the type is determined from the type of the data contained within the
  Tensor. The shape data is only determined in the case of the "While" op.

  `is_also_output_type` is used to identify the "While" bodies that require the
  output types to be updated at the same time the input types are updated.

  Args:
    node_defs: List of NodeDefs.
    tensor_data: {str name : Tensor}.

  Returns:
    {str function name : {"types" : [int representing DataType],
                          "shapes" : [[int] representing TensorShape]],
                          "is_also_output_type" : bool}
  """
  func_data = {}

  def get_resource_type(node_name):
    # The resource's replacement dtype is taken from the numpy dtype of the
    # data captured for that node.
    numpy_type = tensor_data[node_name]["data"].dtype
    return dtypes.as_dtype(numpy_type).as_datatype_enum

  def get_resource_shape(node_name):
    # Build a TensorShapeProto from the numpy shape of the captured data.
    return tensor_shape_pb2.TensorShapeProto(dim=[
        tensor_shape_pb2.TensorShapeProto.Dim(size=dim)
        for dim in tensor_data[node_name]["data"].shape
    ])

  def add_value(func_name, arg_types, output_shapes, is_also_output_type):
    func_data[func_name] = {
        "types": arg_types,
        "shapes": output_shapes,
        "is_also_output_type": is_also_output_type
    }

  for node in node_defs:
    if node.op in _CONDITIONAL_OPS:
      arg_types = [dtype for dtype in node.attr["Tin"].list.type]

      for idx in range(len(arg_types)):
        if arg_types[idx] == dtypes.resource:
          # Skip first index which represents the condition.
          arg_types[idx] = get_resource_type(node.input[idx + 1])

      # Both branches of an If share the same argument types; shapes are not
      # tracked for conditionals.
      add_value(node.attr["then_branch"].func.name, arg_types, None, False)
      add_value(node.attr["else_branch"].func.name, arg_types, None, False)
    elif node.op in _LOOP_OPS:
      arg_types = [dtype for dtype in node.attr["T"].list.type]
      output_shapes = [shape for shape in node.attr["output_shapes"].list.shape]

      for idx in range(len(arg_types)):
        if arg_types[idx] == dtypes.resource:
          input_name = node.input[idx]
          arg_types[idx] = get_resource_type(input_name)
          output_shapes[idx] = get_resource_shape(input_name)

      # Only the loop body's output types must also be rewritten
      # (is_also_output_type=True); the cond only returns a boolean.
      add_value(node.attr["body"].func.name, arg_types, output_shapes, True)
      add_value(node.attr["cond"].func.name, arg_types, output_shapes, False)
  return func_data
def _populate_const_op(output_node, node_name, dtype, data, data_shape):
  """Creates a Const op.

  Overwrites `output_node` in place to be a Const carrying `data`.

  Args:
    output_node: TensorFlow NodeDef.
    node_name: str node name.
    dtype: AttrValue with a populated .type field.
    data: numpy data value.
    data_shape: Tuple of integers containing data shape.
  """
  output_node.op = "Const"
  output_node.name = node_name
  output_node.attr["dtype"].CopyFrom(dtype)
  # Serialize the numpy value into the Const's "value" tensor attribute.
  tensor = tensor_util.make_tensor_proto(
      data, dtype=dtype.type, shape=data_shape)
  output_node.attr["value"].tensor.CopyFrom(tensor)
def _populate_identity_op(output_node, input_node):
  """Creates an Identity op from a ReadVariable op.

  Keeps the original node name and first input, so downstream consumers of
  the ReadVariableOp's output now read through an Identity of the (by then
  constant-folded) resource input.

  Args:
    output_node: TensorFlow NodeDef.
    input_node: TensorFlow NodeDef.
  """
  output_node.op = "Identity"
  output_node.name = input_node.name
  output_node.input.append(input_node.input[0])
  # Identity's element type attr is "T"; ReadVariableOp stored it as "dtype".
  output_node.attr["T"].CopyFrom(input_node.attr["dtype"])
  # Preserve colocation constraints, if any.
  if "_class" in input_node.attr:
    output_node.attr["_class"].CopyFrom(input_node.attr["_class"])
def _populate_if_op(output_node, input_node, function_data):
  """Updates the type attributes and function names of If or StatelessIf.

  Copies `input_node` into `output_node`, then points both branches at the
  "_frozen" function names and rewrites the input-type list ("Tin") using
  the types computed for the then-branch. Output types are not changed for
  conditionals.

  Args:
    output_node: TensorFlow NodeDef.
    input_node: TensorFlow NodeDef.
    function_data: Map of function names to the list of types and shapes that
      correspond with the function arguments.
  """
  output_node.CopyFrom(input_node)
  then_func = input_node.attr["then_branch"].func.name
  output_node.attr["then_branch"].func.name = _get_new_function_name(then_func)
  output_node.attr["else_branch"].func.name = _get_new_function_name(
      input_node.attr["else_branch"].func.name)
  # Both branches share argument types, so the then-branch's entry suffices.
  output_node.attr["Tin"].list.CopyFrom(
      attr_value_pb2.AttrValue.ListValue(
          type=function_data[then_func]["types"]))
def _populate_while_op(output_node, input_node, function_data):
  """Updates the type attributes and function names of While or StatelessWhile.

  Copies `input_node` into `output_node`, points cond/body at the "_frozen"
  function names, and rewrites both the loop-carried type list ("T") and the
  "output_shapes" attr from the data computed for the cond function.

  Args:
    output_node: TensorFlow NodeDef.
    input_node: TensorFlow NodeDef.
    function_data: Map of function names to the list of types and shapes that
      correspond with the function arguments.
  """
  output_node.CopyFrom(input_node)
  cond_func = input_node.attr["cond"].func.name
  output_node.attr["cond"].func.name = _get_new_function_name(cond_func)
  output_node.attr["body"].func.name = _get_new_function_name(
      input_node.attr["body"].func.name)
  # cond and body share arg types/shapes; the cond entry is used for both.
  output_node.attr["T"].list.CopyFrom(
      attr_value_pb2.AttrValue.ListValue(
          type=function_data[cond_func]["types"]))
  output_node.attr["output_shapes"].list.CopyFrom(
      attr_value_pb2.AttrValue.ListValue(
          shape=function_data[cond_func]["shapes"]))
def _construct_concrete_function(func, output_graph_def,
                                 converted_input_indices):
  """Constructs a concrete function from the `output_graph_def`.

  Inputs that were converted to constants are dropped from the new function's
  signature; all original outputs are kept.

  Args:
    func: ConcreteFunction
    output_graph_def: GraphDef proto.
    converted_input_indices: Set of integers of input indices that were
      converted to constants.

  Returns:
    ConcreteFunction.
  """
  # Create a ConcreteFunction from the new GraphDef.
  input_tensors = func.graph.internal_captures
  # NOTE(review): ObjectIdentitySet presumably compares tensors by identity
  # rather than __eq__ (which is overloaded on tensors) — confirm.
  converted_inputs = object_identity.ObjectIdentitySet(
      [input_tensors[index] for index in converted_input_indices])
  not_converted_inputs = object_identity.ObjectIdentitySet(
      func.inputs).difference(converted_inputs)
  not_converted_inputs_map = {
      tensor.name: tensor for tensor in not_converted_inputs
  }

  new_input_names = [tensor.name for tensor in not_converted_inputs]
  new_output_names = [tensor.name for tensor in func.outputs]
  new_func = wrap_function.function_from_graph_def(output_graph_def,
                                                   new_input_names,
                                                   new_output_names)

  # Manually propagate shape for input tensors where the shape is not correctly
  # propagated. Scalars shapes are lost when wrapping the function.
  for input_tensor in new_func.inputs:
    input_tensor.set_shape(not_converted_inputs_map[input_tensor.name].shape)
  return new_func
def convert_variables_to_constants_v2(func, lower_control_flow=True):
  """Replaces all the variables in a graph with constants of the same values.

  TensorFlow 2.0 function for converting all Variable ops into Const ops holding
  the same values. This makes it possible to describe the network fully with a
  single GraphDef file, and allows the removal of a lot of ops related to
  loading and saving the variables. This function runs Grappler's function
  inlining optimization in order to return a single subgraph.

  The current implementation only works for graphs that do not contain any
  control flow or embedding related ops.

  Args:
    func: ConcreteFunction.
    lower_control_flow: Boolean indicating whether or not to lower control flow
      ops such as If and While. (default True)

  Returns:
    ConcreteFunction containing a simplified version of the original.
  """
  # Inline the graph in order to remove functions when possible.
  graph_def = _run_inline_graph_optimization(func, lower_control_flow)

  # Gets list of all node defs include those in the library.
  node_defs = _get_node_defs_list(graph_def)

  # Get mapping from node name to node.
  name_to_node = {_get_tensor_name(node.name): node for node in node_defs}

  # Get mapping from node name to variable value.
  tensor_data = _get_tensor_data(func)

  # Get mapping from function name to argument types.
  function_data = _get_control_flow_function_data(node_defs, tensor_data)

  # Get variable data for all nodes in `node_defs`.
  # reference_variables: VariableV2 name -> evaluated Tensor value.
  # resource_identities: Identity node name -> replacement dtype AttrValue.
  # placeholders: Placeholder name -> {"dtype": AttrValue, "data": numpy}.
  reference_variables = {}
  resource_identities = {}
  placeholders = {}
  converted_input_indices = set()

  def _save_placeholder(node_name, dtype):
    # Record the dtype/data for a Placeholder to be folded into a Const, and
    # remember which capture index it came from so the input can be dropped
    # from the final function signature.
    placeholders[node_name] = {
        "dtype": dtype,
        "data": tensor_data[node_name]["data"],
    }
    converted_input_indices.add(tensor_data[node_name]["index"])

  # --- Pass 1: scan all nodes and collect what must be rewritten. ---
  for node in node_defs:
    if node.op in _CONDITIONAL_OPS:
      # Get dtype and data for resource Placeholders.
      then_func = node.attr["then_branch"].func.name
      arg_types = function_data[then_func]["types"]
      # node.input[0] is the boolean condition, hence the [1:] slice.
      for idx, input_tensor in enumerate(node.input[1:]):
        input_name = _get_tensor_name(input_tensor)
        if input_name in tensor_data:
          dtype = attr_value_pb2.AttrValue(type=arg_types[idx])
          _save_placeholder(_get_tensor_name(input_tensor), dtype)
    elif node.op in _LOOP_OPS:
      # Get dtype and data for resource Placeholders.
      cond_func = node.attr["cond"].func.name
      arg_types = function_data[cond_func]["types"]
      for idx, input_tensor in enumerate(node.input):
        input_name = _get_tensor_name(input_tensor)
        if input_name in tensor_data:
          dtype = attr_value_pb2.AttrValue(type=arg_types[idx])
          _save_placeholder(_get_tensor_name(input_tensor), dtype)
    elif (node.op == "Identity" and node.attr["T"].type == dtypes.resource and
          name_to_node[_get_tensor_name(node.input[0])].op in _LOOP_OPS):
      # Store the dtype for Identity resource ops that are outputs of While ops.
      while_node = name_to_node[_get_tensor_name(node.input[0])]
      body_func = while_node.attr["body"].func.name
      # The output index of the While op is encoded in the tensor name suffix.
      input_data = node.input[0].split(":")
      idx = 0 if len(input_data) == 1 else int(input_data[1])
      dtype = attr_value_pb2.AttrValue(
          type=function_data[body_func]["types"][idx])
      resource_identities[node.name] = dtype
    elif node.op == "VariableV2":
      # Get data for VariableV2 ops (reference variables) that cannot be lifted.
      # The value is obtained by pruning the graph down to an Identity of the
      # variable and executing that subgraph.
      with func.graph.as_default():
        identity_node = array_ops.identity(
            func.graph.as_graph_element(node.name + ":0"))
      reference_variables[node.name] = (
          func.prune([], [identity_node.name])()[0])
    elif node.name in tensor_data and not tensor_data[node.name]["is_variable"]:
      # Get dtype and data for non-variable Placeholders (ex. values for 1.X
      # Const ops that are loaded as Placeholders in 2.0)
      _save_placeholder(node.name, node.attr["dtype"])
    elif node.op in ["ReadVariableOp", "ResourceGather"]:
      # Get dtype and data for Placeholder ops associated with ReadVariableOp
      # and ResourceGather ops. There can be an Identity in between the
      # resource op and Placeholder. Store the dtype for the Identity ops.
      input_name = _get_tensor_name(node.input[0])
      while name_to_node[input_name].op == "Identity":
        resource_identities[input_name] = node.attr["dtype"]
        input_name = _get_tensor_name(name_to_node[input_name].input[0])
      if name_to_node[input_name].op != "Placeholder":
        raise ValueError("Cannot find the Placeholder op that is an input "
                         "to the ReadVariableOp.")
      _save_placeholder(input_name, node.attr["dtype"])

  # --- Pass 2: reconstruct the graph with constants in place of variables. ---
  output_graph_def = graph_pb2.GraphDef()

  for input_node in graph_def.node:
    output_node = output_graph_def.node.add()
    # Convert VariableV2 ops to Const ops.
    if input_node.name in reference_variables:
      data = reference_variables[input_node.name]
      dtype = attr_value_pb2.AttrValue(type=data.dtype.as_datatype_enum)
      _populate_const_op(output_node, input_node.name, dtype, data.numpy(),
                         data.shape)
    # Convert Placeholder ops to Const ops.
    elif input_node.name in placeholders:
      data = placeholders[input_node.name]["data"]
      dtype = placeholders[input_node.name]["dtype"]
      _populate_const_op(output_node, input_node.name, dtype, data, data.shape)
    # Update the dtype for Identity ops that are inputs to ReadVariableOps.
    elif input_node.name in resource_identities:
      output_node.CopyFrom(input_node)
      output_node.attr["T"].CopyFrom(resource_identities[input_node.name])
    # Convert ReadVariableOps to Identity ops.
    elif input_node.op == "ReadVariableOp":
      _populate_identity_op(output_node, input_node)
    # Convert ResourceGather to Gather ops with a Const axis feeding into it.
    elif input_node.op == "ResourceGather":
      if input_node.attr["batch_dims"].i != 0:
        raise ValueError("batch_dims != 0 is not supported by freeze_graph.")
      output_axis_node = output_graph_def.node.add()
      axis_node_name = input_node.name + "/axis"
      axis_dtype = input_node.attr["Tindices"]
      # batch_dims is 0 here (checked above), so the gather axis Const is 0.
      axis_data = np.array(input_node.attr["batch_dims"].i)
      _populate_const_op(output_axis_node, axis_node_name, axis_dtype,
                         axis_data, axis_data.shape)

      output_node.op = "GatherV2"
      output_node.name = input_node.name
      output_node.input.extend(
          [input_node.input[0], input_node.input[1], axis_node_name])
      output_node.attr["Tparams"].CopyFrom(input_node.attr["dtype"])
      output_node.attr["Tindices"].CopyFrom(input_node.attr["Tindices"])
      output_node.attr["Taxis"].CopyFrom(axis_dtype)
      if "_class" in input_node.attr:
        output_node.attr["_class"].CopyFrom(input_node.attr["_class"])
    # Update the function names and argument types for the conditional ops.
    elif input_node.op in _CONDITIONAL_OPS:
      _populate_if_op(output_node, input_node, function_data)
    elif input_node.op in _LOOP_OPS:
      _populate_while_op(output_node, input_node, function_data)
    else:
      output_node.CopyFrom(input_node)

  # Add functions to reconstructed graph.
  if graph_def.library:
    library = output_graph_def.library

    for input_library_func in graph_def.library.function:
      orig_func_name = input_library_func.signature.name
      new_func_name = _get_new_function_name(orig_func_name)

      # Do not copy any functions that aren't being used in the graph. Any
      # functions that are not used by control flow should have been inlined.
      if orig_func_name not in function_data:
        continue

      output_library_func = library.function.add()
      for key, value in input_library_func.ret.items():
        output_library_func.ret[key] = value
      for key, value in input_library_func.control_ret.items():
        output_library_func.control_ret[key] = value

      # Update the input types in the function signature. Update the output
      # types for functions that are while loop bodies.
      output_library_func.signature.CopyFrom(input_library_func.signature)
      output_library_func.signature.name = new_func_name
      for dtype, arg in zip(function_data[orig_func_name]["types"],
                            output_library_func.signature.input_arg):
        arg.type = dtype
      if function_data[orig_func_name]["is_also_output_type"]:
        for dtype, arg in zip(function_data[orig_func_name]["types"],
                              output_library_func.signature.output_arg):
          arg.type = dtype

      # Update the NodeDefs.
      func_variables = {
          node.name: node.input[0]
          for node in input_library_func.node_def
          if node.op == "ReadVariableOp"
      }

      for input_node in input_library_func.node_def:
        output_node = output_library_func.node_def.add()
        # Convert ReadVariableOps to Identity ops.
        if input_node.op == "ReadVariableOp":
          _populate_identity_op(output_node, input_node)
        # Update the function names and argument types for the conditional ops.
        elif input_node.op in _CONDITIONAL_OPS:
          _populate_if_op(output_node, input_node, function_data)
        elif input_node.op in _LOOP_OPS:
          _populate_while_op(output_node, input_node, function_data)
        else:
          output_node.CopyFrom(input_node)
        # Convert :value to :output for ops that use the ReadVariableOp.
        # (ReadVariableOp's output is named "value"; Identity's is "output".)
        for idx, full_name in enumerate(input_node.input):
          input_name = _get_tensor_name(full_name)
          if input_name in func_variables:
            full_name_parts = full_name.split(":")
            full_name_parts[1] = "output"
            input_name = ":".join(full_name_parts)
            output_node.input[idx] = input_name

  output_graph_def.versions.CopyFrom(graph_def.versions)
  return _construct_concrete_function(func, output_graph_def,
                                      converted_input_indices)
| |
'''
Created on Aug 4, 2011
@author: sean
'''
from __future__ import print_function
from ..visitors import Visitor, visit_children
from ..visitors.symbol_visitor import get_symbols
import ast
from ...utils import py2op
class ConditionalSymbolVisitor(Visitor):
    """AST visitor that classifies symbols by how certainly they are used.

    Tracks four disjoint-by-invariant sets:
      _stable_lhs / _cond_lhs: symbols assigned unconditionally vs. only on
        some paths (inside loops, one branch of an if, except handlers, ...).
      _stable_rhs / _cond_rhs: symbols read unconditionally vs. conditionally.
    `undefined` collects symbols read before any (stable) assignment.
    `seen_break` flags that a break/continue was encountered, after which
    later statements are no longer guaranteed to execute, so subsequent
    "stable" updates are demoted to conditional.
    """

    def __init__(self):
        self._cond_lhs = set()
        self._stable_lhs = set()

        self._cond_rhs = set()
        self._stable_rhs = set()

        self.undefined = set()

        self.seen_break = False

    # Module and Pass nodes just recurse / do nothing of interest.
    visitModule = visit_children
    visitPass = visit_children

    def update_stable_rhs(self, symbols):
        """Record unconditional reads; demoted to conditional after a break."""
        new_symbols = symbols - self._stable_rhs
        # A read is "undefined" unless a stable assignment precedes it.
        self._update_undefined(new_symbols)
        if self.seen_break:
            self._cond_rhs.update(new_symbols)
        else:
            # Promote: a stable read supersedes an earlier conditional one.
            self._cond_rhs -= new_symbols
            self._stable_rhs.update(new_symbols)

    def update_stable_lhs(self, symbols):
        """Record unconditional assignments; conditional after a break."""
        new_symbols = symbols - self._stable_lhs
        if self.seen_break:
            self._cond_lhs.update(new_symbols)
        else:
            self._cond_lhs -= new_symbols
            self._stable_lhs.update(new_symbols)

    def update_cond_rhs(self, symbols):
        """Record reads that happen only on some execution paths."""
        new_symbols = symbols - self._stable_rhs
        self._update_undefined(new_symbols)
        self._cond_rhs.update(new_symbols)

    def update_cond_lhs(self, symbols):
        """Record assignments that happen only on some execution paths."""
        self._cond_lhs.update(symbols - self._stable_lhs)

    def _update_undefined(self, symbols):
        # Anything read that has no stable assignment yet is possibly
        # undefined at that point.
        self.undefined.update(symbols - self._stable_lhs)

    # Public alias used by composite visitors (e.g. visitTryExcept).
    update_undefined = _update_undefined

    @property
    def stable_lhs(self):
        assert not (self._stable_lhs & self._cond_lhs)
        return self._stable_lhs

    @property
    def stable_rhs(self):
        assert not (self._stable_rhs & self._cond_rhs)
        return self._stable_rhs

    @property
    def cond_rhs(self):
        assert not (self._stable_rhs & self._cond_rhs)
        return self._cond_rhs

    @property
    def cond_lhs(self):
        assert not (self._stable_lhs & self._cond_lhs)
        return self._cond_lhs

    @property
    def lhs(self):
        """All assigned symbols, conditional or not."""
        assert not (self._stable_lhs & self._cond_lhs)
        return self._cond_lhs | self._stable_lhs

    @property
    def rhs(self):
        """All read symbols, conditional or not."""
        assert not (self._stable_rhs & self._cond_rhs)
        return self._cond_rhs | self._stable_rhs

    def visitAugAssign(self, node):
        # `x += v`: x is both read and written.
        values = get_symbols(node.value)
        self.update_stable_rhs(values)

        targets = get_symbols(node.target)
        self.update_stable_rhs(targets)
        self.update_stable_lhs(targets)

    def visitAssign(self, node):
        ids = set()
        for target in node.targets:
            ids.update(get_symbols(target, ast.Store))

        rhs_ids = get_symbols(node.value, ast.Load)
        # Subscript/attribute targets (e.g. `a[i] = v`) also *read* `a`/`i`.
        for target in node.targets:
            rhs_ids.update(get_symbols(target, ast.Load))

        self.update_stable_rhs(rhs_ids)
        self.update_stable_lhs(ids)

    def visitBreak(self, node):
        self.seen_break = True

    def visitContinue(self, node):
        # Treated like break: following statements are no longer guaranteed.
        self.seen_break = True

    def visit_loop(self, node):
        """Shared For/While logic: body is conditional, merge with orelse."""
        gen = ConditionalSymbolVisitor()
        for stmnt in node.body:
            gen.visit(stmnt)

        self.update_cond_lhs(gen.cond_lhs)
        self.update_cond_rhs(gen.cond_rhs)

        outputs = gen.stable_lhs
        inputs = gen.stable_rhs

        gen = ConditionalSymbolVisitor()
        for stmnt in node.orelse:
            gen.visit(stmnt)

        self.update_cond_rhs(gen.cond_rhs)
        self.update_cond_lhs(gen.cond_lhs)

        orelse_outputs = gen.stable_lhs
        orelse_inputs = gen.stable_rhs

        # Stable only if stable in both body and orelse; otherwise conditional.
        self.update_stable_lhs(outputs.intersection(orelse_outputs))
        self.update_stable_rhs(inputs.intersection(orelse_inputs))

        self.update_cond_lhs(outputs.symmetric_difference(orelse_outputs))
        self.update_cond_rhs(inputs.symmetric_difference(orelse_inputs))

    def visitFor(self, node):
        # The loop target is only assigned if the iterable is non-empty.
        lhs_symbols = get_symbols(node.target, ast.Store)
        self.update_cond_lhs(lhs_symbols)

        rhs_symbols = get_symbols(node.iter, ast.Load)
        self.update_stable_rhs(rhs_symbols)

        # Loop targets read inside the body should not count as undefined.
        remove_from_undef = lhs_symbols - self.undefined

        self.visit_loop(node)

        self.undefined -= remove_from_undef

    def visitExpr(self, node):
        rhs_ids = get_symbols(node, ast.Load)
        self.update_stable_rhs(rhs_ids)

    def visitPrint(self, node):
        # Python 2 `print` statement node.
        rhs_ids = get_symbols(node, ast.Load)
        self.update_stable_rhs(rhs_ids)

    def visitWhile(self, node):
        rhs_symbols = get_symbols(node.test, ast.Load)
        self.update_stable_rhs(rhs_symbols)

        self.visit_loop(node)

    def visitIf(self, node):
        # The test is always evaluated; both branches are conditional,
        # except for symbols stable in *both* branches.
        rhs_symbols = get_symbols(node.test, ast.Load)
        self.update_stable_rhs(rhs_symbols)

        gen = ConditionalSymbolVisitor()
        for stmnt in node.body:
            gen.visit(stmnt)

        if gen.seen_break:
            self.seen_break = True

        self.update_cond_lhs(gen._cond_lhs)
        self.update_cond_rhs(gen._cond_rhs)

        outputs = gen.stable_lhs
        inputs = gen.stable_rhs

        gen = ConditionalSymbolVisitor()
        for stmnt in node.orelse:
            gen.visit(stmnt)

        self.update_cond_lhs(gen._cond_lhs)
        self.update_cond_rhs(gen._cond_rhs)

        orelse_outputs = gen.stable_lhs
        orelse_inputs = gen.stable_rhs

        self.update_stable_lhs(outputs.intersection(orelse_outputs))
        self.update_stable_rhs(inputs.intersection(orelse_inputs))

        self.update_cond_lhs(outputs.symmetric_difference(orelse_outputs))
        self.update_cond_rhs(inputs.symmetric_difference(orelse_inputs))

    @py2op
    def visitExec(self, node):
        # Python 2 `exec` statement: all three operands are reads.
        self.update_stable_rhs(get_symbols(node.body, ast.Load))

        if node.globals:
            self.update_stable_rhs(get_symbols(node.globals, ast.Load))

        if node.locals:
            self.update_stable_rhs(get_symbols(node.locals, ast.Load))

    def visitAssert(self, node):
        self.update_stable_rhs(get_symbols(node.test, ast.Load))

        if node.msg:
            self.update_stable_rhs(get_symbols(node.msg, ast.Load))

    @py2op
    def visitRaise(self, node):
        # Python 2 raise: `raise type, inst, tback`.
        if node.type:
            self.update_stable_rhs(get_symbols(node.type, ast.Load))

        if node.inst:
            self.update_stable_rhs(get_symbols(node.inst, ast.Load))

        if node.tback:
            self.update_stable_rhs(get_symbols(node.tback, ast.Load))

    @visitRaise.py3op
    def visitRaise(self, node):
        # Python 3 raise: `raise exc from cause`.
        if node.exc:
            self.update_stable_rhs(get_symbols(node.exc, ast.Load))

        if node.cause:
            self.update_stable_rhs(get_symbols(node.cause, ast.Load))

    def visitTryExcept(self, node):
        # Body and each handler are analyzed independently; a symbol is
        # stable only if stable in the body AND in every handler.
        gen = ConditionalSymbolVisitor()
        gen.visit_list(node.body)
        self.update_undefined(gen.undefined)

        handlers = [csv(hndlr) for hndlr in node.handlers]

        for g in handlers:
            self.update_undefined(g.undefined)

        stable_rhs = gen.stable_rhs.intersection(*[g.stable_rhs for g in handlers])
        self.update_stable_rhs(stable_rhs)
        all_rhs = gen.rhs.union(*[g.rhs for g in handlers])
        self.update_cond_rhs(all_rhs - stable_rhs)

        stable_lhs = gen.stable_lhs.intersection(*[g.stable_lhs for g in handlers])
        self.update_stable_lhs(stable_lhs)
        all_lhs = gen.lhs.union(*[g.lhs for g in handlers])
        self.update_cond_lhs(all_lhs - stable_lhs)

        # The orelse only runs when no exception occurred -> conditional.
        gen = ConditionalSymbolVisitor()
        gen.visit_list(node.orelse)
        self.update_undefined(gen.undefined)
        self.update_cond_lhs(gen.lhs)
        self.update_cond_rhs(gen.rhs)

    @py2op
    def visitExceptHandler(self, node):
        # Python 2: handler name is an expression node.
        if node.type:
            self.update_stable_rhs(get_symbols(node.type, ast.Load))

        if node.name:
            self.update_stable_lhs(get_symbols(node.name, ast.Store))

        self.visit_list(node.body)

    @visitExceptHandler.py3op
    def visitExceptHandler(self, node):
        # Python 3: handler name is a plain string.
        if node.type:
            self.update_stable_rhs(get_symbols(node.type, ast.Load))

        if node.name:
            self.update_stable_lhs({node.name})

        self.visit_list(node.body)

    def visitTryFinally(self, node):
        # The finally block always runs, so both parts are visited in order.
        self.visit_list(node.body)
        self.visit_list(node.finalbody)

    def visitImportFrom(self, node):
        symbols = get_symbols(node)
        self.update_stable_lhs(symbols)

    def visitImport(self, node):
        symbols = get_symbols(node)
        self.update_stable_lhs(symbols)

    def visitLambda(self, node):
        # Analyze the lambda in isolation; only its free (undefined) names
        # count as reads in the enclosing scope.
        # NOTE(review): node.args.args holds arg nodes, not strings, on
        # Python 3 — confirm get_symbols/visit_list handle this upstream.
        gen = ConditionalSymbolVisitor()
        gen.update_stable_lhs(symbols={arg for arg in node.args.args})
        gen.visit_list(node.body)
        self.update_stable_rhs(gen.undefined)

    def visitFunctionDef(self, node):
        # Decorators are evaluated in the enclosing scope.
        for decorator in node.decorator_list:
            self.update_stable_rhs(get_symbols(decorator, ast.Load))

        # The function name itself is bound in the enclosing scope.
        self.update_stable_lhs({node.name})

        # Free names of the body count as reads in the enclosing scope.
        gen = ConditionalSymbolVisitor()
        gen.update_stable_lhs(symbols={arg for arg in node.args.args})
        gen.visit_list(node.body)
        self.update_stable_rhs(gen.undefined)

    def visitGlobal(self, node):
        # Declarations only; no reads or writes by themselves.
        pass

    def visitWith(self, node):
        self.update_stable_rhs(get_symbols(node.context_expr, ast.Load))

        if node.optional_vars:
            self.update_stable_lhs(get_symbols(node.optional_vars, ast.Load))

        self.visit_list(node.body)

    def visitReturn(self, node):
        self.update_stable_rhs(get_symbols(node.value, ast.Load))
def csv(node):
    """Visit `node` with a fresh ConditionalSymbolVisitor and return it."""
    visitor = ConditionalSymbolVisitor()
    visitor.visit(node)
    return visitor
def lhs(node):
    '''
    Return a set of symbols in `node` that are assigned.

    :param node: ast node

    :returns: set of strings.
    '''
    visitor = ConditionalSymbolVisitor()
    # Accept either a single node or a sequence of statements.
    if isinstance(node, (list, tuple)):
        visitor.visit_list(node)
    else:
        visitor.visit(node)
    return visitor.lhs
def rhs(node):
    '''
    Return a set of symbols in `node` that are used.

    :param node: ast node

    :returns: set of strings.
    '''
    visitor = ConditionalSymbolVisitor()
    # Accept either a single node or a sequence of statements.
    if isinstance(node, (list, tuple)):
        visitor.visit_list(node)
    else:
        visitor.visit(node)
    return visitor.rhs
def conditional_lhs(node):
    '''
    Group outputs into contitional and stable

    :param node: ast node

    :returns: tuple of (contitional, stable)
    '''
    visitor = ConditionalSymbolVisitor()
    visitor.visit(node)
    return visitor.cond_lhs, visitor.stable_lhs
def conditional_symbols(node):
    '''
    Group lhs and rhs into contitional, stable and undefined

    :param node: ast node

    :returns: tuple of (contitional_lhs, stable_lhs),(contitional_rhs, stable_rhs), undefined
    '''
    visitor = ConditionalSymbolVisitor()
    visitor.visit(node)
    return ((visitor.cond_lhs, visitor.stable_lhs),
            (visitor.cond_rhs, visitor.stable_rhs),
            visitor.undefined)
if __name__ == '__main__':
    # Demo: `a` and `d` are assigned both in the loop body (before/after the
    # break) and in the else clause, while `b`/`c` appear on only one path.
    source = '''
while k:
    a = 1
    b = 1
    break
    d = 1
else:
    a =2
    c= 3

d = 1
'''
    print(conditional_lhs(ast.parse(source)))
| |
#!/usr/bin/env python
#
#-----------------------------------------------------------------------
# A test suite for the table interface built on bsddb.db
#-----------------------------------------------------------------------
#
# Copyright (C) 2000, 2001 by Autonomous Zone Industries
# Copyright (C) 2002 Gregory P. Smith
#
# March 20, 2000
#
# License: This is free software. You may use this software for any
# purpose including modification/redistribution, so long as
# this header remains intact and that you do not claim any
# rights of ownership or authorship of this software. This
# software has been tested, but no warranty is expressed or
# implied.
#
# -- Gregory P. Smith <greg@krypto.org>
#
# $Id: test_dbtables.py 79285 2010-03-22 14:22:26Z jesus.cea $
import os, re, sys
if sys.version_info[0] < 3 :
try:
import cPickle
pickle = cPickle
except ImportError:
import pickle
else :
import pickle
import unittest
from test_all import db, dbtables, test_support, verbose, \
get_new_environment_path, get_new_database_path
#----------------------------------------------------------------------
class TableDBTestCase(unittest.TestCase):
db_name = 'test-table.db'
    def setUp(self):
        """Create a fresh environment directory and an open bsdTableDB."""
        import sys
        if sys.version_info[0] >= 3 :
            # On py3 the test_all helpers proxy the db module; disable the
            # proxy for the duration of the test and remember the old flag.
            from test_all import do_proxy_db_py3k
            self._flag_proxy_db_py3k = do_proxy_db_py3k(False)

        self.testHomeDir = get_new_environment_path()
        self.tdb = dbtables.bsdTableDB(
            filename='tabletest.db', dbhome=self.testHomeDir, create=1)
    def tearDown(self):
        """Close the table DB, restore the py3k proxy flag, remove the env."""
        self.tdb.close()
        import sys
        if sys.version_info[0] >= 3 :
            from test_all import do_proxy_db_py3k
            do_proxy_db_py3k(self._flag_proxy_db_py3k)
        test_support.rmtree(self.testHomeDir)
    def test01(self):
        """Round-trip a single pickled float through Insert/Select."""
        tabname = "test01"
        colname = 'cool numbers'
        try:
            self.tdb.Drop(tabname)
        except dbtables.TableDBError:
            # Table may not exist yet; that's fine.
            pass
        self.tdb.CreateTable(tabname, [colname])
        import sys
        if sys.version_info[0] < 3 :
            self.tdb.Insert(tabname, {colname: pickle.dumps(3.14159, 1)})
        else :
            # dbtables stores str on py3; latin-1 keeps a 1:1 byte mapping.
            self.tdb.Insert(tabname, {colname: pickle.dumps(3.14159,
                                                            1).decode("iso8859-1")})  # 8 bits

        if verbose:
            self.tdb._db_print()

        # A condition of None matches any value in the column.
        values = self.tdb.Select(
            tabname, [colname], conditions={colname: None})

        import sys
        if sys.version_info[0] < 3 :
            colval = pickle.loads(values[0][colname])
        else :
            colval = pickle.loads(bytes(values[0][colname], "iso8859-1"))
        # Avoid exact float equality on the unpickled value.
        self.assert_(colval > 3.141)
        self.assert_(colval < 3.142)
    def test02(self):
        """Select with a callable condition over three inserted rows."""
        tabname = "test02"
        col0 = 'coolness factor'
        col1 = 'but can it fly?'
        col2 = 'Species'

        import sys
        if sys.version_info[0] < 3 :
            testinfo = [
                {col0: pickle.dumps(8, 1), col1: 'no', col2: 'Penguin'},
                {col0: pickle.dumps(-1, 1), col1: 'no', col2: 'Turkey'},
                {col0: pickle.dumps(9, 1), col1: 'yes', col2: 'SR-71A Blackbird'}
            ]
        else :
            # dbtables stores str on py3; latin-1 keeps a 1:1 byte mapping.
            testinfo = [
                {col0: pickle.dumps(8, 1).decode("iso8859-1"),
                 col1: 'no', col2: 'Penguin'},
                {col0: pickle.dumps(-1, 1).decode("iso8859-1"),
                 col1: 'no', col2: 'Turkey'},
                {col0: pickle.dumps(9, 1).decode("iso8859-1"),
                 col1: 'yes', col2: 'SR-71A Blackbird'}
            ]

        try:
            self.tdb.Drop(tabname)
        except dbtables.TableDBError:
            pass
        self.tdb.CreateTable(tabname, [col0, col1, col2])
        for row in testinfo :
            self.tdb.Insert(tabname, row)

        import sys
        # Only 'Penguin' (8) and 'SR-71A Blackbird' (9) satisfy >= 8.
        if sys.version_info[0] < 3 :
            values = self.tdb.Select(tabname, [col2],
                                     conditions={col0: lambda x: pickle.loads(x) >= 8})
        else :
            values = self.tdb.Select(tabname, [col2],
                                     conditions={col0: lambda x:
                                                 pickle.loads(bytes(x, "iso8859-1")) >= 8})

        self.assertEqual(len(values), 2)
        # Select() row order is not guaranteed; accept either ordering.
        if values[0]['Species'] == 'Penguin' :
            self.assertEqual(values[1]['Species'], 'SR-71A Blackbird')
        elif values[0]['Species'] == 'SR-71A Blackbird' :
            self.assertEqual(values[1]['Species'], 'Penguin')
        else :
            if verbose:
                print "values= %r" % (values,)
            raise RuntimeError("Wrong values returned!")
    def test03(self):
        """Exercise Drop/re-Create, bad-column errors, regex and lambda
        conditions, and Delete."""
        tabname = "test03"
        try:
            self.tdb.Drop(tabname)
        except dbtables.TableDBError:
            pass
        if verbose:
            print '...before CreateTable...'
            self.tdb._db_print()
        self.tdb.CreateTable(tabname, ['a', 'b', 'c', 'd', 'e'])
        if verbose:
            print '...after CreateTable...'
            self.tdb._db_print()
        self.tdb.Drop(tabname)
        if verbose:
            print '...after Drop...'
            self.tdb._db_print()
        self.tdb.CreateTable(tabname, ['a', 'b', 'c', 'd', 'e'])

        try:
            # Column 'f' does not exist; Insert must raise.
            self.tdb.Insert(tabname,
                            {'a': "",
                             'e': pickle.dumps([{4:5, 6:7}, 'foo'], 1),
                             'f': "Zero"})
            self.fail('Expected an exception')
        except dbtables.TableDBError:
            pass

        try:
            # Selecting on an unknown column must also raise.
            self.tdb.Select(tabname, [], conditions={'foo': '123'})
            self.fail('Expected an exception')
        except dbtables.TableDBError:
            pass

        self.tdb.Insert(tabname,
                        {'a': '42',
                         'b': "bad",
                         'c': "meep",
                         'e': 'Fuzzy wuzzy was a bear'})
        self.tdb.Insert(tabname,
                        {'a': '581750',
                         'b': "good",
                         'd': "bla",
                         'c': "black",
                         'e': 'fuzzy was here'})
        self.tdb.Insert(tabname,
                        {'a': '800000',
                         'b': "good",
                         'd': "bla",
                         'c': "black",
                         'e': 'Fuzzy wuzzy is a bear'})

        if verbose:
            self.tdb._db_print()

        # this should return two rows
        values = self.tdb.Select(tabname, ['b', 'a', 'd'],
                                 conditions={'e': re.compile('wuzzy').search,
                                             'a': re.compile('^[0-9]+$').match})
        self.assertEqual(len(values), 2)

        # now lets delete one of them and try again
        self.tdb.Delete(tabname, conditions={'b': dbtables.ExactCond('good')})
        values = self.tdb.Select(
            tabname, ['a', 'd', 'b'],
            conditions={'e': dbtables.PrefixCond('Fuzzy')})
        self.assertEqual(len(values), 1)
        # The surviving row never had a 'd' value.
        self.assertEqual(values[0]['d'], None)

        values = self.tdb.Select(tabname, ['b'],
                                 conditions={'c': lambda c: c == 'meep'})
        self.assertEqual(len(values), 1)
        self.assertEqual(values[0]['b'], "bad")
    def test04_MultiCondSelect(self):
        """Multiple conditions must be ANDed: no row satisfies all three."""
        tabname = "test04_MultiCondSelect"
        try:
            self.tdb.Drop(tabname)
        except dbtables.TableDBError:
            pass
        self.tdb.CreateTable(tabname, ['a', 'b', 'c', 'd', 'e'])

        try:
            # Column 'f' does not exist; Insert must raise.
            self.tdb.Insert(tabname,
                            {'a': "",
                             'e': pickle.dumps([{4:5, 6:7}, 'foo'], 1),
                             'f': "Zero"})
            self.fail('Expected an exception')
        except dbtables.TableDBError:
            pass

        self.tdb.Insert(tabname, {'a': "A", 'b': "B", 'c': "C", 'd': "D",
                                  'e': "E"})
        self.tdb.Insert(tabname, {'a': "-A", 'b': "-B", 'c': "-C", 'd': "-D",
                                  'e': "-E"})
        self.tdb.Insert(tabname, {'a': "A-", 'b': "B-", 'c': "C-", 'd': "D-",
                                  'e': "E-"})

        if verbose:
            self.tdb._db_print()

        # This select should return 0 rows.  it is designed to test
        # the bug identified and fixed in sourceforge bug # 590449
        # (Big Thanks to "Rob Tillotson (n9mtb)" for tracking this down
        # and supplying a fix!!  This one caused many headaches to say
        # the least...)
        values = self.tdb.Select(tabname, ['b', 'a', 'd'],
                                 conditions={'e': dbtables.ExactCond('E'),
                                             'a': dbtables.ExactCond('A'),
                                             'd': dbtables.PrefixCond('-')
                                            } )
        self.assertEqual(len(values), 0, values)
def test_CreateOrExtend(self):
tabname = "test_CreateOrExtend"
self.tdb.CreateOrExtendTable(
tabname, ['name', 'taste', 'filling', 'alcohol content', 'price'])
try:
self.tdb.Insert(tabname,
{'taste': 'crap',
'filling': 'no',
'is it Guinness?': 'no'})
self.fail("Insert should've failed due to bad column name")
except:
pass
self.tdb.CreateOrExtendTable(tabname,
['name', 'taste', 'is it Guinness?'])
# these should both succeed as the table should contain the union of both sets of columns.
self.tdb.Insert(tabname, {'taste': 'crap', 'filling': 'no',
'is it Guinness?': 'no'})
self.tdb.Insert(tabname, {'taste': 'great', 'filling': 'yes',
'is it Guinness?': 'yes',
'name': 'Guinness'})
    def test_CondObjs(self):
        """Exercise the condition objects (Cond/Prefix/Like/Exact) singly
        and combined in one Select(); a plain callable is also accepted."""
        tabname = "test_CondObjs"
        self.tdb.CreateTable(tabname, ['a', 'b', 'c', 'd', 'e', 'p'])
        self.tdb.Insert(tabname, {'a': "the letter A",
                                  'b': "the letter B",
                                  'c': "is for cookie"})
        self.tdb.Insert(tabname, {'a': "is for aardvark",
                                  'e': "the letter E",
                                  'c': "is for cookie",
                                  'd': "is for dog"})
        self.tdb.Insert(tabname, {'a': "the letter A",
                                  'e': "the letter E",
                                  'c': "is for cookie",
                                  'p': "is for Python"})
        # PrefixCond on 'e' matches the two rows that have 'the letter E';
        # the first row has no 'e' value at all.
        values = self.tdb.Select(
            tabname, ['p', 'e'],
            conditions={'e': dbtables.PrefixCond('the l')})
        self.assertEqual(len(values), 2, values)
        self.assertEqual(values[0]['e'], values[1]['e'], values)
        self.assertNotEqual(values[0]['p'], values[1]['p'], values)
        # LikeCond with wildcards on both sides.
        values = self.tdb.Select(
            tabname, ['d', 'a'],
            conditions={'a': dbtables.LikeCond('%aardvark%')})
        self.assertEqual(len(values), 1, values)
        self.assertEqual(values[0]['d'], "is for dog", values)
        self.assertEqual(values[0]['a'], "is for aardvark", values)
        # Every condition type at once, including a raw lambda for 'p'.
        values = self.tdb.Select(tabname, None,
                                 {'b': dbtables.Cond(),
                                  'e':dbtables.LikeCond('%letter%'),
                                  'a':dbtables.PrefixCond('is'),
                                  'd':dbtables.ExactCond('is for dog'),
                                  'c':dbtables.PrefixCond('is for'),
                                  'p':lambda s: not s})
        self.assertEqual(len(values), 1, values)
        self.assertEqual(values[0]['d'], "is for dog", values)
        self.assertEqual(values[0]['a'], "is for aardvark", values)
    def test_Delete(self):
        """Delete() must cope with rows that lack values in some columns."""
        tabname = "test_Delete"
        self.tdb.CreateTable(tabname, ['x', 'y', 'z'])
        # prior to 2001-05-09 there was a bug where Delete() would
        # fail if it encountered any rows that did not have values in
        # every column.
        # Hunted and Squashed by <Donwulff> (Jukka Santala - donwulff@nic.fi)
        self.tdb.Insert(tabname, {'x': 'X1', 'y':'Y1'})
        self.tdb.Insert(tabname, {'x': 'X2', 'y':'Y2', 'z': 'Z2'})
        self.tdb.Delete(tabname, conditions={'x': dbtables.PrefixCond('X')})
        # Both rows matched the prefix condition, so none should remain.
        values = self.tdb.Select(tabname, ['y'],
                                 conditions={'x': dbtables.PrefixCond('X')})
        self.assertEqual(len(values), 0)
def test_Modify(self):
tabname = "test_Modify"
self.tdb.CreateTable(tabname, ['Name', 'Type', 'Access'])
self.tdb.Insert(tabname, {'Name': 'Index to MP3 files.doc',
'Type': 'Word', 'Access': '8'})
self.tdb.Insert(tabname, {'Name': 'Nifty.MP3', 'Access': '1'})
self.tdb.Insert(tabname, {'Type': 'Unknown', 'Access': '0'})
def set_type(type):
if type is None:
return 'MP3'
return type
def increment_access(count):
return str(int(count)+1)
def remove_value(value):
return None
self.tdb.Modify(tabname,
conditions={'Access': dbtables.ExactCond('0')},
mappings={'Access': remove_value})
self.tdb.Modify(tabname,
conditions={'Name': dbtables.LikeCond('%MP3%')},
mappings={'Type': set_type})
self.tdb.Modify(tabname,
conditions={'Name': dbtables.LikeCond('%')},
mappings={'Access': increment_access})
try:
self.tdb.Modify(tabname,
conditions={'Name': dbtables.LikeCond('%')},
mappings={'Access': 'What is your quest?'})
except TypeError:
# success, the string value in mappings isn't callable
pass
else:
raise RuntimeError, "why was TypeError not raised for bad callable?"
# Delete key in select conditions
values = self.tdb.Select(
tabname, None,
conditions={'Type': dbtables.ExactCond('Unknown')})
self.assertEqual(len(values), 1, values)
self.assertEqual(values[0]['Name'], None, values)
self.assertEqual(values[0]['Access'], None, values)
# Modify value by select conditions
values = self.tdb.Select(
tabname, None,
conditions={'Name': dbtables.ExactCond('Nifty.MP3')})
self.assertEqual(len(values), 1, values)
self.assertEqual(values[0]['Type'], "MP3", values)
self.assertEqual(values[0]['Access'], "2", values)
# Make sure change applied only to select conditions
values = self.tdb.Select(
tabname, None, conditions={'Name': dbtables.LikeCond('%doc%')})
self.assertEqual(len(values), 1, values)
self.assertEqual(values[0]['Type'], "Word", values)
self.assertEqual(values[0]['Access'], "9", values)
def test_suite():
    """Build the TestSuite for this module (used by regrtest and __main__)."""
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(TableDBTestCase))
    return suite
# Run the suite directly when executed as a script.
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| |
"""Test code for sparse operator"""
import numpy as np
import tvm
import topi
import topi.testing
from topi.util import get_const_tuple
import tvm.contrib.sparse as tvmsp
from collections import namedtuple
import time
def verify_dynamic_csrmv(batch, in_dim, out_dim, use_bias=True):
    """Check topi.sparse.csrmv (CSR matrix-vector product) against numpy.

    The sparse operand has fully symbolic shape, so the row count is passed
    to the compiled kernel as an explicit argument at call time.

    batch/in_dim size the operands; out_dim is unused here (csrmv always
    produces a (rows, 1) result); use_bias toggles adding the bias vector C.
    """
    # Symbolic shapes: rows, cols and nonzero count are runtime values.
    nr, nc, n = tvm.var("nr"), tvm.var("nc"), tvm.var("n")
    dtype = 'float32'
    A = tvmsp.placeholder(shape=(nr, nc), nonzeros=n, dtype=dtype, name='A')
    B = tvm.placeholder((in_dim, 1), name='B')
    C = tvm.placeholder((nr,), name='C')
    D = topi.sparse.csrmv(A, B, C if use_bias else None)
    s = tvm.create_schedule(D.op)
    dtype = A.dtype
    # get the test data
    def get_ref_data():
        # Clipping at zero zeroes out roughly half the entries, so the dense
        # array converts to a non-trivial CSR matrix.
        a_np = np.maximum(np.random.uniform(size=(batch, in_dim)).astype(dtype)-0.5, 0.)
        b_np = np.random.uniform(size=(in_dim, 1)).astype(dtype)-0.5
        c_np = np.random.uniform(size=(batch, )).astype(dtype)
        if use_bias:
            d_np = np.dot(a_np, b_np) + c_np.reshape((batch, 1))
        else:
            d_np = np.dot(a_np, b_np)
        return (a_np, b_np, c_np, d_np)
    a_np, b_np, c_np, d_np = get_ref_data()
    def check_device(device):
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        a = tvmsp.array(a_np, ctx)
        _nr, _nc, _n = a.shape[0], a.shape[1], a.data.shape[0]
        # CSR invariant: indptr has one entry per row plus one.
        assert a.shape[0] == a.indptr.shape[0]-1
        b = tvm.nd.array(b_np, ctx)
        c = tvm.nd.array(c_np, ctx)
        d = tvm.nd.array(np.zeros((_nr, 1), dtype=dtype), ctx)
        assert a.data.dtype == A.data.dtype
        assert a.indices.dtype == A.indices.dtype
        assert a.indptr.dtype == A.indptr.dtype
        # The symbolic row count `nr` is an explicit kernel argument.
        f = tvm.build(s, [nr, A.data, A.indices, A.indptr, B, C, D], device, name="csrmv")
        f(_nr, a.data, a.indices, a.indptr, b, c, d)
        np.testing.assert_allclose(d.asnumpy(), d_np, rtol=1e-4, atol=1e-4)
    for device in ["llvm"]:
        check_device(device)
def verify_dynamic_csrmm(batch, in_dim, out_dim, use_bias=True):
    """Check topi.sparse.csrmm (CSR matrix-matrix product) against numpy.

    The sparse operand has fully symbolic shape, so the row count is passed
    to the compiled kernel as an explicit argument at call time.
    """
    # Symbolic shapes: rows, cols and nonzero count are runtime values.
    nr, nc, n = tvm.var("nr"), tvm.var("nc"), tvm.var("n")
    dtype = 'float32'
    A = tvmsp.placeholder(shape=(nr, nc), nonzeros=n, dtype=dtype, name='A')
    B = tvm.placeholder((in_dim, out_dim), name='B')
    C = tvm.placeholder((nr,), name='C')
    D = topi.sparse.csrmm(A, B, C if use_bias else None)
    s = tvm.create_schedule(D.op)
    dtype = A.dtype
    # get the test data
    def get_ref_data():
        # Clipping at zero zeroes out roughly half the entries, so the dense
        # array converts to a non-trivial CSR matrix.
        a_np = np.maximum(np.random.uniform(size=(batch, in_dim)).astype(dtype)-0.5, 0.)
        b_np = np.random.uniform(size=(in_dim, out_dim)).astype(dtype)-0.5
        c_np = np.random.uniform(size=(batch, )).astype(dtype)
        if use_bias:
            # Bias is broadcast across the output columns.
            d_np = np.dot(a_np, b_np) + c_np.reshape((batch, 1))
        else:
            d_np = np.dot(a_np, b_np)
        return (a_np, b_np, c_np, d_np)
    a_np, b_np, c_np, d_np = get_ref_data()
    def check_device(device):
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        a = tvmsp.array(a_np, ctx)
        _nr, _nc, _n = a.shape[0], a.shape[1], a.data.shape[0]
        # CSR invariant: indptr has one entry per row plus one.
        assert a.shape[0] == a.indptr.shape[0]-1
        b = tvm.nd.array(b_np, ctx)
        c = tvm.nd.array(c_np, ctx)
        d = tvm.nd.array(np.zeros((_nr, out_dim), dtype=dtype), ctx)
        f = tvm.build(s, [nr, A.data, A.indices, A.indptr, B, C, D], device, name="csrmm")
        f(_nr, a.data, a.indices, a.indptr, b, c, d)
        np.testing.assert_allclose(d.asnumpy(), d_np, rtol=1e-2, atol=1e-2)
    for device in ["llvm"]:
        check_device(device)
def verify_dense_si(batch, in_dim, out_dim, use_bias=True, dtype='float32'):
    """Check topi.sparse.dense with a sparse *input* matrix (si) against
    numpy, for the given dtype."""
    nonzeros = tvm.var('nonzeros')
    A = tvmsp.placeholder(shape=(batch, in_dim), nonzeros=nonzeros, dtype=dtype, name='A')
    B = tvm.placeholder((out_dim, in_dim), dtype=dtype, name='B')
    C = tvm.placeholder((out_dim,), dtype=dtype, name='C')
    D = topi.sparse.dense(A, B, C if use_bias else None)
    s = tvm.create_schedule(D.op)
    # get the test data
    def get_ref_data():
        mag = 10.
        # Scale by `mag` and generate in float32 before casting so integer
        # dtypes get non-trivial values; clipping at zero makes A sparse.
        a_np = np.maximum(mag*(np.random.uniform(size=(batch, in_dim)).astype('float32')-0.5), 0.).astype(dtype)
        b_np = (mag*(np.random.uniform(size=(out_dim, in_dim)).astype('float32')-.5)).astype(dtype)
        c_np = (mag*(np.random.uniform(size=(out_dim,)).astype('float32')-.5)).astype(dtype)
        if use_bias:
            d_np = np.dot(a_np, b_np.T) + c_np
        else:
            d_np = np.dot(a_np, b_np.T)
        return (a_np, b_np, c_np, d_np)
    a_np, b_np, c_np, d_np = get_ref_data()
    def check_device(device):
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        a = tvmsp.array(a_np, ctx)
        b = tvm.nd.array(b_np, ctx)
        c = tvm.nd.array(c_np, ctx)
        d = tvm.nd.array(np.zeros(get_const_tuple(D.shape), dtype=dtype), ctx)
        # The sparse operand is passed as its three CSR arrays.
        f = tvm.build(s, [A.data, A.indices, A.indptr, B, C, D], device, name="dense")
        f(a.data, a.indices, a.indptr, b, c, d)
        np.testing.assert_allclose(d.asnumpy(), d_np, rtol=1e-4, atol=1e-4)
    check_device('llvm')
def verify_dense_sw(batch, in_dim, out_dim, use_bias=True, dtype='float32'):
    """Check topi.sparse.dense with a sparse *weight* matrix (sw) against
    numpy, for the given dtype."""
    nonzeros = tvm.var('nonzeros')
    A = tvm.placeholder((batch, in_dim), dtype=dtype, name='A')
    B = tvmsp.placeholder(shape=(out_dim, in_dim), nonzeros=nonzeros, dtype=dtype, name='B')
    C = tvm.placeholder((out_dim,), dtype=dtype, name='C')
    D = topi.sparse.dense(A, B, C if use_bias else None)
    s = tvm.create_schedule(D.op)
    # get the test data
    def get_ref_data():
        mag = 10.
        # Scale by `mag` and generate in float32 before casting so integer
        # dtypes get non-trivial values; clipping at zero makes B sparse.
        a_np = (mag*(np.random.uniform(size=(batch, in_dim)).astype('float32')-.5)).astype(dtype)
        b_np = np.maximum(mag*(np.random.uniform(size=(out_dim, in_dim)).astype('float32')-0.5), 0.).astype(dtype)
        c_np = (mag*(np.random.uniform(size=(out_dim,)).astype('float32')-.5)).astype(dtype)
        if use_bias:
            d_np = np.dot(a_np, b_np.T) + c_np
        else:
            d_np = np.dot(a_np, b_np.T)
        return (a_np, b_np, c_np, d_np)
    a_np, b_np, c_np, d_np = get_ref_data()
    def check_device(device):
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        a = tvm.nd.array(a_np, ctx)
        b = tvmsp.array(b_np, ctx)
        c = tvm.nd.array(c_np, ctx)
        d = tvm.nd.array(np.zeros(get_const_tuple(D.shape), dtype=dtype), ctx)
        # The sparse operand is passed as its three CSR arrays.
        f = tvm.build(s, [A, B.data, B.indices, B.indptr, C, D], device, name="dense")
        f(a, b.data, b.indices, b.indptr, c, d)
        np.testing.assert_allclose(d.asnumpy(), d_np, rtol=1e-4, atol=1e-4)
    check_device('llvm')
def test_csrmv():
    """Exercise csrmv without and then with the bias term."""
    for with_bias in (False, True):
        verify_dynamic_csrmv(batch=5, in_dim=7, out_dim=1, use_bias=with_bias)
def test_csrmm():
    """Exercise csrmm without and then with the bias term."""
    m, k, n = 5, 7, 2
    for with_bias in (False, True):
        verify_dynamic_csrmm(batch=m, in_dim=k, out_dim=n, use_bias=with_bias)
def test_dense_si():
    """Exercise sparse-input dense for each dtype, without and with bias."""
    m, k, n = 3, 5, 2
    for dt in ('float32', 'int32', 'int16'):
        for with_bias in (False, True):
            verify_dense_si(batch=m, in_dim=k, out_dim=n,
                            use_bias=with_bias, dtype=dt)
def test_dense_sw():
    """Exercise sparse-weight dense for each dtype, without and with bias."""
    m, k, n = 3, 5, 2
    for dt in ('float32', 'int32', 'int16'):
        for with_bias in (False, True):
            verify_dense_sw(batch=m, in_dim=k, out_dim=n,
                            use_bias=with_bias, dtype=dt)
def test_dense():
    """Run both dense variants: sparse input, then sparse weight."""
    for case in (test_dense_si, test_dense_sw):
        case()
# Run the whole sparse-operator test suite when executed as a script.
if __name__ == "__main__":
    test_csrmv()
    test_csrmm()
    test_dense()
| |
# -*- coding: utf-8 -*-
import numpy as np
from scipy.linalg.blas import get_blas_funcs
# Prefer a BLAS routine for vector norms.  get_blas_funcs prefixes the
# requested name by a dtype code, so "znrm2" with float64 presumably
# resolves to dznrm2 (complex double-precision norm) -- TODO confirm.
# Fall back to scipy.linalg.norm if the BLAS symbol cannot be obtained.
# NOTE(review): the bare `except:` also hides unrelated errors.
try:
    norm = get_blas_funcs("znrm2", dtype=np.float64)
except:
    from scipy.linalg import norm
from numpy.random import RandomState
from qutip.qobj import Qobj, isket
from qutip.solver import Result
from qutip.expect import expect, expect_rho_vec
from qutip.superoperator import (spre, spost, mat2vec, vec2mat,
liouvillian)
from qutip.cy.spmatfuncs import cy_expect_psi_csr, spmv
from qutip.parallel import serial_map
from qutip.ui.progressbar import TextProgressBar
from qutip.solver import Options
from qutip.settings import debug
class StochasticSolverOptions:
    """Class of options for stochastic (piecewise deterministic process) PDP
    solvers such as :func:`qutip.pdpsolve.ssepdpsolve`,
    :func:`qutip.pdpsolve.smepdpsolve`.
    Options can be specified either as arguments to the constructor::
        sso = StochasticSolverOptions(nsubsteps=100, ...)
    or by changing the class attributes after creation::
        sso = StochasticSolverOptions()
        sso.nsubsteps = 1000
    The stochastic solvers :func:`qutip.pdpsolve.ssepdpsolve` and
    :func:`qutip.pdpsolve.smepdpsolve` all take the same keyword arguments as
    the constructor of these class, and internally they use these arguments to
    construct an instance of this class, so it is rarely needed to explicitly
    create an instance of this class.
    Attributes
    ----------
    H : :class:`qutip.Qobj`
        System Hamiltonian.
    state0 : :class:`qutip.Qobj`
        Initial state vector (ket) or density matrix.
    times : *list* / *array*
        List of times for :math:`t`. Must be uniformly spaced.
    c_ops : list of :class:`qutip.Qobj`
        List of deterministic collapse operators.
    sc_ops : list of :class:`qutip.Qobj`
        List of stochastic collapse operators. Each stochastic collapse
        operator will give a deterministic and stochastic contribution
        to the equation of motion according to how the d1 and d2 functions
        are defined.
    e_ops : list of :class:`qutip.Qobj`
        Single operator or list of operators for which to evaluate
        expectation values.
    m_ops : list of :class:`qutip.Qobj`
        List of operators representing the measurement operators. The expected
        format is a nested list with one measurement operator for each
        stochastic increment, for each stochastic collapse operator.
    args : dict / list
        List of dictionary of additional problem-specific parameters.
        Implicit methods can adjust tolerance via args = {'tol':value}
    ntraj : int
        Number of trajectories.
    nsubsteps : int
        Number of sub steps between each time-step given in `times`.
    d1 : function
        Function for calculating the operator-valued coefficient to the
        deterministic increment dt.
    d2 : function
        Function for calculating the operator-valued coefficient to the
        stochastic increment(s) dW_n, where n is in [0, d2_len[.
    d2_len : int (default 1)
        The number of stochastic increments in the process.
    dW_factors : array
        Array of length d2_len, containing scaling factors for each
        measurement operator in m_ops.
    rhs : function
        Function for calculating the deterministic and stochastic contributions
        to the right-hand side of the stochastic differential equation. This
        only needs to be specified when implementing a custom SDE solver.
    generate_A_ops : function
        Function that generates a list of pre-computed operators or super-
        operators. These precomputed operators are used in some d1 and d2
        functions.
    generate_noise : function
        Function for generate an array of pre-computed noise signal.
    homogeneous : bool (True)
        Whether or not the stochastic process is homogeneous. Inhomogeneous
        processes are only supported for poisson distributions.
    solver : string
        Name of the solver method to use for solving the stochastic
        equations. Valid values are:
        1/2 order algorithms: 'euler-maruyama', 'fast-euler-maruyama',
        'pc-euler' is a predictor-corrector method which is more
        stable than explicit methods,
        1 order algorithms: 'milstein', 'fast-milstein', 'platen',
        'milstein-imp' is semi-implicit Milstein method,
        3/2 order algorithms: 'taylor15',
        'taylor15-imp' is semi-implicit Taylor 1.5 method.
        Implicit methods can adjust tolerance via args = {'tol':value},
        default is {'tol':1e-6}
    method : string ('homodyne', 'heterodyne', 'photocurrent')
        The name of the type of measurement process that give rise to the
        stochastic equation to solve. Specifying a method with this keyword
        argument is a short-hand notation for using pre-defined d1 and d2
        functions for the corresponding stochastic processes.
    distribution : string ('normal', 'poisson')
        The name of the distribution used for the stochastic increments.
    store_measurements : bool (default False)
        Whether or not to store the measurement results in the
        :class:`qutip.solver.Result` instance returned by the solver.
    noise : array
        Vector specifying the noise.
    normalize : bool (default True)
        Whether or not to normalize the wave function during the evolution.
    options : :class:`qutip.solver.Options`
        Generic solver options.
    map_func: function
        A map function for managing the calls to single-trajectory solvers.
    map_kwargs: dictionary
        Optional keyword arguments to the map_func function.
    progress_bar : :class:`qutip.ui.BaseProgressBar`
        Optional progress bar class instance.
    """
    def __init__(self, H=None, state0=None, times=None, c_ops=[], sc_ops=[],
                 e_ops=[], m_ops=None, args=None, ntraj=1, nsubsteps=1,
                 d1=None, d2=None, d2_len=1, dW_factors=None, rhs=None,
                 generate_A_ops=None, generate_noise=None, homogeneous=True,
                 solver=None, method=None, distribution='normal',
                 store_measurement=False, noise=None, normalize=True,
                 options=None, progress_bar=None, map_func=None,
                 map_kwargs=None):
        # Fall back to default solver options / progress bar when not given.
        if options is None:
            options = Options()
        if progress_bar is None:
            progress_bar = TextProgressBar()
        self.H = H
        self.d1 = d1
        self.d2 = d2
        self.d2_len = d2_len
        self.dW_factors = dW_factors# if dW_factors else np.ones(d2_len)
        self.state0 = state0
        self.times = times
        self.c_ops = c_ops
        self.sc_ops = sc_ops
        self.e_ops = e_ops
        #if m_ops is None:
        #    self.m_ops = [[c for _ in range(d2_len)] for c in sc_ops]
        #else:
        #    self.m_ops = m_ops
        self.m_ops = m_ops
        self.ntraj = ntraj
        self.nsubsteps = nsubsteps
        self.solver = solver
        self.method = method
        self.distribution = distribution
        self.homogeneous = homogeneous
        self.rhs = rhs
        self.options = options
        self.progress_bar = progress_bar
        self.store_measurement = store_measurement
        self.store_states = options.store_states
        self.noise = noise
        self.args = args
        self.normalize = normalize
        self.generate_noise = generate_noise
        self.generate_A_ops = generate_A_ops
        # Parallel mapping is only worthwhile for more than one trajectory.
        if self.ntraj > 1 and map_func:
            self.map_func = map_func
        else:
            self.map_func = serial_map
        self.map_kwargs = map_kwargs if map_kwargs is not None else {}
        #Does any operator depend on time?
        # Any non-Qobj entry (e.g. a [Qobj, coeff] pair) marks the problem
        # as time-dependent.
        self.td = False
        if not isinstance(H, Qobj):
            self.td = True
        for ops in c_ops:
            if not isinstance(ops, Qobj):
                self.td = True
        for ops in sc_ops:
            if not isinstance(ops, Qobj):
                self.td = True
def main_ssepdpsolve(H, psi0, times, c_ops, e_ops, **kwargs):
    """
    A stochastic (piecewise deterministic process) PDP solver for wavefunction
    evolution. For most purposes, use :func:`qutip.mcsolve` instead for quantum
    trajectory simulations.
    Parameters
    ----------
    H : :class:`qutip.Qobj`
        System Hamiltonian.
    psi0 : :class:`qutip.Qobj`
        Initial state vector (ket).
    times : *list* / *array*
        List of times for :math:`t`. Must be uniformly spaced.
    c_ops : list of :class:`qutip.Qobj`
        Deterministic collapse operator which will contribute with a standard
        Lindblad type of dissipation.
    e_ops : list of :class:`qutip.Qobj` / callback function single
        single operator or list of operators for which to evaluate
        expectation values.
    kwargs : *dictionary*
        Optional keyword arguments. See
        :class:`qutip.stochastic.StochasticSolverOptions`.
    Returns
    -------
    output: :class:`qutip.solver.Result`
        An instance of the class :class:`qutip.solver.Result`.
    """
    if debug:
        # Bug fix: `logger` and `inspect` are not defined at module level,
        # so the debug path raised NameError; resolve both locally.
        import inspect
        import logging
        logging.getLogger(__name__).debug(inspect.stack()[0][3])

    # Accept a dict of e_ops; remember the keys so the expectation values
    # can be returned under the same labels.
    if isinstance(e_ops, dict):
        e_ops_dict = e_ops
        e_ops = [e for e in e_ops.values()]
    else:
        e_ops_dict = None

    sso = StochasticSolverOptions(H=H, state0=psi0, times=times, c_ops=c_ops,
                                  e_ops=e_ops, **kwargs)

    res = _ssepdpsolve_generic(sso, sso.options, sso.progress_bar)

    if e_ops_dict:
        res.expect = {e: res.expect[n]
                      for n, e in enumerate(e_ops_dict.keys())}
    return res
def main_smepdpsolve(H, rho0, times, c_ops, e_ops, **kwargs):
    """
    A stochastic (piecewise deterministic process) PDP solver for density
    matrix evolution.
    Parameters
    ----------
    H : :class:`qutip.Qobj`
        System Hamiltonian.
    rho0 : :class:`qutip.Qobj`
        Initial density matrix.
    times : *list* / *array*
        List of times for :math:`t`. Must be uniformly spaced.
    c_ops : list of :class:`qutip.Qobj`
        Deterministic collapse operator which will contribute with a standard
        Lindblad type of dissipation.
    sc_ops : list of :class:`qutip.Qobj`
        List of stochastic collapse operators. Each stochastic collapse
        operator will give a deterministic and stochastic contribution
        to the equation of motion according to how the d1 and d2 functions
        are defined.
    e_ops : list of :class:`qutip.Qobj` / callback function single
        single operator or list of operators for which to evaluate
        expectation values.
    kwargs : *dictionary*
        Optional keyword arguments. See
        :class:`qutip.stochastic.StochasticSolverOptions`.
    Returns
    -------
    output: :class:`qutip.solver.Result`
        An instance of the class :class:`qutip.solver.Result`.
    """
    if debug:
        # Bug fix: `logger` and `inspect` are not defined at module level,
        # so the debug path raised NameError; resolve both locally.
        import inspect
        import logging
        logging.getLogger(__name__).debug(inspect.stack()[0][3])

    # Accept a dict of e_ops; remember the keys so the expectation values
    # can be returned under the same labels.
    if isinstance(e_ops, dict):
        e_ops_dict = e_ops
        e_ops = [e for e in e_ops.values()]
    else:
        e_ops_dict = None

    sso = StochasticSolverOptions(H=H, state0=rho0, times=times, c_ops=c_ops,
                                  e_ops=e_ops, **kwargs)

    res = _smepdpsolve_generic(sso, sso.options, sso.progress_bar)

    if e_ops_dict:
        res.expect = {e: res.expect[n]
                      for n, e in enumerate(e_ops_dict.keys())}
    return res
# -----------------------------------------------------------------------------
# Generic parameterized stochastic SE PDP solver
#
def _ssepdpsolve_generic(sso, options, progress_bar):
    """
    For internal use. See ssepdpsolve.

    Runs `sso.ntraj` wave-function trajectories, accumulating expectation
    values and their squares, then averages and computes standard errors.
    """
    if debug:
        # Bug fix: `logger` and `inspect` are not defined at module level,
        # so the debug path raised NameError; resolve both locally.
        import inspect
        import logging
        logging.getLogger(__name__).debug(inspect.stack()[0][3])

    N_store = len(sso.times)
    N_substeps = sso.nsubsteps
    dt = (sso.times[1] - sso.times[0]) / N_substeps
    nt = sso.ntraj

    data = Result()
    data.solver = "sepdpsolve"
    # Bug fix: StochasticSolverOptions stores the time grid as `times`;
    # `sso.tlist` does not exist and raised AttributeError here.
    data.times = sso.times
    data.expect = np.zeros((len(sso.e_ops), N_store), dtype=complex)
    # Sum of squared expectation values, used for the standard error below.
    data.ss = np.zeros((len(sso.e_ops), N_store), dtype=complex)
    data.jump_times = []
    data.jump_op_idx = []

    # effective hamiltonian for deterministic part
    Heff = sso.H
    for c in sso.c_ops:
        Heff += -0.5j * c.dag() * c

    progress_bar.start(sso.ntraj)
    for n in range(sso.ntraj):
        progress_bar.update(n)
        psi_t = sso.state0.full().ravel()
        states_list, jump_times, jump_op_idx = \
            _ssepdpsolve_single_trajectory(data, Heff, dt, sso.times,
                                           N_store, N_substeps,
                                           psi_t, sso.state0.dims,
                                           sso.c_ops, sso.e_ops)
        data.states.append(states_list)
        data.jump_times.append(jump_times)
        data.jump_op_idx.append(jump_op_idx)
    progress_bar.finished()

    # average density matrices
    if options.average_states and np.any(data.states):
        data.states = [sum([data.states[m][n] for m in range(nt)]).unit()
                       for n in range(len(data.times))]

    # average
    data.expect = data.expect / nt

    # standard error
    if nt > 1:
        data.se = (data.ss - nt * (data.expect ** 2)) / (nt * (nt - 1))
    else:
        data.se = None

    # convert complex data to real if hermitian
    data.expect = [np.real(data.expect[n, :])
                   if e.isherm else data.expect[n, :]
                   for n, e in enumerate(sso.e_ops)]

    return data
def _ssepdpsolve_single_trajectory(data, Heff, dt, times, N_store, N_substeps, psi_t, dims, c_ops, e_ops):
    """
    Internal function. See ssepdpsolve.

    Evolves one wave-function trajectory of the piecewise deterministic
    process.  `phi_t` evolves without renormalization: its norm decay drives
    the jump condition.  `psi_t` is the physical, normalized state.
    Expectation values (and their squares) are accumulated into `data`.
    """
    states_list = []
    phi_t = np.copy(psi_t)
    prng = RandomState()  # todo: seed it
    r_jump, r_op = prng.rand(2)
    jump_times = []
    jump_op_idx = []
    for t_idx, t in enumerate(times):
        if e_ops:
            for e_idx, e in enumerate(e_ops):
                s = cy_expect_psi_csr(
                    e.data.data, e.data.indices, e.data.indptr, psi_t, 0)
                data.expect[e_idx, t_idx] += s
                data.ss[e_idx, t_idx] += s ** 2
        else:
            states_list.append(Qobj(psi_t, dims=dims))
        for j in range(N_substeps):
            # Jump when the norm of the unnormalized state drops below the
            # random threshold.
            if norm(phi_t) ** 2 < r_jump:
                # jump occurs
                # Choose the collapse channel from the cumulative jump
                # probabilities and the second random number.
                p = np.array([norm(c.data * psi_t) ** 2 for c in c_ops])
                p = np.cumsum(p / np.sum(p))
                n = np.where(p >= r_op)[0][0]
                # apply jump
                psi_t = c_ops[n].data * psi_t
                psi_t /= norm(psi_t)
                phi_t = np.copy(psi_t)
                # store info about jump
                jump_times.append(times[t_idx] + dt * j)
                jump_op_idx.append(n)
                # get new random numbers for next jump
                r_jump, r_op = prng.rand(2)
            # deterministic evolution without correction for norm decay
            dphi_t = (-1.0j * dt) * (Heff.data * phi_t)
            # deterministic evolution with correction for norm decay
            dpsi_t = (-1.0j * dt) * (Heff.data * psi_t)
            A = 0.5 * np.sum([norm(c.data * psi_t) ** 2 for c in c_ops])
            dpsi_t += dt * A * psi_t
            # increment wavefunctions
            phi_t += dphi_t
            psi_t += dpsi_t
            # ensure that normalized wavefunction remains normalized
            # this allows larger time step than otherwise would be possible
            psi_t /= norm(psi_t)
    return states_list, jump_times, jump_op_idx
# -----------------------------------------------------------------------------
# Generic parameterized stochastic ME PDP solver
#
def _smepdpsolve_generic(sso, options, progress_bar):
    """
    For internal use. See smepdpsolve.

    Runs `sso.ntraj` density-matrix trajectories and averages the results.
    """
    if debug:
        # Bug fix: `logger` and `inspect` are not defined at module level,
        # so the debug path raised NameError; resolve both locally.
        import inspect
        import logging
        logging.getLogger(__name__).debug(inspect.stack()[0][3])

    N_store = len(sso.times)
    N_substeps = sso.nsubsteps
    dt = (sso.times[1] - sso.times[0]) / N_substeps
    nt = sso.ntraj

    data = Result()
    data.solver = "smepdpsolve"
    data.times = sso.times
    data.expect = np.zeros((len(sso.e_ops), N_store), dtype=complex)
    # Bug fix: data.ss is read in the standard-error computation below but
    # was never initialized, raising AttributeError whenever ntraj > 1.
    # NOTE(review): the single-trajectory helper does not accumulate into
    # data.ss, so the standard error is based on zero sums -- confirm the
    # intended statistics.
    data.ss = np.zeros((len(sso.e_ops), N_store), dtype=complex)
    data.jump_times = []
    data.jump_op_idx = []

    # Liouvillian for the deterministic part.
    # needs to be modified for TD systems
    L = liouvillian(sso.H, sso.c_ops)

    progress_bar.start(sso.ntraj)
    for n in range(sso.ntraj):
        progress_bar.update(n)
        # Bug fix: the initial state is stored as `sso.state0`;
        # `sso.rho0` does not exist and raised AttributeError.
        rho_t = mat2vec(sso.state0.full()).ravel()
        states_list, jump_times, jump_op_idx = \
            _smepdpsolve_single_trajectory(data, L, dt, sso.times,
                                           N_store, N_substeps,
                                           rho_t, sso.state0.dims,
                                           sso.c_ops, sso.e_ops)
        data.states.append(states_list)
        data.jump_times.append(jump_times)
        data.jump_op_idx.append(jump_op_idx)
    progress_bar.finished()

    # average density matrices
    if options.average_states and np.any(data.states):
        data.states = [sum([data.states[m][n] for m in range(nt)]).unit()
                       for n in range(len(data.times))]

    # average
    data.expect = data.expect / sso.ntraj

    # standard error
    if nt > 1:
        data.se = (data.ss - nt * (data.expect ** 2)) / (nt * (nt - 1))
    else:
        data.se = None

    return data
def _smepdpsolve_single_trajectory(data, L, dt, times, N_store, N_substeps, rho_t, dims, c_ops, e_ops):
    """
    Internal function. See smepdpsolve.

    Evolves one density-matrix trajectory of the piecewise deterministic
    process.  `sigma_t` evolves without renormalization: its norm decay
    drives the jump condition.  `rho_t` is the physical state (vectorized).
    """
    states_list = []

    rho_t = np.copy(rho_t)
    sigma_t = np.copy(rho_t)

    prng = RandomState()  # todo: seed it
    r_jump, r_op = prng.rand(2)

    jump_times = []
    jump_op_idx = []

    for t_idx, t in enumerate(times):
        if e_ops:
            for e_idx, e in enumerate(e_ops):
                data.expect[e_idx, t_idx] += expect_rho_vec(e, rho_t)
        else:
            states_list.append(Qobj(vec2mat(rho_t), dims=dims))

        for j in range(N_substeps):
            # Bug fix: sigma_t is a plain ndarray and has no .norm() method,
            # so `sigma_t.norm()` raised AttributeError; use the module-level
            # `norm` function instead.
            # NOTE(review): physically the jump condition may be intended to
            # use the trace of sigma_t rather than its vector norm -- confirm.
            if norm(sigma_t) < r_jump:
                # jump occurs
                p = np.array([expect(c.dag() * c, rho_t) for c in c_ops])
                p = np.cumsum(p / np.sum(p))
                n = np.where(p >= r_op)[0][0]

                # apply jump
                # NOTE(review): c_ops are Qobj while rho_t is a vectorized
                # ndarray; this multiplication looks type-inconsistent and
                # should be checked against the superoperator form.
                rho_t = c_ops[n] * rho_t * c_ops[n].dag()
                rho_t /= expect(c_ops[n].dag() * c_ops[n], rho_t)
                sigma_t = np.copy(rho_t)

                # store info about jump
                jump_times.append(times[t_idx] + dt * j)
                jump_op_idx.append(n)

                # get new random numbers for next jump
                r_jump, r_op = prng.rand(2)

            # deterministic evolution without correction for norm decay
            dsigma_t = spmv(L.data, sigma_t) * dt

            # deterministic evolution with correction for norm decay
            drho_t = spmv(L.data, rho_t) * dt

            # increment density matrices
            # Bug fix: `rho_t += drho_t` originally appeared twice in each
            # substep (before and after this comment), doubling the
            # deterministic step for rho_t.
            sigma_t += dsigma_t
            rho_t += drho_t

    return states_list, jump_times, jump_op_idx
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Simple RAM backed Search API stub."""
import base64
import bisect
import copy
import cPickle as pickle
import datetime
import functools
import logging
import math
import os
import random
import string
import tempfile
import threading
import urllib
import uuid
from google.appengine.datastore import document_pb
from google.appengine.api import apiproxy_stub
from google.appengine.api.namespace_manager import namespace_manager
from google.appengine.api.search import query_parser
from google.appengine.api.search import QueryParser
from google.appengine.api.search import search
from google.appengine.api.search import search_service_pb
from google.appengine.api.search import search_util
from google.appengine.api.search.stub import document_matcher
from google.appengine.api.search.stub import expression_evaluator
from google.appengine.api.search.stub import simple_tokenizer
from google.appengine.api.search.stub import tokens
from google.appengine.runtime import apiproxy_errors
__all__ = ['IndexConsistencyError',
'Posting',
'PostingList',
'RamInvertedIndex',
'SearchServiceStub',
'SimpleIndex',
'FieldTypesDict',
]
# All printable ASCII characters excluding whitespace.
_VISIBLE_PRINTABLE_ASCII = frozenset(
    set(string.printable) - set(string.whitespace))
class IndexConsistencyError(Exception):
  """Deprecated 1.7.7. Accessed index with same name different consistency.

  Kept so existing callers that catch this exception keep working.
  """
class Posting(object):
  """Represents occurrences of some token at positions in a document."""

  def __init__(self, doc_id):
    """Initializer.

    Args:
      doc_id: The identifier of the document with token occurrences.

    Raises:
      TypeError: If an unknown argument is passed.
    """
    self._doc_id = doc_id
    # Sorted list of positions at which the token occurs in the document.
    self._positions = []

  @property
  def doc_id(self):
    """Return id of the document that the token occurred in."""
    return self._doc_id

  def AddPosition(self, position):
    """Adds the position in token sequence to occurrences for token."""
    # Insert in sorted order; no-op if the position is already recorded.
    pos = bisect.bisect_left(self._positions, position)
    if pos < len(self._positions) and self._positions[pos] == position:
      return
    self._positions.insert(pos, position)

  def RemovePosition(self, position):
    """Removes the position in token sequence from occurrences for token."""
    pos = bisect.bisect_left(self._positions, position)
    if pos < len(self._positions) and self._positions[pos] == position:
      del self._positions[pos]

  def __cmp__(self, other):
    # Orders postings by doc_id so PostingList can binary-search them.
    # NOTE: relies on the Python 2 __cmp__ protocol and `cmp` builtin.
    if not isinstance(other, Posting):
      return -2
    return cmp(self.doc_id, other.doc_id)

  @property
  def positions(self):
    # Sorted list of token positions for this document.
    return self._positions

  def __repr__(self):
    return search_util.Repr(
        self, [('doc_id', self.doc_id), ('positions', self.positions)])
class PostingList(object):
  """Ordered positions of one token across documents.

  Holds at most one Posting per document id, kept sorted by doc_id; each
  Posting records the positions at which the token occurs in that document.
  """

  def __init__(self):
    self._postings = []

  def _Locate(self, doc_id):
    """Returns (insertion index, existing Posting or None) for doc_id."""
    probe = Posting(doc_id=doc_id)
    idx = bisect.bisect_left(self._postings, probe)
    if idx < len(self._postings) and self._postings[idx].doc_id == doc_id:
      return idx, self._postings[idx]
    return idx, None

  def Add(self, doc_id, position):
    """Adds the token position for the given doc_id."""
    idx, existing = self._Locate(doc_id)
    if existing is None:
      existing = Posting(doc_id=doc_id)
      self._postings.insert(idx, existing)
    existing.AddPosition(position)

  def Remove(self, doc_id, position):
    """Removes the token position for the given doc_id."""
    idx, existing = self._Locate(doc_id)
    if existing is None:
      return
    existing.RemovePosition(position)
    if not existing.positions:
      del self._postings[idx]

  @property
  def postings(self):
    return self._postings

  def __iter__(self):
    return iter(self._postings)

  def __repr__(self):
    return search_util.Repr(self, [('postings', self.postings)])
class _ScoredDocument(object):
  """A document_pb.Document paired with the score it received for a query."""

  def __init__(self, document, score):
    self._expressions = {}
    self._score = score
    self._document = document

  @property
  def document(self):
    """The underlying document_pb.Document."""
    return self._document

  @property
  def score(self):
    """The score computed for the document."""
    return self._score

  @property
  def expressions(self):
    """Mutable mapping of evaluated expressions for the document."""
    return self._expressions

  def __repr__(self):
    return search_util.Repr(
        self, [('document', self.document), ('score', self.score)])
class _DocumentStatistics(object):
  """Statistics about terms occurring in a document."""

  def __init__(self):
    # Maps term -> number of occurrences in the document.
    self._term_stats = {}

  def __iter__(self):
    for item in self._term_stats.items():
      yield item

  def IncrementTermCount(self, term):
    """Adds an occurrence of the term to the stats for the document."""
    self._term_stats[term] = self._term_stats.get(term, 0) + 1

  def TermFrequency(self, term):
    """Returns the term frequency in the document."""
    return self._term_stats.get(term, 0)

  @property
  def term_stats(self):
    """Returns the collection of term frequencies in the document."""
    return self._term_stats

  def __eq__(self, other):
    return self.term_stats == other.term_stats

  def __hash__(self):
    # Bug fix: dicts are unhashable, so the original hash(self.term_stats)
    # always raised TypeError.  Hash an order-independent frozenset of the
    # items instead, which is consistent with __eq__.
    return hash(frozenset(self.term_stats.items()))

  def __repr__(self):
    return search_util.Repr(self, [('term_stats', self.term_stats)])
class FieldTypesDict(object):
  """Dictionary-like object mapping field names to their observed types.

  Backed by a list of document_pb.FieldTypes entries, one per field name.
  """

  def __init__(self):
    self._field_types = []

  def __contains__(self, name):
    return name in [f.name() for f in self._field_types]

  def __getitem__(self, name):
    for f in self._field_types:
      if name == f.name():
        return f
    # BUG FIX: the original used the Python-2-only statement form
    # "raise KeyError, name"; the call form works on both Python 2 and 3.
    raise KeyError(name)

  def AddFieldType(self, name, field_type):
    """Records that field `name` was seen with `field_type`."""
    field_types = None
    for f in self._field_types:
      if name == f.name():
        field_types = f
        break  # names are unique in the backing list; stop at first match
    if field_types is None:
      field_types = document_pb.FieldTypes()
      field_types.set_name(name)
      self._field_types.append(field_types)
    if field_type not in field_types.type_list():
      field_types.add_type(field_type)

  def __iter__(self):
    # Iterates field names in sorted order.
    return iter(sorted([f.name() for f in self._field_types]))

  def __repr__(self):
    return repr(self._field_types)
class RamInvertedIndex(object):
  """A simple RAM-resident inverted file over documents."""

  def __init__(self, tokenizer):
    self._tokenizer = tokenizer
    # token -> PostingList
    self._inverted_index = {}
    self._schema = FieldTypesDict()
    self._document_ids = set()

  def _AddDocumentId(self, doc_id):
    """Adds the doc_id to set in index."""
    self._document_ids.add(doc_id)

  def _RemoveDocumentId(self, doc_id):
    """Removes the doc_id from the set in index."""
    # discard() is a no-op for unknown ids, matching the guarded remove.
    self._document_ids.discard(doc_id)

  @property
  def document_count(self):
    return len(self._document_ids)

  def _AddFieldType(self, name, field_type):
    """Adds the type to the list supported for a named field."""
    self._schema.AddFieldType(name, field_type)

  def GetDocumentStats(self, document):
    """Gets statistics about occurrences of terms in document."""
    stats = _DocumentStatistics()
    for field in document.field_list():
      for token in self._tokenizer.TokenizeValue(field_value=field.value()):
        stats.IncrementTermCount(token.chars)
    return stats

  def AddDocument(self, doc_id, document):
    """Adds a document into the index."""
    start_position = 0
    for field in document.field_list():
      self._AddFieldType(field.name(), field.value().type())
      self._AddTokens(doc_id, field.name(), field.value(), start_position)
    self._AddDocumentId(doc_id)

  def RemoveDocument(self, document):
    """Removes a document from the index."""
    doc_id = document.id()
    for field in document.field_list():
      self._RemoveTokens(doc_id, field.name(), field.value())
    self._RemoveDocumentId(doc_id)

  def _AddTokens(self, doc_id, field_name, field_value, token_position):
    """Adds token occurrences for a given doc's field value."""
    for token in self._tokenizer.TokenizeValue(field_value, token_position):
      # Index both the bare token and its field-restricted variant.
      self._AddToken(doc_id, token)
      self._AddToken(doc_id, token.RestrictField(field_name))

  def _RemoveTokens(self, doc_id, field_name, field_value):
    """Removes tokens occurrences for a given doc's field value."""
    for token in self._tokenizer.TokenizeValue(field_value=field_value):
      self._RemoveToken(doc_id, token)
      self._RemoveToken(doc_id, token.RestrictField(field_name))

  def _AddToken(self, doc_id, token):
    """Adds a token occurrence for a document."""
    posting_list = self._inverted_index.setdefault(token, PostingList())
    posting_list.Add(doc_id, token.position)

  def _RemoveToken(self, doc_id, token):
    """Removes a token occurrence for a document."""
    posting_list = self._inverted_index.get(token)
    if posting_list is not None:
      posting_list.Remove(doc_id, token.position)
      # Drop tokens whose posting list became empty.
      if not posting_list.postings:
        del self._inverted_index[token]

  def GetPostingsForToken(self, token):
    """Returns all document postings which for the token."""
    posting_list = self._inverted_index.get(token)
    if posting_list is not None:
      return posting_list.postings
    return []

  def GetSchema(self):
    """Returns the schema for the index."""
    return self._schema

  def __repr__(self):
    return search_util.Repr(self, [('_inverted_index', self._inverted_index),
                                   ('_schema', self._schema),
                                   ('document_count', self.document_count)])
def _ScoreRequested(params):
"""Returns True if match scoring requested, False otherwise."""
return params.has_scorer_spec() and params.scorer_spec().has_scorer()
class SimpleIndex(object):
  """A simple search service which uses a RAM-resident inverted file."""

  def __init__(self, index_spec):
    self._index_spec = index_spec
    # doc_id -> document_pb.Document for every indexed document.
    self._documents = {}
    # Query-side tokenizer is built with split_restricts=False; the
    # index-side tokenizer below uses the defaults.
    self._parser = simple_tokenizer.SimpleTokenizer(split_restricts=False)
    self._inverted_index = RamInvertedIndex(simple_tokenizer.SimpleTokenizer())

  @property
  def index_spec(self):
    """Returns the index specification for the index."""
    return self._index_spec

  def IndexDocuments(self, documents, response):
    """Indexes an iterable DocumentPb.Document."""
    for document in documents:
      doc_id = document.id()
      if not doc_id:
        # Assign a fresh id to documents that arrive without one.
        doc_id = str(uuid.uuid4())
        document.set_id(doc_id)
      try:
        # Validation only: raises ValueError for malformed documents.
        search._NewDocumentFromPb(document)
      except ValueError, e:
        new_status = response.add_status()
        new_status.set_code(search_service_pb.SearchServiceError.INVALID_REQUEST)
        new_status.set_error_detail(e.message)
        continue
      response.add_doc_id(doc_id)
      if doc_id in self._documents:
        # Re-indexing an existing id replaces the old document.
        old_document = self._documents[doc_id]
        self._inverted_index.RemoveDocument(old_document)
      self._documents[doc_id] = document
      new_status = response.add_status()
      new_status.set_code(search_service_pb.SearchServiceError.OK)
      self._inverted_index.AddDocument(doc_id, document)

  def DeleteDocuments(self, document_ids, response):
    """Deletes documents for the given document_ids."""
    for document_id in document_ids:
      if document_id in self._documents:
        document = self._documents[document_id]
        self._inverted_index.RemoveDocument(document)
        del self._documents[document_id]
      # Status is OK even for unknown ids: delete is idempotent here.
      delete_status = response.add_status()
      delete_status.set_code(search_service_pb.SearchServiceError.OK)

  def Documents(self):
    """Returns the documents in the index."""
    return self._documents.values()

  def _TermFrequency(self, term, document):
    """Return the term frequency in the document."""
    return self._inverted_index.GetDocumentStats(document).TermFrequency(term)

  @property
  def document_count(self):
    """Returns the count of documents in the index."""
    return self._inverted_index.document_count

  def _DocumentCountForTerm(self, term):
    """Returns the document count for documents containing the term."""
    return len(self._PostingsForToken(tokens.Token(chars=term)))

  def _InverseDocumentFrequency(self, term):
    """Returns inverse document frequency of term."""
    doc_count = self._DocumentCountForTerm(term)
    if doc_count:
      return math.log10(self.document_count / float(doc_count))
    else:
      # Term occurs in no document: contributes nothing to the score.
      return 0

  def _TermFrequencyInverseDocumentFrequency(self, term, document):
    """Returns the term frequency times inverse document frequency of term."""
    return (self._TermFrequency(term, document) *
            self._InverseDocumentFrequency(term))

  def _ScoreDocument(self, document, score, terms):
    """Scores a document for the given query (TF-IDF summed over terms)."""
    if not score:
      return 0
    tf_idf = 0
    for term in terms:
      tf_idf += self._TermFrequencyInverseDocumentFrequency(term, document)
    return tf_idf

  def _PostingsForToken(self, token):
    """Returns the postings for the token."""
    return self._inverted_index.GetPostingsForToken(token)

  def _CollectTerms(self, node):
    """Get all search terms for scoring."""
    if node.getType() in search_util.TEXT_QUERY_TYPES:
      return set([query_parser.GetQueryNodeText(node).strip('"')])
    elif node.children:
      if node.getType() == QueryParser.EQ and len(node.children) > 1:
        # For field:value restricts skip the first child (the field name).
        children = node.children[1:]
      else:
        children = node.children

      result = set()
      for term_set in (self._CollectTerms(child) for child in children):
        result.update(term_set)
      return result
    return set()

  def _CollectFields(self, node):
    # Collects the field names referenced by EQ (field:value) nodes.
    if node.getType() == QueryParser.EQ and node.children:
      return set([query_parser.GetQueryNodeText(node.children[0])])
    elif node.children:
      result = set()
      for term_set in (self._CollectFields(child) for child in node.children):
        result.update(term_set)
      return result
    return set()

  def _Evaluate(self, node, score=True):
    """Retrieve scored results for a search query."""
    doc_match = document_matcher.DocumentMatcher(node, self._inverted_index)

    matched_documents = doc_match.FilterDocuments(self._documents.itervalues())
    terms = self._CollectTerms(node)
    scored_documents = [
        _ScoredDocument(doc, self._ScoreDocument(doc, score, terms))
        for doc in matched_documents]
    return scored_documents

  def _Sort(self, docs, search_params, score):
    # Scored searches sort by descending score; otherwise, when no sort
    # spec was supplied, newest (highest order_id) first.
    if score:
      return sorted(docs, key=lambda doc: doc.score, reverse=True)

    if not search_params.sort_spec_size():
      return sorted(docs, key=lambda doc: doc.document.order_id(), reverse=True)

    def SortKey(scored_doc):
      """Return the sort key for a document based on the request parameters.

      Arguments:
        scored_doc: The document to score

      Returns:
        The sort key of a document. The sort key is a tuple, where the nth
        element in the tuple corresponds to the value of the nth sort expression
        evaluated on the document.

      Raises:
        Exception: if no default value is specified.
      """
      expr_vals = []
      for sort_spec in search_params.sort_spec_list():
        if not (sort_spec.has_default_value_text() or
                sort_spec.has_default_value_numeric()):
          raise Exception('A default value must be specified for sorting.')
        elif sort_spec.has_default_value_text():
          default_value = sort_spec.default_value_text()
        else:
          default_value = sort_spec.default_value_numeric()
        val = expression_evaluator.ExpressionEvaluator(
            scored_doc, self._inverted_index, True).ValueOf(
                sort_spec.sort_expression(), default_value=default_value)
        if isinstance(val, datetime.datetime):
          # Dates sort by their epoch timestamp.
          val = search_util.EpochTime(val)
        expr_vals.append(val)
      return tuple(expr_vals)

    def SortCmp(x, y):
      """The comparison function for sort keys."""
      # Compares key tuples element-wise, honouring each sort spec's
      # descending flag; equal tuples compare equal.
      for i, val_tuple in enumerate(zip(x, y)):
        cmp_val = cmp(*val_tuple)
        if cmp_val:
          if search_params.sort_spec(i).sort_descending():
            return -cmp_val
          return cmp_val
      return 0
    # NOTE: sorted(..., cmp=...) is Python-2-only; cmp is applied to the
    # key tuples produced by SortKey.
    return sorted(docs, key=SortKey, cmp=SortCmp)

  def _AttachExpressions(self, docs, search_params):
    # Evaluates requested field expressions; results are stored on each
    # _ScoredDocument by the evaluator.
    if search_params.has_field_spec():
      for doc in docs:
        evaluator = expression_evaluator.ExpressionEvaluator(
            doc, self._inverted_index)
        for expr in search_params.field_spec().expression_list():
          evaluator.Evaluate(expr)
    return docs

  def Search(self, search_request):
    """Searches the index for documents matching the request's query."""
    query = urllib.unquote(search_request.query())
    query = query.strip()
    score = _ScoreRequested(search_request)
    if not query:
      # Empty query matches every document with zero score.
      docs = [_ScoredDocument(doc, 0.0) for doc in self._documents.values()]
    else:
      if not isinstance(query, unicode):
        query = unicode(query, 'utf-8')
      query_tree = query_parser.ParseAndSimplify(query)
      docs = self._Evaluate(query_tree, score=score)
    docs = self._Sort(docs, search_request, score)
    docs = self._AttachExpressions(docs, search_request)
    return docs

  def GetSchema(self):
    """Returns the schema for the index."""
    return self._inverted_index.GetSchema()

  def __repr__(self):
    return search_util.Repr(self, [('_index_spec', self._index_spec),
                                   ('_documents', self._documents),
                                   ('_inverted_index', self._inverted_index)])
class SearchServiceStub(apiproxy_stub.APIProxyStub):
  """Simple RAM backed Search service stub.

  This stub provides the search_service_pb.SearchService. But this is
  NOT a subclass of SearchService itself. Services are provided by
  the methods prefixed by "_Dynamic_".
  """

  # Version tag stored alongside the pickled indexes; _ReadFromFile()
  # discards persisted data written under a different version.
  _VERSION = 1

  def __init__(self, service_name='search', index_file=None):
    """Constructor.

    Args:
      service_name: Service name expected for all calls.
      index_file: The file to which search indexes will be persisted.
    """
    # namespace -> {index_name -> SimpleIndex}
    self.__indexes = {}
    self.__index_file = index_file
    self.__index_file_lock = threading.Lock()
    super(SearchServiceStub, self).__init__(service_name)

    self.Read()

  def _InvalidRequest(self, status, exception):
    # Marks `status` as INVALID_REQUEST, carrying the exception message.
    status.set_code(search_service_pb.SearchServiceError.INVALID_REQUEST)
    status.set_error_detail(exception.message)

  def _UnknownIndex(self, status, index_spec):
    # NOTE(review): a missing index reports code OK (not an error code)
    # with a descriptive detail string -- confirm this matches the API.
    status.set_code(search_service_pb.SearchServiceError.OK)
    status.set_error_detail('no index for %r' % index_spec)

  def _GetNamespace(self, namespace):
    """Get namespace name.

    Args:
      namespace: Namespace provided in request arguments.

    Returns:
      If namespace is None, returns the name of the current global namespace. If
      namespace is not None, returns namespace.
    """
    if namespace is not None:
      return namespace
    return namespace_manager.get_namespace()

  def _GetIndex(self, index_spec, create=False):
    # Looks up (and optionally creates) the index named by index_spec in
    # its namespace; returns None when absent and create is False.
    namespace = self._GetNamespace(index_spec.namespace())

    index = self.__indexes.setdefault(namespace, {}).get(index_spec.name())
    if index is None:
      if create:
        index = SimpleIndex(index_spec)
        self.__indexes[namespace][index_spec.name()] = index
      else:
        return None
    return index

  def _Dynamic_IndexDocument(self, request, response):
    """A local implementation of SearchService.IndexDocument RPC.

    Index a new document or update an existing document.

    Args:
      request: A search_service_pb.IndexDocumentRequest.
      response: An search_service_pb.IndexDocumentResponse.
    """
    params = request.params()
    index = self._GetIndex(params.index_spec(), create=True)
    index.IndexDocuments(params.document_list(), response)

  def _Dynamic_DeleteDocument(self, request, response):
    """A local implementation of SearchService.DeleteDocument RPC.

    Args:
      request: A search_service_pb.DeleteDocumentRequest.
      response: An search_service_pb.DeleteDocumentResponse.
    """
    params = request.params()
    index_spec = params.index_spec()
    index = self._GetIndex(index_spec)
    if index is None:
      self._UnknownIndex(response.add_status(), index_spec)
      return
    index.DeleteDocuments(params.doc_id_list(), response)

  def _Dynamic_ListIndexes(self, request, response):
    """A local implementation of SearchService.ListIndexes RPC.

    Args:
      request: A search_service_pb.ListIndexesRequest.
      response: An search_service_pb.ListIndexesResponse.

    Raises:
      ResponseTooLargeError: raised for testing admin console.
    """
    # An app_id on the request selects the randomized admin-console test
    # path: fabricated index names, occasional errors, ~10% huge-response.
    if request.has_app_id():
      if random.choice([True] + [False] * 9):
        raise apiproxy_errors.ResponseTooLargeError()

      for _ in xrange(random.randint(0, 2) * random.randint(5, 15)):
        new_index_spec = response.add_index_metadata().mutable_index_spec()
        new_index_spec.set_name(
            random.choice(list(_VISIBLE_PRINTABLE_ASCII - set('!'))) +
            ''.join(random.choice(list(_VISIBLE_PRINTABLE_ASCII))
                    for _ in xrange(random.randint(
                        0, search.MAXIMUM_INDEX_NAME_LENGTH))))
      response.mutable_status().set_code(
          random.choice([search_service_pb.SearchServiceError.OK] * 10 +
                        [search_service_pb.SearchServiceError.TRANSIENT_ERROR] +
                        [search_service_pb.SearchServiceError.INTERNAL_ERROR]))
      return

    response.mutable_status().set_code(
        search_service_pb.SearchServiceError.OK)

    namespace = self._GetNamespace(request.params().namespace())
    if namespace not in self.__indexes or not self.__indexes[namespace]:
      return

    # Indexes are paged in index-name order.
    keys, indexes = zip(*sorted(
        self.__indexes[namespace].iteritems(), key=lambda v: v[0]))
    position = 0
    params = request.params()
    if params.has_start_index_name():
      position = bisect.bisect_left(keys, params.start_index_name())
      if (not params.include_start_index() and position < len(keys)
          and keys[position] == params.start_index_name()):
        position += 1
    elif params.has_index_name_prefix():
      position = bisect.bisect_left(keys, params.index_name_prefix())
    if params.has_offset():
      position += params.offset()
    end_position = position + params.limit()
    prefix = params.index_name_prefix()
    for index in indexes[min(position, len(keys)):min(end_position, len(keys))]:
      index_spec = index.index_spec
      # Names are sorted, so the first non-matching name ends the prefix run.
      if prefix and not index_spec.name().startswith(prefix):
        break
      metadata = response.add_index_metadata()
      new_index_spec = metadata.mutable_index_spec()
      new_index_spec.set_name(index_spec.name())
      new_index_spec.set_namespace(index_spec.namespace())
      if params.fetch_schema():
        self._AddSchemaInformation(index, metadata)

  def _AddSchemaInformation(self, index, metadata_pb):
    # Copies every field-name/type entry of the index schema into the
    # metadata proto.
    schema = index.GetSchema()
    for name in schema:
      field_types = schema[name]
      new_field_types = metadata_pb.add_field()
      new_field_types.MergeFrom(field_types)

  def _AddDocument(self, response, document, ids_only):
    # Appends the document to the response, trimmed to its id if requested.
    doc = response.add_document()
    if ids_only:
      doc.set_id(document.id())
    else:
      doc.MergeFrom(document)

  def _Dynamic_ListDocuments(self, request, response):
    """A local implementation of SearchService.ListDocuments RPC.

    Args:
      request: A search_service_pb.ListDocumentsRequest.
      response: An search_service_pb.ListDocumentsResponse.
    """
    params = request.params()
    # NOTE(review): with create=True _GetIndex never returns None, so the
    # _UnknownIndex branch below is unreachable.
    index = self._GetIndex(params.index_spec(), create=True)
    if index is None:
      self._UnknownIndex(response.mutable_status(), params.index_spec())
      return

    num_docs = 0
    start = not params.has_start_doc_id()
    # Documents are listed in doc-id order, optionally starting at (or
    # just after) start_doc_id, up to the requested limit.
    for document in sorted(index.Documents(), key=lambda doc: doc.id()):
      if start:
        if num_docs < params.limit():
          self._AddDocument(response, document, params.keys_only())
          num_docs += 1
      else:
        if document.id() >= params.start_doc_id():
          start = True
          if (document.id() != params.start_doc_id() or
              params.include_start_doc()):
            self._AddDocument(response, document, params.keys_only())
            num_docs += 1

    response.mutable_status().set_code(
        search_service_pb.SearchServiceError.OK)

  def _RandomSearchResponse(self, request, response):
    # Fabricates a deterministic-per-query random search response; used
    # by _Dynamic_Search when the request carries an app_id (test path).

    random.seed()
    if random.random() < 0.03:
      raise apiproxy_errors.ResponseTooLargeError()
    response.mutable_status().set_code(
        random.choice([search_service_pb.SearchServiceError.OK] * 30 +
                      [search_service_pb.SearchServiceError.TRANSIENT_ERROR] +
                      [search_service_pb.SearchServiceError.INTERNAL_ERROR]))

    params = request.params()
    random.seed(params.query())
    total = random.randint(0, 100)

    if random.random() < 0.3:
      total = 0

    offset = 0
    if params.has_offset():
      offset = params.offset()

    remaining = max(0, total - offset)
    nresults = min(remaining, params.limit())
    matched_count = offset + nresults
    if remaining > nresults:
      matched_count += random.randint(1, 100)

    def RandomText(charset, min_len, max_len):
      return ''.join(random.choice(charset)
                     for _ in xrange(random.randint(min_len, max_len)))

    for i in xrange(nresults):
      seed = '%s:%s' % (params.query(), i + offset)
      random.seed(seed)
      result = response.add_result()
      doc = result.mutable_document()
      doc_id = RandomText(string.letters + string.digits, 8, 10)
      doc.set_id(doc_id)
      random.seed(doc_id)
      for _ in params.sort_spec_list():
        result.add_score(random.random())

      for name, probability in [('creator', 0.90), ('last_change', 0.40)]:
        if random.random() < probability:
          field = doc.add_field()
          field.set_name(name)
          value = field.mutable_value()
          value.set_type(document_pb.FieldValue.TEXT)
          value.set_string_value(
              RandomText(string.letters + string.digits, 2, 10)
              + '@google.com')

      field = doc.add_field()
      field.set_name('content')
      value = field.mutable_value()
      value.set_type(document_pb.FieldValue.TEXT)
      value.set_string_value(
          RandomText(string.printable, 0, 15) + params.query() +
          RandomText(string.printable + 10 * string.whitespace, 5, 5000))

      for i in xrange(random.randint(0, 2)):
        field = doc.add_field()
        field.set_name(RandomText(string.letters, 3, 7))
        value = field.mutable_value()
        value.set_type(document_pb.FieldValue.TEXT)
        value.set_string_value(RandomText(string.printable, 0, 100))

    response.set_matched_count(matched_count)

  def _DefaultFillSearchResponse(self, params, results, response):
    """Fills the SearchResponse with the first set of results."""
    position_range = range(0, min(params.limit(), len(results)))
    self._FillSearchResponse(results, position_range, params.cursor_type(),
                             _ScoreRequested(params), response)

  def _CopyDocument(self, doc, doc_copy, field_names, ids_only=None):
    """Copies Document, doc, to doc_copy restricting fields to field_names."""
    doc_copy.set_id(doc.id())
    if ids_only:
      return
    if doc.has_language():
      doc_copy.set_language(doc.language())
    for field in doc.field_list():
      # An empty/None field_names means copy every field.
      if not field_names or field.name() in field_names:
        doc_copy.add_field().CopyFrom(field)
    doc_copy.set_order_id(doc.order_id())

  def _FillSearchResponse(self, results, position_range, cursor_type, score,
                          response, field_names=None, ids_only=None):
    """Fills the SearchResponse with a selection of results."""
    for i in position_range:
      result = results[i]
      search_result = response.add_result()
      self._CopyDocument(result.document, search_result.mutable_document(),
                         field_names, ids_only)
      if cursor_type == search_service_pb.SearchParams.PER_RESULT:
        search_result.set_cursor(self._EncodeCursor(result.document))
      if score:
        search_result.add_score(result.score)
      for field, expression in result.expressions.iteritems():
        expr = search_result.add_expression()
        expr.set_name(field)
        # Numeric expression results are serialized as their string form
        # but tagged NUMBER; everything else is treated as HTML text.
        if (isinstance(expression, float) or
            isinstance(expression, long) or
            isinstance(expression, int)):
          expr.mutable_value().set_string_value(str(expression))
          expr.mutable_value().set_type(document_pb.FieldValue.NUMBER)
        else:
          expr.mutable_value().set_string_value(expression)
          expr.mutable_value().set_type(document_pb.FieldValue.HTML)

  def _Dynamic_Search(self, request, response):
    """A local implementation of SearchService.Search RPC.

    Args:
      request: A search_service_pb.SearchRequest.
      response: An search_service_pb.SearchResponse.
    """
    if request.has_app_id():
      # Admin-console test path: fabricated results.
      self._RandomSearchResponse(request, response)
      return

    index = None
    index = self._GetIndex(request.params().index_spec())
    if index is None:
      self._UnknownIndex(response.mutable_status(),
                         request.params().index_spec())
      response.set_matched_count(0)
      return

    params = request.params()
    try:
      results = index.Search(params)
    except query_parser.QueryException, e:
      self._InvalidRequest(response.mutable_status(), e)
      response.set_matched_count(0)
      return
    except document_matcher.ExpressionTreeException, e:
      self._InvalidRequest(response.mutable_status(), e)
      response.set_matched_count(0)
      return
    response.set_matched_count(len(results))

    # A cursor positions the page just after the referenced document;
    # otherwise an explicit offset (if any) is used.
    offset = 0
    if params.has_cursor():
      doc_id = self._DecodeCursor(params.cursor())
      for i, result in enumerate(results):
        if result.document.id() == doc_id:
          offset = i + 1
          break
    elif params.has_offset():
      offset = params.offset()

    if offset < len(results):
      limit = offset + params.limit()
      if limit >= len(results):
        range_end = len(results)
      else:
        range_end = limit
      if params.cursor_type() == search_service_pb.SearchParams.SINGLE:
        document = results[range_end - 1].document
        response.set_cursor(self._EncodeCursor(document))
      result_range = range(offset, range_end)
    else:
      result_range = range(0)
    field_names = params.field_spec().name_list()
    self._FillSearchResponse(results, result_range, params.cursor_type(),
                             _ScoreRequested(params), response, field_names,
                             params.keys_only())

    response.mutable_status().set_code(search_service_pb.SearchServiceError.OK)

  def _EncodeCursor(self, document):
    """Returns a websafe base64 cursor for the document's id."""
    return base64.urlsafe_b64encode(document.id())

  def _DecodeCursor(self, cursor):
    """Decodes a cursor produced by _EncodeCursor back to a document id."""
    return base64.urlsafe_b64decode(cursor)

  def __repr__(self):
    return search_util.Repr(self, [('__indexes', self.__indexes)])

  def Write(self):
    """Write search indexes to the index file.

    This method is a no-op if index_file is set to None.
    """
    if not self.__index_file:
      return

    # Write to a temp file in the same directory, then rename into place
    # so readers never observe a partially written index file.
    descriptor, tmp_filename = tempfile.mkstemp(
        dir=os.path.dirname(self.__index_file))
    tmpfile = os.fdopen(descriptor, 'wb')

    pickler = pickle.Pickler(tmpfile, protocol=1)
    pickler.fast = True
    pickler.dump((self._VERSION, self.__indexes))

    tmpfile.close()

    self.__index_file_lock.acquire()
    try:
      try:
        os.rename(tmp_filename, self.__index_file)
      except OSError:
        # os.rename cannot replace an existing file on some platforms
        # (e.g. Windows); fall back to remove-then-rename.
        os.remove(self.__index_file)
        os.rename(tmp_filename, self.__index_file)
    finally:
      self.__index_file_lock.release()

  def _ReadFromFile(self):
    # Returns the persisted indexes dict, or {} when the file is missing,
    # unreadable, or written by an incompatible version.
    self.__index_file_lock.acquire()
    try:
      if os.path.isfile(self.__index_file):
        version, indexes = pickle.load(open(self.__index_file, 'rb'))
        if version == self._VERSION:
          return indexes
        logging.warning(
            'Saved search indexes are not compatible with this version of the '
            'SDK. Search indexes have been cleared.')
      else:
        logging.warning(
            'Could not read search indexes from %s', self.__index_file)
    except (AttributeError, LookupError, ImportError, NameError, TypeError,
            ValueError, pickle.PickleError, IOError), e:
      logging.warning(
          'Could not read indexes from %s. Try running with the '
          '--clear_search_index flag. Cause:\n%r' % (self.__index_file, e))
    finally:
      self.__index_file_lock.release()

    return {}

  def Read(self):
    """Read search indexes from the index file.

    This method is a no-op if index_file is set to None.
    """
    if not self.__index_file:
      return
    read_indexes = self._ReadFromFile()
    if read_indexes:
      self.__indexes = read_indexes
| |
from __future__ import absolute_import
import sugartensor as tf
# noinspection PyPackageRequirements
import numpy as np
__author__ = 'namju.kim@kakaobrain.com'
#
# transform sugar functions
#
@tf.sg_sugar_func
def sg_identity(tensor, opt):
    r"""Returns the same tensor

    Args:
      tensor: A `Tensor` (automatically given by chain).
      opt:
        name : If provided, it replaces current tensor's name

    Returns:
      A `Tensor`. Has the same content as `tensor`.
    """
    # `opt` is the sg_opt bundle injected by the sg_sugar_func decorator.
    return tf.identity(tensor, name=opt.name)
@tf.sg_sugar_func
def sg_cast(tensor, opt):
    r"""Casts a tensor to a new type.

    See `tf.cast()` in tensorflow.

    Args:
      tensor: A `Tensor` or `SparseTensor` (automatically given by chain).
      opt:
        dtype : The destination type.
        name : If provided, it replaces current tensor's name

    Returns:
      A `Tensor` or `SparseTensor` with same shape as `tensor`.
    """
    # opt.dtype is None when the caller did not supply it; fail fast.
    assert opt.dtype is not None, 'dtype is mandatory.'
    return tf.cast(tensor, opt.dtype, name=opt.name)
@tf.sg_sugar_func
def sg_float(tensor, opt):
    r"""Casts a tensor to floatx.

    See `tf.cast()` in tensorflow.

    Args:
      tensor: A `Tensor` or `SparseTensor` (automatically given by chain).
      opt:
        name : If provided, it replaces current tensor's name

    Returns:
      A `Tensor` or `SparseTensor` with same shape as `tensor`.
    """
    # tf.sg_floatx is the library-wide configured float dtype.
    return tf.cast(tensor, tf.sg_floatx, name=opt.name)
@tf.sg_sugar_func
def sg_int(tensor, opt):
    r"""Casts a tensor to intx.

    See `tf.cast()` in tensorflow.

    Args:
      tensor: A `Tensor` or `SparseTensor` (automatically given by chain).
      opt:
        name: If provided, it replaces current tensor's name.

    Returns:
      A `Tensor` or `SparseTensor` with same shape as `tensor`.
    """
    # tf.sg_intx is the library-wide configured integer dtype.
    return tf.cast(tensor, tf.sg_intx, name=opt.name)
@tf.sg_sugar_func
def sg_expand_dims(tensor, opt):
    r"""Inserts a new axis.

    See tf.expand_dims() in tensorflow.

    Args:
      tensor: A `Tensor` (automatically given by chain).
      opt:
        axis : Dimension to expand. Default is -1 (append at the end).
        name: If provided, it replaces current tensor's name.

    Returns:
      A `Tensor`.
    """
    # sg_opt '+=' only fills in axis when the caller did not provide one.
    opt += tf.sg_opt(axis=-1)
    return tf.expand_dims(tensor, opt.axis, name=opt.name)
@tf.sg_sugar_func
def sg_squeeze(tensor, opt):
    r"""Removes axis of size 1 from the shape of a tensor.

    See `tf.squeeze()` in tensorflow.

    Args:
      tensor: A `Tensor` (automatically given by chain).
      opt:
        axis : A tuple/list of integers or an integer.
            axis to remove. Default is -1.
        name: If provided, it replaces current tensor's name.

    Returns:
      A `Tensor`.
    """
    opt += tf.sg_opt(axis=[-1])
    # normalize a scalar axis into a single-element list
    if not isinstance(opt.axis, (tuple, list)):
        opt.axis = [opt.axis]
    return tf.squeeze(tensor, opt.axis, name=opt.name)
@tf.sg_sugar_func
def sg_flatten(tensor, opt):
    r"""Reshapes a tensor to `batch_size x -1`.

    See `tf.reshape()` in tensorflow.

    Args:
      tensor: A `Tensor` (automatically given by chain).
      opt:
        name: If provided, it replaces current tensor's name.

    Returns:
      A 2-D tensor.
    """
    # collapse every non-batch dimension into a single one
    flat_dim = np.prod(tensor.get_shape().as_list()[1:])
    return tf.reshape(tensor, [-1, flat_dim], name=opt.name)
@tf.sg_sugar_func
def sg_reshape(tensor, opt):
    r"""Reshapes a tensor.

    See `tf.reshape()` in tensorflow.

    Args:
      tensor: A `Tensor` (automatically given by chain).
      opt:
        shape: A tuple/list of integers. The destination shape.
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    # shape must be supplied explicitly by the caller.
    assert opt.shape is not None, 'shape is mandatory.'
    return tf.reshape(tensor, opt.shape, name=opt.name)
@tf.sg_sugar_func
def sg_transpose(tensor, opt):
    r"""Permutes the dimensions according to `opt.perm`.

    See `tf.transpose()` in tensorflow.

    Args:
      tensor: A `Tensor` (automatically given by chain).
      opt:
        perm: A permutation of the dimensions of `tensor`. The target shape.
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    # perm must be supplied explicitly by the caller.
    assert opt.perm is not None, 'perm is mandatory'
    return tf.transpose(tensor, opt.perm, name=opt.name)
@tf.sg_sugar_func
def sg_argmax(tensor, opt):
    r"""Returns the indices of the maximum values along the specified axis.

    See `tf.argmax()` in tensorflow.

    Args:
      tensor: A `Tensor` (automatically given by chain).
      opt:
        axis: Target axis. Default is the last one.
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    # default to reducing over the last axis
    last_axis = tensor.get_shape().ndims - 1
    opt += tf.sg_opt(axis=last_axis)
    return tf.argmax(tensor, opt.axis, opt.name)
@tf.sg_sugar_func
def sg_argmin(tensor, opt):
    r"""Returns the indices of the minimum values along the specified axis.

    See `tf.argmin()` in tensorflow.

    Args:
      tensor: A `Tensor` (automatically given by chain).
      opt:
        axis: Target axis. Default is the last one.
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    # default to reducing over the last axis
    opt += tf.sg_opt(axis=tensor.get_shape().ndims - 1)
    return tf.argmin(tensor, opt.axis, opt.name)
@tf.sg_sugar_func
def sg_concat(tensor, opt):
    r"""Concatenates tensors along a axis.

    See `tf.concat()` in tensorflow.

    Args:
      tensor: A `Tensor` (automatically given by chain).
      opt:
        target: A `Tensor` or a tuple/list of `Tensor`s. Must have the same
            rank as `tensor`, and all dimensions except `opt.axis` must be
            equal.
        axis : Target axis. Default is the last one.
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    assert opt.target is not None, 'target is mandatory.'
    opt += tf.sg_opt(axis=tensor.get_shape().ndims - 1)
    # BUG FIX: the original kept a tuple target as-is, so `[tensor] + target`
    # raised TypeError (list + tuple) although the isinstance check admits
    # tuples. Coerce any sequence target to a list first.
    target = list(opt.target) if isinstance(opt.target, (tuple, list)) \
        else [opt.target]
    return tf.concat([tensor] + target, opt.axis, name=opt.name)
@tf.sg_sugar_func
def sg_one_hot(tensor, opt):
    r"""Converts a tensor into a one-hot tensor.

    See `tf.one_hot()` in tensorflow.

    Args:
      tensor: A `Tensor` ( automatically given by chain )
      opt:
        depth: The number of classes.
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    # depth (number of classes) must be supplied explicitly.
    assert opt.depth is not None, 'depth is mandatory.'
    return tf.one_hot(tensor, opt.depth, name=opt.name)
# noinspection PyUnusedLocal
@tf.sg_sugar_func
def sg_to_sparse(tensor, opt):
    r"""Converts a dense tensor into a sparse tensor.

    See `tf.SparseTensor()` in tensorflow.

    Args:
      tensor: A `Tensor` with zero-padding (automatically given by chain).
      opt:
        name: If provided, replace current tensor's name.

    Returns:
      A `SparseTensor`.
    """
    # Zero entries are treated as padding: only non-zero positions become
    # sparse entries, and their stored values are shifted by -1 so the
    # sparse values are zero-based.
    indices = tf.where(tf.not_equal(tensor.sg_float(), 0.))
    return tf.SparseTensor(indices=indices,
                           values=tf.gather_nd(tensor, indices) - 1,  # for zero-based index
                           dense_shape=tf.shape(tensor).sg_cast(dtype=tf.int64))
@tf.sg_sugar_func
def sg_log(tensor, opt):
    r"""Log transform a dense tensor

    See `tf.log()` in tensorflow.

    Args:
      tensor: A `Tensor` ( automatically given by chain )
      opt:
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    # tf.sg_eps guards against log(0) for zero-valued entries.
    return tf.log(tensor + tf.sg_eps, name=opt.name)
@tf.sg_sugar_func
def sg_exp(tensor, opt):
    r"""Exponential transform a dense tensor

    See `tf.exp()` in tensorflow.

    Args:
      tensor: A `Tensor` ( automatically given by chain )
      opt:
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    # Thin pass-through to tf.exp, preserving the optional name.
    return tf.exp(tensor, name=opt.name)
#
# reduce functions
#
@tf.sg_sugar_func
def sg_sum(tensor, opt):
    r"""Computes the sum of elements across axis of a tensor.

    See `tf.reduce_sum()` in tensorflow.

    Args:
      tensor: A `Tensor` with zero-padding (automatically given by chain).
      opt:
        axis: A tuple/list of integers or an integer. The axis to reduce.
        keep_dims: If true, retains reduced dimensions with length 1.
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    # axis=None reduces over all axes; keep_dims is the TF1 spelling.
    return tf.reduce_sum(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)
@tf.sg_sugar_func
def sg_mean(tensor, opt):
    r"""Computes the mean of elements across axis of a tensor.

    See `tf.reduce_mean()` in tensorflow.

    Args:
      tensor: A `Tensor` (automatically given by chain).
      opt:
        axis : A tuple/list of integers or an integer. The axis to reduce.
        keep_dims: If true, retains reduced dimensions with length 1.
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    # axis=None reduces over all axes; keep_dims is the TF1 spelling.
    return tf.reduce_mean(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)
@tf.sg_sugar_func
def sg_prod(tensor, opt):
    r"""Computes the product of elements across axis of a tensor.

    See `tf.reduce_prod()` in tensorflow.

    Args:
      tensor: A `Tensor` (automatically given by chain).
      opt:
        axis: A tuple/list of integers or an integer. The axis to reduce.
        keep_dims: If true, retains reduced dimensions with length 1.
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    reduced = tf.reduce_prod(tensor, axis=opt.axis,
                             keep_dims=opt.keep_dims, name=opt.name)
    return reduced
@tf.sg_sugar_func
def sg_min(tensor, opt):
    r"""Computes the minimum of elements across axis of a tensor.

    See `tf.reduce_min()` in tensorflow.

    Args:
      tensor: A `Tensor` (automatically given by chain).
      opt:
        axis: A tuple/list of integers or an integer. The axis to reduce.
        keep_dims: If true, retains reduced dimensions with length 1.
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    reduced = tf.reduce_min(tensor, axis=opt.axis,
                            keep_dims=opt.keep_dims, name=opt.name)
    return reduced
@tf.sg_sugar_func
def sg_max(tensor, opt):
    r"""Computes the maximum of elements across axis of a tensor.

    See `tf.reduce_max()` in tensorflow.

    Args:
      tensor: A `Tensor` (automatically given by chain).
      opt:
        axis: A tuple/list of integers or an integer. The axis to reduce.
        keep_dims: If true, retains reduced dimensions with length 1.
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    reduced = tf.reduce_max(tensor, axis=opt.axis,
                            keep_dims=opt.keep_dims, name=opt.name)
    return reduced
@tf.sg_sugar_func
def sg_all(tensor, opt):
    r"""Computes the "logical and" of elements across axis of a tensor.

    See `tf.reduce_all()` in tensorflow.

    Args:
      tensor: A `Tensor` (automatically given by chain).
      opt:
        axis: A tuple/list of integers or an integer. The axis to reduce.
        keep_dims: If true, retains reduced dimensions with length 1.
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    reduced = tf.reduce_all(tensor, axis=opt.axis,
                            keep_dims=opt.keep_dims, name=opt.name)
    return reduced
@tf.sg_sugar_func
def sg_any(tensor, opt):
    r"""Computes the "logical or" of elements across axis of a tensor.

    See `tf.reduce_any()` in tensorflow.

    Args:
      tensor: A `Tensor` (automatically given by chain).
      opt:
        axis: A tuple/list of integers or an integer. The axis to reduce.
        keep_dims: If true, retains reduced dimensions with length 1.
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    reduced = tf.reduce_any(tensor, axis=opt.axis,
                            keep_dims=opt.keep_dims, name=opt.name)
    return reduced
#
# complicated transform function ( layer related )
#
@tf.sg_sugar_func
def sg_pool(tensor, opt):
    r"""Performs the 2-D pooling on the `tensor`.

    Mostly used with sg_conv().

    Args:
      tensor: A 4-D `Tensor` (automatically given by chain).
      opt:
        size: A tuple or list of integers of length 2 representing
          `[kernel height, kernel width]`. Can be an int if both values
          are the same. If not specified, the (normalized) stride is used
          as the kernel size, i.e. (2, 2) under the default stride.
        stride: A tuple or list of integers of length 2 or 4 representing
          stride dimensions. If the length is 2, i.e., (a, b), the stride
          is `[1, a, b, 1]`. If the length is 4, i.e., (a, b, c, d), the
          stride is `[a, b, c, d]`. Can be an int. If the length is an
          int, i.e., a, the stride is `[1, a, a, 1]`.
          The default value is [1, 2, 2, 1].
        avg: Boolean. If True, average pooling is applied. Otherwise, max pooling.
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`. The pooled output tensor.
    """
    # default stride and pad ('VALID' padding, 2x2 stride)
    opt += tf.sg_opt(stride=(1, 2, 2, 1), pad='VALID')
    # normalize stride to a rank-4 spec [1, h, w, 1]
    opt.stride = opt.stride if isinstance(opt.stride, (list, tuple)) else [1, opt.stride, opt.stride, 1]
    opt.stride = [1, opt.stride[0], opt.stride[1], 1] if len(opt.stride) == 2 else opt.stride
    # kernel size defaults to the already-normalized stride, then is
    # normalized to a rank-4 spec the same way
    opt += tf.sg_opt(size=opt.stride)
    opt.size = opt.size if isinstance(opt.size, (list, tuple)) else [1, opt.size, opt.size, 1]
    opt.size = [1, opt.size[0], opt.size[1], 1] if len(opt.size) == 2 else opt.size
    if opt.avg:
        out = tf.nn.avg_pool(tensor, opt.size, opt.stride, opt.pad)
    else:
        out = tf.nn.max_pool(tensor, opt.size, opt.stride, opt.pad)
    # identity wrapper only re-labels the op with the requested name
    return tf.identity(out, name=opt.name)
@tf.sg_sugar_func
def sg_pool1d(tensor, opt):
    r"""Performs the 1-D pooling on the `tensor`.

    Args:
      tensor: A 3-D `Tensor` (automatically passed by decorator).
      opt:
        size: A positive `integer` representing `[kernel width]`.
          Default is 2.
        stride: A positive `integer`. The number of entries by which
          the filter is moved right at each step. Default is 2.
        avg: Boolean. If True, average pooling is applied. Otherwise, max pooling.
        name: If provided, replace current tensor's name.

    Returns:
      A tensor
    """
    # fill in defaults: stride 2, 'VALID' padding, kernel size = stride
    opt += tf.sg_opt(stride=2, pad='VALID')
    opt += tf.sg_opt(size=opt.stride)
    # choose the pooling primitive once
    pool_fn = tf.nn.avg_pool if opt.avg else tf.nn.max_pool
    # treat the 1-D input as a width-1 image, pool, then squeeze back
    pooled = pool_fn(tensor.sg_expand_dims(axis=2),
                     (1, opt.size, 1, 1), (1, opt.stride, 1, 1), opt.pad)
    return tf.identity(pooled.sg_squeeze(axis=2), name=opt.name)
@tf.sg_sugar_func
def sg_lookup(tensor, opt):
    r"""Looks up rows of the embedding matrix selected by `tensor`.

    Args:
      tensor: A tensor ( automatically given by chain )
      opt:
        emb: A 2-D `Tensor`. An embedding matrix.
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    emb_matrix = opt.emb
    assert emb_matrix is not None, 'emb is mandatory.'
    return tf.nn.embedding_lookup(emb_matrix, tensor, name=opt.name)
@tf.sg_sugar_func
def sg_reverse_seq(tensor, opt):
    r"""Reverses variable length slices.

    Before applying the pure tensorflow function tf.reverse_sequence,
    this function calculates sequence lengths by counting non-zeros.

    For example,

    ```
    tensor = [[1, 2, 3, 0, 0], [4, 5, 0, 0, 0]]
    tensor.sg_reverse_seq()
    => [[3 2 1 0 0]
        [5 4 0 0 0]]
    ```

    Args:
      tensor: A 2-D `Tensor` (automatically given by chain).
      opt:
        axis: Axis to reverse. Default is 1.
        name : If provided, it replaces current tensor's name.

    Returns:
      A `Tensor` with the same shape and type as `tensor`.
    """
    # default sequence dimension
    opt += tf.sg_opt(axis=1)
    # per-slice length = count of non-zero entries along the reversal axis
    nonzero_mask = tf.not_equal(tensor, tf.zeros_like(tensor))
    seq_len = nonzero_mask.sg_int().sg_sum(axis=opt.axis)
    return tf.reverse_sequence(tensor, seq_len, opt.axis, name=opt.name)
@tf.sg_sugar_func
def sg_periodic_shuffle(tensor, opt):
    r"""Periodic shuffle transformation for SubPixel CNN.

    (see [Shi et al. 2016](http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Shi_Real-Time_Single_Image_CVPR_2016_paper.pdf)

    Args:
      tensor: A tensor (automatically given by chain).
      opt:
        factor: factor to multiply shape by. Default is 2.
        name : If provided, it replaces current tensor's name.

    Returns:
      A tensor
    """
    # default factor
    opt += tf.sg_opt(factor=2)
    # get current static shape; requires all four dims to be known
    # NOTE(review): assumes channel is divisible by factor**2 — TODO confirm
    batch, row, col, channel = tensor.get_shape().as_list()
    # number of output channels after redistributing factor**2 channels
    # into each spatial block
    channel_target = channel // (opt.factor * opt.factor)
    channel_factor = channel // channel_target
    # intermediate shape for shuffling
    # (channel_factor // opt.factor == opt.factor when divisibility holds)
    shape_1 = [batch, row, col, channel_factor // opt.factor, channel_factor // opt.factor]
    shape_2 = [batch, row * opt.factor, col * opt.factor, 1]
    # reshape and transpose for periodic shuffling for each output channel:
    # every group of channel_factor input channels becomes one upscaled plane
    out = []
    for i in range(channel_target):
        out.append((tensor[:, :, :, i*channel_factor:(i+1)*channel_factor])
                   .sg_reshape(shape=shape_1)
                   .sg_transpose(perm=(0, 1, 3, 2, 4))
                   .sg_reshape(shape=shape_2))
    # final output: stack the upscaled planes back on the channel axis
    out = tf.concat(out, 3)
    return tf.identity(out, name=opt.name)
@tf.sg_sugar_func
def sg_inverse_periodic_shuffle(tensor, opt):
    r"""Inverse periodic shuffle transformation for SubPixel CNN.

    (see [Shi et al. 2016](http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Shi_Real-Time_Single_Image_CVPR_2016_paper.pdf)

    Args:
      tensor: A tensor (automatically given by chain).
      opt:
        factor: factor to multiply shape by. Default is 2.
        name : If provided, it replaces current tensor's name.

    Returns:
      A tensor
    """
    # default factor
    opt += tf.sg_opt(factor=2)
    # get current static shape; requires all four dims to be known
    # NOTE(review): assumes row and col are divisible by factor — TODO confirm
    batch, row, col, channel = tensor.get_shape().as_list()
    # each input channel expands into factor**2 output channels
    channel_factor = opt.factor * opt.factor
    # intermediate shape for shuffling
    # (channel_factor // opt.factor == opt.factor)
    shape_1 = [batch, row // opt.factor, col // opt.factor, channel_factor // opt.factor, channel_factor // opt.factor]
    shape_2 = [batch, row // opt.factor, col // opt.factor, channel_factor]
    # reshape and transpose: fold each spatial factor-x-factor block of one
    # input channel into channel_factor output channels
    out = []
    for i in range(channel):
        out.append(tensor[:, :, :, i]
                   .sg_expand_dims()
                   .sg_reshape(shape=shape_1)
                   .sg_transpose(perm=(0, 1, 3, 2, 4))
                   .sg_reshape(shape=shape_2))
    # final output: concatenate the per-channel blocks on the channel axis
    out = tf.concat(out, 3)
    return tf.identity(out, name=opt.name)
| |
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class Test_Bucket(unittest.TestCase):
def _make_one(self, client=None, name=None, properties=None):
from google.cloud.storage.bucket import Bucket
if client is None:
connection = _Connection()
client = _Client(connection)
bucket = Bucket(client, name=name)
bucket._properties = properties or {}
return bucket
def test_ctor(self):
NAME = 'name'
properties = {'key': 'value'}
bucket = self._make_one(name=NAME, properties=properties)
self.assertEqual(bucket.name, NAME)
self.assertEqual(bucket._properties, properties)
self.assertFalse(bucket._acl.loaded)
self.assertIs(bucket._acl.bucket, bucket)
self.assertFalse(bucket._default_object_acl.loaded)
self.assertIs(bucket._default_object_acl.bucket, bucket)
def test_blob(self):
from google.cloud.storage.blob import Blob
BUCKET_NAME = 'BUCKET_NAME'
BLOB_NAME = 'BLOB_NAME'
CHUNK_SIZE = 1024 * 1024
KEY = b'01234567890123456789012345678901' # 32 bytes
bucket = self._make_one(name=BUCKET_NAME)
blob = bucket.blob(
BLOB_NAME, chunk_size=CHUNK_SIZE, encryption_key=KEY)
self.assertIsInstance(blob, Blob)
self.assertIs(blob.bucket, bucket)
self.assertIs(blob.client, bucket.client)
self.assertEqual(blob.name, BLOB_NAME)
self.assertEqual(blob.chunk_size, CHUNK_SIZE)
self.assertEqual(blob._encryption_key, KEY)
def test_exists_miss(self):
from google.cloud.exceptions import NotFound
class _FakeConnection(object):
_called_with = []
@classmethod
def api_request(cls, *args, **kwargs):
cls._called_with.append((args, kwargs))
raise NotFound(args)
BUCKET_NAME = 'bucket-name'
bucket = self._make_one(name=BUCKET_NAME)
client = _Client(_FakeConnection)
self.assertFalse(bucket.exists(client=client))
expected_called_kwargs = {
'method': 'GET',
'path': bucket.path,
'query_params': {
'fields': 'name',
},
'_target_object': None,
}
expected_cw = [((), expected_called_kwargs)]
self.assertEqual(_FakeConnection._called_with, expected_cw)
def test_exists_hit(self):
class _FakeConnection(object):
_called_with = []
@classmethod
def api_request(cls, *args, **kwargs):
cls._called_with.append((args, kwargs))
# exists() does not use the return value
return object()
BUCKET_NAME = 'bucket-name'
bucket = self._make_one(name=BUCKET_NAME)
client = _Client(_FakeConnection)
self.assertTrue(bucket.exists(client=client))
expected_called_kwargs = {
'method': 'GET',
'path': bucket.path,
'query_params': {
'fields': 'name',
},
'_target_object': None,
}
expected_cw = [((), expected_called_kwargs)]
self.assertEqual(_FakeConnection._called_with, expected_cw)
def test_create_hit(self):
BUCKET_NAME = 'bucket-name'
DATA = {'name': BUCKET_NAME}
connection = _Connection(DATA)
PROJECT = 'PROJECT'
client = _Client(connection, project=PROJECT)
bucket = self._make_one(client=client, name=BUCKET_NAME)
bucket.create()
kw, = connection._requested
self.assertEqual(kw['method'], 'POST')
self.assertEqual(kw['path'], '/b')
self.assertEqual(kw['query_params'], {'project': PROJECT})
self.assertEqual(kw['data'], DATA)
def test_create_w_extra_properties(self):
BUCKET_NAME = 'bucket-name'
PROJECT = 'PROJECT'
CORS = [{
'maxAgeSeconds': 60,
'methods': ['*'],
'origin': ['https://example.com/frontend'],
'responseHeader': ['X-Custom-Header'],
}]
LIFECYCLE_RULES = [{
"action": {"type": "Delete"},
"condition": {"age": 365}
}]
LOCATION = 'eu'
STORAGE_CLASS = 'NEARLINE'
DATA = {
'name': BUCKET_NAME,
'cors': CORS,
'lifecycle': {'rule': LIFECYCLE_RULES},
'location': LOCATION,
'storageClass': STORAGE_CLASS,
'versioning': {'enabled': True},
}
connection = _Connection(DATA)
client = _Client(connection, project=PROJECT)
bucket = self._make_one(client=client, name=BUCKET_NAME)
bucket.cors = CORS
bucket.lifecycle_rules = LIFECYCLE_RULES
bucket.location = LOCATION
bucket.storage_class = STORAGE_CLASS
bucket.versioning_enabled = True
bucket.create()
kw, = connection._requested
self.assertEqual(kw['method'], 'POST')
self.assertEqual(kw['path'], '/b')
self.assertEqual(kw['query_params'], {'project': PROJECT})
self.assertEqual(kw['data'], DATA)
def test_acl_property(self):
from google.cloud.storage.acl import BucketACL
bucket = self._make_one()
acl = bucket.acl
self.assertIsInstance(acl, BucketACL)
self.assertIs(acl, bucket._acl)
def test_default_object_acl_property(self):
from google.cloud.storage.acl import DefaultObjectACL
bucket = self._make_one()
acl = bucket.default_object_acl
self.assertIsInstance(acl, DefaultObjectACL)
self.assertIs(acl, bucket._default_object_acl)
def test_path_no_name(self):
bucket = self._make_one()
self.assertRaises(ValueError, getattr, bucket, 'path')
def test_path_w_name(self):
NAME = 'name'
bucket = self._make_one(name=NAME)
self.assertEqual(bucket.path, '/b/%s' % NAME)
def test_get_blob_miss(self):
NAME = 'name'
NONESUCH = 'nonesuch'
connection = _Connection()
client = _Client(connection)
bucket = self._make_one(name=NAME)
result = bucket.get_blob(NONESUCH, client=client)
self.assertIsNone(result)
kw, = connection._requested
self.assertEqual(kw['method'], 'GET')
self.assertEqual(kw['path'], '/b/%s/o/%s' % (NAME, NONESUCH))
def test_get_blob_hit(self):
NAME = 'name'
BLOB_NAME = 'blob-name'
connection = _Connection({'name': BLOB_NAME})
client = _Client(connection)
bucket = self._make_one(name=NAME)
blob = bucket.get_blob(BLOB_NAME, client=client)
self.assertIs(blob.bucket, bucket)
self.assertEqual(blob.name, BLOB_NAME)
kw, = connection._requested
self.assertEqual(kw['method'], 'GET')
self.assertEqual(kw['path'], '/b/%s/o/%s' % (NAME, BLOB_NAME))
def test_list_blobs_defaults(self):
NAME = 'name'
connection = _Connection({'items': []})
client = _Client(connection)
bucket = self._make_one(client=client, name=NAME)
iterator = bucket.list_blobs()
blobs = list(iterator)
self.assertEqual(blobs, [])
kw, = connection._requested
self.assertEqual(kw['method'], 'GET')
self.assertEqual(kw['path'], '/b/%s/o' % NAME)
self.assertEqual(kw['query_params'], {'projection': 'noAcl'})
def test_list_blobs_w_all_arguments(self):
NAME = 'name'
MAX_RESULTS = 10
PAGE_TOKEN = 'ABCD'
PREFIX = 'subfolder'
DELIMITER = '/'
VERSIONS = True
PROJECTION = 'full'
FIELDS = 'items/contentLanguage,nextPageToken'
EXPECTED = {
'maxResults': 10,
'pageToken': PAGE_TOKEN,
'prefix': PREFIX,
'delimiter': DELIMITER,
'versions': VERSIONS,
'projection': PROJECTION,
'fields': FIELDS,
}
connection = _Connection({'items': []})
client = _Client(connection)
bucket = self._make_one(name=NAME)
iterator = bucket.list_blobs(
max_results=MAX_RESULTS,
page_token=PAGE_TOKEN,
prefix=PREFIX,
delimiter=DELIMITER,
versions=VERSIONS,
projection=PROJECTION,
fields=FIELDS,
client=client,
)
blobs = list(iterator)
self.assertEqual(blobs, [])
kw, = connection._requested
self.assertEqual(kw['method'], 'GET')
self.assertEqual(kw['path'], '/b/%s/o' % NAME)
self.assertEqual(kw['query_params'], EXPECTED)
def test_list_blobs(self):
NAME = 'name'
connection = _Connection({'items': []})
client = _Client(connection)
bucket = self._make_one(client=client, name=NAME)
iterator = bucket.list_blobs()
blobs = list(iterator)
self.assertEqual(blobs, [])
kw, = connection._requested
self.assertEqual(kw['method'], 'GET')
self.assertEqual(kw['path'], '/b/%s/o' % NAME)
self.assertEqual(kw['query_params'], {'projection': 'noAcl'})
def test_delete_miss(self):
from google.cloud.exceptions import NotFound
NAME = 'name'
connection = _Connection()
client = _Client(connection)
bucket = self._make_one(client=client, name=NAME)
self.assertRaises(NotFound, bucket.delete)
expected_cw = [{
'method': 'DELETE',
'path': bucket.path,
'_target_object': None,
}]
self.assertEqual(connection._deleted_buckets, expected_cw)
def test_delete_hit(self):
NAME = 'name'
GET_BLOBS_RESP = {'items': []}
connection = _Connection(GET_BLOBS_RESP)
connection._delete_bucket = True
client = _Client(connection)
bucket = self._make_one(client=client, name=NAME)
result = bucket.delete(force=True)
self.assertIsNone(result)
expected_cw = [{
'method': 'DELETE',
'path': bucket.path,
'_target_object': None,
}]
self.assertEqual(connection._deleted_buckets, expected_cw)
def test_delete_force_delete_blobs(self):
NAME = 'name'
BLOB_NAME1 = 'blob-name1'
BLOB_NAME2 = 'blob-name2'
GET_BLOBS_RESP = {
'items': [
{'name': BLOB_NAME1},
{'name': BLOB_NAME2},
],
}
DELETE_BLOB1_RESP = DELETE_BLOB2_RESP = {}
connection = _Connection(GET_BLOBS_RESP, DELETE_BLOB1_RESP,
DELETE_BLOB2_RESP)
connection._delete_bucket = True
client = _Client(connection)
bucket = self._make_one(client=client, name=NAME)
result = bucket.delete(force=True)
self.assertIsNone(result)
expected_cw = [{
'method': 'DELETE',
'path': bucket.path,
'_target_object': None,
}]
self.assertEqual(connection._deleted_buckets, expected_cw)
def test_delete_force_miss_blobs(self):
NAME = 'name'
BLOB_NAME = 'blob-name1'
GET_BLOBS_RESP = {'items': [{'name': BLOB_NAME}]}
# Note the connection does not have a response for the blob.
connection = _Connection(GET_BLOBS_RESP)
connection._delete_bucket = True
client = _Client(connection)
bucket = self._make_one(client=client, name=NAME)
result = bucket.delete(force=True)
self.assertIsNone(result)
expected_cw = [{
'method': 'DELETE',
'path': bucket.path,
'_target_object': None,
}]
self.assertEqual(connection._deleted_buckets, expected_cw)
def test_delete_too_many(self):
NAME = 'name'
BLOB_NAME1 = 'blob-name1'
BLOB_NAME2 = 'blob-name2'
GET_BLOBS_RESP = {
'items': [
{'name': BLOB_NAME1},
{'name': BLOB_NAME2},
],
}
connection = _Connection(GET_BLOBS_RESP)
connection._delete_bucket = True
client = _Client(connection)
bucket = self._make_one(client=client, name=NAME)
# Make the Bucket refuse to delete with 2 objects.
bucket._MAX_OBJECTS_FOR_ITERATION = 1
self.assertRaises(ValueError, bucket.delete, force=True)
self.assertEqual(connection._deleted_buckets, [])
def test_delete_blob_miss(self):
from google.cloud.exceptions import NotFound
NAME = 'name'
NONESUCH = 'nonesuch'
connection = _Connection()
client = _Client(connection)
bucket = self._make_one(client=client, name=NAME)
self.assertRaises(NotFound, bucket.delete_blob, NONESUCH)
kw, = connection._requested
self.assertEqual(kw['method'], 'DELETE')
self.assertEqual(kw['path'], '/b/%s/o/%s' % (NAME, NONESUCH))
def test_delete_blob_hit(self):
NAME = 'name'
BLOB_NAME = 'blob-name'
connection = _Connection({})
client = _Client(connection)
bucket = self._make_one(client=client, name=NAME)
result = bucket.delete_blob(BLOB_NAME)
self.assertIsNone(result)
kw, = connection._requested
self.assertEqual(kw['method'], 'DELETE')
self.assertEqual(kw['path'], '/b/%s/o/%s' % (NAME, BLOB_NAME))
def test_delete_blobs_empty(self):
NAME = 'name'
connection = _Connection()
client = _Client(connection)
bucket = self._make_one(client=client, name=NAME)
bucket.delete_blobs([])
self.assertEqual(connection._requested, [])
def test_delete_blobs_hit(self):
NAME = 'name'
BLOB_NAME = 'blob-name'
connection = _Connection({})
client = _Client(connection)
bucket = self._make_one(client=client, name=NAME)
bucket.delete_blobs([BLOB_NAME])
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'DELETE')
self.assertEqual(kw[0]['path'], '/b/%s/o/%s' % (NAME, BLOB_NAME))
def test_delete_blobs_miss_no_on_error(self):
from google.cloud.exceptions import NotFound
NAME = 'name'
BLOB_NAME = 'blob-name'
NONESUCH = 'nonesuch'
connection = _Connection({})
client = _Client(connection)
bucket = self._make_one(client=client, name=NAME)
self.assertRaises(NotFound, bucket.delete_blobs, [BLOB_NAME, NONESUCH])
kw = connection._requested
self.assertEqual(len(kw), 2)
self.assertEqual(kw[0]['method'], 'DELETE')
self.assertEqual(kw[0]['path'], '/b/%s/o/%s' % (NAME, BLOB_NAME))
self.assertEqual(kw[1]['method'], 'DELETE')
self.assertEqual(kw[1]['path'], '/b/%s/o/%s' % (NAME, NONESUCH))
def test_delete_blobs_miss_w_on_error(self):
NAME = 'name'
BLOB_NAME = 'blob-name'
NONESUCH = 'nonesuch'
connection = _Connection({})
client = _Client(connection)
bucket = self._make_one(client=client, name=NAME)
errors = []
bucket.delete_blobs([BLOB_NAME, NONESUCH], errors.append)
self.assertEqual(errors, [NONESUCH])
kw = connection._requested
self.assertEqual(len(kw), 2)
self.assertEqual(kw[0]['method'], 'DELETE')
self.assertEqual(kw[0]['path'], '/b/%s/o/%s' % (NAME, BLOB_NAME))
self.assertEqual(kw[1]['method'], 'DELETE')
self.assertEqual(kw[1]['path'], '/b/%s/o/%s' % (NAME, NONESUCH))
def test_copy_blobs_wo_name(self):
SOURCE = 'source'
DEST = 'dest'
BLOB_NAME = 'blob-name'
class _Blob(object):
name = BLOB_NAME
path = '/b/%s/o/%s' % (SOURCE, BLOB_NAME)
connection = _Connection({})
client = _Client(connection)
source = self._make_one(client=client, name=SOURCE)
dest = self._make_one(client=client, name=DEST)
blob = _Blob()
new_blob = source.copy_blob(blob, dest)
self.assertIs(new_blob.bucket, dest)
self.assertEqual(new_blob.name, BLOB_NAME)
kw, = connection._requested
COPY_PATH = '/b/%s/o/%s/copyTo/b/%s/o/%s' % (SOURCE, BLOB_NAME,
DEST, BLOB_NAME)
self.assertEqual(kw['method'], 'POST')
self.assertEqual(kw['path'], COPY_PATH)
def test_copy_blobs_preserve_acl(self):
from google.cloud.storage.acl import ObjectACL
SOURCE = 'source'
DEST = 'dest'
BLOB_NAME = 'blob-name'
NEW_NAME = 'new_name'
BLOB_PATH = '/b/%s/o/%s' % (SOURCE, BLOB_NAME)
NEW_BLOB_PATH = '/b/%s/o/%s' % (DEST, NEW_NAME)
COPY_PATH = '/b/%s/o/%s/copyTo/b/%s/o/%s' % (SOURCE, BLOB_NAME,
DEST, NEW_NAME)
class _Blob(object):
name = BLOB_NAME
path = BLOB_PATH
connection = _Connection({}, {})
client = _Client(connection)
source = self._make_one(client=client, name=SOURCE)
dest = self._make_one(client=client, name=DEST)
blob = _Blob()
new_blob = source.copy_blob(blob, dest, NEW_NAME, client=client,
preserve_acl=False)
self.assertIs(new_blob.bucket, dest)
self.assertEqual(new_blob.name, NEW_NAME)
self.assertIsInstance(new_blob.acl, ObjectACL)
kw = connection._requested
self.assertEqual(len(kw), 2)
self.assertEqual(kw[0]['method'], 'POST')
self.assertEqual(kw[0]['path'], COPY_PATH)
self.assertEqual(kw[1]['method'], 'PATCH')
self.assertEqual(kw[1]['path'], NEW_BLOB_PATH)
def test_copy_blobs_w_name(self):
SOURCE = 'source'
DEST = 'dest'
BLOB_NAME = 'blob-name'
NEW_NAME = 'new_name'
class _Blob(object):
name = BLOB_NAME
path = '/b/%s/o/%s' % (SOURCE, BLOB_NAME)
connection = _Connection({})
client = _Client(connection)
source = self._make_one(client=client, name=SOURCE)
dest = self._make_one(client=client, name=DEST)
blob = _Blob()
new_blob = source.copy_blob(blob, dest, NEW_NAME)
self.assertIs(new_blob.bucket, dest)
self.assertEqual(new_blob.name, NEW_NAME)
kw, = connection._requested
COPY_PATH = '/b/%s/o/%s/copyTo/b/%s/o/%s' % (SOURCE, BLOB_NAME,
DEST, NEW_NAME)
self.assertEqual(kw['method'], 'POST')
self.assertEqual(kw['path'], COPY_PATH)
def test_rename_blob(self):
BUCKET_NAME = 'BUCKET_NAME'
BLOB_NAME = 'blob-name'
NEW_BLOB_NAME = 'new-blob-name'
DATA = {'name': NEW_BLOB_NAME}
connection = _Connection(DATA)
client = _Client(connection)
bucket = self._make_one(client=client, name=BUCKET_NAME)
class _Blob(object):
def __init__(self, name, bucket_name):
self.name = name
self.path = '/b/%s/o/%s' % (bucket_name, name)
self._deleted = []
def delete(self, client=None):
self._deleted.append(client)
blob = _Blob(BLOB_NAME, BUCKET_NAME)
renamed_blob = bucket.rename_blob(blob, NEW_BLOB_NAME, client=client)
self.assertIs(renamed_blob.bucket, bucket)
self.assertEqual(renamed_blob.name, NEW_BLOB_NAME)
self.assertEqual(blob._deleted, [client])
def test_etag(self):
ETAG = 'ETAG'
properties = {'etag': ETAG}
bucket = self._make_one(properties=properties)
self.assertEqual(bucket.etag, ETAG)
def test_id(self):
ID = 'ID'
properties = {'id': ID}
bucket = self._make_one(properties=properties)
self.assertEqual(bucket.id, ID)
def test_location_getter(self):
NAME = 'name'
before = {'location': 'AS'}
bucket = self._make_one(name=NAME, properties=before)
self.assertEqual(bucket.location, 'AS')
def test_location_setter(self):
NAME = 'name'
bucket = self._make_one(name=NAME)
self.assertIsNone(bucket.location)
bucket.location = 'AS'
self.assertEqual(bucket.location, 'AS')
self.assertTrue('location' in bucket._changes)
def test_lifecycle_rules_getter(self):
NAME = 'name'
LC_RULE = {'action': {'type': 'Delete'}, 'condition': {'age': 42}}
rules = [LC_RULE]
properties = {'lifecycle': {'rule': rules}}
bucket = self._make_one(name=NAME, properties=properties)
self.assertEqual(bucket.lifecycle_rules, rules)
# Make sure it's a copy
self.assertIsNot(bucket.lifecycle_rules, rules)
def test_lifecycle_rules_setter(self):
NAME = 'name'
LC_RULE = {'action': {'type': 'Delete'}, 'condition': {'age': 42}}
rules = [LC_RULE]
bucket = self._make_one(name=NAME)
self.assertEqual(bucket.lifecycle_rules, [])
bucket.lifecycle_rules = rules
self.assertEqual(bucket.lifecycle_rules, rules)
self.assertTrue('lifecycle' in bucket._changes)
def test_cors_getter(self):
NAME = 'name'
CORS_ENTRY = {
'maxAgeSeconds': 1234,
'method': ['OPTIONS', 'GET'],
'origin': ['127.0.0.1'],
'responseHeader': ['Content-Type'],
}
properties = {'cors': [CORS_ENTRY, {}]}
bucket = self._make_one(name=NAME, properties=properties)
entries = bucket.cors
self.assertEqual(len(entries), 2)
self.assertEqual(entries[0], CORS_ENTRY)
self.assertEqual(entries[1], {})
# Make sure it was a copy, not the same object.
self.assertIsNot(entries[0], CORS_ENTRY)
def test_cors_setter(self):
NAME = 'name'
CORS_ENTRY = {
'maxAgeSeconds': 1234,
'method': ['OPTIONS', 'GET'],
'origin': ['127.0.0.1'],
'responseHeader': ['Content-Type'],
}
bucket = self._make_one(name=NAME)
self.assertEqual(bucket.cors, [])
bucket.cors = [CORS_ENTRY]
self.assertEqual(bucket.cors, [CORS_ENTRY])
self.assertTrue('cors' in bucket._changes)
def test_get_logging_w_prefix(self):
NAME = 'name'
LOG_BUCKET = 'logs'
LOG_PREFIX = 'pfx'
before = {
'logging': {
'logBucket': LOG_BUCKET,
'logObjectPrefix': LOG_PREFIX,
},
}
bucket = self._make_one(name=NAME, properties=before)
info = bucket.get_logging()
self.assertEqual(info['logBucket'], LOG_BUCKET)
self.assertEqual(info['logObjectPrefix'], LOG_PREFIX)
def test_enable_logging_defaults(self):
NAME = 'name'
LOG_BUCKET = 'logs'
before = {'logging': None}
bucket = self._make_one(name=NAME, properties=before)
self.assertIsNone(bucket.get_logging())
bucket.enable_logging(LOG_BUCKET)
info = bucket.get_logging()
self.assertEqual(info['logBucket'], LOG_BUCKET)
self.assertEqual(info['logObjectPrefix'], '')
def test_enable_logging(self):
NAME = 'name'
LOG_BUCKET = 'logs'
LOG_PFX = 'pfx'
before = {'logging': None}
bucket = self._make_one(name=NAME, properties=before)
self.assertIsNone(bucket.get_logging())
bucket.enable_logging(LOG_BUCKET, LOG_PFX)
info = bucket.get_logging()
self.assertEqual(info['logBucket'], LOG_BUCKET)
self.assertEqual(info['logObjectPrefix'], LOG_PFX)
def test_disable_logging(self):
NAME = 'name'
before = {'logging': {'logBucket': 'logs', 'logObjectPrefix': 'pfx'}}
bucket = self._make_one(name=NAME, properties=before)
self.assertIsNotNone(bucket.get_logging())
bucket.disable_logging()
self.assertIsNone(bucket.get_logging())
def test_metageneration(self):
METAGENERATION = 42
properties = {'metageneration': METAGENERATION}
bucket = self._make_one(properties=properties)
self.assertEqual(bucket.metageneration, METAGENERATION)
def test_metageneration_unset(self):
bucket = self._make_one()
self.assertIsNone(bucket.metageneration)
def test_metageneration_string_val(self):
METAGENERATION = 42
properties = {'metageneration': str(METAGENERATION)}
bucket = self._make_one(properties=properties)
self.assertEqual(bucket.metageneration, METAGENERATION)
def test_owner(self):
OWNER = {'entity': 'project-owner-12345', 'entityId': '23456'}
properties = {'owner': OWNER}
bucket = self._make_one(properties=properties)
owner = bucket.owner
self.assertEqual(owner['entity'], 'project-owner-12345')
self.assertEqual(owner['entityId'], '23456')
def test_project_number(self):
PROJECT_NUMBER = 12345
properties = {'projectNumber': PROJECT_NUMBER}
bucket = self._make_one(properties=properties)
self.assertEqual(bucket.project_number, PROJECT_NUMBER)
def test_project_number_unset(self):
bucket = self._make_one()
self.assertIsNone(bucket.project_number)
def test_project_number_string_val(self):
PROJECT_NUMBER = 12345
properties = {'projectNumber': str(PROJECT_NUMBER)}
bucket = self._make_one(properties=properties)
self.assertEqual(bucket.project_number, PROJECT_NUMBER)
def test_self_link(self):
SELF_LINK = 'http://example.com/self/'
properties = {'selfLink': SELF_LINK}
bucket = self._make_one(properties=properties)
self.assertEqual(bucket.self_link, SELF_LINK)
def test_storage_class_getter(self):
STORAGE_CLASS = 'http://example.com/self/'
properties = {'storageClass': STORAGE_CLASS}
bucket = self._make_one(properties=properties)
self.assertEqual(bucket.storage_class, STORAGE_CLASS)
def test_storage_class_setter_invalid(self):
NAME = 'name'
bucket = self._make_one(name=NAME)
with self.assertRaises(ValueError):
bucket.storage_class = 'BOGUS'
self.assertFalse('storageClass' in bucket._changes)
def test_storage_class_setter_STANDARD(self):
NAME = 'name'
bucket = self._make_one(name=NAME)
bucket.storage_class = 'STANDARD'
self.assertEqual(bucket.storage_class, 'STANDARD')
self.assertTrue('storageClass' in bucket._changes)
def test_storage_class_setter_NEARLINE(self):
NAME = 'name'
bucket = self._make_one(name=NAME)
bucket.storage_class = 'NEARLINE'
self.assertEqual(bucket.storage_class, 'NEARLINE')
self.assertTrue('storageClass' in bucket._changes)
def test_storage_class_setter_COLDLINE(self):
NAME = 'name'
bucket = self._make_one(name=NAME)
bucket.storage_class = 'COLDLINE'
self.assertEqual(bucket.storage_class, 'COLDLINE')
self.assertTrue('storageClass' in bucket._changes)
def test_storage_class_setter_MULTI_REGIONAL(self):
NAME = 'name'
bucket = self._make_one(name=NAME)
bucket.storage_class = 'MULTI_REGIONAL'
self.assertEqual(bucket.storage_class, 'MULTI_REGIONAL')
self.assertTrue('storageClass' in bucket._changes)
def test_storage_class_setter_REGIONAL(self):
NAME = 'name'
bucket = self._make_one(name=NAME)
bucket.storage_class = 'REGIONAL'
self.assertEqual(bucket.storage_class, 'REGIONAL')
self.assertTrue('storageClass' in bucket._changes)
def test_storage_class_setter_DURABLE_REDUCED_AVAILABILITY(self):
NAME = 'name'
bucket = self._make_one(name=NAME)
bucket.storage_class = 'DURABLE_REDUCED_AVAILABILITY'
self.assertEqual(bucket.storage_class, 'DURABLE_REDUCED_AVAILABILITY')
self.assertTrue('storageClass' in bucket._changes)
def test_time_created(self):
    """``time_created`` parses the RFC 3339 'timeCreated' property."""
    import datetime
    from google.cloud._helpers import _RFC3339_MICROS
    from google.cloud._helpers import UTC
    timestamp = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC)
    properties = {'timeCreated': timestamp.strftime(_RFC3339_MICROS)}
    bucket = self._make_one(properties=properties)
    self.assertEqual(bucket.time_created, timestamp)

def test_time_created_unset(self):
    """Without the property, ``time_created`` is None."""
    self.assertIsNone(self._make_one().time_created)
def test_versioning_enabled_getter_missing(self):
    """An absent 'versioning' property reads as False."""
    bucket = self._make_one(name='name')
    self.assertEqual(bucket.versioning_enabled, False)

def test_versioning_enabled_getter(self):
    """'versioning.enabled' in the properties is surfaced directly."""
    properties = {'versioning': {'enabled': True}}
    bucket = self._make_one(name='name', properties=properties)
    self.assertEqual(bucket.versioning_enabled, True)

def test_versioning_enabled_setter(self):
    """Assigning the property flips the getter from False to True."""
    bucket = self._make_one(name='name')
    self.assertFalse(bucket.versioning_enabled)
    bucket.versioning_enabled = True
    self.assertTrue(bucket.versioning_enabled)
def test_configure_website_defaults(self):
    """Calling with no arguments nulls out both website sub-keys."""
    expected = {'website': {'mainPageSuffix': None,
                            'notFoundPage': None}}
    bucket = self._make_one(name='name')
    bucket.configure_website()
    self.assertEqual(bucket._properties, expected)

def test_configure_website(self):
    """Explicit arguments land in the 'website' property."""
    expected = {'website': {'mainPageSuffix': 'html',
                            'notFoundPage': '404.html'}}
    bucket = self._make_one(name='name')
    bucket.configure_website('html', '404.html')
    self.assertEqual(bucket._properties, expected)

def test_disable_website(self):
    """disable_website() resets both website sub-keys to None."""
    expected = {'website': {'mainPageSuffix': None,
                            'notFoundPage': None}}
    bucket = self._make_one(name='name')
    bucket.disable_website()
    self.assertEqual(bucket._properties, expected)
def test_make_public_defaults(self):
    # make_public() with no arguments PATCHes only the bucket ACL
    # (not the default object ACL) to grant 'allUsers' READER.
    from google.cloud.storage.acl import _ACLEntity
    NAME = 'name'
    permissive = [{'entity': 'allUsers', 'role': _ACLEntity.READER_ROLE}]
    after = {'acl': permissive, 'defaultObjectAcl': []}
    connection = _Connection(after)
    client = _Client(connection)
    bucket = self._make_one(client=client, name=NAME)
    # Mark both ACLs loaded so make_public() issues no reload GETs.
    bucket.acl.loaded = True
    bucket.default_object_acl.loaded = True
    bucket.make_public()
    self.assertEqual(list(bucket.acl), permissive)
    self.assertEqual(list(bucket.default_object_acl), [])
    # Exactly one request: the PATCH updating the bucket ACL.
    kw = connection._requested
    self.assertEqual(len(kw), 1)
    self.assertEqual(kw[0]['method'], 'PATCH')
    self.assertEqual(kw[0]['path'], '/b/%s' % NAME)
    self.assertEqual(kw[0]['data'], {'acl': after['acl']})
    self.assertEqual(kw[0]['query_params'], {'projection': 'full'})
def _make_public_w_future_helper(self, default_object_acl_loaded=True):
    # Helper for make_public(future=True): the default object ACL is
    # also made permissive, reloading it first when not yet loaded.
    from google.cloud.storage.acl import _ACLEntity
    NAME = 'name'
    permissive = [{'entity': 'allUsers', 'role': _ACLEntity.READER_ROLE}]
    after1 = {'acl': permissive, 'defaultObjectAcl': []}
    after2 = {'acl': permissive, 'defaultObjectAcl': permissive}
    if default_object_acl_loaded:
        num_requests = 2
        connection = _Connection(after1, after2)
    else:
        num_requests = 3
        # We return the same value for default_object_acl.reload()
        # to consume.
        connection = _Connection(after1, after1, after2)
    client = _Client(connection)
    bucket = self._make_one(client=client, name=NAME)
    bucket.acl.loaded = True
    bucket.default_object_acl.loaded = default_object_acl_loaded
    bucket.make_public(future=True)
    self.assertEqual(list(bucket.acl), permissive)
    self.assertEqual(list(bucket.default_object_acl), permissive)
    kw = connection._requested
    self.assertEqual(len(kw), num_requests)
    # First request: PATCH of the bucket ACL.
    self.assertEqual(kw[0]['method'], 'PATCH')
    self.assertEqual(kw[0]['path'], '/b/%s' % NAME)
    self.assertEqual(kw[0]['data'], {'acl': permissive})
    self.assertEqual(kw[0]['query_params'], {'projection': 'full'})
    if not default_object_acl_loaded:
        # Middle request: GET reloading the default object ACL.
        self.assertEqual(kw[1]['method'], 'GET')
        self.assertEqual(kw[1]['path'], '/b/%s/defaultObjectAcl' % NAME)
    # Last could be 1 or 2 depending on `default_object_acl_loaded`.
    self.assertEqual(kw[-1]['method'], 'PATCH')
    self.assertEqual(kw[-1]['path'], '/b/%s' % NAME)
    self.assertEqual(kw[-1]['data'], {'defaultObjectAcl': permissive})
    self.assertEqual(kw[-1]['query_params'], {'projection': 'full'})
def test_make_public_w_future(self):
    # Default object ACL already loaded: two PATCHes, no reload GET.
    self._make_public_w_future_helper(default_object_acl_loaded=True)

def test_make_public_w_future_reload_default(self):
    # Default object ACL not loaded: an extra GET reloads it first.
    self._make_public_w_future_helper(default_object_acl_loaded=False)
def test_make_public_recursive(self):
    # recursive=True also grants READ on every blob in the bucket.
    # _item_to_blob is patched so listed items become stub blobs whose
    # ACL saves are recorded in ``_saved``.
    import mock
    from google.cloud.storage.acl import _ACLEntity
    _saved = []

    class _Blob(object):
        _granted = False

        def __init__(self, bucket, name):
            self._bucket = bucket
            self._name = name

        @property
        def acl(self):
            # The stub acts as its own ACL object.
            return self

        # Faux ACL methods
        def all(self):
            return self

        def grant_read(self):
            self._granted = True

        def save(self, client=None):
            _saved.append(
                (self._bucket, self._name, self._granted, client))

    def item_to_blob(self, item):
        return _Blob(self.bucket, item['name'])

    NAME = 'name'
    BLOB_NAME = 'blob-name'
    permissive = [{'entity': 'allUsers', 'role': _ACLEntity.READER_ROLE}]
    after = {'acl': permissive, 'defaultObjectAcl': []}
    connection = _Connection(after, {'items': [{'name': BLOB_NAME}]})
    client = _Client(connection)
    bucket = self._make_one(client=client, name=NAME)
    bucket.acl.loaded = True
    bucket.default_object_acl.loaded = True
    with mock.patch('google.cloud.storage.bucket._item_to_blob',
                    new=item_to_blob):
        bucket.make_public(recursive=True)
    self.assertEqual(list(bucket.acl), permissive)
    self.assertEqual(list(bucket.default_object_acl), [])
    # The single listed blob had READ granted and was saved once.
    self.assertEqual(_saved, [(bucket, BLOB_NAME, True, None)])
    kw = connection._requested
    self.assertEqual(len(kw), 2)
    # First request patches the bucket ACL ...
    self.assertEqual(kw[0]['method'], 'PATCH')
    self.assertEqual(kw[0]['path'], '/b/%s' % NAME)
    self.assertEqual(kw[0]['data'], {'acl': permissive})
    self.assertEqual(kw[0]['query_params'], {'projection': 'full'})
    # ... the second lists blobs, capped at the iteration maximum.
    self.assertEqual(kw[1]['method'], 'GET')
    self.assertEqual(kw[1]['path'], '/b/%s/o' % NAME)
    max_results = bucket._MAX_OBJECTS_FOR_ITERATION + 1
    self.assertEqual(kw[1]['query_params'],
                     {'maxResults': max_results, 'projection': 'full'})
def test_make_public_recursive_too_many(self):
    """make_public(recursive=True) refuses buckets with more blobs
    than ``_MAX_OBJECTS_FOR_ITERATION``."""
    from google.cloud.storage.acl import _ACLEntity
    permissive = [{'entity': 'allUsers', 'role': _ACLEntity.READER_ROLE}]
    after = {'acl': permissive, 'defaultObjectAcl': []}
    listing = {
        'items': [
            {'name': 'blob-name1'},
            {'name': 'blob-name2'},
        ],
    }
    connection = _Connection(after, listing)
    bucket = self._make_one(client=_Client(connection), name='name')
    bucket.acl.loaded = True
    bucket.default_object_acl.loaded = True
    # Make the Bucket refuse to make_public with 2 objects.
    bucket._MAX_OBJECTS_FOR_ITERATION = 1
    with self.assertRaises(ValueError):
        bucket.make_public(recursive=True)
def test_page_empty_response(self):
    """An empty page yields no blobs and accumulates no prefixes."""
    from google.cloud.iterator import Page
    bucket = self._make_one(client=_Client(_Connection()), name='name')
    iterator = bucket.list_blobs()
    page = Page(iterator, (), None)
    iterator._page = page
    self.assertEqual(list(page), [])
    self.assertEqual(iterator.prefixes, set())
def test_page_non_empty_response(self):
    # A page with one item and one prefix: the item materializes as a
    # Blob, and the prefix is accumulated on the iterator.
    import six
    from google.cloud.storage.blob import Blob
    blob_name = 'blob-name'
    response = {'items': [{'name': blob_name}], 'prefixes': ['foo']}
    connection = _Connection()
    client = _Client(connection)
    name = 'name'
    bucket = self._make_one(client=client, name=name)

    def dummy_response():
        # Stand-in for the HTTP fetch; returns the canned page payload.
        return response

    iterator = bucket.list_blobs()
    iterator._get_next_page_response = dummy_response
    page = six.next(iterator.pages)
    self.assertEqual(page.prefixes, ('foo',))
    self.assertEqual(page.num_items, 1)
    blob = six.next(page)
    self.assertEqual(page.remaining, 0)
    self.assertIsInstance(blob, Blob)
    self.assertEqual(blob.name, blob_name)
    self.assertEqual(iterator.prefixes, set(['foo']))
def test_cumulative_prefixes(self):
    # Prefixes from successive pages accumulate on the iterator
    # (page1 contributes 'foo', page2 contributes 'bar').
    import six
    from google.cloud.storage.blob import Blob
    BLOB_NAME = 'blob-name1'
    response1 = {
        'items': [{'name': BLOB_NAME}],
        'prefixes': ['foo'],
        'nextPageToken': 's39rmf9',
    }
    response2 = {
        'items': [],
        'prefixes': ['bar'],
    }
    connection = _Connection()
    client = _Client(connection)
    name = 'name'
    bucket = self._make_one(client=client, name=name)
    responses = [response1, response2]

    def dummy_response():
        # Pop canned pages in order, one per fetch.
        return responses.pop(0)

    iterator = bucket.list_blobs()
    iterator._get_next_page_response = dummy_response
    # Parse first response.
    pages_iter = iterator.pages
    page1 = six.next(pages_iter)
    self.assertEqual(page1.prefixes, ('foo',))
    self.assertEqual(page1.num_items, 1)
    blob = six.next(page1)
    self.assertEqual(page1.remaining, 0)
    self.assertIsInstance(blob, Blob)
    self.assertEqual(blob.name, BLOB_NAME)
    self.assertEqual(iterator.prefixes, set(['foo']))
    # Parse second response.
    page2 = six.next(pages_iter)
    self.assertEqual(page2.prefixes, ('bar',))
    self.assertEqual(page2.num_items, 0)
    self.assertEqual(iterator.prefixes, set(['foo', 'bar']))
class _Connection(object):
    """Stub connection: records every request and replays canned responses.

    Bucket DELETEs are tracked separately and succeed only when
    ``_delete_bucket`` is set on the instance/class.
    """

    _delete_bucket = False

    def __init__(self, *responses):
        self._responses = responses
        self._requested = []
        self._deleted_buckets = []

    @staticmethod
    def _is_bucket_path(path):
        # Now just ensure the path only has /b/ and one more segment.
        return path.startswith('/b/') and path.count('/') == 2

    def api_request(self, **kw):
        """Record the request and pop the next canned response.

        Raises ``NotFound`` when a bucket DELETE is disallowed or when
        the canned responses are exhausted.
        """
        from google.cloud.exceptions import NotFound
        self._requested.append(kw)
        method = kw.get('method')
        path = kw.get('path', '')
        if method == 'DELETE' and self._is_bucket_path(path):
            self._deleted_buckets.append(kw)
            if self._delete_bucket:
                return
            else:
                raise NotFound('miss')
        try:
            response, self._responses = self._responses[0], self._responses[1:]
        except IndexError:
            # Was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit; only exhaustion is expected.
            raise NotFound('miss')
        else:
            return response
class _Client(object):
def __init__(self, connection, project=None):
self._connection = connection
self.project = project
| |
# RiveScript-Python
#
# This code is released under the MIT License.
# See the "LICENSE" file for more information.
#
# https://www.rivescript.com/
from __future__ import unicode_literals
import copy
class SessionManager(object):
    """Abstract interface for RiveScript user-session storage.

    A session manager stores and retrieves per-user variables, e.g. when
    the ``<set>``/``<get>`` tags are used in RiveScript or when API
    functions like ``set_uservar()`` are called.

    RiveScript ships with an in-memory implementation and methods to
    export/import sessions (e.g. to persist them across restarts). For a
    more 'active' store -- a database or cache -- subclass this and
    implement every method. See the ``eg/sessions`` example in the
    rivescript-python repository at
    https://github.com/aichaos/rivescript-python for a worked example.

    The constructor takes no required parameters; subclasses may define
    ``__init__()`` however they need to.
    """

    def set(self, username, args):
        """Set variables for a user.

        Args:
            username (str): The username to set variables for.
            args (dict): Key/value pairs of variables to set. Values are
                usually strings but may be other types (arrays, dicts) for
                internal structures such as input/reply history. A value
                of ``None`` means the key should be deleted from the
                session store.
        """
        raise NotImplementedError

    def get(self, username, key):
        """Retrieve a stored variable for a user.

        Returns ``None`` for an unknown user, and the string value
        ``"undefined"`` for a known user lacking the key.

        Args:
            username (str): The username to retrieve variables for.
            key (str): The specific variable name to retrieve.

        Returns:
            str: The stored value, "undefined", or ``None``.
        """
        raise NotImplementedError

    def get_any(self, username):
        """Retrieve all stored variables for a user.

        Args:
            username (str): The username to retrieve variables for.

        Returns:
            dict: All stored key/value pairs for the user, or ``None``
            if the user doesn't exist.
        """
        raise NotImplementedError

    def get_all(self):
        """Retrieve all variables about all users.

        Returns:
            dict: Mapping of username -> dict of that user's key/value
            pairs, e.g.::

                { "user1": {"topic": "random", "name": "Alice"},
                  "user2": {"topic": "random", "name": "Bob"} }
        """
        raise NotImplementedError

    def reset(self, username):
        """Reset all variables stored about a particular user.

        Args:
            username (str): The username to flush all data for.
        """
        raise NotImplementedError

    def reset_all(self):
        """Reset all variables for all users."""
        raise NotImplementedError

    def freeze(self, username):
        """Snapshot the user's variables for later ``thaw()``.

        Implements the RiveScript ``freeze_uservars()`` method by cloning
        and storing a copy of all the user's stored variables.

        Args:
            username (str): The username to freeze variables for.
        """
        raise NotImplementedError

    def thaw(self, username, action="thaw"):
        """Restore the frozen snapshot of variables for a user.

        Replaces *all* of a user's variables with the copy snapshotted by
        ``freeze()``; a no-op (perhaps with a warning) when no snapshot
        exists.

        Args:
            username (str): The username to restore variables for.
            action (str): What to do with the snapshot:

                * ``thaw``: restore the variables and delete the frozen
                  copy (default).
                * ``discard``: delete the frozen copy without restoring.
                * ``keep``: restore the variables and keep the copy.
        """
        raise NotImplementedError

    def default_session(self):
        """Return the default session data for a new user.

        Subclasses do not need to override this.

        Returns:
            dict: The default key/value pairs, ``{"topic": "random"}``.
        """
        return {"topic": "random"}
class MemorySessionStorage(SessionManager):
    """The default in-memory session store for RiveScript.

    Keeps all user and state information in system memory; nothing is
    persisted to disk by default. Variables can be exported/reloaded via
    the RiveScript API (``get_uservars()`` / ``set_uservars()``), e.g.
    dumped to a JSON file at shutdown and restored at startup.

    To use a database or cache instead, extend ``SessionManager`` and
    implement all of its functions.

    Parameters:
        warn (function): Callback invoked with an error message when an
            operation fails because a user doesn't exist. If omitted, no
            warnings are emitted from this module.
    """

    def __init__(self, warn=None, *args, **kwargs):
        self._fwarn = warn
        self._users = {}    # username -> dict of session variables
        self._frozen = {}   # username -> deep-copied snapshot from freeze()

    def _warn(self, *args, **kwargs):
        # Only forward warnings when a callback was supplied.
        if self._fwarn is not None:
            self._fwarn(*args, **kwargs)

    def set(self, username, vars):
        if username not in self._users:
            self._users[username] = self.default_session()
        for key, value in vars.items():
            if value is None:
                # None means "delete this key" per the SessionManager contract.
                self._users[username].pop(key, None)
            else:
                self._users[username][key] = value

    def get(self, username, key, default="undefined"):
        if username not in self._users:
            return None
        return self._users[username].get(key, default)

    def get_any(self, username):
        if username not in self._users:
            return None
        # Deep-copy so callers can't mutate the stored session in place.
        return copy.deepcopy(self._users[username])

    def get_all(self):
        return copy.deepcopy(self._users)

    def reset(self, username):
        # Fix: was ``del self._users[username]``, which raised KeyError for
        # unknown users -- inconsistent with the warn-and-continue behavior
        # of freeze()/thaw(). Resetting a nonexistent user is now a warned
        # no-op.
        if self._users.pop(username, None) is None:
            self._warn("Can't reset vars for user " + username + ": not found!")

    def reset_all(self):
        self._users = {}

    def freeze(self, username):
        if username in self._users:
            self._frozen[username] = copy.deepcopy(self._users[username])
        else:
            self._warn("Can't freeze vars for user " + username + ": not found!")

    def thaw(self, username, action="thaw"):
        if username in self._frozen:
            # What are we doing?
            if action == "thaw":
                # Thawing them out: restore and drop the snapshot.
                self._users[username] = copy.deepcopy(self._frozen[username])
                del self._frozen[username]
            elif action == "discard":
                # Just discard the frozen copy.
                del self._frozen[username]
            elif action == "keep":
                # Keep the frozen copy afterward.
                self._users[username] = copy.deepcopy(self._frozen[username])
            else:
                self._warn("Unsupported thaw action")
        else:
            self._warn("Can't thaw vars for user " + username + ": not found!")
class NullSessionStorage(SessionManager):
    """A session manager that deliberately stores nothing.

    Used by the unit tests; impractical for real-world bots, which would
    be unable to remember any user variables or history.
    """

    def set(self, *args, **kwargs):
        """Discard the variables."""
        pass

    def get(self, *args, **kwargs):
        """Every key reads back as the literal string 'undefined'."""
        return "undefined"

    def get_any(self, *args, **kwargs):
        """No per-user data is ever held."""
        return {}

    def get_all(self, *args, **kwargs):
        """No data for any user."""
        return {}

    def reset(self, *args, **kwargs):
        """Nothing to reset."""
        pass

    def reset_all(self, *args, **kwargs):
        """Nothing to reset."""
        pass

    def freeze(self, *args, **kwargs):
        """Nothing to snapshot."""
        pass

    def thaw(self, *args, **kwargs):
        """Nothing to restore."""
        pass
| |
__all__ = ['imread', 'imsave']
import numpy as np
from six import string_types
from PIL import Image
from ...util import img_as_ubyte, img_as_uint
from .tifffile_plugin import imread as tif_imread, imsave as tif_imsave
def imread(fname, dtype=None, img_num=None, **kwargs):
    """Load an image from file.

    Parameters
    ----------
    fname : str or file
        File name or file-like-object.
    dtype : numpy dtype object or string specifier
        Specifies data type of array elements.
    img_num : int, optional
        Which image to read from a multi-image file (zero-indexed).
    kwargs : keyword pairs, optional
        Extra keyword arguments passed through (currently only used by
        `tifffile`'s `imread` for Tiff files).

    Notes
    -----
    Tiff files are handled by Christophe Golhke's tifffile.py [1]_, which
    supports many advanced image types including multi-page and floating
    point. All other files are read using the Python Imaging Library;
    see PIL docs [2]_ for the supported formats.

    References
    ----------
    .. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py.html
    .. [2] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
    """
    # Delegate TIFF filenames to tifffile, but only when no explicit
    # dtype was requested.
    if hasattr(fname, 'lower') and dtype is None:
        kwargs.setdefault('key', img_num)
        if fname.lower().endswith(('.tiff', '.tif')):
            return tif_imread(fname, **kwargs)

    if isinstance(fname, string_types):
        # A path: open it ourselves so the handle is closed promptly.
        with open(fname, 'rb') as f:
            return pil_to_ndarray(Image.open(f), dtype=dtype, img_num=img_num)
    # Already a file-like object.
    return pil_to_ndarray(Image.open(fname), dtype=dtype, img_num=img_num)
def pil_to_ndarray(im, dtype=None, img_num=None):
    """Import a PIL Image object to an ndarray, in memory.

    Parameters
    ----------
    Refer to ``imread``.
    """
    try:
        # this will raise an IOError if the file is not readable
        im.getdata()[0]
    except IOError as e:
        site = "http://pillow.readthedocs.org/en/latest/installation.html#external-libraries"
        pillow_error_message = str(e)
        error_message = ('Could not load "%s" \n'
                         'Reason: "%s"\n'
                         'Please see documentation at: %s'
                         % (im.filename, pillow_error_message, site))
        raise ValueError(error_message)
    frames = []
    grayscale = None
    i = 0
    while True:
        try:
            im.seek(i)
        except EOFError:
            break

        frame = im

        if img_num is not None and img_num != i:
            # Not the requested frame: touch the data (forces decode of
            # this frame so seek() can advance) and skip it.
            im.getdata()[0]
            i += 1
            continue

        if im.format == 'PNG' and im.mode == 'I' and dtype is None:
            # 16-bit grayscale PNGs are reported as 32-bit 'I' mode;
            # read them as uint16 by default.
            dtype = 'uint16'

        if im.mode == 'P':
            if grayscale is None:
                grayscale = _palette_is_grayscale(im)
            if grayscale:
                frame = im.convert('L')
            else:
                frame = im.convert('RGB')
        elif im.mode == '1':
            frame = im.convert('L')
        elif 'A' in im.mode:
            frame = im.convert('RGBA')
        elif im.mode == 'CMYK':
            frame = im.convert('RGB')

        if im.mode.startswith('I;16'):
            # Raw 16-bit modes: decode bytes directly, honoring the
            # endianness suffix ('B' == big-endian) and signedness ('S').
            shape = im.size
            dtype = '>u2' if im.mode.endswith('B') else '<u2'
            if 'S' in im.mode:
                dtype = dtype.replace('u', 'i')
            # np.fromstring is deprecated; frombuffer is the supported
            # equivalent. .copy() keeps the result writable, matching
            # fromstring's behavior (frombuffer alone is read-only).
            frame = np.frombuffer(frame.tobytes(), dtype).copy()
            frame.shape = shape[::-1]
        else:
            frame = np.array(frame, dtype=dtype)

        frames.append(frame)
        i += 1

        if img_num is not None:
            break

    if hasattr(im, 'fp') and im.fp:
        im.fp.close()

    if img_num is None and len(frames) > 1:
        # Multi-frame image requested in full: stack the frames.
        return np.array(frames)
    elif frames:
        return frames[0]
    elif img_num is not None:
        # Fix: ``elif img_num:`` was falsy for img_num == 0, silently
        # returning None instead of reporting the missing frame.
        raise IndexError('Could not find image #%s' % img_num)
def _palette_is_grayscale(pil_image):
"""Return True if PIL image in palette mode is grayscale.
Parameters
----------
pil_image : PIL image
PIL Image that is in Palette mode.
Returns
-------
is_grayscale : bool
True if all colors in image palette are gray.
"""
assert pil_image.mode == 'P'
# get palette as an array with R, G, B columns
palette = np.asarray(pil_image.getpalette()).reshape((256, 3))
# Not all palette colors are used; unused colors have junk values.
start, stop = pil_image.getextrema()
valid_palette = palette[start:stop]
# Image is grayscale if channel differences (R - G and G - B)
# are all zero.
return np.allclose(np.diff(valid_palette), 0)
def ndarray_to_pil(arr, format_str=None):
    """Export an ndarray to a PIL object.

    Parameters
    ----------
    Refer to ``imsave``.
    """
    if arr.ndim == 3:
        # Color image: 3 (RGB) or 4 (RGBA) channels, always 8-bit.
        arr = img_as_ubyte(arr)
        mode = {3: 'RGB', 4: 'RGBA'}[arr.shape[2]]

    elif format_str in ['png', 'PNG']:
        # Single-channel PNG supports 16-bit; pick depth from the data.
        mode = 'I;16'
        mode_base = 'I'

        if arr.dtype.kind == 'f':
            arr = img_as_uint(arr)

        elif arr.max() < 256 and arr.min() >= 0:
            # Values already fit in a byte: save as 8-bit 'L'.
            arr = arr.astype(np.uint8)
            mode = mode_base = 'L'

        else:
            arr = img_as_uint(arr)

    else:
        # Any other 2-D target format: 8-bit grayscale.
        arr = img_as_ubyte(arr)
        mode = 'L'
        mode_base = 'L'

    try:
        array_buffer = arr.tobytes()
    except AttributeError:
        array_buffer = arr.tostring()  # Numpy < 1.9

    if arr.ndim == 2:
        im = Image.new(mode_base, arr.T.shape)
        try:
            im.frombytes(array_buffer, 'raw', mode)
        except AttributeError:
            im.fromstring(array_buffer, 'raw', mode)  # PIL 1.1.7
    else:
        # PIL expects (width, height); numpy shape is (rows, cols).
        image_shape = (arr.shape[1], arr.shape[0])
        try:
            im = Image.frombytes(mode, image_shape, array_buffer)
        except AttributeError:
            im = Image.fromstring(mode, image_shape, array_buffer)  # PIL 1.1.7
    return im
def imsave(fname, arr, format_str=None, **kwargs):
    """Save an image to disk.

    Parameters
    ----------
    fname : str or file-like object
        Name of destination file.
    arr : ndarray of uint8 or float
        Array (image) to save. Arrays of data-type uint8 should have
        values in [0, 255], whereas floating-point arrays must be
        in [0, 1].
    format_str: str
        Format to save as, this is defaulted to PNG if using a file-like
        object; this will be derived from the extension if fname is a string
    kwargs: dict
        Keyword arguments to the Pillow save function (or tifffile save
        function, for Tiff files). These are format dependent. For example,
        Pillow's JPEG save function supports an integer ``quality`` argument
        with values in [1, 95], while TIFFFile supports a ``compress``
        integer argument with values in [0, 9].

    Notes
    -----
    Tiff files are handled by Christophe Golhke's tifffile.py [1]_,
    and support many advanced image types including multi-page and
    floating point.

    All other image formats use the Python Imaging Library.
    See PIL docs [2]_ for a list of other supported formats.
    All images besides single channel PNGs are converted using
    `img_as_ubyte`.
    Single Channel PNGs have the following behavior:
    - Integer values in [0, 255] and Boolean types -> img_as_ubyte
    - Floating point and other integers -> img_as_uint

    References
    ----------
    .. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py.html
    .. [2] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
    """
    # default to PNG if file-like object
    if not isinstance(fname, string_types) and format_str is None:
        format_str = "PNG"
    # Check for png in filename
    if (isinstance(fname, string_types)
            and fname.lower().endswith(".png")):
        format_str = "PNG"

    arr = np.asanyarray(arr)

    if arr.dtype.kind == 'b':
        # Booleans are stored as 0/1 bytes.
        arr = arr.astype(np.uint8)

    # TIFFs (by extension or explicit format) are delegated to tifffile.
    use_tif = False
    if hasattr(fname, 'lower'):
        if fname.lower().endswith(('.tiff', '.tif')):
            use_tif = True
    if format_str is not None:
        if format_str.lower() in ['tiff', 'tif']:
            use_tif = True

    if use_tif:
        tif_imsave(fname, arr, **kwargs)
        return

    if arr.ndim not in (2, 3):
        raise ValueError("Invalid shape for image array: %s" % arr.shape)

    if arr.ndim == 3:
        if arr.shape[2] not in (3, 4):
            raise ValueError("Invalid number of channels in image array.")

    img = ndarray_to_pil(arr, format_str=format_str)
    img.save(fname, format=format_str, **kwargs)
| |
# -*- coding: utf-8 -*-
"""
Base settings file, common to all environments.
These settings can be overridden in local.py.
"""
import datetime
import os
import json
import hashlib
from datetime import timedelta
from collections import OrderedDict
os_env = os.environ


def parent_dir(path):
    """Return the absolute path of *path*'s parent directory."""
    return os.path.dirname(os.path.abspath(path))
# Filesystem layout, derived from this module's location.
HERE = os.path.dirname(os.path.abspath(__file__))
BASE_PATH = parent_dir(HERE)  # website/ directory
APP_PATH = parent_dir(BASE_PATH)
ADDON_PATH = os.path.join(BASE_PATH, 'addons')
STATIC_FOLDER = os.path.join(BASE_PATH, 'static')
STATIC_URL_PATH = '/static'
ASSET_HASH_PATH = os.path.join(APP_PATH, 'webpack-assets.json')
ROOT = os.path.join(BASE_PATH, '..')
# Work factor for bcrypt password hashing.
BCRYPT_LOG_ROUNDS = 12

# Application version is read from package.json so Python and JS agree.
with open(os.path.join(APP_PATH, 'package.json'), 'r') as fobj:
    VERSION = json.load(fobj)['version']

# Hours before email confirmation tokens expire
EMAIL_TOKEN_EXPIRATION = 24
CITATION_STYLES_PATH = os.path.join(BASE_PATH, 'static', 'vendor', 'bower_components', 'styles')

# Minimum seconds between forgot password email attempts
SEND_EMAIL_THROTTLE = 30

# Hours before pending embargo/retraction/registration automatically becomes active
RETRACTION_PENDING_TIME = datetime.timedelta(days=2)
EMBARGO_PENDING_TIME = datetime.timedelta(days=2)
EMBARGO_TERMINATION_PENDING_TIME = datetime.timedelta(days=2)
REGISTRATION_APPROVAL_TIME = datetime.timedelta(days=2)
# Date range for embargo periods
EMBARGO_END_DATE_MIN = datetime.timedelta(days=2)
EMBARGO_END_DATE_MAX = datetime.timedelta(days=1460)  # Four years

LOAD_BALANCER = False
PROXY_ADDRS = []

# May set these to True in local.py for development
DEV_MODE = False
DEBUG_MODE = False

SECURE_MODE = not DEBUG_MODE  # Set secure cookie

PROTOCOL = 'https://' if SECURE_MODE else 'http://'
DOMAIN = PROTOCOL + 'localhost:5000/'
API_DOMAIN = PROTOCOL + 'localhost:8000/'

LOG_PATH = os.path.join(APP_PATH, 'logs')
TEMPLATES_PATH = os.path.join(BASE_PATH, 'templates')
ANALYTICS_PATH = os.path.join(BASE_PATH, 'analytics')

CORE_TEMPLATES = os.path.join(BASE_PATH, 'templates/log_templates.mako')
BUILT_TEMPLATES = os.path.join(BASE_PATH, 'templates/_log_templates.mako')

GNUPG_HOME = os.path.join(BASE_PATH, 'gpg')
GNUPG_BINARY = 'gpg'
# User management & registration
CONFIRM_REGISTRATIONS_BY_EMAIL = True
ALLOW_REGISTRATION = True
ALLOW_LOGIN = True

SEARCH_ENGINE = 'elastic'  # Can be 'elastic', or None
ELASTIC_URI = 'localhost:9200'
ELASTIC_TIMEOUT = 10
ELASTIC_INDEX = 'website'
SHARE_ELASTIC_URI = ELASTIC_URI
SHARE_ELASTIC_INDEX = 'share'
# For old indices
SHARE_ELASTIC_INDEX_TEMPLATE = 'share_v{}'

# Sessions
COOKIE_NAME = 'osf'
# TODO: Override OSF_COOKIE_DOMAIN in local.py in production
OSF_COOKIE_DOMAIN = None
# server-side verification timeout
OSF_SESSION_TIMEOUT = 30 * 24 * 60 * 60  # 30 days in seconds
# TODO: Override SECRET_KEY in local.py in production
SECRET_KEY = 'CHANGEME'
SESSION_COOKIE_SECURE = SECURE_MODE
# TODO: Change to True after ticket #OSF-6339 has been resolved
SESSION_COOKIE_HTTPONLY = False

# local path to private key and cert for local development using https, overwrite in local.py
OSF_SERVER_KEY = None
OSF_SERVER_CERT = None

# Change if using `scripts/cron.py` to manage crontab
CRON_USER = None

# External services
USE_CDN_FOR_CLIENT_LIBS = True

USE_EMAIL = True
FROM_EMAIL = 'openscienceframework-noreply@osf.io'
SUPPORT_EMAIL = 'support@osf.io'

# SMTP Settings
MAIL_SERVER = 'smtp.sendgrid.net'
MAIL_USERNAME = 'osf-smtp'
MAIL_PASSWORD = ''  # Set this in local.py

# OR, if using Sendgrid's API
SENDGRID_API_KEY = None

# Mailchimp
MAILCHIMP_API_KEY = None
MAILCHIMP_WEBHOOK_SECRET_KEY = 'CHANGEME'  # OSF secret key to ensure webhook is secure
ENABLE_EMAIL_SUBSCRIPTIONS = True
MAILCHIMP_GENERAL_LIST = 'Open Science Framework General'

# Triggered emails
OSF_HELP_LIST = 'Open Science Framework Help'
# Delays/throttles for the automated engagement mailings.
WAIT_BETWEEN_MAILS = timedelta(days=7)
NO_ADDON_WAIT_TIME = timedelta(weeks=8)
NO_LOGIN_WAIT_TIME = timedelta(weeks=4)
WELCOME_OSF4M_WAIT_TIME = timedelta(weeks=2)
NO_LOGIN_OSF4M_WAIT_TIME = timedelta(weeks=6)
NEW_PUBLIC_PROJECT_WAIT_TIME = timedelta(hours=24)
WELCOME_OSF4M_WAIT_TIME_GRACE = timedelta(days=12)

# TODO: Override in local.py
MAILGUN_API_KEY = None

# TODO: Override in local.py in production
UPLOADS_PATH = os.path.join(BASE_PATH, 'uploads')
MFR_CACHE_PATH = os.path.join(BASE_PATH, 'mfrcache')
MFR_TEMP_PATH = os.path.join(BASE_PATH, 'mfrtemp')

# Use Celery for file rendering
USE_CELERY = True

# Use GnuPG for encryption
USE_GNUPG = True

# File rendering timeout (in ms)
MFR_TIMEOUT = 30000

# TODO: Override in local.py in production
DB_HOST = 'localhost'
DB_PORT = os_env.get('OSF_DB_PORT', 27017)
DB_NAME = 'osf20130903'
DB_USER = None
DB_PASS = None

# Cache settings
SESSION_HISTORY_LENGTH = 5
SESSION_HISTORY_IGNORE_RULES = [
    lambda url: '/static/' in url,
    lambda url: 'favicon' in url,
    lambda url: url.startswith('/api/'),
]
# TODO: Configuration should not change between deploys - this should be dynamic.
CANONICAL_DOMAIN = 'openscienceframework.org'
COOKIE_DOMAIN = '.openscienceframework.org'  # Beaker
SHORT_DOMAIN = 'osf.io'

# TODO: Combine Python and JavaScript config
COMMENT_MAXLENGTH = 500

# Profile image options
PROFILE_IMAGE_LARGE = 70
PROFILE_IMAGE_MEDIUM = 40
PROFILE_IMAGE_SMALL = 20

# Conference options
CONFERENCE_MIN_COUNT = 5

# HTML sanitization whitelist applied to wiki content.
WIKI_WHITELIST = {
    'tags': [
        'a', 'abbr', 'acronym', 'b', 'bdo', 'big', 'blockquote', 'br',
        'center', 'cite', 'code',
        'dd', 'del', 'dfn', 'div', 'dl', 'dt', 'em', 'embed', 'font',
        'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'ins',
        'kbd', 'li', 'object', 'ol', 'param', 'pre', 'p', 'q',
        's', 'samp', 'small', 'span', 'strike', 'strong', 'sub', 'sup',
        'table', 'tbody', 'td', 'th', 'thead', 'tr', 'tt', 'ul', 'u',
        'var', 'wbr',
    ],
    'attributes': [
        'align', 'alt', 'border', 'cite', 'class', 'dir',
        'height', 'href', 'id', 'src', 'style', 'title', 'type', 'width',
        'face', 'size',  # font tags
        'salign', 'align', 'wmode', 'target',
    ],
    # Styles currently used in Reproducibility Project wiki pages
    'styles': [
        'top', 'left', 'width', 'height', 'position',
        'background', 'font-size', 'text-align', 'z-index',
        'list-style',
    ]
}
# Maps category identifier => Human-readable representation for use in
# titles, menus, etc.
# Use an OrderedDict so that menu items show in the correct order
NODE_CATEGORY_MAP = OrderedDict([
    ('analysis', 'Analysis'),
    ('communication', 'Communication'),
    ('data', 'Data'),
    ('hypothesis', 'Hypothesis'),
    ('instrumentation', 'Instrumentation'),
    ('methods and measures', 'Methods and Measures'),
    ('procedure', 'Procedure'),
    ('project', 'Project'),
    ('software', 'Software'),
    ('other', 'Other'),
    ('', 'Uncategorized')
])

# Add-ons
# Load addons from addons.json
with open(os.path.join(ROOT, 'addons.json')) as fp:
    addon_settings = json.load(fp)
    ADDONS_REQUESTED = addon_settings['addons']
    ADDONS_ARCHIVABLE = addon_settings['addons_archivable']
    ADDONS_COMMENTABLE = addon_settings['addons_commentable']
    ADDONS_BASED_ON_IDS = addon_settings['addons_based_on_ids']

ADDON_CATEGORIES = [
    'documentation',
    'storage',
    'bibliography',
    'other',
    'security',
    'citations',
]

# Add-ons automatically enabled for every user / node.
SYSTEM_ADDED_ADDONS = {
    # 'user': ['badges'],
    'user': [],
    'node': [],
}
# Piwik
# TODO: Override in local.py in production
PIWIK_HOST = None
PIWIK_ADMIN_TOKEN = None
PIWIK_SITE_ID = None

# Keen.io analytics credentials.
KEEN_PROJECT_ID = None
KEEN_WRITE_KEY = None
KEEN_READ_KEY = None

# Sentry error reporting (server-side and JS DSNs).
SENTRY_DSN = None
SENTRY_DSN_JS = None

# TODO: Delete me after merging GitLab
MISSING_FILE_NAME = 'untitled'

# Project Organizer
ALL_MY_PROJECTS_ID = '-amp'
ALL_MY_REGISTRATIONS_ID = '-amr'
ALL_MY_PROJECTS_NAME = 'All my projects'
ALL_MY_REGISTRATIONS_NAME = 'All my registrations'

# Most Popular and New and Noteworthy Nodes
POPULAR_LINKS_NODE = None  # TODO Override in local.py in production.
NEW_AND_NOTEWORTHY_LINKS_NODE = None  # TODO Override in local.py in production.

NEW_AND_NOTEWORTHY_CONTRIBUTOR_BLACKLIST = []  # TODO Override in local.py in production.

# FOR EMERGENCIES ONLY: Setting this to True will disable forks, registrations,
# and uploads in order to save disk space.
DISK_SAVING_MODE = False

# Seconds before another notification email can be sent to a contributor when added to a project
CONTRIBUTOR_ADDED_EMAIL_THROTTLE = 24 * 3600

# Google Analytics
GOOGLE_ANALYTICS_ID = None
GOOGLE_SITE_VERIFICATION = None

# Pingdom
PINGDOM_ID = None

# HMAC signing defaults (e.g. for service callbacks); override in production.
DEFAULT_HMAC_SECRET = 'changeme'
DEFAULT_HMAC_ALGORITHM = hashlib.sha256
WATERBUTLER_URL = 'http://localhost:7777'
WATERBUTLER_ADDRS = ['127.0.0.1']

# Test identifier namespaces
DOI_NAMESPACE = 'doi:10.5072/FK2'
ARK_NAMESPACE = 'ark:99999/fk4'
EZID_USERNAME = 'changeme'
EZID_PASSWORD = 'changeme'
# Format for DOIs and ARKs
EZID_FORMAT = '{namespace}osf.io/{guid}'

# SHARE integration.
USE_SHARE = True
SHARE_REGISTRATION_URL = ''
SHARE_API_DOCS_URL = ''

# Companion services: CAS auth and the modular file renderer.
CAS_SERVER_URL = 'http://localhost:8080'
MFR_SERVER_URL = 'http://localhost:7778'

###### ARCHIVER ###########
ARCHIVE_PROVIDER = 'osfstorage'

MAX_ARCHIVE_SIZE = 5 * 1024 ** 3  # 5 * 2**30 bytes == 5 GiB
MAX_FILE_SIZE = MAX_ARCHIVE_SIZE  # TODO limit file size?

ARCHIVE_TIMEOUT_TIMEDELTA = timedelta(1)  # 24 hours
ENABLE_ARCHIVER = True

# JWT signing for inter-service authentication; override in production.
JWT_SECRET = 'changeme'
JWT_ALGORITHM = 'HS256'

##### CELERY #####
# Queue names, in increasing priority order (low < default < med < high).
DEFAULT_QUEUE = 'celery'
LOW_QUEUE = 'low'
MED_QUEUE = 'med'
HIGH_QUEUE = 'high'
LOW_PRI_MODULES = {
'framework.analytics.tasks',
'framework.celery_tasks',
'scripts.osfstorage.usage_audit',
'scripts.osfstorage.glacier_inventory',
'scripts.analytics.tasks',
'scripts.osfstorage.files_audit',
'scripts.osfstorage.glacier_audit',
'scripts.populate_new_and_noteworthy_projects',
'website.search.elastic_search',
}
MED_PRI_MODULES = {
'framework.email.tasks',
'scripts.send_queued_mails',
'scripts.triggered_mails',
'website.mailchimp_utils',
'website.notifications.tasks',
}
HIGH_PRI_MODULES = {
'scripts.approve_embargo_terminations',
'scripts.approve_registrations',
'scripts.embargo_registrations',
'scripts.refresh_addon_tokens',
'scripts.retract_registrations',
'website.archiver.tasks',
}
# kombu may not be installed yet on a first `invoke requirements` run, so the
# queue declarations are skipped gracefully in that case.
try:
    from kombu import Queue, Exchange
except ImportError:
    pass
else:
    # One direct exchange per queue; x-priority orders consumers across queues.
    CELERY_QUEUES = (
        Queue(LOW_QUEUE, Exchange(LOW_QUEUE), routing_key=LOW_QUEUE,
              consumer_arguments={'x-priority': -1}),
        Queue(DEFAULT_QUEUE, Exchange(DEFAULT_QUEUE), routing_key=DEFAULT_QUEUE,
              consumer_arguments={'x-priority': 0}),
        Queue(MED_QUEUE, Exchange(MED_QUEUE), routing_key=MED_QUEUE,
              consumer_arguments={'x-priority': 1}),
        Queue(HIGH_QUEUE, Exchange(HIGH_QUEUE), routing_key=HIGH_QUEUE,
              consumer_arguments={'x-priority': 10}),
    )
    CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
    # Tasks are routed to queues by the custom router class.
    CELERY_ROUTES = ('framework.celery_tasks.routers.CeleryRouter', )
    CELERY_IGNORE_RESULT = True
    CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True
# Default RabbitMQ broker
BROKER_URL = 'amqp://'
# Default RabbitMQ backend
CELERY_RESULT_BACKEND = 'amqp://'
# Modules to import when celery launches
CELERY_IMPORTS = (
    'framework.celery_tasks',
    'framework.celery_tasks.signals',
    'framework.email.tasks',
    'framework.analytics.tasks',
    'website.mailchimp_utils',
    'website.notifications.tasks',
    'website.archiver.tasks',
    'website.search.search',
    'scripts.populate_new_and_noteworthy_projects',
    'scripts.refresh_addon_tokens',
    'scripts.retract_registrations',
    'scripts.embargo_registrations',
    'scripts.approve_registrations',
    'scripts.approve_embargo_terminations',
    'scripts.triggered_mails',
    'scripts.send_queued_mails',
)
# Modules that need metrics and release requirements
# CELERY_IMPORTS += (
#     'scripts.osfstorage.glacier_inventory',
#     'scripts.osfstorage.glacier_audit',
#     'scripts.osfstorage.usage_audit',
#     'scripts.osfstorage.files_audit',
#     'scripts.analytics.tasks',
#     'scripts.analytics.upload',
# )
# celery.schedule will not be installed when running invoke requirements the first time.
try:
    from celery.schedules import crontab
except ImportError:
    pass
else:
    # Setting up a scheduler, essentially replaces an independent cron job
    CELERYBEAT_SCHEDULE = {
        '5-minute-emails': {
            'task': 'website.notifications.tasks.send_users_email',
            'schedule': crontab(minute='*/5'),
            'args': ('email_transactional',),
        },
        'daily-emails': {
            'task': 'website.notifications.tasks.send_users_email',
            'schedule': crontab(minute=0, hour=0),
            'args': ('email_digest',),
        },
        'refresh_addons': {
            'task': 'scripts.refresh_addon_tokens',
            'schedule': crontab(minute=0, hour= 2), # Daily 2:00 a.m
            # Per-addon values are the number of days before expiry at which to refresh.
            'kwargs': {'dry_run': False, 'addons': {'box': 60, 'googledrive': 14, 'mendeley': 14}},
        },
        'retract_registrations': {
            'task': 'scripts.retract_registrations',
            'schedule': crontab(minute=0, hour=0), # Daily 12 a.m
            'kwargs': {'dry_run': False},
        },
        'embargo_registrations': {
            'task': 'scripts.embargo_registrations',
            'schedule': crontab(minute=0, hour=0), # Daily 12 a.m
            'kwargs': {'dry_run': False},
        },
        'approve_registrations': {
            'task': 'scripts.approve_registrations',
            'schedule': crontab(minute=0, hour=0), # Daily 12 a.m
            'kwargs': {'dry_run': False},
        },
        'approve_embargo_terminations': {
            'task': 'scripts.approve_embargo_terminations',
            'schedule': crontab(minute=0, hour=0), # Daily 12 a.m
            'kwargs': {'dry_run': False},
        },
        'triggered_mails': {
            'task': 'scripts.triggered_mails',
            'schedule': crontab(minute=0, hour=0), # Daily 12 a.m
            'kwargs': {'dry_run': False},
        },
        'send_queued_mails': {
            'task': 'scripts.send_queued_mails',
            'schedule': crontab(minute=0, hour=12), # Daily 12 p.m.
            'kwargs': {'dry_run': False},
        },
        'new-and-noteworthy': {
            'task': 'scripts.populate_new_and_noteworthy_projects',
            'schedule': crontab(minute=0, hour=2, day_of_week=6), # Saturday 2:00 a.m.
            'kwargs': {'dry_run': False}
        },
    }
    # Tasks that need metrics and release requirements
    # CELERYBEAT_SCHEDULE.update({
    #     'usage_audit': {
    #         'task': 'scripts.osfstorage.usage_audit',
    #         'schedule': crontab(minute=0, hour=0),  # Daily 12 a.m
    #         'kwargs': {'send_mail': True},
    #     },
    #     'glacier_inventory': {
    #         'task': 'scripts.osfstorage.glacier_inventory',
    #         'schedule': crontab(minute=0, hour= 0, day_of_week=0),  # Sunday 12:00 a.m.
    #         'args': (),
    #     },
    #     'glacier_audit': {
    #         'task': 'scripts.osfstorage.glacier_audit',
    #         'schedule': crontab(minute=0, hour=6, day_of_week=0),  # Sunday 6:00 a.m.
    #         'kwargs': {'dry_run': False},
    #     },
    #     'files_audit_0': {
    #         'task': 'scripts.osfstorage.files_audit.0',
    #         'schedule': crontab(minute=0, hour=2, day_of_week=0),  # Sunday 2:00 a.m.
    #         'kwargs': {'num_of_workers': 4, 'dry_run': False},
    #     },
    #     'files_audit_1': {
    #         'task': 'scripts.osfstorage.files_audit.1',
    #         'schedule': crontab(minute=0, hour=2, day_of_week=0),  # Sunday 2:00 a.m.
    #         'kwargs': {'num_of_workers': 4, 'dry_run': False},
    #     },
    #     'files_audit_2': {
    #         'task': 'scripts.osfstorage.files_audit.2',
    #         'schedule': crontab(minute=0, hour=2, day_of_week=0),  # Sunday 2:00 a.m.
    #         'kwargs': {'num_of_workers': 4, 'dry_run': False},
    #     },
    #     'files_audit_3': {
    #         'task': 'scripts.osfstorage.files_audit.3',
    #         'schedule': crontab(minute=0, hour=2, day_of_week=0),  # Sunday 2:00 a.m.
    #         'kwargs': {'num_of_workers': 4, 'dry_run': False},
    #     },
    #     'analytics': {
    #         'task': 'scripts.analytics.tasks',
    #         'schedule': crontab(minute=0, hour=2),  # Daily 2:00 a.m.
    #         'kwargs': {}
    #     },
    #     'analytics-upload': {
    #         'task': 'scripts.analytics.upload',
    #         'schedule': crontab(minute=0, hour=6),  # Daily 6:00 a.m.
    #         'kwargs': {}
    #     },
    # })
# WaterButler <-> OSF token crypto material -- placeholders, override in production.
WATERBUTLER_JWE_SALT = 'yusaltydough'
WATERBUTLER_JWE_SECRET = 'CirclesAre4Squares'
WATERBUTLER_JWT_SECRET = 'ILiekTrianglesALot'
WATERBUTLER_JWT_ALGORITHM = 'HS256'
WATERBUTLER_JWT_EXPIRATION = 15
DRAFT_REGISTRATION_APPROVAL_PERIOD = datetime.timedelta(days=10)
# Sanity check against EMBARGO_END_DATE_MIN (defined earlier in this module).
# NOTE(review): asserts are stripped under `python -O`; consider raising instead.
assert (DRAFT_REGISTRATION_APPROVAL_PERIOD > EMBARGO_END_DATE_MIN), 'The draft registration approval period should be more than the minimum embargo end date.'
PREREG_ADMIN_TAG = "prereg_admin"
ENABLE_INSTITUTIONS = False
ENABLE_VARNISH = False
ENABLE_ESI = False
VARNISH_SERVERS = []  # This should be set in local.py or cache invalidation won't work
ESI_MEDIA_TYPES = {'application/vnd.api+json', 'application/json'}
# Used for gathering meta information about the current build
GITHUB_API_TOKEN = None
# ---------------------------------------------------------------------------
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from dateutil.relativedelta import relativedelta
import datetime
import logging
import time
from openerp.osv import osv, fields
import openerp.tools
from openerp.tools.translate import _
from openerp.addons.decimal_precision import decimal_precision as dp
_logger = logging.getLogger(__name__)
class account_analytic_invoice_line(osv.osv):
    """Recurring-invoice line attached to an analytic account (contract).

    These lines describe the products/services that are invoiced for contracts
    with recurring invoicing enabled.
    """
    _name = "account.analytic.invoice.line"

    def _amount_line(self, cr, uid, ids, prop, unknow_none, unknow_dict, context=None):
        # Subtotal = quantity * unit price, rounded to the currency of the
        # contract's pricelist when one is set.
        res = {}
        for line in self.browse(cr, uid, ids, context=context):
            res[line.id] = line.quantity * line.price_unit
            if line.analytic_account_id.pricelist_id:
                cur = line.analytic_account_id.pricelist_id.currency_id
                res[line.id] = self.pool.get('res.currency').round(cr, uid, cur, res[line.id])
        return res

    _columns = {
        'product_id': fields.many2one('product.product','Product',required=True),
        'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account'),
        'name': fields.text('Description', required=True),
        'quantity': fields.float('Quantity', required=True),
        'uom_id': fields.many2one('product.uom', 'Unit of Measure',required=True),
        'price_unit': fields.float('Unit Price', required=True),
        'price_subtotal': fields.function(_amount_line, string='Sub Total', type="float",digits_compute= dp.get_precision('Account')),
    }
    _defaults = {
        'quantity' : 1,
    }

    def product_id_change(self, cr, uid, ids, product, uom_id, qty=0, name='', partner_id=False, price_unit=False, pricelist_id=False, company_id=None, context=None):
        """Onchange for the product: derive description, unit of measure and
        unit price from the selected product, partner and pricelist.

        Returns an onchange dict ({'value': {...}}); when no product is set the
        price is reset to 0.0 and the UoM domain cleared.
        """
        context = context or {}
        uom_obj = self.pool.get('product.uom')
        company_id = company_id or False
        local_context = dict(context, company_id=company_id, force_company=company_id, pricelist=pricelist_id)
        if not product:
            return {'value': {'price_unit': 0.0}, 'domain':{'product_uom':[]}}
        if partner_id:
            # Render the product name/description in the partner's language.
            part = self.pool.get('res.partner').browse(cr, uid, partner_id, context=local_context)
            if part.lang:
                local_context.update({'lang': part.lang})
        result = {}
        res = self.pool.get('product.product').browse(cr, uid, product, context=local_context)
        # Price precedence: explicit price_unit, then pricelist price, then list price.
        price = False
        if price_unit is not False:
            price = price_unit
        elif pricelist_id:
            price = res.price
        if price is False:
            price = res.list_price
        if not name:
            name = self.pool.get('product.product').name_get(cr, uid, [res.id], context=local_context)[0][1]
            if res.description_sale:
                name += '\n'+res.description_sale
        result.update({'name': name or False,'uom_id': uom_id or res.uom_id.id or False, 'price_unit': price})
        res_final = {'value':result}
        if result['uom_id'] != res.uom_id.id:
            # Convert the price to the chosen unit of measure.
            selected_uom = uom_obj.browse(cr, uid, result['uom_id'], context=local_context)
            new_price = uom_obj._compute_price(cr, uid, res.uom_id.id, res_final['value']['price_unit'], result['uom_id'])
            res_final['value']['price_unit'] = new_price
        return res_final
class account_analytic_account(osv.osv):
    """Analytic account (contract) extended with invoicing-analysis figures,
    fixed-price / timesheet invoicing totals and recurring-invoice settings.
    """
    _name = "account.analytic.account"
    _inherit = "account.analytic.account"
    def _analysis_all(self, cr, uid, ids, fields, arg, context=None):
        """Multi-field function backing most of the analysis columns.

        For every analytic account in *ids* and every requested field name in
        *fields*, computes the value from analytic lines and the summary SQL
        views (see the `multi='analytic_analysis'` declarations in `_columns`).
        Returns {account_id: {field_name: value}}.
        """
        dp = 2
        res = dict([(i, {}) for i in ids])
        parent_ids = tuple(ids) #We don't want consolidation for each of these fields because those complex computation is resource-greedy.
        accounts = self.browse(cr, uid, ids, context=context)
        for f in fields:
            if f == 'user_ids':
                # Synthetic many2many ids for the per-user summary view,
                # encoded as account_id * max_user + user_id.
                cr.execute('SELECT MAX(id) FROM res_users')
                max_user = cr.fetchone()[0]
                if parent_ids:
                    cr.execute('SELECT DISTINCT("user") FROM account_analytic_analysis_summary_user ' \
                               'WHERE account_id IN %s AND unit_amount <> 0.0', (parent_ids,))
                    result = cr.fetchall()
                else:
                    result = []
                for id in ids:
                    res[id][f] = [int((id * max_user) + x[0]) for x in result]
            elif f == 'month_ids':
                # Synthetic ids for the per-month summary view: account_id * 1000000 + month.
                if parent_ids:
                    cr.execute('SELECT DISTINCT(month_id) FROM account_analytic_analysis_summary_month ' \
                               'WHERE account_id IN %s AND unit_amount <> 0.0', (parent_ids,))
                    result = cr.fetchall()
                else:
                    result = []
                for id in ids:
                    res[id][f] = [int(id * 1000000 + int(x[0])) for x in result]
            elif f == 'last_worked_invoiced_date':
                # Most recent date among already-invoiced analytic lines.
                for id in ids:
                    res[id][f] = False
                if parent_ids:
                    cr.execute("SELECT account_analytic_line.account_id, MAX(date) \
                            FROM account_analytic_line \
                            WHERE account_id IN %s \
                                AND invoice_id IS NOT NULL \
                            GROUP BY account_analytic_line.account_id;", (parent_ids,))
                    # NOTE(review): `sum` shadows the builtin here; harmless but confusing.
                    for account_id, sum in cr.fetchall():
                        if account_id not in res:
                            res[account_id] = {}
                        res[account_id][f] = sum
            elif f == 'ca_to_invoice':
                # Invoiceable amount from uninvoiced lines, weighted by each
                # line's invoicing factor (hr_timesheet_invoice.factor).
                for id in ids:
                    res[id][f] = 0.0
                res2 = {}
                for account in accounts:
                    cr.execute("""
                        SELECT product_id, sum(amount), user_id, to_invoice, sum(unit_amount), product_uom_id, line.name
                        FROM account_analytic_line line
                            LEFT JOIN account_analytic_journal journal ON (journal.id = line.journal_id)
                        WHERE account_id = %s
                            AND journal.type != 'purchase'
                            AND invoice_id IS NULL
                            AND to_invoice IS NOT NULL
                        GROUP BY product_id, user_id, to_invoice, product_uom_id, line.name""", (account.id,))
                    res[account.id][f] = 0.0
                    for product_id, price, user_id, factor_id, qty, uom, line_name in cr.fetchall():
                        price = -price
                        if product_id:
                            # Re-price via the product/pricelist when a product is set.
                            price = self.pool.get('account.analytic.line')._get_invoice_price(cr, uid, account, product_id, user_id, qty, context)
                        factor = self.pool.get('hr_timesheet_invoice.factor').browse(cr, uid, factor_id, context=context)
                        res[account.id][f] += price * qty * (100-factor.factor or 0.0) / 100.0
                # sum both result on account_id
                for id in ids:
                    res[id][f] = round(res.get(id, {}).get(f, 0.0), dp) + round(res2.get(id, 0.0), 2)
            elif f == 'last_invoice_date':
                # Date of the latest customer invoice linked to the account's lines.
                for id in ids:
                    res[id][f] = False
                if parent_ids:
                    cr.execute ("SELECT account_analytic_line.account_id, \
                                DATE(MAX(account_invoice.date_invoice)) \
                            FROM account_analytic_line \
                                JOIN account_invoice \
                                    ON account_analytic_line.invoice_id = account_invoice.id \
                            WHERE account_analytic_line.account_id IN %s \
                                AND account_analytic_line.invoice_id IS NOT NULL \
                            GROUP BY account_analytic_line.account_id",(parent_ids,))
                    for account_id, lid in cr.fetchall():
                        res[account_id][f] = lid
            elif f == 'last_worked_date':
                # Date of the latest not-yet-invoiced work on the account.
                for id in ids:
                    res[id][f] = False
                if parent_ids:
                    cr.execute("SELECT account_analytic_line.account_id, MAX(date) \
                            FROM account_analytic_line \
                            WHERE account_id IN %s \
                                AND invoice_id IS NULL \
                            GROUP BY account_analytic_line.account_id",(parent_ids,))
                    for account_id, lwd in cr.fetchall():
                        if account_id not in res:
                            res[account_id] = {}
                        res[account_id][f] = lwd
            elif f == 'hours_qtt_non_invoiced':
                # Invoiceable but not yet invoiced time, from 'general' journals only.
                for id in ids:
                    res[id][f] = 0.0
                if parent_ids:
                    cr.execute("SELECT account_analytic_line.account_id, COALESCE(SUM(unit_amount), 0.0) \
                            FROM account_analytic_line \
                            JOIN account_analytic_journal \
                                ON account_analytic_line.journal_id = account_analytic_journal.id \
                            WHERE account_analytic_line.account_id IN %s \
                                AND account_analytic_journal.type='general' \
                                AND invoice_id IS NULL \
                                AND to_invoice IS NOT NULL \
                            GROUP BY account_analytic_line.account_id;",(parent_ids,))
                    for account_id, sua in cr.fetchall():
                        if account_id not in res:
                            res[account_id] = {}
                        res[account_id][f] = round(sua, dp)
                for id in ids:
                    res[id][f] = round(res[id][f], dp)
            elif f == 'hours_quantity':
                # Total worked time from 'general' journals.
                for id in ids:
                    res[id][f] = 0.0
                if parent_ids:
                    cr.execute("SELECT account_analytic_line.account_id, COALESCE(SUM(unit_amount), 0.0) \
                            FROM account_analytic_line \
                            JOIN account_analytic_journal \
                                ON account_analytic_line.journal_id = account_analytic_journal.id \
                            WHERE account_analytic_line.account_id IN %s \
                                AND account_analytic_journal.type='general' \
                            GROUP BY account_analytic_line.account_id",(parent_ids,))
                    ff = cr.fetchall()
                    for account_id, hq in ff:
                        if account_id not in res:
                            res[account_id] = {}
                        res[account_id][f] = round(hq, dp)
                for id in ids:
                    res[id][f] = round(res[id][f], dp)
            elif f == 'ca_theorical':
                # TODO Take care of pricelist and purchase !
                for id in ids:
                    res[id][f] = 0.0
                # Warning
                # This computation doesn't take care of pricelist !
                # Just consider list_price
                if parent_ids:
                    cr.execute("""SELECT account_analytic_line.account_id AS account_id, \
                                COALESCE(SUM((account_analytic_line.unit_amount * pt.list_price) \
                                    - (account_analytic_line.unit_amount * pt.list_price \
                                        * hr.factor)), 0.0) AS somme
                            FROM account_analytic_line \
                            LEFT JOIN account_analytic_journal \
                                ON (account_analytic_line.journal_id = account_analytic_journal.id) \
                            JOIN product_product pp \
                                ON (account_analytic_line.product_id = pp.id) \
                            JOIN product_template pt \
                                ON (pp.product_tmpl_id = pt.id) \
                            JOIN account_analytic_account a \
                                ON (a.id=account_analytic_line.account_id) \
                            JOIN hr_timesheet_invoice_factor hr \
                                ON (hr.id=a.to_invoice) \
                            WHERE account_analytic_line.account_id IN %s \
                                AND a.to_invoice IS NOT NULL \
                                AND account_analytic_journal.type IN ('purchase', 'general')
                            GROUP BY account_analytic_line.account_id""",(parent_ids,))
                    # NOTE(review): `sum` shadows the builtin here as well.
                    for account_id, sum in cr.fetchall():
                        res[account_id][f] = round(sum, dp)
        return res
def _ca_invoiced_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
res_final = {}
child_ids = tuple(ids) #We don't want consolidation for each of these fields because those complex computation is resource-greedy.
for i in child_ids:
res[i] = 0.0
if not child_ids:
return res
if child_ids:
#Search all invoice lines not in cancelled state that refer to this analytic account
inv_line_obj = self.pool.get("account.invoice.line")
inv_lines = inv_line_obj.search(cr, uid, ['&', ('account_analytic_id', 'in', child_ids), ('invoice_id.state', 'not in', ['draft', 'cancel']), ('invoice_id.type', 'in', ['out_invoice', 'out_refund'])], context=context)
for line in inv_line_obj.browse(cr, uid, inv_lines, context=context):
if line.invoice_id.type == 'out_refund':
res[line.account_analytic_id.id] -= line.price_subtotal
else:
res[line.account_analytic_id.id] += line.price_subtotal
for acc in self.browse(cr, uid, res.keys(), context=context):
res[acc.id] = res[acc.id] - (acc.timesheet_ca_invoiced or 0.0)
res_final = res
return res_final
    def _total_cost_calc(self, cr, uid, ids, name, arg, context=None):
        """Total costs per analytic account: sum of all negative analytic
        amounts (costs are stored as negative values), rounded to 2 decimals.
        """
        res = {}
        res_final = {}
        child_ids = tuple(ids) #We don't want consolidation for each of these fields because those complex computation is resource-greedy.
        for i in child_ids:
            res[i] = 0.0
        if not child_ids:
            return res
        if child_ids:
            cr.execute("""SELECT account_analytic_line.account_id, COALESCE(SUM(amount), 0.0) \
                    FROM account_analytic_line \
                    JOIN account_analytic_journal \
                        ON account_analytic_line.journal_id = account_analytic_journal.id \
                    WHERE account_analytic_line.account_id IN %s \
                        AND amount<0 \
                    GROUP BY account_analytic_line.account_id""",(child_ids,))
            # NOTE(review): `sum` shadows the builtin; harmless but confusing.
            for account_id, sum in cr.fetchall():
                res[account_id] = round(sum,2)
            res_final = res
        return res_final
def _remaining_hours_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
if account.quantity_max != 0:
res[account.id] = account.quantity_max - account.hours_quantity
else:
res[account.id] = 0.0
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
def _remaining_hours_to_invoice_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = max(account.hours_qtt_est - account.timesheet_ca_invoiced, account.ca_to_invoice)
return res
def _hours_qtt_invoiced_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = account.hours_quantity - account.hours_qtt_non_invoiced
if res[account.id] < 0:
res[account.id] = 0.0
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
def _revenue_per_hour_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
if account.hours_qtt_invoiced == 0:
res[account.id]=0.0
else:
res[account.id] = account.ca_invoiced / account.hours_qtt_invoiced
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
def _real_margin_rate_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
if account.ca_invoiced == 0:
res[account.id]=0.0
elif account.total_cost != 0.0:
res[account.id] = -(account.real_margin / account.total_cost) * 100
else:
res[account.id] = 0.0
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
    def _fix_price_to_invoice_calc(self, cr, uid, ids, name, arg, context=None):
        """Fixed-price amount left to invoice: untaxed total of the account's
        'manual'-state sale orders minus their non-cancelled invoices."""
        sale_obj = self.pool.get('sale.order')
        res = {}
        for account in self.browse(cr, uid, ids, context=context):
            res[account.id] = 0.0
            sale_ids = sale_obj.search(cr, uid, [('project_id','=', account.id), ('state', '=', 'manual')], context=context)
            for sale in sale_obj.browse(cr, uid, sale_ids, context=context):
                res[account.id] += sale.amount_untaxed
                for invoice in sale.invoice_ids:
                    if invoice.state != 'cancel':
                        res[account.id] -= invoice.amount_untaxed
        return res
    def _timesheet_ca_invoiced_calc(self, cr, uid, ids, name, arg, context=None):
        """Untaxed amount invoiced through timesheets: sums each distinct
        customer invoice linked to the account's invoiced, invoiceable
        timesheet lines (refunds counted negative)."""
        lines_obj = self.pool.get('account.analytic.line')
        res = {}
        # NOTE(review): inv_ids is shared across ALL accounts in `ids`, so an
        # invoice whose lines span two accounts is only counted for the first
        # one processed -- confirm this is intended.
        inv_ids = []
        for account in self.browse(cr, uid, ids, context=context):
            res[account.id] = 0.0
            line_ids = lines_obj.search(cr, uid, [('account_id','=', account.id), ('invoice_id','!=',False), ('to_invoice','!=', False), ('journal_id.type', '=', 'general'), ('invoice_id.type', 'in', ['out_invoice', 'out_refund'])], context=context)
            for line in lines_obj.browse(cr, uid, line_ids, context=context):
                # Count each invoice once, whatever the number of its lines.
                if line.invoice_id not in inv_ids:
                    inv_ids.append(line.invoice_id)
                    if line.invoice_id.type == 'out_refund':
                        res[account.id] -= line.invoice_id.amount_untaxed
                    else:
                        res[account.id] += line.invoice_id.amount_untaxed
        return res
def _remaining_ca_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = max(account.amount_max - account.ca_invoiced, account.fix_price_to_invoice)
return res
def _real_margin_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = account.ca_invoiced + account.total_cost
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
def _theorical_margin_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = account.ca_theorical + account.total_cost
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
def _is_overdue_quantity(self, cr, uid, ids, fieldnames, args, context=None):
result = dict.fromkeys(ids, 0)
for record in self.browse(cr, uid, ids, context=context):
if record.quantity_max > 0.0:
result[record.id] = int(record.hours_quantity > record.quantity_max)
else:
result[record.id] = 0
return result
def _get_analytic_account(self, cr, uid, ids, context=None):
result = set()
for line in self.pool.get('account.analytic.line').browse(cr, uid, ids, context=context):
result.add(line.account_id.id)
return list(result)
def _get_total_estimation(self, account):
tot_est = 0.0
if account.fix_price_invoices:
tot_est += account.amount_max
if account.invoice_on_timesheets:
tot_est += account.hours_qtt_est
return tot_est
def _get_total_invoiced(self, account):
total_invoiced = 0.0
if account.fix_price_invoices:
total_invoiced += account.ca_invoiced
if account.invoice_on_timesheets:
total_invoiced += account.timesheet_ca_invoiced
return total_invoiced
def _get_total_remaining(self, account):
total_remaining = 0.0
if account.fix_price_invoices:
total_remaining += account.remaining_ca
if account.invoice_on_timesheets:
total_remaining += account.remaining_hours_to_invoice
return total_remaining
def _get_total_toinvoice(self, account):
total_toinvoice = 0.0
if account.fix_price_invoices:
total_toinvoice += account.fix_price_to_invoice
if account.invoice_on_timesheets:
total_toinvoice += account.ca_to_invoice
return total_toinvoice
def _sum_of_fields(self, cr, uid, ids, name, arg, context=None):
res = dict([(i, {}) for i in ids])
for account in self.browse(cr, uid, ids, context=context):
res[account.id]['est_total'] = self._get_total_estimation(account)
res[account.id]['invoiced_total'] = self._get_total_invoiced(account)
res[account.id]['remaining_total'] = self._get_total_remaining(account)
res[account.id]['toinvoice_total'] = self._get_total_toinvoice(account)
return res
_columns = {
'is_overdue_quantity' : fields.function(_is_overdue_quantity, method=True, type='boolean', string='Overdue Quantity',
store={
'account.analytic.line' : (_get_analytic_account, None, 20),
'account.analytic.account': (lambda self, cr, uid, ids, c=None: ids, ['quantity_max'], 10),
}),
'ca_invoiced': fields.function(_ca_invoiced_calc, type='float', string='Invoiced Amount',
help="Total customer invoiced amount for this account.",
digits_compute=dp.get_precision('Account')),
'total_cost': fields.function(_total_cost_calc, type='float', string='Total Costs',
help="Total of costs for this account. It includes real costs (from invoices) and indirect costs, like time spent on timesheets.",
digits_compute=dp.get_precision('Account')),
'ca_to_invoice': fields.function(_analysis_all, multi='analytic_analysis', type='float', string='Uninvoiced Amount',
help="If invoice from analytic account, the remaining amount you can invoice to the customer based on the total costs.",
digits_compute=dp.get_precision('Account')),
'ca_theorical': fields.function(_analysis_all, multi='analytic_analysis', type='float', string='Theoretical Revenue',
help="Based on the costs you had on the project, what would have been the revenue if all these costs have been invoiced at the normal sale price provided by the pricelist.",
digits_compute=dp.get_precision('Account')),
'hours_quantity': fields.function(_analysis_all, multi='analytic_analysis', type='float', string='Total Worked Time',
help="Number of time you spent on the analytic account (from timesheet). It computes quantities on all journal of type 'general'."),
'last_invoice_date': fields.function(_analysis_all, multi='analytic_analysis', type='date', string='Last Invoice Date',
help="If invoice from the costs, this is the date of the latest invoiced."),
'last_worked_invoiced_date': fields.function(_analysis_all, multi='analytic_analysis', type='date', string='Date of Last Invoiced Cost',
help="If invoice from the costs, this is the date of the latest work or cost that have been invoiced."),
'last_worked_date': fields.function(_analysis_all, multi='analytic_analysis', type='date', string='Date of Last Cost/Work',
help="Date of the latest work done on this account."),
'hours_qtt_non_invoiced': fields.function(_analysis_all, multi='analytic_analysis', type='float', string='Uninvoiced Time',
help="Number of time (hours/days) (from journal of type 'general') that can be invoiced if you invoice based on analytic account."),
'hours_qtt_invoiced': fields.function(_hours_qtt_invoiced_calc, type='float', string='Invoiced Time',
help="Number of time (hours/days) that can be invoiced plus those that already have been invoiced."),
'remaining_hours': fields.function(_remaining_hours_calc, type='float', string='Remaining Time',
help="Computed using the formula: Maximum Time - Total Worked Time"),
'remaining_hours_to_invoice': fields.function(_remaining_hours_to_invoice_calc, type='float', string='Remaining Time',
help="Computed using the formula: Expected on timesheets - Total invoiced on timesheets"),
'fix_price_to_invoice': fields.function(_fix_price_to_invoice_calc, type='float', string='Remaining Time',
help="Sum of quotations for this contract."),
'timesheet_ca_invoiced': fields.function(_timesheet_ca_invoiced_calc, type='float', string='Remaining Time',
help="Sum of timesheet lines invoiced for this contract."),
'remaining_ca': fields.function(_remaining_ca_calc, type='float', string='Remaining Revenue',
help="Computed using the formula: Max Invoice Price - Invoiced Amount.",
digits_compute=dp.get_precision('Account')),
'revenue_per_hour': fields.function(_revenue_per_hour_calc, type='float', string='Revenue per Time (real)',
help="Computed using the formula: Invoiced Amount / Total Time",
digits_compute=dp.get_precision('Account')),
'real_margin': fields.function(_real_margin_calc, type='float', string='Real Margin',
help="Computed using the formula: Invoiced Amount - Total Costs.",
digits_compute=dp.get_precision('Account')),
'theorical_margin': fields.function(_theorical_margin_calc, type='float', string='Theoretical Margin',
help="Computed using the formula: Theoretical Revenue - Total Costs",
digits_compute=dp.get_precision('Account')),
'real_margin_rate': fields.function(_real_margin_rate_calc, type='float', string='Real Margin Rate (%)',
help="Computes using the formula: (Real Margin / Total Costs) * 100.",
digits_compute=dp.get_precision('Account')),
'fix_price_invoices' : fields.boolean('Fixed Price'),
'invoice_on_timesheets' : fields.boolean("On Timesheets"),
'month_ids': fields.function(_analysis_all, multi='analytic_analysis', type='many2many', relation='account_analytic_analysis.summary.month', string='Month'),
'user_ids': fields.function(_analysis_all, multi='analytic_analysis', type="many2many", relation='account_analytic_analysis.summary.user', string='User'),
'hours_qtt_est': fields.float('Estimation of Hours to Invoice'),
'est_total' : fields.function(_sum_of_fields, type="float",multi="sum_of_all", string="Total Estimation"),
'invoiced_total' : fields.function(_sum_of_fields, type="float",multi="sum_of_all", string="Total Invoiced"),
'remaining_total' : fields.function(_sum_of_fields, type="float",multi="sum_of_all", string="Total Remaining", help="Expectation of remaining income for this contract. Computed as the sum of remaining subtotals which, in turn, are computed as the maximum between '(Estimation - Invoiced)' and 'To Invoice' amounts"),
'toinvoice_total' : fields.function(_sum_of_fields, type="float",multi="sum_of_all", string="Total to Invoice", help=" Sum of everything that could be invoiced for this contract."),
'recurring_invoice_line_ids': fields.one2many('account.analytic.invoice.line', 'analytic_account_id', 'Invoice Lines', copy=True),
'recurring_invoices' : fields.boolean('Generate recurring invoices automatically'),
'recurring_rule_type': fields.selection([
('daily', 'Day(s)'),
('weekly', 'Week(s)'),
('monthly', 'Month(s)'),
('yearly', 'Year(s)'),
], 'Recurrency', help="Invoice automatically repeat at specified interval"),
'recurring_interval': fields.integer('Repeat Every', help="Repeat every (Days/Week/Month/Year)"),
'recurring_next_date': fields.date('Date of Next Invoice'),
}
_defaults = {
'recurring_interval': 1,
'recurring_next_date': lambda *a: time.strftime('%Y-%m-%d'),
'recurring_rule_type':'monthly'
}
    def open_sale_order_lines(self,cr,uid,ids,context=None):
        """Window action listing the sale-order lines to invoice for the
        project/partner taken from the caller's search context."""
        if context is None:
            context = {}
        # Orders are looked up via the search_default_* keys set by the view.
        sale_ids = self.pool.get('sale.order').search(cr,uid,[('project_id','=',context.get('search_default_project_id',False)),('partner_id','in',context.get('search_default_partner_id',False))])
        names = [record.name for record in self.browse(cr, uid, ids, context=context)]
        name = _('Sales Order Lines to Invoice of %s') % ','.join(names)
        return {
            'type': 'ir.actions.act_window',
            'name': name,
            'view_type': 'form',
            'view_mode': 'tree,form',
            'context': context,
            'domain' : [('order_id','in',sale_ids)],
            'res_model': 'sale.order.line',
            'nodestroy': True,
        }
    def on_change_template(self, cr, uid, ids, template_id, date_start=False, context=None):
        """Onchange for the contract template: copy invoicing configuration
        from *template_id* onto the contract.

        Estimation / invoicing-mode / recurring settings are only copied for
        new records (empty *ids*) so existing contracts keep their values.
        """
        if not template_id:
            return {}
        res = super(account_analytic_account, self).on_change_template(cr, uid, ids, template_id, date_start=date_start, context=context)
        template = self.browse(cr, uid, template_id, context=context)
        if not ids:
            res['value']['fix_price_invoices'] = template.fix_price_invoices
            res['value']['amount_max'] = template.amount_max
        if not ids:
            res['value']['invoice_on_timesheets'] = template.invoice_on_timesheets
            res['value']['hours_qtt_est'] = template.hours_qtt_est
        if template.to_invoice.id:
            res['value']['to_invoice'] = template.to_invoice.id
        if template.pricelist_id.id:
            res['value']['pricelist_id'] = template.pricelist_id.id
        if not ids:
            invoice_line_ids = []
            for x in template.recurring_invoice_line_ids:
                # (0, 0, vals) creates a fresh line copied from the template line.
                invoice_line_ids.append((0, 0, {
                    'product_id': x.product_id.id,
                    'uom_id': x.uom_id.id,
                    'name': x.name,
                    'quantity': x.quantity,
                    'price_unit': x.price_unit,
                    'analytic_account_id': x.analytic_account_id and x.analytic_account_id.id or False,
                }))
            res['value']['recurring_invoices'] = template.recurring_invoices
            res['value']['recurring_interval'] = template.recurring_interval
            res['value']['recurring_rule_type'] = template.recurring_rule_type
            res['value']['recurring_invoice_line_ids'] = invoice_line_ids
        return res
def onchange_recurring_invoices(self, cr, uid, ids, recurring_invoices, date_start=False, context=None):
value = {}
if date_start and recurring_invoices:
value = {'value': {'recurring_next_date': date_start}}
return value
def cron_account_analytic_account(self, cr, uid, context=None):
context = dict(context or {})
remind = {}
def fill_remind(key, domain, write_pending=False):
base_domain = [
('type', '=', 'contract'),
('partner_id', '!=', False),
('manager_id', '!=', False),
('manager_id.email', '!=', False),
]
base_domain.extend(domain)
accounts_ids = self.search(cr, uid, base_domain, context=context, order='name asc')
accounts = self.browse(cr, uid, accounts_ids, context=context)
for account in accounts:
if write_pending:
account.write({'state' : 'pending'})
remind_user = remind.setdefault(account.manager_id.id, {})
remind_type = remind_user.setdefault(key, {})
remind_partner = remind_type.setdefault(account.partner_id, []).append(account)
# Already expired
fill_remind("old", [('state', 'in', ['pending'])])
# Expires now
fill_remind("new", [('state', 'in', ['draft', 'open']), '|', '&', ('date', '!=', False), ('date', '<=', time.strftime('%Y-%m-%d')), ('is_overdue_quantity', '=', True)], True)
# Expires in less than 30 days
fill_remind("future", [('state', 'in', ['draft', 'open']), ('date', '!=', False), ('date', '<', (datetime.datetime.now() + datetime.timedelta(30)).strftime("%Y-%m-%d"))])
context['base_url'] = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url')
context['action_id'] = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account_analytic_analysis', 'action_account_analytic_overdue_all')[1]
template_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account_analytic_analysis', 'account_analytic_cron_email_template')[1]
for user_id, data in remind.items():
context["data"] = data
_logger.debug("Sending reminder to uid %s", user_id)
self.pool.get('email.template').send_mail(cr, uid, template_id, user_id, force_send=True, context=context)
return True
def onchange_invoice_on_timesheets(self, cr, uid, ids, invoice_on_timesheets, context=None):
if not invoice_on_timesheets:
return {'value': {'to_invoice': False}}
result = {'value': {'use_timesheets': True}}
try:
to_invoice = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'hr_timesheet_invoice', 'timesheet_invoice_factor1')
result['value']['to_invoice'] = to_invoice[1]
except ValueError:
pass
return result
    def hr_to_invoice_timesheets(self, cr, uid, ids, context=None):
        """Return an act_window showing the uninvoiced, invoiceable
        timesheet lines (general-journal lines only) booked on the given
        contracts."""
        domain = [('invoice_id','=',False),('to_invoice','!=',False), ('journal_id.type', '=', 'general'), ('account_id', 'in', ids)]
        names = [record.name for record in self.browse(cr, uid, ids, context=context)]
        name = _('Timesheets to Invoice of %s') % ','.join(names)
        return {
            'type': 'ir.actions.act_window',
            'name': name,
            'view_type': 'form',
            'view_mode': 'tree,form',
            'domain' : domain,
            'res_model': 'account.analytic.line',
            'nodestroy': True,
        }
    def _prepare_invoice_data(self, cr, uid, contract, context=None):
        """Build the header values for the recurring invoice of *contract*.

        :raises osv.except_osv: when the contract has no customer or no
            sale journal exists for the contract's company
        :return: dict of account.invoice values (without lines)
        """
        context = context or {}
        journal_obj = self.pool.get('account.journal')
        if not contract.partner_id:
            raise osv.except_osv(_('No Customer Defined!'),_("You must first select a Customer for Contract %s!") % contract.name )
        fpos = contract.partner_id.property_account_position or False
        journal_ids = journal_obj.search(cr, uid, [('type', '=','sale'),('company_id', '=', contract.company_id.id or False)], limit=1)
        if not journal_ids:
            raise osv.except_osv(_('Error!'),
                _('Please define a sale journal for the company "%s".') % (contract.company_id.name or '', ))
        partner_payment_term = contract.partner_id.property_payment_term and contract.partner_id.property_payment_term.id or False
        # Currency preference order: contract pricelist, then the
        # partner's pricelist, then the contract company's currency.
        currency_id = False
        if contract.pricelist_id:
            currency_id = contract.pricelist_id.currency_id.id
        elif contract.partner_id.property_product_pricelist:
            currency_id = contract.partner_id.property_product_pricelist.currency_id.id
        elif contract.company_id:
            currency_id = contract.company_id.currency_id.id
        invoice = {
           'account_id': contract.partner_id.property_account_receivable.id,
           'type': 'out_invoice',
           'partner_id': contract.partner_id.id,
           'currency_id': currency_id,
           'journal_id': len(journal_ids) and journal_ids[0] or False,
           'date_invoice': contract.recurring_next_date,
           'origin': contract.code,
           'fiscal_position': fpos and fpos.id,
           'payment_term': partner_payment_term,
           'company_id': contract.company_id.id or False,
        }
        return invoice
    def _prepare_invoice_lines(self, cr, uid, contract, fiscal_position_id, context=None):
        """Build ``(0, 0, vals)`` create-tuples for each recurring invoice
        line of *contract*, mapping income accounts and taxes through the
        given fiscal position when one is set."""
        fpos_obj = self.pool.get('account.fiscal.position')
        fiscal_position = None
        if fiscal_position_id:
            fiscal_position = fpos_obj.browse(cr, uid, fiscal_position_id, context=context)
        invoice_lines = []
        for line in contract.recurring_invoice_line_ids:
            res = line.product_id
            account_id = res.property_account_income.id
            if not account_id:
                # Fall back to the product category's income account.
                account_id = res.categ_id.property_account_income_categ.id
            account_id = fpos_obj.map_account(cr, uid, fiscal_position, account_id)
            taxes = res.taxes_id or False
            tax_id = fpos_obj.map_tax(cr, uid, fiscal_position, taxes)
            invoice_lines.append((0, 0, {
                'name': line.name,
                'account_id': account_id,
                'account_analytic_id': contract.id,
                'price_unit': line.price_unit or 0.0,
                'quantity': line.quantity,
                'uos_id': line.uom_id.id or False,
                'product_id': line.product_id.id or False,
                'invoice_line_tax_id': [(6, 0, tax_id)],
            }))
        return invoice_lines
def _prepare_invoice(self, cr, uid, contract, context=None):
invoice = self._prepare_invoice_data(cr, uid, contract, context=context)
invoice['invoice_line'] = self._prepare_invoice_lines(cr, uid, contract, invoice['fiscal_position'], context=context)
return invoice
    def recurring_create_invoice(self, cr, uid, ids, context=None):
        # Public button/action entry point: generate the pending
        # recurring invoices for the selected contracts.
        return self._recurring_create_invoice(cr, uid, ids, context=context)
    def _cron_recurring_create_invoice(self, cr, uid, context=None):
        # Scheduler entry point: empty ids -> process every due contract,
        # committing after each invoice (automatic=True).
        return self._recurring_create_invoice(cr, uid, [], automatic=True, context=context)
def _recurring_create_invoice(self, cr, uid, ids, automatic=False, context=None):
context = context or {}
invoice_ids = []
current_date = time.strftime('%Y-%m-%d')
if ids:
contract_ids = ids
else:
contract_ids = self.search(cr, uid, [('recurring_next_date','<=', current_date), ('state','=', 'open'), ('recurring_invoices','=', True), ('type', '=', 'contract')])
if contract_ids:
cr.execute('SELECT company_id, array_agg(id) as ids FROM account_analytic_account WHERE id IN %s GROUP BY company_id', (tuple(contract_ids),))
for company_id, ids in cr.fetchall():
for contract in self.browse(cr, uid, ids, context=dict(context, company_id=company_id, force_company=company_id)):
try:
invoice_values = self._prepare_invoice(cr, uid, contract, context=context)
invoice_ids.append(self.pool['account.invoice'].create(cr, uid, invoice_values, context=context))
next_date = datetime.datetime.strptime(contract.recurring_next_date or current_date, "%Y-%m-%d")
interval = contract.recurring_interval
if contract.recurring_rule_type == 'daily':
new_date = next_date+relativedelta(days=+interval)
elif contract.recurring_rule_type == 'weekly':
new_date = next_date+relativedelta(weeks=+interval)
elif contract.recurring_rule_type == 'monthly':
new_date = next_date+relativedelta(months=+interval)
else:
new_date = next_date+relativedelta(years=+interval)
self.write(cr, uid, [contract.id], {'recurring_next_date': new_date.strftime('%Y-%m-%d')}, context=context)
if automatic:
cr.commit()
except Exception:
if automatic:
cr.rollback()
_logger.exception('Fail to create recurring invoice for contract %s', contract.code)
else:
raise
return invoice_ids
class account_analytic_account_summary_user(osv.osv):
    """Read-only SQL view summarising timesheet hours per (analytic
    account, user). The synthetic record id encodes the pair as
    ``account_id * max_user_id + user_id`` (see ``init`` below)."""
    _name = "account_analytic_analysis.summary.user"
    _description = "Hours Summary by User"
    _order='user'
    _auto = False
    _rec_name = 'user'

    def _unit_amount(self, cr, uid, ids, name, arg, context=None):
        """Functional field: decode each synthetic id back to
        (account_id, user_id) and read the summed unit_amount from the
        backing view. Rounded to 2 decimals; missing entries read 0.0."""
        res = {}
        account_obj = self.pool.get('account.analytic.account')
        cr.execute('SELECT MAX(id) FROM res_users')
        max_user = cr.fetchone()[0]
        # NOTE: relies on Python 2 integer division to invert the id
        # encoding produced by the SQL view below.
        account_ids = [int(str(x/max_user - (x%max_user == 0 and 1 or 0))) for x in ids]
        user_ids = [int(str(x-((x/max_user - (x%max_user == 0 and 1 or 0)) *max_user))) for x in ids]
        parent_ids = tuple(account_ids) #We don't want consolidation for each of these fields because those complex computation is resource-greedy.
        if parent_ids:
            cr.execute('SELECT id, unit_amount ' \
                    'FROM account_analytic_analysis_summary_user ' \
                    'WHERE account_id IN %s ' \
                    'AND "user" IN %s',(parent_ids, tuple(user_ids),))
            for sum_id, unit_amount in cr.fetchall():
                res[sum_id] = unit_amount
        for id in ids:
            res[id] = round(res.get(id, 0.0), 2)
        return res

    _columns = {
        'account_id': fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
        'unit_amount': fields.float('Total Time'),
        'user': fields.many2one('res.users', 'User'),
    }
    # Source models whose changes must invalidate this materialised view.
    _depends = {
        'res.users': ['id'],
        'account.analytic.line': ['account_id', 'journal_id', 'unit_amount', 'user_id'],
        'account.analytic.journal': ['type'],
    }

    def init(self, cr):
        """(Re)create the backing SQL view; id = account_id * max_user + user_id."""
        openerp.tools.sql.drop_view_if_exists(cr, 'account_analytic_analysis_summary_user')
        cr.execute('''CREATE OR REPLACE VIEW account_analytic_analysis_summary_user AS (
            with mu as
                (select max(id) as max_user from res_users)
            , lu AS
                (SELECT
                 l.account_id AS account_id,
                 coalesce(l.user_id, 0) AS user_id,
                 SUM(l.unit_amount) AS unit_amount
             FROM account_analytic_line AS l,
                account_analytic_journal AS j
             WHERE (j.type = 'general' ) and (j.id=l.journal_id)
             GROUP BY l.account_id, l.user_id
            )
            select (lu.account_id * mu.max_user) + lu.user_id as id,
                    lu.account_id as account_id,
                    lu.user_id as "user",
                    unit_amount
            from lu, mu)''')
class account_analytic_account_summary_month(osv.osv):
    """Read-only SQL view summarising timesheet hours per analytic
    account and month. The synthetic id combines YYYYMM and the account
    id (``account_id * 1000000 + yyyymm``)."""
    _name = "account_analytic_analysis.summary.month"
    _description = "Hours summary by month"
    _auto = False
    _rec_name = 'month'
    _columns = {
        'account_id': fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
        'unit_amount': fields.float('Total Time'),
        'month': fields.char('Month', size=32, readonly=True),
    }
    # Source models whose changes must invalidate this view.
    _depends = {
        'account.analytic.line': ['account_id', 'date', 'journal_id', 'unit_amount'],
        'account.analytic.journal': ['type'],
    }

    def init(self, cr):
        """(Re)create the backing view: cross join of every account with
        every month that has general-journal activity, LEFT JOINed to the
        per-account monthly sums (so months with no activity read 0)."""
        openerp.tools.sql.drop_view_if_exists(cr, 'account_analytic_analysis_summary_month')
        cr.execute('CREATE VIEW account_analytic_analysis_summary_month AS (' \
                'SELECT ' \
                    '(TO_NUMBER(TO_CHAR(d.month, \'YYYYMM\'), \'999999\') + (d.account_id  * 1000000::bigint))::bigint AS id, ' \
                    'd.account_id AS account_id, ' \
                    'TO_CHAR(d.month, \'Mon YYYY\') AS month, ' \
                    'TO_NUMBER(TO_CHAR(d.month, \'YYYYMM\'), \'999999\') AS month_id, ' \
                    'COALESCE(SUM(l.unit_amount), 0.0) AS unit_amount ' \
                'FROM ' \
                    '(SELECT ' \
                        'd2.account_id, ' \
                        'd2.month ' \
                    'FROM ' \
                        '(SELECT ' \
                            'a.id AS account_id, ' \
                            'l.month AS month ' \
                        'FROM ' \
                            '(SELECT ' \
                                'DATE_TRUNC(\'month\', l.date) AS month ' \
                            'FROM account_analytic_line AS l, ' \
                                'account_analytic_journal AS j ' \
                            'WHERE j.type = \'general\' ' \
                            'GROUP BY DATE_TRUNC(\'month\', l.date) ' \
                            ') AS l, ' \
                            'account_analytic_account AS a ' \
                        'GROUP BY l.month, a.id ' \
                        ') AS d2 ' \
                    'GROUP BY d2.account_id, d2.month ' \
                    ') AS d ' \
                'LEFT JOIN ' \
                    '(SELECT ' \
                        'l.account_id AS account_id, ' \
                        'DATE_TRUNC(\'month\', l.date) AS month, ' \
                        'SUM(l.unit_amount) AS unit_amount ' \
                    'FROM account_analytic_line AS l, ' \
                        'account_analytic_journal AS j ' \
                    'WHERE (j.type = \'general\') and (j.id=l.journal_id) ' \
                    'GROUP BY l.account_id, DATE_TRUNC(\'month\', l.date) ' \
                    ') AS l '
                'ON (' \
                    'd.account_id = l.account_id ' \
                    'AND d.month = l.month' \
                ') ' \
                'GROUP BY d.month, d.account_id ' \
            ')')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| |
import os
import sys
import boto
import subprocess
from landsat.downloader import Downloader
from landsat.landsat import Process
from boto.s3.key import Key
from shutil import rmtree
from datetime import datetime
from models import UserJob_Model, WorkerLog
from boto import utils
import socket
from sqs import (make_SQS_connection, get_queue, get_message, get_attributes,
delete_message_from_handle)
# Worker configuration. The AWS credentials are required environment
# variables: a missing key raises KeyError at import time, by design.
PATH_DOWNLOAD = os.getcwd() + '/download'
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
JOBS_QUEUE = 'snapsat_preview_queue'
REGION = 'us-west-2'

# Resolve a stable worker identity: the EC2 instance id when running on
# AWS, otherwise (local dev/tests) fall back to the hostname.
try:
    INSTANCE_METADATA = utils.get_instance_metadata(timeout=0.5, num_retries=1)
    INSTANCE_ID = INSTANCE_METADATA['instance-id']
except Exception:
    # Was a bare ``except:``, which also swallowed SystemExit and
    # KeyboardInterrupt; Exception is the widest sensible catch here.
    INSTANCE_ID = socket.gethostname()
def cleanup_downloads(folder_path):
    """Clean up download folder if process fails.
    Return True if download folder empty.
    """
    for entry in os.listdir(folder_path):
        entry_path = os.path.join(folder_path, entry)
        # Plain files are unlinked; anything else (directories) is
        # removed recursively.
        if os.path.isfile(entry_path):
            os.remove(entry_path)
        else:
            rmtree(entry_path)
    return not os.listdir(folder_path)
def write_activity(statement, value, activity_type):
    """Write to activity log.

    Records (statement, value) under this worker's INSTANCE_ID with an
    activity_type of 'success' or 'error'.
    """
    WorkerLog.log_entry(INSTANCE_ID, statement, value, activity_type)
def checking_for_jobs():
    """Poll jobs queue for jobs.

    Runs forever: each received message is parsed, deleted from the
    queue, then processed into a preview image.
    """
    SQSconn = make_SQS_connection(REGION, AWS_ACCESS_KEY_ID,
                                  AWS_SECRET_ACCESS_KEY)
    write_activity('SQS Connection', SQSconn.server_name(), 'success')
    jobs_queue = get_queue(SQSconn, JOBS_QUEUE)
    write_activity('Jobs queue', jobs_queue.name, 'success')
    while True:
        job_message = get_message(jobs_queue)
        if job_message:
            job_attributes = get_job_attributes(job_message)
            # Deleting before processing means a crashed job is not
            # retried (at-most-once delivery) -- NOTE(review): confirm
            # this trade-off is intentional.
            delete_job_from_queue(SQSconn, job_message, jobs_queue)
            # Process full res images
            process_image(job_attributes)
# Begin checking for jobs
def get_job_attributes(job_message):
    """Get job attributes, log the result.

    Returns the attribute dict, or None when extraction fails.
    """
    job_attributes = None
    try:
        job_attributes = get_attributes(job_message[0])
        write_activity('Job attributes',
                       str(job_attributes), 'success')
    except Exception as e:
        # Python 2 only: e.message is gone in Python 3.
        write_activity('Attribute retrieval fail because',
                       e.message, 'error')
    return job_attributes
def delete_job_from_queue(SQSconn, job_message, jobs_queue):
    """Remove the job from the job queue, logging success or failure."""
    try:
        del_status = delete_message_from_handle(SQSconn,
                                                jobs_queue,
                                                job_message[0])
        write_activity('Delete status', unicode(del_status), 'success')
    except Exception as e:
        # BUG FIX: the old handler logged ``del_status``, which is never
        # assigned when delete_message_from_handle itself raises --
        # triggering an UnboundLocalError that masked the real failure.
        write_activity('Delete status', unicode(False), 'error')
        write_activity('Delete message fail because ',
                       e.message, 'error')
def process_image(job_attributes):
    """Begin the image processing and log the results.

    On failure: log the error, clean the download directory, and mark
    the job with status 10 (failed -- presumably; confirm against
    UserJob_Model's status codes).
    """
    try:
        proc_status = process(job_attributes)
        write_activity('Job process status',
                       unicode(proc_status), 'success')
    except Exception as e:
        proc_status = False
        # If processing fails, send message to pyramid to update db
        write_activity('Job process success',
                       unicode(proc_status), 'error')
        write_activity('Job process fail because',
                       e.message, 'error')
        cleanup_status = cleanup_downloads(PATH_DOWNLOAD)
        write_activity('Cleanup downloads success',
                       cleanup_status, 'error')
        UserJob_Model.set_job_status(job_attributes['job_id'], 10)
# begin process() breakdown here:
def download_and_set(job):
    """Download 3 band files for the given sceneid.

    Returns (bands, input_path, scene_id).
    """
    # set worker instance id for job
    UserJob_Model.set_worker_instance_id(job['job_id'], INSTANCE_ID)
    scene_id = str(job['scene_id'])
    input_path = os.path.join(PATH_DOWNLOAD, scene_id)
    # Create a subdirectory
    if not os.path.exists(input_path):
        os.makedirs(input_path)
        print 'Directory created.'
    try:
        b = Downloader(verbose=False, download_dir=PATH_DOWNLOAD)
        bands = [job['band_1'], job['band_2'], job['band_3']]
        b.download([scene_id], bands)
        print 'Finished downloading.'
    except:
        # NOTE(review): bare except discards the original error; consider
        # logging/chaining the underlying exception before re-raising.
        raise Exception('Download failed')
    return bands, input_path, scene_id
def resize_bands(bands, input_path, scene_id):
    """gdal resizes each band file and returns filenames to delete and rename.

    Returns (delete_me, rename_me): the original band files and their
    downsized '.re' counterparts, in matching order.
    """
    delete_me, rename_me = [], []
    # Resize each band
    for band in bands:
        file_name = '{}/{}_B{}.TIF'.format(input_path, scene_id, band)
        delete_me.append(file_name)
        file_name2 = '{}.re'.format(file_name)
        rename_me.append(file_name2)
        # Downsample to 10% of the original dimensions for the preview.
        subprocess.call(['gdal_translate', '-outsize', '10%', '10%',
                         file_name, file_name2])
        if not os.path.exists(file_name2):
            raise Exception('gdal_translate did not downsize images')
    print 'Finished resizing three images.'
    return delete_me, rename_me
def remove_and_rename(delete_me, rename_me):
    """Replace each original file with its resized counterpart.

    ``delete_me`` holds the originals to drop; ``rename_me`` holds the
    resized files that take over the original names.
    """
    for resized, original in zip(rename_me, delete_me):
        os.remove(original)
        os.rename(resized, original)
def merge_images(input_path, bands):
    """Combine the 3 bands into 1 color image.

    Delegates to landsat-util's Process; raises a generic Exception on
    any failure so the caller's error path is uniform.
    """
    try:
        processor = Process(input_path, bands=bands, dst_path=PATH_DOWNLOAD,
                            verbose=False)
        processor.run(pansharpen=False)
    except:
        raise Exception('Processing/landsat-util failed')
def name_files(bands, input_path, scene_id):
    """Build the output file names for a scene/band combination.

    Returns (png_path, base_name, tif_path), e.g. base_name
    '<scene>_bands_432' for bands [4, 3, 2].
    """
    band_suffix = ''.join('{}'.format(band) for band in bands)
    file_name = '{}_bands_{}'.format(scene_id, band_suffix)
    file_tif = '{}.TIF'.format(os.path.join(input_path, file_name))
    # Same path, .TIF swapped for .png.
    file_location = '{}png'.format(file_tif[:-3])
    return file_location, file_name, file_tif
def tif_to_png(file_location, file_name, file_tif):
    """Convert a tif file to a png.

    Writes the png to file_location via ImageMagick's ``convert``;
    returns only the S3 key name ('pre_<name>.png') -- the file on disk
    keeps file_location as its path.
    """
    subprocess.call(['convert', file_tif, file_location])
    file_png = 'pre_{}.png'.format(file_name)
    return file_png
def upload_to_s3(file_location, file_png, job):
    """Upload the processed file to S3, update job database."""
    try:
        print 'Uploading to S3'
        conne = boto.connect_s3(aws_access_key_id=AWS_ACCESS_KEY_ID,
                                aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
        b = conne.get_bucket('snapsatpreviewsjoel')
        k = Key(b)
        k.key = file_png
        k.set_contents_from_filename(file_location)
        # Round-trips the object back to disk -- presumably as an upload
        # verification; TODO confirm this is intentional (it doubles the
        # transfer).
        k.get_contents_to_filename(file_location)
        hello = b.get_key(file_png)
        # make public
        hello.set_canned_acl('public-read')
        # expires_in=0 with query_auth=False yields a permanent plain URL.
        out = unicode(hello.generate_url(0, query_auth=False, force_http=True))
        print out
        # Status 5: preview ready, with its public URL.
        UserJob_Model.set_job_status(job['job_id'], 5, out)
    except:
        raise Exception('S3 Upload failed')
def delete_files(input_path):
    """Remove leftover files when we are done with them.

    Best-effort: an OSError is reported but not re-raised.
    """
    try:
        rmtree(input_path)
    except OSError:
        print input_path
        print 'error deleting files'
def process(job):
    """Given bands and sceneID, download, image process, zip & upload to S3.

    Pipeline: download -> downsize -> swap in resized bands -> merge ->
    TIF-to-png -> upload -> cleanup. Returns True on success; any stage
    raises on failure.
    """
    # download and set vars
    bands, input_path, scene_id = download_and_set(job)
    # resize bands
    delete_me, rename_me = resize_bands(bands, input_path, scene_id)
    # remove original band files and rename downsized to correct name
    remove_and_rename(delete_me, rename_me)
    # call landsat-util to merge images
    merge_images(input_path, bands)
    # construct the file names
    file_location, file_name, file_tif = name_files(bands,
                                                    input_path,
                                                    scene_id)
    # convert from TIF to png
    file_png = tif_to_png(file_location, file_name, file_tif)
    # upload to s3
    upload_to_s3(file_location, file_png, job)
    # delete files
    delete_files(input_path)
    return True
if __name__ == '__main__':
    # Worker entry point: poll the SQS jobs queue forever.
    checking_for_jobs()
| |
""" IO interface to GeoTiffs using GDAL. """
import struct
import sys
from math import ceil
import numpy as np
from .band import CompressedBand
from .. import errors
import osgeo.gdal
import osgeo.osr
import osgeo.gdalconst as gc
osgeo.gdal.UseExceptions()
ALL = -1
class GdalFileBand(object):
    """ Read-only raster Band interface the reads data from a disk-bound
    datasource.
    """
    def __init__(self, band, dataset):
        """
        Parameters
        ----------
        band : osgeo.gdal.Band
        dataset : osgeo.gdal.Dataset

        A reference to the dataset is kept as well: the band is only
        valid while its parent dataset stays open.
        """
        self.gdalband = band
        self.dataset = dataset
        return
    def __del__(self):
        # Drop references so GDAL can close the underlying file handle.
        self.dataset = None
        self.gdalband = None
    def getblock(self, yoff, xoff, ny, nx):
        """Read an (ny, nx) chunk at (yoff, xoff); yoff counts from the
        bottom edge, and rows are flipped because GDAL stores top-down."""
        # Note that GDAL uses the alternative x,y convention
        grid_ny, grid_nx = self.size
        chunk = self.gdalband.ReadAsArray(xoff, grid_ny - yoff - ny, nx, ny)
        if chunk is None:
            raise IOError("failure reading slice from GDAL backend")
        return chunk[::-1]
    def setblock(self, yoff, xoff, array):
        # File-backed bands are read-only.
        raise NotImplementedError()
    def __iter__(self):
        # NOTE(review): relies on self[i,:], but __getitem__ is not
        # defined in this class -- confirm it is provided elsewhere, or
        # iterating will raise TypeError.
        for i in range(self.dataset.RasterYSize):
            yield self[i,:]
    @property
    def size(self):
        # (ny, nx) dimensions of the band.
        return (self.dataset.RasterYSize, self.dataset.RasterXSize)
    @property
    def dtype(self):
        # numpy dtype equivalent of the band's GDAL data type.
        return np.dtype(numpy_dtype(self.gdalband.DataType))
def SRS_from_WKT(s):
    """ Parse a WKT string and return an osgeo.osr.SpatialReference,
    or None when the WKT cannot be parsed.

    (The previous docstring claimed a Proj.4 string / semimajor axis /
    flattening tuple was returned, which was wrong.)
    """
    sr = osgeo.osr.SpatialReference()
    try:
        sr.ImportFromWkt(s)
    except RuntimeError:
        sr = None
    return sr
# Map GDAL data-type names to numpy scalar types. The complex-integer
# GDAL types have no exact numpy equivalent and CFloat64 is narrowed,
# so every complex variant maps (lossily for CFloat64) to complex64.
NUMPY_DTYPE = {"Byte": np.uint8,
               "UInt16": np.uint16,
               "Int16": np.int16,
               "UInt32": np.uint32,
               "Int32": np.int32,
               "Float32": np.float32,
               "Float64": np.float64,
               "CInt16": np.complex64,
               "CInt32": np.complex64,
               "CFloat32": np.complex64,
               "CFloat64": np.complex64}
def numpy_dtype(dt_int):
    """ Return a numpy dtype that matches the band data type. """
    try:
        return NUMPY_DTYPE[osgeo.gdal.GetDataTypeName(dt_int)]
    except KeyError:
        raise TypeError("GDAL data type {0} unknown to karta".format(dt_int))
def gdal_type(dtype):
    """ Return a GDAL type that most closely matches numpy dtype
    Notes
    -----
    Returns GDT_Int32 for np.int64, which may result in overflow.
    """
    # Ordered (numpy type, GDAL type) pairs; compared with == to keep
    # the original dtype-equality semantics.
    mapping = (
        (np.uint8, osgeo.gdal.GDT_Byte),
        (np.uint16, osgeo.gdal.GDT_UInt16),
        (np.int8, osgeo.gdal.GDT_Byte),      # transform -127 -- 127 to 0 -- 255
        (np.int16, osgeo.gdal.GDT_Int16),
        (np.int32, osgeo.gdal.GDT_Int32),
        (np.int64, osgeo.gdal.GDT_Int32),
        (np.float32, osgeo.gdal.GDT_Float32),
        (np.float64, osgeo.gdal.GDT_Float64),
        (np.complex64, osgeo.gdal.GDT_CFloat64),
    )
    for numpy_type, gdal_enum in mapping:
        if dtype == numpy_type:
            return gdal_enum
    raise TypeError("GDAL equivalent to type {0} unknown".format(dtype))
def read(fnm, in_memory, ibands=ALL, bandclass=CompressedBand):
    """ Read a GeoTiff file and return a numpy array and a dictionary of header
    information.
    Parameters
    ----------
    fnm : str
        input datasource
    in_memory : boolean
        indicates whether array should be read fully into memory
    ibands : int or list of ints
        band number (1...)
    bandclass : karta.raster.band class
        if *in_memory* is `False`, use this class for band storage
    Returns an band object and a dictionary of metadata
    """
    hdr = dict()
    dataset = osgeo.gdal.Open(fnm, gc.GA_ReadOnly)
    # Normalise ibands to a list of 1-based band indices.
    if ibands == ALL:
        ibands = list(range(1, dataset.RasterCount+1))
    elif not hasattr(ibands, "__iter__"):
        ibands = [ibands]
    try:
        hdr["nx"] = dataset.RasterXSize
        hdr["ny"] = dataset.RasterYSize
        transform = dataset.GetGeoTransform()
        if transform is not None:
            hdr["dx"] = transform[1]
            hdr["dy"] = transform[5]
            hdr["xulcorner"] = transform[0]
            hdr["yulcorner"] = transform[3]
            hdr["sx"] = transform[2]
            hdr["sy"] = transform[4]
        else:
            raise AttributeError("No GeoTransform in geotiff file")
        sr = SRS_from_WKT(dataset.GetProjectionRef())
        if sr is not None:
            hdr["srs"] = {"proj4": sr.ExportToProj4(),
                          "semimajor": sr.GetSemiMajor(),
                          "flattening": sr.GetInvFlattening(),
                          "name": sr.GetAttrValue('PROJCS')}
        else:
            # Unparseable projection: fall back to sphere-like defaults
            # (WGS84 flattening) with an empty proj4 string.
            hdr["srs"] = {"proj4": "",
                          "semimajor": 6370997.0,
                          "flattening": 1.0 / 298.257223563,
                          "name": "NA"}
        # NOTE(review): max_dtype only ever sees band 0 here -- it looks
        # like vestigial code for a widest-common-dtype computation.
        max_dtype = 0
        rasterbands = [dataset.GetRasterBand(i) for i in ibands]
        hdr["nodata"] = rasterbands[0].GetNoDataValue()
        nx = rasterbands[0].XSize
        ny = rasterbands[0].YSize
        if rasterbands[0].DataType > max_dtype:
            max_dtype = rasterbands[0].DataType
        if in_memory:
            dtype = numpy_dtype(rasterbands[0].DataType)
            bands = [bandclass((ny, nx), dtype) for _ in ibands]
            for i, rb in enumerate(rasterbands):
                _arr = rb.ReadAsArray(buf_obj=np.empty([ny, nx], dtype=dtype))
                if _arr is None:
                    raise IOError("error reading GDAL band {}".format(i+1))
                # Flip rows: GDAL stores top-down, karta bands bottom-up.
                bands[i].setblock(0, 0, _arr.squeeze()[::-1])
        else:
            bands = [GdalFileBand(rb, dataset) for rb in rasterbands]
    finally:
        # Only close the dataset when the data was copied into memory;
        # GdalFileBand instances keep the dataset reference alive.
        if in_memory:
            dataset = None
    return bands, hdr
def srs_from_crs(crs):
    """Convert a karta CRS object into an osgeo.osr.SpatialReference."""
    srs = osgeo.osr.SpatialReference()
    # SpatialReference can't parse 'lonlat'
    proj4 = crs.get_proj4().replace("lonlat", "latlong")
    srs.ImportFromProj4(proj4)
    return srs
def write(fnm, grid, compress=None, tiled=False, **kw):
    """ Write a grid-like object with the GTiff driver.
    Parameters
    ----------
    fnm : string
        path to write to
    grid : raster.RegularGrid
        opbject to write
    compress : str, optional
        compression type (default no compression). 'LZW', 'PACKBITS',
        'DEFLATE', and 'LZMA' supported.
    tiled : bool, optional
        whether to write a tiled dataset (default False)
    Additional keyword arguments passed directly to GDAL driver as creation
    options.
    Returns
    -------
    reference to *grid*
    """
    # Assemble GDAL creation options ("KEY=VALUE" strings).
    co = []
    if compress == "LZW":
        co.append("COMPRESS=LZW")
    elif compress == "PACKBITS":
        co.append("COMPRESS=PACKBITS")
    elif compress == "DEFLATE":
        co.append("COMPRESS=DEFLATE")
    elif compress == "LZMA":
        co.append("COMPRESS=LZMA")
    if tiled:
        co.append("TILED=YES")
    for k, v in kw.items():
        co.append("{0}={1}".format(k,v))
    driver = osgeo.gdal.GetDriverByName("GTiff")
    ny, nx = grid.size
    dataset = driver.Create(fnm, nx, ny, len(grid.bands), gdal_type(grid.values.dtype), co)
    # Convert karta's bottom-up transform to GDAL's top-down geotransform
    # (origin moved to the upper-left corner, y-step negated).
    t = grid.transform
    dataset.SetGeoTransform([t[0] + ny*t[4], t[2], -t[4],
                             t[1] + ny*t[3], t[5], -t[3]])
    srs = srs_from_crs(grid.crs)
    dataset.SetProjection(srs.ExportToWkt())
    for i, _ in enumerate(grid.bands):
        band = dataset.GetRasterBand(i+1)
        band.SetNoDataValue(grid.nodata)
        # Row flip matches the top-down geotransform set above.
        band.WriteArray(grid[::-1,:,i])
        band = None
    # Releasing the dataset flushes it to disk.
    dataset = None
    return grid
| |
# -*- coding: utf-8 -*-
import mock
import pytest
from api.base.settings.defaults import API_BASE
from osf_tests.factories import (
AuthUserFactory,
)
from website.settings import MAILCHIMP_GENERAL_LIST, OSF_HELP_LIST
@pytest.fixture()
def user_one():
    """Primary user whose settings endpoint is under test."""
    return AuthUserFactory()
@pytest.fixture()
def user_two():
    """Second user, used to exercise the 403 (forbidden) paths."""
    return AuthUserFactory()
@pytest.fixture()
def url(user_one):
    """Settings endpoint URL for user_one."""
    return '/{}users/{}/settings/'.format(API_BASE, user_one._id)
@pytest.mark.django_db
class TestUserSettingsGet:
    """GET /users/<id>/settings/: auth rules and payload shape."""

    def test_get(self, app, user_one, user_two, url):
        # User unauthenticated
        res = app.get(url, expect_errors=True)
        assert res.status_code == 401

        # User accessing another user's settings
        res = app.get(url, auth=user_two.auth, expect_errors=True)
        assert res.status_code == 403

        # Incorrect method
        res = app.post(url, auth=user_one.auth, expect_errors=True)
        assert res.status_code == 405
        assert res.content_type == 'application/vnd.api+json'

        # User authenticated
        res = app.get(url, auth=user_one.auth)
        assert res.status_code == 200
        assert res.json['data']['attributes']['subscribe_osf_help_email'] is True
        assert res.json['data']['attributes']['subscribe_osf_general_email'] is False
        assert res.json['data']['attributes']['two_factor_enabled'] is False
        assert res.json['data']['attributes']['two_factor_confirmed'] is False
        assert res.json['data']['attributes']['secret'] is None
        assert res.json['data']['type'] == 'user_settings'

        # unconfirmed two_factor includes secret
        addon = user_one.add_addon('twofactor')
        res = app.get(url, auth=user_one.auth)
        assert res.json['data']['attributes']['two_factor_enabled'] is True
        assert res.json['data']['attributes']['two_factor_confirmed'] is False
        assert res.json['data']['attributes']['secret'] == addon.totp_secret_b32
@pytest.mark.django_db
class TestUserSettingsUpdateTwoFactor:
    """PATCH /users/<id>/settings/: two-factor enable/disable/verify."""

    @pytest.fixture()
    def payload(self, user_one):
        """Minimal JSON-API patch body; tests fill in attributes."""
        return {
            'data': {
                'type': 'user_settings',
                'id': user_one._id,
                'attributes': {}
            }
        }

    def test_user_settings_type(self, app, user_one, url, payload):
        # Wrong JSON-API resource type is rejected with 409 Conflict.
        payload['data']['type'] = 'Invalid type'
        res = app.patch_json_api(url, payload, auth=user_one.auth, expect_errors=True)
        assert res.status_code == 409

    def test_update_two_factor_permissions(self, app, user_one, user_two, url, payload):
        payload['data']['attributes']['two_factor_enabled'] = False
        # Unauthenticated
        res = app.patch_json_api(url, payload, expect_errors=True)
        assert res.status_code == 401
        # User modifying someone else's settings
        res = app.patch_json_api(url, payload, auth=user_two.auth, expect_errors=True)
        assert res.status_code == 403

    def test_update_two_factor_enabled(self, app, user_one, url, payload):
        # Invalid data type
        payload['data']['attributes']['two_factor_enabled'] = 'Yes'
        res = app.patch_json_api(url, payload, auth=user_one.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == '"Yes" is not a valid boolean.'

        # Already disabled - nothing happens, still disabled
        payload['data']['attributes']['two_factor_enabled'] = False
        res = app.patch_json_api(url, payload, auth=user_one.auth, expect_errors=True)
        assert res.status_code == 200
        assert res.json['data']['attributes']['two_factor_enabled'] is False
        assert res.json['data']['attributes']['secret'] is None
        assert res.json['data']['attributes']['two_factor_confirmed'] is False

        # Test enabling two factor
        payload['data']['attributes']['two_factor_enabled'] = True
        res = app.patch_json_api(url, payload, auth=user_one.auth, expect_errors=True)
        assert res.status_code == 200
        assert res.json['data']['attributes']['two_factor_enabled'] is True
        user_one.reload()
        addon = user_one.get_addon('twofactor')
        assert addon.deleted is False
        assert addon.is_confirmed is False
        # Enabled-but-unconfirmed exposes the TOTP secret to the owner.
        assert res.json['data']['attributes']['secret'] == addon.totp_secret_b32
        assert res.json['data']['attributes']['two_factor_confirmed'] is False

        # Test already enabled - nothing happens, still enabled
        res = app.patch_json_api(url, payload, auth=user_one.auth, expect_errors=True)
        assert res.status_code == 200
        assert res.json['data']['attributes']['two_factor_enabled'] is True
        assert res.json['data']['attributes']['secret'] == addon.totp_secret_b32

        # Test disabling two factor
        payload['data']['attributes']['two_factor_enabled'] = False
        res = app.patch_json_api(url, payload, auth=user_one.auth, expect_errors=True)
        assert res.status_code == 200
        assert res.json['data']['attributes']['two_factor_enabled'] is False
        assert res.json['data']['attributes']['two_factor_confirmed'] is False
        assert res.json['data']['attributes']['secret'] is None
        user_one.reload()
        addon = user_one.get_addon('twofactor')
        assert addon is None

    @mock.patch('addons.twofactor.models.UserSettings.verify_code')
    def test_update_two_factor_verification(self, mock_verify_code, app, user_one, url, payload):
        # Two factor not enabled
        mock_verify_code.return_value = True
        payload['data']['attributes']['two_factor_verification'] = 123456
        res = app.patch_json_api(url, payload, auth=user_one.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Two-factor authentication is not enabled.'

        # Two factor invalid code
        mock_verify_code.return_value = False
        payload['data']['attributes']['two_factor_enabled'] = True
        payload['data']['attributes']['two_factor_verification'] = 123456
        res = app.patch_json_api(url, payload, auth=user_one.auth, expect_errors=True)
        assert res.status_code == 403
        assert res.json['errors'][0]['detail'] == 'The two-factor verification code you provided is invalid.'

        # Test invalid data type
        mock_verify_code.return_value = False
        payload['data']['attributes']['two_factor_verification'] = 'abcd123'
        res = app.patch_json_api(url, payload, auth=user_one.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'A valid integer is required.'

        # Test two factor valid code: first enable (no verification key),
        # then verify with a code the mock accepts.
        mock_verify_code.return_value = True
        del payload['data']['attributes']['two_factor_verification']
        res = app.patch_json_api(url, payload, auth=user_one.auth)
        payload['data']['attributes']['two_factor_verification'] = 654321
        res = app.patch_json_api(url, payload, auth=user_one.auth)
        assert res.json['data']['attributes']['two_factor_enabled'] is True
        # Confirmed two-factor no longer exposes the secret.
        assert res.json['data']['attributes']['secret'] is None
        assert res.json['data']['attributes']['two_factor_confirmed'] is True
        assert res.status_code == 200
        user_one.reload()
        addon = user_one.get_addon('twofactor')
        assert addon.deleted is False
        assert addon.is_confirmed is True
@pytest.mark.django_db
class TestUserSettingsUpdateMailingList:
    """PATCH /users/<id>/settings/: mailing-list subscription flags."""

    @pytest.fixture()
    def payload(self, user_one):
        """Valid patch: unsubscribe from help list, subscribe to general."""
        return {
            'data': {
                'id': user_one._id,
                'type': 'user_settings',
                'attributes': {
                    'subscribe_osf_help_email': False,
                    'subscribe_osf_general_email': True
                }
            }
        }

    @pytest.fixture()
    def bad_payload(self, user_one):
        """Invalid patch: non-boolean subscription value."""
        return {
            'data': {
                'id': user_one._id,
                'type': 'user_settings',
                'attributes': {
                    'subscribe_osf_help_email': False,
                    'subscribe_osf_general_email': '22',
                }
            }
        }

    @mock.patch('api.users.serializers.update_mailchimp_subscription')
    def test_authorized_patch_200(self, mailchimp_mock, app, user_one, payload, url):
        res = app.patch_json_api(url, payload, auth=user_one.auth)
        assert res.status_code == 200
        user_one.refresh_from_db()
        # Help list updated directly; general list goes through mailchimp.
        assert user_one.osf_mailing_lists[OSF_HELP_LIST] is False
        mailchimp_mock.assert_called_with(user_one, MAILCHIMP_GENERAL_LIST, True)

    def test_bad_payload_patch_400(self, app, user_one, bad_payload, url):
        res = app.patch_json_api(url, bad_payload, auth=user_one.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == u'"22" is not a valid boolean.'

    def test_anonymous_patch_401(self, app, url, payload):
        res = app.patch_json_api(url, payload, expect_errors=True)
        assert res.status_code == 401
        assert res.content_type == 'application/vnd.api+json'

    def test_unauthorized_patch_403(self, app, url, payload, user_two):
        res = app.patch_json_api(url, payload, auth=user_two.auth, expect_errors=True)
        assert res.status_code == 403
        assert res.content_type == 'application/vnd.api+json'
@pytest.mark.django_db
class TestUpdateRequestedDeactivation:
    """PATCH /users/<id>/settings/: account deactivation requests."""

    @pytest.fixture()
    def payload(self, user_one):
        """Patch body requesting deactivation for user_one."""
        return {
            'data': {
                'id': user_one._id,
                'type': 'user_settings',
                'attributes': {
                    'deactivation_requested': True
                }
            }
        }

    @mock.patch('framework.auth.views.mails.send_mail')
    def test_patch_requested_deactivation(self, mock_mail, app, user_one, user_two, url, payload):
        # Logged out
        res = app.patch_json_api(url, payload, expect_errors=True)
        assert res.status_code == 401

        # Logged in, requesting deactivation for another user
        res = app.patch_json_api(url, payload, auth=user_two.auth, expect_errors=True)
        assert res.status_code == 403

        # Logged in, request to deactivate
        assert user_one.requested_deactivation is False
        res = app.patch_json_api(url, payload, auth=user_one.auth)
        assert res.status_code == 200
        user_one.reload()
        assert user_one.requested_deactivation is True

        # Logged in, deactivation already requested (idempotent)
        res = app.patch_json_api(url, payload, auth=user_one.auth)
        assert res.status_code == 200
        user_one.reload()
        assert user_one.requested_deactivation is True

        # Logged in, request to cancel deactivate request
        payload['data']['attributes']['deactivation_requested'] = False
        res = app.patch_json_api(url, payload, auth=user_one.auth)
        assert res.status_code == 200
        user_one.reload()
        assert user_one.requested_deactivation is False

    @mock.patch('framework.auth.views.mails.send_mail')
    def test_patch_invalid_type(self, mock_mail, app, user_one, url, payload):
        # Wrong resource type: rejected before any mail is sent.
        assert user_one.email_last_sent is None
        payload['data']['type'] = 'Invalid Type'
        res = app.patch_json_api(url, payload, auth=user_one.auth, expect_errors=True)
        assert res.status_code == 409
        user_one.reload()
        assert user_one.email_last_sent is None
        assert mock_mail.call_count == 0

    @mock.patch('framework.auth.views.mails.send_mail')
    def test_exceed_throttle(self, mock_mail, app, user_one, url, payload):
        # Third request within the throttle window is rate-limited (429).
        assert user_one.email_last_sent is None
        res = app.patch_json_api(url, payload, auth=user_one.auth)
        assert res.status_code == 200
        res = app.patch_json_api(url, payload, auth=user_one.auth)
        assert res.status_code == 200
        res = app.patch_json_api(url, payload, auth=user_one.auth, expect_errors=True)
        assert res.status_code == 429
| |
# -*- test-case-name: flocker.node.agents.functional.test_cinder,flocker.node.agents.functional.test_cinder_behaviour -*- # noqa
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
A Cinder implementation of the ``IBlockDeviceAPI``.
"""
import time
from uuid import UUID
from bitmath import Byte, GiB
from eliot import Message
from pyrsistent import PRecord, field
from keystoneclient.openstack.common.apiclient.exceptions import (
NotFound as CinderNotFound,
HttpError as KeystoneHttpError,
)
from keystoneclient.auth import get_plugin_class
from keystoneclient.session import Session
from keystoneclient_rackspace.v2_0 import RackspaceAuth
from cinderclient.client import Client as CinderClient
from novaclient.client import Client as NovaClient
from novaclient.exceptions import NotFound as NovaNotFound
from novaclient.exceptions import ClientException as NovaClientException
from twisted.python.filepath import FilePath
from zope.interface import implementer, Interface
from ...common import (
interface_decorator, get_all_ips, ipaddress_from_string
)
from .blockdevice import (
IBlockDeviceAPI, BlockDeviceVolume, UnknownVolume, AlreadyAttachedVolume,
UnattachedVolume, get_blockdevice_volume,
)
from ._logging import (
NOVA_CLIENT_EXCEPTION, KEYSTONE_HTTP_ERROR, COMPUTE_INSTANCE_ID_NOT_FOUND,
OPENSTACK_ACTION, CINDER_CREATE
)
# The key name used for identifying the Flocker cluster_id in the metadata for
# a volume.
CLUSTER_ID_LABEL = u'flocker-cluster-id'
# The key name used for identifying the Flocker dataset_id in the metadata for
# a volume.
DATASET_ID_LABEL = u'flocker-dataset-id'
def _openstack_logged_method(method_name, original_name):
    """
    Build a wrapper that forwards a method call to a wrapped object, logging
    the full details of any Nova or Keystone client exception that escapes.

    :param str method_name: The name of the method of the wrapped object to
        call.
    :param str original_name: The name of the attribute of self where the
        wrapped object can be found.

    :return: A function which will call the method of the wrapped object and do
        the extra exception logging.
    """
    def _run_with_logging(self, *args, **kwargs):
        # Resolve the real method on the wrapped provider at call time, so
        # the wrapper stays valid if the provider attribute is replaced.
        method = getattr(getattr(self, original_name), method_name)
        # See https://clusterhq.atlassian.net/browse/FLOC-2054
        # for ensuring all method arguments are serializable.
        with OPENSTACK_ACTION(operation=[method_name, args, kwargs]):
            try:
                return method(*args, **kwargs)
            except NovaClientException as e:
                # Record every field Nova gives us, then re-raise unchanged.
                NOVA_CLIENT_EXCEPTION(
                    code=e.code,
                    message=e.message,
                    details=e.details,
                    request_id=e.request_id,
                    url=e.url,
                    method=e.method,
                ).write()
                raise
            except KeystoneHttpError as e:
                # Record every field Keystone gives us (including the raw
                # response body), then re-raise unchanged.
                KEYSTONE_HTTP_ERROR(
                    code=e.http_status,
                    message=e.message,
                    details=e.details,
                    request_id=e.request_id,
                    url=e.url,
                    method=e.method,
                    response=e.response.text,
                ).write()
                raise
    return _run_with_logging
def auto_openstack_logging(interface, original):
    """
    Create a class decorator which adds OpenStack-specific exception-logging
    versions of all of the methods on ``interface``.

    Specifically, some Nova and Cinder client exceptions will have all of their
    details logged any time they are raised.

    :param zope.interface.InterfaceClass interface: The interface from which to
        take methods.
    :param str original: The name of an attribute on instances of the decorated
        class.  The attribute should refer to a provider of ``interface``.
        That object will have all of its methods called with additional
        exception logging to make more details of the underlying OpenStack API
        calls available.

    :return: The class decorator.
    """
    decorator = interface_decorator(
        "auto_openstack_logging",
        interface,
        _openstack_logged_method,
        original,
    )
    return decorator
class ICinderVolumeManager(Interface):
    """
    The parts of ``cinderclient.v1.volumes.VolumeManager`` that we use.

    This narrowed interface exists so tests can supply fakes and so
    ``auto_openstack_logging`` knows which methods to wrap.

    See:
    https://github.com/openstack/python-cinderclient/blob/master/cinderclient/v1/volumes.py#L135
    """
    # The OpenStack Cinder API documentation says the size is in GB (multiples
    # of 10 ** 9 bytes). Real world observations indicate size is actually in
    # GiB (multiples of 2 ** 30). So this method is documented as accepting
    # GiB values. https://bugs.launchpad.net/openstack-api-site/+bug/1456631
    def create(size, metadata=None):
        """
        Creates a volume.

        :param size: Size of volume in GiB
        :param dict metadata: Optional metadata to set on volume creation
        :rtype: :class:`Volume`
        """

    def list():
        """
        Lists all volumes.

        :rtype: list of :class:`Volume`
        """

    def delete(volume_id):
        """
        Delete a volume.

        :param volume_id: The ID of the volume to delete.
        :raise CinderNotFound: If no volume with the specified ID exists.
        :return: ``None``
        """

    def get(volume_id):
        """
        Retrieve information about an existing volume.

        :param volume_id: The ID of the volume about which to retrieve
            information.
        :return: A ``Volume`` instance describing the identified volume.
        :rtype: :class:`Volume`
        """

    def set_metadata(volume, metadata):
        """
        Update/Set a volumes metadata.

        :param volume: The :class:`Volume`.
        :param metadata: A list of keys to be set.
        """
class INovaVolumeManager(Interface):
    """
    The parts of ``novaclient.v2.volumes.VolumeManager`` that we use.

    Attach/detach must go through Nova (not Cinder) because Nova owns the
    actual hypervisor-side attachment.

    See:
    https://github.com/openstack/python-novaclient/blob/master/novaclient/v2/volumes.py
    """
    def create_server_volume(server_id, volume_id, device):
        """
        Attach a volume identified by the volume ID to the given server ID.

        :param server_id: The ID of the server
        :param volume_id: The ID of the volume to attach.
        :param device: The device name (may be ``None`` to let Nova choose).
        :rtype: :class:`Volume`
        """

    def delete_server_volume(server_id, attachment_id):
        """
        Detach the volume identified by the volume ID from the given server ID.

        :param server_id: The ID of the server
        :param attachment_id: The ID of the volume to detach.
        """

    def get(volume_id):
        """
        Retrieve information about an existing volume.

        :param volume_id: The ID of the volume about which to retrieve
            information.
        :return: A ``Volume`` instance describing the identified volume.
        :rtype: :class:`Volume`
        """
class INovaServerManager(Interface):
    """
    The parts of ``novaclient.v2.servers.ServerManager`` that we use.

    Only ``list`` is needed — it is used to discover the compute instance ID
    of the node this code runs on.

    See:
    https://github.com/openstack/python-novaclient/blob/master/novaclient/v2/servers.py
    """
    def list():
        """
        Get a list of servers.
        """
def wait_for_volume(volume_manager, expected_volume,
                    expected_status=u'available',
                    time_limit=60):
    """
    Poll ``volume_manager`` until a ``Volume`` with the same ``id`` as
    ``expected_volume`` is listed with a ``status`` of ``expected_status``.

    :param ICinderVolumeManager volume_manager: An API for listing volumes.
    :param Volume expected_volume: The ``Volume`` to wait for.
    :param unicode expected_status: The ``Volume.status`` to wait for.
    :param int time_limit: The maximum time, in seconds, to wait for the
        ``expected_volume`` to have ``expected_status``.
    :raises Exception: If ``expected_volume`` with ``expected_status`` is not
        listed within ``time_limit``.
    :returns: The listed ``Volume`` that matches ``expected_volume``.
    """
    started_at = time.time()
    # Log stuff happening in this loop. FLOC-1833.
    while True:
        # Could be more efficient. FLOC-1831
        # Could miss the expected status because race conditions. FLOC-1832
        for candidate in volume_manager.list():
            if candidate.id == expected_volume.id and \
                    candidate.status == expected_status:
                return candidate
        waited = time.time() - started_at
        if waited >= time_limit:
            raise Exception(
                'Timed out while waiting for volume. '
                'Expected Volume: {!r}, '
                'Expected Status: {!r}, '
                'Elapsed Time: {!r}, '
                'Time Limit: {!r}.'.format(
                    expected_volume, expected_status, waited, time_limit
                )
            )
        time.sleep(1.0)
def _extract_nova_server_addresses(addresses):
    """
    Flatten a Nova server's ``addresses`` mapping into a set of IP addresses.

    :param dict addresses: A ``dict`` mapping OpenStack network names
        to lists of address dictionaries in that network assigned to a
        server.

    :return: A ``set`` of all the IPv4 and IPv6 addresses from the
        ``addresses`` attribute of a ``Server``.
    """
    all_addresses = set()
    # Fix: the original loop variable shadowed the ``addresses`` parameter
    # (``for network_name, addresses in addresses.items()``).  The network
    # names are unused, so iterate the values directly.
    for network_addresses in addresses.values():
        for address_info in network_addresses:
            all_addresses.add(
                ipaddress_from_string(address_info['addr'])
            )
    return all_addresses
@implementer(IBlockDeviceAPI)
class CinderBlockDeviceAPI(object):
    """
    A cinder implementation of ``IBlockDeviceAPI`` which creates block devices
    in an OpenStack cluster using Cinder APIs.

    Volume create/list/destroy go through Cinder; attach/detach go through
    Nova, because Nova owns the hypervisor-side attachment (see comment in
    ``attach_volume``).
    """
    def __init__(self,
                 cinder_volume_manager,
                 nova_volume_manager, nova_server_manager,
                 cluster_id):
        """
        :param ICinderVolumeManager cinder_volume_manager: A client for
            interacting with Cinder API.
        :param INovaVolumeManager nova_volume_manager: A client for interacting
            with Nova volume API.
        :param INovaServerManager nova_server_manager: A client for interacting
            with Nova servers API.
        :param UUID cluster_id: An ID that will be included in the names of
            Cinder block devices in order to associate them with a particular
            Flocker cluster.
        """
        self.cinder_volume_manager = cinder_volume_manager
        self.nova_volume_manager = nova_volume_manager
        self.nova_server_manager = nova_server_manager
        self.cluster_id = cluster_id

    def allocation_unit(self):
        """
        1GiB is the minimum allocation unit described by the OpenStack
        Cinder API documentation.
         * http://developer.openstack.org/api-ref-blockstorage-v2.html#createVolume # noqa

        Some Cinder storage drivers may actually allocate more than
        this, but as long as the requested size is a multiple of this
        unit, the Cinder API will always report the size that was
        requested.

        :return: ``int`` allocation unit in bytes (2 ** 30).
        """
        return int(GiB(1).to_Byte().value)

    def compute_instance_id(self):
        """
        Find the ``ACTIVE`` Nova API server with a subset of the IPv4 and IPv6
        addresses on this node.

        :return: The matching server's ID, or ``None`` (implicitly) if no
            single match is found — in which case the failure is logged.
        """
        local_ips = get_all_ips()
        # Collected only for diagnostics when no match is found.
        api_ip_map = {}
        matching_instances = []
        for server in self.nova_server_manager.list():
            # Servers which are not active will not have any IP addresses
            if server.status != u'ACTIVE':
                continue
            api_addresses = _extract_nova_server_addresses(server.addresses)
            # Only do subset comparison if there were *some* IP addresses;
            # non-ACTIVE servers will have an empty list of IP addresses and
            # lead to incorrect matches.
            if api_addresses and api_addresses.issubset(local_ips):
                matching_instances.append(server.id)
            else:
                for ip in api_addresses:
                    api_ip_map[ip] = server.id
        # If we've got this correct there should only be one matching instance.
        # But we don't currently test this directly. See FLOC-2281.
        if len(matching_instances) == 1:
            return matching_instances[0]
        # If there was no match, or if multiple matches were found, log an
        # error containing all the local and remote IPs.
        COMPUTE_INSTANCE_ID_NOT_FOUND(
            local_ips=local_ips, api_ips=api_ip_map
        ).write()

    def create_volume(self, dataset_id, size):
        """
        Create a block device using the ICinderVolumeManager.
        The cluster_id and dataset_id are stored as metadata on the volume.

        Blocks until the new volume reaches the default ``available`` status.

        See:

        http://docs.rackspace.com/cbs/api/v1.0/cbs-devguide/content/POST_createVolume_v1__tenant_id__volumes_volumes.html
        """
        # NOTE: ``unicode`` is the Python 2 builtin; this module is py2-only.
        metadata = {
            CLUSTER_ID_LABEL: unicode(self.cluster_id),
            DATASET_ID_LABEL: unicode(dataset_id),
        }
        # Cinder takes GiB; ``size`` is in bytes.
        requested_volume = self.cinder_volume_manager.create(
            size=int(Byte(size).to_GiB().value),
            metadata=metadata,
        )
        Message.new(message_type=CINDER_CREATE,
                    blockdevice_id=requested_volume.id).write()
        created_volume = wait_for_volume(
            volume_manager=self.cinder_volume_manager,
            expected_volume=requested_volume,
        )
        return _blockdevicevolume_from_cinder_volume(
            cinder_volume=created_volume,
        )

    def list_volumes(self):
        """
        Return ``BlockDeviceVolume`` instances for all the Cinder Volumes that
        have the expected ``cluster_id`` in their metadata.

        See:

        http://docs.rackspace.com/cbs/api/v1.0/cbs-devguide/content/GET_getVolumesDetail_v1__tenant_id__volumes_detail_volumes.html
        """
        flocker_volumes = []
        for cinder_volume in self.cinder_volume_manager.list():
            # Volumes belonging to other clusters (or not created by Flocker
            # at all) are filtered out by their metadata label.
            if _is_cluster_volume(self.cluster_id, cinder_volume):
                flocker_volume = _blockdevicevolume_from_cinder_volume(
                    cinder_volume
                )
                flocker_volumes.append(flocker_volume)
        return flocker_volumes

    def attach_volume(self, blockdevice_id, attach_to):
        """
        Attach a volume to an instance using the Nova volume manager.

        :raises AlreadyAttachedVolume: If the volume is already attached.
        :return: The ``BlockDeviceVolume`` updated with ``attached_to``.
        """
        # The Cinder volume manager has an API for attaching volumes too.
        # However, it doesn't actually attach the volume: it only updates
        # internal state to indicate that the volume is attached!  Basically,
        # it is an implementation detail of how Nova attached volumes work and
        # no one outside of Nova has any business calling it.
        #
        # See
        # http://www.florentflament.com/blog/openstack-volume-in-use-although-vm-doesnt-exist.html
        unattached_volume = get_blockdevice_volume(self, blockdevice_id)
        if unattached_volume.attached_to is not None:
            raise AlreadyAttachedVolume(blockdevice_id)
        nova_volume = self.nova_volume_manager.create_server_volume(
            # Nova API expects an ID string not UUID.
            server_id=attach_to,
            volume_id=unattached_volume.blockdevice_id,
            # Have Nova assign a device file for us.
            device=None,
        )
        # NOTE(review): the waited-for volume is immediately overwritten by
        # the ``unattached_volume.set(...)`` below, so the wait is only used
        # for its blocking side effect (volume reaches 'in-use').
        attached_volume = wait_for_volume(
            volume_manager=self.cinder_volume_manager,
            expected_volume=nova_volume,
            expected_status=u'in-use',
        )
        attached_volume = unattached_volume.set('attached_to', attach_to)
        return attached_volume

    def detach_volume(self, blockdevice_id):
        """
        Detach ``blockdevice_id`` from this compute instance via Nova and
        block until Cinder reports it ``available`` again.

        :raises UnknownVolume: If Cinder does not know the volume.
        :raises UnattachedVolume: If Nova reports no such attachment.
        """
        our_id = self.compute_instance_id()
        try:
            cinder_volume = self.cinder_volume_manager.get(blockdevice_id)
        except CinderNotFound:
            raise UnknownVolume(blockdevice_id)
        try:
            # NOTE(review): the volume ID is passed as the attachment ID —
            # presumably Nova accepts the volume ID here; confirm against the
            # Nova volume-attachment API.
            self.nova_volume_manager.delete_server_volume(
                server_id=our_id,
                attachment_id=blockdevice_id
            )
        except NovaNotFound:
            raise UnattachedVolume(blockdevice_id)
        # This'll blow up if the volume is deleted from elsewhere. FLOC-1882.
        wait_for_volume(
            volume_manager=self.cinder_volume_manager,
            expected_volume=cinder_volume,
            expected_status=u'available',
        )

    def destroy_volume(self, blockdevice_id):
        """
        Delete the volume and poll until Cinder no longer lists it.

        :raises UnknownVolume: If Cinder does not know the volume.
        """
        try:
            self.cinder_volume_manager.delete(blockdevice_id)
        except CinderNotFound:
            raise UnknownVolume(blockdevice_id)
        while True:
            # Don't loop forever here. FLOC-1853
            try:
                self.cinder_volume_manager.get(blockdevice_id)
            except CinderNotFound:
                # The volume is gone; deletion is complete.
                break
            time.sleep(1.0)

    def get_device_path(self, blockdevice_id):
        """
        Return the local OS device file for an attached volume.

        :raises UnknownVolume: If Cinder does not know the volume.
        :raises UnattachedVolume: If the volume has no attachments.
        :return: ``FilePath`` of the device.
        """
        # libvirt does not return the correct device path when additional
        # disks have been attached using a client other than cinder. This is
        # expected behaviour within Cinder and libvirt
        # See https://bugs.launchpad.net/cinder/+bug/1387945 and
        # http://libvirt.org/formatdomain.html#elementsDisks (target section)
        # However, the correct device is named as a udev symlink which includes
        # the first 20 characters of the blockedevice_id.
        device_path = FilePath(
            "/dev/disk/by-id/virtio-{}".format(blockdevice_id[:20]))
        if not device_path.exists():
            # If the device path does not exist, either virtio driver is
            # not being used (e.g. Rackspace), or the user has modified
            # their udev rules. The following code relies on Cinder
            # returning the correct device path, which appears to work
            # for Rackspace and will work with virtio if no disks have
            # been attached outside Cinder.
            try:
                cinder_volume = self.cinder_volume_manager.get(blockdevice_id)
            except CinderNotFound:
                raise UnknownVolume(blockdevice_id)
            # As far as we know you can not have more than one attachment,
            # but, perhaps we're wrong and there should be a test for the
            # multiple attachment case. FLOC-1854.
            try:
                [attachment] = cinder_volume.attachments
            except ValueError:
                raise UnattachedVolume(blockdevice_id)
            device_path = FilePath(attachment['device'])
        # It could be attached somewhere else...
        # https://clusterhq.atlassian.net/browse/FLOC-1830
        return device_path
def _is_cluster_volume(cluster_id, cinder_volume):
    """
    Check whether a Cinder volume is labelled as belonging to a cluster.

    :param UUID cluster_id: The uuid4 of a Flocker cluster.
    :param Volume cinder_volume: The Volume with metadata to examine.
    :return: ``True`` if ``cinder_volume`` metadata has a
        ``CLUSTER_ID_LABEL`` value matching ``cluster_id`` else ``False``.
    """
    labelled_id = cinder_volume.metadata.get(CLUSTER_ID_LABEL)
    if labelled_id is None:
        # No cluster label at all — not a Flocker volume.
        return False
    return UUID(labelled_id) == cluster_id
def _blockdevicevolume_from_cinder_volume(cinder_volume):
    """
    Build a ``BlockDeviceVolume`` from a cinder ``Volume``.

    :param Volume cinder_volume: The ``cinderclient.v1.volumes.Volume`` to
        convert.
    :returns: A ``BlockDeviceVolume`` based on values found in the supplied
        cinder Volume.
    """
    attachments = cinder_volume.attachments
    if attachments:
        # There should only be one. FLOC-1854.
        [attachment_info] = attachments
        # Nova and Cinder APIs return ID strings. Convert to unicode.
        server_id = attachment_info['server_id'].decode("ascii")
    else:
        server_id = None
    return BlockDeviceVolume(
        blockdevice_id=unicode(cinder_volume.id),
        size=int(GiB(cinder_volume.size).to_Byte().value),
        attached_to=server_id,
        dataset_id=UUID(cinder_volume.metadata[DATASET_ID_LABEL])
    )
@auto_openstack_logging(ICinderVolumeManager, "_cinder_volumes")
class _LoggingCinderVolumeManager(PRecord):
    # Wraps a cinderclient volumes manager so every ``ICinderVolumeManager``
    # method logs Nova/Keystone exception details before re-raising.
    _cinder_volumes = field(mandatory=True)
@auto_openstack_logging(INovaVolumeManager, "_nova_volumes")
class _LoggingNovaVolumeManager(PRecord):
    # Wraps a novaclient volumes manager with the same exception logging,
    # for the ``INovaVolumeManager`` methods.
    _nova_volumes = field(mandatory=True)
@auto_openstack_logging(INovaServerManager, "_nova_servers")
class _LoggingNovaServerManager(PRecord):
    # Wraps a novaclient servers manager with the same exception logging,
    # for the ``INovaServerManager`` methods.
    _nova_servers = field(mandatory=True)
def cinder_api(cinder_client, nova_client, cluster_id):
    """
    Assemble a ``CinderBlockDeviceAPI`` from raw OpenStack clients, wrapping
    each manager in its exception-logging record type.

    :param cinderclient.v1.client.Client cinder_client: The Cinder API client
        whose ``volumes`` attribute will be supplied as the
        ``cinder_volume_manager`` parameter of ``CinderBlockDeviceAPI``.
    :param novaclient.v2.client.Client nova_client: The Nova API client whose
        ``volumes`` attribute will be supplied as the ``nova_volume_manager``
        parameter of ``CinderBlockDeviceAPI``.
    :param UUID cluster_id: A Flocker cluster ID.

    :returns: A ``CinderBlockDeviceAPI``.
    """
    return CinderBlockDeviceAPI(
        cinder_volume_manager=_LoggingCinderVolumeManager(
            _cinder_volumes=cinder_client.volumes
        ),
        nova_volume_manager=_LoggingNovaVolumeManager(
            _nova_volumes=nova_client.volumes
        ),
        nova_server_manager=_LoggingNovaServerManager(
            _nova_servers=nova_client.servers
        ),
        cluster_id=cluster_id,
    )
def _openstack_auth_from_config(auth_plugin='password', **config):
    """
    Create an OpenStack authentication plugin from the given configuration.

    :param str auth_plugin: The name of the authentication plugin to create.
    :param config: Parameters to supply to the authentication plugin. The
        exact parameters depends on the authentication plugin selected.

    :return: The authentication object.
    """
    # 'rackspace' is not registered with keystone's plugin loader, so it is
    # special-cased here.
    if auth_plugin == 'rackspace':
        plugin_class = RackspaceAuth
    else:
        plugin_class = get_plugin_class(auth_plugin)

    # option.dest is the python compatible attribute name in the plugin
    # implementation (option.name with hyphens replaced with underscores);
    # forward only the config keys the plugin actually declares.
    plugin_kwargs = {
        option.dest: config[option.dest]
        for option in plugin_class.get_options()
        if option.dest in config
    }
    return plugin_class(**plugin_kwargs)
def _openstack_verify_from_config(
verify_peer=True, verify_ca_path=None, **config):
"""
Create an OpenStack session from the given configuration.
This turns a pair of options (a boolean indicating whether to
verify, and a string for the path to the CA bundle) into a
requests-style single value.
If the ``verify_peer`` parameter is False, then no verification of
the certificate will occur. This setting is insecure! Although the
connections will be confidential, there is no authentication of the
peer. We're having a private conversation, but we don't know to
whom we are speaking.
If the ``verify_peer`` parameter is True (the default), then the
certificate will be verified.
If the ``verify_ca_path`` parameter is set, the certificate will be
verified against the CA bundle at the path given by the
``verify_ca_path`` parameter. This is useful for systems using
self-signed certificates or private CA's.
Otherwise, the certificate will be verified against the system CA's.
This is useful for systems using well-known public CA's.
:param bool verify_peer: Whether to check the peer's certificate.
:param str verify_ca_path: Path to CA bundle.
:param config: Other parameters in the config.
:return: A verify option that can be passed to requests (and also to
keystoneclient.session.Session)
"""
if verify_peer:
if verify_ca_path:
verify = verify_ca_path
else:
verify = True
else:
verify = False
return verify
def cinder_from_configuration(region, cluster_id, **config):
    """
    Build a ``CinderBlockDeviceAPI`` using configuration and credentials in
    ``config``.

    :param str region: The region "slug" for which to configure the object.
    :param cluster_id: The unique cluster identifier for which to configure the
        object.
    """
    # One authenticated keystone session is shared by both clients.
    auth = _openstack_auth_from_config(**config)
    verify = _openstack_verify_from_config(**config)
    session = Session(auth=auth, verify=verify)
    return cinder_api(
        cinder_client=CinderClient(
            session=session, region_name=region, version=1
        ),
        nova_client=NovaClient(
            session=session, region_name=region, version=2
        ),
        cluster_id=cluster_id,
    )
| |
#! /usr/bin/python3
# -*- coding: utf-8 -*-
import time
import re
import queue
import datetime
import signal
import argparse
import sys
import os
import zmq
from twisted.internet import reactor, protocol, threads
def agrparser():
    """Parse the bridge's command line: local zmq port plus remote TCP
    endpoint.  (The function name keeps its historical typo because callers
    use it.)

    Returns:
        argparse.Namespace with ``zmqport``, ``remoteip`` and ``remoteport``
        string attributes; all three flags are required.
    """
    parser = argparse.ArgumentParser(
        description=' zmq(port) -> tcp/ip(ip/port) ')
    for flag, dest, help_text in (
            ("--zp", 'zmqport', 'local port'),
            ("--rip", 'remoteip', 'remote ip'),
            ("--rp", 'remoteport', 'remote port')):
        parser.add_argument(flag, dest=dest,
                            type=str, required=True, help=help_text)
    return parser.parse_args()
ZS_PAIR_PORT = "inproc://ztotw"
connection = {}
q_conn = queue.Queue()
# Profiling helpers: each hop tags the payload with a name and timestamp.
def now():
    # Current local wall-clock time; used by Time.f_f to timestamp traffic.
    return datetime.datetime.now()
class Time():
    """Namespace for the traffic time-stamping helper."""

    def __init__(self):
        pass

    def f_f(name, data):
        """Append ' +/ <name>: <timestamp> /' to *data*.

        Called unbound (``Time.f_f(tag, payload)``): *name* is a short str
        tag identifying the hop, *data* is the latin-1 payload bytes.
        Returns the tagged payload re-encoded as latin-1 bytes.
        """
        decoded = data.decode('latin-1')
        tagged = '%s +/ %s: %s /' % (decoded, name, now())
        return tagged.encode('latin-1')
class EchoClient(protocol.Protocol):
    """Bridges one outgoing TCP connection to a per-connection zmq DEALER."""

    def __init__(self, factory):
        self.factory = factory

    def connectionMade(self):
        # Register this protocol so frontend_handler can route replies to it.
        connection[self.factory.connect_id] = self
        # every connect will create dealer
        self.zsocket = self.factory.zmq_handler.context.socket(zmq.DEALER)
        self.zsocket.setsockopt(zmq.IDENTITY, self.factory.frontend_id)
        self.zsocket.connect(ZS_PAIR_PORT)
        # Flush the payload that triggered this connection.
        self.tcpSend(self.factory.data)
        self.closeTimer = time.time()

    def dataReceived(self, data):
        # TCP -> zmq direction; tag the payload with a 't.R' timestamp.
        self.closeTimer = time.time()
        data = Time.f_f('t.R', data)
        self.zsocket.send_multipart([data,])

    def tcpSend(self, data):
        # zmq -> TCP direction; tag the payload with a 't.S' timestamp.
        data = Time.f_f('t.S', data)
        self.transport.write(data)
class EchoFactory(protocol.ClientFactory):
    """Builds ``EchoClient`` protocols for one bridged zmq connection.

    Carries the zmq handler, the zmq frontend identity, the composite
    connection id ('<id>,<ip>:<port>' as bytes) and the initial payload.
    """

    def __init__(self, zmq_handler, conn, data):
        self.zmq_handler = zmq_handler
        self.frontend_id = conn['frontend_id']
        ip_bytes = conn['ip'].encode('latin-1')
        port_bytes = str(conn['port']).encode('latin-1')
        self.connect_id = (
            conn['connect_id'] + b',' + ip_bytes + b':' + port_bytes)
        self.data = data

    def buildProtocol(self, addr):
        return EchoClient(self)

    def clientConnectionFailed(self, connector, reason):
        # A failed outbound connection tears down the whole bridge.
        print("Connection failed.")
        reactor.stop()

    def clientConnectionLost(self, connector, reason):
        print("Connection lost.")
        #reactor.stop()
class ZmqHandler():
    """Owns the zmq sockets bridging external clients to twisted TCP.

    Layout: an external-facing ROUTER (``frontend``), an in-process ROUTER
    (``backend``) that all per-connection DEALERs connect to, and a special
    'stop_reactor' DEALER used to break the polling loop from a signal
    handler.
    """

    def __init__(self, conn):
        # conn: dict with 'ip' and 'port' strings for the frontend bind.
        self.conn = conn

    def __enter__(self):
        self.context = zmq.Context()
        # External-facing ROUTER socket.
        self.frontend = self.context.socket(zmq.ROUTER)
        self.frontend.bind(''.join(
            ('tcp://',self.conn['ip'],':',self.conn['port'],)))
        # every connect will create dealer
        self.backend = self.context.socket(zmq.ROUTER)
        self.backend.bind(ZS_PAIR_PORT)
        self.poll = zmq.Poller()
        self.poll.register(self.frontend, zmq.POLLIN)
        self.poll.register(self.backend, zmq.POLLIN)
        # every connect will create dealer
        connection[b'stop_reactor'] = self
        self.stop_reactor = self.context.socket(zmq.DEALER)
        self.stop_reactor.setsockopt(zmq.IDENTITY, b'stop_reactor')
        self.stop_reactor.connect(ZS_PAIR_PORT)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.frontend.close()
        self.backend.close()

    def run(self):
        # Poll the TCP-connect queue on the reactor; run the zmq poll loop
        # in a separate thread so it does not block twisted.
        reactor.callLater(0.3, self.tcpConnect)
        d = threads.deferToThread(self.read_write)

    def backend_handler(self):  # ROUTER
        # Message from a per-connection DEALER (or the stop socket).
        [zmq_backend_id, data,] = self.backend.recv_multipart()
        if zmq_backend_id == b'stop_reactor':
            # Sentinel identity: abort read_write via the exception path.
            raise zmq.ZMQError
        data = Time.f_f('z.R', data)
        self.frontend.send_multipart([zmq_backend_id, b'', data])

    def frontend_handler(self):  # ROUTER
        # Message from an external zmq client.
        [frontend_id, connect_id, data] = self.frontend.recv_multipart()
        data = Time.f_f('z.P', data)
        if connect_id and connect_id in connection:
            # Known connection: forward straight to its TCP transport.
            data = Time.f_f('ztC', data)
            if connection.get(connect_id):
                connection[connect_id].tcpSend(data)
        elif connect_id:
            # get conn['server'] from packet header -> connect_id
            # New connection: parse '<hexid>,<ip>:<port>' out of connect_id
            # and queue it for tcpConnect to dial.
            conn = {}
            conn['connect_id'], conn['ip'], conn['port'], conn['frontend_id'] = \
                (re.findall(b'0x[\d\w]*', connect_id)[0],
                 b'.'.join(re.findall(b'(?<=[,.])\d+',connect_id)).decode('latin-1'),
                 int(re.findall(b'(?<=:)\d+', connect_id)[0]), frontend_id
                 )
            q_conn.put((conn, data,))
        else:
            pass

    def read_write(self):
        # Blocking zmq poll loop; exits (and stops the reactor) when
        # backend_handler raises on the 'stop_reactor' sentinel.
        while True:
            sockets = dict(self.poll.poll())
            if self.frontend in sockets:
                if sockets[self.frontend] == zmq.POLLIN:
                    self.frontend_handler()
            if self.backend in sockets:
                if sockets[self.backend] == zmq.POLLIN:
                    try:
                        self.backend_handler()
                    except zmq.ZMQError:
                        break
        reactor.stop()
        return 0

    def tcpConnect(self):
        # Drain one queued connection request per tick, then reschedule.
        if not q_conn.empty():
            conn, data = q_conn.get()
            reactor.connectTCP(conn['ip'], conn['port'],
                               EchoFactory(self, conn, data))
        reactor.callLater(0.3, self.tcpConnect)
        return 0
def signal_handler(signum, frame):
    # Request shutdown: send on the 'stop_reactor' DEALER; backend_handler
    # raises ZMQError for that identity, which makes read_write stop the
    # reactor.
    connection[b'stop_reactor'].stop_reactor.send_multipart([b'',])
def main(conn):
    # Run the bridge until the stop sentinel arrives, then exit the process.
    with ZmqHandler(conn) as zmq_handler:
        zmq_handler.run()
        reactor.run()
    print('stop')
    sys.exit(0)
def z_to_tw(conn):
    # Embedding entry point: install SIGTERM handling, then run the bridge.
    signal.signal(signal.SIGTERM, signal_handler)
    main(conn)
if __name__ == '__main__':
    args = agrparser()
    connzc = {'ip': '127.0.0.1', 'port': args.zmqport,
              'remote': {'ip': args.remoteip, 'port': args.remoteport} }
    conn = {}
    print('wait')
    for d, k in zip((connzc,), ('zc',)):
        conn[k] = d
        # Free the ports first: kill whatever currently listens on them
        # (fuser -k is Linux-specific and requires the ports to be numeric).
        os.system('fuser -k '+d['port']+'/tcp')
        if d.get('remote'):
            os.system('fuser -k '+d['remote']['port']+'/tcp')
    print(conn)
    print('start')
    # Both Ctrl-C and TERM trigger the orderly zmq-based shutdown.
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    #signal.signal(signal.SIGQUIT, signal_handler)
    main(conn['zc'])
| |
# Framework for setting Input and Output expectations in a unit test.
#
# This file defines the class ExpectedInputOutput which can be used to override
# sys.stdout to validate that expected outputs are printed and sys.stdin to
# inject mock responses in the standard input. This is useful in unit test, to
# validate that a command line tool behaves as expected.
#
# The following example tests a program printing a few lines, asking a question
# and acting according to the response:
#
# import io_expectation as expect
#
# mock_io = expect.ExpectedInputOutput()
# sys.stdin = mock_io
# sys.stdout = mock_io
#
# # Set expected I/Os.
# mock_io.set_expected_io(
# expect.InOrder(
# expect.Contains('initialization'),
# expect.Anything().repeatedly(),
# expect.Equals('Proceed?'),
# expect.Reply('yes'),
# expect.Prefix('Success')))
#
# # Run the program under test.
# print('Program initialization...')
# print('Loading resources...')
# print('Initialization complete.')
# print('Proceed? ')
# if raw_input() == 'yes':
# print('Success')
# else:
# print('Aborting')
#
# # Validate that the program matched all expectations.
# mock_io.assert_expectations_fulfilled()
#
# Some expectations can be abbreviated; for instance, the following two
# expectations are equivalent:
# io.set_expected_io(AnyOrder(Contains('foo'), Contains('bar')))
# io.set_expected_io(['foo', 'bar'])
#
# If no inputs are expected, expectation can be specified after the fact, which
# is usually more readable in unit tests. For instance:
#
# print('Some output text')
# mock_io.assert_output_was('Some output text')
#
# print('More output')
# print('Some more output')
# mock_io.assert_output_was(['More output', 'Some more output'])
import copy
import difflib
import re
import six
import sys
def default_expectation(expected_io):
  """Defines the behavior of python standard types when used as expectation.

  This is used to allow syntactic sugar, where expectations can be specified
  using a lightweight syntax. For instance, the following two statements
  produce the same expectations:
    io.set_expected_io(AnyOrder(Contains('foo'), Contains('bar')))
    io.set_expected_io(['foo', 'bar'])

  Args:
    expected_io: python standard type

  Returns:
    ExpectBase subclass instance.
  """
  # The branches are disjoint: strings, sets and sequences never overlap.
  if isinstance(expected_io, six.string_types):
    return Contains(expected_io)
  if isinstance(expected_io, set):
    return AnyOrder(*expected_io)
  if isinstance(expected_io, (list, tuple)):
    return InOrder(*expected_io)
  # Anything else is assumed to already be an ExpectBase instance.
  return expected_io
class ExpectBase(object):
  """Base class for all expected response string matchers."""

  def __init__(self):
    # State flags updated as strings are matched against the expectation.
    self._consumed = False
    self._fulfilled = False
    self._saturated = False
    self._greedy = False
    self._thrifty = False

  @property
  def fulfilled(self):
    """True when the expectation base condition has been met.

    The expectation could still accept more matches, until it's saturated.
    """
    return self._fulfilled

  @property
  def saturated(self):
    """True when the expectation reached its upper limit of allowed matches."""
    return self._saturated

  @property
  def greedy(self):
    """If True, repeated expectation will match as much as possible, possibly
    starving the following expectations."""
    return self._greedy

  @property
  def thrifty(self):
    """If True, the expectation will only match if no other alternative
    expectations match."""
    return self._thrifty

  def consume(self, string):
    """Matches a string against this expectation.

    Consumer expectation sub-classes must override this function.

    The expectation's internal states is updated accordingly. For instance,
    sequence expectations advances their state to the next expected IO.

    Args:
      string: the string to match against the expectation.

    Returns:
      bool: True if the string matched successfully."""
    self._consumed = True
    return False

  def test_consume(self, string):
    """Tests if the expectation would success at consuming a string.

    Consumer expectation sub-classes must override this function.

    The internal state of the expectation are left untouched.

    Returns:
      bool: True if the string would match successfully.
    """
    return False

  def produce(self):
    """Produces a string, as if entered by the user in response to a prompt.

    Producer expectation sub-classes must override this function.

    Returns:
      str, the string produced by the expectation.
    """
    return None

  def repeatedly(self, min_repetition=0, max_repetition=None):
    """Shorthand for making this expectation repeat indefinitely.

    Returns:
      Repeatedly instance: wrapped version of self, which repeats 0 or more
      times.
    """
    return Repeatedly(self, min_repetition, max_repetition)

  def times(self, min_repetition, max_repetition=None):
    """Shorthand for making this expectation repeat a given number of times.

    Args:
      min_repetition: int, the minimum number of times this expectation needs to
        match for it to be fulfilled.
      max_repetition: int, the number of repetition after which this expectation
        is saturated and won't successfully match another time.

    Returns:
      Repeatedly instance: wrapped version of self, which repeats from
      min_repetition to max_repetition times.
    """
    # A single argument means "exactly that many times".
    max_repetition = max_repetition or min_repetition
    return Repeatedly(self, min_repetition, max_repetition)

  def apply_transform(self, fn):
    """Apply a transformation on the expectation definition.

    The ExpectedInputOutput class calls this function on every expectations
    registered, which is useful for 'patching' all expectations with the same
    transformation. A common way to use this is to fix path delimiters to match
    the OS format in expectations containing paths.

    Args:
      fn: callback function to call to transform the expectation. Callback
        takes a string as input and returns a transforms version of that
        string.
    """
    pass

  def __and__(self, other):
    # Compose with another expectation via And (defined in this module).
    ret = And(self, other)
    return ret

  def __or__(self, other):
    # Compose with another expectation via Or (defined in this module).
    ret = Or(self, other)
    return ret
class ExpectStringBase(ExpectBase):
    """Shared machinery for matchers that compare input against a stored string.

    Subclasses implement _match(); a successful consume() both fulfills and
    saturates the expectation (single-shot matchers).
    """

    def __init__(self, expected):
        super(ExpectStringBase, self).__init__()
        self._expected = expected

    def consume(self, string):
        self._consumed = True
        matched = self.test_consume(string)
        self._fulfilled = matched
        self._saturated = matched
        return matched

    def test_consume(self, string):
        return self._match(string)

    def apply_transform(self, fn):
        self._expected = fn(self._expected)

    def _match(self, string):
        # Concrete matchers must supply the comparison.
        raise NotImplementedError()

    def description(self, saturated):
        return '%s(%r)' % (type(self).__name__, self._expected)
class Equals(ExpectStringBase):
    """Matches when the input equals the expected string.

    Both sides are compared with leading and trailing whitespace stripped.
    """

    def _match(self, string):
        return self._expected.strip() == string.strip()
class Contains(ExpectStringBase):
    """Matches when the expected sub-string occurs anywhere in the input."""

    def _match(self, candidate):
        return self._expected in candidate
class Prefix(ExpectStringBase):
    """Matches when the whitespace-stripped input starts with the expected prefix."""

    def _match(self, string):
        stripped = string.strip()
        return stripped.startswith(self._expected)
class Regex(ExpectStringBase):
    """Matches a string against a regular expression.

    The pattern is applied with ``re.match`` and ``re.DOTALL``, i.e. it must
    match at the beginning of the input and ``.`` also matches newlines.
    """

    def _match(self, other):
        # re.match returns a Match object or None; normalize to bool so
        # consume() stores and returns a real boolean, as the ExpectBase
        # contract documents ("bool: True if the string matched").
        return re.match(self._expected, other, re.DOTALL) is not None
class Anything(ExpectBase):
    """Consumes exactly one string, whatever its content."""

    def consume(self, string):
        self._consumed = True
        matched = self.test_consume(string)
        self._fulfilled = matched
        self._saturated = matched
        # Always report success, regardless of content.
        return True

    def test_consume(self, string):
        return True

    def description(self, saturated):
        return '%s()' % type(self).__name__
class And(ExpectBase):
    """Conjunction matcher: input must satisfy every sub-expectation."""

    def __init__(self, *args):
        super(And, self).__init__()
        # Normalize plain strings/lists/sets into ExpectBase instances.
        self._expected_list = [default_expectation(expected) for expected in args]

    @property
    def fulfilled(self):
        return all(e.fulfilled for e in self._expected_list)

    @property
    def saturated(self):
        # One saturated child can no longer accept a match, so the whole
        # conjunction can no longer succeed either.
        return any(e.saturated for e in self._expected_list)

    def consume(self, string):
        self._consumed = True
        # NOTE: all() short-circuits, so children after the first failing
        # one are not consumed and keep their previous state.
        return all(e.consume(string) for e in self._expected_list)

    def test_consume(self, string):
        return all(e.test_consume(string) for e in self._expected_list)

    def apply_transform(self, fn):
        for expected in self._expected_list:
            expected.apply_transform(fn)

    def description(self, saturated):
        # Describe only the still-relevant children: never consumed, or whose
        # saturation matches the requested view.
        parts = [a.description(saturated) for a in self._expected_list
                 if not a._consumed or a.saturated == saturated]
        if len(parts) == 1:
            return parts[0]
        else:
            return ' and '.join(parts)
class Or(ExpectBase):
    """Disjunction matcher: input must satisfy at least one sub-expectation."""

    def __init__(self, *args):
        super(Or, self).__init__()
        # Normalize plain strings/lists/sets into ExpectBase instances.
        self._expected_list = [default_expectation(expected) for expected in args]

    @property
    def fulfilled(self):
        return any(e.fulfilled for e in self._expected_list)

    @property
    def saturated(self):
        # Only saturated once no alternative can accept further matches.
        return all(e.saturated for e in self._expected_list)

    def consume(self, string):
        self._consumed = True
        # NOTE: any() short-circuits, so children after the first successful
        # one are not consumed and keep their previous state.
        return any(e.consume(string) for e in self._expected_list)

    def test_consume(self, string):
        return any(e.test_consume(string) for e in self._expected_list)

    def apply_transform(self, fn):
        for expected in self._expected_list:
            expected.apply_transform(fn)

    def description(self, saturated):
        # Describe only the still-relevant children: never consumed, or whose
        # saturation matches the requested view.
        parts = [a.description(saturated) for a in self._expected_list
                 if not a._consumed or a.saturated == saturated]
        if len(parts) == 1:
            return parts[0]
        else:
            return ' or '.join(parts)
class Not(ExpectBase):
    """Negation matcher: succeeds exactly when the wrapped expectation fails.

    Marked thrifty so that alternative expectations get first pick of the
    input before the negation claims it.
    """

    def __init__(self, expected):
        super(Not, self).__init__()
        # Normalize the argument like And/Or/Repeatedly do, so that
        # Not('foo') works the same as Not(Contains('foo')).
        self._expected = default_expectation(expected)
        self._thrifty = True

    def consume(self, string):
        self._consumed = True
        self._fulfilled = not self._expected.consume(string)
        self._saturated = not self._expected.saturated
        return self._fulfilled

    def test_consume(self, string):
        return not self._expected.test_consume(string)

    def apply_transform(self, fn):
        self._expected.apply_transform(fn)

    def description(self, saturated):
        # The inner expectation's view is inverted along with its result.
        return 'Not(%s)' % (self._expected.description(not saturated))
class Repeatedly(ExpectBase):
    """Wraps an expectation to make it repeat a given number of times."""

    def __init__(self, sub_expectation, min_repetition=0, max_repetition=None):
        # max_repetition=None means no upper bound: never saturated.
        super(Repeatedly, self).__init__()
        self._min_repetition = min_repetition
        self._max_repetition = max_repetition
        self._sub_expectation = default_expectation(sub_expectation)
        self._current_repetition = 0
        # Each repetition works on a deep copy so it starts from pristine
        # state; the original stays untouched as the template.
        self._current_expectation = copy.deepcopy(self._sub_expectation)
        self._thrifty = self._sub_expectation.thrifty

    @property
    def fulfilled(self):
        return self._current_repetition >= self._min_repetition

    @property
    def saturated(self):
        return (self._max_repetition is not None and
                self._current_repetition >= self._max_repetition)

    def consume(self, string):
        self._consumed = True
        result = self._current_expectation.consume(string)
        # On the consume path a repetition is counted as soon as the inner
        # expectation is fulfilled, and a fresh copy is started.
        if self._current_expectation.fulfilled:
            self._current_repetition += 1
            self._current_expectation = copy.deepcopy(self._sub_expectation)
        return result

    def test_consume(self, string):
        return self._current_expectation.test_consume(string)

    def produce(self):
        result = self._current_expectation.produce()
        # On the produce path a repetition is only counted once the inner
        # expectation is saturated (producers may emit several strings).
        if self._current_expectation.saturated:
            self._current_repetition += 1
            self._current_expectation = copy.deepcopy(self._sub_expectation)
        return result

    def apply_transform(self, fn):
        self._sub_expectation.apply_transform(fn)

    def description(self, saturated):
        # Report the remaining repetition bounds relative to progress so far.
        arg1 = max(self._min_repetition - self._current_repetition, 0)
        arg2 = (self._max_repetition - self._current_repetition
                if self._max_repetition is not None else None)
        return '%s(%s%s%s)' % (
            type(self).__name__, self._current_expectation.description(saturated),
            ', %d' % arg1 if arg1 > 0 or arg2 is not None else '',
            ', %d' % arg2 if arg2 else '')
class ExpectSequenceBase(ExpectBase):
    """Shared state handling for ordered and unordered expectation sequences."""

    def __init__(self, *args):
        super(ExpectSequenceBase, self).__init__()
        self._expected_list = [default_expectation(a) for a in args]

    @property
    def fulfilled(self):
        return all(e.fulfilled for e in self._expected_list)

    @property
    def saturated(self):
        return all(e.saturated for e in self._expected_list)

    def apply_transform(self, fn):
        for member in self._expected_list:
            member.apply_transform(fn)

    def description(self, saturated):
        # Describe only the still-relevant members: never consumed, or whose
        # saturation matches the requested view.
        parts = [member.description(saturated)
                 for member in self._expected_list
                 if not member._consumed or member.saturated == saturated]
        if len(parts) == 1:
            return parts[0]
        return '%s(%s)' % (type(self).__name__, ', '.join(parts))
class InOrder(ExpectSequenceBase):
    """Sequence of expectations that must match in right order."""

    def consume(self, string):
        self._consumed = True
        to_consume = None
        # Walk the sequence, remembering the last matching candidate.
        for i, expected in enumerate(self._expected_list):
            matches = expected.test_consume(string)
            if matches:
                to_consume = (i, expected)
                # Stop at a greedy or still-unfulfilled matcher: it takes
                # priority over anything later in the sequence.
                if expected.greedy or not expected.fulfilled:
                    break
            elif expected.fulfilled:
                # A fulfilled non-matching member may be skipped over.
                continue
            else:
                # Unfulfilled and not matching: order would be violated.
                break
        if to_consume is not None:
            i, expected = to_consume
            consumed = expected.consume(string)
            assert(consumed)
            # We got a match somewhere down the sequence. Discard any preceding
            # fulfilled expectations.
            self._expected_list = self._expected_list[i:]
            if expected.saturated:
                self._expected_list.remove(expected)
            return consumed
        return False

    def test_consume(self, string):
        # Probe without mutating state: succeed on the first match, fail as
        # soon as a non-matching member is still unfulfilled.
        for expected in self._expected_list:
            if expected.test_consume(string):
                return True
            elif not expected.fulfilled:
                return False
        return False

    def produce(self):
        # Ask each member in turn for a produced string, stopping at the
        # first unfulfilled member that produces nothing.
        for i, expected in enumerate(self._expected_list):
            result = expected.produce()
            if result:
                # We got a match somewhere down the sequence. Discard any preceding
                # fulfilled expectations.
                self._expected_list = self._expected_list[i:]
                if expected.saturated:
                    self._expected_list.remove(expected)
                return result
            elif not expected.fulfilled:
                return None
        return None
class AnyOrder(ExpectSequenceBase):
    """Sequence of expectation that can match in any order."""

    def consume(self, string):
        self._consumed = True
        to_consume = None
        # Prefer a non-thrifty member that is greedy or still unfulfilled;
        # otherwise fall back to the last member that matched at all.
        for expected in self._expected_list:
            if expected.test_consume(string):
                to_consume = expected
                if not expected.thrifty and (expected.greedy or not expected.fulfilled):
                    break
        if to_consume is not None:
            consumed = to_consume.consume(string)
            assert(consumed)
            # A saturated member can accept no further input; drop it.
            if to_consume.saturated:
                self._expected_list.remove(to_consume)
            return True
        return False

    def test_consume(self, string):
        return any(expected.test_consume(string)
                   for expected in self._expected_list)

    def produce(self):
        # First member able to produce a string wins.
        for expected in self._expected_list:
            result = expected.produce()
            if result:
                if expected.saturated:
                    self._expected_list.remove(expected)
                return result
        return None
def Somewhere(expectation):
    """Match an expectation anywhere in a document.

    The expectation is sandwiched between two unbounded Anything matchers,
    so any amount of unrelated output may precede or follow it.
    """
    leading = Anything().repeatedly()
    trailing = Anything().repeatedly()
    return InOrder(leading, expectation, trailing)
class Reply(ExpectBase):
    """Expects a read of the input pipe and replies with a specific string."""

    def __init__(self, reply_string):
        super(Reply, self).__init__()
        self._reply_string = reply_string

    def produce(self):
        # A single produced reply both fulfills and saturates this matcher.
        self._fulfilled = self._saturated = True
        return self._reply_string

    def consume(self, line):
        # A Reply only ever produces input; any output matched against it is
        # a test failure. (This override was previously named _consume, so
        # the base class's silent no-op consume() ran instead and this
        # diagnostic was unreachable.)
        raise AssertionError('Expecting user input but got output line: %s' % line)

    def description(self, saturated):
        return '%s(%s)' % (type(self).__name__, self._reply_string)
class ExpectedInputOutput(object):
    """File object for overriding stdin/out, mocking inputs & checking outputs.

    Install an instance as sys.stdin/sys.stdout: everything written through
    write() is buffered and matched against the current expectation, and
    readline() is answered from the expectation's produce() output.
    """

    def __init__(self):
        self.set_transform_fn(None)
        self.set_expected_io(None)
        self._original_stdin = sys.stdin
        self._original_stdout = sys.stdout

    def set_transform_fn(self, transform_fn):
        """Callback to transform all expectations passed in set_expected_io.

        Useful for 'patching' all expectations with the same transformation. A
        common way to use this is to fix path delimiters to match the OS format
        in expectations containing paths.

        Args:
          transform_fn: callback function to call to transform the expectation.
            Callback takes a string as input and returns a transforms version
            of that string.
        """
        self._transform_fn = transform_fn

    def set_expected_io(self, expected_io):
        """Set an expectation for the next sequence of IOs.

        The expected IO can be specified as an instance of an ExpectBase child
        class, or as a python standard type (str, list, etc.) which are mapped
        to an expectation object using default_expectation.

        If expected_io is None, next IOs will be ignored.

        Args:
          expected_io: instance of an ExpectBase subclass or any types accepted
            by default_expectation, the expectation to apply all input and
            outputs against.
        """
        self._expected_io = self._patch_expected_io(expected_io)
        # Fresh buffer: captured output is only matched lazily, in
        # _match_pending_outputs().
        self._cmd_output = six.StringIO()

    def write(self, string):
        """File object 'write' method, matched against the next expected output.

        Args:
          string: str, string being written to stdout.
        """
        # Echo to the real stdout and keep a copy for later matching.
        self._original_stdout.write(string)
        self._cmd_output.write(string)

    def flush(self):
        """File object 'flush' method."""
        self._original_stdout.flush()

    def readline(self):
        """File object 'readline' method, replied using the next expected input.

        Returns:
          str, the mock string faking a read from stdin.

        Raises:
          AssertionError: raised when IOs do not match expectations.
        """
        if self._expected_io is None:
            raise AssertionError('Trying to readline but no expected IO was set.')
        # Output produced before the prompt must be matched first.
        self._match_pending_outputs()
        if self._expected_io.saturated:
            raise AssertionError('No more user input prompt expected')
        reply = self._expected_io.produce()
        if not reply:
            # BUG FIX: description() takes a 'saturated' argument; the previous
            # 'fulfilled=False' keyword raised TypeError instead of this error.
            raise AssertionError(
                'Unexpected user input prompt request. Expected:\n'
                '%s' % self._expected_io.description(saturated=False))
        reply += '\n'
        # Echo the mocked reply so the transcript reads naturally.
        self._original_stdout.write(reply)
        return reply

    def assert_expectations_fulfilled(self):
        """Asserts that all expectation are fulfilled.

        Resets this object, ready to restart with a new set_expected_io.

        Raises:
          AssertionError: raised when IOs do not match expectations.
        """
        self._match_pending_outputs()
        if self._expected_io:
            if not self._expected_io.fulfilled:
                raise AssertionError('Pending IO expectation never fulfilled:\n%s' %
                                     self._expected_io.description(saturated=False))
        self.set_expected_io(None)

    def assert_output_was(self, expected_output):
        """Asserts that the previous outputs match the specified expectation.

        Args:
          expected_output: instance of an ExpectBase subclass, the expectation
            to apply all previous outputs against.

        Raises:
          AssertionError: raised when previous outputs do not match expectations.
        """
        self._expected_io = self._patch_expected_io(expected_output)
        self.assert_expectations_fulfilled()

    def _patch_expected_io(self, expected_io):
        """Patch the specified expectation, applying defaults and transforms.

        Args:
          expected_io: instance of an ExpectBase subclass or any types accepted
            by default_expectation.

        Returns:
          Instance of an ExpectBase subclass.
        """
        patched_expected_io = default_expectation(expected_io)
        if patched_expected_io and self._transform_fn:
            patched_expected_io.apply_transform(self._transform_fn)
        return patched_expected_io

    def _match_pending_outputs(self):
        """Match any pending IO against the expectations.

        Raises:
          AssertionError: raised when IOs do not match expectations.
        """
        output_lines = self._cmd_output.getvalue().splitlines(True)
        self._cmd_output = six.StringIO()
        if self._expected_io:
            for line in output_lines:
                if self._expected_io.saturated:
                    raise AssertionError('No more output expected, but got: \'%s\'' %
                                         line)
                if not self._expected_io.consume(line):
                    raise AssertionError(
                        'Unexpected output:\n'
                        '%s' % '\n'.join(difflib.ndiff(
                            self._expected_io.description(saturated=False).splitlines(True),
                            repr(line).splitlines(True))))
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Class for PXE bare-metal nodes.
"""
import os
import shutil
from nova.compute import instance_types
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.baremetal import base
from nova.virt.baremetal import db
from nova.virt.baremetal import utils as bm_utils
from nova.virt.disk import api as disk
# Configuration options for the baremetal PXE driver; registered under the
# [baremetal] option group below.
pxe_opts = [
    cfg.StrOpt('dnsmasq_pid_dir',
               default='$state_path/baremetal/dnsmasq',
               help='path to directory stores pidfiles of dnsmasq'),
    cfg.StrOpt('dnsmasq_lease_dir',
               default='$state_path/baremetal/dnsmasq',
               help='path to directory stores leasefiles of dnsmasq'),
    cfg.StrOpt('deploy_kernel',
               help='Default kernel image ID used in deployment phase'),
    cfg.StrOpt('deploy_ramdisk',
               help='Default ramdisk image ID used in deployment phase'),
    cfg.StrOpt('net_config_template',
               default='$pybasedir/nova/virt/baremetal/'
                       'net-dhcp.ubuntu.template',
               help='Template file for injected network config'),
    cfg.StrOpt('pxe_append_params',
               help='additional append parameters for baremetal PXE boot'),
    cfg.StrOpt('pxe_config_template',
               default='$pybasedir/nova/virt/baremetal/pxe_config.template',
               help='Template file for PXE configuration'),
    cfg.StrOpt('pxe_interface',
               default='eth0'),
    cfg.StrOpt('pxe_path',
               default='/usr/lib/syslinux/pxelinux.0',
               help='path to pxelinux.0'),
    ]

LOG = logging.getLogger(__name__)

baremetal_group = cfg.OptGroup(name='baremetal',
                               title='Baremetal Options')

CONF = cfg.CONF
CONF.register_group(baremetal_group)
CONF.register_opts(pxe_opts, baremetal_group)

# Cache slot for the lazily-imported Cheetah Template class; populated by
# _get_cheetah() on first use.
CHEETAH = None
def _get_cheetah():
    """Import Cheetah's Template class on first use and cache it globally.

    Keeps Cheetah an optional dependency: it is only imported when a
    template actually needs rendering.
    """
    global CHEETAH
    if CHEETAH is not None:
        return CHEETAH
    from Cheetah.Template import Template as CHEETAH
    return CHEETAH
def build_pxe_config(deployment_id, deployment_key, deployment_iscsi_iqn,
                     deployment_aki_path, deployment_ari_path,
                     aki_path, ari_path):
    """Build the PXE config file for a node

    This method builds the PXE boot configuration file for a node,
    given all the required parameters.

    The resulting file has both a "deploy" and "boot" label, which correspond
    to the two phases of booting. This may be extended later.
    """
    LOG.debug(_("Building PXE config for deployment %s.") % deployment_id)
    # Values substituted into the PXE config template.
    pxe_options = dict(
        deployment_id=deployment_id,
        deployment_key=deployment_key,
        deployment_iscsi_iqn=deployment_iscsi_iqn,
        deployment_aki_path=deployment_aki_path,
        deployment_ari_path=deployment_ari_path,
        aki_path=aki_path,
        ari_path=ari_path,
        pxe_append_params=CONF.baremetal.pxe_append_params,
    )
    template = open(CONF.baremetal.pxe_config_template).read()
    cheetah = _get_cheetah()
    # '${ROOT}' is passed through verbatim so the deploy ramdisk can
    # substitute it later.
    search_list = [{'pxe_options': pxe_options,
                    'ROOT': '${ROOT}',
                    }]
    return str(cheetah(template, searchList=search_list))
def build_network_config(network_info):
    """Render the injected network configuration for an instance.

    Args:
        network_info: list of (network, mapping) pairs as supplied by nova's
            network layer, or a single such pair.

    Returns:
        str: the rendered network configuration template.
    """
    # TODO(deva): fix assumption that device names begin with "eth"
    #             and fix assumption about ordering
    # Accept a bare (network, mapping) pair as well as a list of them.
    # (Previously done via `assert isinstance(...)` caught as AssertionError,
    # which is a no-op under `python -O`; an explicit check always runs.)
    if not isinstance(network_info, list):
        network_info = [network_info]
    interfaces = []
    for id, (network, mapping) in enumerate(network_info):
        address_v6 = None
        gateway_v6 = None
        netmask_v6 = None
        if CONF.use_ipv6:
            address_v6 = mapping['ip6s'][0]['ip']
            netmask_v6 = mapping['ip6s'][0]['netmask']
            gateway_v6 = mapping['gateway_v6']
        interface = {
            'name': 'eth%d' % id,
            'hwaddress': mapping['mac'],
            'address': mapping['ips'][0]['ip'],
            'gateway': mapping['gateway'],
            'netmask': mapping['ips'][0]['netmask'],
            'dns': ' '.join(mapping['dns']),
            'address_v6': address_v6,
            'gateway_v6': gateway_v6,
            'netmask_v6': netmask_v6,
        }
        interfaces.append(interface)
    cheetah = _get_cheetah()
    network_config = str(cheetah(
        open(CONF.baremetal.net_config_template).read(),
        searchList=[
            {'interfaces': interfaces,
             'use_ipv6': CONF.use_ipv6,
             }
        ]))
    return network_config
def get_deploy_aki_id(instance):
    """Return the deploy kernel image ID for *instance*.

    Falls back to the configured default when the instance's extra_specs
    do not provide one.
    """
    extra_specs = instance.get('extra_specs', {})
    return extra_specs.get('deploy_kernel_id', CONF.baremetal.deploy_kernel)
def get_deploy_ari_id(instance):
    """Return the deploy ramdisk image ID for *instance*.

    Falls back to the configured default when the instance's extra_specs
    do not provide one.
    """
    extra_specs = instance.get('extra_specs', {})
    return extra_specs.get('deploy_ramdisk_id', CONF.baremetal.deploy_ramdisk)
def get_image_dir_path(instance):
    """Generate the dir for an instances disk"""
    base_dir = CONF.instances_path
    return os.path.join(base_dir, instance['name'])
def get_image_file_path(instance):
    """Generate the full path for an instances disk"""
    # Equivalent to joining instances_path, name and 'disk' in one call.
    return os.path.join(get_image_dir_path(instance), 'disk')
def get_pxe_config_file_path(instance):
    """Generate the path for an instances PXE config file"""
    tftp_root = CONF.baremetal.tftp_root
    return os.path.join(tftp_root, instance['uuid'], 'config')
def get_partition_sizes(instance):
    """Return the (root_mb, swap_mb) partition sizes for *instance*."""
    root_mb = instance['root_gb'] * 1024
    # NOTE(deva): is there a way to get swap_mb directly from instance?
    flavor = instance_types.get_instance_type(instance['instance_type_id'])
    # NOTE(deva): For simpler code paths on the deployment side, we always
    #             create a swap partition. If the flavor does not specify
    #             any swap, we default to 1MB.
    swap_mb = max(flavor['swap'], 1)
    return (root_mb, swap_mb)
def get_pxe_mac_path(mac):
    """Convert a MAC address into a PXE config file name"""
    # pxelinux looks up "01-<mac with dashes>" under pxelinux.cfg/.
    file_name = "01-" + mac.replace(":", "-").lower()
    return os.path.join(CONF.baremetal.tftp_root, 'pxelinux.cfg', file_name)
def get_tftp_image_info(instance):
    """Generate the paths for tftp files for this instance

    Raises NovaException if
    - instance does not contain kernel_id or ramdisk_id
    - deploy_kernel_id or deploy_ramdisk_id can not be read from
      instance['extra_specs'] and defaults are not set

    Returns:
        dict mapping label -> [glance image uuid, tftp path].
    """
    image_info = {
        'kernel': [None, None],
        'ramdisk': [None, None],
        'deploy_kernel': [None, None],
        'deploy_ramdisk': [None, None],
    }
    try:
        image_info['kernel'][0] = str(instance['kernel_id'])
        image_info['ramdisk'][0] = str(instance['ramdisk_id'])
        image_info['deploy_kernel'][0] = get_deploy_aki_id(instance)
        image_info['deploy_ramdisk'][0] = get_deploy_ari_id(instance)
    except KeyError:
        # Missing ids are reported collectively below via missing_labels;
        # (the previously-bound exception variable was unused).
        pass
    missing_labels = []
    for label in image_info.keys():
        (uuid, path) = image_info[label]
        if uuid is None:
            missing_labels.append(label)
        else:
            image_info[label][1] = os.path.join(CONF.baremetal.tftp_root,
                                                instance['uuid'], label)
    if missing_labels:
        raise exception.NovaException(_(
            "Can not activate PXE bootloader. The following boot parameters "
            "were not passed to baremetal driver: %s") % missing_labels)
    return image_info
class PXE(base.NodeDriver):
    """PXE bare metal driver"""

    def __init__(self):
        super(PXE, self).__init__()

    def _collect_mac_addresses(self, context, node):
        # Gather the provisioning MAC plus every registered NIC address,
        # sorted for deterministic ordering.
        macs = []
        macs.append(db.bm_node_get(context, node['id'])['prov_mac_address'])
        for nic in db.bm_interface_get_all_by_bm_node_id(context, node['id']):
            if nic['address']:
                macs.append(nic['address'])
        macs.sort()
        return macs

    def _generate_udev_rules(self, context, node):
        # TODO(deva): fix assumption that device names begin with "eth"
        #             and fix assumption of ordering
        # Build a 70-persistent-net.rules file pinning each MAC address to a
        # stable ethN name inside the deployed image.
        macs = self._collect_mac_addresses(context, node)
        rules = ''
        for (i, mac) in enumerate(macs):
            rules += 'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ' \
                     'ATTR{address}=="%(mac)s", ATTR{dev_id}=="0x0", ' \
                     'ATTR{type}=="1", KERNEL=="eth*", NAME="%(name)s"\n' \
                     % {'mac': mac.lower(),
                        'name': 'eth%d' % i,
                        }
        return rules

    def _cache_tftp_images(self, context, instance, image_info):
        """Fetch the necessary kernels and ramdisks for the instance."""
        fileutils.ensure_tree(
            os.path.join(CONF.baremetal.tftp_root, instance['uuid']))
        LOG.debug(_("Fetching kernel and ramdisk for instance %s") %
                  instance['name'])
        # image_info maps label -> [glance uuid, tftp path]; see
        # get_tftp_image_info().
        for label in image_info.keys():
            (uuid, path) = image_info[label]
            bm_utils.cache_image(
                context=context,
                target=path,
                image_id=uuid,
                user_id=instance['user_id'],
                project_id=instance['project_id'],
            )

    def _cache_image(self, context, instance, image_meta):
        """Fetch the instance's image from Glance

        This method pulls the relevant AMI and associated kernel and ramdisk,
        and the deploy kernel and ramdisk from Glance, and writes them
        to the appropriate places on local disk.

        Both sets of kernel and ramdisk are needed for PXE booting, so these
        are stored under CONF.baremetal.tftp_root.

        At present, the AMI is cached and certain files are injected.
        Debian/ubuntu-specific assumptions are made regarding the injected
        files. In a future revision, this functionality will be replaced by a
        more scalable and os-agnostic approach: the deployment ramdisk will
        fetch from Glance directly, and write its own last-mile configuration.
        """
        fileutils.ensure_tree(get_image_dir_path(instance))
        image_path = get_image_file_path(instance)
        LOG.debug(_("Fetching image %(ami)s for instance %(name)s") %
                  {'ami': image_meta['id'], 'name': instance['name']})
        bm_utils.cache_image(context=context,
                             target=image_path,
                             image_id=image_meta['id'],
                             user_id=instance['user_id'],
                             project_id=instance['project_id']
                             )
        return [image_meta['id'], image_path]

    def _inject_into_image(self, context, node, instance, network_info,
                           injected_files=None, admin_password=None):
        """Inject last-mile configuration into instances image

        Much of this method is a hack around DHCP and cloud-init
        not working together with baremetal provisioning yet.
        """
        # NOTE(deva): We assume that if we're not using a kernel,
        #             then the target partition is the first partition
        partition = None
        if not instance['kernel_id']:
            partition = "1"
        ssh_key = None
        if 'key_data' in instance and instance['key_data']:
            ssh_key = str(instance['key_data'])
        if injected_files is None:
            injected_files = []
        net_config = build_network_config(network_info)
        udev_rules = self._generate_udev_rules(context, node)
        injected_files.append(
            ('/etc/udev/rules.d/70-persistent-net.rules', udev_rules))
        if instance['hostname']:
            injected_files.append(('/etc/hostname', instance['hostname']))
        LOG.debug(_("Injecting files into image for instance %(name)s") %
                  {'name': instance['name']})
        bm_utils.inject_into_image(
            image=get_image_file_path(instance),
            key=ssh_key,
            net=net_config,
            metadata=instance['metadata'],
            admin_password=admin_password,
            files=injected_files,
            partition=partition,
        )

    def cache_images(self, context, node, instance,
                     admin_password, image_meta, injected_files, network_info):
        """Prepare all the images for this instance"""
        tftp_image_info = get_tftp_image_info(instance)
        self._cache_tftp_images(context, instance, tftp_image_info)
        self._cache_image(context, instance, image_meta)
        self._inject_into_image(context, node, instance, network_info,
                                injected_files, admin_password)

    def destroy_images(self, context, node, instance):
        """Delete instance's image file"""
        bm_utils.unlink_without_raise(get_image_file_path(instance))
        # NOTE(review): get_image_dir_path() returns a directory; unlink on a
        # directory will fail (silently, given *_without_raise). shutil is
        # imported at module level but unused, suggesting a recursive remove
        # was intended here — verify.
        bm_utils.unlink_without_raise(get_image_dir_path(instance))

    def activate_bootloader(self, context, node, instance):
        """Configure PXE boot loader for an instance

        Kernel and ramdisk images are downloaded by cache_tftp_images,
        and stored in /tftpboot/{uuid}/

        This method writes the instances config file, and then creates
        symlinks for each MAC address in the instance.

        By default, the complete layout looks like this:

        /tftpboot/
            ./{uuid}/
                 kernel
                 ramdisk
                 deploy_kernel
                 deploy_ramdisk
                 config
            ./pxelinux.cfg/
                 {mac} -> ../{uuid}/config
        """
        image_info = get_tftp_image_info(instance)
        (root_mb, swap_mb) = get_partition_sizes(instance)
        pxe_config_file_path = get_pxe_config_file_path(instance)
        image_file_path = get_image_file_path(instance)

        # The deployment record carries a one-time key the deploy ramdisk
        # must present back to the deployment service.
        deployment_key = bm_utils.random_alnum(32)
        deployment_iscsi_iqn = "iqn-%s" % instance['uuid']
        deployment_id = db.bm_deployment_create(
            context,
            deployment_key,
            image_file_path,
            pxe_config_file_path,
            root_mb,
            swap_mb
        )
        pxe_config = build_pxe_config(
            deployment_id,
            deployment_key,
            deployment_iscsi_iqn,
            image_info['deploy_kernel'][1],
            image_info['deploy_ramdisk'][1],
            image_info['kernel'][1],
            image_info['ramdisk'][1],
        )
        bm_utils.write_to_file(pxe_config_file_path, pxe_config)

        # One pxelinux.cfg symlink per MAC, all pointing at the same config.
        macs = self._collect_mac_addresses(context, node)
        for mac in macs:
            mac_path = get_pxe_mac_path(mac)
            bm_utils.unlink_without_raise(mac_path)
            bm_utils.create_link_without_raise(pxe_config_file_path, mac_path)

    def deactivate_bootloader(self, context, node, instance):
        """Delete PXE bootloader images and config"""
        # Best-effort cleanup: each lookup failure is tolerated so that the
        # remaining artifacts still get removed.
        try:
            image_info = get_tftp_image_info(instance)
        except exception.NovaException:
            pass
        else:
            for label in image_info.keys():
                (uuid, path) = image_info[label]
                bm_utils.unlink_without_raise(path)

        bm_utils.unlink_without_raise(get_pxe_config_file_path(instance))
        try:
            macs = self._collect_mac_addresses(context, node)
        except exception.DBError:
            pass
        else:
            for mac in macs:
                bm_utils.unlink_without_raise(get_pxe_mac_path(mac))

        bm_utils.unlink_without_raise(
            os.path.join(CONF.baremetal.tftp_root, instance['uuid']))

    def activate_node(self, context, node, instance):
        # Power management is handled elsewhere; nothing to do for PXE.
        pass

    def deactivate_node(self, context, node, instance):
        # Power management is handled elsewhere; nothing to do for PXE.
        pass
| |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v9.enums.types import access_role as gage_access_role
from google.ads.googleads.v9.enums.types import (
response_content_type as gage_response_content_type,
)
from google.ads.googleads.v9.resources.types import customer as gagr_customer
from google.protobuf import field_mask_pb2 # type: ignore
# Module descriptor: registers every message defined below under the
# google.ads.googleads.v9.services proto package.
__protobuf__ = proto.module(
    package="google.ads.googleads.v9.services",
    marshal="google.ads.googleads.v9",
    manifest={
        "GetCustomerRequest",
        "MutateCustomerRequest",
        "CreateCustomerClientRequest",
        "CustomerOperation",
        "CreateCustomerClientResponse",
        "MutateCustomerResponse",
        "MutateCustomerResult",
        "ListAccessibleCustomersRequest",
        "ListAccessibleCustomersResponse",
    },
)
class GetCustomerRequest(proto.Message):
    r"""Request message for
    [CustomerService.GetCustomer][google.ads.googleads.v9.services.CustomerService.GetCustomer].

    Attributes:
        resource_name (str):
            Required. The resource name of the customer
            to fetch.
    """

    # Proto field 1: resource name of the customer to fetch.
    resource_name = proto.Field(proto.STRING, number=1,)
class MutateCustomerRequest(proto.Message):
    r"""Request message for
    [CustomerService.MutateCustomer][google.ads.googleads.v9.services.CustomerService.MutateCustomer].

    Attributes:
        customer_id (str):
            Required. The ID of the customer being
            modified.
        operation (google.ads.googleads.v9.services.types.CustomerOperation):
            Required. The operation to perform on the
            customer
        validate_only (bool):
            If true, the request is validated but not
            executed. Only errors are returned, not results.
        response_content_type (google.ads.googleads.v9.enums.types.ResponseContentTypeEnum.ResponseContentType):
            The response content type setting. Determines
            whether the mutable resource or just the
            resource name should be returned post mutation.
    """

    # Proto field 1: target customer ID.
    customer_id = proto.Field(proto.STRING, number=1,)
    # Proto field 4: the update operation to apply.
    operation = proto.Field(
        proto.MESSAGE, number=4, message="CustomerOperation",
    )
    # Proto field 5: dry-run flag.
    validate_only = proto.Field(proto.BOOL, number=5,)
    # Proto field 6: whether to return the mutated resource or only its name.
    response_content_type = proto.Field(
        proto.ENUM,
        number=6,
        enum=gage_response_content_type.ResponseContentTypeEnum.ResponseContentType,
    )
class CreateCustomerClientRequest(proto.Message):
    r"""Request message for
    [CustomerService.CreateCustomerClient][google.ads.googleads.v9.services.CustomerService.CreateCustomerClient].

    Attributes:
        customer_id (str):
            Required. The ID of the Manager under whom
            client customer is being created.
        customer_client (google.ads.googleads.v9.resources.types.Customer):
            Required. The new client customer to create.
            The resource name on this customer will be
            ignored.
        email_address (str):
            Email address of the user who should be
            invited on the created client customer.
            Accessible only to customers on the allow-list.

            This field is a member of `oneof`_ ``_email_address``.
        access_role (google.ads.googleads.v9.enums.types.AccessRoleEnum.AccessRole):
            The proposed role of user on the created
            client customer. Accessible only to customers on
            the allow-list.
        validate_only (bool):
            If true, the request is validated but not
            executed. Only errors are returned, not results.
    """

    # Proto field 1: manager customer ID.
    customer_id = proto.Field(proto.STRING, number=1,)
    # Proto field 2: the client customer to create.
    customer_client = proto.Field(
        proto.MESSAGE, number=2, message=gagr_customer.Customer,
    )
    # Proto field 5 (optional/oneof): invitee email address.
    email_address = proto.Field(proto.STRING, number=5, optional=True,)
    # Proto field 4: proposed access role for the invited user.
    access_role = proto.Field(
        proto.ENUM, number=4, enum=gage_access_role.AccessRoleEnum.AccessRole,
    )
    # Proto field 6: dry-run flag.
    validate_only = proto.Field(proto.BOOL, number=6,)
class CustomerOperation(proto.Message):
    r"""A single update on a customer.

    Attributes:
        update (google.ads.googleads.v9.resources.types.Customer):
            Mutate operation. Only updates are supported
            for customer.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            FieldMask that determines which resource
            fields are modified in an update.
    """

    update = proto.Field(
        proto.MESSAGE, number=1, message=gagr_customer.Customer,
    )
    update_mask = proto.Field(
        proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,
    )
class CreateCustomerClientResponse(proto.Message):
    r"""Response message for CreateCustomerClient mutate.

    Attributes:
        resource_name (str):
            The resource name of the newly created
            customer client.
        invitation_link (str):
            Link for inviting user to access the created
            customer. Accessible to allowlisted customers
            only.
    """

    resource_name = proto.Field(proto.STRING, number=2,)
    invitation_link = proto.Field(proto.STRING, number=3,)
class MutateCustomerResponse(proto.Message):
    r"""Response message for customer mutate.

    Attributes:
        result (google.ads.googleads.v9.services.types.MutateCustomerResult):
            Result for the mutate.
    """

    result = proto.Field(
        proto.MESSAGE, number=2, message="MutateCustomerResult",
    )
class MutateCustomerResult(proto.Message):
    r"""The result for the customer mutate.

    Attributes:
        resource_name (str):
            Returned for successful operations.
        customer (google.ads.googleads.v9.resources.types.Customer):
            The mutated customer with only mutable fields after mutate.
            The fields will only be returned when response_content_type
            is set to "MUTABLE_RESOURCE".
    """

    resource_name = proto.Field(proto.STRING, number=1,)
    customer = proto.Field(
        proto.MESSAGE, number=2, message=gagr_customer.Customer,
    )
class ListAccessibleCustomersRequest(proto.Message):
    r"""Request message for
    [CustomerService.ListAccessibleCustomers][google.ads.googleads.v9.services.CustomerService.ListAccessibleCustomers].
    """
    # Intentionally empty: this RPC takes no parameters.
class ListAccessibleCustomersResponse(proto.Message):
    r"""Response message for
    [CustomerService.ListAccessibleCustomers][google.ads.googleads.v9.services.CustomerService.ListAccessibleCustomers].

    Attributes:
        resource_names (Sequence[str]):
            Resource name of customers directly
            accessible by the user authenticating the call.
    """

    resource_names = proto.RepeatedField(proto.STRING, number=1,)
__all__ = tuple(sorted(__protobuf__.manifest))
| |
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
from __future__ import print_function
import sys
import unittest
import os
sys.path.insert(0, '../')
import format
from formattedText import FormattedText
import parse
# Table-driven cases for parse.matchLine. Each entry holds a raw input
# line, whether a filename should be extracted ('match'), and, when it
# should, the expected 'file' path and optional line 'num'.
fileTestCases = [{
    'input': 'html/js/hotness.js',
    'match': True,
    'file': 'html/js/hotness.js'
}, {
    'input': '/absolute/path/to/something.txt',
    'match': True,
    'file': '/absolute/path/to/something.txt'
}, {
    'input': '/html/js/hotness.js42',
    'match': True,
    'file': '/html/js/hotness.js42'
}, {
    'input': '/html/js/hotness.js',
    'match': True,
    'file': '/html/js/hotness.js'
}, {
    'input': './asd.txt:83',
    'match': True,
    'file': './asd.txt',
    'num': 83
}, {
    'input': '.env.local',
    'match': True,
    'file': '.env.local'
}, {
    'input': '.gitignore',
    'match': True,
    'file': '.gitignore'
}, {
    'input': 'tmp/.gitignore',
    'match': True,
    'file': 'tmp/.gitignore'
}, {
    'input': '.ssh/.gitignore',
    'match': True,
    'file': '.ssh/.gitignore'
}, {
    'input': '.ssh/known_hosts',
    'match': True,
    'file': '.ssh/known_hosts'
    # For now, don't worry about matching the following case perfectly,
    # simply because it's complicated.
    #}, {
    #    'input': '~/.ssh/known_hosts',
    #    'match': True,
}, {
    # Arbitrarily ignore really short dot filenames
    'input': '.a',
    'match': False,
}, {
    'input': 'flib/asd/ent/berkeley/two.py-22',
    'match': True,
    'file': 'flib/asd/ent/berkeley/two.py',
    'num': 22
}, {
    'input': 'flib/foo/bar',
    'match': True,
    'file': 'flib/foo/bar'
}, {
    'input': 'flib/foo/bar ',  # note space
    'match': True,
    'file': 'flib/foo/bar'
}, {
    'input': 'foo/b ',
    'match': True,
    'file': 'foo/b'
}, {
    'input': 'foo/bar/baz/',
    'match': False
}, {
    'input': 'flib/ads/ads.thrift',
    'match': True,
    'file': 'flib/ads/ads.thrift'
}, {
    'input': 'banana hanana Wilde/ads/story.m',
    'match': True,
    'file': 'Wilde/ads/story.m'
}, {
    'input': 'flib/asd/asd.py two/three/four.py',
    'match': True,
    'file': 'flib/asd/asd.py'
}, {
    'input': 'asd/asd/asd/ 23',
    'match': False
}, {
    'input': 'foo/bar/TARGETS:23',
    'match': True,
    'num': 23,
    'file': 'foo/bar/TARGETS'
}, {
    'input': 'foo/bar/TARGETS-24',
    'match': True,
    'num': 24,
    'file': 'foo/bar/TARGETS'
}, {
    'input':
        'fbcode/search/places/scorer/PageScorer.cpp:27:46:#include "search/places/scorer/linear_scores/MinutiaeVerbScorer.h',
    'match': True,
    'num': 27,
    'file': 'fbcode/search/places/scorer/PageScorer.cpp'
}, {
    # Pretty intense case
    'input':
        'fbcode/search/places/scorer/TARGETS:590:28: srcs = ["linear_scores/MinutiaeVerbScorer.cpp"]',
    'match': True,
    'num': 590,
    'file': 'fbcode/search/places/scorer/TARGETS'
}, {
    'input':
        'fbcode/search/places/scorer/TARGETS:1083:27: "linear_scores/test/MinutiaeVerbScorerTest.cpp"',
    'match': True,
    'num': 1083,
    'file': 'fbcode/search/places/scorer/TARGETS'
}, {
    'input': '~/foo/bar/something.py',
    'match': True,
    'file': '~/foo/bar/something.py'
}, {
    'input': '~/foo/bar/inHomeDir.py:22',
    'match': True,
    'file': '~/foo/bar/inHomeDir.py',
    'num': 22,
}, {
    'input': 'blarge assets/retina/victory@2x.png',
    'match': True,
    'file': 'assets/retina/victory@2x.png'
}, {
    'input': '~/assets/retina/victory@2x.png',
    'match': True,
    'file': '~/assets/retina/victory@2x.png'
}, {
    'input': 'So.many.periods.txt',
    'match': True,
    'file': 'So.many.periods.txt'
}, {
    'input': 'SO.MANY.PERIODS.TXT',
    'match': True,
    'file': 'SO.MANY.PERIODS.TXT'
}, {
    'input': 'blarg blah So.MANY.PERIODS.TXT:22',
    'match': True,
    'file': 'So.MANY.PERIODS.TXT',
    'num': 0  # we ignore the number here
}, {
    'input': 'SO.MANY&&PERIODSTXT',
    'match': False
}, {
    'input': 'test src/categories/NSDate+Category.h',
    'match': True,
    'file': 'src/categories/NSDate+Category.h'
}, {
    'input': '~/src/categories/NSDate+Category.h',
    'match': True,
    'file': '~/src/categories/NSDate+Category.h'
}]
# Cases for parse.prependDir: raw path 'in', expected anchored path 'out'.
# Expected values starting with '~/' are expanded in the test itself.
prependDirTestCases = [
    {
        'in': 'home/absolute/path.py',
        'out': '/home/absolute/path.py'
    }, {
        'in': '~/www/asd.py',
        'out': '~/www/asd.py'
    }, {
        'in': 'www/asd.py',
        'out': '~/www/asd.py'
    }, {
        'in': 'foo/bar/baz/asd.py',
        'out': parse.PREPEND_PATH + 'foo/bar/baz/asd.py'
    }, {
        'in': 'a/foo/bar/baz/asd.py',
        'out': parse.PREPEND_PATH + 'foo/bar/baz/asd.py'
    }, {
        'in': 'b/foo/bar/baz/asd.py',
        'out': parse.PREPEND_PATH + 'foo/bar/baz/asd.py'
    }, {
        'in': '',
        'out': ''
    }]
class TestParseFunction(unittest.TestCase):
    """Exercises parse.prependDir / parse.matchLine against the
    table-driven cases defined at module level."""

    def testPrependDir(self):
        for case in prependDirTestCases:
            rawPath = case['in']
            result = parse.prependDir(rawPath)
            expected = case['out']
            # home-relative expectations must be expanded per-machine
            if rawPath[0:2] == '~/':
                expected = os.path.expanduser(expected)
            self.assertEqual(expected, result)
        print('Tested %d dir cases.' % len(prependDirTestCases))

    def testFileFuzz(self):
        # Wrap every base case in realistic prefixes/suffixes and make
        # sure the match outcome is unchanged.
        befores = ['M ', 'Modified: ', 'Changed: ', '+++ ',
                   'Banana asdasdoj pjo ']
        afters = [' * Adapts AdsErrorCodestore to something',
                  ':0:7: var AdsErrorCodeStore', ' jkk asdad']
        for case in fileTestCases:
            for prefix in befores:
                for suffix in afters:
                    fuzzed = case.copy()
                    fuzzed['input'] = '%s%s%s' % (prefix, case['input'], suffix)
                    self.checkFileResult(fuzzed)
        print('Tested %d cases for file fuzz.' % len(fileTestCases))

    def testUnresolvable(self):
        fileLine = ".../something/foo.py"
        lineObj = format.LineMatch(
            FormattedText(fileLine), parse.matchLine(fileLine), 0)
        self.assertTrue(
            not lineObj.isResolvable(),
            '"%s" should not be resolvable' % fileLine
        )
        print('Tested unresolvable case.')

    def testResolvable(self):
        matchingCases = [case for case in fileTestCases if case['match']]
        for case in matchingCases:
            lineObj = format.LineMatch(
                FormattedText(case['input']), parse.matchLine(case['input']), 0)
            self.assertTrue(
                lineObj.isResolvable(),
                'Line "%s" was not resolvable' % case['input']
            )
        print('Tested %d resolvable cases.' % len(matchingCases))

    def testFileMatch(self):
        for case in fileTestCases:
            self.checkFileResult(case)
        print('Tested %d cases.' % len(fileTestCases))

    def checkFileResult(self, testCase):
        """Assert that matchLine agrees with one test-case dict."""
        result = parse.matchLine(testCase['input'])
        if not result:
            self.assertFalse(testCase['match'],
                             'Line "%s" did not match any regex' %
                             testCase['input'])
            return
        matchedFile, lineNum, _ = result
        self.assertTrue(testCase['match'], 'Line "%s" did match' %
                        testCase['input'])
        self.assertEqual(testCase['file'], matchedFile,
                         'files not equal |%s| |%s|' %
                         (testCase['file'], matchedFile))
        self.assertEqual(testCase.get('num', 0), lineNum,
                         'num matches not equal %d %d for %s'
                         % (testCase.get('num', 0), lineNum,
                            testCase.get('input')))
# Allow running this test module directly: python test_parse.py
if __name__ == '__main__':
    unittest.main()
| |
#
# Simple benchmarks for the processing package
#
import time, sys, multiprocess as processing, threading, queue as Queue, gc
# Backwards-compat aliases: this benchmark was written for the old py2
# "processing" package; map its names onto the multiprocess API.
processing.freezeSupport = processing.freeze_support
xrange = range

# BUG FIX: time.clock() was removed in Python 3.8, so the old win32
# branch raised AttributeError. time.perf_counter() is the recommended
# high-resolution, monotonic timer on every platform.
_timer = time.perf_counter

# Minimum elapsed time (seconds) each benchmark loop must accumulate.
delta = 1
#### TEST_QUEUESPEED
def queuespeed_func(q, c, iterations):
    """Producer side of the queue benchmark.

    Signals readiness on condition *c*, then pushes *iterations*
    256-byte payloads onto *q* followed by a 'STOP' sentinel.
    """
    payload = '0' * 256
    # `with` guarantees release even if notify() raises; replaces the
    # py2-era xrange/acquire-release boilerplate.
    with c:
        c.notify()
    for _ in range(iterations):
        q.put(payload)
    q.put('STOP')
def test_queuespeed(Process, q, c):
    """Benchmark queue throughput: keep doubling the payload count until
    one pass takes at least ``delta`` seconds, then report the rate."""
    elapsed = 0
    iterations = 1
    while elapsed < delta:
        iterations *= 2
        producer = Process(target=queuespeed_func, args=(q, c, iterations))
        c.acquire()
        producer.start()
        c.wait()          # producer signals once it is running
        c.release()
        result = None
        start = _timer()
        while result != 'STOP':
            result = q.get()
        elapsed = _timer() - start
        producer.join()
    print(iterations, 'objects passed through the queue in', elapsed, 'seconds')
    print('average number/sec:', iterations / elapsed)
#### TEST_PIPESPEED
def pipe_func(c, cond, iterations):
    """Producer side of the pipe benchmark.

    Signals readiness on *cond*, then sends *iterations* 256-byte
    payloads over connection *c* followed by a 'STOP' sentinel.
    """
    payload = '0' * 256
    # `with` guarantees release; replaces py2-era xrange/acquire-release.
    with cond:
        cond.notify()
    for _ in range(iterations):
        c.send(payload)
    c.send('STOP')
def test_pipespeed():
    """Benchmark send/recv throughput over a processing.Pipe."""
    c, d = processing.Pipe()
    cond = processing.Condition()
    elapsed = 0
    iterations = 1
    while elapsed < delta:
        iterations *= 2
        producer = processing.Process(target=pipe_func,
                                      args=(d, cond, iterations))
        cond.acquire()
        producer.start()
        cond.wait()       # producer signals once it is running
        cond.release()
        result = None
        start = _timer()
        while result != 'STOP':
            result = c.recv()
        elapsed = _timer() - start
        producer.join()
    print(iterations, 'objects passed through connection in', elapsed, 'seconds')
    print('average number/sec:', iterations / elapsed)
#### TEST_SEQSPEED
def test_seqspeed(seq):
    """Benchmark repeated indexed reads from *seq* (a list or a
    manager/shared-array proxy)."""
    elapsed = 0
    iterations = 1
    while elapsed < delta:
        iterations *= 2
        start = _timer()
        for _ in range(iterations):
            value = seq[5]   # the read itself is what we are timing
        elapsed = _timer() - start
    print(iterations, 'iterations in', elapsed, 'seconds')
    print('average number/sec:', iterations / elapsed)
#### TEST_LOCK
def test_lockspeed(l):
    """Benchmark acquire/release round-trips on lock *l*."""
    elapsed = 0
    iterations = 1
    while elapsed < delta:
        iterations *= 2
        start = _timer()
        # explicit acquire/release pairs are the operation under test
        for _ in range(iterations):
            l.acquire()
            l.release()
        elapsed = _timer() - start
    print(iterations, 'iterations in', elapsed, 'seconds')
    print('average number/sec:', iterations / elapsed)
#### TEST_CONDITION
def conditionspeed_func(c, N):
    """Echo side of the condition ping-pong benchmark.

    Notifies once to signal readiness, then waits and re-notifies *N*
    times under condition *c*.
    """
    # `with` guarantees release; replaces py2-era xrange/acquire-release.
    with c:
        c.notify()
        for _ in range(N):
            c.wait()
            c.notify()
def test_conditionspeed(Process, c):
    # Ping-pong benchmark: the partner (conditionspeed_func) alternates
    # wait/notify opposite this function, so each iteration counts two
    # condition waits in total.
    elapsed = 0
    iterations = 1
    while elapsed < delta:
        iterations *= 2
        c.acquire()
        p = Process(target=conditionspeed_func, args=(c, iterations))
        p.start()
        c.wait()               # partner's initial notify: it is ready
        t = _timer()
        for i in xrange(iterations):
            c.notify()
            c.wait()
        elapsed = _timer()-t
        c.release()
        p.join()
    print(iterations * 2, 'waits in', elapsed, 'seconds')
    print('average number/sec:', iterations * 2 / elapsed)
####
def test():
    """Run every benchmark once, comparing threading primitives,
    multiprocess primitives and manager(server-process)-backed ones."""
    manager = processing.Manager()

    # GC pauses would skew the timings
    gc.disable()

    print('\n\t######## testing Queue.Queue\n')
    test_queuespeed(threading.Thread, Queue.Queue(),
                    threading.Condition())
    print('\n\t######## testing processing.Queue\n')
    test_queuespeed(processing.Process, processing.Queue(),
                    processing.Condition())
    print('\n\t######## testing Queue managed by server process\n')
    test_queuespeed(processing.Process, manager.Queue(),
                    manager.Condition())
    print('\n\t######## testing processing.Pipe\n')
    test_pipespeed()

    # BUG FIX: this was a bare `print` left from Python 2; in Python 3 a
    # bare `print` expression is a no-op, so call it to emit the newline.
    print()

    print('\n\t######## testing list\n')
    test_seqspeed(range(10))
    print('\n\t######## testing list managed by server process\n')
    test_seqspeed(manager.list(range(10)))
    print('\n\t######## testing Array("i", ..., lock=False)\n')
    test_seqspeed(processing.Array('i', range(10), lock=False))
    print('\n\t######## testing Array("i", ..., lock=True)\n')
    test_seqspeed(processing.Array('i', range(10), lock=True))

    print()

    print('\n\t######## testing threading.Lock\n')
    test_lockspeed(threading.Lock())
    print('\n\t######## testing threading.RLock\n')
    test_lockspeed(threading.RLock())
    print('\n\t######## testing processing.Lock\n')
    test_lockspeed(processing.Lock())
    print('\n\t######## testing processing.RLock\n')
    test_lockspeed(processing.RLock())
    print('\n\t######## testing lock managed by server process\n')
    test_lockspeed(manager.Lock())
    print('\n\t######## testing rlock managed by server process\n')
    test_lockspeed(manager.RLock())

    print()

    print('\n\t######## testing threading.Condition\n')
    test_conditionspeed(threading.Thread, threading.Condition())
    print('\n\t######## testing processing.Condition\n')
    test_conditionspeed(processing.Process, processing.Condition())
    print('\n\t######## testing condition managed by a server process\n')
    test_conditionspeed(processing.Process, manager.Condition())

    gc.enable()
if __name__ == '__main__':
    # freeze_support() is a no-op except in frozen Windows executables.
    processing.freezeSupport()
    test()
| |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, os, copy, json, re
from frappe import _
from frappe.modules import get_doc_path
from jinja2 import TemplateNotFound
from frappe.utils import cint, strip_html
from frappe.utils.pdf import get_pdf
# Frappe page flags: output depends on request args, so never cache this
# view, and keep it out of the generated sitemap.
no_cache = 1
no_sitemap = 1
# Outer page template and the fallback Jinja print-format template.
base_template_path = "templates/pages/print.html"
standard_format = "templates/print_formats/standard.html"
def get_context(context):
    """Build context for print"""
    # doctype and name come from the query string via frappe.form_dict
    if not frappe.form_dict.doctype or not frappe.form_dict.name:
        # missing parameters: render a plain error page instead
        return {
            "body": """<h1>Error</h1>
<p>Parameters doctype, name and format required</p>
<pre>%s</pre>""" % repr(frappe.form_dict)
        }

    doc = frappe.get_doc(frappe.form_dict.doctype, frappe.form_dict.name)
    meta = frappe.get_meta(doc.doctype)

    # resolve ?format=... / doctype default / "Standard" (None result)
    print_format = get_print_format_doc(None, meta = meta)

    return {
        "body": get_html(doc, print_format = print_format,
            meta=meta, trigger_print = frappe.form_dict.trigger_print,
            no_letterhead=frappe.form_dict.no_letterhead),
        "css": get_print_style(frappe.form_dict.style, print_format),
        "comment": frappe.session.user,
        "title": doc.get(meta.title_field) if meta.title_field else doc.name
    }
def get_print_format_doc(print_format_name, meta):
    """Return the Print Format document to render with, or None when the
    built-in Standard format should be used."""
    # explicit argument -> request parameter -> doctype default -> Standard
    name = (print_format_name
            or frappe.form_dict.format
            or meta.default_print_format
            or "Standard")
    if name == "Standard":
        return None
    return frappe.get_doc("Print Format", name)
def get_html(doc, name=None, print_format=None, meta=None,
        no_letterhead=None, trigger_print=False):
    """Render *doc* to print HTML using *print_format* (or the standard
    template) and return the HTML string.

    :param no_letterhead: may arrive as a query-string value; normalized
        to an int below.
    :param trigger_print: when truthy, a window.print() script is appended.
    """
    print_settings = frappe.db.get_singles_dict("Print Settings")

    if isinstance(no_letterhead, basestring):
        no_letterhead = cint(no_letterhead)
    elif no_letterhead is None:
        # NOTE(review): "with_letthead" looks misspelled — confirm against
        # the Print Settings doctype field name before changing it.
        no_letterhead = not cint(print_settings.with_letthead)

    doc.flags.in_print = True

    if not frappe.flags.ignore_print_permissions:
        validate_print_permission(doc)

    # draft/cancelled submittable documents may be blocked from printing
    if doc.meta.is_submittable:
        if doc.docstatus==0 and not print_settings.allow_print_for_draft:
            frappe.throw(_("Not allowed to print draft documents"), frappe.PermissionError)
        if doc.docstatus==2 and not print_settings.allow_print_for_cancelled:
            frappe.throw(_("Not allowed to print cancelled documents"), frappe.PermissionError)

    if hasattr(doc, "before_print"):
        doc.before_print()

    if not hasattr(doc, "print_heading"): doc.print_heading = None
    if not hasattr(doc, "sub_heading"): doc.sub_heading = None

    if not meta:
        meta = frappe.get_meta(doc.doctype)

    jenv = frappe.get_jenv()
    format_data, format_data_map = [], {}

    # determine template
    if print_format:
        if print_format.standard=="Yes" or print_format.custom_format:
            # format carries its own Jinja source
            template = jenv.from_string(get_print_format(doc.doctype,
                print_format))
        elif print_format.format_data:
            # set format data (layout built with the Print Format Builder)
            format_data = json.loads(print_format.format_data)
            for df in format_data:
                format_data_map[df.get("fieldname")] = df
                if "visible_columns" in df:
                    for _df in df.get("visible_columns"):
                        format_data_map[_df.get("fieldname")] = _df

            doc.format_data_map = format_data_map
            template = "standard"
        else:
            # fallback
            template = "standard"
    else:
        template = "standard"

    if template == "standard":
        template = jenv.get_template(standard_format)

    letter_head = frappe._dict(get_letter_head(doc, no_letterhead) or {})

    args = {
        "doc": doc,
        "meta": frappe.get_meta(doc.doctype),
        "layout": make_layout(doc, meta, format_data),
        "no_letterhead": no_letterhead,
        "trigger_print": cint(trigger_print),
        "letter_head": letter_head.content,
        "footer": letter_head.footer,
        "print_settings": frappe.get_doc("Print Settings")
    }
    html = template.render(args, filters={"len": len})

    if cint(trigger_print):
        html += trigger_print_script

    return html
@frappe.whitelist()
def get_html_and_style(doc, name=None, print_format=None, meta=None,
        no_letterhead=None, trigger_print=False):
    """Return the rendered ``html`` and ``style`` of a print format,
    used when building PDFs etc."""
    # *doc* may arrive as (doctype, name) strings or as a JSON document
    if isinstance(doc, basestring) and isinstance(name, basestring):
        doc = frappe.get_doc(doc, name)
    elif isinstance(doc, basestring):
        doc = frappe.get_doc(json.loads(doc))

    print_format = get_print_format_doc(
        print_format, meta=meta or frappe.get_meta(doc.doctype))

    html = get_html(doc, name=name, print_format=print_format, meta=meta,
        no_letterhead=no_letterhead, trigger_print=trigger_print)
    return {
        "html": html,
        "style": get_print_style(print_format=print_format)
    }
@frappe.whitelist()
def download_multi_pdf(doctype, name, format=None):
    """Respond with a single PDF containing the print output of several
    documents of the same *doctype*.

    :param name: JSON-encoded list of document names.
    """
    # page-break separator inserted between consecutive documents
    pagebreak = """<p style="page-break-after:always;"></p>"""

    docnames = json.loads(name)
    totalhtml = pagebreak.join(
        frappe.get_print(doctype, docname, format) for docname in docnames)

    frappe.local.response.filename = "{doctype}.pdf".format(
        doctype=doctype.replace(" ", "-").replace("/", "-"))
    options = {'title': doctype}  # PDF title metadata
    frappe.local.response.filecontent = get_pdf(totalhtml, options)
    frappe.local.response.type = "download"
@frappe.whitelist()
def download_pdf(doctype, name, format=None):
    """Respond with the PDF print output of a single document."""
    safe_name = name.replace(" ", "-").replace("/", "-")
    frappe.local.response.filename = "{name}.pdf".format(name=safe_name)
    frappe.local.response.filecontent = get_pdf(
        frappe.get_print(doctype, name, format))
    frappe.local.response.type = "download"
@frappe.whitelist()
def report_to_pdf(html):
    """Respond with *html* rendered as a landscape PDF named report.pdf."""
    frappe.local.response.filename = "report.pdf"
    pdf_options = {"orientation": "Landscape"}
    frappe.local.response.filecontent = get_pdf(html, pdf_options)
    frappe.local.response.type = "download"
def validate_print_permission(doc):
    """Raise PermissionError unless the user may read and print *doc*.

    A request carrying a ``key`` matching the document signature
    bypasses the permission check entirely."""
    if frappe.form_dict.get("key") and frappe.form_dict.key == doc.get_signature():
        return
    for ptype in ("read", "print"):
        allowed = (frappe.has_permission(doc.doctype, ptype, doc)
                   or frappe.has_website_permission(doc.doctype, ptype, doc))
        if not allowed:
            raise frappe.PermissionError(_("No {0} permission").format(ptype))
def get_letter_head(doc, no_letterhead):
    """Return the {content, footer} of the applicable Letter Head, or {}."""
    if no_letterhead:
        return {}
    # document's own letterhead wins; otherwise fall back to the default
    if doc.get("letter_head"):
        return frappe.db.get_value("Letter Head", doc.letter_head,
            ["content", "footer"], as_dict=True)
    return frappe.db.get_value("Letter Head", {"is_default": 1},
        ["content", "footer"], as_dict=True) or {}
def get_print_format(doctype, print_format):
    """Return the Jinja source of a standard/custom print format."""
    if print_format.disabled:
        frappe.throw(_("Print Format {0} is disabled").format(print_format.name),
            frappe.DoesNotExistError)

    # look for <module>/Print Format/<name>/<scrubbed name>.html on disk
    module = frappe.db.get_value("DocType", doctype, "module")
    path = os.path.join(
        get_doc_path(module, "Print Format", print_format.name),
        frappe.scrub(print_format.name) + ".html")

    if os.path.exists(path):
        with open(path, "r") as template_file:
            return template_file.read()

    # no file: fall back to the HTML stored on the document itself
    if print_format.html:
        return print_format.html

    frappe.throw(_("No template found at path: {0}").format(path),
        frappe.TemplateNotFoundError)
def make_layout(doc, meta, format_data=None):
    """Builds a hierarchical layout object from the fields list to be rendered
    by `standard.html`

    :param doc: Document to be rendered.
    :param meta: Document meta object (doctype).
    :param format_data: Fields sequence and properties defined by Print Format Builder.

    Layout structure: layout -> pages -> sections -> columns -> fields.
    """
    layout, page = [], []
    layout.append(page)

    if format_data:
        # extract print_heading_template from the first field
        # and remove the field
        if format_data[0].get("fieldname") == "print_heading_template":
            doc.print_heading_template = format_data[0].get("options")
            format_data = format_data[1:]

    for df in format_data or meta.fields:
        if format_data:
            # embellish df with original properties
            df = frappe._dict(df)
            if df.fieldname:
                original = meta.get_field(df.fieldname)
                if original:
                    newdf = original.as_dict()
                    newdf.update(df)
                    df = newdf
            # builder-selected fields are always shown
            df.print_hide = 0

        if df.fieldtype=="Section Break" or page==[]:
            if len(page) > 1 and not any(page[-1]):
                # truncate prev section if empty
                del page[-1]
            page.append([])

        if df.fieldtype=="Column Break" or (page[-1]==[] and df.fieldtype!="Section Break"):
            page[-1].append([])

        if df.fieldtype=="HTML" and df.options:
            doc.set(df.fieldname, True) # show this field

        if is_visible(df, doc) and has_value(df, doc):
            page[-1][-1].append(df)

        # if table, add the row info in the field
        # if a page break is found, create a new docfield
        if df.fieldtype=="Table":
            df.rows = []
            df.start = 0
            df.end = None
            for i, row in enumerate(doc.get(df.fieldname)):
                if row.get("page_break"):
                    # close the earlier row
                    df.end = i

                    # new page, with empty section and column
                    page = [[[]]]
                    layout.append(page)

                    # continue the table in a new page
                    df = copy.copy(df)
                    df.start = i
                    df.end = None
                    page[-1][-1].append(df)

    return layout
def is_visible(df, doc):
    """Return True if docfield *df* should appear in the print layout
    of *doc* (i.e. it is printable and not print-hidden)."""
    # layout-only / interactive fields never print
    if df.fieldtype in ("Section Break", "Column Break", "Button"):
        return False

    if hasattr(doc, "hide_in_print_layout") and df.fieldname in doc.hide_in_print_layout:
        return False

    # fields above the user's permission level are hidden too
    if df.permlevel > 0 and not doc.has_permlevel_access_to(df.fieldname, df):
        return False

    return not doc.is_print_hide(df.fieldname, df)
def has_value(df, doc):
    """Return True unless the field's value is empty: None, "",
    blank-after-HTML-stripping text, or an empty list."""
    value = doc.get(df.fieldname)
    if value in (None, ""):
        return False
    if isinstance(value, basestring) and not strip_html(value).strip():
        return False
    if isinstance(value, list) and not len(value):
        return False
    return True
def get_print_style(style=None, print_format=None, for_legacy=False):
    """Return the compiled CSS for a print style.

    Combines the standard stylesheet, the named style's sheet (if one
    exists) and the print format's own CSS; @import rules are hoisted to
    the top so the result stays valid CSS.
    """
    print_settings = frappe.get_doc("Print Settings")
    if not style:
        style = print_settings.print_style or "Standard"
    context = {
        "print_settings": print_settings,
        "print_style": style,
        "font": get_font(print_settings, print_format, for_legacy)
    }
    css = frappe.get_template("templates/styles/standard.css").render(context)
    try:
        css += frappe.get_template("templates/styles/" + style.lower() + ".css").render(context)
    except TemplateNotFound:
        # unknown/custom style name: keep just the standard sheet
        pass

    # move @import to top (CSS requires @import before other rules).
    # BUG FIX: pattern is now a raw string; the old non-raw "\(" escapes
    # trigger invalid-escape warnings on Python 3.6+ (error in future 3.x).
    for at_import in list(set(re.findall(r"(@import url\([^\)]+\)[;]?)", css))):
        css = css.replace(at_import, "")

        # prepend css with at_import
        css = at_import + css

    if print_format and print_format.css:
        css += "\n\n" + print_format.css

    return css
def get_font(print_settings, print_format=None, for_legacy=False):
    """Return the CSS font-family stack to use for the rendered print.

    The print format's own font wins over the global Print Settings
    font; "Default" (or no setting) falls back to the standard stack."""
    default = '"Helvetica Neue", Helvetica, Arial, "Open Sans", sans-serif'
    if for_legacy:
        return default

    if print_format and print_format.font and print_format.font != "Default":
        return '{0}, sans-serif'.format(print_format.font)

    if print_settings.font and print_settings.font != "Default":
        return '{0}, sans-serif'.format(print_settings.font)

    return default
def get_visible_columns(data, table_meta, df):
    """Return the child-table columns worth printing: visible per
    ``is_visible`` and having at least one non-blank cell in *data*."""
    doc = data[0] or frappe.new_doc(df.options)

    def keep(col_df):
        # drop hidden columns and columns where every row is blank
        return (is_visible(col_df, doc)
                and column_has_value(data, col_df.get("fieldname")))

    builder_columns = df.get("visible_columns")
    if not builder_columns:
        return [col_df for col_df in table_meta.fields if keep(col_df)]

    # explicit column list from the Print Format Builder: overlay the
    # builder's properties on the original docfield definition
    visible = []
    for col_df in builder_columns:
        docfield = table_meta.get_field(col_df.get("fieldname"))
        if not docfield:
            continue
        merged = docfield.as_dict().copy()
        merged.update(col_df)
        if keep(merged):
            visible.append(merged)
    return visible
def column_has_value(data, fieldname):
    """Check if at least one cell in column has non-zero and non-blank value"""
    for row in data:
        value = row.get(fieldname)
        if not value:
            continue
        if isinstance(value, basestring):
            # HTML-only strings still count as blank
            if strip_html(value).strip():
                return True
        else:
            return True
    return False
# Appended to the rendered HTML when trigger_print is set: prints the
# page on load, then attempts to close the tab shortly afterwards.
trigger_print_script = """
<script>
window.print();
// close the window after print
// NOTE: doesn't close if print is cancelled in Chrome
setTimeout(function() {
window.close();
}, 1000);
</script>
"""
| |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
import random
import threading
import time
from desktop.lib.exceptions_renderable import PopupException
from django.utils.translation import ugettext as _
from kazoo.client import KazooClient
from libsentry.client import SentryClient
from libsentry.conf import HOSTNAME, PORT
from libsentry.sentry_site import get_sentry_server_ha_enabled, get_sentry_server_ha_has_security, get_sentry_server_ha_zookeeper_quorum, get_sentry_server_ha_zookeeper_namespace
LOG = logging.getLogger(__name__)

# Cached list of Sentry server endpoints discovered via ZooKeeper (HA
# mode). Guarded by _api_cache_lock; reset to None to force a refresh.
_api_cache = None
_api_cache_lock = threading.Lock()
def ha_error_handler(func):
    """Decorator that retries failed Sentry calls against fresh servers.

    SentryException (an application-level failure reported by the
    server) propagates immediately. Any other error is retried up to 15
    times: the cached HA server list is invalidated so it is re-read
    from ZooKeeper, and a new client is attached to the wrapped API
    object (``args[0]``) before the next attempt.
    """
    import functools  # local import: keeps the module's import block untouched

    @functools.wraps(func)
    def decorator(*args, **kwargs):
        retries = 15
        while retries > 0:
            try:
                return func(*args, **kwargs)
            except SentryException:
                # Server-side status failure: retrying would not help.
                # (bare `raise` preserves the original traceback; the old
                # py2-only `except E, e: raise e` form discarded it)
                raise
            except Exception:
                retries -= 1
                if not get_sentry_server_ha_enabled() or retries == 0:
                    raise
                # Retry on any error with a fresh server list from ZooKeeper.
                LOG.info('Retrying fetching an available client in ZooKeeper.')
                global _api_cache
                _api_cache = None
                time.sleep(1)
                args[0].client = _get_client(args[0].client.username)
                LOG.info('Picked %s' % args[0].client)

    return decorator
def get_api(user):
    """Return a SentryApi wrapping a client bound to *user*'s username."""
    return SentryApi(_get_client(user.username))
class SentryApi(object):
  """Thin wrapper over SentryClient.

  Every call checks the thrift response status and raises
  SentryException on a non-zero status; all public methods retry/fail
  over between HA servers via @ha_error_handler. The previously
  duplicated status-check boilerplate is factored into ``_check``.
  """

  def __init__(self, client):
    self.client = client

  def _check(self, response):
    # Sentry thrift responses carry a status struct; value == 0 means OK.
    if response.status.value == 0:
      return response
    raise SentryException(response)

  @ha_error_handler
  def create_sentry_role(self, roleName):
    return self._check(self.client.create_sentry_role(roleName))

  @ha_error_handler
  def drop_sentry_role(self, roleName):
    return self._check(self.client.drop_sentry_role(roleName))

  @ha_error_handler
  def alter_sentry_role_grant_privilege(self, roleName, tSentryPrivilege):
    return self._check(self.client.alter_sentry_role_grant_privilege(roleName, tSentryPrivilege))

  @ha_error_handler
  def alter_sentry_role_revoke_privilege(self, roleName, tSentryPrivilege):
    return self._check(self.client.alter_sentry_role_revoke_privilege(roleName, tSentryPrivilege))

  @ha_error_handler
  def alter_sentry_role_add_groups(self, roleName, groups):
    return self._check(self.client.alter_sentry_role_add_groups(roleName, groups))

  @ha_error_handler
  def alter_sentry_role_delete_groups(self, roleName, groups):
    return self._check(self.client.alter_sentry_role_delete_groups(roleName, groups))

  @ha_error_handler
  def list_sentry_roles_by_group(self, groupName=None):
    """Return [{'name': ..., 'groups': [...]}] for roles of *groupName*."""
    response = self._check(self.client.list_sentry_roles_by_group(groupName))
    return [{
        'name': role.roleName,
        'groups': [group.groupName for group in role.groups]
      } for role in response.roles]

  @ha_error_handler
  def list_sentry_privileges_by_role(self, roleName, authorizableHierarchy=None):
    response = self._check(self.client.list_sentry_privileges_by_role(roleName, authorizableHierarchy))
    return [self._massage_priviledge(privilege) for privilege in response.privileges]

  @ha_error_handler
  def list_sentry_privileges_for_provider(self, groups, roleSet=None, authorizableHierarchy=None):
    return self._check(self.client.list_sentry_privileges_for_provider(groups, roleSet, authorizableHierarchy))

  @ha_error_handler
  def list_sentry_privileges_by_authorizable(self, authorizableSet, groups=None, roleSet=None):
    """Return [(authorizable_dict, {role: [privilege_dict, ...]}), ...]."""
    response = self.client.list_sentry_privileges_by_authorizable(authorizableSet, groups, roleSet)
    # BUG FIX: check the status before converting the payload (the old
    # code built the result first and only then raised on failure).
    self._check(response)
    _privileges = []
    for authorizable, roles in response.privilegesMapByAuth.iteritems():
      _roles = {}
      for role, privileges in roles.privilegeMap.iteritems():
        _roles[role] = [self._massage_priviledge(privilege) for privilege in privileges]
      _privileges.append((self._massage_authorizable(authorizable), _roles))
    return _privileges

  @ha_error_handler
  def drop_sentry_privileges(self, authorizableHierarchy):
    return self._check(self.client.drop_sentry_privilege(authorizableHierarchy))

  @ha_error_handler
  def rename_sentry_privileges(self, oldAuthorizable, newAuthorizable):
    return self._check(self.client.rename_sentry_privilege(oldAuthorizable, newAuthorizable))

  # NOTE: "priviledge" misspelling kept for backward compatibility.
  def _massage_priviledge(self, privilege):
    """Convert a thrift privilege struct into a plain dict."""
    return {
      'scope': privilege.privilegeScope,
      'server': privilege.serverName,
      'database': privilege.dbName,
      'table': privilege.tableName,
      'URI': privilege.URI,
      'action': 'ALL' if privilege.action == '*' else privilege.action.upper(),
      'timestamp': privilege.createTime,
      'grantOption': privilege.grantOption == 1,
    }

  def _massage_authorizable(self, authorizable):
    """Convert a thrift authorizable struct into a plain dict."""
    return {
      'server': authorizable.server,
      'database': authorizable.db,
      'table': authorizable.table,
      'URI': authorizable.uri,
    }
class SentryException(Exception):
  """Raised when a Sentry thrift response reports a failure status."""

  def __init__(self, e):
    # keep the server-provided status message as the printable message
    self.message = e.status.message
    super(SentryException, self).__init__(e)

  def __str__(self):
    return self.message
def _get_client(username):
  """Build a SentryClient for *username*.

  In HA mode a server is picked at random from the ZooKeeper-discovered
  list; otherwise the statically configured host/port is used."""
  if get_sentry_server_ha_enabled():
    servers = _get_server_properties()
    if not servers:
      raise PopupException(_('No Sentry servers are available.'))
    server = random.choice(servers)
  else:
    server = {
      'hostname': HOSTNAME.get(),
      'port': PORT.get()
    }
  return SentryClient(server['hostname'], server['port'], username)
# To move to a libzookeeper with decorator
def _get_server_properties():
global _api_cache
if _api_cache is None:
_api_cache_lock.acquire()
try:
if _api_cache is None:
if get_sentry_server_ha_has_security():
try:
from zookeeper.conf import CLUSTERS
sasl_server_principal = CLUSTERS.get()['default'].PRINCIPAL_NAME.get()
except Exception, e:
LOG.error("Could not get principal name from ZooKeeper app config: %s. Using 'zookeeper' as principal name." % e)
sasl_server_principal = 'zookeeper'
else:
sasl_server_principal = None
zk = KazooClient(hosts=get_sentry_server_ha_zookeeper_quorum(), read_only=True, sasl_server_principal=sasl_server_principal)
zk.start()
servers = []
namespace = get_sentry_server_ha_zookeeper_namespace()
children = zk.get_children("/%s/sentry-service/sentry-service/" % namespace)
for node in children:
data, stat = zk.get("/%s/sentry-service/sentry-service/%s" % (namespace, node))
server = json.loads(data.decode("utf-8"))
servers.append({'hostname': server['address'], 'port': server['sslPort'] if server['sslPort'] else server['port']})
zk.stop()
_api_cache = servers
finally:
_api_cache_lock.release()
return _api_cache
| |
import asyncio
from aiopg import connect
import functools
import unittest
from unittest import mock
import pytest
sa = pytest.importorskip("aiopg.sa") # noqa
from sqlalchemy import MetaData, Table, Column, Integer, String
# SQLAlchemy fixture table shared by all tests below; it is (re)created with
# a single row in TestTransaction.start() before every test.
meta = MetaData()
tbl = Table('sa_tbl2', meta,
            Column('id', Integer, nullable=False,
                   primary_key=True),
            Column('name', String(255)))
def check_prepared_transactions(func):
    """Decorator for test methods that require PostgreSQL two-phase commit.

    Skips the wrapped test (``unittest.SkipTest``) when the server reports a
    falsy ``max_prepared_transactions``; otherwise runs the test unchanged.

    BUGFIX: the wrapper previously used ``yield from``, which turned it into a
    generator function -- unittest would call the decorated test method, get an
    un-iterated generator back, and silently never run the test (and
    ``yield from loop.run_until_complete(...)`` is wrong anyway, since
    run_until_complete returns the plain result). The check now drives both
    coroutines through the loop explicitly.
    """
    @functools.wraps(func)
    def wrapper(self):
        conn = self.loop.run_until_complete(self.connect())
        val = self.loop.run_until_complete(
            conn.scalar('show max_prepared_transactions'))
        if not val:
            raise unittest.SkipTest('Twophase transacions are not supported. '
                                    'Set max_prepared_transactions to '
                                    'a nonzero value')
        return func(self)
    return wrapper
class TestTransaction(unittest.TestCase):
    """Integration tests for aiopg.sa transactions (root, nested/SAVEPOINT,
    two-phase) against a local PostgreSQL server.

    Each test gets a fresh event loop and a freshly reset ``sa_tbl2`` table
    containing exactly one row named 'first' (see start()). Tests drive the
    old generator-based coroutine API via ``loop.run_until_complete``.
    """
    def setUp(self):
        # Private loop per test; the global loop is cleared so accidental
        # get_event_loop() use fails loudly.
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(None)
        self.loop.run_until_complete(self.start())
    def tearDown(self):
        self.loop.close()
    @asyncio.coroutine
    def start(self, **kwargs):
        # Recreate the fixture table with a single known row.
        conn = yield from self.connect(**kwargs)
        yield from conn.execute("DROP TABLE IF EXISTS sa_tbl2")
        yield from conn.execute("CREATE TABLE sa_tbl2 "
                                "(id serial, name varchar(255))")
        yield from conn.execute("INSERT INTO sa_tbl2 (name)"
                                "VALUES ('first')")
    @asyncio.coroutine
    def connect(self, **kwargs):
        """Open a raw aiopg connection and wrap it in an SAConnection with a
        mocked engine (only ``dialect`` and a no-op ``release`` are needed)."""
        conn = yield from connect(database='aiopg',
                                  user='aiopg',
                                  password='passwd',
                                  host='127.0.0.1',
                                  loop=self.loop,
                                  **kwargs)
        self.addCleanup(conn.close)
        engine = mock.Mock()
        engine.dialect = sa.engine._dialect
        @asyncio.coroutine
        def release(*args):
            return
            yield  # unreachable; makes release a generator so @coroutine wraps it properly
        engine.release = release
        ret = sa.SAConnection(conn, engine)
        return ret
    def test_without_transactions(self):
        # Autocommit: changes made on one connection are visible on another.
        @asyncio.coroutine
        def go():
            conn1 = yield from self.connect()
            conn2 = yield from self.connect()
            res1 = yield from conn1.scalar(tbl.count())
            self.assertEqual(1, res1)
            yield from conn2.execute(tbl.delete())
            res2 = yield from conn1.scalar(tbl.count())
            self.assertEqual(0, res2)
        self.loop.run_until_complete(go())
    def test_connection_attr(self):
        @asyncio.coroutine
        def go():
            conn = yield from self.connect()
            tr = yield from conn.begin()
            self.assertIs(tr.connection, conn)
        self.loop.run_until_complete(go())
    def test_root_transaction(self):
        # Uncommitted changes are invisible to other connections until commit.
        @asyncio.coroutine
        def go():
            conn1 = yield from self.connect()
            conn2 = yield from self.connect()
            tr = yield from conn1.begin()
            self.assertTrue(tr.is_active)
            yield from conn1.execute(tbl.delete())
            res1 = yield from conn2.scalar(tbl.count())
            self.assertEqual(1, res1)
            yield from tr.commit()
            self.assertFalse(tr.is_active)
            self.assertFalse(conn1.in_transaction)
            res2 = yield from conn2.scalar(tbl.count())
            self.assertEqual(0, res2)
        self.loop.run_until_complete(go())
    def test_root_transaction_rollback(self):
        @asyncio.coroutine
        def go():
            conn1 = yield from self.connect()
            conn2 = yield from self.connect()
            tr = yield from conn1.begin()
            self.assertTrue(tr.is_active)
            yield from conn1.execute(tbl.delete())
            res1 = yield from conn2.scalar(tbl.count())
            self.assertEqual(1, res1)
            yield from tr.rollback()
            self.assertFalse(tr.is_active)
            res2 = yield from conn2.scalar(tbl.count())
            self.assertEqual(1, res2)
        self.loop.run_until_complete(go())
    def test_root_transaction_close(self):
        # close() on an uncommitted root transaction rolls it back.
        @asyncio.coroutine
        def go():
            conn1 = yield from self.connect()
            conn2 = yield from self.connect()
            tr = yield from conn1.begin()
            self.assertTrue(tr.is_active)
            yield from conn1.execute(tbl.delete())
            res1 = yield from conn2.scalar(tbl.count())
            self.assertEqual(1, res1)
            yield from tr.close()
            self.assertFalse(tr.is_active)
            res2 = yield from conn2.scalar(tbl.count())
            self.assertEqual(1, res2)
        self.loop.run_until_complete(go())
    def test_rollback_on_connection_close(self):
        # Closing the connection with an open transaction discards its changes.
        @asyncio.coroutine
        def go():
            conn1 = yield from self.connect()
            conn2 = yield from self.connect()
            tr = yield from conn1.begin()
            yield from conn1.execute(tbl.delete())
            res1 = yield from conn2.scalar(tbl.count())
            self.assertEqual(1, res1)
            yield from conn1.close()
            res2 = yield from conn2.scalar(tbl.count())
            self.assertEqual(1, res2)
            del tr
        self.loop.run_until_complete(go())
    def test_root_transaction_commit_inactive(self):
        # Committing twice raises InvalidRequestError.
        @asyncio.coroutine
        def go():
            conn = yield from self.connect()
            tr = yield from conn.begin()
            self.assertTrue(tr.is_active)
            yield from tr.commit()
            self.assertFalse(tr.is_active)
            with self.assertRaises(sa.InvalidRequestError):
                yield from tr.commit()
        self.loop.run_until_complete(go())
    def test_root_transaction_rollback_inactive(self):
        # A second rollback is a silent no-op.
        @asyncio.coroutine
        def go():
            conn = yield from self.connect()
            tr = yield from conn.begin()
            self.assertTrue(tr.is_active)
            yield from tr.rollback()
            self.assertFalse(tr.is_active)
            yield from tr.rollback()
            self.assertFalse(tr.is_active)
        self.loop.run_until_complete(go())
    def test_root_transaction_double_close(self):
        @asyncio.coroutine
        def go():
            conn = yield from self.connect()
            tr = yield from conn.begin()
            self.assertTrue(tr.is_active)
            yield from tr.close()
            self.assertFalse(tr.is_active)
            yield from tr.close()
            self.assertFalse(tr.is_active)
        self.loop.run_until_complete(go())
    def test_inner_transaction_commit(self):
        # A nested begin() piggybacks on the outer transaction.
        @asyncio.coroutine
        def go():
            conn = yield from self.connect()
            tr1 = yield from conn.begin()
            tr2 = yield from conn.begin()
            self.assertTrue(tr2.is_active)
            yield from tr2.commit()
            self.assertFalse(tr2.is_active)
            self.assertTrue(tr1.is_active)
            yield from tr1.commit()
            self.assertFalse(tr2.is_active)
            self.assertFalse(tr1.is_active)
        self.loop.run_until_complete(go())
    def test_inner_transaction_rollback(self):
        # Rolling back the inner transaction deactivates the outer one too.
        @asyncio.coroutine
        def go():
            conn = yield from self.connect()
            tr1 = yield from conn.begin()
            tr2 = yield from conn.begin()
            self.assertTrue(tr2.is_active)
            yield from conn.execute(tbl.insert().values(name='aaaa'))
            yield from tr2.rollback()
            self.assertFalse(tr2.is_active)
            self.assertFalse(tr1.is_active)
            res = yield from conn.scalar(tbl.count())
            self.assertEqual(1, res)
        self.loop.run_until_complete(go())
    def test_inner_transaction_close(self):
        # close() on the inner transaction keeps the outer one usable.
        @asyncio.coroutine
        def go():
            conn = yield from self.connect()
            tr1 = yield from conn.begin()
            tr2 = yield from conn.begin()
            self.assertTrue(tr2.is_active)
            yield from conn.execute(tbl.insert().values(name='aaaa'))
            yield from tr2.close()
            self.assertFalse(tr2.is_active)
            self.assertTrue(tr1.is_active)
            yield from tr1.commit()
            res = yield from conn.scalar(tbl.count())
            self.assertEqual(2, res)
        self.loop.run_until_complete(go())
    def test_nested_transaction_commit(self):
        # begin_nested() uses SAVEPOINTs; inner commit keeps the change.
        @asyncio.coroutine
        def go():
            conn = yield from self.connect()
            tr1 = yield from conn.begin_nested()
            tr2 = yield from conn.begin_nested()
            self.assertTrue(tr1.is_active)
            self.assertTrue(tr2.is_active)
            yield from conn.execute(tbl.insert().values(name='aaaa'))
            yield from tr2.commit()
            self.assertFalse(tr2.is_active)
            self.assertTrue(tr1.is_active)
            res = yield from conn.scalar(tbl.count())
            self.assertEqual(2, res)
            yield from tr1.commit()
            self.assertFalse(tr2.is_active)
            self.assertFalse(tr1.is_active)
            res = yield from conn.scalar(tbl.count())
            self.assertEqual(2, res)
        self.loop.run_until_complete(go())
    def test_nested_transaction_commit_twice(self):
        @asyncio.coroutine
        def go():
            conn = yield from self.connect()
            tr1 = yield from conn.begin_nested()
            tr2 = yield from conn.begin_nested()
            yield from conn.execute(tbl.insert().values(name='aaaa'))
            yield from tr2.commit()
            self.assertFalse(tr2.is_active)
            self.assertTrue(tr1.is_active)
            yield from tr2.commit()
            self.assertFalse(tr2.is_active)
            self.assertTrue(tr1.is_active)
            res = yield from conn.scalar(tbl.count())
            self.assertEqual(2, res)
            yield from tr1.close()
        self.loop.run_until_complete(go())
    def test_nested_transaction_rollback(self):
        # Rolling back to the inner SAVEPOINT undoes only the inner work.
        @asyncio.coroutine
        def go():
            conn = yield from self.connect()
            tr1 = yield from conn.begin_nested()
            tr2 = yield from conn.begin_nested()
            self.assertTrue(tr1.is_active)
            self.assertTrue(tr2.is_active)
            yield from conn.execute(tbl.insert().values(name='aaaa'))
            yield from tr2.rollback()
            self.assertFalse(tr2.is_active)
            self.assertTrue(tr1.is_active)
            res = yield from conn.scalar(tbl.count())
            self.assertEqual(1, res)
            yield from tr1.commit()
            self.assertFalse(tr2.is_active)
            self.assertFalse(tr1.is_active)
            res = yield from conn.scalar(tbl.count())
            self.assertEqual(1, res)
        self.loop.run_until_complete(go())
    def test_nested_transaction_rollback_twice(self):
        @asyncio.coroutine
        def go():
            conn = yield from self.connect()
            tr1 = yield from conn.begin_nested()
            tr2 = yield from conn.begin_nested()
            yield from conn.execute(tbl.insert().values(name='aaaa'))
            yield from tr2.rollback()
            self.assertFalse(tr2.is_active)
            self.assertTrue(tr1.is_active)
            yield from tr2.rollback()
            self.assertFalse(tr2.is_active)
            self.assertTrue(tr1.is_active)
            yield from tr1.commit()
            res = yield from conn.scalar(tbl.count())
            self.assertEqual(1, res)
        self.loop.run_until_complete(go())
    # TODO: add skip is twophase transactions disabled
    @check_prepared_transactions
    def test_twophase_transaction_commit(self):
        @asyncio.coroutine
        def go():
            conn = yield from self.connect()
            tr = yield from conn.begin_twophase()
            yield from conn.execute(tbl.insert().values(name='aaaa'))
            yield from tr.prepare()
            self.assertTrue(tr.is_active)
            yield from tr.commit()
            self.assertFalse(tr.is_active)
            res = yield from conn.scalar(tbl.count())
            self.assertEqual(2, res)
        self.loop.run_until_complete(go())
    @check_prepared_transactions
    def test_twophase_transaction_twice(self):
        # Only one two-phase transaction may be open per connection.
        @asyncio.coroutine
        def go():
            conn = yield from self.connect()
            tr = yield from conn.begin_twophase()
            with self.assertRaises(sa.InvalidRequestError):
                yield from conn.begin_twophase()
            self.assertTrue(tr.is_active)
            yield from tr.prepare()
            yield from tr.commit()
        self.loop.run_until_complete(go())
    def test_transactions_sequence(self):
        # conn._transaction tracks the single currently-open transaction.
        @asyncio.coroutine
        def go():
            conn = yield from self.connect()
            yield from conn.execute(tbl.delete())
            self.assertIsNone(conn._transaction)
            tr1 = yield from conn.begin()
            self.assertIs(tr1, conn._transaction)
            yield from conn.execute(tbl.insert().values(name='a'))
            res1 = yield from conn.scalar(tbl.count())
            self.assertEqual(1, res1)
            yield from tr1.commit()
            self.assertIsNone(conn._transaction)
            tr2 = yield from conn.begin()
            self.assertIs(tr2, conn._transaction)
            yield from conn.execute(tbl.insert().values(name='b'))
            res2 = yield from conn.scalar(tbl.count())
            self.assertEqual(2, res2)
            yield from tr2.commit()
            self.assertIsNone(conn._transaction)
        self.loop.run_until_complete(go())
| |
from flask import has_request_context, _request_ctx_stack, flash, request, current_app, session
import flask_login
from flask_login.utils import _get_user
from frasco.ext import get_extension_state, has_extension
from frasco.ctx import ContextStack
from frasco.utils import match_email_domain, populate_obj
from frasco.models import db
from frasco.geoip import geolocate_country
from contextlib import contextmanager
from werkzeug.local import LocalProxy
import datetime
from .signals import user_signed_up, email_validated
from .password import update_password
from .tokens import generate_user_token, TOKEN_NS_VALIDATE_EMAIL
import logging
# Fallback stack that lets code set a "current user" outside of a Flask
# request context (e.g. background jobs or shell sessions).
_no_req_ctx_user_stack = ContextStack()
logger = logging.getLogger('frasco.users')
@contextmanager
def user_login_context(user):
    """Temporarily make *user* the current user for the duration of the block.

    Pushes onto the per-request override stack when a request context is
    active, otherwise onto the module-level fallback stack; always pops on
    exit, even on error.
    """
    if has_request_context():
        ctx = _request_ctx_stack.top
        if not hasattr(ctx, 'user_stack'):
            ctx.user_stack = ContextStack()
        stack = ctx.user_stack
    else:
        stack = _no_req_ctx_user_stack
    stack.push(user)
    try:
        yield user
    finally:
        stack.pop()
def get_current_user(user=None):
    """Resolve the effective current user.

    A truthy *user* is returned as-is; ``user is False`` forces anonymous
    (None). Otherwise the lookup order is: per-request override stack,
    no-request-context fallback stack, then flask-login's user.
    """
    if user is False:
        return None
    if user:
        return user
    if not has_request_context():
        return _no_req_ctx_user_stack.top
    override = getattr(_request_ctx_stack.top, 'user_stack', None)
    if override and override.top:
        return override.top
    return _get_user()
def get_current_user_if_logged_in(user=None):
    """Like get_current_user() but returns None unless the user is authenticated."""
    resolved = get_current_user(user)
    if resolved and resolved.is_authenticated:
        return resolved
    return None
# Proxy that re-resolves the effective current user on every access.
current_user = LocalProxy(get_current_user)
def is_user_logged_in():
    """Return a truthy value when the current user is authenticated."""
    user = current_user
    return user and user.is_authenticated
def login_user(user, *args, **kwargs):
    """Log *user* in via flask-login and record login metadata.

    Keyword-only extras (popped before forwarding to flask_login.login_user):
      provider -- auth provider name, defaults to the configured default.
      skip_session -- when True, drop the session user id so the login does
        not persist past this request.

    Returns False when a login validator rejects the user or flask-login
    refuses the login; True on success. Side effects: updates last_login_*
    fields on the user, appends the provider to user.auth_providers, and adds
    a LoginModel row (with best-effort geoip country) when configured.
    """
    state = get_extension_state('frasco_users')
    provider = kwargs.pop('provider', state.options['default_auth_provider_name'])
    skip_session = kwargs.pop('skip_session', False)
    # Any registered validator can veto the login.
    for validator in state.login_validators:
        if not validator(user):
            return False
    if not flask_login.login_user(user, *args, **kwargs):
        return False
    if skip_session:
        # remove the _user_id set by flask-login
        # this will prevent setting the remember cookie and won't maintain the login state to the next request
        session.pop('_user_id', None)
    user.last_login_at = datetime.datetime.utcnow()
    user.last_login_from = request.remote_addr
    user.last_login_provider = provider
    if not user.auth_providers:
        user.auth_providers = []
    if provider not in user.auth_providers:
        user.auth_providers.append(provider)
    if state.LoginModel:
        # Persist an audit record of this login.
        login = state.LoginModel()
        login.user = user
        login.login_at = datetime.datetime.utcnow()
        login.login_from = request.remote_addr
        login.login_user_agent = request.user_agent.string
        login.login_provider = provider
        if has_extension('frasco_geoip'):
            try:
                login.login_country = geolocate_country(use_session_cache=False)
            except:
                # Best effort only -- geoip failure must not block the login.
                pass
        db.session.add(login)
    logger.info('User #%s logged in' % user.id)
    return True
def signup_user(email_or_user, password=None, provider=None, flash_messages=True, send_signal=True, validate_email=False, **kwargs):
    """Create (or finalize) a user account and return the user object.

    *email_or_user* may be an existing model instance or an email address
    from which a new instance is built. Remaining **kwargs populate the user
    object. Runs validate_user(), optionally sets the password, flushes the
    new row, then handles email validation / welcome emails per the
    extension options and finally emits the user_signed_up signal.

    Raises UserValidationFailedError (via validate_user) on invalid input.
    """
    state = get_extension_state('frasco_users')
    if isinstance(email_or_user, state.Model):
        user = email_or_user
    else:
        user = state.Model()
        user.email = email_or_user.strip().lower()
    # An explicit email kwarg overrides whatever was set above.
    if 'email' in kwargs:
        user.email = kwargs.pop('email').strip().lower()
    populate_obj(user, kwargs)
    user.signup_provider = provider or state.options['default_auth_provider_name']
    user.auth_providers = [user.signup_provider]
    if has_request_context():
        user.signup_from = request.remote_addr
        if has_extension('frasco_geoip'):
            try:
                user.signup_country = geolocate_country(use_session_cache=False)
            except:
                # Best effort only -- geoip failure must not block the signup.
                pass
    validate_user(user, flash_messages=flash_messages, is_signup=True)
    if password:
        update_password(user, password, flash_messages=flash_messages)
    db.session.add(user)
    # Flush so user.id is available for logging/signals below.
    db.session.flush()
    if validate_email:
        validate_user_email(user)
    elif state.options['send_email_validation_email']:
        send_user_validation_email(user)
    if state.options["send_welcome_email"]:
        from frasco.mail import send_mail
        # True means "use the default template"; any other truthy value is a template name.
        template = "users/welcome.txt" if state.options["send_welcome_email"] == True else state.options["send_welcome_email"]
        send_mail(user.email, template, user=user, locale=getattr(user, 'locale', None))
    logger.info('New signup as #%s' % user.id)
    if send_signal:
        user_signed_up.send(user=user)
    return user
class UserValidationFailedError(Exception):
    """Raised when a user object fails validation.

    ``reason`` holds a short machine-readable code such as 'email_taken' or
    'username_too_short', or None when no specific reason applies.
    """
    def __init__(self, reason=None):
        super().__init__()
        self.reason = reason
def validate_user(user, ignore_self=True, flash_messages=True, raise_error=True, is_signup=False):
    """Validates a new user object before saving it in the database.

    Checks, in order:
      - custom validators only, when override_builtin_user_validation is set;
      - username presence, forbidden list, length, spaces and uniqueness
        (when the model has a username attribute);
      - email presence, uniqueness and allowed domains;
      - the registered custom validators.

    Returns True when valid. On failure, flashes the configured message
    (when flash_messages), then raises UserValidationFailedError with a short
    reason code (when raise_error) or returns False.
    """
    state = get_extension_state('frasco_users')
    # NOTE(review): assumes user.email is always a string -- confirm callers
    # never pass a user whose email attribute is unset.
    email = user.email.strip().lower()
    has_username = hasattr(user, 'username')
    username = getattr(user, 'username', None)
    if username:
        username = username.strip()
    if state.user_validators and state.override_builtin_user_validation:
        # Custom validators fully replace the built-in checks.
        for validator in state.user_validators:
            if not validator(username, email, is_signup):
                if raise_error:
                    raise UserValidationFailedError()
                return False
        return True
    if has_username:
        if not username and state.options["must_provide_username"]:
            if flash_messages and state.options["must_provide_username_message"]:
                flash(state.options["must_provide_username_message"], "error")
            if raise_error:
                raise UserValidationFailedError("username_missing")
            return False
        # BUGFIX: these checks used to run unconditionally and crashed with an
        # AttributeError on username.lower() when the model has a username
        # column set to None and must_provide_username is disabled.
        if username is not None:
            if username.lower() in state.options['forbidden_usernames']:
                if flash_messages and state.options["username_taken_message"]:
                    flash(state.options["username_taken_message"], "error")
                if raise_error:
                    raise UserValidationFailedError("username_forbidden")
                return False
            if len(username) < state.options['min_username_length']:
                if flash_messages and state.options["username_too_short_message"]:
                    flash(state.options["username_too_short_message"], "error")
                if raise_error:
                    raise UserValidationFailedError("username_too_short")
                return False
            if not state.options['allow_spaces_in_username'] and " " in username:
                if flash_messages and state.options["username_has_spaces_message"]:
                    flash(state.options["username_has_spaces_message"], "error")
                if raise_error:
                    raise UserValidationFailedError("username_has_spaces")
                return False
            if state.options["username_is_unique"]:
                # Compare case-insensitively unless configured otherwise.
                col = state.Model.username if state.options["username_case_sensitive"] else db.func.lower(state.Model.username)
                uname = username if state.options["username_case_sensitive"] else username.lower()
                q = state.Model.query.filter(col == uname)
                if ignore_self and user.id:
                    q = q.filter(state.Model.id != user.id)
                if q.count() > 0:
                    if flash_messages and state.options["username_taken_message"]:
                        flash(state.options["username_taken_message"], "error")
                    if raise_error:
                        raise UserValidationFailedError("username_taken")
                    return False
    if not email and state.options["must_provide_email"]:
        if flash_messages and state.options["must_provide_email_message"]:
            flash(state.options["must_provide_email_message"], "error")
        if raise_error:
            raise UserValidationFailedError("email_missing")
        return False
    if email and state.options["email_is_unique"]:
        q = state.Model.query.filter(state.Model.email == email)
        if ignore_self and user.id:
            q = q.filter(state.Model.id != user.id)
        if q.count() > 0:
            if flash_messages and state.options["email_taken_message"]:
                flash(state.options["email_taken_message"], "error")
            if raise_error:
                raise UserValidationFailedError("email_taken")
            return False
    if email and state.options['email_allowed_domains'] is not None:
        if not match_email_domain(email, state.options['email_allowed_domains']):
            if flash_messages and state.options['email_domain_not_allowed_message']:
                flash(state.options['email_domain_not_allowed_message'], 'error')
            if raise_error:
                raise UserValidationFailedError('email_domain_not_allowed')
            return False
    for validator in state.user_validators:
        if not validator(username, email, is_signup):
            if raise_error:
                raise UserValidationFailedError()
            return False
    return True
def generate_email_validation_token(user):
    """Return a signed token scoped to email validation for *user*."""
    token = generate_user_token(user, TOKEN_NS_VALIDATE_EMAIL)
    return token
def send_user_validation_email(user):
    """Email *user* a validation link and return the token embedded in it."""
    from frasco.mail import send_mail
    token = generate_email_validation_token(user)
    send_mail(user.email, "users/validate_email", user=user, token=token,
              locale=getattr(user, 'locale', None))
    return token
def validate_user_email(user):
    """Mark *user*'s email as validated and emit the email_validated signal."""
    user.email_validated_at = datetime.datetime.utcnow()
    user.email_validated = True
    email_validated.send(user)
def check_rate_limit(ip, remote_addr_prop, date_prop, flash_message=True):
    """Return False when *ip* performed rate_limit_count or more events within
    the configured rate_limit_period, True otherwise.

    *remote_addr_prop* / *date_prop* name the user-model columns holding the
    event's source address and timestamp (e.g. signup_from / last_login_at).
    """
    state = get_extension_state('frasco_users')
    # NOTE(review): uses naive local time (datetime.now()) while the rest of
    # this module stores datetime.utcnow(); confirm the compared column shares
    # the same timezone convention.
    since = datetime.datetime.now() - datetime.timedelta(seconds=state.options['rate_limit_period'])
    count = state.Model.query.filter(getattr(state.Model, remote_addr_prop) == ip, getattr(state.Model, date_prop) >= since).count()
    if count >= state.options['rate_limit_count']:
        if flash_message and state.options["rate_limit_reached_message"]:
            flash(state.options["rate_limit_reached_message"], "error")
        return False
    return True
| |
#!/usr/bin/env python
# OpenVirteX control script
# Heavily based on FlowVisor's fvctl
#import python utilities to parse arguments
import sys
from optparse import OptionParser
import urllib2
import json
import getpass
# ovxctl client version string.
VERSION = '0.1'
# Transport protocols accepted in controller URLs (proto:host:port).
SUPPORTED_PROTO = ['tcp']
def getUrl(opts, path):
    """Expand the global URL template with the target host, port and API path."""
    host, port = opts.host, opts.port
    return URL % (host, port, path)
def buildRequest(data, url, cmd):
    """Build a JSON-RPC 2.0 POST request for the OVX API; *data* becomes params."""
    payload = { "id" : "ovxctl", "method" : cmd , "jsonrpc" : "2.0" }
    if data is not None:
        payload['params'] = data
    headers = {"Content-Type" : "application/json-rpc"}
    return urllib2.Request(url, json.dumps(payload), headers)
def pa_none(args, cmd):
    """Argument parser for commands that take no extra arguments."""
    sdesc, ldesc = DESCS[cmd]
    parser = OptionParser(usage=USAGE.format(cmd), description=ldesc)
    options, args = parser.parse_args(args)
    return (options, args)
#Create calls
def pa_addControllers(args, cmd):
    """Argument parser for the addControllers command."""
    sdesc, ldesc = DESCS[cmd]
    usage = "%s <tenant_id> <vdpid> <ctrlUrls>" % USAGE.format(cmd)
    return OptionParser(usage=usage, description=ldesc).parse_args(args)
def do_addControllers(gopts, opts, args):
    # Attach additional controllers to an existing virtual switch.
    # args: [tenant_id, virtual dpid (hex with ':'), comma-separated controller URLs]
    if len(args) != 3:
        print "addControllers: Must specify tenant id, virtual dpid, controller list"
        sys.exit()
    # dpids are written as colon-separated hex; strip the colons and parse base 16.
    req = { "controllerUrls" : buildControllerList(args[2]), \
            "tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":",""), 16) }
    resp = connect(gopts, "tenant", "addControllers", data=req, passwd=getPasswd(gopts))
    if resp:
        print "Added controllers %s to switch %s" % (args[2], args[1])
        print resp
def pa_createNetwork(args, cmd):
    """Argument parser for the createNetwork command."""
    sdesc, ldesc = DESCS[cmd]
    usage = "%s <protocol> <controller_urls> <ip_network> <ip_mask> [<service_type> <CIR> <CBS> <EBS>]" % USAGE.format(cmd)
    return OptionParser(usage=usage, description=ldesc).parse_args(args)
def buildControllerList(ctrls):
    # Validate a comma-separated list of controller URLs (proto:host:port)
    # and return them as a list; the literal "none" yields an empty list.
    # Exits the process on any malformed entry.
    if ctrls.lower() == "none":
        return []
    l = ctrls.split(',')
    controllerUrls = []
    for ctrl in l:
        parts = ctrl.split(":")
        if len(parts) < 3:
            print "%s is not a valid controller url" % ctrl
            sys.exit()
        if parts[0] not in SUPPORTED_PROTO:
            print "%s in %s is not a supported protocol" % (parts[0], ctrl)
            sys.exit()
        try:
            int(parts[2])
        except:
            # NOTE(review): bare except also masks SystemExit/KeyboardInterrupt;
            # ValueError would be the precise catch here.
            print "%s in %s is not a valid port number" % (parts[2], ctrl)
            sys.exit()
        controllerUrls.append(ctrl)
    return controllerUrls
def do_createNetwork(gopts, opts, args):
    # Create a virtual network with DiffServ QoS parameters.
    # args: [controller_urls, network_ip, mask, service_type, CIR, CBS, EBS]
    if len(args) != 7:
        # Commented by SJM: print "createNetwork : Must specify controllerUrls, network_ip, network_mask"
        # SJM NIaaS: Modified this function to support DiffServ
        print "createNetwork : Must specify controllerUrls, network_ip, network_mask, service_type, CIR, CBS, EBS"
        # SJM NIaaS END
        sys.exit()
    # DiffServ service class -> IP ToS byte value.
    type_map = { 'DF': 0, 'AF1': 32, 'AF2': 64, 'AF3': 96, 'EF': 0xb8 }
    req = { "controllerUrls" : buildControllerList(args[0]), \
            "networkAddress" : args[1], "mask" : int(args[2]), \
            # SJM NIaaS: Add ToS, CIR, CBS and EBS parameters
            "ToS" : type_map[args[3].upper()], "CIR" : int(args[4]), \
            "CBS" : int(args[5]), "EBS" : int(args[6])
            # SJM NIaaS END
            }
    network_id = connect(gopts, "tenant", "createNetwork", data=req, passwd=getPasswd(gopts))
    if network_id:
        print "Virtual network has been created (network_id %s)." % str(network_id)
def pa_createSwitch(args, cmd):
    """Argument parser for the createSwitch command (supports -d/--dpid)."""
    sdesc, ldesc = DESCS[cmd]
    usage = "%s [options] <tenant_id> <physical_dpids>" % USAGE.format(cmd)
    parser = OptionParser(usage=usage, description=ldesc)
    parser.add_option("-d", "--dpid", dest="dpid", type="str", default="0",
                      help="Specify the DPID for this switch")
    return parser.parse_args(args)
def do_createSwitch(gopts, opts, args):
    # Create a virtual switch mapped onto one or more physical switches.
    # args: [tenant_id, comma-separated physical dpids]
    if len(args) != 2:
        print ("createSwitch : must specify: " +
               "virtual tenant_id and a comma separated list of physical dpids " +
               "(e.g. 00:00:00:00:00:00:00:01) which will be associated to the virtual switch")
        sys.exit()
    dpids = [int(dpid.replace(":", ""), 16) for dpid in args[1].split(',')]
    req = { "tenantId" : int(args[0]), "dpids" : dpids, "dpid" : int(opts.dpid.replace(":", ""), 16) }
    reply = connect(gopts, "tenant", "createSwitch", data=req, passwd=getPasswd(gopts))
    switchId = reply.get('vdpid')
    if switchId:
        # Re-render the numeric dpid as colon-separated hex pairs for display.
        switch_name = '00:' + ':'.join([("%x" % switchId)[i:i+2] for i in range(0, len(("%x" % switchId)), 2)])
        print "Virtual switch has been created (tenant_id %s, switch_id %s)" % (args[0], switch_name)
def pa_createPort(args, cmd):
    """Argument parser for the createPort command."""
    sdesc, ldesc = DESCS[cmd]
    usage = "%s <tenant_id> <physical_dpid> <physical_port>" % USAGE.format(cmd)
    return OptionParser(usage=usage, description=ldesc).parse_args(args)
def do_createPort(gopts, opts, args):
    # Expose a physical port as a virtual port in the tenant's network.
    # args: [tenant_id, physical dpid (hex with ':'), physical port number]
    if len(args) != 3:
        print ("createPort : must specify: " +
               "virtual tenant_id, physical dpid " +
               "(e.g. 00:00:00:00:00:00:00:01) and physical port")
        sys.exit()
    req = { "tenantId" : int(args[0]), "dpid" : int(args[1].replace(":", ""), 16), "port" : int(args[2]) }
    reply = connect(gopts, "tenant", "createPort", data=req, passwd=getPasswd(gopts))
    switchId = reply.get('vdpid')
    portId = reply.get('vport')
    if switchId and portId:
        # Re-render the numeric dpid as colon-separated hex pairs for display.
        switch_name = '00:' + ':'.join([("%x" %int(switchId))[i:i+2] for i in range(0, len(("%x" %int(switchId))), 2)])
        print "Virtual port has been created (tenant_id %s, switch_id %s, port_id %s)" % (args[0], switch_name, portId)
def pa_setInternalRouting(args, cmd):
    """Argument parser for the setInternalRouting command."""
    sdesc, ldesc = DESCS[cmd]
    usage = "%s <tenant_id> <virtual_dpid> <routing_algorithm> <backup_routes_num>" % USAGE.format(cmd)
    return OptionParser(usage=usage, description=ldesc).parse_args(args)
def do_setInternalRouting(gopts, opts, args):
if len(args) != 4:
print ("setInternalRouting : Must specify virtual tenant_id, virtual switch_id, " +
"algorithm (spf, manual) and number of backup routes")
sys.exit()
req = { "tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":", ""), 16),
"algorithm" : args[2], "backup_num" : int(args[3]) }
reply = connect(gopts, "tenant", "setInternalRouting", data=req, passwd=getPasswd(gopts))
tenantId = reply.get('tenantId')
switchId = reply.get('vdpid')
if tenantId and switchId:
print "Routing has be set for big switch (tenant_id %s, switch_id %s)" % (switchId, tenantId)
def pa_connectHost(args, cmd):
    """Argument parser for the connectHost command."""
    # BUGFIX: usage string previously read "<vitual_dpid>" (typo).
    usage = "%s <tenant_id> <virtual_dpid> <virtual_port> <host_mac>" % USAGE.format(cmd)
    (sdesc, ldesc) = DESCS[cmd]
    parser = OptionParser(usage=usage, description=ldesc)
    return parser.parse_args(args)
def do_connectHost(gopts, opts, args):
    # Attach a host (by MAC address) to a virtual port.
    # args: [tenant_id, virtual dpid (hex with ':'), virtual port, host MAC]
    if len(args) != 4:
        print "connectHost : Must specify virtual tenant_id, virtual switch_id, virtual port_id and host MAC address"
        sys.exit()
    req = { "tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":", ""), 16),
            "vport" : int(args[2]), "mac" : args[3] }
    reply = connect(gopts, "tenant", "connectHost", data=req, passwd=getPasswd(gopts))
    hostId = reply.get('hostId')
    if hostId:
        print "Host (host_id %s) has been connected to virtual port" % (hostId)
def pa_connectLink(args, cmd):
    """Argument parser for the connectLink command."""
    sdesc, ldesc = DESCS[cmd]
    usage = "%s <tenant_id> <src_virtual_dpid> <src_virtual_port> <dst_virtual_dpid> <dst_virtual_port>" % USAGE.format(cmd)
    return OptionParser(usage=usage, description=ldesc).parse_args(args)
def do_connectLink(gopts, opts, args):
    # Create a virtual link between two virtual ports.
    # args: [tenant_id, src vdpid, src vport, dst vdpid, dst vport, algorithm, backup count]
    if len(args) != 7:
        print ("connectLink : Must specify tenant_id, src_virtual_dpid, src_virtual_port, dst_virtual_dpid, dst_virtual_port, "
                + "algorithm (spf, manual), number of backup routes")
        sys.exit()
    req = { "tenantId" : int(args[0]), "srcDpid" : int(args[1].replace(":", ""), 16),
            "srcPort" : int(args[2]), "dstDpid" : int(args[3].replace(":", ""), 16),
            "dstPort" : int(args[4]), "algorithm" : args[5], "backup_num" : int(args[6]) }
    reply = connect(gopts, "tenant", "connectLink", data=req, passwd=getPasswd(gopts))
    linkId = reply.get('linkId')
    if linkId:
        print "Virtual link (link_id %s) has been created" % (linkId)
def pa_setLinkPath(args, cmd):
    """Argument parser for the setLinkPath command."""
    sdesc, ldesc = DESCS[cmd]
    usage = "%s <tenant_id> <link_id> <physical_path> <priority>" % USAGE.format(cmd)
    return OptionParser(usage=usage, description=ldesc).parse_args(args)
def do_setLinkPath(gopts, opts, args):
    # Pin a virtual link onto an explicit physical path with a priority.
    # args: [tenant_id, link_id, physical path spec, priority 0-255]
    if len(args) != 4:
        print "setLinkPath : Must specify tenant_id, link_id, the physical path that connect the end-points and the priority [0-255]"
        sys.exit()
    req = { "tenantId" : int(args[0]), "linkId" : int(args[1]), "path" : translate_path(args[2]), "priority" : int(args[3]) }
    reply = connect(gopts, "tenant", "setLinkPath", data=req, passwd=getPasswd(gopts))
    linkId = reply.get('linkId')
    if linkId:
        print "Virtual link (link_id %s) path has been set" % (linkId)
def pa_connectRoute(args, cmd):
    """Argument parser for the connectRoute command."""
    sdesc, ldesc = DESCS[cmd]
    usage = "%s <tenant_id> <virtual_dpid> <src_virtual_port> <dst_virtual_port> <physical_path> <priority>" % USAGE.format(cmd)
    return OptionParser(usage=usage, description=ldesc).parse_args(args)
def do_connectRoute(gopts, opts, args):
    # Create an internal route between two ports of a virtual big switch.
    # args: [tenant_id, vdpid, src vport, dst vport, physical path spec, priority 0-255]
    if len(args) != 6:
        print ("connectRoute : Must specify tenant_id, virtual_dpid, src_virtual_port, dst_virtual_port, " +
               "the physical path that connect the end-points and the priority [0-255]")
        sys.exit()
    req = { "tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":", ""), 16),
            "srcPort" : int(args[2]), "dstPort" : int(args[3]),
            "path" : translate_path(args[4]), "priority" : int(args[5]) }
    reply = connect(gopts, "tenant", "connectRoute", data=req, passwd=getPasswd(gopts))
    routeId = reply.get('routeId')
    if routeId:
        print "Big-switch internal route (route_id %s) has been created" % (routeId)
#Remove calls
def pa_removeNetwork(args, cmd):
    """Argument parser for the removeNetwork command."""
    sdesc, ldesc = DESCS[cmd]
    usage = "%s <tenant_id>" % USAGE.format(cmd)
    return OptionParser(usage=usage, description=ldesc).parse_args(args)
def do_removeNetwork(gopts, opts, args):
    # Delete an entire virtual network. args: [tenant_id]
    if len(args) != 1:
        print "removeNetwork : Must specify a virtual tenant_id"
        sys.exit()
    req = { "tenantId" : int(args[0]) }
    result = connect(gopts, "tenant", "removeNetwork", data=req, passwd=getPasswd(gopts))
    print "Network (tenant_id %s) has been removed" % (args[0])
def pa_removeSwitch(args, cmd):
    """Argument parser for the removeSwitch command."""
    sdesc, ldesc = DESCS[cmd]
    usage = "%s <tenant_id> <virtual_dpid>" % USAGE.format(cmd)
    return OptionParser(usage=usage, description=ldesc).parse_args(args)
def do_removeSwitch(gopts, opts, args):
    # Delete a virtual switch. args: [tenant_id, virtual dpid (hex with ':')]
    if len(args) != 2:
        print "removeSwitch : Must specify a virtual tenant_id and a virtual switch_id"
        sys.exit()
    req = { "tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":", ""), 16) }
    result = connect(gopts, "tenant", "removeSwitch", data=req, passwd=getPasswd(gopts))
    print "Switch (switch_id %s) has been removed" % (args[1])
def pa_removePort(args, cmd):
    """Argument parser for the removePort command."""
    sdesc, ldesc = DESCS[cmd]
    usage = "%s <tenant_id> <virtual_dpid> <virtual_port>" % USAGE.format(cmd)
    return OptionParser(usage=usage, description=ldesc).parse_args(args)
def do_removePort(gopts, opts, args):
    # Delete a virtual port. args: [tenant_id, virtual dpid (hex with ':'), virtual port]
    if len(args) != 3:
        print "removePort : Must specify a virtual tenant_id, a virtual switch_id and a virtual port_id"
        sys.exit()
    req = { "tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":", ""), 16), "vport" : int(args[2])}
    result = connect(gopts, "tenant", "removePort", data=req, passwd=getPasswd(gopts))
    print "Port (port_id %s) has been removed from virtual switch (switch_id %s)" % (args[2], args[1])
def pa_disconnectHost(args, cmd):
    """Argument parser for the disconnectHost command."""
    sdesc, ldesc = DESCS[cmd]
    usage = "%s <tenant_id> <host_id>" % USAGE.format(cmd)
    return OptionParser(usage=usage, description=ldesc).parse_args(args)
def do_disconnectHost(gopts, opts, args):
if len(args) != 2:
print "disconnectHost : Must specify a a virtual tenant_id and a host_id"
sys.exit()
req = { "tenantId" : int(args[0]), "hostId" : int(args[1]) }
result = connect(gopts, "tenant", "disconnectHost", data=req, passwd=getPasswd(gopts))
print "Host (host_id %s) has been disconnected from the virtual network (tenant_id %s)" % (args[1], args[0])
def pa_disconnectLink(args, cmd):
usage = "%s <tenant_id> <link_id>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_disconnectLink(gopts, opts, args):
if len(args) != 2:
print "disconnectLink : Must specify a a virtual tenant_id and a link_id"
sys.exit()
req = { "tenantId" : int(args[0]), "linkId" : int(args[1]) }
result = connect(gopts, "tenant", "disconnectLink", data=req, passwd=getPasswd(gopts))
print "Link (link_id %s) has been disconnected from the virtual network (tenant_id %s)" % (args[1], args[0])
def pa_disconnectRoute(args, cmd):
usage = "%s <tenant_id> <route_id>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_disconnectRoute(gopts, opts, args):
if len(args) != 3:
print "disconnectRoute : Must specify a virtual tenant_id, switch_id and a route_id"
sys.exit()
req = { "tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":", ""), 16) , "routeId" : int(args[2]) }
result = connect(gopts, "tenant", "disconnectRoute", data=req, passwd=getPasswd(gopts))
print "Route (route_id %s) in virtual big-switch (switch_id %s) has been disconnected from the virtual network (tenant_id %s)" % (args[2], args[1], args[0])
#Runtime operations
def pa_startNetwork(args, cmd):
usage = "%s <tenant_id>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_startNetwork(gopts, opts, args):
if len(args) != 1:
print "startNetwork : Must specify a tenant_id"
sys.exit()
req = { "tenantId" : int(args[0]) }
result = connect(gopts, "tenant", "startNetwork", data=req, passwd=getPasswd(gopts))
if result:
print "Network (tenant_id %s) has been booted" % (args[0])
def pa_startSwitch(args, cmd):
usage = "%s <tenant_id> <virtual_dpid>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_startSwitch(gopts, opts, args):
if len(args) != 2:
print "startSwitch : Must specify a tenant_id and a virtual switch_id"
sys.exit()
req = { "tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":", ""), 16)}
result = connect(gopts, "tenant", "startSwitch", data=req, passwd=getPasswd(gopts))
if result:
print "Switch (switch_id %s) has been booted in virtual network (tenant_id %s)" % (args[1], args[0])
def pa_startPort(args, cmd):
usage = "%s <tenant_id> <virtual_dpid> <virtual_port>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_startPort(gopts, opts, args):
if len(args) != 3:
print "startPort : Must specify a tenant_id, a virtual switch_id and a virtual port_id"
sys.exit()
req = { "tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":", ""), 16), "vport" : int(args[2])}
reply = connect(gopts, "tenant", "startPort", data=req, passwd=getPasswd(gopts))
tenantId = reply.get('tenantId')
switchId = reply.get('vdpid')
portId = reply.get('vport')
if tenantId and switchId and portId:
print "Port (port_id %s) has been started in virtual switch (tenant_id %s, switch_id %s)" % (portId, tenantId, switchId)
def pa_stopNetwork(args, cmd):
usage = "%s <tenant_id>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_stopNetwork(gopts, opts, args):
if len(args) != 1:
print "stopNetwork : Must specify a tenant_id"
sys.exit()
req = { "tenantId" : int(args[0]) }
result = connect(gopts, "tenant", "stopNetwork", data=req, passwd=getPasswd(gopts))
if result:
print "Network (tenant_id %s) has been shutdown" % (args[0])
def pa_stopSwitch(args, cmd):
usage = "%s <tenant_id> <virtual_dpid>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_stopSwitch(gopts, opts, args):
if len(args) != 2:
print "stopSwitch : Must specify a tenant_id and a virtual switch_id"
sys.exit()
req = { "tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":", ""), 16)}
result = connect(gopts, "tenant", "stopSwitch", data=req, passwd=getPasswd(gopts))
if result:
print "Switch (switch_id %s) has been shutdown in virtual network (tenant_id %s)" % (args[1], args[0])
def pa_stopPort(args, cmd):
usage = "%s <tenant_id> <virtual_dpid> <virtual_port>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_stopPort(gopts, opts, args):
if len(args) != 3:
print "stopPort : Must specify a tenant_id, a virtual switch_id and a virtual port_id"
sys.exit()
req = { "tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":", ""), 16), "vport" : int(args[2])}
result = connect(gopts, "tenant", "stopPort", data=req, passwd=getPasswd(gopts))
if result:
print "Port (port_id %s) has been shutdown in virtual switch (tenant_id %s, switch_id %s)" % (args[2], args[0], args[1])
def pa_getPhysicalFlowtable(args, cmd):
usage = "%s [<physical_dpid>]" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_getPhysicalFlowtable(gopts, opts, args):
if len(args) > 1:
print "getPhysicalFlowtable : May specify optional physical dpid"
sys.exit()
req = {}
if len(args) == 1:
req["dpid"] = int(args[0].replace(":", ""), 16)
result = connect(gopts, "status", "getPhysicalFlowtable", data=req, passwd=getPasswd(gopts))
print json.dumps(result)
def pa_getPhysicalHosts(args, cmd):
usage = "%s" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_getPhysicalHosts(gopts, opts, args):
if len(args) > 0:
print "getPhysicalHosts : No arguments"
sys.exit()
req = {}
result = connect(gopts, "status", "getPhysicalHosts", data=req, passwd=getPasswd(gopts))
print json.dumps(result)
def pa_getPhysicalTopology(args, cmd):
usage = "%s" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_getPhysicalTopology(gopts, opts, args):
if len(args) > 0:
print "getPhysicalTopology : No arguments"
sys.exit()
req = {}
result = connect(gopts, "status", "getPhysicalTopology", data=req, passwd=getPasswd(gopts))
print json.dumps(result)
def pa_listVirtualNetworks(args, cmd):
usage = "%s" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_listVirtualNetworks(gopts, opts, args):
if len(args) > 0:
print "listVirtualNetworks : No arguments"
sys.exit()
req = {}
result = connect(gopts, "status", "listVirtualNetworks", data=req, passwd=getPasswd(gopts))
print json.dumps(result)
def pa_getVirtualAddressMapping(args, cmd):
usage = "%s <tenant_id> <virtual_dpid>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_getVirtualAddressMapping(gopts, opts, args):
if len(args) != 1:
print "getVirtualAddressMapping : Must specify a tenant_id"
sys.exit()
req = { "tenantId" : int(args[0]) }
result = connect(gopts, "status", "getVirtualAddressMapping", data=req, passwd=getPasswd(gopts))
print json.dumps(result)
def pa_getVirtualFlowtable(args, cmd):
usage = "%s <tenant_id> [<virtual_dpid>]" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_getVirtualFlowtable(gopts, opts, args):
if (len(args) == 0) or (len(args) > 2):
print "getVirtualFlowtable : Must specify a tenant_id, and optional virtual switch_id"
sys.exit()
req = { "tenantId" : int(args[0]) }
if len(args) == 2:
req["vdpid"] = int(args[1].replace(":", ""), 16)
result = connect(gopts, "status", "getVirtualFlowtable", data=req, passwd=getPasswd(gopts))
print json.dumps(result)
def pa_getVirtualHosts(args, cmd):
usage = "%s <tenant_id>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_getVirtualHosts(gopts, opts, args):
if len(args) != 1:
print "getVirtualHosts : Must specify a tenant_id"
sys.exit()
req = { "tenantId": int(args[0]) }
result = connect(gopts, "status", "getVirtualHosts", data=req, passwd=getPasswd(gopts))
print json.dumps(result)
def pa_getVirtualLinkMapping(args, cmd):
usage = "%s <tenant_id>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_getVirtualLinkMapping(gopts, opts, args):
if len(args) != 1:
print "getVirtualHosts : Must specify a tenant_id"
sys.exit()
req = { "tenantId": int(args[0]) }
result = connect(gopts, "status", "getVirtualLinkMapping", data=req, passwd=getPasswd(gopts))
print json.dumps(result)
def pa_getVirtualSwitchMapping(args, cmd):
usage = "%s <tenant_id>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_getVirtualSwitchMapping(gopts, opts, args):
if len(args) != 1:
print "getVirtualSwitchMapping : Must specify a tenant_id"
sys.exit()
req = { "tenantId": int(args[0]) }
result = connect(gopts, "status", "getVirtualSwitchMapping", data=req, passwd=getPasswd(gopts))
print json.dumps(result)
def pa_getVirtualTopology(args, cmd):
usage = "%s <tenant_id>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_getVirtualTopology(gopts, opts, args):
if len(args) != 1:
print "getVirtualTopology : Must specify a tenant_id"
sys.exit()
req = { "tenantId": int(args[0]) }
result = connect(gopts, "status", "getVirtualTopology", data=req, passwd=getPasswd(gopts))
print json.dumps(result)
# Other methods
def translate_path(path_string):
    """Translate a hop list of colon-hex DPIDs into OVX's decimal form.

    Input is a comma-separated list of hops, each "src/port-dst/port" where
    the DPIDs are colon-separated hex (e.g. "00:...:01/1-00:...:02/1").
    Returns the same list with each DPID converted to its decimal value.
    Raises ValueError on a malformed hop.
    """
    hops = []
    for hop in path_string.split(","):
        src, dst = hop.split("-")
        src_dpid, src_port = src.split("/")
        dst_dpid, dst_port = dst.split("/")
        # DPIDs arrive as colon-separated hex; OVX expects decimal longs.
        src_long_dpid = int(src_dpid.replace(":", ""), 16)
        dst_long_dpid = int(dst_dpid.replace(":", ""), 16)
        hops.append("%s/%s-%s/%s" % (src_long_dpid, src_port, dst_long_dpid, dst_port))
    # Bug fix: the original called path.rstrip(",") and discarded the result
    # (str is immutable), so a trailing comma always leaked into the output.
    # Joining the hops produces the intended comma-separated string directly.
    return ",".join(hops)
def pa_help(args, cmd):
usage = "%s <cmd>" % USAGE.format(cmd)
parser = OptionParser(usage=usage)
return parser.parse_args(args)
def do_help(gopts, opts, args):
if len(args) != 1:
raise IndexError
try:
(pa, func) = CMDS[args[0]]
pa(['--help'], args[0])
except KeyError, e:
print "Invalid command : %s is an unknown command." % args[0]
sys.exit()
def connect(opts, path, cmd, data=None, passwd=None):
try:
url = getUrl(opts, path)
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
passman.add_password(None, url, opts.ovx_user, passwd)
authhandler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(authhandler)
req = buildRequest(data, url, cmd)
#ph = urllib2.urlopen(req)
ph = opener.open(req)
return parseResponse(ph.read())
except urllib2.URLError as e:
print e
sys.exit(1)
except urllib2.HTTPError as e:
if e.code == 401:
print "Authentication failed: invalid password"
sys.exit(1)
elif e.code == 504:
print "HTTP Error 504: Gateway timeout"
sys.exit(1)
else:
print e
except RuntimeError as e:
print e
def parseResponse(data):
j = json.loads(data)
if 'error' in j:
print j
sys.exit(1)
return j['result']
def printVersion(option, opt, value, parser):
    """Print ovxctl version and exit"""
    # optparse callback signature: (option, opt_str, value, parser);
    # only used via the -v/--version option in addCommonOpts.
    print "ovxctl-%s" % VERSION
    sys.exit()
def printHelp (option, opt, value, parser):
"""Print ovxctl help and exit"""
cmds = [x for x in CMDS.iterkeys()]
cmds.remove('help')
cmds.sort()
print parser.format_help().strip()
print "\n Available commands are: "
for x in cmds:
(sdesc, ldesc) = DESCS[x]
print " {0:25} {1:10}".format(x, sdesc)
print "\n See '%s help <command>' for more info on a specific command." % sys.argv[0]
sys.exit()
# Dispatch table: command name -> (argument-parser function, handler function).
# The parser is also invoked with ['--help'] by do_help.
CMDS = {
    'addControllers': (pa_addControllers, do_addControllers),
    'createNetwork': (pa_createNetwork, do_createNetwork),
    'createSwitch': (pa_createSwitch, do_createSwitch),
    'createPort': (pa_createPort, do_createPort),
    'setInternalRouting': (pa_setInternalRouting, do_setInternalRouting),
    'connectHost': (pa_connectHost, do_connectHost),
    'connectLink': (pa_connectLink, do_connectLink),
    'setLinkPath': (pa_setLinkPath, do_setLinkPath),
    'connectRoute': (pa_connectRoute, do_connectRoute),
    'removeNetwork': (pa_removeNetwork, do_removeNetwork),
    'removeSwitch': (pa_removeSwitch, do_removeSwitch),
    'removePort': (pa_removePort, do_removePort),
    'disconnectHost': (pa_disconnectHost, do_disconnectHost),
    'disconnectLink': (pa_disconnectLink, do_disconnectLink),
    'disconnectRoute': (pa_disconnectRoute, do_disconnectRoute),
    'startNetwork': (pa_startNetwork, do_startNetwork),
    'startSwitch': (pa_startSwitch, do_startSwitch),
    'startPort': (pa_startPort, do_startPort),
    'stopNetwork': (pa_stopNetwork, do_stopNetwork),
    'stopSwitch': (pa_stopSwitch, do_stopSwitch),
    'stopPort': (pa_stopPort, do_stopPort),
    'getPhysicalFlowtable': (pa_getPhysicalFlowtable, do_getPhysicalFlowtable),
    'getPhysicalHosts': (pa_getPhysicalHosts, do_getPhysicalHosts),
    'getPhysicalTopology': (pa_getPhysicalTopology, do_getPhysicalTopology),
    'listVirtualNetworks': (pa_listVirtualNetworks, do_listVirtualNetworks),
    'getVirtualAddressMapping': (pa_getVirtualAddressMapping, do_getVirtualAddressMapping),
    'getVirtualFlowtable': (pa_getVirtualFlowtable, do_getVirtualFlowtable),
    'getVirtualHosts': (pa_getVirtualHosts, do_getVirtualHosts),
    'getVirtualLinkMapping': (pa_getVirtualLinkMapping, do_getVirtualLinkMapping),
    'getVirtualSwitchMapping': (pa_getVirtualSwitchMapping, do_getVirtualSwitchMapping),
    'getVirtualTopology': (pa_getVirtualTopology, do_getVirtualTopology),
    'help' : (pa_help, do_help)
}
# Per-command (short description, long description) pairs used for help text.
# Each long description MUST be a single string (implicit concatenation):
# four entries previously used a stray comma, producing a nested tuple that
# broke OptionParser(description=...) for those commands.
DESCS = {
    'addControllers' : ("Adds controllers to a virtual switch",
        ("Adds the specified list of controllers to a given virtual switch.\n"
         "Example: addControllers <tenantId> <vdpid> <ctrlUrls>")),
    'createNetwork' : ("Creates a virtual network",
        ("Creates a virtual network. Input: protocol, controllerIP, controller port, ip address, mask. "
         "\nExample: createNetwork tcp 1.1.1.1 6634 192.168.1.0 24")),
    'createSwitch' : ("Create virtual switch",
        ("Create a virtual switch. Must specify a tenant_id, and a list of the physical_dpids that will be part of the virtual switch."
         "\nExample: createSwitch 1 00:00:00:00:00:00:00:01,00:00:00:00:00:00:00:02")),
    'createPort' : ("Create virtual port",
        ("Create a virtual port. Must specify a tenant_id, a physical_dpid and a physical_port."
         "\nExample: createPort 1 00:00:00:00:00:00:00:01 1")),
    'setInternalRouting' : ("Set big-switch internal routing mechanism",
        ("Set big-switch internal routing mechanism. Must specify a tenant_id, a virtual switch_id, the routing type (spf, manual) "
         "and the number (0-255) of the backup paths that have to be computed."
         "\nExample: setInternalRouting 1 00:00:00:00:00:00:00:01 spf 128")),
    'connectHost' : ("Connect host to a virtual port",
        ("Connect host to a virtual port. Must specify a tenant_id, a virtual switch_id, a virtual port_id and the host MAC address."
         "\nExample: connectHost 1 00:a4:23:05:00:00:00:01 1 00:00:00:00:00:01")),
    'connectLink' : ("Connect two virtual ports through a virtual link",
        ("Connect two virtual ports through a virtual link. Must specify a tenant_id, a virtual src_switch_id, a virtual src_port_id, "
         "a virtual dst_switch_id, a virtual dst_port_id, the routing type (spf, manual) and the number (0-255) of the backup paths that have to be computed."
         "\nExample: connectLink 1 00:a4:23:05:00:00:00:01 1 00:a4:23:05:00:00:00:02 1 spf 1")),
    # Example corrected: it previously showed "connectLink".
    'setLinkPath' : ("Set the physical path of a virtual link",
        ("Set the physical path of a virtual link. Must specify a tenant_id, a virtual link_id, a physical path and a priority (0-255)."
         "\nExample: setLinkPath 1 1 00:00:00:00:00:00:00:01/1-00:00:00:00:00:00:00:02/1,"
         "00:00:00:00:00:00:00:2/2-00:00:00:00:00:00:00:3/1 128")),
    'connectRoute' : ("Connect two virtual ports inside a virtual big-switch",
        ("Connect two virtual ports inside a virtual big-switch. Must specify a tenant_id, a virtual switch_id, a virtual src_port_id, "
         "a virtual dst_port_id, a physical path and a priority (0-255)."
         "\nExample: connectRoute 1 00:a4:23:05:00:00:00:01 1 2 00:00:00:00:00:00:00:01/1-00:00:00:00:00:00:00:02/1,"
         "00:00:00:00:00:00:00:2/2-00:00:00:00:00:00:00:3/1 128")),
    'removeNetwork' : ("Remove a virtual network",
        ("Remove a virtual network. Must specify a tenant_id."
         "\nExample: removeNetwork 1")),
    'removeSwitch' : ("Remove virtual switch",
        ("Remove a virtual switch. Must specify a tenant_id and a virtual switch_id."
         "\nExample: removeSwitch 1 00:a4:23:05:00:00:00:01")),
    'removePort' : ("Remove virtual port",
        ("Remove a virtual port. Must specify a tenant_id, a virtual switch_id and a virtual port_id."
         "\nExample: removePort 1 00:a4:23:05:00:00:00:01 1")),
    'disconnectHost' : ("Disconnect host from a virtual port",
        ("Disconnect host from a virtual port. Must specify a tenant_id and the host_id."
         "\nExample: disconnectHost 1 1")),
    'disconnectLink' : ("Disconnect link between two virtual ports",
        ("Disconnect link between two virtual ports. Must specify a tenant_id and the link_id."
         "\nExample: disconnectLink 1 1")),
    # Description corrected: the command requires a virtual switch_id as well.
    'disconnectRoute' : ("Disconnect big-switch internal route between two virtual ports",
        ("Disconnect big-switch internal route between two virtual ports. Must specify a tenant_id, a virtual switch_id and the route_id."
         "\nExample: disconnectRoute 1 00:a4:23:05:00:00:00:01 1")),
    'startNetwork' : ("Start a virtual network",
        ("Start a virtual network. Must specify a tenant_id."
         "\nExample: startNetwork 1")),
    'startSwitch' : ("Start a virtual switch",
        ("Start a virtual switch. Must specify a tenant_id and a virtual switch_id."
         "\nExample: startSwitch 1 00:a4:23:05:00:00:00:01")),
    'startPort' : ("Start a virtual port",
        ("Start a virtual port. Must specify a tenant_id, a virtual switch_id and a virtual port_id."
         "\nExample: startPort 1 00:a4:23:05:00:00:00:01 1")),
    'stopNetwork' : ("Stop a virtual network",
        ("Stop a virtual network. Must specify a tenant_id."
         "\nExample: stopNetwork 1")),
    'stopSwitch' : ("Shutdown a virtual switch",
        ("Shutdown a virtual switch. Must specify a tenant_id and a virtual switch_id."
         "\nExample: stopSwitch 1 00:a4:23:05:00:00:00:01")),
    'stopPort' : ("Shutdown a virtual port",
        ("Shutdown a virtual port. Must specify a tenant_id, a virtual switch_id and a virtual port_id."
         "\nExample: stopPort 1 00:a4:23:05:00:00:00:01 1")),
    # Monitoring API - admin only
    'getPhysicalFlowtable' : ("Get the physical flowtable of a specified switch or all switches",
        ("Get the physical flowtable of a specified switch or all switches. Specify optional physical switch_id."
         "\nExample: getPhysicalFlowtable 00:00:00:00:00:00:00:01")),
    'getPhysicalHosts' : ("Get a list of physical hosts",
        ("Get a list of physical hosts."
         "\nExample: getPhysicalHosts")),
    'getPhysicalTopology': ("Get the physical topology",
        ("Get the physical topology."
         "\nExample: getPhysicalTopology")),
    'listVirtualNetworks': ("Get a list of all virtual network tenant ID's",
        ("Get a list of all virtual network tenant ID's."
         "\nExample: listVirtualNetworks")),
    # Monitoring API - tenant restricted
    'getVirtualAddressMapping' : ("Get the virtual to physical address mapping for a specified virtual network",
        ("Get the virtual to physical address mapping. Must specify a virtual network tenant_id."
         "\nExample: getVirtualAddressMapping 1")),
    # Description corrected: first required argument is the tenant_id.
    'getVirtualFlowtable' : ("Get the flowtable in the specified virtual network",
        ("Get the flowtable in the specified virtual network. Must specify a tenant_id, and optional virtual switch_id."
         "\nExample: getVirtualFlowtable 1 00:a4:23:05:00:00:00:01")),
    'getVirtualHosts' : ("Get list of hosts in virtual network",
        ("Get list of hosts in virtual network. Must specify a tenant_id."
         "\nExample: getVirtualHosts 1")),
    'getVirtualLinkMapping' : ("Get the virtual to physical link mapping",
        ("Get the virtual to physical link mapping. Must specify a tenant_id."
         "\nExample: getVirtualLinkMapping 1")),
    'getVirtualSwitchMapping' : ("Get the virtual to physical switch mapping",
        ("Get the virtual to physical switch mapping. Must specify a tenant_id."
         "\nExample: getVirtualSwitchMapping 1")),
    'getVirtualTopology' : ("Get the virtual topology",
        ("Get the virtual topology. Must specify a tenant_id."
         "\nExample: getVirtualTopology 1"))
}
# Usage-string template: "%prog" is expanded by optparse; the command name
# is substituted in via USAGE.format(cmd) by each pa_* function.
USAGE="%prog {}"
# OVX API endpoint template: filled with (host, port, path) by getUrl.
URL = "http://%s:%s/%s"
def getPasswd(opts):
    """Return the admin password: empty with --no-passwd, else prompt for it."""
    return "" if opts.no_passwd else getpass.getpass("Password: ")
def addCommonOpts (parser):
    """Install the global options shared by every ovxctl invocation."""
    # NOTE: -h is repurposed for --hostname; the parser is created with
    # add_help_option=False (see parse_global_args), and --help is wired
    # to the printHelp callback below instead.
    parser.add_option("-h", "--hostname", dest="host", default="localhost",
                    help="Specify the OpenVirteX host; default='localhost'")
    parser.add_option("-p", "--port", dest="port", default="8080",
                    help="Specify the OpenVirteX web port; default=8080")
    parser.add_option("-u", "--user", dest="ovx_user", default="admin",
                    help="OpenVirtex admin user; default='admin'")
    parser.add_option("-n", "--no-passwd", action="store_true",  dest="no_passwd", default=False,
                    help="Run ovxctl with no password; default false")
    parser.add_option("-v", "--version", action="callback", callback=printVersion)
    parser.add_option("--help", action="callback", callback=printHelp)
def parse_global_args (arglist):
    """Split argv into global options and the remaining command + its args.

    Returns (parsed global options, leftover arglist starting at the first
    recognized command name, the parser itself for later help printing).
    """
    usage = "%s [options] command [command_args]" % sys.argv[0]
    global_args = []
    # Consume leading tokens until a recognized command name appears.
    while arglist and arglist[0] not in CMDS:
        global_args.append(arglist.pop(0))
    parser = OptionParser(add_help_option=False, usage=usage)
    addCommonOpts(parser)
    (opts, pargs) = parser.parse_args(global_args)
    return (opts, arglist, parser)
if __name__ == '__main__':
try:
(gopts, rargs, parser) = parse_global_args(sys.argv[1:])
if len(rargs) < 1:
raise IndexError
(parse_args, do_func) = CMDS[rargs[0]]
(opts, args) = parse_args(rargs[1:], rargs[0])
do_func(gopts, opts, args)
sys.exit(0)
except ValueError, e:
print "The argument types being sent to the function %s are incorrect. Please double check them." % sys.argv[1]
except IndexError, e:
print "%s is an unknown command" % sys.argv[-1]
except Exception, e:
print "uknown error"
printHelp(None,None,None,parser)
| |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply this migration: create the SearchToken and SearchDocument
        tables with their unique constraints.

        NOTE(review): South auto-generated code; keep edits to comments only.
        """
        # Adding model 'SearchToken'
        db.create_table('sentry_searchtoken', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('document', self.gf('django.db.models.fields.related.ForeignKey')(related_name='token_set', to=orm['sentry.SearchDocument'])),
            ('field', self.gf('django.db.models.fields.CharField')(default='text', max_length=64)),
            ('token', self.gf('django.db.models.fields.CharField')(max_length=128)),
            ('times_seen', self.gf('django.db.models.fields.PositiveIntegerField')(default=1)),
        ))
        db.send_create_signal('sentry', ['SearchToken'])

        # Adding unique constraint on 'SearchToken', fields ['document', 'field', 'token']
        db.create_unique('sentry_searchtoken', ['document_id', 'field', 'token'])

        # Adding model 'SearchDocument'
        db.create_table('sentry_searchdocument', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sentry.Project'])),
            ('group', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sentry.Group'])),
            ('total_events', self.gf('django.db.models.fields.PositiveIntegerField')(default=1)),
            ('date_added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('date_changed', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
        ))
        db.send_create_signal('sentry', ['SearchDocument'])

        # Adding unique constraint on 'SearchDocument', fields ['project', 'group']
        db.create_unique('sentry_searchdocument', ['project_id', 'group_id'])
    def backwards(self, orm):
        """Revert this migration: drop constraints first, then the tables,
        mirroring forwards() in reverse order.

        NOTE(review): South auto-generated code; keep edits to comments only.
        """
        # Removing unique constraint on 'SearchDocument', fields ['project', 'group']
        db.delete_unique('sentry_searchdocument', ['project_id', 'group_id'])

        # Removing unique constraint on 'SearchToken', fields ['document', 'field', 'token']
        db.delete_unique('sentry_searchtoken', ['document_id', 'field', 'token'])

        # Deleting model 'SearchToken'
        db.delete_table('sentry_searchtoken')

        # Deleting model 'SearchDocument'
        db.delete_table('sentry_searchdocument')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.event': {
'Meta': {'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['auth.User']"})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owned_project_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectdomain': {
'Meta': {'unique_together': "(('project', 'domain'),)", 'object_name': 'ProjectDomain'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'domain_set'", 'to': "orm['sentry.Project']"})
},
'sentry.projectmember': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'project_set'", 'to': "orm['auth.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.view': {
'Meta': {'object_name': 'View'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'verbose_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'verbose_name_plural': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'})
}
}
complete_apps = ['sentry']
| |
import arrow
from extensions import db
class Client(db.Model):
    """Graph node representing a client company.

    Keyed by ``company_id``; carries a ``Person`` label flag and a
    HAS_ONBOARD relationship to its :class:`Onboard` node.
    """
    __primarykey__ = 'company_id'
    person = db.Label()
    company_id = db.Property()
    company_name = db.Property()
    has_onboard = db.RelatedTo('Onboard')

    @staticmethod
    def create(company_id, company_name):
        """Create and persist a new client node; returns the node."""
        node = Client()
        node.person = True
        node.company_id = company_id
        node.company_name = company_name
        db.graph.create(node)
        return node

    @staticmethod
    def list_all():
        """Return every client node as a list."""
        return list(Client.select(db.graph))

    @staticmethod
    def list_all_with_compliance_status():
        """Return clients together with their onboard completion/validity."""
        query = (
            "match (c:Client)-[:HAS_ONBOARD]->(o) "
            "return c, o.completed AS completed, o.valid_onboard AS v "
            "order by c.company_name"
        )
        rows = []
        for record in db.graph.run(query):
            rows.append({
                'client': record['c'],
                'completed': record['completed'],
                'valid_onboard': record['v'],
            })
        return rows

    @staticmethod
    def list_all_with_document_status():
        """Return, per client, each still-missing document and its step."""
        query = (
            "match (c:Client)-[:HAS_ONBOARD]->()-[:MISSING_DOCUMENT]->(d)-[:FOR_STEP]->(s) "
            "return c, d, s "
            "order by c.company_name, s.step_number"
        )
        return [
            {'client': record['c'],
             'document_type': record['d']['document_type'],
             'step_number': record['s']['step_number']}
            for record in db.graph.run(query)
        ]
class Onboard(db.Model):
    """Graph node tracking one client's onboarding progress."""
    completed = db.Property()
    valid_onboard = db.Property()
    time_created = db.Property()
    time_completed = db.Property()
    has_completed = db.RelatedTo('GenericStep')
    invalid = db.RelatedTo('GenericStep')
    must_follow = db.RelatedTo('GenericProcess')
    missing_document = db.RelatedTo('GenericDocument')
    submitted_document = db.RelatedTo('GenericDocument')
    has_activity = db.RelatedTo('Activity')

    @staticmethod
    def create():
        """Create a fresh, incomplete onboard node stamped with UTC now."""
        node = Onboard()
        node.completed = False
        node.valid_onboard = True
        node.time_created = arrow.utcnow().timestamp
        node.time_completed = None
        db.graph.create(node)
        return node

    @staticmethod
    def compute_average():
        """Average completion time over finished onboards, or None if none."""
        durations = [
            each.time_completed - each.time_created
            for each in Onboard.select(db.graph)
            if each.time_completed
        ]
        if not durations:
            return None
        return int(round(float(sum(durations)) / len(durations)))
class BuildClientOnboard(object):
    """Create a client node plus its onboard node and connect them."""

    def __init__(self, company_id, company_name):
        # Both nodes are persisted immediately on construction.
        self.client = Client.create(company_id, company_name)
        self.onboard = Onboard.create()

    def init_rels(self):
        """Attach the onboard node to the client via HAS_ONBOARD."""
        self.client.has_onboard.add(self.onboard)
        db.graph.push(self.client)
        return 'initial client steps structure built'

    def init(self):
        """Build the full client structure; returns a status message."""
        self.init_rels()
        return 'initial client structure built'
class GenericProcess(db.Model):
    """Template process node: an ordered chain of GenericStep nodes."""
    has_step = db.RelatedTo('GenericStep')
    first_step = db.RelatedTo('GenericStep')
    last_step = db.RelatedTo('GenericStep')
    next = db.RelatedTo('GenericStep')
    requires_document = db.RelatedTo('GenericDocument')

    @staticmethod
    def create():
        """Create and persist an empty generic process node."""
        node = GenericProcess()
        db.graph.create(node)
        return node

    @staticmethod
    def get_steps():
        """Return a cursor over the process steps ordered by step_number."""
        query = (
            "MATCH (:GenericProcess)-[:NEXT*]->(s) "
            "RETURN s ORDER BY s.step_number"
        )
        return db.graph.run(query)
class GenericStep(db.Model):
    """A single numbered task in the generic onboarding process."""
    task_name = db.Property()
    step_number = db.Property()
    duration = db.Property()
    next = db.RelatedTo('GenericStep')
    depends_on = db.RelatedTo('GenericStep')
    needs_document = db.RelatedTo('GenericDocument')

    @staticmethod
    def create(task_name, step_number, step_duration):
        """Create and persist a step node with the given attributes."""
        node = GenericStep()
        node.task_name = task_name
        node.step_number = step_number
        node.duration = step_duration
        db.graph.create(node)
        return node

    @staticmethod
    def all():
        """Return every step node as a list."""
        return list(GenericStep.select(db.graph))

    @staticmethod
    def get_by_step_number(step_number):
        """Fetch one step by its step_number (None when absent)."""
        matches = GenericStep.select(db.graph).where(step_number=step_number)
        return matches.first()
class GenericDocument(db.Model):
    """Template document required by one step of the generic process."""
    document_id = db.Property()
    document_type = db.Property()
    for_step = db.RelatedTo('GenericStep')

    @staticmethod
    def create(document_id, document_type):
        """Create and persist a document node; returns the node."""
        node = GenericDocument()
        node.document_id = document_id
        node.document_type = document_type
        db.graph.create(node)
        return node
class BuildGenericProcess(object):
    """Build the generic onboarding process template in the graph.

    Creates the GenericProcess node, its ordered GenericStep chain, and
    the GenericDocument nodes each step requires.
    """

    def __init__(self):
        self.generic = GenericProcess.create()
        self.steps = []
        self.documents = []
        # Ordered task definitions; 'depends_on' holds indices into this list.
        self.tasks = [{
            'task_name': 'get signed contracts',
            'duration': 3
        }, {
            'task_name': 'get compliance documents',
            'duration': 4
        }, {
            'task_name': 'compliance review',
            'duration': 3,
        }, {
            'task_name': 'countersign contracts',
            'duration': 5,
            'depends_on': [0, 1, 2]
        }, {
            'task_name': 'account activation',
            'duration': 3,
            'depends_on': [3]
        }]
        # Document templates; 'for_step' indexes into self.tasks/self.steps.
        self.document_metadata = [{
            'type': 'signed contract',
            'for_step': 0
        }, {
            'type': 'personal identification',
            'for_step': 1
        }, {
            'type': 'tax identification',
            'for_step': 1
        }, {
            'type': 'articles of incorporation',
            'for_step': 1
        }, {
            'type': 'professional license',
            'for_step': 1
        }, {
            'type': 'miscellaneous',
            'for_step': 1
        }, {
            'type': 'compliance review',
            'for_step': 2
        }, {
            'type': 'countersign contracts',
            'for_step': 3
        }, {
            'type': 'account activation',
            'for_step': 4
        }]
        # Document ids are simply their positions in the metadata list.
        for doc_id, meta in enumerate(self.document_metadata):
            meta['id'] = doc_id

    def init_steps(self):
        """Create one GenericStep per task, numbered in order."""
        for number, task in enumerate(self.tasks):
            step = GenericStep.create(task['task_name'], number, task['duration'])
            self.steps.append(step)
        return self.steps

    def init_steps_rels(self):
        """Wire the step chain: NEXT links, first/last markers, dependencies."""
        last_index = len(self.steps) - 1
        previous = None
        for index, (step, task) in enumerate(zip(self.steps, self.tasks)):
            if previous:
                previous.next.add(step)
                db.graph.push(previous)
            if index == 0:
                self.generic.first_step.add(step)
                self.generic.next.add(step)
            if index == last_index:
                self.generic.last_step.add(step)
            self.generic.has_step.add(step)
            if task.get('depends_on') is not None:
                for dep_index in task['depends_on']:
                    step.depends_on.add(self.steps[dep_index])
                db.graph.push(step)
            previous = step
        db.graph.push(self.generic)
        return 'generic process steps structure built'

    def init_docs(self):
        """Create one GenericDocument node per metadata entry."""
        for meta in self.document_metadata:
            self.documents.append(GenericDocument.create(meta['id'], meta['type']))
        return self.documents

    def init_docs_rels(self):
        """Attach every document to the process via REQUIRES_DOCUMENT."""
        for doc in self.documents:
            self.generic.requires_document.add(doc)
        db.graph.push(self.generic)
        return 'generic process document structure built'

    def init_docs_steps_rels(self):
        """Point each document at the step it is needed for."""
        for doc, meta in zip(self.documents, self.document_metadata):
            doc.for_step.add(self.steps[meta['for_step']])
            db.graph.push(doc)
        return 'generic process document step structure built'

    def init(self):
        """Build the whole template: steps, documents, and their links."""
        self.init_steps()
        self.init_steps_rels()
        self.init_docs()
        self.init_docs_rels()
        self.init_docs_steps_rels()
        return 'generic process structure built'
class BuildOnboardGenericProcess(object):
    """Attach a client's onboard node to the generic process template."""

    def __init__(self, company_id):
        client = Client.select(db.graph).where(company_id=company_id).first()
        # Use the first (and only) onboard node hanging off the client.
        self.onboard = list(client.has_onboard)[0]
        self.generic = GenericProcess.select(db.graph).first()

    def init_rels(self):
        """Add MUST_FOLLOW plus a MISSING_DOCUMENT link per template document."""
        self.onboard.must_follow.add(self.generic)
        for document in GenericDocument.select(db.graph):
            self.onboard.missing_document.add(document)
        db.graph.push(self.onboard)
        return "onboarding rels added"

    def init(self):
        """Build the onboard/process linkage; returns a status message."""
        self.init_rels()
        return 'onboarding structure added to client'
class Activity(db.Model):
    """Graph node anchoring the chain of Action nodes for an onboard."""
    action_taken = db.RelatedTo('Action')
    first_action = db.RelatedTo('Action')
    last_action = db.RelatedTo('Action')

    @staticmethod
    def create():
        """Create and persist an empty activity node."""
        node = Activity()
        db.graph.create(node)
        return node
class Action(db.Model):
    """One recorded action in an onboard's activity chain."""
    number = db.Property()
    taken_at = db.Property()
    has_completed = db.RelatedTo('GenericStep')
    action_taken = db.RelatedTo('Action')

    @staticmethod
    def create(company_id):
        """Create a numbered action stamped with the current UTC time."""
        now = arrow.utcnow()
        action = Action()
        # Number is the count of actions already taken for this client
        # (None when the required graph structure is absent).
        action.number = action.get_num_actions(company_id)
        action.taken_at = now.timestamp
        db.graph.create(action)
        return action

    def _is_client_onboard_structure_built(self, company_id):
        """True when the client has a HAS_ONBOARD relationship."""
        # NOTE(review): company_id is spliced into the Cypher text; use query
        # parameters if ids can be user-supplied.
        query = (
            "match (:Client {company_id: '%s'})-[r:HAS_ONBOARD]->()"
            "return r" % company_id
        )
        return db.graph.run(query).forward()

    def _is_onboard_activity_structure_built(self, company_id):
        """True when the client's onboard has a HAS_ACTIVITY relationship."""
        query = (
            "match (:Client {company_id: '%s'})-[:HAS_ONBOARD]->()-[r:HAS_ACTIVITY]->()"
            "return r" % company_id
        )
        return db.graph.run(query).forward()

    def _structure_is_built(self, company_id):
        """True when both onboard and activity structures exist."""
        return bool(self._is_client_onboard_structure_built(company_id)
                    and self._is_onboard_activity_structure_built(company_id))

    def get_num_actions(self, company_id):
        """Count actions already taken, or None when structure is missing."""
        if not self._structure_is_built(company_id):
            return None
        cursor = db.graph.run((
            "match (:Client {company_id: '%s'})-[:HAS_ONBOARD]->()-[:HAS_ACTIVITY]->()-[:ACTION_TAKEN*]->(a) "
            "return count(a) as num_actions" % company_id
        ))
        return cursor.next()['num_actions']

    def add_has_completed_rel(self, company_id, step_number):
        """Mark the given step as completed by this action.

        Raises LookupError when the onboard/activity structure is missing.
        """
        if not self._structure_is_built(company_id):
            raise LookupError('required graph structure missing')
        step = GenericStep.get_by_step_number(step_number)
        self.has_completed.add(step)
        db.graph.push(self)
        return self
class BuildOnboardActivity(object):
    """Create an Activity node and hang it off a client's onboard."""

    def __init__(self, company_id):
        client = Client.select(db.graph).where(company_id=company_id).first()
        self.onboard = list(client.has_onboard)[0]
        self.activity = Activity.create()

    def init_activity_rels(self):
        """Attach the new activity node via HAS_ACTIVITY."""
        self.onboard.has_activity.add(self.activity)
        db.graph.push(self.onboard)
        return 'built onboard has activity structure'

    def init(self):
        """Build the activity structure; returns a status message."""
        self.init_activity_rels()
        return 'built onboard activity structure'
class BuildAction(object):
    """Append Action nodes to a client's onboard activity chain.

    Assumes BuildClientOnboard and BuildOnboardActivity have already run
    for the client so HAS_ONBOARD and HAS_ACTIVITY exist; otherwise
    __init__ raises IndexError.
    """
    def __init__(self, company_id):
        self.company_id = company_id
        # First (and only) onboard node hanging off the client.
        self.onboard = list(Client.select(db.graph).where(
            company_id=company_id
        ).first().has_onboard)[0]
        # First (and only) activity node hanging off the onboard.
        self.activity = [_ for _ in self.onboard.has_activity][0]
        self.actions = None  # populated lazily by _update_actions()
    def _num_dependencies(self, step_number):
        """Count all (transitive) DEPENDS_ON ancestors of the given step."""
        # NOTE(review): step_number/company_id are interpolated into Cypher
        # text throughout this class; prefer query parameters if these values
        # can ever be user-supplied.
        cursor = db.graph.run((
            "match (s:GenericStep)-[:DEPENDS_ON*]->(ds) "
            "where s.step_number=%d "
            "return count(ds) AS num_depends" % step_number
        ))
        return cursor.next()['num_depends']
    def _completed_dependencies(self, step_number):
        """Return step_numbers of the step's dependencies that an action in
        this client's activity chain has already completed."""
        cursor = db.graph.run((
            "match (s:GenericStep {step_number: %d})-[:DEPENDS_ON*]->(ds) "
            "match (ds)<-[:HAS_COMPLETED]-(action) "
            "match (:Client {company_id: '%s'})-[:HAS_ONBOARD]->()-[:HAS_ACTIVITY]->(activity) "
            "match (activity)-[:ACTION_TAKEN*]->(action) "
            "return distinct ds order by ds.step_number" % (step_number, self.company_id)
        ))
        return [result['ds']['step_number'] for result in cursor]
    def _depends_satisfied(self, step_number):
        """True when every dependency of the step has been completed."""
        number_of_depends = self._num_dependencies(step_number)
        completed_depends = self._completed_dependencies(step_number)
        if number_of_depends == len(completed_depends):
            return True
        return False
    def _mark_onboard_complete(self):
        """Stamp the onboard node as completed at the current UTC time."""
        a = arrow.utcnow()
        self.onboard.completed = True
        self.onboard.time_completed = a.timestamp
        db.graph.push(self.onboard)
        return 'onboard process marked complete'
    def _step_aware_mark_onboard_complete(self):
        '''will mark the onboard process as complete if all the generic steps have been completed'''
        # Note: returns the 'not complete' message even when it just marked
        # the onboard complete; callers ignore the return value.
        if len(list(self.onboard.has_completed)) == len(GenericStep.all()):
            self._mark_onboard_complete()
        return 'onboard process not complete'
    def _mark_step_complete(self, step_number):
        """Add a HAS_COMPLETED edge from the onboard to the given step."""
        step = GenericStep.select(db.graph).where(
            step_number=step_number
        ).first()
        self.onboard.has_completed.add(step)
        db.graph.push(self.onboard)
        return "marked step %d as complete" % step_number
    def _mark_step_invalid(self, step_number):
        """Flag the step as invalid and the whole onboard as not valid."""
        step = GenericStep.select(db.graph).where(
            step_number=step_number
        ).first()
        self.onboard.invalid.add(step)
        self.onboard.valid_onboard = False
        db.graph.push(self.onboard)
        return "marked step %d as invalid" % step_number
    def _dependency_aware_mark_step_complete(self, step_number):
        """Complete the step; additionally mark it invalid when its
        dependencies are not yet satisfied."""
        if self._depends_satisfied(step_number):
            self._mark_step_complete(step_number)
            return "step marked as valid and complete"
        self._mark_step_complete(step_number)
        self._mark_step_invalid(step_number)
        return "step marked as invalid and complete"
    def aware_mark_step_complete(self, step_number):
        """Record a step completion and update the onboard's overall state."""
        self._dependency_aware_mark_step_complete(step_number)
        self._step_aware_mark_onboard_complete()
        return "recorded action for step %d and appropriately adjusted onboard activity" % step_number
    def _update_actions(self):
        """Cache and return every action reachable from any Activity node.

        NOTE(review): the query is not scoped to this client's activity —
        confirm whether that is intended.
        """
        cursor = db.graph.run((
            "match (:Activity)-[:ACTION_TAKEN*]->(action) "
            "return action"
        ))
        self.actions = [_ for _ in cursor]
        return self.actions
    def _is_first_action(self):
        """True when the client's activity chain has no actions yet."""
        cursor = db.graph.run((
            "match (:Client {company_id: '%s'})-[:HAS_ONBOARD]->()-[:HAS_ACTIVITY]->()-[:ACTION_TAKEN]->(action) "
            "return action" % self.company_id
        ))
        return not cursor.forward()
    def _add_first_action(self):
        """Create the first action; it is both first and last in the chain."""
        action = Action.create(self.company_id)
        db.graph.push(action)
        self.activity.action_taken.add(action)
        self.activity.first_action.add(action)
        self.activity.last_action.add(action)
        db.graph.push(self.activity)
        return action
    def _get_and_move_last_action(self, new_action):
        """Chain the previous last action to new_action and move the
        LAST_ACTION marker; returns the previous last action."""
        last_action = [_ for _ in self.activity.last_action][0]
        last_action.action_taken.add(new_action)
        self.activity.last_action.remove(last_action)
        self.activity.last_action.add(new_action)
        db.graph.push(self.activity)
        db.graph.push(last_action)
        return last_action
    def _add_next_action(self):
        """Append a new action to the end of a non-empty chain."""
        new_action = Action.create(self.company_id)
        db.graph.push(new_action)
        last_action = self._get_and_move_last_action(new_action)
        db.graph.push(last_action)
        return new_action
    def _new_action(self):
        """Create the next action, handling the empty-chain case."""
        if self._is_first_action():
            return self._add_first_action()
        return self._add_next_action()
    def new_action(self, step_number):
        '''add a new action node optionally marking a step as completed'''
        action = self._new_action()
        action.add_has_completed_rel(self.company_id, step_number)
        # self.aware_mark_step_complete(step_number)
        return action
class UpdateClientOnboard(object):
    '''logic for updating the onboard node and structure in vicinity'''

    def __init__(self, company_id):
        self.company_id = company_id
        client = Client.select(db.graph).where(company_id=company_id).first()
        # First (and only) onboard node hanging off the client.
        self.onboard = list(client.has_onboard)[0]

    def submit_document(self, document_id):
        """Move a document from missing to submitted on the onboard node."""
        document = GenericDocument.select(db.graph).where(
            document_id=document_id
        ).first()
        self.onboard.submitted_document.add(document)
        self.onboard.missing_document.remove(document)
        db.graph.push(self.onboard)
        return 'marked document_%d as submitted' % document_id
class Company(db.Model):
    """Employer node, keyed by company name."""
    __primarykey__ = 'name'
    name = db.Property()

    @staticmethod
    def push(company_name):
        """Merge a company node with the given name into the graph."""
        node = Company()
        node.name = company_name
        db.graph.push(node)
        return node
class Employee(db.Model):
    """Employee node, keyed by employee id, carrying a Person label flag."""
    __primarykey__ = 'id'
    person = db.Label()
    id = db.Property()
    email = db.Property()
    first_name = db.Property()
    last_name = db.Property()
    street_address = db.Property()
    city = db.Property()
    state = db.Property()
    zip_code = db.Property()
    works_for = db.RelatedTo('Company')
    worked_on = db.RelatedTo('Project')
    has_access_to = db.RelatedTo('Application')

    @staticmethod
    def push(employee_id, employee_email):
        """Merge an employee node with the given id and email."""
        node = Employee()
        node.person = True
        node.id = employee_id
        node.email = employee_email
        db.graph.push(node)
        return node
class BuildEmployeeCompany(object):
    """Create an employee/company pair and link them via WORKS_FOR."""

    def __init__(self, employee_id, employee_email, company_name):
        self.employee = Employee.push(employee_id, employee_email)
        self.company = Company.push(company_name)

    def init_rels(self):
        """Attach the employee to the company."""
        self.employee.works_for.add(self.company)
        db.graph.push(self.employee)
        return 'initial employee structure built'

    def init(self):
        """Build the full employee/company structure."""
        self.init_rels()
        return 'built employee company structure'
class Project(db.Model):
    """Work item connecting an employee's effort to a client onboard."""
    for_onboard = db.RelatedTo('Onboard')
    for_client = db.RelatedTo('Client')
    accessed_step = db.RelatedTo('GenericStep')

    @staticmethod
    def create():
        """Create and persist an empty project node."""
        node = Project()
        db.graph.create(node)
        return node
class BuildEmployeeInvolvement(object):
    """Link an employee to a client's onboard through a new Project node."""

    def __init__(self, employee_id, client_id):
        self.employee = Employee.select(db.graph).where(
            "_.id='%s'" % employee_id
        ).first()
        self.project = Project.create()
        self.client = Client.select(db.graph).where(
            "_.company_id='%s'" % client_id
        ).first()
        # First (and only) onboard node hanging off the client.
        self.onboard = list(self.client.has_onboard)[0]

    def init_rels(self):
        """Wire employee -> project -> onboard/client relationships."""
        self.employee.worked_on.add(self.project)
        self.project.for_onboard.add(self.onboard)
        self.project.for_client.add(self.client)
        db.graph.push(self.employee)
        db.graph.push(self.project)
        return 'added employee involvement'

    def init(self):
        """Build the involvement structure; returns a status message."""
        self.init_rels()
        return 'built employee involvement structure'
class UpdateEmployeeAccess(object):
    """Record which generic process steps an employee's project touched."""
    def __init__(self, employee_id):
        self.employee_id = employee_id
    def update_step_access(self, client_id, step_number):
        """MERGE an ACCESSED_STEP edge from the employee's project for
        ``client_id`` to the step numbered ``step_number``; returns the
        query cursor.

        NOTE(review): employee_id/client_id/step_number are interpolated
        directly into the Cypher text; if any of these can be user-supplied,
        switch to query parameters to avoid Cypher injection.
        """
        return db.graph.run(
            "MATCH (e:Employee)-[:WORKED_ON]->(p:Project)-[:FOR_CLIENT]->(c:Client) " +
            "WHERE e.id='%s' AND c.company_id='%s' " % (self.employee_id, client_id) +
            "MATCH (c)-[:HAS_ONBOARD]->()-[:MUST_FOLLOW]->()-[:HAS_STEP]->(s) " +
            "WHERE s.step_number=%s " % str(step_number) +
            "MERGE (p)-[:ACCESSED_STEP]->(s) " +
            "RETURN e"
        )
class Application(db.Model):
    """Application node (CRM / ERP / compliance), keyed by name.

    The label fields mark which kind of application the node represents.
    """
    __primarykey__ = 'name'
    crm = db.Label()
    erp = db.Label()
    compliance = db.Label()
    cloud = db.Label()
    name = db.Property()
    # accessed_by = db.RelatedFrom('Employee')
    uses_database = db.RelatedTo('Database')

    @staticmethod
    def push_crm(app_name):
        """Merge a cloud CRM application node."""
        node = Application()
        node.crm = True
        node.cloud = True
        node.name = app_name
        db.graph.push(node)
        return node

    @staticmethod
    def push_erp(app_name):
        """Merge an ERP application node."""
        node = Application()
        node.erp = True
        node.name = app_name
        db.graph.push(node)
        return node

    @staticmethod
    def push_compliance(app_name):
        """Merge a compliance application node."""
        node = Application()
        node.compliance = True
        node.name = app_name
        db.graph.push(node)
        return node
class Database(db.Model):
    """Backing database node for one or more applications."""
    type = db.Property()
    in_use_by = db.RelatedFrom('Application')

    @staticmethod
    def push(database_type):
        """Merge a database node of the given type."""
        node = Database()
        node.type = database_type
        db.graph.push(node)
        return node
class BuildCrmDatabase(object):
    """Create a CRM application node and wire it to its database."""

    def __init__(self, app_name, database_type):
        self.crm_app = Application.push_crm(app_name)
        self.database = Database.push(database_type)

    def build(self):
        """Link the CRM app to its database via USES_DATABASE."""
        self.crm_app.uses_database.add(self.database)
        db.graph.push(self.crm_app)
        db.graph.push(self.database)
        return 'structure built'
class BuildErpDatabase(object):
    """Create an ERP application node and wire it to its database."""

    def __init__(self, app_name, database_type):
        self.erp_app = Application.push_erp(app_name)
        self.database = Database.push(database_type)

    def build(self):
        """Link the ERP app to its database via USES_DATABASE."""
        self.erp_app.uses_database.add(self.database)
        db.graph.push(self.erp_app)
        db.graph.push(self.database)
        return 'structure built'
class BuildComplianceDatabase(object):
    """Create a compliance application node and wire it to its database."""

    def __init__(self, app_name, database_type):
        self.comp_app = Application.push_compliance(app_name)
        self.database = Database.push(database_type)

    def build(self):
        """Link the compliance app to its database via USES_DATABASE."""
        self.comp_app.uses_database.add(self.database)
        db.graph.push(self.comp_app)
        db.graph.push(self.database)
        return 'structure built'
class EmployeeAppAccess(object):
    """Grant an employee HAS_ACCESS_TO an application selected by label."""

    def __init__(self, app_label, employee_id):
        '''app label means node label for application node'''
        self.app_label = app_label
        self.employee_id = employee_id

    def build(self):
        """Look up both nodes and add the HAS_ACCESS_TO relationship."""
        worker = Employee.select(db.graph).where(
            "_.id='%s'" % self.employee_id
        ).first()
        target_app = Application.select(db.graph).where(
            "'%s' IN labels(_)" % self.app_label
        ).first()
        worker.has_access_to.add(target_app)
        db.graph.push(worker)
        return 'built employee app access'
def build_model():
    '''builds a sample data set using the model'''
    COMPANY_ID_1 = 'company_id_1'
    COMPANY_ID_2 = 'company_id_2'
    # build the generic onboard process in the database
    generic = BuildGenericProcess()
    generic.init()
    # initialize a new client by creating client and onboard structure
    client_1 = BuildClientOnboard(COMPANY_ID_1, 'company_name_1')
    client_1.init()
    client_2 = BuildClientOnboard(COMPANY_ID_2, 'company_name_2')
    client_2.init()
    # initialize the structures for a clients onboard and the generic process
    cli_1_onboard = BuildOnboardGenericProcess(COMPANY_ID_1)
    cli_1_onboard.init()
    cli_2_onboard = BuildOnboardGenericProcess(COMPANY_ID_2)
    cli_2_onboard.init()
    # initialize some employees
    employee_1 = BuildEmployeeCompany('employee_id_1', 'employee_email_1', 'Citi')
    employee_1.init()
    employee_2 = BuildEmployeeCompany('employee_id_2', 'employee_email_2', 'Citi')
    employee_2.init()
    # mark employees as involved in work with particular clients
    empl_cust_involve_1 = BuildEmployeeInvolvement('employee_id_1', COMPANY_ID_1)
    empl_cust_involve_1.init()
    empl_cust_involve_2 = BuildEmployeeInvolvement('employee_id_2', COMPANY_ID_2)
    empl_cust_involve_2.init()
    # track which steps have been accessed by a given employee
    customer_access_1 = UpdateEmployeeAccess('employee_id_1')
    customer_access_1.update_step_access(COMPANY_ID_1, 4)
    customer_access_2 = UpdateEmployeeAccess('employee_id_2')
    customer_access_2.update_step_access(COMPANY_ID_2, 2)
    # create some databases for the company
    crm = BuildCrmDatabase('Salesforce', 'cloud')
    crm.build()
    erp = BuildErpDatabase('SAP', 'Oracle1')
    erp.build()
    compliance = BuildComplianceDatabase('Actimize', 'SqlServer1')
    compliance.build()
    # build some structure to show which employees access which databases
    app_access_1 = EmployeeAppAccess('Crm', 'employee_id_1')
    app_access_1.build()
    app_access_2 = EmployeeAppAccess('Erp', 'employee_id_2')
    app_access_2.build()
    app_access_3 = EmployeeAppAccess('Compliance', 'employee_id_2')
    app_access_3.build()
    # NOTE(review): UpdateClientOnboard defines no aware_mark_step_complete
    # method (it lives on BuildAction), so the calls below raise
    # AttributeError at runtime.  The intended fix probably builds the
    # activity structure (BuildOnboardActivity) and uses BuildAction
    # instead -- confirm intent before changing.
    update_cli_1 = UpdateClientOnboard(COMPANY_ID_1)
    for i in range(len(GenericStep.all())):
        update_cli_1.aware_mark_step_complete(i)
    update_cli_2 = UpdateClientOnboard(COMPANY_ID_2)
    update_cli_2.aware_mark_step_complete(3)
    return 'model built'
def build_clients():
    """Create two additional sample clients with onboard structures."""
    specs = [
        ('another-test-comp-id', 'another-test-comp-name'),
        ('one-more-test-comp-id', 'one-more-test-comp-name'),
    ]
    for company_id, company_name in specs:
        BuildClientOnboard(company_id, company_name).init_rels()
    return 'clients created'
| |
#!/usr/bin/env python
from unittest import main, TestCase
import numpy as np
import pandas as pd
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal, assert_index_equal, \
assert_series_equal
from neurokernel.pm import BasePortMapper, PortMapper
class test_base_port_mapper(TestCase):
    """Tests for BasePortMapper (port selector <-> integer index mapping)."""

    def test_create(self):
        # Constructing from a selector plus explicit map must yield the
        # expected MultiIndex-backed Series.
        portmap = np.arange(5)
        pm = BasePortMapper('/foo[0:5]', portmap)
        s = pd.Series(np.arange(5),
                      pd.MultiIndex(levels=[['foo'], [0, 1, 2, 3, 4]],
                                    labels=[[0, 0, 0, 0, 0],
                                            [0, 1, 2, 3, 4]],
                                    names=[0, 1]))
        assert_series_equal(pm.portmap, s)

    def test_from_pm(self):
        # Ensure that modifying pm0 doesn't modify any other mapper created from it:
        pm0 = BasePortMapper('/foo[0:5]', np.arange(5))
        pm1 = BasePortMapper('/foo[0:5]', np.arange(5))
        pm2 = BasePortMapper.from_pm(pm0)
        pm0.portmap[('foo', 0)] = 10
        assert_series_equal(pm2.portmap, pm1.portmap)

    def test_copy(self):
        # Ensure that modifying pm0 doesn't modify any other mapper created from it:
        pm0 = BasePortMapper('/foo[0:5]', np.arange(5))
        pm1 = BasePortMapper('/foo[0:5]', np.arange(5))
        pm2 = pm0.copy()
        pm0.portmap[('foo', 0)] = 10
        assert_series_equal(pm2.portmap, pm1.portmap)

    def test_len(self):
        pm = BasePortMapper('/foo[0:5],/bar[0:5]')
        assert len(pm) == 10

    def test_equals(self):
        # Check that mappers containing the same ports/indices are deemed equal:
        pm0 = BasePortMapper('/foo[0:5],/bar[0:5]')
        pm1 = BasePortMapper('/foo[0:5],/bar[0:5]')
        assert pm0.equals(pm1)
        assert pm1.equals(pm0)
        # Check that mappers containing the same ports/indices in
        # different orders are deemed equal:
        pm0 = BasePortMapper('/foo[0:5],/bar[0:5]', range(10))
        pm1 = BasePortMapper('/bar[0:5],/foo[0:5]', range(5, 10)+range(5))
        assert pm0.equals(pm1)
        assert pm1.equals(pm0)
        # Check that mappers containing different ports/indices are deemed non-equal:
        pm0 = BasePortMapper('/foo[0:5],/bar[1:5]/bar[0]')
        pm1 = BasePortMapper('/foo[0:5],/bar[0:5]')
        assert not pm0.equals(pm1)
        assert not pm1.equals(pm0)

    def test_from_index(self):
        # Without a specified port map:
        pm0 = BasePortMapper('/foo[0:5],/bar[0:5]')
        pm1 = BasePortMapper.from_index(pm0.index)
        assert_series_equal(pm0.portmap, pm1.portmap)
        # With a specified port map:
        pm0 = BasePortMapper('/foo[0:5],/bar[0:5]', range(5)*2)
        pm1 = BasePortMapper.from_index(pm0.index, range(5)*2)
        assert_series_equal(pm0.portmap, pm1.portmap)
        # Ensure that modifying the map sequence used to create the
        # port mapper doesn't have the side effect of altering the created
        # mapper:
        index = pd.MultiIndex(levels=[[u'foo'], [0, 1, 2, 3, 4]],
                              labels=[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]],
                              names=[0, 1])
        portmap = np.arange(5)
        pm1 = BasePortMapper.from_index(index, portmap)
        portmap[0] = 10
        assert_array_equal(pm1.portmap.values, np.arange(5))

    def test_inds_to_ports(self):
        # Without a specified port map:
        pm = BasePortMapper('/foo[0:5],/bar[0:5]')
        self.assertSequenceEqual(pm.inds_to_ports([4, 5]),
                                 [('foo', 4), ('bar', 0)])
        # With a specified port map:
        pm = BasePortMapper('/foo[0:5],/bar[0:5]', range(10, 20))
        self.assertSequenceEqual(pm.inds_to_ports([14, 15]),
                                 [('foo', 4), ('bar', 0)])

    def test_ports_to_inds(self):
        # Without a specified port map:
        pm = BasePortMapper('/foo[0:5],/bar[0:5]')
        # BUG FIX: np.allclose() returns a bool that was previously
        # discarded, so these checks never actually ran; assert the result.
        assert np.allclose(pm.ports_to_inds('/foo[4],/bar[0]'), [4, 5])
        # Nonexistent ports should return an empty index array:
        i = pm.ports_to_inds('/baz')
        assert len(i) == 0 and i.dtype == np.int64
        # With a specified port map:
        pm = BasePortMapper('/foo[0:5],/bar[0:5]', range(10, 20))
        assert np.allclose(pm.ports_to_inds('/foo[4],/bar[0]'), [14, 15])
        i = pm.ports_to_inds('/baz')
        assert len(i) == 0 and i.dtype == np.int64

    def test_get_map(self):
        # Try to get selector that is in the mapper:
        pm = BasePortMapper('/foo[0:5],/bar[0:5]')
        self.assertSequenceEqual(pm.get_map('/bar[0:5]').tolist(), range(5, 10))
        # Try to get selector that is not in the mapper:
        self.assertSequenceEqual(pm.get_map('/foo[5:10]').tolist(), [])

    def test_set_map(self):
        pm = BasePortMapper('/foo[0:5],/bar[0:5]')
        pm.set_map('/bar[0:5]', range(5))
        self.assertSequenceEqual(pm.portmap.ix[5:10].tolist(), range(5))
class test_port_mapper(TestCase):
    """Unit tests for PortMapper (maps port selectors to a data array).

    Fixes applied relative to the original:
    - several np.allclose() results were discarded instead of asserted,
      so those checks could never fail;
    - np.bool (removed in NumPy >= 1.24) replaced with the builtin bool;
    - Series.ix (removed from pandas) replaced with positional iloc.
    """

    def setUp(self):
        # Shared random payload for the mappers under test.
        self.data = np.random.rand(20)

    def test_create(self):
        # Empty selector, empty data:
        pm = PortMapper('')
        assert_series_equal(pm.portmap, pd.Series([], dtype=np.int64))
        assert_array_equal(pm.data, np.array([]))
        # Non-empty selector, empty data:
        pm = PortMapper('/foo[0:3]')
        assert_series_equal(pm.portmap,
                            pd.Series(np.arange(3),
                                      pd.MultiIndex(levels=[['foo'], [0, 1, 2]],
                                                    labels=[[0, 0, 0], [0, 1, 2]],
                                                    names=[0, 1])))
        assert_array_equal(pm.data, np.array([]))
        # Empty selector, non-empty data:
        self.assertRaises(Exception, PortMapper, '', [1, 2, 3])
        # Non-empty selector, non-empty data:
        data = np.random.rand(5)
        portmap = np.arange(5)
        pm = PortMapper('/foo[0:5]', data, portmap)
        assert_array_equal(pm.data, data)
        s = pd.Series(np.arange(5),
                      pd.MultiIndex(levels=[['foo'], [0, 1, 2, 3, 4]],
                                    labels=[[0, 0, 0, 0, 0],
                                            [0, 1, 2, 3, 4]],
                                    names=[0, 1]))
        assert_series_equal(pm.portmap, s)

    def test_from_pm(self):
        # Ensure that modifying pm0 doesn't modify any other mapper created from it:
        data = np.random.rand(5)
        portmap = np.arange(5)
        pm0 = PortMapper('/foo[0:5]', data, portmap)
        pm1 = PortMapper('/foo[0:5]', data, portmap)
        pm2 = PortMapper.from_pm(pm0)
        data[0] = 1.0
        pm0.data[1] = 1.0
        pm0.portmap[('foo', 0)] = 10
        assert_array_equal(pm2.data, pm1.data)
        assert_series_equal(pm2.portmap, pm1.portmap)

    def test_copy(self):
        # Ensure that modifying pm0 doesn't modify any other mapper created from it:
        data = np.random.rand(5)
        portmap = np.arange(5)
        pm0 = PortMapper('/foo[0:5]', data, portmap)
        pm1 = PortMapper('/foo[0:5]', data, portmap)
        pm2 = pm0.copy()
        data[0] = 1.0
        pm0.data[1] = 1.0
        pm0.portmap[('foo', 0)] = 10
        assert_array_equal(pm2.data, pm1.data)
        assert_series_equal(pm2.portmap, pm1.portmap)
        # With copy disabled, the mapper shares the caller's buffer:
        data = np.random.rand(5)
        pm0 = PortMapper('/foo[0:5]', data, portmap, False)
        pm1 = pm0.copy()
        data[0] = 1.0
        assert pm0.data[0] == 1.0

    def test_dtype(self):
        pm = PortMapper('/foo/bar[0:10],/foo/baz[0:10]', self.data)
        assert pm.dtype == np.float64

    def test_equals(self):
        # Same selector, same data:
        pm0 = PortMapper('/foo/bar[0:10],/foo/baz[0:10]', self.data)
        pm1 = PortMapper('/foo/bar[0:10],/foo/baz[0:10]', self.data)
        assert pm0.equals(pm1)
        assert pm1.equals(pm0)
        # Different selectors:
        pm0 = PortMapper('/foo/bar[0:10],/foo/baz[0:10]', self.data)
        pm1 = PortMapper('/foo/bar[0:10],/foo/baz[1:10],/foo/baz[0]', self.data)
        assert not pm0.equals(pm1)
        assert not pm1.equals(pm0)
        # Different data:
        pm0 = PortMapper('/foo/bar[0:10],/foo/baz[0:10]', np.arange(20))
        pm1 = PortMapper('/foo/bar[0:10],/foo/baz[0:10]',
                         np.concatenate((np.arange(10), np.arange(10))))
        assert not pm0.equals(pm1)
        assert not pm1.equals(pm0)

    def test_get(self):
        # Mapper with data:
        pm = PortMapper('/foo/bar[0:10],/foo/baz[0:10]', self.data)
        # BUG FIX: assert the comparison (its result used to be dropped).
        assert np.allclose(self.data[0:10], pm['/foo/bar[0:10]'])
        pm = PortMapper('/foo/bar[0:10],/foo/baz[0:10]')
        # Mapper without data:
        self.assertRaises(Exception, pm.__getitem__, '/foo/bar[0]')

    def test_get_discontinuous(self):
        pm = PortMapper('/foo/bar[0:10],/foo/baz[0:10]', self.data)
        # BUG FIX: assert the comparison (its result used to be dropped).
        assert np.allclose(self.data[[0, 2, 4, 6]],
                           pm['/foo/bar[0,2,4,6]'])

    def test_get_sub(self):
        pm = PortMapper('/foo/bar[0:5],/foo/baz[0:5]', self.data,
                        np.arange(5, 15))
        # BUG FIX: assert the comparison (its result used to be dropped).
        assert np.allclose(self.data[5:10], pm['/foo/bar[0:5]'])

    def test_get_ports(self):
        pm = PortMapper('/foo/bar[0:10]', np.arange(10))
        self.assertSequenceEqual(pm.get_ports(lambda x: x < 5),
                                 [('foo', 'bar', 0),
                                  ('foo', 'bar', 1),
                                  ('foo', 'bar', 2),
                                  ('foo', 'bar', 3),
                                  ('foo', 'bar', 4)])
        # np.bool was removed in NumPy 1.24; the builtin is equivalent.
        i = np.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0], dtype=bool)
        self.assertSequenceEqual(pm.get_ports(i),
                                 [('foo', 'bar', 0),
                                  ('foo', 'bar', 1),
                                  ('foo', 'bar', 2),
                                  ('foo', 'bar', 3),
                                  ('foo', 'bar', 4)])

    def test_get_ports_as_inds(self):
        pm = PortMapper('/foo[0:5]', np.array([0, 1, 0, 1, 0]))
        # BUG FIX: assert the comparison; np.bool -> bool as above.
        assert np.allclose(
            pm.get_ports_as_inds(lambda x: np.asarray(x, dtype=bool)),
            [1, 3])

    def test_get_ports_nonzero(self):
        pm = PortMapper('/foo[0:5]', np.array([0, 1, 0, 1, 0]))
        self.assertSequenceEqual(pm.get_ports_nonzero(),
                                 [('foo', 1),
                                  ('foo', 3)])

    def test_set_scalar(self):
        pm = PortMapper('/foo/bar[0:10],/foo/baz[0:10]', self.data)
        pm['/foo/baz[0:5]'] = 1.0
        assert_array_equal(np.ones(5), pm['/foo/baz[0:5]'])

    def test_set_array(self):
        # Valid empty:
        pm = PortMapper('/foo/bar[0:10],/foo/baz[0:10]')
        new_data = np.arange(10).astype(np.double)
        pm['/foo/bar[0:10]'] = new_data
        assert_array_equal(new_data, pm.data[0:10])
        # Valid nonempty:
        pm = PortMapper('/foo/bar[0:10],/foo/baz[0:10]', self.data)
        new_data = np.arange(10).astype(np.double)
        pm['/foo/bar[0:10]'] = new_data
        assert_array_equal(new_data, pm.data[0:10])

    def test_set_discontinuous(self):
        pm = PortMapper('/foo/bar[0:10],/foo/baz[0:10]', self.data)
        pm['/foo/*[0:2]'] = 1.0
        # BUG FIX: assert the comparison (its result used to be dropped).
        assert np.allclose(np.ones(4), pm['/foo/*[0:2]'])

    def test_get_by_inds(self):
        data = np.random.rand(3)
        pm = PortMapper('/foo[0:3]', data)
        assert_array_equal(data[[0, 1]], pm.get_by_inds([0, 1]))

    def test_set_by_inds(self):
        data = np.random.rand(3)
        pm = PortMapper('/foo[0:3]', data)
        new_data = np.arange(2).astype(np.double)
        pm.set_by_inds([0, 1], new_data)
        assert_array_equal(new_data, pm.get_by_inds([0, 1]))
# Allow running this test module directly as a script.
if __name__ == '__main__':
    main()
| |
import datetime
from dateutil import parser
from itertools import tee
from jinja2 import Template, Environment, FileSystemLoader
import os
import re
import shutil
import time
import warnings
import yaml
# Time of day assigned to entries whose parsed date carries no time component.
DEFAULT_TIME = datetime.time(9, 0)
# Maximum number of characters allowed in an auto-generated slug.
MAX_SLUG_LENGTH = 30
class Date:
    """Thin wrapper around a datetime with the formatting helpers used
    by templates: dict export, unix timestamp, strftime and RFC-3339."""

    def __init__(self, datetime):
        # NOTE: the parameter intentionally shadows the datetime module
        # inside this method; kept for interface compatibility.
        self.datetime = datetime
        self.date_format = '%b'

    def GetDict(self):
        """Return the date components commonly used in permalinks."""
        moment = self.datetime
        return {
            'year': moment.year,
            'month': moment.month,
            'month_name': moment.strftime('%b'),
            'day': moment.day,
            'unix': self.Unix(),
        }

    def Unix(self):
        """Seconds since the epoch, interpreting the datetime as local time."""
        return int(time.mktime(self.datetime.timetuple()))

    def Format(self, date_format=None):
        """Format with the given strftime pattern (default: stored format)."""
        return self.datetime.strftime(date_format or self.date_format)

    def Rfc(self):
        """RFC-3339 / Atom timestamp; naive datetimes get a -00:00 offset."""
        moment = self.datetime
        if moment.tzinfo is None:
            offset = "-00:00"
        else:
            raw = moment.strftime("%z")
            offset = raw[:-2] + ":" + raw[-2:]
        return moment.strftime("%Y-%m-%dT%H:%M:%S") + offset

    def SetDateFormat(self, date_format):
        """Set the strftime pattern used by Format() by default."""
        self.date_format = date_format

    def __sub__(self, to_subtract):
        # Difference between the two dates in whole seconds.
        return self.Unix() - to_subtract.Unix()

    @staticmethod
    def Now():
        """Return a Date for the current local time."""
        return Date(datetime.datetime.now())
def ComputePermalink(type_name, slug, created_date, permalink_template='{{slug}}'):
    """Returns the permalink for the given item."""
    permalink_data = {'slug': slug}
    # When a creation date is known, expose its components (year, month,
    # day, ...) to the permalink template as well.
    if created_date:
        permalink_data.update(created_date.GetDict())
    return RenderTemplateString(permalink_template, permalink_data)
def ParseSnip(content):
    """Return the part of the content before the <!--more--> marker,
    or None when no marker is present."""
    cut = content.find('<!--more-->')
    if cut < 0:
        return None
    return content[:cut]
def ParseDate(date):
    """Convert a date string or date/datetime object into a Date.

    The time of day is always normalized to DEFAULT_TIME. Returns None
    (with a warning) when the value cannot be parsed.
    """
    try:
        if isinstance(date, str):
            date = parser.parse(date)
        dt = datetime.datetime.combine(date, DEFAULT_TIME)
        return Date(dt)
    except (TypeError, ValueError, OverflowError) as e:
        # BUG FIX: dateutil's parser raises ValueError (or OverflowError)
        # for unparseable strings; previously only TypeError was caught,
        # so a malformed date string crashed instead of warning.
        warnings.warn('Failed to parse date: %s.' % e)
        return None
def GuessDate(path):
    """Extract a date from a path shaped like blah/2014/09/20/foo-bar.md.

    Returns a Date (via ParseDate) or None when the path has no
    YYYY/MM/DD component.
    """
    # BUG FIX (hygiene): use a raw string — '\/' in a plain literal is an
    # invalid escape sequence (DeprecationWarning, and a SyntaxWarning in
    # newer Pythons); '/' needs no escaping in a regex at all.
    match = re.match(r'.*/([0-9]{4})/([0-9]{2})/([0-9]{2})/.*', path)
    if match is None:
        return None
    year, month, day = (int(g) for g in match.groups())
    return ParseDate(datetime.datetime(year, month, day))
def GuessType(path, mappings):
    """Return the type for the first mapping whose path fragment occurs
    in the path, or None when nothing matches. The site config provides
    automatic mappings based on path."""
    for fragment, type_name in mappings.items():
        if fragment in path:
            return type_name
    return None
def GuessSlugFromPath(path):
    """Derive a slug from a path: the parent directory name for index.md
    pages, otherwise the file name without its extension."""
    parts = path.split('/')
    if path.endswith('index.md'):
        # e.g. a/b/index.md -> 'b'
        return parts[-2]
    # e.g. a/b/foo.md -> 'foo'
    return parts[-1].split('.')[0]
def GuessSlugFromTitle(title):
    """Return an automatically generated slug from title. Turn spaces into
    dashes, lowercase everything, drop other punctuation, collapse dash
    runs, and cap the result at MAX_SLUG_LENGTH characters."""
    slug = title.lower().replace(' ', '-')
    slug = ''.join(c for c in slug if c.isalnum() or c == '-')
    slug = re.sub("-+", "-", slug)
    # BUG FIX: the docstring promised a length limit but MAX_SLUG_LENGTH
    # (defined at module level) was never applied.
    if len(slug) > MAX_SLUG_LENGTH:
        # Don't leave a dangling dash after truncation.
        slug = slug[:MAX_SLUG_LENGTH].rstrip('-')
    return slug
def RenderTemplateString(template_string, data):
    """Render a Jinja2 template given as a string with the given data."""
    return Template(template_string).render(data)
def RenderTemplate(template_root, filename, data):
    """Load `filename` from `template_root` and render it with `data`.

    Raises Exception naming the offending template on lookup or render
    failure, chaining the original error for debugging.
    """
    env = Environment(loader=FileSystemLoader(template_root))
    try:
        template = env.get_template(filename)
    except Exception as e:
        # BUG FIX: the message hardcoded "(unknown)" instead of naming
        # the template that could not be found.
        raise Exception(f'Failed to find template {filename}.') from e
    try:
        out = template.render(data)
    except Exception as e:
        raise Exception(f'Failed to render template {filename}: "{e}".') from e
    return out
def FindSplitIndices(lines):
    """Given some lines representing a markdown file with multiple entries
    in it, find each split point.

    A split point is the index of a blank line that is immediately
    followed by a text line and then a `===` divider (a setext heading,
    i.e. the start of a new entry).
    """
    def CodeLine(line):
        # N: blank line, T: starts with a word character, D: === divider.
        # BUG FIX (hygiene): raw strings — '\w' in a plain literal is an
        # invalid escape sequence (DeprecationWarning in newer Pythons).
        if line == '\n':
            return 'N'
        elif re.match(r'\w', line):
            return 'T'
        elif re.match(r'^===+$', line):
            return 'D'
        else:
            return '?'
    # Code lines: T if any text, N if new line, D if divider.
    coded = ''.join(CodeLine(line) for line in lines)
    # Look for patterns of NTD in the coded lines string; each match
    # starts at the blank line preceding a new entry's heading.
    return [m.start() for m in re.finditer('NTD', coded)]
def GetYamlMetadata(lines):
    """Parse an entry's metadata header.

    Expected shape: title on the first non-blank line, a divider line,
    then `key: value` YAML pairs up to the first blank line. Mutates
    `lines` in place by stripping leading blank lines.
    """
    # BUG FIX: the old loop did `del lines[i]` while iterating with
    # enumerate, which skips an element after every deletion, so runs of
    # consecutive blank lines were only partially removed.
    while lines and lines[0] == '\n':
        del lines[0]
    # Extract the title.
    title = lines[0].strip()
    # Get the key: value pairs between the divider (line 1) and the
    # first blank line.
    separator_index = lines.index('\n')
    yaml_lines = lines[2:separator_index]
    data = yaml.load(''.join(yaml_lines), Loader=yaml.SafeLoader) or {}
    data['title'] = title
    return data
def Pairwise(iterable):
    """Return consecutive overlapping pairs: s -> [(s0,s1), (s1,s2), ...]."""
    first, second = tee(iterable)
    next(second, None)  # advance one copy so zip pairs neighbours
    return list(zip(first, second))
def CopyAndOverwrite(from_path, to_path):
    """Copy the directory tree at from_path to to_path, first deleting
    any existing tree at the destination."""
    if os.path.exists(to_path):
        shutil.rmtree(to_path)
    shutil.copytree(from_path, to_path)
def DeletePath(path):
    """Remove the file, symlink, or directory tree at path."""
    # BUG FIX: also unlink symlinks — isfile() is False for a symlink to
    # a directory, and shutil.rmtree refuses to operate on symlinks, so
    # such links previously raised instead of being removed.
    if os.path.isfile(path) or os.path.islink(path):
        os.unlink(path)
    else:
        shutil.rmtree(path)
def FixBrokenLinks(content, permalink):
    """Given content (HTML or RSS), make all relative links in href/src
    attributes absolute by joining them onto the permalink."""
    links = re.findall(r'<a href="(.+?)"', content, re.DOTALL) + \
            re.findall(r'<img src="(.+?)"', content, re.DOTALL) + \
            re.findall(r'<audio src="(.+?)"', content, re.DOTALL) + \
            re.findall(r'<video src="(.+?)"', content, re.DOTALL)
    for link in links:
        # Links starting with '/', 'http' or 'mailto' are already
        # absolute; everything else is relative to the permalink.
        if not link.startswith(('/', 'http', 'mailto')):
            absolute_link = os.path.join(permalink, link)
            # BUG FIX: replace the quoted attribute value rather than the
            # bare substring, so a link that happens to occur inside
            # another URL or in body text is not corrupted.
            content = content.replace(f'"{link}"', f'"{absolute_link}"')
    return content
def FormatWikiLinks(html):
    """Convert [[WikiLinks]] (optionally [[target|label]]) into *label*
    just to ease readability."""
    pattern = re.compile(r'\[\[(?:[^|\]]*\|)?([^\]]+)\]\]')
    return pattern.sub(r'*\1*', html)
def ResolveWikiLinks(html):
    """Given an html file, convert [[WikiLinks]] into links to the personal
    wiki: <a href="https://z3.ca/WikiLinks">WikiLinks</a>"""
    pattern = re.compile(r'\[\[(?:[^|\]]*\|)?([^\]]+)\]\]')

    def to_anchor(match):
        wiki_root = 'https://z3.ca'
        # Collapse line breaks inside the link text before slugifying.
        name = match.group(1).replace('\n', ' ')
        slug = name.replace(' ', '_')
        return f'<a class="wiki" href="{wiki_root}/{slug}">{name}</a>'

    return pattern.sub(to_anchor, html)
def StripHtmlTags(html):
    """Replace HTML tags and newlines with single spaces."""
    return re.sub(r'<[^<]+?>|\n', ' ', html)
| |
from django.test import TestCase, RequestFactory
from django.core.urlresolvers import resolve, reverse
from django.http import HttpResponseRedirect
from django.conf import settings
from console.forms import NewDomainForm, TagForm
from console.views import home, me_page, logout, domains, domain, tags,\
tag, containers, alarms, latest_alarms
from console.views_metrics import container_metrics, domain_metrics,\
container_metrics_per_tag, domain_metrics_per_tag
from console.models import IOReadContainerMetric, NetworkRXDomainMetric,\
UwsgiItApi
class HomeViewTests(TestCase):
    """Tests for the console home view (login form / dashboard)."""

    @classmethod
    def setUpClass(cls):
        # BUG FIX: an overridden setUpClass must call super(), otherwise
        # Django's class-level test fixtures are never initialized.
        super(HomeViewTests, cls).setUpClass()
        cls.test_api = UwsgiItApi(
            url=settings.DEFAULT_API_URL,
            name='TEST API')
        cls.test_api.save()
        request_factory = RequestFactory()
        cls.request = request_factory.get('/', follow=True)
        cls.request.session = {}
        cls.request_post = request_factory.post('/', follow=True, data={
            'username': settings.TEST_USER,
            'password': settings.TEST_PASSWORD,
            'api_url': cls.test_api.id,
            'action_login': 1})
        cls.request_post.session = {}

    @classmethod
    def tearDownClass(cls):
        cls.test_api.delete()
        # BUG FIX: mirror the setUpClass fix — let the parent clean up.
        super(HomeViewTests, cls).tearDownClass()

    def test_home_name_resolves_to_home_url(self):
        url = reverse('console_home')
        self.assertEqual(url, '/')

    def test_home_url_resolves_to_home_view(self):
        resolver = resolve('/')
        self.assertEqual(resolver.func, home)

    def test_home_returns_appropriate_html_respons_code(self):
        response = home(self.request)
        self.assertEqual(response.status_code, 200)

    def test_home_contains_right_html(self):
        response = home(self.request)
        self.assertContains(response, 'id="id_action_login" name="action_login"')
        self.assertNotContains(response, 'Latest Alarms')

    def test_home_handles_logged_in_user(self):
        self.request.session = {
            'username': settings.TEST_USER,
            'password': settings.TEST_PASSWORD,
            'api_url': settings.DEFAULT_API_URL}
        response = home(self.request)
        self.request.session = {}
        self.assertContains(response, 'Latest Alarms')
        self.assertNotContains(response, 'id="id_action_login" name="action_login"')

    def test_home_view_login_redirects_to_me_html(self):
        response = home(self.request_post)
        self.assertEqual(response.status_code, 302)
        self.assertTrue(isinstance(response, HttpResponseRedirect))
        self.assertEqual(response.url, '/me/')
class MeViewTests(TestCase):
    """Tests for the /me/ profile view."""

    @classmethod
    def setUpClass(cls):
        # BUG FIX: an overridden setUpClass must call super(), otherwise
        # Django's class-level test fixtures are never initialized.
        super(MeViewTests, cls).setUpClass()
        request_factory = RequestFactory()
        cls.request_get = request_factory.get('/me/', follow=True)
        cls.request_post = request_factory.post('/me/', follow=True)
        cls.request_get.session = {}
        cls.request_post.session = {}
        cls.test_api = UwsgiItApi(
            url=settings.DEFAULT_API_URL,
            name='TEST API')
        cls.test_api.save()

    @classmethod
    def tearDownClass(cls):
        cls.test_api.delete()
        super(MeViewTests, cls).tearDownClass()

    def test_me_name_resolves_to_me_url(self):
        url = reverse('console_me')
        self.assertEqual(url, '/me/')

    def test_me_url_resolves_to_me_view(self):
        resolver = resolve('/me/')
        self.assertEqual(resolver.func, me_page)

    def test_me_doesnt_allow_anonymous(self):
        # Both GET and POST must redirect anonymous users to the login page.
        response = me_page(self.request_get)
        self.assertEqual(response.status_code, 302)
        self.assertTrue(isinstance(response, HttpResponseRedirect))
        self.assertEqual(response.url, '/')
        response = me_page(self.request_post)
        self.assertEqual(response.status_code, 302)
        self.assertTrue(isinstance(response, HttpResponseRedirect))
        self.assertEqual(response.url, '/')

    def test_me_handles_logged_in_user(self):
        self.request_get.session = {
            'username': settings.TEST_USER,
            'password': settings.TEST_PASSWORD,
            'api_url': settings.DEFAULT_API_URL}
        response = me_page(self.request_get)
        self.request_get.session = {}
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, '<tr><th><label for="id_company">Company:')
class DomainsViewTests(TestCase):
    """Tests for the /domains/ list view."""

    @classmethod
    def setUpClass(cls):
        # BUG FIX: an overridden setUpClass must call super(), otherwise
        # Django's class-level test fixtures are never initialized.
        super(DomainsViewTests, cls).setUpClass()
        request_factory = RequestFactory()
        cls.request_get = request_factory.get('/domains/', follow=True)
        cls.request_post = request_factory.post('/domains/', follow=True)
        cls.request_get.session = {}
        cls.request_post.session = {}
        cls.test_api = UwsgiItApi(
            url=settings.DEFAULT_API_URL,
            name='TEST API')
        cls.test_api.save()

    @classmethod
    def tearDownClass(cls):
        cls.test_api.delete()
        super(DomainsViewTests, cls).tearDownClass()

    def test_domains_name_resolves_to_domains_url(self):
        # Renamed from test_me_name_resolves_to_me_url (copy/paste slip).
        url = reverse('console_domains')
        self.assertEqual(url, '/domains/')

    def test_domains_url_resolves_to_domains_view(self):
        resolver = resolve('/domains/')
        self.assertEqual(resolver.func, domains)

    def test_domains_doesnt_allow_anonymous(self):
        response = domains(self.request_get)
        self.assertEqual(response.status_code, 302)
        self.assertTrue(isinstance(response, HttpResponseRedirect))
        self.assertEqual(response.url, '/')
        response = domains(self.request_post)
        self.assertEqual(response.status_code, 302)
        self.assertTrue(isinstance(response, HttpResponseRedirect))
        self.assertEqual(response.url, '/')

    def test_domains_handles_logged_in_user(self):
        self.request_get.session = {
            'username': settings.TEST_USER,
            'password': settings.TEST_PASSWORD,
            'api_url': settings.DEFAULT_API_URL}
        response = domains(self.request_get)
        self.request_get.session = {}
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, NewDomainForm())
class TagsViewTests(TestCase):
    """Tests for the /tags/ list view."""

    @classmethod
    def setUpClass(cls):
        # BUG FIX: an overridden setUpClass must call super(), otherwise
        # Django's class-level test fixtures are never initialized.
        super(TagsViewTests, cls).setUpClass()
        request_factory = RequestFactory()
        cls.request_get = request_factory.get('/tags/', follow=True)
        cls.request_post = request_factory.post('/tags/', follow=True)
        cls.request_get.session = {}
        cls.request_post.session = {}
        cls.test_api = UwsgiItApi(
            url=settings.DEFAULT_API_URL,
            name='TEST API')
        cls.test_api.save()

    @classmethod
    def tearDownClass(cls):
        cls.test_api.delete()
        super(TagsViewTests, cls).tearDownClass()

    def test_tags_name_resolves_to_tags_url(self):
        # Renamed from test_me_name_resolves_to_me_url (copy/paste slip).
        url = reverse('console_tags')
        self.assertEqual(url, '/tags/')

    def test_tags_url_resolves_to_tags_view(self):
        resolver = resolve('/tags/')
        self.assertEqual(resolver.func, tags)

    def test_tags_doesnt_allow_anonymous(self):
        response = tags(self.request_get)
        self.assertEqual(response.status_code, 302)
        self.assertTrue(isinstance(response, HttpResponseRedirect))
        self.assertEqual(response.url, '/')
        response = tags(self.request_post)
        self.assertEqual(response.status_code, 302)
        self.assertTrue(isinstance(response, HttpResponseRedirect))
        self.assertEqual(response.url, '/')

    def test_tags_handles_logged_in_user(self):
        self.request_get.session = {
            'username': settings.TEST_USER,
            'password': settings.TEST_PASSWORD,
            'api_url': settings.DEFAULT_API_URL}
        response = tags(self.request_get)
        self.request_get.session = {}
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, TagForm())
class Tag_ViewTests(TestCase):
    """Tests for the single-tag detail view (/tags/<tag>)."""

    @classmethod
    def setUpClass(cls):
        # BUG FIX: an overridden setUpClass must call super(), otherwise
        # Django's class-level test fixtures are never initialized.
        super(Tag_ViewTests, cls).setUpClass()
        request_factory = RequestFactory()
        cls.url = '/tags/{}'.format(settings.TEST_TAG)
        cls.request_get = request_factory.get(cls.url, follow=True)
        cls.request_post = request_factory.post(cls.url, follow=True)
        cls.request_get.session = {}
        cls.request_post.session = {}
        cls.test_api = UwsgiItApi(
            url=settings.DEFAULT_API_URL,
            name='TEST API')
        cls.test_api.save()

    @classmethod
    def tearDownClass(cls):
        cls.test_api.delete()
        super(Tag_ViewTests, cls).tearDownClass()

    def test_tag_name_resolves_to_metrics_url(self):
        url = reverse('console_tag', args=(settings.TEST_TAG,))
        self.assertEqual(url, self.url)

    def test_tag_url_resolves_to_metrics_view(self):
        resolver = resolve(self.url)
        self.assertEqual(resolver.func, tag)

    def test_tag_doesnt_allow_anonymous(self):
        response = tag(self.request_get, settings.TEST_TAG)
        self.assertEqual(response.status_code, 302)
        self.assertTrue(isinstance(response, HttpResponseRedirect))
        self.assertEqual(response.url, '/')
        response = tag(self.request_post, settings.TEST_TAG)
        self.assertEqual(response.status_code, 302)
        self.assertTrue(isinstance(response, HttpResponseRedirect))
        self.assertEqual(response.url, '/')

    def test_tag_handles_logged_in_user(self):
        self.request_get.session = {
            'username': settings.TEST_USER,
            'password': settings.TEST_PASSWORD,
            'api_url': settings.DEFAULT_API_URL}
        response = tag(self.request_get, settings.TEST_TAG)
        self.request_get.session = {}
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, '<h3><b>{tag}</b> Tag</h3>'.format(tag=settings.TEST_TAG))
class Domain_ViewTests(TestCase):
    """Tests for the single-domain detail view (/domains/<domain>)."""

    @classmethod
    def setUpClass(cls):
        # BUG FIX: an overridden setUpClass must call super(), otherwise
        # Django's class-level test fixtures are never initialized.
        super(Domain_ViewTests, cls).setUpClass()
        request_factory = RequestFactory()
        cls.url = '/domains/{}'.format(settings.TEST_DOMAIN)
        cls.request_get = request_factory.get(cls.url, follow=True)
        cls.request_post = request_factory.post(cls.url, follow=True)
        cls.request_get.session = {}
        cls.request_post.session = {}
        cls.test_api = UwsgiItApi(
            url=settings.DEFAULT_API_URL,
            name='TEST API')
        cls.test_api.save()

    @classmethod
    def tearDownClass(cls):
        cls.test_api.delete()
        super(Domain_ViewTests, cls).tearDownClass()

    def test_domain_name_resolves_to_metrics_url(self):
        url = reverse('console_domain', args=(settings.TEST_DOMAIN,))
        self.assertEqual(url, self.url)

    def test_domain_url_resolves_to_metrics_view(self):
        resolver = resolve(self.url)
        self.assertEqual(resolver.func, domain)

    def test_domain_doesnt_allow_anonymous(self):
        response = domain(self.request_get, settings.TEST_DOMAIN)
        self.assertEqual(response.status_code, 302)
        self.assertTrue(isinstance(response, HttpResponseRedirect))
        self.assertEqual(response.url, '/')
        response = domain(self.request_post, settings.TEST_DOMAIN)
        self.assertEqual(response.status_code, 302)
        self.assertTrue(isinstance(response, HttpResponseRedirect))
        self.assertEqual(response.url, '/')

    def test_domain_handles_logged_in_user(self):
        self.request_get.session = {
            'username': settings.TEST_USER,
            'password': settings.TEST_PASSWORD,
            'api_url': settings.DEFAULT_API_URL}
        response = domain(self.request_get, settings.TEST_DOMAIN)
        self.request_get.session = {}
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, '<td>{id}</td>'.format(id=settings.TEST_DOMAIN))
class Containers_ViewTests(TestCase):
    """Tests for the single-container view (/containers/<id>)."""

    @classmethod
    def setUpClass(cls):
        # BUG FIX: an overridden setUpClass must call super(), otherwise
        # Django's class-level test fixtures are never initialized.
        super(Containers_ViewTests, cls).setUpClass()
        request_factory = RequestFactory()
        cls.url = '/containers/{}'.format(settings.TEST_CONTAINER)
        cls.request_get = request_factory.get(cls.url, follow=True)
        cls.request_post = request_factory.post(cls.url, follow=True)
        cls.request_get.session = {}
        cls.request_post.session = {}
        cls.test_api = UwsgiItApi(
            url=settings.DEFAULT_API_URL,
            name='TEST API')
        cls.test_api.save()

    @classmethod
    def tearDownClass(cls):
        cls.test_api.delete()
        super(Containers_ViewTests, cls).tearDownClass()

    def test_containers_name_resolves_to_containers_url(self):
        url = reverse('console_containers', args=(settings.TEST_CONTAINER,))
        self.assertEqual(url, self.url)

    def test_containers_url_resolves_to_containers_view(self):
        resolver = resolve(self.url)
        self.assertEqual(resolver.func, containers)

    def test_containers_doesnt_allow_anonymous(self):
        response = containers(self.request_get, settings.TEST_CONTAINER)
        self.assertEqual(response.status_code, 302)
        self.assertTrue(isinstance(response, HttpResponseRedirect))
        self.assertEqual(response.url, '/')
        response = containers(self.request_post, settings.TEST_CONTAINER)
        self.assertEqual(response.status_code, 302)
        self.assertTrue(isinstance(response, HttpResponseRedirect))
        self.assertEqual(response.url, '/')

    def test_containers_handles_logged_in_user(self):
        self.request_get.session = {
            'username': settings.TEST_USER,
            'password': settings.TEST_PASSWORD,
            'api_url': settings.DEFAULT_API_URL}
        response = containers(self.request_get, settings.TEST_CONTAINER)
        self.request_get.session = {}
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, '({id})</b>'.format(id=settings.TEST_CONTAINER))
class AlarmsViewTests(TestCase):
    """Tests for the /alarms/ view."""

    @classmethod
    def setUpClass(cls):
        # BUG FIX: an overridden setUpClass must call super(), otherwise
        # Django's class-level test fixtures are never initialized.
        super(AlarmsViewTests, cls).setUpClass()
        request_factory = RequestFactory()
        cls.url = '/alarms/'
        cls.request_get = request_factory.get(cls.url, follow=True)
        cls.request_post = request_factory.post(cls.url, follow=True)
        cls.request_get.session = {}
        cls.request_post.session = {}
        cls.test_api = UwsgiItApi(
            url=settings.DEFAULT_API_URL,
            name='TEST API')
        cls.test_api.save()

    @classmethod
    def tearDownClass(cls):
        cls.test_api.delete()
        super(AlarmsViewTests, cls).tearDownClass()

    def test_alarms_name_resolves_to_alarms_url(self):
        url = reverse('console_alarms')
        self.assertEqual(url, self.url)

    def test_alarms_url_resolves_to_alarms_view(self):
        resolver = resolve(self.url)
        self.assertEqual(resolver.func, alarms)

    def test_alarms_doesnt_allow_anonymous(self):
        response = alarms(self.request_get)
        self.assertEqual(response.status_code, 302)
        self.assertTrue(isinstance(response, HttpResponseRedirect))
        self.assertEqual(response.url, '/')
        response = alarms(self.request_post)
        self.assertEqual(response.status_code, 302)
        self.assertTrue(isinstance(response, HttpResponseRedirect))
        self.assertEqual(response.url, '/')

    def test_alarms_handles_logged_in_user(self):
        self.request_get.session = {
            'username': settings.TEST_USER,
            'password': settings.TEST_PASSWORD,
            'api_url': settings.DEFAULT_API_URL}
        response = alarms(self.request_get)
        self.request_get.session = {}
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, '<form class="form-horizontal" role="form" method="POST">')
class Latest_alarmsViewTests(TestCase):
    """Tests for the /latest_alarms/ view."""

    @classmethod
    def setUpClass(cls):
        # BUG FIX: an overridden setUpClass must call super(), otherwise
        # Django's class-level test fixtures are never initialized.
        super(Latest_alarmsViewTests, cls).setUpClass()
        request_factory = RequestFactory()
        cls.url = '/latest_alarms/'
        cls.request_get = request_factory.get(cls.url, follow=True)
        cls.request_post = request_factory.post(cls.url, follow=True)
        cls.request_get.session = {}
        cls.request_post.session = {}
        cls.test_api = UwsgiItApi(
            url=settings.DEFAULT_API_URL,
            name='TEST API')
        cls.test_api.save()

    @classmethod
    def tearDownClass(cls):
        cls.test_api.delete()
        super(Latest_alarmsViewTests, cls).tearDownClass()

    def test_latest_alarms_name_resolves_to_latest_alarms_url(self):
        url = reverse('console_latest_alarms')
        self.assertEqual(url, self.url)

    def test_latest_alarms_url_resolves_to_latest_alarms_view(self):
        resolver = resolve(self.url)
        self.assertEqual(resolver.func, latest_alarms)

    def test_latest_alarms_doesnt_allow_anonymous(self):
        response = latest_alarms(self.request_get)
        self.assertEqual(response.status_code, 302)
        self.assertTrue(isinstance(response, HttpResponseRedirect))
        self.assertEqual(response.url, '/')
        response = latest_alarms(self.request_post)
        self.assertEqual(response.status_code, 302)
        self.assertTrue(isinstance(response, HttpResponseRedirect))
        self.assertEqual(response.url, '/')

    def test_latest_alarms_handles_logged_in_user(self):
        self.request_get.session = {
            'username': settings.TEST_USER,
            'password': settings.TEST_PASSWORD,
            'api_url': settings.DEFAULT_API_URL}
        response = latest_alarms(self.request_get)
        self.request_get.session = {}
        self.assertEqual(response.status_code, 200)
class ContainerMetricViewTests(TestCase):
@classmethod
def setUpClass(cls):
request_factory = RequestFactory()
cls.request_get = request_factory.get('/metrics/container/io.read/id/1/', follow=True)
cls.request_post = request_factory.post(
'/metrics/container/io.read/id/1/', follow=True,
data={'year': 2000, 'month': 1, 'day': 1})
cls.request_get.session = {}
cls.request_post.session = {}
json = [
[1407103492, 12288], [1407103808, 12288], [1407104120, 12288],
[1407104436, 12288], [1407104748, 12288], [1407105061, 12288],
[1407105374, 12288], [1407105687, 12288], [1407105997, 12288],
[1407106309, 12288], [1407106619, 12288], [1407106930, 12288],
[1407107241, 12288], [1407107552, 12288], [1407107865, 12288],
[1407108179, 12288], [1407108490, 12288], [1407108805, 12288],
[1407109116, 12288], [1407109428, 12288], [1407109740, 12288],
[1407110052, 12288], [1407110366, 12288], [1407110683, 12288],
[1407110995, 12288], [1407111311, 12288], [1407111621, 12288],
[1407111932, 12288], [1407112245, 12288], [1407112559, 12288],
[1407112872, 12288], [1407113183, 12288], [1407113495, 12288],
[1407113808, 12288], [1407114119, 12288], [1407114431, 12288],
[1407114744, 12288], [1407115055, 12288], [1407115366, 12288],
[1407115679, 12288], [1407115993, 12288], [1407116306, 12288],
[1407116619, 12288], [1407116932, 12288], [1407117246, 12288],
[1407117560, 12288], [1407117869, 12288], [1407118179, 12288],
[1407118499, 12288], [1407118819, 12288], [1407119132, 12288],
[1407119446, 12288], [1407119765, 12288], [1407120082, 12288],
[1407120398, 12288], [1407120711, 12288], [1407121025, 12288],
[1407121336, 12288], [1407121651, 12288], [1407121963, 12288],
[1407122274, 12288], [1407122585, 12288], [1407122897, 12288],
[1407123209, 12288], [1407123526, 12288], [1407123840, 12288],
[1407124151, 12288], [1407124464, 12288], [1407124777, 12288],
]
cls.test_metric = IOReadContainerMetric(
container=1,
year=2000,
month=1,
day=1,
json=json
)
cls.test_metric.save()
cls.test_api = UwsgiItApi(
url=settings.DEFAULT_API_URL,
name='TEST API')
cls.test_api.save()
@classmethod
def tearDownClass(cls):
cls.test_api.delete()
cls.test_metric.delete()
def test_IOReadContainerMetric_name_resolves_to_metrics_url(self):
url = reverse('console_container_io_read', args=(1,))
self.assertEqual(url, '/metrics/container/io.read/id/1/')
def test_IOReadContainerMetric_url_resolves_to_metrics_view(self):
resolver = resolve('/metrics/container/io.read/id/1/')
self.assertEqual(resolver.func, container_metrics)
def test_IOWriteContainerMetric_name_resolves_to_metrics_url(self):
url = reverse('console_container_io_write', args=(1,))
self.assertEqual(url, '/metrics/container/io.write/id/1/')
def test_IOWriteContainerMetric_url_resolves_to_metrics_view(self):
resolver = resolve('/metrics/container/io.write/id/1/')
self.assertEqual(resolver.func, container_metrics)
def test_NetworkRXContainerMetric_name_resolves_to_metrics_url(self):
url = reverse('console_container_net_rx', args=(1,))
self.assertEqual(url, '/metrics/container/net.rx/id/1/')
def test_NetworkRXContainerMetric_url_resolves_to_metrics_view(self):
resolver = resolve('/metrics/container/net.rx/id/1/')
self.assertEqual(resolver.func, container_metrics)
def test_NetworkTXContainerMetric_name_resolves_to_metrics_url(self):
url = reverse('console_container_net_tx', args=(1,))
self.assertEqual(url, '/metrics/container/net.tx/id/1/')
def test_NetworkTXContainerMetric_url_resolves_to_metrics_view(self):
resolver = resolve('/metrics/container/net.tx/id/1/')
self.assertEqual(resolver.func, container_metrics)
def test_CPUContainerMetric_name_resolves_to_metrics_url(self):
url = reverse('console_container_cpu', args=(1,))
self.assertEqual(url, '/metrics/container/cpu/id/1/')
def test_CPUContainerMetric_url_resolves_to_metrics_view(self):
resolver = resolve('/metrics/container/cpu/id/1/')
self.assertEqual(resolver.func, container_metrics)
def test_MemoryContainerMetric_name_resolves_to_metrics_url(self):
url = reverse('console_container_mem', args=(1,))
self.assertEqual(url, '/metrics/container/mem/id/1/')
def test_MemoryContainerMetric_url_resolves_to_metrics_view(self):
resolver = resolve('/metrics/container/mem/id/1/')
self.assertEqual(resolver.func, container_metrics)
def test_IOReadContainerMetricPerTag_name_resolves_to_metrics_url(self):
url = reverse('console_container_io_read_per_tag', args=(settings.TEST_TAG,))
self.assertEqual(url, '/metrics/container/io.read/tag/{}/'.format(settings.TEST_TAG))
def test_IOReadContainerMetricPerTag_url_resolves_to_metrics_view(self):
resolver = resolve('/metrics/container/io.read/tag/{}/'.format(settings.TEST_TAG))
self.assertEqual(resolver.func, container_metrics_per_tag)
def test_IOWriteContainerMetricPerTag_name_resolves_to_metrics_url(self):
url = reverse('console_container_io_write_per_tag', args=(settings.TEST_TAG,))
self.assertEqual(url, '/metrics/container/io.write/tag/{}/'.format(settings.TEST_TAG))
def test_IOWriteContainerMetricPerTag_url_resolves_to_metrics_view(self):
resolver = resolve('/metrics/container/io.write/tag/{}/'.format(settings.TEST_TAG))
self.assertEqual(resolver.func, container_metrics_per_tag)
def test_NetworkRXContainerMetricPerTag_name_resolves_to_metrics_url(self):
url = reverse('console_container_net_rx_per_tag', args=(settings.TEST_TAG,))
self.assertEqual(url, '/metrics/container/net.rx/tag/{}/'.format(settings.TEST_TAG))
def test_NetworkRXContainerMetricPerTag_url_resolves_to_metrics_view(self):
resolver = resolve('/metrics/container/net.rx/tag/{}/'.format(settings.TEST_TAG))
self.assertEqual(resolver.func, container_metrics_per_tag)
def test_NetworkTXContainerMetricPerTag_name_resolves_to_metrics_url(self):
url = reverse('console_container_net_tx_per_tag', args=(settings.TEST_TAG,))
self.assertEqual(url, '/metrics/container/net.tx/tag/{}/'.format(settings.TEST_TAG))
def test_NetworkTXContainerMetricPerTag_url_resolves_to_metrics_view(self):
resolver = resolve('/metrics/container/net.tx/tag/{}/'.format(settings.TEST_TAG))
self.assertEqual(resolver.func, container_metrics_per_tag)
def test_CPUContainerMetricPerTag_name_resolves_to_metrics_url(self):
url = reverse('console_container_cpu_per_tag', args=(settings.TEST_TAG,))
self.assertEqual(url, '/metrics/container/cpu/tag/{}/'.format(settings.TEST_TAG))
    def test_CPUContainerMetricPerTag_url_resolves_to_metrics_view(self):
        # The cpu per-tag URL must dispatch to the shared per-tag view.
        resolver = resolve('/metrics/container/cpu/tag/{}/'.format(settings.TEST_TAG))
        self.assertEqual(resolver.func, container_metrics_per_tag)
    def test_MemoryContainerMetricPerTag_name_resolves_to_metrics_url(self):
        # Reversing the named route must yield the canonical per-tag mem path.
        url = reverse('console_container_mem_per_tag', args=(settings.TEST_TAG,))
        self.assertEqual(url, '/metrics/container/mem/tag/{}/'.format(settings.TEST_TAG))
    def test_MemoryContainerMetricPerTag_url_resolves_to_metrics_view(self):
        # The mem per-tag URL must dispatch to the shared per-tag view.
        resolver = resolve('/metrics/container/mem/tag/{}/'.format(settings.TEST_TAG))
        self.assertEqual(resolver.func, container_metrics_per_tag)
    def test_container_view_doesnt_allows_anonymous(self):
        # Anonymous GET: the view must redirect (302) to the login page '/'.
        response = container_metrics(
            self.request_get, 1, **{'model': IOReadContainerMetric, 'absolute_values': False, 'average': False})
        self.assertEqual(response.status_code, 302)
        self.assertTrue(isinstance(response, HttpResponseRedirect))
        self.assertEqual(response.url, '/')
        # Anonymous POST: same redirect behaviour.
        response = container_metrics(
            self.request_post, 1, **{'model': IOReadContainerMetric, 'absolute_values': False, 'average': False})
        self.assertEqual(response.status_code, 302)
        self.assertTrue(isinstance(response, HttpResponseRedirect))
        self.assertEqual(response.url, '/')
    def test_container_view_handles_logged_in_user(self):
        # Simulate a logged-in session, then expect a normal 200 response.
        self.request_post.session = {
            'username': settings.TEST_USER,
            'password': settings.TEST_PASSWORD,
            'api_url': self.test_api.id}
        response = container_metrics(
            self.request_post, 1, **{'model': IOReadContainerMetric, 'absolute_values': False, 'average': False})
        # Reset the shared request object so other tests see an anonymous session.
        self.request_post.session = {}
        self.assertEqual(response.status_code, 200)
class DomainMetricViewTests(TestCase):
    """Routing and access-control tests for the per-domain metric views."""

    @classmethod
    def setUpClass(cls):
        # Canned GET/POST requests aimed at a representative metrics URL;
        # both start with an empty (anonymous) session.
        request_factory = RequestFactory()
        cls.request_get = request_factory.get('/metrics/domain/net.rx/id/1/', follow=True)
        cls.request_post = request_factory.post(
            '/metrics/domain/net.rx/id/1/', follow=True,
            data={'year': 2000, 'month': 1, 'day': 1})
        cls.request_get.session = {}
        cls.request_post.session = {}
        # Sample (unix timestamp, value) series stored on the test metric.
        json = [
            [1407103492, 12288], [1407103808, 12288], [1407104120, 12288],
            [1407104436, 12288], [1407104748, 12288], [1407105061, 12288],
            [1407105374, 12288], [1407105687, 12288], [1407105997, 12288],
            [1407106309, 12288], [1407106619, 12288], [1407106930, 12288],
            [1407107241, 12288], [1407107552, 12288], [1407107865, 12288],
            [1407108179, 12288], [1407108490, 12288], [1407108805, 12288],
            [1407109116, 12288], [1407109428, 12288], [1407109740, 12288],
            [1407110052, 12288], [1407110366, 12288], [1407110683, 12288],
            [1407110995, 12288], [1407111311, 12288], [1407111621, 12288],
            [1407111932, 12288], [1407112245, 12288], [1407112559, 12288],
            [1407112872, 12288], [1407113183, 12288], [1407113495, 12288],
            [1407113808, 12288], [1407114119, 12288], [1407114431, 12288],
            [1407114744, 12288], [1407115055, 12288], [1407115366, 12288],
            [1407115679, 12288], [1407115993, 12288], [1407116306, 12288],
            [1407116619, 12288], [1407116932, 12288], [1407117246, 12288],
            [1407117560, 12288], [1407117869, 12288], [1407118179, 12288],
            [1407118499, 12288], [1407118819, 12288], [1407119132, 12288],
            [1407119446, 12288], [1407119765, 12288], [1407120082, 12288],
            [1407120398, 12288], [1407120711, 12288], [1407121025, 12288],
            [1407121336, 12288], [1407121651, 12288], [1407121963, 12288],
            [1407122274, 12288], [1407122585, 12288], [1407122897, 12288],
            [1407123209, 12288], [1407123526, 12288], [1407123840, 12288],
            [1407124151, 12288], [1407124464, 12288], [1407124777, 12288],
        ]
        # NOTE(review): a ``container`` id on a *domain* metric looks odd —
        # presumably required by the shared metric base model; confirm.
        cls.test_metric = NetworkRXDomainMetric(
            domain=1,
            container=1,
            year=2000,
            month=1,
            day=1,
            json=json
        )
        cls.test_metric.save()
        cls.test_api = UwsgiItApi(
            url=settings.DEFAULT_API_URL,
            name='TEST API')
        cls.test_api.save()

    @classmethod
    def tearDownClass(cls):
        # Remove the fixtures created in setUpClass.
        cls.test_api.delete()
        cls.test_metric.delete()

    def test_NetworkRXDomainMetric_name_resolves_to_metrics_url(self):
        url = reverse('console_domain_net_rx', args=(1,))
        self.assertEqual(url, '/metrics/domain/net.rx/id/1/')

    def test_NetworkRXDomainMetric_url_resolves_to_metrics_view(self):
        resolver = resolve('/metrics/domain/net.rx/id/1/')
        self.assertEqual(resolver.func, domain_metrics)

    def test_NetworkTXDomainMetric_name_resolves_to_metrics_url(self):
        url = reverse('console_domain_net_tx', args=(1,))
        self.assertEqual(url, '/metrics/domain/net.tx/id/1/')

    def test_NetworkTXDomainMetric_url_resolves_to_metrics_view(self):
        resolver = resolve('/metrics/domain/net.tx/id/1/')
        self.assertEqual(resolver.func, domain_metrics)

    def test_HitsDomainMetric_name_resolves_to_metrics_url(self):
        url = reverse('console_domain_hits', args=(1,))
        self.assertEqual(url, '/metrics/domain/hits/id/1/')

    def test_HitsDomainMetric_url_resolves_to_metrics_view(self):
        resolver = resolve('/metrics/domain/hits/id/1/')
        self.assertEqual(resolver.func, domain_metrics)

    def test_NetworkRXDomainMetricPerTag_name_resolves_to_metrics_url(self):
        url = reverse('console_domain_net_rx_per_tag', args=(settings.TEST_TAG,))
        self.assertEqual(url, '/metrics/domain/net.rx/tag/{}/'.format(settings.TEST_TAG))

    def test_NetworkRXDomainMetricPerTag_url_resolves_to_metrics_view(self):
        resolver = resolve('/metrics/domain/net.rx/tag/{}/'.format(settings.TEST_TAG))
        self.assertEqual(resolver.func, domain_metrics_per_tag)

    def test_NetworkTXDomainMetricPerTag_name_resolves_to_metrics_url(self):
        url = reverse('console_domain_net_tx_per_tag', args=(settings.TEST_TAG,))
        self.assertEqual(url, '/metrics/domain/net.tx/tag/{}/'.format(settings.TEST_TAG))

    def test_NetworkTXDomainMetricPerTag_url_resolves_to_metrics_view(self):
        resolver = resolve('/metrics/domain/net.tx/tag/{}/'.format(settings.TEST_TAG))
        self.assertEqual(resolver.func, domain_metrics_per_tag)

    def test_HitsDomainMetricPerTag_name_resolves_to_metrics_url(self):
        url = reverse('console_domain_hits_per_tag', args=(settings.TEST_TAG,))
        self.assertEqual(url, '/metrics/domain/hits/tag/{}/'.format(settings.TEST_TAG))

    def test_HitsDomainMetricPerTag_url_resolves_to_metrics_view(self):
        resolver = resolve('/metrics/domain/hits/tag/{}/'.format(settings.TEST_TAG))
        self.assertEqual(resolver.func, domain_metrics_per_tag)

    def test_domain_view_doesnt_allows_anonymous(self):
        # Anonymous GET and POST must both redirect (302) to '/'.
        response = domain_metrics(
            self.request_get, 1, **{'model': NetworkRXDomainMetric, 'absolute_values': False, 'average': False})
        self.assertEqual(response.status_code, 302)
        self.assertTrue(isinstance(response, HttpResponseRedirect))
        self.assertEqual(response.url, '/')
        response = domain_metrics(
            self.request_post, 1, **{'model': NetworkRXDomainMetric, 'absolute_values': False, 'average': False})
        self.assertEqual(response.status_code, 302)
        self.assertTrue(isinstance(response, HttpResponseRedirect))
        self.assertEqual(response.url, '/')

    def test_domain_view_handles_logged_in_user(self):
        # Simulate a logged-in session, then expect a normal 200 response.
        self.request_post.session = {
            'username': settings.TEST_USER,
            'password': settings.TEST_PASSWORD,
            'api_url': self.test_api.id}
        response = domain_metrics(
            self.request_post, 1, **{'model': NetworkRXDomainMetric, 'absolute_values': False, 'average': False})
        # Reset the shared request object for subsequent tests.
        self.request_post.session = {}
        self.assertEqual(response.status_code, 200)
class LogoutViewTest(TestCase):
    """Routing, access-control and behaviour tests for the logout view."""

    @classmethod
    def setUpClass(cls):
        # Shared fixtures: one API endpoint plus canned GET/POST requests
        # with empty (anonymous) sessions.
        cls.test_api = UwsgiItApi(
            url=settings.DEFAULT_API_URL,
            name='TEST API')
        cls.test_api.save()
        request_factory = RequestFactory()
        cls.request_get = request_factory.get('/logout/', follow=True)
        cls.request_get.session = {}
        cls.request_post = request_factory.post('/logout/', follow=True)
        cls.request_post.session = {}

    @classmethod
    def tearDownClass(cls):
        cls.test_api.delete()

    def test_logout_name_resolves_to_logout_url(self):
        url = reverse('console_logout')
        self.assertEqual(url, '/logout/')

    def test_logout_url_resolves_to_logout_view(self):
        resolver = resolve('/logout/')
        self.assertEqual(resolver.func, logout)

    def test_logout_redirects_anonymous(self):
        # Bug fix: this test previously called the unrelated ``tags`` view
        # (copy-paste slip) with ``request_post`` twice, so the logout view
        # was never exercised.  Check GET and POST against ``logout`` itself.
        response = logout(self.request_get)
        self.assertEqual(response.status_code, 302)
        self.assertTrue(isinstance(response, HttpResponseRedirect))
        self.assertEqual(response.url, '/')
        response = logout(self.request_post)
        self.assertEqual(response.status_code, 302)
        self.assertTrue(isinstance(response, HttpResponseRedirect))
        self.assertEqual(response.url, '/')

    def test_logout_logs_out_logged_in_user(self):
        # login
        self.client.post('/', follow=True, data={
            'username': settings.TEST_USER,
            'password': settings.TEST_PASSWORD,
            'api_url': self.test_api.id,
            'action_login': 1})
        response = self.client.get('/me/')
        self.assertEqual(response.status_code, 200)
        # logout via GET must drop the session ...
        response = self.client.get('/logout/', follow=True)
        self.assertRedirects(response, '/')
        # ... after which protected pages redirect to the login page.
        response = self.client.get('/me/', follow=True)
        self.assertRedirects(response, '/')
        # login
        self.client.post('/', follow=True, data={
            'username': settings.TEST_USER,
            'password': settings.TEST_PASSWORD,
            'api_url': self.test_api.id,
            'action_login': 1})
        response = self.client.get('/me/')
        self.assertEqual(response.status_code, 200)
        # logout via POST behaves the same as GET.
        response = self.client.post('/logout/', follow=True)
        self.assertRedirects(response, '/')
        response = self.client.get('/me/', follow=True)
        self.assertRedirects(response, '/')
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""TFRecord sources and sinks."""
from __future__ import absolute_import
import binascii
import logging
import struct
from functools import partial

import crcmod

from apache_beam import coders
from apache_beam.io import filebasedsink
from apache_beam.io.filebasedsource import FileBasedSource
from apache_beam.io.filebasedsource import ReadAllFiles
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.iobase import Read
from apache_beam.io.iobase import Write
from apache_beam.transforms import PTransform
__all__ = ['ReadFromTFRecord', 'WriteToTFRecord']
def _default_crc32c_fn(value):
  """Calculates crc32c by either snappy or crcmod based on installation."""
  # Lazily resolve the implementation on first call and memoize it as a
  # function attribute; python-snappy's C routine is preferred for speed.
  if _default_crc32c_fn.fn is None:
    try:
      import snappy  # pylint: disable=import-error
      _default_crc32c_fn.fn = snappy._crc32c  # pylint: disable=protected-access
    except ImportError:
      logging.warning('Couldn\'t find python-snappy so the implementation of '
                      '_TFRecordUtil._masked_crc32c is not as fast as it could '
                      'be.')
      _default_crc32c_fn.fn = crcmod.predefined.mkPredefinedCrcFun('crc-32c')
  return _default_crc32c_fn.fn(value)
_default_crc32c_fn.fn = None
class _TFRecordUtil(object):
  """Provides basic TFRecord encoding/decoding with consistency checks.

  For detailed TFRecord format description see:
    https://www.tensorflow.org/versions/master/api_docs/python/python_io.html#tfrecords-format-details

  Note that masks and length are represented in LittleEndian order.
  """

  @classmethod
  def _masked_crc32c(cls, value, crc32c_fn=_default_crc32c_fn):
    """Compute a masked crc32c checksum for a value.

    Args:
      value: A string for which we compute the crc.
      crc32c_fn: A function that can compute a crc32c.
        This is a performance hook that also helps with testing. Callers are
        not expected to make use of it directly.

    Returns:
      Masked crc32c checksum.
    """
    crc = crc32c_fn(value)
    # Rotate-right by 15 bits plus the TFRecord masking constant.
    return (((crc >> 15) | (crc << 17)) + 0xa282ead8) & 0xffffffff

  @staticmethod
  def encoded_num_bytes(record):
    """Return the number of bytes consumed by a record in its encoded form."""
    # 16 = 8 (Length) + 4 (crc of length) + 4 (crc of data)
    return len(record) + 16

  @classmethod
  def write_record(cls, file_handle, value):
    """Encode a value as a TFRecord.

    Args:
      file_handle: The file to write to.
      value: A string content of the record.
    """
    encoded_length = struct.pack('<Q', len(value))
    # Join the binary pieces directly.  The previous str.format-based
    # concatenation only worked on Python 2 (struct.pack returns str there);
    # on Python 3 it would embed repr() of the bytes objects in the output.
    file_handle.write(b''.join([
        encoded_length,
        struct.pack('<I', cls._masked_crc32c(encoded_length)),
        value,
        struct.pack('<I', cls._masked_crc32c(value))]))

  @classmethod
  def read_record(cls, file_handle):
    """Read a record from a TFRecords file.

    Args:
      file_handle: The file to read from.

    Returns:
      None if EOF is reached; the paylod of the record otherwise.

    Raises:
      ValueError: If file appears to not be a valid TFRecords file.
    """
    buf_length_expected = 12
    buf = file_handle.read(buf_length_expected)
    if not buf:
      return None  # EOF Reached.

    # Validate all length related payloads.  binascii.hexlify replaces the
    # Python-2-only buf.encode('hex') so the error paths work on Python 3.
    if len(buf) != buf_length_expected:
      raise ValueError('Not a valid TFRecord. Fewer than %d bytes: %s' %
                       (buf_length_expected, binascii.hexlify(buf)))
    length, length_mask_expected = struct.unpack('<QI', buf)
    length_mask_actual = cls._masked_crc32c(buf[:8])
    if length_mask_actual != length_mask_expected:
      raise ValueError('Not a valid TFRecord. Mismatch of length mask: %s' %
                       binascii.hexlify(buf))

    # Validate all data related payloads.
    buf_length_expected = length + 4
    buf = file_handle.read(buf_length_expected)
    if len(buf) != buf_length_expected:
      raise ValueError('Not a valid TFRecord. Fewer than %d bytes: %s' %
                       (buf_length_expected, binascii.hexlify(buf)))
    data, data_mask_expected = struct.unpack('<%dsI' % length, buf)
    data_mask_actual = cls._masked_crc32c(data)
    if data_mask_actual != data_mask_expected:
      raise ValueError('Not a valid TFRecord. Mismatch of data mask: %s' %
                       binascii.hexlify(buf))

    # All validation checks passed.
    return data
class _TFRecordSource(FileBasedSource):
  """A File source for reading files of TFRecords.

  For detailed TFRecords format description see:
    https://www.tensorflow.org/versions/master/api_docs/python/python_io.html#tfrecords-format-details
  """

  def __init__(self,
               file_pattern,
               coder,
               compression_type,
               validate):
    """Initialize a TFRecordSource. See ReadFromTFRecord for details."""
    # TFRecord files carry no sync markers, so they can never be split.
    super(_TFRecordSource, self).__init__(
        file_pattern=file_pattern,
        compression_type=compression_type,
        splittable=False,
        validate=validate)
    self._coder = coder

  def read_records(self, file_name, offset_range_tracker):
    start = offset_range_tracker.start_position()
    if start:
      raise ValueError('Start position not 0:%s' % start)

    position = start
    with self.open_file(file_name) as file_handle:
      while True:
        # Claim the offset of the record we are about to emit.
        if not offset_range_tracker.try_claim(position):
          raise RuntimeError('Unable to claim position: %s' % position)
        record = _TFRecordUtil.read_record(file_handle)
        if record is None:
          return  # Reached EOF
        position += _TFRecordUtil.encoded_num_bytes(record)
        yield self._coder.decode(record)
def _create_tfrcordio_source(
    file_pattern=None, coder=None, compression_type=None):
  # Factory used by the ReadAll pattern.  Validation is deliberately
  # disabled so that reading does not fail for globs (elements) that are
  # empty.
  return _TFRecordSource(
      file_pattern, coder, compression_type, validate=False)
class ReadAllFromTFRecord(PTransform):
  """A ``PTransform`` for reading a ``PCollection`` of TFRecord files."""

  def __init__(
      self,
      coder=coders.BytesCoder(),
      compression_type=CompressionTypes.AUTO,
      **kwargs):
    """Initialize the ``ReadAllFromTFRecord`` transform.

    Args:
      coder: Coder used to decode each record.
      compression_type: Used to handle compressed input files. Default value
        is CompressionTypes.AUTO, in which case the file_path's extension will
        be used to detect the compression.
      **kwargs: optional args dictionary. These are passed through to parent
        constructor.
    """
    super(ReadAllFromTFRecord, self).__init__(**kwargs)
    # Bind coder/compression now; the file pattern arrives per element.
    make_source = partial(
        _create_tfrcordio_source,
        compression_type=compression_type,
        coder=coder)
    # Desired and min bundle sizes do not matter since TFRecord files are
    # unsplittable.
    self._read_all_files = ReadAllFiles(
        splittable=False,
        compression_type=compression_type,
        desired_bundle_size=0,
        min_bundle_size=0,
        source_from_file=make_source)

  def expand(self, pvalue):
    return pvalue | 'ReadAllFiles' >> self._read_all_files
class ReadFromTFRecord(PTransform):
  """Transform for reading TFRecord sources."""

  def __init__(self,
               file_pattern,
               coder=coders.BytesCoder(),
               compression_type=CompressionTypes.AUTO,
               validate=True,
               **kwargs):
    """Initialize a ReadFromTFRecord transform.

    Args:
      file_pattern: A file glob pattern to read TFRecords from.
      coder: Coder used to decode each record.
      compression_type: Used to handle compressed input files. Default value
        is CompressionTypes.AUTO, in which case the file_path's extension will
        be used to detect the compression.
      validate: Boolean flag to verify that the files exist during the pipeline
        creation time.
      **kwargs: optional args dictionary. These are passed through to parent
        constructor.

    Returns:
      A ReadFromTFRecord transform object.
    """
    super(ReadFromTFRecord, self).__init__(**kwargs)
    self._source = _TFRecordSource(
        file_pattern, coder, compression_type, validate)

  def expand(self, pvalue):
    # Reading starts from the pipeline root; the incoming pvalue only
    # supplies the pipeline reference.
    return pvalue.pipeline | Read(self._source)
class _TFRecordSink(filebasedsink.FileBasedSink):
  """Sink for writing TFRecords files.

  For detailed TFRecord format description see:
    https://www.tensorflow.org/versions/master/api_docs/python/python_io.html#tfrecords-format-details
  """

  def __init__(self, file_path_prefix, coder, file_name_suffix, num_shards,
               shard_name_template, compression_type):
    """Initialize a TFRecordSink. See WriteToTFRecord for details."""
    sink_args = dict(
        file_path_prefix=file_path_prefix,
        coder=coder,
        file_name_suffix=file_name_suffix,
        num_shards=num_shards,
        shard_name_template=shard_name_template,
        mime_type='application/octet-stream',
        compression_type=compression_type)
    super(_TFRecordSink, self).__init__(**sink_args)

  def write_encoded_record(self, file_handle, value):
    """Frame one already-encoded record and append it to the file."""
    _TFRecordUtil.write_record(file_handle, value)
class WriteToTFRecord(PTransform):
  """Transform for writing to TFRecord sinks."""

  def __init__(self,
               file_path_prefix,
               coder=coders.BytesCoder(),
               file_name_suffix='',
               num_shards=0,
               shard_name_template=None,
               compression_type=CompressionTypes.AUTO,
               **kwargs):
    """Initialize WriteToTFRecord transform.

    Args:
      file_path_prefix: The file path to write to. The files written will begin
        with this prefix, followed by a shard identifier (see num_shards), and
        end in a common extension, if given by file_name_suffix.
      coder: Coder used to encode each record.
      file_name_suffix: Suffix for the files written.
      num_shards: The number of files (shards) used for output. If not set, the
        default value will be used.
      shard_name_template: A template string containing placeholders for
        the shard number and shard count. When constructing a filename for a
        particular shard number, the upper-case letters 'S' and 'N' are
        replaced with the 0-padded shard number and shard count respectively.
        This argument can be '' in which case it behaves as if num_shards was
        set to 1 and only one file will be generated. The default pattern used
        is '-SSSSS-of-NNNNN' if None is passed as the shard_name_template.
      compression_type: Used to handle compressed output files. Typical value
        is CompressionTypes.AUTO, in which case the file_path's extension will
        be used to detect the compression.
      **kwargs: Optional args dictionary. These are passed through to parent
        constructor.

    Returns:
      A WriteToTFRecord transform object.
    """
    super(WriteToTFRecord, self).__init__(**kwargs)
    self._sink = _TFRecordSink(
        file_path_prefix, coder, file_name_suffix, num_shards,
        shard_name_template, compression_type)

  def expand(self, pcoll):
    return pcoll | Write(self._sink)
| |
"""
Support the OwnTracks platform.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.owntracks/
"""
import asyncio
import json
import logging
import base64
from collections import defaultdict
import voluptuous as vol
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
import homeassistant.components.mqtt as mqtt
from homeassistant.const import STATE_HOME
from homeassistant.util import convert, slugify
from homeassistant.components import zone as zone_comp
from homeassistant.components.device_tracker import PLATFORM_SCHEMA
# Home Assistant platform metadata: required component and pip requirement.
DEPENDENCIES = ['mqtt']
REQUIREMENTS = ['libnacl==1.5.0']
_LOGGER = logging.getLogger(__name__)
# Prefix used for device ids synthesized for mobile beacons.
BEACON_DEV_ID = 'beacon'
# Configuration keys accepted by this platform.
CONF_MAX_GPS_ACCURACY = 'max_gps_accuracy'
CONF_SECRET = 'secret'
CONF_WAYPOINT_IMPORT = 'waypoints'
CONF_WAYPOINT_WHITELIST = 'waypoint_whitelist'
# MQTT topic patterns: owntracks/<user>/<device>[/event|/waypoint].
EVENT_TOPIC = 'owntracks/+/+/event'
LOCATION_TOPIC = 'owntracks/+/+'
# Expected '_type' values of incoming OwnTracks JSON payloads.
VALIDATE_LOCATION = 'location'
VALIDATE_TRANSITION = 'transition'
VALIDATE_WAYPOINTS = 'waypoints'
# Keys used for coordinates in both location and waypoint payloads.
WAYPOINT_LAT_KEY = 'lat'
WAYPOINT_LON_KEY = 'lon'
WAYPOINT_TOPIC = 'owntracks/{}/{}/waypoint'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_MAX_GPS_ACCURACY): vol.Coerce(float),
    vol.Optional(CONF_WAYPOINT_IMPORT, default=True): cv.boolean,
    vol.Optional(CONF_WAYPOINT_WHITELIST): vol.All(
        cv.ensure_list, [cv.string]),
    vol.Optional(CONF_SECRET): vol.Any(
        vol.Schema({vol.Optional(cv.string): cv.string}),
        cv.string)
})
def get_cipher():
    """Return decryption function and length of key.

    Async friendly.
    """
    # Imported lazily: libnacl needs libsodium and raises OSError without it,
    # which the caller handles.
    from libnacl import crypto_secretbox_KEYBYTES as KEYLEN
    from libnacl.secret import SecretBox

    def decrypt(ciphertext, key):
        """Decrypt ciphertext using key."""
        return SecretBox(key).decrypt(ciphertext)

    return (KEYLEN, decrypt)
@asyncio.coroutine
def async_setup_scanner(hass, config, async_see, discovery_info=None):
    """Set up an OwnTracks tracker.

    Subscribes to the OwnTracks location/event/waypoint MQTT topics and
    forwards validated updates to ``async_see``.
    """
    max_gps_accuracy = config.get(CONF_MAX_GPS_ACCURACY)
    waypoint_import = config.get(CONF_WAYPOINT_IMPORT)
    waypoint_whitelist = config.get(CONF_WAYPOINT_WHITELIST)
    secret = config.get(CONF_SECRET)
    # Per-device bookkeeping: active mobile beacons and the stack of region
    # names each device is currently inside.
    mobile_beacons_active = defaultdict(list)
    regions_entered = defaultdict(list)

    def decrypt_payload(topic, ciphertext):
        """Decrypt encrypted payload."""
        try:
            keylen, decrypt = get_cipher()
        except OSError:
            _LOGGER.warning('Ignoring encrypted payload '
                            'because libsodium not installed.')
            return None

        if isinstance(secret, dict):
            key = secret.get(topic)
        else:
            key = secret

        if key is None:
            _LOGGER.warning('Ignoring encrypted payload '
                            'because no decryption key known '
                            'for topic %s.', topic)
            return None

        # Pad/trim the configured secret to the exact key length.
        key = key.encode("utf-8")
        key = key[:keylen]
        key = key.ljust(keylen, b'\0')

        try:
            ciphertext = base64.b64decode(ciphertext)
            message = decrypt(ciphertext, key)
            message = message.decode("utf-8")
            _LOGGER.debug("Decrypted payload: %s", message)
            return message
        except ValueError:
            _LOGGER.warning('Ignoring encrypted payload '
                            'because unable to decrypt using key '
                            'for topic %s.', topic)
            return None

    # pylint: disable=too-many-return-statements
    def validate_payload(topic, payload, data_type):
        """Validate the OwnTracks payload."""
        try:
            data = json.loads(payload)
        except ValueError:
            # If invalid JSON
            _LOGGER.error('Unable to parse payload as JSON: %s', payload)
            return None
        if isinstance(data, dict) and \
           data.get('_type') == 'encrypted' and \
           'data' in data:
            plaintext_payload = decrypt_payload(topic, data['data'])
            if plaintext_payload is None:
                return None
            else:
                return validate_payload(topic, plaintext_payload, data_type)
        if not isinstance(data, dict) or data.get('_type') != data_type:
            _LOGGER.debug('Skipping %s update for following data '
                          'because of missing or malformatted data: %s',
                          data_type, data)
            return None
        if data_type == VALIDATE_TRANSITION or data_type == VALIDATE_WAYPOINTS:
            return data
        if max_gps_accuracy is not None and \
                convert(data.get('acc'), float, 0.0) > max_gps_accuracy:
            _LOGGER.info('Ignoring %s update because expected GPS '
                         'accuracy %s is not met: %s',
                         data_type, max_gps_accuracy, payload)
            return None
        if convert(data.get('acc'), float, 1.0) == 0.0:
            _LOGGER.warning('Ignoring %s update because GPS accuracy '
                            'is zero: %s',
                            data_type, payload)
            return None

        return data

    @callback
    def async_owntracks_location_update(topic, payload, qos):
        """MQTT message received."""
        # Docs on available data:
        # http://owntracks.org/booklet/tech/json/#_typelocation
        data = validate_payload(topic, payload, VALIDATE_LOCATION)
        if not data:
            return

        dev_id, kwargs = _parse_see_args(topic, data)

        if regions_entered[dev_id]:
            # Bug fix: index this device's region list.  The previous
            # ``regions_entered[-1]`` indexed the defaultdict with key -1,
            # creating a bogus entry and logging an empty list instead of
            # the current region name.
            _LOGGER.debug(
                "location update ignored - inside region %s",
                regions_entered[dev_id][-1])
            return

        hass.async_add_job(async_see(**kwargs))
        async_see_beacons(dev_id, kwargs)

    @callback
    def async_owntracks_event_update(topic, payload, qos):
        """MQTT event (geofences) received."""
        # Docs on available data:
        # http://owntracks.org/booklet/tech/json/#_typetransition
        data = validate_payload(topic, payload, VALIDATE_TRANSITION)
        if not data:
            return

        if data.get('desc') is None:
            _LOGGER.error(
                "Location missing from `Entering/Leaving` message - "
                "please turn `Share` on in OwnTracks app")
            return
        # OwnTracks uses - at the start of a beacon zone
        # to switch on 'hold mode' - ignore this
        location = data['desc'].lstrip("-")
        if location.lower() == 'home':
            location = STATE_HOME

        dev_id, kwargs = _parse_see_args(topic, data)

        def enter_event():
            """Execute enter event."""
            zone = hass.states.get("zone.{}".format(slugify(location)))
            if zone is None and data.get('t') == 'b':
                # Not a HA zone, and a beacon so assume mobile
                beacons = mobile_beacons_active[dev_id]
                if location not in beacons:
                    beacons.append(location)
                _LOGGER.info("Added beacon %s", location)
            else:
                # Normal region
                regions = regions_entered[dev_id]
                if location not in regions:
                    regions.append(location)
                _LOGGER.info("Enter region %s", location)
                _set_gps_from_zone(kwargs, location, zone)

            hass.async_add_job(async_see(**kwargs))
            async_see_beacons(dev_id, kwargs)

        def leave_event():
            """Execute leave event."""
            regions = regions_entered[dev_id]
            if location in regions:
                regions.remove(location)
            new_region = regions[-1] if regions else None

            if new_region:
                # Exit to previous region
                zone = hass.states.get(
                    "zone.{}".format(slugify(new_region)))
                _set_gps_from_zone(kwargs, new_region, zone)
                _LOGGER.info("Exit to %s", new_region)
                hass.async_add_job(async_see(**kwargs))
                async_see_beacons(dev_id, kwargs)
            else:
                _LOGGER.info("Exit to GPS")
                # Check for GPS accuracy
                valid_gps = True
                if 'acc' in data:
                    if data['acc'] == 0.0:
                        valid_gps = False
                        _LOGGER.warning(
                            'Ignoring GPS in region exit because accuracy'
                            'is zero: %s',
                            payload)
                    if (max_gps_accuracy is not None and
                            data['acc'] > max_gps_accuracy):
                        valid_gps = False
                        _LOGGER.info(
                            'Ignoring GPS in region exit because expected '
                            'GPS accuracy %s is not met: %s',
                            max_gps_accuracy, payload)
                if valid_gps:
                    hass.async_add_job(async_see(**kwargs))
                    async_see_beacons(dev_id, kwargs)

            beacons = mobile_beacons_active[dev_id]
            if location in beacons:
                beacons.remove(location)
                _LOGGER.info("Remove beacon %s", location)

        if data['event'] == 'enter':
            enter_event()
        elif data['event'] == 'leave':
            leave_event()
        else:
            _LOGGER.error(
                'Misformatted mqtt msgs, _type=transition, event=%s',
                data['event'])
            return

    @callback
    def async_owntracks_waypoint_update(topic, payload, qos):
        """List of waypoints published by a user."""
        # Docs on available data:
        # http://owntracks.org/booklet/tech/json/#_typewaypoints
        data = validate_payload(topic, payload, VALIDATE_WAYPOINTS)
        if not data:
            return

        wayps = data['waypoints']
        _LOGGER.info("Got %d waypoints from %s", len(wayps), topic)
        for wayp in wayps:
            name = wayp['desc']
            pretty_name = parse_topic(topic, True)[1] + ' - ' + name
            lat = wayp[WAYPOINT_LAT_KEY]
            lon = wayp[WAYPOINT_LON_KEY]
            rad = wayp['rad']

            # check zone exists
            entity_id = zone_comp.ENTITY_ID_FORMAT.format(slugify(pretty_name))

            # Check if state already exists
            if hass.states.get(entity_id) is not None:
                continue

            zone = zone_comp.Zone(hass, pretty_name, lat, lon, rad,
                                  zone_comp.ICON_IMPORT, False)
            zone.entity_id = entity_id
            hass.async_add_job(zone.async_update_ha_state())

    @callback
    def async_see_beacons(dev_id, kwargs_param):
        """Set active beacons to the current location."""
        kwargs = kwargs_param.copy()
        # the battery state applies to the tracking device, not the beacon
        kwargs.pop('battery', None)
        for beacon in mobile_beacons_active[dev_id]:
            kwargs['dev_id'] = "{}_{}".format(BEACON_DEV_ID, beacon)
            kwargs['host_name'] = beacon
            hass.async_add_job(async_see(**kwargs))

    yield from mqtt.async_subscribe(
        hass, LOCATION_TOPIC, async_owntracks_location_update, 1)
    yield from mqtt.async_subscribe(
        hass, EVENT_TOPIC, async_owntracks_event_update, 1)

    if waypoint_import:
        if waypoint_whitelist is None:
            yield from mqtt.async_subscribe(
                hass, WAYPOINT_TOPIC.format('+', '+'),
                async_owntracks_waypoint_update, 1)
        else:
            for whitelist_user in waypoint_whitelist:
                yield from mqtt.async_subscribe(
                    hass, WAYPOINT_TOPIC.format(whitelist_user, '+'),
                    async_owntracks_waypoint_update, 1)

    return True
def parse_topic(topic, pretty=False):
    """Parse an MQTT topic owntracks/user/dev, return (user, dev) tuple.

    Async friendly.
    """
    # Topic layout is owntracks/<user>/<device>[/...]; only the first two
    # path components after the prefix matter here.
    _, user, device = topic.split('/')[:3]
    template = '{} {}' if pretty else '{}_{}'
    dev_id = slugify(template.format(user, device))
    return (user, dev_id)
def _parse_see_args(topic, data):
    """Parse the OwnTracks location parameters, into the format see expects.

    Async friendly.
    """
    host_name, dev_id = parse_topic(topic, False)
    kwargs = {
        'dev_id': dev_id,
        'host_name': host_name,
        'gps': (data[WAYPOINT_LAT_KEY], data[WAYPOINT_LON_KEY]),
    }
    # Optional fields are copied through only when present in the payload.
    for src_key, dst_key in (('acc', 'gps_accuracy'), ('batt', 'battery')):
        if src_key in data:
            kwargs[dst_key] = data[src_key]
    return dev_id, kwargs
def _set_gps_from_zone(kwargs, location, zone):
"""Set the see parameters from the zone parameters.
Async friendly.
"""
if zone is not None:
kwargs['gps'] = (
zone.attributes['latitude'],
zone.attributes['longitude'])
kwargs['gps_accuracy'] = zone.attributes['radius']
kwargs['location_name'] = location
return kwargs
| |
from _rawffi import alt as _ffi
import _rawffi
from _ctypes.basics import _CData, cdata_from_address, _CDataMeta, sizeof
from _ctypes.basics import keepalive_key, store_reference, ensure_objects
from _ctypes.basics import CArgObject, as_ffi_pointer
class ArrayMeta(_CDataMeta):
    """Metaclass for ctypes array types.

    Builds the _rawffi backing array for each concrete Array subclass and,
    for char/wchar element types, installs the ``value``/``raw`` properties.
    """

    def __new__(self, name, cls, typedict):
        res = type.__new__(self, name, cls, typedict)
        if cls == (_CData,): # this is the Array class defined below
            return res
        # Low-level array descriptor for this element shape.
        ffiarray = res._ffiarray = _rawffi.Array(res._type_._ffishape_)
        subletter = getattr(res._type_, '_type_', None)
        if subletter == 'c':
            # c_char arrays get string-like .value/.raw accessors.
            def getvalue(self):
                return _rawffi.charp2string(self._buffer.buffer,
                                            self._length_)
            def setvalue(self, val):
                # we don't want to have buffers here
                if len(val) > self._length_:
                    raise ValueError("%r too long" % (val,))
                if isinstance(val, str):
                    _rawffi.rawstring2charp(self._buffer.buffer, val)
                else:
                    for i in range(len(val)):
                        self[i] = val[i]
                if len(val) < self._length_:
                    self._buffer[len(val)] = b'\x00'
            res.value = property(getvalue, setvalue)

            def getraw(self):
                return _rawffi.charp2rawstring(self._buffer.buffer,
                                               self._length_)

            def setraw(self, buffer):
                if len(buffer) > self._length_:
                    raise ValueError("%r too long" % (buffer,))
                _rawffi.rawstring2charp(self._buffer.buffer, buffer)
            res.raw = property(getraw, setraw)
        elif subletter == 'u':
            # c_wchar arrays get a unicode .value accessor.
            def getvalue(self):
                return _rawffi.wcharp2unicode(self._buffer.buffer,
                                              self._length_)
            def setvalue(self, val):
                # we don't want to have buffers here
                if len(val) > self._length_:
                    raise ValueError("%r too long" % (val,))
                if isinstance(val, str):
                    target = self._buffer
                else:
                    target = self
                for i in range(len(val)):
                    target[i] = val[i]
                if len(val) < self._length_:
                    # NUL-terminate when the new value is shorter than the array.
                    target[len(val)] = '\x00'
            res.value = property(getvalue, setvalue)
        res._ffishape_ = (ffiarray, res._length_)
        res._fficompositesize_ = res._sizeofinstances()
        return res

    from_address = cdata_from_address

    def _sizeofinstances(self):
        # Total byte size of one array instance (element size * length,
        # including alignment as computed by _rawffi).
        size, alignment = self._ffiarray.size_alignment(self._length_)
        return size

    def _alignmentofinstances(self):
        return self._type_._alignmentofinstances()

    def _CData_output(self, resarray, base=None, index=-1):
        # this seems to be a string if we're array of char, surprise!
        from ctypes import c_char, c_wchar
        if self._type_ is c_char:
            return _rawffi.charp2string(resarray.buffer, self._length_)
        if self._type_ is c_wchar:
            return _rawffi.wcharp2unicode(resarray.buffer, self._length_)
        # Wrap the raw buffer in a new instance without copying; keep the
        # owner alive through _base.
        res = self.__new__(self)
        ffiarray = self._ffiarray.fromaddress(resarray.buffer, self._length_)
        res._buffer = ffiarray
        res._base = base
        res._index = index
        return res

    def _CData_retval(self, resbuffer):
        raise NotImplementedError

    def from_param(self, value):
        # array accepts very strange parameters as part of structure
        # or function argument...
        from ctypes import c_char, c_wchar
        if issubclass(self._type_, (c_char, c_wchar)):
            # XXX: this should maybe be stricer for py3 (c_char disallowing str?)
            if isinstance(value, (bytes, str)):
                if len(value) > self._length_:
                    raise ValueError("Invalid length")
                value = self(*value)
            elif not isinstance(value, self):
                raise TypeError("expected string, %s found"
                                % (value.__class__.__name__,))
        else:
            if isinstance(value, tuple):
                if len(value) > self._length_:
                    raise RuntimeError("Invalid length")
                value = self(*value)
        return _CDataMeta.from_param(self, value)
def array_get_slice_params(self, index):
    """Normalize a slice object into concrete ``(start, stop, step)``.

    Arrays with a known ``_length_`` delegate to ``slice.indices()``;
    otherwise the slice must be explicit enough on its own: ``stop`` is
    always required, and ``start`` is required for non-positive steps
    because there is no length to clamp against.
    """
    if hasattr(self, '_length_'):
        return index.indices(self._length_)
    step = 1 if index.step is None else index.step
    start = index.start
    stop = index.stop
    if start is None:
        if step <= 0:
            raise ValueError("slice start is required for step < 0")
        start = 0
    if stop is None:
        raise ValueError("slice stop is required")
    return start, stop, step
def array_slice_setitem(self, index, value):
    """Assign the sequence *value* into the slice *index* of this array.

    The number of items in *value* must exactly match the number of slots
    selected by the slice; a ctypes array cannot grow or shrink.

    Raises:
        ValueError: if the slice selects a different number of slots than
            ``len(value)``.
    """
    start, stop, step = self._get_slice_params(index)
    if ((step < 0 and stop >= start) or
        (step > 0 and start >= stop)):
        slicelength = 0
    elif step < 0:
        # BUG fix: these two computations previously used "/", which is true
        # division on Python 3 and produced a float (e.g. 3.5), so any
        # non-exact division made the length check below fail spuriously.
        # Floor division gives len(range(start, stop, step)) exactly.
        slicelength = (stop - start + 1) // step + 1
    else:
        slicelength = (stop - start - 1) // step + 1
    if slicelength != len(value):
        raise ValueError("Can only assign slices of the same length")
    for i, j in enumerate(range(start, stop, step)):
        self[j] = value[i]
def array_slice_getitem(self, index):
    """Read the slice *index* of this array.

    c_char arrays yield ``bytes``, c_wchar arrays yield ``str``; every other
    element type yields a plain list of elements.
    """
    start, stop, step = self._get_slice_params(index)
    items = [self[i] for i in range(start, stop, step)]
    letter = getattr(self._type_, '_type_', None)
    joiner = {'c': b"".join, 'u': "".join}.get(letter)
    if joiner is not None:
        return joiner(items)
    return items
class Array(_CData, metaclass=ArrayMeta):
    """Base class for all ctypes array instances.

    Concrete subclasses are produced by ArrayMeta / create_array_type and
    carry ``_type_`` (element type) and ``_length_`` class attributes.
    """
    # Arrays are passed to C functions as a pointer ('P' ffi shape).
    _ffiargshape_ = 'P'
    def __init__(self, *args):
        # Allocate the backing buffer on first init, then fill leading items.
        if not hasattr(self, '_buffer'):
            self._buffer = self._ffiarray(self._length_, autofree=True)
        for i, arg in enumerate(args):
            self[i] = arg
    def _fix_index(self, index):
        # Normalize a possibly-negative index and bounds-check it.
        if index < 0:
            index += self._length_
        if 0 <= index < self._length_:
            return index
        else:
            raise IndexError
    _get_slice_params = array_get_slice_params
    _slice_getitem = array_slice_getitem
    _slice_setitem = array_slice_setitem
    def _subarray(self, index):
        """Return a _rawffi array of length 1 whose address is the same as
        the index'th item of self."""
        address = self._buffer.itemaddress(index)
        return self._ffiarray.fromaddress(address, 1)
    def __setitem__(self, index, value):
        if isinstance(index, slice):
            self._slice_setitem(index, value)
            return
        index = self._fix_index(index)
        cobj = self._type_.from_param(value)
        # Root the converted object's keepalive references on this array so
        # pointed-to data stays alive as long as the array does.
        if ensure_objects(cobj) is not None:
            store_reference(self, index, cobj._objects)
        arg = cobj._get_buffer_value()
        if self._type_._fficompositesize_ is None:
            self._buffer[index] = arg
        # something more sophisticated, cannot set field directly
        else:
            from ctypes import memmove
            dest = self._buffer.itemaddress(index)
            memmove(dest, arg, self._type_._fficompositesize_)
    def __getitem__(self, index):
        if isinstance(index, slice):
            return self._slice_getitem(index)
        index = self._fix_index(index)
        return self._type_._CData_output(self._subarray(index), self, index)
    def __len__(self):
        return self._length_
    def _get_buffer_for_param(self):
        # Wrap in CArgObject so the by-pointer buffer stays alive for the call.
        return CArgObject(self, self._buffer.byptr())
    def _get_buffer_value(self):
        return self._buffer.buffer
    def _to_ffi_param(self):
        return self._get_buffer_value()
    def _as_ffi_pointer_(self, ffitype):
        return as_ffi_pointer(self, ffitype)
# Memoization table: (element type, length) -> generated array class.
ARRAY_CACHE = {}

def create_array_type(base, length):
    """Build (and memoize) the ctypes array type ``base * length``.

    Raises TypeError for a non-integer length and ValueError for a negative
    one, mirroring CPython's ctypes behavior.
    """
    if not isinstance(length, int):
        raise TypeError("Can't multiply a ctypes type by a non-integer")
    if length < 0:
        raise ValueError("Array length must be >= 0")
    key = (base, length)
    if key in ARRAY_CACHE:
        return ARRAY_CACHE[key]
    cls = ArrayMeta(
        "%s_Array_%d" % (base.__name__, length),
        (Array,),
        {'_length_': length, '_type_': base},
    )
    cls._ffiargtype = _ffi.types.Pointer(base.get_ffi_argtype())
    ARRAY_CACHE[key] = cls
    return cls
| |
import windows
import windows.generated_def as gdef
import struct
try:
unichr # Py2/Py3 compat
except NameError:
unichr = chr
# http://pubs.opengroup.org/onlinepubs/9629399/chap14.htm#tagcjh_19_03_07
## Array
# A conformant array is an array in which the maximum number of elements is not known beforehand and therefore is included in the representation of the array.
# A varying array is an array in which the actual number of elements passed in a given call varies and therefore is included in the representation of the array.
## Pointers
# NDR defines two classes of pointers that differ both in semantics and in representation
# - reference pointers, which cannot be null and cannot be aliases
# - full pointers, which can be null and can be aliased
# - unique pointers, which can be null and cannot be aliases, and are transmitted as full pointers.
def pack_dword(x):
    """Serialize *x* as a little-endian 32-bit unsigned integer."""
    return struct.pack("<I", x)
def dword_pad(s):
    """Pad *s* with b"P" filler bytes up to the next 4-byte (DWORD) boundary."""
    pad = (-len(s)) % 4
    if not pad:
        return s
    return s + b"P" * pad
class NdrUniquePTR(object):
    """Create a UNIQUE PTR around a given Ndr type"""
    def __init__(self, subcls):
        # subcls: the Ndr type describing the pointee.
        self.subcls = subcls
    def pack(self, data):
        """Pack as a top-level pointer: non-zero referent id then the pointee."""
        subpack = self.subcls.pack(data)
        if subpack is None:
            # NULL pointer: only a zero referent id, no pointee data.
            return pack_dword(0)
        # 0x02020202 is an arbitrary non-zero referent id.
        return pack_dword(0x02020202) + subpack
    def unpack(self, stream):
        """Unpack a top-level pointer; returns None for a NULL referent id."""
        ptr = NdrLong.unpack(stream)
        if not ptr:
            return None
        return self.subcls.unpack(stream)
    def pack_in_struct(self, data, id):
        """Pack as an embedded pointer.

        Returns ``(referent-id bytes, deferred pointee bytes or None)``; the
        enclosing structure serializes the pointee after its flat part.
        """
        if data is None:
            return pack_dword(0), None
        subpack = self.subcls.pack(data)
        if subpack is None:
            return pack_dword(0), None
        # Give each embedded pointer a distinct non-zero referent id.
        return pack_dword(0x01010101 * (id + 1)), subpack
    def unpack_in_struct(self, stream):
        """Read the referent id; return (id, unpacker-to-apply-later)."""
        ptr = NdrLong.unpack(stream)
        if not ptr:
            return 0, NdrUnpackNone
        return ptr, self.subcls
    def parse(self, stream):
        # NOTE(review): near-duplicate of unpack() going through
        # partial_unpack and subcls.parse -- confirm which callers use which.
        data = stream.partial_unpack("<I")
        if data[0] == 0:
            return None
        return self.subcls.parse(stream)
    def get_alignment(self):
        # 14.3.2 Alignment of Constructed Types
        # Pointer alignment is always modulo 4.
        return 4
class NdrUnpackNone(object):
    """Placeholder unpacker for NULL unique pointers: yields ``None``.

    Consumes nothing from the stream; used as the deferred unpacker returned
    by ``NdrUniquePTR.unpack_in_struct`` when the referent id is zero.
    """
    @classmethod
    def unpack(cls, stream):
        # A NULL pointer has no pointee data in the stream to consume.
        return None
class NdrRef(object):
    """Reference pointer wrapper: like a unique pointer, but must be non-NULL."""
    # NOTE: marked "TESTING" upstream -- experimental.
    def __init__(self, subcls):
        self.subcls = subcls
    def unpack(self, stream):
        referent = NdrLong.unpack(stream)
        if referent:
            return self.subcls.unpack(stream)
        raise ValueError("Ndr REF cannot be NULL")
class NdrFixedArray(object):
    """Fixed-size NDR array: the element count is part of the type, so it is
    not transmitted on the wire."""
    def __init__(self, subcls, size):
        self.subcls = subcls
        self.size = size
    def pack(self, data):
        elements = list(data)
        assert len(elements) == self.size
        packed = [self.subcls.pack(item) for item in elements]
        return dword_pad(b"".join(packed))
    def unpack(self, stream):
        return [self.subcls.unpack(stream) for _ in range(self.size)]
    def get_alignment(self):
        # Alignment is entirely driven by the element type.
        return self.subcls.get_alignment()
class NdrSID(object):
    """Security identifier (SID): sub-authority count dword, then raw SID bytes."""
    @classmethod
    def pack(cls, psid):
        """Pack a PSID

        :param PSID psid:
        """
        subcount = windows.winproxy.GetSidSubAuthorityCount(psid)
        size = windows.winproxy.GetLengthSid(psid)
        # Copy the raw SID bytes out of our own process memory.
        sid_data = windows.current_process.read_memory(psid.value, size)
        return pack_dword(subcount[0]) + dword_pad(sid_data)
    @classmethod
    def unpack(cls, stream):
        """Unpack a PSID, partial implementation that returns a :class:`str` and not a PSID"""
        subcount = NdrLong.unpack(stream)
        # 8-byte SID header + 4 bytes per sub-authority.
        return stream.read(8 + (subcount * 4))
    @classmethod
    def get_alignment(self):
        # Not sure, but it seems to contain an array of long
        return 4
class NdrVaryingCString(object):
    """NDR varying string of 8-bit chars: offset + actual count header, no
    max count (unlike the conformant-varying NdrCString)."""
    @classmethod
    def pack(cls, data):
        """Pack string ``data``. append ``\\x00`` if not present at the end of the string"""
        if data is None:
            return None
        if not data.endswith('\x00'):
            data += '\x00'
        l = len(data)
        # Header: offset (0) and actual element count.
        result = struct.pack("<2I", 0, l)
        # BUG fix: `result` is bytes, so concatenating the str input raised
        # TypeError on Python 3. Encode first (latin-1 preserves byte values;
        # this is a no-op on Python 2 where str already is bytes).
        if not isinstance(data, bytes):
            data = data.encode("latin-1")
        result += data
        return dword_pad(result)
    @classmethod
    def get_alignment(cls):
        # Not sure, but size is on 4 bytes so...
        return 4
class NdrWString(object):
    """NDR conformant-varying wide string, encoded as UTF-16-LE."""
    @classmethod
    def pack(cls, data):
        """Pack string ``data``. append ``\\x00`` if not present at the end of the string"""
        if data is None:
            return None
        if not data.endswith('\x00'):
            data += '\x00'
        encoded = data.encode("utf-16-le")
        nchars = len(encoded) // 2
        # Header: max count, offset (0), actual count -- all in characters.
        return dword_pad(struct.pack("<3I", nchars, 0, nchars) + encoded)
    @classmethod
    def unpack(cls, stream):
        stream.align(4)
        maxcount, offset, count = stream.partial_unpack("<3I")
        assert maxcount == count
        assert offset == 0
        raw = stream.read(count * 2)
        return raw.decode("utf-16-le")
    @classmethod
    def get_alignment(cls):
        # Not sure, but size is on 4 bytes so...
        return 4
class NdrCString(object):
    """NDR conformant-varying string of 8-bit chars: max count, offset,
    actual count header followed by the bytes."""
    @classmethod
    def pack(cls, data):
        """Pack string ``data``. append ``\\x00`` if not present at the end of the string"""
        if data is None:
            return None
        if not data.endswith('\x00'):
            data += '\x00'
        l = len(data)
        result = struct.pack("<3I", l, 0, l)
        # BUG fix: `result` is bytes, so concatenating the str input raised
        # TypeError on Python 3. Encode first (latin-1 preserves byte values;
        # this is a no-op on Python 2 where str already is bytes).
        if not isinstance(data, bytes):
            data = data.encode("latin-1")
        result += data
        return dword_pad(result)
    @classmethod
    def get_alignment(cls):
        # Not sure, but size is on 4 bytes so...
        return 4
    # unpack() intentionally not implemented upstream; header layout would be:
    # @classmethod
    # def unpack(self, stream):
    #     maxcount, offset, count = stream.partial_unpack("<3I")
    #     return maxcount, offset, count
# Ready-made unique-pointer wrappers around the two string flavours.
NdrUniqueCString = NdrUniquePTR(NdrCString)
NdrUniqueWString = NdrUniquePTR(NdrWString)
class NdrLong(object):
    """32-bit little-endian NDR integer."""
    _fmt = struct.Struct("<I")
    @classmethod
    def pack(cls, data):
        return cls._fmt.pack(data)
    @classmethod
    def unpack(cls, stream):
        # NDR longs are aligned on 4 bytes before being read.
        stream.align(4)
        (value,) = stream.partial_unpack("<I")
        return value
    @classmethod
    def get_alignment(cls):
        return 4
class NdrHyper(object):
    """64-bit little-endian NDR integer ("hyper")."""
    _fmt = struct.Struct("<Q")
    @classmethod
    def pack(cls, data):
        return cls._fmt.pack(data)
    @classmethod
    def unpack(cls, stream):
        # Hypers are aligned on 8 bytes before being read.
        stream.align(8)
        (value,) = stream.partial_unpack("<Q")
        return value
    @classmethod
    def get_alignment(cls):
        return 8
class NdrShort(object):
    """16-bit little-endian NDR integer; callers are expected to pre-align."""
    _fmt = struct.Struct("<H")
    @classmethod
    def pack(cls, data):
        return cls._fmt.pack(data)
    @classmethod
    def unpack(cls, stream):
        (value,) = stream.partial_unpack("<H")
        return value
    @classmethod
    def get_alignment(cls):
        return 2
class NdrByte(object):
    """Single unsigned byte, no alignment requirement."""
    _fmt = struct.Struct("<B")
    @classmethod
    def pack(cls, data):
        return cls._fmt.pack(data)
    @classmethod
    def unpack(cls, stream):
        (value,) = stream.partial_unpack("<B")
        return value
    @classmethod
    def get_alignment(cls):
        return 1
class NdrGuid(object):
    """16-byte GUID, serialized as the raw in-memory IID bytes."""
    @classmethod
    def pack(cls, data):
        # Accept either an IID instance or its string representation.
        if not isinstance(data, gdef.IID):
            data = gdef.IID.from_string(data)
        return bytes(bytearray(data))
    @classmethod
    def unpack(self, stream):
        rawguid = stream.partial_unpack("16s")[0]
        return gdef.IID.from_buffer_copy(rawguid)
    @classmethod
    def get_alignment(self):
        return 1
class NdrContextHandle(object):
    """RPC context handle: a 4-byte attributes field followed by a 16-byte GUID."""
    @classmethod
    def pack(cls, data):
        # Accept either an IID instance or its string form; attributes are 0.
        if not isinstance(data, gdef.IID):
            data = gdef.IID.from_string(data)
        return bytes(struct.pack("<I", 0) + bytearray(data))
    @classmethod
    def unpack(self, stream):
        # The attributes dword is read but intentionally discarded.
        attributes, rawguid = stream.partial_unpack("<I16s")
        return gdef.IID.from_buffer_copy(rawguid)
    @classmethod
    def get_alignment(self):
        return 4
class NdrStructure(object):
    """An NDR structure that tries to respect the rules of pointer packing.

    Subclass with an attribute ``MEMBERS`` (list of Ndr types) describing the
    members of the structure. Embedded pointers are serialized as a referent
    id in the flat part with the pointee deferred to the end; a conformant
    member's size prefix is hoisted to the front of the structure.
    """
    @classmethod
    def pack(cls, data):
        """Pack data into the struct, ``data`` size must equal the number of members in the structure"""
        if not (len(data) == len(cls.MEMBERS)):
            # Diagnostic dump before failing: a mismatched arity is a caller bug.
            print("Size mismatch:")
            print(" * data size = {0}".format(len(data)))
            print(" * members size = {0}".format(len(cls.MEMBERS)))
            print(" * data {0}".format(data))
            print(" * members = {0}".format(cls.MEMBERS))
            raise ValueError("NdrStructure packing number elements mismatch: structure has <{0}> members got <{1}>".format(len(cls.MEMBERS), len(data)))
        conformant_size = []
        outstream = NdrWriteStream()
        pointed_to_pack = []
        for i, (member, memberdata) in enumerate(zip(cls.MEMBERS, data)):
            if hasattr(member, "pack_in_struct"):
                # Embedded pointer: write a 4-byte referent id now and defer
                # the pointee until after the flat part of the structure.
                ptr_repr, pointee = member.pack_in_struct(memberdata, i)
                assert len(ptr_repr) == 4, "Pointer should be size 4"
                outstream.align(4)
                outstream.write(ptr_repr)
                if pointee is not None:
                    pointed_to_pack.append((member.subcls.get_alignment(), pointee))
            elif hasattr(member, "pack_conformant"):
                # Conformant member: the size prefix is hoisted to the front
                # of the structure; only the payload is written in place.
                # (Renamed from `data` to avoid shadowing the argument.)
                size, conformant_data = member.pack_conformant(memberdata)
                outstream.align(member.get_alignment())
                outstream.write(conformant_data)
                conformant_size.append(size)
            else:
                packed_member = member.pack(memberdata)
                outstream.align(member.get_alignment())
                outstream.write(packed_member)
        # Deferred pointees are serialized after the structure body.
        for alignement, pointed_data in pointed_to_pack:
            outstream.align(alignement)
            outstream.write(pointed_data)
        return dword_pad(b"".join(conformant_size)) + outstream.get_data()
    @classmethod
    def unpack(cls, stream):
        """Unpack the structure from the stream"""
        conformant_members = [hasattr(m, "pack_conformant") for m in cls.MEMBERS]
        is_conformant = any(conformant_members)
        assert(conformant_members.count(True) <= 1), "Unpack conformant struct with more that one conformant MEMBER not implem"
        data = []
        if is_conformant:
            # The hoisted conformant size prefix comes first.
            conformant_size = NdrLong.unpack(stream)
        post_subcls = []
        for i, member in enumerate(cls.MEMBERS):
            if conformant_members[i]:
                data.append(member.unpack_conformant(stream, conformant_size))
            elif hasattr(member, "unpack_in_struct"):
                # Embedded pointer: record the referent id now and defer
                # unpacking of the pointee until after the flat part.
                ptr, subcls = member.unpack_in_struct(stream)
                if not ptr:
                    data.append(None)
                else:
                    data.append(ptr)
                    post_subcls.append((i, subcls))
            else:
                data.append(member.unpack(stream))
        # Deferred pointees follow the structure body in the stream.
        for i, entry in post_subcls:
            new_data = entry.unpack(stream)
            if getattr(entry, "post_unpack", None):
                new_data = entry.post_unpack(new_data)
            data[i] = new_data
        return cls.post_unpack(data)
    @classmethod
    def post_unpack(cls, data):
        # Hook for subclasses to reshape the unpacked member list.
        return data
    @classmethod
    def get_alignment(cls):
        return max([x.get_alignment() for x in cls.MEMBERS])
class NdrParameters(object):
    """A class to pack NDR parameters together to perform RPC calls.

    Subclass with an attribute ``MEMBERS`` (list of Ndr types) describing the
    parameters, in call order.
    """
    @classmethod
    def pack(cls, data):
        """Pack *data* (one value per MEMBER, in order) into one request buffer."""
        if not (len(data) == len(cls.MEMBERS)):
            # Diagnostic dump before failing: a mismatched arity is a caller bug.
            # (Fixed "mistach" typo in the message.)
            print("Size mismatch:")
            print(" * data size = {0}".format(len(data)))
            print(" * members size = {0}".format(len(cls.MEMBERS)))
            print(" * data {0}".format(data))
            print(" * members = {0}".format(cls.MEMBERS))
            raise ValueError("NdrParameters packing number elements mismatch: structure has <{0}> members got <{1}>".format(len(cls.MEMBERS), len(data)))
        outstream = NdrWriteStream()
        for (member, memberdata) in zip(cls.MEMBERS, data):
            # Each top-level parameter is aligned independently.
            alignment = member.get_alignment()
            outstream.align(alignment)
            packed_member = member.pack(memberdata)
            outstream.write(packed_member)
        return outstream.get_data()
    @classmethod
    def unpack(cls, stream):
        """Unpack one value per MEMBER, in order; returns them as a list."""
        res = []
        for member in cls.MEMBERS:
            unpacked_member = member.unpack(stream)
            res.append(unpacked_member)
        return res
    def get_alignment(self):
        # Parameters are a top-level construct; nothing should ever nest them.
        raise ValueError("NdrParameters should always be top type in NDR description")
class NdrConformantArray(object):
    """NDR conformant array: a 32-bit element count followed by the elements."""
    MEMBER_TYPE = None
    @classmethod
    def pack(cls, data):
        header = NdrLong.pack(len(data))
        body = [cls.MEMBER_TYPE.pack(item) for item in data]
        return dword_pad(header + b"".join(body))
    @classmethod
    def pack_conformant(cls, data):
        # Same as pack(), but the size prefix is returned separately so the
        # enclosing structure can hoist it to its front.
        header = NdrLong.pack(len(data))
        body = dword_pad(b"".join([cls.MEMBER_TYPE.pack(item) for item in data]))
        return header, body
    @classmethod
    def unpack(cls, stream):
        count = NdrLong.unpack(stream)
        return cls._post_unpack(cls.unpack_conformant(stream, count))
    @classmethod
    def _post_unpack(cls, result):
        # Hook for subclasses (e.g. byte arrays -> bytearray).
        return result
    @classmethod
    def unpack_conformant(cls, stream, size):
        elements = [cls.MEMBER_TYPE.unpack(stream) for _ in range(size)]
        stream.align(4)
        return elements
    @classmethod
    def get_alignment(cls):
        # TODO: test on array of Hyper
        return max(4, cls.MEMBER_TYPE.get_alignment())
class NdrConformantVaryingArrays(object):
    """NDR conformant varying array: max count, offset, actual count header
    followed by the elements (pointer elements are deferred)."""
    MEMBER_TYPE = None
    @classmethod
    def pack(cls, data):
        """Pack *data*: max count and actual count are both len(data), offset 0."""
        ndrsize = NdrLong.pack(len(data))
        offset = NdrLong.pack(0)
        return dword_pad(ndrsize + offset + ndrsize + b"".join([cls.MEMBER_TYPE.pack(memberdata) for memberdata in data]))
    @classmethod
    def unpack(cls, stream):
        maxcount = NdrLong.unpack(stream)
        offset = NdrLong.unpack(stream)
        count = NdrLong.unpack(stream)
        assert(offset == 0)
        # assert(maxcount == count)
        result = []
        post_subcls = []
        for i in range(count):
            member = cls.MEMBER_TYPE
            if hasattr(member, "unpack_in_struct"):
                # Pointer element: keep the referent id, defer the pointee.
                ptr, subcls = member.unpack_in_struct(stream)
                if not ptr:
                    result.append(None)
                else:
                    result.append(ptr)
                    post_subcls.append((i, subcls))
            else:
                result.append(member.unpack(stream))
        # Unpack deferred pointees after the array body.
        for i, entry in post_subcls:
            result[i] = entry.unpack(stream)
        return cls._post_unpack(result)
    @classmethod
    def _post_unpack(cls, result):
        # Hook for subclasses (e.g. wchar arrays -> unicode string).
        return result
    @classmethod
    def get_alignment(cls):
        # BUG fix: @classmethod was missing, so Type.get_alignment() raised
        # TypeError when invoked on the class itself (which is exactly how
        # NdrStructure.get_alignment and NdrParameters.pack call it).
        # TODO: test on array of Hyper
        return max(4, cls.MEMBER_TYPE.get_alignment())
class NdrWcharConformantVaryingArrays(NdrConformantVaryingArrays):
    """Conformant varying array of 16-bit chars, decoded to a unicode string."""
    MEMBER_TYPE = NdrShort
    @classmethod
    def _post_unpack(cls, result):
        # Each element is a UTF-16 code unit; join them into one string.
        return u"".join(map(unichr, result))
class NdrCharConformantVaryingArrays(NdrConformantVaryingArrays):
    """Conformant varying array of bytes; elements stay a list of ints."""
    MEMBER_TYPE = NdrByte
class NdrHyperConformantVaryingArrays(NdrConformantVaryingArrays):
    """Conformant varying array of 64-bit integers."""
    MEMBER_TYPE = NdrHyper
class NdrHyperConformantArray(NdrConformantArray):
    """Conformant array of 64-bit integers."""
    MEMBER_TYPE = NdrHyper
class NdrLongConformantArray(NdrConformantArray):
    """Conformant array of 32-bit integers."""
    MEMBER_TYPE = NdrLong
class NdrShortConformantArray(NdrConformantArray):
    """Conformant array of 16-bit integers."""
    MEMBER_TYPE = NdrShort
class NdrByteConformantArray(NdrConformantArray):
    """Conformant array of bytes, post-processed into a bytearray."""
    MEMBER_TYPE = NdrByte
    @classmethod
    def _post_unpack(self, result):
        return bytearray(result)
class NdrWcharConformantArray(NdrConformantArray):
    """Conformant array of 16-bit chars."""
    MEMBER_TYPE = NdrShort
    @classmethod
    def _post_unpack(self, result):
        # NOTE(review): builds a bytearray from 16-bit units -- any element
        # > 255 raises ValueError; confirm a unicode join was not intended.
        return bytearray(result)
class NdrGuidConformantArray(NdrConformantArray):
    """Conformant array of GUIDs."""
    MEMBER_TYPE = NdrGuid
class NdrStream(object):
    """A stream of bytes used for NDR unpacking.

    ``fulldata`` keeps the original buffer (so alignment can be computed
    relative to the stream start); ``data`` is the not-yet-consumed tail.
    """
    def __init__(self, data):
        self.fulldata = data
        self.data = data
    def partial_unpack(self, format):
        """Consume ``struct.calcsize(format)`` bytes and unpack them."""
        size = struct.calcsize(format)
        head, self.data = self.data[:size], self.data[size:]
        return struct.unpack(format, head)
    def read_aligned_dword(self, size):
        """Return *size* bytes but consume up to the next DWORD boundary."""
        consumed = size if size % 4 == 0 else size + (4 - size % 4)
        chunk = self.data[:size]
        self.data = self.data[consumed:]
        return chunk
    def read(self, size):
        """Read exactly *size* bytes; raise ValueError if the stream is short."""
        chunk, self.data = self.data[:size], self.data[size:]
        if len(chunk) != size:
            raise ValueError("Could not read {0} from stream".format(size))
        return chunk
    def align(self, size):
        """Discard some bytes to align the remaining stream on ``size``"""
        consumed = len(self.fulldata) - len(self.data)
        remainder = consumed % size
        if not remainder:
            return 0
        skip = size - remainder
        self.data = self.data[skip:]
        return skip
class NdrWriteStream(object):
    """Accumulates packed chunks and inserts alignment padding between them."""
    def __init__(self):
        self.data_parts = []
        self.data_size = 0
    def get_data(self):
        """Return everything written so far as one bytes object."""
        result = b"".join(self.data_parts)
        # data_size is maintained incrementally; the join must agree with it.
        assert len(result) == self.data_size
        return result
    def write(self, data):
        self.data_parts.append(data)
        self.data_size += len(data)
        return None
    def align(self, alignement):
        """Pad with b"P" filler so the next write starts on an ``alignement`` boundary."""
        remainder = self.data_size % alignement
        if remainder:
            self.write(b"P" * (alignement - remainder))
        return
def make_parameters(types, name=None):
    """Build an anonymous :class:`NdrParameters` subclass with ``MEMBERS = types``."""
    # NOTE(review): `name` is accepted but never applied to the generated class.
    class NdrCustomParameters(NdrParameters):
        MEMBERS = types
    return NdrCustomParameters
def make_structure(types, name=None):
    """Build an anonymous :class:`NdrStructure` subclass with ``MEMBERS = types``."""
    # NOTE(review): `name` is accepted but never applied to the generated class.
    class NdrCustomStructure(NdrStructure):
        MEMBERS = types
    return NdrCustomStructure
| |
from __future__ import absolute_import, print_function
from failover import count, fail, Hysteresis, ok, second
import logging
from sys import stderr
from time import sleep
from units import unit
from unittest import TestCase, main
class TestTask(object):
    """Callable stub for driving Hysteresis: returns a configurable result,
    or raises a configured exception when one is armed."""
    def __init__(self, result=ok):
        super(TestTask, self).__init__()
        self.result = result
        self.exception = None
        return
    def __call__(self):
        # An armed exception takes priority over the canned result.
        exc = self.exception
        if exc:
            raise exc
        return self.result
class HysteresisTest(TestCase):
    """Exercises failover.Hysteresis: naming, count- and time-based state
    transitions, behavior across task exceptions, and constructor argument
    validation."""
    def setUp(self):
        # Route debug logging to stderr so failing tests are easy to diagnose.
        logging.basicConfig(
            stream=stderr, level=logging.DEBUG,
            format=("%(asctime)s %(module)s [%(levelname)s] "
                    "%(filename)s:%(lineno)d: %(message)s"))

    def test_name(self):
        """A supplied name becomes the repr; otherwise the default repr is used."""
        def no_op():
            return True

        checker = Hysteresis(task=no_op, name="hyster1",
                             fail_after=count(1),
                             ok_after=count(1))
        self.assertEqual(repr(checker), "hyster1")

        checker = Hysteresis(task=no_op,
                             fail_after=count(1),
                             ok_after=count(1))
        self.assertTrue(repr(checker).startswith(
            "<failover.hysteresis.Hysteresis"))
        return

    def test_counted_hysteresis(self):
        """State flips only after the configured number of consecutive results."""
        test_task = TestTask()
        checker = Hysteresis(
            task=test_task, initial_state=ok,
            fail_after=count(3),
            ok_after=count(5))
        self.assertTrue(checker())
        self.assertTrue(checker())

        test_task.result = fail
        self.assertTrue(checker())   # First failure -- still ok
        self.assertTrue(checker())   # Second failure -- still ok
        self.assertFalse(checker())  # Third failure -- transition
        self.assertFalse(checker())

        test_task.result = ok
        self.assertFalse(checker())  # First success -- still failed
        self.assertFalse(checker())  # Second success -- still failed
        self.assertFalse(checker())  # Third success -- still failed
        self.assertFalse(checker())  # Fourth success -- still failed
        self.assertTrue(checker())   # Fifth success -- transition
        self.assertTrue(checker())
        return

    def test_timed_hysteresis(self):
        """State flips only after the configured delay has elapsed."""
        test_task = TestTask()
        checker = Hysteresis(
            task=test_task, initial_state=ok,
            fail_after=second(0.5),
            ok_after=second(0.3))
        self.assertTrue(checker())
        self.assertTrue(checker())

        test_task.result = fail
        self.assertTrue(checker())   # Failure -- need a 0.5 sec delay
        self.assertTrue(checker())   # Failure -- no delay, still ok
        sleep(0.5)
        self.assertFalse(checker())  # Failure -- delayed, so now failed
        self.assertFalse(checker())

        test_task.result = ok
        self.assertFalse(checker())  # Success -- need a 0.3 sec delay
        self.assertFalse(checker())  # Success -- no delay, still failed
        sleep(0.3)
        self.assertTrue(checker())   # Success -- delayed, so now ok
        self.assertTrue(checker())
        return

    def test_failed_hysteresis(self):
        """Exceptions from the underlying task must not change the state."""
        test_task = TestTask()
        checker = Hysteresis(
            task=test_task, initial_state=ok,
            fail_after=count(2),
            ok_after=count(2))
        self.assertTrue(checker())

        # Throw an exception from the underlying task; hysteresis should still
        # succeed
        test_task.exception = ValueError()
        self.assertTrue(checker())
        self.assertTrue(checker())
        self.assertTrue(checker())
        self.assertTrue(checker())

        # Start failing
        test_task.exception = None
        test_task.result = fail
        self.assertTrue(checker())
        self.assertFalse(checker())
        self.assertFalse(checker())
        self.assertFalse(checker())

        # Throw an exception from the underlying task; hysteresis should still
        # fail.
        test_task.exception = ValueError()
        self.assertFalse(checker())
        self.assertFalse(checker())
        self.assertFalse(checker())
        self.assertFalse(checker())

    def test_alternative_units(self):
        """Plain integers for fail_after/ok_after behave like count(...)."""
        test_task = TestTask()
        checker = Hysteresis(
            task=test_task, initial_state=ok,
            fail_after=3,
            ok_after=5)
        self.assertTrue(checker())
        self.assertTrue(checker())

        test_task.result = fail
        self.assertTrue(checker())   # First failure -- still ok
        self.assertTrue(checker())   # Second failure -- still ok
        self.assertFalse(checker())  # Third failure -- transition
        self.assertFalse(checker())

        test_task.result = ok
        self.assertFalse(checker())  # First success -- still failed
        self.assertFalse(checker())  # Second success -- still failed
        self.assertFalse(checker())  # Third success -- still failed
        self.assertFalse(checker())  # Fourth success -- still failed
        self.assertTrue(checker())   # Fifth success -- transition
        self.assertTrue(checker())
        return

    def test_reject_invalid(self):
        """Non-positive thresholds raise ValueError; wrong types/units raise
        TypeError or ValueError as appropriate."""
        meter = unit("m")
        for c in [-2, -1, 0]:
            try:
                Hysteresis(task=None, fail_after=c, ok_after=1)
                self.fail("Expected ValueError")
            except ValueError:
                pass

            try:
                Hysteresis(task=None, fail_after=count(c), ok_after=1)
                self.fail("Expected ValueError")
            except ValueError:
                pass

            try:
                Hysteresis(task=None, fail_after=second(c), ok_after=1)
                self.fail("Expected ValueError")
            except ValueError:
                pass

            try:
                Hysteresis(task=None, fail_after=1, ok_after=c)
                self.fail("Expected ValueError")
            except ValueError:
                pass

            try:
                Hysteresis(task=None, fail_after=1, ok_after=count(c))
                self.fail("Expected ValueError")
            except ValueError:
                pass

            try:
                Hysteresis(task=None, fail_after=1, ok_after=second(c))
                self.fail("Expected ValueError")
            except ValueError:
                pass

        try:
            Hysteresis(task=None, fail_after=meter(1), ok_after=1)
            self.fail("Expected ValueError")
        except ValueError:
            pass

        try:
            Hysteresis(task=None, fail_after="qwerty", ok_after=1)
            self.fail("Expected TypeError")
        except TypeError:
            pass

        try:
            Hysteresis(task=None, fail_after=1, ok_after=meter(1))
            # BUG fix: message previously read "Expected ValError".
            self.fail("Expected ValueError")
        except ValueError:
            pass

        try:
            Hysteresis(task=None, fail_after=1, ok_after="zxcvasdf")
            self.fail("Expected TypeError")
        except TypeError:
            pass
| |
# -*- coding: utf-8 -*-
from __future__ import print_function
import collections
import acq4.analysis.atlas.Atlas as Atlas
import os
from acq4.util import Qt
import acq4.util.DataManager as DataManager
from acq4.analysis.atlas.AuditoryCortex.CortexROI import CortexROI
import numpy as np
import pyqtgraph as pg
import scipy
from acq4.util.debug import Profiler
from six.moves import range
class AuditoryCortex(Atlas.Atlas):
    """Auditory-cortex atlas: maps slice positions into normalized atlas
    coordinates using per-quadrilateral bilinear transforms stored in
    ``self.state`` (see A1AtlasCtrlWidget.saveState for the state layout)."""
    DBIdentity = "AuditoryCortexAtlas" ## owner key used for asking DB which tables are safe to use

    def __init__(self, state=None):
        # NOTE(review): the parent is initialized with state=None even when a
        # state is supplied; self.state is assigned below, but confirm the
        # base class does not also need it (behavior kept as-is).
        Atlas.Atlas.__init__(self, state=None)
        self._ctrl = None
        #self.setState(state)
        self.state = state

    def mapToAtlas(self, pos):
        """Maps obj into atlas coordinates."""
        matrix = self.getMatrix(pos)
        mapped = np.dot(matrix, [pos[0]*pos[1], pos[0], pos[1], 1])
        return mapped

    def getMatrix(self, pos):
        """Return the transformMatrix to use for the given pos."""
        quads = self.state['quadrilaterals']
        ind = None
        for i, q in enumerate(quads):
            if Qt.QPolygonF([Qt.QPointF(*x) for x in q]).containsPoint(Qt.QPointF(pos), Qt.Qt.OddEvenFill):
                ind = i
        if ind is None:  ## in case pos is outside the quadrilaterals
            # Pick the closest quad: smallest sum of the two nearest corner
            # distances. (Idiom fix: `== None` -> `is None`.)
            bestMin = 1000
            for i, q in enumerate(quads):
                dist = [pg.Point(x - pos).length() for x in q]
                minDist = min(dist)
                dist.remove(minDist)
                minDist += min(dist)
                if minDist < bestMin:
                    bestMin = minDist
                    ind = i
        m = self.state['transformMatrices'][ind]
        return np.array([m[0], m[1]])

    def setState(self, state):
        #self._matrix = None
        self.state = state

    def solveBilinearTransform(self, points1, points2):
        """
        Find a bilinear transformation matrix (2x4) that maps points1 onto points2
        points must be specified as a list of 4 Vector, Point, QPointF, etc.

        To use this matrix to map a point [x,y]::

            mapped = np.dot(matrix, [x*y, x, y, 1])
        """
        # BUG fix: the module only does `import scipy`, which does not
        # reliably expose scipy.linalg; import the submodule explicitly so
        # linalg.solve is guaranteed to be available.
        from scipy import linalg
        ## A is 4 rows (points) x 4 columns (xy, x, y, 1)
        ## B is 4 rows (points) x 2 columns (x, y)
        A = np.array([[points1[i].x()*points1[i].y(), points1[i].x(), points1[i].y(), 1] for i in range(4)])
        B = np.array([[points2[i].x(), points2[i].y()] for i in range(4)])
        ## solve 2 sets of linear equations to determine transformation matrix elements
        matrix = np.zeros((2, 4))
        for i in range(2):
            matrix[i] = linalg.solve(A, B[:, i]) ## solve Ax = B; x is one row of the desired transformation matrix
        return matrix

    def getState(self):
        raise Exception("Must be reimplemented in subclass.")

    def restoreState(self, state):
        raise Exception("Must be reimplemented in subclass.")

    def name(self):
        return "AuditoryCortexAtlas"

    def ctrlWidget(self, host=None):
        """Lazily create the control widget; *host* is required on first call."""
        if self._ctrl is None:
            if host is None:
                raise Exception("To initialize an A1AtlasCtrlWidget a host must be specified.")
            self._ctrl = A1AtlasCtrlWidget(self, host)
        return self._ctrl
class A1AtlasCtrlWidget(Atlas.AtlasCtrlWidget):
def __init__(self, atlas, host):
Atlas.AtlasCtrlWidget.__init__(self, atlas, host)
self.atlasDir = os.path.split(os.path.abspath(__file__))[0]
## add ThalamocorticalMarker to canvas
fh = DataManager.getHandle(os.path.join(self.atlasDir, 'images', 'ThalamocorticalMarker.svg'))
self.canvas.addFile(fh, pos=(-0.001283, -0.000205), scale=[3.78e-6, 3.78e-6], index=0, movable=False, z=10000)
## add CortexROI
self.roi = CortexROI([-1e-3, 0])
self.canvas.addGraphicsItem(self.roi, pos=(-1e-3, 1e-3), scale=[1e-3, 1e-3], name='CortexROI', movable=False)
self.roi.sigRegionChangeFinished.connect(self.roiChanged)
def loadState(self):
if self.sliceDir is None:
return
state = self.sliceDir.info()['atlas'][self.atlas.name()]
self.roi.setState(state['cortexROI'])
self.atlas.setState(state)
def saveState(self):
## Saves the position/configuration of the cortexROI, as well as transforms for mapping to atlas coordinates
if self.sliceDir is None:
return
## get state of ROI
cortexROI = self.roi.saveState()
quads = self.roi.getQuadrilaterals()
newQuads = []
for q in quads:
newQuads.append([(p.x(), p.y()) for p in q])
rects = self.roi.getNormalizedRects()
matrices = []
for i, q in enumerate(quads):
matrix = self.atlas.solveBilinearTransform([pg.Point(x) for x in q], [pg.Point(x) for x in rects[i]])
matrices.append([list(matrix[0]), list(matrix[1])])
state = {
'cortexROI':cortexROI,
'quadrilaterals':newQuads,
'normalizedRects': rects,
'transformMatrices': matrices
}
## write to slice directory meta-info
atlasInfo = self.sliceDir.info().get('atlas', {}).deepcopy()
atlasInfo[self.atlas.name()] = state
self.sliceDir.setInfo(atlas=atlasInfo)
## set state for atlas object
self.atlas.setState(state)
def generateDataArray(self, positions, dirType):
prof = Profiler("A1Atlas.generateDataArray", disabled=True)
if self.atlas.state is None:
self.saveState()
prof.mark('saved atlas state')
dirColumn = dirType + 'Dir'
if dirType == 'Protocol':
data = np.empty(len(positions), dtype=[('SliceDir', object),
(dirColumn, object),
#('layer', float),
#('depth', float),
('yPosSlice', float),
('yPosCell', float),
('percentDepth', float),
('xPosSlice', float),
('xPosCell', float),
('modXPosSlice', float),
('modXPosCell', float)])
fields = collections.OrderedDict([
('SliceDir', 'directory:Slice'),
(dirColumn, 'directory:'+dirType),
('yPosSlice', 'real'),
('yPosCell', 'real'),
('percentDepth', 'real'),
('xPosSlice', 'real'),
('xPosCell', 'real'),
('modXPosSlice', 'real'),
('modXPosCell', 'real')])
prof.mark("defined Protocol data array")
for i in range(len(positions)):
dh, pos = positions[i]
cellPos = self.dataModel.getCellInfo(dh)['userTransform']['pos']
mapped = self.atlas.mapToAtlas(pg.Point(pos)) ## needs to return %depth and modXPosSlice
#data[i] = (self.sliceDir, dh, mapped.x(), mapped.y(), mapped.z())
data[i]['SliceDir'] = self.sliceDir
data[i][dirColumn] = dh
data[i]['yPosSlice'] = pos[1]
data[i]['yPosCell'] = pos[1]-cellPos[1]
data[i]['percentDepth'] = mapped[1]
data[i]['xPosSlice'] = pos[0]
data[i]['xPosCell'] = pos[0]-cellPos[0]
data[i]['modXPosSlice'] = mapped[0]
data[i]['modXPosCell'] = mapped[0]-self.atlas.mapToAtlas(pg.Point(cellPos))[0]
prof.mark("filled protocol data array")
elif dirType == 'Cell':
data = np.empty(len(positions), dtype=[('SliceDir', object),
(dirColumn, object),
#('layer', float),
#('depth', float),
('yPosSlice', float),
#('yPosCell', float),
('percentDepth', float),
('xPosSlice', float),
#('xPosCell', float),
('modXPosSlice', float),
#('modXPosCell', float)
])
fields = collections.OrderedDict([
('SliceDir', 'directory:Slice'),
(dirColumn, 'directory:'+dirType),
('yPosSlice', 'real'),
('percentDepth', 'real'),
('xPosSlice', 'real'),
('modXPosSlice', 'real')])
prof.mark("defined cell data array")
for i in range(len(positions)):
dh, pos = positions[i]
#cellPos = self.dataModel.getCellInfo(dh)['pos']
mapped = self.atlas.mapToAtlas(pg.Point(pos)) ## needs to return %depth and modXPosSlice
#data[i] = (self.sliceDir, dh, mapped.x(), mapped.y(), mapped.z())
data[i]['SliceDir'] = self.sliceDir
data[i][dirColumn] = dh
data[i]['yPosSlice'] = pos[1]
#data['yPosCell'] = pos[1]-cellPos[1]
data[i]['percentDepth'] = mapped[1]
data[i]['xPosSlice'] = pos[0]
#data['xPosCell'] = pos[0]-cellPos[0]
data[i]['modXPosSlice'] = mapped[0]
#data['modXPosCell'] = mapped[0]-self.atlas.mapToAtlas(pg.Point(cellPos))[0]
prof.mark("filled cell data array")
else:
prof.finish()
raise Exception("Not sure how to structure data array for dirType=%s"%dirType)
prof.finish()
return data, fields
    def roiChanged(self):
        """Hook called when the ROI changes; persists the current state via saveState()."""
        self.saveState()
class PreviousAuditoryCortex(Atlas.Atlas):
    """Atlas subclass that manages auditory-cortex slice-plane images on a canvas.

    When a canvas is supplied, a control widget (slice plane / hemisphere
    combos from CtrlTemplate) is built and image items for the selected
    slice plane are added to the canvas; moving an image stores its
    transform back into the file's 'userTransform' metadata.
    """

    def __init__(self, canvas=None, state=None):
        ## define slice planes and the atlas images to use for each
        # scale converts image pixels to meters; pos is the image origin
        scale = 3.78e-6
        #scale = 5.5e-6
        #pos = (-676*scale/2., -577*scale/2.)
        #pos = (-681*scale/2., -231e-6)
        #pos = (-681*scale/2., -231*scale/2.)
        pos = (-0.001283, -0.000205)
        #pos = (0.0, 0.0)
        # slice plane name -> list of (image file, scale, position)
        self.slicePlanes = advancedTypes.OrderedDict([
            ('Thalamocortical', [('ThalamocorticalMarker.svg', scale, pos)]),
            ('Coronal', []),
        ])
        self.ctrl = None
        self.canvas = canvas
        if canvas is not None:
            # atlas images live next to this source file
            atlasDir = os.path.split(os.path.abspath(__file__))[0]
            #fh = DataManager.getHandle(os.path.join(atlasDir, 'CN_coronal.png'))
            #self.image = canvas.addImage(fh, pos=pos, scale=(scale, scale))
            #self.image.setMovable(False)
            self.images = []
            self.ctrl = Qt.QWidget()
            self.ui = CtrlTemplate.Ui_Form()
            self.ui.setupUi(self.ctrl)
            # WidgetGroup tracks all child widget values as one state dict
            self.stateGroup = pyqtgraph.WidgetGroup(self.ctrl)
            self.ui.slicePlaneCombo.clear()
            for sp in self.slicePlanes:
                self.ui.slicePlaneCombo.addItem(sp)
            #self.ui.slicePlaneCombo.currentIndexChanged.connect(self.slicePlaneChanged)
            #self.ui.hemisphereCombo.currentIndexChanged.connect(self.hemisphereChanged)
            #self.ui.photoCheck.stateChanged.connect(self.photoCheckChanged)
            #self.ui.drawingCheck.stateChanged.connect(self.drawingCheckChanged)
            #self.ui.thicknessSpin.valueChanged.connect(self.thicknessSpinChanged)
            self.stateGroup.sigChanged.connect(self.uiChanged)
            #self.ui.reAlignAtlasBtn.clicked.connect(self.reAlignAtlas)
            #self.connect(canvas, Qt.SIGNAL('itemTransformChangeFinished'), self.itemMoved)  ## old style
            self.canvas.sigItemTransformChangeFinished.connect(self.itemMoved)  ## new style
        Atlas.Atlas.__init__(self, state)
        self.uiChanged()

    def ctrlWidget(self, **args):
        """Return the control widget (None when constructed without a canvas)."""
        return self.ctrl

    def saveState(self):
        return self.state.copy()

    def restoreState(self, state):
        # merge (not replace) into the current state, then refresh the UI
        self.state.update(state)
        self.update()

    def update(self):
        if self.ctrl is not None:
            self.stateGroup.setState(self.state)

    def uiChanged(self):
        """Rebuild canvas images to match the currently selected slice plane."""
        for item in self.images:
            self.canvas.removeItem(item)
        self.images = []
        state = self.stateGroup.state()
        slice = state['slicePlaneCombo']
        hem = state['hemisphereCombo']
        #flip = state['flipCheck']
        imgs = self.slicePlanes[slice]
        atlasDir = os.path.split(os.path.abspath(__file__))[0]
        for imgFile, scale, pos in imgs:
            fh = DataManager.getHandle(os.path.join(atlasDir, 'images', imgFile))
            # z=10000 keeps the atlas overlay above user data
            item = self.canvas.addFile(fh, pos=pos, scale=[scale,scale], index=0, movable=False, z=10000)
            #item = self.canvas.addFile(fh, index=0, movable=False)
            self.images.append(item)

    def close(self):
        for item in self.images:
            self.canvas.removeItem(item)
        self.images = []

    def itemMoved(self, canvas, item):
        """Save an item's transformation if the user has moved it.
        This is saved in the 'userTransform' attribute; the original position data is not affected."""
        if item not in self.images:
            return
        #fh = self.items[item]
        fh = item.handle()
        trans = item.saveTransform()
        fh.setInfo(userTransform=trans)
        #print "saved", fh.shortName()

    #def reAlignAtlas(self):
        #file, scale, pos = self.slicePlanes[self.stateGroup.state()['slicePlaneCombo']]:
        #trans = self.images[0].saveTransform()
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# the code is modified from
# https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
from singa import layer
from singa import model
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 bias-free convolution with 1-pixel padding (torchvision ResNet style)."""
    conv_kwargs = {"stride": stride, "padding": 1, "bias": False}
    return layer.Conv2d(in_planes, out_planes, 3, **conv_kwargs)
class BasicBlock(layer.Layer):
    """Residual block of two 3x3 convolutions with a skip connection.

    The optional ``downsample`` callable projects the input so its shape
    matches the conv path before the addition.
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # sub-layers are created in the exact order they are applied below
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = layer.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = layer.BatchNorm2d(planes)
        self.relu1 = layer.ReLU()
        self.add = layer.Add()
        self.relu2 = layer.ReLU()
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # main path: conv-bn-relu, conv-bn
        y = self.relu1(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        # shortcut path (projected only when shapes differ)
        shortcut = x if self.downsample is None else self.downsample(x)
        return self.relu2(self.add(y, shortcut))
class Bottleneck(layer.Layer):
    """Residual bottleneck block: 1x1 reduce, 3x3, 1x1 expand (x4) with skip."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # sub-layers are created in the exact order they are applied below
        self.conv1 = layer.Conv2d(inplanes, planes, 1, bias=False)
        self.bn1 = layer.BatchNorm2d(planes)
        self.relu1 = layer.ReLU()
        self.conv2 = layer.Conv2d(planes,
                                  planes,
                                  3,
                                  stride=stride,
                                  padding=1,
                                  bias=False)
        self.bn2 = layer.BatchNorm2d(planes)
        self.relu2 = layer.ReLU()
        self.conv3 = layer.Conv2d(planes,
                                  planes * self.expansion,
                                  1,
                                  bias=False)
        self.bn3 = layer.BatchNorm2d(planes * self.expansion)
        self.add = layer.Add()
        self.relu3 = layer.ReLU()
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # main path: 1x1 reduce -> 3x3 -> 1x1 expand
        y = self.relu1(self.bn1(self.conv1(x)))
        y = self.relu2(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        # shortcut path (projected only when shapes differ)
        shortcut = x if self.downsample is None else self.downsample(x)
        return self.relu3(self.add(y, shortcut))
# public API of this module
__all__ = [
    'ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'
]
class ResNet(model.Model):
    """ResNet for SINGA, mirroring torchvision's layout.

    Args:
        block: residual block class (``BasicBlock`` or ``Bottleneck``).
        layers: sequence of four ints — number of blocks per stage.
        num_classes (int): size of the final classifier output.
        num_channels (int): number of input image channels.
    """

    def __init__(self, block, layers, num_classes=10, num_channels=3):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.num_classes = num_classes
        self.input_size = 224
        self.dimension = 4
        # stem: 7x7 stride-2 conv, BN, ReLU, 3x3 stride-2 max-pool
        self.conv1 = layer.Conv2d(num_channels,
                                  64,
                                  7,
                                  stride=2,
                                  padding=3,
                                  bias=False)
        self.bn1 = layer.BatchNorm2d(64)
        self.relu = layer.ReLU()
        self.maxpool = layer.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1, layers1 = self._make_layer(block, 64, layers[0])
        self.layer2, layers2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3, layers3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4, layers4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = layer.AvgPool2d(7, stride=1)
        self.flatten = layer.Flatten()
        self.fc = layer.Linear(num_classes)
        self.softmax_cross_entropy = layer.SoftMaxCrossEntropy()
        # the stage forwards returned by _make_layer are plain closures, so
        # their blocks must be registered explicitly
        self.register_layers(*layers1, *layers2, *layers3, *layers4)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one residual stage.

        Returns:
            (forward, layers): a closure applying the stage's blocks in
            sequence, and the list of block instances for registration.
        """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # 1x1 projection so the shortcut matches the main path's shape
            conv = layer.Conv2d(
                self.inplanes,
                planes * block.expansion,
                1,
                stride=stride,
                bias=False,
            )
            bn = layer.BatchNorm2d(planes * block.expansion)

            def _downsample(x):
                return bn(conv(x))

            downsample = _downsample
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        def forward(x):
            # loop variable renamed from `layer` — the original shadowed the
            # imported singa `layer` module inside this closure
            for blk in layers:
                x = blk(x)
            return x

        return forward, layers

    def forward(self, x):
        """Run the full network; returns class logits."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = self.flatten(x)
        x = self.fc(x)
        return x

    def train_one_batch(self, x, y, dist_option, spars):
        """One forward/backward/update step; returns (logits, loss).

        ``dist_option`` selects the distributed-update strategy; an
        unrecognized value silently skips the parameter update.
        """
        out = self.forward(x)
        loss = self.softmax_cross_entropy(out, y)
        if dist_option == 'plain':
            self.optimizer(loss)
        elif dist_option == 'half':
            self.optimizer.backward_and_update_half(loss)
        elif dist_option == 'partialUpdate':
            self.optimizer.backward_and_partial_update(loss)
        elif dist_option == 'sparseTopK':
            self.optimizer.backward_and_sparse_update(loss,
                                                      topK=True,
                                                      spars=spars)
        elif dist_option == 'sparseThreshold':
            self.optimizer.backward_and_sparse_update(loss,
                                                      topK=False,
                                                      spars=spars)
        return out, loss

    def set_optimizer(self, optimizer):
        self.optimizer = optimizer
def resnet18(pretrained=False, **kwargs):
    """Construct a ResNet-18 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
            (currently unused by this implementation).
    Returns:
        The created ResNet-18 model.
    """
    # two BasicBlocks per stage
    return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
def resnet34(pretrained=False, **kwargs):
    """Construct a ResNet-34 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
            (currently unused by this implementation).
    Returns:
        The created ResNet-34 model.
    """
    # BasicBlocks with the 3-4-6-3 stage layout
    return ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
def resnet50(pretrained=False, **kwargs):
    """Construct a ResNet-50 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
            (currently unused by this implementation).
    Returns:
        The created ResNet-50 model.
    """
    # Bottleneck blocks with the 3-4-6-3 stage layout
    return ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
def resnet101(pretrained=False, **kwargs):
    """Construct a ResNet-101 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
            (currently unused by this implementation).
    Returns:
        The created ResNet-101 model.
    """
    # Bottleneck blocks with the 3-4-23-3 stage layout
    return ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
def resnet152(pretrained=False, **kwargs):
    """Construct a ResNet-152 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
            (currently unused by this implementation).
    Returns:
        The created ResNet-152 model.
    """
    # Bottleneck blocks with the 3-8-36-3 stage layout
    return ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
# NOTE(review): exact duplicate of the __all__ defined earlier in this module;
# harmless (same value) but redundant
__all__ = [
    'ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'
]
| |
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import sys, struct, socket, time, os
import disassemble
class Message:
    """One debugger message: a type id, optional payload bytes, and an argument."""

    # message type ids (must match the on-console debugger protocol)
    DSI = 0
    ISI = 1
    Program = 2
    GetStat = 3
    OpenFile = 4
    ReadFile = 5
    CloseFile = 6
    SetPosFile = 7
    GetStatFile = 8

    # resume modes sent back after a breakpoint/exception
    Continue = 0
    Step = 1
    StepOver = 2

    def __init__(self, type, data, arg):
        self.type = type    # one of the type ids above
        self.data = data    # payload bytes, or None when the message has none
        self.arg = arg      # extra 32-bit argument (meaning depends on type)
class EventHolder(QObject):
    """Central Qt signal hub; widgets subscribe here instead of to each other."""

    Exception = pyqtSignal()          # console reported a DSI/ISI/Program exception
    Connected = pyqtSignal()          # socket connection established
    Closed = pyqtSignal()             # connection closed or lost
    BreakPointChanged = pyqtSignal()  # breakpoint list was modified
    Continue = pyqtSignal()           # execution resumed after a break
# single module-wide event hub instance
events = EventHolder()
#I don't want to deal with all the threading trouble of completing big
#file transfers without the UI becoming unresponsive. There is probably
#a better way to code this, but this is what I came up with.
class TaskMgr:
    """LIFO stack of UI Tasks; only the top task owns the progress widgets."""

    def __init__(self):
        self.taskQueue = []

    def add(self, task):
        if not self.taskQueue:
            # NOTE(review): enables the disconnect button when the first task
            # starts — looks like it should be setEnabled(False); confirm
            window.mainWidget.statusWidget.disconnectButton.setEnabled(True)
        self.taskQueue.append(task)

    def pop(self, task):
        # tasks must finish in LIFO order (only the top task may end)
        assert task == self.taskQueue.pop()
        if not self.taskQueue:
            # no tasks left: restore the idle "Connected" UI state
            window.mainWidget.tabWidget.setEnabled(True)
            window.mainWidget.statusWidget.cancelButton.setEnabled(True)
            window.mainWidget.statusWidget.disconnectButton.setEnabled(True)
            window.mainWidget.statusWidget.progressBar.setValue(0)
            window.mainWidget.statusWidget.progressInfo.setText("Connected")
        else:
            # hand the progress widgets back to the task below
            self.taskQueue[-1].resume()

    def isBlocking(self):
        if not self.taskQueue:
            return False
        return self.taskQueue[-1].blocking

    def cancel(self):
        # request cancellation of the top task; the task polls .canceled itself
        self.taskQueue[-1].canceled = True
# single module-wide task stack instance
taskMgr = TaskMgr()
class Task:
    """A long-running operation that reports progress through the status widget.

    Registers itself with taskMgr on construction; call setInfo()/update()
    while working and end() when done.  `blocking` disables the tab widget
    for the task's duration; `cancelable` enables the cancel button.
    """

    def __init__(self, blocking, cancelable):
        taskMgr.add(self)
        self.canceled = False
        self.blocking = blocking
        self.cancelable = cancelable
        window.mainWidget.tabWidget.setEnabled(not blocking)
        window.mainWidget.statusWidget.cancelButton.setEnabled(cancelable)

    def setInfo(self, info, maxValue):
        # label text and progress-bar range; stored so resume() can restore them
        self.info = info
        self.maxValue = maxValue
        window.mainWidget.statusWidget.progressInfo.setText(info)
        window.mainWidget.statusWidget.progressBar.setRange(0, maxValue)

    def update(self, progress):
        self.progress = progress
        window.mainWidget.statusWidget.progressBar.setValue(progress)
        # keep the UI responsive without threads (see module comment above)
        app.processEvents()

    def resume(self):
        # re-apply this task's UI state after a nested task finished
        window.mainWidget.tabWidget.setEnabled(not self.blocking)
        window.mainWidget.statusWidget.cancelButton.setEnabled(self.cancelable)
        window.mainWidget.statusWidget.progressInfo.setText(self.info)
        window.mainWidget.statusWidget.progressBar.setRange(0, self.maxValue)
        window.mainWidget.statusWidget.progressBar.setValue(self.progress)

    def end(self):
        taskMgr.pop(self)
class Thread:
    """One entry of the console's thread list, parsed from the wire format."""

    # core affinity bitmask -> display name
    cores = {
        1: "Core 0",
        2: "Core 1",
        4: "Core 2"
    }

    def __init__(self, data, offs=0):
        # fixed 24-byte big-endian header: core mask, priority, stack base,
        # stack end, entry point, name length -- followed by the ascii name
        header = struct.unpack_from(">6I", data, offs)
        coreMask, self.priority, self.stackBase, self.stackEnd, self.entryPoint, nameLen = header
        self.core = self.cores[coreMask]
        nameStart = offs + 24
        self.name = data[nameStart : nameStart + nameLen].decode("ascii")
class DirEntry:
    """One entry of a remote directory listing."""

    def __init__(self, flags, size, name):
        # size is -1 for directories (no size field on the wire)
        self.flags, self.size, self.name = flags, size, name

    def isDir(self):
        # top flag bit marks a directory; callers rely on truthiness only
        return self.flags & 0x80000000
class PyBugger:
    """Network client for the on-console debugger stub (TCP port 1559).

    All multi-byte wire values are big-endian.  Besides peek/poke and
    breakpoint control, this class answers the console's file-server
    messages so the game can read replacement files from the host.
    """

    def __init__(self):
        super().__init__()
        self.connected = False
        self.breakPoints = []
        # NOTE(review): basePath starts as bytes but setPatchFiles() stores
        # whatever the UI passes; a file message arriving before
        # setPatchFiles() would mix bytes and str in os.path.join — confirm
        self.basePath = b""
        self.currentHandle = 0x12345678
        self.files = {}  # console file handle -> host file object
        self.messageHandlers = {
            Message.DSI: self.handleException,
            Message.ISI: self.handleException,
            Message.Program: self.handleException,
            Message.GetStat: self.handleGetStat,
            Message.OpenFile: self.handleOpenFile,
            Message.ReadFile: self.handleReadFile,
            Message.CloseFile: self.handleCloseFile,
            Message.SetPosFile: self.handleSetPosFile,
            Message.GetStatFile: self.handleGetStatFile
        }

    @staticmethod
    def _strip_vol(gamePath):
        """Turn a console path like '/vol/content/x' into one relative to basePath.

        Fix: the previous code used gamePath.strip("/vol"), which strips any
        of the characters '/', 'v', 'o', 'l' from BOTH ends of the string
        (e.g. '/vol/love.txt' became 'e.txt').  Remove exactly the '/vol'
        prefix instead.
        """
        if gamePath.startswith("/vol"):
            gamePath = gamePath[4:]
        # drop leading slashes so os.path.join keeps basePath
        return gamePath.lstrip("/")

    def handleException(self, msg):
        # msg.data is the raw OSContext; decode and notify the UI
        exceptionState.load(msg.data, msg.type)
        events.Exception.emit()

    def handleGetStat(self, msg):
        gamePath = msg.data.decode("ascii")
        path = os.path.join(self.basePath, self._strip_vol(gamePath))
        print("GetStat: %s" %gamePath)
        self.sendFileMessage(os.path.getsize(path))

    def handleOpenFile(self, msg):
        # msg.arg carries the ascii open mode padded with NULs; open binary
        mode = struct.pack(">I", msg.arg).decode("ascii").strip("\x00") + "b"
        path = msg.data.decode("ascii")
        print("Open: %s" %path)
        f = open(os.path.join(self.basePath, self._strip_vol(path)), mode)
        self.files[self.currentHandle] = f
        self.sendFileMessage(self.currentHandle)
        self.currentHandle += 1

    def handleReadFile(self, msg):
        print("Read")
        task = Task(blocking=False, cancelable=False)
        bufferAddr, size, count, handle = struct.unpack(">IIII", msg.data)
        data = self.files[handle].read(size * count)
        task.setInfo("Sending file", len(data))
        bytesSent = 0
        # stream in 32 KiB write-memory chunks directly into the console buffer
        while bytesSent < len(data):
            length = min(len(data) - bytesSent, 0x8000)
            self.sendall(b"\x03")
            self.sendall(struct.pack(">II", bufferAddr, length))
            self.sendall(data[bytesSent : bytesSent + length])
            bufferAddr += length
            bytesSent += length
            task.update(bytesSent)
        # reply with the number of elements read, as fread would
        self.sendFileMessage(bytesSent // size)
        task.end()

    def handleCloseFile(self, msg):
        print("Close")
        self.files.pop(msg.arg).close()
        self.sendFileMessage()

    def handleSetPosFile(self, msg):
        print("SetPos")
        handle, pos = struct.unpack(">II", msg.data)
        self.files[handle].seek(pos)
        self.sendFileMessage()

    def handleGetStatFile(self, msg):
        print("GetStatFile")
        # determine the file size without disturbing the current position
        f = self.files[msg.arg]
        pos = f.tell()
        f.seek(0, 2)
        size = f.tell()
        f.seek(pos)
        self.sendFileMessage(size)

    def connect(self, host):
        self.s = socket.socket()
        self.s.connect((host, 1559))
        self.connected = True
        self.closeRequest = False
        events.Connected.emit()

    def close(self):
        # command 0x01: tell the console we are leaving, then drop the socket
        self.sendall(b"\x01")
        self.s.close()
        self.connected = False
        self.breakPoints = []
        events.Closed.emit()

    def updateMessages(self):
        # command 0x07: poll the console's pending message queue
        self.sendall(b"\x07")
        count = struct.unpack(">I", self.recvall(4))[0]
        for i in range(count):
            type, ptr, length, arg = struct.unpack(">IIII", self.recvall(16))
            data = None
            if length:
                data = self.recvall(length)
            self.messageHandlers[type](Message(type, data, arg))

    def read(self, addr, num):
        # command 0x02: read `num` bytes of console memory
        self.sendall(b"\x02")
        self.sendall(struct.pack(">II", addr, num))
        data = self.recvall(num)
        return data

    def write(self, addr, data):
        # command 0x03: write raw bytes to console memory
        self.sendall(b"\x03")
        self.sendall(struct.pack(">II", addr, len(data)))
        self.sendall(data)

    def writeCode(self, addr, instr):
        # command 0x04: patch a single instruction (handles icache flush remotely)
        self.sendall(b"\x04")
        self.sendall(struct.pack(">II", addr, instr))

    def getThreadList(self):
        # command 0x05: fetch the serialized thread list and parse it
        self.sendall(b"\x05")
        length = struct.unpack(">I", self.recvall(4))[0]
        data = self.recvall(length)
        offset = 0
        threads = []
        while offset < length:
            thread = Thread(data, offset)
            threads.append(thread)
            offset += 24 + len(thread.name)
        return threads

    def toggleBreakPoint(self, addr):
        # command 0x0A toggles the breakpoint on the console side as well;
        # the stub supports at most 10 breakpoints
        if addr in self.breakPoints: self.breakPoints.remove(addr)
        else:
            if len(self.breakPoints) >= 10:
                return
            self.breakPoints.append(addr)
        self.sendall(b"\x0A")
        self.sendall(struct.pack(">I", addr))
        events.BreakPointChanged.emit()

    def continueBreak(self): self.sendCrashMessage(Message.Continue)
    def stepBreak(self): self.sendCrashMessage(Message.Step)
    def stepOver(self): self.sendCrashMessage(Message.StepOver)

    def sendCrashMessage(self, message):
        self.sendMessage(message)
        events.Continue.emit()

    def sendMessage(self, message, data0=0, data1=0, data2=0):
        # command 0x06: post a message to the crashed thread
        self.sendall(b"\x06")
        self.sendall(struct.pack(">IIII", message, data0, data1, data2))

    def sendFileMessage(self, data0=0, data1=0, data2=0):
        # command 0x0F: reply to a file-server request
        self.sendall(b"\x0F")
        self.sendall(struct.pack(">IIII", 0, data0, data1, data2))

    def getStackTrace(self):
        # command 0x08: list of return addresses for the crashed thread
        self.sendall(b"\x08")
        count = struct.unpack(">I", self.recvall(4))[0]
        trace = struct.unpack(">%iI" %count, self.recvall(4 * count))
        return trace

    def pokeExceptionRegisters(self):
        # command 0x09: write the (possibly edited) GPR/FPR sets back
        self.sendall(b"\x09")
        data = struct.pack(">32I32d", *exceptionState.gpr, *exceptionState.fpr)
        self.sendall(data)

    def readDirectory(self, path):
        # command 0x0B: stream of [namelen, flags, (size), name] entries,
        # terminated by namelen == 0; size is only present for files
        self.sendall(b"\x0B")
        self.sendall(struct.pack(">I", len(path)))
        self.sendall(path.encode("ascii"))
        entries = []
        namelen = struct.unpack(">I", self.recvall(4))[0]
        while namelen != 0:
            flags = struct.unpack(">I", self.recvall(4))[0]
            size = -1
            if not flags & 0x80000000:
                size = struct.unpack(">I", self.recvall(4))[0]
            name = self.recvall(namelen).decode("ascii")
            entries.append(DirEntry(flags, size, name))
            namelen = struct.unpack(">I", self.recvall(4))[0]
        return entries

    def dumpFile(self, gamePath, outPath, task):
        # command 0x0C: download one file from the game volume to outPath
        if task.canceled:
            return
        self.sendall(b"\x0C")
        self.sendall(struct.pack(">I", len(gamePath)))
        self.sendall(gamePath.encode("ascii"))
        length = struct.unpack(">I", self.recvall(4))[0]
        task.setInfo("Dumping %s" %gamePath, length)
        with open(outPath, "wb") as f:
            bytesDumped = 0
            while bytesDumped < length:
                data = self.s.recv(length - bytesDumped)
                f.write(data)
                bytesDumped += len(data)
                task.update(bytesDumped)

    def getModuleName(self):
        # command 0x0D: name of the running RPX (without extension on the wire)
        self.sendall(b"\x0D")
        length = struct.unpack(">I", self.recvall(4))[0]
        return self.recvall(length).decode("ascii") + ".rpx"

    def setPatchFiles(self, fileList, basePath):
        # command 0x0E: tell the console which paths should be redirected
        # to the host; basePath is where those files live locally
        self.basePath = basePath
        self.sendall(b"\x0E")
        fileBuffer = struct.pack(">I", len(fileList))
        for path in fileList:
            fileBuffer += struct.pack(">H", len(path))
            fileBuffer += path.encode("ascii")
        self.sendall(struct.pack(">I", len(fileBuffer)))
        self.sendall(fileBuffer)

    def clearPatchFiles(self):
        # command 0x10: disable all file redirections
        self.sendall(b"\x10")

    def sendall(self, data):
        # socket errors are treated as a disconnect, not raised
        try:
            self.s.sendall(data)
        except socket.error:
            self.connected = False
            events.Closed.emit()

    def recvall(self, num):
        # read exactly `num` bytes; on error report a disconnect and return
        # zero padding so callers' struct.unpack calls still succeed
        try:
            data = b""
            while len(data) < num:
                data += self.s.recv(num - len(data))
        except socket.error:
            self.connected = False
            events.Closed.emit()
            return b"\x00" * num
        return data
class HexSpinBox(QAbstractSpinBox):
    """Spin box editing an unsigned 32-bit value in uppercase hexadecimal.

    stepSize is both the increment and the alignment: accepted values are
    rounded down to a multiple of stepSize.
    """

    def __init__(self, parent, stepSize = 1):
        super().__init__(parent)
        self._value = 0
        self.stepSize = stepSize

    def validate(self, text, pos):
        if all([char in "0123456789abcdefABCDEF" for char in text]):
            if not text:
                # empty field: still editing
                return QValidator.Intermediate, text.upper(), pos
            value = int(text, 16)
            if value <= 0xFFFFFFFF:
                self._value = value
                if value % self.stepSize:
                    # round down to the required alignment
                    self._value -= value % self.stepSize
                return QValidator.Acceptable, text.upper(), pos
            # NOTE(review): values above 0xFFFFFFFF are reported Acceptable
            # but _value is left unchanged — confirm this should not be
            # Invalid instead
            return QValidator.Acceptable, text.upper(), pos
        return QValidator.Invalid, text.upper(), pos

    def stepBy(self, steps):
        # clamp to [0, 0x100000000 - stepSize] and refresh the display
        self._value = min(max(self._value + steps * self.stepSize, 0), 0x100000000 - self.stepSize)
        self.lineEdit().setText("%X" %self._value)

    def stepEnabled(self):
        return QAbstractSpinBox.StepUpEnabled | QAbstractSpinBox.StepDownEnabled

    def setValue(self, value):
        self._value = value
        self.lineEdit().setText("%X" %self._value)

    def value(self):
        return self._value
class ExceptionState:
    """Decoded copy of the crashed thread's register context."""

    # indexed by the message type id (Message.DSI/ISI/Program)
    exceptionNames = ["DSI", "ISI", "Program"]

    def load(self, context, type):
        """Unpack the big-endian context blob sent by the console."""
        # lists rather than tuples so the register tab can poke values back
        self.gpr = list(struct.unpack_from(">32I", context, 8))
        (self.cr, self.lr,
         self.ctr, self.xer) = struct.unpack_from(">4I", context, 0x88)
        (self.srr0, self.srr1,
         self.ex0, self.ex1) = struct.unpack_from(">4I", context, 0x98)
        self.fpr = list(struct.unpack_from(">32d", context, 0xB8))
        self.gqr = list(struct.unpack_from(">8I", context, 0x1BC))
        self.psf = list(struct.unpack_from(">32d", context, 0x1E0))
        self.exceptionName = self.exceptionNames[type]

    def isBreakPoint(self):
        # the trap bit in SRR1 distinguishes our breakpoints from real
        # program exceptions; callers rely on truthiness only
        if self.exceptionName != "Program":
            return False
        return self.srr1 & 0x20000
def format_hex(blob, offs):
    """Render one byte as two uppercase hex digits."""
    byte = blob[offs]
    return "%02X" % byte
def format_ascii(blob, offs):
    """Render one byte as its ASCII character if alphanumeric, '?' otherwise."""
    code = blob[offs]
    is_digit = 0x30 <= code <= 0x39
    is_upper = 0x41 <= code <= 0x5A
    is_lower = 0x61 <= code <= 0x7A
    if is_digit or is_upper or is_lower:
        return chr(code)
    return "?"
def format_float(blob, offs):
    """Render a big-endian float32; scientific notation outside [1e-6, 1e6)."""
    value = struct.unpack_from(">f", blob, offs)[0]
    magnitude = abs(value)
    if magnitude >= 1000000 or 0 < magnitude < 0.000001:
        return "%e" % value
    # fixed notation with trailing zeros removed (zero renders as "0.")
    return ("%.8f" % value).rstrip("0")
class MemoryViewer(QWidget):
    """16x16 grid of labels showing 0x100 bytes of console memory."""

    class Format:
        # display modes; indices into Width/Funcs below
        Hex = 0
        Ascii = 1
        Float = 2
        Width = 1, 1, 4                              # bytes per rendered cell
        Funcs = format_hex, format_ascii, format_float

    def __init__(self, parent):
        super().__init__(parent)
        self.layout = QGridLayout()
        # column headers 0..F
        for i in range(16):
            self.layout.addWidget(QLabel("%X" %i, self), 0, i + 1)
        # row address labels, refreshed by setBase()
        self.addrLabels = []
        for i in range(16):
            label = QLabel("%X" %(i * 0x10), self)
            self.layout.addWidget(label, i + 1, 0)
            self.addrLabels.append(label)
        self.dataCells = []
        self.base = 0
        self.format = self.Format.Hex
        self.updateData()
        self.setLayout(self.layout)
        events.Connected.connect(self.connected)

    def connected(self):
        self.setBase(0x10000000)

    def setFormat(self, format):
        self.format = format
        self.updateData()

    def setBase(self, base):
        # keep the address spin box in sync with the viewer
        window.mainWidget.tabWidget.memoryTab.memoryInfo.baseBox.setValue(base)
        self.base = base
        for i in range(16):
            self.addrLabels[i].setText("%X" %(self.base + i * 0x10))
        self.updateData()

    def updateData(self):
        """Re-read 0x100 bytes at base and rebuild the cell labels."""
        for cell in self.dataCells:
            self.layout.removeWidget(cell)
            cell.setParent(None)
        if bugger.connected:
            blob = bugger.read(self.base, 0x100)
        else:
            # placeholder content while disconnected
            blob = b"\x00" * 0x100
        width = self.Width[self.format]
        func = self.Funcs[self.format]
        # i walks cell columns, j walks rows; wide formats span `width` columns
        for i in range(16 // width):
            for j in range(16):
                label = QLabel(func(blob, j * 0x10 + i * width), self)
                self.layout.addWidget(label, j + 1, i * width + 1, 1, width)
                self.dataCells.append(label)
class MemoryInfo(QWidget):
    """Controls beside the memory viewer: base address, poke, display format."""

    def __init__(self, parent):
        super().__init__(parent)
        self.dataTypeLabel = QLabel("Data type:")
        self.dataTypeBox = QComboBox(self)
        # order must match MemoryViewer.Format indices
        self.dataTypeBox.addItems(["Hex", "Ascii", "Float"])
        self.dataTypeBox.currentIndexChanged.connect(self.updateDataType)
        self.baseLabel = QLabel("Address:")
        self.baseBox = HexSpinBox(self, 0x10)      # row-aligned base address
        self.baseButton = QPushButton("Update", self)
        self.baseButton.clicked.connect(self.updateMemoryBase)
        self.pokeAddr = HexSpinBox(self, 4)        # word-aligned poke address
        self.pokeValue = HexSpinBox(self)
        self.pokeButton = QPushButton("Poke", self)
        self.pokeButton.clicked.connect(self.pokeMemory)
        self.layout = QGridLayout()
        self.layout.addWidget(self.baseLabel, 0, 0)
        self.layout.addWidget(self.baseBox, 0, 1)
        self.layout.addWidget(self.baseButton, 0, 2)
        self.layout.addWidget(self.pokeAddr, 1, 0)
        self.layout.addWidget(self.pokeValue, 1, 1)
        self.layout.addWidget(self.pokeButton, 1, 2)
        self.layout.addWidget(self.dataTypeLabel, 2, 0)
        self.layout.addWidget(self.dataTypeBox, 2, 1, 1, 2)
        self.setLayout(self.layout)

    def updateDataType(self, index):
        window.mainWidget.tabWidget.memoryTab.memoryViewer.setFormat(index)

    def updateMemoryBase(self):
        window.mainWidget.tabWidget.memoryTab.memoryViewer.setBase(self.baseBox.value())

    def pokeMemory(self):
        # write one 32-bit word, then refresh the viewer to show the change
        bugger.write(self.pokeAddr.value(), struct.pack(">I", self.pokeValue.value()))
        window.mainWidget.tabWidget.memoryTab.memoryViewer.updateData()
class MemoryTab(QWidget):
    """Tab combining the memory controls and the hex viewer."""

    def __init__(self, parent):
        super().__init__(parent)
        self.memoryInfo = MemoryInfo(self)
        self.memoryViewer = MemoryViewer(self)
        self.layout = QHBoxLayout()
        self.layout.addWidget(self.memoryInfo)
        self.layout.addWidget(self.memoryViewer)
        # NOTE(review): this button is never added to the layout — confirm
        # whether it is meant to be visible
        self.button = QPushButton("Dump", self)
        self.button.clicked.connect(self.dump)
        self.setLayout(self.layout)

    def dump(self):
        # one-shot debug dump of a hard-coded memory range to dump.bin
        dumpStart = 0x1AB00000
        dumpLength = 0x600000
        dumpFile = "dump.bin"
        with open(dumpFile, 'wb') as f:
            f.write(bugger.read(dumpStart, dumpLength))
class DisassemblyWidget(QTextEdit):
    """Read-only view of 24 disassembled instructions with line highlighting.

    Highlight colors: red = breakpoint, green = current instruction,
    blue = selected line; overlapping states are channel-averaged.
    Middle-click toggles a breakpoint on the clicked line.
    """

    def __init__(self, parent):
        super().__init__(parent)
        self.setTextInteractionFlags(Qt.NoTextInteraction)
        self.currentInstruction = None
        self.selectedAddress = 0
        self.setBase(0)
        events.BreakPointChanged.connect(self.updateHighlight)
        events.Continue.connect(self.handleContinue)

    def handleContinue(self):
        # execution resumed: there is no current instruction any more
        self.currentInstruction = None
        self.updateHighlight()

    def setCurrentInstruction(self, instr):
        self.currentInstruction = instr
        # center-ish the view: show 8 instructions above the current one
        self.setBase(instr - 0x20)

    def setBase(self, base):
        self.base = base
        self.updateText()
        self.updateHighlight()

    def updateText(self):
        """Re-read 0x60 bytes at base and disassemble them line by line."""
        if bugger.connected:
            blob = bugger.read(self.base, 0x60)
        else:
            blob = b"\x00" * 0x60
        text = ""
        for i in range(24):
            address = self.base + i * 4
            value = struct.unpack_from(">I", blob, i * 4)[0]
            instr = disassemble.disassemble(value, address)
            text += "%08X: %08X %s\n" %(address, value, instr)
        self.setPlainText(text)

    def updateHighlight(self):
        selections = []
        for i in range(24):
            address = self.base + i * 4
            color = self.getColor(address)
            if color:
                # highlight the whole line at row i
                cursor = self.textCursor()
                cursor.movePosition(QTextCursor.Down, n=i)
                cursor.select(QTextCursor.LineUnderCursor)
                format = QTextCharFormat()
                format.setBackground(QBrush(QColor(color)))
                selection = QTextEdit.ExtraSelection()
                selection.cursor = cursor
                selection.format = format
                selections.append(selection)
        self.setExtraSelections(selections)

    def getColor(self, addr):
        """Return the '#RRGGBB' highlight for addr, or None for no highlight."""
        colors = []
        if addr in bugger.breakPoints:
            colors.append((255, 0, 0))
        if addr == self.currentInstruction:
            colors.append((0, 255, 0))
        if addr == self.selectedAddress:
            colors.append((0, 0, 255))
        if not colors:
            return None
        # average each channel across the contributing colors
        color = [sum(l)//len(colors) for l in zip(*colors)]
        return "#%02X%02X%02X" %tuple(color)

    def mousePressEvent(self, e):
        super().mousePressEvent(e)
        # map the click to an instruction address via its line number
        line = self.cursorForPosition(e.pos()).blockNumber()
        self.selectedAddress = self.base + line * 4
        if e.button() == Qt.MidButton:
            bugger.toggleBreakPoint(self.selectedAddress)
        self.updateHighlight()
class DisassemblyInfo(QWidget):
    """Controls beside the disassembly view: base address and instruction poke."""

    def __init__(self, parent):
        super().__init__(parent)
        self.baseLabel = QLabel("Address:")
        self.baseBox = HexSpinBox(self, 4)      # word-aligned view base
        self.baseButton = QPushButton("Update", self)
        self.baseButton.clicked.connect(self.updateDisassemblyBase)
        self.pokeBox = HexSpinBox(self)         # raw instruction word to write
        self.pokeButton = QPushButton("Poke", self)
        self.pokeButton.clicked.connect(self.poke)
        self.layout = QGridLayout()
        self.layout.addWidget(self.baseLabel, 0, 0)
        self.layout.addWidget(self.baseBox, 0, 1)
        self.layout.addWidget(self.baseButton, 0, 2)
        self.layout.addWidget(self.pokeBox, 1, 0)
        self.layout.addWidget(self.pokeButton, 1, 1, 1, 2)
        self.setLayout(self.layout)
        self.setMinimumWidth(300)

    def updateDisassemblyBase(self):
        window.mainWidget.tabWidget.disassemblyTab.disassemblyWidget.setBase(self.baseBox.value())

    def poke(self):
        # write the instruction word at the line selected in the view
        disassembly = window.mainWidget.tabWidget.disassemblyTab.disassemblyWidget
        if disassembly.selectedAddress:
            bugger.writeCode(disassembly.selectedAddress, self.pokeBox.value())
            disassembly.updateText()
class DisassemblyTab(QWidget):
    """Tab combining the disassembly controls and the instruction view."""

    def __init__(self, parent):
        super().__init__(parent)
        self.disassemblyInfo = DisassemblyInfo(self)
        self.disassemblyWidget = DisassemblyWidget(self)
        self.layout = QHBoxLayout()
        self.layout.addWidget(self.disassemblyInfo)
        self.layout.addWidget(self.disassemblyWidget)
        self.setLayout(self.layout)
        events.Connected.connect(self.connected)

    def connected(self):
        # jump to a sensible default address on connect
        self.disassemblyWidget.setBase(0x10000000)
class ThreadList(QTableWidget):
    """Table listing the console's threads (name, priority, core, stack, entry)."""

    def __init__(self, parent):
        super().__init__(0, 5, parent)
        self.setHorizontalHeaderLabels(["Name", "Priority", "Core", "Stack", "Entry Point"])
        self.setEditTriggers(self.NoEditTriggers)
        self.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
        events.Connected.connect(self.updateThreads)

    def updateThreads(self):
        threads = bugger.getThreadList()
        self.setRowCount(len(threads))
        for i in range(len(threads)):
            thread = threads[i]
            self.setItem(i, 0, QTableWidgetItem(thread.name))
            self.setItem(i, 1, QTableWidgetItem(str(thread.priority)))
            self.setItem(i, 2, QTableWidgetItem(thread.core))
            # stack shown low-to-high: end (lower bound) .. base
            self.setItem(i, 3, QTableWidgetItem("0x%x - 0x%x" %(thread.stackEnd, thread.stackBase)))
            self.setItem(i, 4, QTableWidgetItem(hex(thread.entryPoint)))
# NOTE(review): subclasses QTableWidget but is used purely as a container
# widget — probably meant to derive from QWidget; confirm before changing
class ThreadingTab(QTableWidget):
    """Tab holding the thread list and a manual refresh button."""

    def __init__(self, parent):
        super().__init__(parent)
        self.threadList = ThreadList(self)
        self.updateButton = QPushButton("Update", self)
        self.updateButton.clicked.connect(self.threadList.updateThreads)
        self.layout = QVBoxLayout()
        self.layout.addWidget(self.threadList)
        self.layout.addWidget(self.updateButton)
        self.setLayout(self.layout)
class BreakPointList(QListWidget):
    """List of active breakpoints; double-click jumps to the disassembly."""

    def __init__(self, parent):
        super().__init__(parent)
        self.itemDoubleClicked.connect(self.goToDisassembly)
        events.BreakPointChanged.connect(self.updateList)

    def updateList(self):
        # rows mirror bugger.breakPoints by index (relied on below)
        self.clear()
        for bp in bugger.breakPoints:
            self.addItem("0x%08X" %bp)

    def goToDisassembly(self, item):
        address = bugger.breakPoints[self.row(item)]
        window.mainWidget.tabWidget.disassemblyTab.disassemblyWidget.setBase(address)
        # tab index 1 is the disassembly tab
        window.mainWidget.tabWidget.setCurrentIndex(1)
class BreakPointTab(QWidget):
    """Tab holding the breakpoint list and a remove button."""

    def __init__(self, parent):
        super().__init__(parent)
        self.list = BreakPointList(self)
        self.button = QPushButton("Remove", self)
        self.button.clicked.connect(self.removeBreakPoint)
        self.layout = QVBoxLayout()
        self.layout.addWidget(self.list)
        self.layout.addWidget(self.button)
        self.setLayout(self.layout)

    def removeBreakPoint(self):
        # toggling an existing breakpoint removes it (see PyBugger.toggleBreakPoint)
        if self.list.currentRow() != -1:
            bugger.toggleBreakPoint(bugger.breakPoints[self.list.currentRow()])
class RegisterTab(QWidget):
    """Editable view of the 32 GPRs and 32 FPRs captured at an exception.

    Editing is enabled only while stopped at a breakpoint; Poke writes the
    values back to the console, Reset re-reads them from exceptionState.
    """

    def __init__(self, parent):
        super().__init__(parent)
        self.gprLabels = []
        self.gprBoxes = []
        self.fprLabels = []
        self.fprBoxes = []
        for i in range(32):
            self.gprLabels.append(QLabel("r%i" %i, self))
            self.fprLabels.append(QLabel("f%i" % i, self))
            gprBox = HexSpinBox(self)
            fprBox = QDoubleSpinBox(self)
            fprBox.setRange(float("-inf"), float("inf"))
            self.gprBoxes.append(gprBox)
            self.fprBoxes.append(fprBox)
        self.layout = QGridLayout()
        # two 16-row columns of GPRs, then two 16-row columns of FPRs
        for i in range(32):
            self.layout.addWidget(self.gprLabels[i], i % 16, i // 16 * 2)
            self.layout.addWidget(self.gprBoxes[i], i % 16, i // 16 * 2 + 1)
            self.layout.addWidget(self.fprLabels[i], i % 16, i // 16 * 2 + 4)
            self.layout.addWidget(self.fprBoxes[i], i % 16, i // 16 * 2 + 5)
        self.setLayout(self.layout)
        self.pokeButton = QPushButton("Poke", self)
        self.resetButton = QPushButton("Reset", self)
        self.pokeButton.clicked.connect(self.pokeRegisters)
        self.resetButton.clicked.connect(self.updateRegisters)
        self.layout.addWidget(self.pokeButton, 16, 0, 1, 4)
        self.layout.addWidget(self.resetButton, 16, 4, 1, 4)
        self.setEditEnabled(False)
        events.Exception.connect(self.exceptionOccurred)
        events.Continue.connect(lambda: self.setEditEnabled(False))

    def setEditEnabled(self, enabled):
        for i in range(32):
            self.gprBoxes[i].setEnabled(enabled)
            self.fprBoxes[i].setEnabled(enabled)
        self.pokeButton.setEnabled(enabled)
        self.resetButton.setEnabled(enabled)

    def exceptionOccurred(self):
        self.updateRegisters()
        # only breakpoints are resumable, so only they allow editing
        self.setEditEnabled(exceptionState.isBreakPoint())

    def updateRegisters(self):
        for i in range(32):
            self.gprBoxes[i].setValue(exceptionState.gpr[i])
            self.fprBoxes[i].setValue(exceptionState.fpr[i])

    def pokeRegisters(self):
        # copy the edited values into exceptionState, then push to the console
        for i in range(32):
            exceptionState.gpr[i] = self.gprBoxes[i].value()
            exceptionState.fpr[i] = self.fprBoxes[i].value()
        bugger.pokeExceptionRegisters()
class ExceptionInfo(QGroupBox):
    """Group box showing general information about the current exception."""
    def __init__(self, parent):
        super().__init__("Info", parent)
        self.typeLabel = QLabel(self)
        box = QVBoxLayout()
        box.addWidget(self.typeLabel)
        self.layout = box
        self.setLayout(box)
        events.Exception.connect(self.updateInfo)
    def updateInfo(self):
        """Refresh the type label from the global exception state."""
        text = "Type: %s" %exceptionState.exceptionName
        self.typeLabel.setText(text)
class SpecialRegisters(QGroupBox):
    """Group box showing special registers, split into a user column
    (CR/LR/CTR/XER) and a kernel column (SRR0/SRR1/EX0/EX1)."""
    def __init__(self, parent):
        super().__init__("Special registers", parent)
        self.cr = QLabel(self)
        self.lr = QLabel(self)
        self.ctr = QLabel(self)
        self.xer = QLabel(self)
        self.srr0 = QLabel(self)
        self.srr1 = QLabel(self)
        self.ex0 = QLabel(self)
        self.ex1 = QLabel(self)
        self.layout = QHBoxLayout()
        # Bug fix: kernelLayout was assigned a fresh QFormLayout twice,
        # leaking the first instance. Each form layout is now created once.
        self.userLayout = QFormLayout()
        self.kernelLayout = QFormLayout()
        self.userLayout.addRow("CR:", self.cr)
        self.userLayout.addRow("LR:", self.lr)
        self.userLayout.addRow("CTR:", self.ctr)
        self.userLayout.addRow("XER:", self.xer)
        self.kernelLayout.addRow("SRR0:", self.srr0)
        self.kernelLayout.addRow("SRR1:", self.srr1)
        self.kernelLayout.addRow("EX0:", self.ex0)
        self.kernelLayout.addRow("EX1:", self.ex1)
        self.layout.addLayout(self.userLayout)
        self.layout.addLayout(self.kernelLayout)
        self.setLayout(self.layout)
        events.Exception.connect(self.updateRegisters)
    def updateRegisters(self):
        """Render every special register as uppercase hex."""
        self.cr.setText("%X" %exceptionState.cr)
        self.lr.setText("%X" %exceptionState.lr)
        self.ctr.setText("%X" %exceptionState.ctr)
        self.xer.setText("%X" %exceptionState.xer)
        self.srr0.setText("%X" %exceptionState.srr0)
        self.srr1.setText("%X" %exceptionState.srr1)
        self.ex0.setText("%X" %exceptionState.ex0)
        self.ex1.setText("%X" %exceptionState.ex1)
class ExceptionInfoTab(QWidget):
    """First exception sub-tab: general info next to the special registers."""
    def __init__(self, parent):
        super().__init__(parent)
        self.exceptionInfo = ExceptionInfo(self)
        self.specialRegisters = SpecialRegisters(self)
        grid = QGridLayout()
        grid.addWidget(self.exceptionInfo, 0, 0)
        grid.addWidget(self.specialRegisters, 0, 1)
        self.layout = grid
        self.setLayout(grid)
class StackTrace(QListWidget):
    """List of addresses for the current stop: SRR0, LR, then stack frames."""
    def __init__(self, parent):
        super().__init__(parent)
        events.Exception.connect(self.updateTrace)
    def updateTrace(self):
        """Rebuild the list from the exception state and the walked stack."""
        self.clear()
        frames = (exceptionState.srr0, exceptionState.lr) + bugger.getStackTrace()
        for address in frames:
            self.addItem("%X" %address)
class BreakPointActions(QWidget):
    """Row of buttons controlling execution while stopped at a breakpoint."""
    def __init__(self, parent):
        super().__init__(parent)
        self.continueButton = QPushButton("Continue", self)
        self.stepButton = QPushButton("Step", self)
        self.stepOverButton = QPushButton("Step over", self)
        self.continueButton.clicked.connect(bugger.continueBreak)
        self.stepButton.clicked.connect(bugger.stepBreak)
        self.stepOverButton.clicked.connect(bugger.stepOver)
        row = QHBoxLayout()
        for button in (self.continueButton, self.stepButton,
                       self.stepOverButton):
            row.addWidget(button)
        self.layout = row
        self.setLayout(row)
        events.Exception.connect(self.updateButtons)
        events.Continue.connect(self.disableButtons)
    def disableButtons(self):
        """Grey out all buttons (the target has resumed)."""
        self.setButtonsEnabled(False)
    def updateButtons(self):
        """Enable the buttons only when stopped on an actual breakpoint."""
        self.setButtonsEnabled(exceptionState.isBreakPoint())
    def setButtonsEnabled(self, enabled):
        for button in (self.continueButton, self.stepButton,
                       self.stepOverButton):
            button.setEnabled(enabled)
class StackTraceTab(QWidget):
    """Stack trace beside a disassembly view, breakpoint controls below."""
    def __init__(self, parent):
        super().__init__(parent)
        self.stackTrace = StackTrace(self)
        self.disassembly = DisassemblyWidget(self)
        self.breakPointActions = BreakPointActions(self)
        top = QHBoxLayout()
        top.addWidget(self.stackTrace)
        top.addWidget(self.disassembly)
        outer = QVBoxLayout()
        outer.addLayout(top)
        outer.addWidget(self.breakPointActions)
        self.layout = outer
        self.setLayout(outer)
        self.stackTrace.itemDoubleClicked.connect(self.jumpDisassembly)
        events.Exception.connect(self.exceptionOccurred)
    def exceptionOccurred(self):
        # Focus the disassembly on the faulting instruction (SRR0).
        self.disassembly.setCurrentInstruction(exceptionState.srr0)
    def jumpDisassembly(self, item):
        """Scroll the disassembly to the double-clicked address."""
        address = int(item.text(), 16)
        self.disassembly.setBase(address - 0x20)
class ExceptionTab(QTabWidget):
    """Container for the exception sub-tabs (info, registers, stack trace)."""
    def __init__(self, parent):
        super().__init__(parent)
        self.infoTab = ExceptionInfoTab(self)
        self.registerTab = RegisterTab(self)
        self.stackTab = StackTraceTab(self)
        for widget, title in ((self.infoTab, "General"),
                              (self.registerTab, "Registers"),
                              (self.stackTab, "Stack trace")):
            self.addTab(widget, title)
        events.Exception.connect(self.exceptionOccurred)
    def exceptionOccurred(self):
        """Jump straight to the stack trace when an exception arrives."""
        self.setCurrentIndex(2) #Stack trace
def formatFileSize(size):
    """Return *size* in bytes as a human-readable string in binary units."""
    gib = 1024 ** 3
    mib = 1024 ** 2
    kib = 1024
    if size >= gib:
        return "%.1f GiB" %(size / gib)
    if size >= mib:
        return "%.1f MiB" %(size / mib)
    if size >= kib:
        return "%.1f KiB" %(size / kib)
    # Anything below one KiB is shown as a plain byte count.
    return "%i B" %size
class FileTreeNode(QTreeWidgetItem):
    """Lazy-loading tree item for one remote file or directory.

    A size of -1 marks a directory whose contents are fetched on demand;
    any other size marks a file and is shown in column 1.
    """
    def __init__(self, parent, name, size, path):
        super().__init__(parent)
        self.name = name
        self.size = size
        self.path = path
        self.setText(0, name)
        if size == -1: #It's a folder
            # Contents are fetched lazily, when the folder is expanded
            self.loaded = False
        else: #It's a file
            self.setText(1, formatFileSize(size))
            self.loaded = True
    def loadChildren(self):
        """Fetch the contents of all not-yet-loaded child directories.

        Loading one level below this folder makes expand arrows appear
        for child directories before they are opened themselves.
        """
        if not self.loaded:
            for i in range(self.childCount()):
                child = self.child(i)
                if not child.loaded:
                    self.child(i).loadContent()
            self.loaded = True
    def loadContent(self):
        """Populate child items from bugger.readDirectory(self.path)."""
        entries = bugger.readDirectory(self.path)
        for entry in entries:
            FileTreeNode(self, entry.name, entry.size, self.path + "/" + entry.name)
    def dump(self, outdir, task):
        """Recursively copy this file/directory into local *outdir*.

        Honors task.canceled so a long dump can be aborted mid-way.
        """
        if task.canceled:
            return
        outpath = os.path.join(outdir, self.name)
        if self.size == -1:
            # Directory: replace a same-named local file, then recurse
            if os.path.isfile(outpath):
                os.remove(outpath)
            if not os.path.exists(outpath):
                os.mkdir(outpath)
            self.loadChildren()
            for i in range(self.childCount()):
                self.child(i).dump(outpath, task)
        else:
            bugger.dumpFile(self.path, outpath, task)
class FileTreeWidget(QTreeWidget):
    """Tree view over the console's /vol/content directory."""
    def __init__(self, parent):
        super().__init__(parent)
        self.setHeaderLabels(["Name", "Size"])
        self.itemExpanded.connect(self.handleItemExpanded)
        events.Connected.connect(self.initFileTree)
    def initFileTree(self):
        """Reset the tree and load the top-level directory listing."""
        self.clear()
        root = FileTreeNode(self, "content", -1, "/vol/content")
        root.loadContent()
        self.resizeColumnToContents(0)
    def handleItemExpanded(self, item):
        # Pull in one more level so expand arrows stay accurate.
        item.loadChildren()
        self.resizeColumnToContents(0)
class FileSystemTab(QWidget):
    """Tab for browsing, dumping and patching the remote file system."""
    def __init__(self, parent):
        super().__init__(parent)
        self.fileTree = FileTreeWidget(self)
        self.dumpButton = QPushButton("Dump", self)
        self.dumpButton.clicked.connect(self.dump)
        self.patchButton = QPushButton("Load patch", self)
        self.patchButton.clicked.connect(self.loadPatch)
        self.clearButton = QPushButton("Clear patch", self)
        self.clearButton.clicked.connect(self.clearPatch)
        # Bug fix: there is nothing to clear until a patch has been loaded;
        # loadPatch() enables this button once a patch is active.
        self.clearButton.setEnabled(False)
        self.layout = QVBoxLayout()
        hlayout = QHBoxLayout()
        hlayout.addWidget(self.dumpButton)
        hlayout.addWidget(self.patchButton)
        hlayout.addWidget(self.clearButton)
        self.layout.addWidget(self.fileTree)
        self.layout.addLayout(hlayout)
        self.setLayout(self.layout)
    def dump(self):
        """Dump the selected file/directory to a local directory."""
        item = self.fileTree.currentItem()
        if item:
            outdir = QFileDialog.getExistingDirectory(self, "Dump")
            if outdir:
                # Blocking task: updateBugger() must not pump messages while
                # the dump owns the connection.
                task = Task(blocking=True, cancelable=True)
                item.dump(outdir, task)
                task.end()
    def loadPatch(self):
        """Register every file under a local directory as a patch file."""
        patchDir = QFileDialog.getExistingDirectory(self, "Load patch")
        if patchDir:
            baseLength = len(patchDir)
            fileList = []
            for dirname, subdirs, files in os.walk(patchDir):
                for filename in files:
                    # Map local paths to /vol/... console paths
                    gamePath = "/vol" + dirname[baseLength:].replace("\\", "/") + "/" + filename
                    fileList.append(gamePath)
            bugger.setPatchFiles(fileList, patchDir)
            self.clearButton.setEnabled(True)
    def clearPatch(self):
        """Remove all patch files; nothing is left to clear afterwards."""
        bugger.clearPatchFiles()
        # Bug fix: was setEnabled(True), leaving a useless "Clear patch"
        # button active with no patch loaded.
        self.clearButton.setEnabled(False)
class DebuggerTabs(QTabWidget):
    """Main tab container; tracks connection state and exception arrival."""
    def __init__(self, parent):
        super().__init__(parent)
        self.memoryTab = MemoryTab(self)
        self.disassemblyTab = DisassemblyTab(self)
        self.threadingTab = ThreadingTab(self)
        self.breakPointTab = BreakPointTab(self)
        self.exceptionTab = ExceptionTab(self)
        self.fileSystemTab = FileSystemTab(self)
        self.addTab(self.memoryTab, "Memory")
        self.addTab(self.disassemblyTab, "Disassembly")
        self.addTab(self.threadingTab, "Threads")
        self.addTab(self.breakPointTab, "Breakpoints")
        self.addTab(self.exceptionTab, "Exceptions")
        self.addTab(self.fileSystemTab, "File System")
        # Bug fix: the exceptions tab has no content until an exception
        # arrives; exceptionOccurred() enables it (was True at init,
        # which made that enable redundant).
        self.setTabEnabled(4, False)
        events.Exception.connect(self.exceptionOccurred)
        events.Connected.connect(self.connected)
        events.Closed.connect(self.disconnected)
    def exceptionOccurred(self):
        """Enable and focus the exceptions tab."""
        self.setTabEnabled(4, True)
        self.setCurrentIndex(4) #Exceptions
    def connected(self):
        self.setEnabled(True)
    def disconnected(self):
        # Bug fix: all tabs are useless without a connection, and stale
        # exception info must be hidden until the next exception arrives
        # (both flags were inverted, making connected() a no-op).
        self.setEnabled(False)
        self.setTabEnabled(4, False)
class StatusWidget(QWidget):
    """Connection controls plus progress display for background tasks."""
    def __init__(self, parent):
        super().__init__(parent)
        self.serverLabel = QLabel("Wii U IP:")
        self.serverBox = QLineEdit(self)
        self.serverBox.returnPressed.connect(self.connect)
        self.connectButton = QPushButton("Connect", self)
        self.connectButton.clicked.connect(self.connect)
        self.disconnectButton = QPushButton("Disconnect", self)
        self.disconnectButton.clicked.connect(bugger.close)
        # Bug fix: nothing to disconnect from yet (was True)
        self.disconnectButton.setEnabled(False)
        self.progressBar = QProgressBar(self)
        self.progressInfo = QLabel("Disconnected", self)
        self.cancelButton = QPushButton("Cancel", self)
        self.cancelButton.clicked.connect(taskMgr.cancel)
        # Bug fix: no task is running yet (was True)
        self.cancelButton.setEnabled(False)
        self.layout = QGridLayout()
        self.layout.addWidget(self.serverLabel, 0, 0)
        self.layout.addWidget(self.serverBox, 1, 0)
        self.layout.addWidget(self.connectButton, 0, 1)
        self.layout.addWidget(self.disconnectButton, 1, 1)
        self.layout.addWidget(self.progressBar, 2, 0)
        self.layout.addWidget(self.cancelButton, 2, 1)
        self.layout.addWidget(self.progressInfo, 3, 0, 1, 2)
        self.setLayout(self.layout)
        events.Connected.connect(self.connected)
        events.Closed.connect(self.disconnected)
    def connect(self):
        """Best-effort connect; failures simply leave us disconnected."""
        # Narrowed from a bare except, which also swallowed
        # KeyboardInterrupt/SystemExit.
        try: bugger.connect(str(self.serverBox.text()))
        except Exception: pass
    def connected(self):
        self.progressInfo.setText("Connected")
        # Bug fix: while connected the address must not be edited and only
        # disconnecting makes sense (all three flags were True).
        self.connectButton.setEnabled(False)
        self.serverBox.setEnabled(False)
        self.disconnectButton.setEnabled(True)
    def disconnected(self):
        self.progressInfo.setText("Disconnected")
        self.connectButton.setEnabled(True)
        self.serverBox.setEnabled(True)
        # Bug fix: disconnect is pointless when already disconnected
        self.disconnectButton.setEnabled(False)
class MainWidget(QWidget):
    """Central widget: debugger tabs stacked above the connection bar."""
    def __init__(self, parent):
        super().__init__(parent)
        self.tabWidget = DebuggerTabs(self)
        self.statusWidget = StatusWidget(self)
        self.layout = QVBoxLayout()
        self.layout.addWidget(self.tabWidget)
        self.layout.addWidget(self.statusWidget)
        # Bug fix: the tabs are unusable until a console connection exists;
        # DebuggerTabs.connected() re-enables them on the Connected event
        # (was True at init, making that handler redundant).
        self.tabWidget.setEnabled(False)
        self.setLayout(self.layout)
class MainWindow(QMainWindow):
    """Top-level window; polls the debugger and keeps the title current."""
    def init(self):
        self.mainWidget = MainWidget(self)
        self.setCentralWidget(self.mainWidget)
        self.setWindowTitle("DiiBugger")
        self.resize(1080, 720)
        # Poll for messages from the console ten times per second
        self.timer = QTimer(self)
        self.timer.setInterval(100)
        self.timer.timeout.connect(self.updateBugger)
        self.timer.start()
        events.Connected.connect(self.updateTitle)
        events.Closed.connect(self.updateTitle)
    def updateTitle(self):
        """Show the running module's name in the title while connected."""
        if not bugger.connected:
            self.setWindowTitle("DiiBugger")
            return
        name = bugger.getModuleName()
        self.setWindowTitle("DiiBugger - %s" %name)
    def updateBugger(self):
        """Pump incoming messages unless a blocking task owns the link."""
        if bugger.connected and not taskMgr.isBlocking():
            bugger.updateMessages()
    def closeEvent(self, e):
        """Refuse to close while background tasks are still queued."""
        if not taskMgr.taskQueue:
            e.accept()
        else:
            e.ignore()
# Global state shared by all widgets: the last exception snapshot and the
# debugger connection itself.
exceptionState = ExceptionState()
bugger = PyBugger()
app = QApplication(sys.argv)
# A fixed-width font keeps hex dumps and disassembly columns aligned.
app.setFont(QFontDatabase.systemFont(QFontDatabase.FixedFont))
window = MainWindow()
window.init()
window.show()
app.exec()
# Shut the connection down cleanly when the UI exits.
if bugger.connected:
    bugger.close()
| |
import mimetypes
import os
import urlparse
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.conf import settings
from django.core.files.base import File
from django.core.files.storage import Storage
from django.core.files.uploadedfile import UploadedFile
from django.core.files.uploadhandler import FileUploadHandler, \
StopFutureHandlers
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse
from django.utils.encoding import smart_str, force_unicode, filepath_to_uri
from google.appengine.api import files, memcache
from google.appengine.api.images import get_serving_url, NotImageError, \
TransformationError, BlobKeyRequiredError
from google.appengine.ext.blobstore import BlobInfo, BlobKey, delete, \
create_upload_url, BLOB_KEY_HEADER, BLOB_RANGE_HEADER, BlobReader
def prepare_upload(request, url, **kwargs):
    """Return a Blobstore upload URL targeting *url*, plus empty extra params."""
    return create_upload_url(url), {}
def serve_file(request, file, save_as, content_type, **kwargs):
    """Serve *file* through the Blobstore serving infrastructure.

    The response only carries headers: the blob-key header tells App Engine
    to stream the blob itself. Range requests are forwarded via the blob
    range header.
    """
    holder = None
    if hasattr(file, 'file') and hasattr(file.file, 'blobstore_info'):
        holder = file.file
    elif hasattr(file, 'blobstore_info'):
        holder = file
    if holder is None:
        raise ValueError("The provided file can't be served via the "
                         "Google App Engine Blobstore.")
    blobkey = holder.blobstore_info.key()
    response = HttpResponse(content_type=content_type)
    response[BLOB_KEY_HEADER] = str(blobkey)
    response['Accept-Ranges'] = 'bytes'
    http_range = request.META.get('HTTP_RANGE')
    if http_range is not None:
        response[BLOB_RANGE_HEADER] = http_range
    if save_as:
        disposition = u'attachment; filename=%s' % save_as
        response['Content-Disposition'] = smart_str(disposition)
    if file.size is not None:
        response['Content-Length'] = file.size
    return response
class BlobstoreStorage(Storage):
    """Google App Engine Blobstore storage backend.

    Saved names have the form '<blobkey>/<original name>', so the blob key
    can always be recovered from the stored name.
    """

    def _open(self, name, mode='rb'):
        return BlobstoreFile(name, mode, self)

    def _save(self, name, content):
        """Store *content* in the Blobstore; return '<blobkey>/<name>'."""
        name = name.replace('\\', '/')
        if hasattr(content, 'file') and \
           hasattr(content.file, 'blobstore_info'):
            data = content.file.blobstore_info
        elif hasattr(content, 'blobstore_info'):
            data = content.blobstore_info
        elif isinstance(content, File):
            # Plain Django file: stream it into a new blob via the Files API.
            guessed_type = mimetypes.guess_type(name)[0]
            file_name = files.blobstore.create(
                mime_type=guessed_type or 'application/octet-stream',
                _blobinfo_uploaded_filename=name)
            with files.open(file_name, 'a') as f:
                for chunk in content.chunks():
                    f.write(chunk)
            files.finalize(file_name)
            data = files.blobstore.get_blob_key(file_name)
        else:
            raise ValueError("The App Engine storage backend only supports "
                             "BlobstoreFile instances or File instances.")
        if isinstance(data, (BlobInfo, BlobKey)):
            # We change the file name to the BlobKey's str() value.
            if isinstance(data, BlobInfo):
                data = data.key()
            return '%s/%s' % (data, name.lstrip('/'))
        else:
            raise ValueError("The App Engine Blobstore only supports "
                             "BlobInfo values. Data can't be uploaded "
                             "directly. You have to use the file upload "
                             "handler.")

    def delete(self, name):
        delete(self._get_key(name))

    def exists(self, name):
        return self._get_blobinfo(name) is not None

    def size(self, name):
        return self._get_blobinfo(name).size

    def url(self, name):
        """Return a memcached image-serving URL, or None if unavailable."""
        try:
            image_url = memcache.get(name)
            if not image_url:
                try:
                    image_url = get_serving_url(self._get_blobinfo(name),
                                                secure_url=True)
                    memcache.add(key=name, value=image_url)
                except (NotImageError, TransformationError):
                    return None
        except Exception:
            # Memcache/blobstore hiccups degrade to "no URL available".
            # (Narrowed from a bare except so SystemExit isn't swallowed.)
            # NOTE(review): this also swallows BlobKeyRequiredError, which
            # DevBlobstoreStorage.url() tries to catch - confirm intent.
            return None
        return image_url

    def created_time(self, name):
        return self._get_blobinfo(name).creation

    def get_valid_name(self, name):
        return force_unicode(name).strip().replace('\\', '/')

    def get_available_name(self, name):
        # No collision handling: _save() prefixes names with a unique
        # BlobKey, so duplicate plain names are presumably acceptable.
        return name.replace('\\', '/')

    def _get_key(self, name):
        # The blob key is the path segment before the first '/'.
        return BlobKey(name.split('/', 1)[0])

    def _get_blobinfo(self, name):
        return BlobInfo.get(self._get_key(name))
class DevBlobstoreStorage(BlobstoreStorage):
    """Blobstore storage for the development server.

    Falls back to a MEDIA_URL-based URL when the dev server cannot produce
    a real serving URL for the blob.
    """
    def url(self, name):
        # NOTE(review): BlobstoreStorage.url() wraps its body in a broad
        # except that returns None, so this fallback may be unreachable -
        # confirm against the parent implementation.
        try:
            return super(DevBlobstoreStorage, self).url(name)
        except BlobKeyRequiredError:
            return urlparse.urljoin(settings.MEDIA_URL, filepath_to_uri(name))
class BlobstoreFile(File):
    """Read-only Django File wrapper around a finalized Blobstore blob."""
    def __init__(self, name, mode, storage):
        self.name = name
        self._storage = storage
        self._mode = mode
        # Blob metadata resolved from the '<blobkey>/<name>' storage name
        self.blobstore_info = storage._get_blobinfo(name)
    @property
    def size(self):
        """Size in bytes, as recorded in the blob's metadata."""
        return self.blobstore_info.size
    def write(self, content):
        # Finalized blobs cannot be modified.
        raise NotImplementedError()
    @property
    def file(self):
        """Lazily open (and cache) a BlobReader for the underlying blob."""
        if not hasattr(self, '_file'):
            self._file = BlobReader(self.blobstore_info.key())
        return self._file
class BlobstoreFileUploadHandler(FileUploadHandler):
    """
    File upload handler for the Google App Engine Blobstore.
    """
    def new_file(self, *args, **kwargs):
        """Activate only for uploads rewritten by the Blobstore.

        The Blobstore attaches a 'blob-key' content-type parameter to the
        rewritten upload POST; when it is present, the file data already
        lives in the Blobstore and later handlers are skipped.
        """
        super(BlobstoreFileUploadHandler, self).new_file(*args, **kwargs)
        blobkey = self.content_type_extra.get('blob-key')
        self.active = blobkey is not None
        if self.active:
            self.blobkey = BlobKey(blobkey)
            raise StopFutureHandlers()
    def receive_data_chunk(self, raw_data, start):
        """Pass chunks to the next handler when inactive; otherwise consume.

        Returning None (implicitly) tells Django the chunk was handled -
        the data is already stored in the Blobstore, so nothing is buffered.
        """
        if not self.active:
            return raw_data
    def file_complete(self, file_size):
        """
        Return a file object if we're activated.
        """
        if not self.active:
            return
        return BlobstoreUploadedFile(
            blobinfo=BlobInfo(self.blobkey),
            charset=self.charset)
class BlobstoreUploadedFile(UploadedFile):
    """
    An uploaded file backed by a Blobstore blob, read through a BlobReader.
    """
    def __init__(self, blobinfo, charset):
        super(BlobstoreUploadedFile, self).__init__(
            BlobReader(blobinfo.key()), blobinfo.filename,
            blobinfo.content_type, blobinfo.size, charset)
        self.blobstore_info = blobinfo
    def open(self, mode=None):
        # No-op: the BlobReader created in __init__ is the only handle.
        pass
    def chunks(self, chunk_size=1024 * 128):
        """Yield the blob's content in chunks of *chunk_size* bytes."""
        self.file.seek(0)
        while True:
            content = self.read(chunk_size)
            if not content:
                break
            yield content
    def multiple_chunks(self, chunk_size=1024 * 128):
        # Always report chunked so consumers stream via chunks().
        return True
| |
# -*- coding: utf-8 -*-
"""
This module provides function to estimate causal influences of signals on each
other.
Granger causality
*****************
Granger causality is a method to determine causal influence of one signal on
another based on autoregressive modelling. It was developed by Nobel prize
laureate Clive Granger and has been adopted in various numerical fields ever
since :cite:`granger-Granger69_424`. In its simplest form, the
method tests whether the past values of one signal help to reduce the
prediction error of another signal, compared to the past values of the latter
signal alone. If it does reduce the prediction error, the first signal is said
to Granger cause the other signal.
Limitations
-----------
The user must be mindful of the method's limitations, which are assumptions of
covariance stationary data, linearity imposed by the underlying autoregressive
modelling as well as the fact that the variables not included in the model will
not be accounted for :cite:`granger-Seth07_1667`.
Implementation
--------------
The mathematical implementation of Granger causality methods in this module
closely follows :cite:`granger-Ding06_0608035`.
Overview of Functions
---------------------
Various formulations of Granger causality have been developed. In this module
you will find function for time-series data to test pairwise Granger causality
(`pairwise_granger`).
Time-series Granger causality
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autosummary::
:toctree: _toctree/causality/
pairwise_granger
conditional_granger
Tutorial
--------
:doc:`View tutorial <../tutorials/granger_causality>`
Run tutorial interactively:
.. image:: https://mybinder.org/badge.svg
:target: https://mybinder.org/v2/gh/NeuralEnsemble/elephant/master
?filepath=doc/tutorials/granger_causality.ipynb
:copyright: Copyright 2014-2020 by the Elephant team, see `doc/authors.rst`.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function, unicode_literals
import warnings
from collections import namedtuple
import numpy as np
from neo.core import AnalogSignal
# Public API of this module.
__all__ = (
    "Causality",
    "pairwise_granger",
    "conditional_granger"
)
# The return type of the pairwise_granger() function; all four fields are
# floats (see the pairwise_granger docstring for their meaning).
Causality = namedtuple('Causality',
                       ['directional_causality_x_y',
                        'directional_causality_y_x',
                        'instantaneous_causality',
                        'total_interdependence'])
def _bic(cov, order, dimension, length):
"""
Calculate Bayesian Information Criterion
Parameters
----------
cov : np.ndarray
covariance matrix of auto regressive model
order : int
order of autoregressive model
dimension : int
dimensionality of the data
length : int
number of time samples
Returns
-------
criterion : float
Bayesian Information Criterion
"""
sign, log_det_cov = np.linalg.slogdet(cov)
criterion = 2 * log_det_cov \
+ 2*(dimension**2)*order*np.log(length)/length
return criterion
def _aic(cov, order, dimension, length):
"""
Calculate Akaike Information Criterion
Parameters
----------
cov : np.ndarray
covariance matrix of auto regressive model
order : int
order of autoregressive model
dimension : int
dimensionality of the data
length : int
number of time samples
Returns
-------
criterion : float
Akaike Information Criterion
"""
sign, log_det_cov = np.linalg.slogdet(cov)
criterion = 2 * log_det_cov \
+ 2*(dimension**2)*order/length
return criterion
def _lag_covariances(signals, dimension, max_lag):
r"""
Determine covariances of time series and time shift of itself up to a
maximal lag
Parameters
----------
signals : np.ndarray
time series data
dimension : int
number of time series
max_lag : int
maximal time lag to be considered
Returns
-------
lag_corr : np.ndarray
correlations matrices of lagged signals
Covariance of shifted signals calculated according to the following
formula:
x: d-dimensional signal
x^T: transpose of d-dimensional signal
N: number of time points
\tau: lag
C(\tau) = \sum_{i=0}^{N-\tau} x[i]*x^T[\tau+i]
"""
length = np.size(signals[0])
if length < max_lag:
raise ValueError("Maximum lag larger than size of data")
# centralize time series
signals_mean = (signals - np.mean(signals, keepdims=True)).T
lag_covariances = np.zeros((max_lag+1, dimension, dimension))
# determine lagged covariance for different time lags
for lag in range(0, max_lag+1):
lag_covariances[lag] = \
np.mean(np.einsum('ij,ik -> ijk', signals_mean[:length-lag],
signals_mean[lag:]), axis=0)
return lag_covariances
def _yule_walker_matrix(data, dimension, order):
    r"""
    Generate matrix for Yule-Walker equation

    Parameters
    ----------
    data : np.ndarray
        correlation of data shifted with lags up to order
    dimension : int
        dimensionality of data (e.g. number of channels)
    order : int
        order of the autoregressive model

    Returns
    -------
    yule_walker_matrix : np.ndarray
        matrix in Yule-Walker equation

        Yule-Walker Matrix M is a block-structured symmetric matrix with
        dimension (d \cdot p)\times(d \cdot p)

        where
        d: dimension of signal
        p: order of autoregressive model
        C(\tau): time-shifted covariances \tau -> d \times d matrix

        The blocks of size (d \times d) are set as follows:

        M_ij = C(j-i)^T

        where 1 \leq i \leq j \leq p. The other entries are determined by
        symmetry.
    lag_covariances : np.ndarray
        lagged covariances C(0..order) as returned by _lag_covariances
    """
    lag_covariances = _lag_covariances(data, dimension, order)
    yule_walker_matrix = np.zeros((dimension*order, dimension*order))
    # Fill the upper block triangle with C(j-i)^T and mirror each block
    # (not transposed) into the lower triangle, exploiting symmetry.
    for block_row in range(order):
        for block_column in range(block_row, order):
            yule_walker_matrix[block_row*dimension: (block_row+1)*dimension,
                               block_column*dimension:
                               (block_column+1)*dimension] = \
                lag_covariances[block_column-block_row].T

            yule_walker_matrix[block_column*dimension:
                               (block_column+1)*dimension,
                               block_row*dimension:
                               (block_row+1)*dimension] = \
                lag_covariances[block_column-block_row]
    return yule_walker_matrix, lag_covariances
def _vector_arm(signals, dimension, order):
    r"""
    Determine coefficients of autoregressive model from time series data.

    Coefficients of autoregressive model calculated via solving the linear
    equation

    M A = C

    where
    M: Yule-Walker Matrix
    A: Coefficients of autoregressive model
    C: Time-shifted covariances with positive lags

    Covariance matrix C_0 is then given by

    C_0 = C[0] - \sum_{i=0}^{p-1} A[i]C[i+1]

    where p is the order of the autoregressive model.

    Parameters
    ----------
    signals : np.ndarray
        time series data
    dimension : int
        dimensionality of the data
    order : int
        order of the autoregressive model

    Returns
    -------
    coeffs : np.ndarray
        coefficients of the autoregressive model
        shape (order, dimension, dimension)
    covar_mat : np.ndarray
        covariance matrix C_0 of the model residuals
    """
    yule_walker_matrix, lag_covariances = \
        _yule_walker_matrix(signals, dimension, order)
    positive_lag_covariances = np.reshape(lag_covariances[1:],
                                          (dimension*order, dimension))
    # Solve M A = C in the least-squares sense.
    # NOTE(review): np.linalg.lstsq without rcond raises a FutureWarning on
    # modern NumPy; passing rcond=None keeps the newer default - confirm
    # before changing, as it can alter results for ill-conditioned M.
    lstsq_coeffs = \
        np.linalg.lstsq(yule_walker_matrix, positive_lag_covariances)[0]
    coeffs = []
    # Split the stacked solution back into one (d x d) block per lag.
    for index in range(order):
        coeffs.append(lstsq_coeffs[index*dimension:(index+1)*dimension, ].T)
    coeffs = np.stack(coeffs)
    cov_matrix = np.copy(lag_covariances[0])
    # Residual covariance: C_0 = C(0) - sum_i A[i] C(i+1)
    for i in range(order):
        cov_matrix -= np.matmul(coeffs[i], lag_covariances[i+1])
    return coeffs, cov_matrix
def _optimal_vector_arm(signals, dimension, max_order,
information_criterion='aic'):
"""
Determine optimal auto regressive model by choosing optimal order via
Information Criterion
Parameters
----------
signals : np.ndarray
time series data
dimension : int
dimensionality of the data
max_order : int
maximal order to consider
information_criterion : str
A function to compute the information criterion:
`bic` for Bayesian information_criterion,
`aic` for Akaike information criterion
Default: aic
Returns
-------
optimal_coeffs : np.ndarray
coefficients of the autoregressive model
optimal_cov_mat : np.ndarray
covariance matrix of
optimal_order : int
optimal order
"""
length = np.size(signals[0])
optimal_ic = np.infty
optimal_order = 1
optimal_coeffs = np.zeros((dimension, dimension, optimal_order))
optimal_cov_matrix = np.zeros((dimension, dimension))
for order in range(1, max_order + 1):
coeffs, cov_matrix = _vector_arm(signals, dimension, order)
if information_criterion == 'aic':
temp_ic = _aic(cov_matrix, order, dimension, length)
elif information_criterion == 'bic':
temp_ic = _bic(cov_matrix, order, dimension, length)
else:
raise ValueError("The specified information criterion is not"
"available. Please use 'aic' or 'bic'.")
if temp_ic < optimal_ic:
optimal_ic = temp_ic
optimal_order = order
optimal_coeffs = coeffs
optimal_cov_matrix = cov_matrix
return optimal_coeffs, optimal_cov_matrix, optimal_order
def pairwise_granger(signals, max_order, information_criterion='aic'):
    r"""
    Determine Granger Causality of two time series

    Parameters
    ----------
    signals : (N, 2) np.ndarray or neo.AnalogSignal
        A matrix with two time series (second dimension) that have N time
        points (first dimension).
    max_order : int
        Maximal order of autoregressive model.
    information_criterion : {'aic', 'bic'}, optional
        A function to compute the information criterion:
            `bic` for Bayesian information_criterion,
            `aic` for Akaike information criterion,
        Default: 'aic'

    Returns
    -------
    Causality
        A `namedtuple` with the following attributes:
            directional_causality_x_y : float
                The Granger causality value for X influence onto Y.
            directional_causality_y_x : float
                The Granger causality value for Y influence onto X.
            instantaneous_causality : float
                The remaining channel interdependence not accounted for by
                the directional causalities (e.g. shared input to X and Y).
            total_interdependence : float
                The sum of the former three metrics. It measures the dependence
                of X and Y. If the total interdependence is positive, X and Y
                are not independent.

        Denote covariance matrix of signals
            X by C|X - a real number
            Y by C|Y - a real number
            (X,Y) by C|XY - a (2 \times 2) matrix

        directional causality X -> Y given by
            log(C|X / C|XY_00)
        directional causality Y -> X given by
            log(C|Y / C|XY_11)
        instantaneous causality of X,Y given by
            log(C|XY_00 / C|XY_11)
        total interdependence of X,Y given by
            log( {C|X \cdot C|Y} / det{C|XY} )

    Raises
    ------
    ValueError
        If the provided signal does not have a shape of Nx2.

        If the determinant of the prediction error covariance matrix is not
        positive.

    Warns
    -----
    UserWarning
        If the log determinant of the prediction error covariance matrix is
        below the tolerance level of 1e-7.

    Notes
    -----
    The formulas used in this implementation follows
    :cite:`granger-Ding06_0608035`. The only difference being that we change
    the equation 47 in the following way:

    -R(k) - A(1)R(k - 1) - ... - A(m)R(k - m) = 0.

    This formulation allows for the usage of R values without transposition
    (i.e. directly) in equation 48.

    Examples
    --------
    Example 1. Independent variables.

    >>> import numpy as np
    >>> from elephant.causality.granger import pairwise_granger
    >>> pairwise_granger(np.random.uniform(size=(1000, 2)), max_order=2)
    Causality(directional_causality_x_y=0.0,
    directional_causality_y_x=-0.0,
    instantaneous_causality=0.0,
    total_interdependence=0.0)

    Example 2. Dependent variables. Y depends on X but not vice versa.

    .. math::
        \begin{array}{ll}
            X_t \sim \mathcal{N}(0, 1) \\
            Y_t = 3.5 \cdot X_{t-1} + \epsilon, \;
            \epsilon \sim\mathcal{N}(0, 1)
        \end{array}

    In this case, the directional causality is non-zero.

    >>> x = np.random.randn(1001)
    >>> y = 3.5 * x[:-1] + np.random.randn(1000)
    >>> signals = np.array([x[1:], y]).T  # N x 2 matrix
    >>> pairwise_granger(signals, max_order=1)
    Causality(directional_causality_x_y=2.64,
    directional_causality_y_x=0.0,
    instantaneous_causality=0.0,
    total_interdependence=2.64)
    """
    if isinstance(signals, AnalogSignal):
        signals = signals.magnitude
    if not (signals.ndim == 2 and signals.shape[1] == 2):
        raise ValueError("The input 'signals' must be of dimensions Nx2.")
    # transpose (N,2) -> (2,N) for mathematical convenience
    signals = signals.T
    # signal_x and signal_y are (1, N) arrays
    signal_x, signal_y = np.expand_dims(signals, axis=1)
    # Fit two univariate models and one bivariate model; the residual
    # (co)variances are what enter the causality log-ratios below.
    coeffs_x, var_x, p_1 = _optimal_vector_arm(signal_x, 1, max_order,
                                               information_criterion)
    coeffs_y, var_y, p_2 = _optimal_vector_arm(signal_y, 1, max_order,
                                               information_criterion)
    coeffs_xy, cov_xy, p_3 = _optimal_vector_arm(signals, 2, max_order,
                                                 information_criterion)
    # slogdet avoids overflow/underflow of the raw determinant.
    sign, log_det_cov = np.linalg.slogdet(cov_xy)
    tolerance = 1e-7
    if sign <= 0:
        raise ValueError(
            "Determinant of covariance matrix must be always positive: "
            "In this case its sign is {}".format(sign))
    if log_det_cov <= tolerance:
        warnings.warn("The value of the log determinant is at or below the "
                      "tolerance level. Proceeding with computation.",
                      UserWarning)
    # Log-ratios of univariate residual variances to bivariate residual
    # (co)variances - see the formulas in the docstring above.
    directional_causality_y_x = np.log(var_x[0]) - np.log(cov_xy[0, 0])
    directional_causality_x_y = np.log(var_y[0]) - np.log(cov_xy[1, 1])
    instantaneous_causality = \
        np.log(cov_xy[0, 0]) + np.log(cov_xy[1, 1]) - log_det_cov
    instantaneous_causality = np.asarray(instantaneous_causality)
    total_interdependence = np.log(var_x[0]) + np.log(var_y[0]) - log_det_cov
    # Round GC according to following scheme:
    #     Note that standard error scales as 1/sqrt(sample_size)
    #     Calculate significant figures according to standard error
    length = np.size(signal_x)
    asymptotic_std_error = 1/np.sqrt(length)
    est_sig_figures = int((-1)*np.around(np.log10(asymptotic_std_error)))
    directional_causality_x_y_round = np.around(directional_causality_x_y,
                                                est_sig_figures)
    directional_causality_y_x_round = np.around(directional_causality_y_x,
                                                est_sig_figures)
    instantaneous_causality_round = np.around(instantaneous_causality,
                                              est_sig_figures)
    total_interdependence_round = np.around(total_interdependence,
                                            est_sig_figures)
    return Causality(
        directional_causality_x_y=directional_causality_x_y_round.item(),
        directional_causality_y_x=directional_causality_y_x_round.item(),
        instantaneous_causality=instantaneous_causality_round.item(),
        total_interdependence=total_interdependence_round.item())
def conditional_granger(signals, max_order, information_criterion='aic'):
    r"""
    Determine conditional Granger Causality of the second time series on the
    first time series, given the third time series. In other words, for time
    series X_t, Y_t and Z_t, this function tests if Y_t influences X_t via
    Z_t.

    Parameters
    ----------
    signals : (N, 3) np.ndarray or neo.AnalogSignal
        A matrix with three time series (second dimension) that have N time
        points (first dimension). The time series to be conditioned on is
        the third.
    max_order : int
        Maximal order of autoregressive model.
    information_criterion : {'aic', 'bic'}, optional
        A function to compute the information criterion:
            `bic` for Bayesian information_criterion,
            `aic` for Akaike information criterion,
        Default: 'aic'

    Returns
    -------
    conditional_causality_xy_z_round : float
        The value of conditional causality of Y_t on X_t given Z_t. Zero
        value indicates that causality of Y_t on X_t is solely dependent on
        Z_t.

    Raises
    ------
    ValueError
        If the provided signal does not have a shape of Nx3.

    Notes
    -----
    The formulas used in this implementation follows
    :cite:`granger-Ding06_0608035`. Specifically, the Eq 35.
    """
    if isinstance(signals, AnalogSignal):
        signals = signals.magnitude
    if not (signals.ndim == 2 and signals.shape[1] == 3):
        raise ValueError("The input 'signals' must be of dimensions Nx3.")
    # (N,3) -> (3,N): rows become individual channels
    signals = signals.T
    # Each channel becomes a (1, N) array
    signal_x, signal_y, signal_z = np.expand_dims(signals, axis=1)
    # Residual variance of X when modelled together with Z only...
    xz_stack = np.vstack([signal_x, signal_z])
    _, residual_cov_xz, _ = _optimal_vector_arm(
        xz_stack, dimension=2, max_order=max_order,
        information_criterion=information_criterion)
    # ...versus when Y is added to the model.
    _, residual_cov_xyz, _ = _optimal_vector_arm(
        signals, dimension=3, max_order=max_order,
        information_criterion=information_criterion)
    conditional_causality_xy_z = np.log(residual_cov_xz[0, 0]) \
        - np.log(residual_cov_xyz[0, 0])
    # Round to the number of significant figures implied by the asymptotic
    # standard error, which scales as 1/sqrt(sample_size).
    n_samples = np.size(signal_x)
    asymptotic_std_error = 1 / np.sqrt(n_samples)
    est_sig_figures = int(-np.around(np.log10(asymptotic_std_error)))
    conditional_causality_xy_z_round = np.around(conditional_causality_xy_z,
                                                 est_sig_figures)
    return conditional_causality_xy_z_round
| |
#!/usr/bin/env python
from functools import partial
import json
from operator import attrgetter
import os
from random import randint
import sys
import flask
from flask import Flask, request, render_template, make_response, jsonify
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy import create_engine
from sqlalchemy.ext import baked
# Python 2/3 compatibility: Python 3 removed xrange, so alias it to range.
if sys.version_info[0] == 3:
    xrange = range
# Database host is configurable for the benchmark rig; default to localhost.
DBHOST = os.environ.get('DBHOST', 'localhost')
try:
    # Prefer the native MySQLdb driver when it is installed; fall back to the
    # pure-Python pymysql driver otherwise (e.g. on Python 3 or PyPy).
    import MySQLdb
    mysql_schema = "mysql:"
except ImportError:
    mysql_schema = "mysql+pymysql:"
# setup
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = mysql_schema + '//benchmarkdbuser:benchmarkdbpass@%s:3306/hello_world?charset=utf8' % DBHOST
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
db = SQLAlchemy(app)
# Separate raw engine for the ORM-bypassing "raw" endpoints; autocommit avoids
# per-request transaction overhead.
dbraw_engine = create_engine(app.config['SQLALCHEMY_DATABASE_URI'], connect_args={'autocommit': True}, pool_reset_on_return=None)
# Baked-query cache: compiled SQL is reused across requests.
bakery = baked.bakery()
# models
class World(db.Model):
    """ORM model for the benchmark ``World`` table: an id and a random number."""
    __tablename__ = "World"
    id = db.Column(db.Integer, primary_key=True)
    randomNumber = db.Column(db.Integer)

    # http://stackoverflow.com/questions/7102754/jsonify-a-sqlalchemy-result-set-in-flask
    @property
    def serialize(self):
        """Return object data in easily serializeable format"""
        return {
            'id'          : self.id,
            'randomNumber': self.randomNumber
        }

    @staticmethod
    def get(ident):
        """Fetch a ``World`` row by primary key via a cached (baked) query."""
        baked_query = bakery(lambda s: s.query(World))
        return baked_query(db.session()).get(ident)
class Fortune(db.Model):
    """ORM model for the benchmark ``Fortune`` table: an id and a message."""
    __tablename__ = "Fortune"
    id = db.Column(db.Integer, primary_key=True)
    message = db.Column(db.String)
# views
# flask.jsonify doesn't allow an array at the top level for security reasons,
# so we provide our own JSON response helper.
def json_response(obj):
    """Serialize *obj* (including top-level lists) as an application/json response."""
    body = json.dumps(obj)
    response = make_response(body)
    response.mimetype = "application/json"
    return response
@app.route("/json")
def hello():
    """Test 1: JSON serialization of a constant greeting."""
    return jsonify({'message': 'Hello, World!'})
@app.route("/db")
def get_random_world():
    """Test 2/3: fetch ``queries`` random World rows via the ORM.

    The query count comes from the ``queries`` request argument and is
    clamped into [1, 500] as the benchmark spec requires.
    """
    num_queries = request.args.get("queries", 1, type=int)
    # Single-expression clamp replaces the two sequential if-statements.
    num_queries = max(1, min(num_queries, 500))
    worlds = [World.get(randint(1, 10000)).serialize
              for _ in xrange(num_queries)]
    return json_response(worlds)
@app.route("/dbs")
def get_random_world_single():
    """Fetch one random World row via the ORM and return it as JSON."""
    ident = randint(1, 10000)
    payload = World.get(ident).serialize
    return json_response(payload)
@app.route("/dbraw")
def get_random_world_raw():
    """Like /db but bypasses the ORM, using raw SQL on a pooled connection.

    Fixes: the SQL is now parameterized instead of built by string
    concatenation, and the connection is released in a ``finally`` block so
    it is returned to the pool even if a query raises.
    """
    connection = dbraw_engine.connect()
    try:
        num_queries = request.args.get("queries", 1, type=int)
        num_queries = max(1, min(num_queries, 500))
        worlds = []
        for _ in xrange(num_queries):
            wid = randint(1, 10000)
            result = connection.execute(
                "SELECT * FROM World WHERE id = %s", (wid,)).fetchone()
            worlds.append({'id': result[0], 'randomNumber': result[1]})
    finally:
        connection.close()
    return json_response(worlds)
@app.route("/dbsraw")
def get_random_world_single_raw():
    """Single-row variant of /dbraw.

    Fixes: parameterized SQL instead of string concatenation, and the
    connection is released via ``finally`` even when the query raises.
    """
    connection = dbraw_engine.connect()
    try:
        wid = randint(1, 10000)
        result = connection.execute(
            "SELECT * FROM World WHERE id = %s", (wid,)).fetchone()
        world = {'id': result[0], 'randomNumber': result[1]}
    finally:
        connection.close()
    return json_response(world)
@app.route("/fortunes")
def get_fortunes():
    """Test 4: all fortunes plus one added at request time, sorted by message."""
    extra = Fortune(id=0, message="Additional fortune added at request time.")
    fortunes = Fortune.query.all() + [extra]
    fortunes = sorted(fortunes, key=attrgetter('message'))
    return render_template('fortunes.html', fortunes=fortunes)
@app.route("/fortunesraw")
def get_forutens_raw():
    """Raw-SQL variant of /fortunes."""
    result = dbraw_engine.execute("SELECT * FROM Fortune")
    fortunes = result.fetchall()
    result.close()
    # Raw rows and the ORM object both expose a ``message`` attribute,
    # so one sort key covers the mixed list.
    fortunes.append(Fortune(id=0, message="Additional fortune added at request time."))
    fortunes.sort(key=attrgetter('message'))
    return render_template('fortunes.html', fortunes=fortunes)
@app.route("/updates")
def updates():
    """Test 5: Database Updates.

    Reads ``queries`` random World rows, assigns each a new random number
    and commits. Fixes: single-expression clamp, and the loop variable no
    longer shadows the ``id`` builtin.
    """
    num_queries = request.args.get('queries', 1, type=int)
    num_queries = max(1, min(num_queries, 500))
    rand = partial(randint, 1, 10000)
    # Sorted access order avoids deadlocks between concurrent update runs.
    ids = sorted(rand() for _ in xrange(num_queries))
    worlds = []
    for ident in ids:
        world = World.get(ident)
        world.randomNumber = rand()
        worlds.append(world.serialize)
    # Build the response before committing so serialization reads the
    # in-session values.
    res = json_response(worlds)
    db.session.commit()
    return res
@app.route("/raw-updates")
def raw_updates():
    """Test 5: Database Updates"""
    # Raw-SQL variant: read + update ``queries`` random rows on one pooled
    # connection, released in the finally block.
    connection = dbraw_engine.connect()
    try:
        num_queries = request.args.get('queries', 1, type=int)
        # Clamp the requested query count into [1, 500] per benchmark spec.
        if num_queries < 1:
            num_queries = 1
        if num_queries > 500:
            num_queries = 500
        worlds = []
        rp = partial(randint, 1, 10000)
        for i in xrange(num_queries):
            world = connection.execute("SELECT * FROM World WHERE id=%s", (rp(),)).fetchone()
            randomNumber = rp()
            worlds.append({'id': world['id'], 'randomNumber': randomNumber})
            connection.execute("UPDATE World SET randomNumber=%s WHERE id=%s", (randomNumber, world['id']))
        return json_response(worlds)
    finally:
        connection.close()
@app.route('/plaintext')
def plaintext():
    """Test 6: Plaintext"""
    resp = make_response(b'Hello, World!')
    resp.content_type = 'text/plain'
    return resp
try:
    # When served under meinheld, silence access logging and lengthen
    # keep-alive to reduce per-request overhead; harmless if absent.
    import meinheld
    meinheld.server.set_access_logger(None)
    meinheld.set_keepalive(120)
except ImportError:
    pass
# entry point for debugging
if __name__ == "__main__":
    app.run(debug=True)
| |
# TODO
#
# @author Oktay Acikalin <oktay.acikalin@gmail.com>
# @copyright Oktay Acikalin
# @license MIT (LICENSE.txt)
from collections import OrderedDict
import pygame
from diamond.scene import Scene
from diamond.tilematrix import TileMatrix
from diamond import event
from diamond.tools.tilematrix.selection import Selection
class TilesheetScene(Scene):
    """Editor scene that shows one tilesheet at a time and lets the user
    select, combine, retime and extract tiles with mouse and keyboard.

    NOTE(review): Python 2 code (print statements, dict.iteritems, .next()).
    """

    def setup(self, shared_data):
        """Build a TileMatrix per sheet file and register all input listeners.

        :param shared_data: dict shared between editor scenes; reads
            ``'sheet_files'`` (iterable of (alias, filename) pairs) here and
            later writes ``'selection'`` in :meth:`commit_selection`.
        """
        super(TilesheetScene, self).setup()
        self.shared_data = shared_data
        self.add_default_listeners()
        s_width, s_height = self.scene_manager.display.screen_size
        self.screen_size = (s_width, s_height)
        # alias -> TileMatrix; insertion order defines layer cycling order.
        self.tilemaps = OrderedDict()
        self.current_tilemap = None
        # self.root_node.set_clipping_region(
        #     0,
        #     0,
        #     s_width,
        #     s_height,
        # )
        self.selection = Selection(self.root_node)
        # 'REPLACE' = drag rectangle selection, 'ADD' (shift held) = per-tile.
        self.selection_mode = 'REPLACE'
        for alias, filename in shared_data['sheet_files']:
            tilematrix = TileMatrix(alias)
            tilematrix.hide()
            tilematrix.load_sheet_file(filename, alias)
            # tilematrix.show_sector_coords = True
            tilematrix.add_to(self.root_node)
            self.tilemaps[alias] = tilematrix
            vault = tilematrix.get_sheet(alias)
            tile_size = tilematrix.get_tile_size()
            self.selection.add_tilematrix(tilematrix)
            sprites = vault.get_sprites().copy()
            # print 'tile_size =', tile_size
            # print 'found %d sprites' % len(sprites)
            # TODO Remove all autotiles from the list.
            # if hasattr(sheet_module, 'autotiles'):
            #     for item in chain(*sheet_module.autotiles.values()):
            #         del sprites[item]
            rects = OrderedDict()
            # Gain map size from tiles found
            for sprite in sprites.itervalues():
                rects[sprite.name] = pygame.Rect(sprite.get_action('none').get_frame(0).rect)
            # print rects
            # Union of all frame rects gives the pixel extent of the sheet.
            size_in_pixel = pygame.Rect(rects.values()[0]).unionall(rects.values()).size
            size_in_tiles = (size_in_pixel[0] / tile_size[0], size_in_pixel[1] / tile_size[1])
            # print size_in_pixel, size_in_tiles
            map_data = dict()
            # Put tiles in proper map position.
            overlapped_tiles = []
            for key, val in rects.iteritems():
                x, y = val.x / tile_size[0], val.y / tile_size[1]
                # print key, val, (x, y)
                pos = (x, y)
                if pos in map_data:
                    overlapped_tiles.append(key)
                else:
                    map_data[pos] = key
            # Append overlapped tiles at the bottom of the map.
            if overlapped_tiles:
                # print overlapped_tiles
                # First create an empty line as separator.
                cur_x, cur_y = 0, size_in_tiles[1]
                # Now add the overlapping tiles.
                for key in overlapped_tiles:
                    # Skip generated autotile parts except the representation.
                    if key.startswith(':') and not key.endswith(':inner'):
                        continue
                    if cur_x >= size_in_tiles[0]:
                        cur_x = 0
                        cur_y += 1
                    map_data[(cur_x, cur_y)] = key
                    cur_x += 1
                size_in_tiles = size_in_tiles[0], cur_y + 1
            # print map_data
            points = [(key[0], key[1], 0, val) for key, val in map_data.iteritems()]
            # print points
            tilematrix.load_points(points)
            # UPDATE: We probably don't need it anymore.
            # Align sector to sheet size for faster movement. This also rebuilds
            # the matrix with the data loaded above.
            # print 'size_in_tiles =', size_in_tiles
            # tilematrix.set_sector_size(*size_in_tiles)
        # Show the first sheet initially.
        iterator = iter(self.tilemaps)
        # iterator.next()
        self.set_current_tilemap(iterator.next())
        self.bind(
            event.add_listener(self.__on_mouse_motion_event, 'scene.event.system',
                               context__scene__is=self,
                               context__event__type__eq=pygame.locals.MOUSEMOTION),
            event.add_listener(self.__on_switch_scene_keyup_event, 'scene.event.system',
                               context__scene__is=self,
                               context__event__type__eq=pygame.locals.KEYUP,
                               context__event__key__eq=pygame.locals.K_SPACE),
            event.add_listener(self.__on_mouse_button_pressed_event, 'scene.event.system',
                               context__scene__is=self,
                               context__event__type__eq=pygame.locals.MOUSEBUTTONDOWN),
            event.add_listener(self.__on_mouse_button_released_event, 'scene.event.system',
                               context__scene__is=self,
                               context__event__type__eq=pygame.locals.MOUSEBUTTONUP),
            # TODO add event for rmb down without shift to clear selection.
            # TODO add event for rmb down with shift to remove selection at pos.
            event.add_listener(self.__on_lshift_key_pressed_event, 'scene.event.system',
                               context__scene__is=self,
                               context__event__type__eq=pygame.locals.KEYDOWN,
                               context__event__key__eq=pygame.locals.K_LSHIFT),
            event.add_listener(self.__on_lshift_key_released_event, 'scene.event.system',
                               context__scene__is=self,
                               context__event__type__eq=pygame.locals.KEYUP,
                               context__event__key__eq=pygame.locals.K_LSHIFT),
            event.add_listener(self.__on_next_layer_keyup_event, 'scene.event.system',
                               context__scene__is=self,
                               context__scene__selection_mode__eq='REPLACE',
                               context__event__type__eq=pygame.locals.KEYUP,
                               context__event__key__eq=pygame.locals.K_PLUS),
            event.add_listener(self.__on_previous_layer_keyup_event, 'scene.event.system',
                               context__scene__is=self,
                               context__scene__selection_mode__eq='REPLACE',
                               context__event__type__eq=pygame.locals.KEYUP,
                               context__event__key__eq=pygame.locals.K_MINUS),
            event.add_listener(self.__on_change_bg_color_keyup_event, 'scene.event.system',
                               context__scene__is=self,
                               context__event__type__eq=pygame.locals.KEYUP,
                               context__event__key__in=(pygame.locals.K_1,
                                   pygame.locals.K_2, pygame.locals.K_3,
                                   pygame.locals.K_4, pygame.locals.K_5,
                                   pygame.locals.K_6, pygame.locals.K_7)),
            event.add_listener(self.__on_combine_frames_keyup_event, 'scene.event.system',
                               context__scene__is=self,
                               context__scene__selection_mode__eq='ADD',
                               context__event__type__eq=pygame.locals.KEYUP,
                               context__event__key__eq=pygame.locals.K_c),
            event.add_listener(self.__on_uncombine_frames_keyup_event, 'scene.event.system',
                               context__scene__is=self,
                               context__scene__selection_mode__eq='REPLACE',
                               context__event__type__eq=pygame.locals.KEYUP,
                               context__event__key__eq=pygame.locals.K_c),
            event.add_listener(self.__on_speedup_frames_keyup_event, 'scene.event.system',
                               context__scene__is=self,
                               context__scene__selection_mode__eq='ADD',
                               context__event__type__eq=pygame.locals.KEYUP,
                               context__event__key__eq=pygame.locals.K_PLUS),
            event.add_listener(self.__on_slowdown_frames_keyup_event, 'scene.event.system',
                               context__scene__is=self,
                               context__scene__selection_mode__eq='ADD',
                               context__event__type__eq=pygame.locals.KEYUP,
                               context__event__key__eq=pygame.locals.K_MINUS),
            event.add_listener(self.__on_equalize_frame_speed_keyup_event, 'scene.event.system',
                               context__scene__is=self,
                               context__scene__selection_mode__eq='ADD',
                               context__event__type__eq=pygame.locals.KEYUP,
                               context__event__mod__eq=pygame.locals.KMOD_LSHIFT,
                               context__event__key__eq=pygame.locals.K_0),
            event.add_listener(self.__on_save_tilemap_vault_keyup_event, 'scene.event.system',
                               context__scene__is=self,
                               context__event__type__eq=pygame.locals.KEYUP,
                               context__event__key__eq=pygame.locals.K_s),
            event.add_listener(self.__on_extract_autotile_keyup_event, 'scene.event.system',
                               context__scene__is=self,
                               context__event__type__eq=pygame.locals.KEYUP,
                               context__event__mod__eq=pygame.locals.KMOD_LCTRL,
                               context__event__key__eq=pygame.locals.K_a),
        )
    def __on_switch_scene_keyup_event(self, context):
        """Space key: swap from the tilesheet scene to the tilemap scene."""
        self.scene_manager.hide_scene('tilesheet')
        self.scene_manager.show_scene('tilemap')
    def __on_lshift_key_pressed_event(self, context):
        """Holding left shift switches to additive (per-tile) selection."""
        self.selection_mode = 'ADD'
        # print self.selection_mode
    def __on_lshift_key_released_event(self, context):
        """Releasing left shift restores rectangle (replace) selection."""
        self.selection_mode = 'REPLACE'
        # print self.selection_mode
    def __on_next_layer_keyup_event(self, context):
        """Plus key: show the next tilesheet, if any (no wrap-around)."""
        keys = self.tilemaps.keys()
        index = keys.index(self.current_tilemap)
        if index + 1 < len(keys):
            key = keys[index + 1]
            self.set_current_tilemap(key)
    def __on_previous_layer_keyup_event(self, context):
        """Minus key: show the previous tilesheet, if any (no wrap-around)."""
        keys = self.tilemaps.keys()
        index = keys.index(self.current_tilemap)
        if index - 1 >= 0:
            key = keys[index - 1]
            self.set_current_tilemap(key)
    def set_current_tilemap(self, alias):
        """Make the tilesheet *alias* the visible one and reset its selection.

        No-op when *alias* is already current.
        """
        if self.current_tilemap == alias:
            return
        # Hide every sheet, then reveal only the requested one.
        [tilematrix.hide() for tilematrix in self.tilemaps.itervalues()]
        tilematrix = self.tilemaps[alias]
        tilematrix.show()
        self.current_tilemap = alias
        self.current_tile_size = tilematrix.get_tile_size()
        self.current_tilemap_rect = tilematrix.get_virtual_rect().size
        self.selection.clear_selection(tilematrix.name)
        print 'Showing tilesheet: %s' % alias
    def __on_mouse_motion_event(self, context):
        '''
        Tracks mouse movements and drags the map around.
        Updates the selection while lmb is being pressed down.
        '''
        pos = context.event.pos
        # print 'pos =', pos
        tilemap = self.tilemaps[self.current_tilemap]
        # tile_size = self.current_tile_size
        # i_width, i_height = self.current_tilemap_rect
        # s_width, s_height = self.screen_size
        # # print self.current_tilemap_rect, self.screen_size
        # # Tolerance boundaries.
        # t_w, t_h = max(tile_size[0], 128), max(tile_size[1], 128)
        # p_x = min(1.0, max(0, (pos[0] - t_w)) / float(s_width - t_w * 2))
        # p_y = min(1.0, max(0, (pos[1] - t_h)) / float(s_height - t_h * 2))
        # # print p_x, p_y
        # x, y = max(0, (i_width - s_width)) * p_x * -1, max(0, (i_height - s_height)) * p_y * -1
        # # print x, y
        # tilemap.set_pos(int(x), int(y))
        # pygame reports held buttons as a (lmb, mmb, rmb) tuple.
        lmb_pressed = context.event.buttons == (1, 0, 0)
        # rmb_pressed = context.event.buttons == (0, 0, 1)
        mmb_pressed = context.event.buttons == (0, 1, 0)
        if lmb_pressed:
            # print self.selection_mode
            if self.selection_mode == 'ADD':
                self.selection.add_selection(tilemap.name, [pos], translate_pos=True)
            else:
                self.selection.end_selection(tilemap.name, pos, translate_pos=True)
        elif mmb_pressed:
            # Middle-drag pans the sheet by the relative mouse movement.
            tilemap.set_pos_rel(*context.event.rel)
    def __on_mouse_button_pressed_event(self, context):
        """Left button down: add a tile ('ADD') or start a rectangle selection."""
        pos = context.event.pos
        # print 'pos =', pos
        lmb_pressed = context.event.button == 1
        # rmb_pressed = context.event.button == 3
        if lmb_pressed:
            tilemap = self.tilemaps[self.current_tilemap]
            # print self.selection_mode
            if self.selection_mode == 'ADD':
                self.selection.add_selection(tilemap.name, [pos], translate_pos=True)
            else:
                self.selection.begin_selection(tilemap.name, pos, translate_pos=True)
    def commit_selection(self):
        """Publish the current sheet selection into ``shared_data['selection']``.

        Stores the sheet alias and a pos -> first-layer-value mapping so the
        tilemap scene can paint with the selected tiles.
        """
        tilemap = self.tilemaps[self.current_tilemap]
        selection = self.selection.get_selection(tilemap.name)
        # Keep only the first layer entry per position.
        selection = dict((key, val[0]) for key, val in selection.iteritems())
        self.shared_data['selection'] = dict(
            alias=tilemap.name,
            points=selection,
        )
        # print self.shared_data
    def __on_mouse_button_released_event(self, context):
        """Left button up: finish a rectangle selection and publish the result."""
        pos = context.event.pos
        # print 'pos =', pos
        lmb_pressed = context.event.button == 1
        # rmb_pressed = context.event.button == 3
        if lmb_pressed:
            tilemap = self.tilemaps[self.current_tilemap]
            # print self.selection_mode
            if self.selection_mode == 'REPLACE':
                self.selection.end_selection(tilemap.name, pos, translate_pos=True)
            self.commit_selection()
    def __on_change_bg_color_keyup_event(self, context):
        """Keys 1-7 change the GL clear (background) color for contrast checks."""
        # TODO Move this into a class var.
        # Keys are str() of the pygame key codes for K_1 .. K_7.
        colors = {
            '49': (0.0, 0.0, 0.0, 1.0),
            '50': (1.0, 0.0, 0.0, 1.0),
            '51': (0.0, 1.0, 0.0, 1.0),
            '52': (0.0, 0.0, 1.0, 1.0),
            '53': (1.0, 0.0, 1.0, 1.0),
            '54': (0.0, 1.0, 1.0, 1.0),
            '55': (1.0, 1.0, 0.0, 1.0),
        }
        key = str(context.event.key)
        if key in colors:
            self.scene_manager.display.set_gl_clear_color(*colors[key])
    def __on_combine_frames_keyup_event(self, context):
        """Shift+C: merge all selected tiles' frames into the first tile.

        The first selected tile becomes an animation containing copies of
        every other selected tile's frames; the selection collapses to it.
        """
        tilemap = self.tilemaps[self.current_tilemap]
        selection = self.selection.get_selection(tilemap.name).items()
        if not selection:
            return
        # print selection
        # id = selection[0][1][0]
        tile = tilemap.get_tile_at(*selection[0][0])[0]
        action = tile.vault.get_actions().items()[0][1]
        # print id
        # print tile
        # print tile.vault
        # print action
        # print 'current frames =',
        # print action.get_frames()
        for pos, layers in selection[1:]:
            tile_ = tilemap.get_tile_at(*pos)[0]
            action_ = tile_.vault.get_actions().items()[0][1]
            frames_ = action_.get_frames()
            # print frames_
            for frame in frames_:
                # print 'adding frame:', frame
                action.add_frame(frame.copy())
        # print 'all frames =',
        # print action.get_frames()
        # Re-assign the vault to refresh the on-screen tile.
        tile.replace_vault(tile.vault)
        self.selection.clear_selection(tilemap.name)
        self.selection.add_selection(tilemap.name, [selection[0][0]], translate_pos=False)
        self.commit_selection()
        # TODO update sprites on all tilemaps!
    def __on_uncombine_frames_keyup_event(self, context):
        """C (no shift): reduce each selected tile to its first frame only."""
        tilemap = self.tilemaps[self.current_tilemap]
        selection = self.selection.get_selection(tilemap.name).items()
        if not selection:
            return
        # print selection
        for pos, layers in selection:
            tile = tilemap.get_tile_at(*pos)[0]
            action = tile.vault.get_actions().items()[0][1]
            frame = action.get_frame(0)
            action.clear_frames()
            action.add_frame(frame.copy())
            tile.replace_vault(tile.vault)
        # TODO update sprites on all tilemaps!
    def __on_speedup_frames_keyup_event(self, context):
        """Shift+Plus: shorten every frame duration by 10 ms (floor 10 ms)."""
        tilemap = self.tilemaps[self.current_tilemap]
        selection = self.selection.get_selection(tilemap.name).items()
        if not selection:
            return
        # print selection
        for pos, layers in selection:
            tile = tilemap.get_tile_at(*pos)[0]
            action = tile.vault.get_actions().items()[0][1]
            for frame in action.get_frames():
                frame.duration -= 10
                frame.duration = max(10, frame.duration)
            tile.replace_vault(tile.vault)
        # TODO update sprites on all tilemaps!
    def __on_slowdown_frames_keyup_event(self, context):
        """Shift+Minus: lengthen every frame duration by 10 ms (cap 60 s)."""
        tilemap = self.tilemaps[self.current_tilemap]
        selection = self.selection.get_selection(tilemap.name).items()
        if not selection:
            return
        # print selection
        for pos, layers in selection:
            tile = tilemap.get_tile_at(*pos)[0]
            action = tile.vault.get_actions().items()[0][1]
            for frame in action.get_frames():
                frame.duration += 10
                frame.duration = min(60000, frame.duration)
            tile.replace_vault(tile.vault)
        # TODO update sprites on all tilemaps!
    def __on_equalize_frame_speed_keyup_event(self, context):
        """Shift+0: set every selected frame's duration to the rounded average."""
        tilemap = self.tilemaps[self.current_tilemap]
        selection = self.selection.get_selection(tilemap.name).items()
        if not selection:
            return
        # print selection
        duration = []
        # First pass: collect all durations, rounded down to 10 ms steps.
        for pos, layers in selection:
            tile = tilemap.get_tile_at(*pos)[0]
            action = tile.vault.get_actions().items()[0][1]
            for frame in action.get_frames():
                duration.append(frame.duration // 10 * 10)
        # print duration
        duration = sum(duration) / len(duration)
        print duration
        # Second pass: apply the averaged duration to every frame.
        for pos, layers in selection:
            tile = tilemap.get_tile_at(*pos)[0]
            action = tile.vault.get_actions().items()[0][1]
            for frame in action.get_frames():
                frame.duration = duration
            tile.replace_vault(tile.vault)
            # print tile
        # TODO update sprites on all tilemaps!
    def __on_save_tilemap_vault_keyup_event(self, context):
        """S key: persist the current tilesheet's vault to disk."""
        print 'Saving tilesheet: %s' % self.current_tilemap
        tilemap = self.tilemaps[self.current_tilemap]
        tilemap.get_sheet(self.current_tilemap).save()
    def _extract_autotile_type_a(self, tilemap, selection):
        """Build a full autotile sprite set from a 2x2 tile selection.

        ``selection`` is a sorted list of ((x, y), layer_values) pairs;
        assumes the sort yields top-left, top-right, bottom-left,
        bottom-right order -- TODO confirm get_selection(sort=True)
        guarantees this. Edge (t/b/l/r), middle (m) and representation
        (inner) pieces are composed from half-tile sub-rects of the four
        corner tiles, registered on the sheet vault under
        ':A:<group>:<part>' keys, and a representation tile is placed on
        the map (reusing an existing spot when one is found).
        """
        tile_size = tilemap.get_tile_size()
        # print 'tile_size =', tile_size
        # Half-tile dimensions used to slice the corners.
        t_w = tile_size[0] / 2
        t_h = tile_size[1] / 2
        ids = [val[0] for key, val in selection]
        # print 'ids =', ids
        tl_id, tr_id, bl_id, br_id = ids
        vault = tilemap.get_tile_at(*selection[0][0])[0].vault.get_vault()
        # print 'vault =', vault
        # Group id encodes the four source sprite ids.
        autotile_group_id = '%s,%s,%s,%s' % tuple(ids)
        # print 'autotile_group_id =', autotile_group_id
        # Generate tl frame data.
        tl_frames = []
        frames = vault.get_sprite(tl_id).get_actions().values()[0].get_frames()
        # print 'frames =', frames
        for frame in frames:
            tl_frames.append([frame.rect, frame.hotspot, frame.delta, frame.duration, frame.events])
        # Generate tr frame data.
        tr_frames = []
        frames = vault.get_sprite(tr_id).get_actions().values()[0].get_frames()
        # print 'frames =', frames
        for frame in frames:
            tr_frames.append([frame.rect, frame.hotspot, frame.delta, frame.duration, frame.events])
        # Generate bl frame data.
        bl_frames = []
        frames = vault.get_sprite(bl_id).get_actions().values()[0].get_frames()
        # print 'frames =', frames
        for frame in frames:
            bl_frames.append([frame.rect, frame.hotspot, frame.delta, frame.duration, frame.events])
        # Generate br frame data.
        br_frames = []
        frames = vault.get_sprite(br_id).get_actions().values()[0].get_frames()
        # print 'frames =', frames
        for frame in frames:
            br_frames.append([frame.rect, frame.hotspot, frame.delta, frame.duration, frame.events])
        # Generate t frame data.
        t_frames = []
        for idx, frame in enumerate(tl_frames):
            r_a = pygame.Rect(tl_frames[idx][0])
            r_b = pygame.Rect(tr_frames[idx][0])
            rects = [[r_a.x + t_w, r_a.y, r_a.w - t_w, r_a.h], [r_b.x, r_b.y, r_b.w - t_w, r_b.h]]
            h_a = tl_frames[idx][1]
            h_b = tr_frames[idx][1]
            hotspots = [[h_a[0] + t_w, h_a[1]], [h_b[0], h_b[1]]]
            d_a = tl_frames[idx][2]
            d_b = tr_frames[idx][2]
            deltas = [[d_a[0] + t_w, d_a[1]], [d_b[0], d_b[1]]]
            duration = frame[3]
            events = frame[4] + tr_frames[idx][4]
            t_frames.append([rects, hotspots, deltas, duration, events])
        # Generate b frame data.
        b_frames = []
        for idx, frame in enumerate(bl_frames):
            r_a = pygame.Rect(bl_frames[idx][0])
            r_b = pygame.Rect(br_frames[idx][0])
            rects = [[r_a.x + t_w, r_a.y, r_a.w - t_w, r_a.h], [r_b.x, r_b.y, r_b.w - t_w, r_b.h]]
            h_a = bl_frames[idx][1]
            h_b = br_frames[idx][1]
            hotspots = [[h_a[0] + t_w, h_a[1]], [h_b[0], h_b[1]]]
            d_a = bl_frames[idx][2]
            d_b = br_frames[idx][2]
            deltas = [[d_a[0] + t_w, d_a[1]], [d_b[0], d_b[1]]]
            duration = frame[3]
            events = frame[4] + br_frames[idx][4]
            b_frames.append([rects, hotspots, deltas, duration, events])
        # Generate l frame data.
        l_frames = []
        for idx, frame in enumerate(tl_frames):
            r_a = pygame.Rect(tl_frames[idx][0])
            r_b = pygame.Rect(bl_frames[idx][0])
            rects = [[r_a.x, r_a.y + t_h, r_a.w, r_a.h - t_h], [r_b.x, r_b.y, r_b.w, r_b.h - t_h]]
            h_a = tl_frames[idx][1]
            h_b = bl_frames[idx][1]
            hotspots = [[h_a[0], h_a[1] + t_h], [h_b[0], h_b[1]]]
            d_a = tl_frames[idx][2]
            d_b = bl_frames[idx][2]
            deltas = [[d_a[0], d_a[1] + t_h], [d_b[0], d_b[1]]]
            duration = frame[3]
            events = frame[4] + bl_frames[idx][4]
            l_frames.append([rects, hotspots, deltas, duration, events])
        # Generate r frame data.
        r_frames = []
        for idx, frame in enumerate(tr_frames):
            r_a = pygame.Rect(tr_frames[idx][0])
            r_b = pygame.Rect(br_frames[idx][0])
            rects = [[r_a.x, r_a.y + t_h, r_a.w, r_a.h - t_h], [r_b.x, r_b.y, r_b.w, r_b.h - t_h]]
            h_a = tr_frames[idx][1]
            h_b = br_frames[idx][1]
            hotspots = [[h_a[0], h_a[1] + t_h], [h_b[0], h_b[1]]]
            d_a = tr_frames[idx][2]
            d_b = br_frames[idx][2]
            deltas = [[d_a[0], d_a[1] + t_h], [d_b[0], d_b[1]]]
            duration = frame[3]
            events = frame[4] + br_frames[idx][4]
            r_frames.append([rects, hotspots, deltas, duration, events])
        # Generate m frame data.
        m_frames = []
        for idx, frame in enumerate(tl_frames):
            r_a = pygame.Rect(tl_frames[idx][0])
            r_b = pygame.Rect(tr_frames[idx][0])
            r_c = pygame.Rect(bl_frames[idx][0])
            r_d = pygame.Rect(br_frames[idx][0])
            rects = [
                [r_d.x, r_d.y, r_d.w - t_w, r_d.h - t_h],
                [r_c.x + t_w, r_c.y, r_c.w - t_w, r_c.h - t_h],
                [r_b.x, r_b.y + t_h, r_b.w - t_w, r_b.h - t_h],
                [r_a.x + t_w, r_a.y + t_h, r_a.w - t_w, r_a.h - t_h],
            ]
            h_a = tl_frames[idx][1]
            h_b = tr_frames[idx][1]
            h_c = bl_frames[idx][1]
            h_d = br_frames[idx][1]
            hotspots = [
                [h_d[0], h_d[1]],
                [h_c[0] + t_w, h_c[1]],
                [h_b[0], h_b[1] + t_h],
                [h_a[0] + t_w, h_a[1] + t_h],
            ]
            d_a = tl_frames[idx][2]
            d_b = tr_frames[idx][2]
            d_c = bl_frames[idx][2]
            d_d = br_frames[idx][2]
            deltas = [
                [d_d[0], d_d[1]],
                [d_c[0] + t_w, d_c[1]],
                [d_b[0], d_b[1] + t_h],
                [d_a[0] + t_w, d_a[1] + t_h],
            ]
            duration = frame[3]
            events = frame[4] + tr_frames[idx][4]
            m_frames.append([rects, hotspots, deltas, duration, events])
        # Generate inner (representation) frame data.
        inner_frames = []
        for idx, frame in enumerate(tl_frames):
            r_a = pygame.Rect(tl_frames[idx][0])
            r_b = pygame.Rect(tr_frames[idx][0])
            r_c = pygame.Rect(bl_frames[idx][0])
            r_d = pygame.Rect(br_frames[idx][0])
            rects = [
                [r_a.x, r_a.y, r_a.w - t_w, r_a.h - t_h],
                [r_b.x + t_w, r_b.y, r_b.w - t_w, r_b.h - t_h],
                [r_c.x, r_c.y + t_h, r_c.w - t_w, r_c.h - t_h],
                [r_d.x + t_w, r_d.y + t_h, r_d.w - t_w, r_d.h - t_h],
            ]
            h_a = tl_frames[idx][1]
            h_b = tr_frames[idx][1]
            h_c = bl_frames[idx][1]
            h_d = br_frames[idx][1]
            hotspots = [
                [h_a[0], h_a[1]],
                [h_b[0] + t_w, h_b[1]],
                [h_c[0], h_c[1] + t_h],
                [h_d[0] + t_w, h_d[1] + t_h],
            ]
            d_a = tl_frames[idx][2]
            d_b = tr_frames[idx][2]
            d_c = bl_frames[idx][2]
            d_d = br_frames[idx][2]
            deltas = [
                [d_a[0], d_a[1]],
                [d_b[0] + t_w, d_b[1]],
                [d_c[0], d_c[1] + t_h],
                [d_d[0] + t_w, d_d[1] + t_h],
            ]
            duration = frame[3]
            events = frame[4] + tr_frames[idx][4]
            inner_frames.append([rects, hotspots, deltas, duration, events])
        autotile_data = OrderedDict()
        autotile_data['inner'] = {'none': inner_frames}  # Representation.
        autotile_data['tl'] = {'none': tl_frames}
        autotile_data['tr'] = {'none': tr_frames}
        autotile_data['bl'] = {'none': bl_frames}
        autotile_data['br'] = {'none': br_frames}
        autotile_data['t'] = {'none': t_frames}
        autotile_data['b'] = {'none': b_frames}
        autotile_data['l'] = {'none': l_frames}
        autotile_data['r'] = {'none': r_frames}
        autotile_data['m'] = {'none': m_frames}
        # print 'autotile_data =', autotile_data
        # Rewrite keys to reflect autotile group.
        sprite_data = OrderedDict()
        for key, val in autotile_data.iteritems():
            sprite_data[':A:%s:%s' % (autotile_group_id, key)] = val
        for key, val in sprite_data.iteritems():
            vault.add_sprite(key, val)
        # print 'vault sprites =', vault.get_sprites()
        # Now find position to place autotile representation at.
        results = tilemap.find_in_sector(0, 0, sprite_data.keys()[0])
        if results:
            # print results
            tm_x, tm_y = results[0][:2]
            # And place it.
            points = [(tm_x, tm_y, 0, sprite_data.keys()[0])]
            # print 'points =', points
            tile = tilemap.get_tile_at(tm_x, tm_y, 0)
            tile.replace_vault(vault.get_sprite(':A:%s:%s' % (autotile_group_id, 'inner')))
            # TODO update sprites on all tilemaps!
        else:
            tm_width, tm_height = tilemap.get_sector_size()
            # print 'tilemap size =', (tm_width, tm_height)
            tm_x, tm_y = 0, max(0, tm_height - 1)
            found = False
            # 'is False' (identity) matters: a found column index of 0 is
            # falsy but must still terminate the scan.
            while found is False:
                for x in range(0, tm_width):
                    if tilemap.get_tile_id_at(x, tm_y, 0) is None:
                        found = x
                        break
                if found is False:
                    tm_y += 1
            tm_x = found
            # And place it.
            points = [(tm_x, tm_y, 0, sprite_data.keys()[0])]
            # print 'points =', points
            tilemap.load_points(points)
            tilemap.set_sector_size(max(tm_width, tm_x + 1), tm_y + 1)
        self.selection.clear_selection(tilemap.name)
        self.selection.add_selection(tilemap.name, [points[0][:2]], translate_pos=False)
        self.commit_selection()
    def _extract_autotile_type_b(self, tilemap, selection):
        """Placeholder for 6-tile (type B) autotile extraction. Not implemented."""
        pass
def __on_extract_autotile_keyup_event(self, context):
tilemap = self.tilemaps[self.current_tilemap]
selection = self.selection.get_selection(tilemap.name, sort=True).items()
if not selection:
return
# print selection
if len(selection) == 4:
print 'Could be autotile type A.'
self._extract_autotile_type_a(tilemap, selection)
elif len(selection) == 6:
print 'Could be autotile type B.'
self._extract_autotile_type_b(tilemap, selection)
else:
print 'Unknown amount of tiles selected. Type A required 6 tiles. Type B requires 4 tiles.'
return
| |
"""
Class Implementations (~Cases for dispatcher)
"""
import os
from . import interfaces
from . import support
from . import errors
from . import configuration
# Names exported by this module (the dispatcher's case classes).
__all__ = [
    'OpenProjectFromFilePath',
    'OpenProjectFromName',
    'OpenProjectFromDirectory',
    'OpenProjectFallback'
]
class OpenProjectFromFilePath(interfaces.OpenProjectCaseInterface):
    """
    Input is path to project file.
    """

    def matches(self, _string):
        """
        Predicate: does *_string* point at an existing .sublime-project file?
        @type: _string: str
        @rtype: bool
        """
        candidate = support.ensure_end(_string, '.sublime-project')
        return support.is_sublime_project_file(candidate)

    def command(self, _string):
        """
        Build the shell command opening the project file at *_string*.
        @type: _string: str
        @rtype: str
        """
        candidate = support.ensure_end(_string, '.sublime-project')
        return support.sublime_project_command(candidate)
class OpenProjectFromName(interfaces.OpenProjectCaseInterface):
    """
    Attempt to open a project file by looking in a standardized location
    for all project files (usually located in the user's
    SublimeText packages directory).
    """

    class Defaults(object):
        # Fallback location used when no projects_directory is supplied.
        projects_directory = configuration.PROJECTS_DIRECTORY

    def __init__(self, projects_directory=None):
        """
        Input is project file, in standard directory for sublime-project files.
        """
        if projects_directory is None:
            self.projects_directory = self.Defaults.projects_directory
        else:
            self.projects_directory = projects_directory

    def matches(self, _string, projects_directory=None):
        """
        Predicate: does the normalized name exist in the projects directory?
        @type: _string: str
        @returns: bool
        """
        if projects_directory is None:
            projects_directory = self.projects_directory
        name = support.ensure_end(_string, '.sublime-project')
        # Fixed: the override directory is now actually consulted instead of
        # always listing self.projects_directory.
        return (name in self._dir_contents(projects_directory))

    def command(self, _string, projects_directory=None):
        """
        @type: _string: str
        @type: projects_directory: NoneType or str
        @rtype: str
        """
        if projects_directory is None:
            projects_directory = self.projects_directory
        # Fixed: normalize exactly as matches() does, so 'name' and
        # 'name.sublime-project' both resolve to the same existing file.
        name = support.ensure_end(_string, '.sublime-project')
        path = os.path.join(projects_directory, name)
        return support.sublime_project_command(path)

    def _dir_contents(self, projects_directory=None):
        """
        List contents of *projects_directory* (default: the instance's).
        @type: projects_directory: NoneType or str
        @rtype: list of str
        """
        if projects_directory is None:
            projects_directory = self.projects_directory
        if projects_directory is not None:
            if os.path.exists(projects_directory):
                return os.listdir(projects_directory)
        # Fallthrough condition
        return []
class OpenProjectFromDirectory(interfaces.OpenProjectCaseInterface):
    """
    Open project file contained inside a directory.
    Only works if directory contains only one project file.
    """

    def matches(self, _string):
        """
        Predicate. Does input string match this case?
        @type: _string: str
        @returns: bool
        """
        return self._has_single_project_file(_string)

    def command(self, _string):
        """
        Generate bash command string.
        Assumes _has_single_project_file has already been run.
        @type: _string: str
        @rtype: str
        """
        project_files = self._find_project_files(_string)
        try:
            project_file_path = project_files[0]
        except IndexError:
            raise errors.NoProjectFilesFoundError(str.format(
                "No project files found inside directory: {0}",
                _string
            ))
        return support.sublime_project_command(project_file_path)

    def _find_project_files(self, _string):
        """
        Return list of all project files in _string.
        If _string is not a directory, return empty list.
        @type: _string: str
        @rtype: list of str
        """
        # NOTE(review): isinstance against interfaces.ExistingDirectory is the
        # project's idiom for "names an existing directory" -- presumably a
        # virtual/ABC-style check; confirm in the interfaces module.
        if isinstance(_string, interfaces.ExistingDirectory):
            project_names = support.find_project_files(_string)
            project_paths = list(os.path.join(_string, name) for name in project_names)
            return project_paths
        else:
            return []

    def _has_single_project_file(self, directory):
        """
        Predicate. Does directory contain exactly one project file?
        """
        project_files = self._find_project_files(directory)
        return (len(project_files) == 1)
class OpenProjectFallback(interfaces.OpenProjectCaseInterface):
    """
    Fallback condition.
    Creates new project and workspace files and opens that project.
    ... this probably requires an existing folder
    """

    def matches(self, _string):
        """
        Predicate: does *_string* name an existing path?
        @type: _string: str
        @returns: bool
        """
        # Idiom fix: return the predicate directly instead of
        # 'if ...: return True / return False'.
        return isinstance(_string, interfaces.ExistingPath)

    def command(self, _string):
        """
        Create project + workspace files for *_string* and build the shell
        command that opens the new project targeting it.
        @type: _string: str
        @rtype: str
        """
        name = self.parse_name(_string)
        directory = self.parse_directory(_string)
        self.ensure_directory(directory)
        self.create_project(name, directory)
        self.create_workspace(name, directory)
        project_path = support.form_project_path(name, directory)
        target_path = support.normalize_path(_string)
        command = support.sublime_targeted_project_command(
            path=project_path,
            target=target_path
        )
        return command

    def parse_name(self, _string):
        """
        Return the name which should be used for the project file.
        @raises: errors.SublpException for paths that are neither file nor dir.
        """
        path = support.normalize_path(_string)
        if os.path.isdir(path):
            # final part of directory
            return self._get_final_dir(path)
        elif os.path.isfile(path):
            # file name, without path or extension
            return self._get_file_no_ext(path)
        else:
            raise errors.SublpException(
                "{0} does not know how to find project name for '{1}'.",
                type(self).__name__, path
            )

    def parse_directory(self, _string):
        """
        Return directory where project files should be contained.
        """
        # Default projects directory
        if configuration.DEFAULT_TO_PROJECTS_DIRECTORY:
            if os.path.isdir(configuration.PROJECTS_DIRECTORY):
                return configuration.PROJECTS_DIRECTORY
        # Extract destination directory from input
        path = support.normalize_path(_string)
        if os.path.isdir(path):
            return path
        elif os.path.isfile(path):
            return os.path.split(path)[0]
        else:
            raise errors.SublpException(
                "{0} does not know how to find directory for '{1}'.",
                type(self).__name__, path
            )

    def create_project(self, name, directory):
        """
        Write an empty .sublime-project file for *name* in *directory*.
        @type: name: str
        @type: directory: str
        @rtype: None
        """
        path = support.form_project_path(name, directory)
        support.write_json(path, {})

    def create_workspace(self, name, directory):
        """
        Write an empty .sublime-workspace file for *name* in *directory*.
        @type: name: str
        @type: directory: str
        @rtype: None
        """
        path = support.form_workspace_path(name, directory)
        support.write_json(path, {})

    def ensure_directory(self, path):
        """
        Create directory if it does not exist. Recursive.
        @type: path: str
        @param: path: Directory path.
        """
        # Simplified: the nested 'not isdir' test was redundant -- a path
        # that does not exist cannot be a directory.
        if not os.path.exists(path):
            os.makedirs(path)

    def _get_final_dir(self, path):
        """
        Returns final directory contained in path.
        Path should *not* be a file.
        @type: path: str
        @param: path: a directory
        @rtype: str
        """
        return os.path.basename(support.normalize_path(path))

    def _get_file_no_ext(self, path):
        """
        Return the file name from *path*, without directory or extension.
        @type: path: str
        @rtype: str
        """
        _, filename = os.path.split(path)
        return os.path.splitext(filename)[0]
| |
# -*- coding: utf-8 -*-
# Author: Florian Mayer <florian.mayer@bitsrc.org>
from __future__ import absolute_import, print_function
from datetime import datetime
import pytest
import numpy as np
from numpy.testing import assert_array_almost_equal
from sunpy.spectra.spectrogram import (Spectrogram, LinearTimeSpectrogram,
_LinearView)
from sunpy.extern.six.moves import range
def is_linear(arr):
    """Return True when *arr* is evenly spaced between its endpoints."""
    expected = np.linspace(arr[0], arr[-1], len(arr))
    return np.array_equal(arr, expected)
def dict_eq(one, other):
    """Compare two dicts, using array equality for ndarray values."""
    keys = set(one.keys())
    if keys != set(other.keys()):
        return False
    for k in keys:
        left = one[k]
        if isinstance(left, np.ndarray):
            if not np.array_equal(left, other[k]):
                return False
        elif left != other[k]:
            return False
    return True
def mk_spec(image):
    """Build a Spectrogram over *image* with unit-spaced axes."""
    n_freq, n_time = image.shape
    time_axis = np.linspace(0, n_time - 1, n_time)
    freq_axis = np.linspace(0, n_freq - 1, n_freq)
    return Spectrogram(
        image, time_axis, freq_axis,
        datetime(2010, 10, 10), datetime(2010, 10, 10, 1), 0
    )
def test_subtract_bg():
    """Background subtraction should recover the injected random signal."""
    # The idea is to generate background and add a random signal, perform
    # background subtraction and see if the signal comes out again.
    bg = np.linspace(0, 200, 200).astype(np.uint16)
    bg.shape = (200, 1)
    bg = bg + np.zeros((200, 3600))
    signal = np.random.rand(200, 1800) * 255
    signal = signal.astype(np.uint16)
    image = bg
    # Signal occupies only the second half of the time axis.
    image[:, 1800:] += signal
    spectrogram = mk_spec(image)
    sbg = spectrogram.subtract_bg()
    assert np.array_equal(
        spectrogram.subtract_bg()[:, 1800:].data, signal
    )
    # Metadata (axes, timestamps, ...) must survive the subtraction.
    assert dict_eq(spectrogram._get_params(), sbg._get_params())
def test_auto_const_bg():
    """auto_const_bg should estimate the constant background column."""
    # The idea is to generate background and add a random signal, perform
    # background subtraction and see if the signal comes out again.
    x = np.linspace(0, 200, 200).astype(np.uint16)
    bg = x.reshape(200, 1)
    bg = bg + np.zeros((200, 3600))
    signal = np.random.rand(200, 1800) * 255
    signal = signal.astype(np.uint16)
    image = bg
    image[:, 1800:] += signal
    spectrogram = mk_spec(image)
    sbg = spectrogram.auto_const_bg()
    # The estimated background must equal the injected column.
    assert np.array_equal(sbg, x.reshape(200, 1))
def test_randomized_auto_const_bg():
    """Randomized background estimation should find the injected column."""
    # The idea is to generate background and add a random signal, perform
    # background subtraction and see if the signal comes out again.
    # As this is a Monte-Carlo probabilistic algorithm this test might
    # fail occasionally.
    x = np.linspace(0, 200, 200).astype(np.uint16)
    bg = x.reshape(200, 1)
    bg = bg + np.zeros((200, 3600))
    signal = np.random.rand(200, 1800) * 255
    signal = signal.astype(np.uint16)
    image = bg
    image[:, 1800:] += signal
    spectrogram = mk_spec(image)
    # 1500 is the number of random samples drawn by the estimator.
    sbg = spectrogram.randomized_auto_const_bg(1500)
    assert np.array_equal(sbg, x.reshape(200, 1))
def test_slice_time_axis():
    """Slicing the time axis must adjust shape, t_init, axis and start."""
    rnd = np.random.rand(200, 3600)
    spectrogram = mk_spec(rnd)
    new = spectrogram[:, 59:3599]
    assert new.shape == (200, 3600 - 59 - 1)
    # t_init moves forward by the slice offset (1 s sampling).
    assert new.t_init == 59
    assert np.array_equal(new.time_axis,
        np.linspace(0, 3600 - 60 - 1, 3600 - 59 - 1)
    )
    assert new.start == datetime(2010, 10, 10, 0, 0, 59)
    assert np.array_equal(new.data, rnd[:, 59:3599])
def test_slice_freq_axis():
    """Slicing the frequency axis must adjust shape, freq_axis and data."""
    rnd = np.random.rand(200, 3600)
    spectrogram = mk_spec(rnd)
    new = spectrogram[100:150, :]
    assert new.shape == (50, 3600)
    assert np.array_equal(new.freq_axis, np.linspace(100, 149, 50))
    assert np.array_equal(new.data, rnd[100:150, :])
def test_slice_both_axis():
    """Slicing both axes at once must adjust all derived attributes."""
    rnd = np.random.rand(200, 3600)
    spectrogram = mk_spec(rnd)
    new = spectrogram[100:, 59:]
    assert new.shape == (100, 3600 - 59)
    assert new.t_init == 59
    assert np.array_equal(new.time_axis, np.linspace(0, 3600 - 60, 3600 - 59))
    assert new.start == datetime(2010, 10, 10, 0, 0, 59)
    assert np.array_equal(new.freq_axis, np.linspace(100, 199, 100))
    assert np.array_equal(new.data, rnd[100:, 59:])
def test_time_to_x():
    """time_to_x maps a datetime to an integer column index (linear case)."""
    image = np.zeros((200, 3600))
    spectrogram = LinearTimeSpectrogram(
        image, np.linspace(0, image.shape[1] - 1, image.shape[1]),
        np.linspace(0, image.shape[0] - 1, image.shape[0]),
        datetime(2010, 10, 10), datetime(2010, 10, 10, 1), 0, 1
    )
    # 59 s after start with 1 s sampling -> column 59.
    ret = spectrogram.time_to_x(datetime(2010, 10, 10, 0, 0, 59))
    assert isinstance(ret, int)
    assert ret == 59
def test_time_to_x_nonlinear():
    """time_to_x works on the base Spectrogram (no fixed sample rate) too."""
    image = np.zeros((200, 3600))
    spectrogram = Spectrogram(
        image, np.linspace(0, image.shape[1] - 1, image.shape[1]),
        np.linspace(0, image.shape[0] - 1, image.shape[0]),
        datetime(2010, 10, 10), datetime(2010, 10, 10, 1)
    )
    ret = spectrogram.time_to_x(datetime(2010, 10, 10, 0, 0, 59))
    assert isinstance(ret, int)
    assert ret == 59
def test_join():
    """join_many must concatenate overlapping spectrograms, resampling the
    coarser one to the finer cadence."""
    image = np.random.rand(200, 3600)
    # 0.5 s cadence, 30 minutes.
    one = LinearTimeSpectrogram(
        image, np.linspace(0, 0.5 * (image.shape[1] - 1), image.shape[1]),
        np.linspace(0, image.shape[0] - 1, image.shape[0]),
        datetime(2010, 10, 10), datetime(2010, 10, 10, 0, 30), 0, 0.5,
    )
    image = np.random.rand(200, 3600)
    # 1 s cadence, starts 1 s before `one` ends.
    other = LinearTimeSpectrogram(
        image, np.linspace(0, image.shape[1] - 1, image.shape[1]),
        np.linspace(0, image.shape[0] - 1, image.shape[0]),
        datetime(2010, 10, 10, 0, 29),
        datetime(2010, 10, 10, 1, 29), 1799, 1,
    )
    z = LinearTimeSpectrogram.join_many(
        [one, other], nonlinear=False, maxgap=0
    )
    # The - 1 is because resampling other produces an image of size
    # 2 * 3600 - 1
    # The - 2 is because there is one second overlap.
    assert z.shape == (200, 3 * 3600 - 2 - 1)
    assert np.array_equal(z.data[:, :3598], one.data[:, :-2])
    # assert np.array_equal(z[:, 3598:], ndimage.zoom(other, (1, 2)))
    assert z.start == one.start
    assert z.end == other.end
    assert is_linear(z.time_axis)
    assert isinstance(z, LinearTimeSpectrogram)
def test_join_dtype():
    """Joining spectrograms of equal dtype must preserve that dtype."""
    image = np.random.rand(200, 3600).astype(np.uint8)
    one = LinearTimeSpectrogram(
        image, np.linspace(0, 0.5 * (image.shape[1] - 1), image.shape[1]),
        np.linspace(0, image.shape[0] - 1, image.shape[0]),
        datetime(2010, 10, 10), datetime(2010, 10, 10, 0, 30), 0, 0.5,
    )
    image = np.random.rand(200, 3600).astype(np.uint8)
    other = LinearTimeSpectrogram(
        image, np.linspace(0, image.shape[1] - 1, image.shape[1]),
        np.linspace(0, image.shape[0] - 1, image.shape[0]),
        datetime(2010, 10, 10, 0, 29),
        datetime(2010, 10, 10, 1, 29), 1799, 1,
    )
    z = LinearTimeSpectrogram.join_many(
        [one, other], nonlinear=False, maxgap=0
    )
    assert z.dtype == np.dtype('uint8')
def test_join_different_dtype():
    """Joining mixed dtypes must promote to the wider type (uint16)."""
    image = np.random.rand(200, 3600).astype(np.uint16)
    one = LinearTimeSpectrogram(
        image, np.linspace(0, 0.5 * (image.shape[1] - 1), image.shape[1]),
        np.linspace(0, image.shape[0] - 1, image.shape[0]),
        datetime(2010, 10, 10), datetime(2010, 10, 10, 0, 30), 0, 0.5,
    )
    image = np.random.rand(200, 3600).astype(np.uint8)
    other = LinearTimeSpectrogram(
        image, np.linspace(0, image.shape[1] - 1, image.shape[1]),
        np.linspace(0, image.shape[0] - 1, image.shape[0]),
        datetime(2010, 10, 10, 0, 29),
        datetime(2010, 10, 10, 1, 29), 1799, 1,
    )
    z = LinearTimeSpectrogram.join_many(
        [one, other], nonlinear=False, maxgap=0
    )
    assert z.dtype == np.dtype('uint16')
def test_join_midnight():
    """join_many must handle spectrograms straddling a midnight boundary."""
    image = np.random.rand(200, 3600)
    one = LinearTimeSpectrogram(
        image, np.linspace(0, 0.5 * (image.shape[1] - 1), image.shape[1]),
        np.linspace(0, image.shape[0] - 1, image.shape[0]),
        datetime(2010, 10, 10, 23, 30),
        datetime(2010, 10, 10, 23, 59, 59), 84600, 0.5,
    )
    image = np.random.rand(200, 3600)
    # Second spectrogram starts exactly at midnight of the next day.
    other = LinearTimeSpectrogram(
        image, np.linspace(0, image.shape[1] - 1, image.shape[1]),
        np.linspace(0, image.shape[0] - 1, image.shape[0]),
        datetime(2010, 10, 11, 0, 0), datetime(2010, 10, 11, 1), 0, 1,
    )
    z = LinearTimeSpectrogram.join_many(
        [one, other], nonlinear=False, maxgap=0
    )
    # The - 1 is because resampling other produces an image of size
    # 2 * 3600 - 1
    assert z.shape == (200, 3 * 3600 - 1)
    assert np.array_equal(z.data[:, :3600], one.data)
    assert is_linear(z.time_axis)
    assert isinstance(z, LinearTimeSpectrogram)
def test_join_month():
    """join_many must handle spectrograms straddling a month boundary."""
    image = np.random.rand(200, 3600)
    one = LinearTimeSpectrogram(
        image, np.linspace(0, 0.5 * (image.shape[1] - 1), image.shape[1]),
        np.linspace(0, image.shape[0] - 1, image.shape[0]),
        datetime(2012, 7, 31, 23, 30),
        datetime(2012, 7, 31, 23, 59, 59), 84600, 0.5,
    )
    image = np.random.rand(200, 3600)
    # Second spectrogram begins on the first day of the next month.
    other = LinearTimeSpectrogram(
        image, np.linspace(0, image.shape[1] - 1, image.shape[1]),
        np.linspace(0, image.shape[0] - 1, image.shape[0]),
        datetime(2012, 8, 1), datetime(2012, 8, 1, 1), 0, 1,
    )
    z = LinearTimeSpectrogram.join_many(
        [one, other], nonlinear=False, maxgap=0
    )
    # The - 1 is because resampling other produces an image of size
    # 2 * 3600 - 1
    assert z.shape == (200, 3 * 3600 - 1)
    assert np.array_equal(z.data[:, :3600], one.data)
    assert is_linear(z.time_axis)
    assert isinstance(z, LinearTimeSpectrogram)
def test_join_year():
    """join_many must handle spectrograms straddling a year boundary."""
    image = np.random.rand(200, 3600)
    one = LinearTimeSpectrogram(
        image, np.linspace(0, 0.5 * (image.shape[1] - 1), image.shape[1]),
        np.linspace(0, image.shape[0] - 1, image.shape[0]),
        datetime(2012, 12, 31, 23, 30),
        datetime(2013, 1, 1, 0, 0, 0), 84600, 0.5,
    )
    image = np.random.rand(200, 3600)
    # Second spectrogram begins on New Year's Day.
    other = LinearTimeSpectrogram(
        image, np.linspace(0, image.shape[1] - 1, image.shape[1]),
        np.linspace(0, image.shape[0] - 1, image.shape[0]),
        datetime(2013, 1, 1), datetime(2013, 1, 1, 1), 0, 1,
    )
    z = LinearTimeSpectrogram.join_many(
        [one, other], nonlinear=False, maxgap=0
    )
    # The - 1 is because resampling other produces an image of size
    # 2 * 3600 - 1
    assert z.shape == (200, 3 * 3600 - 1)
    assert np.array_equal(z.data[:, :3600], one.data)
    assert is_linear(z.time_axis)
    assert isinstance(z, LinearTimeSpectrogram)
def test_join_over_midnight():
    """join_many must handle a pair whose boundary lies after midnight."""
    image = np.random.rand(200, 3600)
    one = LinearTimeSpectrogram(
        image, np.linspace(0, 0.5 * (image.shape[1] - 1), image.shape[1]),
        np.linspace(0, image.shape[0] - 1, image.shape[0]),
        datetime(2010, 10, 10, 23, 45),
        datetime(2010, 10, 11, 0, 15,), 85500, 0.5,
    )
    image = np.random.rand(200, 3600)
    other = LinearTimeSpectrogram(
        image, np.linspace(0, image.shape[1] - 1, image.shape[1]),
        np.linspace(0, image.shape[0] - 1, image.shape[0]),
        datetime(2010, 10, 11, 0, 15), datetime(2010, 10, 11, 1, 15), 900, 1,
    )
    z = LinearTimeSpectrogram.join_many(
        [one, other], nonlinear=False, maxgap=0
    )
    # The unused `oz = other.resample_time(0.5)` flagged with
    # "FIXME: not used?!" has been removed; it had no effect on the test.
    # The - 1 is because resampling other produces an image of size
    # 2 * 3600 - 1
    assert z.shape == (200, 3 * 3600 - 1)
    assert np.array_equal(z.data[:, :3600], one.data)
    assert np.array_equal(z.time_axis[:3600], one.time_axis)
    assert is_linear(z.time_axis)
    assert isinstance(z, LinearTimeSpectrogram)
def test_join_gap():
    """join_many must raise ValueError when the gap exceeds maxgap."""
    image = np.random.rand(200, 3600)
    one = LinearTimeSpectrogram(
        image, np.linspace(0, 0.5 * (image.shape[1] - 1), image.shape[1]),
        np.linspace(0, image.shape[0] - 1, image.shape[0]),
        datetime(2010, 10, 10, 23, 45),
        datetime(2010, 10, 11, 0, 15,), 85500, 0.5,
    )
    image = np.random.rand(200, 3600)
    # Starts one second after `one` ends -> a 1 s gap, larger than maxgap=0.
    other = LinearTimeSpectrogram(
        image, np.linspace(0, image.shape[1] - 1, image.shape[1]),
        np.linspace(0, image.shape[0] - 1, image.shape[0]),
        datetime(2010, 10, 11, 0, 15, 1),
        datetime(2010, 10, 11, 1, 15), 901, 1,
    )
    with pytest.raises(ValueError) as excinfo:
        LinearTimeSpectrogram.join_many(
            [one, other], nonlinear=False, maxgap=0
        )
    # BaseException.message does not exist on Python 3; compare the
    # stringified exception instead.
    assert str(excinfo.value) == "Too large gap."
def test_join_with_gap():
    """join_many must bridge a gap within maxgap using the fill value."""
    image = np.random.rand(200, 3600)
    one = LinearTimeSpectrogram(
        image, np.linspace(0, 0.5 * (image.shape[1] - 1), image.shape[1]),
        np.linspace(0, image.shape[0] - 1, image.shape[0]),
        datetime(2010, 10, 10, 23, 45),
        datetime(2010, 10, 11, 0, 15,), 85500, 0.5,
    )
    image = np.random.rand(200, 3600)
    # t_init 901 leaves a one second gap after `one`.
    other = LinearTimeSpectrogram(
        image, np.linspace(0, image.shape[1] - 1, image.shape[1]),
        np.linspace(0, image.shape[0] - 1, image.shape[0]),
        datetime(2010, 10, 11, 0, 15), datetime(2010, 10, 11, 1, 15), 901, 1,
    )
    z = LinearTimeSpectrogram.join_many(
        [one, other], nonlinear=False, maxgap=1, fill=0
    )
    # The - 1 is because resampling other produces an image of size
    # 2 * 3600 - 1
    # The + 2 is because there is one second without data inserted.
    assert z.shape == (200, 3 * 3600 + 2 - 1)
    assert np.array_equal(z.data[:, :3600], one.data)
    # Second data to unpack masked array
    assert (z.data.data[:, 3600:3602] == 0).all()
    assert is_linear(z.time_axis)
    assert isinstance(z, LinearTimeSpectrogram)
def test_join_with_gap_fill():
    """join_many must fill a bridged gap with the requested NaN value."""
    image = np.random.rand(200, 3600)
    one = LinearTimeSpectrogram(
        image, np.linspace(0, 0.5 * (image.shape[1] - 1), image.shape[1]),
        np.linspace(0, image.shape[0] - 1, image.shape[0]),
        datetime(2010, 10, 10, 23, 45),
        datetime(2010, 10, 11, 0, 15,), 85500, 0.5,
    )
    image = np.random.rand(200, 3600)
    # t_init 901 leaves a one second gap after `one`.
    other = LinearTimeSpectrogram(
        image, np.linspace(0, image.shape[1] - 1, image.shape[1]),
        np.linspace(0, image.shape[0] - 1, image.shape[0]),
        datetime(2010, 10, 11, 0, 15), datetime(2010, 10, 11, 1, 15), 901, 1,
    )
    # np.NaN was removed in NumPy 2.0; np.nan is the canonical spelling.
    z = LinearTimeSpectrogram.join_many(
        [one, other], nonlinear=False, maxgap=2, fill=np.nan
    )
    # The - 1 is because resampling other produces an image of size
    # 2 * 3600 - 1
    # The + 2 is because there is one second without data inserted.
    assert z.shape == (200, 3 * 3600 + 2 - 1)
    assert np.array_equal(z.data[:, :3600], one.data)
    # Second data to unpack masked array
    assert np.isnan(z.data.data[:, 3600:3602]).all()
    assert is_linear(z.time_axis)
    assert isinstance(z, LinearTimeSpectrogram)
def test_join_nonlinear():
    """With nonlinear=True, join_many keeps the original (gapped) time axis
    and returns a plain Spectrogram."""
    image = np.random.rand(200, 3600)
    one = LinearTimeSpectrogram(
        image, np.linspace(0, 0.5 * (image.shape[1] - 1), image.shape[1]),
        np.linspace(0, image.shape[0] - 1, image.shape[0]),
        datetime(2010, 10, 10, 23, 45),
        datetime(2010, 10, 11, 0, 15,), 85500, 0.5,
    )
    image = np.random.rand(200, 3600)
    other = LinearTimeSpectrogram(
        image, np.linspace(0, image.shape[1] - 1, image.shape[1]),
        np.linspace(0, image.shape[0] - 1, image.shape[0]),
        datetime(2010, 10, 11, 0, 15),
        datetime(2010, 10, 11, 1, 15), 901, 1,
    )
    # Resample separately to predict the second half's time axis.
    oz = other.resample_time(0.5)
    z = LinearTimeSpectrogram.join_many(
        [one, other], nonlinear=True, maxgap=2
    )
    # The - 1 is because resampling other produces an image of size
    # 2 * 3600 - 1
    assert z.shape == (200, 3 * 3600 - 1)
    assert np.array_equal(z.data[:, :3600], one.data)
    assert np.array_equal(z.time_axis[:3600], one.time_axis)
    assert np.array_equal(z.time_axis[3600:], oz.time_axis + 1801)
    assert isinstance(z, Spectrogram)
def test_auto_t_init():
    """t_init defaults to seconds-since-midnight of the start time."""
    image = np.random.rand(200, 3600)
    # 00:15 -> 900 seconds after midnight.
    assert Spectrogram(image,
        np.linspace(0, image.shape[1] - 1, image.shape[1]),
        np.linspace(0, image.shape[0] - 1, image.shape[0]),
        datetime(2010, 1, 1, 0, 15),
        datetime(2010, 1, 1, 0, 30)
    ).t_init == 900
def test_rescale():
    """rescale() must map the data into [0, 1] and keep metadata intact."""
    image = np.random.rand(200, 3600) * 43
    spec = Spectrogram(image,
        np.linspace(0, image.shape[1] - 1, image.shape[1]),
        np.linspace(0, image.shape[0] - 1, image.shape[0]),
        datetime(2010, 1, 1, 0, 15),
        datetime(2010, 1, 1, 0, 30)
    )
    nspec = spec.rescale()
    assert dict_eq(spec._get_params(), nspec._get_params())
    assert_array_almost_equal(nspec.data.max(), 1)
    assert nspec.data.min() == 0
def test_rescale_error():
    """rescale() must reject constant (all-equal) data."""
    image = np.zeros((200, 3600))
    spec = Spectrogram(image,
        np.linspace(0, image.shape[1] - 1, image.shape[1]),
        np.linspace(0, image.shape[0] - 1, image.shape[0]),
        datetime(2010, 1, 1, 0, 15),
        datetime(2010, 1, 1, 0, 30)
    )
    with pytest.raises(ValueError) as excinfo:
        spec.rescale(0, 1)
    # BaseException.message does not exist on Python 3; compare the
    # stringified exception instead.
    assert (
        str(excinfo.value) ==
        "Spectrogram needs to contain distinct values.")
def test_rescale_error2():
    """rescale() must reject equal minimum and maximum targets."""
    image = np.random.rand(200, 3600) * 43
    spec = Spectrogram(image,
        np.linspace(0, image.shape[1] - 1, image.shape[1]),
        np.linspace(0, image.shape[0] - 1, image.shape[0]),
        datetime(2010, 1, 1, 0, 15),
        datetime(2010, 1, 1, 0, 30)
    )
    with pytest.raises(ValueError) as excinfo:
        spec.rescale(1, 1)
    # BaseException.message does not exist on Python 3; compare the
    # stringified exception instead.
    assert (str(excinfo.value) ==
            "Maximum and minimum must be different.")
def test_resample():
    """Halving t_delta must double the time resolution (3 -> 5 columns)."""
    image = np.array([[0, 1, 2], [0, 1, 2]])
    spec = LinearTimeSpectrogram(
        image, np.array([0, 1, 2]), np.array([0]),
        datetime(2012, 1, 1), datetime(2012, 1, 1, 0, 0, 3),
        0, 1
    )
    r = spec.resample_time(0.5)
    assert r.shape[1] == 5
    assert np.array_equal(r.time_axis, np.linspace(0, 2, 5))
def test_upsample():
    """Doubling t_delta must halve the time resolution (4 -> 2 columns)."""
    image = np.array([[0, 1, 2, 3], [0, 1, 2, 3]])
    spec = LinearTimeSpectrogram(
        image, np.array([0, 1, 2]), np.array([0]),
        datetime(2012, 1, 1), datetime(2012, 1, 1, 0, 0, 3),
        0, 1
    )
    r = spec.resample_time(2)
    assert r.shape[1] == 2
def test_combine_freqs():
    """combine_frequencies must interleave two spectrograms' frequency
    channels into one, sorted descending by frequency."""
    image = np.random.rand(5, 3600)
    # Even frequencies 8, 6, 4, 2, 0.
    spec = LinearTimeSpectrogram(image,
        np.linspace(0, image.shape[1] - 1, image.shape[1]),
        np.array([8, 6, 4, 2, 0]),
        datetime(2010, 1, 1, 0, 15),
        datetime(2010, 1, 1, 0, 30),
        900,
        0.25
    )
    image = np.random.rand(5, 3600)
    # Odd frequencies 9, 7, 5, 3, 1.
    spec2 = LinearTimeSpectrogram(image,
        np.linspace(0, image.shape[1] - 1, image.shape[1]),
        np.array([9, 7, 5, 3, 1]),
        datetime(2010, 1, 1, 0, 15),
        datetime(2010, 1, 1, 0, 30),
        900,
        0.25
    )
    comb = LinearTimeSpectrogram.combine_frequencies([spec, spec2])
    stuff = [spec, spec2]
    # print comb
    # Row 9-freq of the combination must come from the right source row.
    for freq in range(10):
        assert np.array_equal(
            comb[9 - freq, :], stuff[freq % 2][4 - freq // 2, :]
        )
def test_join_diff_freq():
    """join_many must reject spectrograms with mismatched freq channels."""
    image = np.random.rand(5, 3600)
    spec = LinearTimeSpectrogram(image,
        np.linspace(0, 0.25 * (image.shape[1] - 1), image.shape[1]),
        np.array([8, 6, 4, 2, 0]),
        datetime(2010, 1, 1, 0, 15),
        datetime(2010, 1, 1, 0, 30),
        900,
        0.25
    )
    image = np.random.rand(5, 3600)
    # Different frequency channels from `spec`.
    spec2 = LinearTimeSpectrogram(image,
        np.linspace(0, 0.25 * (image.shape[1] - 1), image.shape[1]),
        np.array([9, 7, 5, 3, 1]),
        datetime(2010, 1, 1, 0, 15),
        datetime(2010, 1, 1, 0, 30),
        1800,
        0.25
    )
    with pytest.raises(ValueError) as excinfo:
        LinearTimeSpectrogram.join_many([spec, spec2])
    # BaseException.message does not exist on Python 3; compare the
    # stringified exception instead.
    assert str(excinfo.value) == "Frequency channels do not match."
def test_intersect_time():
    """intersect_time must trim both spectrograms to their common span."""
    image = np.random.rand(5, 3600)
    spec = LinearTimeSpectrogram(image,
        np.linspace(0, 0.25 * (image.shape[1] - 1), image.shape[1]),
        np.array([8, 6, 4, 2, 0]),
        datetime(2010, 1, 1, 0, 15),
        datetime(2010, 1, 1, 0, 30),
        900,
        0.25
    )
    image = np.random.rand(5, 3600)
    # Starts 1 s (= 4 samples at 0.25 s) later than `spec`.
    spec2 = LinearTimeSpectrogram(image,
        np.linspace(0, 0.25 * (image.shape[1] - 1), image.shape[1]),
        np.array([9, 7, 5, 3, 1]),
        datetime(2010, 1, 1, 0, 15),
        datetime(2010, 1, 1, 0, 30),
        901,
        0.25
    )
    one, other = LinearTimeSpectrogram.intersect_time(
        [spec, spec2]
    )
    assert one.shape[1] == other.shape[1]
    assert one.shape[1] == 3596
    assert np.array_equal(one.data, spec.data[:, 4:])
    assert np.array_equal(other.data, spec2.data[:, :-4])
    assert np.array_equal(one.time_axis, other.time_axis)
    assert one.t_init == other.t_init
    assert is_linear(one.time_axis)
    assert is_linear(other.time_axis)
def test_check_linearity():
    """check_linearity must honour absolute and average tolerances."""
    image = np.random.rand(5, 3600)
    spec = LinearTimeSpectrogram(image,
        np.linspace(0, 0.25 * (image.shape[1] - 1), image.shape[1]),
        np.array([8, 6, 4, 2, 0]),
        datetime(2010, 1, 1, 0, 15),
        datetime(2010, 1, 1, 0, 30),
        900,
        0.25
    )
    assert spec.check_linearity()
    # Perturb a single sample beyond the default tolerance...
    spec.time_axis[1] += 0.1
    assert not spec.check_linearity()
    # ...but within an explicit absolute tolerance of 0.1.
    assert spec.check_linearity(0.1)
    spec.time_axis[1] -= 0.1
    # The average stays (almost) the same because there are 3600 items.
    spec.time_axis[1] += 0.2 * 0.25
    assert spec.check_linearity(None, 0.2)
def test_flatten():
    """Spectrogram data must round-trip through reshape/flatten."""
    flat = np.arange(5 * 3600)
    image = flat.reshape(5, 3600)
    spec = LinearTimeSpectrogram(image,
        np.linspace(0, 0.25 * (image.shape[1] - 1), image.shape[1]),
        np.array([8, 6, 4, 2, 0]),
        datetime(2010, 1, 1, 0, 15),
        datetime(2010, 1, 1, 0, 30),
        900,
        0.25
    )
    assert np.array_equal(flat, spec.data.flatten())
def test_in_interval():
    """in_interval with HH:MM strings spanning the full range is identity."""
    image = np.random.rand(5, 900)
    spec = LinearTimeSpectrogram(image,
        np.linspace(0, 1 * (image.shape[1] - 1), image.shape[1]),
        np.array([8, 6, 4, 2, 0]),
        datetime(2010, 1, 1, 0, 15),
        datetime(2010, 1, 1, 0, 30),
        900,
        1
    )
    assert np.array_equal(spec.in_interval("00:15", "00:30").data, spec.data)
def test_in_interval2():
    """in_interval must also accept a full ISO timestamp as start."""
    image = np.random.rand(5, 900)
    spec = LinearTimeSpectrogram(image,
        np.linspace(0, 1 * (image.shape[1] - 1), image.shape[1]),
        np.array([8, 6, 4, 2, 0]),
        datetime(2010, 1, 1, 0, 15),
        datetime(2010, 1, 1, 0, 30),
        900,
        1
    )
    assert np.array_equal(
        spec.in_interval("2010-01-01T00:15:00", "00:30").data, spec.data
    )
def test_linearize():
    """linearize_freqs must expand channels onto an evenly spaced grid,
    duplicating each source row into its nearest grid rows."""
    image = np.random.rand(5, 900)
    # NB: only 4 frequency channels are supplied for the 5-row image.
    spec = LinearTimeSpectrogram(image,
        np.linspace(0, 1 * (image.shape[1] - 1), image.shape[1]),
        np.array([20, 10, 5, 0]),
        datetime(2010, 1, 1, 0, 15),
        datetime(2010, 1, 1, 0, 30),
        900,
        1
    )
    # 0   1   2    3    4   5   6  7   8
    # -------- ----------- ----- ---
    # 20 17.5 15 12.5 10 7.5 5 2.5 0
    linear = spec.linearize_freqs()
    assert ((linear.freq_axis[:-1] - linear.freq_axis[1:]) == 2.5).all()
    assert (linear[0] == image[0, :]).all()
    assert (linear[1] == image[0, :]).all()
    assert (linear[2] == image[0, :]).all()
    assert (linear[3] == image[1, :]).all()
    assert (linear[4] == image[1, :]).all()
    assert (linear[5] == image[1, :]).all()
    assert (linear[6] == image[2, :]).all()
    assert (linear[7] == image[2, :]).all()
    assert (linear[8] == image[3, :]).all()
def test_linear_view():
    """_LinearView must index rows as if frequencies were evenly spaced,
    without materializing a linearized copy."""
    image = np.random.rand(5, 900)
    spec = LinearTimeSpectrogram(image,
        np.linspace(0, 1 * (image.shape[1] - 1), image.shape[1]),
        np.array([20, 10, 5, 0]),
        datetime(2010, 1, 1, 0, 15),
        datetime(2010, 1, 1, 0, 30),
        900,
        1
    )
    linear = _LinearView(spec)
    # assert ((linear.freq_axis[:-1] - linear.freq_axis[1:]) == 2.5).all()
    assert (linear[0] == image[0, :]).all()
    assert (linear[1] == image[0, :]).all()
    assert (linear[2] == image[0, :]).all()
    assert (linear[3] == image[1, :]).all()
    assert (linear[4] == image[1, :]).all()
    assert (linear[5] == image[1, :]).all()
    assert (linear[6] == image[2, :]).all()
    assert (linear[7] == image[2, :]).all()
    assert (linear[8] == image[3, :]).all()
def test_linear_view_indexerror():
    """_LinearView must raise IndexError past the last virtual row (8)."""
    image = np.random.rand(5, 900)
    spec = LinearTimeSpectrogram(image,
        np.linspace(0, 1 * (image.shape[1] - 1), image.shape[1]),
        np.array([20, 10, 5, 0]),
        datetime(2010, 1, 1, 0, 15),
        datetime(2010, 1, 1, 0, 30),
        900,
        1
    )
    linear = _LinearView(spec)
    # assert ((linear.freq_axis[:-1] - linear.freq_axis[1:]) == 2.5).all()
    with pytest.raises(IndexError):
        linear[9]
def test_linear_view_negative():
    """_LinearView must support negative indices like a sequence."""
    image = np.random.rand(5, 900)
    spec = LinearTimeSpectrogram(image,
        np.linspace(0, 1 * (image.shape[1] - 1), image.shape[1]),
        np.array([20, 10, 5, 0]),
        datetime(2010, 1, 1, 0, 15),
        datetime(2010, 1, 1, 0, 30),
        900,
        1
    )
    linear = _LinearView(spec)
    # assert ((linear.freq_axis[:-1] - linear.freq_axis[1:]) == 2.5).all()
    assert (linear[8] == image[3, :]).all()
    assert (linear[-1] == image[3, :]).all()
def test_linear_view_freqs():
    """get_freq must return the source frequency backing each virtual row."""
    image = np.random.rand(5, 900)
    spec = LinearTimeSpectrogram(image,
        np.linspace(0, 1 * (image.shape[1] - 1), image.shape[1]),
        np.array([20, 10, 5, 0]),
        datetime(2010, 1, 1, 0, 15),
        datetime(2010, 1, 1, 0, 30),
        900,
        1
    )
    linear = _LinearView(spec)
    # assert ((linear.freq_axis[:-1] - linear.freq_axis[1:]) == 2.5).all()
    assert linear.get_freq(0) == 20
    assert linear.get_freq(1) == 20
    assert linear.get_freq(2) == 20
    assert linear.get_freq(3) == 10
    assert linear.get_freq(4) == 10
    assert linear.get_freq(5) == 10
    assert linear.get_freq(6) == 5
    assert linear.get_freq(7) == 5
    assert linear.get_freq(8) == 0
| |
# Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""fMRI Simulator test script
Test script for generating a run of a participant's data.
Authors: Cameron Ellis (Princeton) 2016
"""
import numpy as np
import math
from brainiak.utils import fmrisim as sim
def test_generate_signal():
    """generate_signal must place features of the requested shape, size and
    magnitude at the requested coordinates."""
    # Inputs for generate_signal
    dimensions = np.array([10, 10, 10])  # What is the size of the brain
    feature_size = [3]
    feature_type = ['cube']
    feature_coordinates = np.array(
        [[5, 5, 5]])
    signal_magnitude = [30]
    # Generate a volume representing the location and quality of the signal
    volume = sim.generate_signal(dimensions=dimensions,
                                 feature_coordinates=feature_coordinates,
                                 feature_type=feature_type,
                                 feature_size=feature_size,
                                 signal_magnitude=signal_magnitude,
                                 )
    assert np.all(volume.shape == dimensions), "Check signal shape"
    assert np.max(volume) == signal_magnitude, "Check signal magnitude"
    # A cube of side feature_size has feature_size**3 non-zero voxels.
    assert np.sum(volume > 0) == math.pow(feature_size[0], 3), (
        "Check feature size")
    assert volume[5, 5, 5] == signal_magnitude, "Check signal location"
    assert volume[5, 5, 1] == 0, "Check noise location"
    # Multiple features of different types in one volume.
    feature_coordinates = np.array(
        [[5, 5, 5], [3, 3, 3], [7, 7, 7]])
    volume = sim.generate_signal(dimensions=dimensions,
                                 feature_coordinates=feature_coordinates,
                                 feature_type=['loop', 'cavity', 'sphere'],
                                 feature_size=[3],
                                 signal_magnitude=signal_magnitude)
    assert volume[5, 5, 5] == 0, "Loop is empty"
    assert volume[3, 3, 3] == 0, "Cavity is empty"
    assert volume[7, 7, 7] != 0, "Sphere is not empty"
def test_generate_stimfunction():
    """generate_stimfunction must produce a millisecond-resolution boxcar,
    and convolve_hrf must downsample it to TR resolution."""
    # Inputs for generate_stimfunction
    onsets = [10, 30, 50, 70, 90]
    event_durations = [6]
    tr_duration = 2
    duration = 100
    # Create the time course for the signal to be generated
    stimfunction = sim.generate_stimfunction(onsets=onsets,
                                             event_durations=event_durations,
                                             total_time=duration,
                                             )
    # One sample per millisecond.
    assert stimfunction.shape[0] == duration * 1000, "stimfunc incorrect " \
                                                     "length"
    eventNumber = np.sum(event_durations * len(onsets)) * 1000
    assert np.sum(stimfunction) == eventNumber, "Event number"
    # Create the signal function
    signal_function = sim.convolve_hrf(stimfunction=stimfunction,
                                       tr_duration=tr_duration,
                                       )
    stim_dur = stimfunction.shape[0] / (tr_duration * 1000)
    assert signal_function.shape[0] == stim_dur, "The length did not change"
    # A single event: the HRF undershoot should yield negative values.
    onsets = [10]
    stimfunction = sim.generate_stimfunction(onsets=onsets,
                                             event_durations=event_durations,
                                             total_time=duration,
                                             )
    signal_function = sim.convolve_hrf(stimfunction=stimfunction,
                                       tr_duration=tr_duration,
                                       )
    assert np.sum(signal_function < 0) > 0, "No values below zero"
def test_apply_signal():
    """apply_signal must combine a signal volume with a time course into a
    4D (x, y, z, TR) dataset."""
    dimensions = np.array([10, 10, 10])  # What is the size of the brain
    feature_size = [2]
    feature_type = ['cube']
    feature_coordinates = np.array(
        [[5, 5, 5]])
    signal_magnitude = [30]
    # Generate a volume representing the location and quality of the signal
    volume = sim.generate_signal(dimensions=dimensions,
                                 feature_coordinates=feature_coordinates,
                                 feature_type=feature_type,
                                 feature_size=feature_size,
                                 signal_magnitude=signal_magnitude,
                                 )
    # Inputs for generate_stimfunction
    onsets = [10, 30, 50, 70, 90]
    event_durations = [6]
    tr_duration = 2
    duration = 100
    # Create the time course for the signal to be generated
    stimfunction = sim.generate_stimfunction(onsets=onsets,
                                             event_durations=event_durations,
                                             total_time=duration,
                                             )
    signal_function = sim.convolve_hrf(stimfunction=stimfunction,
                                       tr_duration=tr_duration,
                                       )
    # Convolve the HRF with the stimulus sequence
    signal = sim.apply_signal(signal_function=signal_function,
                              volume_signal=volume,
                              )
    assert signal.shape == (dimensions[0], dimensions[1], dimensions[2],
                            duration / tr_duration), "The output is the " \
                                                     "wrong size"
    # Applying the raw (binary) stimfunction should hit the magnitude.
    signal = sim.apply_signal(signal_function=stimfunction,
                              volume_signal=volume,
                              )
    assert np.any(signal == signal_magnitude), "The stimfunction is not binary"
def test_generate_noise():
    """generate_noise must produce noise of the right shape whose strength
    responds to the snr/sfnr entries of noise_dict."""
    dimensions = np.array([10, 10, 10])  # What is the size of the brain
    feature_size = [2]
    feature_type = ['cube']
    feature_coordinates = np.array(
        [[5, 5, 5]])
    signal_magnitude = [1]
    # Generate a volume representing the location and quality of the signal
    volume = sim.generate_signal(dimensions=dimensions,
                                 feature_coordinates=feature_coordinates,
                                 feature_type=feature_type,
                                 feature_size=feature_size,
                                 signal_magnitude=signal_magnitude,
                                 )
    # Inputs for generate_stimfunction
    onsets = [10, 30, 50, 70, 90]
    event_durations = [6]
    tr_duration = 2
    duration = 100
    # Create the time course for the signal to be generated
    stimfunction = sim.generate_stimfunction(onsets=onsets,
                                             event_durations=event_durations,
                                             total_time=duration,
                                             )
    signal_function = sim.convolve_hrf(stimfunction=stimfunction,
                                       tr_duration=tr_duration,
                                       )
    # Convolve the HRF with the stimulus sequence
    signal = sim.apply_signal(signal_function=signal_function,
                              volume_signal=volume,
                              )
    # Generate the mask of the signal
    mask, template = sim.mask_brain(signal, mask_threshold=0.1)
    assert min(mask[mask > 0]) > 0.1, "Mask thresholding did not work"
    # Parenthesis bug fixed: the original asserted
    # len(np.unique(template) > 2) -- the length of a boolean array,
    # which is truthy for any non-empty template.
    assert len(np.unique(template)) > 2, "Template creation did not work"
    stimfunction_tr = stimfunction[::int(tr_duration * 1000)]
    # Create the noise volumes (using the default parameters)
    noise = sim.generate_noise(dimensions=dimensions,
                               stimfunction_tr=stimfunction_tr,
                               tr_duration=tr_duration,
                               template=template,
                               mask=mask,
                               )
    assert signal.shape == noise.shape, "The dimensions of signal and noise " \
                                        "the same"
    assert np.std(signal) < np.std(noise), "Noise was not created"
    # Very high snr/sfnr should suppress the system noise almost entirely.
    noise = sim.generate_noise(dimensions=dimensions,
                               stimfunction_tr=stimfunction_tr,
                               tr_duration=tr_duration,
                               template=template,
                               mask=mask,
                               noise_dict={'sfnr': 10000, 'snr': 10000},
                               )
    system_noise = np.std(noise[mask > 0], 1).mean()
    assert system_noise <= 0.1, "Noise strength could not be manipulated"
def test_mask_brain():
    """mask_brain must keep central voxels and suppress peripheral ones."""
    # Inputs for generate_signal
    dimensions = np.array([10, 10, 10])  # What is the size of the brain
    feature_size = [2]
    feature_type = ['cube']
    feature_coordinates = np.array(
        [[4, 4, 4]])
    signal_magnitude = [30]
    # Generate a volume representing the location and quality of the signal
    volume = sim.generate_signal(dimensions=dimensions,
                                 feature_coordinates=feature_coordinates,
                                 feature_type=feature_type,
                                 feature_size=feature_size,
                                 signal_magnitude=signal_magnitude,
                                 )
    # Mask the volume to be the same shape as a brain
    mask, _ = sim.mask_brain(volume)
    brain = volume * mask
    # A centrally placed feature survives masking intact.
    assert np.sum(brain != 0) == np.sum(volume != 0), "Masking did not work"
    assert brain[0, 0, 0] == 0, "Masking did not work"
    assert brain[4, 4, 4] != 0, "Masking did not work"
    # A feature near the corner must be partially masked out.
    feature_coordinates = np.array(
        [[1, 1, 1]])
    volume = sim.generate_signal(dimensions=dimensions,
                                 feature_coordinates=feature_coordinates,
                                 feature_type=feature_type,
                                 feature_size=feature_size,
                                 signal_magnitude=signal_magnitude,
                                 )
    # Mask the volume to be the same shape as a brain
    mask, _ = sim.mask_brain(volume)
    brain = volume * mask
    assert np.sum(brain != 0) < np.sum(volume != 0), "Masking did not work"
def test_calc_noise():
    """calc_noise must recover (roughly) the parameters used to generate
    noise, and _generate_noise_system must honour its std inputs."""
    # Inputs for functions
    onsets = [10, 30, 50, 70, 90]
    event_durations = [6]
    tr_duration = 2
    duration = 100
    tr_number = int(np.floor(duration / tr_duration))
    dimensions_tr = np.array([10, 10, 10, tr_number])
    # Preset the noise dict
    nd_orig = {'auto_reg_sigma': 0.6,
               'drift_sigma': 0.4,
               'snr': 30,
               'sfnr': 30,
               'max_activity': 1000,
               'fwhm': 4,
               }
    # Create the time course for the signal to be generated
    stimfunction = sim.generate_stimfunction(onsets=onsets,
                                             event_durations=event_durations,
                                             total_time=duration,
                                             )
    # Mask the volume to be the same shape as a brain
    mask, template = sim.mask_brain(dimensions_tr, mask_threshold=0.2)
    stimfunction_tr = stimfunction[::int(tr_duration * 1000)]
    noise = sim.generate_noise(dimensions=dimensions_tr[0:3],
                               stimfunction_tr=stimfunction_tr,
                               tr_duration=tr_duration,
                               template=template,
                               mask=mask,
                               noise_dict=nd_orig,
                               )
    # Check that noise_system is being calculated correctly
    spatial_sd = 5
    temporal_sd = 5
    noise_system = sim._generate_noise_system(dimensions_tr,
                                              spatial_sd,
                                              temporal_sd)
    # Loose bound: the sample std should be within one std of the target.
    precision = abs(noise_system[0, 0, 0, :].std() - spatial_sd)
    assert precision < spatial_sd, 'noise_system calculated incorrectly'
    precision = abs(noise_system[:, :, :, 0].std() - temporal_sd)
    assert precision < spatial_sd, 'noise_system calculated incorrectly'
    # Calculate the noise
    nd_calc = sim.calc_noise(volume=noise,
                             mask=mask)
    # How precise are these estimates
    precision = abs(nd_calc['snr'] - nd_orig['snr'])
    assert precision < nd_orig['snr'], 'snr calculated incorrectly'
    precision = abs(nd_calc['sfnr'] - nd_orig['sfnr'])
    assert precision < nd_orig['sfnr'], 'sfnr calculated incorrectly'
| |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from webob import exc
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder import backup
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import log as logging
from cinder.openstack.common import strutils
from cinder import rpc
from cinder import volume
LOG = logging.getLogger(__name__)
class AdminController(wsgi.Controller):
    """Abstract base class for AdminControllers.

    Subclasses bind a resource type to the shared admin actions
    (os-reset_status, os-force_delete) by setting ``collection`` and
    implementing ``_update``/``_get``/``_delete``.
    """
    collection = None  # api collection to extend
    # FIXME(clayg): this will be hard to keep up-to-date
    # Concrete classes can expand or over-ride
    valid_status = set(['creating',
                        'available',
                        'deleting',
                        'error',
                        'error_deleting', ])

    def __init__(self, *args, **kwargs):
        super(AdminController, self).__init__(*args, **kwargs)
        # singular name of the resource, e.g. 'volumes' -> 'volume'
        self.resource_name = self.collection.rstrip('s')
        self.volume_api = volume.API()
        self.backup_api = backup.API()

    def _update(self, *args, **kwargs):
        # Persist a state update for one resource; implemented by subclasses.
        raise NotImplementedError()

    def _get(self, *args, **kwargs):
        # Fetch one resource by id; implemented by subclasses.
        raise NotImplementedError()

    def _delete(self, *args, **kwargs):
        # Remove one resource; implemented by subclasses.
        raise NotImplementedError()

    def validate_update(self, body):
        """Validate a status-update request body.

        Returns a dict holding the lower-cased 'status'; raises
        HTTPBadRequest when 'status' is missing, not a mapping, or not a
        member of valid_status.
        """
        update = {}
        try:
            update['status'] = body['status'].lower()
        except (TypeError, KeyError):
            raise exc.HTTPBadRequest(explanation=_("Must specify 'status'"))
        if update['status'] not in self.valid_status:
            raise exc.HTTPBadRequest(
                explanation=_("Must specify a valid status"))
        return update

    def authorize(self, context, action_name):
        """Enforce the per-resource admin-actions policy for this request."""
        # e.g. "snapshot_admin_actions:reset_status"
        action = '%s_admin_actions:%s' % (self.resource_name, action_name)
        extensions.extension_authorizer('volume', action)(context)

    @wsgi.action('os-reset_status')
    def _reset_status(self, req, id, body):
        """Reset status on the resource."""
        context = req.environ['cinder.context']
        self.authorize(context, 'reset_status')
        update = self.validate_update(body['os-reset_status'])
        msg = _("Updating %(resource)s '%(id)s' with '%(update)r'")
        LOG.debug(msg, {'resource': self.resource_name, 'id': id,
                        'update': update})
        # Emit paired start/end notifications around the state change.
        notifier_info = dict(id=id, update=update)
        notifier = rpc.get_notifier('volumeStatusUpdate')
        notifier.info(context, self.collection + '.reset_status.start',
                      notifier_info)
        try:
            self._update(context, id, update)
        except exception.NotFound as e:
            # Translate the domain error into the matching HTTP response.
            raise exc.HTTPNotFound(explanation=e.msg)
        notifier.info(context, self.collection + '.reset_status.end',
                      notifier_info)
        return webob.Response(status_int=202)

    @wsgi.action('os-force_delete')
    def _force_delete(self, req, id, body):
        """Delete a resource, bypassing the check that it must be available."""
        context = req.environ['cinder.context']
        self.authorize(context, 'force_delete')
        try:
            resource = self._get(context, id)
        except exception.NotFound:
            raise exc.HTTPNotFound()
        self._delete(context, resource, force=True)
        return webob.Response(status_int=202)
class VolumeAdminController(AdminController):
    """AdminController for Volumes."""

    collection = 'volumes'
    # FIXME(jdg): We're appending additional valid status
    # entries to the set we declare in the parent class
    # this doesn't make a ton of sense, we should probably
    # look at the structure of this whole process again
    # Perhaps we don't even want any definitions in the abstract
    # parent class?
    valid_status = AdminController.valid_status.union(
        set(['attaching', 'in-use', 'detaching']))
    valid_attach_status = set(['detached', 'attached', ])
    valid_migration_status = set(['migrating', 'error',
                                  'completing', 'none',
                                  'starting', ])

    def _update(self, *args, **kwargs):
        # Write the status change straight to the database layer.
        db.volume_update(*args, **kwargs)

    def _get(self, *args, **kwargs):
        return self.volume_api.get(*args, **kwargs)

    def _delete(self, *args, **kwargs):
        return self.volume_api.delete(*args, **kwargs)

    def validate_update(self, body):
        """Validate status/attach_status/migration_status update fields.

        At least one of the three keys must be present; each value given
        must belong to the corresponding valid_* set, otherwise
        HTTPBadRequest is raised.
        """
        update = {}
        status = body.get('status', None)
        attach_status = body.get('attach_status', None)
        migration_status = body.get('migration_status', None)
        valid = False
        if status:
            valid = True
            update = super(VolumeAdminController, self).validate_update(body)
        if attach_status:
            valid = True
            update['attach_status'] = attach_status.lower()
            if update['attach_status'] not in self.valid_attach_status:
                raise exc.HTTPBadRequest(
                    explanation=_("Must specify a valid attach status"))
        if migration_status:
            valid = True
            update['migration_status'] = migration_status.lower()
            if update['migration_status'] not in self.valid_migration_status:
                raise exc.HTTPBadRequest(
                    explanation=_("Must specify a valid migration status"))
            # 'none' is how the API spells "no migration in progress";
            # it is stored as NULL/None.
            if update['migration_status'] == 'none':
                update['migration_status'] = None
        if not valid:
            raise exc.HTTPBadRequest(
                explanation=_("Must specify 'status', 'attach_status' "
                              "or 'migration_status' for update."))
        return update

    @wsgi.action('os-force_detach')
    def _force_detach(self, req, id, body):
        """Roll back a bad detach after the volume been disconnected."""
        context = req.environ['cinder.context']
        self.authorize(context, 'force_detach')
        try:
            volume = self._get(context, id)
        except exception.NotFound:
            raise exc.HTTPNotFound()
        self.volume_api.terminate_connection(context, volume,
                                             {}, force=True)
        self.volume_api.detach(context, volume)
        return webob.Response(status_int=202)

    @wsgi.action('os-migrate_volume')
    def _migrate_volume(self, req, id, body):
        """Migrate a volume to the specified host."""
        context = req.environ['cinder.context']
        self.authorize(context, 'migrate_volume')
        try:
            volume = self._get(context, id)
        except exception.NotFound:
            raise exc.HTTPNotFound()
        params = body['os-migrate_volume']
        try:
            host = params['host']
        except KeyError:
            raise exc.HTTPBadRequest(explanation=_("Must specify 'host'"))
        force_host_copy = params.get('force_host_copy', False)
        # NOTE: basestring is Python 2 only (covers str and unicode).
        if isinstance(force_host_copy, basestring):
            try:
                force_host_copy = strutils.bool_from_string(force_host_copy,
                                                            strict=True)
            except ValueError:
                raise exc.HTTPBadRequest(
                    explanation=_("Bad value for 'force_host_copy'"))
        elif not isinstance(force_host_copy, bool):
            raise exc.HTTPBadRequest(
                explanation=_("'force_host_copy' not string or bool"))
        self.volume_api.migrate_volume(context, volume, host, force_host_copy)
        return webob.Response(status_int=202)

    @wsgi.action('os-migrate_volume_completion')
    def _migrate_volume_completion(self, req, id, body):
        """Complete an in-progress migration."""
        context = req.environ['cinder.context']
        self.authorize(context, 'migrate_volume_completion')
        try:
            volume = self._get(context, id)
        except exception.NotFound:
            raise exc.HTTPNotFound()
        params = body['os-migrate_volume_completion']
        try:
            new_volume_id = params['new_volume']
        except KeyError:
            raise exc.HTTPBadRequest(
                explanation=_("Must specify 'new_volume'"))
        try:
            new_volume = self._get(context, new_volume_id)
        except exception.NotFound:
            raise exc.HTTPNotFound()
        error = params.get('error', False)
        ret = self.volume_api.migrate_volume_completion(context, volume,
                                                        new_volume, error)
        return {'save_volume_id': ret}
class SnapshotAdminController(AdminController):
    """AdminController for Snapshots."""

    collection = 'snapshots'

    def _update(self, *args, **kwargs):
        # Write the status change straight to the database layer.
        db.snapshot_update(*args, **kwargs)

    def _get(self, *args, **kwargs):
        # Look up a snapshot through the volume API.
        return self.volume_api.get_snapshot(*args, **kwargs)

    def _delete(self, *args, **kwargs):
        # Delete a snapshot through the volume API.
        return self.volume_api.delete_snapshot(*args, **kwargs)
class BackupAdminController(AdminController):
    """AdminController for Backups."""

    collection = 'backups'

    valid_status = set(['available',
                        'error'
                        ])

    @wsgi.action('os-reset_status')
    def _reset_status(self, req, id, body):
        """Reset status on the resource."""
        context = req.environ['cinder.context']
        self.authorize(context, 'reset_status')
        update = self.validate_update(body['os-reset_status'])
        msg = "Updating %(resource)s '%(id)s' with '%(update)r'"
        LOG.debug(msg, {'resource': self.resource_name, 'id': id,
                        'update': update})
        notifier_info = {'id': id, 'update': update}
        notifier = rpc.get_notifier('backupStatusUpdate')
        notifier.info(context, self.collection + '.reset_status.start',
                      notifier_info)
        try:
            self.backup_api.reset_status(context=context, backup_id=id,
                                         status=update['status'])
        except exception.NotFound as e:
            raise exc.HTTPNotFound(explanation=e.msg)
        # FIX: emit the end notification on success, mirroring
        # AdminController._reset_status, which pairs every
        # '.reset_status.start' event with a '.reset_status.end' event.
        # The original override dropped the end event, leaving consumers
        # of the notification stream with unterminated operations.
        notifier.info(context, self.collection + '.reset_status.end',
                      notifier_info)
        return webob.Response(status_int=202)
class Admin_actions(extensions.ExtensionDescriptor):
    """Enable admin actions."""

    name = "AdminActions"
    alias = "os-admin-actions"
    namespace = "http://docs.openstack.org/volume/ext/admin-actions/api/v1.1"
    updated = "2012-08-25T00:00:00+00:00"

    def get_controller_extensions(self):
        """Build one ControllerExtension per admin controller class."""
        controller_classes = (VolumeAdminController,
                              SnapshotAdminController,
                              BackupAdminController)
        return [extensions.ControllerExtension(self, cls.collection, cls())
                for cls in controller_classes]
| |
import unittest
from scrapy.linkextractors.regex import RegexLinkExtractor
from scrapy.http import HtmlResponse
from scrapy.link import Link
from scrapy.linkextractors.htmlparser import HtmlParserLinkExtractor
from scrapy.linkextractors.sgml import SgmlLinkExtractor, BaseSgmlLinkExtractor
from tests import get_testdata
from tests.test_linkextractors import Base
class BaseSgmlLinkExtractorTestCase(unittest.TestCase):
    # XXX: should we move some of these tests to base link extractor tests?

    def test_basic(self):
        # Relative hrefs are resolved against the response URL; the <img>
        # tag is ignored (default extraction is tag=a, attr=href).
        html = """<html><head><title>Page title<title>
        <body><p><a href="item/12.html">Item 12</a></p>
        <p><a href="/about.html">About us</a></p>
        <img src="/logo.png" alt="Company logo (not a link)" />
        <p><a href="../othercat.html">Other category</a></p>
        <p><a href="/">>></a></p>
        <p><a href="/" /></p>
        </body></html>"""
        response = HtmlResponse("http://example.org/somepage/index.html", body=html)
        lx = BaseSgmlLinkExtractor()  # default: tag=a, attr=href
        self.assertEqual(lx.extract_links(response),
                         [Link(url='http://example.org/somepage/item/12.html', text='Item 12'),
                          Link(url='http://example.org/about.html', text='About us'),
                          Link(url='http://example.org/othercat.html', text='Other category'),
                          Link(url='http://example.org/', text='>>'),
                          Link(url='http://example.org/', text='')])

    def test_base_url(self):
        # A <base href> tag overrides the response URL for link resolution.
        html = """<html><head><title>Page title<title><base href="http://otherdomain.com/base/" />
        <body><p><a href="item/12.html">Item 12</a></p>
        </body></html>"""
        response = HtmlResponse("http://example.org/somepage/index.html", body=html)
        lx = BaseSgmlLinkExtractor()  # default: tag=a, attr=href
        self.assertEqual(lx.extract_links(response),
                         [Link(url='http://otherdomain.com/base/item/12.html', text='Item 12')])

        # base url is an absolute path and relative to host
        html = """<html><head><title>Page title<title><base href="/" />
        <body><p><a href="item/12.html">Item 12</a></p></body></html>"""
        response = HtmlResponse("https://example.org/somepage/index.html", body=html)
        self.assertEqual(lx.extract_links(response),
                         [Link(url='https://example.org/item/12.html', text='Item 12')])

        # base url has no scheme
        html = """<html><head><title>Page title<title><base href="//noschemedomain.com/path/to/" />
        <body><p><a href="item/12.html">Item 12</a></p></body></html>"""
        response = HtmlResponse("https://example.org/somepage/index.html", body=html)
        self.assertEqual(lx.extract_links(response),
                         [Link(url='https://noschemedomain.com/path/to/item/12.html', text='Item 12')])

    def test_link_text_wrong_encoding(self):
        # A byte invalid for the declared utf-8 encoding is replaced with
        # U+FFFD in the extracted link text.
        html = """<body><p><a href="item/12.html">Wrong: \xed</a></p></body></html>"""
        response = HtmlResponse("http://www.example.com", body=html, encoding='utf-8')
        lx = BaseSgmlLinkExtractor()
        self.assertEqual(lx.extract_links(response), [
            Link(url='http://www.example.com/item/12.html', text=u'Wrong: \ufffd'),
        ])

    def test_extraction_encoding(self):
        # Non-ASCII URLs must be percent-encoded using the page encoding,
        # whether declared in headers, sniffed, or latin1.
        # NOTE: the str.decode(...) calls below are Python 2-only.
        body = get_testdata('link_extractor', 'linkextractor_noenc.html')
        response_utf8 = HtmlResponse(url='http://example.com/utf8', body=body, headers={'Content-Type': ['text/html; charset=utf-8']})
        response_noenc = HtmlResponse(url='http://example.com/noenc', body=body)
        body = get_testdata('link_extractor', 'linkextractor_latin1.html')
        response_latin1 = HtmlResponse(url='http://example.com/latin1', body=body)

        lx = BaseSgmlLinkExtractor()
        self.assertEqual(lx.extract_links(response_utf8), [
            Link(url='http://example.com/sample_%C3%B1.html', text=''),
            Link(url='http://example.com/sample_%E2%82%AC.html', text='sample \xe2\x82\xac text'.decode('utf-8')),
        ])
        self.assertEqual(lx.extract_links(response_noenc), [
            Link(url='http://example.com/sample_%C3%B1.html', text=''),
            Link(url='http://example.com/sample_%E2%82%AC.html', text='sample \xe2\x82\xac text'.decode('utf-8')),
        ])
        self.assertEqual(lx.extract_links(response_latin1), [
            Link(url='http://example.com/sample_%F1.html', text=''),
            Link(url='http://example.com/sample_%E1.html', text='sample \xe1 text'.decode('latin1')),
        ])

    def test_matches(self):
        # The base extractor has no allow/deny rules, so any URL matches.
        url1 = 'http://lotsofstuff.com/stuff1/index'
        url2 = 'http://evenmorestuff.com/uglystuff/index'
        lx = BaseSgmlLinkExtractor()
        self.assertEqual(lx.matches(url1), True)
        self.assertEqual(lx.matches(url2), True)
class HtmlParserLinkExtractorTestCase(unittest.TestCase):

    def setUp(self):
        body = get_testdata('link_extractor', 'sgml_linkextractor.html')
        self.response = HtmlResponse(url='http://example.com/index', body=body)

    def test_extraction(self):
        """Default arguments extract every anchor, repetitions included."""
        # Default arguments
        lx = HtmlParserLinkExtractor()
        self.assertEqual(lx.extract_links(self.response),
                         [Link(url='http://example.com/sample2.html', text=u'sample 2'),
                          Link(url='http://example.com/sample3.html', text=u'sample 3 text'),
                          Link(url='http://example.com/sample3.html', text=u'sample 3 repetition'),
                          Link(url='http://www.google.com/something', text=u''),
                          Link(url='http://example.com/innertag.html', text=u'inner tag'), ])

    def test_link_wrong_href(self):
        """A malformed href (stray '[' in the netloc) is skipped, not fatal."""
        html = """
        <a href="http://example.org/item1.html">Item 1</a>
        <a href="http://[example.org/item2.html">Item 2</a>
        <a href="http://example.org/item3.html">Item 3</a>
        """
        response = HtmlResponse("http://example.org/index.html", body=html)
        lx = HtmlParserLinkExtractor()
        # list(...) replaces the original pass-through list comprehension
        # `[link for link in ...]` — same result, clearer intent.
        self.assertEqual(list(lx.extract_links(response)), [
            Link(url='http://example.org/item1.html', text=u'Item 1', nofollow=False),
            Link(url='http://example.org/item3.html', text=u'Item 3', nofollow=False),
        ])
class SgmlLinkExtractorTestCase(Base.LinkExtractorTestCase):
    extractor_cls = SgmlLinkExtractor

    def test_deny_extensions(self):
        """Links whose file extension is denied are filtered out."""
        html = """<a href="page.html">asd</a> and <a href="photo.jpg">"""
        response = HtmlResponse("http://example.org/", body=html)
        lx = SgmlLinkExtractor(deny_extensions="jpg")
        self.assertEqual(lx.extract_links(response), [
            Link(url='http://example.org/page.html', text=u'asd'),
        ])

    def test_attrs_sgml(self):
        """Only the configured attribute ('href') yields links; 'ref' does not."""
        html = """<html><area href="sample1.html"></area>
        <a ref="sample2.html">sample text 2</a></html>"""
        response = HtmlResponse("http://example.com/index.html", body=html)
        lx = SgmlLinkExtractor(attrs="href")
        self.assertEqual(lx.extract_links(response), [
            Link(url='http://example.com/sample1.html', text=u''),
        ])

    def test_link_nofollow(self):
        """rel="nofollow" (alone or among other tokens) sets Link.nofollow."""
        html = """
        <a href="page.html?action=print" rel="nofollow">Printer-friendly page</a>
        <a href="about.html">About us</a>
        <a href="http://google.com/something" rel="external nofollow">Something</a>
        """
        response = HtmlResponse("http://example.org/page.html", body=html)
        lx = SgmlLinkExtractor()
        # list(...) replaces the original pass-through list comprehension.
        self.assertEqual(list(lx.extract_links(response)), [
            Link(url='http://example.org/page.html?action=print', text=u'Printer-friendly page', nofollow=True),
            Link(url='http://example.org/about.html', text=u'About us', nofollow=False),
            Link(url='http://google.com/something', text=u'Something', nofollow=True),
        ])
class RegexLinkExtractorTestCase(unittest.TestCase):
    # XXX: RegexLinkExtractor is not deprecated yet, but it must be rewritten
    # not to depend on SgmlLinkExractor. Its speed is also much worse
    # than it should be.

    def setUp(self):
        body = get_testdata('link_extractor', 'sgml_linkextractor.html')
        self.response = HtmlResponse(url='http://example.com/index', body=body)

    def test_extraction(self):
        """Default arguments: duplicate URLs collapse to a single link."""
        # Default arguments
        lx = RegexLinkExtractor()
        self.assertEqual(lx.extract_links(self.response),
                         [Link(url='http://example.com/sample2.html', text=u'sample 2'),
                          Link(url='http://example.com/sample3.html', text=u'sample 3 text'),
                          Link(url='http://www.google.com/something', text=u''),
                          Link(url='http://example.com/innertag.html', text=u'inner tag'), ])

    def test_link_wrong_href(self):
        """A malformed href is skipped rather than raising."""
        html = """
        <a href="http://example.org/item1.html">Item 1</a>
        <a href="http://[example.org/item2.html">Item 2</a>
        <a href="http://example.org/item3.html">Item 3</a>
        """
        response = HtmlResponse("http://example.org/index.html", body=html)
        lx = RegexLinkExtractor()
        # list(...) replaces the original pass-through list comprehension
        # `[link for link in ...]` — same result, clearer intent.
        self.assertEqual(list(lx.extract_links(response)), [
            Link(url='http://example.org/item1.html', text=u'Item 1', nofollow=False),
            Link(url='http://example.org/item3.html', text=u'Item 3', nofollow=False),
        ])

    def test_html_base_href(self):
        """Links resolve against <base href>, not the response URL."""
        html = """
        <html>
        <head>
        <base href="http://b.com/">
        </head>
        <body>
        <a href="test.html"></a>
        </body>
        </html>
        """
        response = HtmlResponse("http://a.com/", body=html)
        lx = RegexLinkExtractor()
        self.assertEqual(list(lx.extract_links(response)), [
            Link(url='http://b.com/test.html', text=u'', nofollow=False),
        ])
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Runs a development application server for an application.
%(script)s [options] <application root>
Application root must be the path to the application to run in this server.
Must contain a valid app.yaml or app.yml file.
Options:
--address=ADDRESS, -a ADDRESS
Address to which this server should bind. (Default
%(address)s).
--clear_datastore, -c Clear the Datastore on startup. (Default false)
--debug, -d Use debug logging. (Default false)
--help, -h View this helpful message.
--port=PORT, -p PORT Port for the server to run on. (Default %(port)s)
--allow_skipped_files Allow access to files matched by app.yaml's
skipped_files (default False)
--auth_domain Authorization domain that this app runs in.
(Default gmail.com)
--backends Run the dev_appserver with backends support
(multiprocess mode).
--blobstore_path=DIR Path to directory to use for storing Blobstore
file stub data.
--clear_prospective_search Clear the Prospective Search subscription index
(Default false).
--datastore_path=DS_FILE Path to file to use for storing Datastore file
stub data.
(Default %(datastore_path)s)
--debug_imports Enables debug logging for module imports, showing
search paths used for finding modules and any
errors encountered during the import process.
--default_partition Default partition to use in the APPLICATION_ID.
(Default dev)
--disable_static_caching Never allow the browser to cache static files.
(Default enable if expiration set in app.yaml)
--disable_task_running When supplied, tasks will not be automatically
run after submission and must be run manually
in the local admin console.
--enable_sendmail Enable sendmail when SMTP not configured.
(Default false)
--high_replication Use the high replication datastore consistency
model. (Default false).
--history_path=PATH Path to use for storing Datastore history.
(Default %(history_path)s)
--multiprocess_min_port When running in multiprocess mode, specifies the
lowest port value to use when choosing ports. If
set to 0, select random ports.
(Default 9000)
--mysql_host=HOSTNAME MySQL database host.
Used by the Cloud SQL (rdbms) stub.
(Default '%(mysql_host)s')
--mysql_port=PORT MySQL port to connect to.
Used by the Cloud SQL (rdbms) stub.
(Default %(mysql_port)s)
--mysql_user=USER MySQL user to connect as.
Used by the Cloud SQL (rdbms) stub.
(Default %(mysql_user)s)
--mysql_password=PASSWORD MySQL password to use.
Used by the Cloud SQL (rdbms) stub.
(Default '%(mysql_password)s')
--mysql_socket=PATH MySQL Unix socket file path.
Used by the Cloud SQL (rdbms) stub.
(Default '%(mysql_socket)s')
--require_indexes Disallows queries that require composite indexes
not defined in index.yaml.
--show_mail_body Log the body of emails in mail stub.
(Default false)
--skip_sdk_update_check Skip checking for SDK updates. If false, fall back
to opt_in setting specified in .appcfg_nag
(Default false)
--smtp_host=HOSTNAME SMTP host to send test mail to. Leaving this
unset will disable SMTP mail sending.
(Default '%(smtp_host)s')
--smtp_port=PORT SMTP port to send test mail to.
(Default %(smtp_port)s)
--smtp_user=USER SMTP user to connect as. Stub will only attempt
to login if this field is non-empty.
(Default '%(smtp_user)s').
--smtp_password=PASSWORD Password for SMTP server.
(Default '%(smtp_password)s')
--task_retry_seconds How long to wait in seconds before retrying a
task after it fails during execution.
(Default '%(task_retry_seconds)s')
--use_sqlite Use the new, SQLite based datastore stub.
(Default false)
"""
from google.appengine.tools import os_compat
import getopt
import logging
import os
import signal
import sys
import tempfile
import traceback
logging.basicConfig(
level=logging.INFO,
format='%(levelname)-8s %(asctime)s %(filename)s:%(lineno)s] %(message)s')
from google.appengine.api import yaml_errors
from google.appengine.dist import py_zipimport
from google.appengine.tools import appcfg
from google.appengine.tools import appengine_rpc
from google.appengine.tools import dev_appserver
from google.appengine.tools import dev_appserver_multiprocess as multiprocess
DEFAULT_ADMIN_CONSOLE_SERVER = 'appengine.google.com'

# Option-dict key names: each ARG_* constant is the key under which the
# corresponding command line flag's value is stored (see DEFAULT_ARGS
# and ParseArguments below).
ARG_ADDRESS = 'address'
ARG_ADMIN_CONSOLE_HOST = 'admin_console_host'
ARG_ADMIN_CONSOLE_SERVER = 'admin_console_server'
ARG_ALLOW_SKIPPED_FILES = 'allow_skipped_files'
ARG_AUTH_DOMAIN = 'auth_domain'
ARG_BACKENDS = 'backends'
ARG_BLOBSTORE_PATH = 'blobstore_path'
ARG_CLEAR_DATASTORE = 'clear_datastore'
ARG_CLEAR_PROSPECTIVE_SEARCH = 'clear_prospective_search'
ARG_DATASTORE_PATH = 'datastore_path'
ARG_DEBUG_IMPORTS = 'debug_imports'
ARG_DEFAULT_PARTITION = 'default_partition'
ARG_DISABLE_TASK_RUNNING = 'disable_task_running'
ARG_ENABLE_SENDMAIL = 'enable_sendmail'
ARG_HIGH_REPLICATION = 'high_replication'
ARG_HISTORY_PATH = 'history_path'
ARG_LOGIN_URL = 'login_url'
ARG_LOG_LEVEL = 'log_level'

# Multiprocess-mode keys are shared with the multiprocess module so both
# sides agree on the option names.
ARG_MULTIPROCESS = multiprocess.ARG_MULTIPROCESS
ARG_MULTIPROCESS_API_PORT = multiprocess.ARG_MULTIPROCESS_API_PORT
ARG_MULTIPROCESS_API_SERVER = multiprocess.ARG_MULTIPROCESS_API_SERVER
ARG_MULTIPROCESS_APP_INSTANCE_ID = multiprocess.ARG_MULTIPROCESS_APP_INSTANCE_ID
ARG_MULTIPROCESS_BACKEND_ID = multiprocess.ARG_MULTIPROCESS_BACKEND_ID
ARG_MULTIPROCESS_BACKEND_INSTANCE_ID = multiprocess.ARG_MULTIPROCESS_BACKEND_INSTANCE_ID
ARG_MULTIPROCESS_MIN_PORT = multiprocess.ARG_MULTIPROCESS_MIN_PORT

ARG_MYSQL_HOST = 'mysql_host'
ARG_MYSQL_PASSWORD = 'mysql_password'
ARG_MYSQL_PORT = 'mysql_port'
ARG_MYSQL_SOCKET = 'mysql_socket'
ARG_MYSQL_USER = 'mysql_user'
ARG_PORT = 'port'
ARG_PROSPECTIVE_SEARCH_PATH = 'prospective_search_path'
ARG_REQUIRE_INDEXES = 'require_indexes'
ARG_SHOW_MAIL_BODY = 'show_mail_body'
ARG_SKIP_SDK_UPDATE_CHECK = 'skip_sdk_update_check'
ARG_SMTP_HOST = 'smtp_host'
ARG_SMTP_PASSWORD = 'smtp_password'
ARG_SMTP_PORT = 'smtp_port'
ARG_SMTP_USER = 'smtp_user'
ARG_STATIC_CACHING = 'static_caching'
ARG_TASK_RETRY_SECONDS = 'task_retry_seconds'
ARG_TRUSTED = 'trusted'
ARG_USE_SQLITE = 'use_sqlite'

# Root of the SDK: four directory levels above the os_compat module.
SDK_PATH = os.path.dirname(
    os.path.dirname(
        os.path.dirname(
            os.path.dirname(os_compat.__file__)
        )
    )
)

# Python (major, minor) version matching the production runtime.
PRODUCTION_VERSION = (2, 5)
WARN_ABOUT_PYTHON_VERSION = True

# Default value for every supported flag. ParseArguments starts from a
# copy of this dict and overrides entries from the command line; it is
# also used to render the usage text (see PrintUsageExit).
DEFAULT_ARGS = {
  ARG_ADDRESS: 'localhost',
  ARG_ADMIN_CONSOLE_HOST: None,
  ARG_ADMIN_CONSOLE_SERVER: DEFAULT_ADMIN_CONSOLE_SERVER,
  ARG_ALLOW_SKIPPED_FILES: False,
  ARG_AUTH_DOMAIN: 'gmail.com',
  ARG_BLOBSTORE_PATH: os.path.join(tempfile.gettempdir(),
                                   'dev_appserver.blobstore'),
  ARG_CLEAR_DATASTORE: False,
  ARG_CLEAR_PROSPECTIVE_SEARCH: False,
  ARG_DATASTORE_PATH: os.path.join(tempfile.gettempdir(),
                                   'dev_appserver.datastore'),
  ARG_DEFAULT_PARTITION: 'dev',
  ARG_DISABLE_TASK_RUNNING: False,
  ARG_ENABLE_SENDMAIL: False,
  ARG_HIGH_REPLICATION: False,
  ARG_HISTORY_PATH: os.path.join(tempfile.gettempdir(),
                                 'dev_appserver.datastore.history'),
  ARG_LOGIN_URL: '/_ah/login',
  ARG_LOG_LEVEL: logging.INFO,
  ARG_MYSQL_HOST: 'localhost',
  ARG_MYSQL_PASSWORD: '',
  ARG_MYSQL_PORT: 3306,
  ARG_MYSQL_SOCKET: '',
  ARG_MYSQL_USER: '',
  ARG_PORT: 8080,
  ARG_PROSPECTIVE_SEARCH_PATH: os.path.join(tempfile.gettempdir(),
                                            'dev_appserver.prospective_search'),
  ARG_REQUIRE_INDEXES: False,
  ARG_SHOW_MAIL_BODY: False,
  ARG_SKIP_SDK_UPDATE_CHECK: False,
  ARG_SMTP_HOST: '',
  ARG_SMTP_PASSWORD: '',
  ARG_SMTP_PORT: 25,
  ARG_SMTP_USER: '',
  ARG_STATIC_CACHING: True,
  ARG_TASK_RETRY_SECONDS: 30,
  ARG_TRUSTED: False,
  ARG_USE_SQLITE: False,
}
def PrintUsageExit(code):
  """Prints usage information and exits with a status code.

  Args:
    code: Status code to pass to sys.exit() after displaying usage information.
  """
  render_dict = DEFAULT_ARGS.copy()
  render_dict['script'] = os.path.basename(sys.argv[0])
  # The module docstring doubles as the usage text; format it with the
  # default option values plus the script name. (Python 2 print statement.)
  print sys.modules['__main__'].__doc__ % render_dict
  sys.stdout.flush()
  sys.exit(code)
def ParseArguments(argv):
"""Parses command-line arguments.
Args:
argv: Command-line arguments, including the executable name, used to
execute this application.
Returns:
Tuple (args, option_dict) where:
args: List of command-line arguments following the executable name.
option_dict: Dictionary of parsed flags that maps keys from DEFAULT_ARGS
to their values, which are either pulled from the defaults, or from
command-line flags.
"""
option_dict = DEFAULT_ARGS.copy()
try:
opts, args = getopt.gnu_getopt(
argv[1:],
'a:cdhp:',
[ 'address=',
'admin_console_host=',
'admin_console_server=',
'allow_skipped_files',
'auth_domain=',
'backends',
'blobstore_path=',
'clear_datastore',
'clear_prospective_search',
'datastore_path=',
'debug',
'debug_imports',
'default_partition=',
'disable_static_caching',
'disable_task_running',
'enable_sendmail',
'help',
'high_replication',
'history_path=',
'multiprocess',
'multiprocess_api_port=',
'multiprocess_api_server',
'multiprocess_app_instance_id=',
'multiprocess_backend_id=',
'multiprocess_backend_instance_id=',
'multiprocess_min_port=',
'mysql_host=',
'mysql_password=',
'mysql_port=',
'mysql_socket=',
'mysql_user=',
'port=',
'require_indexes',
'show_mail_body',
'skip_sdk_update_check',
'smtp_host=',
'smtp_password=',
'smtp_port=',
'smtp_user=',
'task_retry_seconds=',
'trusted',
'use_sqlite',
])
except getopt.GetoptError, e:
print >>sys.stderr, 'Error: %s' % e
PrintUsageExit(1)
for option, value in opts:
if option in ('-h', '--help'):
PrintUsageExit(0)
if option in ('-d', '--debug'):
option_dict[ARG_LOG_LEVEL] = logging.DEBUG
if option in ('-p', '--port'):
try:
option_dict[ARG_PORT] = int(value)
if not (65535 > option_dict[ARG_PORT] > 0):
raise ValueError
except ValueError:
print >>sys.stderr, 'Invalid value supplied for port'
PrintUsageExit(1)
def expand_path(s):
return os.path.abspath(os.path.expanduser(s))
if option in ('-a', '--address'):
option_dict[ARG_ADDRESS] = value
if option == '--blobstore_path':
option_dict[ARG_BLOBSTORE_PATH] = expand_path(value)
if option == '--datastore_path':
option_dict[ARG_DATASTORE_PATH] = expand_path(value)
if option == '--prospective_search_path':
option_dict[ARG_PROSPECTIVE_SEARCH_PATH] = expand_path(value)
if option == '--skip_sdk_update_check':
option_dict[ARG_SKIP_SDK_UPDATE_CHECK] = True
if option == '--use_sqlite':
option_dict[ARG_USE_SQLITE] = True
if option == '--high_replication':
option_dict[ARG_HIGH_REPLICATION] = True
if option == '--history_path':
option_dict[ARG_HISTORY_PATH] = expand_path(value)
if option in ('-c', '--clear_datastore'):
option_dict[ARG_CLEAR_DATASTORE] = True
if option == '--clear_prospective_search':
option_dict[ARG_CLEAR_PROSPECTIVE_SEARCH] = True
if option == '--require_indexes':
option_dict[ARG_REQUIRE_INDEXES] = True
if option == '--mysql_host':
option_dict[ARG_MYSQL_HOST] = value
if option == '--mysql_port':
option_dict[ARG_MYSQL_PORT] = _ParsePort(value, '--mysql_port')
if option == '--mysql_user':
option_dict[ARG_MYSQL_USER] = value
if option == '--mysql_password':
option_dict[ARG_MYSQL_PASSWORD] = value
if option == '--mysql_socket':
option_dict[ARG_MYSQL_SOCKET] = value
if option == '--smtp_host':
option_dict[ARG_SMTP_HOST] = value
if option == '--smtp_port':
option_dict[ARG_SMTP_PORT] = _ParsePort(value, '--smtp_port')
if option == '--smtp_user':
option_dict[ARG_SMTP_USER] = value
if option == '--smtp_password':
option_dict[ARG_SMTP_PASSWORD] = value
if option == '--enable_sendmail':
option_dict[ARG_ENABLE_SENDMAIL] = True
if option == '--show_mail_body':
option_dict[ARG_SHOW_MAIL_BODY] = True
if option == '--auth_domain':
option_dict['_DEFAULT_ENV_AUTH_DOMAIN'] = value
if option == '--debug_imports':
option_dict['_ENABLE_LOGGING'] = True
if option == '--admin_console_server':
option_dict[ARG_ADMIN_CONSOLE_SERVER] = value.strip()
if option == '--admin_console_host':
option_dict[ARG_ADMIN_CONSOLE_HOST] = value
if option == '--allow_skipped_files':
option_dict[ARG_ALLOW_SKIPPED_FILES] = True
if option == '--disable_static_caching':
option_dict[ARG_STATIC_CACHING] = False
if option == '--disable_task_running':
option_dict[ARG_DISABLE_TASK_RUNNING] = True
if option == '--task_retry_seconds':
try:
option_dict[ARG_TASK_RETRY_SECONDS] = int(value)
if option_dict[ARG_TASK_RETRY_SECONDS] < 0:
raise ValueError
except ValueError:
print >>sys.stderr, 'Invalid value supplied for task_retry_seconds'
PrintUsageExit(1)
if option == '--trusted':
option_dict[ARG_TRUSTED] = True
if option == '--backends':
option_dict[ARG_BACKENDS] = value
if option == '--multiprocess':
option_dict[ARG_MULTIPROCESS] = value
if option == '--multiprocess_min_port':
option_dict[ARG_MULTIPROCESS_MIN_PORT] = value
if option == '--multiprocess_api_server':
option_dict[ARG_MULTIPROCESS_API_SERVER] = value
if option == '--multiprocess_api_port':
option_dict[ARG_MULTIPROCESS_API_PORT] = value
if option == '--multiprocess_app_instance_id':
option_dict[ARG_MULTIPROCESS_APP_INSTANCE_ID] = value
if option == '--multiprocess_backend_id':
option_dict[ARG_MULTIPROCESS_BACKEND_ID] = value
if option == '--multiprocess_backend_instance_id':
option_dict[ARG_MULTIPROCESS_BACKEND_INSTANCE_ID] = value
if option == '--default_partition':
option_dict[ARG_DEFAULT_PARTITION] = value
return args, option_dict
def _ParsePort(port, description):
  """Parses a port number from a string.

  Args:
    port: string
    description: string to use in error messages.

  Returns: integer between 0 and 65535

  Raises:
    ValueError if port is not a valid port number.
  """
  try:
    port = int(port)
    # Bug fix: the old check (65535 > port > 0) rejected 65535, which is a
    # valid TCP port and which the docstring promises to accept.
    if not (0 < port <= 65535):
      raise ValueError
    return port
  except ValueError:
    print >>sys.stderr, 'Invalid value %s supplied for %s' % (port, description)
    PrintUsageExit(1)
def MakeRpcServer(option_dict):
  """Create a new HttpRpcServer.

  Creates a new HttpRpcServer to check for updates to the SDK.

  Args:
    option_dict: The dict of command line options.

  Returns:
    A HttpRpcServer.
  """
  # Credentials are irrelevant for the update check, so a dummy pair is
  # supplied and the server is marked pre-authenticated.
  get_dummy_credentials = lambda: ('unused_email', 'unused_password')
  rpc_server = appengine_rpc.HttpRpcServer(
      option_dict[ARG_ADMIN_CONSOLE_SERVER],
      get_dummy_credentials,
      appcfg.GetUserAgent(),
      appcfg.GetSourceName(),
      host_override=option_dict[ARG_ADMIN_CONSOLE_HOST])
  rpc_server.authenticated = True
  return rpc_server
def SigTermHandler(signum, frame):
  """Handler for TERM signal.

  Raises a KeyboardInterrupt to perform a graceful shutdown on SIGTERM signal.
  """
  # Both arguments are required by the signal-handler calling convention but
  # are unused; converting SIGTERM into KeyboardInterrupt routes shutdown
  # through the same cleanup path as Ctrl-C in main().
  raise KeyboardInterrupt()
def main(argv):
  """Runs the development application server."""
  args, option_dict = ParseArguments(argv)

  # Exactly one positional argument (the application root directory) expected.
  if len(args) != 1:
    print >>sys.stderr, 'Invalid arguments'
    PrintUsageExit(1)

  root_path = args[0]

  # Underscore-prefixed pseudo-options tunnel values into dev_appserver
  # globals instead of being passed to SetupStubs.
  if '_DEFAULT_ENV_AUTH_DOMAIN' in option_dict:
    auth_domain = option_dict['_DEFAULT_ENV_AUTH_DOMAIN']
    dev_appserver.DEFAULT_ENV['AUTH_DOMAIN'] = auth_domain
  if '_ENABLE_LOGGING' in option_dict:
    enable_logging = option_dict['_ENABLE_LOGGING']
    dev_appserver.HardenedModulesHook.ENABLE_LOGGING = enable_logging

  log_level = option_dict[ARG_LOG_LEVEL]
  option_dict['root_path'] = os.path.realpath(root_path)
  logging.getLogger().setLevel(log_level)

  default_partition = option_dict[ARG_DEFAULT_PARTITION]
  appinfo = None
  try:
    appinfo, matcher, _ = dev_appserver.LoadAppConfig(
        root_path, {}, default_partition=default_partition)
  except yaml_errors.EventListenerError, e:
    logging.error('Fatal error when loading application configuration:\n%s', e)
    return 1
  except dev_appserver.InvalidAppConfigError, e:
    logging.error('Application configuration file invalid:\n%s', e)
    return 1

  # Warn when the local interpreter differs from the production runtime.
  # Skipped for multiprocess subprocesses so the warning prints only once.
  version_tuple = tuple(sys.version_info[:2])
  expected_version = PRODUCTION_VERSION
  if appinfo.runtime == 'python27':
    expected_version = (2, 7)
  if ARG_MULTIPROCESS not in option_dict and WARN_ABOUT_PYTHON_VERSION:
    if version_tuple < expected_version:
      sys.stderr.write('Warning: You are using a Python runtime (%d.%d) that '
                       'is older than the production runtime environment '
                       '(%d.%d). Your application may be dependent on Python '
                       'behaviors that have changed and may not work correctly '
                       'when deployed to production.\n' % (
                           version_tuple[0], version_tuple[1],
                           expected_version[0], expected_version[1]))
    if version_tuple > expected_version:
      sys.stderr.write('Warning: You are using a Python runtime (%d.%d) that '
                       'is more recent than the production runtime environment '
                       '(%d.%d). Your application may use features that are '
                       'not available in the production environment and may '
                       'not work correctly when deployed to production.\n' % (
                           version_tuple[0], version_tuple[1],
                           expected_version[0], expected_version[1]))

  multiprocess.Init(argv, option_dict, root_path, appinfo)
  dev_process = multiprocess.GlobalProcess()
  port = option_dict[ARG_PORT]
  login_url = option_dict[ARG_LOGIN_URL]
  address = option_dict[ARG_ADDRESS]
  require_indexes = option_dict[ARG_REQUIRE_INDEXES]
  allow_skipped_files = option_dict[ARG_ALLOW_SKIPPED_FILES]
  static_caching = option_dict[ARG_STATIC_CACHING]
  skip_sdk_update_check = option_dict[ARG_SKIP_SDK_UPDATE_CHECK]

  # SDK update check runs only in the parent process and only when an admin
  # console server is configured.
  if (option_dict[ARG_ADMIN_CONSOLE_SERVER] != '' and
      not dev_process.IsSubprocess()):
    server = MakeRpcServer(option_dict)
    if skip_sdk_update_check:
      logging.info('Skipping update check.')
    else:
      update_check = appcfg.UpdateCheck(server, appinfo)
      update_check.CheckSupportedVersion()
      if update_check.AllowedToCheckForUpdates():
        update_check.CheckForUpdates()

  # Quieter logging in subprocesses.
  if dev_process.IsSubprocess():
    logging.getLogger().setLevel(logging.WARNING)

  try:
    dev_appserver.SetupStubs(appinfo.application, **option_dict)
  except:
    # Intentionally broad: any stub-setup failure is fatal; log and exit.
    exc_type, exc_value, exc_traceback = sys.exc_info()
    logging.error(str(exc_type) + ': ' + str(exc_value))
    logging.debug(''.join(traceback.format_exception(
        exc_type, exc_value, exc_traceback)))
    return 1

  http_server = dev_appserver.CreateServer(
      root_path,
      login_url,
      port,
      sdk_dir=SDK_PATH,
      serve_address=address,
      require_indexes=require_indexes,
      allow_skipped_files=allow_skipped_files,
      static_caching=static_caching,
      default_partition=default_partition)

  # SIGTERM is converted to KeyboardInterrupt for graceful shutdown.
  signal.signal(signal.SIGTERM, SigTermHandler)

  dev_process.PrintStartMessage(appinfo.application, address, port)

  if dev_process.IsInstance():
    logging.getLogger().setLevel(logging.INFO)

  try:
    try:
      http_server.serve_forever()
    except KeyboardInterrupt:
      if not dev_process.IsSubprocess():
        logging.info('Server interrupted by user, terminating')
    except:
      exc_info = sys.exc_info()
      info_string = '\n'.join(traceback.format_exception(*exc_info))
      logging.error('Error encountered:\n%s\nNow terminating.', info_string)
      return 1
    finally:
      http_server.server_close()
  finally:
    # Retry shutdown until it completes, swallowing further Ctrl-C presses so
    # multiprocess teardown is never left half-done.
    done = False
    while not done:
      try:
        multiprocess.Shutdown()
        done = True
      except KeyboardInterrupt:
        pass

  return 0
if __name__ == '__main__':
  # Propagate main()'s return code as the process exit status.
  sys.exit(main(sys.argv))
| |
#!/usr/bin/env python
#
# Code to parse CUL usage data, do some analysis, and generate
# a StackScore. See README.md.
#
import datetime
import gzip
import logging
import optparse
import os
from random import SystemRandom
import re
import sys
import math
import numpy
class SkipLine(Exception):
    """Signal that a line should be skipped without counting it as bad or flagging it."""
class LineIterator(object):
    """
    Class to encapsulate iteration over lines with up to max_bad ignored before error
    """

    def __init__(self, file, data=''):
        # Line counter used in error messages.
        self.linenum = 0
        # Maximum number of bad lines tolerated per next() call.
        self.max_bad = 10;
        # Input files are gzip-compressed.
        self.fh = gzip.open(file,'rb')
        logging.info("Reading %sfrom %s" % (data,file))
        # Distinct identifiers seen so far; populated by subclass next_line().
        self.bib_ids = set()
        self.item_ids = set()

    @property
    def num_bib_ids(self):
        # Number of distinct bib_ids seen so far.
        return len(self.bib_ids)

    @property
    def num_item_ids(self):
        # Number of distinct item_ids seen so far.
        return len(self.item_ids)

    def __iter__(self):
        return self

    def readline(self, keep_comment=False):
        """Wrap self.fh.readline() with line counter, and StopIteration at EOF"""
        self.line = self.fh.readline()
        if (self.line == ''):
            raise StopIteration
        self.linenum += 1
        self.line = self.line.strip()
        # Comment lines are skipped unless the caller explicitly asked for
        # them (e.g. to validate a file's header line).
        if (self.line.startswith('#') and not keep_comment):
            raise SkipLine
        return(self.line)

    def next(self):
        """Call self.next_line() up to self.max_bad times before aborting.

        next_line() must be provided by subclasses.
        """
        attempt = 0
        while (attempt < self.max_bad):
            try:
                return self.next_line()
            except StopIteration as si:
                raise si
            except SkipLine:
                # don't increment count of bad lines
                pass
            except Exception as e:
                logging.warning(str(e))
                attempt += 1
        raise Exception('[%s line %d] Too many bad lines!' % (self.fh.name,self.linenum))
class CULChargeAndBrowse(LineIterator):
    """
    Class providing iterator over change and browse data. Format of data
    file is:

    # CHARGE AND BROWSE COUNTS
    #
    # other comment lines
    # ITEM_ID BIB_ID HISTORICAL_CHARGES HISTORICAL_BROWSES
    47 86706 3 0
    4672 44857 8 5
    9001938 246202 0 0
    """

    def __init__(self, file):
        super(CULChargeAndBrowse, self).__init__(file,'charge and browse counts ')
        # Validate the expected header before iterating any data lines.
        first_line = self.readline(keep_comment=True)
        if (first_line != '# CHARGE AND BROWSE COUNTS'):
            raise Exception("Bad format for circ data in %s, bad first line '%s'" % (file,first_line))

    def next_line(self):
        """Read next line else raise exception describing problem

        The data includes lines where the change and browse counts are both
        zero and should be skipped.
        """
        self.readline()
        try:
            (item_id,bib_id,charges,browses) = self.line.split()
            bib_id = int(bib_id)
            charges = int(charges)
            self.bib_ids.add(bib_id)
            self.item_ids.add(int(item_id))
            # Sanity caps: counts this large indicate corrupt input data.
            if (charges>10000):
                raise Exception("excessive charge count: %d for bib_id=%d" % (charges,bib_id))
            browses = int(browses)
            if (browses>10000):
                raise Exception("excessive browse count: %d for bib_id=%d" % (browses,bib_id))
            if (charges==0 and browses==0):
                raise SkipLine()
            return (bib_id,charges,browses)
        except SkipLine as sl:
            raise sl
        except Exception as e:
            # provide file and line num details in msg
            raise Exception('[%s line %d] Ignoring "%s"] %s' % (self.fh.name,self.linenum,self.line,str(e)))
class CULCircTrans(LineIterator):
    """
    Class providing iterator over circulation transaction data. Format of data
    file is as follows and included lines with no item_id or bib_id which should
    be ignored:

    # CIRCULATION TRANSACTIONS
    #
    # other comments...
    # TRANS_ID ITEM_ID BIB_ID DATE
    143 3087926 1538011 15-JAN-00
    144 5123416 3111111 22-FEB-00
    145 1133333 511222 26-SEP-00
    146 15-SEP-96
    147 489988 2926664 20-DEC-99
    148 09-JUL-00
    """

    def __init__(self, file):
        super(CULCircTrans,self).__init__(file,'circulation transactions ')
        # Validate the expected header before iterating any data lines.
        first_line = self.readline(keep_comment=True)
        if (first_line != '# CIRCULATION TRANSACTIONS'):
            raise Exception("Bad format for circ data in %s, bad first line '%s'" % (file,first_line))

    def next_line(self):
        """Read next line else raise exception describing problem"""
        self.readline()
        try:
            # first look for lines without item_id,bib_id, ie num-spaces-date, and skip
            if (re.match(r'\d+\s+\d\d\-',self.line)):
                raise SkipLine()
            # else try to parse for real
            (trans_id,item_id,bib_id,date) = self.line.split()
            bib_id = int(bib_id)
            self.bib_ids.add(bib_id)
            self.item_ids.add(int(item_id))
            # Dates look like 15-JAN-00 (two-digit year).
            date = datetime.datetime.strptime(date, "%d-%b-%y").date()
            return (bib_id,date)
        except SkipLine as sl:
            raise sl
        except Exception as e:
            # provide file and line num details in msg
            raise Exception('[%s line %d] Ignoring "%s"] %s' % (self.fh.name,self.linenum,self.line,str(e)))
        # NOTE(review): the two statements below appear to be unreachable
        # leftovers (the try/except above always returns or raises) — candidate
        # for deletion; confirm before removing.
        """Read next line else raise exception describing problem"""
        raise StopIteration
def make_randomized_subset(opt):
    """Make a subset dataset for fraction of the bib-ids"""
    # Maps real bib_id -> fake bib_id for every record selected into the subset.
    bib_ids = {}
    fraction = opt.subset_fraction
    r = SystemRandom() # a non-reporoducible random generator
    logging.warning("Writing subset charge and browse to %s..." % opt.subset_charge_and_browse )
    cab_fh = gzip.open( opt.subset_charge_and_browse, 'w')
    cab_fh.write("# CHARGE AND BROWSE COUNTS\n")
    cab_fh.write("# (randomized subset data, item_id=0)\n")
    fake_bib_ids = set()
    for (bib_id,charges,browses) in CULChargeAndBrowse(opt.charge_and_browse):
        if (r.random()<=fraction):
            # generate fake bib_id that we haven't used before, record and dump data
            fake_bib_id = 1234567
            while (fake_bib_id in fake_bib_ids):
                fake_bib_id = r.randint(1,10000000)
            bib_ids[bib_id] = fake_bib_id
            fake_bib_ids.add(fake_bib_id)
            # write, just use 0 for item_id as we don't use that at all
            cab_fh.write("%d\t%d\t%d\t%d\n" % (0,fake_bib_id,charges,browses) )
    cab_fh.close()
    logging.warning("Writing subset circ trans to %s..." % opt.subset_circ_trans )
    ct_fh = gzip.open( opt.subset_circ_trans, 'w')
    ct_fh.write("# CIRCULATION TRANSACTIONS\n")
    ct_fh.write("# (randomized subset data, trans_id=0, item_id=0)\n")
    for (bib_id,date) in CULCircTrans(opt.circ_trans):
        # select subset based on whether the bib_id was picked before
        if (bib_id in bib_ids):
            fake_bib_id = bib_ids[bib_id]
            # an just for belt-and-brances, randomise the date by a year or so
            fake_dt = datetime.datetime.combine(date,datetime.time.min) + datetime.timedelta(days=r.randint(-400,400))
            fake_date = fake_dt.date().strftime('%d-%b-%y').upper()
            # write, use 0 for trans_id and item_id as we don't use these at all
            ct_fh.write(" %d\t%d\t%d\t%s\n" % (0,0,fake_bib_id,fake_date) )
    ct_fh.close()
    logging.info("Done subset")
def write_float_dist(data, file):
    """Write a 100-bin histogram summary of float scores to a file.

    Args:
        data: dict mapping bib_id -> float score.
        file: output filename.
    """
    # list() makes the histogram input explicit (dict.values() is a view on
    # Python 3, which numpy.histogram cannot consume directly).
    hist, bin_edges = numpy.histogram(list(data.values()), bins=100)
    total_bib_ids = len(data)
    logging.info("Writing summary distribution to %s..." % file)
    # 'with' guarantees the handle is closed even if a write fails (the
    # original left the file open on error).
    with open(file, 'w') as fh:
        fh.write("# Binned distribution %s\n#\n" % file)
        fh.write("# total bib_ids = %d\n#\n" % total_bib_ids)
        fh.write("#start\tend\tcount\tfraction\n")
        for j, val in enumerate(hist):
            fh.write("%.1f\t%.1f\t%d\t%.7f\n" %
                     (bin_edges[j], bin_edges[j+1], val, float(val)/total_bib_ids))
def write_dist(data,file,all_bib_ids=0,extra_score_one=0):
    """Write summary of distribution of data to file

    data is a dict[bib_id] with some counts a values, the integer value of the count
    is taken

    all_bib_ids, if non-zero, is the total bib_id population used for the
    col4 fraction; extra_score_one adds that many synthetic score-1 entries.
    """
    total_bib_ids = 0
    total_counts = 0
    num_bib_ids = {} #inverted: number of bib_ids with given count
    example_bib_id = {}
    for bib_id in data:
        count = int(data[bib_id])
        if (count>0):
            total_bib_ids += 1
            total_counts += count
            if (count in num_bib_ids):
                num_bib_ids[count] = num_bib_ids[count] + 1
            else:
                num_bib_ids[count] = 1
                # NOTE(review): reads the module-level 'opt' (command-line
                # options) for opt.examples — hidden global dependency.
                if (opt.examples):
                    example_bib_id[count] = bib_id
                else:
                    # default not to include specific example bib_id for individual data sources
                    example_bib_id[count] = '-'
    if (extra_score_one>0):
        num_bib_ids[1] = num_bib_ids.get(1,0) + extra_score_one
        if (1 not in example_bib_id):
            example_bib_id[1] = '-'
    if (all_bib_ids==0):
        all_bib_ids = total_bib_ids + extra_score_one
    logging.info("Writing distribution to %s..." % file)
    fh = open(file, 'w')
    fh.write("# Distribution %s\n" % file)
    fh.write("#\n# total bib_ids with non-zero counts = %d\n" % total_bib_ids)
    if (extra_score_one>0):
        fh.write("# extra bib_ids with score one = %d\n" % extra_score_one)
        fh.write("# total bib_ids with usage data + extras = %d\n" % all_bib_ids)
    fh.write("# sum of all counts = %d\n" % total_counts)
    fh.write("#\n# col1 = int(count)\n# col2 = num_bib_ids\n")
    fh.write("# col3 = fraction of total bib_ids with non-zero counts for this metric\n")
    fh.write("# col4 = fraction of all bib_ids with any usage data\n")
    fh.write("# col5 = example bib_id (prepend: https://newcatalog.library.cornell.edu/catalog/)\n")
    for count in sorted(num_bib_ids.keys()):
        fh.write("%d\t%d\t%.7f\t%.7f\t%s\n" %
                 (count,num_bib_ids[count],
                  float(num_bib_ids[count])/total_bib_ids,
                  float(num_bib_ids[count])/all_bib_ids,
                  example_bib_id[count]))
    fh.close()
def write_stackscores(scores, file):
    """Dump per-bib_id StackScores to a gzip-compressed file.

    Only bib_ids present in the usage data appear here; extra bib_ids that
    implicitly receive StackScore 1 are not listed.
    """
    logging.info("Writing StackScores to %s..." % file)
    out = gzip.open(file, 'w')
    header = ("# StackScores by bib_id, %s\n#\n" % file)
    header += ("# total bib_ids = %d\n#\n" % len(scores))
    header += "#bib_id\tStackScore\n"
    out.write(header)
    for key in sorted(scores.keys()):
        out.write("%d\t%d\n" % (key, scores[key]))
    out.close()
def analyze_distributions(opt):
    """Analyze distributions of source data.

    Writes per-metric distribution files plus usage_venn.dat, a summary of
    how many items appear in each combination of the three data sources.
    """
    all_bib_ids = {}
    charge = {}
    browse = {}
    # Bitmask per bib_id of which sources mention it:
    # 1 = charges > 0, 2 = browses > 0, 4 = circulation transaction.
    bits = {}
    for (bib_id,charges,browses) in CULChargeAndBrowse(opt.charge_and_browse):
        charge[bib_id] = charge.get(bib_id,0) + charges
        if (charges>0):
            bits[bib_id] = bits.get(bib_id,0) | 1
            all_bib_ids[bib_id] = 1
        browse[bib_id] = browse.get(bib_id,0) + browses
        if (browses>0):
            bits[bib_id] = bits.get(bib_id,0) | 2
            all_bib_ids[bib_id] = 1
    circ = {}
    for (bib_id,date) in CULCircTrans(opt.circ_trans):
        circ[bib_id] = circ.get(bib_id,0) + 1
        bits[bib_id] = bits.get(bib_id,0) | 4
        all_bib_ids[bib_id] = 1
    num_bib_ids = len(all_bib_ids)
    write_dist(charge,'charge_dist.dat',num_bib_ids)
    write_dist(browse,'browse_dist.dat',num_bib_ids)
    write_dist(circ,'circ_dist.dat',num_bib_ids)
    # Look at overlaps between groups from bitwise
    exc_totals = {0:0,1:0,2:0,3:0,4:0,5:0,6:0,7:0}  # exclusive combination counts
    inc_totals = {1:0,2:0,4:0}                      # inclusive per-source counts
    for bib_id in bits:
        exc_totals[bits[bib_id]] += 1
        for b in (1,2,4):
            if (bits[bib_id] & b):
                inc_totals[b] += 1
    file = 'usage_venn.dat'
    logging.info("Writing %s..." % file)
    fh = open(file,'w')
    fh.write("# Overlaps in different types of usage data:\n");
    just = 'just '
    for n in range(1,8):
        desc = []
        # Bug fix: bit 1 is set for charge data and bit 2 for browse data
        # (see the loops above), but the labels were swapped here, so
        # usage_venn.dat described charge rows as 'browse' and vice versa.
        if (n & 1):
            desc.append('charge')
        if (n & 2):
            desc.append('browse')
        if (n & 4):
            desc.append('circ')
        if (n==7):
            just = ''
        out_of = ''
        if (n in (1,2,4)):
            out_of = ' (out of %d items with this data)' % inc_totals[n]
        fh.write("%7d items have %s%s data%s\n" % (exc_totals[n],just,'+'.join(desc),out_of))
    fh.close()
def compute_raw_scores(opt):
    """Read in usage data and compute StackScores

    Score is calculated according to:

    score = charges * charge_weight +
            browses * browse_weight +
            sum_over_all_circ_trans( circ_weight + 0.5 ^ (circ_trans_age / circ_halflife) )

    because recent circulation transactions are also reflected in the charge counts, this
    means that a circulation that happens today will score (charge_weight+circ_weight) whereas
    on the happened circ_halflife ago will score (charge_weight+0.5*circ_weight). An old
    circulation event that is recored only in the charge counts will score just charge_weight.
    """
    scores = {}
    # Relative weights of the three usage signals.
    charge_weight = 2
    browse_weight = 1
    circ_weight = 2
    circ_halflife = 5.0 * 365.0 # number of days back that circ trans has half circ_weight
    cab = CULChargeAndBrowse(opt.charge_and_browse)
    for (bib_id,charges,browses) in cab:
        #print "%d %d %d" % (bib_id,charges,browses)
        scores[bib_id] = scores.get(bib_id,0) + charges*charge_weight + browses*browse_weight
    logging.info("Found %d bib_ids in charge and browse data" % (cab.num_bib_ids))
    today = datetime.datetime.now().date()
    ct = CULCircTrans(opt.circ_trans)
    for (bib_id,date) in ct:
        age = (today - date).days # age in days since circ transaction
        # Exponential decay: a transaction circ_halflife days old contributes
        # half of circ_weight.
        score = circ_weight * math.pow(0.5, age/circ_halflife )
        #print "%d %s %.3f %.3f" % (bib_id,str(date),age,score)
        scores[bib_id] = scores.get(bib_id,0) + score
    logging.info("Found %d bib_ids in circulation and transaction data" % (ct.num_bib_ids))
    write_float_dist(scores, opt.raw_scores_dist)
    return(scores)
def read_reference_dist(file):
    """Read reference distribution from file.

    File format has # for comment lines, then data:

    #stackscore fraction
    100 0.00001013
    99 0.00001013
    98 0.00003045
    ...

    Returns a dict mapping int stackscore -> float fraction. Logs a warning
    (but still returns) if the fractions do not sum to 1.0.
    """
    logging.info("Reading reference distribution from %s..." % (file))
    dist = {}
    total = 0.0
    # 'with' closes the handle even on a parse error (the original never
    # closed the file at all).
    with open(file, 'r') as fh:
        for line in fh:
            if (re.match('^#', line)):
                continue
            (stackscore, fraction) = line.split()
            fraction = float(fraction)
            dist[int(stackscore)] = fraction
            total += fraction
    # Sanity check: a proper reference distribution sums to 1.0.
    if (abs(1.0 - total) > 0.000001):
        logging.warning("Expected distribution from %s to sum to 1.0, got %f" % (file, total))
    return dist
def compute_stackscore(scores, dist, opt):
    """Compute StackScores on a scale of 1-100 to match reference distribution

    The score of 1 will be reserved for all items that have no usage data as is done for
    the Harvard StackScore. The reference distribution is suppied in dist and is assumed to
    be over the range 1 to 100 and sum to 1.0.

    We do not expect the scores data to include all bib_ids, the total number of items
    is taken from the input parameter opt.total_bib_ids if specified (!=0) and thus there
    will be at least (opt.total_bib_ids - len(scores)) items that will get score 1.
    """
    if (opt.total_bib_ids):
        total_items = opt.total_bib_ids
        if (len(scores)>total_items):
            # Bug fix: the message previously interpolated the undefined name
            # 'total_bib_ids', so triggering this sanity check raised a
            # NameError instead of the intended Exception.
            raise Exception("Sanity check failed: more scores (%d) than total_bib_ids (%d)!" % (len(scores),total_items))
        extra_items_with_score_one = (total_items-len(scores))
    else:
        total_items = len(scores)
        extra_items_with_score_one = 0
    # Get counts of items for each raw score (which may be a float)
    counts = {}
    for bib_id in scores:
        score = scores[bib_id]
        counts[score] = counts.get(score,0) + 1
    # Determine StackScore for each score by matching to the reference distribution in
    # dist. We do this starting from StackScore 100 and adding extra raw scores in
    # to meet the cumulative total most closely.
    num_scores = len(counts)
    logging.info("Have %d distinct raw scores from %d items" % (num_scores,len(scores)))
    count = 0
    stackscore_by_score = {}
    stackscore_counts = {}
    n = 0
    ss = 100
    ss_frac = dist[ss] # cumulative fraction we want to get to for this StackScore
    for score in reversed(sorted(counts.keys())):
        # do we add to this StackScore or the next lower?
        n = counts[score]
        ss_count = int(ss_frac*total_items) # integer cumulative count
        if ((count+n > ss_count) and ((count+n)-ss_count) > (ss_count-count) and ss>1):
            # should add to next lower StackScore
            ss -= 1
            ss_frac += dist[ss]
        count += n
        stackscore_by_score[score] = ss
        stackscore_counts[ss] = stackscore_counts.get(ss,0) + n
    # add in extra counts for score 1
    if (ss!=1 and ss!=2):
        logging.warning("Distribution seems odd: expected to have ss==1 or ss==2 after normalizing, got ss=%d" % (ss))
    stackscore_counts[1] = stackscore_counts.get(1,0) + extra_items_with_score_one
    # write table comparing with reference distribution
    fh = open(opt.stackscore_comp,'w')
    fh.write("# Comparison of StackScore distribution with reference distribution\n#\n")
    fh.write("#score\trecords\tfraction\treference_fraction\n")
    for ss in range(1,101):
        fh.write("%d\t%d\t%.7f\t%.7f\n" % (ss,stackscore_counts.get(ss,0),float(stackscore_counts.get(ss,0))/total_items,dist.get(ss,0)))
    fh.close()
    # now we have lookup table of score->StackScore, make set of StackScores, dump
    # them and write out the distribution
    stackscore={}
    for bib_id in scores:
        stackscore[bib_id]=stackscore_by_score[scores[bib_id]]
    if (opt.stackscores):
        write_stackscores(stackscore, opt.stackscores)
    write_dist(stackscore, opt.stackscore_dist, extra_score_one=extra_items_with_score_one)
##################################################################
# Options and arguments
p = optparse.OptionParser(description='Parser for CUL usage data',
                          usage='usage: %prog [[opts]] [file1] .. [fileN]')
p.add_option('--charge-and-browse', action='store', default='testdata/subset-charge-and-browse-counts.tsv.gz',
             help="Charge and browse num_bib_ids, gzipped input file (default %default)")
p.add_option('--circ-trans', action='store', default='testdata/subset-circ-trans.tsv.gz',
             help="Circulation transactions, gzipped input file (default %default)")
p.add_option('--total-bib-ids', action='store', type='int', default=0,
             help="Total number of bib_ids in the catalog (omit to use only input data)")
p.add_option('--reference-dist', action='store', default='reference_dist.dat',
             help="Reference distribution over the range 1..100 to match to (default %default)")
p.add_option('--raw-scores-dist', action='store', default='raw_scores_dist.dat',
             help="Distribution of raw scores (default %default)")
p.add_option('--stackscores', action='store',
             help="StackScores output file (not written by default, will be gzipped)")
p.add_option('--stackscore_dist', action='store', default='stackscore_dist.dat',
             help="StackScore distribution output file (default %default)")
p.add_option('--stackscore_comp', action='store', default='stackscore_dist_comp.dat',
             help="StackScore distribution comparison with reference (default %default)")
p.add_option('--logfile', action='store',
             help="Send log output to specified file")
p.add_option('--examples', action='store_true',
             help="Include example bib_id in distribution outputs")
p.add_option('--verbose', '-v', action='store_true',
             help="verbose, show additional informational messages")
p.add_option('--analyze', action='store_true',
             help="Do analysis of input distributions")
p.add_option('--make-randomized-subset', action='store_true',
             help="Make a smaller subset of the input data and write out again")
p.add_option('--subset-fraction', action='store', type='float', default=0.01,
             help="Fraction of data to include in subset (0.0<=fraction<=1.0, default %default)")
p.add_option('--subset-charge-and-browse', action='store', default='subset-charge-and-browse-counts.tsv.gz',
             help="Name of output file for subset charge and browse counts (default %default)")
p.add_option('--subset-circ-trans', action='store', default='subset-circ-trans.tsv.gz',
             help="Name of output file for subset circulation transactions (default %default)")
# 'opt' is module-level and is read directly by write_dist() (opt.examples).
(opt, args) = p.parse_args()

# Default to WARN; --verbose enables INFO messages.
level = logging.INFO if opt.verbose else logging.WARN
if (opt.logfile):
    logging.basicConfig(filename=opt.logfile, level=level)
else:
    logging.basicConfig(level=level)
logging.info("STARTED at %s" % (datetime.datetime.now()))

# Dispatch: subset generation, input analysis, or (default) the full
# StackScore computation pipeline.
if (opt.make_randomized_subset):
    make_randomized_subset(opt)
elif (opt.analyze):
    analyze_distributions(opt)
else:
    scores = compute_raw_scores(opt)
    dist = read_reference_dist(opt.reference_dist)
    compute_stackscore(scores, dist, opt)
logging.info("FINISHED at %s" % (datetime.datetime.now()))
| |
# pyshyacc.py - PLY grammar definition for pysh
#
# Copyright 2007 Patrick Mezard
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.
"""PLY grammar file.
"""
import os
import sys
import pyshlex
tokens = pyshlex.tokens
from ply import yacc
import sherrors
class IORedirect:
    """A single file IO redirection: operator, target filename, and the
    optional file-descriptor number written before the operator."""
    def __init__(self, op, filename, io_number=None):
        self.op, self.filename, self.io_number = op, filename, io_number
class HereDocument:
    """A here-document redirection: operator, delimiter name, body content,
    and the optional file-descriptor number before the operator."""
    def __init__(self, op, name, content, io_number=None):
        self.op, self.name = op, name
        self.content, self.io_number = content, io_number
def make_io_redirect(p):
    """Make an IORedirect instance from the input 'io_redirect' production.

    p is ('io_redirect', io_number, io_target) where io_target is either an
    ('io_file', op, filename) or ('io_here', op, name, content) production.
    """
    name, io_number, io_target = p
    assert name=='io_redirect'
    # Bug fix: bind io_type up front. It was previously only assigned inside
    # the matched branches, so reaching the final assert raised a NameError
    # instead of the intended AssertionError message.
    io_type = io_target[0]
    if io_type=='io_file':
        io_type, io_op, io_file = io_target
        return IORedirect(io_op, io_file, io_number)
    elif io_type=='io_here':
        io_type, io_op, io_name, io_content = io_target
        return HereDocument(io_op, io_name, io_content, io_number)
    else:
        assert False, "Invalid IO redirection token %s" % repr(io_type)
class SimpleCommand:
    """A command word list with its redirections and variable assignments.

    assigns contains (name, value) pairs.
    """
    def __init__(self, words, redirs, assigns):
        self.words, self.redirs, self.assigns = \
            list(words), list(redirs), list(assigns)
class Pipeline:
    """A sequence of piped commands; reverse_status inverts the exit status
    (a leading '!')."""
    def __init__(self, commands, reverse_status=False):
        cmds = list(commands)
        assert cmds  # the grammar never produces an empty pipeline
        self.commands = cmds
        self.reverse_status = reverse_status
class AndOr:
    """An '&&'/'||' connective joining two pipeline operands."""
    def __init__(self, op, left, right):
        self.op = str(op)
        self.left, self.right = left, right
class ForLoop:
    """'for name in items; do cmds; done'."""
    def __init__(self, name, items, cmds):
        self.name, self.items, self.cmds = str(name), list(items), list(cmds)
class WhileLoop:
    """'while condition; do cmds; done'."""
    def __init__(self, condition, cmds):
        self.condition, self.cmds = list(condition), list(cmds)
class UntilLoop:
    """'until condition; do cmds; done'."""
    def __init__(self, condition, cmds):
        self.condition, self.cmds = list(condition), list(cmds)
class FunDef:
    """A shell function definition: its name and body command."""
    def __init__(self, name, body):
        self.name, self.body = str(name), body
class BraceGroup:
    """'{ cmds; }' — commands grouped in the current shell."""
    def __init__(self, cmds):
        self.cmds = list(cmds)
class IfCond:
    """An if/then[/else] conditional: condition list, then-branch, else-branch."""
    def __init__(self, cond, if_cmds, else_cmds):
        self.cond = list(cond)
        self.if_cmds, self.else_cmds = if_cmds, else_cmds
class Case:
    """A 'case name in ... esac' construct with its (pattern, cmds) items."""
    def __init__(self, name, items):
        self.name, self.items = name, items
class SubShell:
    """'( cmds )' — commands run in a subshell."""
    def __init__(self, cmds):
        self.cmds = cmds
class RedirectList:
    """A compound command together with its trailing redirections."""
    def __init__(self, cmd, redirs):
        self.cmd, self.redirs = cmd, list(redirs)
def get_production(productions, ptype):
    """Return the first production in 'productions' whose name is 'ptype'.

    productions is a list of production tuples like (name, obj); None entries
    are ignored. Raises KeyError when no match exists.
    """
    matches = (prod for prod in productions
               if prod is not None and prod[0] == ptype)
    try:
        return next(matches)
    except StopIteration:
        raise KeyError(ptype)
#-------------------------------------------------------------------------------
# PLY grammar definition
#-------------------------------------------------------------------------------
def p_multiple_commands(p):
    """multiple_commands : newline_sequence
                         | complete_command
                         | multiple_commands complete_command"""
    # A newline_sequence reduces to None and contributes no command.
    if len(p) == 3:
        p[0] = p[1] + [p[2]]
    elif p[1] is None:
        p[0] = []
    else:
        p[0] = [p[1]]
def p_complete_command(p):
    """complete_command : list separator
                        | list"""
    # A trailing '&' separator marks the whole list asynchronous.
    is_async = len(p) == 3 and p[2] and p[2][1] == '&'
    p[0] = ('async', p[1]) if is_async else p[1]
def p_list(p):
    """list : list separator_op and_or
            | and_or"""
    if len(p) == 2:
        p[0] = [p[1]]
        return
    # The separator_op ('&' or ';') is intentionally ignored here; async
    # AND-OR list execution is handled elsewhere / not implemented.
    p[0] = p[1] + [p[3]]
def p_and_or(p):
    """and_or : pipeline
              | and_or AND_IF linebreak pipeline
              | and_or OR_IF linebreak pipeline"""
    # p[2] is the '&&'/'||' operator token joining the two pipelines.
    if len(p)==2:
        p[0] = p[1]
    else:
        p[0] = ('and_or', AndOr(p[2], p[1], p[4]))
def p_maybe_bang_word(p):
    """maybe_bang_word : Bang"""
    # Tags the Bang token; presumably part of the same priority-rearranging
    # pattern as the other maybe_* rules (see p_maybe_for_word).
    p[0] = ('maybe_bang_word', p[1])
def p_pipeline(p):
    """pipeline : pipe_sequence
                | bang_word pipe_sequence"""
    # A leading '!' (bang_word) inverts the pipeline's exit status.
    if len(p)==3:
        p[0] = ('pipeline', Pipeline(p[2][1:], True))
    else:
        p[0] = ('pipeline', Pipeline(p[1][1:]))
def p_pipe_sequence(p):
    """pipe_sequence : command
                     | pipe_sequence PIPE linebreak command"""
    # Accumulate commands into a ['pipe_sequence', cmd, ...] list.
    if len(p) == 5:
        p[0] = p[1] + [p[4]]
    else:
        p[0] = ['pipe_sequence', p[1]]
def p_command(p):
    """command : simple_command
               | compound_command
               | compound_command redirect_list
               | function_definition"""
    # Whitelist of production kinds this parser knows how to handle.
    known = ('simple_command', 'for_clause', 'while_clause', 'until_clause',
             'case_clause', 'if_clause', 'function_definition', 'subshell',
             'brace_group')
    kind = p[1][0]
    if kind not in known:
        raise NotImplementedError('%s command is not implemented' % repr(kind))
    if len(p) == 2:
        p[0] = p[1]
    else:
        # Trailing redirections wrap the command in a RedirectList.
        p[0] = ('redirect_list', RedirectList(p[1], p[2][1:]))
def p_compound_command(p):
    """compound_command : brace_group
                        | subshell
                        | for_clause
                        | case_clause
                        | if_clause
                        | while_clause
                        | until_clause"""
    # Pure pass-through: the specific clause production is the result.
    p[0] = p[1]
def p_subshell(p):
    """subshell : LPARENS compound_list RPARENS"""
    # p[2][1:] strips the 'compound_list' tag, leaving the command list.
    p[0] = ('subshell', SubShell(p[2][1:]))
def p_compound_list(p):
    """compound_list : term
                     | newline_list term
                     | term separator
                     | newline_list term separator"""
    productions = p[1:]
    try:
        # Only ';' separators are supported here; '&' (async) is rejected.
        sep = get_production(productions, 'separator')
        if sep[1]!=';':
            raise NotImplementedError()
    except KeyError:
        # No separator present — that alternative of the rule matched.
        pass
    term = get_production(productions, 'term')
    p[0] = ['compound_list'] + term[1:]
def p_term(p):
    """term : term separator and_or
            | and_or"""
    if len(p) == 2:
        p[0] = ['term', p[1]]
        return
    sep = p[2]
    if sep is not None and sep[1] == '&':
        # An '&' separator makes the accumulated term body asynchronous.
        p[0] = ['term', ('async', p[1][1:])] + [p[3]]
    else:
        p[0] = p[1] + [p[3]]
def p_maybe_for_word(p):
    # Rearrange 'For' priority wrt TOKEN. See p_for_word
    """maybe_for_word : For"""
    p[0] = ('maybe_for_word', p[1])
def p_for_clause(p):
    """for_clause : for_word name linebreak do_group
                  | for_word name linebreak in sequential_sep do_group
                  | for_word name linebreak in wordlist sequential_sep do_group"""
    productions = p[1:]
    do_group = get_production(productions, 'do_group')
    try:
        # The bare 'for name do ... done' form (no 'in') is unsupported.
        items = get_production(productions, 'in')[1:]
    except KeyError:
        raise NotImplementedError('"in" omission is not implemented')
    try:
        # The real item list comes from 'wordlist'; 'in' with no wordlist
        # means an empty iteration list.
        items = get_production(productions, 'wordlist')[1:]
    except KeyError:
        items = []
    name = p[2]
    p[0] = ('for_clause', ForLoop(name, items, do_group[1:]))
def p_name(p):
    """name : token""" #Was NAME instead of token
    # Pass the token straight through as the loop-variable name.
    p[0] = p[1]
def p_in(p):
    """in : In"""
    # Tag the 'in' keyword so p_for_clause can detect its presence.
    p[0] = ('in', p[1])
def p_wordlist(p):
    """wordlist : wordlist token
                | token"""
    # Accumulate ('TOKEN', word) pairs into a ['wordlist', ...] list.
    if len(p) == 3:
        p[0] = p[1] + [('TOKEN', p[2])]
    else:
        p[0] = ['wordlist', ('TOKEN', p[1])]
def p_case_clause(p):
    """case_clause : Case token linebreak in linebreak case_list Esac
                   | Case token linebreak in linebreak case_list_ns Esac
                   | Case token linebreak in linebreak Esac"""
    # len(p) < 8 means the item-less 'Esac' alternative matched.
    if len(p) < 8:
        items = []
    else:
        items = p[6][1:]
    name = p[2]
    # Each case_item production is ('case_item', payload); keep the payloads.
    p[0] = ('case_clause', Case(name, [c[1] for c in items]))
def p_case_list_ns(p):
    """case_list_ns : case_list case_item_ns
                    | case_item_ns"""
    # Same shape as case_list (last item just lacks a trailing DSEMI);
    # delegate to the shared handler.
    p_case_list(p)
def p_case_list(p):
    """case_list : case_list case_item
                 | case_item"""
    # Accumulate case items into a ['case_list', item, ...] list.
    if len(p) == 3:
        p[0] = p[1] + [p[2]]
    else:
        p[0] = ['case_list', p[1]]
def p_case_item_ns(p):
    """case_item_ns : pattern RPARENS linebreak
                    | pattern RPARENS compound_list linebreak
                    | LPARENS pattern RPARENS linebreak
                    | LPARENS pattern RPARENS compound_list linebreak"""
    # Same shape as case_item minus the DSEMI; delegate to the shared handler.
    p_case_item(p)
def p_case_item(p):
    """case_item : pattern RPARENS linebreak DSEMI linebreak
                 | pattern RPARENS compound_list DSEMI linebreak
                 | LPARENS pattern RPARENS linebreak DSEMI linebreak
                 | LPARENS pattern RPARENS compound_list DSEMI linebreak"""
    # len(p) < 7 means the form without the optional leading LPARENS; the
    # pattern production is then p[1] rather than p[2].
    if len(p) < 7:
        name = p[1][1:]
    else:
        name = p[2][1:]
    try:
        cmds = get_production(p[1:], "compound_list")[1:]
    except KeyError:
        # Pattern with an empty body.
        cmds = []
    p[0] = ('case_item', (name, cmds))
def p_pattern(p):
    """pattern : token
               | pattern PIPE token"""
    # Accumulate the alternative patterns of a case item.
    if len(p)==2:
        p[0] = ['pattern', ('TOKEN', p[1])]
    else:
        # Bug fix: in the 'pattern PIPE token' production the token is p[3];
        # p[2] is the PIPE operator itself, which was being appended before.
        p[0] = p[1] + [('TOKEN', p[3])]
def p_maybe_if_word(p):
    # Rearrange 'If' priority wrt TOKEN. See p_if_word
    """maybe_if_word : If"""
    p[0] = ('maybe_if_word', p[1])
def p_maybe_then_word(p):
    # Rearrange 'Then' priority wrt TOKEN. See p_then_word
    """maybe_then_word : Then"""
    p[0] = ('maybe_then_word', p[1])
def p_if_clause(p):
"""if_clause : if_word compound_list then_word compound_list else_part Fi
| if_word compound_list then_word compound_list Fi"""
else_part = []
if len(p)==7:
else_part = p[5]
p[0] = ('if_clause', IfCond(p[2][1:], p[4][1:], else_part))
def p_else_part(p):
    """else_part : Elif compound_list then_word compound_list else_part
                 | Elif compound_list then_word compound_list
                 | Else compound_list"""
    if len(p)==3:
        # Plain Else: just the command list with its tag stripped.
        p[0] = p[2][1:]
    else:
        # Elif chains recurse through the optional trailing else_part.
        else_part = []
        if len(p)==6:
            else_part = p[5]
        p[0] = ('elif', IfCond(p[2][1:], p[4][1:], else_part))
def p_while_clause(p):
    """while_clause : While compound_list do_group"""
    p[0] = ('while_clause', WhileLoop(p[2][1:], p[3][1:]))
def p_maybe_until_word(p):
    # Rearrange 'Until' priority wrt TOKEN. See p_until_word
    """maybe_until_word : Until"""
    p[0] = ('maybe_until_word', p[1])
def p_until_clause(p):
    """until_clause : until_word compound_list do_group"""
    p[0] = ('until_clause', UntilLoop(p[2][1:], p[3][1:]))
def p_function_definition(p):
    """function_definition : fname LPARENS RPARENS linebreak function_body"""
    # fname() { body } -- the parentheses carry no payload.
    p[0] = ('function_definition', FunDef(p[1], p[5]))
def p_function_body(p):
    """function_body : compound_command
                     | compound_command redirect_list"""
    # Only the bare compound_command form is supported; redirections on a
    # function body are rejected explicitly.
    if len(p) == 2:
        p[0] = p[1]
        return
    raise NotImplementedError('functions redirections lists are not implemented')
def p_fname(p):
    """fname : TOKEN""" #Was NAME instead of token
    p[0] = p[1]
def p_brace_group(p):
    """brace_group : Lbrace compound_list Rbrace"""
    # { ... } grouping; [1:] strips the compound_list tag.
    p[0] = ('brace_group', BraceGroup(p[2][1:]))
def p_maybe_done_word(p):
    #See p_assignment_word for details.
    """maybe_done_word : Done"""
    p[0] = ('maybe_done_word', p[1])
def p_maybe_do_word(p):
    """maybe_do_word : Do"""
    p[0] = ('maybe_do_word', p[1])
def p_do_group(p):
    """do_group : do_word compound_list done_word"""
    #Do group contains a list of AndOr
    p[0] = ['do_group'] + p[2][1:]
def p_simple_command(p):
    """simple_command : cmd_prefix cmd_word cmd_suffix
                      | cmd_prefix cmd_word
                      | cmd_prefix
                      | cmd_name cmd_suffix
                      | cmd_name"""
    # Sort every child production into words, redirections and assignments.
    words, redirs, assigns = [], [], []
    for e in p[1:]:
        name = e[0]
        if name in ('cmd_prefix', 'cmd_suffix'):
            # Prefix/suffix productions are flat lists of mixed children.
            for sube in e[1:]:
                subname = sube[0]
                if subname=='io_redirect':
                    redirs.append(make_io_redirect(sube))
                elif subname=='ASSIGNMENT_WORD':
                    assigns.append(sube)
                else:
                    words.append(sube)
        elif name in ('cmd_word', 'cmd_name'):
            words.append(e)
    cmd = SimpleCommand(words, redirs, assigns)
    p[0] = ('simple_command', cmd)
def p_cmd_name(p):
    """cmd_name : TOKEN"""
    p[0] = ('cmd_name', p[1])
def p_cmd_word(p):
    """cmd_word : token"""
    p[0] = ('cmd_word', p[1])
def p_maybe_assignment_word(p):
    #See p_assignment_word for details.
    """maybe_assignment_word : ASSIGNMENT_WORD"""
    p[0] = ('maybe_assignment_word', p[1])
def p_cmd_prefix(p):
    """cmd_prefix : io_redirect
                  | cmd_prefix io_redirect
                  | assignment_word
                  | cmd_prefix assignment_word"""
    # Grow (or start) the cmd_prefix list with either an assignment --
    # split into (name, value) here -- or a raw io_redirect production.
    try:
        prefix = get_production(p[1:], 'cmd_prefix')
    except KeyError:
        prefix = ['cmd_prefix']
    try:
        value = get_production(p[1:], 'assignment_word')[1]
        value = ('ASSIGNMENT_WORD', value.split('=', 1))
    except KeyError:
        value = get_production(p[1:], 'io_redirect')
    p[0] = prefix + [value]
def p_cmd_suffix(p):
    """cmd_suffix : io_redirect
                  | cmd_suffix io_redirect
                  | token
                  | cmd_suffix token
                  | maybe_for_word
                  | cmd_suffix maybe_for_word
                  | maybe_done_word
                  | cmd_suffix maybe_done_word
                  | maybe_do_word
                  | cmd_suffix maybe_do_word
                  | maybe_until_word
                  | cmd_suffix maybe_until_word
                  | maybe_assignment_word
                  | cmd_suffix maybe_assignment_word
                  | maybe_if_word
                  | cmd_suffix maybe_if_word
                  | maybe_then_word
                  | cmd_suffix maybe_then_word
                  | maybe_bang_word
                  | cmd_suffix maybe_bang_word"""
    # Reserved words that appear in argument position (the maybe_* rules)
    # are demoted back to plain TOKENs here.
    try:
        suffix = get_production(p[1:], 'cmd_suffix')
        token = p[2]
    except KeyError:
        suffix = ['cmd_suffix']
        token = p[1]
    if isinstance(token, tuple):
        if token[0]=='io_redirect':
            p[0] = suffix + [token]
        else:
            #Convert maybe_* to TOKEN if necessary
            p[0] = suffix + [('TOKEN', token[1])]
    else:
        p[0] = suffix + [('TOKEN', token)]
def p_redirect_list(p):
    """redirect_list : io_redirect
                     | redirect_list io_redirect"""
    # Accumulate the objects built by make_io_redirect into a tagged list.
    if len(p) == 2:
        p[0] = ['redirect_list', make_io_redirect(p[1])]
    else:
        p[0] = p[1] + [make_io_redirect(p[2])]
def p_io_redirect(p):
    """io_redirect : io_file
                   | IO_NUMBER io_file
                   | io_here
                   | IO_NUMBER io_here"""
    # The optional leading IO_NUMBER becomes None when absent so that
    # downstream code can handle both shapes uniformly.
    io_number = p[1] if len(p) == 3 else None
    p[0] = ('io_redirect', io_number, p[len(p) - 1])
def p_io_file(p):
    #Return the tuple (operator, filename)
    """io_file : LESS filename
               | LESSAND filename
               | GREATER filename
               | GREATAND filename
               | DGREAT filename
               | LESSGREAT filename
               | CLOBBER filename"""
    #Extract the filename from the file
    p[0] = ('io_file', p[1], p[2][1])
def p_filename(p):
    #Return the filename
    """filename : TOKEN"""
    p[0] = ('filename', p[1])
def p_io_here(p):
    """io_here : DLESS here_end
               | DLESSDASH here_end"""
    # here_end carries (tag, name, content); keep name and content.
    p[0] = ('io_here', p[1], p[2][1], p[2][2])
def p_here_end(p):
    """here_end : HERENAME TOKEN"""
    p[0] = ('here_document', p[1], p[2])
# Newline/separator handling: these productions only consume tokens and
# yield no useful value except the separator operator itself.
def p_newline_sequence(p):
    # Nothing in the grammar can handle leading NEWLINE productions, so add
    # this one with the lowest possible priority relatively to newline_list.
    """newline_sequence : newline_list"""
    p[0] = None
def p_newline_list(p):
    """newline_list : NEWLINE
                    | newline_list NEWLINE"""
    p[0] = None
def p_linebreak(p):
    """linebreak : newline_list
                 | empty"""
    p[0] = None
def p_separator_op(p):
    """separator_op : COMMA
                    | AMP"""
    p[0] = p[1]
def p_separator(p):
    """separator : separator_op linebreak
                 | newline_list"""
    # An explicit operator is kept so callers can distinguish sequential
    # from asynchronous lists; bare newlines carry no information.
    if len(p) == 3:
        p[0] = ('separator', p[1])
    else:
        p[0] = None
def p_sequential_sep(p):
    """sequential_sep : COMMA linebreak
                      | newline_list"""
    p[0] = None
# Low priority TOKEN => for_word conversion.
# Let maybe_for_word be used as a token when necessary in higher priority
# rules.
def p_for_word(p):
    """for_word : maybe_for_word"""
    p[0] = p[1]
def p_if_word(p):
    """if_word : maybe_if_word"""
    p[0] = p[1]
def p_then_word(p):
    """then_word : maybe_then_word"""
    p[0] = p[1]
def p_done_word(p):
    """done_word : maybe_done_word"""
    p[0] = p[1]
def p_do_word(p):
    """do_word : maybe_do_word"""
    p[0] = p[1]
def p_until_word(p):
    """until_word : maybe_until_word"""
    p[0] = p[1]
def p_assignment_word(p):
    """assignment_word : maybe_assignment_word"""
    # Unwrap the ('maybe_assignment_word', value) tuple.
    p[0] = ('assignment_word', p[1][1])
def p_bang_word(p):
    """bang_word : maybe_bang_word"""
    p[0] = ('bang_word', p[1][1])
def p_token(p):
    """token : TOKEN
             | Fi"""
    p[0] = p[1]
def p_empty(p):
    # Explicit empty production used by linebreak.
    'empty :'
    p[0] = None
# Error rule for syntax errors
def p_error(p):
    # Build a message showing the offending token plus up to five
    # lookahead tokens for context, then abort parsing.
    msg = []
    w = msg.append
    w('%r\n' % p)
    w('followed by:\n')
    for i in range(5):
        n = yacc.token()
        if not n:
            break
        w(' %r\n' % n)
    raise sherrors.ShellSyntaxError(''.join(msg))
# Build the parser
try:
    # Reuse pregenerated parsing tables when they are importable.
    import pyshtables
except ImportError:
    # Generate the tables, writing them next to this module when the
    # directory is writable, otherwise into the current directory.
    outputdir = os.path.dirname(__file__)
    if not os.access(outputdir, os.W_OK):
        outputdir = ''
    yacc.yacc(tabmodule = 'pyshtables', outputdir = outputdir, debug = 0)
else:
    yacc.yacc(tabmodule = 'pysh.pyshtables', write_tables = 0, debug = 0)
def parse(input, eof=False, debug=False):
    """Parse a whole script at once and return the generated AST and unconsumed
    data in a tuple.
    NOTE: eof is probably meaningless for now, the parser being unable to work
    in pull mode. It should be set to True.
    """
    lexer = pyshlex.PLYLexer()
    remaining = lexer.add(input, eof)
    if lexer.is_empty():
        return [], remaining
    if debug:
        # debug=2 enables yacc's verbose parser tracing.
        debug = 2
    return yacc.parse(lexer=lexer, debug=debug), remaining
#-------------------------------------------------------------------------------
# AST rendering helpers
#-------------------------------------------------------------------------------
def format_commands(v):
    """Return a tree made of strings and lists. Make command trees easier to
    display.
    """
    # NOTE(review): the map() calls below rely on Python 2's eager map()
    # returning lists; this module targets Python 2 (see print_commands).
    if isinstance(v, list):
        return [format_commands(c) for c in v]
    if isinstance(v, tuple):
        if len(v)==2 and isinstance(v[0], str) and not isinstance(v[1], str):
            if v[0] == 'async':
                return ['AsyncList', map(format_commands, v[1])]
            else:
                #Avoid decomposing tuples like ('pipeline', Pipeline(...))
                return format_commands(v[1])
        return format_commands(list(v))
    elif isinstance(v, IfCond):
        name = ['IfCond']
        name += ['if', map(format_commands, v.cond)]
        name += ['then', map(format_commands, v.if_cmds)]
        name += ['else', map(format_commands, v.else_cmds)]
        return name
    elif isinstance(v, ForLoop):
        name = ['ForLoop']
        name += [repr(v.name)+' in ', map(str, v.items)]
        name += ['commands', map(format_commands, v.cmds)]
        return name
    elif isinstance(v, AndOr):
        return [v.op, format_commands(v.left), format_commands(v.right)]
    elif isinstance(v, Pipeline):
        name = 'Pipeline'
        if v.reverse_status:
            name = '!' + name
        return [name, format_commands(v.commands)]
    elif isinstance(v, Case):
        name = ['Case']
        name += [v.name, format_commands(v.items)]
        # BUG FIX: this branch previously fell through without returning,
        # so Case nodes were rendered as None; every sibling branch
        # returns its tree.
        return name
    elif isinstance(v, SimpleCommand):
        name = ['SimpleCommand']
        if v.words:
            name += ['words', map(str, v.words)]
        if v.assigns:
            assigns = [tuple(a[1]) for a in v.assigns]
            name += ['assigns', map(str, assigns)]
        if v.redirs:
            name += ['redirs', map(format_commands, v.redirs)]
        return name
    elif isinstance(v, RedirectList):
        name = ['RedirectList']
        if v.redirs:
            name += ['redirs', map(format_commands, v.redirs)]
        name += ['command', format_commands(v.cmd)]
        return name
    elif isinstance(v, IORedirect):
        return ' '.join(map(str, (v.io_number, v.op, v.filename)))
    elif isinstance(v, HereDocument):
        return ' '.join(map(str, (v.io_number, v.op, repr(v.name), repr(v.content))))
    elif isinstance(v, SubShell):
        return ['SubShell', map(format_commands, v.cmds)]
    else:
        return repr(v)
def print_commands(cmds, output=sys.stdout):
    """Pretty print a command tree."""
    def print_tree(cmd, spaces, output):
        # Lists are containers: recurse with three extra indent columns.
        if isinstance(cmd, list):
            for c in cmd:
                print_tree(c, spaces + 3, output)
        else:
            # Python 2 print-to-file syntax; this module targets Python 2.
            print >>output, ' '*spaces + str(cmd)
    formatted = format_commands(cmds)
    print_tree(formatted, 0, output)
def stringify_commands(cmds):
    """Serialize a command tree as a string.
    Returned string is not pretty and is currently used for unit tests only.
    """
    def stringify(value):
        # Lists become '<child child ...>' groups; leaves pass through.
        if isinstance(value, list):
            parts = [stringify(child) for child in value]
            return ''.join(['<', ' '.join(parts), '>'])
        return ' '.join([value])
    return stringify(format_commands(cmds))
def visit_commands(cmds, callable):
    """Visit the command tree and execute callable on every Pipeline and
    SimpleCommand instances.
    """
    # A plain loop replaces the original map() call: map() is evaluated
    # eagerly only on Python 2, so using it purely for its side effects
    # would silently do nothing under Python 3.
    if isinstance(cmds, (tuple, list)):
        for c in cmds:
            visit_commands(c, callable)
    elif isinstance(cmds, (Pipeline, SimpleCommand)):
        callable(cmds)
| |
#!/usr/bin/python
"""
get_map_stats.py
:author: Brandon Arrendondo
:license: MIT
"""
import sys
import os
import argparse
import math
import logging
import numpy as np
from collections import Counter
def calculate_dimensions(points):
    """Return the standard universe size (400..2000) covering *points*.

    The extent is measured over x and y coordinates together and snapped
    up to the nearest standard size; anything above 1600 becomes 2000.
    """
    max_across = 0
    min_across = 3000
    for x, y in points:
        min_across = min(min_across, x, y)
        max_across = max(max_across, x, y)
    universe_length = max_across - min_across
    # Snap to the first standard size that covers the measured extent.
    for size in (400, 800, 1200, 1600):
        if universe_length <= size:
            return size
    return 2000
def get_bucket(point):
    """Map a universe point to its 100x100 density-grid cell (i, j).

    Floor division keeps the result an integer pair usable as list
    indices; the original ``/`` returns floats under Python 3 and would
    break indexing in build_density_buckets.
    """
    (x, y) = point
    # The grid origin is at (1000, 1000) in universe coordinates.
    x -= 1000
    y -= 1000
    return (x // 100, y // 100)
def distance(p1, p2):
    """Euclidean distance between two 2-D points."""
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    return math.sqrt(dx ** 2 + dy ** 2)
def build_density_buckets(universe_size, points):
    """Return a 2-D grid counting how many points fall into each
    100x100 cell of a *universe_size* square.

    Floor division keeps the bucket count an int: the original
    ``universe_size / 100`` yields a float under Python 3, which
    ``range()`` rejects.
    """
    expected_buckets = universe_size // 100
    density_buckets = [[0] * expected_buckets for _ in range(expected_buckets)]
    for point in points:
        (i, j) = get_bucket(point)
        density_buckets[i][j] += 1
    return density_buckets
def get_universe_bounds(points):
    """Return (min_x, max_x, min_y, max_y) over *points*.

    Bounds are seeded with 3000 (minima) and 0 (maxima) exactly like the
    original accumulator loop, so an empty list yields (3000, 0, 3000, 0).
    """
    xs = [p[0] for p in points]
    ys = [p[1] for p in points]
    return (min([3000] + xs), max([0] + xs),
            min([3000] + ys), max([0] + ys))
def get_planet_distances(points):
    """Return the distance between every unordered pair of points.

    ``marked`` remembers which partners a point has already been paired
    with so each pair is measured once.  Sets replace the original lists:
    membership tests drop from O(n) to O(1), turning the overall cost
    from O(n^3) to O(n^2) without changing the output order.
    """
    distances = []
    marked = {}
    for p in points:
        marked[p] = {p}
    for p in points:
        for q in points:
            if q not in marked[p]:
                distances.append(distance(p, q))
                marked[p].add(q)
                marked[q].add(p)
    return distances
def main(argv):
    """Parse star-map files, compute density and distance statistics, and
    append the results to histo.csv and out.csv.
    """
    # argv is unused: argparse reads sys.argv itself.
    parser = argparse.ArgumentParser()
    parser.add_argument('maps', nargs="+")
    parser.add_argument("-v", "--verbose", help="increase output verbosity",
                        action="store_true")
    args = parser.parse_args()
    logging.basicConfig(
        format='[map_stats:%(levelname)s:%(asctime)s] %(message)s')
    if(args.verbose):
        logging.getLogger().setLevel(logging.DEBUG)
    else:
        logging.getLogger().setLevel(logging.INFO)
    for map in args.maps:
        with open(map, "r") as m:
            logging.debug("Parsing {0!s}".format(map))
            points = []
            # Map format: whitespace-separated columns with x in column 1
            # and y in column 2; '#' lines are comments.
            for l in m:
                line = l.strip()
                if not(line.startswith("#")):
                    line_args = line.split()
                    x = int(line_args[1])
                    y = int(line_args[2])
                    points.append((x, y))
            universe_size = calculate_dimensions(points)
            num_planets = len(points)
            total_expected_buckets = (universe_size ** 2) / (100 ** 2)
            universe_density = (num_planets * 1.0) / total_expected_buckets
            density_buckets = build_density_buckets(universe_size, points)
            (min_x, max_x, min_y, max_y) = get_universe_bounds(points)
            # NOTE(review): min()/sum() below assume at least two points
            # per map -- an empty map would raise; confirm inputs.
            planet_distances = get_planet_distances(points)
            min_planet_distance = min(planet_distances)
            avg_distances = sum(planet_distances) / len(planet_distances)
            std_dev_distances = np.std(np.array(planet_distances))
            # average of non-zero density squares
            non_zero_buckets = []
            zero_bucket_count = 0
            for b in density_buckets:
                for y in b:
                    if y == 0:
                        zero_bucket_count += 1
                    else:
                        non_zero_buckets.append(y)
            avg_non_zero_buckets = (sum(non_zero_buckets) * 1.0) / len(non_zero_buckets)
            max_non_zero_buckets = max(non_zero_buckets)
            logging.debug("Size of Universe = {0!s}".format(universe_size))
            logging.debug("Num Planets = {0!s}".format(num_planets))
            logging.debug("Density per 100 square light year = {0!s}".format(
                universe_density))
            logging.debug("Density map: ")
            for b in density_buckets:
                logging.debug(b)
            logging.debug("Density numbers: ")
            density_values = []
            for b in density_buckets:
                for j in b:
                    density_values.append(j)
            c = Counter(density_values)
            # Histogram of bucket occupancy counts 0..8, appended per map.
            # NOTE(review): xrange implies this script targets Python 2.
            with open("histo.csv", "a") as f:
                for i in xrange(9):
                    if i in c.keys():
                        f.write("{0!s},".format(c[i]))
                    else:
                        f.write("0,")
                f.write("0\n")
            logging.debug("Min x: {0!s}, Max x: {1!s}".format(
                min_x, max_x))
            logging.debug("Min y: {0!s}, Max y: {1!s}".format(
                min_y, max_y))
            logging.debug("Min distance = {0!s}".format(
                min_planet_distance))
            logging.debug("Average distance = {0!s}".format(avg_distances))
            logging.debug("Standard deviation = {0!s}".format(std_dev_distances))
            # Write the CSV header once, on first run only.
            if(not os.path.exists("out.csv")):
                with open("out.csv", "a") as f:
                    f.write("{0!s},{1!s},{2!s},{3!s},{4!s},{5!s},{6!s},{7!s},{8!s},{9!s},{10!s},{11!s},{12!s},{13!s}\n".format(
                        "path", "universe_size", "num_planets",
                        "universe_density", "min_x", "max_x", "min_y", "max_y",
                        "min_planet_distance", "avg_distance",
                        "std_dev_distance", "zero_bucket_count",
                        "avg_non_zero_buckets", "max_non_zero_buckets"))
            with open("out.csv", "a") as f:
                f.write("{0!s},{1!s},{2!s},{3!s},{4!s},{5!s},{6!s},{7!s},{8!s},{9!s},{10!s},{11!s},{12!s},{13!s}\n".format(
                    map, universe_size, num_planets, universe_density,
                    min_x, max_x, min_y, max_y,
                    min_planet_distance, avg_distances, std_dev_distances,
                    zero_bucket_count, avg_non_zero_buckets,
                    max_non_zero_buckets))
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._queue_operations import build_create_request, build_delete_request, build_get_request, build_list_request, build_update_request
T = TypeVar('T')
# Optional per-call callback transforming (pipeline_response, deserialized, headers).
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class QueueOperations:
    """QueueOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.storage.v2021_04_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE: AutoRest-generated code -- manual edits will be lost when the
    # client is regenerated (see the file header).
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace_async
    async def create(
        self,
        resource_group_name: str,
        account_name: str,
        queue_name: str,
        queue: "_models.StorageQueue",
        **kwargs: Any
    ) -> "_models.StorageQueue":
        """Creates a new queue with the specified queue name, under the specified account.
        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :param queue_name: A queue name must be unique within a storage account and must be between 3
         and 63 characters.The name must comprise of lowercase alphanumeric and dash(-) characters only,
         it should begin and end with an alphanumeric character and it cannot have two consecutive
         dash(-) characters.
        :type queue_name: str
        :param queue: Queue properties and metadata to be created with.
        :type queue: ~azure.mgmt.storage.v2021_04_01.models.StorageQueue
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: StorageQueue, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2021_04_01.models.StorageQueue
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.StorageQueue"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(queue, 'StorageQueue')
        request = build_create_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            subscription_id=self._config.subscription_id,
            queue_name=queue_name,
            content_type=content_type,
            json=_json,
            template_url=self.create.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 is the only success status handled here.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('StorageQueue', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues/{queueName}'}  # type: ignore
    @distributed_trace_async
    async def update(
        self,
        resource_group_name: str,
        account_name: str,
        queue_name: str,
        queue: "_models.StorageQueue",
        **kwargs: Any
    ) -> "_models.StorageQueue":
        """Creates a new queue with the specified queue name, under the specified account.
        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :param queue_name: A queue name must be unique within a storage account and must be between 3
         and 63 characters.The name must comprise of lowercase alphanumeric and dash(-) characters only,
         it should begin and end with an alphanumeric character and it cannot have two consecutive
         dash(-) characters.
        :type queue_name: str
        :param queue: Queue properties and metadata to be created with.
        :type queue: ~azure.mgmt.storage.v2021_04_01.models.StorageQueue
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: StorageQueue, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2021_04_01.models.StorageQueue
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.StorageQueue"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(queue, 'StorageQueue')
        request = build_update_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            subscription_id=self._config.subscription_id,
            queue_name=queue_name,
            content_type=content_type,
            json=_json,
            template_url=self.update.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('StorageQueue', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues/{queueName}'}  # type: ignore
    @distributed_trace_async
    async def get(
        self,
        resource_group_name: str,
        account_name: str,
        queue_name: str,
        **kwargs: Any
    ) -> "_models.StorageQueue":
        """Gets the queue with the specified queue name, under the specified account if it exists.
        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :param queue_name: A queue name must be unique within a storage account and must be between 3
         and 63 characters.The name must comprise of lowercase alphanumeric and dash(-) characters only,
         it should begin and end with an alphanumeric character and it cannot have two consecutive
         dash(-) characters.
        :type queue_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: StorageQueue, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2021_04_01.models.StorageQueue
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.StorageQueue"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_get_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            subscription_id=self._config.subscription_id,
            queue_name=queue_name,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('StorageQueue', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues/{queueName}'}  # type: ignore
    @distributed_trace_async
    async def delete(
        self,
        resource_group_name: str,
        account_name: str,
        queue_name: str,
        **kwargs: Any
    ) -> None:
        """Deletes the queue with the specified queue name, under the specified account if it exists.
        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :param queue_name: A queue name must be unique within a storage account and must be between 3
         and 63 characters.The name must comprise of lowercase alphanumeric and dash(-) characters only,
         it should begin and end with an alphanumeric character and it cannot have two consecutive
         dash(-) characters.
        :type queue_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_delete_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            subscription_id=self._config.subscription_id,
            queue_name=queue_name,
            template_url=self.delete.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Delete succeeds with 204 (no body), unlike the other operations.
        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues/{queueName}'}  # type: ignore
    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        account_name: str,
        maxpagesize: Optional[str] = None,
        filter: Optional[str] = None,
        **kwargs: Any
    ) -> AsyncIterable["_models.ListQueueResource"]:
        """Gets a list of all the queues under the specified storage account.
        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :param maxpagesize: Optional, a maximum number of queues that should be included in a list
         queue response.
        :type maxpagesize: str
        :param filter: Optional, When specified, only the queues with a name starting with the given
         filter will be listed.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ListQueueResource or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.storage.v2021_04_01.models.ListQueueResource]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListQueueResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # First page uses the operation URL; subsequent pages follow next_link.
            if not next_link:
                request = build_list_request(
                    resource_group_name=resource_group_name,
                    account_name=account_name,
                    subscription_id=self._config.subscription_id,
                    maxpagesize=maxpagesize,
                    filter=filter,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    resource_group_name=resource_group_name,
                    account_name=account_name,
                    subscription_id=self._config.subscription_id,
                    maxpagesize=maxpagesize,
                    filter=filter,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("ListQueueResource", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues'}  # type: ignore
| |
"""Contains miscellaneous helpers"""
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from future.moves.urllib import request
from future.utils import PY2
import logging
import ast
import copy
import hashlib
import locale
import operator
import os
import re
import sys
from collections import MutableMapping, defaultdict
from datetime import timedelta, datetime
from pprint import pformat
import flexget
import queue
import requests
from html.entities import name2codepoint
log = logging.getLogger('utils')
def str_to_boolean(string):
    """Return True when *string* (case-insensitive) is one of the common
    truthy words: 'true', '1', 't', 'y', 'yes'."""
    truthy = {'true', '1', 't', 'y', 'yes'}
    return string.lower() in truthy
def str_to_int(string):
    """Parse *string* as an int, ignoring thousand-separator commas; return None if it is not a number."""
    cleaned = string.replace(',', '')
    try:
        return int(cleaned)
    except ValueError:
        return None
# `native_str_to_text` converts the interpreter's *native* `str` to text:
# on Python 2 the native str is bytes and must be decoded (default 'ascii');
# on Python 3 the native str already is text and passes through unchanged.
if PY2:
    def native_str_to_text(string, **kwargs):
        """Decode a Python 2 native `str` (bytes) to unicode text.

        Extra keyword arguments are forwarded to ``bytes.decode``;
        ``encoding`` defaults to 'ascii' when not supplied.
        """
        if 'encoding' not in kwargs:
            kwargs['encoding'] = 'ascii'
        return string.decode(**kwargs)
else:
    def native_str_to_text(string, **kwargs):
        """On Python 3 the native `str` is already text; return it unchanged."""
        return string
def convert_bytes(bytes):
    """Returns given bytes as prettified string (e.g. ``2048`` -> ``'2.00K'``)."""
    value = float(bytes)
    # Largest unit first; fall through to plain bytes when below 1 KiB.
    for threshold, suffix in (
        (1099511627776, 'T'),
        (1073741824, 'G'),
        (1048576, 'M'),
        (1024, 'K'),
    ):
        if value >= threshold:
            return '%.2f%s' % (value / threshold, suffix)
    return '%.2fb' % value
class MergeException(Exception):
    """Raised by :func:`merge_dict_from_to` when two values have conflicting types and cannot be merged."""

    def __init__(self, value):
        # Human-readable description of the failed merge.
        self.value = value

    def __str__(self):
        return repr(self.value)
def strip_html(text):
    """Tries to strip all HTML tags from *text*. If unsuccessful returns original text."""
    from bs4 import BeautifulSoup

    try:
        # Collect only the text nodes, then collapse all runs of whitespace.
        fragments = BeautifulSoup(text).find_all(text=True)
        joined = ' '.join(fragments)
        return ' '.join(joined.split())
    except Exception:
        return text
# This pattern matches a character entity reference (a decimal numeric
# references, a hexadecimal numeric reference, or a named reference).
charrefpat = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?')
def _htmldecode(text):
"""Decode HTML entities in the given text."""
# From screpe.py - licensed under apache 2.0 .. should not be a problem for a MIT afaik
if isinstance(text, str):
uchr = chr
else:
def uchr(value):
value > 127 and chr(value) or chr(value)
def entitydecode(match, uchr=uchr):
entity = match.group(1)
if entity.startswith('#x'):
return uchr(int(entity[2:], 16))
elif entity.startswith('#'):
return uchr(int(entity[1:]))
elif entity in name2codepoint:
return uchr(name2codepoint[entity])
else:
return match.group(0)
return charrefpat.sub(entitydecode, text)
def decode_html(value):
    """
    Decode HTML character entities in *value*; thin wrapper over :func:`_htmldecode`.

    :param string value: String to be html-decoded
    :returns: Html decoded string
    """
    return _htmldecode(value)
def encode_html(unicode_data, encoding='ascii'):
    """
    Encode unicode_data for use as XML or HTML, with characters outside
    of the encoding converted to XML numeric character references.

    :param unicode_data: text to encode
    :param encoding: target codec name (default 'ascii')
    :returns: encoded bytes with unencodable characters expressed as
        numeric character references
    """
    try:
        return unicode_data.encode(encoding, 'xmlcharrefreplace')
    except ValueError:
        # ValueError is raised if there are unencodable chars in the
        # data and the 'xmlcharrefreplace' error handler is not found.
        # Pre-2.3 Python doesn't support the 'xmlcharrefreplace' error
        # handler, so we'll emulate it.
        return _xmlcharref_encode(unicode_data, encoding)
def _xmlcharref_encode(unicode_data, encoding):
"""Emulate Python 2.3's 'xmlcharrefreplace' encoding error handler."""
chars = []
# Phase through the unicode_data string one character at a time in
# order to catch unencodable characters:
for char in unicode_data:
try:
chars.append(char.encode(encoding, 'strict'))
except UnicodeError:
chars.append('&#%i;' % ord(char))
return ''.join(chars)
def merge_dict_from_to(d1, d2):
    """Merges dictionary d1 into dictionary d2. d1 will remain in original form.

    Nested dicts are merged recursively, lists are extended with deep copies,
    and scalar values already present in d2 are kept (d2 wins).

    :raises MergeException: when a key exists in both dicts with
        incompatible/conflicting types
    """
    for k, v in d1.items():
        if k in d2:
            # NOTE: isinstance(v, type(d2[k])) makes this check asymmetric —
            # e.g. a bool value merges onto an int target (bool subclasses
            # int) but not the other way around.
            if isinstance(v, type(d2[k])):
                if isinstance(v, dict):
                    merge_dict_from_to(d1[k], d2[k])
                elif isinstance(v, list):
                    d2[k].extend(copy.deepcopy(v))
                elif isinstance(v, (str, bool, int, float, type(None))):
                    # Same scalar type on both sides: keep the value in d2.
                    pass
                else:
                    raise Exception(
                        'Unknown type: %s value: %s in dictionary' % (type(v), repr(v))
                    )
            elif isinstance(v, (str, bool, int, float, type(None))) and isinstance(
                d2[k], (str, bool, int, float, type(None))
            ):
                # Allow overriding of non-container types with other non-container types
                pass
            else:
                raise MergeException(
                    'Merging key %s failed, conflicting datatypes %r vs. %r.'
                    % (k, type(v).__name__, type(d2[k]).__name__)
                )
        else:
            # Key only in d1: copy it over without sharing mutable state.
            d2[k] = copy.deepcopy(v)
class SmartRedirectHandler(request.HTTPRedirectHandler):
    """Redirect handler that records the original 301/302 code on the response.

    The stdlib handler follows redirects transparently; this subclass stamps
    the redirect status onto the returned response as ``status`` so callers
    can tell that a redirect happened.
    """

    def http_error_301(self, req, fp, code, msg, headers):
        result = request.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
        result.status = code
        return result

    def http_error_302(self, req, fp, code, msg, headers):
        result = request.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
        result.status = code
        return result
class ReList(list):
    """
    A list that stores regexps.

    You can add compiled or uncompiled regexps to the list.
    It will always return the compiled version.
    It will compile the text regexps on demand when first accessed.
    """

    # Set the default flags
    flags = re.IGNORECASE | re.UNICODE

    def __init__(self, *args, **kwargs):
        """Optional :flags: keyword argument with regexp flags to compile with"""
        if 'flags' in kwargs:
            self.flags = kwargs['flags']
            del kwargs['flags']
        list.__init__(self, *args, **kwargs)

    def __getitem__(self, k):
        # Compile lazily on first access and cache the compiled pattern back
        # into the list so later accesses skip recompilation.
        item = list.__getitem__(self, k)
        if isinstance(item, str):
            # Bug fix: compile with the instance's configured flags instead
            # of the hard-coded defaults — previously the ``flags=`` keyword
            # accepted by __init__ was silently ignored.
            item = re.compile(item, self.flags)
            self[k] = item
        return item

    def __iter__(self):
        # Iterate via __getitem__ so every yielded item is compiled.
        for i in range(len(self)):
            yield self[i]
# Determine the encoding for io
# Preference order: sys.stdout's own encoding, then the locale's preferred
# encoding, then a 'utf8' fallback; the result is normalized to lowercase.
io_encoding = None
if hasattr(sys.stdout, 'encoding'):
    io_encoding = sys.stdout.encoding
if not io_encoding:
    try:
        io_encoding = locale.getpreferredencoding()
    except Exception:
        pass
if not io_encoding:
    # Default to utf8 if nothing can be determined
    io_encoding = 'utf8'
else:
    # Normalize the encoding
    io_encoding = io_encoding.lower()
    if io_encoding == 'cp65001':
        # cp65001 is the Windows code page name for UTF-8.
        io_encoding = 'utf8'
    elif io_encoding in ['us-ascii', '646', 'ansi_x3.4-1968']:
        # Common platform aliases for plain ASCII.
        io_encoding = 'ascii'
def parse_timedelta(value):
    """Parse a string like '5 days' into a timedelta object. Also allows timedeltas to pass through.

    :param value: a `timedelta` (returned as-is), a falsy value (yields a zero
        timedelta), or a string of the form '<amount> <unit>' e.g. '3 hours'
    :returns: the corresponding `timedelta`
    :raises ValueError: if *value* cannot be interpreted as a time span
    """
    if isinstance(value, timedelta):
        # Allow timedelta objects to pass through
        return value
    if not value:
        # If no time is given, default to 0
        return timedelta()
    try:
        amount, unit = value.lower().split(' ')
    except ValueError:
        # Robustness fix: input without exactly one space ('5days',
        # '1  hour') previously escaped as a bare unpacking error; report
        # it with the same message style as other bad input.
        raise ValueError('Invalid time format \'%s\'' % value)
    # Make sure unit name is plural.
    if not unit.endswith('s'):
        unit += 's'
    params = {unit: float(amount)}
    try:
        return timedelta(**params)
    except TypeError:
        # timedelta() rejects unknown keyword units (e.g. 'fortnights').
        raise ValueError('Invalid time format \'%s\'' % value)
def timedelta_total_seconds(td):
    """replaces python 2.7+ timedelta.total_seconds()"""
    # TODO: Remove this when we no longer support python 2.6
    total_seconds = getattr(td, 'total_seconds', None)
    if total_seconds is not None:
        return total_seconds()
    # Python 2.6 fallback: compute manually from the components.
    return (td.days * 24 * 3600) + td.seconds + (td.microseconds / 1000000)
def multiply_timedelta(interval, number):
    """`timedelta`s can not normally be multiplied by floating points. This does that."""
    scaled_seconds = timedelta_total_seconds(interval) * number
    return timedelta(seconds=scaled_seconds)
# Platform-specific `pid_exists`: POSIX probes with signal 0, Windows asks the
# kernel for the process exit code.
if os.name == 'posix':
    def pid_exists(pid):
        """Check whether pid exists in the current process table."""
        import errno
        if pid < 0:
            return False
        try:
            # Signal 0 performs permission/existence checks without
            # actually delivering a signal.
            os.kill(pid, 0)
        except OSError as e:
            # EPERM means the process exists but belongs to another user;
            # any other error (e.g. ESRCH) means it does not exist.
            return e.errno == errno.EPERM
        else:
            return True
else:
    def pid_exists(pid):
        """Check whether *pid* is a running process, via the Win32 API."""
        import ctypes
        import ctypes.wintypes
        kernel32 = ctypes.windll.kernel32
        PROCESS_QUERY_INFORMATION = 0x0400
        STILL_ACTIVE = 259
        handle = kernel32.OpenProcess(PROCESS_QUERY_INFORMATION, 0, pid)
        if handle == 0:
            # OpenProcess failed: no such process (or no access).
            return False
        # If the process exited recently, a pid may still exist for the handle.
        # So, check if we can get the exit code.
        exit_code = ctypes.wintypes.DWORD()
        is_running = kernel32.GetExitCodeProcess(handle, ctypes.byref(exit_code)) == 0
        kernel32.CloseHandle(handle)
        # See if we couldn't get the exit code or the exit code indicates that the
        # process is still running.
        return is_running or exit_code.value == STILL_ACTIVE
# Supported binary operators for arithmeticEval, keyed by AST node type.
_binOps = {
    ast.Add: operator.add,
    ast.Sub: operator.sub,
    ast.Mult: operator.mul,
    ast.Div: operator.truediv,
    ast.Mod: operator.mod,
}


def arithmeticEval(s):
    """
    A safe eval supporting basic arithmetic operations.

    :param s: expression to evaluate
    :return: value
    :raises Exception: if the expression contains anything other than
        literals and the operators in `_binOps`
    """
    node = ast.parse(s, mode='eval')

    def _eval(node):
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        # Compatibility fix: ast.Str / ast.Num were deprecated in 3.8 and
        # removed in Python 3.12; literals are ast.Constant on modern
        # interpreters. Probe with getattr so old node classes are still
        # handled where they exist (Constant first, so the deprecated
        # isinstance checks are never reached on 3.8+).
        for cls_name, attr in (('Constant', 'value'), ('Str', 's'), ('Num', 'n')):
            cls = getattr(ast, cls_name, None)
            if cls is not None and isinstance(node, cls):
                return getattr(node, attr)
        if isinstance(node, ast.BinOp):
            return _binOps[type(node.op)](_eval(node.left), _eval(node.right))
        raise Exception('Unsupported type {}'.format(node))

    return _eval(node.body)
class TimedDict(MutableMapping):
    """Acts like a normal dict, but keys will only remain in the dictionary for a specified time span."""

    def __init__(self, cache_time='5 minutes'):
        self.cache_time = parse_timedelta(cache_time)
        self._store = dict()
        self._last_prune = datetime.now()

    def _prune(self):
        """Prune all expired keys."""
        cutoff = datetime.now() - self.cache_time
        expired = [key for key, (added, _) in self._store.items() if added < cutoff]
        for key in expired:
            del self._store[key]
        self._last_prune = datetime.now()

    def __getitem__(self, key):
        added, value = self._store[key]
        # Evict lazily: an expired entry is removed on access and reported missing.
        if added < datetime.now() - self.cache_time:
            del self._store[key]
            raise KeyError(key, 'cache time expired')
        return value

    def __setitem__(self, key, value):
        # Make sure we clear periodically, even if old keys aren't accessed again
        if self._last_prune < datetime.now() - (2 * self.cache_time):
            self._prune()
        self._store[key] = (datetime.now(), value)

    def __delitem__(self, key):
        del self._store[key]

    def __iter__(self):
        # `key in self` routes through __getitem__, so expired entries are
        # both skipped and evicted while iterating.
        return (key for key in list(self._store) if key in self)

    def __len__(self):
        return sum(1 for _ in iter(self))

    def __repr__(self):
        contents = dict(zip(self._store, (stored[1] for stored in self._store.values())))
        return '%s(%r)' % (self.__class__.__name__, contents)
class BufferQueue(queue.Queue):
    """Used in place of a file-like object to capture text and access it safely from another thread."""

    # Allow access to the Empty error from here
    Empty = queue.Empty

    def write(self, line):
        # File-like API: producers call write(); consumers drain with
        # the inherited get()/get_nowait().
        self.put(line)
def singleton(cls):
    """Class decorator: every call returns one shared instance of *cls*.

    The first call's arguments construct the instance; later calls ignore
    their arguments and return the cached object.
    """
    instances = {}

    def getinstance(*args, **kwargs):
        try:
            return instances[cls]
        except KeyError:
            instances[cls] = cls(*args, **kwargs)
            return instances[cls]

    return getinstance
def split_title_year(title):
    """Splits title containing a year into a title, year pair."""
    if not title:
        return
    if not re.search(r'\d{4}', title):
        # No 4-digit run at all, so there is nothing year-like to split off.
        return title, None
    # We only recognize years from the 2nd and 3rd millennium, FlexGetters from the year 3000 be damned!
    match = re.search(r'(.*?)\(?([12]\d{3})?\)?$', title)
    stripped_title = match.group(1).strip()
    year_text = match.group(2)
    if year_text and not stripped_title:
        # The whole title looks like a year, '2020' for example.
        return year_text, None
    elif stripped_title and not year_text:
        return stripped_title, None
    else:
        return stripped_title, int(year_text)
def get_latest_flexget_version_number():
    """
    Return latest Flexget version from https://pypi.python.org/pypi/FlexGet/json

    Returns None when the PyPI request fails or the payload lacks a version.
    """
    try:
        data = requests.get('https://pypi.python.org/pypi/FlexGet/json').json()
        return data.get('info', {}).get('version')
    except requests.RequestException:
        # Network problems are non-fatal; the caller just gets None.
        return
def get_current_flexget_version():
    """Return the version string of the running FlexGet package."""
    return flexget.__version__
def parse_filesize(text_size, si=True):
    """
    Parses a data size and returns its value in mebibytes

    :param string text_size: string containing the data size to parse i.e. "5 GB"
    :param bool si: If True, possibly ambiguous units like KB, MB, GB will be assumed to be base 10 units,
    rather than the default base 2. i.e. if si then 50 GB = 47684 else 50GB = 51200
    :returns: an float with the data size in mebibytes
    :raises ValueError: if *text_size* does not look like a data size
    """
    prefix_order = {'': 0, 'k': 1, 'm': 2, 'g': 3, 't': 4, 'p': 5}
    # Fix: use a raw string for the pattern — the previous non-raw '\d'/'\s'
    # escapes are invalid string escapes that raise SyntaxWarning (and will
    # eventually error) on modern Python. The pattern itself is unchanged.
    parsed_size = re.match(
        r'(\d+(?:[.,\s]\d+)*)(?:\s*)((?:[ptgmk]i?)?b)', text_size.strip().lower(), flags=re.UNICODE
    )
    if not parsed_size:
        raise ValueError('%s does not look like a file size' % text_size)
    amount = parsed_size.group(1)
    unit = parsed_size.group(2)
    # Defensive: the regex already guarantees a trailing 'b'.
    if not unit.endswith('b'):
        raise ValueError('%s does not look like a file size' % text_size)
    unit = unit.rstrip('b')
    if unit.endswith('i'):
        # An 'i' infix (KiB, MiB, ...) is explicitly binary and overrides `si`.
        si = False
        unit = unit.rstrip('i')
    if unit not in prefix_order:
        raise ValueError('%s does not look like a file size' % text_size)
    order = prefix_order[unit]
    # Strip thousand separators / spaces before converting.
    amount = float(amount.replace(',', '').replace(' ', ''))
    base = 1000 if si else 1024
    # Scale to bytes, then express the result in mebibytes.
    return (amount * (base ** order)) / 1024 ** 2
def get_config_hash(config):
    """
    :param dict config: Configuration
    :return: MD5 hash for *config*
    """
    if isinstance(config, (dict, list)):
        # this does in fact support nested dicts, they're sorted too!
        serialized = pformat(config)
    else:
        serialized = str(config)
    return hashlib.md5(serialized.encode('utf-8')).hexdigest()
def get_config_as_array(config, key):
    """
    Return configuration key as array, even if given as a single string

    :param dict config: Configuration
    :param string key: Configuration
    :return: Array
    """
    value = config.get(key, [])
    return [value] if isinstance(value, str) else value
def parse_episode_identifier(ep_id, identify_season=False):
    """
    Parses series episode identifier, raises ValueError if it fails

    :param ep_id: Value to parse
    :param identify_season: Also accept bare season identifiers like ``S02``
    :return: Return identifier type: `sequence`, `ep` or `date`
    :raises ValueError: If ep_id does not match any valid types
    """
    error = None
    identified_by = None
    entity_type = 'episode'
    if isinstance(ep_id, int):
        identified_by = 'sequence'
        if ep_id <= 0:
            error = 'sequence type episode must be higher than 0'
    elif re.match(r'(?i)^S\d{1,4}E\d{1,3}$', ep_id):
        identified_by = 'ep'
    elif re.match(r'(?i)^S\d{1,4}$', ep_id) and identify_season:
        identified_by = 'ep'
        entity_type = 'season'
    elif re.match(r'\d{4}-\d{2}-\d{2}', ep_id):
        identified_by = 'date'
    else:
        # Check if a sequence identifier was passed as a string
        try:
            as_number = int(ep_id)
        except ValueError:
            error = '`%s` is not a valid episode identifier.' % ep_id
        else:
            identified_by = 'sequence'
            if as_number <= 0:
                error = 'sequence type episode must be higher than 0'
    if error:
        raise ValueError(error)
    return (identified_by, entity_type)
def group_entries(entries, identifier):
    """Group *entries* by the rendered value of the *identifier* template.

    Entries whose template fails to render, or renders empty, are skipped.
    """
    from flexget.utils.template import RenderError

    grouped_entries = defaultdict(list)
    # Group by Identifier
    for entry in entries:
        try:
            rendered_id = entry.render(identifier)
        except RenderError:
            continue
        if rendered_id:
            grouped_entries[rendered_id.lower().strip()].append(entry)
    return grouped_entries
def aggregate_inputs(task, inputs):
    """Run every configured input plugin and return the combined entries.

    Duplicates are dropped across plugins: an entry is skipped when any of
    its URLs, its title, or its location was already produced by an earlier
    plugin. Plugin errors and empty results are logged and skipped.
    """
    from flexget import plugin

    entries = []
    entry_titles = set()
    entry_urls = set()
    entry_locations = set()
    for item in inputs:
        for input_name, input_config in item.items():
            input = plugin.get_plugin_by_name(input_name)
            method = input.phase_handlers['input']
            try:
                result = method(task, input_config)
            except plugin.PluginError as e:
                log.warning('Error during input plugin %s: %s', input_name, e)
                continue
            if not result:
                log.warning('Input %s did not return anything', input_name)
                continue
            for entry in result:
                # Both the single 'url' and the 'urls' list participate in
                # duplicate detection.
                urls = ([entry['url']] if entry.get('url') else []) + entry.get('urls', [])
                if any(url in entry_urls for url in urls):
                    log.debug('URL for `%s` already in entry list, skipping.', entry['title'])
                    continue
                if entry['title'] in entry_titles:
                    log.debug(
                        'Ignored duplicate title `%s`', entry['title']
                    )  # TODO: should combine?
                    continue
                if entry.get('location') and entry['location'] in entry_locations:
                    log.debug(
                        'Ignored duplicate location `%s`', entry['location']
                    )  # TODO: should combine?
                    continue
                entries.append(entry)
                entry_titles.add(entry['title'])
                entry_urls.update(urls)
                if entry.get('location'):
                    entry_locations.add(entry['location'])
    return entries
# Mainly used due to Too Many Variables error if we use too many variables at a time in the in_ clause.
# SQLite supports up to 999 by default. Ubuntu, Arch and macOS set this limit to 250,000 though, so it's a rare issue.
def chunked(seq, limit=900):
    """Helper to divide our expired lists into sizes sqlite can handle in a query. (<1000)"""
    start = 0
    while start < len(seq):
        yield seq[start:start + limit]
        start += limit
| |
# -*- coding=utf-8 -*-
from __future__ import absolute_import, unicode_literals
import functools
import io
import os
import sys
from tempfile import _bin_openflags, _mkstemp_inner, gettempdir
import six
try:
from weakref import finalize
except ImportError:
from pipenv.vendor.backports.weakref import finalize
def fs_encode(path):
    """Encode *path* with the filesystem encoding, returning bytes.

    Falls back to the vendored shim on interpreters without os.fsencode.
    """
    fsencode = getattr(os, 'fsencode', None)
    if fsencode is None:
        from ..compat import fs_encode as fallback
        return fallback(path)
    return fsencode(path)
def fs_decode(path):
    """Decode *path* with the filesystem encoding, returning str.

    Falls back to the vendored shim on interpreters without os.fsdecode.
    """
    fsdecode = getattr(os, 'fsdecode', None)
    if fsdecode is None:
        from ..compat import fs_decode as fallback
        return fallback(path)
    return fsdecode(path)
__all__ = ["finalize", "NamedTemporaryFile"]
try:
from tempfile import _infer_return_type
except ImportError:
def _infer_return_type(*args):
_types = set()
for arg in args:
if isinstance(type(arg), six.string_types):
_types.add(str)
elif isinstance(type(arg), bytes):
_types.add(bytes)
elif arg:
_types.add(type(arg))
return _types.pop()
def _sanitize_params(prefix, suffix, dir):
"""Common parameter processing for most APIs in this module."""
output_type = _infer_return_type(prefix, suffix, dir)
if suffix is None:
suffix = output_type()
if prefix is None:
if output_type is str:
prefix = "tmp"
else:
prefix = os.fsencode("tmp")
if dir is None:
if output_type is str:
dir = gettempdir()
else:
dir = fs_encode(gettempdir())
return prefix, suffix, dir, output_type
class _TemporaryFileCloser:
    """A separate object allowing proper closing of a temporary file's
    underlying file object, without adding a __del__ method to the
    temporary file."""

    file = None  # Set here since __del__ checks it
    close_called = False

    def __init__(self, file, name, delete=True):
        # `file` is the already-open file object, `name` its on-disk path;
        # when `delete` is true the file is removed once closed.
        self.file = file
        self.name = name
        self.delete = delete

    # NT provides delete-on-close as a primitive, so we don't need
    # the wrapper to do anything special. We still use it so that
    # file.name is useful (i.e. not "(fdopen)") with NamedTemporaryFile.
    if os.name != "nt":
        # Cache the unlinker so we don't get spurious errors at
        # shutdown when the module-level "os" is None'd out. Note
        # that this must be referenced as self.unlink, because the
        # name TemporaryFileWrapper may also get None'd out before
        # __del__ is called.
        def close(self, unlink=os.unlink):
            # Close at most once; unlink only after a successful or failed
            # close attempt (the finally guarantees deletion either way).
            if not self.close_called and self.file is not None:
                self.close_called = True
                try:
                    self.file.close()
                finally:
                    if self.delete:
                        unlink(self.name)

        # Need to ensure the file is deleted on __del__
        def __del__(self):
            self.close()
    else:
        def close(self):
            # On Windows the file was opened with O_TEMPORARY, so the OS
            # deletes it when the handle closes — no explicit unlink here.
            if not self.close_called:
                self.close_called = True
                self.file.close()
class _TemporaryFileWrapper:
    """Temporary file wrapper

    This class provides a wrapper around files opened for
    temporary use. In particular, it seeks to automatically
    remove the file when it is no longer needed.
    """

    def __init__(self, file, name, delete=True):
        self.file = file
        self.name = name
        self.delete = delete
        # The closer owns the close/unlink logic so this wrapper needs no
        # __del__ of its own.
        self._closer = _TemporaryFileCloser(file, name, delete)

    def __getattr__(self, name):
        # Attribute lookups are delegated to the underlying file
        # and cached for non-numeric results
        # (i.e. methods are cached, closed and friends are not)
        file = self.__dict__["file"]
        a = getattr(file, name)
        if hasattr(a, "__call__"):
            func = a

            @functools.wraps(func)
            def func_wrapper(*args, **kwargs):
                return func(*args, **kwargs)

            # Avoid closing the file as long as the wrapper is alive,
            # see issue #18879.
            func_wrapper._closer = self._closer
            a = func_wrapper
        if not isinstance(a, int):
            # Cache on the instance so __getattr__ is only hit once per name.
            setattr(self, name, a)
        return a

    # The underlying __enter__ method returns the wrong object
    # (self.file) so override it to return the wrapper
    def __enter__(self):
        self.file.__enter__()
        return self

    # Need to trap __exit__ as well to ensure the file gets
    # deleted when used in a with statement
    def __exit__(self, exc, value, tb):
        result = self.file.__exit__(exc, value, tb)
        self.close()
        return result

    def close(self):
        """
        Close the temporary file, possibly deleting it.
        """
        self._closer.close()

    # iter() doesn't use __getattr__ to find the __iter__ method
    def __iter__(self):
        # Don't return iter(self.file), but yield from it to avoid closing
        # file as long as it's being used as iterator (see issue #23700). We
        # can't use 'yield from' here because iter(file) returns the file
        # object itself, which has a close method, and thus the file would get
        # closed when the generator is finalized, due to PEP380 semantics.
        for line in self.file:
            yield line
def NamedTemporaryFile(
    mode="w+b",
    buffering=-1,
    encoding=None,
    newline=None,
    suffix=None,
    prefix=None,
    dir=None,
    delete=True,
    wrapper_class_override=None,
):
    """Create and return a temporary file.
    Arguments:
    'prefix', 'suffix', 'dir' -- as for mkstemp.
    'mode' -- the mode argument to io.open (default "w+b").
    'buffering' -- the buffer size argument to io.open (default -1).
    'encoding' -- the encoding argument to io.open (default None)
    'newline' -- the newline argument to io.open (default None)
    'delete' -- whether the file is deleted on close (default True).
    'wrapper_class_override' -- optional replacement for
    _TemporaryFileWrapper (extension over the stdlib signature).
    The file is created as mkstemp() would do it.
    Returns an object with a file-like interface; the name of the file
    is accessible as its 'name' attribute. The file will be automatically
    deleted when it is closed unless the 'delete' argument is set to False.
    """
    prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
    flags = _bin_openflags
    # Setting O_TEMPORARY in the flags causes the OS to delete
    # the file when it is closed. This is only supported by Windows.
    if not wrapper_class_override:
        wrapper_class_override = _TemporaryFileWrapper
    if os.name == "nt" and delete:
        flags |= os.O_TEMPORARY
    # Older interpreters don't accept the output_type argument.
    if sys.version_info < (3, 5):
        (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
    else:
        (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type)
    try:
        file = io.open(fd, mode, buffering=buffering, newline=newline, encoding=encoding)
        if wrapper_class_override is not None:
            # A throwaway subclass is created per call so callers can mix the
            # override with the base wrapper's behavior.
            return type(str("_TempFileWrapper"), (wrapper_class_override, object), {})(
                file, name, delete
            )
        else:
            return _TemporaryFileWrapper(file, name, delete)
    except BaseException:
        # NOTE(review): cleanup unlinks before closing the fd; on Windows
        # with O_TEMPORARY the close itself deletes the file, and if io.open
        # succeeded this fd may already be owned by `file` — confirm this
        # ordering doesn't mask the original error on that platform.
        os.unlink(name)
        os.close(fd)
        raise
| |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.python_tensor_converter."""
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import types_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import _pywrap_python_tensor_converter
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class PythonTensorConverterTest(test_util.TensorFlowTestCase,
                                parameterized.TestCase):
  """Tests for the pybind PythonTensorConverter.

  Each test calls Convert(value, dtype) and checks the resulting tensor,
  the reported dtype enum, and whether the slow Python fallback path was
  used. The assertions pair `used_fallback` with
  `not context.executing_eagerly()`, i.e. graph mode is expected to take
  the fallback for plain Python values.
  """

  def setUp(self):
    # The converter needs an initialized eager context even in graph mode.
    context.ensure_initialized()
    super(PythonTensorConverterTest, self).setUp()

  def makePythonTensorConverter(self):
    """Build a fresh converter bound to the current TF context."""
    return _pywrap_python_tensor_converter.PythonTensorConverter(
        context.context())

  #=============================================================================
  # Convert int to tensor.

  def testConvertIntWithInferredDType(self):
    converter = self.makePythonTensorConverter()
    # DT_INVALID asks the converter to infer the dtype itself.
    result, dtype, used_fallback = converter.Convert(12, types_pb2.DT_INVALID)
    self.assertIsInstance(result, ops.Tensor)
    self.assertAllEqual(result, 12)
    self.assertEqual(dtype, types_pb2.DT_INT32)
    self.assertEqual(used_fallback, not context.executing_eagerly())

  def testConvertIntWithExplicitDtype(self):
    converter = self.makePythonTensorConverter()
    result, dtype, used_fallback = converter.Convert(12, types_pb2.DT_INT64)
    self.assertIsInstance(result, ops.Tensor)
    self.assertAllEqual(result, 12)
    self.assertEqual(dtype, types_pb2.DT_INT64)
    self.assertEqual(used_fallback, not context.executing_eagerly())

  def testConvertIntWithIncompatibleDtype(self):
    converter = self.makePythonTensorConverter()
    # Error message differs between graph and eager paths, hence the
    # alternation in the regex.
    with self.assertRaisesRegex(
        TypeError, "Expected string, but got 3 of type 'int'"
        "|Cannot convert 3 to EagerTensor of dtype string"):
      converter.Convert(3, types_pb2.DT_STRING)

  #=============================================================================
  # Convert tensor to tensor.

  def testConvertTensorWithInferredDType(self):
    converter = self.makePythonTensorConverter()
    result, dtype, used_fallback = converter.Convert(
        constant_op.constant([1, 2, 3]), types_pb2.DT_INVALID)
    self.assertIsInstance(result, ops.Tensor)
    self.assertAllEqual(result, [1, 2, 3])
    self.assertEqual(dtype, types_pb2.DT_INT32)
    # Values that already are tensors never need the fallback.
    self.assertFalse(used_fallback)

  def testConvertTensorWithExplicitDtype(self):
    converter = self.makePythonTensorConverter()
    result, dtype, used_fallback = converter.Convert(
        constant_op.constant([1, 2, 3], dtypes.int64), types_pb2.DT_INT64)
    self.assertIsInstance(result, ops.Tensor)
    self.assertAllEqual(result, [1, 2, 3])
    self.assertEqual(dtype, types_pb2.DT_INT64)
    self.assertFalse(used_fallback)

  def testConvertTensorWithIncorrectDtype(self):
    converter = self.makePythonTensorConverter()
    # An existing tensor is never silently cast to a different dtype.
    with self.assertRaises((TypeError, ValueError)):
      converter.Convert(
          constant_op.constant([1, 2, 3], dtypes.int32), types_pb2.DT_INT64)

  #=============================================================================
  # Convert list to tensor.

  def testConvertListWithInferredDType(self):
    converter = self.makePythonTensorConverter()
    result, dtype, used_fallback = converter.Convert([[1, 2, 3], [4, 5, 6]],
                                                     types_pb2.DT_INVALID)
    self.assertIsInstance(result, ops.Tensor)
    self.assertAllEqual(result, [[1, 2, 3], [4, 5, 6]])
    self.assertEqual(dtype, types_pb2.DT_INT32)
    self.assertEqual(used_fallback, not context.executing_eagerly())

  def testConvertListWithExplicitDtype(self):
    converter = self.makePythonTensorConverter()
    result, dtype, used_fallback = converter.Convert([[1, 2, 3], [4, 5, 6]],
                                                     types_pb2.DT_INT64)
    self.assertIsInstance(result, ops.Tensor)
    self.assertAllEqual(result, [[1, 2, 3], [4, 5, 6]])
    self.assertEqual(dtype, types_pb2.DT_INT64)
    self.assertEqual(used_fallback, not context.executing_eagerly())

  def testConvertListWithIncompatibleDtype(self):
    converter = self.makePythonTensorConverter()
    with self.assertRaisesRegex(
        TypeError, "Expected string, but got .* of type 'int'"
        "|Cannot convert .* to EagerTensor of dtype string"):
      converter.Convert([[1, 2, 3], [4, 5, 6]], types_pb2.DT_STRING)

  def testConvertListWithInconsistentDtype(self):
    converter = self.makePythonTensorConverter()
    with self.assertRaisesRegex(
        (TypeError, ValueError),
        "Can't convert Python sequence with mixed types to Tensor."
        "|Failed to convert"):
      converter.Convert([[1, 2], ["a", "b"]], types_pb2.DT_INVALID)

  #=============================================================================
  # Convert np.array to tensor.

  def testConvertNumpyArrayWithInferredDType(self):
    converter = self.makePythonTensorConverter()
    x = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
    result, dtype, used_fallback = converter.Convert(x, types_pb2.DT_INVALID)
    self.assertIsInstance(result, ops.Tensor)
    self.assertAllEqual(result, [[1, 2, 3], [4, 5, 6]])
    self.assertEqual(dtype, types_pb2.DT_INT32)
    self.assertEqual(used_fallback, not context.executing_eagerly())

  def testConvertNumpyArrayWithExplicitDtype(self):
    converter = self.makePythonTensorConverter()
    x = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
    result, dtype, used_fallback = converter.Convert(x, types_pb2.DT_INT64)
    self.assertIsInstance(result, ops.Tensor)
    self.assertAllEqual(result, [[1, 2, 3], [4, 5, 6]])
    self.assertEqual(dtype, types_pb2.DT_INT64)
    self.assertEqual(used_fallback, not context.executing_eagerly())

  def testConvertNumpyArrayWithIncompatibleDtype(self):
    converter = self.makePythonTensorConverter()
    x = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
    with self.assertRaises((ValueError, TypeError)):
      converter.Convert(x, types_pb2.DT_STRING)

  def testConvertNumpyArrayWithUnsupportedDtype(self):
    converter = self.makePythonTensorConverter()
    # Object arrays have no corresponding TF dtype.
    x = np.array([[1, 2], ["a", "b"]], np.object_)
    with self.assertRaises((ValueError, TypeError)):
      converter.Convert(x, types_pb2.DT_INVALID)

  #=============================================================================
  # Convert IndexedSlices to tensor.

  def testConvertIndexedSlicesWithInferredDType(self):
    converter = self.makePythonTensorConverter()
    x = indexed_slices.IndexedSlices(
        constant_op.constant([[1, 2, 3]], dtypes.int32, name="x_values"),
        constant_op.constant([1], dtypes.int64, name="x_indices"),
        constant_op.constant([3, 3], dtypes.int64, name="x_shape"))
    result, dtype, used_fallback = converter.Convert(x, types_pb2.DT_INVALID)
    self.assertIsInstance(result, ops.Tensor)
    # Densification scatters the single row into index 1 of a 3x3 tensor.
    self.assertAllEqual(result, [[0, 0, 0], [1, 2, 3], [0, 0, 0]])
    self.assertEqual(dtype, types_pb2.DT_INT32)
    # IndexedSlices always densify via the fallback path.
    self.assertTrue(used_fallback)

  def testConvertIndexedSlicesWithExplicitDtype(self):
    converter = self.makePythonTensorConverter()
    x = indexed_slices.IndexedSlices(
        constant_op.constant([[1, 2, 3]], dtypes.int32, name="x_values"),
        constant_op.constant([1], dtypes.int64, name="x_indices"),
        constant_op.constant([3, 3], dtypes.int64, name="x_shape"))
    result, dtype, used_fallback = converter.Convert(x, types_pb2.DT_INT32)
    self.assertIsInstance(result, ops.Tensor)
    self.assertAllEqual(result, [[0, 0, 0], [1, 2, 3], [0, 0, 0]])
    self.assertEqual(dtype, types_pb2.DT_INT32)
    self.assertTrue(used_fallback)

  def testConvertIndexedSlicesWithIncorrectDtype(self):
    converter = self.makePythonTensorConverter()
    x = indexed_slices.IndexedSlices(
        constant_op.constant([[1, 2, 3]], dtypes.int32, name="x_values"),
        constant_op.constant([1], dtypes.int64, name="x_indices"),
        constant_op.constant([3, 3], dtypes.int64, name="x_shape"))
    with self.assertRaises((ValueError, TypeError)):
      converter.Convert(x, types_pb2.DT_FLOAT)
# Allow running this test file directly; googletest discovers the test methods.
if __name__ == "__main__":
  googletest.main()
| |
"""
Tests of disco_elastigroup
"""
import random
from unittest import TestCase
from parameterized import parameterized
from mock import MagicMock, ANY, patch
from disco_aws_automation import DiscoElastigroup
ENVIRONMENT_NAME = "moon"
class DiscoElastigroupTests(TestCase):
"""Test DiscoElastigroup class"""
    def mock_elastigroup(self, hostclass, ami_id=None, min_size=2, max_size=2, desired_size=2):
        """Convenience function for creating an elastigroup object

        Builds a dict in the shape the SpotInst group API returns it, with
        randomized group id / name suffix / AMI id so tests don't collide.

        :param hostclass: hostclass embedded in the generated group name
        :param ami_id: image id; a random 'ami-...' is generated when None
        :param min_size/max_size/desired_size: capacity settings
        """
        grp_id = 'sig-' + ''.join(random.choice("1234567890") for _ in range(10))
        name = "{0}_{1}_{2}".format(
            ENVIRONMENT_NAME,
            hostclass,
            ''.join(random.choice("1234567890") for _ in range(10))
        )
        ami_id = ami_id or 'ami-' + ''.join(random.choice("1234567890") for _ in range(12))
        mock_elastigroup = {
            "id": grp_id,
            "name": name,
            "capacity": {
                "minimum": min_size,
                "maximum": max_size,
                "target": desired_size,
                "unit": "instance"
            },
            "compute": {
                "product": "Linux/UNIX",
                "instanceTypes": {
                    "ondemand": "m4.large",
                    "spot": [
                        "m4.large"
                    ]
                },
                "launchSpecification": {
                    "imageId": ami_id,
                    # One classic ELB and one target group, to exercise both
                    # load-balancer attachment styles.
                    "loadBalancersConfig": {
                        "loadBalancers": [{
                            "name": "elb-1234",
                            "type": "CLASSIC"
                        }, {
                            'arn': 'tg_1234',
                            'type': 'TARGET_GROUP'
                        }]
                    },
                    "blockDeviceMappings": [{
                        "deviceName": "/dev/xvda",
                        "ebs": {
                            "deleteOnTermination": "true",
                            "volumeSize": "80",
                            "volumeType": "gp2",
                            "snapshotId": "snapshot-abcd1234"
                        }
                    }]
                },
                "availabilityZones": [{
                    "name": 'us-moon-1a',
                    "subnetId": "subnet-abcd1234"
                }]
            },
            "scheduling": {
                "tasks": [{
                    'taskType': 'scale',
                    'cronExpression': '12 0 * * *',
                    'scaleMinCapacity': 5
                }]
            }
        }
        return mock_elastigroup
    def setUp(self):
        """Pre-test setup"""
        self.elastigroup = DiscoElastigroup(
            ENVIRONMENT_NAME
        )
        # Replace the real SpotInst client with a mock so tests never make
        # network calls.
        self.elastigroup.spotinst_client = MagicMock()
def test_delete_groups_bad_hostclass(self):
    """Verifies elastigroup not deleted for bad hostclass"""
    # No groups exist for this hostclass, so _delete_group must not fire.
    self.elastigroup._delete_group = MagicMock()
    self.elastigroup._spotinst_call = MagicMock()
    self.elastigroup.delete_groups(hostclass="mhcfoo")
    self.assertFalse(self.elastigroup._delete_group.called)

def test_delete_groups_bad_groupname(self):
    """Verifies elastigroup not deleted for bad group name"""
    self.elastigroup._delete_group = MagicMock()
    self.elastigroup._spotinst_call = MagicMock()
    self.elastigroup.delete_groups(group_name='moon_mhcfoo_12345678')
    self.assertFalse(self.elastigroup._delete_group.called)

def test_delete_groups_good_hostclass(self):
    """Verifies elastigroup is deleted for only given hostclass"""
    mock_group = self.mock_elastigroup(hostclass='mhcfoo')
    self.elastigroup._delete_group = MagicMock()
    self.elastigroup.get_existing_groups = MagicMock(return_value=[mock_group])
    self.elastigroup.delete_groups(hostclass='mhcfoo')
    # Exactly one deletion, keyed by the mocked group's id.
    self.elastigroup._delete_group.assert_called_once_with(group_id=mock_group['id'])

def test_delete_groups_good_groupname(self):
    """Verifies elastigroup is deleted for only given group name"""
    mock_group = self.mock_elastigroup(hostclass='mhcfoo')
    self.elastigroup._delete_group = MagicMock()
    self.elastigroup.get_existing_groups = MagicMock(return_value=[mock_group])
    self.elastigroup.delete_groups(group_name=mock_group['name'])
    self.elastigroup._delete_group.assert_called_once_with(group_id=mock_group['id'])
@patch("boto3.session.Session")
def test_list_groups_with_groups(self, session_mock):
    """Verifies that listgroups correctly formats elastigroups"""
    mock_group1 = self.mock_elastigroup(hostclass="mhcfoo")
    mock_group2 = self.mock_elastigroup(hostclass="mhcbar")
    self.elastigroup.spotinst_client.get_groups.return_value = [mock_group1, mock_group2]
    # Two instance records per group -> expected group_cnt of 2 below.
    self.elastigroup.spotinst_client.get_group_status.return_value = [{
        "instanceId": "instance1"
    }, {
        "instanceId": "instance1"
    }]
    session_mock.return_value.region_name = 'us-moon'
    mock_listings = [
        {
            'name': mock_group1['name'],
            'image_id': mock_group1['compute']['launchSpecification']['imageId'],
            'group_cnt': 2,
            'min_size': mock_group1['capacity']['minimum'],
            'desired_capacity': mock_group1['capacity']['target'],
            'max_size': mock_group1['capacity']['maximum'],
            'type': 'spot',
            'tags': {}
        },
        {
            'name': mock_group2['name'],
            'image_id': mock_group2['compute']['launchSpecification']['imageId'],
            'group_cnt': 2,
            'min_size': mock_group2['capacity']['minimum'],
            'desired_capacity': mock_group2['capacity']['target'],
            'max_size': mock_group2['capacity']['maximum'],
            'type': 'spot',
            'tags': {}
        }
    ]
    self.assertEqual(self.elastigroup.list_groups(), mock_listings)
def test_create_new_group(self):
    """Verifies new elastigroup is created"""
    self.elastigroup.spotinst_client.create_group.return_value = {
        'name': 'mhcfoo'
    }
    # 'a:b' instance_type becomes spot=[a, b] with ondemand=a in the request.
    group = self.elastigroup.create_or_update_group(
        hostclass="mhcfoo",
        subnets=[{
            'SubnetId': 'sub-1234',
            'AvailabilityZone': 'us-moon-1'
        }],
        spotinst=True,
        instance_type='t2.small:m3.medium',
        min_size=1,
        desired_size=1,
        max_size=1
    )
    # The full creation payload is pinned; generated fields use mock.ANY.
    self.elastigroup.spotinst_client.create_group.assert_called_with({
        'group': {
            'compute': {
                'product': 'Linux/UNIX',
                'availabilityZones': [{'subnetIds': ['sub-1234'], 'name': 'us-moon-1'}],
                'instanceTypes': {
                    'spot': ['t2.small', 'm3.medium'],
                    'ondemand': 't2.small'
                },
                'launchSpecification': {
                    "iamRole": None,
                    'userData': None,
                    'tags': [{'tagKey': 'group_name', 'tagValue': ANY},
                             {'tagKey': 'spotinst', 'tagValue': 'True'}],
                    'blockDeviceMappings': None,
                    'imageId': None,
                    'networkInterfaces': None,
                    'monitoring': None,
                    'loadBalancersConfig': None,
                    'securityGroupIds': None,
                    'keyPair': None,
                    'ebsOptimized': None
                }
            },
            'strategy': {
                'onDemandCount': None,
                'availabilityVsCost': 'availabilityOriented',
                'fallbackToOd': True,
                'risk': 100,
                'utilizeReservedInstances': True
            },
            'capacity': {
                'minimum': 1,
                'target': 1,
                'maximum': 1,
                'unit': 'instance'
            },
            'name': ANY,
            'description': ANY
        }
    })
    self.assertEqual(group['name'], 'mhcfoo')
@patch("boto3.session.Session")
def test_update_image_id(self, session_mock):
    """Verifies updating AMI of an existing group"""
    group = self.mock_elastigroup(hostclass='mhcfoo')
    self.elastigroup.spotinst_client.get_groups.return_value = [group]
    session_mock.return_value.region_name = 'us-moon'
    self.elastigroup.create_or_update_group(
        hostclass="mhcfoo",
        spotinst=True,
        image_id="ami-123456"
    )
    # Only imageId is pinned; everything else in the update is ANY.
    expected_request = {
        'group': {
            'name': ANY,
            'capacity': ANY,
            'compute': {
                'instanceTypes': ANY,
                'availabilityZones': ANY,
                'launchSpecification': {
                    'blockDeviceMappings': ANY,
                    'loadBalancersConfig': ANY,
                    'imageId': 'ami-123456'
                }
            },
            'scheduling': ANY
        }
    }
    self.elastigroup.spotinst_client.update_group.assert_called_once_with(group['id'], expected_request)

@patch("boto3.session.Session")
def test_update_size(self, session_mock):
    """Verifies resizing an existing group"""
    group = self.mock_elastigroup(hostclass='mhcfoo')
    self.elastigroup.spotinst_client.get_groups.return_value = [group]
    session_mock.return_value.region_name = 'us-moon'
    self.elastigroup.create_or_update_group(
        hostclass="mhcfoo",
        spotinst=True,
        min_size=5,
        max_size=10,
        desired_size=5
    )
    expected_request = {
        'group': {
            'name': ANY,
            'capacity': {
                'minimum': 5,
                'maximum': 10,
                'target': 5
            },
            'compute': ANY,
            'scheduling': ANY
        }
    }
    self.elastigroup.spotinst_client.update_group.assert_called_once_with(group['id'], expected_request)
@parameterized.expand([
    # (spotinst_reserve, expected risk, expected onDemandCount):
    # "N%" -> risk=100-N, plain "N" -> onDemandCount=N, "" -> risk=100.
    ("53%", 47, None),
    ("20", None, 20),
    ("", 100, None)
])
@patch("boto3.session.Session")
def test_update_spotinst_reserve(self, spotinst_reserve, risk, on_demand_count, session_mock):
    """Verifies updating risk of an existing group"""
    # parameterized args come before the patch-injected mock in the signature.
    group = self.mock_elastigroup(hostclass='mhcfoo')
    self.elastigroup.spotinst_client.get_groups.return_value = [group]
    session_mock.return_value.region_name = 'us-moon'
    self.elastigroup.create_or_update_group(
        hostclass="mhcfoo",
        spotinst=True,
        spotinst_reserve=spotinst_reserve
    )
    expected_request = {
        'group': {
            'name': ANY,
            'capacity': ANY,
            'compute': ANY,
            'scheduling': ANY,
            'strategy': {
                "risk": risk,
                "onDemandCount": on_demand_count
            }
        }
    }
    self.elastigroup.spotinst_client.update_group.assert_called_once_with(group['id'], expected_request)
@patch("boto3.session.Session")
def test_update_snapshot(self, session_mock):
    """Verifies that snapshots for a Elastigroup are updated"""
    group = self.mock_elastigroup(hostclass='mhcfoo')
    self.elastigroup.spotinst_client.get_groups.return_value = [group]
    session_mock.return_value.region_name = 'us-moon'
    self.elastigroup.update_snapshot('snapshot-newsnapshotid', 100, hostclass='mhcfoo')
    # The device mapping is rewritten with the new snapshot id and an
    # integer volume size (the mock group used string "80").
    expected_request = {
        'group': {
            'compute': {
                'launchSpecification': {
                    'blockDeviceMappings': [{
                        "deviceName": "/dev/xvda",
                        "ebs": {
                            "deleteOnTermination": "true",
                            "volumeSize": 100,
                            "volumeType": "gp2",
                            "snapshotId": "snapshot-newsnapshotid"
                        }
                    }]
                }
            }
        }
    }
    self.elastigroup.spotinst_client.update_group.assert_called_once_with(group['id'], expected_request)
@patch("boto3.session.Session")
def test_update_elb(self, session_mock):
    """Verifies ELBs and TGs for a Elastigroup are updated"""
    group = self.mock_elastigroup(hostclass='mhcfoo')
    self.elastigroup.spotinst_client.get_groups.return_value = [group]
    session_mock.return_value.region_name = 'us-moon'
    # update_elb reports (added ELBs, removed ELBs, added TGs, removed TGs).
    new_elbs, extras, new_tgs, extra_tgs = self.elastigroup.update_elb(
        elb_names=['elb-newelb'],
        target_groups=["tg_arn"],
        hostclass='mhcfoo'
    )
    expected_request = {
        'group': {
            'compute': {
                'launchSpecification': {
                    'loadBalancersConfig': {
                        'loadBalancers': [{
                            'name': 'elb-newelb',
                            'type': 'CLASSIC'
                        }, {
                            'arn': 'tg_arn',
                            'type': 'TARGET_GROUP'
                        }]
                    }
                }
            }
        }
    }
    self.elastigroup.spotinst_client.update_group.assert_called_once_with(group['id'], expected_request)
    # (Removed two no-op ``str(...)`` statements that were debugging leftovers.)
    self.assertEqual({'elb-newelb'}, new_elbs)
    self.assertEqual({'elb-1234'}, extras)
    self.assertEqual({'tg_arn'}, new_tgs)
    self.assertEqual({'tg_1234'}, extra_tgs)
def test_update_elb_missing_group(self):
    """Test updating ELB and Target Group for group that doesn't exist"""
    self.elastigroup.spotinst_client.get_groups.return_value = []
    # NOTE(review): test_update_elb unpacks four values from update_elb;
    # here only two are unpacked -- presumably the missing-group path
    # returns a pair. Verify against the implementation.
    new_elbs, extras = self.elastigroup.update_elb(
        elb_names=['elb-newelb'],
        target_groups=["tg_arn"],
        hostclass='mhcfoo'
    )
    self.assertEqual(set(), new_elbs)
    self.assertEqual(set(), extras)
@patch("boto3.session.Session")
def test_update_group_update_elb(self, session_mock):
    """Verifies updating group also updates ELB and TG"""
    group = self.mock_elastigroup(hostclass='mhcfoo')
    self.elastigroup.spotinst_client.get_groups.return_value = [group]
    session_mock.return_value.region_name = 'us-moon'
    self.elastigroup.create_or_update_group(
        hostclass="mhcfoo",
        load_balancers=["elb-newelb"],
        target_groups=["tg_arn"],
        spotinst=True
    )
    expected_request = {
        'group': {
            'compute': {
                'launchSpecification': {
                    'loadBalancersConfig': {
                        'loadBalancers': [{
                            'name': 'elb-newelb',
                            'type': 'CLASSIC'
                        }, {
                            'arn': 'tg_arn',
                            'type': 'TARGET_GROUP'
                        }]
                    }
                }
            }
        }
    }
    # assert_called_with (not _once_): the group update may issue several
    # update_group calls; the ELB payload must be among them.
    self.elastigroup.spotinst_client.update_group.assert_called_with(group['id'], expected_request)

@patch("boto3.session.Session")
def test_create_recurring_group_action(self, session_mock):
    """Verifies recurring actions are created for Elastigroups"""
    group = self.mock_elastigroup(hostclass='mhcfoo')
    self.elastigroup.spotinst_client.get_groups.return_value = [group]
    session_mock.return_value.region_name = 'us-moon'
    self.elastigroup.create_recurring_group_action('0 0 * * *', min_size=1, hostclass='mhcfoo')
    # The new task is appended after the mock group's pre-existing task.
    expected_request = {
        'group': {
            'scheduling': {
                'tasks': [{
                    'taskType': 'scale',
                    'cronExpression': '12 0 * * *',
                    'scaleMinCapacity': 5
                }, {
                    'taskType': 'scale',
                    'cronExpression': '0 0 * * *',
                    'scaleMinCapacity': 1
                }]
            }
        }
    }
    self.elastigroup.spotinst_client.update_group.assert_called_once_with(group['id'], expected_request)

@patch("boto3.session.Session")
def test_delete_all_recurring_group_actions(self, session_mock):
    """Verifies recurring actions are deleted for Elastigroups"""
    group = self.mock_elastigroup(hostclass='mhcfoo')
    self.elastigroup.spotinst_client.get_groups.return_value = [group]
    session_mock.return_value.region_name = 'us-moon'
    self.elastigroup.delete_all_recurring_group_actions(hostclass='mhcfoo')
    expected_request = {
        'group': {
            'scheduling': None
        }
    }
    self.elastigroup.spotinst_client.update_group.assert_called_once_with(group['id'], expected_request)
@patch("boto3.session.Session")
def test_scaledown(self, session_mock):
    """Verifies Elastigroups are scaled down"""
    group = self.mock_elastigroup(hostclass='mhcfoo')
    self.elastigroup.spotinst_client.get_groups.return_value = [group]
    session_mock.return_value.region_name = 'us-moon'
    self.elastigroup.scaledown_groups(hostclass='mhcfoo')
    # Scaledown zeroes every capacity bound.
    expected_request = {
        "group": {
            "capacity": {
                "target": 0,
                "minimum": 0,
                "maximum": 0
            }
        }
    }
    self.elastigroup.spotinst_client.update_group.assert_called_once_with(group['id'], expected_request)

@parameterized.expand([
    ("53%", 47, None),
    ("20", None, 20),
    (None, 100, None)
])
def test_spotinst_reserve(self, spotinst_reserve, risk, on_demand_count):
    """Verifies spotinst_reserve handled correctly"""
    self.elastigroup.create_or_update_group(
        hostclass="mhcfoo",
        instance_type='m3.medium',
        spotinst_reserve=spotinst_reserve,
        spotinst=True
    )
    expected_request = {
        "group": {
            "compute": ANY,
            "capacity": ANY,
            'name': ANY,
            'description': ANY,
            "strategy": {
                "utilizeReservedInstances": ANY,
                "availabilityVsCost": ANY,
                "risk": risk,
                "onDemandCount": on_demand_count,
                "fallbackToOd": ANY
            }
        }
    }
    self.elastigroup.spotinst_client.create_group.assert_called_once_with(expected_request)
@patch('os.environ.get', MagicMock(return_value=None))
def test_is_spotinst_not_enabled(self):
    """Verify that if no spotinst token is set, spotinst is not enabled"""
    self.elastigroup = DiscoElastigroup(ENVIRONMENT_NAME)
    self.assertFalse(self.elastigroup.is_spotinst_enabled())

@patch('disco_aws_automation.spotinst_client.read_config')
@patch('os.environ.get', MagicMock(return_value="Fake_Spotinst_Token"))
def test_is_spotinst_enabled(self, mock_config):
    """Verify that if spotinst token is set, spotinst is enabled"""
    # Token available from both env and config -> enabled.
    mock_config.get_asiaq_option.return_value = "Fake_Spotinst_Token"
    self.elastigroup = DiscoElastigroup(ENVIRONMENT_NAME)
    self.assertTrue(self.elastigroup.is_spotinst_enabled())
@parameterized.expand([
    # (hostclass, group_name) combinations, including both absent.
    (None, None),
    ("mhcfoo", None),
    (None, "mhcfoo1"),
    ("mhcfoo", "mhcfoo1"),
])
def test_get_instances(self, hostclass, group_name):
    """Testing getting list of instances for a Elastigroup"""
    self.elastigroup.boto3_ec.describe_instances = MagicMock(return_value={
        'Reservations': [{
            'Instances': [{
                'InstanceId': 'i-2345',
                'Tags': [{
                    'Key': 'group_name',
                    'Value': 'mhcfoo_1234'
                }]
            }]
        }],
        'NextToken': None
    })
    instances = self.elastigroup.get_instances(hostclass=hostclass, group_name=group_name)
    # Rebuild the filter list exactly as get_instances should: base filters
    # always present, hostclass/group_name filters appended conditionally.
    filters = [
        {'Name': 'tag:spotinst', 'Values': ['True']},
        {'Name': 'tag:environment', 'Values': ['moon']},
        {
            'Name': 'instance-state-name',
            'Values': ['pending', 'running', 'shutting-down', 'stopping', 'stopped']
        }
    ]
    if hostclass:
        filters.append({
            'Name': 'tag:hostclass',
            'Values': [hostclass]
        })
    if group_name:
        filters.append({
            'Name': 'tag:group_name',
            'Values': [group_name]
        })
    self.elastigroup.boto3_ec.describe_instances.assert_called_once_with(Filters=filters)
    expected = [{
        'instance_id': 'i-2345',
        'group_name': 'mhcfoo_1234'
    }]
    self.assertEqual(expected, instances)
@patch("boto3.session.Session")
def test_get_launch_config(self, session_mock):
    """Testing getting launch config for a hostclass"""
    group = self.mock_elastigroup(hostclass='mhcfoo')
    self.elastigroup.spotinst_client.get_groups.return_value = [group]
    session_mock.return_value.region_name = 'us-moon'
    launch_config = self.elastigroup.get_launch_config(hostclass='mhcfoo')
    # Only the instance type is surfaced (the mock group's ondemand type).
    self.assertEqual({'instance_type': 'm4.large'}, launch_config)
| |
# -*- coding: utf-8 -*-
from unittest import TestCase
import numpy as np
from sklearn.neural_network.multilayer_perceptron import MLPRegressor
from tests.estimator.regressor.Regressor import Regressor
from tests.language.JavaScript import JavaScript
class MLPRegressorJSTest(JavaScript, Regressor, TestCase):
    """Port-and-compare tests for MLPRegressor transpiled to JavaScript.

    Each test configures an estimator, ports it, and checks that the ported
    predictor agrees with scikit-learn on random in-range inputs.
    """

    N_RANDOM_TESTS = 50

    def setUp(self):
        super(MLPRegressorJSTest, self).setUp()
        np.random.seed(0)
        self.estimator = MLPRegressor(activation='relu', hidden_layer_sizes=50,
                                      max_iter=500, learning_rate_init=.1,
                                      random_state=3)

    def tearDown(self):
        super(MLPRegressorJSTest, self).tearDown()

    def _assert_ported_predictions_match(self, n_preds=30, tol=0.0001):
        """Port self.estimator and assert |custom - py| < tol on random points.

        Factored out of eleven duplicated test bodies. Bug fix: the original
        comparison omitted abs(), so any prediction where custom < py passed
        regardless of how far apart the values were.
        """
        self.load_data()
        self._port_estimator()
        amin = np.amin(self.X, axis=0)
        amax = np.amax(self.X, axis=0)
        match = []
        for _ in range(n_preds):
            x = np.random.uniform(amin, amax, self.n_features)
            match.append(abs(self.pred_in_custom(x, cast=False) -
                             self.pred_in_py(x, cast=False)) < tol)
        self._clear_estimator()
        # noinspection PyUnresolvedReferences
        self.assertEqual(match.count(True), len(match))

    def test_activation_fn_relu_with_mult_layers_2(self):
        self.estimator = MLPRegressor(activation='relu',
                                      hidden_layer_sizes=(50, 30),
                                      max_iter=500, learning_rate_init=.1,
                                      random_state=3)
        self._assert_ported_predictions_match()

    def test_activation_fn_relu_with_mult_layers_3(self):
        self.estimator = MLPRegressor(activation='relu',
                                      hidden_layer_sizes=(50, 30, 15),
                                      max_iter=500, learning_rate_init=.1,
                                      random_state=3)
        self._assert_ported_predictions_match()

    def test_activation_fn_identity(self):
        # Note: deliberately no random_state, matching the original test.
        self.estimator = MLPRegressor(activation='identity',
                                      hidden_layer_sizes=50, max_iter=500,
                                      learning_rate_init=.1)
        self._assert_ported_predictions_match()

    def test_activation_fn_identity_with_mult_layers_2(self):
        self.estimator = MLPRegressor(activation='identity',
                                      hidden_layer_sizes=(50, 30), max_iter=500,
                                      learning_rate_init=.1, random_state=3)
        self._assert_ported_predictions_match()

    def test_activation_fn_identity_with_mult_layers_3(self):
        self.estimator = MLPRegressor(activation='identity',
                                      hidden_layer_sizes=(50, 30, 15),
                                      max_iter=500, learning_rate_init=.1,
                                      random_state=3)
        self._assert_ported_predictions_match()

    def test_activation_fn_tanh(self):
        self.estimator = MLPRegressor(activation='tanh', hidden_layer_sizes=50,
                                      max_iter=500, learning_rate_init=.1,
                                      random_state=3)
        self._assert_ported_predictions_match()

    def test_activation_fn_tanh_with_mult_layers_2(self):
        self.estimator = MLPRegressor(activation='tanh',
                                      hidden_layer_sizes=(50, 30),
                                      max_iter=500, learning_rate_init=.1,
                                      random_state=3)
        self._assert_ported_predictions_match()

    def test_activation_fn_tanh_with_mult_layers_3(self):
        self.estimator = MLPRegressor(activation='tanh',
                                      hidden_layer_sizes=(50, 30, 15),
                                      max_iter=500, learning_rate_init=.1,
                                      random_state=3)
        self._assert_ported_predictions_match()

    def test_activation_fn_logistic(self):
        self.estimator = MLPRegressor(activation='logistic',
                                      hidden_layer_sizes=50, max_iter=500,
                                      learning_rate_init=.1, random_state=3)
        self._assert_ported_predictions_match()

    # "logstic" typos preserved: the method names are the public interface.
    def test_activation_fn_logstic_with_mult_layers_2(self):
        self.estimator = MLPRegressor(activation='logistic',
                                      hidden_layer_sizes=(50, 30), max_iter=500,
                                      learning_rate_init=.1, random_state=3)
        self._assert_ported_predictions_match()

    def test_activation_fn_logstic_with_mult_layers_3(self):
        self.estimator = MLPRegressor(activation='logistic',
                                      hidden_layer_sizes=(50, 30, 15),
                                      max_iter=500, learning_rate_init=.1,
                                      random_state=3)
        self._assert_ported_predictions_match()
| |
import re
import socket
import sys
# Helper module
# Bare excepts below fall back to non-package (script-style) imports when
# the relative import fails; presumably this supports running outside the
# package -- kept broad deliberately.
try:
    from .helper import H
except:
    from helper import H
# Settings variables
try:
    from . import settings as S
except:
    import settings as S
# Config module
from .config import get_value
# Log module
from .log import debug
# HTML entities
try:
    from html.entities import name2codepoint
except ImportError:
    from htmlentitydefs import name2codepoint
# XML parser
try:
    from xml.etree import cElementTree as ET
except ImportError:
    try:
        from xml.etree import ElementTree as ET
    except ImportError:
        from .elementtree import ElementTree as ET
try:
    from xml.parsers import expat
    UNESCAPE_RESPONSE_DATA = True
except ImportError:
    # Module xml.parsers.expat missing, using SimpleXMLTreeBuilder
    from .elementtree import SimpleXMLTreeBuilder
    ET.XMLTreeBuilder = SimpleXMLTreeBuilder.TreeBuilder
    UNESCAPE_RESPONSE_DATA = False
# Unicode ranges that are not valid in XML 1.0 documents; responses are
# scrubbed of these before parsing (see Protocol.read).
ILLEGAL_XML_UNICODE_CHARACTERS = [
    (0x00, 0x08), (0x0B, 0x0C), (0x0E, 0x1F), (0x7F, 0x84),
    (0x86, 0x9F), (0xD800, 0xDFFF), (0xFDD0, 0xFDDF),
    (0xFFFE, 0xFFFF),
    (0x1FFFE, 0x1FFFF), (0x2FFFE, 0x2FFFF), (0x3FFFE, 0x3FFFF),
    (0x4FFFE, 0x4FFFF), (0x5FFFE, 0x5FFFF), (0x6FFFE, 0x6FFFF),
    (0x7FFFE, 0x7FFFF), (0x8FFFE, 0x8FFFF), (0x9FFFE, 0x9FFFF),
    (0xAFFFE, 0xAFFFF), (0xBFFFE, 0xBFFFF), (0xCFFFE, 0xCFFFF),
    (0xDFFFE, 0xDFFFF), (0xEFFFE, 0xEFFFF), (0xFFFFE, 0xFFFFF),
    (0x10FFFE, 0x10FFFF) ]
# Ranges above sys.maxunicode (narrow builds) are skipped.
ILLEGAL_XML_RANGES = ["%s-%s" % (H.unicode_chr(low), H.unicode_chr(high))
                      for (low, high) in ILLEGAL_XML_UNICODE_CHARACTERS
                      if low < sys.maxunicode]
ILLEGAL_XML_RE = re.compile(H.unicode_string('[%s]') % H.unicode_string('').join(ILLEGAL_XML_RANGES))
class Protocol(object):
    """
    Class for connecting with debugger engine which uses DBGp protocol.

    Acts as a single-connection socket server: listen() accepts the
    debugger's connection, send() writes NUL-terminated commands, and
    read() parses length-prefixed, NUL-terminated XML responses.
    """

    # Maximum amount of data to be received at once by socket
    read_size = 1024

    def __init__(self):
        # Set port number to listen for response
        self.port = get_value(S.KEY_PORT, S.DEFAULT_PORT)
        self.clear()

    # Property-factory idiom: the nested functions become the property's
    # fget/fset/fdel via the **locals() call below.
    def transaction_id():
        """
        Standard argument for sending commands, an unique numerical ID.
        """

        def fget(self):
            # Reading the property increments and returns the counter.
            self._transaction_id += 1
            return self._transaction_id

        def fset(self, value):
            self._transaction_id = value

        def fdel(self):
            # ``del obj.transaction_id`` resets the counter to zero.
            self._transaction_id = 0
        return locals()

    # Transaction ID property
    transaction_id = property(**transaction_id())

    def clear(self):
        """
        Clear variables, reset transaction_id, close socket connection.
        """
        self.buffer = ''
        self.connected = False
        self.listening = False
        del self.transaction_id
        try:
            self.socket.close()
        except:
            pass
        self.socket = None

    def unescape(self, string):
        """
        Convert HTML entities and character references to ordinary characters.
        """
        def convert(matches):
            text = matches.group(0)
            # Character reference
            if text[:2] == "&#":
                try:
                    if text[:3] == "&#x":
                        return H.unicode_chr(int(text[3:-1], 16))
                    else:
                        return H.unicode_chr(int(text[2:-1]))
                except ValueError:
                    pass
            # Named entity
            else:
                try:
                    # Following are not needed to be converted for XML
                    if text[1:-1] == "amp" or text[1:-1] == "gt" or text[1:-1] == "lt":
                        pass
                    else:
                        text = H.unicode_chr(name2codepoint[text[1:-1]])
                except KeyError:
                    pass
            return text
        return re.sub("&#?\w+;", convert, string)

    def read_until_null(self):
        """
        Get response data from debugger engine.

        Returns one NUL-terminated segment; surplus bytes stay in
        self.buffer for the next call.
        """
        # Check socket connection
        if self.connected:
            # Get result data from debugger engine
            try:
                while not '\x00' in self.buffer:
                    self.buffer += H.data_read(self.socket.recv(self.read_size))
                data, self.buffer = self.buffer.split('\x00', 1)
                return data
            except:
                e = sys.exc_info()[1]
                raise ProtocolConnectionException(e)
        else:
            raise ProtocolConnectionException("Xdebug is not connected")

    def read_data(self):
        """
        Get response data from debugger engine and verify length of response.
        """
        # Verify length of response data
        # DBGp frames are "<length> NUL <xml> NUL".
        length = self.read_until_null()
        message = self.read_until_null()
        if int(length) == len(message):
            return message
        else:
            raise ProtocolException("Length mismatch encountered while reading the Xdebug message")

    def read(self, return_string=False):
        """
        Get response from debugger engine as XML document object.
        """
        # Get result data from debugger engine and verify length of response
        data = self.read_data()
        # Show debug output
        debug('[Response data] %s' % data)
        # Return data string
        if return_string:
            return data
        # Remove special character quoting
        if UNESCAPE_RESPONSE_DATA:
            data = self.unescape(data)
        # Replace invalid XML characters
        data = ILLEGAL_XML_RE.sub('?', data)
        # Create XML document object
        document = ET.fromstring(data)
        return document

    def send(self, command, *args, **kwargs):
        """
        Send command to the debugger engine according to DBGp protocol.
        """
        # Expression is used for conditional and watch type breakpoints
        expression = None
        # Seperate 'expression' from kwargs
        if 'expression' in kwargs:
            expression = kwargs['expression']
            del kwargs['expression']
        # Generate unique Transaction ID
        transaction_id = self.transaction_id
        # Append command/arguments to build list
        build_command = [command, '-i %i' % transaction_id]
        if args:
            build_command.extend(args)
        if kwargs:
            build_command.extend(['-%s %s' % pair for pair in kwargs.items()])
        # Remove leading/trailing spaces and build command string
        build_command = [part.strip() for part in build_command if part.strip()]
        command = ' '.join(build_command)
        if expression:
            command += ' -- ' + H.base64_encode(expression)
        # Show debug output
        debug('[Send command] %s' % command)
        # Send command to debugger engine
        try:
            self.socket.send(H.data_write(command + '\x00'))
        except:
            e = sys.exc_info()[1]
            raise ProtocolConnectionException(e)

    def listen(self):
        """
        Create socket server which listens for connection on configured port.
        """
        # Create socket server
        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if server:
            # Configure socket server
            try:
                server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                # 1s timeout so the accept loop below stays interruptible
                # via self.listening.
                server.settimeout(1)
                server.bind(('', self.port))
                server.listen(1)
                self.listening = True
                self.socket = None
            except:
                e = sys.exc_info()[1]
                raise ProtocolConnectionException(e)
            # Accept incoming connection on configured port
            while self.listening:
                try:
                    self.socket, address = server.accept()
                    self.listening = False
                except socket.timeout:
                    pass
            # Check if a connection has been made
            if self.socket:
                self.connected = True
                self.socket.settimeout(None)
            else:
                self.connected = False
                self.listening = False
            # Close socket server
            try:
                server.close()
            except:
                pass
            server = None
            # Return socket connection
            return self.socket
        else:
            raise ProtocolConnectionException('Could not create socket server.')
class ProtocolException(Exception):
    """Base error for DBGp protocol failures (e.g. malformed messages)."""
    pass


class ProtocolConnectionException(ProtocolException):
    """Raised for socket/connection-level failures with the debugger engine."""
    pass
| |
import copy
import mufsim.utils as util
from mufsim.errors import MufRuntimeError
from functools import cmp_to_key, total_ordering
class Item(object):
    """Base class for MUF stack items; wraps a single payload in .value."""

    value = 0

    def __init__(self, value):
        self.value = value

    def __bool__(self):
        # Generic items are always truthy; subclasses override as needed.
        return True

    # Python 2 truth-testing protocol delegates to __bool__.
    def __nonzero__(self):
        return self.__bool__()

    def __str__(self):
        return "Unknown"

    def __repr__(self):
        return str(self)
@total_ordering
class Mark(Item):
    """Stack-mark sentinel: falsy, compares equal to anything, never less."""

    def __init__(self):
        super(Mark, self).__init__(0)

    def __bool__(self):
        return False

    def __eq__(self, other):
        return True

    def __lt__(self, other):
        return False

    def __str__(self):
        return "Mark"
@total_ordering
class DBRef(Item):
    """Database reference (#N); the invalid ref #-1 is the only falsy value."""

    def __bool__(self):
        return self.value != -1

    def __str__(self):
        return "#%d" % self.value

    def __lt__(self, other):
        return self.value < other.value

    def __eq__(self, other):
        return self.value == other.value
@total_ordering
class Lock(Item):
    """Lock value; falsy only when the wrapped value is None."""

    def __bool__(self):
        return self.value is not None

    def __str__(self):
        return "Lock:%s" % self.value

    def __lt__(self, other):
        return self.value < other.value

    def __eq__(self, other):
        return self.value == other.value
@total_ordering
class Address(Item):
    """Code address: an instruction offset (.value) within a program (.prog)."""

    prog = -1

    def __init__(self, value, prog):
        self.prog = prog
        super(Address, self).__init__(value)

    def __bool__(self):
        return self.value is not None

    def __str__(self):
        return "Addr:'#%d'%d" % (self.prog, self.value)

    def __lt__(self, other):
        # Order lexicographically by (program, offset).
        return (self.prog, self.value) < (other.prog, other.value)

    def __eq__(self, other):
        return (self.prog, self.value) == (other.prog, other.value)
@total_ordering
class GlobalVar(Item):
    """Global variable slot, displayed as LV<n>."""

    def __bool__(self):
        return True

    def __str__(self):
        return "LV%d" % self.value

    def __lt__(self, other):
        return self.value < other.value

    def __eq__(self, other):
        return self.value == other.value
@total_ordering
class FuncVar(Item):
    """Function-scoped variable slot, displayed as SV<n>."""

    def __bool__(self):
        return True

    def __str__(self):
        return "SV%d" % self.value

    def __lt__(self, other):
        return self.value < other.value

    def __eq__(self, other):
        return self.value == other.value
@total_ordering
class MufList(Item):
    """MUF list-array: an Item wrapping a Python list.

    Pinned arrays are mutated in place; unpinned arrays are shallow-copied
    before modification (see copy_unpinned / set_item / del_item).
    """

    def __init__(self, val=None, pin=False):
        # Bug fix: the previous signature used the mutable default
        # ``val=[]``, so every MufList() created without an argument shared
        # (and mutated) the same underlying list object.
        super(MufList, self).__init__([] if val is None else val)
        self.pinned = pin

    def __str__(self):
        return str(self.value)

    def __bool__(self):
        # NOTE(review): a list never equals -1, so this is always True
        # (even for []); possibly ``bool(self.value)`` was intended --
        # kept as-is to preserve behavior.
        return self.value != -1

    def __len__(self):
        return len(self.value)

    def __lt__(self, other):
        return self.value < other.value

    def __eq__(self, other):
        return self.value == other.value

    def __getitem__(self, key):
        if not isinstance(key, int):
            raise MufRuntimeError("List array expects integer index.")
        return self.value[key]

    def __setitem__(self, key, val):
        if not isinstance(key, int):
            raise MufRuntimeError("List array expects integer index.")
        self.value[key] = val

    def __delitem__(self, key):
        del self.value[key]

    def __contains__(self, key):
        return key in self.value

    def __iter__(self):
        for val in self.value:
            yield val

    def keys(self):
        return range(len(self.value))

    def copy_unpinned(self):
        # Pinned arrays are shared; unpinned ones get a shallow copy.
        if self.pinned:
            return self
        return MufList(copy.copy(self.value), self.pinned)

    def set_item(self, idx, val):
        """Return an array with value set at idx (copies unless pinned)."""
        if not isinstance(idx, (int, slice)):
            raise MufRuntimeError("List array expects integer index.")
        arr = self.copy_unpinned()
        arr[idx] = val
        return arr

    def del_item(self, idx):
        """Return an array with idx deleted (copies unless pinned)."""
        if not isinstance(idx, (int, slice)):
            raise MufRuntimeError("List array expects integer index.")
        arr = self.copy_unpinned()
        del arr[idx]
        return arr
@total_ordering
class MufDict(Item):
    """MUF dictionary-array: an Item wrapping a Python dict.

    Keys iterate in sortcomp order. Pinned dicts mutate in place; unpinned
    ones are shallow-copied before modification.
    """

    def __init__(self, val=None, pin=False):
        # Bug fix: the previous signature used the mutable default
        # ``val={}``, so every MufDict() created without an argument shared
        # (and mutated) the same underlying dict object.
        super(MufDict, self).__init__({} if val is None else val)
        self.pinned = pin

    def __str__(self):
        vals = [
            "{}=>{}".format(
                util.escape_str(k) if isinstance(k, str) else str(k),
                util.escape_str(self.value[k]) if isinstance(self.value[k], str) else str(self.value[k]),
            )
            for k in self.keys()
        ]
        if not vals:
            # An empty dict renders as "[=>]".
            vals = ["=>"]
        return "[{}]".format(", ".join(vals))

    def __bool__(self):
        # NOTE(review): a dict never equals -1, so this is always True;
        # kept as-is to preserve behavior (mirrors MufList.__bool__).
        return self.value != -1

    def __len__(self):
        return len(self.value)

    def __lt__(self, other):
        return self.value < other.value

    def __eq__(self, other):
        return self.value == other.value

    def __getitem__(self, key):
        if not isinstance(key, (int, str)):
            raise MufRuntimeError("dictionary array expects integer or string index.")
        return self.value[key]

    def __setitem__(self, key, val):
        if not isinstance(key, (int, str)):
            raise MufRuntimeError("dictionary array expects integer or string index.")
        self.value[key] = val

    def __delitem__(self, key):
        del self.value[key]

    def __contains__(self, key):
        return key in self.value

    def __iter__(self):
        for key in self.keys():
            yield key

    def keys(self):
        # Deterministic MUF ordering via sortcomp.
        return sorted(list(self.value.keys()), key=cmp_to_key(sortcomp))

    def copy_unpinned(self):
        if self.pinned:
            return self
        return MufDict(copy.copy(self.value), self.pinned)

    def set_item(self, idx, val):
        """Return a dict-array with idx set (copies unless pinned)."""
        if not isinstance(idx, (int, str)):
            raise MufRuntimeError("Dictionary array expects integer or string index.")
        arr = self.copy_unpinned()
        arr[idx] = val
        return arr

    def del_item(self, idx):
        """Return a dict-array with idx deleted (copies unless pinned)."""
        if not isinstance(idx, (int, str)):
            raise MufRuntimeError("Dictionary array expects integer or string index.")
        arr = self.copy_unpinned()
        del arr[idx]
        return arr
def sortcomp(a, b, nocase=False):
    """Three-way comparator for MUF values of mixed types.

    Same-type values compare naturally (strings case-insensitively when
    nocase is set). Across types the precedence is: numbers < DBRefs <
    strings < everything else. Branch order below encodes that precedence.
    """
    if isinstance(a, type(b)):
        if isinstance(a, str) and nocase:
            a = a.upper()
            b = b.upper()
        # (a > b) - (a < b) is the cmp() idiom: -1, 0, or 1.
        return (a > b) - (a < b)
    if util.is_number(a) and util.is_number(b):
        # int vs float still compares numerically.
        return (a > b) - (a < b)
    if util.is_number(a):
        return -1
    if util.is_number(b):
        return 1
    if isinstance(a, DBRef):
        return -1
    if isinstance(b, DBRef):
        return 1
    if isinstance(a, str):
        return -1
    if isinstance(b, str):
        return 1
    return (a > b) - (a < b)
def sortcompi(a, b):
    """Case-insensitive variant of sortcomp."""
    return sortcomp(a, b, nocase=True)
def item_type_name(val):
    """Return the user-facing display name for *val*'s type."""
    type_text = str(type(val)).split("'")[1]
    if type(val) in [int, float, str, list, dict]:
        # Builtin types: capitalize the bare name, e.g. "int" -> "Int".
        return type_text.title()
    # Other types: take the segment after the first dot and strip the first
    # five characters -- presumably a fixed class-name prefix; TODO confirm.
    return type_text.split(".")[1][5:]
def item_repr(x):
    """Render *x* as single-line MUF-style literal text."""
    if isinstance(x, int):
        return "%d" % x
    if isinstance(x, float):
        txt = "%.12g" % x
        # Ensure integral floats still look like floats (e.g. "2" -> "2.0").
        if "e" not in txt and "." not in txt and txt not in ["-inf", "inf", "nan"]:
            txt = "%s.0" % txt
        return txt
    if isinstance(x, str):
        return util.escape_str(x)
    if isinstance(x, (list, tuple)):
        # Length-prefixed list literal: "3[1, 2, 3]".
        return "%d[%s]" % (len(x), ", ".join(item_repr(v) for v in x))
    if isinstance(x, dict):
        # Length-prefixed dict literal with keys in canonical MUF order.
        ordered = sorted(x.keys(), key=cmp_to_key(sortcomp))
        body = ", ".join("%s: %s" % (item_repr(k), item_repr(x[k])) for k in ordered)
        return "%d{%s}" % (len(x), body)
    return str(x)
def item_repr_pretty(x, indent=""):
subind = indent + ' '
if isinstance(x, int):
return "%s%d" % (indent, x)
elif isinstance(x, float):
x = "%.12g" % x
if "e" in x or "." in x or x in ["-inf", "inf", "nan"]:
return "%s%s" % (indent, x)
else:
return "%s%s.0" % (indent, x)
elif isinstance(x, str):
return "%s%s" % (indent, util.escape_str(x))
elif isinstance(x, list) or isinstance(x, tuple):
if not x:
return "%s[]" % indent
items = [
item_repr_pretty(v, subind)
for v in x
]
return "%s[\n%s\n%s]" % (indent, ",\n".join(items), indent)
elif isinstance(x, dict):
if not x:
return "%s{}" % indent
items = [
"%s: %s" % (
item_repr_pretty(k, subind),
item_repr_pretty(x[k], subind).lstrip(),
)
for k in sorted(list(x.keys()), key=cmp_to_key(sortcomp))
]
return "%s{\n%s\n%s}" % (indent, ",\n".join(items), indent)
else:
return '%s%s' % (indent, str(x))
# vim: expandtab tabstop=4 shiftwidth=4 softtabstop=4 nowrap
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.compute_v1.types import compute
# Report the installed package version in the user-agent; fall back to a
# blank ClientInfo when the distribution metadata is not available.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution("google-cloud-compute",).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class TargetSslProxiesTransport(abc.ABC):
    """Abstract transport class for TargetSslProxies.

    Concrete subclasses implement the RPC properties below; this base only
    resolves credentials and precomputes wrapped (retry/timeout-aware)
    methods.  Appears to be GAPIC-generated code -- manual edits are likely
    to be overwritten by regeneration.
    """

    AUTH_SCOPES = (
        "https://www.googleapis.com/auth/compute",
        "https://www.googleapis.com/auth/cloud-platform",
    )

    DEFAULT_HOST: str = "compute.googleapis.com"

    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        **kwargs,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.  Precedence: explicit credentials > credentials_file >
        # application default credentials.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id
            )

        # If the credentials are service account credentials, then always try to use self signed JWT.
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
        ):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods.
        self._wrapped_methods = {
            self.delete: gapic_v1.method.wrap_method(
                self.delete, default_timeout=None, client_info=client_info,
            ),
            self.get: gapic_v1.method.wrap_method(
                self.get, default_timeout=None, client_info=client_info,
            ),
            self.insert: gapic_v1.method.wrap_method(
                self.insert, default_timeout=None, client_info=client_info,
            ),
            self.list: gapic_v1.method.wrap_method(
                self.list, default_timeout=None, client_info=client_info,
            ),
            self.set_backend_service: gapic_v1.method.wrap_method(
                self.set_backend_service, default_timeout=None, client_info=client_info,
            ),
            self.set_proxy_header: gapic_v1.method.wrap_method(
                self.set_proxy_header, default_timeout=None, client_info=client_info,
            ),
            self.set_ssl_certificates: gapic_v1.method.wrap_method(
                self.set_ssl_certificates,
                default_timeout=None,
                client_info=client_info,
            ),
            self.set_ssl_policy: gapic_v1.method.wrap_method(
                self.set_ssl_policy, default_timeout=None, client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def delete(
        self,
    ) -> Callable[
        [compute.DeleteTargetSslProxyRequest],
        Union[compute.Operation, Awaitable[compute.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def get(
        self,
    ) -> Callable[
        [compute.GetTargetSslProxyRequest],
        Union[compute.TargetSslProxy, Awaitable[compute.TargetSslProxy]],
    ]:
        raise NotImplementedError()

    @property
    def insert(
        self,
    ) -> Callable[
        [compute.InsertTargetSslProxyRequest],
        Union[compute.Operation, Awaitable[compute.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def list(
        self,
    ) -> Callable[
        [compute.ListTargetSslProxiesRequest],
        Union[compute.TargetSslProxyList, Awaitable[compute.TargetSslProxyList]],
    ]:
        raise NotImplementedError()

    @property
    def set_backend_service(
        self,
    ) -> Callable[
        [compute.SetBackendServiceTargetSslProxyRequest],
        Union[compute.Operation, Awaitable[compute.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def set_proxy_header(
        self,
    ) -> Callable[
        [compute.SetProxyHeaderTargetSslProxyRequest],
        Union[compute.Operation, Awaitable[compute.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def set_ssl_certificates(
        self,
    ) -> Callable[
        [compute.SetSslCertificatesTargetSslProxyRequest],
        Union[compute.Operation, Awaitable[compute.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def set_ssl_policy(
        self,
    ) -> Callable[
        [compute.SetSslPolicyTargetSslProxyRequest],
        Union[compute.Operation, Awaitable[compute.Operation]],
    ]:
        raise NotImplementedError()
__all__ = ("TargetSslProxiesTransport",)
| |
from .std import tqdm, TqdmTypeError, TqdmKeyError
from ._version import __version__ # NOQA
import sys
import re
import logging
__all__ = ["main"]
def cast(val, typ):
    """Cast the CLI string *val* to type *typ* (a tqdm docstring type name).

    *typ* may be an alternation such as ``"int or str"``; each alternative
    is tried in order.  Raises TqdmTypeError when no cast succeeds.
    """
    log = logging.getLogger(__name__)
    log.debug((val, typ))
    if " or " in typ:
        for t in typ.split(" or "):
            try:
                return cast(val, t)
            except TqdmTypeError:
                pass
        raise TqdmTypeError(val + ' : ' + typ)

    # sys.stderr.write('\ndebug | `val:type`: `' + val + ':' + typ + '`.\n')
    if typ == 'bool':
        # A bare flag (empty value) counts as True.
        if (val == 'True') or (val == ''):
            return True
        elif val == 'False':
            return False
        else:
            raise TqdmTypeError(val + ' : ' + typ)
    try:
        # e.g. eval('int("5")'); typ comes from tqdm's own docstring, not
        # from arbitrary user input.
        return eval(typ + '("' + val + '")')
    # FIX: was a bare `except:` which also swallowed KeyboardInterrupt and
    # SystemExit; Exception is sufficient for the failure modes here.
    except Exception:
        if typ == 'chr':
            return chr(ord(eval('"' + val + '"')))
        else:
            raise TqdmTypeError(val + ' : ' + typ)
def posix_pipe(fin, fout, delim='\n', buf_size=256,
               callback=lambda _: None  # pragma: no cover
               ):
    """
    Pipe ``fin`` into ``fout``, reporting progress through ``callback``.

    Params
    ------
    fin  : file with `read(buf_size : int)` method
    fout  : file with `write` (and optionally `flush`) methods.
    delim  : str
        Record delimiter; when falsy, progress is counted per character/byte
        read instead of per delimited record.
    buf_size  : int
        Chunk size for each `read` call.
    callback  : function(int), e.g.: `tqdm.update`
        Called with the number of records (or characters) just written.
        (FIX: the default lambda previously named its parameter ``int``,
        shadowing the builtin.)
    """
    fp_write = fout.write

    if not delim:
        # Character/byte counting mode: stream chunks straight through.
        while True:
            tmp = fin.read(buf_size)
            # flush at EOF
            if not tmp:
                getattr(fout, 'flush', lambda: None)()  # pragma: no cover
                return
            fp_write(tmp)
            callback(len(tmp))

    buf = ''
    while True:
        tmp = fin.read(buf_size)
        # flush at EOF, emitting any trailing partial record first
        if not tmp:
            if buf:
                fp_write(buf)
                callback(1 + buf.count(delim))
            getattr(fout, 'flush', lambda: None)()  # pragma: no cover
            return
        while True:
            try:
                i = tmp.index(delim)
            except ValueError:
                # No complete record left in this chunk; carry it over.
                buf += tmp
                break
            else:
                fp_write(buf + tmp[:i + len(delim)])
                callback(1)
                buf = ''
                tmp = tmp[i + len(delim):]
# ((opt, type), ... )
RE_OPTS = re.compile(r'\n {8}(\S+)\s{2,}:\s*([^,]+)')
# better split method assuming no positional args
RE_SHLEX = re.compile(r'\s*(?<!\S)--?([^\s=]+)(\s+|=|$)')
# TODO: add custom support for some of the following?
UNSUPPORTED_OPTS = ('iterable', 'gui', 'out', 'file')
# The 8 leading spaces are required for consistency
CLI_EXTRA_DOC = r"""
Extra CLI Options
-----------------
name : type, optional
TODO: find out why this is needed.
delim : chr, optional
Delimiting character [default: '\n']. Use '\0' for null.
N.B.: on Windows systems, Python converts '\n' to '\r\n'.
buf_size : int, optional
String buffer size in bytes [default: 256]
used when `delim` is specified.
bytes : bool, optional
If true, will count bytes, ignore `delim`, and default
`unit_scale` to True, `unit_divisor` to 1024, and `unit` to 'B'.
manpath : str, optional
Directory in which to install tqdm man pages.
log : str, optional
CRITICAL|FATAL|ERROR|WARN(ING)|[default: 'INFO']|DEBUG|NOTSET.
"""
def main(fp=sys.stderr, argv=None):
    """
    CLI entry point: builds option metadata from tqdm's docstring, parses
    argv against it, then pipes stdin to stdout through a tqdm bar.

    Parameters (internal use only)
    ---------
    fp  : file-like object for tqdm
    argv  : list (default: sys.argv[1:])
    """
    if argv is None:
        argv = sys.argv[1:]
    # Extract the --log level first so logging is configured before any
    # other work; supports both "--log LEVEL" and "--log=LEVEL".
    try:
        log = argv.index('--log')
    except ValueError:
        for i in argv:
            if i.startswith('--log='):
                logLevel = i[len('--log='):]
                break
        else:
            logLevel = 'INFO'
    else:
        # argv.pop(log)
        # logLevel = argv.pop(log)
        logLevel = argv[log + 1]
    logging.basicConfig(
        level=getattr(logging, logLevel),
        format="%(levelname)s:%(module)s:%(lineno)d:%(message)s")
    log = logging.getLogger(__name__)

    # Option names/types are scraped from tqdm.__init__'s docstring plus the
    # CLI-only extras.
    d = tqdm.__init__.__doc__ + CLI_EXTRA_DOC

    opt_types = dict(RE_OPTS.findall(d))
    # opt_types['delim'] = 'chr'

    for o in UNSUPPORTED_OPTS:
        opt_types.pop(o)

    log.debug(sorted(opt_types.items()))

    # d = RE_OPTS.sub(r'  --\1=<\1>  : \2', d)
    split = RE_OPTS.split(d)
    opt_types_desc = zip(split[1::3], split[2::3], split[3::3])
    d = ''.join('\n  --{0}=<{0}>  : {1}{2}'.format(*otd)
                for otd in opt_types_desc if otd[0] not in UNSUPPORTED_OPTS)

    d = """Usage:
  tqdm [--help | options]

Options:
  -h, --help     Print this help and exit
  -v, --version  Print version and exit

""" + d.strip('\n') + '\n'

    # opts = docopt(d, version=__version__)
    if any(v in argv for v in ('-v', '--version')):
        sys.stdout.write(__version__ + '\n')
        sys.exit(0)
    elif any(v in argv for v in ('-h', '--help')):
        sys.stdout.write(d + '\n')
        sys.exit(0)

    # Shlex-like split of "--opt value" / "--opt=value" pairs.
    argv = RE_SHLEX.split(' '.join(["tqdm"] + argv))
    opts = dict(zip(argv[1::3], argv[3::3]))

    log.debug(opts)
    opts.pop('log', True)

    tqdm_args = {'file': fp}
    try:
        for (o, v) in opts.items():
            try:
                tqdm_args[o] = cast(v, opt_types[o])
            except KeyError as e:
                raise TqdmKeyError(str(e))
        log.debug('args:' + str(tqdm_args))
    except:
        # On any parse error: show usage, then fall back to a plain
        # pass-through of stdin before re-raising.
        fp.write('\nError:\nUsage:\n  tqdm [--help | options]\n')
        for i in sys.stdin:
            sys.stdout.write(i)
        raise
    else:
        buf_size = tqdm_args.pop('buf_size', 256)
        delim = tqdm_args.pop('delim', '\n')
        delim_per_char = tqdm_args.pop('bytes', False)
        manpath = tqdm_args.pop('manpath', None)
        # Prefer binary buffers when available (Python 3).
        stdin = getattr(sys.stdin, 'buffer', sys.stdin)
        stdout = getattr(sys.stdout, 'buffer', sys.stdout)
        if manpath is not None:
            # Install the man page and exit.
            from os import path
            from shutil import copyfile
            from pkg_resources import resource_filename, Requirement
            fi = resource_filename(Requirement.parse('tqdm'), 'tqdm/tqdm.1')
            fo = path.join(manpath, 'tqdm.1')
            copyfile(fi, fo)
            log.info("written:" + fo)
            sys.exit(0)
        if delim_per_char:
            # Byte-counting mode.
            tqdm_args.setdefault('unit', 'B')
            tqdm_args.setdefault('unit_scale', True)
            tqdm_args.setdefault('unit_divisor', 1024)
            log.debug(tqdm_args)
            with tqdm(**tqdm_args) as t:
                posix_pipe(stdin, stdout, '', buf_size, t.update)
        elif delim == '\n':
            # Default newline-delimited mode: iterate lines directly.
            log.debug(tqdm_args)
            for i in tqdm(stdin, **tqdm_args):
                stdout.write(i)
        else:
            # Custom delimiter mode.
            log.debug(tqdm_args)
            with tqdm(**tqdm_args) as t:
                posix_pipe(stdin, stdout, delim, buf_size, t.update)
| |
__author__ = 'phoetrymaster'
from osgeo import gdal
from osgeo.gdalconst import *
import os
import numpy
from datetime import datetime as dt
# Raise Python exceptions instead of returning GDAL error codes.
gdal.UseExceptions()

# --- Run configuration (Python 2 script) ---
imagepath = "/Users/phoetrymaster/Documents/School/Geography/Thesis/Data/ARC_Testing/ClipTesting/ENVI_1/test_clip_envi_3.dat"
rootdir = "/Users/phoetrymaster/Documents/School/Geography/Thesis/Data/OutImages/"
newfoldername = "Testing"

drivercode = 'ENVI'
# NoData value (MODIS convention) and DOY of the first band.
ndvalue = -3000
startDOY = 1
thresh = 500
bestguess = 0
fitmthd = 'SLSQP'

# Sample pixel locations per crop (col, row) -- presumably used to build the
# reference curves below; TODO confirm orientation.
soylocs = [(6002, 2143), (5944, 2102), (5746, 2183), (5998, 2171)]
cornlocs = [(5997, 2139), (5940, 2096), (6051, 2230), (5691, 1998)]
wheatlocs = [(5993, 2136), (5937, 2080), (5935, 2076), (5921, 2217)]

refstoget = {"soy": soylocs, "corn": cornlocs, "wheat": wheatlocs}

# Reference NDVI curves: {crop: {DOY: mean value}} on a 16-day cadence.
refs = {
    'soy': {1: 174.5, 97: 1252.25, 65: 1139.5, 209: 7659.0, 273: 4606.75, 337: 1371.75, 17: 1055.5, 33: 1098.0,
            49: 1355.25,
            129: 1784.75, 257: 6418.0, 321: 1644.5, 305: 1472.75, 193: 5119.75, 289: 1878.75, 177: 3439.5, 241: 7565.75,
            81: 1205.5, 225: 7729.75, 145: 1736.25, 161: 1708.25, 353: 1358.25, 113: 1340.0},
    'corn': {1: 392.25, 97: 1433.25, 65: 1258.5, 209: 6530.0, 273: 1982.5, 337: 1658.5, 17: 1179.25, 33: 1196.75,
             49: 1441.25, 129: 1885.25, 257: 2490.25, 321: 1665.75, 305: 1439.0, 193: 6728.25, 289: 1634.5,
             177: 6356.75,
             241: 4827.25, 81: 1355.75, 225: 5547.5, 145: 2196.5, 161: 3143.25, 353: 1704.75, 113: 1716.5},
    'wheat': {1: 719.75, 97: 6594.75, 65: 1935.25, 209: 2013.5, 273: 1493.5, 337: 1498.25, 17: 1816.5, 33: 1815.0,
              49: 1985.25, 129: 6758.0, 257: 1685.75, 321: 1582.5, 305: 1163.25, 193: 2186.25, 289: 1264.5, 177: 2222.5,
              241: 2301.0, 81: 4070.5, 225: 1858.0, 145: 6228.5, 161: 3296.5, 353: 1372.5, 113: 7035.25}
}
########## METHODS ##########
def create_output_raster(outFile, cols, rows, bands, datatype, drivername="GTiff"):
    """Create and return a new GDAL dataset at *outFile* with the given size."""
    drv = gdal.GetDriverByName(drivername)
    drv.Register()
    return drv.Create(outFile, cols, rows, bands, datatype)
def create_output_dir(root, name):
    """Create a fresh directory under *root* named *name*.

    If *name* already exists, ``name_1``, ``name_2``, ... are tried until a
    free path is found.  Returns the path of the created directory.
    """
    base = os.path.join(root, name)
    dirpath = base
    suffix = 1
    while os.path.isdir(dirpath):
        dirpath = "%s_%d" % (base, suffix)
        suffix += 1
    os.makedirs(dirpath)
    return dirpath
def get_sort_dates_values(vals, threshhold=-3000):
    """Split a {DOY: value} dict into sorted, thresholded parallel lists.

    Returns (x, y): x holds the DOY keys of *vals* in chronological order
    and y holds the corresponding values with anything below *threshhold*
    clamped to *threshhold* (-3000 is the MODIS NoData value).
    """
    # FIX: `x = vals.keys(); x.sort()` relied on Python 2's list-returning
    # keys(); sorted() is equivalent there and also works on Python 3.
    x = sorted(vals)
    y = [max(vals[i], threshhold) for i in x]
    return x, y
def find_fit(valsf, valsh, bestguess, threshhold, mthd="TNC"):
    """Fit the reference curve *valsh* to the observed pixel series *valsf*.

    A cubic spline is built over the reference values and an RMS-style
    distance between the observations and an (amplitude, time-scale,
    time-shift) transformed spline is minimized.

    Returns (residual, [y_scale, x_scale, x_shift], optimizer message).
    """
    from numpy import sum
    from scipy import interpolate
    from scipy import optimize

    x0, y0 = get_sort_dates_values(valsf, threshhold=threshhold)
    x1, y1 = get_sort_dates_values(valsh)

    tck = interpolate.splrep(x1, y1)

    # NOTE(review): 22.8125 == 365/16, presumably the number of 16-day
    # composites per year used as a normalization constant -- confirm.
    fun = lambda x: ((1 / 22.8125 * sum(
        (valsf[i] - (x[0] * interpolate.splev((x[1] * (i + x[2])), tck))) ** 2 for i in x0)) ** (
                         1. / 2))

    # Scale factors bounded to +/-40%, time shift to +/-10 days.
    bnds = ((0.6, 1.4), (0.6, 1.4), (-10, 10))
    res = optimize.minimize(fun, (1, 1, bestguess), method=mthd, bounds=bnds)

    return res.fun, res.x, res.message
########## PROCEDURE ##########

# Flat driver script: open the multi-date image, fit each crop reference
# curve to every pixel, and write one residual raster per crop plus a
# best-fit class raster.  (Python 2 syntax.)
start = dt.now()
print start

try:
    outputdirectory = create_output_dir(rootdir, newfoldername)
    print "\nOutputting files to : {0}".format(outputdirectory)

    gdal.AllRegister()

    #Open multi-date image to analyze
    img = gdal.Open(imagepath, GA_ReadOnly)
    if img is None:
        raise Exception("Could not open: {0}".format(imagepath))

    #Get image properties
    # NOTE(review): cols is taken from RasterYSize and rows from RasterXSize,
    # the opposite of the usual GDAL convention; the rest of the script is
    # internally consistent with this swap -- confirm before changing.
    cols = img.RasterYSize
    rows = img.RasterXSize
    bands = img.RasterCount
    geotransform = img.GetGeoTransform()
    projection = img.GetProjection()

    print "Input image dimensions are {0} columns by {1} rows and contains {2} bands.".format(cols, rows, bands)

    #Create output rasters for each crop type to hold residual values from fit and arrays
    print "\nCreating output files..."
    outfiles = {}
    outdatasets = {}
    outarrays = {}
    for key in refs:
        outfile = os.path.join(outputdirectory, key) + ".tif"
        outfiles[key] = create_output_raster(outfile, cols, rows, 1, GDT_Float32, drivername=drivercode)
        outfiles[key].SetGeoTransform(geotransform)
        outfiles[key].SetProjection(projection)
        outdatasets[key] = outfiles[key].GetRasterBand(1)
        outarrays[key] = numpy.zeros(shape=(rows, cols))
        print "\tCreated file: {0}".format(outfile)

    #Create output raster for bestFit
    outfile = os.path.join(outputdirectory, "bestFit") + ".tif"
    fitimgfile = create_output_raster(outfile, cols, rows, 1, GDT_Byte, drivername=drivercode)
    fitimgfile.SetGeoTransform(geotransform)
    fitimgfile.SetProjection(projection)
    fitimg = fitimgfile.GetRasterBand(1)
    fitarray = numpy.zeros(shape=(rows, cols))
    print "\tCreated file: {0}".format(outfile)

    #Iterate through each pixel and calculate the fit for each ref curve; write residuals and best fit to rasters
    print "\nFinding pixel fits..."
    for row in range(0, rows):
        arrays = {}
        # Read one image row from every band (keyed 1..bands).
        for i in range(0, bands):
            band = img.GetRasterBand(i+1)
            arrays[i+1] = band.ReadAsArray(0, row, cols, 1)

        for col in range(0, cols):
            valsf = {}
            #print "Pixel r:{0}, c:{1}:".format(row, col)
            # Build the DOY -> value series assuming 16-day composites.
            for i in range(0, bands):
                measured = arrays[i+1][0, col]
                valsf[startDOY + i*16] = measured
            count = 1
            fit = {}
            for key, val in refs.items():
                res, transforms, message = find_fit(valsf, val, bestguess, threshhold=thresh, mthd=fitmthd)
                outarrays[key][row, col] = res
                #print "\t{0}: {1}, {2}, {3}".format(key, res, transforms, message)
                fit[res] = count
                count += 1
            # Class index (1-based, in refs iteration order) of the smallest residual.
            fitarray[row, col] = fit[min(fit.keys())]

    #Write output array values to files
    print "\nWriting output files..."
    for key, values in outdatasets.items():
        outdatasets[key].WriteArray(outarrays[key], 0, 0)
    fitimg.WriteArray(fitarray, 0, 0)

    print "\nProcess finished."
    print dt.now() - start

except Exception as e:
    print e

finally:
    # Drop references so GDAL flushes and closes the datasets.
    print "\nClosing files..."
    try:
        fitimg = None
        fitimgfile = None
    except:
        pass
    try:
        for key, value in outdatasets.items():
            outdatasets[key] = None
    except:
        pass
    try:
        for key, value in outfiles.items():
            outfiles[key] = None
    except:
        pass
| |
from collections import OrderedDict
import time
from threading import Thread
import requests
from django.db import connection
from aggregator.models import Variable, Dimension
from .utils import GRANULARITY_MIN_PAGES, ResultEncoder
def query_string_from_params(params):
    """Serialize *params* into a '?key=value&...' query string.

    NOTE(review): values are not URL-encoded; fine for the Solr parameters
    built in this module, but would break on characters like '&' -- confirm
    before reusing elsewhere.
    """
    pairs = ['%s=%s' % (key, value) for key, value in params.items()]
    return '?' + '&'.join(pairs)
def process(self, dimension_values='', variable='', only_headers=False, commit=True, execute=False, raw_query=False):
    """Translate ``self.document`` (a query description) into a Solr request.

    Builds select/filter/group/order/paging parameters from the document,
    optionally executes the query against the Solr endpoint, and returns a
    response dict with ``headers`` (and ``results`` unless *only_headers*).
    """
    # Requested dimensions arrive as a comma-separated string.
    if dimension_values:
        dimension_values = dimension_values.split(',')
    else:
        dimension_values = []
    # all params
    request_params = {}
    # select
    selects = OrderedDict()
    headers = []
    header_sql_types = []
    columns = []
    groups = []
    v_names = []
    for _from in self.document['from']:
        v_obj = Variable.objects.get(pk=_from['type'])
        for s in _from['select']:
            obj = None
            if s['type'] != 'VALUE':
                # Selecting a dimension column.
                dimension = Dimension.objects.get(pk=s['type'])
                obj = dimension
                column_name = dimension.name
                column_unit = dimension.unit
                column_axis = dimension.axis
                column_step = dimension.step
                sql_type = dimension.sql_type
            else:
                # Selecting the variable's value column.
                obj = v_obj
                column_name = v_obj.name
                v_names.append(column_name)
                column_unit = v_obj.unit
                column_axis = None
                column_step = None
                sql_type = 'double precision'
            selects[s['name']] = {'field': column_name, 'obj': obj}
            if 'joined' not in s:
                c_name = column_name
                if s.get('aggregate', '') != '':
                    c_name = '%s(%s)' % (s.get('aggregate'), column_name)
                if not s.get('exclude', False):
                    header_sql_types.append(sql_type)
                    headers.append({
                        'title': s['title'],
                        'name': s['name'],
                        'unit': column_unit,
                        'step': column_step,
                        'quote': '' if sql_type.startswith('numeric') or sql_type.startswith('double') else '"',
                        'isVariable': s['type'] == 'VALUE',
                        'axis': column_axis,
                    })
                    # add fields to select clause
                    columns.append((c_name, s['name']))
                    # add fields to grouping
                    if s.get('groupBy', False):
                        groups.append(c_name)
    # select
    select_clause = ','.join(['%s:%s' % (c[1], c[0]) for c in columns])
    request_params['fl'] = select_clause
    # where: wrap the document's filters so each variable is constrained > 0
    filters = self.document.get('filters', '')
    for v_name in v_names:
        filters = {
            'a': {'a': v_name, 'op': 'gt', 'b': '0'},
            'op': 'AND',
            'b': filters.copy() if filters else {'a': '*', 'op': 'EQ', 'b': '*'}
        }
    where_clause = self.process_filters(filters, mode='solr')
    for column_name, field_name in columns:
        where_clause = where_clause.replace(field_name, column_name)
    request_params['q'] = where_clause
    # grouping
    # NOTE(review): assigning request_params['group.field'] in a loop keeps
    # only the LAST group field; multiple groupings are silently dropped --
    # looks like a bug, confirm intended behavior.
    if groups:
        request_params['group'] = 'true'
        for group_field in groups:
            request_params['group.field'] = group_field
    # ordering
    orderings = self.document.get('orderings', [])
    if orderings:
        order_by_clause = ','.join([('%s %s' % (o['name'], o['type'])) for o in orderings])
        for column_name, field_name in columns:
            order_by_clause = order_by_clause.replace(field_name, column_name)
        request_params['sort'] = order_by_clause
    # offset & limit
    offset = 0
    limit = None
    if 'offset' in self.document and self.document['offset']:
        offset = int(self.document['offset'])
        request_params['start'] = offset
    if 'limit' in self.document and self.document['limit']:
        limit = int(self.document['limit'])
        request_params['rows'] = limit
    print(query_string_from_params(request_params))
    if not only_headers:
        # execute query & return results
        t1 = time.time()
        # count pages
        # NOTE(review): on Python 3 these `/` divisions produce floats for
        # 'current'/'total' -- confirm whether integer pages are expected.
        if limit is not None:
            pages = {
                'current': (offset / limit) + 1,
                'total': 1
            }
        else:
            pages = {
                'current': 1,
                'total': 1
            }
        if not execute:
            # only count
            request_params['rows'] = 0
        resp = requests.get('http://212.101.173.50:8983/solr/bdo/select%s' %
                            query_string_from_params(request_params)).json()
        self.count = resp['response']['numFound']
        if limit is not None:
            pages['total'] = (self.count - 1) / limit + 1
        results = []
        if execute:
            all_rows = resp['response']['docs']
            # we have to convert numeric results to float
            # by default they're returned as strings to prevent loss of precision
            for row in all_rows:
                res_row = []
                for _, field_alias in columns:
                    res_row.append(row[field_alias])
                results.append(res_row)
    # include dimension values if requested
    for d_name in dimension_values:
        hdx, header = [hi for hi in enumerate(headers) if hi[1]['name'] == d_name][0]
        d = selects[d_name]['obj']
        if not d.non_filterable:
            header['values'] = d.values
    # include variable ranges if requested
    if variable:
        vdx, v = [vi for vi in enumerate(headers) if vi[1]['name'] == variable][0]
        v['distribution'] = selects[variable]['obj'].distribution
    if not only_headers:
        # monitor query duration
        q_time = (time.time() - t1) * 1000
    if not only_headers:
        response = {
            'results': results,
            'headers': {
                'runtime_msec': q_time,
                'pages': pages,
            }
        }
    else:
        response = {'headers': {}}
    response['headers']['columns'] = headers
    if raw_query:
        response['raw_query'] = query_string_from_params(request_params)
    # store headers
    self.headers = ResultEncoder(mode='solr').encode(headers)
    if self.pk and commit:
        self.save()
    return response
| |
"""
Classes that generate content for ImgDataset.
These classes deal with the distribution of labour. The work may be done
online, in worker threads or in worker processes.
"""
__authors__ = "Nicu Tofan"
__copyright__ = "Copyright 2015, Nicu Tofan"
__credits__ = ["Nicu Tofan"]
__license__ = "3-clause BSD"
__maintainer__ = "Nicu Tofan"
__email__ = "nicu.tofan@gmail.com"
import cProfile
from datetime import datetime
import dill
import functools
import logging
import multiprocessing
import numpy
import os
import Queue
import threading
import time
import zmq
#from pyl2extra.datasets.img_dataset.dataset import ImgDataset
from pyl2extra.utils import slice_count
class Generator(object):
    """
    Base class for objects that generate dataset content on request.
    """
    def __init__(self):
        #: associated dataset - the bound is created in setup() method
        self.dataset = None
        super(Generator, self).__init__()

    def is_inline(self):
        """
        Tell if this generator works on the same thread as the requester.

        Returns
        -------
        inline : bool
            True if the thread will block waiting for the result, False if
            the result is generated in parallel.
        """
        raise NotImplementedError()

    def setup(self, dataset):
        """
        Called by the dataset once it initialized itself.
        """
        self.dataset = dataset
        #assert isinstance(dataset, ImgDataset)

    def tear_down(self):
        """
        Called by the dataset from its tear_down() method.
        """
        pass

    def __hash__(self):
        """
        Called by built-in function hash() and for operations on members
        of hashed collections including set, frozenset, and dict.

        Hashes by class name only, so all instances of one generator class
        compare as the same hash bucket.
        """
        return hash(self.__class__.__name__)

    def get(self, source, next_index):
        """
        The get method used by the dataset to retrieve batches of data.

        Parameters
        ----------
        source : tuple of str
            A tuple of source identifiers (strings) to indicate the
            source for the data to retrieve. The iterator will receive
            a ``data_specs`` argument consisting of ``(space, source)``.
        next_index : list or slice object
            The indexes of the examples to retrieve specified either as a
            list or as a slice.

        Returns
        -------
        next_batch : tuple
            The result is a tuple of batches, one for each ``source`` that
            was requested. Each batch in the tuple should follow the
            dataspecs for the dataset.
        """
        raise NotImplementedError()

    def _prep_get(self, source, next_index):
        """
        Common operations for a get() call.

        Returns the example count, pre-allocated result arrays, and the
        positions of the 'features' / 'targets' sources (-1 when absent).
        """
        count = slice_count(next_index)

        # prepare for iteration
        idx_features = -1
        idx_targets = -1
        result = []
        for i, src in enumerate(source):
            if src == 'features':
                idx_features = i
                # images are (count, height, width, RGB)
                result.append(numpy.zeros(shape=(count,
                                                 self.dataset.shape[0],
                                                 self.dataset.shape[1],
                                                 3)))
            elif src == 'targets':
                idx_targets = i
                result.append(numpy.zeros(shape=(count, 1), dtype='int32'))
            else:
                raise ValueError('%s implements <features> and <targets>; '
                                 '<%s> is not among these.' %
                                 (str(self.__class__.__name__), src))
        return count, result, idx_features, idx_targets

    def __getstate__(self):
        """
        Help pickle this instance.
        """
        return {'dataset': self.dataset}

    def __setstate__(self, state):
        """
        Help un-pickle this instance.
        """
        self.dataset = state['dataset']
class Basket(object):
    """
    Container for one batch of processed images and their class labels.
    """
    #: class-level counter used to hand out unique instance identifiers
    id_factory = 1

    def __init__(self, batch=None, classes=None):
        #: processed images as a numpy.ndarray (None when empty)
        self.batch = batch
        #: class labels corresponding to the entries of `batch`
        self.classes = classes
        #: unique identifier for this instance (set by assign_id)
        self.ident = None
        self.assign_id()

    def __len__(self):
        """Number of processed images held (0 when there is no batch)."""
        return 0 if self.batch is None else self.batch.shape[0]

    def assign_id(self):
        """Give this instance the next unique identifier."""
        self.ident = Basket.id_factory
        Basket.id_factory += 1
class InlineGen(Generator):
    """
    Generates the content while the other parties wait, on the same thread.

    Parameters
    ----------
    profile : bool, optional
        When True, every get() call is profiled with cProfile and the stats
        are dumped to a per-call file under /dev/shm.
    """
    def __init__(self, profile=False):
        #: whether get() calls should be profiled
        self.profile = profile
        if profile:
            #: base path for per-call profiler dump files
            self.profile_file = '/dev/shm/pyl2x-adj-' + datetime.now().strftime("%Y%m%d-%H%M%S")
            #: sequence number appended to each dump file name
            self.profile_cnt = 1
        super(InlineGen, self).__init__()

    @functools.wraps(Generator.is_inline)
    def is_inline(self):
        return True

    # FIX: this decorator previously wrapped Generator.is_inline, which
    # clobbered get's __name__/__doc__ with is_inline's metadata.
    @functools.wraps(Generator.get)
    def get(self, source, next_index):
        if self.profile:
            profiler = cProfile.Profile()
            profiler.enable()
        count, result, idx_features, idx_targets = self._prep_get(source,
                                                                  next_index)
        # iterate to collect data
        for i in range(count):
            fpath = self.dataset.data_provider.cnext()
            trg, categ = self.dataset.data_provider.read(fpath)
            categ = self.dataset.data_provider.categ2int(categ)
            # promote to a (1, h, w, ch) batch for dataset.process()
            trg = numpy.reshape(trg,
                                (1, trg.shape[0], trg.shape[1], trg.shape[2]))
            if idx_features > -1:
                trg = self.dataset.process(trg)
                # keep only the RGB planes
                result[idx_features][i, :, :, :] = trg[0, :, :, 0:3]
            if idx_targets > -1:
                result[idx_targets][i][0] = categ
        if self.profile:
            profiler.disable()
            profiler.dump_stats('%s.%d.profile' % (self.profile_file,
                                                   self.profile_cnt))
            self.profile_cnt = self.profile_cnt + 1
        return tuple(result)

    def __getstate__(self):
        """
        Help pickle this instance.

        NOTE(review): the profile/profile_file/profile_cnt attributes are
        not saved, so an un-pickled instance loses its profiling state --
        confirm that is intended.
        """
        return super(InlineGen, self).__getstate__()

    def __setstate__(self, state):
        """
        Help un-pickle this instance.
        """
        super(InlineGen, self).__setstate__(state)
class AsyncMixin(object):
"""
Functionality that is common to threads and processes.
"""
    def __init__(self, cache_refill_count=64, keep_baskets=0):
        #: list of cached batches; each list entry is a basket
        self.baskets = []
        #: number of cached images (in all baskets)
        self.cached_images = 0
        #: if the cache has fewer than this number of images request refill
        # NOTE(review): the attribute name keeps the original "treshold"
        # typo (external code may rely on it) and the value is hard-coded
        # at 5256 rather than derived from cache_refill_count -- confirm.
        self.cache_refill_treshold = 5256
        #: number of images to retrieve by each thread
        self.cache_refill_count = cache_refill_count
        #: on termination counts the workers that exited
        self.finish = 0
        #: one time trigger for the threads to exit
        self._should_terminate = False
        #: number of workers to use
        self.workers_count = 0
        #: number of baskets to keep around if we don't have enough data
        self.keep_baskets = keep_baskets
        #: the list of baskets kept around
        self.baskets_backup = []
        #: number of unique examples
        self.uniq_ex = 0
    def _get(self, source, next_index):
        """
        Get method common implementation.

        Serves `next_index` examples out of the cached baskets, requesting
        refills from the workers as the cache drains.
        """
        count, result, idx_features, idx_targets = self._prep_get(source,
                                                                  next_index)
        assert count > 0
        logging.debug('get a batch of %d images (%d cached)',
                      count, self.cached_images)

        # deals with both bootstraping and saving for future
        self.cache_first_batch()

        # where inside result array we're placing the data
        # NOTE(review): `offset` is initialized but never advanced, so each
        # basket copies into the START of the result arrays; this is only
        # correct if a single basket always satisfies the whole request --
        # looks like a bug, confirm.
        offset = 0
        while count > 0:
            self._new_or_backup(count)
            # get a basket from our list
            basket = self.get_basket()
            if basket is None:
                continue
            # copy the things in place
            to_copy = min(count, len(basket))
            if idx_features > -1:
                btc = basket.batch[0:to_copy, :, :, 0:3]
                result[idx_features][offset:offset+to_copy, :, :, :] = btc
            if idx_targets > -1:
                btc = basket.classes[0:to_copy]
                result[idx_targets][offset:offset+to_copy, 0] = btc
            count = count - to_copy
            # the basket was larger so we have to put it back
            if len(basket) > to_copy:
                logging.debug("Inefficient use of baskets: %d needed, %d in basket",
                              to_copy, len(basket))
                leftover = Basket()
                leftover.ident = basket.ident
                leftover.batch = basket.batch[to_copy:, :, :, :]
                leftover.classes = basket.classes[to_copy:]
                self.add_basket(leftover, False)
            self.basked_done(basket)
        # make sure we're ready for next round
        refill = self.cache_refill_treshold - self.cached_images
        assert self.cache_refill_count > 0
        while refill > 0:
            self.push_request(self.cache_refill_count)
            refill = refill - self.cache_refill_count
        return tuple(result)
def basked_done(self, basket):
"""
A basket was received and it was extracted from queue.
After the baskets are used they are normally discarded. If we're
unable to provide examples fast enough the network will block
waiting (sometimes for tens of seconds). To alleviate that, we keep
arround the old examples and we serve them when there are no
new baskets.
"""
if self.keep_baskets == 0:
return
assert self.keep_baskets > 0
lkb = len(self.baskets_backup)
if lkb >= self.keep_baskets:
# make room for the new basket
lkb = lkb - self.keep_baskets + 1
self.baskets_backup = self.baskets_backup[lkb:]
self.baskets_backup.append(basket)
logging.debug('basket of %d images cached; %d baskets in cache',
len(basket), len(self.baskets_backup))
    def _new_or_backup(self, count):
        """
        Replacement for `_wait_for_data()` that either gets examples from
        queue or from the backup list.

        No-op when fresh baskets are already cached. Otherwise, if the
        backup list is empty we block in `_wait_for_data()`; if backups
        exist they are recycled into the basket list (optionally queuing
        refill requests first, when the workers are idle).

        Parameters
        ----------
        count : int
            Number of examples the caller needs; used to size refill
            requests.
        """
        if len(self.baskets) > 0:
            return
        if len(self.baskets_backup) == 0:
            self._wait_for_data(count)
        else:
            if self._starving():
                # workers have nothing queued; request at least `count` images
                refill = max(self.cache_refill_count, count)
                while refill > 0:
                    self.push_request(self.cache_refill_count)
                    refill = refill - self.cache_refill_count
            # recycle backups; brand_new=False because these examples were
            # already counted as unique when first produced
            self.add_basket(self.baskets_backup, False)
            self.baskets_backup = []
def __getstate__(self):
"""
Help pickle this instance.
"""
return super(AsyncMixin, self).__getstate__()
def __setstate__(self, state):
"""
Help un-pickle this instance.
"""
super(AsyncMixin, self).__setstate__(state)
def _setup(self):
"""
Common setup for asyncroneous providers.
"""
# delayed initialization state
self.bootstrap_state = 3
# (should be a customizable param.) - number of examples in 1st batch
self.bootstrap_count = 1024
def cache_first_batch(self):
"""
If conditions are optimal and the user wants first batch saved do that now.
We check if the batch was already saved and we do nothing in that case.
"""
if self.bootstrap_state == 0:
# caching is disabled, so nothing to do
return
elif self.bootstrap_state == 1:
# there is already a cache entry saved (either by this run or loaded)
return
elif self.bootstrap_state == 3:
# delayed initialization from first get call
self.cached_first_batch = self.dataset.get_cache_loc()
if self.cached_first_batch is None:
logging.debug('Bootstrapping is disables')
self.bootstrap_state = 0
return
self.categ_file = os.path.join(self.cached_first_batch,
'bootstrap.categs.npy')
self.cached_first_batch = os.path.join(self.cached_first_batch,
'bootstrap.npy')
logging.debug('Bootstrapping location: %s', self.cached_first_batch)
if os.path.isfile(self.cached_first_batch):
# we have a file, so load it
array = numpy.load(self.cached_first_batch)
self.set_continous(array, numpy.load(self.categ_file))
self.bootstrap_state = 1
logging.debug('A bootstrap batch of %d examples was loaded from %s',
array.shape[0], self.cached_first_batch)
return
else:
logging.debug('Bootstrapping file missing')
self.bootstrap_state = 4
return
elif self.bootstrap_state == 4:
# we need to save a consistent batch for future runs
if self.uniq_ex < self.bootstrap_count:
return
numpy_cache, categs = self.get_continous(self.bootstrap_count)
if numpy_cache is None:
return
numpy.save(self.cached_first_batch, numpy_cache)
numpy.save(self.categ_file, categs)
del numpy_cache
del categs
self.bootstrap_state = 1
logging.debug('A bootstrap batch of %d examples was saved at %s',
self.bootstrap_count, self.cached_first_batch)
    def get_continous(self, count):
        """
        Get a numpy array with specified number of examples.
        This should only be called in main thread.

        Walks the cached baskets, skipping duplicates (baskets sharing an
        `ident`), and concatenates their content into freshly allocated
        arrays.

        Parameters
        ----------
        count : int
            Number of examples to collect; must not exceed `uniq_ex`.

        Returns
        -------
        tuple
            ``(examples, classes)`` numpy arrays, or ``(None, None)`` when
            the unique baskets could not supply `count` examples.
        """
        assert self.uniq_ex >= count
        result = None
        offset = 0
        categs = None
        # identities already copied; recycled duplicates are skipped
        ident_seen = []
        uniq_baskets = 0
        for bask in self.baskets:
            if bask.ident in ident_seen:
                continue
            ident_seen.append(bask.ident)
            uniq_baskets = uniq_baskets + 1
            if result is None:
                # allocate output lazily, once the example shape is known
                shape = list(bask.batch.shape)
                shape[0] = count
                result = numpy.empty(shape=shape, dtype=bask.batch.dtype)
                categs = numpy.empty(shape=(count), dtype=bask.classes.dtype)
            this_run = min(count, len(bask))
            result[offset:offset+this_run, :, :, :] = bask.batch[0:this_run, :, :, :]
            categs[offset:offset+this_run] = bask.classes[0:this_run]
            count = count - this_run
            offset = offset + this_run
            if count == 0:
                break
        if count != 0:
            # should be unreachable given the assert above
            logging.debug('LOGIC ERROR! uniq_ex (%d) should '
                          'reflect the number of unique baskets (%d) '
                          'among all batches (%d)',
                          self.uniq_ex, uniq_baskets, len(self.baskets))
            return None, None
        return result, categs
def set_continous(self, array, categs, brand_new=True):
"""
Initialize the basket with examples created in a previous run.
"""
basket = Basket()
basket.batch = array
basket.classes = categs
self.add_basket(basket, brand_new)
def _process_image(dataset, trg, categ, i, basket, basket_sz):
"""
Process image and append it to the basket.
"""
# process this image
trg = numpy.reshape(trg,
(1, trg.shape[0],
trg.shape[1],
trg.shape[2]))
trg = dataset.process(trg)
# and append it to our batch
if basket.batch is None:
basket.batch = numpy.empty(shape=(basket_sz,
trg.shape[1],
trg.shape[2],
trg.shape[3]),
dtype=trg.dtype)
basket.classes = numpy.empty(shape=(basket_sz),
dtype='int32')
# and we're done with this image
basket.batch[i, :, :, :] = trg
basket.classes[i] = categ
class ThreadedGen(Generator, AsyncMixin):
    """
    Generates the content using separate threads in same process.

    Worker threads pop image-count requests from a queue, read and
    process the files, then hand finished baskets back through
    `add_basket`.

    Parameters
    ----------
    count : int, optional
        The number of worker threads to use. If None, same number of threads
        as the number of cores minus one are used.
    """
    def __init__(self, count=None):
        if count is None:
            count = multiprocessing.cpu_count()
            count = count - 1 if count > 1 else 1
        elif count < 0:
            raise ValueError("Number of processes must be a positive integer")
        #: the list of active threads
        self.threads = []
        #: the queue to pass messages
        self.queue = Queue.Queue()
        #: semaphore protecting the basket list and shared counters
        self.gen_semaphore = threading.BoundedSemaphore(count)
        super(ThreadedGen, self).__init__()
        self.workers_count = count

    @functools.wraps(Generator.is_inline)
    def is_inline(self):
        return False

    @functools.wraps(Generator.setup)
    def setup(self, dataset):
        """
        Starts the threads and waits for orders.
        """
        self.dataset = dataset
        assert self.workers_count > 0
        # start threads
        for i in range(self.workers_count):
            thr = threading.Thread(target=ThreadedGen.worker,
                                   args=(self, i),
                                   name='ThreadedGen[%d]' % i)
            self.threads.append(thr)
            thr.start()
        self._setup()

    @functools.wraps(Generator.get)
    def get(self, source, next_index):
        return self._get(source, next_index)

    def _wait_for_data(self, count):
        """
        Waits for some provider to deliver its data.

        Raises
        ------
        RuntimeError
            When no worker produced a basket within ~10 seconds.
        """
        timeout_count = 100
        while len(self.baskets) == 0:
            if self._starving():
                # nothing queued for the workers; request enough images to
                # cover this call
                refill = max(self.cache_refill_count, count)
                while refill > 0:
                    self.push_request(self.cache_refill_count)
                    refill = refill - self.cache_refill_count
            logging.debug('main threads sleeps waiting for data (%d)',
                          timeout_count)
            # see if, instead of waiting useless here we can process some
            # images online ourselves.
            time.sleep(0.1)
            timeout_count = timeout_count - 1
            if timeout_count <= 0:
                raise RuntimeError('Timeout waiting for a thread to provide '
                                   'processed images in ThreadedGen.')

    def push_request(self, count):
        """
        Adds a request for a specified number of images to the queue.
        """
        self.queue.put(count)

    def _starving(self):
        """
        Tell if the queue is empty.
        """
        # BUGFIX: the method must be *called*; returning the bound method
        # `self.queue.empty` was always truthy, so _starving() reported
        # starvation on every call and requests were pushed endlessly.
        return self.queue.empty()

    def pop_request(self):
        """
        Gets a request for a specified number of images from the queue.
        The method asks the data provider for file paths.
        """
        count = self.queue.get()
        result = []
        for i in range(count):
            # the data provider is shared between threads; serialize access
            self.gen_semaphore.acquire()
            fpath = self.dataset.data_provider.cnext()
            self.gen_semaphore.release()
            result.append(fpath)
        return result

    def add_basket(self, basket, brand_new):
        """
        Appends a basket (or a list of baskets) to the list.
        Also, keeps `cached_images` syncronized.
        """
        if isinstance(basket, Basket):
            basket = [basket]
        self.gen_semaphore.acquire()
        for bsk in reversed(basket):
            self.cached_images = self.cached_images + len(bsk)
            if brand_new:
                # first time we see these examples: count them as unique
                self.uniq_ex = self.uniq_ex + len(bsk)
                bsk.assign_id()
            self.baskets.append(bsk)
        self.gen_semaphore.release()

    def get_basket(self):
        """
        Extracts a basket from the list.
        Also, keeps `cached_images` syncronized.
        """
        self.gen_semaphore.acquire()
        if len(self.baskets) == 0:
            result = None
        else:
            result = self.baskets.pop()
            self.cached_images = self.cached_images - len(result)
        self.gen_semaphore.release()
        return result

    def done_request(self, thid, basket):
        """
        A thread reports that it is done with a basket.
        """
        count = len(basket)
        logging.debug('thread %d done with a request of %d images',
                      thid, count)
        self.add_basket(basket, True)
        self.queue.task_done()

    def thread_ended(self, thid):
        """
        Show yourself out.
        """
        logging.debug("thread %d is done", thid)
        self.gen_semaphore.acquire()
        self.finish = self.finish + 1
        self.gen_semaphore.release()

    @functools.wraps(Generator.tear_down)
    def tear_down(self):
        """
        Terminates all threads.
        """
        logging.debug('ThreadedGen is being terminated; '
                      '%d items in queue '
                      '%d running threads.',
                      self.queue.qsize(), self.workers_count - self.finish)
        self._should_terminate = True
        # drain pending requests so queue.join() below does not block
        while not self.queue.empty():
            self.queue.get()
            self.queue.task_done()
        self.queue.join()
        self.queue = None
        self.gen_semaphore = None
        self.threads = None
        logging.debug('ThreadedGen was being terminated')

    @staticmethod
    def worker(myself, thid):
        """
        Thread entry point.
        """
        logging.debug("thread %d starts", thid)
        while not myself._should_terminate:
            # get next request from queue
            req = myself.pop_request()
            # nothing to do so take a nap
            if req is None:
                time.sleep(0.2)
                continue
            basket = Basket()
            basket_sz = len(req)
            logging.debug("thread %d will process %d images", thid, basket_sz)
            for i, fpath in enumerate(req):
                # read the file using data provider
                myself.gen_semaphore.acquire()
                b_ok = False
                try:
                    trg, categ = myself.dataset.data_provider.read(fpath)
                    categ = myself.dataset.data_provider.categ2int(categ)
                    b_ok = True
                except IOError as exc:
                    # FIXED: `except IOError, exc:` is Python-2-only syntax;
                    # `as` works on both Python 2.6+ and Python 3.
                    logging.error('Exception in worker loop: %s', str(exc))
                myself.gen_semaphore.release()
                if b_ok:
                    _process_image(myself.dataset, trg, categ,
                                   i, basket, basket_sz)
            # and we're done with this batch
            myself.done_request(thid, basket)
        myself.thread_ended(thid)
class ProcessGen(Generator, AsyncMixin):
    """
    Generates the content using separate processes.

    Parameters
    ----------
    count : int, optional
        The number of worker processes to use. If None, same number of
        processes as the number of cores minus one are used.

    Notes
    -----
    The 0MQ part of the class was heavily inspired by
    ``Python Multiprocessing with ZeroMQ`` TAO_ post.
    Some parts were copied straight from provided code_.

    .. _code: https://github.com/taotetek/blog_examples/blob/master/python_multiprocessing_with_zeromq/workqueue_example.py
    .. _TAO: http://taotetek.net/2011/02/02/python-multiprocessing-with-zeromq/
    """
    # toggle between TCP endpoints (useful while debugging) and the
    # faster local IPC endpoints
    if 0:
        RESULTS_ADDRESS = 'tcp://127.0.0.1:12460'
        CONTROL_ADDRESS = 'tcp://127.0.0.1:12461'
        VENTILATOR_ADDRESS = 'tcp://127.0.0.1:12462'
    else:
        RESULTS_ADDRESS = 'ipc:///tmp/pyl2x-procgen-results.ipc'
        CONTROL_ADDRESS = 'ipc:///tmp/pyl2x-procgen-control.ipc'
        VENTILATOR_ADDRESS = 'ipc:///tmp/pyl2x-procgen-ventilator.ipc'
    #: control message broadcast to workers on shutdown
    CTRL_FINISH = 'FINISHED'

    def __init__(self, count=None):
        if count is None:
            count = multiprocessing.cpu_count()
            count = count - 1 if count > 1 else 1
        elif count < 0:
            raise ValueError("Number of processes must be a positive integer")
        super(ProcessGen, self).__init__()
        self.workers_count = count
        #: number of requests send that were not fulfilled, yet
        self.outstanding_requests = 0
        #: keep various processes from returning same files
        self.provider_offset = 0
        #: maximum number of outstanding requests
        self.max_outstanding = 64
        #: number of seconds to wait before declaring timeout
        self.wait_timeout = 660
        #: number of extra images to request
        self.xcount = 16
        self.xcountcrt = 0
        #: used by receiver
        self.gen_semaphore = threading.BoundedSemaphore(count)

    @functools.wraps(Generator.is_inline)
    def is_inline(self):
        return False

    @functools.wraps(Generator.setup)
    def setup(self, dataset):
        """
        Starts the processes and waits for orders.
        """
        self.dataset = dataset
        self.outstanding_requests = 0
        # the dataset is shipped to the workers lazily, on first get()
        self.dataset_provided = False
        # the thread used for receiving data
        self.receiverth = threading.Thread(target=ProcessGen.receiver_worker,
                                           args=(self,),
                                           name='ProcessGenReceiver')
        self.receiverth.start()
        # Create a pool of workers to distribute work to
        assert self.workers_count > 0
        self.worker_pool = range(self.workers_count)
        for wrk_num in range(len(self.worker_pool)):
            multiprocessing.Process(target=worker, args=(wrk_num,)).start()
        # Initialize a zeromq context
        self.context = zmq.Context()
        # Set up a channel to receive results
        self.results_rcv = self.context.socket(zmq.PULL)
        self.results_rcv.bind(ProcessGen.RESULTS_ADDRESS)
        # Set up a channel to send control commands
        self.control_sender = self.context.socket(zmq.PUB)
        self.control_sender.bind(ProcessGen.CONTROL_ADDRESS)
        # Set up a channel to send work
        self.ventilator_send = self.context.socket(zmq.PUSH)
        self.ventilator_send.bind(ProcessGen.VENTILATOR_ADDRESS)
        self._setup()
        # Give everything a second to spin up and connect
        time.sleep(0.5)

    @functools.wraps(Generator.tear_down)
    def tear_down(self):
        """
        Terminates all components.
        """
        logging.debug('ProcessGen is being terminated; ')
        self._should_terminate = True
        # Signal to all workers that we are finished
        self.control_sender.send(dill.dumps(ProcessGen.CTRL_FINISH))
        logging.debug('ProcessGen was being terminated')

    @functools.wraps(Generator.get)
    def get(self, source, next_index):
        if not self.dataset_provided:
            # send workers a copy of the dataset
            self.control_sender.send(dill.dumps(self.dataset))
            self.dataset_provided = True
            time.sleep(0.5)
        # top up the request pipeline before serving this call
        refill = self.cache_refill_treshold
        assert self.cache_refill_count > 0
        while refill > 0:
            self.push_request(self.cache_refill_count)
            refill = refill - self.cache_refill_count
        return self._get(source, next_index)

    def _starving(self):
        """
        Tell if there are no outstanding requests with the workers.
        """
        return self.outstanding_requests == 0

    def _wait_for_data(self, count):
        """
        Waits for some provider to deliver its data.

        Raises
        ------
        RuntimeError
            When no worker delivered a basket within `wait_timeout` seconds.
        """
        timeout_count = self.wait_timeout * 10
        while len(self.baskets) == 0:
            if self._starving():
                # nothing in flight; request enough images to cover this call
                refill = max(self.cache_refill_count, count)
                while refill > 0:
                    self.push_request(self.cache_refill_count)
                    refill = refill - self.cache_refill_count
            # see if, instead of waiting useless here we can process some
            # images online ourselves.
            time.sleep(0.1)
            timeout_count = timeout_count - 1
            if timeout_count <= 0:
                raise RuntimeError('Timeout waiting for a process to provide '
                                   'processed images in ProcessGen.')

    def push_request(self, count):
        """
        Adds a request for a specified number of images.
        Sends a request for a specified number of images down a zeromq "PUSH"
        connection to be processed by listening workers, in a round robin
        load balanced fashion.

        Parameters
        ----------
        count : int
            Number of images to retrieve.
        """
        if self.outstanding_requests >= self.max_outstanding:
            # too much work already in flight; silently drop the request
            return
        self.outstanding_requests = self.outstanding_requests + 1
        work_message = {'offset': self.provider_offset, 'count' : count}
        # advance the offset so processes do not return the same files
        self.provider_offset = self.provider_offset + count
        self.ventilator_send.send_json(work_message)

    def receive_all_messages(self, no_block=True):
        """
        The "results_manager" function receives each result
        from multiple workers.
        """
        b_done = False
        baskets = []
        while not b_done:
            try:
                if no_block:
                    flags = zmq.NOBLOCK
                else:
                    # BUGFIX: the method is `poll`, not `pool`; wait up to
                    # one second for a message before a blocking recv
                    self.results_rcv.poll(timeout=1*1000)
                    flags = 0
                basket = self.results_rcv.recv_pyobj(flags=flags)
                self.outstanding_requests = self.outstanding_requests - 1
                if len(basket) > 0:
                    logging.debug('A basket of %d examples has been '
                                  'received; %d outstanding requests, '
                                  '%d cached images',
                                  len(basket),
                                  self.outstanding_requests,
                                  self.cached_images)
                    baskets.append(basket)
                else:
                    logging.error("Empty basket received")
            except zmq.ZMQError as exc:
                if exc.errno == zmq.EAGAIN:
                    # no more queued messages
                    b_done = True
                else:
                    raise
        if len(baskets) > 0:
            self.add_basket(baskets, True)

    def add_basket(self, basket, brand_new):
        """
        Appends a basket (or a list of baskets) to the list.
        Also, keeps `cached_images` syncronized.
        """
        if isinstance(basket, Basket):
            basket = [basket]
        self.gen_semaphore.acquire()
        for bsk in reversed(basket):
            self.cached_images = self.cached_images + len(bsk)
            self.baskets.append(bsk)
            if brand_new:
                # first time we see these examples: count them as unique
                self.uniq_ex = self.uniq_ex + len(bsk)
                bsk.assign_id()
        self.gen_semaphore.release()

    def get_basket(self):
        """
        Extracts a basket from the list.
        Also, keeps `cached_images` syncronized.
        """
        while True:
            if len(self.baskets) == 0:
                return None
            else:
                self.gen_semaphore.acquire()
                result = self.baskets.pop()
                self.gen_semaphore.release()
                if result.batch is None:
                    # skip baskets that never got any image processed
                    continue
                # NOTE(review): this counter update happens outside the
                # semaphore — looks racy with add_basket; confirm intended
                self.cached_images = self.cached_images - len(result)
                return result

    @staticmethod
    def receiver_worker(myself):
        """
        Thread entry point; drains worker results until termination.
        """
        logging.debug("worker thread starts")
        time.sleep(0.5)
        while not myself._should_terminate:
            myself.receive_all_messages(no_block=True)
            time.sleep(0.01)
# The "worker" functions listen on a zeromq PULL connection for "work"
# (numbers to be processed) from the ventilator, square those numbers,
# and send the results down another zeromq PUSH connection to the
# results manager.
def worker(wrk_num):
    """
    Worker process for `ProcessGen`.

    Listens on a zeromq PULL socket for work requests from the
    ventilator, processes the requested images into a `Basket` and pushes
    it to the results socket. A SUB socket is polled for control
    messages: either a dill-pickled dataset instance or the FINISH
    string that terminates the process.
    """
    logging.debug("worker process %d starts", wrk_num)
    # Initialize a zeromq context
    context = zmq.Context()
    # Set up a channel to receive work from the ventilator
    work_rcv = context.socket(zmq.PULL)
    work_rcv.connect(ProcessGen.VENTILATOR_ADDRESS)
    # Set up a channel to send result of work to the results reporter
    results_sender = context.socket(zmq.PUSH)
    results_sender.connect(ProcessGen.RESULTS_ADDRESS)
    # Set up a channel to receive control messages over
    control_rcv = context.socket(zmq.SUB)
    control_rcv.connect(ProcessGen.CONTROL_ADDRESS)
    control_rcv.setsockopt(zmq.SUBSCRIBE, "")
    # Set up a poller to multiplex the work receiver and control receiver channels
    poller = zmq.Poller()
    poller.register(work_rcv, zmq.POLLIN)
    poller.register(control_rcv, zmq.POLLIN)
    # the dataset arrives over the control channel before any work is done
    dataset = None
    # Loop and accept messages from both channels, acting accordingly
    while True:
        socks = dict(poller.poll())
        # A work request: read and process the requested files, then ship
        # the resulting basket to the results reporter. Ignored until the
        # dataset has been received over the control channel.
        if socks.get(work_rcv) == zmq.POLLIN and dataset is not None:
            work_message = work_rcv.recv_json()
            files = dataset.data_provider.get(work_message['offset'],
                                              work_message['count'])
            basket = Basket()
            basket_sz = len(files)
            logging.debug("worker process %d will process %d images",
                          wrk_num, basket_sz)
            for i, fpath in enumerate(files):
                b_ok = False
                try:
                    trg, categ = dataset.data_provider.read(fpath)
                    categ = dataset.data_provider.categ2int(categ)
                    b_ok = True
                except IOError as exc:
                    # FIXED: `except IOError, exc:` is Python-2-only syntax
                    logging.error('Exception in worker loop: %s', str(exc))
                if b_ok:
                    _process_image(dataset, trg, categ,
                                   i, basket, basket_sz)
            if len(basket) == 0:
                logging.error("Worker %d sending empty basket", wrk_num)
            results_sender.send_pyobj(basket)
        # If the message came over the control channel, shut down the worker.
        if socks.get(control_rcv) == zmq.POLLIN:
            control_message = dill.loads(control_rcv.recv())
            if isinstance(control_message, basestring):
                if control_message == ProcessGen.CTRL_FINISH:
                    logging.info("Worker %i received FINSHED, quitting!",
                                 wrk_num)
                    break
            elif 'ImgDataset' in str(control_message.__class__):
                dataset = control_message
def gen_from_string(gen_name):
    """
    Creates a generator based on a string key.

    Parameters
    ----------
    gen_name : str
        A string identifying the type of Generator to use.

    Returns
    -------
    adj : Generator
        The instance that was constructed.
    """
    factories = {
        'inline': InlineGen,
        'threads': ThreadedGen,
        'process': ProcessGen,
    }
    if gen_name not in factories:
        raise ValueError('%s is not a known Generator name' % gen_name)
    return factories[gen_name]()
| |
from random import random, randint, choice
from copy import deepcopy
from math import log
class fwrapper:
    """
    Wraps a callable for use on function nodes of a program tree.

    Holds the callable itself, the number of child arguments it consumes
    and a human-readable name used when displaying trees.
    """
    def __init__(self, function, childcount, name):
        self.name = name
        self.childcount = childcount
        self.function = function
class node:
    """
    A function node (a node with children), initialized from an fwrapper.

    `evaluate` first evaluates every child, then applies the wrapped
    function to the list of their results.
    """
    def __init__(self, fw, children):
        self.function = fw.function
        self.name = fw.name
        self.children = children

    def evaluate(self, inp):
        args = []
        for child in self.children:
            args.append(child.evaluate(inp))
        return self.function(args)

    def display(self, indent=0):
        print('%s%s' % (' ' * indent, self.name))
        for child in self.children:
            child.display(indent + 1)
class paramnode:
    """
    Leaf node that returns one of the parameters passed to the program.

    `evaluate` simply indexes the input list with `idx`.
    """
    def __init__(self, idx):
        self.idx = idx

    def evaluate(self, inp):
        return inp[self.idx]

    def display(self, indent=0):
        print('%sp%d' % (' ' * indent, self.idx))
class constnode:
    """
    Leaf node that returns a constant value.

    `evaluate` ignores its input and returns the stored value.
    """
    def __init__(self, v):
        self.v = v

    def evaluate(self, inp):
        return self.v

    def display(self, indent=0):
        print('%s%d' % (' ' * indent, self.v))
# some helper functions

def iffunc(l):
    """Ternary helper: l[0] > 0 selects l[1], otherwise l[2]."""
    return l[1] if l[0] > 0 else l[2]

def isgreater(l):
    """Comparison helper: 1 when l[0] exceeds l[1], else 0."""
    return 1 if l[0] > l[1] else 0

addw = fwrapper(lambda l: l[0] + l[1], 2, 'add')
subw = fwrapper(lambda l: l[0] - l[1], 2, 'substract')
mulw = fwrapper(lambda l: l[0] * l[1], 2, 'multiply')
ifw = fwrapper(iffunc, 3, 'if')
gtw = fwrapper(isgreater, 2, 'isgreater')
flist = [addw, mulw, ifw, gtw, subw]  # list of all the functions for random choosing
def makerandomtree(pc, maxdepth=4, fpr=0.5, ppr=0.6):
    """
    Build a random program tree.

    A function node is created with probability ``fpr`` (while depth
    remains), recursing once per required child; otherwise a parameter
    node with probability ``ppr``, or a constant node.

    :param pc: number of parameters that the tree will take as input
    :param maxdepth: maximum remaining depth for function nodes
    :param fpr: gives the probability that the new node created will be a function node
    :param ppr: gives that probability that it will be a paramnode if it is not a function node
    """
    # note: each random() draw below is kept in the original order so the
    # stream of generated trees is unchanged for a given RNG seed
    if random() < fpr and maxdepth > 0:
        func = choice(flist)
        kids = [makerandomtree(pc, maxdepth - 1, fpr, ppr)
                for _ in range(func.childcount)]
        return node(func, kids)
    if random() < ppr:
        return paramnode(randint(0, pc - 1))
    return constnode(randint(0, 10))
def scorefunction(tree, dataset):
    """
    Sum of absolute errors of ``tree`` over ``dataset``.

    Each row supplies two inputs (columns 0 and 1) and the expected
    output (column 2). A return value of 0 means the program got every
    row exactly right; lower is better.
    """
    total = 0
    for row in dataset:
        predicted = tree.evaluate([row[0], row[1]])
        total += abs(predicted - row[2])
    return total
def getrankfunction(dataset):
    """
    Build a ranking closure bound to ``dataset``.

    The returned function scores each tree in a population with
    ``scorefunction`` and returns (score, tree) pairs sorted best-first.
    """
    def rankfunction(population):
        ranked = [(scorefunction(t, dataset), t) for t in population]
        ranked.sort()
        return ranked
    return rankfunction
def mutate(tree, pc, probchange=0.1):
    """
    Randomly alter parts of ``tree``.

    With probability ``probchange`` the current node is replaced by a
    brand-new random subtree; otherwise the node is deep-copied and the
    mutation recurses into its children.
    """
    # random() is drawn unconditionally, matching the original RNG stream
    if random() < probchange:
        return makerandomtree(pc)
    clone = deepcopy(tree)
    if isinstance(tree, node):
        clone.children = [mutate(child, pc, probchange)
                          for child in tree.children]
    return clone
def crossover(tree1, tree2, probswap=0.7, top=1):
    """
    Combine two program trees.

    Traverses both trees; when a random threshold is hit (never at the
    root), a copy of a branch of ``tree2`` replaces the corresponding
    branch of ``tree1``. Otherwise the recursion continues child by
    child, pairing each child of ``tree1`` with a random child of
    ``tree2``.
    """
    # random() is drawn unconditionally, matching the original RNG stream
    if random() < probswap and not top:
        return deepcopy(tree2)
    merged = deepcopy(tree1)
    if isinstance(tree1, node) and isinstance(tree2, node):
        merged.children = [crossover(child, choice(tree2.children), probswap, 0)
                           for child in tree1.children]
    return merged
def evolve(pc, popsize, rankfunction, maxgen=500, mutationrate=0.1, breedingrate=0.4,
           pexp=0.7, pnew=0.05):
    """
    Run a genetic-programming loop and return the best program found.

    :param pc: number of input parameters each program tree accepts
    :param popsize: the size of initial population
    :param rankfunction: function used on the list of programs to rank them from best to worst
    :param maxgen: maximum number of generations to evolve
    :param mutationrate: probability of a mutation, passed on to mutate
    :param breedingrate: probability of crossover, passed on to crossover
    :param pexp: rate of decline in the probability of selecting lower-ranked programs.
                 A higher value makes the selection process more stringent, choosing only
                 programs with the best ranks to repicate
    :param pnew: probability when building the new population that a completely new, random
                 program is introduced
    """
    def selectindex():
        # random index biased toward 0 (i.e. toward better-ranked programs)
        return int(log(random())/log(pexp))
    # creating a random initial population
    population = [makerandomtree(pc) for i in range(popsize)]
    for i in range(maxgen):
        scores = rankfunction(population)
        # best score this generation; 0 means a perfect program was found
        print(scores[0][0])
        if scores[0][0] == 0:
            break
        # new population from the two best (elitism)
        newpop = [scores[0][1], scores[1][1]]
        # building next generation
        while len(newpop) < popsize:
            if random() > pnew:
                # breed two biased-randomly selected parents, then mutate
                newpop.append(mutate(
                    crossover(scores[selectindex()][1],
                              scores[selectindex()][1],
                              probswap=breedingrate),
                    pc, probchange=mutationrate))
            else:
                # just adding random node to mix things up
                newpop.append(makerandomtree(pc))
        population = newpop
    scores[0][1].display()
    return scores[0][1]
| |
"""Functions for FIR filter design."""
from __future__ import division, print_function, absolute_import
from math import ceil, log
import numpy as np
from numpy.fft import irfft
from scipy.special import sinc
from . import sigtools
__all__ = ['kaiser_beta', 'kaiser_atten', 'kaiserord',
'firwin', 'firwin2', 'remez']
# Some notes on function parameters:
#
# `cutoff` and `width` are given as a numbers between 0 and 1. These
# are relative frequencies, expressed as a fraction of the Nyquist rate.
# For example, if the Nyquist rate is 2KHz, then width=0.15 is a width
# of 300 Hz.
#
# The `order` of a FIR filter is one less than the number of taps.
# This is a potential source of confusion, so in the following code,
# we will always use the number of taps as the parameterization of
# the 'size' of the filter. The "number of taps" means the number
# of coefficients, which is the same as the length of the impulse
# response of the filter.
def kaiser_beta(a):
    """Compute the Kaiser parameter `beta`, given the attenuation `a`.

    Parameters
    ----------
    a : float
        The desired attenuation in the stopband and maximum ripple in
        the passband, in dB. This should be a *positive* number.

    Returns
    -------
    beta : float
        The `beta` parameter to be used in the formula for a Kaiser window.

    References
    ----------
    Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
    """
    # piecewise empirical fit: zero below 21 dB, a fractional-power blend
    # up to 50 dB, and a linear law beyond
    if a > 50:
        return 0.1102 * (a - 8.7)
    if a > 21:
        return 0.5842 * (a - 21) ** 0.4 + 0.07886 * (a - 21)
    return 0.0
def kaiser_atten(numtaps, width):
    """Compute the attenuation of a Kaiser FIR filter.

    Given the number of taps `N` and the transition width `width`, compute
    the attenuation `a` in dB, given by Kaiser's formula:

        a = 2.285 * (N - 1) * pi * width + 7.95

    Parameters
    ----------
    numtaps : int
        The number of taps in the FIR filter.
    width : float
        The desired width of the transition region between passband and
        stopband (or, in general, at any discontinuity) for the filter.

    Returns
    -------
    a : float
        The attenuation of the ripple, in dB.

    See Also
    --------
    kaiserord, kaiser_beta
    """
    return 2.285 * (numtaps - 1) * np.pi * width + 7.95
def kaiserord(ripple, width):
    """
    Design a Kaiser window to limit ripple and width of transition region.

    Parameters
    ----------
    ripple : float
        Positive number specifying maximum ripple in passband (dB) and
        minimum ripple in stopband.
    width : float
        Width of transition region (normalized so that 1 corresponds to pi
        radians / sample).

    Returns
    -------
    numtaps : int
        The length of the kaiser window.
    beta : float
        The beta parameter for the kaiser window.

    See Also
    --------
    kaiser_beta, kaiser_atten

    Notes
    -----
    There are several ways to obtain the Kaiser window:

    - ``signal.kaiser(numtaps, beta, sym=0)``
    - ``signal.get_window(beta, numtaps)``
    - ``signal.get_window(('kaiser', beta), numtaps)``

    The empirical equations discovered by Kaiser are used.

    References
    ----------
    Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
    """
    A = abs(ripple)  # in case somebody is confused as to what's meant
    if A < 8:
        # Formula for N is not valid in this range.
        raise ValueError("Requested maximum ripple attentuation %f is too "
                         "small for the Kaiser formula." % A)
    beta = kaiser_beta(A)
    # Kaiser's formula (as given in Oppenheim and Schafer) is for the filter
    # order, so we have to add 1 to get the number of taps. The expression
    # is kept exactly as written so the float result (and hence the ceil)
    # is bit-identical to the original.
    numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1
    return int(ceil(numtaps)), beta
def firwin(numtaps, cutoff, width=None, window='hamming', pass_zero=True,
           scale=True, nyq=1.0):
    """
    FIR filter design using the window method.
    This function computes the coefficients of a finite impulse response
    filter. The filter will have linear phase; it will be Type I if
    `numtaps` is odd and Type II if `numtaps` is even.
    Type II filters always have zero response at the Nyquist rate, so a
    ValueError exception is raised if firwin is called with `numtaps` even and
    having a passband whose right end is at the Nyquist rate.
    Parameters
    ----------
    numtaps : int
        Length of the filter (number of coefficients, i.e. the filter
        order + 1). `numtaps` must be even if a passband includes the
        Nyquist frequency.
    cutoff : float or 1D array_like
        Cutoff frequency of filter (expressed in the same units as `nyq`)
        OR an array of cutoff frequencies (that is, band edges). In the
        latter case, the frequencies in `cutoff` should be positive and
        monotonically increasing between 0 and `nyq`. The values 0 and
        `nyq` must not be included in `cutoff`.
    width : float or None
        If `width` is not None, then assume it is the approximate width
        of the transition region (expressed in the same units as `nyq`)
        for use in Kaiser FIR filter design. In this case, the `window`
        argument is ignored.
    window : string or tuple of string and parameter values
        Desired window to use. See `scipy.signal.get_window` for a list
        of windows and required parameters.
    pass_zero : bool
        If True, the gain at the frequency 0 (i.e. the "DC gain") is 1.
        Otherwise the DC gain is 0.
    scale : bool
        Set to True to scale the coefficients so that the frequency
        response is exactly unity at a certain frequency.
        That frequency is either:
        - 0 (DC) if the first passband starts at 0 (i.e. pass_zero
          is True)
        - `nyq` (the Nyquist rate) if the first passband ends at
          `nyq` (i.e the filter is a single band highpass filter);
          center of first passband otherwise
    nyq : float
        Nyquist frequency. Each frequency in `cutoff` must be between 0
        and `nyq`.
    Returns
    -------
    h : (numtaps,) ndarray
        Coefficients of length `numtaps` FIR filter.
    Raises
    ------
    ValueError
        If any value in `cutoff` is less than or equal to 0 or greater
        than or equal to `nyq`, if the values in `cutoff` are not strictly
        monotonically increasing, or if `numtaps` is even but a passband
        includes the Nyquist frequency.
    See also
    --------
    scipy.signal.firwin2
    Examples
    --------
    Low-pass from 0 to f::
    >>> from scipy import signal
    >>> signal.firwin(numtaps, f)
    Use a specific window function::
    >>> signal.firwin(numtaps, f, window='nuttall')
    High-pass ('stop' from 0 to f)::
    >>> signal.firwin(numtaps, f, pass_zero=False)
    Band-pass::
    >>> signal.firwin(numtaps, [f1, f2], pass_zero=False)
    Band-stop::
    >>> signal.firwin(numtaps, [f1, f2])
    Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1])::
    >>> signal.firwin(numtaps, [f1, f2, f3, f4])
    Multi-band (passbands are [f1, f2] and [f3,f4])::
    >>> signal.firwin(numtaps, [f1, f2, f3, f4], pass_zero=False)
    """
    # The major enhancements to this function added in November 2010 were
    # developed by Tom Krauss (see ticket #902).
    # Normalize cutoff frequencies to the [0, 1] Nyquist-relative scale.
    cutoff = np.atleast_1d(cutoff) / float(nyq)
    # Check for invalid input.
    if cutoff.ndim > 1:
        raise ValueError("The cutoff argument must be at most "
                         "one-dimensional.")
    if cutoff.size == 0:
        raise ValueError("At least one cutoff frequency must be given.")
    if cutoff.min() <= 0 or cutoff.max() >= 1:
        raise ValueError("Invalid cutoff frequency: frequencies must be "
                         "greater than 0 and less than nyq.")
    if np.any(np.diff(cutoff) <= 0):
        raise ValueError("Invalid cutoff frequencies: the frequencies "
                         "must be strictly increasing.")
    if width is not None:
        # A width was given. Find the beta parameter of the Kaiser window
        # and set `window`. This overrides the value of `window` passed in.
        atten = kaiser_atten(numtaps, float(width) / nyq)
        beta = kaiser_beta(atten)
        window = ('kaiser', beta)
    # Each band edge toggles pass/stop starting from DC, so the filter
    # passes Nyquist iff (number of edges is odd) XOR pass_zero.
    pass_nyquist = bool(cutoff.size & 1) ^ pass_zero
    if pass_nyquist and numtaps % 2 == 0:
        # A Type II (even-length, symmetric) filter always has a zero at
        # the Nyquist rate, so this request cannot be satisfied.
        raise ValueError("A filter with an even number of coefficients must "
                         "have zero response at the Nyquist rate.")
    # Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff
    # is even, and each pair in cutoff corresponds to passband.
    # NOTE: `[0.0] * pass_zero` deliberately relies on bool acting as 0/1.
    cutoff = np.hstack(([0.0] * pass_zero, cutoff, [1.0] * pass_nyquist))
    # `bands` is a 2D array; each row gives the left and right edges of
    # a passband.
    bands = cutoff.reshape(-1, 2)
    # Build up the coefficients.
    # `m` is the tap index centered at the filter midpoint (linear phase).
    alpha = 0.5 * (numtaps - 1)
    m = np.arange(0, numtaps) - alpha
    h = 0
    for left, right in bands:
        # Sum of ideal (windowless) band-pass impulse responses, one per band.
        h += right * sinc(right * m)
        h -= left * sinc(left * m)
    # Get and apply the window function.
    from .signaltools import get_window
    win = get_window(window, numtaps, fftbins=False)
    h *= win
    # Now handle scaling if desired.
    if scale:
        # Get the first passband.
        left, right = bands[0]
        if left == 0:
            scale_frequency = 0.0
        elif right == 1:
            scale_frequency = 1.0
        else:
            scale_frequency = 0.5 * (left + right)
        # Normalize so the response is exactly unity at scale_frequency.
        c = np.cos(np.pi * m * scale_frequency)
        s = np.sum(h * c)
        h /= s
    return h
# Original version of firwin2 from scipy ticket #457, submitted by "tash".
#
# Rewritten by Warren Weckesser, 2010.
def firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0,
            antisymmetric=False):
    """
    FIR filter design using the window method.

    From the given frequencies `freq` and corresponding gains `gain`,
    this function constructs an FIR filter with linear phase and
    (approximately) the given frequency response.

    Parameters
    ----------
    numtaps : int
        The number of taps in the FIR filter. `numtaps` must be less than
        `nfreqs`.
    freq : array_like, 1D
        The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
        Nyquist. The Nyquist frequency can be redefined with the argument
        `nyq`.
        The values in `freq` must be nondecreasing. A value can be repeated
        once to implement a discontinuity. The first value in `freq` must
        be 0, and the last value must be `nyq`. The input is not modified.
    gain : array_like
        The filter gains at the frequency sampling points. Certain
        constraints to gain values, depending on the filter type, are applied,
        see Notes for details.
    nfreqs : int, optional
        The size of the interpolation mesh used to construct the filter.
        For most efficient behavior, this should be a power of 2 plus 1
        (e.g, 129, 257, etc). The default is one more than the smallest
        power of 2 that is not less than `numtaps`. `nfreqs` must be greater
        than `numtaps`.
    window : string or (string, float) or float, or None, optional
        Window function to use. Default is "hamming". See
        `scipy.signal.get_window` for the complete list of possible values.
        If None, no window function is applied.
    nyq : float
        Nyquist frequency. Each frequency in `freq` must be between 0 and
        `nyq` (inclusive).
    antisymmetric : bool
        Whether resulting impulse response is symmetric/antisymmetric.
        See Notes for more details.

    Returns
    -------
    taps : ndarray
        The filter coefficients of the FIR filter, as a 1-D array of length
        `numtaps`.

    Raises
    ------
    ValueError
        If the inputs are inconsistent (mismatched lengths, bad band
        edges, numtaps >= nfreqs, or gain constraints violated for the
        resulting filter type).

    See also
    --------
    scipy.signal.firwin

    Notes
    -----
    From the given set of frequencies and gains, the desired response is
    constructed in the frequency domain. The inverse FFT is applied to the
    desired response to create the associated convolution kernel, and the
    first `numtaps` coefficients of this kernel, scaled by `window`, are
    returned.
    The FIR filter will have linear phase. The type of filter is determined by
    the value of 'numtaps` and `antisymmetric` flag.
    There are four possible combinations:
    - odd `numtaps`, `antisymmetric` is False, type I filter is produced
    - even `numtaps`, `antisymmetric` is False, type II filter is produced
    - odd `numtaps`, `antisymmetric` is True, type III filter is produced
    - even `numtaps`, `antisymmetric` is True, type IV filter is produced
    Magnitude response of all but type I filters are subjects to following
    constraints:
    - type II -- zero at the Nyquist frequency
    - type III -- zero at zero and Nyquist frequencies
    - type IV -- zero at zero frequency

    .. versionadded:: 0.9.0

    References
    ----------
    .. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
       Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
       (See, for example, Section 7.4.)
    .. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
       Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm

    Examples
    --------
    A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
    that decreases linearly on [0.5, 1.0] from 1 to 0:
    >>> from scipy import signal
    >>> taps = signal.firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
    >>> print(taps[72:78])
    [-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]
    """
    if len(freq) != len(gain):
        raise ValueError('freq and gain must be of same length.')
    if nfreqs is not None and numtaps >= nfreqs:
        raise ValueError(('ntaps must be less than nfreqs, but firwin2 was '
                          'called with ntaps=%d and nfreqs=%s') %
                         (numtaps, nfreqs))
    if freq[0] != 0 or freq[-1] != nyq:
        raise ValueError('freq must start with 0 and end with `nyq`.')
    d = np.diff(freq)
    if (d < 0).any():
        raise ValueError('The values in freq must be nondecreasing.')
    d2 = d[:-1] + d[1:]
    if (d2 == 0).any():
        raise ValueError('A value in freq must not occur more than twice.')

    # Determine the linear-phase filter type from parity/antisymmetry.
    if antisymmetric:
        ftype = 4 if numtaps % 2 == 0 else 3
    else:
        ftype = 2 if numtaps % 2 == 0 else 1

    # Enforce the mandatory zero-gain constraints for types II-IV.
    if ftype == 2 and gain[-1] != 0.0:
        raise ValueError("A Type II filter must have zero gain at the "
                         "Nyquist rate.")
    elif ftype == 3 and (gain[0] != 0.0 or gain[-1] != 0.0):
        raise ValueError("A Type III filter must have zero gain at zero "
                         "and Nyquist rates.")
    elif ftype == 4 and gain[0] != 0.0:
        raise ValueError("A Type IV filter must have zero gain at zero rate.")

    if nfreqs is None:
        nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2)))

    # Tweak any repeated values in freq so that interp works.
    # BUG FIX: operate on a float copy so the caller's `freq` sequence is
    # not mutated in place by the +/- eps adjustments below.
    freq = np.array(freq, copy=True, dtype=float)
    eps = np.finfo(float).eps
    for k in range(len(freq)):
        if k < len(freq) - 1 and freq[k] == freq[k + 1]:
            freq[k] = freq[k] - eps
            freq[k + 1] = freq[k + 1] + eps

    # Linearly interpolate the desired response on a uniform mesh `x`.
    x = np.linspace(0.0, nyq, nfreqs)
    fx = np.interp(x, freq, gain)

    # Adjust the phases of the coefficients so that the first `ntaps` of the
    # inverse FFT are the desired filter coefficients.
    shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq)
    if ftype > 2:
        # Antisymmetric (type III/IV) filters need the extra 90-degree shift.
        shift *= 1j

    fx2 = fx * shift

    # Use irfft to compute the inverse FFT.
    out_full = irfft(fx2)

    if window is not None:
        # Create the window to apply to the filter coefficients.
        from .signaltools import get_window
        wind = get_window(window, numtaps, fftbins=False)
    else:
        wind = 1

    # Keep only the first `numtaps` coefficients in `out`, and multiply by
    # the window.
    out = out_full[:numtaps] * wind

    if ftype == 3:
        # Type III must have exactly zero response at zero frequency; force
        # the center coefficient to zero to remove numerical residue.
        out[out.size // 2] = 0.0

    return out
def remez(numtaps, bands, desired, weight=None, Hz=1, type='bandpass',
          maxiter=25, grid_density=16):
    """
    Calculate the minimax optimal filter using the Remez exchange algorithm.

    Computes the filter coefficients for the finite impulse response (FIR)
    filter whose transfer function minimizes the maximum error between the
    desired gain and the realized gain in the specified frequency bands,
    using the Remez exchange algorithm.

    Parameters
    ----------
    numtaps : int
        The desired number of taps in the filter. The number of taps is
        the number of terms in the filter, or the filter order plus one.
    bands : array_like
        A monotonic sequence containing the band edges in Hz.
        All elements must be non-negative and less than half the sampling
        frequency as given by `Hz`.
    desired : array_like
        A sequence half the size of bands containing the desired gain
        in each of the specified bands.
    weight : array_like, optional
        A relative weighting to give to each band region. The length of
        `weight` has to be half the length of `bands`. Defaults to a
        weight of 1 for every band.
    Hz : scalar, optional
        The sampling frequency in Hz. Default is 1.
    type : {'bandpass', 'differentiator', 'hilbert'}, optional
        The type of filter:
        'bandpass' : flat response in bands. This is the default.
        'differentiator' : frequency proportional response in bands.
        'hilbert' : filter with odd symmetry, that is, type III
        (for even order) or type IV (for odd order) linear phase filters.
    maxiter : int, optional
        Maximum number of iterations of the algorithm. Default is 25.
    grid_density : int, optional
        Grid density. The dense grid used in `remez` is of size
        ``(numtaps + 1) * grid_density``. Default is 16.

    Returns
    -------
    out : ndarray
        A rank-1 array containing the coefficients of the optimal
        (in a minimax sense) filter.

    Raises
    ------
    ValueError
        If `type` is not one of the three recognized filter types.

    See Also
    --------
    freqz : Compute the frequency response of a digital filter.

    References
    ----------
    .. [1] J. H. McClellan and T. W. Parks, "A unified approach to the
           design of optimum FIR linear phase digital filters",
           IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973.
    .. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, "A Computer
           Program for Designing Optimum FIR Linear Phase Digital
           Filters", IEEE Trans. Audio Electroacoust., vol. AU-21,
           pp. 506-525, 1973.
    """
    # Map the textual filter type onto the numeric code sigtools expects.
    type_codes = {'bandpass': 1, 'differentiator': 2, 'hilbert': 3}
    try:
        tnum = type_codes[type]
    except KeyError:
        raise ValueError("Type must be 'bandpass', 'differentiator', "
                         "or 'hilbert'")

    # Default to equal weighting of all bands.
    if weight is None:
        weight = [1] * len(desired)

    # Copy so the C routine cannot modify the caller's band-edge array.
    bands = np.asarray(bands).copy()
    return sigtools._remez(numtaps, bands, desired, weight, tnum, Hz,
                           maxiter, grid_density)
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""Vispy configuration functions
"""
import os
from os import path as op
import json
import sys
import platform
import getopt
import traceback
import tempfile
import atexit
from shutil import rmtree
from .event import EmitterGroup, EventEmitter, Event
from .logs import logger, set_log_level, use_log_level
from ..ext.six import string_types, file_types
file_types = list(file_types)
try:
file_types += [tempfile._TemporaryFileWrapper] # Py3k Windows this happens
except Exception:
pass
file_types = tuple(file_types)
config = None
_data_path = None
_allowed_config_keys = None
def _init():
    """Create the global Config object and parse command line flags.

    Populates the module-level ``config``, ``_data_path`` and
    ``_allowed_config_keys`` globals, loads the user configuration file
    (if any), sets the log level and finally applies ``--vispy-*``
    command line arguments.
    """
    global config, _data_path, _allowed_config_keys
    app_dir = _get_vispy_app_dir()
    if app_dir is not None:
        _data_path = op.join(app_dir, 'data')
        _test_data_path = op.join(app_dir, 'test_data')
    else:
        _data_path = _test_data_path = None

    # All allowed config keys and the types they may have
    _allowed_config_keys = {
        'data_path': string_types,
        'default_backend': string_types,
        'gl_backend': string_types,
        'gl_debug': (bool,),
        'glir_file': string_types+file_types,
        'include_path': list,
        'logging_level': string_types,
        'qt_lib': string_types,
        'dpi': (int, type(None)),
        'profile': string_types + (type(None),),
        'audit_tests': (bool,),
        'test_data_path': string_types + (type(None),),
    }

    # Default values for all config options
    default_config_options = {
        'data_path': _data_path,
        'default_backend': '',
        'gl_backend': 'gl2',
        'gl_debug': False,
        'glir_file': '',
        'include_path': [],
        'logging_level': 'info',
        'qt_lib': 'any',
        'dpi': None,
        'profile': None,
        'audit_tests': False,
        'test_data_path': _test_data_path,
    }

    config = Config(**default_config_options)
    try:
        config.update(**_load_config())
    except Exception as err:
        # BUG FIX: ``err.message`` does not exist on Python 3 (and was
        # deprecated on Python 2.6+); use str(err) instead.
        raise Exception('Error while reading vispy config file "%s":\n %s' %
                        (_get_config_fname(), str(err)))
    set_log_level(config['logging_level'])

    _parse_command_line_arguments()
###############################################################################
# Command line flag parsing

# Usage text printed when the --vispy-help flag is given (see
# _parse_command_line_arguments below). This is a runtime string, so its
# content is left exactly as authored.
VISPY_HELP = """
VisPy command line arguments:
--vispy-backend=(qt|pyqt4|pyqt5|pyside|glfw|pyglet|sdl2|wx)
Selects the backend system for VisPy to use. This will override the default
backend selection in your configuration file.
--vispy-log=(debug|info|warning|error|critical)[,search string]
Sets the verbosity of logging output. The default is 'warning'. If a search
string is given, messages will only be displayed if they match the string,
or if their call location (module.class:method(line) or
module:function(line)) matches the string.
--vispy-dpi=resolution
Force the screen resolution to a certain value (in pixels per inch). By
default, the OS is queried to determine the screen DPI.
--vispy-fps
Print the framerate (in Frames Per Second) in the console.
--vispy-gl-debug
Enables error checking for all OpenGL calls.
--vispy-glir-file
Export glir commands to specified file.
--vispy-profile=locations
Measure performance at specific code locations and display results.
*locations* may be "all" or a comma-separated list of method names like
"SceneCanvas.draw_visual".
--vispy-cprofile
Enable profiling using the built-in cProfile module and display results
when the program exits.
--vispy-audit-tests
Enable user auditing of image test results.
--vispy-help
Display this help message.
"""
def _parse_command_line_arguments():
    """Transform vispy-specific command line args into vispy config values.

    Wrapped in a function so that temporary variables do not leak into the
    vispy namespace. Unknown ``--vispy-*`` flags trigger a warning.
    """
    global config
    # Get command line args for vispy
    argnames = ['vispy-backend=', 'vispy-gl-debug', 'vispy-glir-file=',
                'vispy-log=', 'vispy-help', 'vispy-profile=', 'vispy-cprofile',
                'vispy-dpi=', 'vispy-audit-tests']
    try:
        opts, args = getopt.getopt(sys.argv[1:], '', argnames)
    except getopt.GetoptError:
        # Non-vispy options are simply ignored here.
        opts = []
    # Use them to set the config values
    for o, a in opts:
        if o.startswith('--vispy'):
            if o == '--vispy-backend':
                config['default_backend'] = a
                logger.info('vispy backend: %s', a)
            elif o == '--vispy-gl-debug':
                config['gl_debug'] = True
            elif o == '--vispy-glir-file':
                config['glir_file'] = a
            elif o == '--vispy-log':
                if ',' in a:
                    verbose, match = a.split(',')
                else:
                    verbose = a
                    match = None
                # BUG FIX: store only the level part; previously the whole
                # "level,match" string was stored, which is not a valid
                # level for later set_log_level(config['logging_level']).
                config['logging_level'] = verbose
                set_log_level(verbose, match)
            elif o == '--vispy-profile':
                config['profile'] = a
            elif o == '--vispy-cprofile':
                _enable_profiling()
            elif o == '--vispy-help':
                print(VISPY_HELP)
            elif o == '--vispy-dpi':
                config['dpi'] = int(a)
            elif o == '--vispy-audit-tests':
                config['audit_tests'] = True
            else:
                logger.warning("Unsupported vispy flag: %s" % o)
###############################################################################
# CONFIG
# Adapted from pyzolib/paths.py:
# https://bitbucket.org/pyzo/pyzolib/src/tip/paths.py
def _get_vispy_app_dir():
"""Helper to get the default directory for storing vispy data"""
# Define default user directory
user_dir = os.path.expanduser('~')
# Get system app data dir
path = None
if sys.platform.startswith('win'):
path1, path2 = os.getenv('LOCALAPPDATA'), os.getenv('APPDATA')
path = path1 or path2
elif sys.platform.startswith('darwin'):
path = os.path.join(user_dir, 'Library', 'Application Support')
# On Linux and as fallback
if not (path and os.path.isdir(path)):
path = user_dir
# Maybe we should store things local to the executable (in case of a
# portable distro or a frozen application that wants to be portable)
prefix = sys.prefix
if getattr(sys, 'frozen', None): # See application_dir() function
prefix = os.path.abspath(os.path.dirname(sys.path[0]))
for reldir in ('settings', '../settings'):
localpath = os.path.abspath(os.path.join(prefix, reldir))
if os.path.isdir(localpath):
try:
open(os.path.join(localpath, 'test.write'), 'wb').close()
os.remove(os.path.join(localpath, 'test.write'))
except IOError:
pass # We cannot write in this directory
else:
path = localpath
break
# Get path specific for this app
appname = '.vispy' if path == user_dir else 'vispy'
path = os.path.join(path, appname)
return path
class ConfigEvent(Event):
    """ Event indicating a configuration change.
    This class has a 'changes' attribute which is a dict of all name:value
    pairs that have changed in the configuration.
    """
    def __init__(self, changes):
        Event.__init__(self, type='config_change')
        # Mapping of {config_key: new_value} for every entry that changed.
        self.changes = changes
class Config(object):
    """ Container for global settings used application-wide in vispy.

    Events:
    -------
    Config.events.changed - Emits ConfigEvent whenever the configuration
    changes.
    """

    def __init__(self, **kwargs):
        self.events = EmitterGroup(source=self)
        self.events['changed'] = EventEmitter(event_class=ConfigEvent,
                                              source=self)
        self._config = {}
        self.update(**kwargs)
        self._known_keys = get_config_keys()

    def __getitem__(self, item):
        return self._config[item]

    def __setitem__(self, item, val):
        self._check_key_val(item, val)
        self._config[item] = val
        # Inform any listeners that a configuration option has changed.
        self.events.changed(changes={item: val})

    def _check_key_val(self, key, val):
        global _allowed_config_keys
        # Validate against the set of known keys and their allowed types.
        allowed = _allowed_config_keys
        if key not in allowed:
            raise KeyError('key "%s" not in known keys: "%s"'
                           % (key, allowed))
        if not isinstance(val, allowed[key]):
            raise TypeError('Value for key "%s" must be one of %s, not %s.'
                            % (key, allowed[key], type(val)))

    def update(self, **kwargs):
        # Validate every entry first so a bad one leaves the config untouched.
        for key, val in kwargs.items():
            self._check_key_val(key, val)
        self._config.update(kwargs)
        self.events.changed(changes=kwargs)

    def __repr__(self):
        return repr(self._config)
def get_config_keys():
    """The config keys known by vispy and their allowed data types.

    Returns
    -------
    keys : dict
        Dict of {key: (types,)} pairs.
    """
    global _allowed_config_keys
    # Return a shallow copy so callers cannot mutate the canonical mapping.
    return dict(_allowed_config_keys)
def _get_config_fname():
    """Return the full path of the vispy config file (or None)."""
    directory = _get_vispy_app_dir()
    if directory is None:
        return None
    if os.environ.get('_VISPY_CONFIG_TESTING', None) is not None:
        # The test suite redirects the config into a throw-away temp dir.
        return op.join(_TempDir(), 'vispy.json')
    return op.join(directory, 'vispy.json')
def _load_config():
    """Helper to load prefs from ~/.vispy/vispy.json"""
    fname = _get_config_fname()
    # Missing file simply means "no saved preferences".
    if fname is None or not op.isfile(fname):
        return {}
    with open(fname, 'r') as fid:
        return json.load(fid)
def save_config(**kwargs):
    """Save configuration keys to vispy config file

    Parameters
    ----------
    **kwargs : keyword arguments
        Key/value pairs to save to the config file. With no arguments the
        entire current configuration is persisted.
    """
    if not kwargs:
        kwargs = config._config
    # Merge the new values with whatever is already stored on disk.
    current = _load_config()
    current.update(**kwargs)
    # write to disk
    fname = _get_config_fname()
    if fname is None:
        raise RuntimeError('config filename could not be determined')
    if not op.isdir(op.dirname(fname)):
        os.mkdir(op.dirname(fname))
    with open(fname, 'w') as fid:
        json.dump(current, fid, sort_keys=True, indent=0)
def set_data_dir(directory=None, create=False, save=False):
    """Set vispy data download directory

    Parameters
    ----------
    directory : str | None
        The directory to use. None selects the default data path.
    create : bool
        If True, create directory if it doesn't exist.
    save : bool
        If True, save the configuration to the vispy config.
    """
    if directory is None:
        if _data_path is None:
            raise IOError('default path cannot be determined, please '
                          'set it manually (directory != None)')
        directory = _data_path
    if not op.isdir(directory):
        if not create:
            raise IOError('directory "%s" does not exist, perhaps try '
                          'create=True to create it?' % directory)
        os.mkdir(directory)
    config.update(data_path=directory)
    if save:
        save_config(data_path=directory)
def _enable_profiling():
    """ Start profiling and register callback to print stats when the program
    exits.
    """
    import cProfile
    import atexit
    global _profiler
    _profiler = cProfile.Profile()
    _profiler.enable()
    # Stats are only printed at interpreter exit, via _profile_atexit.
    atexit.register(_profile_atexit)
# Module-level handle to the active cProfile.Profile instance; stays None
# until _enable_profiling() is called (from --vispy-cprofile handling).
_profiler = None
def _profile_atexit():
    global _profiler
    # Print cumulative-time-sorted stats gathered since _enable_profiling().
    _profiler.print_stats(sort='cumulative')
def sys_info(fname=None, overwrite=False):
    """Get relevant system and debugging information

    Parameters
    ----------
    fname : str | None
        Filename to dump info to. Use None to simply print.
    overwrite : bool
        If True, overwrite file (if it exists).

    Returns
    -------
    out : str
        The system information as a string.

    Raises
    ------
    IOError
        If `fname` exists and `overwrite` is False. Errors during the
        info gathering itself are caught and appended to the output.
    """
    if fname is not None and op.isfile(fname) and not overwrite:
        raise IOError('file exists, use overwrite=True to overwrite')
    out = ''
    try:
        # Nest all imports here to avoid any circular imports
        from ..app import use_app, Canvas
        from ..app.backends import BACKEND_NAMES
        from ..gloo import gl
        from ..testing import has_backend
        # get default app
        with use_log_level('warning'):
            app = use_app(call_reuse=False)  # suppress messages
        out += 'Platform: %s\n' % platform.platform()
        out += 'Python: %s\n' % str(sys.version).replace('\n', ' ')
        out += 'Backend: %s\n' % app.backend_name
        for backend in BACKEND_NAMES:
            # notebook backends cannot be probed via has_backend
            if backend.startswith('ipynb_'):
                continue
            with use_log_level('warning', print_msg=False):
                which = has_backend(backend, out=['which'])[1]
            out += '{0:<9} {1}\n'.format(backend + ':', which)
        out += '\n'
        # We need an OpenGL context to get GL info
        canvas = Canvas('Test', (10, 10), show=False, app=app)
        canvas._backend._vispy_set_current()
        out += 'GL version: %r\n' % (gl.glGetParameter(gl.GL_VERSION),)
        x_ = gl.GL_MAX_TEXTURE_SIZE
        out += 'MAX_TEXTURE_SIZE: %r\n' % (gl.glGetParameter(x_),)
        out += 'Extensions: %r\n' % (gl.glGetParameter(gl.GL_EXTENSIONS),)
        canvas.close()
    except Exception:  # don't stop printing info
        # Best-effort: record the traceback in the output instead of raising.
        out += '\nInfo-gathering error:\n%s' % traceback.format_exc()
        pass
    if fname is not None:
        with open(fname, 'w') as fid:
            fid.write(out)
    return out
class _TempDir(str):
"""Class for creating and auto-destroying temp dir
This is designed to be used with testing modules.
We cannot simply use __del__() method for cleanup here because the rmtree
function may be cleaned up before this object, so we use the atexit module
instead.
"""
def __new__(self):
new = str.__new__(self, tempfile.mkdtemp())
return new
def __init__(self):
self._path = self.__str__()
atexit.register(self.cleanup)
def cleanup(self):
rmtree(self._path, ignore_errors=True)
# initialize config options
_init()
| |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South schema migration for the ``physical`` app.

    ``forwards`` converts the engine/patch version columns from
    ``CharField`` to ``PositiveIntegerField``; ``backwards`` restores the
    original ``CharField`` definitions.  The ``models`` attribute below is
    the frozen-ORM snapshot South uses to run this migration — it is
    machine generated and must not be edited by hand.
    """

    def forwards(self, orm):
        # Changing field 'EnginePatch.patch_version'
        db.alter_column(u'physical_enginepatch', 'patch_version', self.gf('django.db.models.fields.PositiveIntegerField')())

        # Changing field 'Engine.major_version'
        db.alter_column(u'physical_engine', 'major_version', self.gf('django.db.models.fields.PositiveIntegerField')(null=True))

        # Changing field 'Engine.minor_version'
        db.alter_column(u'physical_engine', 'minor_version', self.gf('django.db.models.fields.PositiveIntegerField')(null=True))

    def backwards(self, orm):
        # Changing field 'EnginePatch.patch_version'
        db.alter_column(u'physical_enginepatch', 'patch_version', self.gf('django.db.models.fields.CharField')(max_length=100))

        # Changing field 'Engine.major_version'
        db.alter_column(u'physical_engine', 'major_version', self.gf('django.db.models.fields.CharField')(max_length=100, null=True))

        # Changing field 'Engine.minor_version'
        db.alter_column(u'physical_engine', 'minor_version', self.gf('django.db.models.fields.CharField')(max_length=100, null=True))

    # Frozen ORM model definitions (South snapshot) — machine generated.
    models = {
        u'physical.cloud': {
            'Meta': {'object_name': 'Cloud'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.databaseinfra': {
            'Meta': {'object_name': 'DatabaseInfra'},
            'backup_hour': ('django.db.models.fields.IntegerField', [], {}),
            'capacity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'database_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
            'endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'endpoint_dns': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Engine']"}),
            'engine_patch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EnginePatch']"}),
            'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_vm_created': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'name_prefix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            'name_stamp': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'blank': 'True'}),
            'per_database_size_mbytes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Plan']"}),
            'ssl_configured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
        },
        u'physical.databaseinfraparameter': {
            'Meta': {'unique_together': "((u'databaseinfra', u'parameter'),)", 'object_name': 'DatabaseInfraParameter'},
            'applied_on_database': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'current_value': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.DatabaseInfra']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Parameter']"}),
            'reset_default_value': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'physical.diskoffering': {
            'Meta': {'object_name': 'DiskOffering'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.engine': {
            'Meta': {'ordering': "(u'engine_type__name', u'version')", 'unique_together': "((u'version', u'engine_type'),)", 'object_name': 'Engine'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'engines'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
            'engine_upgrade_option': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Engine']"}),
            'has_users': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'major_version': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'minor_version': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'read_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'template_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user_data_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'version': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'write_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'})
        },
        u'physical.enginepatch': {
            'Meta': {'object_name': 'EnginePatch'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'patchs'", 'to': u"orm['physical.Engine']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_initial_patch': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'patch_path': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'patch_version': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.enginetype': {
            'Meta': {'ordering': "(u'name',)", 'object_name': 'EngineType'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_in_memory': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.environment': {
            'Meta': {'object_name': 'Environment'},
            'cloud': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'environment_cloud'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Cloud']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'migrate_environment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Environment']"}),
            'min_of_zones': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.environmentgroup': {
            'Meta': {'object_name': 'EnvironmentGroup'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'groups'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.host': {
            'Meta': {'object_name': 'Host'},
            'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'future_host': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Host']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'identifier': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
            'monitor_url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'offering': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Offering']", 'null': 'True'}),
            'os_description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
        },
        u'physical.instance': {
            'Meta': {'unique_together': "((u'address', u'port'),)", 'object_name': 'Instance'},
            'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.DatabaseInfra']"}),
            'dns': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'future_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Instance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'hostname': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.Host']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'port': ('django.db.models.fields.IntegerField', [], {}),
            'read_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'shard': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
            'total_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'used_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
        },
        u'physical.offering': {
            'Meta': {'object_name': 'Offering'},
            'cpus': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'offerings'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'memory_size_mb': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.parameter': {
            'Meta': {'ordering': "(u'engine_type__name', u'name')", 'unique_together': "((u'name', u'engine_type'),)", 'object_name': 'Parameter'},
            'allowed_values': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'custom_method': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'dynamic': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'enginetype'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'parameter_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.plan': {
            'Meta': {'object_name': 'Plan'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'plans'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
            'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plans'", 'to': u"orm['physical.Engine']"}),
            'engine_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
            'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'plans'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
            'has_persistence': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_ha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'max_db_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'migrate_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Plan']"}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'provider': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'replication_topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'replication_topology'", 'null': 'True', 'to': u"orm['physical.ReplicationTopology']"}),
            'stronger_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'main_offerings'", 'null': 'True', 'to': u"orm['physical.Offering']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'weaker_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'weaker_offerings'", 'null': 'True', 'to': u"orm['physical.Offering']"})
        },
        u'physical.planattribute': {
            'Meta': {'object_name': 'PlanAttribute'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plan_attributes'", 'to': u"orm['physical.Plan']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'physical.replicationtopology': {
            'Meta': {'object_name': 'ReplicationTopology'},
            'can_change_parameters': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_clone_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_reinstall_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_resize_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_setup_ssl': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_switch_master': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_upgrade_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'class_path': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'details': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'engine': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'replication_topologies'", 'symmetrical': 'False', 'to': u"orm['physical.Engine']"}),
            'has_horizontal_scalability': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'parameter': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'replication_topologies'", 'blank': 'True', 'to': u"orm['physical.Parameter']"}),
            'script': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'replication_topologies'", 'null': 'True', 'to': u"orm['physical.Script']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.script': {
            'Meta': {'object_name': 'Script'},
            'configuration': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'initialization': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
            'metric_collector': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'start_database': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
            'start_replication': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.topologyparametercustomvalue': {
            'Meta': {'unique_together': "((u'topology', u'parameter'),)", 'object_name': 'TopologyParameterCustomValue'},
            'attr_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parameter': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'topology_custom_values'", 'to': u"orm['physical.Parameter']"}),
            'topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'param_custom_values'", 'to': u"orm['physical.ReplicationTopology']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.vip': {
            'Meta': {'object_name': 'Vip'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'identifier': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'infra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'vips'", 'to': u"orm['physical.DatabaseInfra']"}),
            'original_vip': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Vip']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.volume': {
            'Meta': {'object_name': 'Volume'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'host': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'volumes'", 'to': u"orm['physical.Host']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'total_size_kb': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'used_size_kb': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        }
    }

    # Apps whose frozen models are included above.
    complete_apps = ['physical']
| |
"""
Test the functions that put vector data into GMT.
"""
import itertools
from datetime import datetime
import numpy as np
import numpy.testing as npt
import pytest
from pygmt import clib
from pygmt.exceptions import GMTCLibError, GMTInvalidInput
from pygmt.helpers import GMTTempFile
def test_put_vector():
    """
    Check that assigning a numpy array to a dataset works.

    Exercises every numeric dtype GMT supports, round-tripping the data
    through a file to verify it was stored correctly.
    """
    for dtype in ("float32", "float64", "int32", "int64", "uint32", "uint64"):
        with clib.Session() as lib:
            dataset = lib.create_data(
                family="GMT_IS_DATASET|GMT_VIA_VECTOR",
                geometry="GMT_IS_POINT",
                mode="GMT_CONTAINER_ONLY",
                dim=[3, 5, 1, 0],  # columns, rows, layers, dtype
            )
            x = np.arange(1, 6, dtype=dtype)
            y = np.arange(6, 11, dtype=dtype)
            z = np.arange(11, 16, dtype=dtype)
            for column, vector in (
                (lib["GMT_X"], x),
                (lib["GMT_Y"], y),
                (lib["GMT_Z"], z),
            ):
                lib.put_vector(dataset, column=column, vector=vector)
            # Turns out wesn doesn't matter for Datasets
            wesn = [0] * 6
            # Save the data to a file to see if it's being accessed correctly
            with GMTTempFile() as tmp_file:
                lib.write_data(
                    "GMT_IS_VECTOR",
                    "GMT_IS_POINT",
                    "GMT_WRITE_SET",
                    wesn,
                    tmp_file.name,
                    dataset,
                )
                # Load the data and check that it's correct
                newx, newy, newz = tmp_file.loadtxt(unpack=True, dtype=dtype)
                npt.assert_allclose(newx, x)
                npt.assert_allclose(newy, y)
                npt.assert_allclose(newz, z)
def test_put_vector_mixed_dtypes():
    """
    Passing numpy arrays of two different dtypes into one dataset.

    See https://github.com/GenericMappingTools/pygmt/issues/255
    """
    dtypes = ("float32", "float64", "int32", "int64", "uint32", "uint64")
    for dtypex, dtypey in itertools.permutations(dtypes, r=2):
        with clib.Session() as lib:
            dataset = lib.create_data(
                family="GMT_IS_DATASET|GMT_VIA_VECTOR",
                geometry="GMT_IS_POINT",
                mode="GMT_CONTAINER_ONLY",
                dim=[2, 5, 1, 0],  # columns, rows, layers, dtype
            )
            x = np.arange(1, 6, dtype=dtypex)
            y = np.arange(6, 11, dtype=dtypey)
            lib.put_vector(dataset, column=lib["GMT_X"], vector=x)
            lib.put_vector(dataset, column=lib["GMT_Y"], vector=y)
            # Turns out wesn doesn't matter for Datasets
            wesn = [0] * 6
            # Save the data to a file to see if it's being accessed correctly
            with GMTTempFile() as tmp_file:
                lib.write_data(
                    "GMT_IS_VECTOR",
                    "GMT_IS_POINT",
                    "GMT_WRITE_SET",
                    wesn,
                    tmp_file.name,
                    dataset,
                )
                # Load the data and check values and dtypes survived the trip
                newx, newy = tmp_file.loadtxt(
                    unpack=True, dtype=[("x", dtypex), ("y", dtypey)]
                )
                assert x.dtype == newx.dtype
                assert y.dtype == newy.dtype
                npt.assert_allclose(newx, x)
                npt.assert_allclose(newy, y)
def test_put_vector_string_dtype():
    """
    Passing string type vectors to a dataset.

    Covers numbers, longitudes, latitudes, and datetimes given as strings;
    GMT parses them to doubles (or formatted datetimes) on output.
    """
    # Local import: only needed to build a timezone-aware "today".
    from datetime import timezone

    # input string vectors: numbers, longitudes, latitudes, and datetimes
    vectors = np.array(
        [
            ["10", "20.0", "-30.0", "3.5e1"],
            ["10W", "30.50E", "30:30W", "40:30:30.500E"],
            ["10N", "30.50S", "30:30N", "40:30:30.500S"],
            ["2021-02-03", "2021-02-03T04", "2021-02-03T04:05:06.700", "T04:50:06.700"],
        ]
    )
    # output vectors in double or string type
    # Notes:
    # 1. longitudes and latitudes are stored in double in GMT
    # 2. The default output format for datetime is YYYY-mm-ddTHH:MM:SS
    # A time-only input ("T04:50:06.700") is dated with the current UTC day,
    # so compute "today" from an aware datetime.  (The original used
    # datetime.utcnow(), which is naive and deprecated since Python 3.12;
    # datetime.now(timezone.utc) yields the same %Y-%m-%d string.)
    expected_vectors = [
        [10.0, 20.0, -30.0, 35],
        [-10, 30.5, -30.5, 40.508472],
        [10, -30.50, 30.5, -40.508472],
        [
            "2021-02-03T00:00:00",
            "2021-02-03T04:00:00",
            "2021-02-03T04:05:06",
            f"{datetime.now(timezone.utc).strftime('%Y-%m-%d')}T04:50:06",
        ],
    ]
    # loop over all possible combinations of input types
    for i, j in itertools.combinations_with_replacement(range(4), r=2):
        with clib.Session() as lib:
            dataset = lib.create_data(
                family="GMT_IS_DATASET|GMT_VIA_VECTOR",
                geometry="GMT_IS_POINT",
                mode="GMT_CONTAINER_ONLY",
                dim=[2, 4, 1, 0],  # columns, rows, layers, dtype
            )
            lib.put_vector(dataset, column=lib["GMT_X"], vector=vectors[i])
            lib.put_vector(dataset, column=lib["GMT_Y"], vector=vectors[j])
            # Turns out wesn doesn't matter for Datasets
            wesn = [0] * 6
            # Save the data to a file to see if it's being accessed correctly
            with GMTTempFile() as tmp_file:
                lib.write_data(
                    "GMT_IS_VECTOR",
                    "GMT_IS_POINT",
                    "GMT_WRITE_SET",
                    wesn,
                    tmp_file.name,
                    dataset,
                )
                # Load the data
                output = np.genfromtxt(
                    tmp_file.name, dtype=None, names=("x", "y"), encoding=None
                )
                # check that the output is correct
                # Use npt.assert_allclose for numeric arrays
                # and npt.assert_array_equal for string arrays
                if i != 3:
                    npt.assert_allclose(output["x"], expected_vectors[i])
                else:
                    npt.assert_array_equal(output["x"], expected_vectors[i])
                if j != 3:
                    npt.assert_allclose(output["y"], expected_vectors[j])
                else:
                    npt.assert_array_equal(output["y"], expected_vectors[j])
def test_put_vector_invalid_dtype():
    """
    put_vector must raise GMTInvalidInput for dtypes GMT cannot store.
    """
    with clib.Session() as lib:
        dataset = lib.create_data(
            family="GMT_IS_DATASET|GMT_VIA_VECTOR",
            geometry="GMT_IS_POINT",
            mode="GMT_CONTAINER_ONLY",
            dim=[2, 3, 1, 0],  # columns, rows, layers, dtype
        )
        bad_vector = np.array([37, 12, 556], dtype="object")
        with pytest.raises(GMTInvalidInput):
            lib.put_vector(dataset, column=1, vector=bad_vector)
def test_put_vector_wrong_column():
    """
    put_vector must raise GMTCLibError for an out-of-range column index.
    """
    with clib.Session() as lib:
        dataset = lib.create_data(
            family="GMT_IS_DATASET|GMT_VIA_VECTOR",
            geometry="GMT_IS_POINT",
            mode="GMT_CONTAINER_ONLY",
            dim=[1, 3, 1, 0],  # columns, rows, layers, dtype
        )
        vector = np.array([37, 12, 556], dtype="float32")
        # Only column 0 exists, so column=1 must be rejected by GMT.
        with pytest.raises(GMTCLibError):
            lib.put_vector(dataset, column=1, vector=vector)
def test_put_vector_2d_fails():
    """
    put_vector must reject multidimensional arrays.
    """
    with clib.Session() as lib:
        dataset = lib.create_data(
            family="GMT_IS_DATASET|GMT_VIA_VECTOR",
            geometry="GMT_IS_POINT",
            mode="GMT_CONTAINER_ONLY",
            dim=[1, 6, 1, 0],  # columns, rows, layers, dtype
        )
        matrix = np.array([[37, 12, 556], [37, 12, 556]], dtype="int32")
        with pytest.raises(GMTInvalidInput):
            lib.put_vector(dataset, column=0, vector=matrix)
| |
# -*- coding: utf-8 -*-
# file api/tests.py
from __future__ import unicode_literals
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from address.models import ipv6_address, ipv4_address
from bgp.models import aut_num, neighbor
from netdevice.models import router, network_os, interface, logical_interface
from static.models import ipv6_static, ipv4_static
from netdevice.tests import create_router, create_interface
class APITests(APITestCase):
def test_create_aut_num(self):
    """POST a new aut_num through the API and verify it was stored."""
    payload = {
        "asn": '65000',
        "name": 'My Test ASN',
        "contact": '',
        "vrf": '',
    }
    response = self.client.post(reverse('api:aut_num'), payload, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    self.assertEqual(aut_num.objects.count(), 1)
    self.assertEqual(str(aut_num.objects.get().asn), '65000')
def test_create_aut_num_and_view_detail(self):
    """GET the aut_num detail endpoint for a freshly created object."""
    local_aut_num = aut_num.objects.create(asn=65000, name='test asn')
    detail_url = reverse('api:aut_num_detail', kwargs={'pk': local_aut_num.pk})
    response = self.client.get(detail_url, format='json')
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(aut_num.objects.count(), 1)
    self.assertEqual(aut_num.objects.get().asn, local_aut_num.asn)
def test_create_bgp_neighbor(self):
    """POST a new bgp neighbor through the API and verify it was stored."""
    bgp_router = create_router('junos')
    payload = {
        "router": bgp_router.pk,
        "aut_num": bgp_router.local_aut_num.pk,
        "peer_ip": '192.0.2.1',
    }
    response = self.client.post(reverse('api:bgp_neighbor'), payload, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    self.assertEqual(neighbor.objects.count(), 1)
    self.assertEqual(str(neighbor.objects.get().peer_ip), '192.0.2.1')
def test_create_bgp_neighbor_and_view_detail(self):
    """GET the detail view of a directly-created bgp neighbor."""
    bgp_router = create_router('junos')
    created = neighbor.objects.create(
        router=bgp_router,
        aut_num=bgp_router.local_aut_num,
        peer_ip='192.0.2.1',
    )
    detail_url = reverse('api:bgp_neighbor_detail', kwargs={'pk': created.pk})
    response = self.client.get(detail_url, format='json')
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(neighbor.objects.count(), 1)
    self.assertEqual(neighbor.objects.get().peer_ip, created.peer_ip)
def test_create_router(self):
    """Create an ASN, then POST a router referencing it via the API."""
    owning_asn = aut_num.objects.create(asn=65000, name='test asn')
    payload = {
        "routing_id": '1.1.1.1',
        "hostname": 'test-router',
        "ibgp": 'true',
        "service_ssh": 'true',
        "service_netconf": 'true',
        # NOTE(review): hard-coded pk of a network_os record — presumably
        # pre-seeded by a fixture/migration; confirm against test setup.
        "network_os": 2,
        "local_aut_num": owning_asn.pk,
    }
    response = self.client.post(reverse('api:routers'), payload, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    self.assertEqual(router.objects.count(), 1)
    self.assertEqual(router.objects.get().hostname, 'test-router')
def test_create_router_and_view_detail(self):
    """GET the detail view of a router created via the test helper."""
    helper_router = create_router('junos')
    detail_url = reverse('api:routers_detail', kwargs={'pk': helper_router.pk})
    response = self.client.get(detail_url, format='json')
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(router.objects.count(), 1)
    self.assertEqual(router.objects.get().hostname, 'test-router')
def test_create_network_os(self):
"""
Create a network_os object, then view it in the api.
"""
data = {"name": 'test-os'}
url = reverse('api:network_os')
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(network_os.objects.count(), 6)
self.assertEqual(str(network_os.objects.get(name='test-os')), 'test-os')
def test_create_network_os_and_view_detail(self):
test_os = network_os.objects.create(name='test-os')
url = reverse('api:network_os_detail', kwargs={'pk': test_os.pk})
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(network_os.objects.count(), 6)
self.assertEqual(network_os.objects.get(name='test-os'), test_os)
def test_create_interface(self):
"""
Create an interface object, then view it in the api.
"""
test_router = create_router('junos')
data = {
"router": test_router.pk,
"name": 'ge-0/0/0',
"description": 'test-interface',
}
url = reverse('api:interfaces')
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(interface.objects.count(), 1)
self.assertEqual(str(interface.objects.get().name), 'ge-0/0/0')
def test_create_interface_and_view_detail(self):
"""
Create an interface object, then view the detailed api call.
"""
test_router = create_router('junos')
test_interface = interface.objects.create(router=test_router,
name='ge-0/0/0',
description='test-interface')
url = reverse(
'api:interfaces_detail',
kwargs={'pk': test_interface.pk}
)
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(interface.objects.count(), 1)
self.assertEqual(interface.objects.get(name='ge-0/0/0'), test_interface)
def test_create_logical_interface(self):
"""
Create a logical_interface object, then view it in the api.
"""
test_router = create_router('junos')
test_interface = interface.objects.create(router=test_router,
name='ge-0/0/0',
description='test-interface')
data = {
"interface": test_interface.pk,
"name": '0',
"description": 'test-logical-interface',
}
url = reverse('api:logical_interfaces')
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(logical_interface.objects.count(), 1)
self.assertEqual(str(logical_interface.objects.get().name), '0')
def test_create_logical_interface_and_view_detail(self):
"""
Create a logical_interface object, then view the detailed api call.
"""
test_router = create_router('junos')
test_interface = create_interface(test_router)
url = reverse(
'api:logical_interfaces_detail',
kwargs={'pk': test_interface.pk},
)
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(logical_interface.objects.count(), 1)
self.assertEqual(
logical_interface.objects.get(name='10'),
test_interface
)
def test_create_ipv6_address(self):
"""
Create an ipv6_address object, then view it in the api.
"""
test_router = create_router('junos')
test_interface = create_interface(test_router)
data = {
"interface": test_interface.pk,
"host": '2001:db8::1',
"cidr": '64',
}
url = reverse('api:ipv6_address')
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(ipv6_address.objects.count(), 1)
self.assertEqual(str(ipv6_address.objects.get().host), '2001:db8::1')
def test_create_ipv6_address_and_view_detail(self):
"""
Create an ipv6_address object, then view the detailed api call.
"""
test_router = create_router('junos')
test_interface = create_interface(test_router)
test_address = ipv6_address.objects.create(
interface=test_interface,
host='2001:db8::1',
cidr=64,
)
url = reverse(
'api:ipv6_address_detail',
kwargs={'pk': test_address.pk},
)
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(ipv6_address.objects.count(), 1)
self.assertEqual(
ipv6_address.objects.get(host='2001:db8::1'),
test_address
)
def test_create_ipv4_address(self):
"""
Create an ipv4_address object, then view it in the api.
"""
test_router = create_router('junos')
test_interface = create_interface(test_router)
data = {
"interface": test_interface.pk,
"host": '192.0.2.1',
"cidr": '24',
}
url = reverse('api:ipv4_address')
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(ipv4_address.objects.count(), 1)
self.assertEqual(str(ipv4_address.objects.get().host), '192.0.2.1')
def test_create_ipv4_address_and_view_detail(self):
"""
Create an ipv4_address object, then view the detailed api call.
"""
test_router = create_router('junos')
test_interface = create_interface(test_router)
test_address = ipv4_address.objects.create(
interface=test_interface,
host='192.0.2.1',
cidr=24,
)
url = reverse(
'api:ipv4_address_detail',
kwargs={'pk': test_address.pk},
)
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(ipv4_address.objects.count(), 1)
self.assertEqual(
ipv4_address.objects.get(host='192.0.2.1'),
test_address
)
def test_create_ipv6_static(self):
"""
Create an ipv6_static object, with an api call.
"""
test_router = create_router('junos')
data = {
"router": test_router.pk,
"network": '2001:db8::',
"next_hop": '2001:db8:1::1',
}
url = reverse('api:ipv6_static')
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(ipv6_static.objects.count(), 1)
self.assertEqual(str(ipv6_static.objects.get().network), '2001:db8::')
def test_create_ipv6_static_and_view_detail(self):
"""
Create an ipv6 static object, then check the detailed api view.
"""
test_router = create_router('junos')
test_route = ipv6_static.objects.create(
router=test_router,
network='2001:db8::',
next_hop='2001:db8:1::1',
)
url = reverse(
'api:ipv6_static_detail',
kwargs={'pk': test_route.pk},
)
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(ipv6_static.objects.count(), 1)
self.assertEqual(
ipv6_static.objects.get(network='2001:db8::'),
test_route,
)
def test_create_ipv4_static(self):
"""
Create an ipv4_static object, with an api call.
"""
test_router = create_router('junos')
data = {
"router": test_router.pk,
"network": '192.0.2.0',
"next_hop": '192.0.2.1',
}
url = reverse('api:ipv4_static')
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(ipv4_static.objects.count(), 1)
self.assertEqual(str(ipv4_static.objects.get().network), '192.0.2.0')
def test_create_ipv4_static_and_view_detail(self):
"""
Create an ipv4 static object, then check the detailed api view.
"""
test_router = create_router('junos')
test_route = ipv4_static.objects.create(
router=test_router,
network='192.0.2.0',
next_hop='192.0.2.1',
)
url = reverse(
'api:ipv4_static_detail',
kwargs={'pk': test_route.pk},
)
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(ipv4_static.objects.count(), 1)
self.assertEqual(
ipv4_static.objects.get(network='192.0.2.0'),
test_route,
)
| |
import os
import tempfile
from numpy.testing import assert_array_equal, assert_almost_equal
import pytest
try:
import rdkit
from rdkit import Chem
except ImportError:
rdkit = None
if rdkit is not None:
from oddt.toolkits.extras.rdkit.fixer import (AtomListToSubMol,
PreparePDBMol,
ExtractPocketAndLigand,
IsResidueConnected,
FetchStructure,
PrepareComplexes)
test_data_dir = os.path.dirname(os.path.abspath(__file__))
test_dir = os.path.join(test_data_dir, 'data', 'pdb')
@pytest.mark.skipif(rdkit is None, reason="RDKit required")
def test_atom_list_to_submol():
    """AtomListToSubMol extracts a submolecule, optionally carrying conformers."""
    mol = Chem.MolFromSmiles('CCCCC(=O)O')
    submol = AtomListToSubMol(mol, range(3, 7))
    # BUGFIX: this assertion was duplicated on two consecutive lines;
    # the redundant copy has been removed.
    assert submol.GetNumAtoms() == 4
    assert submol.GetNumBonds() == 3
    assert submol.GetBondBetweenAtoms(1, 2).GetBondType() == rdkit.Chem.rdchem.BondType.DOUBLE
    # a 3D conformer is carried over when includeConformer=True
    molfile = os.path.join(test_dir, '2qwe_Sbridge.pdb')
    mol = Chem.MolFromPDBFile(molfile, sanitize=False, removeHs=False)
    assert mol.GetConformer().Is3D()
    submol = AtomListToSubMol(mol, range(6), includeConformer=True)
    assert submol.GetConformer().Is3D()
    # submol has residue info
    atom = submol.GetAtomWithIdx(0)
    info = atom.GetPDBResidueInfo()
    assert info.GetResidueName() == 'CYS'
    assert info.GetResidueNumber() == 92
    # test multiple conformers
    mol.AddConformer(mol.GetConformer())
    assert mol.GetNumConformers() == 2
    submol = AtomListToSubMol(mol, range(6), includeConformer=True)
    assert submol.GetNumConformers() == 2
    # FIXME: Newer RDKit has GetPositions, 2016.03 does not
    mol_conf = mol.GetConformer()
    submol_conf = submol.GetConformer()
    assert_array_equal([submol_conf.GetAtomPosition(i)
                        for i in range(submol_conf.GetNumAtoms())],
                       [mol_conf.GetAtomPosition(i) for i in range(6)])
    # sub-submol extraction keeps all conformers and positions too
    submol2 = AtomListToSubMol(submol, range(3), includeConformer=True)
    submol2_conf = submol2.GetConformer()
    assert submol2.GetNumConformers() == 2
    assert_array_equal([submol2_conf.GetAtomPosition(i)
                        for i in range(submol2_conf.GetNumAtoms())],
                       [mol_conf.GetAtomPosition(i) for i in range(3)])
@pytest.mark.skipif(rdkit is None or rdkit.__version__ < '2017.03', reason="RDKit required")
def test_multivalent_Hs():
    """Test if fixer deals with multivalent Hs"""
    # TODO: require mol without Hs in the future (rdkit v. 2018)
    pdb_path = os.path.join(test_dir, '2c92_hypervalentH.pdb')
    fixed = Chem.MolFromPDBFile(pdb_path, sanitize=False, removeHs=False)
    fixed = PreparePDBMol(fixed, residue_whitelist=[], removeHs=False)
    hydrogen = fixed.GetAtomWithIdx(84)
    assert hydrogen.GetAtomicNum() == 1  # is it H
    assert hydrogen.GetDegree() == 1  # H should have 1 bond
    # every neighbour must belong to the same residue as the H itself
    own_residue = hydrogen.GetPDBResidueInfo().GetResidueName()
    for neighbour in hydrogen.GetNeighbors():
        assert own_residue == neighbour.GetPDBResidueInfo().GetResidueName()
    # mol can be sanitized
    assert Chem.SanitizeMol(fixed) == Chem.SanitizeFlags.SANITIZE_NONE
@pytest.mark.skipif(rdkit is None, reason="RDKit required")
def test_HOH_bonding():
    """Test if fixer unbinds HOH"""
    pdb_path = os.path.join(test_dir, '2vnf_bindedHOH.pdb')
    water_mol = Chem.MolFromPDBFile(pdb_path, sanitize=False, removeHs=False)
    # don't use templates and don't remove waters
    water_mol = PreparePDBMol(water_mol, removeHOHs=False)
    water_atom = water_mol.GetAtomWithIdx(5)
    assert water_atom.GetPDBResidueInfo().GetResidueName() == 'HOH'
    assert water_atom.GetDegree() == 0  # HOH should have no bonds
    # mol can be sanitized
    assert Chem.SanitizeMol(water_mol) == Chem.SanitizeFlags.SANITIZE_NONE
@pytest.mark.skipif(rdkit is None, reason="RDKit required")
def test_metal_bonding():
    """Test if fixer disconnects metals"""
    pdb_path = os.path.join(test_dir, '1ps3_zn.pdb')
    protein = Chem.MolFromPDBFile(pdb_path, sanitize=False, removeHs=False)
    protein = PreparePDBMol(protein)
    zinc = protein.GetAtomWithIdx(36)
    assert zinc.GetAtomicNum() == 30  # is it Zn
    assert zinc.GetDegree() == 0  # Zn should have no bonds
    assert zinc.GetFormalCharge() == 2
    assert zinc.GetNumExplicitHs() == 0
    # mol can be sanitized
    assert Chem.SanitizeMol(protein) == Chem.SanitizeFlags.SANITIZE_NONE
@pytest.mark.skipif(rdkit is None, reason="RDKit required")
def test_interresidue_bonding():
    """Test if fixer removes wrong connections between residues"""
    pdb_path = os.path.join(test_dir, '4e6d_residues.pdb')
    protein = Chem.MolFromPDBFile(pdb_path, sanitize=False, removeHs=False)
    protein = PreparePDBMol(protein)
    # the O from PRO (idx 11) and the N from GLN (idx 22)...
    for idx, atomic_num, res_name in ((11, 8, 'PRO'), (22, 7, 'GLN')):
        atom = protein.GetAtomWithIdx(idx)
        assert atom.GetAtomicNum() == atomic_num
        assert atom.GetPDBResidueInfo().GetResidueName() == res_name
    # ...must not be connected to each other
    assert protein.GetBondBetweenAtoms(11, 22) is None
    # mol can be sanitized
    assert Chem.SanitizeMol(protein) == Chem.SanitizeFlags.SANITIZE_NONE
@pytest.mark.skipif(rdkit is None, reason="RDKit required")
def test_intraresidue_bonding():
    """Test if fixer removes wrong connections within single residue"""
    pdb_path = os.path.join(test_dir, '1idg_connectivity.pdb')
    protein = Chem.MolFromPDBFile(pdb_path, sanitize=False, removeHs=False)
    protein = PreparePDBMol(protein)
    # check if N (idx 11) and C (idx 13) from GLU20 are not connected
    for idx, atomic_num in ((11, 7), (13, 6)):
        atom = protein.GetAtomWithIdx(idx)
        assert atom.GetAtomicNum() == atomic_num
        info = atom.GetPDBResidueInfo()
        assert info.GetResidueName() == 'GLU'
        assert info.GetResidueNumber() == 20
    assert protein.GetBondBetweenAtoms(11, 13) is None
    # mol can be sanitized
    assert Chem.SanitizeMol(protein) == Chem.SanitizeFlags.SANITIZE_NONE
@pytest.mark.skipif(rdkit is None, reason="RDKit required")
def test_bondtype():
    """Test if fixer deals with non-standard residue and fixes bond types"""
    molfile = os.path.join(test_dir, '3rsb_bondtype.pdb')
    mol = Chem.MolFromPDBFile(molfile, sanitize=False, removeHs=False)
    mol = PreparePDBMol(mol)
    # check if there is a double bond between a C and an O from MSE
    # (atoms 13 and 14 below are carbon and oxygen, per the asserts)
    atom1 = mol.GetAtomWithIdx(13)
    assert atom1.GetAtomicNum() == 6
    assert atom1.GetPDBResidueInfo().GetResidueName() == 'MSE'
    atom2 = mol.GetAtomWithIdx(14)
    assert atom2.GetAtomicNum() == 8
    assert atom2.GetPDBResidueInfo().GetResidueName() == 'MSE'
    # there is a bond and it is double
    bond = mol.GetBondBetweenAtoms(13, 14)
    assert bond is not None
    assert_almost_equal(bond.GetBondTypeAsDouble(), 2.0)
    # mol can be sanitized
    assert Chem.SanitizeMol(mol) == Chem.SanitizeFlags.SANITIZE_NONE
@pytest.mark.skipif(rdkit is None, reason="RDKit required")
def test_ring():
    """Test if fixer adds missing bond in ring"""
    molfile = os.path.join(test_dir, '4yzm_ring.pdb')
    mol = Chem.MolFromPDBFile(molfile, sanitize=False, removeHs=False)
    mol = PreparePDBMol(mol)
    # check that the bond between two ring carbons of PHE was restored
    # (atoms 12 and 13 below are both carbons of PHE, per the asserts)
    atom1 = mol.GetAtomWithIdx(12)
    assert atom1.GetAtomicNum() == 6
    assert atom1.GetPDBResidueInfo().GetResidueName() == 'PHE'
    atom2 = mol.GetAtomWithIdx(13)
    assert atom2.GetAtomicNum() == 6
    assert atom2.GetPDBResidueInfo().GetResidueName() == 'PHE'
    # there is a bond and it is aromatic
    bond = mol.GetBondBetweenAtoms(12, 13)
    assert bond is not None
    assert_almost_equal(bond.GetBondTypeAsDouble(), 1.5)
    # mol can be sanitized
    assert Chem.SanitizeMol(mol) == Chem.SanitizeFlags.SANITIZE_NONE
@pytest.mark.skipif(rdkit is None, reason="RDKit required")
def test_sulphur_bridge():
    """Test sulphur bridges retention"""
    pdb_path = os.path.join(test_dir, '2qwe_Sbridge.pdb')
    protein = Chem.MolFromPDBFile(pdb_path, sanitize=False, removeHs=False)
    protein = PreparePDBMol(protein)
    sulphur1 = protein.GetAtomWithIdx(5)
    sulphur2 = protein.GetAtomWithIdx(11)
    # both ends are SG atoms, from residues 92 and 417
    for atom, res_number in ((sulphur1, 92), (sulphur2, 417)):
        assert atom.GetPDBResidueInfo().GetName().strip() == 'SG'
        assert atom.GetPDBResidueInfo().GetResidueNumber() == res_number
    # the bridge bond itself must have been kept
    assert protein.GetBondBetweenAtoms(sulphur1.GetIdx(), sulphur2.GetIdx()) is not None
@pytest.mark.skipif(rdkit is None, reason="RDKit required")
def test_pocket_extractor():
    """Test extracting pocket and ligand"""
    molfile = os.path.join(test_dir, '5ar7.pdb')
    mol = Chem.MolFromPDBFile(molfile, sanitize=False, removeHs=False)
    # there should be no pocket at 1A
    pocket, ligand = ExtractPocketAndLigand(mol, cutoff=1.)
    assert pocket.GetNumAtoms() == 0
    assert ligand.GetNumAtoms() == 26
    # pocket at a 12A cutoff (the original comment said "5A" -- wrong)
    pocket, ligand = ExtractPocketAndLigand(mol, cutoff=12.)
    assert pocket.GetNumAtoms() == 928
    assert ligand.GetNumAtoms() == 26
    # check if HOH is in pocket
    atom = pocket.GetAtomWithIdx(910)
    assert atom.GetAtomicNum() == 8
    assert atom.GetPDBResidueInfo().GetResidueName() == 'HOH'
    # Prepare and sanitize pocket and ligand
    pocket = PreparePDBMol(pocket)
    ligand = PreparePDBMol(ligand)
    assert Chem.SanitizeMol(pocket) == Chem.SanitizeFlags.SANITIZE_NONE
    assert Chem.SanitizeMol(ligand) == Chem.SanitizeFlags.SANITIZE_NONE
    # Check atom/bond properties for both molecules
    bond = pocket.GetBondWithIdx(39)
    assert bond.GetIsAromatic()
    assert bond.GetBeginAtom().GetPDBResidueInfo().GetResidueName() == 'TYR'
    atom = ligand.GetAtomWithIdx(22)
    assert atom.GetAtomicNum() == 7
    assert atom.GetIsAromatic()
    assert atom.GetPDBResidueInfo().GetResidueName() == 'SR8'
    # test if metal is in pocket
    molfile = os.path.join(test_dir, '4p6p_lig_zn.pdb')
    mol = Chem.MolFromPDBFile(molfile, sanitize=False, removeHs=False)
    assert mol.GetNumAtoms() == 176
    pocket, ligand = ExtractPocketAndLigand(mol, cutoff=5.)
    assert pocket.GetNumAtoms() == 162
    assert ligand.GetNumAtoms() == 14
    atom = pocket.GetAtomWithIdx(153)
    assert atom.GetPDBResidueInfo().GetResidueName().strip() == 'ZN'
    atom = pocket.GetAtomWithIdx(160)
    assert atom.GetPDBResidueInfo().GetResidueName() == 'HOH'
    # without residue expansion the pocket is much smaller
    pocket, ligand = ExtractPocketAndLigand(mol, cutoff=5., expandResidues=False)
    assert pocket.GetNumAtoms() == 74
    assert ligand.GetNumAtoms() == 14
    atom = pocket.GetAtomWithIdx(65)
    assert atom.GetPDBResidueInfo().GetResidueName().strip() == 'ZN'
    atom = pocket.GetAtomWithIdx(73)
    assert atom.GetPDBResidueInfo().GetResidueName() == 'HOH'
    # ligand and protein white/blacklist
    molfile = os.path.join(test_dir, '1dy3_2LIG.pdb')
    mol = Chem.MolFromPDBFile(molfile, sanitize=False, removeHs=False)
    # by default the largest ligand - ATP
    pocket, ligand = ExtractPocketAndLigand(mol, cutoff=20.)
    assert pocket.GetNumAtoms() == 304
    assert ligand.GetNumAtoms() == 31
    atom = ligand.GetAtomWithIdx(0)
    assert atom.GetPDBResidueInfo().GetResidueName() == 'ATP'
    # blacklist ATP to get other largest ligand - 87Y
    pocket, ligand = ExtractPocketAndLigand(mol, cutoff=20.,
                                            ligand_residue_blacklist=['ATP'])
    assert pocket.GetNumAtoms() == 304
    assert ligand.GetNumAtoms() == 23
    atom = ligand.GetAtomWithIdx(0)
    assert atom.GetPDBResidueInfo().GetResidueName() == '87Y'
    # point to 87Y explicitly
    pocket, ligand = ExtractPocketAndLigand(mol, cutoff=20.,
                                            ligand_residue='87Y')
    assert pocket.GetNumAtoms() == 304
    assert ligand.GetNumAtoms() == 23
    atom = ligand.GetAtomWithIdx(0)
    assert atom.GetPDBResidueInfo().GetResidueName() == '87Y'
    # include ATP in pocket to get other largest ligand - 87Y
    pocket, ligand = ExtractPocketAndLigand(mol, cutoff=20.,
                                            append_residues=['ATP'])
    assert pocket.GetNumAtoms() == 304+31
    assert ligand.GetNumAtoms() == 23
    atom = ligand.GetAtomWithIdx(0)
    assert atom.GetPDBResidueInfo().GetResidueName() == '87Y'
    atom = pocket.GetAtomWithIdx(310)
    assert atom.GetPDBResidueInfo().GetResidueName() == 'ATP'
@pytest.mark.skipif(rdkit is None, reason="RDKit required")
def test_aromatic_ring():
    """Test aromaticity for partial matches"""

    def atom_and_info(mol, idx):
        # Convenience: fetch an atom together with its PDB residue info.
        atom = mol.GetAtomWithIdx(idx)
        return atom, atom.GetPDBResidueInfo()

    # ring is complete and should be aromatic
    molfile = os.path.join(test_dir, '5ar7_HIS.pdb')
    mol = Chem.MolFromPDBFile(molfile, sanitize=False, removeHs=False)
    mol = PreparePDBMol(mol)
    # both imidazole nitrogens of HIS 246 must come out aromatic
    for idx, atom_name in ((6, 'ND1'), (9, 'NE2')):
        atom, info = atom_and_info(mol, idx)
        assert atom.GetAtomicNum() == 7
        assert info.GetResidueName() == 'HIS'
        assert info.GetResidueNumber() == 246
        assert info.GetName().strip() == atom_name
        assert atom.GetIsAromatic()
    assert Chem.SanitizeMol(mol) == Chem.SanitizeFlags.SANITIZE_NONE
    # there is only one atom from the ring and it shouldn't be aromatic
    molfile = os.path.join(test_dir, '3cx9_TYR.pdb')
    mol = Chem.MolFromPDBFile(molfile, sanitize=False, removeHs=False)
    mol = PreparePDBMol(mol)
    atom, info = atom_and_info(mol, 14)
    assert atom.GetAtomicNum() == 6
    assert info.GetResidueName() == 'TYR'
    assert info.GetResidueNumber() == 138
    assert info.GetName().strip() == 'CG'
    assert not atom.GetIsAromatic()
    assert Chem.SanitizeMol(mol) == Chem.SanitizeFlags.SANITIZE_NONE
@pytest.mark.skipif(rdkit is None, reason="RDKit required")
def test_many_missing():
    """Test parsing residues with **many** missing atoms and bonds"""
    pdb_path = os.path.join(test_dir, '2wb5_GLN.pdb')
    fragment = Chem.MolFromPDBFile(pdb_path, sanitize=False, removeHs=False)
    fragment = PreparePDBMol(fragment)
    assert fragment.GetNumAtoms() == 5
    assert Chem.SanitizeMol(fragment) == Chem.SanitizeFlags.SANITIZE_NONE
    # the last atom is left without any bonds
    assert fragment.GetAtomWithIdx(4).GetDegree() == 0
    # test if removal works
    fragment = Chem.MolFromPDBFile(pdb_path, sanitize=False, removeHs=False)
    fragment = PreparePDBMol(fragment, remove_incomplete=True)
    assert fragment.GetNumAtoms() == 0
    assert Chem.SanitizeMol(fragment) == Chem.SanitizeFlags.SANITIZE_NONE
@pytest.mark.skipif(rdkit is None, reason="RDKit required")
def test_remove_incomplete():
    """Test removing residues with missing atoms"""
    molfile = os.path.join(test_dir, '3cx9_TYR.pdb')
    mol = Chem.MolFromPDBFile(molfile, sanitize=False, removeHs=False)
    # keep all residues
    new_mol = PreparePDBMol(mol, remove_incomplete=False)
    assert new_mol.GetNumAtoms() == 23
    residues = {atom.GetPDBResidueInfo().GetResidueNumber()
                for atom in new_mol.GetAtoms()}
    # BUGFIX: this used to be `assert residues, {137, 138 == 139}` -- the set
    # literal was only the assert *message*, so the check always passed.
    assert residues == {137, 138, 139}
    assert Chem.SanitizeMol(new_mol) == Chem.SanitizeFlags.SANITIZE_NONE
    # remove residue with missing sidechain
    new_mol = PreparePDBMol(mol, remove_incomplete=True)
    assert new_mol.GetNumAtoms() == 17
    residues = {atom.GetPDBResidueInfo().GetResidueNumber()
                for atom in new_mol.GetAtoms()}
    # BUGFIX: same malformed assert as above (`assert residues, {137 == 139}`)
    assert residues == {137, 139}
    assert Chem.SanitizeMol(new_mol) == Chem.SanitizeFlags.SANITIZE_NONE
@pytest.mark.skipif(rdkit is None, reason="RDKit required")
def test_custom_templates():
    """Test using custom templates"""
    molfile = os.path.join(test_dir, '3cx9_TYR.pdb')
    mol = Chem.MolFromPDBFile(molfile, sanitize=False, removeHs=False)
    templates = {
        'TYR': 'CCC(N)C=O',
        'LYS': 'NC(C(O)=O)CCCCN',
        'LEU': 'CC(C)CC(N)C(=O)O',
    }
    mol_templates = {resname: Chem.MolFromSmiles(smi)
                     for resname, smi in templates.items()}
    # templates may be given as SMILES or Mol, replacing defaults or not
    for kwargs in ({'custom_templates': {'TYR': 'CCC(N)C=O'}},
                   {'custom_templates': {'TYR': Chem.MolFromSmiles('CCC(N)C=O')}},
                   {'custom_templates': templates, 'replace_default_templates': True},
                   {'custom_templates': mol_templates, 'replace_default_templates': True}):
        # use TYR without sidechain - all matches should be complete
        new_mol = PreparePDBMol(mol, remove_incomplete=True, **kwargs)
        assert new_mol.GetNumAtoms() == 23
        residues = {atom.GetPDBResidueInfo().GetResidueNumber()
                    for atom in new_mol.GetAtoms()}
        # BUGFIX: this used to be `assert residues, {137, 138 == 139}` -- the
        # set literal was only the assert *message*, so the check always passed.
        assert residues == {137, 138, 139}
        assert Chem.SanitizeMol(new_mol) == Chem.SanitizeFlags.SANITIZE_NONE
@pytest.mark.skipif(rdkit is None, reason="RDKit required")
def test_add_missing_atoms():
    """Test rebuilding missing atoms via PreparePDBMol(add_missing_atoms=True)."""
    # add missing atom at tryptophan
    molfile = os.path.join(test_dir, '5dhh_missingatomTRP.pdb')
    mol = Chem.MolFromPDBFile(molfile, sanitize=True)
    mol = Chem.RemoveHs(mol, sanitize=False)
    assert mol.GetNumAtoms() == 26
    mol = PreparePDBMol(mol, add_missing_atoms=True)
    # exactly one atom was added back
    assert mol.GetNumAtoms() == 27
    atom = mol.GetAtomWithIdx(21)
    assert atom.GetAtomicNum() == 6
    info = atom.GetPDBResidueInfo()
    assert info.GetResidueName() == 'TRP'
    assert info.GetResidueNumber() == 175
    assert info.GetName().strip() == 'C9'
    # the rebuilt atom must be part of the aromatic ring system
    assert atom.IsInRing()
    assert atom.GetIsAromatic()
    assert Chem.SanitizeMol(mol) == Chem.SanitizeFlags.SANITIZE_NONE
    # add whole ring to tyrosine
    molfile = os.path.join(test_dir, '3cx9_TYR.pdb')
    mol = Chem.MolFromPDBFile(molfile, sanitize=True)
    mol = Chem.RemoveHs(mol, sanitize=False)
    assert mol.GetNumAtoms() == 23
    mol = PreparePDBMol(mol, add_missing_atoms=True)
    # six atoms were added back (23 -> 29)
    assert mol.GetNumAtoms() == 29
    atom = mol.GetAtomWithIdx(17)
    assert atom.GetAtomicNum() == 6
    info = atom.GetPDBResidueInfo()
    assert info.GetResidueName() == 'TYR'
    assert info.GetResidueNumber() == 138
    assert info.GetName().strip() == 'C6'
    assert atom.IsInRing()
    assert atom.GetIsAromatic()
    assert Chem.SanitizeMol(mol) == Chem.SanitizeFlags.SANITIZE_NONE
    # missing protein backbone atoms
    molfile = os.path.join(test_dir, '5ar7_HIS.pdb')
    mol = Chem.MolFromPDBFile(molfile, sanitize=False)
    mol = Chem.RemoveHs(mol, sanitize=False)
    assert mol.GetNumAtoms() == 21
    assert mol.GetNumBonds() == 19
    mol = PreparePDBMol(mol, add_missing_atoms=True)
    assert mol.GetNumAtoms() == 25
    assert mol.GetNumBonds() == 25
    # missing nucleotide backbone atoms
    molfile = os.path.join(test_dir, '1bpx_missingBase.pdb')
    mol = Chem.MolFromPDBFile(molfile, sanitize=False)
    mol = Chem.RemoveHs(mol, sanitize=False)
    assert mol.GetNumAtoms() == 301
    assert mol.GetNumBonds() == 333
    mol = PreparePDBMol(mol, add_missing_atoms=True)
    assert mol.GetNumAtoms() == 328
    assert mol.GetNumBonds() == 366
@pytest.mark.skipif(rdkit is None, reason="RDKit required")
def test_connected_residues():
    """IsResidueConnected distinguishes bonded residues from free ligands."""
    pdb_path = os.path.join(test_dir, '4p6p_lig_zn.pdb')
    complex_mol = Chem.MolFromPDBFile(pdb_path, sanitize=False, removeHs=False)
    complex_mol = PreparePDBMol(complex_mol)  # we need to use fixer with rdkit < 2018
    # residue which has neighbours
    assert IsResidueConnected(complex_mol, range(120, 127))
    # ligand
    assert not IsResidueConnected(complex_mol, range(153, 167))
    # fragments of two residues
    with pytest.raises(ValueError):
        IsResidueConnected(complex_mol, range(5, 15))
@pytest.mark.skipif(rdkit is None, reason="RDKit required")
def test_fetch_structures():
    """Fetching the same PDB ID with and without a cache yields the same mol."""
    pdbid = '3ws8'
    cache = tempfile.mkdtemp()
    fetched_plain = FetchStructure(pdbid)
    fetched_cached = FetchStructure(pdbid, cache_dir=cache)
    # second call with the same cache_dir -- presumably served from the cache
    fetched_again = FetchStructure(pdbid, cache_dir=cache)
    assert fetched_plain.GetNumAtoms() == fetched_cached.GetNumAtoms()
    assert fetched_plain.GetNumAtoms() == fetched_again.GetNumAtoms()
@pytest.mark.skipif(rdkit is None, reason="RDKit required")
def test_prepare_complexes():
    """Test batch preparation of protein-ligand complexes from PDB IDs."""
    ids = [
        '3WS9',  # simple case with everything fine
        '3HLJ',  # ligand not in report
        '3BYM',  # non-existing ligand and backbone residue in report
        '2PIN',  # two ligands with binding affinities
        '3CYU',  # can't parse ligands properly
        '1A28',  # multiple affinity types
    ]
    tmpdir = tempfile.mkdtemp()
    complexes = PrepareComplexes(ids, cache_dir=tmpdir)
    # NOTE: '3HLJ' is deliberately absent below -- its ligand is not in the
    # report, so no pair is expected for it (enforced by the key-set compare)
    expected_values = {
        '3WS9': {'X4D': {'IC50': 92.0}},
        '3BYM': {'AM0': {'IC50': 6.0}},
        '2PIN': {'LEG': {'IC50': 1500.0}},
        '3CYU': {'0CR': {'Kd': 60.0}},
        '1A28': {'STR': {'Ki': 5.1}},
    }
    # flatten the returned complexes into {pdbid: {resname: {prop: value}}}
    values = {}
    for pdbid, pairs in complexes.items():
        values[pdbid] = {}
        for resname, (_, ligand) in pairs.items():
            values[pdbid][resname] = {k: float(v) for k, v
                                      in ligand.GetPropsAsDict().items()}
    assert expected_values.keys() == values.keys()
    for pdbid in expected_values:
        assert values[pdbid].keys() == expected_values[pdbid].keys()
        for resname in values[pdbid]:
            assert values[pdbid][resname].keys() == expected_values[pdbid][resname].keys()
            for key, val in values[pdbid][resname].items():
                assert key in expected_values[pdbid][resname]
                assert_almost_equal(expected_values[pdbid][resname][key], val)
    # a cached PDB file must have been written for every expected complex
    for idx in expected_values:
        assert os.path.exists(os.path.join(tmpdir, idx,
                                           '%s.pdb' % idx))
| |
# Copyright 2019 HTCondor Team, Computer Sciences Department,
# University of Wisconsin-Madison, WI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Callable, List, Iterator
import logging
import abc
import time
from pathlib import Path
import collections
import weakref
import htcondor
import classad
from . import jobs, exceptions, utils
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class Handle(abc.ABC):
    """
    A connection to a set of jobs defined by a constraint.
    The handle can be used to query, act on, or edit those jobs.
    """

    def __init__(self, condor):
        self.condor = condor

    @property
    def constraint_string(self) -> str:
        """The constraint that defines this handle, as a string (abstract)."""
        raise NotImplementedError

    def __repr__(self):
        return "{}(constraint = {})".format(type(self).__name__, self.constraint_string)

    def __eq__(self, other):
        # BUGFIX: the previous implementation built a tuple of conditions and
        # passed it to all(), which evaluated other.condor and
        # other.constraint_string even when `other` was not a Handle at all --
        # comparing against an unrelated object raised AttributeError instead
        # of returning False. Check the type first and short-circuit.
        if not isinstance(other, self.__class__):
            return NotImplemented
        return (
            self.condor == other.condor
            and self.constraint_string == other.constraint_string
        )

    def __hash__(self):
        return hash((self.__class__, self.constraint_string, self.condor))

    @property
    def get_schedd(self):
        """The local schedd, as provided by this handle's Condor object."""
        return self.condor.get_local_schedd()

    def query(self, projection=None, options=htcondor.QueryOpts.Default, limit=-1):
        """
        Query against this set of jobs.

        Parameters
        ----------
        projection
            The :class:`classad.ClassAd` attributes to retrieve, as a list of case-insensitive strings.
            If ``None`` (the default), all attributes will be returned.
        options
            A :class:`htcondor.QueryOpts` flag; defaults to ``Default``.
        limit
            The total number of matches to return from the query.
            If ``-1`` (the default), return all matches.

        Returns
        -------
        ads : Iterator[:class:`classad.ClassAd`]
            An iterator over the :class:`classad.ClassAd` that match the constraint.
        """
        return self.condor.query(
            self.constraint_string, projection=projection, opts=options, limit=limit
        )

    def _act(self, action):
        # All job actions below funnel through here with this handle's constraint.
        return self.condor.act(action, self.constraint_string)

    def remove(self):
        """
        Remove jobs from the queue.

        Returns
        -------
        ad : :class:`classad.ClassAd`
            An ad describing the results of the action.
        """
        return self._act(htcondor.JobAction.Remove)

    def hold(self):
        """
        Hold jobs.

        Returns
        -------
        ad : :class:`classad.ClassAd`
            An ad describing the results of the action.
        """
        return self._act(htcondor.JobAction.Hold)

    def release(self):
        """
        Release held jobs.
        They will return to the queue in the idle state.

        Returns
        -------
        ad : :class:`classad.ClassAd`
            An ad describing the results of the action.
        """
        return self._act(htcondor.JobAction.Release)

    def pause(self):
        """
        Pause jobs.
        Jobs will stop running, but will hold on to their claimed resources.

        Returns
        -------
        ad : :class:`classad.ClassAd`
            An ad describing the results of the action.
        """
        return self._act(htcondor.JobAction.Suspend)

    def resume(self):
        """
        Resume (un-pause) jobs.

        Returns
        -------
        ad : :class:`classad.ClassAd`
            An ad describing the results of the action.
        """
        return self._act(htcondor.JobAction.Continue)

    def vacate(self):
        """
        Vacate running jobs.
        This will force them off of their current execute resource, causing them to become idle again.

        Returns
        -------
        ad : :class:`classad.ClassAd`
            An ad describing the results of the action.
        """
        return self._act(htcondor.JobAction.Vacate)

    def edit(self, attr, value):
        """
        Edit attributes of jobs.

        .. warning::
            Many attribute edits will not affect jobs that have already matched.
            For example, changing ``RequestMemory`` will not affect the memory allocation
            of a job that is already executing.
            In that case, you would need to vacate (or release the job if it was held)
            before the edit had the desired effect.

        Parameters
        ----------
        attr
            The attribute to edit. Case-insensitive.
        value
            The new value for the attribute.

        Returns
        -------
        ad : :class:`classad.ClassAd`
            An ad describing the results of the edit.
        """
        return self.condor.edit(self.constraint_string, attr, str(value))
class ConstraintHandle(Handle):
    """
    A connection to a set of jobs defined by an :attr:`ConstraintHandle.constraint`.
    The handle can be used to query, act on, or edit those jobs.
    """

    def __init__(self, condor, constraint):
        super().__init__(condor=condor)
        # Normalize: a raw string constraint is parsed into an ExprTree.
        self._constraint = (
            classad.ExprTree(constraint) if isinstance(constraint, str) else constraint
        )

    @property
    def constraint(self) -> classad.ExprTree:
        """
        The constraint that defines this :class:`ConstraintHandle`,
        as an :class:`classad.ExprTree`.
        """
        return self._constraint

    @property
    def constraint_string(self) -> str:
        """
        The constraint that defines this :class:`ConstraintHandle`,
        as a string.
        """
        return str(self._constraint)

    def __repr__(self):
        return "{}(constraint = {})".format(type(self).__name__, self._constraint)
class ClusterHandle(ConstraintHandle):
    """
    A subclass of :class:`ConstraintHandle` that targets a single cluster of jobs,
    as produced by :func:`Condor.submit`.

    Because this handle targets a single cluster of jobs, it has superpowers.
    If the cluster has an event log
    (``log = <path>`` in the submit description,
    see the `docs`_),
    this handle's ``state`` attribute will be a :class:`ClusterState` that provides
    information about the current state of the jobs in the cluster.

    .. warning ::
        You shouldn't have to construct a :class:`ClusterHandle` yourself.
        Instead, use the ones returned by :func:`Condor.submit`.

    .. _docs: https://htcondor.readthedocs.io/en/latest/man-pages/condor_submit.html
    """

    def __init__(self, condor, submit_result):
        # Capture the cluster's identity from the submit result first, so
        # the "ClusterID == N" constraint can be built for the base class.
        self._clusterid = submit_result.cluster()
        self._clusterad = submit_result.clusterad()
        self._first_proc = submit_result.first_proc()
        self._num_procs = submit_result.num_procs()
        super().__init__(
            condor=condor,
            constraint=classad.ExprTree("ClusterID == {}".format(self.clusterid)),
        )
        # must delay this until after init, because at this point the submit
        # transaction may not be done yet
        self._state = None
        self._event_log = None

    def __int__(self):
        # int(handle) is the cluster ID.
        return self.clusterid

    def __repr__(self):
        batch_name = self.clusterad.get("JobBatchName", None)
        batch = (
            ", JobBatchName = {}".format(batch_name) if batch_name is not None else ""
        )
        return "{}(ClusterID = {}{})".format(type(self).__name__, self.clusterid, batch)

    @property
    def clusterid(self):
        """The cluster's cluster ID."""
        return self._clusterid

    @property
    def clusterad(self):
        """The cluster's cluster ad."""
        return self._clusterad

    @property
    def first_proc(self):
        """The process ID of the first job in the cluster."""
        return self._first_proc

    @property
    def num_procs(self):
        """The number of jobs in the cluster."""
        return self._num_procs

    def __len__(self):
        # The "length" of a cluster is its number of jobs.
        return self.num_procs

    @property
    def job_ids(self) -> List[jobs.JobID]:
        """Return the list of :class:`JobID` in this :class:`ClusterHandle`."""
        return [jobs.JobID(self.clusterid, proc) for proc in range(len(self))]

    @property
    def state(self):
        """A :class:`ClusterState` that provides information about job state for this cluster."""
        # Built lazily: constructing a ClusterState needs the event log,
        # which may not exist until the submit transaction completes
        # (see the note in __init__).
        if self._state is None:
            self._state = ClusterState(self)
        return self._state

    @property
    def event_log(self):
        """The :class:`EventLog` for this :class:`ClusterHandle`."""
        # Built lazily, for the same reason as ``state``.
        if self._event_log is None:
            self._event_log = EventLog(self)
        return self._event_log

    def wait(
        self,
        condition: Optional[Callable[["ClusterState"], bool]] = None,
        fail_condition: Optional[Callable[["ClusterState"], bool]] = None,
        timeout: int = 120,
        verbose: bool = False,
    ) -> bool:
        """
        Waits for the ``condition`` to become ``True``.

        Parameters
        ----------
        condition
            The function to wait to become ``True``. It will be passed the
            :class:`ClusterState` as its only argument.
            Because of how Python calls unbound class methods, you may directly
            pass :class:`ClusterState` methods as conditions
            (e.g., ``handle.wait(condition = ClusterState.any_held)``).
            The default condition is :meth:`ClusterState.all_complete`, which
            means "wait until all the jobs in this cluster are completed".
        fail_condition
            If this function becomes ``True``, ``wait`` will immediately
            return ``False``. Use this to avoid waiting for a long time when
            a test is failing.
        timeout
            After this amount of time, ``wait`` will return ``False`` and emit
            a warning in the log.  Pass ``None`` to disable the deadline.
        verbose
            If ``True``, the handle's state counts will be logged during the wait.

        Returns
        -------
        success : bool
            ``True`` if the wait finished because the condition became ``True``;
            ``False`` otherwise.
        """
        if condition is None:
            condition = ClusterState.all_complete
        if fail_condition is None:
            fail_condition = lambda _: False
        start_time = time.time()
        # Prime the state with any events already in the log before the
        # first condition check.
        num_events_read = self.state.read_events()
        while True:
            if verbose:
                logger.debug("Handle {} state: {}".format(self, self.state.counts()))
            if condition(self.state):
                break
            if fail_condition(self.state):
                logger.warning(
                    "Wait for handle {} triggered its failure condition".format(self)
                )
                return False
            if timeout is not None and time.time() > start_time + timeout:
                logger.warning("Wait for handle {} timed out".format(self))
                return False
            # Sleep a second here if no job log events were waiting for us to prevent
            # busy waiting. However, if we did see an event, try to read another
            # event as often they come in bunches - and we want to consume them
            # as rapidly as possible.
            if num_events_read == 0:
                time.sleep(1)
            num_events_read = self.state.read_events()
        logger.debug("Wait for handle {} finished successfully".format(self))
        return True
class _MockSubmitResult:
"""
This class is used purely to transform unpacked submit results back into
"submit results" to accommodate the :class:`ClusterHandle` constructor.
**Should not be used in user code.**
"""
def __init__(self, clusterid, clusterad, first_proc, num_procs):
self._clusterid = clusterid
self._clusterad = clusterad
self._first_proc = first_proc
self._num_procs = num_procs
def cluster(self):
return self._clusterid
def clusterad(self):
return self._clusterad
def first_proc(self):
return self._first_proc
def num_procs(self):
return self._num_procs
# Maps job event log event types onto the JobStatus each one implies for
# the job that emitted it.  Event types absent from this table (e.g. image
# size updates) do not change the tracked status.
JOB_EVENT_STATUS_TRANSITIONS = {
    htcondor.JobEventType.SUBMIT: jobs.JobStatus.IDLE,
    htcondor.JobEventType.JOB_EVICTED: jobs.JobStatus.IDLE,
    htcondor.JobEventType.JOB_UNSUSPENDED: jobs.JobStatus.IDLE,
    htcondor.JobEventType.JOB_RELEASED: jobs.JobStatus.IDLE,
    htcondor.JobEventType.SHADOW_EXCEPTION: jobs.JobStatus.IDLE,
    htcondor.JobEventType.JOB_RECONNECT_FAILED: jobs.JobStatus.IDLE,
    htcondor.JobEventType.JOB_TERMINATED: jobs.JobStatus.COMPLETED,
    htcondor.JobEventType.EXECUTE: jobs.JobStatus.RUNNING,
    htcondor.JobEventType.JOB_HELD: jobs.JobStatus.HELD,
    htcondor.JobEventType.JOB_SUSPENDED: jobs.JobStatus.SUSPENDED,
    htcondor.JobEventType.JOB_ABORTED: jobs.JobStatus.REMOVED,
}

# Sentinel distinguishing "cluster ad has no event log attribute at all"
# from any legitimate (possibly falsy) attribute value.
NO_EVENT_LOG = object()
class ClusterState:
    """
    A class that manages the state of the cluster tracked by a :class:`ClusterHandle`.
    It reads from the cluster's event log internally and provides a variety of views
    of the individual job states.

    .. warning::
        :class:`ClusterState` objects should not be instantiated manually.
        :class:`ClusterHandle` will create them automatically when needed.
    """

    def __init__(self, handle):
        # weakref.proxy avoids a reference cycle: the handle owns this state
        # object, and this state object reads the handle's event log.
        self._handle = weakref.proxy(handle)
        self._clusterid = handle.clusterid
        # Proc IDs start at first_proc; _offset maps a proc ID to an index
        # into _data.
        self._offset = handle.first_proc
        self._data = self._make_initial_data(handle)
        self._counts = collections.Counter(jobs.JobStatus(js) for js in self._data)
        # Index of the last event (in the handle's event list) that has been
        # folded into _data/_counts; -1 means none yet.
        self._last_event_read = -1

    def _make_initial_data(self, handle):
        # Before any events are seen, every job is considered unmaterialized.
        return [jobs.JobStatus.UNMATERIALIZED for _ in range(len(handle))]

    def read_events(self):
        # TODO: this reacharound through the handle is bad
        # trigger a read...
        list(self._handle.event_log.read_events())
        # ... but actually look through everything we haven't read yet
        # in case someone else has read elsewhere
        num_events_read = 0
        for event in self._handle.event_log.events[self._last_event_read + 1 :]:
            self._last_event_read += 1
            num_events_read += 1
            new_status = JOB_EVENT_STATUS_TRANSITIONS.get(event.type, None)
            if new_status is not None:
                key = event.proc - self._offset
                # update counts
                old_status = self._data[key]
                self._counts[old_status] -= 1
                self._counts[new_status] += 1
                # set new status on individual job
                self._data[key] = new_status
                # break here to avoid race conditions where a test may be waiting
                # for a status that is very temporary and thus the wait condition never fires.
                break
        return num_events_read

    def __getitem__(self, proc):
        # Keys are proc IDs, not list indices; slices are likewise
        # interpreted in proc-ID space.
        # NOTE(review): any other key type falls through and implicitly
        # returns None instead of raising TypeError — confirm this is never
        # exercised.
        if isinstance(proc, int):
            return self._data[proc - self._offset]
        elif isinstance(proc, slice):
            start, stop, stride = proc.indices(len(self))
            return self._data[start - self._offset : stop - self._offset : stride]

    def counts(self):
        """
        Return the number of jobs in each :class:`JobStatus`, as a :class:`collections.Counter`.
        """
        # Copy so callers cannot mutate the internal tallies.
        return self._counts.copy()

    @property
    def by_name(self):
        """Map each :class:`JobStatus` to the list of :class:`JobID` currently in it."""
        states = collections.defaultdict(list)
        for p, s in enumerate(self._data):
            states[s].append(jobs.JobID(self._clusterid, p + self._offset))
        return states

    def __iter__(self):
        yield from self._data

    def __str__(self):
        return str(self._data)

    def __repr__(self):
        return repr(self._data)

    def __len__(self):
        return len(self._data)

    def __eq__(self, other):
        # Two states are equal when they track the same handle.
        return isinstance(other, self.__class__) and self._handle == other._handle

    def all_complete(self) -> bool:
        """
        Return ``True`` if **all** of the jobs in the cluster are complete.
        Note that this definition does include jobs that have left the queue,
        not just ones that are in the "Completed" state in the queue.
        """
        return self.all_status(jobs.JobStatus.COMPLETED)

    def any_complete(self) -> bool:
        """
        Return ``True`` if **any** of the jobs in the cluster are complete.
        Note that this definition does include jobs that have left the queue,
        not just ones that are in the "Completed" state in the queue.
        """
        return self.any_status(jobs.JobStatus.COMPLETED)

    def any_idle(self) -> bool:
        """Return ``True`` if **any** of the jobs in the cluster are idle."""
        return self.any_status(jobs.JobStatus.IDLE)

    def none_idle(self) -> bool:
        """Return ``True`` if **none** of the jobs in the cluster are idle."""
        return self.none_status(jobs.JobStatus.IDLE)

    @staticmethod
    def running_exactly(count) -> bool:
        """
        Condition factory: return a callable (suitable for
        :meth:`ClusterHandle.wait`) that is ``True`` when exactly
        ``count`` of the jobs in the cluster are running.
        """
        return lambda self: self.status_exactly(count, jobs.JobStatus.RUNNING)

    def any_running(self) -> bool:
        """Return ``True`` if **any** of the jobs in the cluster are running."""
        return self.any_status(jobs.JobStatus.RUNNING)

    def all_running(self) -> bool:
        """Return ``True`` if **all** of the jobs in the cluster are running."""
        return self.all_status(jobs.JobStatus.RUNNING)

    def all_held(self) -> bool:
        """Return ``True`` if **all** of the jobs in the cluster are held."""
        return self.all_status(jobs.JobStatus.HELD)

    def all_idle(self) -> bool:
        """Return ``True`` if **all** of the jobs in the cluster are idle."""
        return self.all_status(jobs.JobStatus.IDLE)

    def any_held(self) -> bool:
        """Return ``True`` if **any** of the jobs in the cluster are held."""
        return self.any_status(jobs.JobStatus.HELD)

    def none_held(self) -> bool:
        """Return ``True`` if **none** of the jobs in the cluster are held."""
        return self.none_status(jobs.JobStatus.HELD)

    def all_terminal(self) -> bool:
        """Return ``True`` if **all** of the jobs in the cluster are completed, held, or removed."""
        return self.all_status(
            jobs.JobStatus.COMPLETED, jobs.JobStatus.HELD, jobs.JobStatus.REMOVED
        )

    def any_terminal(self) -> bool:
        """Return ``True`` if **any** of the jobs in the cluster are completed, held, or removed."""
        return self.any_status(
            jobs.JobStatus.COMPLETED, jobs.JobStatus.HELD, jobs.JobStatus.REMOVED
        )

    def status_exactly(self, count, *statuses: jobs.JobStatus) -> bool:
        """
        Return ``True`` if **exactly** ``count`` of the jobs in the cluster are
        in one of the ``statuses``. Prefer one of the explicitly-named helper
        methods when possible, and don't be afraid to make a new helper method!
        """
        return self.count_status(*statuses) == count

    def all_status(self, *statuses: jobs.JobStatus) -> bool:
        """
        Return ``True`` if **all** of the jobs in the cluster are in one of the ``statuses``.
        Prefer one of the explicitly-named helper methods when possible,
        and don't be afraid to make a new helper method!
        """
        return self.count_status(*statuses) == len(self)

    def any_status(self, *statuses: jobs.JobStatus) -> bool:
        """
        Return ``True`` if **any** of the jobs in the cluster are in one of the ``statuses``.
        Prefer one of the explicitly-named helper methods when possible,
        and don't be afraid to make a new helper method!
        """
        return self.count_status(*statuses) > 0

    def none_status(self, *statuses: jobs.JobStatus) -> bool:
        """
        Return ``True`` if **none** of the jobs in the cluster are in one of the ``statuses``.
        Prefer one of the explicitly-named helper methods when possible,
        and don't be afraid to make a new helper method!
        """
        return self.count_status(*statuses) == 0

    def count_status(self, *statuses: jobs.JobStatus) -> int:
        """Return the total number of jobs in the cluster in any of the given statuses."""
        counts = self.counts()
        return sum(counts[status] for status in statuses)
class EventLog:
    """
    This class represents the job event log for a :class:`ClusterHandle`.

    .. warning ::
        You shouldn't have to construct this yourself.
        Instead, use :attr:`ClusterHandle.event_log`.
    """

    def __init__(self, handle: ClusterHandle):
        self._handle = handle
        self._clusterid = handle.clusterid
        # The log path comes from UserLog (plain submits) or DAGManNodesLog
        # (DAG node jobs); NO_EVENT_LOG marks "neither attribute present".
        raw_event_log_path = utils.chain_get(
            handle.clusterad, ("UserLog", "DAGManNodesLog"), default=NO_EVENT_LOG
        )
        if raw_event_log_path is NO_EVENT_LOG:
            raise exceptions.NoJobEventLog(
                "Cluster for handle {} does not have a job event log, so it cannot track job state".format(
                    self._handle
                )
            )
        self._event_log_path = Path(raw_event_log_path).absolute()
        # The htcondor.JobEventLog reader is created lazily on first read.
        self._event_reader = None
        # All events read so far for this cluster, in log order.
        self.events = []

    def read_events(self) -> Iterator[htcondor.JobEvent]:
        """Yield all un-read events in the event log."""
        if self._event_reader is None:
            # events(0): return immediately instead of blocking for new events.
            self._event_reader = htcondor.JobEventLog(
                self._event_log_path.as_posix()
            ).events(0)
        for event in self._event_reader:
            # The log file may be shared (e.g. by a DAG); skip events that
            # belong to other clusters.
            if event.cluster != self._clusterid:
                continue
            self.events.append(event)
            yield event

    def filter(
        self, condition: Callable[[htcondor.JobEvent], bool]
    ) -> List[htcondor.JobEvent]:
        """
        Return a list containing the job events that the condition is ``True`` for.
        """
        return [e for e in self.events if condition(e)]
| |
# Copyright (c) 2017 Huawei, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.contrib.staticfiles.templatetags.staticfiles import static
from horizon.utils import functions as utils
from openstack_dashboard import api as os_api
from conveyordashboard import api
from conveyordashboard.api import models
from conveyordashboard.common import constants as consts
# Module-local alias for the resource-type -> icon-file mapping consumed by
# get_resource_image().
RESOURCE_TYPE_IMAGE_MAPPINGS = consts.RESOURCE_TYPE_IMAGE_MAPPINGS
def update_pagination(entities, page_size, marker, sort_dir):
    """Trim a page-size+1 result list and derive pagination flags.

    ``entities`` is expected to have been fetched with ``limit=page_size+1``
    so the presence of an extra sentinel row signals a following page.

    :returns: ``(entities, has_more_data, has_prev_data)``
    """
    more, prev = False, False
    if len(entities) > page_size:
        # The sentinel row is present: another page follows; drop it.
        more = True
        entities.pop()
        prev = marker is not None
    elif sort_dir == 'asc' and marker is not None:
        # First page, reached by paging backwards.
        more = True
    elif marker is not None:
        # Last page, reached by paging forwards.
        prev = True
    return entities, more, prev
def get_resource_image(res_type, color='green'):
    """Return the static URL of the topology icon for ``res_type``.

    Unknown resource types fall back to the ``'UNKNOWN'`` icon; ``color``
    picks the per-type image variant (default ``'green'``).
    """
    images = RESOURCE_TYPE_IMAGE_MAPPINGS
    key = res_type if res_type in images else 'UNKNOWN'
    return static(consts.IMAGE_DIR + images[key][color])
def plan_list(request, search_opts=None):
    """List conveyor plans, optionally paginated.

    ``search_opts`` may carry the pseudo-options ``paginate``, ``marker``
    and ``sort_dir``; they are popped before the remaining options are
    forwarded to the conveyor client.

    :returns: ``(plans, has_more_data, has_prev_data)`` when paginating,
        otherwise ``(plans, None, None)``.
    """
    search_opts = search_opts or {}
    paginate = search_opts.pop('paginate', False)
    marker = search_opts.pop('marker', None)
    sort_dir = search_opts.pop('sort_dir', 'desc')
    if paginate:
        # Fetch one row past the page size so update_pagination can tell
        # whether another page exists.
        page_size = utils.get_page_size(request)
        plans = api.conveyorclient(request).plans.list(
            search_opts,
            marker=marker,
            limit=page_size + 1,
            sort_key='created_at',
            sort_dir=sort_dir)
    else:
        plans = api.conveyorclient(request).plans.list(search_opts)
    plans = [models.Plan(p) for p in plans]
    if paginate:
        return update_pagination(plans, page_size, marker, sort_dir)
    else:
        return plans, None, None
def plan_create(request, plan_type, resources,
                plan_name=None):
    """Create a conveyor plan of ``plan_type`` covering ``resources``."""
    return models.Plan(api.conveyorclient(request).plans.create(
        plan_type, resources, plan_name=plan_name))
def plan_delete(request, plan_id):
    """Delete the conveyor plan ``plan_id``."""
    return api.conveyorclient(request).plans.delete(plan_id)
def plan_get(request, plan_id):
    """Fetch the conveyor plan ``plan_id``, wrapped as a Plan model."""
    return models.Plan(api.conveyorclient(request).plans.get(plan_id))
def download_template(request, plan_id):
    """Download the template generated for plan ``plan_id``."""
    return api.conveyorclient(request).plans.download_template(plan_id)
def create_plan_by_template(request, template):
    """Create a new conveyor plan from an existing ``template``."""
    return api.conveyorclient(request).plans.create_plan_by_template(template)
def list_clone_resources_attribute(request, plan_id, attribute_name):
    """List the values of ``attribute_name`` across plan ``plan_id``'s clone resources."""
    return api.conveyorclient(request).resources\
        .list_clone_resources_attribute(plan_id, attribute_name)
def build_resources_topo(request, plan_id, az_map, search_opt=None):
    """Build the resource topology for plan ``plan_id`` using the AZ mapping ``az_map``."""
    return api.conveyorclient(request)\
        .resources.build_resources_topo(plan_id, az_map, search_opt=search_opt)
def resource_list(request, resource_type, search_opts=None):
    """List conveyor resources of ``resource_type``.

    NOTE(review): when a dict is passed in, the ``'type'`` key is written
    into the caller's dict — presumably acceptable to all callers in this
    module; confirm before reusing elsewhere.
    """
    if not search_opts:
        search_opts = {}
    search_opts['type'] = resource_type
    return api.conveyorclient(request).resources.list(search_opts)
def resource_get(request, res_type, res_id):
    """Fetch the detail of resource ``res_id`` of type ``res_type``."""
    return api.conveyorclient(request).resources.get_resource_detail(res_type,
                                                                     res_id)
def clone(request, plan_id, destination, clone_resources,
          update_resources=None, replace_resources=None, clone_links=None,
          sys_clone=False, copy_data=True):
    """Trigger a clone of ``clone_resources`` from plan ``plan_id`` to ``destination``.

    The three optional list arguments default to fresh empty lists
    (``None`` sentinels avoid the shared-mutable-default pitfall).
    """
    if update_resources is None:
        update_resources = []
    if replace_resources is None:
        replace_resources = []
    if clone_links is None:
        clone_links = []
    return api.conveyorclient(request).clones.clone(
        plan_id, destination, clone_resources,
        update_resources=update_resources, clone_links=clone_links,
        replace_resources=replace_resources,
        sys_clone=sys_clone, copy_data=copy_data
    )
def export_migrate_template(request, plan_id):
    """Export the migration template for plan ``plan_id``."""
    return api.conveyorclient(request)\
        .migrates.export_migrate_template(plan_id)
def migrate(request, plan_id, destination):
    """Start migrating plan ``plan_id`` to ``destination``."""
    return api.conveyorclient(request).migrates.migrate(plan_id, destination)
def server_list(request, search_opts=None, all_tenants=False):
    """List Nova servers known to conveyor, wrapped as horizon Server objects.

    :returns: ``(servers, has_more_data)``; pagination is requested by
        passing ``search_opts={'paginate': True, ...}``.
    """
    page_size = utils.get_page_size(request)
    paginate = False
    if search_opts is None:
        search_opts = {}
    elif 'paginate' in search_opts:
        paginate = search_opts.pop('paginate')
        if paginate:
            # Fetch one extra row so the sentinel reveals a following page.
            search_opts['limit'] = page_size + 1
    if all_tenants:
        search_opts['all_tenants'] = True
    else:
        search_opts['project_id'] = request.user.tenant_id
    servers = [s for s in resource_list(request,
                                        consts.NOVA_SERVER,
                                        search_opts)]
    has_more_data = False
    if paginate and len(servers) > page_size:
        servers.pop(-1)
        has_more_data = True
    elif paginate and len(servers) == getattr(settings, 'API_RESULT_LIMIT',
                                              1000):
        # NOTE(review): this branch only fires when the global API result
        # limit truncated the response below page_size+1 — presumably a
        # horizon convention; confirm.
        has_more_data = True
    return ([os_api.nova.Server(i, request) for i in servers],
            has_more_data)
def availability_zone_list(request, detailed=False):
    """List Nova availability zones; hides the 'internal' AZ unless ``detailed``."""
    azs = resource_list(request, consts.NOVA_AZ)
    if not detailed:
        azs = [az for az in azs if az.zoneName != 'internal']
    return azs
def volume_list(request, search_opts=None):
    """List Cinder volumes known to conveyor, wrapped as Volume models."""
    volumes = resource_list(request, consts.CINDER_VOLUME,
                            search_opts=search_opts)
    return [models.Volume(v) for v in volumes]
def net_list(request, search_opts=None):
    """List Neutron networks with each network's ``subnets`` attribute
    replaced by the full subnet objects (instead of bare subnet IDs)."""
    networks = resource_list(request, consts.NEUTRON_NET,
                             search_opts=search_opts)
    subnets = resource_list(request, consts.NEUTRON_SUBNET)
    subnet_dict = dict([(s.id, s) for s in subnets])
    for n in networks:
        # Resolve the network's subnet IDs into subnet objects; IDs that
        # did not come back from the subnet listing are silently dropped.
        setattr(n, 'subnets',
                [subnet_dict[s] for s in getattr(n, 'subnets', [])
                 if s in subnet_dict])
    return networks
def net_list_for_tenant(request, tenant_id, search_opts=None):
    """List networks visible to ``tenant_id``: its own plus shared ones."""
    nets = net_list(request, search_opts)
    return [os_api.neutron.Network(n.__dict__) for n in nets
            if n.shared or n.tenant_id == tenant_id]
def subnet_list(request, search_opts=None):
    """List Neutron subnets, wrapped as horizon Subnet objects."""
    if search_opts is None:
        search_opts = {}
    subnets = resource_list(request, consts.NEUTRON_SUBNET,
                            search_opts=search_opts)
    return [os_api.neutron.Subnet(sn.__dict__) for sn in subnets]
def sg_list(request, tenant_id=None, search_opts=None):
    """List Neutron security groups, optionally filtered to ``tenant_id``."""
    if search_opts is None:
        search_opts = {}
    if tenant_id:
        search_opts['tenant_id'] = tenant_id
    secgroups = resource_list(request, consts.NEUTRON_SECGROUP,
                              search_opts=search_opts)
    sgs = [sg.__dict__ for sg in secgroups]
    return [os_api.neutron.SecurityGroup(sg) for sg in sgs]
def pool_list(request, **kwargs):
    """List LBaaS pools, wrapped as horizon Pool objects.

    NOTE(review): ``**kwargs`` is accepted but never used — presumably kept
    for signature parity with the other list helpers; confirm.
    """
    pools = [p.get('pools') for p in
             resource_list(request, consts.NEUTRON_POOL)]
    return [os_api.lbaas.Pool(p) for p in pools]
def stack_list(request, **kwargs):
    """List Heat stacks known to conveyor, wrapped as Stack models.

    NOTE(review): ``**kwargs`` is accepted but never used; confirm.
    """
    stacks = resource_list(request, consts.HEAT_STACK)
    return [models.Stack(s) for s in stacks]
| |
import pexpect
import sys
import time
def make_tunnel_aps(self, count, namepfx, inlabel_w, outlabel_w, inlabel_p, outlabel_p, nhp_w, nhp_p):
    """Create ``count`` APS-protected MPLS tunnels on the CLI session ``self``.

    For each tunnel i, a working LSP (``namepfx + 'w_' + i``) and a protect
    LSP (``namepfx + 'p_' + i``) are configured with Y.1731 CC OAM, then
    bound to tunnel ``namepfx + i`` as primary/secondary.  Each label
    counter advances by 2 per tunnel.

    The working and protect stanzas were previously duplicated verbatim;
    they are factored into one helper so the two stay in sync.
    """
    def _config_lsp(name, inlabel, outlabel, nhp):
        # Configure one lsp-pe with its labels and Y.1731 CC OAM, then exit.
        for cmd in (
            'mpls lsp-pe ' + name,
            'inlabel ' + str(inlabel),
            'outlabel ' + str(outlabel) + ' ' + nhp,
            'oam-y1731 megid ' + name,
            'oam-y1731 mepid ' + str(inlabel) + ' interval 1',
            'oam-y1731 rmepid ' + str(outlabel),
            'oam-y1731 cc',
            'exit',
        ):
            self.sendline(cmd)
            self.expect('#')

    for i in range(1, count+1):
        name = namepfx + str(i)
        name_w = namepfx + 'w_' + str(i)
        name_p = namepfx + 'p_' + str(i)
        _config_lsp(name_w, inlabel_w, outlabel_w, nhp_w)
        _config_lsp(name_p, inlabel_p, outlabel_p, nhp_p)
        # Bind the working/protect LSP pair into an APS tunnel.
        for cmd in (
            'mpls tunnel ' + name + ' aps',
            'primary ' + name_w,
            'secondary ' + name_p,
            'exit',
        ):
            self.sendline(cmd)
            self.expect('#')
        inlabel_w += 2
        outlabel_w += 2
        inlabel_p += 2
        outlabel_p += 2
def no_make_tunnel_aps(self, count, namepfx):
    """Tear down ``count`` APS tunnels (and both LSPs of each) created by
    make_tunnel_aps, using the same naming scheme."""
    for idx in range(1, count+1):
        suffix = str(idx)
        for cmd in (
            'no mpls lsp-pe ' + namepfx + 'w_' + suffix,
            'no mpls lsp-pe ' + namepfx + 'p_' + suffix,
            'no mpls tunnel ' + namepfx + suffix,
        ):
            self.sendline(cmd)
            self.expect('#')
def make_tunnel(self, count, namepfx, inlabel, outlabel, nhp):
    """Create ``count`` unprotected (non-APS) MPLS tunnels with Y.1731 CC OAM.

    LSP and tunnel share the name ``namepfx + i``; the label counters
    advance by 2 per tunnel.
    """
    for idx in range(1, count+1):
        lsp = namepfx + str(idx)
        commands = [
            'mpls lsp-pe ' + lsp,
            'inlabel ' + str(inlabel),
            'outlabel ' + str(outlabel) + ' ' + nhp,
            'oam-y1731 megid ' + lsp,
            'oam-y1731 mepid ' + str(inlabel) + ' interval 1',
            'oam-y1731 rmepid ' + str(outlabel),
            'oam-y1731 cc',
            'exit',
            'mpls tunnel ' + lsp + ' non-aps',
            'primary ' + lsp,
            'exit',
        ]
        for cmd in commands:
            self.sendline(cmd)
            self.expect('#')
        inlabel += 2
        outlabel += 2
def make_tpe(self, count, namepfx, inlabel, outlabel, tunlpfx):
    """Create ``count`` terminating PW endpoints (t-pe) over existing tunnels.

    For each i, pw t-pe ``namepfx + i`` is configured over tunnel
    ``tunlpfx + i`` and then published as VPWS ``namepfx + i`` (non-APS).
    Label counters advance by 2 per endpoint.

    The original left a dead triple-quoted string (a no-op expression
    statement) holding disabled Y.1731 PW OAM commands; it is preserved
    below as real comments instead.
    """
    for i in range(1, count+1):
        name = namepfx + str(i)
        tunlname = tunlpfx + str(i)
        # Y.1731 OAM for PWs is intentionally left disabled:
        #   oam-y1731 megid <name>
        #   oam-y1731 mepid <inlabel> interval 3
        #   oam-y1731 rmepid <outlabel>
        #   oam-y1731 cc
        for cmd in (
            'mpls pw t-pe ' + name,
            'inlabel ' + str(inlabel) + ' outlabel ' + str(outlabel) + ' mode tagged tunnel ' + tunlname,
            'mpls vpws ' + name + ' ' + str(inlabel) + ' non-aps',
            'primary ' + name,
            'exit',
        ):
            self.sendline(cmd)
            self.expect('#')
        inlabel += 2
        outlabel += 2
def no_make_tpe(self, count, namepfx):
    """Remove ``count`` PW t-pe endpoints and their VPWS instances."""
    for idx in range(1, count+1):
        target = namepfx + str(idx)
        for cmd in ('no mpls pw t-pe ' + target,
                    'no mpls vpws ' + target):
            self.sendline(cmd)
            self.expect('#')
def make_spe(self, count, namepfx, east_label, east_tunl, west_label, west_tunl):
    """Create ``count`` switching PW segments (s-pe) bridging an east and a
    west tunnel.

    NOTE(review): on each side the inlabel and outlabel are set to the same
    value (``east_label`` / ``west_label``) — looks intentional for a
    symmetric label plan, but confirm against the device configuration.
    """
    for i in range(1, count+1):
        name = namepfx + str(i)
        east_tunl_name = east_tunl + str(i)
        west_tunl_name = west_tunl + str(i)
        cmd = 'mpls pw s-pe ' + name
        self.sendline(cmd)
        self.expect('#')
        cmd = 'inlabel-east ' + str(east_label) + ' outlabel ' + str(east_label) + ' tunnel ' + east_tunl_name
        self.sendline(cmd)
        self.expect('#')
        cmd = 'inlabel-west ' + str(west_label) + ' outlabel ' + str(west_label) + ' tunnel ' + west_tunl_name
        self.sendline(cmd)
        self.expect('#')
        self.sendline('exit')
        self.expect('#')
        # Advance both label pools for the next segment.
        east_label += 2
        west_label += 2
def make_vpws(self, count, nameinterface, namepfx, vlanpfx):
    """Enable ``nameinterface`` as a trunk port and bind ``count`` VPWS
    services to it, one VLAN per service.

    NOTE(review): ``vlanname = vlanpfx + i`` requires ``vlanpfx`` to be
    numeric (the ``__main__`` driver passes 1000); a string prefix would
    raise TypeError — confirm the intended argument type.
    """
    cmd = 'interface ' + str(nameinterface)
    self.sendline(cmd)
    self.expect('#')
    self.sendline('no shutdown')
    self.expect('#')
    self.sendline('switchport mode trunk')
    self.expect('#')
    for i in range(1, count+1):
        name = namepfx + str(i)
        vlanname = vlanpfx + i
        cmd = 'mpls-vpws ' + name + ' ' + 'vlan' + ' ' + str(vlanname)
        self.sendline(cmd)
        self.expect('#')
def no_make_vpws(self, count, nameinterface, namepfx):
    """Unbind ``count`` VPWS services from ``nameinterface``, restore
    access mode and shut the interface down."""
    self.sendline('interface ' + str(nameinterface))
    self.expect('#')
    for idx in range(1, count+1):
        self.sendline('no mpls-vpws ' + namepfx + str(idx))
        self.expect('#')
    for cmd in ('switchport mode access', 'shutdown'):
        self.sendline(cmd)
        self.expect('#')
if __name__ == "__main__":
    # Open a telnet CLI session to the device under test and enter
    # configuration mode, echoing the whole dialogue to stdout.
    t3 = pexpect.spawnu('telnet', ['192.168.4.53'], logfile=sys.stdout)
    t3.expect('#')
    t3.sendline('conf ter')
    t3.expect('#')
    # Build 250 APS tunnels, 250 PW endpoints over them, and bind the VPWS
    # services to eth-1-1.  The commented-out calls are the matching teardown.
    make_tunnel_aps(t3, 250, 'lsp_', 1002, 1001, 3002, 3001, '25.1.1.1', '25.1.2.1')
    # no_make_tunnel_aps(t3, 30, 'lsp_')
    make_tpe(t3, 250, 'pwtpe_', 5002, 5001, 'lsp_')
    # no_make_tpe(t3, 3, 'pwtpe_')
    make_vpws(t3, 250, 'eth-1-1', 'pwtpe_', 1000)
    # no_make_vpws(t3, 3, 'eth-1-1', 'pwtpe_')
    t3.close()
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.exceptions import DeserializationError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class RouteFiltersOperations(object):
"""RouteFiltersOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-11-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
    # Auto-generated operations class: stores the transport client and the
    # (de)serializers, and pins the API version used for every request.
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
    self.api_version = "2017-11-01"
    self.config = config
def _delete_initial(
        self, resource_group_name, route_filter_name, custom_headers=None, raw=False, **operation_config):
    # Issue the initial DELETE request of the long-running delete
    # operation; ``delete`` wraps this in an AzureOperationPoller.
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Correlates client and server logs for this request.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request
    request = self._client.delete(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)

    # 200/202: accepted for (async) deletion; 204: already gone.
    if response.status_code not in [200, 202, 204]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    if raw:
        client_raw_response = ClientRawResponse(None, response)
        return client_raw_response
def delete(
        self, resource_group_name, route_filter_name, custom_headers=None, raw=False, **operation_config):
    """Deletes the specified route filter.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param route_filter_name: The name of the route filter.
    :type route_filter_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :return: An instance of AzureOperationPoller that returns None or
     ClientRawResponse if raw=true
    :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
     ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Always request the raw initial response: the poller below needs it
    # regardless of what the caller asked for.
    raw_result = self._delete_initial(
        resource_group_name=resource_group_name,
        route_filter_name=route_filter_name,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )
    if raw:
        return raw_result

    # Construct and send request
    def long_running_send():
        # The initial response carries the status/location headers the
        # poller follows.
        return raw_result.response

    def get_long_running_status(status_link, headers=None):
        # Poll the status link, propagating the original client request id.
        request = self._client.get(status_link)
        if headers:
            request.headers.update(headers)
        header_parameters = {}
        header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
        return self._client.send(
            request, header_parameters, stream=False, **operation_config)

    def get_long_running_output(response):
        # Terminal success statuses for delete; anything else is an error.
        if response.status_code not in [200, 202, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    long_running_operation_timeout = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    return AzureOperationPoller(
        long_running_send, get_long_running_output,
        get_long_running_status, long_running_operation_timeout)
def get(
        self, resource_group_name, route_filter_name, expand=None, custom_headers=None, raw=False, **operation_config):
    """Gets the specified route filter.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param route_filter_name: The name of the route filter.
    :type route_filter_name: str
    :param expand: Expands referenced express route bgp peering resources.
    :type expand: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: RouteFilter or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.network.v2017_11_01.models.RouteFilter or
     ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Resolve the URL template against the serialized path arguments.
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'
    url = self._client.format_url(url, **{
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    })

    # Query string.
    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')

    # Request headers.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET and validate the status code.
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)

    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('RouteFilter', response)

    if raw:
        return ClientRawResponse(deserialized, response)

    return deserialized
def _create_or_update_initial(
        self, resource_group_name, route_filter_name, route_filter_parameters, custom_headers=None, raw=False, **operation_config):
    """Initial PUT of the create-or-update LRO; returns the deserialized
    RouteFilter (or ClientRawResponse when raw=True) from the first call."""
    # Resolve the URL template against the serialized path arguments.
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'
    url = self._client.format_url(url, **{
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    })

    # Query string.
    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    # Request headers.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Serialize the body and send the PUT.
    body_content = self._serialize.body(route_filter_parameters, 'RouteFilter')
    request = self._client.put(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, stream=False, **operation_config)

    if response.status_code not in [200, 201]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    # Both 200 (updated) and 201 (created) carry a RouteFilter payload.
    deserialized = None
    if response.status_code in (200, 201):
        deserialized = self._deserialize('RouteFilter', response)

    if raw:
        return ClientRawResponse(deserialized, response)

    return deserialized
def create_or_update(
        self, resource_group_name, route_filter_name, route_filter_parameters, custom_headers=None, raw=False, **operation_config):
    """Creates or updates a route filter in a specified resource group.

    Kicks off the long-running PUT via ``_create_or_update_initial`` and
    wraps it in an ``AzureOperationPoller``.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param route_filter_name: The name of the route filter.
    :type route_filter_name: str
    :param route_filter_parameters: Parameters supplied to the create or
     update route filter operation.
    :type route_filter_parameters:
     ~azure.mgmt.network.v2017_11_01.models.RouteFilter
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An instance of AzureOperationPoller that returns RouteFilter
     or ClientRawResponse if raw=true
    :rtype:
     ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_11_01.models.RouteFilter]
     or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Initial PUT with raw=True so the transport response (status link,
    # request id) stays available to the poller closures below.
    raw_result = self._create_or_update_initial(
        resource_group_name=resource_group_name,
        route_filter_name=route_filter_name,
        route_filter_parameters=route_filter_parameters,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )
    if raw:
        return raw_result

    # Construct and send request
    def long_running_send():
        # Initial request already sent; reuse its response.
        return raw_result.response

    def get_long_running_status(status_link, headers=None):
        # Poll the status URL, forwarding the original client request id.
        request = self._client.get(status_link)
        if headers:
            request.headers.update(headers)
        header_parameters = {}
        header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
        return self._client.send(
            request, header_parameters, stream=False, **operation_config)

    def get_long_running_output(response):
        # 200/201 are the terminal success codes for create-or-update.
        if response.status_code not in [200, 201]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = self._deserialize('RouteFilter', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized

    long_running_operation_timeout = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    return AzureOperationPoller(
        long_running_send, get_long_running_output,
        get_long_running_status, long_running_operation_timeout)
def _update_initial(
        self, resource_group_name, route_filter_name, route_filter_parameters, custom_headers=None, raw=False, **operation_config):
    """Initial PATCH of the update LRO; returns the deserialized
    RouteFilter (or ClientRawResponse when raw=True) from the first call."""
    # Resolve the URL template against the serialized path arguments.
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'
    url = self._client.format_url(url, **{
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    })

    # Query string.
    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    # Request headers.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Serialize the patch body and send the PATCH.
    body_content = self._serialize.body(route_filter_parameters, 'PatchRouteFilter')
    request = self._client.patch(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, stream=False, **operation_config)

    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('RouteFilter', response)

    if raw:
        return ClientRawResponse(deserialized, response)

    return deserialized
def update(
        self, resource_group_name, route_filter_name, route_filter_parameters, custom_headers=None, raw=False, **operation_config):
    """Updates a route filter in a specified resource group.

    Kicks off the long-running PATCH via ``_update_initial`` and wraps it
    in an ``AzureOperationPoller``.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param route_filter_name: The name of the route filter.
    :type route_filter_name: str
    :param route_filter_parameters: Parameters supplied to the update
     route filter operation.
    :type route_filter_parameters:
     ~azure.mgmt.network.v2017_11_01.models.PatchRouteFilter
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An instance of AzureOperationPoller that returns RouteFilter
     or ClientRawResponse if raw=true
    :rtype:
     ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_11_01.models.RouteFilter]
     or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Initial PATCH with raw=True so the transport response stays
    # available to the poller closures below.
    raw_result = self._update_initial(
        resource_group_name=resource_group_name,
        route_filter_name=route_filter_name,
        route_filter_parameters=route_filter_parameters,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )
    if raw:
        return raw_result

    # Construct and send request
    def long_running_send():
        # Initial request already sent; reuse its response.
        return raw_result.response

    def get_long_running_status(status_link, headers=None):
        # Poll the status URL, forwarding the original client request id.
        request = self._client.get(status_link)
        if headers:
            request.headers.update(headers)
        header_parameters = {}
        header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
        return self._client.send(
            request, header_parameters, stream=False, **operation_config)

    def get_long_running_output(response):
        # 200 is the only terminal success code for PATCH update.
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = self._deserialize('RouteFilter', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized

    long_running_operation_timeout = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    return AzureOperationPoller(
        long_running_send, get_long_running_output,
        get_long_running_status, long_running_operation_timeout)
def list_by_resource_group(
        self, resource_group_name, custom_headers=None, raw=False, **operation_config):
    """Gets all route filters in a resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of RouteFilter
    :rtype:
     ~azure.mgmt.network.v2017_11_01.models.RouteFilterPaged[~azure.mgmt.network.v2017_11_01.models.RouteFilter]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    def internal_paging(next_link=None, raw=False):
        # First page builds the collection URL; subsequent pages follow
        # the service-supplied nextLink verbatim.
        if next_link:
            url = next_link
            query_parameters = {}
        else:
            url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters'
            url = self._client.format_url(url, **{
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            })
            query_parameters = {
                'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
            }

        # Request headers.
        header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Fetch one page and validate the status code.
        request = self._client.get(url, query_parameters)
        response = self._client.send(
            request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        return response

    # The paged deserializer drives internal_paging lazily.
    deserialized = models.RouteFilterPaged(internal_paging, self._deserialize.dependencies)

    if raw:
        header_dict = {}
        return models.RouteFilterPaged(internal_paging, self._deserialize.dependencies, header_dict)

    return deserialized
def list(
        self, custom_headers=None, raw=False, **operation_config):
    """Gets all route filters in a subscription.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of RouteFilter
    :rtype:
     ~azure.mgmt.network.v2017_11_01.models.RouteFilterPaged[~azure.mgmt.network.v2017_11_01.models.RouteFilter]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    def internal_paging(next_link=None, raw=False):
        # First page builds the subscription-level URL; subsequent pages
        # follow the service-supplied nextLink verbatim.
        if next_link:
            url = next_link
            query_parameters = {}
        else:
            url = '/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeFilters'
            url = self._client.format_url(url, **{
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            })
            query_parameters = {
                'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
            }

        # Request headers.
        header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Fetch one page and validate the status code.
        request = self._client.get(url, query_parameters)
        response = self._client.send(
            request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        return response

    # The paged deserializer drives internal_paging lazily.
    deserialized = models.RouteFilterPaged(internal_paging, self._deserialize.dependencies)

    if raw:
        header_dict = {}
        return models.RouteFilterPaged(internal_paging, self._deserialize.dependencies, header_dict)

    return deserialized
| |
'''oscaar v2.0
Module for differential photometry
Developed by Brett Morris, 2011-2013 & minor modifications by Luuk Visser
'''
import numpy as np
import pyfits
from matplotlib import pyplot as plt
from scipy import optimize
from glob import glob
import os
import re
import oscaar
import mathMethods
import sys
import systematics
oscaarpath = os.path.dirname(os.path.abspath(oscaar.__file__))
oscaarpathplus = os.path.join(oscaarpath,'extras')
class dataBank:
'''
Methods for easily storing and accessing information from the entire
differential photometry process with OSCAAR.
Core Developer: Brett Morris
'''
def __init__(self, initParFilePath=None):
    """
    Get the initial guesses for the initial centroids of the stars from the DS9 regions file,
    create dictionaries in which to store all of the data collected for each star, and for each
    aperture radius. Allocate the memory for these arrays wherever possible. Parse the init.par
    file to grab the paths and initial parameters for the run.

    Parameters
    ----------
    initParFilePath : str
        Optional full path to the init.par file to use for the data
    """
    # parseInit fills self.dict with the settings read from init.par.
    self.dict = {}
    self.parseInit(initParFilePath)
    # Pull the parsed settings out into attributes for convenient access.
    self.flatPath = self.dict["flatPath"]
    self.rawRegionsList = self.dict["regPaths"]
    self.ingress = self.dict["ingress"]
    self.egress = self.dict["egress"]
    self.apertureRadii = self.dict["apertureRadius"]
    self.trackingZoom = self.dict["trackingZoom"]
    self.ccdGain = self.dict["ccdGain"]
    self.trackPlots = self.dict["trackPlots"]
    self.photPlots = self.dict["photPlots"]
    self.smoothConst = self.dict ["smoothConst"]
    self.darksPath = self.dict["darksPath"]
    self.imagesPaths = self.dict["imagesPaths"]
    self.timeKeyword = self.dict["timeKeyword"]

    # Choose the header-time -> JD converter based on the time keyword.
    if self.timeKeyword == 'JD':
        # Since we're trying to convert to JD, use a dummy lambda function
        self.convertToJD = lambda x: x
    elif self.timeKeyword == 'DATE-OBS':
        self.convertToJD = mathMethods.ut2jdSplitAtT

    #if not hasattr(sys, 'real_prefix'):
    #    assert len(self.imagesPaths) > 1, 'Must have at least two data images'

    # NOTE(review): sys.real_prefix only exists inside a virtualenv; this
    # branch forces a flat of ones there (apparently a test shim) -- confirm.
    if not hasattr(sys, 'real_prefix'):
        self.masterFlat = np.ones_like(pyfits.getdata(self.imagesPaths[0]))
    elif self.flatPath != '':
        self.masterFlat = pyfits.getdata(self.flatPath)
        self.masterFlatPath = self.flatPath
    elif self.flatPath == '':
        # No flat supplied: use a unity flat shaped like the first image.
        self.masterFlat = np.ones_like(pyfits.getdata(self.imagesPaths[0]))

    self.allStarsDict = {}
    # Parse the regions list; the first regions file seeds the initial centroids.
    self.regionsFileList, self.regionsFITSrefsList = self.parseRawRegionsList(self.rawRegionsList)
    init_x_list,init_y_list = self.parseRegionsFile(self.regionsFileList[0])

    # Pre-allocate per-exposure arrays (one slot per image path).
    zeroArray = np.zeros_like(self.imagesPaths,dtype=np.float32)
    self.times = np.zeros_like(self.imagesPaths,dtype=np.float64)
    self.keys = []
    # Star '000' is the target star by convention.
    self.targetKey = '000'
    Nradii = len(self.apertureRadii)
    # One record per star, keyed '000', '001', ...; flux/error lists hold
    # one per-exposure array per aperture radius.
    for i in range(0,len(init_x_list)):
        self.allStarsDict[str(i).zfill(3)] = {'x-pos':np.copy(zeroArray), 'y-pos':np.copy(zeroArray),\
            'rawFlux':[np.copy(zeroArray) for j in range(Nradii)], 'rawError':[np.copy(zeroArray) for j in range(Nradii)],'flag':False,\
            'scaledFlux':[np.copy(zeroArray) for j in range(Nradii)], 'scaledError':[np.copy(zeroArray) for j in range(Nradii)], 'chisq':np.zeros_like(self.apertureRadii)}
        self.allStarsDict[str(i).zfill(3)]['x-pos'][0] = init_x_list[i]
        self.allStarsDict[str(i).zfill(3)]['y-pos'][0] = init_y_list[i]
        self.keys.append(str(i).zfill(3))
def getDict(self):
    '''Return the dictionary of all star data (``allStarsDict``).'''
    stars = self.allStarsDict
    return stars
def getMeanDarkFrame(self):
    '''Return the mean dark frame, or a zero frame when no darks were given.'''
    # An empty string means "no dark frames supplied": fall back to a
    # zero frame shaped like the first data image.
    if type(self.darksPath) == str and self.darksPath == "":
        return np.zeros_like(pyfits.getdata(self.imagesPaths[0]))
    # Otherwise darksPath is a list of paths to dark frames.
    return systematics.meanDarkFrame(self.darksPath)
def centroidInitialGuess(self,expNumber,star):
    '''
    Produce the initial centroid guess for star ``star`` in exposure
    ``expNumber``.

    Exposure 0 uses the DS9 regions file estimates. If the current image
    is the "Reference FITS image" of one of the submitted regions files,
    that file's estimates are used to re-seed the guess. Otherwise the
    centroid measured in the previous exposure is used.

    Parameters
    ----------
    expNumber : int
        Index of the exposure currently being analyzed (index into the
        list of image paths returned by getPaths()).
    star : str
        Key from ``allStarsDict`` for the star of interest.

    Returns
    -------
    est_x : float
        Estimated centroid along the *x*-axis of pixels.
    est_y : float
        Estimated centroid along the *y*-axis of pixels.
    '''
    if expNumber == 0:
        # First exposure: seed from the DS9 regions file estimates.
        return self.allStarsDict[star]['x-pos'][0], self.allStarsDict[star]['y-pos'][0]
    if self.imagesPaths[expNumber] in self.regionsFITSrefsList:
        # This image is the reference frame of one of the regions files:
        # re-seed from that file's initial guesses.
        refIndex = self.regionsFITSrefsList.index(self.imagesPaths[expNumber])
        xGuesses, yGuesses = self.parseRegionsFile(self.regionsFileList[refIndex])
        return xGuesses[int(star)], yGuesses[int(star)]
    # Fall back on the centroid measured in the previous exposure.
    return (self.allStarsDict[star]['x-pos'][expNumber-1],
            self.allStarsDict[star]['y-pos'][expNumber-1])
def storeCentroid(self,star,exposureNumber,xCentroid,yCentroid):
    '''
    Record the centroid measured by ``trackSmooth`` for one exposure.

    Parameters
    ----------
    star : string
        Key for the star whose centroid has been measured
    exposureNumber : int
        Index of the exposure being considered
    xCentroid : float
        *x*-centroid of the star
    yCentroid : float
        *y*-centroid of the star
    '''
    record = self.allStarsDict[star]
    record['x-pos'][exposureNumber] = xCentroid
    record['y-pos'][exposureNumber] = yCentroid
def storeFlux(self,star,exposureNumber,rawFlux,rawError):
    '''
    Record the flux and flux error measured by ``phot`` for one exposure.

    Parameters
    ----------
    star : string
        Key for the star from the ``allStarsDict`` dictionary
    exposureNumber : int
        Index of the exposure being considered
    rawFlux : float
        Measured flux to store
    rawError : float
        Measured flux uncertainty to store
    '''
    record = self.allStarsDict[star]
    record['rawFlux'][exposureNumber] = rawFlux
    record['rawError'][exposureNumber] = rawError
def storeFluxes(self,star,exposureNumber,rawFluxes,rawErrors):
    '''
    Record the fluxes and flux errors measured by oscaar.phot() for one
    exposure, one value per aperture radius.

    Parameters
    ----------
    star : str
        Key for the star from the ``allStarsDict`` dictionary
    exposureNumber : int
        Index of the exposure being considered
    rawFluxes : list of floats
        Measured fluxes to store, one per aperture radius
    rawErrors : list of floats
        Measured photon-noise errors to store, one per aperture radius
    '''
    record = self.allStarsDict[star]
    for radiusIndex in range(len(self.apertureRadii)):
        record['rawFlux'][radiusIndex][exposureNumber] = rawFluxes[radiusIndex]
        record['rawError'][radiusIndex][exposureNumber] = rawErrors[radiusIndex]
def getPaths(self):
    '''Return the list of paths to the raw images to be used.'''
    paths = self.imagesPaths
    return paths
def getFluxes(self,star):
    '''
    Return the raw fluxes recorded for the star with key ``star``.

    Parameters
    ----------
    star : str
        Key for the star from the ``allStarsDict`` dictionary

    Returns
    -------
    fluxes : list
        The stored raw fluxes (one entry per aperture radius)
    '''
    starRecord = self.allStarsDict[star]
    return starRecord['rawFlux']
def getErrors(self,star):
    '''Return the raw flux errors stored for the star whose key is ``star``.'''
    starRecord = self.allStarsDict[star]
    return starRecord['rawError']
def storeTime(self,expNumber):
    '''
    Read the time stamp of exposure ``expNumber`` from its FITS header
    and store it, converted to JD, in ``self.times``.

    Parameters
    ----------
    expNumber : int
        Index of the exposure being considered
    '''
    # self.timeKeyword is 'JD' (stored as-is) or 'DATE-OBS' (converted by
    # mathMethods.ut2jdSplitAtT); convertToJD was chosen in __init__.
    timeStamp = pyfits.getheader(self.getPaths()[expNumber])[self.timeKeyword]
    self.times[expNumber] = self.convertToJD(timeStamp)
def getTimes(self):
    '''Return every time stamp collected with dataBank.storeTime().'''
    allTimes = self.times
    return allTimes
def getFlag(self,star):
    '''Return the flag stored for the star with key ``star``.'''
    starRecord = self.allStarsDict[star]
    return starRecord['flag']
def getAllFlags(self):
    '''Return the flags of every star, caching the list on ``self.flags``.'''
    flags = [self.allStarsDict[star]['flag'] for star in self.allStarsDict]
    self.flags = flags
    return flags
def setFlag(self,star,setting):
    '''Set the flag of the star with key ``star`` to the boolean ``setting``.'''
    starRecord = self.allStarsDict[star]
    starRecord['flag'] = setting
def getKeys(self):
    '''Return the keys of all of the stars.'''
    starKeys = self.keys
    return starKeys
def scaleFluxes(self):
    '''
    When all fluxes have been collected, run this to re-scale the fluxes of each
    comparison star to the flux of the target star. Do the same transformation on the errors.
    '''
    for star in self.allStarsDict:
        if star != self.targetKey:
            # Linear regression of the comparison star onto the target;
            # m is the fitted scale coefficient returned by
            # mathMethods.regressionScale (returncoeffs=True).
            self.allStarsDict[star]['scaledFlux'], m = mathMethods.regressionScale(self.getFluxes(star),self.getFluxes(self.targetKey),self.getTimes(),self.ingress,self.egress,returncoeffs=True)
            print m
            # Errors transform by |m| under the same linear scaling.
            self.allStarsDict[star]['scaledError'] = np.abs(m)*self.getErrors(star)
        if star == self.targetKey: ## (Keep the target star the same)
            self.allStarsDict[star]['scaledFlux'] = self.allStarsDict[star]['rawFlux']
            self.allStarsDict[star]['scaledError'] = self.allStarsDict[star]['rawError']
def getFluxes_multirad(self,star,apertureRadiusIndex):
    '''Return the raw fluxes of star ``star`` for the aperture radius at
    index ``apertureRadiusIndex``.'''
    starRecord = self.allStarsDict[star]
    return starRecord['rawFlux'][apertureRadiusIndex]
def getErrors_multirad(self,star,apertureRadiusIndex):
    '''Return the raw flux errors of star ``star`` for the aperture radius
    at index ``apertureRadiusIndex``.'''
    starRecord = self.allStarsDict[star]
    return starRecord['rawError'][apertureRadiusIndex]
def scaleFluxes_multirad(self):
    '''
    When all fluxes have been collected, run this to re-scale the fluxes of each
    comparison star to the flux of the target star. Do the same transformation on the errors.
    '''
    # Repeat the scaling independently for every aperture radius.
    for star in self.allStarsDict:
        for apertureRadiusIndex in range(len(self.apertureRadii)):
            if star != self.targetKey:
                print self.getFluxes_multirad(star,apertureRadiusIndex)[0]
                # Linear regression of the comparison star onto the target;
                # m is the fitted scale coefficient (returncoeffs=True).
                self.allStarsDict[star]['scaledFlux'][apertureRadiusIndex], m = mathMethods.regressionScale(self.getFluxes_multirad(star,apertureRadiusIndex),self.getFluxes_multirad(self.targetKey,apertureRadiusIndex),self.getTimes(),self.ingress,self.egress,returncoeffs=True)
                #print m
                # Errors transform by |m| under the same linear scaling.
                self.allStarsDict[star]['scaledError'][apertureRadiusIndex] = np.abs(m)*self.getErrors_multirad(star,apertureRadiusIndex)
            if star == self.targetKey: ## (Keep the target star the same)
                self.allStarsDict[star]['scaledFlux'][apertureRadiusIndex] = self.allStarsDict[star]['rawFlux'][apertureRadiusIndex]
                self.allStarsDict[star]['scaledError'][apertureRadiusIndex] = self.allStarsDict[star]['rawError'][apertureRadiusIndex]
def getScaledFluxes(self,star):
    '''Return the scaled fluxes of star ``star`` as a numpy array.'''
    scaled = self.allStarsDict[star]['scaledFlux']
    return np.array(scaled)
def getScaledErrors(self,star):
    '''Return the scaled flux errors of star ``star`` as a numpy array.'''
    scaled = self.allStarsDict[star]['scaledError']
    return np.array(scaled)
def getScaledFluxes_multirad(self,star,apertureRadiusIndex):
    '''Return, as a numpy array, the scaled fluxes of star ``star`` for the
    aperture radius at index ``apertureRadiusIndex``.'''
    scaled = self.allStarsDict[star]['scaledFlux'][apertureRadiusIndex]
    return np.array(scaled)
def getScaledErrors_multirad(self,star,apertureRadiusIndex):
    '''Return, as a numpy array, the scaled flux errors of star ``star`` for
    the aperture radius at index ``apertureRadiusIndex``.'''
    scaled = self.allStarsDict[star]['scaledError'][apertureRadiusIndex]
    return np.array(scaled)
def calcChiSq(self):
    r"""
    Compute the :math:`$\chi^2$` between the fluxes of the target star and
    each comparison star. This metric suggests which comparison stars share
    the target star's overall trend.

    Also caches ``self.chisq`` (array), ``self.meanChisq`` and
    ``self.stdChisq``.
    """
    targetFluxes = self.getFluxes(self.targetKey)
    for star in self.allStarsDict:
        self.allStarsDict[star]['chisq'] = mathMethods.chiSquared(targetFluxes, self.getFluxes(star))
    chisq = [self.allStarsDict[star]['chisq'] for star in self.allStarsDict]
    self.chisq = np.array(chisq)
    self.meanChisq = np.mean(chisq)
    self.stdChisq = np.std(chisq)
def calcChiSq_multirad(self,apertureRadiusIndex):
    r"""
    Calculate the :math:`$\chi^2$` for the fluxes of each comparison star and the fluxes of the target star,
    for the aperture radius at index ``apertureRadiusIndex``. This
    metric can be used to suggest which comparison stars have similar overall trends to the target star.

    NOTE(review): ``self.chisq``/``self.meanChisq``/``self.stdChisq`` are
    overwritten on each call, so they reflect only the last radius computed.
    """
    for star in self.allStarsDict:
        print self.getFluxes_multirad(self.targetKey,apertureRadiusIndex),self.getFluxes_multirad(star,apertureRadiusIndex)
        # Per-star chi-squared against the target for this radius.
        self.allStarsDict[star]['chisq'][apertureRadiusIndex] = mathMethods.chiSquared(self.getFluxes_multirad(self.targetKey,apertureRadiusIndex),self.getFluxes_multirad(star,apertureRadiusIndex))
    # Collect the per-star values and cache summary statistics.
    chisq = []
    for star in self.allStarsDict:
        chisq.append(self.allStarsDict[star]['chisq'][apertureRadiusIndex])
    self.chisq = np.array(chisq)
    self.meanChisq = np.mean(chisq)
    self.stdChisq = np.std(chisq)
def calcMeanComparison_multirad(self,ccdGain=1):
    r"""
    Take the regression-weighted mean of some of the comparison stars
    to produce one comparison star flux to compare to the target to
    produce a light curve, once per aperture radius.

    The comparison stars used are those whose :math:`$\chi^2$`s calculated by
    `calcChiSq()` are less than :math:`$2\sigma$` away from the other :math:`$\chi^2$`s.
    This condition removes outlier comparison stars, which can be caused by intrinsic
    variability, tracking inaccuracies, or other effects.

    Returns the lists (one entry per radius) of mean comparison fluxes and
    their errors; also caches them on ``self``.
    """
    self.meanComparisonStars = []
    self.meanComparisonStarErrors = []
    self.comparisonStarWeights = []
    for apertureRadiusIndex in range(len(self.apertureRadii)):
        ## Check whether chi-squared has been calculated already. If not, compute it.
        chisq = []
        for star in self.allStarsDict: chisq.append(self.allStarsDict[star]['chisq'])
        chisq = np.array(chisq)
        #if all(chisq == 0): self.calcChiSq_multirad(apertureRadiusIndex)
        if (chisq==0).all(): self.calcChiSq_multirad(apertureRadiusIndex)

        ## Begin regression technique
        numCompStars = len(self.allStarsDict) - 1
        targetFullLength = len(self.getScaledFluxes_multirad(self.targetKey,apertureRadiusIndex))
        print "Aperture rad:", apertureRadiusIndex
        print "Target raw flux:",self.getFluxes_multirad(self.targetKey,apertureRadiusIndex)
        print "Target scaled flux:",self.getScaledFluxes_multirad(self.targetKey,apertureRadiusIndex)
        # Fit only against the out-of-transit portion of the target.
        target = self.getFluxes_multirad(self.targetKey,apertureRadiusIndex)[self.outOfTransit()]
        compStars = np.zeros([targetFullLength,numCompStars])
        compStarsOOT = np.zeros([len(target),numCompStars])
        compErrors = np.copy(compStars)
        columnCounter = 0
        acceptedCompStarKeys = []
        compStarKeys = []
        # Accept comparison stars whose chi-squared is within 2 sigma of
        # the ensemble mean; excluded stars leave their column at zero.
        # NOTE(review): the excluded branch also advances columnCounter,
        # so excluded stars occupy zero-filled columns -- confirm intended.
        for star in self.allStarsDict:
            if star != self.targetKey and (np.abs(self.meanChisq - self.allStarsDict[star]['chisq']) < 2*self.stdChisq).any():
                compStars[:,columnCounter] = self.getScaledFluxes_multirad(star,apertureRadiusIndex).astype(np.float64)
                compStarsOOT[:,columnCounter] = self.getScaledFluxes_multirad(star,apertureRadiusIndex)[self.outOfTransit()].astype(np.float64)
                compErrors[:,columnCounter] = self.getScaledErrors_multirad(star,apertureRadiusIndex).astype(np.float64)
                compStarKeys.append(int(star))
                columnCounter += 1
            elif star != self.targetKey and (np.abs(self.meanChisq - self.allStarsDict[star]['chisq']) > 2*self.stdChisq):
                print 'Star '+str(star)+' excluded from regression'
                compStarKeys.append(int(star))
                columnCounter += 1
        # Start from equal weights for all comparison stars.
        initP = np.zeros([numCompStars])+ 1./numCompStars
        # Residuals for the least-squares fit.
        # NOTE(review): returns None (implicitly) when any coefficient
        # goes negative, which leastsq does not expect -- confirm.
        def errfunc(p,target):
            if all(p >=0.0): return np.dot(p,compStarsOOT.T) - target ## Find only positive coefficients
            #return np.dot(p,compStarsOOT.T) - target
        bestFitP = optimize.leastsq(errfunc,initP[:],args=(target.astype(np.float64)),maxfev=10000000,epsfcn=np.finfo(np.float32).eps)[0]
        print '\nBest fit regression coefficients:',bestFitP
        print 'Default weight:',1./numCompStars
        # Weighted mean comparison flux and its propagated error.
        self.comparisonStarWeights_i = np.vstack([compStarKeys,bestFitP])
        self.meanComparisonStar = np.dot(bestFitP,compStars.T)
        self.meanComparisonStarError = np.sqrt(np.dot(bestFitP**2,compErrors.T**2))
        self.meanComparisonStars.append(self.meanComparisonStar)
        self.meanComparisonStarErrors.append(self.meanComparisonStarError)
        self.comparisonStarWeights.append(self.comparisonStarWeights_i)
    return self.meanComparisonStars, self.meanComparisonStarErrors
def getAllChiSq(self):
"""
Return :math:`$\chi^2$`s for all stars
"""
return self.chisq
def outOfTransit(self):
"""
Boolean array where `True` are the times in `getTimes()` that are
before ingress or after egress.
Returns
-------
List of bools
"""
return (self.getTimes() < self.ingress) + (self.getTimes() > self.egress)
    def calcMeanComparison(self,ccdGain=1):
        """
        Take the regression-weighted mean of some of the comparison stars
        to produce one comparison star flux to compare to the target to
        produce a light curve.
        The comparison stars used are those whose chi-squareds calculated by
        self.calcChiSq() are less than 2*sigma away from the other chi-squareds.
        This condition removes outliers.

        NOTE(review): the ccdGain parameter is accepted but never used in this
        method body -- confirm whether it can be dropped from callers.
        """
        ## Check whether chi-squared has been calculated already. If not, compute it.
        chisq = []
        for star in self.allStarsDict: chisq.append(self.allStarsDict[star]['chisq'])
        chisq = np.array(chisq)
        if all(chisq == 0): self.calcChiSq()
        ## Begin regression technique
        # One column per potential comparison star (everything but the target).
        numCompStars = len(self.allStarsDict) - 1
        targetFullLength = len(self.getScaledFluxes(self.targetKey))
        # Fit only against the out-of-transit portion of the target flux.
        target = self.getFluxes(self.targetKey)[self.outOfTransit()]
        compStars = np.zeros([targetFullLength,numCompStars])
        compStarsOOT = np.zeros([len(target),numCompStars])
        compErrors = np.copy(compStars)
        columnCounter = 0
        compStarKeys = []
        # Keep stars whose chi-squared is within 2*sigma of the mean; stars
        # beyond that are announced and skipped (their column stays zero).
        for star in self.allStarsDict:
            if star != self.targetKey and (np.abs(self.meanChisq - self.allStarsDict[star]['chisq']) < 2*self.stdChisq):
                compStars[:,columnCounter] = self.getScaledFluxes(star).astype(np.float64)
                compStarsOOT[:,columnCounter] = self.getScaledFluxes(star)[self.outOfTransit()].astype(np.float64)
                compErrors[:,columnCounter] = self.getScaledErrors(star).astype(np.float64)
                compStarKeys.append(int(star))
                columnCounter += 1
            elif star != self.targetKey and (np.abs(self.meanChisq - self.allStarsDict[star]['chisq']) > 2*self.stdChisq):
                print 'Star '+str(star)+' excluded from regression'
                compStarKeys.append(int(star))
                columnCounter += 1
        # Start the fit with equal weights for every comparison star.
        initP = np.zeros([numCompStars])+ 1./numCompStars
        def errfunc(p,target):
            # NOTE(review): returns None (implicitly) when any coefficient is
            # negative, which leastsq cannot use -- confirm this is the
            # intended way to reject negative coefficients.
            if all(p >=0.0): return np.dot(p,compStarsOOT.T) - target ## Find only positive coefficients
            #return np.dot(p,compStarsOOT.T) - target
        # NOTE(review): args is a parenthesized expression, not a tuple;
        # scipy wraps a non-tuple into (args,) but (target...,) would be clearer.
        bestFitP = optimize.leastsq(errfunc,initP[:],args=(target.astype(np.float64)),maxfev=10000000,epsfcn=np.finfo(np.float32).eps)[0]
        print '\nBest fit regression coefficients:',bestFitP
        print 'Default weight:',1./numCompStars
        # Row 0: star keys, row 1: fitted weight for that star.
        self.comparisonStarWeights = np.vstack([compStarKeys,bestFitP])
        self.meanComparisonStar = np.dot(bestFitP,compStars.T)
        # Propagate errors: sqrt(sum(w_i^2 * err_i^2)) per time sample.
        self.meanComparisonStarError = np.sqrt(np.dot(bestFitP**2,compErrors.T**2))
        return self.meanComparisonStar, self.meanComparisonStarError
def computeLightCurve(self,meanComparisonStar,meanComparisonStarError):
'''
Divide the target star flux by the mean comparison star to yield a light curve,
save the light curve into the dataBank object.
INPUTS: meanComparisonStar - The fluxes of the (one) mean comparison star
RETURNS: self.lightCurve - The target star divided by the mean comparison
star, i.e., the light curve.
'''
self.lightCurve = self.getFluxes(self.targetKey)/meanComparisonStar
self.lightCurveError = np.sqrt(self.lightCurve**2 * ( (self.getErrors(self.targetKey)/self.getFluxes(self.targetKey))**2 + (meanComparisonStarError/meanComparisonStar)**2 ))
return self.lightCurve, self.lightCurveError
def computeLightCurve_multirad(self,meanComparisonStars,meanComparisonStarErrors):
'''
Divide the target star flux by the mean comparison star to yield a light curve,
save the light curve into the `dataBank` object.
Parameters
----------
meanComparisonStar : list
The fluxes of the (one) mean comparison star
Returns
-------
self.lightCurves:
The fluxes of the target star divided by the fluxes of the mean comparison
star, i.e., the light curve
self.lightCurveErrors:
The propagated errors on each relative flux in `self.lightCurves`
'''
self.lightCurves = []
self.lightCurveErrors = []
for apertureRadiusIndex in range(len(self.apertureRadii)):
lightCurve = self.getFluxes_multirad(self.targetKey,apertureRadiusIndex)/meanComparisonStars[apertureRadiusIndex]
self.lightCurves.append(lightCurve)
self.lightCurveErrors.append(np.sqrt(lightCurve**2 * ( (self.getErrors_multirad(self.targetKey,apertureRadiusIndex)/self.getFluxes_multirad(self.targetKey,apertureRadiusIndex))**2 +\
(meanComparisonStarErrors[apertureRadiusIndex]/meanComparisonStars[apertureRadiusIndex])**2 )))
return self.lightCurves, self.lightCurveErrors
def getPhotonNoise(self):
'''
Calculate photon noise using the lightCurve and the meanComparisonStar
RETURNS: self.photonNoise - The estimated photon noise limit
'''
self.photonNoise = self.lightCurve*self.meanComparisonStarError
return self.photonNoise
    def parseInit(self, initParFilePath=None):
        """
        Parses `init.par`, a plain text file that contains all of the running parameters
        that control the `differentialPhotometry.py` script. `init.par` is written by
        the OSCAAR GUI or can be edited directly by the user.
        Parameters
        ----------
        initParFilePath : str
            Optional full path to the init.par file to use for the data

        Each "Key: value" line is matched against a fixed table of recognized
        keys and stored into ``self.dict`` under a short attribute name.
        """
        # Default to the init.par shipped next to the installed oscaar package.
        if initParFilePath is None:
            init = open(os.path.join(
                os.path.dirname(os.path.abspath(oscaar.__file__)),
                'init.par'), 'r').read().splitlines()
        else:
            if os.path.exists(initParFilePath):
                init = open(os.path.abspath(initParFilePath), 'r').read().splitlines()
            else:
                # Python 2 raise syntax; this module targets Python 2.
                raise ValueError, (
                    "PAR file {0} cannot be found.".format(initParFilePath))
        for line in init:
            if len(line.split()) > 1:
                # Split only on the first ':' so paths containing ':' survive.
                inline = line.split(':', 1)
                name = inline[0].strip()
                value = str(inline[1].strip())
                # NOTE(review): `list` shadows the builtin of the same name
                # within this method -- rename if this code is ever touched.
                list = [("Path to Master-Flat Frame", "flatPath"),
                        ("Path to Regions File", "regPaths"),
                        ("Ingress", "ingress"), ("Egress", "egress"),
                        ("Radius", "apertureRadius"), ("Tracking Zoom", "trackingZoom"),
                        ("CCD Gain", "ccdGain"), ("Plot Tracking", "trackPlots"),
                        ("Plot Photometry", "photPlots"), ("Smoothing Constant", "smoothConst"),
                        ("Output Path","outputPath"), ("Path to Dark Frames", "darksPath"),
                        ("Path to Data Images", "imagesPaths"), ("Exposure Time Keyword", "timeKeyword")]
                for string,save in list:
                    if string == name:
                        #if name == "Smoothing Constant" or name == "Radius" or name == "Tracking Zoom" or name == "CCD Gain":
                        if name == "Smoothing Constant" or name == "Tracking Zoom" or name == "CCD Gain":
                            self.dict[save] = float(value)
                        elif name == "Ingress" or name == "Egress":
                            # UT timestamp -> Julian Date.
                            self.dict[save] = mathMethods.ut2jd(value)
                        elif name == "Plot Photometry" or name == "Plot Tracking":
                            if value == "on":
                                self.dict[save] = True
                            else:
                                self.dict[save] = False
                        elif name == "Path to Dark Frames" or name == "Path to Data Images":
                            value = inline[1].strip()
                            # First try the value as a glob pattern...
                            if len(glob(value)) > 0:
                                self.dict[save] = np.sort(glob(value))
                            elif value == "":
                                self.dict[save] = ""
                            else:
                                # ...otherwise treat it as a comma-separated list
                                # of paths relative to the oscaar install dir.
                                tempArr = []
                                for path in str(inline[1]).split(','):
                                    path = path.strip()
                                    path = os.path.join(oscaarpathplus,os.path.abspath(path))
                                    tempArr.append(path)
                                self.dict[save] = np.sort(tempArr)
                        elif name == "Radius":
                            if len(value.split(',')) == 3:
                                ## If multiple aperture radii are requested by dictating the range, enumerate the range:
                                apertureRadiusMin, apertureRadiusMax, apertureRadiusStep = map(float,value.split(','))
                                # Include the max endpoint only when the step divides the range evenly.
                                if (apertureRadiusMax-apertureRadiusMin) % apertureRadiusStep == 0:
                                    apertureRadii = np.arange(apertureRadiusMin, apertureRadiusMax+apertureRadiusStep, apertureRadiusStep)
                                else:
                                    apertureRadii = np.arange(apertureRadiusMin, apertureRadiusMax, apertureRadiusStep)
                                self.dict[save] = apertureRadii
                            elif len(value.split(',')) == 1:
                                ## If only one aperture radius is requested, make a list with only that one element
                                self.dict[save] = [float(value)]
                            else:
                                self.dict[save] = [float(i) for i in value.split(',')]
                        elif name == "Output Path":
                            self.outputPath = os.path.join(oscaarpathplus,os.path.abspath(value))
                        else:
                            self.dict[save] = value
def parseRegionsFile(self,regPath):
"""
Parses the regions files (.REG) created by DS9. These files are written in plain text, where
each circuluar region's centroid and radius are logged in the form "circle(`x-centroid`,`y-centroid`,`radius`)".
This method uses regular expressions to parse out the centroids.
Parameters
----------
regPath : string
Path to the regions file to read
Returns
-------
init_x_list : list
Initial estimates for the x-centroids
init_y_list : list
Initial estimates for the y-centroids
"""
regionsData = open(regPath,'r').read().splitlines()
init_x_list = []
init_y_list = []
for i in range(0,len(regionsData)):
if regionsData[i][0:6] == 'circle':
y,x = re.split("\,",re.split("\(",regionsData[i])[1])[0:2]
init_y_list.append(float(y))
init_x_list.append(float(x))
return init_x_list,init_y_list
def parseRawRegionsList(self,rawRegionsList):
"""
Split up the `rawRegionsList`, which should be in the format:
<first regions file>,<reference FITS file for the first regs file>;<second> regions file>,
<reference FITS file for the first regs file>;....
into a list of regions files and a list of FITS reference files.
"""
regionsFiles = []
refFITSFiles = []
for pair in rawRegionsList.split(';'):
if len(pair.split(",")) == 2:
regionsFile, refFITSFile = pair.split(',')
regionsFiles.append(regionsFile)
refFITSFiles.append(refFITSFile)
return regionsFiles, refFITSFiles
    def plot(self,pointsPerBin=10):
        """
        Produce a plot of the light curve, show it. Over-plot 10-point median binning
        of the light curve.
        Parameters
        ----------
        pointsPerBin : int, optional (default=10)
            Integer number of points to accumulate per bin.
        """
        plt.close()
        times = self.getTimes()
        # Build the composite comparison star, then the relative light curve.
        # NOTE(review): calcMeanComparison accepts ccdGain but does not appear
        # to use it -- confirm.
        meanComparisonStar, meanComparisonStarError = self.calcMeanComparison(ccdGain = self.ccdGain)
        lightCurve, lightCurveErr = self.computeLightCurve(meanComparisonStar, meanComparisonStarError)
        # Median-bin the light curve for the over-plotted (red) trend line.
        binnedTime, binnedFlux, binnedStd = mathMethods.medianBin(times,lightCurve,pointsPerBin)
        fig = plt.figure(num=None, figsize=(10, 8), facecolor='w',edgecolor='k')
        fig.canvas.set_window_title('OSCAAR')
        axis = fig.add_subplot(111)
        def format_coord(x, y):
            '''Function to give data value on mouse over plot.'''
            return 'JD=%1.5f, Flux=%1.4f' % (x, y)
        # Override the toolbar readout with JD/flux formatting.
        axis.format_coord = format_coord
        axis.errorbar(times,lightCurve,yerr=lightCurveErr,fmt='k.',ecolor='gray')
        axis.errorbar(binnedTime, binnedFlux, yerr=binnedStd, fmt='rs-', linewidth=2)
        # Dotted vertical lines mark the expected ingress and egress times.
        axis.axvline(ymin=0,ymax=1,x=self.ingress,color='k',ls=':')
        axis.axvline(ymin=0,ymax=1,x=self.egress,color='k',ls=':')
        axis.set_title('Light Curve')
        axis.set_xlabel('Time (JD)')
        axis.set_ylabel('Relative Flux')
        plt.ioff()
        plt.show()
    def plotLightCurve(self,pointsPerBin=10,apertureRadiusIndex=0):
        """
        Produce a plot of the light curve, show it. Over-plot 10-point median binning
        of the light curve.
        Parameters
        ----------
        pointsPerBin : int, optional (default=10)
            Integer number of points to accumulate per bin.
        apertureRadiusIndex : int, optional (default=0)
            Index of the aperture radius list corresponding to the aperture radius
            from which to produce the plot.
        """
        # Median-bin the stored light curve for the over-plotted trend line.
        binnedTime, binnedFlux, binnedStd = mathMethods.medianBin(self.times,self.lightCurves[apertureRadiusIndex],pointsPerBin)
        fig = plt.figure(num=None, figsize=(10, 8), facecolor='w',edgecolor='k')
        fig.canvas.set_window_title('OSCAAR')
        axis = fig.add_subplot(111)
        def format_coord(x, y):
            '''Function to give data value on mouse over plot.'''
            return 'JD=%1.5f, Flux=%1.4f' % (x, y)
        # Override the toolbar readout with JD/flux formatting.
        axis.format_coord = format_coord
        axis.errorbar(self.times,self.lightCurves[apertureRadiusIndex],yerr=self.lightCurveErrors[apertureRadiusIndex],fmt='k.',ecolor='gray')
        axis.errorbar(binnedTime, binnedFlux, yerr=binnedStd, fmt='rs-', linewidth=2)
        # Dotted vertical lines mark the expected ingress and egress times.
        axis.axvline(ymin=0,ymax=1,x=self.ingress,color='k',ls=':')
        axis.axvline(ymin=0,ymax=1,x=self.egress,color='k',ls=':')
        axis.set_title(('Light curve for aperture radius %s' % self.apertureRadii[apertureRadiusIndex]))
        axis.set_xlabel('Time (JD)')
        axis.set_ylabel('Relative Flux')
        plt.ioff()
        plt.show()
    def plotRawFluxes(self,apertureRadiusIndex=0,pointsPerBin=10):
        """
        Plot all raw flux time series for a particular aperture radius,
        for each comparison star.
        Parameters
        ----------
        pointsPerBin : int, optional (default=10)
            Integer number of points to accumulate per bin.
            NOTE(review): accepted but not used in this method -- confirm.
        apertureRadiusIndex : int, optional (default=0)
            Index of the aperture radius list corresponding to the aperture radius
            from which to produce the plot.
        """
        plt.ion()
        fig = plt.figure(num=None, figsize=(10, 8), facecolor='w',edgecolor='k')
        fig.canvas.set_window_title('OSCAAR')
        axis = fig.add_subplot(111)
        def format_coord(x, y):
            '''Function to give data value on mouse over plot.'''
            return 'JD=%1.5f, Flux=%1.4f' % (x, y)
        # Override the toolbar readout with JD/flux formatting.
        axis.format_coord = format_coord
        # One error-bar series per tracked star (target and comparisons alike).
        for star in self.allStarsDict:
            axis.errorbar(self.times,self.allStarsDict[star]['rawFlux'][apertureRadiusIndex],yerr=self.allStarsDict[star]['rawError'][apertureRadiusIndex],fmt='o')
        # Dotted vertical lines mark the expected ingress and egress times.
        axis.axvline(ymin=0,ymax=1,x=self.ingress,color='k',ls=':')
        axis.axvline(ymin=0,ymax=1,x=self.egress,color='k',ls=':')
        axis.set_title(('Raw fluxes for aperture radius %s' % self.apertureRadii[apertureRadiusIndex]))
        axis.set_xlabel('Time (JD)')
        axis.set_ylabel('Counts')
        plt.ioff()
        plt.show()
    def plotScaledFluxes(self,apertureRadiusIndex=0,pointsPerBin=10):
        """
        Plot all scaled flux time series for a particular aperture radius,
        for each comparison star.
        Parameters
        ----------
        pointsPerBin : int, optional (default=10)
            Integer number of points to accumulate per bin.
            NOTE(review): accepted but not used in this method -- confirm.
        apertureRadiusIndex : int, optional (default=0)
            Index of the aperture radius list corresponding to the aperture radius
            from which to produce the plot.
        """
        plt.ion()
        fig = plt.figure(num=None, figsize=(10, 8), facecolor='w',edgecolor='k')
        fig.canvas.set_window_title('OSCAAR')
        axis = fig.add_subplot(111)
        def format_coord(x, y):
            '''Function to give data value on mouse over plot.'''
            return 'JD=%1.5f, Flux=%1.4f' % (x, y)
        # Override the toolbar readout with JD/flux formatting.
        axis.format_coord = format_coord
        # One error-bar series per tracked star, using the scaled fluxes.
        for star in self.allStarsDict:
            axis.errorbar(self.times,self.allStarsDict[star]['scaledFlux'][apertureRadiusIndex],yerr=self.allStarsDict[star]['scaledError'][apertureRadiusIndex],fmt='o')
        # Dotted vertical lines mark the expected ingress and egress times.
        axis.axvline(ymin=0,ymax=1,x=self.ingress,color='k',ls=':')
        axis.axvline(ymin=0,ymax=1,x=self.egress,color='k',ls=':')
        axis.set_title(('Scaled fluxes for aperture radius: %s' % self.apertureRadii[apertureRadiusIndex]))
        axis.set_xlabel('Time (JD)')
        axis.set_ylabel('Counts')
        plt.ioff()
        plt.show()
    def plotCentroidsTrace(self,pointsPerBin=10):
        """
        Plot all centroid positions for each star. The plot is in (`x`,`y`)
        coordinates to visualize the physical image drift (this is not a plot
        as a function of time).
        Parameters
        ----------
        pointsPerBin : int, optional (default=10)
            NOTE(review): accepted but not used in this method -- confirm.
        """
        fig = plt.figure(num=None, figsize=(10, 8), facecolor='w',edgecolor='k')
        fig.canvas.set_window_title('OSCAAR')
        axis = fig.add_subplot(111)
        def format_coord(x, y):
            '''Function to give data value on mouse over plot.'''
            return 'JD=%1.5f, Flux=%1.4f' % (x, y)
        axis.format_coord = format_coord
        for star in self.allStarsDict:
            # NOTE(review): 'y-pos' is plotted on the horizontal axis and
            # 'x-pos' on the vertical, while the labels read X/Y -- this looks
            # like OSCAAR's swapped row/column convention; confirm.
            axis.plot(self.allStarsDict[star]['y-pos'],self.allStarsDict[star]['x-pos'])
        axis.set_title('Tracing Stellar Centroids')
        axis.set_xlabel('X')
        axis.set_ylabel('Y')
        plt.ioff()
        plt.show()
    def plotComparisonWeightings(self, apertureRadiusIndex=0):
        """
        Plot histograms visualizing the relative weightings of the comparison
        stars used to produce the "mean comparison star", from which the
        light curve is calculated.
        Parameters
        ----------
        apertureRadiusIndex : int, optional (default=0)
            Index of the aperture radius list corresponding to the aperture radius
            from which to produce the plot.
        """
        plt.ion()
        # weights is a 2-row array: row 0 = star keys, row 1 = fitted weights.
        weights = self.comparisonStarWeights[apertureRadiusIndex]
        weights = np.sort(weights,axis=1)
        width = 0.5
        indices = weights[0,:]
        coefficients = weights[1,:]
        fig = plt.figure(num=None, figsize=(10, 8), facecolor='w',edgecolor='k')
        fig.canvas.set_window_title('OSCAAR')
        ax = fig.add_subplot(111)
        ax.set_xlim([0,len(indices)+1])
        ax.set_xticks(indices+width/2)
        ax.set_xticklabels(["Star "+str(i) for i in range(len(indices))])
        ax.set_xlabel('Comparison Star')
        ax.set_ylabel('Normalized Weighting')
        ax.set_title('Comparison Star Weights into the Composite Comparison Star for aperture radius %s' \
                     % self.apertureRadii[apertureRadiusIndex])
        # Dotted horizontal line marks the uniform (default) weighting level.
        ax.axhline(xmin=0,xmax=1,y=1.0/len(indices),linestyle=':',color='k')
        ax.bar(indices,coefficients,width,color='w')
        plt.ioff()
        plt.show()
def updateMCMC(self,bestp,allparams,acceptanceRate,dataBankPath,uncertainties):
"""
Assigns variables within the dataBank object for the results of an MCMC run.
Parameters
----------
bestp : list
Best-fit parameters from the MCMC run. The list elements correspond to [<ratio of planetary radius
to stellar radius>,<ratio of semi-major axis to stellar radius>,<inclination>,<mid-transit time>].
allparams : 2D matrix
This matrix represents the many "states", "trails" or "links in the chain" that are accepted and saved
throughout the Metropolis-Hastings process in the MCMC scripts. From allparams we can calculate the
uncertainties on each best-fit parameter.
acceptanceRate : float
The final acceptance rate achieved by the chain; the ratio of the number of accepted states and the
number of states attempted
dataBankPath : string
Path to the dataBank object pickle (aka "OSCAAR pkl") to update
uncertainties : list of lists
:math:`$\pm 1\sigma$` uncertainties on each of the best-fit parameters in `bestp`
"""
self.MCMC_bestp = bestp
self.MCMC_allparams = allparams
self.MCMC_acceptanceRate = acceptanceRate
self.dataBankPath = dataBankPath
self.MCMC_uncertainties = uncertainties
def uncertaintyString(self):
"""
Returns
-------
savestring : string
A string formatted for human-readable results from the MCMC process, with
the best-fit parameters and the :math:`$\pm 1\sigma$` uncertainties
"""
savestring = 'MCMC Best Fit Parameters And One-Sigma Uncertainties\n----------------------------------------------------\n\n'
labels = ['Rp/Rs','a/Rs','Inclination','Mid-transit time']
for i in range(len(labels)):
savestring += '%s:\t%s\t +%s / -%s \n' % (labels[i],self.MCMC_bestp[i],self.MCMC_uncertainties[i][0],self.MCMC_uncertainties[i][1])
return savestring
def czechETDstring(self,apertureRadiusIndex):
"""
Returns a string containing the tab delimited light curve data for submission
to the *Czech Astronomical Society's Exoplanet Transit Database*, for submission
here: http://var2.astro.cz/ETD/protocol.php
Parameters
----------
apertureRadiusIndex : int
Index of the aperture radius from which to use for the light curve fluxes
and errors.
"""
N_measurements = len(self.lightCurves[apertureRadiusIndex])
outputString = ''
for i in xrange(N_measurements):
outputString += '\t'.join(map(str,[self.times[i],self.lightCurves[apertureRadiusIndex][i],\
self.lightCurveErrors[apertureRadiusIndex][i]]))
outputString += '\n'
return outputString
# def plotMCMC(self):
# bestp = self.MCMC_bestp
# allparams = self.MCMC_allparams
# x = self.times
# y = self.lightCurve
# sigma_y = self.lightCurveError
#
# ##############################
# # Prepare figures
# fig = plt.figure()
# ax1 = fig.add_subplot(331)
# ax2 = fig.add_subplot(332)
# ax3 = fig.add_subplot(333)
# ax4 = fig.add_subplot(334)
# ax5 = fig.add_subplot(335)
# ax6 = fig.add_subplot(336)
# ax7 = fig.add_subplot(337)
# ax8 = fig.add_subplot(338)
# ax9 = fig.add_subplot(339)
# yfit = occult4params(x,bestp)
# ax1.errorbar(x,y,yerr=sigma_y,fmt='o-')
# ax1.plot(x,yfit,'r')
# ax1.set_title("Fit with MCMC")
#
# ##############################
# # Plot traces and histograms of mcmc params
# p = allparams[0,:]
# ap = allparams[1,:]
# i = allparams[2,:]
# t0 = allparams[3,:]
# abscissa = np.arange(len(allparams[0,:])) ## Make x-axis for trace plots
# burnFraction = 0.20 ## "burn" or ignore the first 20% of the chains
#
# ax2.plot(abscissa,p,'k.')
# ax2.set_title('p trace')
# ax2.axvline(ymin=0,ymax=1,x=burnFraction*len(abscissa),linestyle=':')
#
# ax3.plot(abscissa,ap,'k.')
# ax3.set_title('ap trace')
# ax3.axvline(ymin=0,ymax=1,x=burnFraction*len(abscissa),linestyle=':')
#
# ax4.plot(abscissa,i,'k.')
# ax4.set_title('i trace')
# ax4.axvline(ymin=0,ymax=1,x=burnFraction*len(abscissa),linestyle=':')
#
# ax5.plot(abscissa,t0,'k.')
# ax5.set_title('t0 trace')
# ax5.axvline(ymin=0,ymax=1,x=burnFraction*len(abscissa),linestyle=':')
#
# def histplot(parameter,axis,title,bestFitParameter):
# postburn = parameter[burnFraction*len(parameter):len(parameter)] ## Burn beginning of chain
# Nbins = 15 ## Plot histograms with 15 bins
# n, bins, patches = axis.hist(postburn, Nbins, normed=0, facecolor='white') ## Generate histogram
# plus,minus = oscaar.fitting.get_uncertainties(postburn,bestFitParameter) ## Calculate uncertainties on best fit parameter
# axis.axvline(ymin=0,ymax=1,x=bestFitParameter+plus,ls=':',color='r') ## Plot vertical lines representing uncertainties
# axis.axvline(ymin=0,ymax=1,x=bestFitParameter-minus,ls=':',color='r')
# axis.set_title(title)
# ## Plot the histograms
# histplot(p,ax6,'p',bestp[0])
# histplot(ap,ax7,'ap',bestp[1])
# histplot(i,ax8,'i',bestp[2])
# histplot(t0,ax9,'t0',bestp[3])
#
# plt.savefig("mcmc_results.png",bbox_inches='tight') ## Save plot
# plt.show()
    def plotLightCurve_multirad_output(self):
        # Display all figures built by plotLightCurve_multirad.
        # NOTE(review): presumably split out so tests/GUI code can override how
        # the figures are shown -- confirm.
        plt.show()
    def plotLightCurve_multirad(self,pointsPerBin=10):
        """
        Produce one light-curve figure per aperture radius, each with a
        median-binned trend over-plotted, then show them all at once.
        Times are offset by their rounded mean JD to keep axis labels short.
        Parameters
        ----------
        pointsPerBin : int, optional (default=10)
            Integer number of points to accumulate per bin.
        """
        for apertureRadiusIndex in range(len(self.apertureRadii)):
            # Subtract the rounded mean JD so tick labels stay readable.
            meanTimeInt = int(np.rint(np.mean(self.times)))
            offsetTimes = self.times - meanTimeInt
            binnedTime, binnedFlux, binnedStd = mathMethods.medianBin(offsetTimes,self.lightCurves[apertureRadiusIndex],pointsPerBin)
            fig = plt.figure(num=None, figsize=(10, 8), facecolor='w',edgecolor='k')
            fig.canvas.set_window_title('OSCAAR')
            axis = fig.add_subplot(111)
            def format_coord(x, y):
                '''Function to give data value on mouse over plot.'''
                return 'JD=%1.5f, Flux=%1.4f' % (meanTimeInt+x, y)
            axis.format_coord = format_coord
            axis.errorbar(offsetTimes,self.lightCurves[apertureRadiusIndex],yerr=self.lightCurveErrors[apertureRadiusIndex],fmt='k.',ecolor='gray')
            axis.errorbar(binnedTime, binnedFlux, yerr=binnedStd, fmt='rs-', linewidth=2)
            # Dotted vertical lines mark ingress/egress in the offset frame.
            axis.axvline(ymin=0,ymax=1,x=self.ingress-meanTimeInt,color='k',ls=':')
            axis.axvline(ymin=0,ymax=1,x=self.egress-meanTimeInt,color='k',ls=':')
            axis.set_title('Light curve for aperture radius: %s' % self.apertureRadii[apertureRadiusIndex])
            axis.set_xlabel(('Time - %i (JD)' % meanTimeInt))
            axis.set_ylabel('Relative Flux')
            plt.ioff()
        self.plotLightCurve_multirad_output()
| |
from __future__ import unicode_literals, division, absolute_import
import logging
import re
import sys
import urllib
import urlparse
from datetime import datetime
from path import Path
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.event import event
from flexget.entry import Entry
log = logging.getLogger('filesystem')
class Filesystem(object):
    """
    Uses local path content as an input. Can use recursion if configured.
    Recursion is False by default. Can be configured to true or get integer that will specify max depth in relation to
    base folder.
    All files/dir/symlinks are retrieved by default. Can be changed by using the 'retrieve' property.

    Example 1:: Single path
        filesystem: /storage/movies/
    Example 2:: List of paths
        filesystem:
          - /storage/movies/
          - /storage/tv/
    Example 3:: Object with list of paths
        filesystem:
          path:
            - /storage/movies/
            - /storage/tv/
          mask: '*.mkv'
    Example 4::
        filesystem:
          path:
            - /storage/movies/
            - /storage/tv/
          recursive: 4  # 4 levels deep from each base folder
          retrieve: files  # Only files will be retrieved
    Example 5::
        filesystem:
          path:
            - /storage/movies/
            - /storage/tv/
          recursive: yes  # No limit to depth, all sub dirs will be accessed
          retrieve:  # Only files and dirs will be retrieved
            - files
            - dirs
    """
    retrieval_options = ['files', 'dirs', 'symlinks']
    paths = one_or_more({'type': 'string', 'format': 'path'}, unique_items=True)

    schema = {
        'oneOf': [
            paths,
            {'type': 'object',
             'properties': {
                 'path': paths,
                 'mask': {'type': 'string'},
                 'regexp': {'type': 'string', 'format': 'regex'},
                 'recursive': {'oneOf': [{'type': 'integer', 'minimum': 2}, {'type': 'boolean'}]},
                 'retrieve': one_or_more({'type': 'string', 'enum': retrieval_options}, unique_items=True)
             },
             'required': ['path'],
             'additionalProperties': False}]
    }

    def prepare_config(self, config):
        """Normalize any accepted config shape into a dict and fill in defaults."""
        from fnmatch import translate
        # Converts config to a dict with a list of paths
        # (removed a no-op `config = config` self-assignment here)
        if not isinstance(config, dict):
            config = {'path': config}
        if not isinstance(config['path'], list):
            config['path'] = [config['path']]
        config.setdefault('recursive', False)
        # If mask was specified, turn it in to a regexp
        if config.get('mask'):
            config['regexp'] = translate(config['mask'])
        # If no mask or regexp specified, accept all files
        config.setdefault('regexp', '.')
        # Default to retrieving ALL object types (files, dirs and symlinks),
        # not just files -- the previous comment here was misleading.
        config.setdefault('retrieve', self.retrieval_options)
        return config

    def create_entry(self, filepath, test_mode):
        """
        Creates a single entry using a filepath and a type (file/dir).

        Returns the entry, or None when the entry does not validate.
        """
        filepath = filepath.abspath()
        entry = Entry()
        entry['location'] = filepath
        entry['url'] = urlparse.urljoin('file:', urllib.pathname2url(filepath.encode('utf8')))
        entry['filename'] = filepath.name
        # Files drop their extension in the title; directories keep the name.
        if filepath.isfile():
            entry['title'] = filepath.namebase
        else:
            entry['title'] = filepath.name
        try:
            entry['timestamp'] = datetime.fromtimestamp(filepath.getmtime())
        except Exception as e:
            # Best-effort: a failed stat should not drop the entry.
            log.warning('Error setting timestamp for %s: %s' % (filepath, e))
            entry['timestamp'] = None
        entry['accessed'] = datetime.fromtimestamp(filepath.getatime())
        entry['modified'] = datetime.fromtimestamp(filepath.getmtime())
        entry['created'] = datetime.fromtimestamp(filepath.getctime())
        if entry.isvalid():
            if test_mode:
                log.info("Test mode. Entry includes:")
                log.info(" Title: %s" % entry["title"])
                log.info(" URL: %s" % entry["url"])
                log.info(" Filename: %s" % entry["filename"])
                log.info(" Location: %s" % entry["location"])
                log.info(" Timestamp: %s" % entry["timestamp"])
            return entry
        else:
            log.error('Non valid entry created: %s ' % entry)
            return

    def get_max_depth(self, recursion, base_depth):
        """Translate the `recursive` option into an absolute maximum path depth."""
        if recursion is False:
            return base_depth + 1
        elif recursion is True:
            return float('inf')
        else:
            # Integer recursion: depth relative to the base folder.
            return base_depth + recursion

    def get_folder_objects(self, folder, recursion):
        # Non-recursive scans only need direct children; otherwise walk the tree.
        if recursion is False:
            return folder.listdir()
        else:
            return folder.walk(errors='ignore')

    def get_entries_from_path(self, path_list, match, recursion, test_mode, get_files, get_dirs, get_symlinks):
        """Scan each configured folder and return the deduplicated list of entries."""
        entries = []
        for folder in path_list:
            log.verbose('Scanning folder %s. Recursion is set to %s.' % (folder, recursion))
            folder = Path(folder).expanduser()
            log.debug('Scanning %s' % folder)
            base_depth = len(folder.splitall())
            max_depth = self.get_max_depth(recursion, base_depth)
            folder_objects = self.get_folder_objects(folder, recursion)
            for path_object in folder_objects:
                log.debug('Checking if %s qualifies to be added as an entry.' % path_object)
                try:
                    # Touch the path only to force name decoding; undecodable
                    # filenames raise UnicodeError and are skipped.
                    path_object.exists()
                except UnicodeError:
                    log.error('File %s not decodable with filesystem encoding: %s' % (path_object, sys.getfilesystemencoding()))
                    continue
                entry = None
                object_depth = len(path_object.splitall())
                if object_depth <= max_depth:
                    if match(path_object):
                        # Symlinks win over the file check: a symlinked file is
                        # retrieved only when 'symlinks' is requested.
                        if (path_object.isdir() and get_dirs) or (
                                path_object.islink() and get_symlinks) or (
                                path_object.isfile() and not path_object.islink() and get_files):
                            entry = self.create_entry(path_object, test_mode)
                        else:
                            log.debug("Path object's %s type doesn't match requested object types." % path_object)
                    if entry and entry not in entries:
                        entries.append(entry)
        return entries

    def on_task_input(self, task, config):
        """Task input hook: build entries from the configured paths."""
        config = self.prepare_config(config)
        path_list = config['path']
        test_mode = task.options.test
        match = re.compile(config['regexp'], re.IGNORECASE).match
        recursive = config['recursive']
        get_files = 'files' in config['retrieve']
        get_dirs = 'dirs' in config['retrieve']
        get_symlinks = 'symlinks' in config['retrieve']
        log.verbose('Starting to scan folders.')
        return self.get_entries_from_path(path_list, match, recursive, test_mode, get_files, get_dirs, get_symlinks)
@event('plugin.register')
def register_plugin():
    # Register this input plugin with FlexGet under the name 'filesystem';
    # api_ver=2 selects the schema-validated plugin API.
    plugin.register(Filesystem, 'filesystem', api_ver=2)
| |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.parse import unquote
import hashlib
import io
import logging
import mimetypes
import os
import shutil
import socket
import sys
import tempfile
from cgi import parse_header
from http.client import BadStatusLine
from requests import RequestException
from flexget import options, plugin
from flexget.event import event
from flexget.utils.tools import decode_html, native_str_to_text
from flexget.utils.template import RenderError
from flexget.utils.pathscrub import pathscrub
log = logging.getLogger('download')
class PluginDownload(object):
"""
Downloads content from entry url and writes it into a file.
Example::
download: ~/torrents/
Allow HTML content:
By default download plugin reports failure if received content
is a html. Usually this is some sort of custom error page without
proper http code and thus entry is assumed to be downloaded
incorrectly.
In the rare case you actually need to retrieve html-pages you must
disable this feature.
Example::
download:
path: ~/something/
fail_html: no
You may use commandline parameter --dl-path to temporarily override
all paths to another location.
"""
schema = {
'oneOf': [
{
'title': 'specify options',
'type': 'object',
'properties': {
'path': {'type': 'string', 'format': 'path'},
'fail_html': {'type': 'boolean', 'default': True},
'overwrite': {'type': 'boolean', 'default': False},
'temp': {'type': 'string', 'format': 'path'},
'filename': {'type': 'string'}
},
'additionalProperties': False
},
{'title': 'specify path', 'type': 'string', 'format': 'path'},
{'title': 'no options', 'type': 'boolean', 'enum': [True]}
]
}
def process_config(self, config):
"""Return plugin configuration in advanced form"""
if isinstance(config, str):
config = {'path': config}
if not isinstance(config, dict):
config = {}
if not config.get('path'):
config['require_path'] = True
config.setdefault('fail_html', True)
return config
def on_task_download(self, task, config):
config = self.process_config(config)
# set temporary download path based on user's config setting or use fallback
tmp = config.get('temp', os.path.join(task.manager.config_base, 'temp'))
self.get_temp_files(task, require_path=config.get('require_path', False), fail_html=config['fail_html'],
tmp_path=tmp)
    def get_temp_file(self, task, entry, require_path=False, handle_magnets=False, fail_html=True,
                      tmp_path=tempfile.gettempdir()):
        """
        Download entry content and store in temporary folder.
        Fails entry with a reason if there was problem.
        Tries each of the entry's urls in order until one succeeds.

        :param bool require_path:
            whether or not entries without 'path' field are ignored
        :param bool handle_magnets:
            when used any of urls containing magnet link will replace url,
            otherwise warning is printed.
        :param fail_html:
            fail entries which url respond with html content
        :param tmp_path:
            path to use for temporary files while downloading
        """
        if entry.get('urls'):
            urls = entry.get('urls')
        else:
            urls = [entry['url']]
        errors = []
        for url in urls:
            if url.startswith('magnet:'):
                if handle_magnets:
                    # Set magnet link as main url, so a torrent client plugin can grab it
                    log.debug('Accepting magnet url for %s', entry['title'])
                    entry['url'] = url
                    break
                else:
                    log.warning('Can\'t download magnet url')
                    errors.append('Magnet URL')
                    continue
            if require_path and 'path' not in entry:
                # Don't fail here, there might be a magnet later in the list of urls
                log.debug('Skipping url %s because there is no path for download', url)
                continue
            error = self.process_entry(task, entry, url, tmp_path)
            # disallow html content
            html_mimes = ['html', 'text/html']
            if entry.get('mime-type') in html_mimes and fail_html:
                error = 'Unexpected html content received from `%s` - maybe a login page?' % entry['url']
                self.cleanup_temp_file(entry)
            if not error:
                # Set the main url, so we know where this file actually came from
                log.debug('Successfully retrieved %s from %s', entry['title'], url)
                entry['url'] = url
                break
            else:
                errors.append(error)
        else:
            # for/else: reached only when no url succeeded (no `break` above)
            # check if entry must have a path (download: yes)
            if require_path and 'path' not in entry:
                log.error('%s can\'t be downloaded, no path specified for entry', entry['title'])
                entry.fail('no path specified for entry')
            else:
                entry.fail(', '.join(errors))
def save_error_page(self, entry, task, page):
received = os.path.join(task.manager.config_base, 'received', task.name)
if not os.path.isdir(received):
os.makedirs(received)
filename = os.path.join(received, pathscrub('%s.error' % entry['title'], filename=True))
log.error('Error retrieving %s, the error page has been saved to %s', entry['title'], filename)
with io.open(filename, 'wb') as outfile:
outfile.write(page)
def get_temp_files(self, task, require_path=False, handle_magnets=False, fail_html=True,
tmp_path=tempfile.gettempdir()):
"""Download all task content and store in temporary folder.
:param bool require_path:
whether or not entries without 'path' field are ignored
:param bool handle_magnets:
when used any of urls containing magnet link will replace url,
otherwise warning is printed.
:param fail_html:
fail entries which url respond with html content
:param tmp_path:
path to use for temporary files while downloading
"""
for entry in task.accepted:
self.get_temp_file(task, entry, require_path, handle_magnets, fail_html, tmp_path)
    # TODO: a bit silly method, should be got rid of now with simpler exceptions?
    def process_entry(self, task, entry, url, tmp_path):
        """
        Processes `entry` by using `url`. Does not use entry['url'].
        Does not fail the `entry` if there is a network issue, instead just logs and returns a string error.

        :param task: Task
        :param entry: Entry
        :param url: Url to try download
        :param tmp_path: Path to store temporary files
        :return: String error, if failed. ``None`` on success (or in test mode).
        """
        try:
            if task.options.test:
                # --test: report only, never touch the network
                log.info('Would download: %s', entry['title'])
            else:
                if not task.manager.unit_test:
                    log.info('Downloading: %s', entry['title'])
                self.download_entry(task, entry, url, tmp_path)
        except RequestException as e:
            log.warning('RequestException %s, while downloading %s', e, url)
            return 'Network error during request: %s' % e
        except BadStatusLine as e:
            # NOTE(review): BadStatusLine is presumably imported from
            # httplib/http.client at the top of the file -- confirm
            log.warning('Failed to reach server. Reason: %s', getattr(e, 'message', 'N/A'))
            return 'BadStatusLine'
        except IOError as e:
            # urllib-style errors: `reason` for connection problems,
            # `code` for HTTP-level failures
            if hasattr(e, 'reason'):
                log.warning('Failed to reach server. Reason: %s', e.reason)
            elif hasattr(e, 'code'):
                log.warning('The server couldn\'t fulfill the request. Error code: %s', e.code)
            log.debug('IOError', exc_info=True)
            return 'IOError'
        except ValueError as e:
            # Probably unknown url type
            msg = 'ValueError %s' % e
            log.warning(msg)
            log.debug(msg, exc_info=True)
            return msg
    def download_entry(self, task, entry, url, tmp_path):
        """Downloads `entry` by using `url`.

        On success stores the temp file path in ``entry['file']`` plus
        ``mime-type``/``content-length``/``filename`` metadata; the file is
        moved to its final destination later, in :meth:`output`.

        :raises: Several types of exceptions ...
        :raises: PluginWarning
        """
        log.debug('Downloading url \'%s\'', url)
        # get content
        auth = None
        if 'download_auth' in entry:
            auth = entry['download_auth']
            log.debug('Custom auth enabled for %s download: %s', entry['title'], entry['download_auth'])
        try:
            response = task.requests.get(url, auth=auth, raise_status=False)
        except UnicodeError:
            log.error('Unicode error while encoding url %s', url)
            return
        if response.status_code != 200:
            log.debug('Got %s response from server. Saving error page.', response.status_code)
            # Save the error page
            if response.content:
                self.save_error_page(entry, task, response.content)
            # Raise the error
            response.raise_for_status()
            return
        # expand ~ in temp path
        # TODO jinja?
        # NOTE(review): expanduser does not raise RenderError; this except
        # clause looks vestigial -- confirm before removing
        try:
            tmp_path = os.path.expanduser(tmp_path)
        except RenderError as e:
            entry.fail('Could not set temp path. Error during string replacement: %s' % e)
            return
        # Clean illegal characters from temp path name
        tmp_path = pathscrub(tmp_path)
        # create if missing
        if not os.path.isdir(tmp_path):
            log.debug('creating tmp_path %s' % tmp_path)
            os.mkdir(tmp_path)
        # check for write-access
        if not os.access(tmp_path, os.W_OK):
            raise plugin.PluginError('Not allowed to write to temp directory `%s`' % tmp_path)
        # download and write data into a temp file
        # each download gets its own private directory so cleanup can rmtree it
        tmp_dir = tempfile.mkdtemp(dir=tmp_path)
        fname = hashlib.md5(url.encode('utf-8', 'replace')).hexdigest()
        datafile = os.path.join(tmp_dir, fname)
        outfile = io.open(datafile, 'wb')
        try:
            for chunk in response.iter_content(chunk_size=150 * 1024, decode_unicode=False):
                outfile.write(chunk)
        except Exception as e:
            # don't leave futile files behind
            # outfile has to be closed before we can delete it on Windows
            outfile.close()
            log.debug('Download interrupted, removing datafile')
            os.remove(datafile)
            if isinstance(e, socket.timeout):
                log.error('Timeout while downloading file')
            else:
                raise
        else:
            outfile.close()
            # Do a sanity check on downloaded file
            if os.path.getsize(datafile) == 0:
                entry.fail('File %s is 0 bytes in size' % datafile)
                os.remove(datafile)
                return
            # store temp filename into entry so other plugins may read and modify content
            # temp file is moved into final destination at self.output
            entry['file'] = datafile
            log.debug('%s field file set to: %s', entry['title'], entry['file'])
        if 'content-type' in response.headers:
            entry['mime-type'] = str(parse_header(response.headers['content-type'])[0])
        else:
            entry['mime-type'] = "unknown/unknown"
        content_encoding = response.headers.get('content-encoding', '')
        decompress = 'gzip' in content_encoding or 'deflate' in content_encoding
        # content-length of a compressed stream does not match the file size
        if 'content-length' in response.headers and not decompress:
            entry['content-length'] = int(response.headers['content-length'])
        # prefer content-disposition naming, note: content-disposition can be disabled completely
        # by setting entry field `content-disposition` to False
        if entry.get('content-disposition', True):
            self.filename_from_headers(entry, response)
        else:
            log.info('Content-disposition disabled for %s', entry['title'])
        self.filename_ext_from_mime(entry)
        if not entry.get('filename'):
            filename = unquote(url.rsplit('/', 1)[1])
            log.debug('No filename - setting from url: %s', filename)
            entry['filename'] = filename
        log.debug('Finishing download_entry() with filename %s', entry.get('filename'))
def filename_from_headers(self, entry, response):
"""Checks entry filename if it's found from content-disposition"""
if not response.headers.get('content-disposition'):
# No content disposition header, nothing we can do
return
filename = parse_header(response.headers['content-disposition'])[1].get('filename')
if filename:
# try to decode to unicode, specs allow latin1, some may do utf-8 anyway
try:
filename = native_str_to_text(filename, encoding='latin1')
log.debug('filename header latin1 decoded')
except UnicodeError:
try:
filename = native_str_to_text(filename, encoding='utf-8')
log.debug('filename header UTF-8 decoded')
except UnicodeError:
pass
filename = decode_html(filename)
log.debug('Found filename from headers: %s', filename)
if 'filename' in entry:
log.debug('Overriding filename %s with %s from content-disposition', entry['filename'], filename)
entry['filename'] = filename
def filename_ext_from_mime(self, entry):
"""Tries to set filename extension from mime-type"""
extensions = mimetypes.guess_all_extensions(entry['mime-type'], strict=False)
if extensions:
log.debug('Mimetype guess for %s is %s ', entry['mime-type'], extensions)
if entry.get('filename'):
if any(entry['filename'].endswith(extension) for extension in extensions):
log.debug('Filename %s extension matches to mime-type', entry['filename'])
else:
# mimetypes library has no concept of a 'prefered' extension when there are multiple possibilites
# this causes the first to be used which is not always desirable, e.g. 'ksh' for 'text/plain'
extension = mimetypes.guess_extension(entry['mime-type'], strict=False)
log.debug('Adding mime-type extension %s to %s', extension, entry['filename'])
entry['filename'] = entry['filename'] + extension
else:
log.debug('Python doesn\'t know extension for mime-type: %s', entry['mime-type'])
def on_task_output(self, task, config):
"""Move downloaded content from temp folder to final destination"""
config = self.process_config(config)
for entry in task.accepted:
try:
self.output(task, entry, config)
except plugin.PluginWarning as e:
entry.fail()
log.error('Plugin error while writing: %s', e)
except Exception as e:
entry.fail()
log.exception('Exception while writing: %s', e)
def output(self, task, entry, config):
"""Moves temp-file into final destination
Raises:
PluginError if operation fails
"""
if 'file' not in entry and not task.options.test:
log.debug('file missing, entry: %s', entry)
raise plugin.PluginError('Entry `%s` has no temp file associated with' % entry['title'])
try:
# use path from entry if has one, otherwise use from download definition parameter
path = entry.get('path', config.get('path'))
if not isinstance(path, str):
raise plugin.PluginError('Invalid `path` in entry `%s`' % entry['title'])
# override path from command line parameter
if task.options.dl_path:
path = task.options.dl_path
# expand variables in path
try:
path = os.path.expanduser(entry.render(path))
except RenderError as e:
entry.fail('Could not set path. Error during string replacement: %s' % e)
return
# Clean illegal characters from path name
path = pathscrub(path)
# If we are in test mode, report and return
if task.options.test:
log.info('Would write `%s` to `%s`', entry['title'], path)
# Set a fake location, so the exec plugin can do string replacement during --test #1015
entry['location'] = os.path.join(path, 'TEST_MODE_NO_OUTPUT')
return
# make path
if not os.path.isdir(path):
log.debug('Creating directory %s', path)
try:
os.makedirs(path)
except:
raise plugin.PluginError('Cannot create path %s' % path, log)
# check that temp file is present
if not os.path.exists(entry['file']):
log.debug('entry: %s', entry)
raise plugin.PluginWarning('Downloaded temp file `%s` doesn\'t exist!?' % entry['file'])
if config.get('filename'):
try:
entry['filename'] = entry.render(config['filename'])
log.debug('set filename from config %s' % entry['filename'])
except RenderError as e:
entry.fail('Could not set filename. Error during string replacement: %s' % e)
return
# if we still don't have a filename, try making one from title (last resort)
elif not entry.get('filename'):
entry['filename'] = entry['title']
log.debug('set filename from title %s', entry['filename'])
if 'mime-type' not in entry:
log.warning('Unable to figure proper filename for %s. Using title.', entry['title'])
else:
guess = mimetypes.guess_extension(entry['mime-type'])
if not guess:
log.warning('Unable to guess extension with mime-type %s', guess)
else:
self.filename_ext_from_mime(entry)
name = entry.get('filename', entry['title'])
# Remove illegal characters from filename #325, #353
name = pathscrub(name)
# Remove directory separators from filename #208
name = name.replace('/', ' ')
if sys.platform.startswith('win'):
name = name.replace('\\', ' ')
# remove duplicate spaces
name = ' '.join(name.split())
# combine to full path + filename
destfile = os.path.join(path, name)
log.debug('destfile: %s', destfile)
if os.path.exists(destfile):
import filecmp
if filecmp.cmp(entry['file'], destfile):
log.debug("Identical destination file '%s' already exists", destfile)
elif config.get('overwrite'):
log.debug("Overwriting already existing file %s", destfile)
else:
log.info('File `%s` already exists and is not identical, download failed.', destfile)
entry.fail('File `%s` already exists and is not identical.' % destfile)
return
else:
# move temp file
log.debug('moving %s to %s', entry['file'], destfile)
try:
shutil.move(entry['file'], destfile)
except (IOError, OSError) as err:
# ignore permission errors, see ticket #555
import errno
if not os.path.exists(destfile):
raise plugin.PluginError('Unable to write %s: %s' % (destfile, err))
if err.errno != errno.EPERM and err.errno != errno.EACCES:
raise
else:
del(entry['file'])
# store final destination as output key
entry['location'] = destfile
finally:
self.cleanup_temp_file(entry)
    def on_task_learn(self, task, config):
        """Make sure all temp files are cleaned up after output phase"""
        self.cleanup_temp_files(task)
    def on_task_abort(self, task, config):
        """Make sure all temp files are cleaned up when task is aborted."""
        self.cleanup_temp_files(task)
def cleanup_temp_file(self, entry):
if 'file' in entry:
if os.path.exists(entry['file']):
log.debug('removing temp file %s from %s', entry['file'], entry['title'])
os.remove(entry['file'])
if os.path.exists(os.path.dirname(entry['file'])):
shutil.rmtree(os.path.dirname(entry['file']))
del (entry['file'])
def cleanup_temp_files(self, task):
"""Checks all entries for leftover temp files and deletes them."""
for entry in task.entries + task.rejected + task.failed:
self.cleanup_temp_file(entry)
@event('plugin.register')
def register_plugin():
    # expose PluginDownload under the 'download' keyword (plugin API v2)
    plugin.register(PluginDownload, 'download', api_ver=2)
@event('options.register')
def register_parser_arguments():
    # --dl-path overrides every configured download path for one execution
    options.get_parser('execute').add_argument('--dl-path', dest='dl_path', default=False, metavar='PATH',
                                               help='override path for download plugin, applies to all executed tasks')
| |
"""
Support for interacting with and controlling the cmus music player.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.cmus/
"""
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
MEDIA_TYPE_MUSIC, MEDIA_TYPE_PLAYLIST, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE,
SUPPORT_PREVIOUS_TRACK, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
SUPPORT_VOLUME_SET, SUPPORT_PLAY_MEDIA, SUPPORT_SEEK, PLATFORM_SCHEMA,
MediaPlayerDevice)
from homeassistant.const import (
STATE_OFF, STATE_PAUSED, STATE_PLAYING, CONF_HOST, CONF_NAME, CONF_PORT,
CONF_PASSWORD)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['pycmus==0.1.0']
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'cmus'
DEFAULT_PORT = 3000
SUPPORT_CMUS = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_TURN_OFF | \
SUPPORT_TURN_ON | SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | \
SUPPORT_PLAY_MEDIA | SUPPORT_SEEK
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Inclusive(CONF_HOST, 'remote'): cv.string,
vol.Inclusive(CONF_PASSWORD, 'remote'): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
def setup_platform(hass, config, add_devices, discover_info=None):
    """Setup the CMUS platform."""
    from pycmus import exceptions

    try:
        cmus_remote = CmusDevice(config.get(CONF_HOST),
                                 config.get(CONF_PASSWORD),
                                 config.get(CONF_PORT),
                                 config.get(CONF_NAME))
    except exceptions.InvalidPassword:
        _LOGGER.error("The provided password was rejected by cmus")
        return False
    add_devices([cmus_remote])
class CmusDevice(MediaPlayerDevice):
"""Representation of a running cmus."""
# pylint: disable=no-member
    def __init__(self, server, password, port, name):
        """Initialize the CMUS device."""
        from pycmus import remote
        if server:
            # connect to a remote cmus instance over the network
            self.cmus = remote.PyCmus(
                server=server, password=password, port=port)
            auto_name = 'cmus-{}'.format(server)
        else:
            # no host configured -> local cmus instance
            self.cmus = remote.PyCmus()
            auto_name = 'cmus-local'
        self._name = name or auto_name
        # last status dict fetched from cmus; refreshed by update()
        self.status = {}
        self.update()
def update(self):
"""Get the latest data and update the state."""
status = self.cmus.get_status_dict()
if not status:
_LOGGER.warning("Recieved no status from cmus")
else:
self.status = status
    @property
    def name(self):
        """Return the name of the device."""
        # configured name, or the auto-generated one from __init__
        return self._name
@property
def state(self):
"""Return the media state."""
if 'status' not in self.status:
self.update()
if self.status['status'] == 'playing':
return STATE_PLAYING
elif self.status['status'] == 'paused':
return STATE_PAUSED
else:
return STATE_OFF
    @property
    def media_content_id(self):
        """Content ID of current playing media."""
        # the 'file' entry of the cmus status is used as the content id
        return self.status.get('file')
    @property
    def content_type(self):
        """Content type of the current playing media."""
        # NOTE(review): Home Assistant's media player API exposes this as
        # `media_content_type`; confirm this property name is intentional
        return MEDIA_TYPE_MUSIC
    @property
    def media_duration(self):
        """Duration of current playing media in seconds."""
        return self.status.get('duration')
@property
def media_title(self):
"""Title of current playing media."""
return self.status['tag'].get('title')
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
return self.status['tag'].get('artist')
@property
def media_track(self):
"""Track number of current playing media, music track only."""
return self.status['tag'].get('tracknumber')
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
return self.status['tag'].get('album')
@property
def media_album_artist(self):
"""Album artist of current playing media, music track only."""
return self.status['tag'].get('albumartist')
@property
def volume_level(self):
"""Return the volume level."""
left = self.status['set'].get('vol_left')[0]
right = self.status['set'].get('vol_right')[0]
if left != right:
volume = float(left + right) / 2
else:
volume = left
return int(volume)/100
    @property
    def supported_media_commands(self):
        """Flag of media commands that are supported."""
        # static bitmask built at module level from SUPPORT_* constants
        return SUPPORT_CMUS
    def turn_off(self):
        """Service to send the CMUS the command to stop playing."""
        self.cmus.player_stop()
    def turn_on(self):
        """Service to send the CMUS the command to start playing."""
        self.cmus.player_play()
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self.cmus.set_volume(int(volume * 100))
def volume_up(self):
"""Function to send CMUS the command for volume up."""
left = self.status['set'].get('vol_left')
right = self.status['set'].get('vol_right')
if left != right:
current_volume = float(left + right) / 2
else:
current_volume = left
if current_volume <= 100:
self.cmus.set_volume(int(current_volume) + 5)
def volume_down(self):
"""Function to send CMUS the command for volume down."""
left = self.status['set'].get('vol_left')
right = self.status['set'].get('vol_right')
if left != right:
current_volume = float(left + right) / 2
else:
current_volume = left
if current_volume <= 100:
self.cmus.set_volume(int(current_volume) - 5)
def play_media(self, media_type, media_id, **kwargs):
"""Send the play command."""
if media_type in [MEDIA_TYPE_MUSIC, MEDIA_TYPE_PLAYLIST]:
self.cmus.player_play_file(media_id)
else:
_LOGGER.error(
"Invalid media type %s. Only %s and %s are supported",
media_type, MEDIA_TYPE_MUSIC, MEDIA_TYPE_PLAYLIST)
    def media_pause(self):
        """Send the pause command."""
        self.cmus.player_pause()
    def media_next_track(self):
        """Send next track command."""
        self.cmus.player_next()
    def media_previous_track(self):
        """Send previous track command."""
        self.cmus.player_prev()
    def media_seek(self, position):
        """Send seek command."""
        self.cmus.seek(position)
    def media_play(self):
        """Send the play command."""
        self.cmus.player_play()
def media_stop(self):
"""Send the stop command."""
self.cmus.stop()
| |
# -*- coding: utf-8 -*-
# resource.py ---
#
# Created: Mon Dec 12 12:10:00 2011 (+0200)
# Author: Janne Kuuskeri
#
from django.contrib.auth.models import AnonymousUser
from django.core.exceptions import ValidationError
from django.http import HttpResponse
from django.conf import settings
import errors
import datamapper
import util
from http import codes, Response
import logging
# todo: move and make configurable
REALM = 'devil'
# todo: move somewhere and add note about borrowing this from piston
def coerce_put_post(request):
    """
    Django doesn't particularly understand REST.
    In case we send data over PUT, Django won't
    actually look at the data and load it. We need
    to twist its arm here.

    The try/except abomination here is due to a bug
    in mod_python. This should fix it.

    After this call ``request.PUT`` aliases ``request.POST`` for PUT
    requests; other methods are left untouched.
    """
    if request.method.upper() == "PUT":
        # Bug fix: if _load_post_and_files has already been called, for
        # example by middleware accessing request.POST, the below code to
        # pretend the request is a POST instead of a PUT will be too late
        # to make a difference. Also calling _load_post_and_files will result
        # in the following exception:
        # AttributeError: You cannot set the upload handlers after the upload has been processed.
        # The fix is to check for the presence of the _post field which is set
        # the first time _load_post_and_files is called (both by wsgi.py and
        # modpython.py). If it's set, the request has to be 'reset' to redo
        # the query value parsing in POST mode.
        if hasattr(request, '_post'):
            del request._post
            del request._files
        try:
            request.method = "POST"
            request._load_post_and_files()
            request.method = "PUT"
        except AttributeError:
            # mod_python: method is read-only, fall back to META
            request.META['REQUEST_METHOD'] = 'POST'
            request._load_post_and_files()
            request.META['REQUEST_METHOD'] = 'PUT'
        request.PUT = request.POST
class Resource(object):
""" Base class for resources exposed by an API.
Derive this class to create a resource. At minimum, (in the
derived class) you need to define one method named after the
corresponding http method (i.e. post, get, put or delete).
See the documentation on ``_process_response()`` to see what can
be returned by this method. Additionally, ``HttpStatusCodeError``s
are caught and converted to corresponding http responses.
todo: change function parameters so that request is always before response.
"""
# configuration parameters
access_controller = None
allow_anonymous = True
authentication = None
representation = None
post_representation = None
factory = None
post_factory = None
default_mapper = None
mapper = None
    def __call__(self, request, *args, **kw):
        """ Entry point for HTTP requests.

        ``HttpStatusCodeError``s raised by handlers become matching HTTP
        error responses; any other exception becomes a 500 (see
        ``_get_unknown_error_response``).
        """
        coerce_put_post(request)  # django-fix
        try:
            return self.__handle_request(request, *args, **kw)
        except errors.HttpStatusCodeError, exc:
            return self._get_error_response(exc)
        except Exception, exc:
            return self._get_unknown_error_response(request, exc)
def name(self):
""" Return resource's name.
This is used mainly by the permission module to determine the
name of a permission.
By default, return the name of the class. Override to return
something else.
"""
return util.camelcase_to_slash(self.__class__.__name__)
    def __handle_request(self, request, *args, **kw):
        """ Intercept the request and response.

        This function lets `HttpStatusCodeError`s fall through. They
        are caught and transformed into HTTP responses by the caller.

        Pipeline: authenticate -> authorize -> resolve handler ->
        parse body -> validate/clean -> execute -> format response.

        :return: ``HttpResponse``
        """
        self._authenticate(request)
        self._check_permission(request)
        method = self._get_method(request)
        data = self._get_input_data(request)
        data = self._clean_input_data(data, request)
        response = self._exec_method(method, request, data, *args, **kw)
        return self._process_response(response, request)
def _exec_method(self, method, request, data, *args, **kw):
""" Execute appropriate request handler. """
if self._is_data_method(request):
return method(data, request, *args, **kw)
else:
return method(request, *args, **kw)
    def _process_response(self, response, request):
        """ Process the response.

        If the response is ``HttpResponse``, does nothing. Otherwise,
        serializes, formats and validates the response.

        :param response: resource's response. This can be
            - ``None``,
            - django's ``HttpResponse``
            - devil's ``Response``
            - dictionary (or list of dictionaries)
            - object (or list of objects) that are first serialized into dict
              using ``self.factory``.
            - plaintext

        :returns: Django's ``HttpResponse``
        """

        def coerce_response():
            """ Coerce the response object into devil structure. """
            if not isinstance(response, Response):
                # wrap raw data; code 0 means "use the default status"
                return Response(0, response)
            return response

        if isinstance(response, HttpResponse):
            # we don't do anything if resource returns django's http response
            return response
        devil_res = coerce_response()
        if devil_res.content and devil_res.get_code_num() in (0, 200, 201):
            # serialize, format and validate
            serialized_res = devil_res.content = self._serialize_object(devil_res.content, request)
            formatted_res = self._format_response(request, devil_res)
            self._validate_output_data(response, serialized_res, formatted_res, request)
        else:
            # no data -> format only
            formatted_res = self._format_response(request, devil_res)
        return formatted_res
def _format_response(self, request, response):
""" Format response using appropriate datamapper.
Take the devil response and turn it into django response, ready to
be returned to the client.
"""
res = datamapper.format(request, response, self)
# data is now formatted, let's check if the status_code is set
if res.status_code is 0:
res.status_code = 200
# apply headers
self._add_resposne_headers(res, response)
return res
def _add_resposne_headers(self, django_response, devil_response):
""" Add response headers.
Add HTTP headers from devil's response to django's response.
"""
try:
headers = devil_response.headers
except AttributeError:
# ok, there was no devil_response
pass
else:
for k, v in headers.items():
django_response[k] = v
return django_response
def _get_input_data(self, request):
""" If there is data, parse it, otherwise return None. """
# only PUT and POST should provide data
if not self._is_data_method(request):
return None
content = [row for row in request.read()]
content = ''.join(content) if content else None
return self._parse_input_data(content, request) if content else None
def _parse_input_data(self, data, request):
""" Execute appropriate parser. """
return datamapper.parse(data, request, self)
    def _clean_input_data(self, data, request):
        """ Clean input data.

        Validates the parsed body (when ``self.representation`` is set)
        and optionally turns it into domain objects (when
        ``self.factory`` is set). Non-data methods pass through.
        """
        # sanity check
        if not self._is_data_method(request):
            # this is not PUT or POST -> return
            return data
        # do cleaning
        try:
            if self.representation:
                # representation defined -> perform validation
                self._validate_input_data(data, request)
            if self.factory:
                # factory defined -> create object
                return self._create_object(data, request)
            else:
                # no factory nor representation -> return the same data back
                return data
        except ValidationError, exc:
            return self._input_validation_failed(exc, data, request)
def _get_input_validator(self, request):
""" Return appropriate input validator.
For POST requests, ``self.post_representation`` is returned
if it is present, ``self.representation`` otherwise.
"""
method = request.method.upper()
if method != 'POST':
return self.representation
elif self.post_representation:
return self.post_representation
else:
return self.representation
def _validate_input_data(self, data, request):
""" Validate input data.
:param request: the HTTP request
:param data: the parsed data
:return: if validation is performed and succeeds the data is converted
into whatever format the validation uses (by default Django's
Forms) If not, the data is returned unchanged.
:raises: HttpStatusCodeError if data is not valid
"""
validator = self._get_input_validator(request)
if isinstance(data, (list, tuple)):
return map(validator.validate, data)
else:
return validator.validate(data)
    def _validate_output_data(
        self, original_res, serialized_res, formatted_res, request):
        """ Validate the response data.

        Uses ``self.representation`` (when set) on the serialized
        payload; a failure is routed through
        ``_output_validation_failed`` (500 by default).

        :param original_res: raw value returned by the handler
        :param serialized_res: payload after ``_serialize_object``
        :param formatted_res: final ``HttpResponse``
        :raises: `HttpStatusCodeError` if data is not valid
        """
        validator = self.representation
        # when not to validate...
        if not validator:
            return
        try:
            if isinstance(serialized_res, (list, tuple)):
                map(validator.validate, serialized_res)
            else:
                validator.validate(serialized_res)
        except ValidationError, exc:
            self._output_validation_failed(exc, serialized_res, request)
    def _input_validation_failed(self, error, data, request):
        """ Always raises HttpStatusCodeError.

        Override to raise different status code when request data
        doesn't pass validation.

        todo: should format the content using the datamapper
        """
        # 400 Bad Request carrying the validation message
        raise errors.BadRequest(str(error))
    def _output_validation_failed(self, error, data, request):
        """ Always raises HttpStatusCodeError.

        Override to raise different status code when response data
        doesn't pass validation.
        """
        # server produced invalid data -> 500
        raise errors.InternalServerError(str(error))
def _create_object(self, data, request):
""" Create a python object from the given data.
This will use ``self.factory`` object's ``create()`` function to
create the data.
If no factory is defined, this will simply return the same data
that was given.
"""
if request.method.upper() == 'POST' and self.post_factory:
fac_func = self.post_factory.create
else:
fac_func = self.factory.create
if isinstance(data, (list, tuple)):
return map(fac_func, data)
else:
return fac_func(data)
def _serialize_object(self, response_data, request):
""" Create a python datatype from the given python object.
This will use ``self.factory`` object's ``serialize()`` function
to convert the object into dictionary.
If no factory is defined, this will simply return the same data
that was given.
:param response_data: data returned by the resource
"""
if not self.factory:
return response_data
if isinstance(response_data, (list, tuple)):
return map(
lambda item: self.factory.serialize(item, request),
response_data)
else:
return self.factory.serialize(response_data, request)
    def _get_unknown_error_response(self, request, exc):
        """ Generate HttpResponse for unknown exceptions.

        Called from ``__call__``'s except block, so the bare ``raise``
        below re-raises the original exception (DEBUG only).

        todo: this should be more informative..
        """
        logging.getLogger('devil').error(
            'while doing %s on %s with [%s], devil caught: %s' % (
                request.method, request.path_info, str(request.GET), str(exc)), exc_info=True)
        if settings.DEBUG:
            raise
        else:
            return HttpResponse(status=codes.INTERNAL_SERVER_ERROR[1])
def _get_error_response(self, exc):
""" Generate HttpResponse based on the HttpStatusCodeError. """
if exc.has_code(codes.UNAUTHORIZED):
return self._get_auth_challenge(exc)
else:
if exc.has_code(codes.INTERNAL_SERVER_ERROR):
logging.getLogger('devil').error('devil caught http error: ' + str(exc), exc_info=True)
else:
logging.getLogger('devil').error('devil caught http error: ' + str(exc))
content = exc.content or ''
return HttpResponse(content=content, status=exc.get_code_num())
def _get_auth_challenge(self, exc):
""" Returns HttpResponse for the client. """
response = HttpResponse(content=exc.content, status=exc.get_code_num())
response['WWW-Authenticate'] = 'Basic realm="%s"' % REALM
return response
def _get_method(self, request):
""" Figure out the requested method and return the callable. """
methodname = request.method.lower()
method = getattr(self, methodname, None)
if not method or not callable(method):
raise errors.MethodNotAllowed()
return method
def _is_data_method(self, request):
""" Return True, if request method is either PUT or POST """
return request.method.upper() in ('PUT', 'POST')
    def _authenticate(self, request):
        """ Perform authentication.

        Ensures ``request.user`` exists, runs the configured
        authentication handler (if any) and enforces the
        ``allow_anonymous`` policy: unauthenticated requests either get
        an ``AnonymousUser`` or the given error is raised.
        """

        def ensure_user_obj():
            """ Make sure that request object has user property.

            If `request.user` is not present or is `None`, it is
            created and initialized with `AnonymousUser`.
            """
            try:
                if request.user:
                    return
            except AttributeError:
                pass
            request.user = AnonymousUser()

        def anonymous_access(exc_obj):
            """ Determine what to do with unauthenticated requests.

            If the request has already been authenticated, does
            nothing.

            :param exc_obj: exception object to be thrown if anonymous
            access is not permitted.
            """
            if request.user and request.user.is_authenticated():
                # request is already authenticated
                pass
            elif self.allow_anonymous:
                request.user = AnonymousUser()
            else:
                raise exc_obj

        # first, make sure that the request carries `user` attribute
        ensure_user_obj()
        if self.authentication:
            # authentication handler is configured
            try:
                self.authentication.authenticate(request)
            except errors.Unauthorized, exc:
                # http request doesn't carry any authentication information
                anonymous_access(exc)
        else:
            # no authentication configured
            anonymous_access(errors.Forbidden())
def _check_permission(self, request):
""" Check user permissions.
:raises: Forbidden, if user doesn't have access to the resource.
"""
if self.access_controller:
self.access_controller.check_perm(request, self)
#
# resource.py ends here
| |
"""
Test thread states.
"""
from __future__ import print_function
import unittest2
import os
import time
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class ThreadStateTestCase(TestBase):
    """Exercise SBThread state transitions (stopped / running / suspended).

    Fix: the comprehensive test compared ``thread.GetState()`` against
    ``lldb.eStopReason*`` constants; stop reasons must be read with
    ``GetStopReason()`` (as the interrupt test already does correctly).
    """

    mydir = TestBase.compute_mydir(__file__)

    @expectedFailureAll(
        oslist=["linux"],
        bugnumber="llvm.org/pr15824 thread states not properly maintained")
    @skipIfDarwin  # llvm.org/pr15824 thread states not properly maintained and <rdar://problem/28557237>
    @expectedFailureAll(
        oslist=["freebsd"],
        bugnumber="llvm.org/pr18190 thread states not properly maintained")
    def test_state_after_breakpoint(self):
        """Test thread state after breakpoint."""
        self.build(dictionary=self.getBuildFlags(use_cpp11=False))
        self.thread_state_after_breakpoint_test()

    @skipIfDarwin  # 'llvm.org/pr23669', cause Python crash randomly
    @expectedFailureAll(
        oslist=lldbplatformutil.getDarwinOSTriples(),
        bugnumber="llvm.org/pr23669")
    @expectedFailureAll(oslist=["freebsd"], bugnumber="llvm.org/pr15824")
    @expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr24660")
    def test_state_after_continue(self):
        """Test thread state after continue."""
        self.build(dictionary=self.getBuildFlags(use_cpp11=False))
        self.thread_state_after_continue_test()

    @skipIfDarwin  # 'llvm.org/pr23669', cause Python crash randomly
    @expectedFailureDarwin('llvm.org/pr23669')
    @expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr24660")
    # thread states not properly maintained
    @unittest2.expectedFailure("llvm.org/pr16712")
    def test_state_after_expression(self):
        """Test thread state after expression."""
        self.build(dictionary=self.getBuildFlags(use_cpp11=False))
        self.thread_state_after_expression_test()

    # thread states not properly maintained
    @unittest2.expectedFailure("llvm.org/pr16712")
    @expectedFailureAll(
        oslist=["windows"],
        bugnumber="llvm.org/pr24668: Breakpoints not resolved correctly")
    @skipIfDarwin  # llvm.org/pr15824 thread states not properly maintained and <rdar://problem/28557237>
    def test_process_interrupt(self):
        """Test process interrupt."""
        self.build(dictionary=self.getBuildFlags(use_cpp11=False))
        self.process_interrupt_test()

    # thread states not properly maintained
    @unittest2.expectedFailure("llvm.org/pr15824 and <rdar://problem/28557237>")
    @expectedFailureAll(
        oslist=["windows"],
        bugnumber="llvm.org/pr24668: Breakpoints not resolved correctly")
    @skipIfDarwin  # llvm.org/pr15824 thread states not properly maintained and <rdar://problem/28557237>
    def test_process_state(self):
        """Test thread states (comprehensive)."""
        self.build(dictionary=self.getBuildFlags(use_cpp11=False))
        self.thread_states_test()

    def setUp(self):
        # Call super's setUp().
        TestBase.setUp(self)
        # Find the line numbers for our breakpoints.
        self.break_1 = line_number('main.cpp', '// Set first breakpoint here')
        self.break_2 = line_number('main.cpp', '// Set second breakpoint here')

    def thread_state_after_breakpoint_test(self):
        """Test thread state after breakpoint."""
        exe = self.getBuildArtifact("a.out")
        self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
        # This should create a breakpoint in the main thread.
        bp = lldbutil.run_break_set_by_file_and_line(
            self, "main.cpp", self.break_1, num_expected_locations=1)
        # Run the program.
        self.runCmd("run", RUN_SUCCEEDED)
        # Get the target process
        target = self.dbg.GetSelectedTarget()
        process = target.GetProcess()
        thread = lldbutil.get_stopped_thread(
            process, lldb.eStopReasonBreakpoint)
        self.assertIsNotNone(thread)
        # Make sure the thread is in the stopped state.
        self.assertTrue(
            thread.IsStopped(),
            "Thread state isn't \'stopped\' during breakpoint 1.")
        self.assertFalse(thread.IsSuspended(),
                         "Thread state is \'suspended\' during breakpoint 1.")
        # Kill the process
        self.runCmd("process kill")

    def wait_for_running_event(self, process):
        # Remote platforms emit an extra "connected" state before running.
        listener = self.dbg.GetListener()
        if lldb.remote_platform:
            lldbutil.expect_state_changes(
                self, listener, process, [
                    lldb.eStateConnected])
        lldbutil.expect_state_changes(
            self, listener, process, [
                lldb.eStateRunning])

    def thread_state_after_continue_test(self):
        """Test thread state after continue."""
        exe = self.getBuildArtifact("a.out")
        self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
        # This should create a breakpoint in the main thread.
        lldbutil.run_break_set_by_file_and_line(
            self, "main.cpp", self.break_1, num_expected_locations=1)
        lldbutil.run_break_set_by_file_and_line(
            self, "main.cpp", self.break_2, num_expected_locations=1)
        # Run the program.
        self.runCmd("run", RUN_SUCCEEDED)
        # Get the target process
        target = self.dbg.GetSelectedTarget()
        process = target.GetProcess()
        thread = lldbutil.get_stopped_thread(
            process, lldb.eStopReasonBreakpoint)
        self.assertIsNotNone(thread)
        # Continue, the inferior will go into an infinite loop waiting for
        # 'g_test' to change.
        self.dbg.SetAsync(True)
        self.runCmd("continue")
        self.wait_for_running_event(process)
        # Check the thread state. It should be running.
        self.assertFalse(
            thread.IsStopped(),
            "Thread state is \'stopped\' when it should be running.")
        self.assertFalse(
            thread.IsSuspended(),
            "Thread state is \'suspended\' when it should be running.")
        # Go back to synchronous interactions
        self.dbg.SetAsync(False)
        # Kill the process
        self.runCmd("process kill")

    def thread_state_after_expression_test(self):
        """Test thread state after expression."""
        exe = self.getBuildArtifact("a.out")
        self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
        # This should create a breakpoint in the main thread.
        lldbutil.run_break_set_by_file_and_line(
            self, "main.cpp", self.break_1, num_expected_locations=1)
        lldbutil.run_break_set_by_file_and_line(
            self, "main.cpp", self.break_2, num_expected_locations=1)
        # Run the program.
        self.runCmd("run", RUN_SUCCEEDED)
        # Get the target process
        target = self.dbg.GetSelectedTarget()
        process = target.GetProcess()
        thread = lldbutil.get_stopped_thread(
            process, lldb.eStopReasonBreakpoint)
        self.assertIsNotNone(thread)
        # Get the inferior out of its loop
        self.runCmd("expression g_test = 1")
        # Check the thread state
        self.assertTrue(
            thread.IsStopped(),
            "Thread state isn't \'stopped\' after expression evaluation.")
        self.assertFalse(
            thread.IsSuspended(),
            "Thread state is \'suspended\' after expression evaluation.")
        # Let the process run to completion
        self.runCmd("process continue")

    def process_interrupt_test(self):
        """Test process interrupt and continue."""
        exe = self.getBuildArtifact("a.out")
        self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
        # This should create a breakpoint in the main thread.
        lldbutil.run_break_set_by_file_and_line(
            self, "main.cpp", self.break_1, num_expected_locations=1)
        # Run the program.
        self.runCmd("run", RUN_SUCCEEDED)
        # Get the target process
        target = self.dbg.GetSelectedTarget()
        process = target.GetProcess()
        thread = lldbutil.get_stopped_thread(
            process, lldb.eStopReasonBreakpoint)
        self.assertIsNotNone(thread)
        # Continue, the inferior will go into an infinite loop waiting for
        # 'g_test' to change.
        self.dbg.SetAsync(True)
        self.runCmd("continue")
        self.wait_for_running_event(process)
        # Go back to synchronous interactions
        self.dbg.SetAsync(False)
        # Stop the process
        self.runCmd("process interrupt")
        self.assertEqual(thread.GetStopReason(), lldb.eStopReasonSignal)
        # Get the inferior out of its loop
        self.runCmd("expression g_test = 1")
        # Run to completion
        self.runCmd("continue")

    def thread_states_test(self):
        """Test thread states (comprehensive)."""
        exe = self.getBuildArtifact("a.out")
        self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
        # This should create a breakpoint in the main thread.
        lldbutil.run_break_set_by_file_and_line(
            self, "main.cpp", self.break_1, num_expected_locations=1)
        lldbutil.run_break_set_by_file_and_line(
            self, "main.cpp", self.break_2, num_expected_locations=1)
        # Run the program.
        self.runCmd("run", RUN_SUCCEEDED)
        # Get the target process
        target = self.dbg.GetSelectedTarget()
        process = target.GetProcess()
        thread = lldbutil.get_stopped_thread(
            process, lldb.eStopReasonBreakpoint)
        self.assertIsNotNone(thread)
        # Make sure the thread is in the stopped state.
        self.assertTrue(
            thread.IsStopped(),
            "Thread state isn't \'stopped\' during breakpoint 1.")
        self.assertFalse(thread.IsSuspended(),
                         "Thread state is \'suspended\' during breakpoint 1.")
        # Continue, the inferior will go into an infinite loop waiting for
        # 'g_test' to change.
        self.dbg.SetAsync(True)
        self.runCmd("continue")
        self.wait_for_running_event(process)
        # Check the thread state. It should be running.
        self.assertFalse(
            thread.IsStopped(),
            "Thread state is \'stopped\' when it should be running.")
        self.assertFalse(
            thread.IsSuspended(),
            "Thread state is \'suspended\' when it should be running.")
        # Go back to synchronous interactions
        self.dbg.SetAsync(False)
        # Stop the process
        self.runCmd("process interrupt")
        # Fixed: query the stop reason, not the (process-level) state.
        self.assertEqual(thread.GetStopReason(), lldb.eStopReasonSignal)
        # Check the thread state
        self.assertTrue(
            thread.IsStopped(),
            "Thread state isn't \'stopped\' after process stop.")
        self.assertFalse(thread.IsSuspended(),
                         "Thread state is \'suspended\' after process stop.")
        # Get the inferior out of its loop
        self.runCmd("expression g_test = 1")
        # Check the thread state
        self.assertTrue(
            thread.IsStopped(),
            "Thread state isn't \'stopped\' after expression evaluation.")
        self.assertFalse(
            thread.IsSuspended(),
            "Thread state is \'suspended\' after expression evaluation.")
        # Fixed: query the stop reason, not the state.
        self.assertEqual(thread.GetStopReason(), lldb.eStopReasonSignal)
        # Run to breakpoint 2
        self.runCmd("continue")
        # Fixed: query the stop reason, not the state.
        self.assertEqual(thread.GetStopReason(), lldb.eStopReasonBreakpoint)
        # Make sure the thread is stopped at the breakpoint.
        self.assertTrue(
            thread.IsStopped(),
            "Thread state isn't \'stopped\' during breakpoint 2.")
        self.assertFalse(thread.IsSuspended(),
                         "Thread state is \'suspended\' during breakpoint 2.")
        # Run to completion
        self.runCmd("continue")
        # At this point, the inferior process should have exited.
        self.assertEqual(process.GetState(), lldb.eStateExited, PROCESS_EXITED)
| |
"""
Support for Alexa skill service end point.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/alexa/
"""
import asyncio
import copy
import enum
import logging
import uuid
from datetime import datetime
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.const import HTTP_BAD_REQUEST
from homeassistant.helpers import template, script, config_validation as cv
from homeassistant.components.http import HomeAssistantView
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)

INTENTS_API_ENDPOINT = '/api/alexa'
FLASH_BRIEFINGS_API_ENDPOINT = '/api/alexa/flash_briefings/{briefing_id}'

# Configuration keys (duplicate definitions of CONF_TITLE and CONF_TEXT
# were removed; the values were identical).
CONF_ACTION = 'action'
CONF_CARD = 'card'
CONF_INTENTS = 'intents'
CONF_SPEECH = 'speech'
CONF_TYPE = 'type'
CONF_TITLE = 'title'
CONF_CONTENT = 'content'
CONF_TEXT = 'text'
CONF_FLASH_BRIEFINGS = 'flash_briefings'
CONF_UID = 'uid'
CONF_DATE = 'date'
CONF_AUDIO = 'audio'
CONF_DISPLAY_URL = 'display_url'

# Keys of the JSON payload returned to the Alexa service.
ATTR_UID = 'uid'
ATTR_UPDATE_DATE = 'updateDate'
ATTR_TITLE_TEXT = 'titleText'
ATTR_STREAM_URL = 'streamUrl'
ATTR_MAIN_TEXT = 'mainText'
ATTR_REDIRECTION_URL = 'redirectionURL'

# Timestamp format used for the flash-briefing updateDate field.
DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.0Z'

DOMAIN = 'alexa'
DEPENDENCIES = ['http']
class SpeechType(enum.Enum):
    """The Alexa speech types."""

    # Values are the literal strings Alexa expects in outputSpeech.type.
    plaintext = "PlainText"
    ssml = "SSML"
class CardType(enum.Enum):
    """The Alexa card types."""

    # Values are the literal strings Alexa expects in card.type.
    simple = "Simple"
    link_account = "LinkAccount"
# Validated layout of the `alexa:` section of configuration.yaml.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: {
        CONF_INTENTS: {
            # One entry per intent name, each optionally defining an
            # action script, a card and a speech response.
            cv.string: {
                vol.Optional(CONF_ACTION): cv.SCRIPT_SCHEMA,
                vol.Optional(CONF_CARD): {
                    vol.Required(CONF_TYPE): cv.enum(CardType),
                    vol.Required(CONF_TITLE): cv.template,
                    vol.Required(CONF_CONTENT): cv.template,
                },
                vol.Optional(CONF_SPEECH): {
                    vol.Required(CONF_TYPE): cv.enum(SpeechType),
                    vol.Required(CONF_TEXT): cv.template,
                }
            }
        },
        CONF_FLASH_BRIEFINGS: {
            cv.string: vol.All(cv.ensure_list, [{
                # NOTE(review): these defaults are evaluated once, when the
                # module is imported — every item that omits `uid` shares
                # the same generated UUID, and `date` defaults to import
                # time rather than request time. Consider callable
                # defaults if per-item values are intended.
                vol.Required(CONF_UID, default=str(uuid.uuid4())): cv.string,
                vol.Optional(CONF_DATE, default=datetime.utcnow()): cv.string,
                vol.Required(CONF_TITLE): cv.template,
                vol.Optional(CONF_AUDIO): cv.template,
                vol.Required(CONF_TEXT, default=""): cv.template,
                vol.Optional(CONF_DISPLAY_URL): cv.template,
            }]),
        }
    }
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
    """Activate Alexa component: register intent and briefing views."""
    domain_conf = config[DOMAIN]
    intents_conf = domain_conf.get(CONF_INTENTS, {})
    briefings_conf = domain_conf.get(CONF_FLASH_BRIEFINGS, {})
    hass.http.register_view(AlexaIntentsView(hass, intents_conf))
    hass.http.register_view(AlexaFlashBriefingView(hass, briefings_conf))
    return True
class AlexaIntentsView(HomeAssistantView):
    """Handle Alexa requests."""

    url = INTENTS_API_ENDPOINT
    name = 'api:alexa'

    def __init__(self, hass, intents):
        """Initialize Alexa view.

        Deep-copies the intent config so the in-place Script substitution
        below cannot mutate the caller's dict.
        """
        super().__init__()
        intents = copy.deepcopy(intents)
        template.attach(hass, intents)
        for name, intent in intents.items():
            if CONF_ACTION in intent:
                # Replace the raw action config with a ready-to-run Script.
                intent[CONF_ACTION] = script.Script(
                    hass, intent[CONF_ACTION], "Alexa intent {}".format(name))
        self.intents = intents

    @asyncio.coroutine
    def post(self, request):
        """Handle Alexa.

        Dispatches on the request type: session end (no payload), launch
        (greeting), and intent requests (configured action/speech/card).
        Anything else is answered with HTTP 400.
        """
        data = yield from request.json()
        _LOGGER.debug('Received Alexa request: %s', data)
        req = data.get('request')
        if req is None:
            _LOGGER.error('Received invalid data from Alexa: %s', data)
            return self.json_message('Expected request value not received',
                                     HTTP_BAD_REQUEST)
        req_type = req['type']
        # Session end requires no response body.
        if req_type == 'SessionEndedRequest':
            return None
        intent = req.get('intent')
        response = AlexaResponse(request.app['hass'], intent)
        if req_type == 'LaunchRequest':
            response.add_speech(
                SpeechType.plaintext,
                "Hello, and welcome to the future. How may I help?")
            return self.json(response)
        if req_type != 'IntentRequest':
            _LOGGER.warning('Received unsupported request: %s', req_type)
            return self.json_message(
                'Received unsupported request: {}'.format(req_type),
                HTTP_BAD_REQUEST)
        intent_name = intent['name']
        config = self.intents.get(intent_name)
        if config is None:
            _LOGGER.warning('Received unknown intent %s', intent_name)
            response.add_speech(
                SpeechType.plaintext,
                "This intent is not yet configured within Home Assistant.")
            return self.json(response)
        speech = config.get(CONF_SPEECH)
        card = config.get(CONF_CARD)
        action = config.get(CONF_ACTION)
        # Run the configured action (if any) before building speech/card.
        if action is not None:
            yield from action.async_run(response.variables)
        # pylint: disable=unsubscriptable-object
        if speech is not None:
            response.add_speech(speech[CONF_TYPE], speech[CONF_TEXT])
        if card is not None:
            response.add_card(card[CONF_TYPE], card[CONF_TITLE],
                              card[CONF_CONTENT])
        return self.json(response)
class AlexaResponse(object):
    """Builder for an Alexa-formatted JSON response payload."""

    def __init__(self, hass, intent=None):
        """Start an empty response; harvest slot values from *intent*."""
        self.hass = hass
        self.speech = None
        self.card = None
        self.reprompt = None
        self.session_attributes = {}
        self.should_end_session = True
        # Collect slot values (slots without a 'value' key are skipped)
        # so templates can reference them by slot name.
        self.variables = {}
        if intent is not None:
            for slot_name, slot in intent.get('slots', {}).items():
                if 'value' in slot:
                    self.variables[slot_name] = slot['value']

    def add_card(self, card_type, title, content):
        """Attach a card; at most one card per response."""
        assert self.card is None
        new_card = {"type": card_type.value}
        # Link-account cards carry no title/content.
        if card_type != CardType.link_account:
            new_card["title"] = title.async_render(self.variables)
            new_card["content"] = content.async_render(self.variables)
        self.card = new_card

    def add_speech(self, speech_type, text):
        """Attach the spoken answer; at most one per response."""
        assert self.speech is None
        if isinstance(text, template.Template):
            text = text.async_render(self.variables)
        field = 'ssml' if speech_type == SpeechType.ssml else 'text'
        self.speech = {'type': speech_type.value, field: text}

    def add_reprompt(self, speech_type, text):
        """Attach a reprompt played when the user does not answer."""
        assert self.reprompt is None
        field = 'ssml' if speech_type == SpeechType.ssml else 'text'
        self.reprompt = {
            'type': speech_type.value,
            field: text.async_render(self.variables),
        }

    def as_dict(self):
        """Return the response as an Alexa-valid dict."""
        body = {'shouldEndSession': self.should_end_session}
        if self.card is not None:
            body['card'] = self.card
        if self.speech is not None:
            body['outputSpeech'] = self.speech
        if self.reprompt is not None:
            body['reprompt'] = {'outputSpeech': self.reprompt}
        return {
            'version': '1.0',
            'sessionAttributes': self.session_attributes,
            'response': body,
        }
class AlexaFlashBriefingView(HomeAssistantView):
    """Handle Alexa Flash Briefing skill requests."""

    url = FLASH_BRIEFINGS_API_ENDPOINT
    name = 'api:alexa:flash_briefings'

    def __init__(self, hass, flash_briefings):
        """Initialize Alexa view.

        Deep-copies the config so template attachment cannot mutate the
        caller's dict.
        """
        super().__init__()
        self.flash_briefings = copy.deepcopy(flash_briefings)
        template.attach(hass, self.flash_briefings)

    @callback
    def get(self, request, briefing_id):
        """Handle Alexa Flash Briefing request.

        Returns 404 for unknown briefing ids; otherwise builds one output
        dict per configured item, rendering template values and copying
        plain values through unchanged.
        """
        _LOGGER.debug('Received Alexa flash briefing request for: %s',
                      briefing_id)
        if self.flash_briefings.get(briefing_id) is None:
            err = 'No configured Alexa flash briefing was found for: %s'
            _LOGGER.error(err, briefing_id)
            return b'', 404
        briefing = []
        for item in self.flash_briefings.get(briefing_id, []):
            output = {}
            if item.get(CONF_TITLE) is not None:
                if isinstance(item.get(CONF_TITLE), template.Template):
                    output[ATTR_TITLE_TEXT] = item[CONF_TITLE].async_render()
                else:
                    output[ATTR_TITLE_TEXT] = item.get(CONF_TITLE)
            if item.get(CONF_TEXT) is not None:
                if isinstance(item.get(CONF_TEXT), template.Template):
                    output[ATTR_MAIN_TEXT] = item[CONF_TEXT].async_render()
                else:
                    output[ATTR_MAIN_TEXT] = item.get(CONF_TEXT)
            if item.get(CONF_UID) is not None:
                output[ATTR_UID] = item.get(CONF_UID)
            if item.get(CONF_AUDIO) is not None:
                if isinstance(item.get(CONF_AUDIO), template.Template):
                    output[ATTR_STREAM_URL] = item[CONF_AUDIO].async_render()
                else:
                    output[ATTR_STREAM_URL] = item.get(CONF_AUDIO)
            if item.get(CONF_DISPLAY_URL) is not None:
                if isinstance(item.get(CONF_DISPLAY_URL),
                              template.Template):
                    output[ATTR_REDIRECTION_URL] = \
                        item[CONF_DISPLAY_URL].async_render()
                else:
                    output[ATTR_REDIRECTION_URL] = item.get(CONF_DISPLAY_URL)
            # The date may arrive as an ISO string; normalize to datetime
            # before applying the Alexa-required format.
            if isinstance(item[CONF_DATE], str):
                item[CONF_DATE] = dt_util.parse_datetime(item[CONF_DATE])
            output[ATTR_UPDATE_DATE] = item[CONF_DATE].strftime(DATE_FORMAT)
            briefing.append(output)
        return self.json(briefing)
| |
# -*- coding: utf-8 -*-
"""
flask.ext.cache
~~~~~~~~~~~~~~
Adds cache support to your application.
:copyright: (c) 2010 by Thadeus Burgess.
:license: BSD, see LICENSE for more details
"""
__version__ = '0.14'
__versionfull__ = __version__
import base64
import functools
import hashlib
import inspect
import logging
import string
import uuid
import warnings
from werkzeug import import_string
from flask import request, current_app
from ._compat import PY2
logger = logging.getLogger(__name__)

TEMPLATE_FRAGMENT_KEY_TEMPLATE = '_template_fragment_cache_%s%s'

# Characters allowed in cache keys; everything else (not just control
# characters and whitespace) is stripped.
valid_chars = set(string.ascii_letters + string.digits + '_.')
delchars = ''.join(c for c in map(chr, range(256)) if c not in valid_chars)
# str.translate takes different arguments on Python 2 (a deletechars
# string) and Python 3 (a mapping to None), so precompute the right
# argument tuple once.
if PY2:
    null_control = (None, delchars)
else:
    null_control = (dict((k,None) for k in delchars),)
def function_namespace(f, args=None):
    """
    Attempts to return a unique namespace for a function or method.

    Returns a ``(namespace, instance_namespace)`` tuple. The second item
    is None unless *f* looks like an instance method, in which case it
    embeds ``repr(instance)`` so per-instance cache entries stay distinct.
    """
    try:
        # inspect.getargspec was removed in Python 3.11; prefer the
        # modern API and fall back for Python 2.
        m_args = inspect.getfullargspec(f)[0]
    except AttributeError:  # Python 2 fallback
        m_args = inspect.getargspec(f)[0]
    instance_token = None
    instance_self = getattr(f, '__self__', None)
    if instance_self \
       and not inspect.isclass(instance_self):
        instance_token = repr(f.__self__)
    elif m_args \
            and m_args[0] == 'self' \
            and args:
        instance_token = repr(args[0])
    module = f.__module__
    if hasattr(f, '__qualname__'):
        name = f.__qualname__
    else:
        # Python 2: reconstruct a qualified name from the owning class.
        klass = getattr(f, '__self__', None)
        if klass \
           and not inspect.isclass(klass):
            klass = klass.__class__
        if not klass:
            klass = getattr(f, 'im_class', None)
        if not klass:
            if m_args and args:
                if m_args[0] == 'self':
                    klass = args[0].__class__
                elif m_args[0] == 'cls':
                    klass = args[0]
        if klass:
            name = klass.__name__ + '.' + f.__name__
        else:
            name = f.__name__
    # Strip characters that are not safe in cache keys.
    ns = '.'.join((module, name))
    ns = ns.translate(*null_control)
    if instance_token:
        ins = '.'.join((module, name, instance_token))
        ins = ins.translate(*null_control)
    else:
        ins = None
    return ns, ins
def make_template_fragment_key(fragment_name, vary_on=()):
    """
    Make a cache key for a specific fragment name.

    :param vary_on: optional sequence of strings the fragment varies on.
        The default is an immutable tuple, replacing the previous mutable
        ``[]`` default (a shared-mutable-default hazard); callers passing
        a list are unaffected.
    """
    if vary_on:
        fragment_name = "%s_" % fragment_name
    return TEMPLATE_FRAGMENT_KEY_TEMPLATE % (fragment_name, "_".join(vary_on))
#: Cache Object
################
class Cache(object):
"""
This class is used to control the cache objects.
"""
def __init__(self, app=None, with_jinja2_ext=True, config=None):
if not (config is None or isinstance(config, dict)):
raise ValueError("`config` must be an instance of dict or None")
self.with_jinja2_ext = with_jinja2_ext
self.config = config
self.app = app
if app is not None:
self.init_app(app, config)
def init_app(self, app, config=None):
    """Initialize the cache for *app*, merging app / instance / call config."""
    if config is not None and not isinstance(config, dict):
        raise ValueError("`config` must be an instance of dict or None")
    #: Ref PR #44.
    #: Do not set self.app in the case a single instance of the Cache
    #: object is being used for multiple app instances.
    #: Example use case would be Cache shipped as part of a blueprint
    #: or utility library.
    merged = app.config.copy()
    for overlay in (self.config, config):
        if overlay:
            merged.update(overlay)
    config = merged
    # Fill in every unset cache option with its documented default.
    defaults = {
        'CACHE_DEFAULT_TIMEOUT': 300,
        'CACHE_THRESHOLD': 500,
        'CACHE_KEY_PREFIX': 'flask_cache_',
        'CACHE_MEMCACHED_SERVERS': None,
        'CACHE_DIR': None,
        'CACHE_OPTIONS': None,
        'CACHE_ARGS': [],
        'CACHE_TYPE': 'null',
        'CACHE_NO_NULL_WARNING': False,
    }
    for option, default in defaults.items():
        config.setdefault(option, default)
    if config['CACHE_TYPE'] == 'null' and not config['CACHE_NO_NULL_WARNING']:
        warnings.warn("Flask-Cache: CACHE_TYPE is set to null, "
                      "caching is effectively disabled.")
    if self.with_jinja2_ext:
        from .jinja2ext import CacheExtension, JINJA_CACHE_ATTR_NAME
        setattr(app.jinja_env, JINJA_CACHE_ATTR_NAME, self)
        app.jinja_env.add_extension(CacheExtension)
    self._set_cache(app, config)
def _set_cache(self, app, config):
    # Resolve the backend: a bare name is looked up in the bundled
    # `backends` module, a dotted path is imported as-is (which allows
    # third-party backends).
    import_me = config['CACHE_TYPE']
    if '.' not in import_me:
        from . import backends
        try:
            cache_obj = getattr(backends, import_me)
        except AttributeError:
            raise ImportError("%s is not a valid FlaskCache backend" % (
                              import_me))
    else:
        cache_obj = import_string(import_me)
    # Copy CACHE_ARGS so later mutation of the configured list cannot
    # leak into this cache instance.
    cache_args = config['CACHE_ARGS'][:]
    cache_options = {'default_timeout': config['CACHE_DEFAULT_TIMEOUT']}
    if config['CACHE_OPTIONS']:
        cache_options.update(config['CACHE_OPTIONS'])
    if not hasattr(app, 'extensions'):
        app.extensions = {}
    # One backend object per (app, Cache instance) pair, keyed on self.
    app.extensions.setdefault('cache', {})
    app.extensions['cache'][self] = cache_obj(
        app, config, cache_args, cache_options)
@property
def cache(self):
    # Backend bound to the active app: the explicitly bound self.app if
    # set, otherwise Flask's current_app (requires an app context).
    app = self.app or current_app
    return app.extensions['cache'][self]
def get(self, *args, **kwargs):
    """Proxy for the backend's ``get``; returns the cached value or None."""
    return self.cache.get(*args, **kwargs)
def set(self, *args, **kwargs):
    """Proxy for the backend's ``set``; stores a value under a key."""
    self.cache.set(*args, **kwargs)
def add(self, *args, **kwargs):
    """Proxy for the backend's ``add``; stores only if the key is absent."""
    self.cache.add(*args, **kwargs)
def delete(self, *args, **kwargs):
    """Proxy for the backend's ``delete``; removes a single key."""
    self.cache.delete(*args, **kwargs)
def delete_many(self, *args, **kwargs):
    """Proxy for the backend's ``delete_many``; removes several keys."""
    self.cache.delete_many(*args, **kwargs)
def clear(self):
    """Proxy for the backend's ``clear``; wipes the whole cache."""
    self.cache.clear()
def get_many(self, *args, **kwargs):
    """Proxy for the backend's ``get_many``; returns values for many keys."""
    return self.cache.get_many(*args, **kwargs)
def set_many(self, *args, **kwargs):
    """Proxy for the backend's ``set_many``; stores a mapping of keys."""
    self.cache.set_many(*args, **kwargs)
def cached(self, timeout=None, key_prefix='view/%s', unless=None):
"""
Decorator. Use this to cache a function. By default the cache key
is `view/request.path`. You are able to use this decorator with any
function by changing the `key_prefix`. If the token `%s` is located
within the `key_prefix` then it will replace that with `request.path`
Example::
# An example view function
@cache.cached(timeout=50)
def big_foo():
return big_bar_calc()
# An example misc function to cache.
@cache.cached(key_prefix='MyCachedList')
def get_list():
return [random.randrange(0, 1) for i in range(50000)]
my_list = get_list()
.. note::
You MUST have a request context to actually called any functions
that are cached.
.. versionadded:: 0.4
The returned decorated function now has three function attributes
assigned to it. These attributes are readable/writable.
**uncached**
The original undecorated function
**cache_timeout**
The cache timeout value for this function. For a custom value
to take affect, this must be set before the function is called.
**make_cache_key**
A function used in generating the cache_key used.
:param timeout: Default None. If set to an integer, will cache for that
amount of time. Unit of time is in seconds.
:param key_prefix: Default 'view/%(request.path)s'. Beginning key to .
use for the cache key.
.. versionadded:: 0.3.4
Can optionally be a callable which takes no arguments
but returns a string that will be used as the cache_key.
:param unless: Default None. Cache will *always* execute the caching
facilities unless this callable is true.
This will bypass the caching entirely.
"""
def decorator(f):
@functools.wraps(f)
def decorated_function(*args, **kwargs):
#: Bypass the cache entirely.
if callable(unless) and unless() is True:
return f(*args, **kwargs)
try:
cache_key = decorated_function.make_cache_key(*args, **kwargs)
rv = self.cache.get(cache_key)
except Exception:
if current_app.debug:
raise
logger.exception("Exception possibly due to cache backend.")
return f(*args, **kwargs)
if rv is None:
rv = f(*args, **kwargs)
try:
self.cache.set(cache_key, rv,
timeout=decorated_function.cache_timeout)
except Exception:
if current_app.debug:
raise
logger.exception("Exception possibly due to cache backend.")
return f(*args, **kwargs)
return rv
def make_cache_key(*args, **kwargs):
if callable(key_prefix):
cache_key = key_prefix()
elif '%s' in key_prefix:
cache_key = key_prefix % request.path
else:
cache_key = key_prefix
return cache_key
decorated_function.uncached = f
decorated_function.cache_timeout = timeout
decorated_function.make_cache_key = make_cache_key
return decorated_function
return decorator
def _memvname(self, funcname):
return funcname + '_memver'
def _memoize_make_version_hash(self):
return base64.b64encode(uuid.uuid4().bytes)[:6].decode('utf-8')
def _memoize_version(self, f, args=None,
                     reset=False, delete=False, timeout=None):
    """
    Updates the hash version associated with a memoized function or method.

    Returns ``(fname, joined_version_string_or_None)``. Version hashes are
    created lazily the first time a function (and, for methods, each
    instance) is seen; `reset` regenerates and `delete` removes the most
    specific one.
    """
    fname, instance_fname = function_namespace(f, args=args)
    version_key = self._memvname(fname)
    fetch_keys = [version_key]
    if instance_fname:
        instance_version_key = self._memvname(instance_fname)
        fetch_keys.append(instance_version_key)
    # Only delete the per-instance version key or per-function version
    # key but not both.
    if delete:
        self.cache.delete_many(fetch_keys[-1])
        return fname, None
    version_data_list = list(self.cache.get_many(*fetch_keys))
    dirty = False
    # Lazily create missing version hashes and remember to persist them.
    if version_data_list[0] is None:
        version_data_list[0] = self._memoize_make_version_hash()
        dirty = True
    if instance_fname and version_data_list[1] is None:
        version_data_list[1] = self._memoize_make_version_hash()
        dirty = True
    # Only reset the per-instance version or the per-function version
    # but not both.
    if reset:
        fetch_keys = fetch_keys[-1:]
        version_data_list = [self._memoize_make_version_hash()]
        dirty = True
    if dirty:
        self.cache.set_many(dict(zip(fetch_keys, version_data_list)),
                            timeout=timeout)
    return fname, ''.join(version_data_list)
def _memoize_make_cache_key(self, make_name=None, timeout=None):
    """
    Function used to create the cache_key for memoized functions.
    """
    def make_cache_key(f, *args, **kwargs):
        # `timeout` may be the decorated function itself (memoize passes
        # it in), in which case its cache_timeout attribute is used.
        _timeout = getattr(timeout, 'cache_timeout', timeout)
        fname, version_data = self._memoize_version(f, args=args,
                                                    timeout=_timeout)
        #: this should have to be after version_data, so that it
        #: does not break the delete_memoized functionality.
        if callable(make_name):
            altfname = make_name(fname)
        else:
            altfname = fname
        # Normalize keyword/positional argument order so equivalent calls
        # map to the same key.
        if callable(f):
            keyargs, keykwargs = self._memoize_kwargs_to_args(f,
                                                              *args,
                                                              **kwargs)
        else:
            keyargs, keykwargs = args, kwargs
        try:
            updated = u"{0}{1}{2}".format(altfname, keyargs, keykwargs)
        except AttributeError:
            updated = u"%s%s%s" % (altfname, keyargs, keykwargs)
        # Hash the (name, args) blob down to a short key, then append the
        # version hash so delete_memoized can invalidate whole groups.
        cache_key = hashlib.md5()
        cache_key.update(updated.encode('utf-8'))
        cache_key = base64.b64encode(cache_key.digest())[:16]
        cache_key = cache_key.decode('utf-8')
        cache_key += version_data
        return cache_key
    return make_cache_key
def _memoize_kwargs_to_args(self, f, *args, **kwargs):
#: Inspect the arguments to the function
#: This allows the memoization to be the same
#: whether the function was called with
#: 1, b=2 is equivilant to a=1, b=2, etc.
new_args = []
arg_num = 0
argspec = inspect.getargspec(f)
args_len = len(argspec.args)
for i in range(args_len):
if i == 0 and argspec.args[i] in ('self', 'cls'):
#: use the repr of the class instance
#: this supports instance methods for
#: the memoized functions, giving more
#: flexibility to developers
arg = repr(args[0])
arg_num += 1
elif argspec.args[i] in kwargs:
arg = kwargs[argspec.args[i]]
elif arg_num < len(args):
arg = args[arg_num]
arg_num += 1
elif abs(i-args_len) <= len(argspec.defaults):
arg = argspec.defaults[i-args_len]
arg_num += 1
else:
arg = None
arg_num += 1
#: Attempt to convert all arguments to a
#: hash/id or a representation?
#: Not sure if this is necessary, since
#: using objects as keys gets tricky quickly.
# if hasattr(arg, '__class__'):
# try:
# arg = hash(arg)
# except:
# arg = repr(arg)
#: Or what about a special __cacherepr__ function
#: on an object, this allows objects to act normal
#: upon inspection, yet they can define a representation
#: that can be used to make the object unique in the
#: cache key. Given that a case comes across that
#: an object "must" be used as a cache key
# if hasattr(arg, '__cacherepr__'):
# arg = arg.__cacherepr__
new_args.append(arg)
return tuple(new_args), {}
def memoize(self, timeout=None, make_name=None, unless=None):
    """Decorator that caches a function's return value, keyed on its
    arguments.

    Information on
    `Memoization <http://en.wikipedia.org/wiki/Memoization>`_.

    Example::

        @cache.memoize(timeout=50)
        def big_foo(a, b):
            return a + b + random.randrange(0, 1000)

    The decorated function gains four attributes:

    **uncached**
        The original undecorated function (read only).
    **cache_timeout**
        The timeout used for this function; assign to it before the
        function is called to override (read/write).
    **make_cache_key**
        The function used to build the cache key (read/write).
    **delete_memoized**
        Callable that clears this function's cached results.

    NOTE: a ``None`` return value is indistinguishable from a cache
    miss, so ``None`` results are recomputed on every call.

    :param timeout: Default None. If set to an integer, cache for that
                    number of seconds.
    :param make_name: Default None. A callable that receives the
                      function name and returns the name to use when
                      building the cache key; the function name is used
                      when not set.
    :param unless: Default None. A callable; when it returns True the
                   cache is bypassed entirely.

    .. versionadded:: 0.4
    .. versionadded:: 0.5
        params ``make_name``, ``unless``
    """
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            # Caller asked to skip caching for this invocation.
            if callable(unless) and unless() is True:
                return f(*args, **kwargs)
            try:
                key = wrapper.make_cache_key(f, *args, **kwargs)
                rv = self.cache.get(key)
            except Exception:
                # Backend trouble: surface it in debug, otherwise fall
                # back to calling the function directly.
                if current_app.debug:
                    raise
                logger.exception("Exception possibly due to cache backend.")
                return f(*args, **kwargs)
            if rv is not None:
                return rv
            # Cache miss: compute and store (best effort).
            rv = f(*args, **kwargs)
            try:
                self.cache.set(key, rv,
                               timeout=wrapper.cache_timeout)
            except Exception:
                if current_app.debug:
                    raise
                logger.exception("Exception possibly due to cache backend.")
            return rv
        wrapper.uncached = f
        wrapper.cache_timeout = timeout
        wrapper.make_cache_key = self._memoize_make_cache_key(
            make_name, wrapper)
        wrapper.delete_memoized = lambda: self.delete_memoized(f)
        return wrapper
    return decorator
def delete_memoized(self, f, *args, **kwargs):
    # Raw docstring: \* and \** are not valid string escapes and trigger
    # SyntaxWarning/DeprecationWarning on modern Python otherwise.
    r"""
    Deletes the specified functions caches, based on given parameters.
    If parameters are given, only the functions that were memoized with them
    will be erased. Otherwise all versions of the caches will be forgotten.
    Example::
        @cache.memoize(50)
        def random_func():
            return random.randrange(1, 50)
        @cache.memoize()
        def param_func(a, b):
            return a+b+random.randrange(1, 50)
    .. code-block:: pycon
        >>> random_func()
        43
        >>> random_func()
        43
        >>> cache.delete_memoized('random_func')
        >>> random_func()
        16
        >>> param_func(1, 2)
        32
        >>> param_func(1, 2)
        32
        >>> param_func(2, 2)
        47
        >>> cache.delete_memoized('param_func', 1, 2)
        >>> param_func(1, 2)
        13
        >>> param_func(2, 2)
        47
    Delete memoized is also smart about instance methods vs class methods.
    When passing a instancemethod, it will only clear the cache related
    to that instance of that object. (object uniqueness can be overridden
    by defining the __repr__ method, such as user id).
    When passing a classmethod, it will clear all caches related across
    all instances of that class.
    Example::
        class Adder(object):
            @cache.memoize()
            def add(self, b):
                return b + random.random()
    .. code-block:: pycon
        >>> adder1 = Adder()
        >>> adder2 = Adder()
        >>> adder1.add(3)
        3.23214234
        >>> adder2.add(3)
        3.60898509
        >>> cache.delete_memoized(adder1.add)
        >>> adder1.add(3)
        3.01348673
        >>> adder2.add(3)
        3.60898509
        >>> cache.delete_memoized(Adder.add)
        >>> adder1.add(3)
        3.53235667
        >>> adder2.add(3)
        3.72341788
    :param fname: Name of the memoized function, or a reference to the function.
    :param \*args: A list of positional parameters used with memoized function.
    :param \**kwargs: A dict of named parameters used with memoized function.
    .. note::
        Flask-Cache uses inspect to order kwargs into positional args when
        the function is memoized. If you pass a function reference into ``fname``
        instead of the function name, Flask-Cache will be able to place
        the args/kwargs in the proper order, and delete the positional cache.
        However, if ``delete_memoized`` is just called with the name of the
        function, be sure to pass in potential arguments in the same order
        as defined in your function as args only, otherwise Flask-Cache
        will not be able to compute the same cache key.
    .. note::
        Flask-Cache maintains an internal random version hash for the function.
        Using delete_memoized will only swap out the version hash, causing
        the memoize function to recompute results and put them into another key.
        This leaves any computed caches for this memoized function within the
        caching backend.
        It is recommended to use a very high timeout with memoize if using
        this function, so that when the version hash is swapped, the old cached
        results would eventually be reclaimed by the caching backend.
    """
    if not callable(f):
        # NOTE: raising a Warning subclass as an exception is unusual but
        # is the established contract here; callers catch DeprecationWarning.
        raise DeprecationWarning("Deleting messages by relative name is no longer"
                                 " reliable, please switch to a function reference")
    try:
        if not args and not kwargs:
            # No parameters given: invalidate every cached version by
            # rotating the function's internal version hash.
            self._memoize_version(f, reset=True)
        else:
            # Delete only the cache entry for this exact argument set.
            cache_key = f.make_cache_key(f.uncached, *args, **kwargs)
            self.cache.delete(cache_key)
    except Exception:
        if current_app.debug:
            raise
        logger.exception("Exception possibly due to cache backend.")
def delete_memoized_verhash(self, f, *args):
    """Delete the internal version hash associated with a function.

    ..warning::

        Keys already written under the old version hash are left behind
        in the backend; unless they were stored with a timeout they will
        sit there orphaned. The application must ensure such keys at
        least expire on their own.
    """
    if not callable(f):
        raise DeprecationWarning(
            "Deleting messages by relative name is no longer reliable, "
            "please use a function reference")
    try:
        self._memoize_version(f, delete=True)
    except Exception:
        if not current_app.debug:
            logger.exception("Exception possibly due to cache backend.")
        else:
            raise
| |
# Copyright (c) 2016 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_config import cfg
from heat.common import exception
from heat.engine import check_resource
from heat.engine import dependencies
from heat.engine import resource
from heat.engine import scheduler
from heat.engine import stack
from heat.engine import sync_point
from heat.engine import worker
from heat.rpc import worker_client
from heat.tests import common
from heat.tests.engine import tools
from heat.tests import utils
# NOTE: @mock.patch.object decorators apply bottom-up, so every test method
# receives the patched helpers in this order:
#   mock_cru = check_resource_update, mock_crc = check_resource_cleanup,
#   mock_pcr = propagate_check_resource, mock_csc = check_stack_complete,
#   mock_cid = construct_input_data
@mock.patch.object(check_resource, 'construct_input_data')
@mock.patch.object(check_resource, 'check_stack_complete')
@mock.patch.object(check_resource, 'propagate_check_resource')
@mock.patch.object(check_resource, 'check_resource_cleanup')
@mock.patch.object(check_resource, 'check_resource_update')
class CheckWorkflowUpdateTest(common.HeatTestCase):
    """Exercise WorkerService.check_resource on the update traversal.

    setUp builds a five-resource convergence stack, a WorkerService and a
    CheckResource helper; resource 'A' with is_update=True is the subject
    of most tests.
    """
    @mock.patch.object(worker_client.WorkerClient, 'check_resource',
                       lambda *_: None)
    def setUp(self):
        # Build the worker, the CheckResource under test, and a converged
        # five-resource stack; patch Stack.load globally (restored in
        # tearDown) so the worker always sees this stack.
        super(CheckWorkflowUpdateTest, self).setUp()
        thread_group_mgr = mock.Mock()
        cfg.CONF.set_default('convergence_engine', True)
        self.worker = worker.WorkerService('host-1',
                                           'topic-1',
                                           'engine_id',
                                           thread_group_mgr)
        # NOTE(review): self.cr captures the worker's _rpc_client as it is
        # *before* the replacement on the next line -- TODO confirm intended.
        self.cr = check_resource.CheckResource(self.worker.engine_id,
                                               self.worker._rpc_client,
                                               self.worker.thread_group_mgr)
        self.worker._rpc_client = worker_client.WorkerClient()
        self.ctx = utils.dummy_context()
        self.stack = tools.get_stack(
            'check_workflow_create_stack', self.ctx,
            template=tools.string_template_five, convergence=True)
        self.stack.converge_stack(self.stack.t)
        self.resource = self.stack['A']
        self.is_update = True
        self.graph_key = (self.resource.id, self.is_update)
        self.orig_load_method = stack.Stack.load
        stack.Stack.load = mock.Mock(return_value=self.stack)
    def tearDown(self):
        # Undo the global Stack.load patch applied in setUp.
        super(CheckWorkflowUpdateTest, self).tearDown()
        stack.Stack.load = self.orig_load_method
    def test_resource_not_available(
            self, mock_cru, mock_crc, mock_pcr, mock_csc, mock_cid):
        # An unknown resource id must be a no-op: no helper is invoked.
        self.worker.check_resource(
            self.ctx, 'non-existant-id', self.stack.current_traversal, {},
            True, None)
        for mocked in [mock_cru, mock_crc, mock_pcr, mock_csc, mock_cid]:
            self.assertFalse(mocked.called)
    def test_stale_traversal(
            self, mock_cru, mock_crc, mock_pcr, mock_csc, mock_cid):
        # A traversal id that is not the stack's current one is ignored.
        self.worker.check_resource(self.ctx, self.resource.id,
                                   'stale-traversal', {}, True, None)
        for mocked in [mock_cru, mock_crc, mock_pcr, mock_csc, mock_cid]:
            self.assertFalse(mocked.called)
    def test_is_update_traversal(
            self, mock_cru, mock_crc, mock_pcr, mock_csc, mock_cid):
        # Update traversal calls check_resource_update, not *_cleanup.
        self.worker.check_resource(
            self.ctx, self.resource.id, self.stack.current_traversal, {},
            self.is_update, None)
        mock_cru.assert_called_once_with(self.resource,
                                         self.resource.stack.t.id,
                                         {}, self.worker.engine_id,
                                         mock.ANY)
        self.assertFalse(mock_crc.called)
        # NOTE(review): this loop only builds mock.call objects and appends
        # them; it never asserts anything against mock_pcr, so expected_calls
        # is effectively dead code -- TODO confirm intent.
        expected_calls = []
        for req, fwd in self.stack.convergence_dependencies.leaves():
            expected_calls.append(
                (mock.call.worker.propagate_check_resource.
                 assert_called_once_with(
                     self.ctx, mock.ANY, mock.ANY,
                     self.stack.current_traversal, mock.ANY,
                     self.graph_key, {}, self.is_update)))
        mock_csc.assert_called_once_with(
            self.ctx, mock.ANY, self.stack.current_traversal,
            self.resource.id,
            mock.ANY, True)
    @mock.patch.object(resource.Resource, 'make_replacement')
    @mock.patch.object(stack.Stack, 'time_remaining')
    def test_is_update_traversal_raise_update_replace(
            self, tr, mock_mr, mock_cru, mock_crc, mock_pcr, mock_csc,
            mock_cid):
        # UpdateReplace from the update step triggers make_replacement and
        # stops further propagation.
        mock_cru.side_effect = exception.UpdateReplace
        tr.return_value = 317
        self.worker.check_resource(
            self.ctx, self.resource.id, self.stack.current_traversal, {},
            self.is_update, None)
        mock_cru.assert_called_once_with(self.resource,
                                         self.resource.stack.t.id,
                                         {}, self.worker.engine_id,
                                         mock.ANY)
        self.assertTrue(mock_mr.called)
        self.assertFalse(mock_crc.called)
        self.assertFalse(mock_pcr.called)
        self.assertFalse(mock_csc.called)
    @mock.patch.object(check_resource.CheckResource, '_try_steal_engine_lock')
    @mock.patch.object(stack.Stack, 'time_remaining')
    @mock.patch.object(resource.Resource, 'state_set')
    def test_is_update_traversal_raise_update_inprogress(
            self, mock_ss, tr, mock_tsl, mock_cru, mock_crc, mock_pcr,
            mock_csc, mock_cid):
        # When another engine holds the lock and it can be stolen, the
        # resource is marked FAILED and nothing is propagated.
        mock_cru.side_effect = exception.UpdateInProgress
        self.worker.engine_id = 'some-thing-else'
        mock_tsl.return_value = True
        tr.return_value = 317
        self.worker.check_resource(
            self.ctx, self.resource.id, self.stack.current_traversal, {},
            self.is_update, None)
        mock_cru.assert_called_once_with(self.resource,
                                         self.resource.stack.t.id,
                                         {}, self.worker.engine_id,
                                         mock.ANY)
        mock_ss.assert_called_once_with(self.resource.action,
                                        resource.Resource.FAILED,
                                        mock.ANY)
        self.assertFalse(mock_crc.called)
        self.assertFalse(mock_pcr.called)
        self.assertFalse(mock_csc.called)
    def test_try_steal_lock_alive(
            self, mock_cru, mock_crc, mock_pcr, mock_csc, mock_cid):
        # The owning engine (this one) is alive, so the lock is not stolen.
        res = self.cr._try_steal_engine_lock(self.ctx,
                                             self.resource.id)
        self.assertFalse(res)
    @mock.patch.object(check_resource.listener_client, 'EngineListenerClient')
    @mock.patch.object(check_resource.resource_objects.Resource, 'get_obj')
    def test_try_steal_lock_dead(
            self, mock_get, mock_elc, mock_cru, mock_crc, mock_pcr,
            mock_csc, mock_cid):
        # A different engine owns the lock and its listener is dead: steal.
        fake_res = mock.Mock()
        fake_res.engine_id = 'some-thing-else'
        mock_get.return_value = fake_res
        mock_elc.return_value.is_alive.return_value = False
        res = self.cr._try_steal_engine_lock(self.ctx,
                                             self.resource.id)
        self.assertTrue(res)
    @mock.patch.object(check_resource.listener_client, 'EngineListenerClient')
    @mock.patch.object(check_resource.resource_objects.Resource, 'get_obj')
    def test_try_steal_lock_not_dead(
            self, mock_get, mock_elc, mock_cru, mock_crc, mock_pcr,
            mock_csc, mock_cid):
        # The owning engine is alive, so the lock must not be stolen.
        fake_res = mock.Mock()
        fake_res.engine_id = self.worker.engine_id
        mock_get.return_value = fake_res
        mock_elc.return_value.is_alive.return_value = True
        res = self.cr._try_steal_engine_lock(self.ctx, self.resource.id)
        self.assertFalse(res)
    @mock.patch.object(check_resource.CheckResource, '_trigger_rollback')
    def test_resource_update_failure_sets_stack_state_as_failed(
            self, mock_tr, mock_cru, mock_crc, mock_pcr, mock_csc, mock_cid):
        self.stack.state_set(self.stack.UPDATE, self.stack.IN_PROGRESS, '')
        self.resource.state_set(self.resource.UPDATE,
                                self.resource.IN_PROGRESS)
        dummy_ex = exception.ResourceNotAvailable(
            resource_name=self.resource.name)
        mock_cru.side_effect = exception.ResourceFailure(
            dummy_ex, self.resource, action=self.resource.UPDATE)
        self.worker.check_resource(self.ctx, self.resource.id,
                                   self.stack.current_traversal, {},
                                   self.is_update, None)
        s = self.stack.load(self.ctx, stack_id=self.stack.id)
        self.assertEqual((s.UPDATE, s.FAILED), (s.action, s.status))
        self.assertEqual('Resource UPDATE failed: '
                         'ResourceNotAvailable: resources.A: The Resource (A)'
                         ' is not available.', s.status_reason)
    @mock.patch.object(check_resource.CheckResource, '_trigger_rollback')
    def test_resource_cleanup_failure_sets_stack_state_as_failed(
            self, mock_tr, mock_cru, mock_crc, mock_pcr, mock_csc, mock_cid):
        self.is_update = False  # invokes check_resource_cleanup
        self.stack.state_set(self.stack.UPDATE, self.stack.IN_PROGRESS, '')
        self.resource.state_set(self.resource.UPDATE,
                                self.resource.IN_PROGRESS)
        dummy_ex = exception.ResourceNotAvailable(
            resource_name=self.resource.name)
        mock_crc.side_effect = exception.ResourceFailure(
            dummy_ex, self.resource, action=self.resource.UPDATE)
        self.worker.check_resource(self.ctx, self.resource.id,
                                   self.stack.current_traversal, {},
                                   self.is_update, None)
        s = self.stack.load(self.ctx, stack_id=self.stack.id)
        self.assertEqual((s.UPDATE, s.FAILED), (s.action, s.status))
        self.assertEqual('Resource UPDATE failed: '
                         'ResourceNotAvailable: resources.A: The Resource (A)'
                         ' is not available.', s.status_reason)
    @mock.patch.object(check_resource.CheckResource, '_trigger_rollback')
    def test_resource_update_failure_triggers_rollback_if_enabled(
            self, mock_tr, mock_cru, mock_crc, mock_pcr, mock_csc, mock_cid):
        self.stack.disable_rollback = False
        self.stack.store()
        dummy_ex = exception.ResourceNotAvailable(
            resource_name=self.resource.name)
        mock_cru.side_effect = exception.ResourceFailure(
            dummy_ex, self.resource, action=self.resource.UPDATE)
        self.worker.check_resource(self.ctx, self.resource.id,
                                   self.stack.current_traversal, {},
                                   self.is_update, None)
        self.assertTrue(mock_tr.called)
        # make sure the rollback is called on given stack
        call_args, call_kwargs = mock_tr.call_args
        called_stack = call_args[0]
        self.assertEqual(self.stack.id, called_stack.id)
    @mock.patch.object(check_resource.CheckResource, '_trigger_rollback')
    def test_resource_cleanup_failure_triggers_rollback_if_enabled(
            self, mock_tr, mock_cru, mock_crc, mock_pcr, mock_csc, mock_cid):
        self.is_update = False  # invokes check_resource_cleanup
        self.stack.disable_rollback = False
        self.stack.store()
        dummy_ex = exception.ResourceNotAvailable(
            resource_name=self.resource.name)
        mock_crc.side_effect = exception.ResourceFailure(
            dummy_ex, self.resource, action=self.resource.UPDATE)
        self.worker.check_resource(self.ctx, self.resource.id,
                                   self.stack.current_traversal, {},
                                   self.is_update, None)
        self.assertTrue(mock_tr.called)
        # make sure the rollback is called on given stack
        call_args, call_kwargs = mock_tr.call_args
        called_stack = call_args[0]
        self.assertEqual(self.stack.id, called_stack.id)
    @mock.patch.object(check_resource.CheckResource, '_trigger_rollback')
    def test_rollback_is_not_triggered_on_rollback_disabled_stack(
            self, mock_tr, mock_cru, mock_crc, mock_pcr, mock_csc, mock_cid):
        self.stack.disable_rollback = True
        self.stack.store()
        dummy_ex = exception.ResourceNotAvailable(
            resource_name=self.resource.name)
        mock_cru.side_effect = exception.ResourceFailure(
            dummy_ex, self.resource, action=self.stack.CREATE)
        self.worker.check_resource(self.ctx, self.resource.id,
                                   self.stack.current_traversal, {},
                                   self.is_update, None)
        self.assertFalse(mock_tr.called)
    @mock.patch.object(check_resource.CheckResource, '_trigger_rollback')
    def test_rollback_not_re_triggered_for_a_rolling_back_stack(
            self, mock_tr, mock_cru, mock_crc, mock_pcr, mock_csc, mock_cid):
        # A stack already in ROLLBACK/IN_PROGRESS must not roll back again.
        self.stack.disable_rollback = False
        self.stack.action = self.stack.ROLLBACK
        self.stack.status = self.stack.IN_PROGRESS
        self.stack.store()
        dummy_ex = exception.ResourceNotAvailable(
            resource_name=self.resource.name)
        mock_cru.side_effect = exception.ResourceFailure(
            dummy_ex, self.resource, action=self.stack.CREATE)
        self.worker.check_resource(self.ctx, self.resource.id,
                                   self.stack.current_traversal, {},
                                   self.is_update, None)
        self.assertFalse(mock_tr.called)
    def test_resource_update_failure_purges_db_for_stack_failure(
            self, mock_cru, mock_crc, mock_pcr, mock_csc, mock_cid):
        self.stack.disable_rollback = True
        self.stack.store()
        self.stack.purge_db = mock.Mock()
        dummy_ex = exception.ResourceNotAvailable(
            resource_name=self.resource.name)
        mock_cru.side_effect = exception.ResourceFailure(
            dummy_ex, self.resource, action=self.resource.UPDATE)
        self.worker.check_resource(self.ctx, self.resource.id,
                                   self.stack.current_traversal, {},
                                   self.is_update, None)
        self.assertTrue(self.stack.purge_db.called)
    def test_resource_cleanup_failure_purges_db_for_stack_failure(
            self, mock_cru, mock_crc, mock_pcr, mock_csc, mock_cid):
        self.is_update = False
        self.stack.disable_rollback = True
        self.stack.store()
        self.stack.purge_db = mock.Mock()
        dummy_ex = exception.ResourceNotAvailable(
            resource_name=self.resource.name)
        mock_crc.side_effect = exception.ResourceFailure(
            dummy_ex, self.resource, action=self.resource.UPDATE)
        self.worker.check_resource(self.ctx, self.resource.id,
                                   self.stack.current_traversal, {},
                                   self.is_update, None)
        self.assertTrue(self.stack.purge_db.called)
    @mock.patch.object(check_resource.CheckResource,
                       '_retrigger_check_resource')
    @mock.patch.object(stack.Stack, 'load')
    def test_initiate_propagate_rsrc_retriggers_check_rsrc_on_new_stack_update(
            self, mock_stack_load, mock_rcr, mock_cru, mock_crc, mock_pcr,
            mock_csc, mock_cid):
        # A missing sync point means a new traversal superseded this one;
        # the check must be re-triggered against the newly loaded stack.
        key = sync_point.make_key(self.resource.id,
                                  self.stack.current_traversal,
                                  self.is_update)
        mock_pcr.side_effect = exception.EntityNotFound(entity='Sync Point',
                                                        name=key)
        updated_stack = stack.Stack(self.ctx, self.stack.name, self.stack.t,
                                    self.stack.id,
                                    current_traversal='some_newy_trvl_uuid')
        mock_stack_load.return_value = updated_stack
        self.cr._initiate_propagate_resource(self.ctx, self.resource.id,
                                             self.stack.current_traversal,
                                             self.is_update, self.resource,
                                             self.stack)
        mock_rcr.assert_called_once_with(self.ctx, self.is_update,
                                         self.resource.id, updated_stack)
    @mock.patch.object(sync_point, 'sync')
    def test_retrigger_check_resource(self, mock_sync, mock_cru, mock_crc,
                                      mock_pcr, mock_csc, mock_cid):
        resC = self.stack['C']
        # A, B are predecessors to C when is_update is True
        expected_predecessors = {(self.stack['A'].id, True),
                                 (self.stack['B'].id, True)}
        self.cr._retrigger_check_resource(self.ctx, self.is_update,
                                          resC.id, self.stack)
        mock_pcr.assert_called_once_with(self.ctx, mock.ANY, resC.id,
                                         self.stack.current_traversal,
                                         mock.ANY, (resC.id, True), None,
                                         True, None)
        call_args, call_kwargs = mock_pcr.call_args
        actual_predecessors = call_args[4]
        # NOTE(review): assertItemsEqual is the Python-2-era spelling; on
        # Python 3 unittest this is assertCountEqual -- TODO confirm the
        # test base class still provides it.
        self.assertItemsEqual(expected_predecessors, actual_predecessors)
    def test_retrigger_check_resource_new_traversal_deletes_rsrc(
            self, mock_cru, mock_crc, mock_pcr, mock_csc, mock_cid):
        # mock dependencies to indicate a rsrc with id 2 is not present
        # in latest traversal
        self.stack._convg_deps = dependencies.Dependencies([
            [(1, False), (1, True)], [(2, False), None]])
        # simulate rsrc 2 completing its update for old traversal
        # and calling rcr
        self.cr._retrigger_check_resource(self.ctx, True, 2, self.stack)
        # Ensure that pcr was called with proper delete traversal
        mock_pcr.assert_called_once_with(self.ctx, mock.ANY, 2,
                                         self.stack.current_traversal,
                                         mock.ANY, (2, False), None,
                                         False, None)
    @mock.patch.object(stack.Stack, 'purge_db')
    def test_handle_failure(self, mock_purgedb, mock_cru, mock_crc, mock_pcr,
                            mock_csc, mock_cid):
        self.cr._handle_failure(self.ctx, self.stack, 'dummy-reason')
        mock_purgedb.assert_called_once_with()
        self.assertEqual('dummy-reason', self.stack.status_reason)
    @mock.patch.object(check_resource.CheckResource, '_trigger_rollback')
    def test_handle_failure_rollback(self, mock_tr, mock_cru, mock_crc,
                                     mock_pcr, mock_csc, mock_cid):
        self.stack.disable_rollback = False
        self.stack.state_set(self.stack.UPDATE, self.stack.IN_PROGRESS, '')
        self.cr._handle_failure(self.ctx, self.stack, 'dummy-reason')
        mock_tr.assert_called_once_with(self.stack)
    @mock.patch.object(stack.Stack, 'purge_db')
    @mock.patch.object(stack.Stack, 'state_set')
    @mock.patch.object(check_resource.CheckResource,
                       '_retrigger_check_resource')
    @mock.patch.object(check_resource.CheckResource, '_trigger_rollback')
    def test_handle_rsrc_failure_when_update_fails(
            self, mock_tr, mock_rcr, mock_ss, mock_pdb, mock_cru, mock_crc,
            mock_pcr, mock_csc, mock_cid):
        # Emulate failure
        mock_ss.return_value = False
        self.cr._handle_resource_failure(self.ctx, self.is_update,
                                         self.resource.id, self.stack,
                                         'dummy-reason')
        self.assertTrue(mock_ss.called)
        self.assertFalse(mock_rcr.called)
        self.assertFalse(mock_pdb.called)
        self.assertFalse(mock_tr.called)
    @mock.patch.object(stack.Stack, 'purge_db')
    @mock.patch.object(stack.Stack, 'state_set')
    @mock.patch.object(check_resource.CheckResource,
                       '_retrigger_check_resource')
    @mock.patch.object(check_resource.CheckResource, '_trigger_rollback')
    def test_handle_rsrc_failure_when_update_fails_different_traversal(
            self, mock_tr, mock_rcr, mock_ss, mock_pdb, mock_cru, mock_crc,
            mock_pcr, mock_csc, mock_cid):
        # Emulate failure
        mock_ss.return_value = False
        # Emulate new traversal
        new_stack = tools.get_stack('check_workflow_create_stack', self.ctx,
                                    template=tools.string_template_five,
                                    convergence=True)
        new_stack.current_traversal = 'new_traversal'
        stack.Stack.load = mock.Mock(return_value=new_stack)
        self.cr._handle_resource_failure(self.ctx, self.is_update,
                                         self.resource.id,
                                         self.stack, 'dummy-reason')
        # Ensure retrigger called
        self.assertTrue(mock_rcr.called)
        self.assertTrue(mock_ss.called)
        self.assertFalse(mock_pdb.called)
        self.assertFalse(mock_tr.called)
    @mock.patch.object(check_resource.CheckResource, '_handle_failure')
    def test_handle_stack_timeout(self, mock_hf, mock_cru, mock_crc, mock_pcr,
                                  mock_csc, mock_cid):
        self.cr._handle_stack_timeout(self.ctx, self.stack)
        mock_hf.assert_called_once_with(self.ctx, self.stack, u'Timed out')
    @mock.patch.object(check_resource.CheckResource,
                       '_handle_stack_timeout')
    def test_do_check_resource_marks_stack_as_failed_if_stack_timesout(
            self, mock_hst, mock_cru, mock_crc, mock_pcr, mock_csc, mock_cid):
        mock_cru.side_effect = scheduler.Timeout(None, 60)
        self.is_update = True
        self.cr._do_check_resource(self.ctx, self.stack.current_traversal,
                                   self.stack.t, {}, self.is_update,
                                   self.resource, self.stack, {})
        mock_hst.assert_called_once_with(self.ctx, self.stack)
    @mock.patch.object(check_resource.CheckResource,
                       '_handle_stack_timeout')
    def test_do_check_resource_ignores_timeout_for_new_update(
            self, mock_hst, mock_cru, mock_crc, mock_pcr, mock_csc, mock_cid):
        # Ensure current_traversal is check before marking the stack as
        # failed due to time-out.
        mock_cru.side_effect = scheduler.Timeout(None, 60)
        self.is_update = True
        old_traversal = self.stack.current_traversal
        self.stack.current_traversal = 'new_traversal'
        self.cr._do_check_resource(self.ctx, old_traversal,
                                   self.stack.t, {}, self.is_update,
                                   self.resource, self.stack, {})
        self.assertFalse(mock_hst.called)
    @mock.patch.object(stack.Stack, 'has_timed_out')
    @mock.patch.object(check_resource.CheckResource,
                       '_handle_stack_timeout')
    def test_check_resource_handles_timeout(self, mock_hst, mock_to, mock_cru,
                                            mock_crc, mock_pcr, mock_csc,
                                            mock_cid):
        mock_to.return_value = True
        self.worker.check_resource(self.ctx, self.resource.id,
                                   self.stack.current_traversal,
                                   {}, self.is_update, {})
        self.assertTrue(mock_hst.called)
# NOTE: decorators apply bottom-up; mocks arrive as mock_cru, mock_crc,
# mock_pcr, mock_csc, mock_cid (same order as CheckWorkflowUpdateTest).
@mock.patch.object(check_resource, 'construct_input_data')
@mock.patch.object(check_resource, 'check_stack_complete')
@mock.patch.object(check_resource, 'propagate_check_resource')
@mock.patch.object(check_resource, 'check_resource_cleanup')
@mock.patch.object(check_resource, 'check_resource_update')
class CheckWorkflowCleanupTest(common.HeatTestCase):
    """Exercise WorkerService.check_resource on the cleanup (delete)
    traversal: is_update=False must route to check_resource_cleanup.
    """
    @mock.patch.object(worker_client.WorkerClient, 'check_resource',
                       lambda *_: None)
    def setUp(self):
        # Create the stack, then converge it with a DELETE action so the
        # current traversal is a cleanup traversal for resource 'A'.
        super(CheckWorkflowCleanupTest, self).setUp()
        thread_group_mgr = mock.Mock()
        self.worker = worker.WorkerService('host-1',
                                           'topic-1',
                                           'engine_id',
                                           thread_group_mgr)
        self.worker._rpc_client = worker_client.WorkerClient()
        self.ctx = utils.dummy_context()
        tstack = tools.get_stack(
            'check_workflow_create_stack', self.ctx,
            template=tools.string_template_five, convergence=True)
        tstack.converge_stack(tstack.t, action=tstack.CREATE)
        self.stack = stack.Stack.load(self.ctx, stack_id=tstack.id)
        self.stack.converge_stack(self.stack.t, action=self.stack.DELETE)
        self.resource = self.stack['A']
        self.is_update = False
        self.graph_key = (self.resource.id, self.is_update)
    @mock.patch.object(stack.Stack, 'time_remaining')
    def test_is_cleanup_traversal(
            self, tr, mock_cru, mock_crc, mock_pcr, mock_csc, mock_cid):
        # Cleanup traversal calls check_resource_cleanup, not *_update.
        tr.return_value = 317
        self.worker.check_resource(
            self.ctx, self.resource.id, self.stack.current_traversal, {},
            self.is_update, None)
        self.assertFalse(mock_cru.called)
        mock_crc.assert_called_once_with(
            self.resource, self.resource.stack.t.id,
            {}, self.worker.engine_id,
            tr())
    @mock.patch.object(stack.Stack, 'time_remaining')
    def test_is_cleanup_traversal_raise_update_inprogress(
            self, tr, mock_cru, mock_crc, mock_pcr, mock_csc, mock_cid):
        # UpdateInProgress during cleanup must stop further propagation.
        mock_crc.side_effect = exception.UpdateInProgress
        tr.return_value = 317
        self.worker.check_resource(
            self.ctx, self.resource.id, self.stack.current_traversal, {},
            self.is_update, None)
        mock_crc.assert_called_once_with(self.resource,
                                         self.resource.stack.t.id,
                                         {}, self.worker.engine_id,
                                         tr())
        self.assertFalse(mock_cru.called)
        self.assertFalse(mock_pcr.called)
        self.assertFalse(mock_csc.called)
class MiscMethodsTest(common.HeatTestCase):
    """Unit tests for the module-level helpers in check_resource:
    construct_input_data, check_stack_complete, propagate_check_resource,
    check_resource_update and check_resource_cleanup.
    """
    def setUp(self):
        super(MiscMethodsTest, self).setUp()
        cfg.CONF.set_default('convergence_engine', True)
        self.ctx = utils.dummy_context()
        # attr_cache_template exercises attribute caching in
        # construct_input_data.
        self.stack = tools.get_stack(
            'check_workflow_create_stack', self.ctx,
            template=tools.attr_cache_template, convergence=True)
        self.stack.converge_stack(self.stack.t)
        self.resource = self.stack['A']
    def test_construct_input_data_ok(self):
        expected_input_data = {'attrs': {(u'flat_dict', u'key2'): 'val2',
                                         (u'flat_dict', u'key3'): 'val3',
                                         (u'nested_dict', u'dict', u'a'): 1,
                                         (u'nested_dict', u'dict', u'b'): 2},
                               'id': mock.ANY,
                               'reference_id': 'A',
                               'name': 'A',
                               'uuid': mock.ANY,
                               'action': mock.ANY,
                               'status': mock.ANY}
        actual_input_data = check_resource.construct_input_data(self.resource,
                                                                self.stack)
        self.assertEqual(expected_input_data, actual_input_data)
    def test_construct_input_data_exception(self):
        # Attribute resolution errors must degrade to empty 'attrs',
        # not propagate.
        expected_input_data = {'attrs': {},
                               'id': mock.ANY,
                               'reference_id': 'A',
                               'name': 'A',
                               'uuid': mock.ANY,
                               'action': mock.ANY,
                               'status': mock.ANY}
        self.resource.get_attribute = mock.Mock(
            side_effect=exception.InvalidTemplateAttribute(resource='A',
                                                           key='value'))
        actual_input_data = check_resource.construct_input_data(self.resource,
                                                                self.stack)
        self.assertEqual(expected_input_data, actual_input_data)
    @mock.patch.object(sync_point, 'sync')
    def test_check_stack_complete_root(self, mock_sync):
        # Completing a root resource ('E') syncs stack completion.
        check_resource.check_stack_complete(
            self.ctx, self.stack, self.stack.current_traversal,
            self.stack['E'].id, self.stack.convergence_dependencies,
            True)
        mock_sync.assert_called_once_with(
            self.ctx, self.stack.id, self.stack.current_traversal, True,
            mock.ANY, mock.ANY, {(self.stack['E'].id, True): None})
    @mock.patch.object(sync_point, 'sync')
    def test_check_stack_complete_child(self, mock_sync):
        # A non-root resource completing must not sync stack completion.
        check_resource.check_stack_complete(
            self.ctx, self.stack, self.stack.current_traversal,
            self.resource.id, self.stack.convergence_dependencies,
            True)
        self.assertFalse(mock_sync.called)
    @mock.patch.object(dependencies.Dependencies, 'roots')
    @mock.patch.object(stack.Stack, '_persist_state')
    def test_check_stack_complete_persist_called(self, mock_persist_state,
                                                 mock_dep_roots):
        mock_dep_roots.return_value = [(1, True)]
        check_resource.check_stack_complete(
            self.ctx, self.stack, self.stack.current_traversal,
            1, self.stack.convergence_dependencies,
            True)
        self.assertTrue(mock_persist_state.called)
    @mock.patch.object(sync_point, 'sync')
    def test_propagate_check_resource(self, mock_sync):
        check_resource.propagate_check_resource(
            self.ctx, mock.ANY, mock.ANY,
            self.stack.current_traversal, mock.ANY,
            ('A', True), {}, True, None)
        self.assertTrue(mock_sync.called)
    @mock.patch.object(resource.Resource, 'create_convergence')
    @mock.patch.object(resource.Resource, 'update_convergence')
    def test_check_resource_update_init_action(self, mock_update, mock_create):
        # INIT resources are created, not updated.
        self.resource.action = 'INIT'
        check_resource.check_resource_update(
            self.resource, self.resource.stack.t.id, {}, 'engine-id',
            self.stack)
        self.assertTrue(mock_create.called)
        self.assertFalse(mock_update.called)
    @mock.patch.object(resource.Resource, 'create_convergence')
    @mock.patch.object(resource.Resource, 'update_convergence')
    def test_check_resource_update_create_action(
            self, mock_update, mock_create):
        # Any non-INIT action (CREATE here) routes to update_convergence.
        self.resource.action = 'CREATE'
        check_resource.check_resource_update(
            self.resource, self.resource.stack.t.id, {}, 'engine-id',
            self.stack)
        self.assertFalse(mock_create.called)
        self.assertTrue(mock_update.called)
    @mock.patch.object(resource.Resource, 'create_convergence')
    @mock.patch.object(resource.Resource, 'update_convergence')
    def test_check_resource_update_update_action(
            self, mock_update, mock_create):
        self.resource.action = 'UPDATE'
        check_resource.check_resource_update(
            self.resource, self.resource.stack.t.id, {}, 'engine-id',
            self.stack)
        self.assertFalse(mock_create.called)
        self.assertTrue(mock_update.called)
    @mock.patch.object(resource.Resource, 'delete_convergence')
    def test_check_resource_cleanup_delete(self, mock_delete):
        # A resource whose current template differs from the traversal's
        # template is deleted during cleanup.
        self.resource.current_template_id = 'new-template-id'
        check_resource.check_resource_cleanup(
            self.resource, self.resource.stack.t.id, {}, 'engine-id',
            self.stack.timeout_secs())
        self.assertTrue(mock_delete.called)
| |
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import random
import shutil
import subprocess
import time
from pathlib import Path
from shlex import split
from subprocess import check_call, check_output
from subprocess import CalledProcessError
from socket import gethostname, getfqdn
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import endpoint_from_flag
from charms.reactive import set_state, remove_state, is_state
from charms.reactive import when, when_any, when_not, when_none
from charms.kubernetes.common import get_version
from charms.reactive.helpers import data_changed, any_file_changed
from charms.templating.jinja2 import render
from charmhelpers.core import hookenv, unitdata
from charmhelpers.core.host import service_stop, service_restart
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
# NOTE: raw string avoids the invalid '\.' escape sequence warning raised by
# modern Python; the resulting pattern is byte-identical to before.
nrpe.Check.shortname_re = r'[\.A-Za-z0-9-_]+$'
# Well-known kubeconfig locations used by the worker services.
kubeconfig_path = '/root/cdk/kubeconfig'
kubeproxyconfig_path = '/root/cdk/kubeproxyconfig'
kubeclientconfig_path = '/root/.kube/config'
# Environment variable consumed by GCP client libraries for credentials.
gcp_creds_env_key = 'GOOGLE_APPLICATION_CREDENTIALS'
# Make snap-installed binaries (kubectl, kubelet, ...) resolvable on PATH.
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
# Unit-local key/value store used to persist credentials and label state.
db = unitdata.kv()
@hook('upgrade-charm')
def upgrade_charm():
    """Handle the upgrade-charm hook: reset reactive state so the new charm
    revision re-installs and re-configures everything it owns."""
    # Trigger removal of PPA docker installation if it was previously set.
    set_state('config.changed.install_from_upstream')
    hookenv.atexit(remove_state, 'config.changed.install_from_upstream')
    cleanup_pre_snap_services()
    check_resources_for_upgrade_needed()
    # Remove the RC for nginx ingress if it exists
    if hookenv.config().get('ingress'):
        kubectl_success('delete', 'rc', 'nginx-ingress-controller')
    # Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
    # since they can differ between k8s versions
    if is_state('kubernetes-worker.gpu.enabled'):
        remove_state('kubernetes-worker.gpu.enabled')
        try:
            disable_gpu()
        except ApplyNodeLabelFailed:
            # Removing node label failed. Probably the master is unavailable.
            # Proceed with the upgrade in hope GPUs will still be there.
            hookenv.log('Failed to remove GPU labels. Proceed with upgrade.')
    # Clearing these states forces the relevant handlers to run again and
    # rebuild config/ingress/auth for the new revision.
    remove_state('kubernetes-worker.cni-plugins.installed')
    remove_state('kubernetes-worker.config.created')
    remove_state('kubernetes-worker.ingress.available')
    remove_state('worker.auth.bootstrapped')
    set_state('kubernetes-worker.restart-needed')
def check_resources_for_upgrade_needed():
    """Flag a snap upgrade when any of the charm's attached snap resources
    has changed since the last time we looked."""
    hookenv.status_set('maintenance', 'Checking resources')
    snap_resources = ('kubectl', 'kubelet', 'kube-proxy')
    resource_paths = [hookenv.resource_get(name) for name in snap_resources]
    if any_file_changed(resource_paths):
        set_upgrade_needed()
def set_upgrade_needed():
    """Mark the snaps as needing an upgrade; auto-approve it unless the
    operator requires manual confirmation for an already-seen channel."""
    set_state('kubernetes-worker.snaps.upgrade-needed')
    config = hookenv.config()
    manual_required = config.get('require-manual-upgrade')
    channel_seen_before = config.previous('channel') is not None
    if not channel_seen_before or not manual_required:
        set_state('kubernetes-worker.snaps.upgrade-specified')
def cleanup_pre_snap_services():
    """Remove reactive state, systemd services and files left over from the
    pre-snap (binary) installation of the worker components."""
    # drop the obsolete reactive state
    remove_state('kubernetes-worker.components.installed')
    # stop the old (non-snap) systemd services
    for old_service in ('kubelet', 'kube-proxy'):
        hookenv.log('Stopping {0} service.'.format(old_service))
        service_stop(old_service)
    # delete files and directories installed by the old charm
    stale_paths = [
        "/lib/systemd/system/kubelet.service",
        "/lib/systemd/system/kube-proxy.service",
        "/etc/default/kube-default",
        "/etc/default/kubelet",
        "/etc/default/kube-proxy",
        "/srv/kubernetes",
        "/usr/local/bin/kubectl",
        "/usr/local/bin/kubelet",
        "/usr/local/bin/kube-proxy",
        "/etc/kubernetes",
    ]
    for path in stale_paths:
        if os.path.isdir(path):
            hookenv.log("Removing directory: " + path)
            shutil.rmtree(path)
        elif os.path.isfile(path):
            hookenv.log("Removing file: " + path)
            os.remove(path)
@when('config.changed.channel')
def channel_changed():
    # A new snap channel means the snaps may need to be refreshed.
    set_upgrade_needed()
@when('kubernetes-worker.snaps.upgrade-needed')
@when_not('kubernetes-worker.snaps.upgrade-specified')
def upgrade_needed_status():
    """Stay blocked until the operator approves the upgrade via the action."""
    blocked_msg = 'Needs manual upgrade, run the upgrade action'
    hookenv.status_set('blocked', blocked_msg)
@when('kubernetes-worker.snaps.upgrade-specified')
def install_snaps():
    """Install (or refresh) the worker snaps from the configured channel,
    then schedule a service restart."""
    check_resources_for_upgrade_needed()
    channel = hookenv.config('channel')
    for snap_name in ('kubectl', 'kubelet', 'kube-proxy'):
        hookenv.status_set('maintenance',
                           'Installing {} snap'.format(snap_name))
        snap.install(snap_name, channel=channel, classic=True)
    set_state('kubernetes-worker.snaps.installed')
    set_state('kubernetes-worker.restart-needed')
    remove_state('kubernetes-worker.snaps.upgrade-needed')
    remove_state('kubernetes-worker.snaps.upgrade-specified')
@hook('stop')
def shutdown():
    ''' When this unit is destroyed:
        - delete the current node
        - stop the worker services
    '''
    try:
        if os.path.isfile(kubeconfig_path):
            # Only attempt deregistration if kubelet was ever configured.
            kubectl('delete', 'node', get_node_name())
    except CalledProcessError:
        # Best effort: the master may already be gone at destroy time.
        hookenv.log('Failed to unregister node.')
    service_stop('snap.kubelet.daemon')
    service_stop('snap.kube-proxy.daemon')
@when('docker.available')
@when_not('kubernetes-worker.cni-plugins.installed')
def install_cni_plugins():
    ''' Unpack the cni-plugins resource '''
    charm_dir = os.getenv('CHARM_DIR')
    # Get the resource via resource_get
    try:
        resource_name = 'cni-{}'.format(arch())
        archive = hookenv.resource_get(resource_name)
    except Exception:
        message = 'Error fetching the cni resource.'
        hookenv.log(message)
        hookenv.status_set('blocked', message)
        return
    if not archive:
        hookenv.log('Missing cni resource.')
        hookenv.status_set('blocked', 'Missing cni resource.')
        return
    # Handle null resource publication, we check if filesize < 1mb
    filesize = os.stat(archive).st_size
    if filesize < 1000000:
        hookenv.status_set('blocked', 'Incomplete cni resource.')
        return
    hookenv.status_set('maintenance', 'Unpacking cni resource.')
    unpack_path = '{}/files/cni'.format(charm_dir)
    os.makedirs(unpack_path, exist_ok=True)
    cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
    hookenv.log(cmd)
    check_call(cmd)
    # Install the unpacked plugin binaries into the CNI bin directory.
    apps = [
        {'name': 'loopback', 'path': '/opt/cni/bin'}
    ]
    for app in apps:
        unpacked = '{}/{}'.format(unpack_path, app['name'])
        app_path = os.path.join(app['path'], app['name'])
        install = ['install', '-v', '-D', unpacked, app_path]
        hookenv.log(install)
        check_call(install)
    # Used by the "registry" action. The action is run on a single worker, but
    # the registry pod can end up on any worker, so we need this directory on
    # all the workers.
    os.makedirs('/srv/registry', exist_ok=True)
    set_state('kubernetes-worker.cni-plugins.installed')
@when('kubernetes-worker.snaps.installed')
def set_app_version():
    ''' Declare the application version to juju '''
    # `kubelet --version` prints something like b'Kubernetes v1.9.2\n';
    # keep the part after ' v' and strip trailing whitespace.
    cmd = ['kubelet', '--version']
    version = check_output(cmd)
    # Decode so juju receives a str instead of a raw bytes object.
    version_str = version.split(b' v')[-1].rstrip().decode('utf-8')
    hookenv.application_version_set(version_str)
@when('kubernetes-worker.snaps.installed')
@when_not('kube-control.dns.available')
def notify_user_transient_status():
    ''' Report a transient 'waiting' status while the deployment converges;
    clears once kube-control publishes cluster DNS information. '''
    # During deployment the worker has to start kubelet without cluster dns
    # configured. If this is the first unit online in a service pool waiting
    # to self host the dns pod, and configure itself to query the dns service
    # declared in the kube-system namespace
    hookenv.status_set('waiting', 'Waiting for cluster DNS.')
@when('kubernetes-worker.snaps.installed',
      'kube-control.dns.available')
@when_not('kubernetes-worker.snaps.upgrade-needed')
def charm_status(kube_control):
    '''Update the status message with the current status of kubelet.'''
    # kube_control is unused here; the relation argument only gates when
    # this handler runs.
    update_kubelet_status()
def update_kubelet_status():
    """Report worker health based on whether the kubelet and kube-proxy
    snap daemons are currently active in systemd."""
    failing = [svc for svc in ('kubelet', 'kube-proxy')
               if not _systemctl_is_active('snap.{}.daemon'.format(svc))]
    if failing:
        waiting_msg = 'Waiting for {} to start.'.format(','.join(failing))
        hookenv.status_set('waiting', waiting_msg)
    else:
        hookenv.status_set('active', 'Kubernetes worker running.')
def get_ingress_address(relation):
    """Return the ingress address juju advertises for *relation*, falling
    back to the unit's private address on juju versions without spaces."""
    try:
        info = hookenv.network_get(relation.relation_name)
    except NotImplementedError:
        # juju too old to support network_get at all
        info = []
    if not info or 'ingress-addresses' not in info:
        # if they don't have ingress-addresses they are running a juju that
        # doesn't support spaces, so just return the private address
        return hookenv.unit_get('private-address')
    # just grab the first one for now, maybe be more robust here?
    return info['ingress-addresses'][0]
@when('certificates.available', 'kube-control.connected')
def send_data(tls, kube_control):
    '''Send the data that is required to create a server certificate for
    this server.'''
    # Use the public ip of this unit as the Common Name for the certificate.
    common_name = hookenv.unit_public_ip()
    ingress_ip = get_ingress_address(kube_control)
    # Create SANs that the tls layer will add to the server cert:
    # public IP, ingress IP and the node's registered name.
    sans = [
        hookenv.unit_public_ip(),
        ingress_ip,
        get_node_name()
    ]
    # Create a path safe name by removing path characters from the unit name.
    certificate_name = hookenv.local_unit().replace('/', '_')
    # Request a server cert with this information.
    tls.request_server_cert(common_name, sans, certificate_name)
@when('kube-api-endpoint.available', 'kube-control.dns.available',
      'cni.available')
def watch_for_changes(kube_api, kube_control, cni):
    ''' Watch for configuration changes and signal if we need to restart the
    worker services '''
    servers = get_kube_api_servers(kube_api)
    dns = kube_control.get_dns()
    cluster_cidr = cni.get_config()['cidr']
    # NOTE(review): `or` short-circuits, so once one value reports changed
    # the later data_changed() calls are skipped this invocation; they will
    # simply report changed again next time, causing at worst one extra
    # restart — confirm this is acceptable.
    if (data_changed('kube-api-servers', servers) or
            data_changed('kube-dns', dns) or
            data_changed('cluster-cidr', cluster_cidr)):
        set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.snaps.installed', 'kube-api-endpoint.available',
      'tls_client.ca.saved', 'tls_client.client.certificate.saved',
      'tls_client.client.key.saved', 'tls_client.server.certificate.saved',
      'tls_client.server.key.saved',
      'kube-control.dns.available', 'kube-control.auth.available',
      'cni.available', 'kubernetes-worker.restart-needed',
      'worker.auth.bootstrapped')
def start_worker(kube_api, kube_control, auth_control, cni):
    ''' Start kubelet using the provided API and DNS info.'''
    servers = get_kube_api_servers(kube_api)
    # Note that the DNS server doesn't necessarily exist at this point. We know
    # what its IP will eventually be, though, so we can go ahead and configure
    # kubelet with that info. This ensures that early pods are configured with
    # the correct DNS even though the server isn't ready yet.
    dns = kube_control.get_dns()
    ingress_ip = get_ingress_address(kube_control)
    cluster_cidr = cni.get_config()['cidr']
    if cluster_cidr is None:
        hookenv.log('Waiting for cluster cidr.')
        return
    creds = db.get('credentials')
    # Presumably primes the data_changed cache so catch_change_in_creds only
    # restarts on a *future* credential change — confirm.
    data_changed('kube-control.creds', creds)
    create_config(random.choice(servers), creds)
    configure_kubelet(dns, ingress_ip)
    configure_kube_proxy(servers, cluster_cidr)
    set_state('kubernetes-worker.config.created')
    restart_unit_services()
    update_kubelet_status()
    set_state('kubernetes-worker.label-config-required')
    remove_state('kubernetes-worker.restart-needed')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
    ''' Set worker configuration on the CNI relation. This lets the CNI
    subordinate know that we're the worker so it can respond accordingly. '''
    # Shares the kubelet kubeconfig path so the CNI subordinate can talk
    # to the API server.
    cni.set_config(is_master=False, kubeconfig_path=kubeconfig_path)
@when('config.changed.ingress')
def toggle_ingress_state():
    ''' Ingress is a toggled state. Remove ingress.available if set when
    toggled '''
    # Clearing the state makes render_and_launch_ingress run again.
    remove_state('kubernetes-worker.ingress.available')
@when('docker.sdn.configured')
def sdn_changed():
    '''The Software Defined Network changed on the container so restart the
    kubernetes services.'''
    restart_unit_services()
    update_kubelet_status()
    # Consume the one-shot state set by the docker layer.
    remove_state('docker.sdn.configured')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.ingress.available')
def render_and_launch_ingress():
    ''' If configuration has ingress daemon set enabled, launch the ingress
    load balancer and default http backend. Otherwise attempt deletion. '''
    config = hookenv.config()
    # If ingress is enabled, launch the ingress controller
    if config.get('ingress'):
        launch_default_ingress_controller()
    else:
        # Tear down any previously-deployed ingress resources and close
        # the ports we opened for them.
        hookenv.log('Deleting the http backend and ingress.')
        kubectl_manifest('delete',
                         '/root/cdk/addons/default-http-backend.yaml')
        kubectl_manifest('delete',
                         '/root/cdk/addons/ingress-daemon-set.yaml')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
@when('config.changed.labels')
def handle_labels_changed():
    # Defer the actual work to apply_node_labels().
    set_state('kubernetes-worker.label-config-required')
@when('kubernetes-worker.label-config-required',
      'kubernetes-worker.config.created')
def apply_node_labels():
    ''' Parse the labels configuration option and apply the labels to the
    node. '''
    # Get the user's configured labels. `or ''` guards against an unset
    # option, and split() without an argument tolerates repeated whitespace
    # (and yields nothing for an empty string instead of ['']).
    config = hookenv.config()
    user_labels = {}
    for item in (config.get('labels') or '').split():
        if '=' in item:
            # partition() keeps any '=' characters inside the value intact;
            # item.split('=') would raise ValueError on e.g. 'key=a=b'.
            key, _, val = item.partition('=')
            user_labels[key] = val
        else:
            hookenv.log('Skipping malformed option: {}.'.format(item))
    # Collect the current label state.
    current_labels = db.get('current_labels') or {}
    # Remove any labels that the user has removed from the config,
    # persisting after each change so a partial run is recoverable.
    for key in list(current_labels.keys()):
        if key not in user_labels:
            try:
                remove_label(key)
                del current_labels[key]
                db.set('current_labels', current_labels)
            except ApplyNodeLabelFailed as e:
                hookenv.log(str(e))
                return
    # Add any new labels.
    for key, val in user_labels.items():
        try:
            set_label(key, val)
            current_labels[key] = val
            db.set('current_labels', current_labels)
        except ApplyNodeLabelFailed as e:
            hookenv.log(str(e))
            return
    # Set the juju-application label.
    try:
        set_label('juju-application', hookenv.service_name())
    except ApplyNodeLabelFailed as e:
        hookenv.log(str(e))
        return
    # Label configuration complete.
    remove_state('kubernetes-worker.label-config-required')
@when_any('config.changed.kubelet-extra-args',
          'config.changed.proxy-extra-args')
def extra_args_changed():
    # New extra args only take effect after a service restart.
    set_state('kubernetes-worker.restart-needed')
@when('config.changed.docker-logins')
def docker_logins_changed():
    """Set a flag to handle new docker login options.
    If docker daemon options have also changed, set a flag to ensure the
    daemon is restarted prior to running docker login.
    """
    config = hookenv.config()
    if data_changed('docker-opts', config['docker-opts']):
        hookenv.log('Found new docker daemon options. Requesting a restart.')
        # State will be removed by layer-docker after restart
        set_state('docker.restart')
    # run_docker_login waits on 'docker.restart' being cleared.
    set_state('kubernetes-worker.docker-login')
@when('kubernetes-worker.docker-login')
@when_not('docker.restart')
def run_docker_login():
    """Login to a docker registry with configured credentials."""
    config = hookenv.config()
    previous_logins = config.previous('docker-logins')
    logins = config['docker-logins']
    logins = json.loads(logins)
    if previous_logins:
        # Log out of any registry that was dropped from the config.
        previous_logins = json.loads(previous_logins)
        next_servers = {login['server'] for login in logins}
        previous_servers = {login['server'] for login in previous_logins}
        servers_to_logout = previous_servers - next_servers
        for server in servers_to_logout:
            cmd = ['docker', 'logout', server]
            subprocess.check_call(cmd)
    for login in logins:
        server = login['server']
        username = login['username']
        password = login['password']
        # NOTE(review): passing the password via -p exposes it in the
        # process list; consider `docker login --password-stdin`.
        cmd = ['docker', 'login', server, '-u', username, '-p', password]
        subprocess.check_call(cmd)
    remove_state('kubernetes-worker.docker-login')
    set_state('kubernetes-worker.restart-needed')
def arch():
    '''Return the package architecture as a string. Raise an exception if the
    architecture is not supported by kubernetes.'''
    # dpkg reports this system's package architecture as raw bytes;
    # strip trailing whitespace and decode into a str for callers.
    raw = check_output(['dpkg', '--print-architecture'])
    return raw.rstrip().decode('utf-8')
def create_config(server, creds):
    '''Create a kubernetes configuration for the worker unit.'''
    # Get the options from the tls-client layer.
    layer_options = layer.options('tls-client')
    # Get all the paths to the tls information required for kubeconfig.
    ca = layer_options.get('ca_certificate_path')
    # Create kubernetes configuration in the default location for ubuntu.
    create_kubeconfig('/home/ubuntu/.kube/config', server, ca,
                      token=creds['client_token'], user='ubuntu')
    # Make the config dir readable by the ubuntu users so juju scp works.
    cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube']
    check_call(cmd)
    # Create kubernetes configuration in the default location for root.
    create_kubeconfig(kubeclientconfig_path, server, ca,
                      token=creds['client_token'], user='root')
    # Create kubernetes configuration for kubelet, and kube-proxy services,
    # each with their own dedicated token.
    create_kubeconfig(kubeconfig_path, server, ca,
                      token=creds['kubelet_token'], user='kubelet')
    create_kubeconfig(kubeproxyconfig_path, server, ca,
                      token=creds['proxy_token'], user='kube-proxy')
def parse_extra_args(config_key):
    """Parse a space-separated config option of 'key=value' pairs and bare
    flags into a dict; bare flags map to the string 'true'."""
    raw = hookenv.config().get(config_key, '')
    parsed = {}
    for token in raw.split():
        key, sep, value = token.partition('=')
        parsed[key] = value if sep else 'true'
    return parsed
def configure_kubernetes_service(service, base_args, extra_args_key):
    """Apply snap configuration for *service*: options set on a previous run
    are nulled out, then base args and user extra args are layered on top."""
    kv = unitdata.kv()
    prev_args_key = 'kubernetes-worker.prev_args.' + service
    prev_args = kv.get(prev_args_key) or {}
    # Setting 'null' actually unsets an option on the snap, so anything the
    # user removed since last run is cleaned up before the overlay.
    args = {stale: 'null' for stale in prev_args}
    args.update(base_args)
    args.update(parse_extra_args(extra_args_key))
    cmd = ['snap', 'set', service] + ['%s=%s' % pair for pair in args.items()]
    check_call(cmd)
    kv.set(prev_args_key, args)
def configure_kubelet(dns, ingress_ip):
    # Assemble the full kubelet snap option set and apply it.
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')
    kubelet_opts = {}
    kubelet_opts['require-kubeconfig'] = 'true'
    kubelet_opts['kubeconfig'] = kubeconfig_path
    kubelet_opts['network-plugin'] = 'cni'
    kubelet_opts['v'] = '0'
    kubelet_opts['address'] = '0.0.0.0'
    kubelet_opts['port'] = '10250'
    kubelet_opts['cluster-domain'] = dns['domain']
    kubelet_opts['anonymous-auth'] = 'false'
    kubelet_opts['client-ca-file'] = ca_cert_path
    kubelet_opts['tls-cert-file'] = server_cert_path
    kubelet_opts['tls-private-key-file'] = server_key_path
    kubelet_opts['logtostderr'] = 'true'
    kubelet_opts['fail-swap-on'] = 'false'
    kubelet_opts['node-ip'] = ingress_ip
    if (dns['enable-kube-dns']):
        kubelet_opts['cluster-dns'] = dns['sdn-ip']
    # set --allow-privileged flag for kubelet
    kubelet_opts['allow-privileged'] = set_privileged()
    if is_state('kubernetes-worker.gpu.enabled'):
        hookenv.log('Adding '
                    '--feature-gates=DevicePlugins=true '
                    'to kubelet')
        kubelet_opts['feature-gates'] = 'DevicePlugins=true'
    # Cloud-provider integration: choose provider flags based on which
    # cloud endpoint relation is ready.
    if is_state('endpoint.aws.ready'):
        kubelet_opts['cloud-provider'] = 'aws'
    elif is_state('endpoint.gcp.ready'):
        cloud_config_path = _cloud_config_path('kubelet')
        kubelet_opts['cloud-provider'] = 'gce'
        kubelet_opts['cloud-config'] = str(cloud_config_path)
    elif is_state('endpoint.openstack.ready'):
        cloud_config_path = _cloud_config_path('kubelet')
        kubelet_opts['cloud-provider'] = 'openstack'
        kubelet_opts['cloud-config'] = str(cloud_config_path)
    configure_kubernetes_service('kubelet', kubelet_opts, 'kubelet-extra-args')
def configure_kube_proxy(api_servers, cluster_cidr):
    # Assemble the kube-proxy snap option set and apply it.
    kube_proxy_opts = {}
    kube_proxy_opts['cluster-cidr'] = cluster_cidr
    kube_proxy_opts['kubeconfig'] = kubeproxyconfig_path
    kube_proxy_opts['logtostderr'] = 'true'
    kube_proxy_opts['v'] = '0'
    kube_proxy_opts['master'] = random.choice(api_servers)
    kube_proxy_opts['hostname-override'] = get_node_name()
    # Disable per-core conntrack sizing inside lxc — presumably because the
    # conntrack sysctls are not writable there; confirm.
    if b'lxc' in check_output('virt-what', shell=True):
        kube_proxy_opts['conntrack-max-per-core'] = '0'
    configure_kubernetes_service('kube-proxy', kube_proxy_opts,
                                 'proxy-extra-args')
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
                      user='ubuntu', context='juju-context',
                      cluster='juju-cluster', password=None, token=None):
    '''Create a configuration for Kubernetes based on path using the supplied
    arguments for values of the Kubernetes server, CA, key, certificate, user
    context and cluster.

    Exactly one auth mechanism must be supplied: key+certificate, password,
    or token (token and password are mutually exclusive).'''
    if not key and not certificate and not password and not token:
        raise ValueError('Missing authentication mechanism.')
    # token and password are mutually exclusive. Error early if both are
    # present. The developer has requested an impossible situation.
    # see: kubectl config set-credentials --help
    if token and password:
        raise ValueError('Token and Password are mutually exclusive.')
    # Create the config file with the address of the master server.
    cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
          '--server={2} --certificate-authority={3} --embed-certs=true'
    check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
    # Delete old users
    cmd = 'kubectl config --kubeconfig={0} unset users'
    check_call(split(cmd.format(kubeconfig)))
    # Create the credentials using the client flags.
    cmd = 'kubectl config --kubeconfig={0} ' \
          'set-credentials {1} '.format(kubeconfig, user)
    if key and certificate:
        cmd = '{0} --client-key={1} --client-certificate={2} '\
              '--embed-certs=true'.format(cmd, key, certificate)
    if password:
        cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
    # This is mutually exclusive from password. They will not work together.
    if token:
        cmd = "{0} --token={1}".format(cmd, token)
    check_call(split(cmd))
    # Create a default context with the cluster.
    cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
          '--cluster={2} --user={3}'
    check_call(split(cmd.format(kubeconfig, context, cluster, user)))
    # Make the config use this new context.
    cmd = 'kubectl config --kubeconfig={0} use-context {1}'
    check_call(split(cmd.format(kubeconfig, context)))
@when_any('config.changed.default-backend-image',
          'config.changed.ingress-ssl-chain-completion',
          'config.changed.nginx-image')
@when('kubernetes-worker.config.created')
def launch_default_ingress_controller():
    ''' Launch the Kubernetes ingress controller & default backend (404) '''
    config = hookenv.config()
    # need to test this in case we get in
    # here from a config change to the image
    if not config.get('ingress'):
        return
    context = {}
    context['arch'] = arch()
    addon_path = '/root/cdk/addons/{}'
    # Pick an architecture-appropriate default backend image unless the
    # operator pinned a specific one.
    context['defaultbackend_image'] = config.get('default-backend-image')
    if (context['defaultbackend_image'] == "" or
            context['defaultbackend_image'] == "auto"):
        if context['arch'] == 's390x':
            context['defaultbackend_image'] = \
                "k8s.gcr.io/defaultbackend-s390x:1.4"
        elif context['arch'] == 'arm64':
            context['defaultbackend_image'] = \
                "k8s.gcr.io/defaultbackend-arm64:1.4"
        else:
            context['defaultbackend_image'] = \
                "k8s.gcr.io/defaultbackend:1.4"
    # Render the default http backend (404) replicationcontroller manifest
    manifest = addon_path.format('default-http-backend.yaml')
    render('default-http-backend.yaml', manifest, context)
    hookenv.log('Creating the default http backend.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create default-http-backend. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return
    # Render the ingress daemon set controller manifest
    context['ssl_chain_completion'] = config.get(
        'ingress-ssl-chain-completion')
    context['ingress_image'] = config.get('nginx-image')
    if context['ingress_image'] == "" or context['ingress_image'] == "auto":
        images = {'amd64': 'quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.15.0',  # noqa
                  'arm64': 'quay.io/kubernetes-ingress-controller/nginx-ingress-controller-arm64:0.15.0',  # noqa
                  's390x': 'quay.io/kubernetes-ingress-controller/nginx-ingress-controller-s390x:0.15.0',  # noqa
                  'ppc64el': 'quay.io/kubernetes-ingress-controller/nginx-ingress-controller-ppc64le:0.15.0',  # noqa
                  }
        context['ingress_image'] = images.get(context['arch'], images['amd64'])
    # DaemonSet moved API groups in 1.9; pick the version kubelet supports.
    if get_version('kubelet') < (1, 9):
        context['daemonset_api_version'] = 'extensions/v1beta1'
    else:
        context['daemonset_api_version'] = 'apps/v1beta2'
    context['juju_application'] = hookenv.service_name()
    manifest = addon_path.format('ingress-daemon-set.yaml')
    render('ingress-daemon-set.yaml', manifest, context)
    hookenv.log('Creating the ingress daemon set.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create ingress controller. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return
    set_state('kubernetes-worker.ingress.available')
    # Expose the ingress ports now that the daemon set exists.
    hookenv.open_port(80)
    hookenv.open_port(443)
def restart_unit_services():
    '''Restart worker services.'''
    hookenv.log('Restarting kubelet and kube-proxy.')
    for worker_service in ('kube-proxy', 'kubelet'):
        service_restart('snap.%s.daemon' % worker_service)
def get_kube_api_servers(kube_api):
    '''Return the kubernetes api server address and port for this
    relationship.'''
    # One https URL per host advertised by every service on the relation.
    return ['https://{0}:{1}'.format(host['hostname'], host['port'])
            for service in kube_api.services()
            for host in service['hosts']]
def kubectl(*args):
    ''' Run a kubectl cli command with a config file. Returns stdout and throws
    an error if the command fails. '''
    command = ['kubectl', '--kubeconfig=' + kubeclientconfig_path, *args]
    hookenv.log('Executing {}'.format(command))
    return check_output(command)
def kubectl_success(*args):
    ''' Runs kubectl with the given args. Returns True if successful, False if
    not. '''
    try:
        kubectl(*args)
    except CalledProcessError:
        return False
    return True
def kubectl_manifest(operation, manifest):
    ''' Wrap the kubectl creation command when using filepath resources
    :param operation - one of get, create, delete, replace
    :param manifest - filepath to the manifest
    '''
    if operation == 'delete':
        # Deletions are a special case: ensure we immediately remove
        # requested resources with --now
        return kubectl_success(operation, '-f', manifest, '--now')
    if operation == 'create' and kubectl_success('get', '-f', manifest):
        # Guard against an error re-creating the same manifest multiple
        # times: if we already have the definition, its probably safe to
        # assume creation was true.
        hookenv.log('Skipping definition for {}'.format(manifest))
        return True
    # Execute the requested command that did not match any of the special
    # cases above
    return kubectl_success(operation, '-f', manifest)
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
    # First-time NRPE setup: record that initial configuration happened,
    # then delegate to the regular update handler.
    set_state('nrpe-external-master.initial-config')
    update_nrpe_config(nagios)
@when('kubernetes-worker.config.created')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
          'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
    # (Re)write nagios service checks for the worker's systemd units.
    services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()
    nrpe_setup = nrpe.NRPE(hostname=hostname)
    nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
    nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
    # The relation is gone; drop our checks and the initial-config marker.
    remove_state('nrpe-external-master.initial-config')
    # List of systemd services for which the checks will be removed
    services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
    # The current nrpe-external-master interface doesn't handle a lot of logic,
    # use the charm-helpers code for now.
    hostname = nrpe.get_nagios_hostname()
    nrpe_setup = nrpe.NRPE(hostname=hostname)
    for service in services:
        nrpe_setup.remove_check(shortname=service)
def set_privileged():
    """Return 'true' if privileged containers are needed.

    That is the case when a) the user requested them, or b) the user does
    not care ('auto') and GPUs are available in a pre-1.9 era.
    """
    requested = hookenv.config('allow-privileged').lower()
    gpu_needs_privileged = (is_state('kubernetes-worker.gpu.enabled') and
                            get_version('kubelet') < (1, 9))
    if requested == 'auto':
        requested = 'true' if gpu_needs_privileged else 'false'
    if requested == 'false' and gpu_needs_privileged:
        # GPUs cannot work without privileged containers here, so back out.
        disable_gpu()
        remove_state('kubernetes-worker.gpu.enabled')
        # No need to restart kubernetes (set the restart-needed state)
        # because set-privileged is already in the restart path
    return requested
@when('config.changed.allow-privileged')
@when('kubernetes-worker.config.created')
def on_config_allow_privileged_change():
    """React to changed 'allow-privileged' config value.
    """
    # The new value is picked up by set_privileged() during the restart.
    set_state('kubernetes-worker.restart-needed')
    remove_state('config.changed.allow-privileged')
@when('nvidia-docker.installed')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.gpu.enabled')
def enable_gpu():
    """Enable GPU usage on this node.
    """
    # DevicePlugins (required for GPU support here) need kubelet >= 1.9.
    if get_version('kubelet') < (1, 9):
        hookenv.status_set(
            'active',
            'Upgrade to snap channel >= 1.9/stable to enable GPU suppport.'
        )
        return
    hookenv.log('Enabling gpu mode')
    try:
        # Not sure why this is necessary, but if you don't run this, k8s will
        # think that the node has 0 gpus (as shown by the output of
        # `kubectl get nodes -o yaml`
        check_call(['nvidia-smi'])
    except CalledProcessError as cpe:
        hookenv.log('Unable to communicate with the NVIDIA driver.')
        hookenv.log(cpe)
        return
    # Apply node labels and schedule a kubelet restart so the
    # DevicePlugins feature gate takes effect.
    set_label('gpu', 'true')
    set_label('cuda', 'true')
    set_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when_not('nvidia-docker.installed')
@when_not('kubernetes-worker.restart-needed')
def nvidia_departed():
    """Cuda departed, probably due to the docker layer switching to a
    non nvidia-docker."""
    # Roll back GPU labels and restart so kubelet stops advertising GPUs.
    disable_gpu()
    remove_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')
def disable_gpu():
    """Disable GPU usage on this node.

    Raises ApplyNodeLabelFailed if the labels cannot be removed.
    """
    hookenv.log('Disabling gpu mode')
    # Remove node labels
    remove_label('gpu')
    remove_label('cuda')
@when('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_enabled(kube_control):
    """Notify kubernetes-master that we're gpu-enabled.
    """
    kube_control.set_gpu(True)
@when_not('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_not_enabled(kube_control):
    """Notify kubernetes-master that we're not gpu-enabled.
    """
    kube_control.set_gpu(False)
@when('kube-control.connected')
def request_kubelet_and_proxy_credentials(kube_control):
    """ Request kubelet node authorization with a well formed kubelet user.
    This also implies that we are requesting kube-proxy auth. """
    # The kube-control interface is created to support RBAC.
    # At this point we might as well do the right thing and return the hostname
    # even if it will only be used when we enable RBAC
    nodeuser = 'system:node:{}'.format(get_node_name().lower())
    kube_control.set_auth_request(nodeuser)
@when('kube-control.connected')
def catch_change_in_creds(kube_control):
    """Request a service restart in case credential updates were detected."""
    nodeuser = 'system:node:{}'.format(get_node_name().lower())
    creds = kube_control.get_auth_credentials(nodeuser)
    # Only act on credentials that are actually ours.
    if creds and creds['user'] == nodeuser:
        # We need to cache the credentials here because if the
        # master changes (master leader dies and replaced by a new one)
        # the new master will have no recollection of our certs.
        db.set('credentials', creds)
        set_state('worker.auth.bootstrapped')
        if data_changed('kube-control.creds', creds):
            set_state('kubernetes-worker.restart-needed')
@when_not('kube-control.connected')
def missing_kube_control():
    """Inform the operator they need to add the kube-control relation.

    If deploying via bundle this won't happen, but if the operator is
    upgrading a charm in a deployment that pre-dates the kube-control
    relation, it'll be missing.
    """
    try:
        goal_state = hookenv.goal_state()
    except NotImplementedError:
        # Older juju without goal-state support.
        goal_state = {}
    if 'kube-control' in goal_state.get('relations', {}):
        # Relation is planned but not yet joined — just wait.
        status, msg = ('waiting',
                       'Waiting for kubernetes-master to become ready')
    else:
        status, msg = ('blocked',
                       'Relate {}:kube-control kubernetes-master:kube-control'
                       .format(hookenv.service_name()))
    hookenv.status_set(status, msg)
@when('docker.ready')
def fix_iptables_for_docker_1_13():
    """ Fix iptables FORWARD policy for Docker >=1.13
    https://github.com/kubernetes/kubernetes/issues/40182
    https://github.com/kubernetes/kubernetes/issues/39823
    """
    # -w 300: wait up to 300s for the xtables lock instead of failing fast.
    cmd = ['iptables', '-w', '300', '-P', 'FORWARD', 'ACCEPT']
    check_call(cmd)
def _systemctl_is_active(application):
''' Poll systemctl to determine if the application is running '''
cmd = ['systemctl', 'is-active', application]
try:
raw = check_output(cmd)
return b'active' in raw
except Exception:
return False
def get_node_name():
    """Return the name this node registers with, provider dependent."""
    extra_args = parse_extra_args('kubelet-extra-args')
    cloud_provider = extra_args.get('cloud-provider', '')
    # A ready cloud endpoint overrides whatever the operator configured.
    for flag, provider in (('endpoint.aws.ready', 'aws'),
                           ('endpoint.gcp.ready', 'gce'),
                           ('endpoint.openstack.ready', 'openstack')):
        if is_state(flag):
            cloud_provider = provider
            break
    # AWS registers nodes by FQDN; everything else uses the short hostname.
    if cloud_provider == 'aws':
        return getfqdn().lower()
    return gethostname().lower()
class ApplyNodeLabelFailed(Exception):
    """Raised when applying/removing a node label still fails after the
    retry deadline (see set_label / remove_label)."""
    pass
def persistent_call(cmd, retry_message):
    """Run *cmd* until it exits 0, retrying for up to 3 minutes.

    Logs *retry_message* after every failed attempt.

    :param cmd: argv list passed to subprocess.call
    :param retry_message: text logged between attempts
    :return: True on success, False once the deadline expires.
    """
    give_up_at = time.time() + 180
    while time.time() < give_up_at:
        if subprocess.call(cmd) == 0:
            return True
        hookenv.log(retry_message)
        time.sleep(1)
    return False
def set_label(label, value):
    """Apply ``label=value`` to this node via kubectl, retrying on failure.

    :raises ApplyNodeLabelFailed: when the retry deadline is exceeded.
    """
    node = get_node_name()
    command = ('kubectl --kubeconfig={0} label node {1} {2}={3} --overwrite'
               .format(kubeconfig_path, node, label, value)).split()
    retry = 'Failed to apply label %s=%s. Will retry.' % (label, value)
    if not persistent_call(command, retry):
        raise ApplyNodeLabelFailed(retry)
def remove_label(label):
    """Remove *label* from this node via kubectl, retrying on failure.

    :raises ApplyNodeLabelFailed: when the retry deadline is exceeded.
    """
    node = get_node_name()
    # Trailing '-' is kubectl syntax for "delete this label".
    command = ('kubectl --kubeconfig={0} label node {1} {2}-'
               .format(kubeconfig_path, node, label)).split()
    retry = 'Failed to remove label {0}. Will retry.'.format(label)
    if not persistent_call(command, retry):
        raise ApplyNodeLabelFailed(retry)
@when_any('endpoint.aws.joined',
          'endpoint.gcp.joined')
@when('kube-control.cluster_tag.available')
@when_not('kubernetes-worker.cloud-request-sent')
def request_integration():
    """Ask the joined cloud integrator charm to tag/label this instance.

    The cluster tag comes from kubernetes-master over kube-control so all
    cluster resources share a single identifier.
    """
    hookenv.status_set('maintenance', 'requesting cloud integration')
    kube_control = endpoint_from_flag('kube-control.cluster_tag.available')
    cluster_tag = kube_control.get_cluster_tag()
    if is_state('endpoint.aws.joined'):
        cloud = endpoint_from_flag('endpoint.aws.joined')
        # 'owned' marks resources as belonging exclusively to this cluster.
        cloud.tag_instance({
            'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
        })
        cloud.tag_instance_security_group({
            'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
        })
        cloud.tag_instance_subnet({
            'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
        })
        cloud.enable_object_storage_management(['kubernetes-*'])
    elif is_state('endpoint.gcp.joined'):
        cloud = endpoint_from_flag('endpoint.gcp.joined')
        cloud.label_instance({
            'k8s-io-cluster-name': cluster_tag,
        })
        cloud.enable_object_storage_management()
        cloud.enable_instance_inspection()
        cloud.enable_dns_management()
    set_state('kubernetes-worker.cloud-request-sent')
    hookenv.status_set('waiting', 'waiting for cloud integration')
@when_none('endpoint.aws.joined',
           'endpoint.gcp.joined')
def clear_requested_integration():
    """Reset the cloud-request flag once no cloud endpoint remains joined,
    so a future join triggers request_integration again."""
    remove_state('kubernetes-worker.cloud-request-sent')
@when_any('endpoint.aws.ready',
          'endpoint.gcp.ready',
          'endpoint.openstack.ready')
@when_not('kubernetes-worker.restarted-for-cloud')
def restart_for_cloud():
    """Write provider-specific kubelet snap config and schedule a restart.

    Runs once per unit (guarded by the restarted-for-cloud flag). AWS needs
    no extra config file, so only gcp/openstack write one here.
    """
    if is_state('endpoint.gcp.ready'):
        _write_gcp_snap_config('kubelet')
    elif is_state('endpoint.openstack.ready'):
        _write_openstack_snap_config('kubelet')
    set_state('kubernetes-worker.restarted-for-cloud')
    set_state('kubernetes-worker.restart-needed')
def _snap_common_path(component):
return Path('/var/snap/{}/common'.format(component))
def _cloud_config_path(component):
return _snap_common_path(component) / 'cloud-config.conf'
def _gcp_creds_path(component):
return _snap_common_path(component) / 'gcp-creds.json'
def _daemon_env_path(component):
return _snap_common_path(component) / 'environment'
def _write_gcp_snap_config(component):
    """Write GCP credentials, cloud-config and daemon env for *component*.

    gcp requires additional credentials setup beyond the cloud-config file.
    """
    gcp = endpoint_from_flag('endpoint.gcp.ready')
    creds_path = _gcp_creds_path(component)
    with creds_path.open('w') as fp:
        # Credentials are secret; restrict the file mode before writing them.
        os.fchmod(fp.fileno(), 0o600)
        fp.write(gcp.credentials)

    # create a cloud-config file that sets token-url to nil to make the
    # services use the creds env var instead of the metadata server, as
    # well as making the cluster multizone
    cloud_config_path = _cloud_config_path(component)
    cloud_config_path.write_text('[Global]\n'
                                 'token-url = nil\n'
                                 'multizone = true\n')

    # Append the creds env var to the daemon environment file, keeping any
    # existing content (idempotent: skipped when the key is already there).
    daemon_env_path = _daemon_env_path(component)
    if daemon_env_path.exists():
        daemon_env = daemon_env_path.read_text()
        if not daemon_env.endswith('\n'):
            daemon_env += '\n'
    else:
        daemon_env = ''
    # gcp_creds_env_key is a module-level constant defined elsewhere in this
    # file -- presumably the env var name the GCP SDK reads; confirm at its
    # definition.
    if gcp_creds_env_key not in daemon_env:
        daemon_env += '{}={}\n'.format(gcp_creds_env_key, creds_path)
        daemon_env_path.parent.mkdir(parents=True, exist_ok=True)
        daemon_env_path.write_text(daemon_env)
def _write_openstack_snap_config(component):
    """Render the OpenStack cloud-config file for *component*.

    openstack requires additional credentials setup; unlike GCP, the
    credentials go directly into the cloud-config file.
    """
    openstack = endpoint_from_flag('endpoint.openstack.ready')
    cloud_config_path = _cloud_config_path(component)
    cloud_config_path.write_text('\n'.join([
        '[Global]',
        'auth-url = {}'.format(openstack.auth_url),
        'username = {}'.format(openstack.username),
        'password = {}'.format(openstack.password),
        'tenant-name = {}'.format(openstack.project_name),
        'domain-name = {}'.format(openstack.user_domain_name),
    ]))
def get_first_mount(mount_relation):
    """Return the first NFS mount entry from the mount relation, or None.

    :param mount_relation: relation endpoint whose ``mounts()`` returns a
        list of mount-layer relation data dicts (or None/empty).
    :return: the first entry of the first relation advertising an ``nfs``
        fstype, or None when no such mount exists.
    """
    # ``or []`` guards against mounts() returning None (the original
    # ``if list and len(list) > 0`` check folded into the loop).
    for mount in mount_relation.mounts() or []:
        mounts = mount.get('mounts')
        # The nfs charm only supports a single mount per relation for now,
        # so only the first entry of each relation is considered.  The
        # truthiness check also avoids an IndexError on an empty
        # 'mounts' list (a latent bug in the previous version).
        if mounts and mounts[0]['fstype'] == 'nfs':
            return mounts[0]
    return None
@when('nfs.available')
def nfs_state_control(mount):
    """Drop the ``nfs.configured`` state when the relation data changed, so
    the nfs-relation-changed handling re-renders and re-applies the configs."""
    mount_data = get_first_mount(mount)
    if not mount_data:
        return
    # Key order kept stable so data_changed's hash stays comparable.
    nfs_relation_data = {
        'options': mount_data['options'],
        'host': mount_data['hostname'],
        'mountpoint': mount_data['mountpoint'],
        'fstype': mount_data['fstype'],
    }
    # Re-execute the rendering only when the relation data actually changed.
    if data_changed('nfs-config', nfs_relation_data):
        hookenv.log('reconfiguring nfs')
        remove_state('nfs.configured')
@when('nfs.available')
@when_not('nfs.configured')
def nfs_storage(mount):
    '''NFS on kubernetes requires nfs config rendered into a deployment of
    the nfs client provisioner. That will handle the persistent volume claims
    with no persistent volume to back them.'''
    mount_data = get_first_mount(mount)
    if not mount_data:
        # Relation data incomplete; wait for a usable nfs mount.
        return

    addon_path = '/root/cdk/addons/{}'
    # Render the NFS deployment
    manifest = addon_path.format('nfs-provisioner.yaml')
    render('nfs-provisioner.yaml', manifest, mount_data)
    hookenv.log('Creating the nfs provisioner.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        # Best effort: log and retry on the next update instead of failing
        # the hook; nfs.configured stays unset so this handler re-runs.
        hookenv.log(e)
        hookenv.log('Failed to create nfs provisioner. Will attempt again next update.')  # noqa
        return

    set_state('nfs.configured')
| |
#!/usr/bin/python
#
# Copyright (c) 2014, 2015 NORDUnet A/S
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. Neither the name of the NORDUnet nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author : Fredrik Thulin <fredrik@thulin.net>
#
import datetime
import logging
from typing import Mapping, Optional, Sequence
import saml2.server
import saml2.time_util
from bson import ObjectId
from saml2.authn_context import PASSWORDPROTECTEDTRANSPORT
from saml2.s_utils import UnravelError
from werkzeug.exceptions import BadRequest, Forbidden
from eduid_common.misc.timeutil import utc_now
from eduid_common.session.logindata import ExternalMfaData, SSOLoginData
from eduid_userdb.credentials import METHOD_SWAMID_AL2_MFA, METHOD_SWAMID_AL2_MFA_HI, U2F, Credential, Password
from eduid_userdb.idp import IdPUser
from eduid_userdb.nin import Nin, NinList
from eduid_webapp.idp.idp_authn import AuthnData
from eduid_webapp.idp.idp_saml import IdP_SAMLRequest
from eduid_webapp.idp.login import SSO
from eduid_webapp.idp.sso_session import SSOSession
from eduid_webapp.idp.tests.test_app import IdPTests
from eduid_webapp.idp.util import b64encode
# SWAMID assurance / authentication URIs the IdP can assert.
SWAMID_AL1 = 'http://www.swamid.se/policy/assurance/al1'
SWAMID_AL2 = 'http://www.swamid.se/policy/assurance/al2'
SWAMID_AL2_MFA_HI = 'http://www.swamid.se/policy/authentication/swamid-al2-mfa-hi'

# Shorthand for the authn context class refs requested in the tests below.
cc = {
    'REFEDS_MFA': 'https://refeds.org/profile/mfa',
    'REFEDS_SFA': 'https://refeds.org/profile/sfa',
    'EDUID_MFA': 'https://eduid.se/specs/mfa',
    'FIDO_U2F': 'https://www.swamid.se/specs/id-fido-u2f-ce-transports',
    'PASSWORD_PT': 'urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport',
}

# A plain, unverified U2F test credential.
_U2F = U2F(version='U2F_V2', app_id='unit test', keyhandle='firstU2FElement', public_key='foo')

# U2F credential proofed at SWAMID AL2.
_U2F_SWAMID_AL2 = U2F(
    version='U2F_V2',
    app_id='unit test',
    keyhandle='U2F SWAMID AL2',
    public_key='foo',
    is_verified=True,
    proofing_method=METHOD_SWAMID_AL2_MFA,
    proofing_version='testing',
)

# U2F credential proofed at SWAMID AL2 MFA-HI (the higher assurance profile).
_U2F_SWAMID_AL2_HI = U2F(
    version='U2F_V2',
    app_id='unit test',
    keyhandle='U2F SWAMID AL2 HI',
    public_key='foo',
    is_verified=True,
    proofing_method=METHOD_SWAMID_AL2_MFA_HI,
    proofing_version='testing',
)
def make_SAML_request(class_ref):
    """Return a base64-encoded (POST binding) AuthnRequest XML document
    requesting the authentication context *class_ref*."""
    return _transport_encode(
        '''
        <?xml version="1.0" encoding="UTF-8"?>
        <ns0:AuthnRequest xmlns:ns0="urn:oasis:names:tc:SAML:2.0:protocol"
           xmlns:ns1="urn:oasis:names:tc:SAML:2.0:assertion"
           AssertionConsumerServiceURL="https://sp.example.edu/saml2/acs/"
           Destination="https://unittest-idp.example.edu/sso/post"
           ID="id-57beb2b2f788ec50b10541dbe48e9626"
           IssueInstant="{now!s}"
           ProtocolBinding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST"
           Version="2.0">
          <ns1:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">https://sp.example.edu/saml2/metadata/</ns1:Issuer>
          <ns0:NameIDPolicy AllowCreate="false" Format="urn:oasis:names:tc:SAML:2.0:nameid-format:persistent"/>
          <ns0:RequestedAuthnContext>
            <ns1:AuthnContextClassRef>{class_ref!s}</ns1:AuthnContextClassRef>
          </ns0:RequestedAuthnContext>
        </ns0:AuthnRequest>
        '''.format(
            class_ref=class_ref, now=saml2.time_util.instant()
        )
    )
def _transport_encode(data):
    """Base64-encode *data* with all newlines removed.

    encode('base64') only works for POST bindings; the redirect binding
    additionally uses zlib compression, which this helper does not do.
    """
    return b64encode(data.replace('\n', ''))
class SSOIdPTests(IdPTests):
    """Base class with helpers to build SSO login tickets for IdP tests."""

    def _make_login_ticket(self, req_class_ref, key=None) -> SSOLoginData:
        # Build a SAMLRequest asking for the given authn context class ref
        # and wrap it in the SSOLoginData "ticket" the IdP code expects.
        xmlstr = make_SAML_request(class_ref=req_class_ref)
        info = {'SAMLRequest': xmlstr}
        binding = 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST'
        if key is None:
            key = 'unique-key-for-request-1'
        saml_req = self._parse_SAMLRequest(
            info,
            binding,
            self.app.logger,
            self.app.IDP,
            BadRequest,
            self.app.conf.debug,
            self.app.conf.verify_request_signatures,
        )
        # context.idp.parse_authn_request(xmlstr, binding)
        ticket = SSOLoginData(key, xmlstr, binding)
        ticket.saml_req = saml_req
        return ticket

    def _parse_SAMLRequest(
        self,
        info: Mapping,
        binding: str,
        logger: logging.Logger,
        idp: saml2.server.Server,
        bad_request,
        debug: bool = False,
        verify_request_signatures: bool = True,
    ) -> IdP_SAMLRequest:
        """
        Parse a SAMLRequest query parameter (base64 encoded) into an AuthnRequest
        instance.

        If the SAMLRequest is signed, the signature is validated and a BadRequest()
        returned on failure.

        :param info: dict with keys 'SAMLRequest' and possibly 'SigAlg' and 'Signature'
        :param binding: SAML binding
        :returns: pysaml2 interface class IdP_SAMLRequest
        :raise: BadRequest if request signature validation fails
        """
        try:
            saml_req = IdP_SAMLRequest(info['SAMLRequest'], binding, idp, logger, debug=debug)
        except UnravelError:
            raise bad_request('No valid SAMLRequest found', logger=logger)
        except ValueError:
            raise bad_request('No valid SAMLRequest found', logger=logger)

        if 'SigAlg' in info and 'Signature' in info:  # Signed request
            if verify_request_signatures:
                if not saml_req.verify_signature(info['SigAlg'], info['Signature']):
                    raise bad_request('SAML request signature verification failure', logger=logger)
            else:
                logger.debug('Ignoring existing request signature, verify_request_signature is False')
        else:
            # XXX check if metadata says request should be signed ???
            # Leif says requests are typically not signed, and that verifying signatures
            # on SAML requests is considered a possible DoS attack vector, so it is typically
            # not done.
            # XXX implement configuration flag to disable signature verification
            logger.debug('No signature in SAMLRequest')
        return saml_req
class TestSSO(SSOIdPTests):
    """Tests for the authn-context / assurance logic in
    SSO._get_login_response_authn."""

    # ------------------------------------------------------------------------
    def get_user_set_nins(self, eppn: str, nins: Sequence[str]) -> IdPUser:
        """
        Fetch a user from the user database and set it's NINs to those in nins.

        :param eppn: eduPersonPrincipalName or email address
        :param nins: List of NINs to configure user with (all verified)

        :return: IdPUser instance
        """
        user = self.app.userdb.lookup_user(eppn)
        user.nins = NinList(nins=[])
        for number in nins:
            this_nin = Nin(
                number=number,
                created_by='unittest',
                created_ts=utc_now(),
                is_verified=True,
                # First NIN added becomes the primary one.
                is_primary=user.nins.primary is None,
            )
            user.nins.add(this_nin)
        return user

    # ------------------------------------------------------------------------
    def _get_login_response_authn(self, req_class_ref: str, credentials, user: Optional[IdPUser] = None):
        # Drive SSO._get_login_response_authn with a fabricated SSO session.
        # ``credentials`` may mix the markers 'pw'/'u2f', Credential
        # instances, AuthnData objects and ExternalMfaData objects.
        if user is None:
            user = self.get_user_set_nins(self.test_user.eppn, [])

        ticket = self._make_login_ticket(req_class_ref)

        sso_session_1 = SSOSession(
            user_id=user.user_id,
            authn_request_id='some-unique-id-1',
            authn_credentials=[],
            idp_user=user,
            eppn=user.eppn,
        )

        if 'u2f' in credentials and not user.credentials.filter(U2F).to_list():
            # add a U2F credential to the user
            user.credentials.add(_U2F)

        for this in credentials:
            # Translate the string markers into the user's actual credentials.
            if this == 'pw':
                this = user.credentials.filter(Password).to_list()[0]
            elif this == 'u2f':
                this = user.credentials.filter(U2F).to_list()[0]
            if isinstance(this, AuthnData):
                sso_session_1.add_authn_credential(this)
            elif isinstance(this, ExternalMfaData):
                sso_session_1.external_mfa = this
            elif isinstance(this, Credential):
                data = AuthnData(this.key)
                sso_session_1.add_authn_credential(data)
            else:
                raise ValueError(f'Unhandled test data: {repr(this)}')

        with self.app.app_context():
            _SSO = SSO(sso_session_1)
            return _SSO._get_login_response_authn(ticket, user)

    # ------------------------------------------------------------------------
    def test__get_login_response_1(self):
        """
        Test login with password and SWAMID AL2-HI U2F, request REFEDS MFA.

        Expect the response Authn to be REFEDS MFA, and assurance attribute to include SWAMID MFA HI.
        """
        user = self.get_user_set_nins(self.test_user.eppn, ['190101011234'])
        user.credentials.add(_U2F_SWAMID_AL2_HI)
        out = self._get_login_response_authn(
            user=user, req_class_ref=cc['REFEDS_MFA'], credentials=['pw', _U2F_SWAMID_AL2_HI],
        )
        self.assertEqual(out.class_ref, cc['REFEDS_MFA'])
        self.assertEqual(out.authn_attributes['eduPersonAssurance'], [SWAMID_AL1, SWAMID_AL2, SWAMID_AL2_MFA_HI])

    def test__get_login_response_2(self):
        """
        Test login with password and SWAMID AL2 U2F, request REFEDS MFA.

        Expect the response Authn to be REFEDS MFA.
        """
        user = self.get_user_set_nins(self.test_user.eppn, ['190101011234'])
        user.credentials.add(_U2F_SWAMID_AL2)
        out = self._get_login_response_authn(
            user=user, req_class_ref=cc['REFEDS_MFA'], credentials=['pw', _U2F_SWAMID_AL2],
        )
        self.assertEqual(out.class_ref, cc['REFEDS_MFA'])
        self.assertEqual(out.authn_attributes['eduPersonAssurance'], [SWAMID_AL1, SWAMID_AL2])

    def test__get_login_response_wrong_multifactor(self):
        """
        Test login with password and non-SWAMID-AL2 U2F, request REFEDS MFA.

        Expect a failure because a self-registered U2F token is not acceptable as REFEDS MFA.
        """
        with self.assertRaises(Forbidden):
            self._get_login_response_authn(
                req_class_ref=cc['REFEDS_MFA'], credentials=['pw', 'u2f'],
            )

    def test__get_login_response_external_multifactor(self):
        """
        Test login with password and and external MFA, request REFEDS MFA.

        Expect the response Authn to be REFEDS MFA and assurance attribute to include SWAMID MFA HI.
        """
        user = self.get_user_set_nins(self.test_user.eppn, ['190101011234'])
        external_mfa = ExternalMfaData(
            issuer='issuer.example.com',
            authn_context='http://id.elegnamnden.se/loa/1.0/loa3',
            timestamp=datetime.datetime.utcnow(),
        )
        out = self._get_login_response_authn(
            user=user, req_class_ref=cc['REFEDS_MFA'], credentials=['pw', external_mfa],
        )
        self.assertEqual(out.class_ref, cc['REFEDS_MFA'])
        self.assertEqual(out.authn_attributes['eduPersonAssurance'], [SWAMID_AL1, SWAMID_AL2, SWAMID_AL2_MFA_HI])

    def test__get_login_response_3(self):
        """
        Test login with password and U2F, request REFEDS SFA.

        Expect the response Authn to be REFEDS SFA.
        """
        out = self._get_login_response_authn(req_class_ref=cc['REFEDS_SFA'], credentials=['pw', 'u2f'],)
        self.assertEqual(out.class_ref, cc['REFEDS_SFA'])

    def test__get_login_response_4(self):
        """
        Test login with password, request REFEDS SFA.

        Expect the response Authn to be REFEDS SFA.
        """
        out = self._get_login_response_authn(req_class_ref=cc['REFEDS_SFA'], credentials=['pw'],)
        self.assertEqual(out.class_ref, cc['REFEDS_SFA'])

    def test__get_login_response_UNSPECIFIED2(self):
        """
        Test login with U2F, request REFEDS SFA.

        Expect the response Authn to be REFEDS SFA.
        """
        out = self._get_login_response_authn(req_class_ref=cc['REFEDS_SFA'], credentials=['u2f'],)
        self.assertEqual(out.class_ref, cc['REFEDS_SFA'])

    def test__get_login_response_5(self):
        """
        Test login with password and U2F, request FIDO U2F.

        Expect the response Authn to be FIDO U2F.
        """
        out = self._get_login_response_authn(req_class_ref=cc['FIDO_U2F'], credentials=['pw', 'u2f'],)
        self.assertEqual(out.class_ref, cc['FIDO_U2F'])

    def test__get_login_response_6(self):
        """
        Test login with password and U2F, request plain password-protected-transport.

        Expect the response Authn to be password-protected-transport.
        """
        out = self._get_login_response_authn(req_class_ref=PASSWORDPROTECTEDTRANSPORT, credentials=['pw', 'u2f'],)
        self.assertEqual(out.class_ref, PASSWORDPROTECTEDTRANSPORT)

    def test__get_login_response_7(self):
        """
        Test login with password, request plain password-protected-transport.

        Expect the response Authn to be password-protected-transport.
        """
        out = self._get_login_response_authn(req_class_ref=PASSWORDPROTECTEDTRANSPORT, credentials=['pw'],)
        self.assertEqual(out.class_ref, PASSWORDPROTECTEDTRANSPORT)

    def test__get_login_response_8(self):
        """
        Test login with password, request unknown context class.

        Expect the response Authn to be FIDO U2F.
        """
        out = self._get_login_response_authn(req_class_ref='urn:no-such-class', credentials=['pw', 'u2f'],)
        self.assertEqual(out.class_ref, cc['FIDO_U2F'])

    def test__get_login_response_9(self):
        """
        Test login with password, request unknown context class.

        Expect the response Authn to be password-protected-transport.
        """
        out = self._get_login_response_authn(req_class_ref='urn:no-such-class', credentials=['pw'],)
        self.assertEqual(out.class_ref, PASSWORDPROTECTEDTRANSPORT)

    def test__get_login_response_assurance_AL1(self):
        """
        Make sure eduPersonAssurace is SWAMID AL1 with no verified nin.
        """
        out = self._get_login_response_authn(req_class_ref='urn:no-such-class', credentials=['pw'],)
        self.assertEqual(out.authn_attributes['eduPersonAssurance'], [SWAMID_AL1])

    def test__get_login_response_assurance_AL2(self):
        """
        Make sure eduPersonAssurace is SWAMID AL2 with a verified nin.
        """
        user = self.get_user_set_nins(self.test_user.eppn, ['190101011234'])
        out = self._get_login_response_authn(user=user, req_class_ref='urn:no-such-class', credentials=['pw'],)
        self.assertEqual(out.authn_attributes['eduPersonAssurance'], [SWAMID_AL1, SWAMID_AL2])

    def test__get_login_eduid_mfa_fido_al1(self):
        """
        Test login with password and fido for not verified user, request EDUID_MFA.

        Expect the response Authn to be EDUID_MFA, eduPersonAssurance AL1
        """
        out = self._get_login_response_authn(req_class_ref=cc['EDUID_MFA'], credentials=['pw', 'u2f'],)
        self.assertEqual(out.class_ref, cc['EDUID_MFA'])
        self.assertEqual(out.authn_attributes['eduPersonAssurance'], [SWAMID_AL1])

    def test__get_login_eduid_mfa_fido_al2(self):
        """
        Test login with password and fido for verified user, request EDUID_MFA.

        Expect the response Authn to be EDUID_MFA, eduPersonAssurance AL1,Al2
        """
        user = self.get_user_set_nins(self.test_user.eppn, ['190101011234'])
        user.credentials.add(_U2F)
        out = self._get_login_response_authn(user=user, req_class_ref=cc['EDUID_MFA'], credentials=['pw', _U2F],)
        self.assertEqual(out.class_ref, cc['EDUID_MFA'])
        self.assertEqual(out.authn_attributes['eduPersonAssurance'], [SWAMID_AL1, SWAMID_AL2])

    def test__get_login_eduid_mfa_fido_swamid_al2(self):
        """
        Test login with password and fido_swamid_al2 for verified user, request EDUID_MFA.

        Expect the response Authn to be EDUID_MFA, eduPersonAssurance AL1,Al2
        """
        user = self.get_user_set_nins(self.test_user.eppn, ['190101011234'])
        user.credentials.add(_U2F_SWAMID_AL2)
        out = self._get_login_response_authn(
            user=user, req_class_ref=cc['EDUID_MFA'], credentials=['pw', _U2F_SWAMID_AL2],
        )
        self.assertEqual(out.class_ref, cc['EDUID_MFA'])
        self.assertEqual(out.authn_attributes['eduPersonAssurance'], [SWAMID_AL1, SWAMID_AL2])

    def test__get_login_eduid_mfa_fido_swamid_al2_hi(self):
        """
        Test login with password and fido_swamid_al2_hi for verified user, request EDUID_MFA.

        Expect the response Authn to be EDUID_MFA, eduPersonAssurance AL1,Al2
        """
        user = self.get_user_set_nins(self.test_user.eppn, ['190101011234'])
        user.credentials.add(_U2F_SWAMID_AL2_HI)
        out = self._get_login_response_authn(
            user=user, req_class_ref=cc['EDUID_MFA'], credentials=['pw', _U2F_SWAMID_AL2_HI],
        )
        self.assertEqual(out.class_ref, cc['EDUID_MFA'])
        self.assertEqual(out.authn_attributes['eduPersonAssurance'], [SWAMID_AL1, SWAMID_AL2])

    def test__get_login_eduid_mfa_external_mfa_al2(self):
        """
        Test login with password and external mfa for verified user, request EDUID_MFA.

        Expect the response Authn to be EDUID_MFA.
        """
        user = self.get_user_set_nins(self.test_user.eppn, ['190101011234'])
        external_mfa = ExternalMfaData(
            issuer='issuer.example.com',
            authn_context='http://id.elegnamnden.se/loa/1.0/loa3',
            timestamp=datetime.datetime.utcnow(),
        )
        out = self._get_login_response_authn(
            user=user, req_class_ref=cc['EDUID_MFA'], credentials=['pw', external_mfa],
        )
        self.assertEqual(out.class_ref, cc['EDUID_MFA'])
        self.assertEqual(out.authn_attributes['eduPersonAssurance'], [SWAMID_AL1, SWAMID_AL2])

    def test__get_login_response_eduid_mfa_no_multifactor(self):
        """
        Test login with password, request EDUID_MFA.

        Expect a failure because MFA is needed for EDUID_MFA.
        """
        with self.assertRaises(Forbidden):
            self._get_login_response_authn(req_class_ref=cc['EDUID_MFA'], credentials=['pw'])
| |
from binascii import hexlify, unhexlify
from hashlib import md5, sha1, sha256
from ..exceptions import SSLError
SSLContext = None
HAS_SNI = False
create_default_context = None
import errno
import ssl
try:  # Test for SSL features
    from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
    from ssl import HAS_SNI  # Has SNI?
except ImportError:
    pass

try:
    # The OP_NO_* flags are only exported on newer ssl modules.
    from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION
except ImportError:
    # Fall back to OpenSSL's raw numeric option values on older Pythons.
    OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
    OP_NO_COMPRESSION = 0x20000

try:
    # Reuse the stdlib's default cipher list when it is available.
    from ssl import _DEFAULT_CIPHERS
except ImportError:
    _DEFAULT_CIPHERS = (
        'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'
        'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:!aNULL:'
        '!eNULL:!MD5'
    )
try:
    from ssl import SSLContext  # Modern SSL?
except ImportError:
    import sys

    class SSLContext(object):  # Platform-specific: Python 2 & 3.1
        """Minimal stand-in for ssl.SSLContext on Pythons without one.

        Records the settings callers make and applies them through the
        legacy ``ssl.wrap_socket`` call in :meth:`wrap_socket`.
        """
        # Python 2.6's ssl.wrap_socket has no ``ciphers`` argument.
        supports_set_ciphers = sys.version_info >= (2, 7)

        def __init__(self, protocol_version):
            self.protocol = protocol_version
            # Use default values from a real SSLContext
            self.check_hostname = False
            self.verify_mode = ssl.CERT_NONE
            self.ca_certs = None
            self.options = 0
            self.certfile = None
            self.keyfile = None
            self.ciphers = None

        def load_cert_chain(self, certfile, keyfile):
            self.certfile = certfile
            self.keyfile = keyfile

        def load_verify_locations(self, location):
            self.ca_certs = location

        def set_ciphers(self, cipher_suite):
            if not self.supports_set_ciphers:
                raise TypeError(
                    'Your version of Python does not support setting '
                    'a custom cipher suite. Please upgrade to Python '
                    '2.7, 3.2, or later if you need this functionality.'
                )
            self.ciphers = cipher_suite

        def wrap_socket(self, socket, server_hostname=None):
            # server_hostname is accepted for API parity with the real
            # SSLContext, but legacy wrap_socket cannot send SNI, so it
            # is ignored here.
            kwargs = {
                'keyfile': self.keyfile,
                'certfile': self.certfile,
                'ca_certs': self.ca_certs,
                'cert_reqs': self.verify_mode,
                'ssl_version': self.protocol,
            }
            if self.supports_set_ciphers:  # Platform-specific: Python 2.7+
                return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
            else:  # Platform-specific: Python 2.6
                return wrap_socket(socket, **kwargs)
def assert_fingerprint(cert, fingerprint):
    """
    Checks if given fingerprint matches the supplied certificate.

    :param cert:
        Certificate as bytes object.
    :param fingerprint:
        Fingerprint as string of hexdigits, can be interspersed by colons.
    :raises SSLError: on malformed fingerprint or mismatch.
    """
    # Digest length in bytes -> hash function producing that digest.
    digest_to_hashfunc = {16: md5, 20: sha1, 32: sha256}

    normalized = fingerprint.replace(':', '').lower()
    n_bytes, remainder = divmod(len(normalized), 2)
    if remainder or n_bytes not in digest_to_hashfunc:
        raise SSLError('Fingerprint is of invalid length.')

    # We need encode() here for py32; works on py2 and p33.
    expected = unhexlify(normalized.encode())
    actual = digest_to_hashfunc[n_bytes](cert).digest()

    if actual != expected:
        raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
                       .format(hexlify(expected), hexlify(actual)))
def resolve_cert_reqs(candidate):
    """
    Resolves the argument to a numeric constant, which can be passed to
    the wrap_socket function/method from the ssl module.
    Defaults to :data:`ssl.CERT_NONE`.
    If given a string it is assumed to be the name of the constant in the
    :mod:`ssl` module or its abbreviation.
    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.
    If it's neither `None` nor a string we assume it is already the numeric
    constant which can directly be passed to wrap_socket.
    """
    if candidate is None:
        return CERT_NONE
    if not isinstance(candidate, str):
        # Already a numeric constant; pass it through untouched.
        return candidate
    # Try the exact name first, then with the 'CERT_' prefix prepended.
    resolved = getattr(ssl, candidate, None)
    if resolved is None:
        resolved = getattr(ssl, 'CERT_' + candidate)
    return resolved
def resolve_ssl_version(candidate):
    """
    like resolve_cert_reqs

    Resolves None to PROTOCOL_SSLv23 and strings to the matching
    ssl.PROTOCOL_* constant; numeric values pass through unchanged.
    """
    if candidate is None:
        return PROTOCOL_SSLv23
    if not isinstance(candidate, str):
        return candidate
    # Exact name first, then with the 'PROTOCOL_' prefix prepended.
    resolved = getattr(ssl, candidate, None)
    if resolved is None:
        resolved = getattr(ssl, 'PROTOCOL_' + candidate)
    return resolved
def create_urllib3_context(ssl_version=None, cert_reqs=ssl.CERT_REQUIRED,
options=None, ciphers=None):
"""All arguments have the same meaning as ``ssl_wrap_socket``.
By default, this function does a lot of the same work that
``ssl.create_default_context`` does on Python 3.4+. It:
- Disables SSLv2, SSLv3, and compression
- Sets a restricted set of server ciphers
If you wish to enable SSLv3, you can do::
from urllib3.util import ssl_
context = ssl_.create_urllib3_context()
context.options &= ~ssl_.OP_NO_SSLv3
You can do the same to enable compression (substituting ``COMPRESSION``
for ``SSLv3`` in the last line above).
:param ssl_version:
The desired protocol version to use. This will default to
PROTOCOL_SSLv23 which will negotiate the highest protocol that both
the server and your installation of OpenSSL support.
:param cert_reqs:
Whether to require the certificate verification. This defaults to
``ssl.CERT_REQUIRED``.
:param options:
Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
:param ciphers:
Which cipher suites to allow the server to select.
:returns:
Constructed SSLContext object with specified options
:rtype: SSLContext
"""
context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)
if options is None:
options = 0
# SSLv2 is easily broken and is considered harmful and dangerous
options |= OP_NO_SSLv2
# SSLv3 has several problems and is now dangerous
options |= OP_NO_SSLv3
# Disable compression to prevent CRIME attacks for OpenSSL 1.0+
# (issue #309)
options |= OP_NO_COMPRESSION
context.options |= options
if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6
context.set_ciphers(ciphers or _DEFAULT_CIPHERS)
context.verify_mode = cert_reqs
if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2
# We do our own verification, including fingerprints and alternative
# hostnames. So disable it here
context.check_hostname = False
return context
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
                    ca_certs=None, server_hostname=None,
                    ssl_version=None, ciphers=None, ssl_context=None):
    """
    All arguments except for server_hostname and ssl_context have the same
    meaning as they do when using :func:`ssl.wrap_socket`.

    :param server_hostname:
        When SNI is supported, the expected hostname of the certificate
    :param ssl_context:
        A pre-made :class:`SSLContext` object. If none is provided, one will
        be created using :func:`create_urllib3_context`.
    :param ciphers:
        A string of ciphers we wish the client to support. This is not
        supported on Python 2.6 as the ssl module does not support it.
    """
    context = ssl_context
    if context is None:
        context = create_urllib3_context(ssl_version, cert_reqs,
                                         ciphers=ciphers)

    if ca_certs:
        try:
            context.load_verify_locations(ca_certs)
        except IOError as e:  # Platform-specific: Python 2.6, 2.7, 3.2
            raise SSLError(e)
        # Py33 raises FileNotFoundError which subclasses OSError
        # These are not equivalent unless we check the errno attribute
        # NOTE(review): on Python 3.3+ IOError is an alias of OSError, so
        # the IOError handler above already catches everything and this
        # branch appears unreachable there -- confirm intended interpreters.
        except OSError as e:  # Platform-specific: Python 3.3 and beyond
            if e.errno == errno.ENOENT:
                raise SSLError(e)
            raise

    if certfile:
        context.load_cert_chain(certfile, keyfile)
    if HAS_SNI:  # Platform-specific: OpenSSL with enabled SNI
        return context.wrap_socket(sock, server_hostname=server_hostname)

    # Without SNI support, wrap without a server_hostname; name-based
    # virtual hosts may then present the wrong certificate.
    return context.wrap_socket(sock)
| |
from sympy.logic.boolalg import to_cnf, eliminate_implications, distribute_and_over_or, \
compile_rule, conjuncts, disjuncts, to_int_repr, fuzzy_not
from sympy import symbols, And, Or, Xor, Not, Nand, Nor, Implies, Equivalent
from sympy.utilities.pytest import raises, XFAIL
def test_overloading():
    """Test that |, & are overloaded as expected"""
    A, B, C = symbols('ABC')
    assert A & B == And(A, B)
    assert A | B == Or(A, B)
    assert (A & B) | C == Or(And(A, B), C)
    assert A >> B == Implies(A, B)
    # << is reverse implication: A << B means "B implies A".
    assert A << B == Implies(B, A)
    assert ~A == Not(A)
def test_And():
    # And() with no arguments is the identity element True;
    # boolean arguments are evaluated and symbols are simplified away
    # when a constant decides the result.
    A, B, C = symbols('ABC')
    assert And() == True
    assert And(A) == A
    assert And(True) == True
    assert And(False) == False
    assert And(True, True ) == True
    assert And(True, False) == False
    assert And(False, False) == False
    assert And(True, A) == A
    assert And(False, A) == False
    assert And(True, True, True) == True
    assert And(True, True , A) == A
    assert And(True, False, A) == False
def test_Or():
    """Truth table and simplification behaviour of Or."""
    A, B, C = symbols('ABC')
    cases = [
        ((), False),
        ((A,), A),
        ((True,), True),
        ((False,), False),
        ((True, True), True),
        ((True, False), True),
        ((False, False), False),
        ((True, A), True),
        ((False, A), A),
        ((True, False, False), True),
        ((True, False, A), True),
        ((False, False, A), A),
    ]
    for args, expected in cases:
        assert Or(*args) == expected
def test_Xor():
    """Truth table and simplification behaviour of Xor."""
    A, B, C = symbols('ABC')
    cases = [
        ((), False),
        ((A,), A),
        ((True,), True),
        ((False,), False),
        ((True, True), False),
        ((True, False), True),
        ((False, False), False),
        ((True, A), ~A),
        ((False, A), A),
        ((True, False, False), True),
        ((True, False, A), ~A),
        ((False, False, A), A),
    ]
    for args, expected in cases:
        assert Xor(*args) == expected
def test_Not():
    """Not evaluates constants; several arguments yield a list of negations."""
    cases = [
        ((True,), False),
        ((False,), True),
        ((True, True), [False, False]),
        ((True, False), [False, True]),
        ((False, False), [True, True]),
    ]
    for args, expected in cases:
        assert Not(*args) == expected
def test_Nand():
    """Truth table and simplification behaviour of Nand."""
    A, B, C = symbols('ABC')
    cases = [
        ((), False),
        ((A,), ~A),
        ((True,), False),
        ((False,), True),
        ((True, True), False),
        ((True, False), True),
        ((False, False), True),
        ((True, A), ~A),
        ((False, A), True),
        ((True, True, True), False),
        ((True, True, A), ~A),
        ((True, False, A), True),
    ]
    for args, expected in cases:
        assert Nand(*args) == expected
def test_Nor():
    """Truth table and simplification behaviour of Nor."""
    A, B, C = symbols('ABC')
    cases = [
        ((), False),
        ((A,), ~A),
        ((True,), False),
        ((False,), True),
        ((True, True), False),
        ((True, False), False),
        ((False, False), True),
        ((True, A), False),
        ((False, A), ~A),
        ((True, True, True), False),
        ((True, True, A), False),
        ((True, False, A), False),
    ]
    for args, expected in cases:
        assert Nor(*args) == expected
def test_Implies():
    """Implies evaluates constant arguments; `>>`/`<<` build implications."""
    A, B, C = symbols('ABC')
    # The original two comparisons lacked `assert`, making them no-op
    # expression statements. Also note material implication: p >> q is
    # False only for p=True, q=False, so Implies(False, False) is True.
    assert Implies(True, True) == True
    assert Implies(False, False) == True
    assert A >> B == B << A
def test_Equivalent():
    """Equivalent is symmetric in its arguments."""
    A, B, C = symbols('ABC')
    assert Equivalent(B, A) == Equivalent(A, B)
def test_bool_symbol():
    """Mixing symbols with boolean constants simplifies as expected."""
    A, B, C = symbols('ABC')
    # True is the identity of And and absorbs Or; False does the converse.
    for expr, expected in [
        (And(A, True), A),
        (And(A, True, True), A),
        (And(A, False), False),
        (And(A, True, False), False),
        (Or(A, True), True),
        (Or(A, False), A),
    ]:
        assert expr == expected
def test_subs():
    """Substituting truth values into And/Or simplifies correctly."""
    A, B, C = symbols('ABC')
    conj = A & B
    assert conj.subs(A, True) == B
    assert conj.subs(A, False) == False
    assert conj.subs(B, True) == A
    assert conj.subs(B, False) == False
    assert conj.subs({A: True, B: True}) == True
    disj = A | B
    assert disj.subs(A, True) == True
    assert disj.subs(A, False) == B
    assert disj.subs(B, True) == True
    assert disj.subs(B, False) == A
    assert disj.subs({A: True, B: True}) == True
"""
we test for axioms of boolean algebra
see http://en.wikipedia.org/wiki/Boolean_algebra_(structure)
"""
def test_commutative():
    """Test for commutativity of And and Or."""
    A, B = symbols('AB')
    assert A & B == B & A
    assert A | B == B | A
def test_and_associativity():
    """And is associative."""
    A, B, C = symbols('ABC')
    assert A & (B & C) == (A & B) & C
def test_or_assicativity():
    """Or is associative (function name keeps its historical misspelling)."""
    A, B, C = symbols('ABC')
    assert A | (B | C) == (A | B) | C
def test_double_negation():
    """Negation is an involution: ~~a == a."""
    a = symbols('a')
    assert a == ~(~a)
def test_De_Morgan():
    """De Morgan's laws, including the n-ary form."""
    A, B, C = symbols('ABC')
    assert (~A) | (~B) == ~(A & B)
    assert (~A) & (~B) == ~(A | B)
    assert ~A & ~B & ~C == ~(A | B | C)
# test methods
def test_eliminate_implications():
    """Implications are rewritten using only Not and Or."""
    A, B, C = symbols('ABC')
    assert eliminate_implications(A >> B) == Or(Not(A), B)
    nested = A >> (C >> Not(B))
    assert eliminate_implications(nested) == Or(Or(Not(B), Not(C)), Not(A))
def test_conjuncts():
    """conjuncts splits a conjunction into its top-level operands."""
    A, B, C = symbols('ABC')
    assert set([A, B, C]) == set(conjuncts(A & B & C))
    assert set([A | B, C]) == set(conjuncts((A | B) & C))
    # Non-conjunctions come back as singleton lists.
    assert [A] == conjuncts(A)
    assert [True] == conjuncts(True)
    assert [False] == conjuncts(False)
def test_disjuncts():
    """disjuncts splits a disjunction into its top-level operands."""
    A, B, C = symbols('ABC')
    assert [A, B, C] == disjuncts(A | B | C)
    # A conjunction is not split; it is returned whole.
    assert [(A | B) & C] == disjuncts((A | B) & C)
    assert [A] == disjuncts(A)
    assert [True] == disjuncts(True)
    assert [False] == disjuncts(False)
def test_distribute():
    """Or distributes over And into CNF shape."""
    A, B, C = symbols('ABC')
    expected = And(Or(A, C), Or(B, C))
    assert distribute_and_over_or(Or(And(A, B), C)) == expected
def test_to_cnf():
    """Conversion to conjunctive normal form."""
    A, B, C = symbols('ABC')
    cases = [
        (~(B | C), And(Not(B), Not(C))),
        ((A & B) | C, And(Or(A, C), Or(B, C))),
        (A >> B, (~A) | B),
        (A >> (B & C), (~A | B) & (~A | C)),
        (Equivalent(A, B), And(Or(A, Not(B)), Or(B, Not(A)))),
        (Equivalent(A, B & C), (~A | B) & (~A | C) & (~B | ~C | A)),
        (Equivalent(A, B | C),
         And(Or(Not(B), A), Or(Not(C), A), Or(B, C, Not(A)))),
    ]
    for expr, cnf in cases:
        assert to_cnf(expr) == cnf
def test_compile_rule():
    """compile_rule parses a boolean expression string like sympify does."""
    from sympy import sympify
    assert sympify("A & B") == compile_rule("A & B")
def test_to_int_repr():
    """to_int_repr maps symbols to 1-based ints and negation to minus."""
    x, y, z = symbols('x y z')

    def canon(arg):
        # Recursively sort nested sequences so ordering is irrelevant.
        try:
            return sorted(canon(item) for item in arg)
        except TypeError:  # arg is not a sequence
            return arg

    assert canon(to_int_repr([x | y, z | x], [x, y, z])) == \
        canon([[1, 2], [1, 3]])
    assert canon(to_int_repr([x | y, z | ~x], [x, y, z])) == \
        canon([[1, 2], [3, -1]])
def test_fuzzy_not():
    """fuzzy_not flips booleans and passes None (unknown) through."""
    for value, expected in [(False, True), (True, False), (None, None)]:
        assert fuzzy_not(value) == expected
| |
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for VHD related operations.
Based on the "root/virtualization/v2" namespace available starting with
Hyper-V Server / Windows Server 2012.
"""
import struct
import sys
if sys.platform == 'win32':
import wmi
from xml.etree import ElementTree
from oslo_utils import units
from nova.i18n import _
from nova.virt.hyperv import constants
from nova.virt.hyperv import vhdutils
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import vmutilsv2
# VHDX on-disk layout constants; offsets/sizes are in bytes.
VHDX_BAT_ENTRY_SIZE = 8  # size of one Block Allocation Table entry
VHDX_HEADER_OFFSETS = [64 * units.Ki, 128 * units.Ki]  # the two header copies
VHDX_HEADER_SECTION_SIZE = units.Mi  # header section accounted as 1 MB
VHDX_LOG_LENGTH_OFFSET = 68  # added to the current header offset to read log size
VHDX_METADATA_SIZE_OFFSET = 64  # added to the region table offset below
VHDX_REGION_TABLE_OFFSET = 192 * units.Ki
VHDX_BS_METADATA_ENTRY_OFFSET = 48  # block-size entry within the metadata region
class VHDUtilsV2(vhdutils.VHDUtils):
    """VHD/VHDX image operations through the Hyper-V WMI v2 namespace
    (``root/virtualization/v2``, Hyper-V Server / Windows Server 2012+).
    """
    # Values of the WMI VirtualHardDiskSettingData "Type" property.
    _VHD_TYPE_DYNAMIC = 3
    _VHD_TYPE_DIFFERENCING = 4
    # Maps nova disk-format constants to WMI "Format" property values.
    _vhd_format_map = {
        constants.DISK_FORMAT_VHD: 2,
        constants.DISK_FORMAT_VHDX: 3,
    }
    def __init__(self):
        self._vmutils = vmutilsv2.VMUtilsV2()
        # The WMI connection can only be created on Windows hosts.
        if sys.platform == 'win32':
            self._conn = wmi.WMI(moniker='//./root/virtualization/v2')
    def create_dynamic_vhd(self, path, max_internal_size, format):
        """Create a dynamically expanding disk image at `path`.

        :raises HyperVException: if `format` is not a supported disk format.
        """
        vhd_format = self._vhd_format_map.get(format)
        if not vhd_format:
            raise vmutils.HyperVException(_("Unsupported disk format: %s") %
                                          format)
        self._create_vhd(self._VHD_TYPE_DYNAMIC, vhd_format, path,
                         max_internal_size=max_internal_size)
    def create_differencing_vhd(self, path, parent_path):
        """Create a differencing disk at `path` on top of `parent_path`."""
        # Although this method can take a size argument in case of VHDX
        # images, avoid it as the underlying Win32 is currently not
        # resizing the disk properly. This can be reconsidered once the
        # Win32 issue is fixed.
        parent_vhd_info = self.get_vhd_info(parent_path)
        self._create_vhd(self._VHD_TYPE_DIFFERENCING,
                         parent_vhd_info["Format"],
                         path, parent_path=parent_path)
    def _create_vhd(self, vhd_type, format, path, max_internal_size=None,
                    parent_path=None):
        """Build a VirtualHardDiskSettingData and ask Hyper-V to create it."""
        vhd_info = self._conn.Msvm_VirtualHardDiskSettingData.new()
        vhd_info.Type = vhd_type
        vhd_info.Format = format
        vhd_info.Path = path
        vhd_info.ParentPath = parent_path
        if max_internal_size:
            vhd_info.MaxInternalSize = max_internal_size
        image_man_svc = self._conn.Msvm_ImageManagementService()[0]
        (job_path, ret_val) = image_man_svc.CreateVirtualHardDisk(
            VirtualDiskSettingData=vhd_info.GetText_(1))
        # Raises HyperVException if the (possibly async) job failed.
        self._vmutils.check_ret_val(ret_val, job_path)
    def reconnect_parent_vhd(self, child_vhd_path, parent_vhd_path):
        """Point a child (differencing) image at a new parent path.

        :raises HyperVException: if the child image has no ParentPath
            property, or the WMI job fails.
        """
        image_man_svc = self._conn.Msvm_ImageManagementService()[0]
        vhd_info_xml = self._get_vhd_info_xml(image_man_svc, child_vhd_path)
        # The setting data is exposed as XML; rewrite ParentPath in place.
        et = ElementTree.fromstring(vhd_info_xml)
        item = et.find(".//PROPERTY[@NAME='ParentPath']/VALUE")
        if item is not None:
            item.text = parent_vhd_path
        else:
            msg = (_("Failed to reconnect image %(child_vhd_path)s to "
                     "parent %(parent_vhd_path)s. The child image has no "
                     "parent path property.") %
                   {'child_vhd_path': child_vhd_path,
                    'parent_vhd_path': parent_vhd_path})
            raise vmutils.HyperVException(msg)
        vhd_info_xml = ElementTree.tostring(et)
        (job_path, ret_val) = image_man_svc.SetVirtualHardDiskSettingData(
            VirtualDiskSettingData=vhd_info_xml)
        self._vmutils.check_ret_val(ret_val, job_path)
    def _get_resize_method(self):
        # Hook used by the base class to obtain the v2 resize entry point.
        image_man_svc = self._conn.Msvm_ImageManagementService()[0]
        return image_man_svc.ResizeVirtualHardDisk
    def get_internal_vhd_size_by_file_size(self, vhd_path,
                                           new_vhd_file_size):
        """Return the usable internal size for a given on-disk file size.

        VHDX Size = Header (1 MB) + Log + Metadata Region + BAT
                    + Payload Blocks
        Chunk size = maximum number of bytes described by a SB block
                   = 2 ** 23 * LogicalSectorSize
        Plain VHD images are delegated to the base class; differencing
        images are measured against their parent.
        """
        vhd_format = self.get_vhd_format(vhd_path)
        if vhd_format == constants.DISK_FORMAT_VHD:
            return super(VHDUtilsV2,
                         self).get_internal_vhd_size_by_file_size(
                             vhd_path, new_vhd_file_size)
        else:
            vhd_info = self.get_vhd_info(vhd_path)
            vhd_type = vhd_info['Type']
            if vhd_type == self._VHD_TYPE_DIFFERENCING:
                vhd_parent = self.get_vhd_parent_path(vhd_path)
                return self.get_internal_vhd_size_by_file_size(vhd_parent,
                    new_vhd_file_size)
            else:
                try:
                    with open(vhd_path, 'rb') as f:
                        hs = VHDX_HEADER_SECTION_SIZE
                        bes = VHDX_BAT_ENTRY_SIZE
                        lss = vhd_info['LogicalSectorSize']
                        bs = self._get_vhdx_block_size(f)
                        ls = self._get_vhdx_log_size(f)
                        ms = self._get_vhdx_metadata_size_and_offset(f)[0]
                        chunk_ratio = (1 << 23) * lss / bs
                        size = new_vhd_file_size
                        # NOTE(review): relies on classic `/` (integer)
                        # division — this module appears to target Python 2.
                        max_internal_size = (bs * chunk_ratio * (size - hs -
                            ls - ms - bes - bes / chunk_ratio) / (bs *
                                chunk_ratio + bes * chunk_ratio + bes))
                        # Round down to a whole number of payload blocks.
                        return max_internal_size - (max_internal_size % bs)
                except IOError as ex:
                    raise vmutils.HyperVException(_("Unable to obtain "
                                                    "internal size from VHDX: "
                                                    "%(vhd_path)s. Exception: "
                                                    "%(ex)s") %
                                                  {"vhd_path": vhd_path,
                                                   "ex": ex})
    def _get_vhdx_current_header_offset(self, vhdx_file):
        """Return the offset of the header copy with the highest sequence
        number (the currently valid one of the two).
        """
        sequence_numbers = []
        for offset in VHDX_HEADER_OFFSETS:
            # Sequence number is the 8-byte little-endian field at +8.
            vhdx_file.seek(offset + 8)
            sequence_numbers.append(struct.unpack('<Q',
                                    vhdx_file.read(8))[0])
        current_header = sequence_numbers.index(max(sequence_numbers))
        return VHDX_HEADER_OFFSETS[current_header]
    def _get_vhdx_log_size(self, vhdx_file):
        """Read the 4-byte log length field from the current header."""
        current_header_offset = self._get_vhdx_current_header_offset(vhdx_file)
        offset = current_header_offset + VHDX_LOG_LENGTH_OFFSET
        vhdx_file.seek(offset)
        log_size = struct.unpack('<I', vhdx_file.read(4))[0]
        return log_size
    def _get_vhdx_metadata_size_and_offset(self, vhdx_file):
        """Return (metadata_size, metadata_offset) from the region table."""
        offset = VHDX_METADATA_SIZE_OFFSET + VHDX_REGION_TABLE_OFFSET
        vhdx_file.seek(offset)
        metadata_offset = struct.unpack('<Q', vhdx_file.read(8))[0]
        metadata_size = struct.unpack('<I', vhdx_file.read(4))[0]
        return metadata_size, metadata_offset
    def _get_vhdx_block_size(self, vhdx_file):
        """Read the payload block size out of the VHDX metadata region."""
        metadata_offset = self._get_vhdx_metadata_size_and_offset(vhdx_file)[1]
        offset = metadata_offset + VHDX_BS_METADATA_ENTRY_OFFSET
        vhdx_file.seek(offset)
        # The entry holds an offset (relative to the metadata region) to
        # the file parameters, whose first field is the block size.
        file_parameter_offset = struct.unpack('<I', vhdx_file.read(4))[0]
        vhdx_file.seek(file_parameter_offset + metadata_offset)
        block_size = struct.unpack('<I', vhdx_file.read(4))[0]
        return block_size
    def _get_vhd_info_xml(self, image_man_svc, vhd_path):
        """Fetch the raw setting-data XML for `vhd_path` as UTF-8 bytes."""
        (job_path,
         ret_val,
         vhd_info_xml) = image_man_svc.GetVirtualHardDiskSettingData(vhd_path)
        self._vmutils.check_ret_val(ret_val, job_path)
        return vhd_info_xml.encode('utf8', 'xmlcharrefreplace')
    def get_vhd_info(self, vhd_path):
        """Parse the image's setting-data XML into a plain dict.

        String properties (Path, ParentPath) are kept as-is; known numeric
        properties are converted; everything else is ignored.
        """
        image_man_svc = self._conn.Msvm_ImageManagementService()[0]
        vhd_info_xml = self._get_vhd_info_xml(image_man_svc, vhd_path)
        vhd_info_dict = {}
        et = ElementTree.fromstring(vhd_info_xml)
        for item in et.findall("PROPERTY"):
            name = item.attrib["NAME"]
            value_item = item.find("VALUE")
            if value_item is None:
                value_text = None
            else:
                value_text = value_item.text
            if name in ["Path", "ParentPath"]:
                vhd_info_dict[name] = value_text
            elif name in ["BlockSize", "LogicalSectorSize",
                          "PhysicalSectorSize", "MaxInternalSize"]:
                # NOTE(review): `long` exists only on Python 2 — confirm
                # this module is not expected to run on Python 3.
                vhd_info_dict[name] = long(value_text)
            elif name in ["Type", "Format"]:
                vhd_info_dict[name] = int(value_text)
        return vhd_info_dict
    def get_best_supported_vhd_format(self):
        """VHDX is the preferred format on the v2 (2012+) namespace."""
        return constants.DISK_FORMAT_VHDX
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Wrapper layers: layers that augment the functionality of another layer.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras.engine import base_layer
from tensorflow.python.keras._impl.keras.engine import InputSpec
from tensorflow.python.keras._impl.keras.engine import Layer
from tensorflow.python.keras._impl.keras.engine.base_layer import shape_type_conversion
from tensorflow.python.keras._impl.keras.utils.generic_utils import has_arg
from tensorflow.python.ops import array_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.layers.Wrapper')
class Wrapper(Layer):
  """Abstract wrapper base class.

  Wrappers take another layer and augment it in various ways.
  Do not use this class as a layer, it is only an abstract base class.
  Two usable wrappers are the `TimeDistributed` and `Bidirectional` wrappers.

  Arguments:
    layer: The layer to be wrapped.
  """

  def __init__(self, layer, **kwargs):
    self.layer = layer
    # Maps Wrapper inputs to the wrapped layer's inputs, so that update ops
    # depending on the inner inputs (rather than the Wrapper's) can be
    # resolved later.
    self._input_map = {}
    super(Wrapper, self).__init__(**kwargs)

  def build(self, input_shape=None):
    self.built = True

  @property
  def activity_regularizer(self):
    # The wrapped layer may not define the attribute; fall back to None.
    return getattr(self.layer, 'activity_regularizer', None)

  @property
  def trainable(self):
    return self.layer.trainable

  @trainable.setter
  def trainable(self, value):
    self.layer.trainable = value

  @property
  def trainable_weights(self):
    return self.layer.trainable_weights

  @property
  def non_trainable_weights(self):
    return self.layer.non_trainable_weights

  @property
  def updates(self):
    return self.layer.updates + self._updates

  @property
  def losses(self):
    return self.layer.losses + self._losses

  def get_weights(self):
    return self.layer.get_weights()

  def set_weights(self, weights):
    self.layer.set_weights(weights)

  def get_config(self):
    """Serialize the wrapper together with the inner layer's config."""
    merged = dict(super(Wrapper, self).get_config())
    merged['layer'] = {
        'class_name': self.layer.__class__.__name__,
        'config': self.layer.get_config()
    }
    return merged

  @classmethod
  def from_config(cls, config, custom_objects=None):
    """Rebuild the wrapper, deserializing the wrapped layer first."""
    from tensorflow.python.keras._impl.keras.layers import deserialize as deserialize_layer  # pylint: disable=g-import-not-at-top
    inner_layer = deserialize_layer(
        config.pop('layer'), custom_objects=custom_objects)
    return cls(inner_layer, **config)
@tf_export('keras.layers.TimeDistributed')
class TimeDistributed(Wrapper):
  """This wrapper allows to apply a layer to every temporal slice of an input.
  The input should be at least 3D, and the dimension of index one
  will be considered to be the temporal dimension.
  Consider a batch of 32 samples,
  where each sample is a sequence of 10 vectors of 16 dimensions.
  The batch input shape of the layer is then `(32, 10, 16)`,
  and the `input_shape`, not including the samples dimension, is `(10, 16)`.
  You can then use `TimeDistributed` to apply a `Dense` layer
  to each of the 10 timesteps, independently:
  ```python
  # as the first layer in a model
  model = Sequential()
  model.add(TimeDistributed(Dense(8), input_shape=(10, 16)))
  # now model.output_shape == (None, 10, 8)
  ```
  The output will then have shape `(32, 10, 8)`.
  In subsequent layers, there is no need for the `input_shape`:
  ```python
  model.add(TimeDistributed(Dense(32)))
  # now model.output_shape == (None, 10, 32)
  ```
  The output will then have shape `(32, 10, 32)`.
  `TimeDistributed` can be used with arbitrary layers, not just `Dense`,
  for instance with a `Conv2D` layer:
  ```python
  model = Sequential()
  model.add(TimeDistributed(Conv2D(64, (3, 3)),
                            input_shape=(10, 299, 299, 3)))
  ```
  Arguments:
    layer: a layer instance.
  """
  def __init__(self, layer, **kwargs):
    super(TimeDistributed, self).__init__(layer, **kwargs)
    self.supports_masking = True
  def build(self, input_shape):
    """Build the inner layer against the per-timestep input shape."""
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    assert len(input_shape) >= 3
    self.input_spec = InputSpec(shape=input_shape)
    # Drop the timestep axis (index 1): the inner layer sees one slice.
    child_input_shape = [input_shape[0]] + input_shape[2:]
    if not self.layer.built:
      self.layer.build(child_input_shape)
      self.layer.built = True
    super(TimeDistributed, self).build()
    self.built = True
  def compute_output_shape(self, input_shape):
    """Re-insert the timestep axis into the inner layer's output shape."""
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    child_input_shape = tensor_shape.TensorShape([input_shape[0]] +
                                                 input_shape[2:])
    child_output_shape = self.layer.compute_output_shape(
        child_input_shape).as_list()
    timesteps = input_shape[1]
    return tensor_shape.TensorShape([child_output_shape[0], timesteps] +
                                    child_output_shape[1:])
  def call(self, inputs, training=None, mask=None):
    """Apply the inner layer to every timestep of `inputs`.

    Uses `K.rnn` when the static batch size is known, otherwise folds the
    timesteps into the batch axis, calls the layer once, and reshapes back.
    """
    kwargs = {}
    if has_arg(self.layer.call, 'training'):
      kwargs['training'] = training
    uses_learning_phase = False  # pylint: disable=redefined-outer-name
    input_shape = K.int_shape(inputs)
    if input_shape[0]:
      # batch size matters, use rnn-based implementation
      def step(x, _):
        global uses_learning_phase  # pylint: disable=global-variable-undefined
        output = self.layer.call(x, **kwargs)
        if hasattr(output, '_uses_learning_phase'):
          uses_learning_phase = (output._uses_learning_phase or
                                 uses_learning_phase)
        return output, []
      _, outputs, _ = K.rnn(
          step,
          inputs,
          initial_states=[],
          unroll=False)
      y = outputs
    else:
      # No batch size specified, therefore the layer will be able
      # to process batches of any size.
      # We can go with reshape-based implementation for performance.
      input_length = input_shape[1]
      if not input_length:
        input_length = array_ops.shape(inputs)[1]
      # Shape: (num_samples * timesteps, ...). And track the
      # transformation in self._input_map.
      input_uid = base_layer.object_list_uid(inputs)
      inputs = array_ops.reshape(inputs, (-1,) + input_shape[2:])
      self._input_map[input_uid] = inputs
      # (num_samples * timesteps, ...)
      y = self.layer.call(inputs, **kwargs)
      if hasattr(y, '_uses_learning_phase'):
        uses_learning_phase = y._uses_learning_phase
      # Shape: (num_samples, timesteps, ...)
      output_shape = self.compute_output_shape(input_shape).as_list()
      y = array_ops.reshape(y, (-1, input_length) + tuple(output_shape[2:]))
    # Apply activity regularizer if any:
    if (hasattr(self.layer, 'activity_regularizer') and
        self.layer.activity_regularizer is not None):
      regularization_loss = self.layer.activity_regularizer(y)
      self.add_loss(regularization_loss, inputs)
    if uses_learning_phase:
      y._uses_learning_phase = True
    return y
@tf_export('keras.layers.Bidirectional')
class Bidirectional(Wrapper):
  """Bidirectional wrapper for RNNs.
  Arguments:
    layer: `Recurrent` instance.
    merge_mode: Mode by which outputs of the
      forward and backward RNNs will be combined.
      One of {'sum', 'mul', 'concat', 'ave', None}.
      If None, the outputs will not be combined,
      they will be returned as a list.
  Raises:
    ValueError: In case of invalid `merge_mode` argument.
  Examples:
  ```python
  model = Sequential()
  model.add(Bidirectional(LSTM(10, return_sequences=True), input_shape=(5,
  10)))
  model.add(Bidirectional(LSTM(10)))
  model.add(Dense(5))
  model.add(Activation('softmax'))
  model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
  ```
  """
  def __init__(self, layer, merge_mode='concat', weights=None, **kwargs):
    if merge_mode not in ['sum', 'mul', 'ave', 'concat', None]:
      raise ValueError('Invalid merge mode. '
                       'Merge mode should be one of '
                       '{"sum", "mul", "ave", "concat", None}')
    # The forward copy keeps the layer's direction; the backward copy is
    # rebuilt from config with `go_backwards` inverted.
    self.forward_layer = copy.copy(layer)
    config = layer.get_config()
    config['go_backwards'] = not config['go_backwards']
    self.backward_layer = layer.__class__.from_config(config)
    self.forward_layer._name = 'forward_' + self.forward_layer.name
    self.backward_layer._name = 'backward_' + self.backward_layer.name
    self.merge_mode = merge_mode
    # Optional initial weights are split half/half between directions.
    if weights:
      nw = len(weights)
      self.forward_layer.initial_weights = weights[:nw // 2]
      self.backward_layer.initial_weights = weights[nw // 2:]
    self.stateful = layer.stateful
    self.return_sequences = layer.return_sequences
    self.return_state = layer.return_state
    self.supports_masking = True
    self._trainable = True
    super(Bidirectional, self).__init__(layer, **kwargs)
    self.input_spec = layer.input_spec
  @property
  def trainable(self):
    return self._trainable
  @trainable.setter
  def trainable(self, value):
    # Keep both directional copies in sync with the wrapper's flag.
    self._trainable = value
    self.forward_layer.trainable = value
    self.backward_layer.trainable = value
  def get_weights(self):
    return self.forward_layer.get_weights() + self.backward_layer.get_weights()
  def set_weights(self, weights):
    # Weights are ordered forward-half then backward-half.
    nw = len(weights)
    self.forward_layer.set_weights(weights[:nw // 2])
    self.backward_layer.set_weights(weights[nw // 2:])
  @shape_type_conversion
  def compute_output_shape(self, input_shape):
    """Output shape, accounting for merge mode and optional states."""
    output_shape = tuple(self.forward_layer.compute_output_shape(
        input_shape).as_list())
    if self.return_state:
      state_shape = output_shape[1:]
      output_shape = output_shape[0]
    if self.merge_mode == 'concat':
      # Concatenation doubles the last (feature) dimension.
      output_shape = list(output_shape)
      output_shape[-1] *= 2
      output_shape = tuple(output_shape)
    elif self.merge_mode is None:
      output_shape = [output_shape, copy.copy(output_shape)]
    if self.return_state:
      if self.merge_mode is None:
        return output_shape + state_shape + copy.copy(state_shape)
      return [output_shape] + state_shape + copy.copy(state_shape)
    return output_shape
  def __call__(self, inputs, initial_state=None, **kwargs):
    """Wrap the call to route optional `initial_state` to both RNN copies."""
    if isinstance(inputs, list):
      if len(inputs) > 1:
        initial_state = inputs[1:]
      inputs = inputs[0]
    if initial_state is None:
      return super(Bidirectional, self).__call__(inputs, **kwargs)
    # Standardize `initial_state` into list
    if isinstance(initial_state, tuple):
      initial_state = list(initial_state)
    elif not isinstance(initial_state, list):
      initial_state = [initial_state]
    # Check if `initial_state` can be splitted into half
    num_states = len(initial_state)
    if num_states % 2 > 0:
      raise ValueError(
          'When passing `initial_state` to a Bidirectional RNN, the state '
          'should be a list containing the states of the underlying RNNs. '
          'Found: ' + str(initial_state))
    # Applies the same workaround as in `RNN.__call__`, without handling
    # constants
    kwargs['initial_state'] = initial_state
    additional_inputs = initial_state
    additional_specs = [InputSpec(shape=K.int_shape(state))
                        for state in initial_state]
    self.forward_layer.state_spec = additional_specs[:num_states // 2]
    self.backward_layer.state_spec = additional_specs[num_states // 2:]
    # All state tensors must be of the same kind (Keras or non-Keras).
    is_keras_tensor = K.is_keras_tensor(additional_inputs[0])
    for tensor in additional_inputs:
      if K.is_keras_tensor(tensor) != is_keras_tensor:
        raise ValueError('The initial state of a Bidirectional'
                         ' layer cannot be specified with a mix of'
                         ' Keras tensors and non-Keras tensors'
                         ' (a "Keras tensor" is a tensor that was'
                         ' returned by a Keras layer, or by `Input`)')
    if is_keras_tensor:
      # Compute the full input spec, including state
      full_input = [inputs] + additional_inputs
      full_input_spec = self.input_spec + additional_specs
      # Perform the call with temporarily replaced input_spec
      original_input_spec = self.input_spec
      self.input_spec = full_input_spec
      output = super(Bidirectional, self).__call__(full_input, **kwargs)
      self.input_spec = original_input_spec
      return output
    else:
      return super(Bidirectional, self).__call__(inputs, **kwargs)
  def call(self, inputs, training=None, mask=None, initial_state=None):
    """Run both directional RNNs and merge their outputs per `merge_mode`."""
    kwargs = {}
    if has_arg(self.layer.call, 'training'):
      kwargs['training'] = training
    if has_arg(self.layer.call, 'mask'):
      kwargs['mask'] = mask
    if initial_state is not None and has_arg(self.layer.call, 'initial_state'):
      # First half of the states feeds the forward copy, second the backward.
      forward_state = initial_state[:len(initial_state) // 2]
      backward_state = initial_state[len(initial_state) // 2:]
      y = self.forward_layer.call(inputs, initial_state=forward_state, **kwargs)
      y_rev = self.backward_layer.call(
          inputs, initial_state=backward_state, **kwargs)
    else:
      y = self.forward_layer.call(inputs, **kwargs)
      y_rev = self.backward_layer.call(inputs, **kwargs)
    if self.return_state:
      states = y[1:] + y_rev[1:]
      y = y[0]
      y_rev = y_rev[0]
    if self.return_sequences:
      # Re-align the backward sequence with forward time order.
      y_rev = K.reverse(y_rev, 1)
    if self.merge_mode == 'concat':
      output = K.concatenate([y, y_rev])
    elif self.merge_mode == 'sum':
      output = y + y_rev
    elif self.merge_mode == 'ave':
      output = (y + y_rev) / 2
    elif self.merge_mode == 'mul':
      output = y * y_rev
    elif self.merge_mode is None:
      output = [y, y_rev]
    # Properly set learning phase
    if (getattr(y, '_uses_learning_phase', False) or
        getattr(y_rev, '_uses_learning_phase', False)):
      if self.merge_mode is None:
        for out in output:
          out._uses_learning_phase = True
      else:
        output._uses_learning_phase = True
    if self.return_state:
      if self.merge_mode is None:
        return output + states
      return [output] + states
    return output
  def reset_states(self):
    self.forward_layer.reset_states()
    self.backward_layer.reset_states()
  def build(self, input_shape):
    """Build both directional copies under distinct name scopes."""
    with K.name_scope(self.forward_layer.name):
      self.forward_layer.build(input_shape)
    with K.name_scope(self.backward_layer.name):
      self.backward_layer.build(input_shape)
    self.built = True
  def compute_mask(self, inputs, mask):
    """Masks propagate only when sequences are returned."""
    if self.return_sequences:
      if not self.merge_mode:
        return [mask, mask]
      else:
        return mask
    else:
      return None
  @property
  def trainable_weights(self):
    if hasattr(self.forward_layer, 'trainable_weights'):
      return (self.forward_layer.trainable_weights +
              self.backward_layer.trainable_weights)
    return []
  @property
  def non_trainable_weights(self):
    if hasattr(self.forward_layer, 'non_trainable_weights'):
      return (self.forward_layer.non_trainable_weights +
              self.backward_layer.non_trainable_weights)
    return []
  @property
  def updates(self):
    if hasattr(self.forward_layer, 'updates'):
      return self.forward_layer.updates + self.backward_layer.updates
    return []
  @property
  def losses(self):
    if hasattr(self.forward_layer, 'losses'):
      return self.forward_layer.losses + self.backward_layer.losses
    return []
  @property
  def constraints(self):
    constraints = {}
    if hasattr(self.forward_layer, 'constraints'):
      constraints.update(self.forward_layer.constraints)
      constraints.update(self.backward_layer.constraints)
    return constraints
  def get_config(self):
    config = {'merge_mode': self.merge_mode}
    base_config = super(Bidirectional, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
| |
"""Test network helper."""
import pytest
from homeassistant.components import cloud
from homeassistant.config import async_process_ha_core_config
from homeassistant.core import HomeAssistant
from homeassistant.helpers.network import (
NoURLAvailableError,
_get_cloud_url,
_get_deprecated_base_url,
_get_external_url,
_get_internal_url,
_get_request_host,
get_url,
)
from tests.async_mock import Mock, patch
from tests.common import mock_component
async def test_get_url_internal(hass: HomeAssistant):
    """Test getting an instance URL when the user has set an internal URL.

    The hass config is mutated step by step; the assertion groups below
    each correspond to one configured internal URL.
    """
    assert hass.config.internal_url is None
    # No URL configured and no active request -> nothing to return.
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_current_request=True)
    # Test with internal URL: http://example.local:8123
    await async_process_ha_core_config(
        hass,
        {"internal_url": "http://example.local:8123"},
    )
    assert hass.config.internal_url == "http://example.local:8123"
    assert _get_internal_url(hass) == "http://example.local:8123"
    assert _get_internal_url(hass, allow_ip=False) == "http://example.local:8123"
    # Port 8123 is non-standard and the scheme is plain http.
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_standard_port=True)
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_ssl=True)
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_current_request=True)
    # A current request whose host matches the configured URL succeeds.
    with patch(
        "homeassistant.helpers.network._get_request_host", return_value="example.local"
    ):
        assert (
            _get_internal_url(hass, require_current_request=True)
            == "http://example.local:8123"
        )
        with pytest.raises(NoURLAvailableError):
            _get_internal_url(
                hass, require_current_request=True, require_standard_port=True
            )
        with pytest.raises(NoURLAvailableError):
            _get_internal_url(hass, require_current_request=True, require_ssl=True)
    # A current request for a different host must not match.
    with patch(
        "homeassistant.helpers.network._get_request_host",
        return_value="no_match.example.local",
    ), pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_current_request=True)
    # Test with internal URL: https://example.local:8123
    await async_process_ha_core_config(
        hass,
        {"internal_url": "https://example.local:8123"},
    )
    assert hass.config.internal_url == "https://example.local:8123"
    assert _get_internal_url(hass) == "https://example.local:8123"
    assert _get_internal_url(hass, allow_ip=False) == "https://example.local:8123"
    assert _get_internal_url(hass, require_ssl=True) == "https://example.local:8123"
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_standard_port=True)
    # Test with internal URL: http://example.local:80/
    await async_process_ha_core_config(
        hass,
        {"internal_url": "http://example.local:80/"},
    )
    assert hass.config.internal_url == "http://example.local:80/"
    # Default ports and trailing slashes are normalized away.
    assert _get_internal_url(hass) == "http://example.local"
    assert _get_internal_url(hass, allow_ip=False) == "http://example.local"
    assert _get_internal_url(hass, require_standard_port=True) == "http://example.local"
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_ssl=True)
    # Test with internal URL: https://example.local:443
    await async_process_ha_core_config(
        hass,
        {"internal_url": "https://example.local:443"},
    )
    assert hass.config.internal_url == "https://example.local:443"
    assert _get_internal_url(hass) == "https://example.local"
    assert _get_internal_url(hass, allow_ip=False) == "https://example.local"
    assert (
        _get_internal_url(hass, require_standard_port=True) == "https://example.local"
    )
    assert _get_internal_url(hass, require_ssl=True) == "https://example.local"
    # Test with internal URL: https://192.168.0.1
    await async_process_ha_core_config(
        hass,
        {"internal_url": "https://192.168.0.1"},
    )
    assert hass.config.internal_url == "https://192.168.0.1"
    assert _get_internal_url(hass) == "https://192.168.0.1"
    assert _get_internal_url(hass, require_standard_port=True) == "https://192.168.0.1"
    assert _get_internal_url(hass, require_ssl=True) == "https://192.168.0.1"
    # A bare IP address is rejected when allow_ip=False.
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, allow_ip=False)
    # Test with internal URL: http://192.168.0.1:8123
    await async_process_ha_core_config(
        hass,
        {"internal_url": "http://192.168.0.1:8123"},
    )
    assert hass.config.internal_url == "http://192.168.0.1:8123"
    assert _get_internal_url(hass) == "http://192.168.0.1:8123"
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_standard_port=True)
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_ssl=True)
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, allow_ip=False)
    with patch(
        "homeassistant.helpers.network._get_request_host", return_value="192.168.0.1"
    ):
        assert (
            _get_internal_url(hass, require_current_request=True)
            == "http://192.168.0.1:8123"
        )
        with pytest.raises(NoURLAvailableError):
            _get_internal_url(hass, require_current_request=True, allow_ip=False)
        with pytest.raises(NoURLAvailableError):
            _get_internal_url(
                hass, require_current_request=True, require_standard_port=True
            )
        with pytest.raises(NoURLAvailableError):
            _get_internal_url(hass, require_current_request=True, require_ssl=True)
async def test_get_url_internal_fallback(hass: HomeAssistant):
    """Test getting an instance URL when the user has not set an internal URL.

    With no internal_url configured, _get_internal_url falls back to the
    API's local_ip/port; loopback addresses are never accepted as fallback.
    """
    assert hass.config.internal_url is None
    # Fallback to API local_ip on a non-standard port: plain-IP URL only.
    hass.config.api = Mock(
        use_ssl=False, port=8123, deprecated_base_url=None, local_ip="192.168.123.123"
    )
    assert _get_internal_url(hass) == "http://192.168.123.123:8123"
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, allow_ip=False)
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_standard_port=True)
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_ssl=True)
    # Standard HTTP port (80): port is omitted, standard-port filter passes.
    hass.config.api = Mock(
        use_ssl=False, port=80, deprecated_base_url=None, local_ip="192.168.123.123"
    )
    assert _get_internal_url(hass) == "http://192.168.123.123"
    assert (
        _get_internal_url(hass, require_standard_port=True) == "http://192.168.123.123"
    )
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, allow_ip=False)
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_ssl=True)
    # SSL without a local_ip: no internal URL can be derived at all.
    hass.config.api = Mock(use_ssl=True, port=443, deprecated_base_url=None)
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass)
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_standard_port=True)
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, allow_ip=False)
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_ssl=True)
    # Do not accept any local loopback address as fallback
    hass.config.api = Mock(
        use_ssl=False, port=80, deprecated_base_url=None, local_ip="127.0.0.1"
    )
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass)
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_standard_port=True)
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, allow_ip=False)
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_ssl=True)
async def test_get_url_external(hass: HomeAssistant):
    """Test getting an instance URL when the user has set an external URL.

    Walks through external_url values with varying scheme/port/host and
    checks how allow_ip, require_ssl, require_standard_port and
    require_current_request filter the result.
    """
    assert hass.config.external_url is None
    # No request in flight yet, so require_current_request cannot match.
    with pytest.raises(NoURLAvailableError):
        _get_external_url(hass, require_current_request=True)
    # Test with external URL: http://example.com:8123
    await async_process_ha_core_config(
        hass,
        {"external_url": "http://example.com:8123"},
    )
    assert hass.config.external_url == "http://example.com:8123"
    assert _get_external_url(hass) == "http://example.com:8123"
    assert _get_external_url(hass, allow_cloud=False) == "http://example.com:8123"
    assert _get_external_url(hass, allow_ip=False) == "http://example.com:8123"
    assert _get_external_url(hass, prefer_cloud=True) == "http://example.com:8123"
    with pytest.raises(NoURLAvailableError):
        _get_external_url(hass, require_standard_port=True)
    with pytest.raises(NoURLAvailableError):
        _get_external_url(hass, require_ssl=True)
    with pytest.raises(NoURLAvailableError):
        _get_external_url(hass, require_current_request=True)
    # require_current_request only succeeds when the request host matches.
    with patch(
        "homeassistant.helpers.network._get_request_host", return_value="example.com"
    ):
        assert (
            _get_external_url(hass, require_current_request=True)
            == "http://example.com:8123"
        )
        with pytest.raises(NoURLAvailableError):
            _get_external_url(
                hass, require_current_request=True, require_standard_port=True
            )
        with pytest.raises(NoURLAvailableError):
            _get_external_url(hass, require_current_request=True, require_ssl=True)
    with patch(
        "homeassistant.helpers.network._get_request_host",
        return_value="no_match.example.com",
    ), pytest.raises(NoURLAvailableError):
        _get_external_url(hass, require_current_request=True)
    # Test with external URL: http://example.com:80/
    await async_process_ha_core_config(
        hass,
        {"external_url": "http://example.com:80/"},
    )
    assert hass.config.external_url == "http://example.com:80/"
    assert _get_external_url(hass) == "http://example.com"
    assert _get_external_url(hass, allow_cloud=False) == "http://example.com"
    assert _get_external_url(hass, allow_ip=False) == "http://example.com"
    assert _get_external_url(hass, prefer_cloud=True) == "http://example.com"
    assert _get_external_url(hass, require_standard_port=True) == "http://example.com"
    with pytest.raises(NoURLAvailableError):
        _get_external_url(hass, require_ssl=True)
    # Test with external url: https://example.com:443/
    await async_process_ha_core_config(
        hass,
        {"external_url": "https://example.com:443/"},
    )
    assert hass.config.external_url == "https://example.com:443/"
    assert _get_external_url(hass) == "https://example.com"
    assert _get_external_url(hass, allow_cloud=False) == "https://example.com"
    assert _get_external_url(hass, allow_ip=False) == "https://example.com"
    assert _get_external_url(hass, prefer_cloud=True) == "https://example.com"
    assert _get_external_url(hass, require_ssl=False) == "https://example.com"
    assert _get_external_url(hass, require_standard_port=True) == "https://example.com"
    # Test with external URL: https://example.com:80
    await async_process_ha_core_config(
        hass,
        {"external_url": "https://example.com:80"},
    )
    assert hass.config.external_url == "https://example.com:80"
    assert _get_external_url(hass) == "https://example.com:80"
    assert _get_external_url(hass, allow_cloud=False) == "https://example.com:80"
    assert _get_external_url(hass, allow_ip=False) == "https://example.com:80"
    assert _get_external_url(hass, prefer_cloud=True) == "https://example.com:80"
    assert _get_external_url(hass, require_ssl=True) == "https://example.com:80"
    with pytest.raises(NoURLAvailableError):
        _get_external_url(hass, require_standard_port=True)
    # Test with external URL: https://192.168.0.1
    await async_process_ha_core_config(
        hass,
        {"external_url": "https://192.168.0.1"},
    )
    assert hass.config.external_url == "https://192.168.0.1"
    assert _get_external_url(hass) == "https://192.168.0.1"
    assert _get_external_url(hass, allow_cloud=False) == "https://192.168.0.1"
    assert _get_external_url(hass, prefer_cloud=True) == "https://192.168.0.1"
    assert _get_external_url(hass, require_standard_port=True) == "https://192.168.0.1"
    with pytest.raises(NoURLAvailableError):
        _get_external_url(hass, allow_ip=False)
    with pytest.raises(NoURLAvailableError):
        # NOTE(review): require_ssl rejects an https IP URL here — presumably
        # SSL certs for bare IPs are not considered valid; confirm in helper.
        _get_external_url(hass, require_ssl=True)
    with patch(
        "homeassistant.helpers.network._get_request_host", return_value="192.168.0.1"
    ):
        assert (
            _get_external_url(hass, require_current_request=True)
            == "https://192.168.0.1"
        )
        with pytest.raises(NoURLAvailableError):
            _get_external_url(hass, require_current_request=True, allow_ip=False)
        with pytest.raises(NoURLAvailableError):
            _get_external_url(hass, require_current_request=True, require_ssl=True)
async def test_get_cloud_url(hass: HomeAssistant):
    """Test getting an instance URL from the Home Assistant Cloud remote UI."""
    assert hass.config.external_url is None
    hass.config.components.add("cloud")
    with patch.object(
        hass.components.cloud,
        "async_remote_ui_url",
        return_value="https://example.nabu.casa",
    ):
        assert _get_cloud_url(hass) == "https://example.nabu.casa"
        # Without a current request, require_current_request cannot match.
        with pytest.raises(NoURLAvailableError):
            _get_cloud_url(hass, require_current_request=True)
        with patch(
            "homeassistant.helpers.network._get_request_host",
            return_value="example.nabu.casa",
        ):
            assert (
                _get_cloud_url(hass, require_current_request=True)
                == "https://example.nabu.casa"
            )
        with patch(
            "homeassistant.helpers.network._get_request_host",
            return_value="no_match.nabu.casa",
        ), pytest.raises(NoURLAvailableError):
            _get_cloud_url(hass, require_current_request=True)
    # If the cloud component reports it is unavailable, no cloud URL exists.
    with patch.object(
        hass.components.cloud,
        "async_remote_ui_url",
        side_effect=cloud.CloudNotAvailable,
    ):
        with pytest.raises(NoURLAvailableError):
            _get_cloud_url(hass)
async def test_get_external_url_cloud_fallback(hass: HomeAssistant):
    """Test getting an external instance URL with cloud fallback.

    When the configured external_url fails a filter (allow_ip, require_ssl,
    require_standard_port) the cloud remote UI URL is used instead.
    """
    assert hass.config.external_url is None
    # Test with external URL: http://1.1.1.1:8123
    await async_process_ha_core_config(
        hass,
        {"external_url": "http://1.1.1.1:8123"},
    )
    assert hass.config.external_url == "http://1.1.1.1:8123"
    assert _get_external_url(hass, prefer_cloud=True) == "http://1.1.1.1:8123"
    # Add Cloud to the previous test
    hass.config.components.add("cloud")
    with patch.object(
        hass.components.cloud,
        "async_remote_ui_url",
        return_value="https://example.nabu.casa",
    ):
        assert _get_external_url(hass, allow_cloud=False) == "http://1.1.1.1:8123"
        assert _get_external_url(hass, allow_ip=False) == "https://example.nabu.casa"
        assert _get_external_url(hass, prefer_cloud=False) == "http://1.1.1.1:8123"
        assert _get_external_url(hass, prefer_cloud=True) == "https://example.nabu.casa"
        assert _get_external_url(hass, require_ssl=True) == "https://example.nabu.casa"
        assert (
            _get_external_url(hass, require_standard_port=True)
            == "https://example.nabu.casa"
        )
    # Test with external URL: https://example.com
    await async_process_ha_core_config(
        hass,
        {"external_url": "https://example.com"},
    )
    assert hass.config.external_url == "https://example.com"
    assert _get_external_url(hass, prefer_cloud=True) == "https://example.com"
    # Add Cloud to the previous test (components is a set, so re-adding is a no-op)
    hass.config.components.add("cloud")
    with patch.object(
        hass.components.cloud,
        "async_remote_ui_url",
        return_value="https://example.nabu.casa",
    ):
        assert _get_external_url(hass, allow_cloud=False) == "https://example.com"
        assert _get_external_url(hass, allow_ip=False) == "https://example.com"
        assert _get_external_url(hass, prefer_cloud=False) == "https://example.com"
        assert _get_external_url(hass, prefer_cloud=True) == "https://example.nabu.casa"
        assert _get_external_url(hass, require_ssl=True) == "https://example.com"
        assert (
            _get_external_url(hass, require_standard_port=True) == "https://example.com"
        )
        # allow_cloud=False wins over prefer_cloud=True.
        assert (
            _get_external_url(hass, prefer_cloud=True, allow_cloud=False)
            == "https://example.com"
        )
async def test_get_url(hass: HomeAssistant):
    """Test getting an instance URL.

    Exercises get_url()'s internal/external preference and allowance flags,
    plus require_current_request matching against internal, external, and
    non-matching request hosts.
    """
    assert hass.config.external_url is None
    assert hass.config.internal_url is None
    with pytest.raises(NoURLAvailableError):
        get_url(hass)
    # Internal fallback from the API's local_ip/port.
    hass.config.api = Mock(
        use_ssl=False, port=8123, deprecated_base_url=None, local_ip="192.168.123.123"
    )
    assert get_url(hass) == "http://192.168.123.123:8123"
    assert get_url(hass, prefer_external=True) == "http://192.168.123.123:8123"
    with pytest.raises(NoURLAvailableError):
        get_url(hass, allow_internal=False)
    # Test only external
    hass.config.api = None
    await async_process_ha_core_config(
        hass,
        {"external_url": "https://example.com"},
    )
    assert hass.config.external_url == "https://example.com"
    assert hass.config.internal_url is None
    assert get_url(hass) == "https://example.com"
    # Test preference or allowance
    await async_process_ha_core_config(
        hass,
        {"internal_url": "http://example.local", "external_url": "https://example.com"},
    )
    assert hass.config.external_url == "https://example.com"
    assert hass.config.internal_url == "http://example.local"
    assert get_url(hass) == "http://example.local"
    assert get_url(hass, prefer_external=True) == "https://example.com"
    assert get_url(hass, allow_internal=False) == "https://example.com"
    assert (
        get_url(hass, prefer_external=True, allow_external=False)
        == "http://example.local"
    )
    with pytest.raises(NoURLAvailableError):
        get_url(hass, allow_external=False, require_ssl=True)
    with pytest.raises(NoURLAvailableError):
        get_url(hass, allow_external=False, allow_internal=False)
    with pytest.raises(NoURLAvailableError):
        get_url(hass, require_current_request=True)
    # Request host matches the external URL.
    with patch(
        "homeassistant.helpers.network._get_request_host", return_value="example.com"
    ), patch("homeassistant.helpers.network.current_request"):
        assert get_url(hass, require_current_request=True) == "https://example.com"
        assert (
            get_url(hass, require_current_request=True, require_ssl=True)
            == "https://example.com"
        )
        with pytest.raises(NoURLAvailableError):
            get_url(hass, require_current_request=True, allow_external=False)
    # Request host matches the internal URL.
    with patch(
        "homeassistant.helpers.network._get_request_host", return_value="example.local"
    ), patch("homeassistant.helpers.network.current_request"):
        assert get_url(hass, require_current_request=True) == "http://example.local"
        with pytest.raises(NoURLAvailableError):
            get_url(hass, require_current_request=True, allow_internal=False)
        with pytest.raises(NoURLAvailableError):
            get_url(hass, require_current_request=True, require_ssl=True)
    with patch(
        "homeassistant.helpers.network._get_request_host",
        return_value="no_match.example.com",
    ), pytest.raises(NoURLAvailableError):
        # Fixed: this test targets get_url(); it previously called
        # _get_internal_url() here, leaving get_url()'s no-match behavior
        # untested. Both raise NoURLAvailableError, so the outcome is the same.
        get_url(hass, require_current_request=True)
async def test_get_request_host(hass: HomeAssistant):
    """Test getting the host of the current web request from the request context."""
    # No request in the context variable -> helper has nothing to parse.
    with pytest.raises(NoURLAvailableError):
        _get_request_host()
    with patch("homeassistant.helpers.network.current_request") as mock_request_context:
        # Simulate an active request and verify the hostname is extracted
        # from its URL (scheme, port and path are stripped).
        mock_request = Mock()
        mock_request.url = "http://example.com:8123/test/request"
        mock_request_context.get = Mock(return_value=mock_request)
        assert _get_request_host() == "example.com"
async def test_get_deprecated_base_url_internal(hass: HomeAssistant):
    """Test getting an internal instance URL from the deprecated base_url.

    A base_url only counts as internal when it is a .local domain, a private
    IP, or similar; external domains and loopback addresses are rejected.
    """
    # Test with SSL local URL
    hass.config.api = Mock(deprecated_base_url="https://example.local")
    assert _get_deprecated_base_url(hass, internal=True) == "https://example.local"
    assert (
        _get_deprecated_base_url(hass, internal=True, allow_ip=False)
        == "https://example.local"
    )
    assert (
        _get_deprecated_base_url(hass, internal=True, require_ssl=True)
        == "https://example.local"
    )
    assert (
        _get_deprecated_base_url(hass, internal=True, require_standard_port=True)
        == "https://example.local"
    )
    # Test with no SSL, local IP URL
    hass.config.api = Mock(deprecated_base_url="http://10.10.10.10:8123")
    assert _get_deprecated_base_url(hass, internal=True) == "http://10.10.10.10:8123"
    with pytest.raises(NoURLAvailableError):
        _get_deprecated_base_url(hass, internal=True, allow_ip=False)
    with pytest.raises(NoURLAvailableError):
        _get_deprecated_base_url(hass, internal=True, require_ssl=True)
    with pytest.raises(NoURLAvailableError):
        _get_deprecated_base_url(hass, internal=True, require_standard_port=True)
    # Test with SSL, local IP URL
    hass.config.api = Mock(deprecated_base_url="https://10.10.10.10")
    assert _get_deprecated_base_url(hass, internal=True) == "https://10.10.10.10"
    assert (
        _get_deprecated_base_url(hass, internal=True, require_ssl=True)
        == "https://10.10.10.10"
    )
    assert (
        _get_deprecated_base_url(hass, internal=True, require_standard_port=True)
        == "https://10.10.10.10"
    )
    # Test external URL
    hass.config.api = Mock(deprecated_base_url="https://example.com")
    with pytest.raises(NoURLAvailableError):
        _get_deprecated_base_url(hass, internal=True)
    with pytest.raises(NoURLAvailableError):
        _get_deprecated_base_url(hass, internal=True, require_ssl=True)
    with pytest.raises(NoURLAvailableError):
        _get_deprecated_base_url(hass, internal=True, require_standard_port=True)
    with pytest.raises(NoURLAvailableError):
        _get_deprecated_base_url(hass, internal=True, allow_ip=False)
    # Test with loopback
    hass.config.api = Mock(deprecated_base_url="https://127.0.0.42")
    with pytest.raises(NoURLAvailableError):
        # Fixed: dropped a redundant `assert` keyword here — inside
        # pytest.raises the call is expected to raise, so asserting on its
        # (never produced) return value was misleading dead code.
        _get_deprecated_base_url(hass, internal=True)
    with pytest.raises(NoURLAvailableError):
        _get_deprecated_base_url(hass, internal=True, allow_ip=False)
    with pytest.raises(NoURLAvailableError):
        _get_deprecated_base_url(hass, internal=True, require_ssl=True)
    with pytest.raises(NoURLAvailableError):
        _get_deprecated_base_url(hass, internal=True, require_standard_port=True)
async def test_get_deprecated_base_url_external(hass: HomeAssistant):
    """Test getting an external instance URL from the deprecated base_url.

    A base_url only counts as external for public domains/IPs; private IPs,
    .local domains, and loopback addresses are rejected.
    """
    # Test with SSL and external domain on standard port
    hass.config.api = Mock(deprecated_base_url="https://example.com:443/")
    assert _get_deprecated_base_url(hass) == "https://example.com"
    assert _get_deprecated_base_url(hass, require_ssl=True) == "https://example.com"
    assert (
        _get_deprecated_base_url(hass, require_standard_port=True)
        == "https://example.com"
    )
    # Test without SSL and external domain on non-standard port
    hass.config.api = Mock(deprecated_base_url="http://example.com:8123/")
    assert _get_deprecated_base_url(hass) == "http://example.com:8123"
    with pytest.raises(NoURLAvailableError):
        _get_deprecated_base_url(hass, require_ssl=True)
    with pytest.raises(NoURLAvailableError):
        _get_deprecated_base_url(hass, require_standard_port=True)
    # Test SSL on external IP
    hass.config.api = Mock(deprecated_base_url="https://1.1.1.1")
    assert _get_deprecated_base_url(hass) == "https://1.1.1.1"
    assert _get_deprecated_base_url(hass, require_ssl=True) == "https://1.1.1.1"
    assert (
        _get_deprecated_base_url(hass, require_standard_port=True) == "https://1.1.1.1"
    )
    with pytest.raises(NoURLAvailableError):
        _get_deprecated_base_url(hass, allow_ip=False)
    # Test with private IP
    # Fixed below: dropped redundant `assert` keywords inside pytest.raises
    # blocks — the call is expected to raise, so asserting on its (never
    # produced) return value was misleading dead code.
    hass.config.api = Mock(deprecated_base_url="https://10.10.10.10")
    with pytest.raises(NoURLAvailableError):
        _get_deprecated_base_url(hass)
    with pytest.raises(NoURLAvailableError):
        _get_deprecated_base_url(hass, allow_ip=False)
    with pytest.raises(NoURLAvailableError):
        _get_deprecated_base_url(hass, require_ssl=True)
    with pytest.raises(NoURLAvailableError):
        _get_deprecated_base_url(hass, require_standard_port=True)
    # Test with local domain
    hass.config.api = Mock(deprecated_base_url="https://example.local")
    with pytest.raises(NoURLAvailableError):
        _get_deprecated_base_url(hass)
    with pytest.raises(NoURLAvailableError):
        _get_deprecated_base_url(hass, allow_ip=False)
    with pytest.raises(NoURLAvailableError):
        _get_deprecated_base_url(hass, require_ssl=True)
    with pytest.raises(NoURLAvailableError):
        _get_deprecated_base_url(hass, require_standard_port=True)
    # Test with loopback
    hass.config.api = Mock(deprecated_base_url="https://127.0.0.42")
    with pytest.raises(NoURLAvailableError):
        _get_deprecated_base_url(hass)
    with pytest.raises(NoURLAvailableError):
        _get_deprecated_base_url(hass, allow_ip=False)
    with pytest.raises(NoURLAvailableError):
        _get_deprecated_base_url(hass, require_ssl=True)
    with pytest.raises(NoURLAvailableError):
        _get_deprecated_base_url(hass, require_standard_port=True)
async def test_get_internal_url_with_base_url_fallback(hass: HomeAssistant):
    """Test getting an internal instance URL with the deprecated base_url fallback.

    internal_url wins when set and passing the filters; otherwise the
    deprecated base_url is used for whichever filters internal_url fails.
    """
    hass.config.api = Mock(
        use_ssl=False, port=8123, deprecated_base_url=None, local_ip="192.168.123.123"
    )
    assert hass.config.internal_url is None
    assert _get_internal_url(hass) == "http://192.168.123.123:8123"
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, allow_ip=False)
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_standard_port=True)
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_ssl=True)
    # Add base_url
    hass.config.api = Mock(
        use_ssl=False, port=8123, deprecated_base_url="https://example.local"
    )
    assert _get_internal_url(hass) == "https://example.local"
    assert _get_internal_url(hass, allow_ip=False) == "https://example.local"
    assert (
        _get_internal_url(hass, require_standard_port=True) == "https://example.local"
    )
    assert _get_internal_url(hass, require_ssl=True) == "https://example.local"
    # Add internal URL
    await async_process_ha_core_config(
        hass,
        {"internal_url": "https://internal.local"},
    )
    assert _get_internal_url(hass) == "https://internal.local"
    assert _get_internal_url(hass, allow_ip=False) == "https://internal.local"
    assert (
        _get_internal_url(hass, require_standard_port=True) == "https://internal.local"
    )
    assert _get_internal_url(hass, require_ssl=True) == "https://internal.local"
    # Add internal URL, mixed results: internal_url fails the SSL/standard-port
    # filters, so those fall back to the deprecated base_url.
    await async_process_ha_core_config(
        hass,
        {"internal_url": "http://internal.local:8123"},
    )
    assert _get_internal_url(hass) == "http://internal.local:8123"
    assert _get_internal_url(hass, allow_ip=False) == "http://internal.local:8123"
    assert (
        _get_internal_url(hass, require_standard_port=True) == "https://example.local"
    )
    assert _get_internal_url(hass, require_ssl=True) == "https://example.local"
    # Add internal URL set to an IP
    await async_process_ha_core_config(
        hass,
        {"internal_url": "http://10.10.10.10:8123"},
    )
    assert _get_internal_url(hass) == "http://10.10.10.10:8123"
    assert _get_internal_url(hass, allow_ip=False) == "https://example.local"
    assert (
        _get_internal_url(hass, require_standard_port=True) == "https://example.local"
    )
    assert _get_internal_url(hass, require_ssl=True) == "https://example.local"
async def test_get_external_url_with_base_url_fallback(hass: HomeAssistant):
    """Test getting an external instance URL with the deprecated base_url fallback.

    external_url wins when set and passing the filters; otherwise the
    deprecated base_url is used for whichever filters external_url fails.
    """
    hass.config.api = Mock(use_ssl=False, port=8123, deprecated_base_url=None)
    assert hass.config.internal_url is None
    with pytest.raises(NoURLAvailableError):
        _get_external_url(hass)
    # Test with SSL and external domain on standard port
    hass.config.api = Mock(deprecated_base_url="https://example.com:443/")
    assert _get_external_url(hass) == "https://example.com"
    assert _get_external_url(hass, allow_ip=False) == "https://example.com"
    assert _get_external_url(hass, require_ssl=True) == "https://example.com"
    assert _get_external_url(hass, require_standard_port=True) == "https://example.com"
    # Add external URL
    await async_process_ha_core_config(
        hass,
        {"external_url": "https://external.example.com"},
    )
    assert _get_external_url(hass) == "https://external.example.com"
    assert _get_external_url(hass, allow_ip=False) == "https://external.example.com"
    assert (
        _get_external_url(hass, require_standard_port=True)
        == "https://external.example.com"
    )
    assert _get_external_url(hass, require_ssl=True) == "https://external.example.com"
    # Add external URL, mixed results: external_url fails the SSL/standard-port
    # filters, so those fall back to the deprecated base_url.
    await async_process_ha_core_config(
        hass,
        {"external_url": "http://external.example.com:8123"},
    )
    assert _get_external_url(hass) == "http://external.example.com:8123"
    assert _get_external_url(hass, allow_ip=False) == "http://external.example.com:8123"
    assert _get_external_url(hass, require_standard_port=True) == "https://example.com"
    assert _get_external_url(hass, require_ssl=True) == "https://example.com"
    # Add external URL set to an IP
    await async_process_ha_core_config(
        hass,
        {"external_url": "http://1.1.1.1:8123"},
    )
    assert _get_external_url(hass) == "http://1.1.1.1:8123"
    assert _get_external_url(hass, allow_ip=False) == "https://example.com"
    assert _get_external_url(hass, require_standard_port=True) == "https://example.com"
    assert _get_external_url(hass, require_ssl=True) == "https://example.com"
async def test_get_current_request_url_with_known_host(
    hass: HomeAssistant, current_request
):
    """Test getting current request URL with known hosts addresses.

    localhost, loopback IPs, and the Supervisor-reported hostname are
    accepted for require_current_request even without configured URLs.
    """
    hass.config.api = Mock(
        use_ssl=False, port=8123, local_ip="127.0.0.1", deprecated_base_url=None
    )
    assert hass.config.internal_url is None
    with pytest.raises(NoURLAvailableError):
        get_url(hass, require_current_request=True)
    # Ensure we accept localhost
    with patch(
        "homeassistant.helpers.network._get_request_host", return_value="localhost"
    ):
        assert get_url(hass, require_current_request=True) == "http://localhost:8123"
        with pytest.raises(NoURLAvailableError):
            get_url(hass, require_current_request=True, require_ssl=True)
        with pytest.raises(NoURLAvailableError):
            get_url(hass, require_current_request=True, require_standard_port=True)
    # Ensure we accept local loopback ip (e.g., 127.0.0.8)
    with patch(
        "homeassistant.helpers.network._get_request_host", return_value="127.0.0.8"
    ):
        assert get_url(hass, require_current_request=True) == "http://127.0.0.8:8123"
        with pytest.raises(NoURLAvailableError):
            get_url(hass, require_current_request=True, allow_ip=False)
    # Ensure hostname from Supervisor is accepted transparently
    mock_component(hass, "hassio")
    hass.components.hassio.is_hassio = Mock(return_value=True)
    hass.components.hassio.get_host_info = Mock(
        return_value={"hostname": "homeassistant"}
    )
    with patch(
        "homeassistant.helpers.network._get_request_host",
        return_value="homeassistant.local",
    ):
        assert (
            get_url(hass, require_current_request=True)
            == "http://homeassistant.local:8123"
        )
    # An unknown host never matches.
    with patch(
        "homeassistant.helpers.network._get_request_host", return_value="unknown.local"
    ), pytest.raises(NoURLAvailableError):
        get_url(hass, require_current_request=True)
| |
# -*- encoding: utf-8 -*-
from supriya.tools.ugentools.UGen import UGen
class Gendy2(UGen):
    r'''A dynamic stochastic synthesis generator.

    ::

        >>> gendy_2 = ugentools.Gendy2.ar(
        ...     a=1.17,
        ...     adparam=1,
        ...     ampdist=1,
        ...     ampscale=0.5,
        ...     c=0.31,
        ...     ddparam=1,
        ...     durdist=1,
        ...     durscale=0.5,
        ...     init_cps=12,
        ...     knum=10,
        ...     maxfrequency=660,
        ...     minfrequency=440,
        ...     )
        >>> gendy_2
        Gendy2.ar()

    '''

    ### CLASS VARIABLES ###

    __documentation_section__ = 'Noise UGens'

    __slots__ = ()

    # Order is significant: it is the argument order sent to the server.
    _ordered_input_names = (
        'ampdist',
        'durdist',
        'adparam',
        'ddparam',
        'minfrequency',
        'maxfrequency',
        'ampscale',
        'durscale',
        'init_cps',
        'knum',
        'a',
        'c',
        )

    _valid_calculation_rates = None

    ### INITIALIZER ###

    def __init__(
        self,
        calculation_rate=None,
        a=1.17,
        adparam=1,
        ampdist=1,
        ampscale=0.5,
        c=0.31,
        ddparam=1,
        durdist=1,
        durscale=0.5,
        init_cps=12,
        knum=None,
        maxfrequency=660,
        minfrequency=440,
        ):
        # `knum` defaults to the number of initial control points when unset.
        knum = init_cps if knum is None else knum
        UGen.__init__(
            self,
            calculation_rate=calculation_rate,
            a=a, adparam=adparam, ampdist=ampdist, ampscale=ampscale,
            c=c, ddparam=ddparam, durdist=durdist, durscale=durscale,
            init_cps=init_cps, knum=knum,
            maxfrequency=maxfrequency, minfrequency=minfrequency,
            )

    ### PUBLIC METHODS ###

    @classmethod
    def ar(
        cls,
        a=1.17,
        adparam=1,
        ampdist=1,
        ampscale=0.5,
        c=0.31,
        ddparam=1,
        durdist=1,
        durscale=0.5,
        init_cps=12,
        knum=None,
        maxfrequency=660,
        minfrequency=440,
        ):
        r'''Constructs an audio-rate Gendy2.

        ::

            >>> gendy_2 = ugentools.Gendy2.ar(
            ...     init_cps=12,
            ...     knum=10,
            ...     )
            >>> gendy_2
            Gendy2.ar()

        Returns ugen graph.
        '''
        from supriya.tools import synthdeftools
        return cls._new_expanded(
            calculation_rate=synthdeftools.CalculationRate.AUDIO,
            a=a, adparam=adparam, ampdist=ampdist, ampscale=ampscale,
            c=c, ddparam=ddparam, durdist=durdist, durscale=durscale,
            init_cps=init_cps, knum=knum,
            maxfrequency=maxfrequency, minfrequency=minfrequency,
            )

    @classmethod
    def kr(
        cls,
        a=1.17,
        adparam=1,
        ampdist=1,
        ampscale=0.5,
        c=0.31,
        ddparam=1,
        durdist=1,
        durscale=0.5,
        init_cps=12,
        knum=None,
        maxfrequency=1000,
        minfrequency=20,
        ):
        r'''Constructs a control-rate Gendy2.

        ::

            >>> gendy_2 = ugentools.Gendy2.kr(
            ...     init_cps=12,
            ...     knum=10,
            ...     maxfrequency=1000,
            ...     minfrequency=20,
            ...     )
            >>> gendy_2
            Gendy2.kr()

        Returns ugen graph.
        '''
        from supriya.tools import synthdeftools
        return cls._new_expanded(
            calculation_rate=synthdeftools.CalculationRate.CONTROL,
            a=a, adparam=adparam, ampdist=ampdist, ampscale=ampscale,
            c=c, ddparam=ddparam, durdist=durdist, durscale=durscale,
            init_cps=init_cps, knum=knum,
            maxfrequency=maxfrequency, minfrequency=minfrequency,
            )

    ### PUBLIC PROPERTIES ###

    # Each property resolves its slot in `_ordered_input_names` and returns
    # the corresponding entry of `_inputs`.

    @property
    def a(self):
        r'''Gets `a` input of Gendy2. Returns ugen input.
        '''
        return self._inputs[self._ordered_input_names.index('a')]

    @property
    def adparam(self):
        r'''Gets `adparam` input of Gendy2. Returns ugen input.
        '''
        return self._inputs[self._ordered_input_names.index('adparam')]

    @property
    def ampdist(self):
        r'''Gets `ampdist` input of Gendy2. Returns ugen input.
        '''
        return self._inputs[self._ordered_input_names.index('ampdist')]

    @property
    def ampscale(self):
        r'''Gets `ampscale` input of Gendy2. Returns ugen input.
        '''
        return self._inputs[self._ordered_input_names.index('ampscale')]

    @property
    def c(self):
        r'''Gets `c` input of Gendy2. Returns ugen input.
        '''
        return self._inputs[self._ordered_input_names.index('c')]

    @property
    def ddparam(self):
        r'''Gets `ddparam` input of Gendy2. Returns ugen input.
        '''
        return self._inputs[self._ordered_input_names.index('ddparam')]

    @property
    def durdist(self):
        r'''Gets `durdist` input of Gendy2. Returns ugen input.
        '''
        return self._inputs[self._ordered_input_names.index('durdist')]

    @property
    def durscale(self):
        r'''Gets `durscale` input of Gendy2. Returns ugen input.
        '''
        return self._inputs[self._ordered_input_names.index('durscale')]

    @property
    def init_cps(self):
        r'''Gets `init_cps` input of Gendy2. Returns ugen input.
        '''
        return self._inputs[self._ordered_input_names.index('init_cps')]

    @property
    def knum(self):
        r'''Gets `knum` input of Gendy2. Returns ugen input.
        '''
        return self._inputs[self._ordered_input_names.index('knum')]

    @property
    def maxfrequency(self):
        r'''Gets `maxfrequency` input of Gendy2. Returns ugen input.
        '''
        return self._inputs[self._ordered_input_names.index('maxfrequency')]

    @property
    def minfrequency(self):
        r'''Gets `minfrequency` input of Gendy2. Returns ugen input.
        '''
        return self._inputs[self._ordered_input_names.index('minfrequency')]
| |
import time
import math
import numpy as np
from collections import Counter
# Util
class Cluster:
    """Entropy bookkeeping over a labelled data set.

    Features are stored column-major: ``self._data[i]`` is the i-th
    feature column across all samples.
    """

    def __init__(self, data, labels, base=2):
        # Transpose so each row of _data is one feature across all samples.
        self._data = np.array(data).T
        self._counters = Counter(labels)
        self._labels = np.array(labels)
        self._base = base

    def ent(self, ent=None, eps=1e-12):
        """Shannon entropy of the label distribution (or of explicit counts).

        ``eps`` floors the result so callers can safely divide by it.
        """
        total = len(self._labels)
        counts = list(self._counters.values()) if ent is None else ent
        h = -sum(c / total * math.log(c / total, self._base) for c in counts)
        return max(eps, h)

    def con_ent(self, idx):
        """Conditional entropy of the labels given feature ``idx``."""
        column = self._data[idx]
        total = len(column)
        acc = 0.0
        for value in set(column):
            mask = column == value
            sub_labels = self._labels[mask]
            sub_samples = self._data.T[mask]
            sub_ent = Cluster(sub_samples, sub_labels, base=self._base).ent()
            acc += len(sub_samples) / total * sub_ent
        return acc

    def info_gain(self, idx):
        """Return ``(information gain, conditional entropy)`` for feature ``idx``."""
        cond = self.con_ent(idx)
        return self.ent() - cond, cond
# Node
class CvDNode:
    """One node of an ID3-style decision tree (entropy / information-gain splits)."""
    def __init__(self, tree=None, max_depth=None, base=2, ent=None,
                 depth=0, parent=None, is_root=True, prev_feat="Root"):
        # Training data/labels are attached later via feed_data().
        self._data = self.labels = None
        self._max_depth = max_depth
        self._base = base          # logarithm base used for entropy
        self.ent = ent             # entropy of the labels reaching this node
        self.children = {}         # feature value -> child CvDNode
        self.category = None       # predicted class once this node is a leaf
        self.tree = tree
        if tree is not None:
            tree.nodes.append(self)
        self.feature_dim = None    # index of the feature this node splits on
        self.feats = []            # feature indices still available for splitting
        self._depth = depth
        self.parent = parent
        self.is_root = is_root
        self.prev_feat = prev_feat  # feature value on the edge from the parent
        self.leafs = {}            # key -> leaf node, for every leaf under this node
        self.pruned = False        # set when this node is removed by pruning
    @property
    def key(self):
        # Unique leaf-table key; id() disambiguates nodes with equal depth/edge.
        return self._depth, self.prev_feat, id(self)
    @property
    def height(self):
        """Height of the subtree rooted here (a leaf has height 1)."""
        if self.category is not None:
            return 1
        return 1 + max([_child.height for _child in self.children.values()])
    def feed_data(self, data, labels):
        """Attach the training samples and their labels to this node."""
        self._data = data
        self.labels = labels
    def stop(self, eps):
        """Turn into a leaf when data/entropy/depth limits are hit; True if stopped."""
        if (
            self._data.shape[1] == 1 or (self.ent is not None and self.ent <= eps)
            or (self._max_depth is not None and self._depth >= self._max_depth)
        ):
            self._handle_terminate()
            return True
        return False
    def early_stop(self, max_gain, eps):
        """Turn into a leaf when the best information gain is negligible."""
        if max_gain <= eps:
            self._handle_terminate()
            return True
        return False
    def get_class(self):
        # Majority vote over the labels reaching this node.
        _counter = Counter(self.labels)
        return max(_counter.keys(), key=(lambda key: _counter[key]))
    def _gen_children(self, feat, con_ent):
        """Split on feature ``feat``: one child per observed feature value."""
        features = self._data[:, feat]
        _new_feats = self.feats[:]
        _new_feats.remove(feat)
        # NOTE(review): the loop variable shadows the ``feat`` parameter below;
        # the parameter is no longer needed at that point, so this is benign.
        for feat in set(features):
            _feat_mask = features == feat
            _new_node = self.__class__(
                self.tree, self._max_depth, self._base, ent=con_ent,
                depth=self._depth + 1, parent=self, is_root=False, prev_feat=feat)
            _new_node.feats = _new_feats
            self.children[feat] = _new_node
            _new_node.fit(self._data[_feat_mask, :], self.labels[_feat_mask])
    def _handle_terminate(self):
        """Make this node a leaf and register it with every ancestor's leaf table."""
        self.category = self.get_class()
        _parent = self
        while _parent is not None:
            _parent.leafs[self.key] = self
            _parent = _parent.parent
    def fit(self, data, labels, eps=1e-8):
        """Recursively grow the subtree; the root triggers tree pruning afterwards."""
        if data is not None and labels is not None:
            self.feed_data(data, labels)
        if self.stop(eps):
            return
        _cluster = Cluster(self._data, self.labels, self._base)
        # Pick the remaining feature with the highest information gain.
        _max_gain, _con_ent = _cluster.info_gain(self.feats[0])
        _max_feature = self.feats[0]
        for feat in self.feats[1:]:
            _tmp_gain, _tmp_con_ent = _cluster.info_gain(feat)
            if _tmp_gain > _max_gain:
                (_max_gain, _con_ent), _max_feature = (_tmp_gain, _tmp_con_ent), feat
        if self.early_stop(_max_gain, eps):
            return
        self.feature_dim = _max_feature
        self._gen_children(_max_feature, _con_ent)
        if self.is_root:
            self.tree.prune()
    def mark_pruned(self):
        # Flag this node and its whole subtree for removal from tree.nodes.
        self.pruned = True
        if self.children:
            for child in self.children.values():
                child.mark_pruned()
    def prune(self):
        """Collapse this subtree into a single leaf and fix ancestor leaf tables."""
        if self.category is None:
            self.category = self.get_class()
        self.feature_dim = None
        # Remove every leaf that lived under this node from all ancestors,
        # then register this node itself as the replacement leaf.
        _pop_lst = [key for key in self.leafs]
        _parent = self
        while _parent is not None:
            for _k in _pop_lst:
                _parent.leafs.pop(_k)
            _parent.leafs[self.key] = self
            _parent = _parent.parent
        self.mark_pruned()
        self.children = {}
    def predict_one(self, x):
        """Predict the class of a single sample by walking down the tree."""
        if self.category is not None:
            return self.category
        try:
            return self.children[x[self.feature_dim]].predict_one(x)
        except KeyError:
            # Unseen feature value: fall back to this node's majority class.
            return self.get_class()
    def predict(self, x):
        """Predict classes for a batch of samples (input promoted to 2-D)."""
        if self.category is not None:
            if self.is_root:
                return [self.category] * len(x)
            return self.category
        x = np.atleast_2d(x)
        return [self.predict_one(xx) for xx in x]
    def view(self, indent=4):
        # Pretty-print the subtree, indented proportionally to depth.
        print(" " * indent * self._depth, self)
        for _node in sorted(self.children.values()):
            _node.view()
    def __lt__(self, other):
        # Order siblings by the feature value on their incoming edge (for view()).
        return self.prev_feat < other.prev_feat
    def __str__(self):
        if self.category is None:
            return "CvDNode ({}) ({} -> {})".format(
                self._depth, self.prev_feat, self.feature_dim)
        return "CvDNode ({}) ({} -> class: {})".format(
            self._depth, self.prev_feat, self.category)
    __repr__ = __str__
# Tree
class CvDBase:
    """ID3 decision tree: owns the node registry, drives fitting and pruning."""
    def __init__(self, max_depth=None):
        self.nodes = []            # flat registry of every node, in creation order
        self._max_depth = max_depth
        self.root = CvDNode(self, max_depth)
    @property
    def depth(self):
        # Tree depth equals the height of the root node.
        return self.root.height
    @staticmethod
    def acc(y, y_pred):
        """Fraction of predictions that match the true labels."""
        return np.sum(np.array(y) == np.array(y_pred)) / len(y)
    def fit(self, data=None, labels=None, eps=1e-8):
        """Fit the tree; every feature column starts out available for splitting."""
        data, labels = np.array(data), np.array(labels)
        self.root.feats = [i for i in range(data.shape[1])]
        self.root.fit(data, labels, eps)
    def prune(self, alpha=1):
        """Greedy cost-complexity pruning; collapses one subtree per pass.

        ``alpha`` penalizes the number of leaves kept. Recurses until no
        internal node is cheaper as a single leaf.
        """
        if self.depth <= 2:
            return
        _tmp_nodes = [node for node in self.nodes if not node.is_root and not node.category]
        # Cost of keeping each subtree vs. replacing it with one leaf.
        _old = np.array([sum(
            [leaf.ent * len(leaf.labels) for leaf in node.leafs.values()]
        ) + alpha * len(node.leafs) for node in _tmp_nodes])
        _new = np.array([node.ent * len(node.labels) + alpha for node in _tmp_nodes])
        _mask = (_old - _new) > 0
        arg = np.argmax(_mask)
        if _mask[arg]:
            _tmp_nodes[arg].prune()
            # Drop every node the prune marked, then try to prune again.
            for i in range(len(self.nodes) - 1, -1, -1):
                if self.nodes[i].pruned:
                    self.nodes.pop(i)
            self.prune(alpha)
    def predict_one(self, x):
        """Predict the class of a single sample."""
        return self.root.predict_one(x)
    def predict(self, x):
        """Predict classes for a batch of samples."""
        return self.root.predict(x)
    def estimate(self, x, y):
        # Print accuracy on (x, y) as a percentage.
        y = np.array(y)
        print("Acc: {:8.6} %".format(100 * np.sum(self.predict(x) == y) / len(y)))
    def view(self):
        # Print the whole tree.
        self.root.view()
    def __str__(self):
        return "CvDTree ({})".format(self.depth)
    __repr__ = __str__
if __name__ == '__main__':
    # Load the comma-separated data set: first column is the label,
    # the remaining columns are categorical features.
    _data, _x, _y = [], [], []
    with open("../data.txt", "r") as file:
        for line in file:
            # strip() removes the trailing newline that would otherwise be
            # glued onto the last feature value of every row, making e.g.
            # "y\n" compare unequal to "y" at prediction time.
            _data.append(line.strip().split(","))
    np.random.shuffle(_data)
    for line in _data:
        _y.append(line.pop(0))
        _x.append(line)
    _x, _y = np.array(_x), np.array(_y)
    # First 5000 rows train the tree; the rest evaluate it.
    train_num = 5000
    x_train = _x[:train_num]
    y_train = _y[:train_num]
    x_test = _x[train_num:]
    y_test = _y[train_num:]
    _t = time.time()
    _tree = CvDBase()
    _tree.fit(x_train, y_train)
    _tree.view()
    _tree.estimate(x_test, y_test)
    print("Time cost: {:8.6}".format(time.time() - _t))
| |
"""
kombu.entity
================
Exchange and Queue declarations.
:copyright: (c) 2009 - 2012 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from .abstract import MaybeChannelBound
TRANSIENT_DELIVERY_MODE = 1
PERSISTENT_DELIVERY_MODE = 2
DELIVERY_MODES = {"transient": TRANSIENT_DELIVERY_MODE,
"persistent": PERSISTENT_DELIVERY_MODE}
__all__ = ["Exchange", "Queue"]
class Exchange(MaybeChannelBound):
    """An Exchange declaration.

    :keyword name: See :attr:`name`.
    :keyword type: See :attr:`type`.
    :keyword channel: See :attr:`channel`.
    :keyword durable: See :attr:`durable`.
    :keyword auto_delete: See :attr:`auto_delete`.
    :keyword delivery_mode: See :attr:`delivery_mode`.
    :keyword arguments: See :attr:`arguments`.

    .. attribute:: name

        Name of the exchange. Default is no name (the default exchange).

    .. attribute:: type

        AMQP defines four default exchange types (routing algorithms) that
        covers most of the common messaging use cases. An AMQP broker can
        also define additional exchange types, so see your broker
        manual for more information about available exchange types.

        * `direct` (*default*)

            Direct match between the routing key in the message, and the
            routing criteria used when a queue is bound to this exchange.

        * `topic`

            Wildcard match between the routing key and the routing pattern
            specified in the exchange/queue binding. The routing key is
            treated as zero or more words delimited by `"."` and
            supports special wildcard characters. `"*"` matches a
            single word and `"#"` matches zero or more words.

        * `fanout`

            Queues are bound to this exchange with no arguments. Hence any
            message sent to this exchange will be forwarded to all queues
            bound to this exchange.

        * `headers`

            Queues are bound to this exchange with a table of arguments
            containing headers and values (optional). A special argument
            named "x-match" determines the matching algorithm, where
            `"all"` implies an `AND` (all pairs must match) and
            `"any"` implies `OR` (at least one pair must match).

            :attr:`arguments` is used to specify the arguments.

        This description of AMQP exchange types was shamelessly stolen
        from the blog post `AMQP in 10 minutes: Part 4`_ by
        Rajith Attapattu. This article is recommended reading.

        .. _`AMQP in 10 minutes: Part 4`: http://bit.ly/amqp-exchange-types

    .. attribute:: channel

        The channel the exchange is bound to (if bound).

    .. attribute:: durable

        Durable exchanges remain active when a server restarts. Non-durable
        exchanges (transient exchanges) are purged when a server restarts.
        Default is :const:`True`.

    .. attribute:: auto_delete

        If set, the exchange is deleted when all queues have finished
        using it. Default is :const:`False`.

    .. attribute:: delivery_mode

        The default delivery mode used for messages. The value is an integer,
        or alias string.

        * 1 or `"transient"`

            The message is transient. Which means it is stored in
            memory only, and is lost if the server dies or restarts.

        * 2 or "persistent" (*default*)

            The message is persistent. Which means the message is
            stored both in-memory, and on disk, and therefore
            preserved if the server dies or restarts.

        The default value is 2 (persistent).

    .. attribute:: arguments

        Additional arguments to specify when the exchange is declared.

    """
    TRANSIENT_DELIVERY_MODE = TRANSIENT_DELIVERY_MODE
    PERSISTENT_DELIVERY_MODE = PERSISTENT_DELIVERY_MODE

    # Class-level defaults for the declaration attributes.
    name = ""
    type = "direct"
    durable = True
    auto_delete = False
    delivery_mode = PERSISTENT_DELIVERY_MODE

    # (attribute, coercion) pairs — presumably consumed by the
    # MaybeChannelBound base (defined in .abstract) to turn keyword
    # arguments into instance attributes; TODO confirm against base class.
    # The delivery_mode entry maps alias strings onto their integer codes.
    attrs = (("name", None),
             ("type", None),
             ("arguments", None),
             ("durable", bool),
             ("auto_delete", bool),
             ("delivery_mode", lambda m: DELIVERY_MODES.get(m) or m))

    def __init__(self, name="", type="", channel=None, **kwargs):
        super(Exchange, self).__init__(**kwargs)
        self.name = name or self.name
        self.type = type or self.type
        self.maybe_bind(channel)

    def __hash__(self):
        # "E|" prefix keeps exchange hashes distinct from queue hashes ("Q|").
        return hash("E|%s" % (self.name, ))

    def declare(self, nowait=False):
        """Declare the exchange.

        Creates the exchange on the broker.

        :keyword nowait: If set the server will not respond, and a
            response will not be waited for. Default is :const:`False`.

        """
        return self.channel.exchange_declare(exchange=self.name,
                                             type=self.type,
                                             durable=self.durable,
                                             auto_delete=self.auto_delete,
                                             arguments=self.arguments,
                                             nowait=nowait)

    def Message(self, body, delivery_mode=None, priority=None,
                content_type=None, content_encoding=None, properties=None,
                headers=None):
        """Create message instance to be sent with :meth:`publish`.

        :param body: Message body.

        :keyword delivery_mode: Set custom delivery mode. Defaults
            to :attr:`delivery_mode`.

        :keyword priority: Message priority, 0 to 9. (currently not
            supported by RabbitMQ).

        :keyword content_type: The messages content_type. If content_type
            is set, no serialization occurs as it is assumed this is either
            a binary object, or you've done your own serialization.
            Leave blank if using built-in serialization as our library
            properly sets content_type.

        :keyword content_encoding: The character set in which this object
            is encoded. Use "binary" if sending in raw binary objects.
            Leave blank if using built-in serialization as our library
            properly sets content_encoding.

        :keyword properties: Message properties.

        :keyword headers: Message headers.

        """
        properties = {} if properties is None else properties
        # Normalize alias strings ("transient"/"persistent") to integer codes.
        delivery_mode = delivery_mode or self.delivery_mode
        properties["delivery_mode"] = DELIVERY_MODES.get(delivery_mode,
                                                         delivery_mode)
        return self.channel.prepare_message(body,
                                            properties=properties,
                                            priority=priority,
                                            content_type=content_type,
                                            content_encoding=content_encoding,
                                            headers=headers)

    def publish(self, message, routing_key=None, mandatory=False,
                immediate=False, exchange=None):
        """Publish message.

        :param message: :meth:`Message` instance to publish.
        :param routing_key: Routing key.
        :param mandatory: Currently not supported.
        :param immediate: Currently not supported.

        """
        exchange = exchange or self.name
        return self.channel.basic_publish(message,
                                          exchange=exchange,
                                          routing_key=routing_key,
                                          mandatory=mandatory,
                                          immediate=immediate)

    def delete(self, if_unused=False, nowait=False):
        """Delete the exchange declaration on server.

        :keyword if_unused: Delete only if the exchange has no bindings.
            Default is :const:`False`.

        :keyword nowait: If set the server will not respond, and a
            response will not be waited for. Default is :const:`False`.

        """
        return self.channel.exchange_delete(exchange=self.name,
                                            if_unused=if_unused,
                                            nowait=nowait)

    def __eq__(self, other):
        # Two exchanges are equal when every declaration attribute matches.
        if isinstance(other, Exchange):
            return (self.name == other.name and
                    self.type == other.type and
                    self.arguments == other.arguments and
                    self.durable == other.durable and
                    self.auto_delete == other.auto_delete and
                    self.delivery_mode == other.delivery_mode)
        return False

    def __repr__(self):
        return super(Exchange, self).__repr__("Exchange %s(%s)" % (self.name,
                                                                   self.type))

    @property
    def can_cache_declaration(self):
        # A durable exchange survives broker restarts, so its declaration
        # can safely be cached by the client.
        return self.durable
class Queue(MaybeChannelBound):
    """A Queue declaration.

    :keyword name: See :attr:`name`.
    :keyword exchange: See :attr:`exchange`.
    :keyword routing_key: See :attr:`routing_key`.
    :keyword channel: See :attr:`channel`.
    :keyword durable: See :attr:`durable`.
    :keyword exclusive: See :attr:`exclusive`.
    :keyword auto_delete: See :attr:`auto_delete`.
    :keyword queue_arguments: See :attr:`queue_arguments`.
    :keyword binding_arguments: See :attr:`binding_arguments`.

    .. attribute:: name

        Name of the queue. Default is no name (default queue destination).

    .. attribute:: exchange

        The :class:`Exchange` the queue binds to.

    .. attribute:: routing_key

        The routing key (if any), also called *binding key*.

        The interpretation of the routing key depends on
        the :attr:`Exchange.type`.

        * direct exchange

            Matches if the routing key property of the message and
            the :attr:`routing_key` attribute are identical.

        * fanout exchange

            Always matches, even if the binding does not have a key.

        * topic exchange

            Matches the routing key property of the message by a primitive
            pattern matching scheme. The message routing key then consists
            of words separated by dots (`"."`, like domain names), and
            two special characters are available; star (`"*"`) and hash
            (`"#"`). The star matches any word, and the hash matches
            zero or more words. For example `"*.stock.#"` matches the
            routing keys `"usd.stock"` and `"eur.stock.db"` but not
            `"stock.nasdaq"`.

    .. attribute:: channel

        The channel the Queue is bound to (if bound).

    .. attribute:: durable

        Durable queues remain active when a server restarts.
        Non-durable queues (transient queues) are purged if/when
        a server restarts.
        Note that durable queues do not necessarily hold persistent
        messages, although it does not make sense to send
        persistent messages to a transient queue.

        Default is :const:`True`.

    .. attribute:: exclusive

        Exclusive queues may only be consumed from by the
        current connection. Setting the 'exclusive' flag
        always implies 'auto-delete'.

        Default is :const:`False`.

    .. attribute:: auto_delete

        If set, the queue is deleted when all consumers have
        finished using it. Last consumer can be cancelled
        either explicitly or because its channel is closed. If
        there was no consumer ever on the queue, it won't be
        deleted.

    .. attribute:: queue_arguments

        Additional arguments used when declaring the queue.

    .. attribute:: binding_arguments

        Additional arguments used when binding the queue.

    .. attribute:: alias

        Unused in Kombu, but applications can take advantage of this.
        For example to give alternate names to queues with automatically
        generated queue names.

    """
    # Class-level defaults for the declaration attributes.
    name = ""
    exchange = Exchange("")
    routing_key = ""

    durable = True
    exclusive = False
    auto_delete = False
    no_ack = False

    # (attribute, coercion) pairs — presumably consumed by the
    # MaybeChannelBound base (defined in .abstract) to turn keyword
    # arguments into instance attributes; TODO confirm against base class.
    attrs = (("name", None),
             ("exchange", None),
             ("routing_key", None),
             ("queue_arguments", None),
             ("binding_arguments", None),
             ("durable", bool),
             ("exclusive", bool),
             ("auto_delete", bool),
             ("no_ack", None),
             ("alias", None))

    def __init__(self, name="", exchange=None, routing_key="", channel=None,
                 **kwargs):
        super(Queue, self).__init__(**kwargs)
        self.name = name or self.name
        self.exchange = exchange or self.exchange
        self.routing_key = routing_key or self.routing_key
        # exclusive implies auto-delete.
        if self.exclusive:
            self.auto_delete = True
        self.maybe_bind(channel)

    def __hash__(self):
        # "Q|" prefix keeps queue hashes distinct from exchange hashes ("E|").
        return hash("Q|%s" % (self.name, ))

    def when_bound(self):
        # Bind the associated exchange to the same channel as this queue.
        if self.exchange:
            self.exchange = self.exchange(self.channel)

    def declare(self, nowait=False):
        """Declares the queue, the exchange and binds the queue to
        the exchange."""
        if self.exchange:
            self.exchange.declare(nowait)
        self.queue_declare(nowait, passive=False)
        # self.name should be set by queue_declare in the case that
        # we're working with anonymous queues
        if self.name:
            self.queue_bind(nowait)
        return self.name

    def queue_declare(self, nowait=False, passive=False):
        """Declare queue on the server.

        :keyword nowait: Do not wait for a reply.
        :keyword passive: If set, the server will not create the queue.
            The client can use this to check whether a queue exists
            without modifying the server state.

        """
        ret = self.channel.queue_declare(queue=self.name,
                                         passive=passive,
                                         durable=self.durable,
                                         exclusive=self.exclusive,
                                         auto_delete=self.auto_delete,
                                         arguments=self.queue_arguments,
                                         nowait=nowait)
        # For anonymous queues the broker assigns the name, returned as ret[0].
        if not self.name:
            self.name = ret[0]
        return ret

    def queue_bind(self, nowait=False):
        """Create the queue binding on the server."""
        return self.channel.queue_bind(queue=self.name,
                                       exchange=self.exchange.name,
                                       routing_key=self.routing_key,
                                       arguments=self.binding_arguments,
                                       nowait=nowait)

    def get(self, no_ack=None):
        """Poll the server for a new message.

        Returns the message instance if a message was available,
        or :const:`None` otherwise.

        :keyword no_ack: If set messages received does not have to
            be acknowledged.

        This method provides direct access to the messages in a
        queue using a synchronous dialogue, designed for
        specific types of applications where synchronous functionality
        is more important than performance.

        """
        no_ack = self.no_ack if no_ack is None else no_ack
        message = self.channel.basic_get(queue=self.name, no_ack=no_ack)
        if message is not None:
            # Convert to this transport's native message type, if supported.
            m2p = getattr(self.channel, "message_to_python", None)
            if m2p:
                message = m2p(message)
        return message

    def purge(self, nowait=False):
        """Remove all ready messages from the queue."""
        return self.channel.queue_purge(queue=self.name,
                                        nowait=nowait) or 0

    def consume(self, consumer_tag='', callback=None, no_ack=None,
                nowait=False):
        """Start a queue consumer.

        Consumers last as long as the channel they were created on, or
        until the client cancels them.

        :keyword consumer_tag: Unique identifier for the consumer. The
            consumer tag is local to a connection, so two clients
            can use the same consumer tags. If this field is empty
            the server will generate a unique tag.

        :keyword no_ack: If set messages received does not have to
            be acknowledged.

        :keyword nowait: Do not wait for a reply.

        :keyword callback: callback called for each delivered message

        """
        if no_ack is None:
            no_ack = self.no_ack
        return self.channel.basic_consume(queue=self.name,
                                          no_ack=no_ack,
                                          consumer_tag=consumer_tag or '',
                                          callback=callback,
                                          nowait=nowait)

    def cancel(self, consumer_tag):
        """Cancel a consumer by consumer tag."""
        return self.channel.basic_cancel(consumer_tag)

    def delete(self, if_unused=False, if_empty=False, nowait=False):
        """Delete the queue.

        :keyword if_unused: If set, the server will only delete the queue
            if it has no consumers. A channel error will be raised
            if the queue has consumers.

        :keyword if_empty: If set, the server will only delete the queue
            if it is empty. If it is not empty a channel error will be raised.

        :keyword nowait: Do not wait for a reply.

        """
        return self.channel.queue_delete(queue=self.name,
                                         if_unused=if_unused,
                                         if_empty=if_empty,
                                         nowait=nowait)

    def unbind(self):
        """Delete the binding on the server."""
        return self.channel.queue_unbind(queue=self.name,
                                         exchange=self.exchange.name,
                                         routing_key=self.routing_key,
                                         arguments=self.binding_arguments)

    def __eq__(self, other):
        # Two queues are equal when every declaration attribute matches.
        if isinstance(other, Queue):
            return (self.name == other.name and
                    self.exchange == other.exchange and
                    self.routing_key == other.routing_key and
                    self.queue_arguments == other.queue_arguments and
                    self.binding_arguments == other.binding_arguments and
                    self.durable == other.durable and
                    self.exclusive == other.exclusive and
                    self.auto_delete == other.auto_delete)
        return False

    def __repr__(self):
        return super(Queue, self).__repr__(
            "Queue %s -> %s -> %s" % (self.name,
                                      self.exchange,
                                      self.routing_key))

    @property
    def can_cache_declaration(self):
        # A durable queue survives broker restarts, so its declaration
        # can safely be cached by the client.
        return self.durable
| |
import matplotlib.pyplot as plt
import numpy as np
# import pandas as pd
# import os
# This is James Sungjin Kim's library
import kutil
def gff_vec(smiles_vec, rad=2, nBits=1024):
    """Generate one fingerprint per SMILES code in ``smiles_vec`` via gff()."""
    return list(map(lambda smiles: gff(smiles, rad, nBits), smiles_vec))
def gfb_vec(smiles_vec, rad=4, nBits=1024):
    """Generate one fingerprint per SMILES code in ``smiles_vec`` via gfb()."""
    return list(map(lambda smiles: gfb(smiles, rad, nBits), smiles_vec))
def gff_binlist(smiles_vec, rad=2, nBits=1024):
    """
    Generate a list of binary (0/1 int) fingerprint lists from a SMILES vector.

    Each fingerprint is expanded/trimmed to exactly ``nBits`` entries by
    kutil.sleast(); note that the raw fingerprint may be wider than nBits,
    in which case trailing bits are dropped.
    """
    ff_vec = gff_vec(smiles_vec, rad, nBits)
    # int.from_bytes() replaces the Python 2-only ``.encode("hex")`` +
    # int(..., 16) round-trip; both yield the blob's big-endian integer value.
    ff_bin = [bin(int.from_bytes(x.ToBinary(), "big")) for x in ff_vec]
    # x[2:] strips the '0b' prefix before padding/truncating to nBits.
    return [list(map(int, kutil.sleast(x[2:], nBits))) for x in ff_bin]
def gfb_binlist(smiles_vec, rad=4, nBits=1024):
    """
    Generate a list of binary (0/1 int) fingerprint lists from a SMILES vector,
    using the gfb() fingerprint (constant columns are cleaned later by gfp_M).

    Each fingerprint is expanded/trimmed to exactly ``nBits`` entries by
    kutil.sleast().
    """
    ff_vec = gfb_vec(smiles_vec, rad, nBits)
    # int.from_bytes() replaces the Python 2-only ``.encode("hex")`` +
    # int(..., 16) round-trip; both yield the blob's big-endian integer value.
    ff_bin = [bin(int.from_bytes(x.ToBinary(), "big")) for x in ff_vec]
    # x[2:] strips the '0b' prefix before padding/truncating to nBits.
    return [list(map(int, kutil.sleast(x[2:], nBits))) for x in ff_bin]
def gfp_binlist(smiles_vec, rad=4, nBits=1024):
    """Alias for gff_binlist() with rad defaulting to 4.

    Fix: the result was previously computed but not returned (the function
    always yielded None); the missing ``return`` is restored.
    """
    return gff_binlist(smiles_vec, rad=rad, nBits=nBits)
def gff_binlist_bnbp(smiles_vec, rad=2, nBits=1024, bnbp='bn'):
    """
    Generate a list of binary or bipolar fingerprint lists from a SMILES vector.

    Each fingerprint is expanded/trimmed to exactly ``nBits`` entries by
    kutil.sleast().

    :param bnbp: 'bn' for binary {0, 1} output, 'bp' for bipolar output
        (mapped through kutil.int_bp).
    """
    ff_vec = gff_vec(smiles_vec, rad, nBits)
    # int.from_bytes() replaces the Python 2-only ``.encode("hex")`` +
    # int(..., 16) round-trip; both yield the blob's big-endian integer value.
    ff_bin = [bin(int.from_bytes(x.ToBinary(), "big")) for x in ff_vec]
    # Choose the per-bit mapper once instead of branching on every list.
    mapper = kutil.int_bp if bnbp == 'bp' else int
    return [list(map(mapper, kutil.sleast(x[2:], nBits))) for x in ff_bin]
def gff_M(smiles_vec, rad=2, nBits=1024):
    """Build a binary fingerprint matrix (one row per molecule) from SMILES codes."""
    bin_rows = gff_binlist(smiles_vec, rad=rad, nBits=nBits)
    return np.mat(bin_rows)
def gfp_M(smiles_vec, rad=4, nBits=1024):
    """Build a cleaned binary fingerprint matrix from SMILES codes.

    Columns that are constant (all ones or all zeros) are removed
    by clean_fp_M() before the matrix is returned.
    """
    raw = np.mat(gfb_binlist(smiles_vec, rad=rad, nBits=nBits))
    return clean_fp_M(raw)
def gff_M_bnbp(smiles_vec, rad=2, nBits=1024, bnbp='bn'):
    """Build a binary ('bn') or bipolar ('bp') fingerprint matrix from SMILES codes."""
    rows = gff_binlist_bnbp(smiles_vec, rad, nBits, bnbp)
    return np.mat(rows)
def calc_tm_dist_int(A_int, B_int):
    """Tanimoto coefficient of two integer-encoded fingerprints.

    Bits are compared via popcounts of A, B and A & B:
    ``c / (a + b - c)``. Two empty fingerprints are defined
    to have coefficient 1.
    """
    a = bin(A_int).count('1')
    b = bin(B_int).count('1')
    c = bin(A_int & B_int).count('1')
    if a == 0 and b == 0:
        return 1
    return float(c) / float(a + b - c)
def calc_tm_dist(A_smiles, B_smiles):
    """Tanimoto coefficient of two SMILES strings (via ff_int fingerprints)."""
    return calc_tm_dist_int(ff_int(A_smiles), ff_int(B_smiles))
def getw(Xs, Ys, N=57, nBits=400):
    """Compute the linear-regression weight vector for the first N samples.

    Fix: ``nBits`` was previously hard-coded to 400 in the gff_M() call,
    silently ignoring the parameter.

    :param Xs: vector of SMILES codes.
    :param Ys: vector of target values.
    :param N: number of leading samples used.
    :param nBits: fingerprint width passed through to gff_M().
    :return: weight vector w solving X w = y in the least-squares sense.
    """
    Xs_N = Xs[:N]
    Ys_N = Ys[:N]
    X = gff_M(Xs_N, nBits=nBits)
    y = np.mat(Ys_N).T
    print(X.shape)
    # Least-squares solution of the over-determined system X w = y.
    w = np.linalg.pinv(X) * y
    plt.plot(w)
    plt.show()
    return w
def getw_clean(Xs, Ys, N=None, rad=2, nBits=1024):
    "Fit a linear model on the first N samples whose SMILES codes are safe."
    # clean_smiles_vec_io() (project helper) presumably drops samples whose
    # SMILES cannot be fingerprinted, keeping Xs/Ys aligned — TODO confirm.
    nXs, nYs = clean_smiles_vec_io(Xs, Ys)
    # print len(nXs), len(nYs)
    if N is None:
        N = len(nXs)
    X = gff_M(nXs[:N], rad=rad, nBits=nBits)
    y = np.mat(nYs[:N]).T
    # Least-squares solution of X w = y via the pseudo-inverse.
    w = np.linalg.pinv(X) * y
    plt.plot(w)
    plt.title('Weight Vector')
    plt.show()
    # Report the training residual: SE, MSE and RMSE of y - X w.
    y_calc = X*w
    e = y - y_calc
    se = (e.T * e)
    mse = (e.T * e) / len(e)
    print("SE =", se)
    print("MSE =", mse)
    print("RMSE =", np.sqrt(mse))
    plt.plot(e)
    plt.title("Error Vector: y - y_{calc}")
    plt.show()
    plt.plot(y, label='original')
    plt.plot(y_calc, label='predicted')
    plt.legend()
    plt.title("Output values: org vs. pred")
    plt.show()
    return w
def getw_clean_bnbp(Xs, Ys, N=None, rad=2, nBits=1024, bnbp='bn'):
    """
    Fit a linear model on the first N samples whose SMILES codes are safe,
    with the fingerprint encoded as binary ('bn') or bipolar ('bp') input.
    """
    # clean_smiles_vec_io() (project helper) presumably drops samples whose
    # SMILES cannot be fingerprinted, keeping Xs/Ys aligned — TODO confirm.
    nXs, nYs = clean_smiles_vec_io(Xs, Ys)
    # print len(nXs), len(nYs)
    if N is None:
        N = len(nXs)
    X = gff_M_bnbp(nXs[:N], rad=rad, nBits=nBits, bnbp=bnbp)
    y = np.mat(nYs[:N]).T
    # Least-squares solution of X w = y via the pseudo-inverse.
    w = np.linalg.pinv(X) * y
    plt.plot(w)
    plt.title('Weight Vector')
    plt.show()
    # Report the training residual: SE, MSE and RMSE of y - X w.
    y_calc = X*w
    e = y - y_calc
    se = (e.T * e)
    mse = (e.T * e) / len(e)
    print("SE =", se)
    print("MSE =", mse)
    print("RMSE =", np.sqrt(mse))
    plt.plot(e)
    plt.title("Error Vector: y - y_{calc}")
    plt.show()
    plt.plot(y, label='original')
    plt.plot(y_calc, label='predicted')
    plt.legend()
    plt.title("Output values: org vs. pred")
    plt.show()
    return w
def fpM_pat(xM):
    """Plot, per fingerprint bit, how many molecules have that bit set."""
    bit_counts = np.sum(xM, axis=0)
    plt.plot(bit_counts)
    plt.xlabel('fingerprint bit')
    plt.ylabel('Aggreation number')
    plt.show()
def gen_input_files(A, yV, fname_common='ann'):
    """Generate ``<fname>_in.data`` (training) and ``<fname>_run.data`` (testing)
    input files for the external ANN program (ann_aq.c / ./ann_aq).

    The run file carries the same inputs but no output values, and its
    header therefore omits the output count.

    :param A: 2-D input matrix (numpy matrix/array), one row per sample.
    :param yV: column vector (numpy matrix/array) of target outputs.
    :param fname_common: prefix used for both generated file names.
    """
    no_of_set = A.shape[0]
    no_of_input = A.shape[1]
    const_no_of_output = 1  # only a single output is currently supported
    # Training file: header line, then "<inputs...>\n<output>\n" per sample.
    with open("{}_in.data".format(fname_common), "w") as f:
        f.write("%d %d %d\n" % (no_of_set, no_of_input, const_no_of_output))
        for ix in range(no_of_set):
            for iy in range(no_of_input):
                f.write("{} ".format(A[ix, iy]))
            f.write("\n{}\n".format(yV[ix, 0]))
    # Fix: message typo "trainig" -> "training".
    print("{}_in.data is saved for training.".format(fname_common))
    # Run file: same inputs, no outputs, header without the output count.
    with open("{}_run.data".format(fname_common), "w") as f:
        f.write("%d %d\n" % (no_of_set, no_of_input))
        for ix in range(no_of_set):
            for iy in range(no_of_input):
                f.write("{} ".format(A[ix, iy]))
            f.write("\n")
    print("{}_run.data is saved for testing.".format(fname_common))
def gen_input_files_valid(At, yt, Av):
    """Generate ANN input files when a validation split is used.

    ``At``/``yt`` are the training inputs and targets; ``Av`` holds the
    validation inputs. Writes ``ann_in.data`` (training, with outputs) and
    ``ann_run.data`` (validation, inputs only) for ann_aq.c (./ann_aq).
    Note the run-file header intentionally omits the output count.
    """
    const_no_of_output = 1  # only a single output is currently supported
    # Training file: header line, then "<inputs...>\n<output>\n" per sample.
    rows, cols = At.shape
    with open("ann_in.data", "w") as fout:
        fout.write("%d %d %d\n" % (rows, cols, const_no_of_output))
        for r in range(rows):
            for c in range(cols):
                fout.write("{} ".format(At[r, c]))
            fout.write("\n{}\n".format(yt[r, 0]))
    print("ann_in.data with {0} sets, {1} inputs is saved".format(rows, cols))
    # Validation file: inputs only.
    rows, cols = Av.shape
    with open("ann_run.data", "w") as fout:
        fout.write("%d %d\n" % (rows, cols))
        for r in range(rows):
            for c in range(cols):
                fout.write("{} ".format(Av[r, c]))
            fout.write("\n")
    print("ann_run.data with {0} sets, {1} inputs is saved".format(rows, cols))
def get_valid_mode_output(aV, yV, rate=3, more_train=True, center=None):
    """Split two column vectors into training and validation parts.

    Indices with ``ix % rate == center`` form one side of the split;
    ``more_train`` decides whether that side is the (smaller) validation
    set (True, default) or the training set (False).

    :param center: residue class used for the split; defaults to ``int(rate / 2)``.
    :return: (aV_train, yV_train, aV_valid, yV_valid) as 1-D selections.
    """
    ix = list(range(len(yV)))
    if center is None:  # idiom fix: `is None` instead of `== None`
        center = int(rate / 2)
    if more_train:
        ix_t = [x for x in ix if x % rate != center]
        ix_v = [x for x in ix if x % rate == center]
    else:
        ix_t = [x for x in ix if x % rate == center]
        ix_v = [x for x in ix if x % rate != center]
    aM_t, yV_t = aV[ix_t, 0], yV[ix_t, 0]
    aM_v, yV_v = aV[ix_v, 0], yV[ix_v, 0]
    return aM_t, yV_t, aM_v, yV_v
def get_valid_mode_data(aM, yV, rate=3, more_train=True, center=None):
    """Split a data matrix and its target vector into training/validation parts.

    Indices with ``ix % rate == center`` form one side of the split;
    ``more_train`` decides whether that side is the (smaller) validation
    set (True, default) or the training set (False).

    :param center: residue class used for the split; defaults to ``int(rate / 2)``.
    :return: (aM_train, yV_train, aM_valid, yV_valid); matrices keep all columns.
    """
    ix = list(range(len(yV)))
    if center is None:  # idiom fix: `is None` instead of `== None`
        center = int(rate / 2)
    if more_train:
        ix_t = [x for x in ix if x % rate != center]
        ix_v = [x for x in ix if x % rate == center]
    else:
        ix_t = [x for x in ix if x % rate == center]
        ix_v = [x for x in ix if x % rate != center]
    aM_t, yV_t = aM[ix_t, :], yV[ix_t, 0]
    aM_v, yV_v = aM[ix_v, :], yV[ix_v, 0]
    return aM_t, yV_t, aM_v, yV_v
def _estimate_accuracy_r0( yv, yv_ann, disp = False):
"""
The two column matrix is compared in this function and
It calculates RMSE and r_sqr.
"""
e = yv - yv_ann
se = e.T * e
aae = np.average( np.abs( e))
RMSE = np.sqrt( se / len(e))
# print "RMSE =", RMSE
y_unbias = yv - np.mean( yv)
s_y_unbias = y_unbias.T * y_unbias
r_sqr = 1.0 - se/s_y_unbias
if disp:
print("r_sqr = {0:.3e}, RMSE = {1:.3e}, AAE = {2:.3e}".format( r_sqr[0,0], RMSE[0,0], aae))
return r_sqr[0,0], RMSE[0,0]
def estimate_accuracy(yv, yv_ann, disp=False):
    """Compare two column matrices and return ``(r_sqr, RMSE)``.

    :raises TypeError: unless both inputs are equal-shaped, non-empty
        column vectors.
    """
    print(yv.shape, yv_ann.shape)
    is_column_pair = (yv.shape[0] > 0 and yv.shape[1] == 1
                      and yv.shape == yv_ann.shape)
    if not is_column_pair:
        raise TypeError('Both input data matrices must be column vectors.')
    err = yv - yv_ann
    sq_err = err.T * err
    avg_abs_err = np.average(np.abs(err))
    rmse = np.sqrt(sq_err / len(err))
    # R^2 relative to the mean-centered target variance.
    centered = yv - np.mean(yv)
    total_sq = centered.T * centered
    r_sqr = 1.0 - sq_err / total_sq
    if disp:
        print("r_sqr = {0:.3e}, RMSE = {1:.3e}, AAE = {2:.3e}".format(
            r_sqr[0, 0], rmse[0, 0], avg_abs_err))
    return r_sqr[0, 0], rmse[0, 0]
def estimate_accuracy3( yv, yv_ann, disp = False):
    """
    Compare two matrix-type column vectors and compute r^2, RMSE and AAE.

    Same contract as estimate_accuracy() but additionally returns the
    average absolute error.  The input shapes are always printed.

    :param yv: reference column vector (numpy matrix)
    :param yv_ann: predicted column vector (numpy matrix)
    :param disp: if True, also print the three scores
    :return: (r_sqr, RMSE, AAE)
    """
    print(yv.shape, yv_ann.shape)
    well_formed = yv.shape[0] > 0 and yv.shape[1] == 1 and yv.shape == yv_ann.shape
    if not well_formed:
        raise TypeError( 'Both input data matrices must be column vectors.')
    err = yv - yv_ann
    sq_err = err.T * err
    abs_avg_err = np.average(np.abs(err))
    rmse = np.sqrt(sq_err / len(err))
    centered = yv - np.mean(yv)
    total_var = centered.T * centered
    r2 = 1.0 - sq_err / total_var
    if disp:
        print("r_sqr = {0:.3e}, RMSE = {1:.3e}, AAE = {2:.3e}".format( r2[0,0], rmse[0,0], abs_avg_err))
    return r2[0, 0], rmse[0, 0], abs_avg_err
def to1D( A):
    """
    Return *A* flattened to a 1-D numpy array, regardless of whether A is
    a plain sequence, an ndarray or a numpy matrix.
    """
    flat = np.asarray(A).flatten()
    return flat
def estimate_score3( yv, yv_ann, disp = False):
    """
    Compare two vectors (any array/matrix shape) and compute r^2, RMSE, MAE.

    Both inputs are flattened to 1-D first, so row vectors, column vectors
    and matrices are all accepted.  Raises TypeError when the lengths differ
    or are zero.

    :param yv: reference values
    :param yv_ann: predicted values
    :param disp: if True, print the shapes and the three scores
    :return: (r_sqr, RMSE, MAE)
    """
    # Flatten via the same np.array(...).flatten() operation as to1D().
    yv = np.array(yv).flatten()
    yv_ann = np.array(yv_ann).flatten()
    if disp:
        print("The shape values of yv and yv_ann are", yv.shape, yv_ann.shape)
    if not (yv.shape[0] > 0 and yv.shape[0] == yv_ann.shape[0]):
        raise TypeError("The length of the input vectors should be equal and more than zero.")
    err = yv - yv_ann
    mae = np.average(np.abs(err))
    rmse = np.sqrt(np.average(np.power(err, 2)))
    r2 = 1.0 - np.average(np.power(err, 2)) / np.average(np.power(yv - np.mean(yv), 2))
    if disp:
        print("r_sqr = {0:.3e}, RMSE = {1:.3e}, MAE = {2:.3e}".format( r2, rmse, mae))
    return r2, rmse, mae
def clean_fp_M_bias( xM):
    """
    Remove constant fingerprint columns.

    All-zero columns and all-one columns (column sum equal to the number of
    rows) carry no information, so they are deleted.  Expects a numpy matrix
    so that ``np.sum(xM, 0)`` stays 2-D and supports ``[0, iy]`` indexing.
    """
    col_sum = np.sum(xM, 0)
    n_rows = xM.shape[0]
    drop = [iy for iy in range(xM.shape[1])
            if col_sum[0, iy] == 0 or col_sum[0, iy] == n_rows]
    return np.delete(xM, drop, 1)
def clean_fp_M_pattern( xM):
    """
    Drop duplicated fingerprint columns, keeping the first occurrence of
    each distinct column pattern.
    """
    dup = []
    for iy in range(xM.shape[1]):
        if iy in dup:
            continue
        ref = xM[:, iy]
        for iy2 in range(iy + 1, xM.shape[1]):
            # Two columns match when their difference is all zeros.
            if iy2 not in dup and not np.any(ref - xM[:, iy2]):
                dup.append(iy2)
    return np.delete(xM, dup, 1)
def clean_fp_M( xM):
    """
    Clean a fingerprint matrix:

    1. zero-sum columns are removed,
    2. all-one columns are removed,
    3. duplicated column patterns are merged into one.
    """
    return clean_fp_M_pattern(clean_fp_M_bias(xM))
def check_fp_M_row_pattern( xM):
    """
    Report duplicated rows.

    Returns a list of ``(first_index, dup_index)`` pairs for every row that
    is element-wise identical to an earlier, non-duplicate row.
    """
    seen_dups = []
    pairs = []
    for ix in range(xM.shape[0]):
        if ix in seen_dups:
            continue
        ref = xM[ix, :]
        for ix2 in range(ix + 1, xM.shape[0]):
            # Rows match when their element-wise difference is all zeros.
            if ix2 not in seen_dups and not np.any(ref - xM[ix2, :]):
                seen_dups.append(ix2)
                pairs.append((ix, ix2))
    return pairs
def gfpM_c( smiles_vec, rad = 4, nBits = 1024):
    """
    Fingerprint the SMILES strings with gfpM() (radius *rad*, *nBits* bits)
    and return the matrix cleaned by clean_fp_M() — constant and duplicated
    columns removed.
    """
    xM = gfpM( smiles_vec, rad = rad, nBits = nBits)
    return clean_fp_M( xM)
def list_indices( l, target):
    """Return every index at which *target* occurs in sequence *l*."""
    found = []
    for pos, item in enumerate(l):
        if item == target:
            found.append(pos)
    return found
def pd_check_mol2smiles( pd_smiles):
    """
    Validate the SMILES strings held in a pandas Series and return the
    failures reported by check_mol2smiles().

    NOTE(review): the final assignment rebinds only the local name
    `pd_smiles`; it does not write the (possibly modified) list back into
    the caller's Series, despite what the original comment claimed.
    check_mol2smiles() may still mutate `smiles_l` in place — confirm the
    intended write-back behavior with callers.
    """
    smiles_l = pd_smiles.tolist()
    fail_l = check_mol2smiles( smiles_l)
    # Rebinding a parameter has no effect outside this function.
    pd_smiles = smiles_l
    return fail_l
def check_dup_list( x_list):
    """
    Return (in ``np.where`` output form) the indices of elements that occur
    more than once in *x_list*.
    """
    # O(n^2) count per element, same as the original — fine for small lists.
    counts = np.array([x_list.count(item) for item in x_list])
    return np.where(counts > 1)
def get_duplist( x_list, disp = True):
    """
    Return a list of index groups, one group per value that appears more
    than once in *x_list*.  With disp=True the groups and the duplicated
    values are printed as well.
    """
    duplist = []
    for val in set(x_list):
        if x_list.count(val) > 1:
            # Collect every position of this duplicated value.
            duplist.append([i for i, v in enumerate(x_list) if v == val])
    if disp:
        print(duplist)
        for grp in duplist:
            print([x_list[i] for i in grp])
    return duplist
def pd_remove_no_mol2smiles( pdr, smiles_id = 'SMILES'):
    """
    Drop the rows of a pandas DataFrame whose SMILES strings fail
    round-tripping.

    :param pdr: pandas DataFrame holding a SMILES column
    :param smiles_id: name of the SMILES column (default 'SMILES')
    :return: the DataFrame with the failing rows removed
    """
    s = pdr[ smiles_id].tolist()
    # NOTE(review): assumes get_mol2smiles() returns the fail list accepted
    # by kutil.pd_remove_faillist_ID() — confirm against those definitions.
    fail_list = get_mol2smiles( s)
    pdr = kutil.pd_remove_faillist_ID( pdr, fail_list)
    return pdr
def add_new_descriptor( xM, desc_list):
    """
    Append *desc_list* as one extra descriptor column to matrix *xM* and
    return the widened matrix.  The resulting shape is printed.
    """
    # Work in transposed (row-per-descriptor) list form, append, transpose back.
    columns = xM.T.tolist()
    columns.append(desc_list)
    # np.asmatrix is the function np.mat aliases.
    xM_add = np.asmatrix(columns).T
    print(xM_add.shape)
    return xM_add
def get_xM_molw( s_l):
    """Column matrix of rdkit molecular weights for the SMILES list *s_l*."""
    weights = rdkit_molwt(s_l)
    # np.asmatrix is the function np.mat aliases.
    return np.asmatrix(weights).T
def get_xM_lasa( s_l):
    """Column matrix of rdkit Labute ASA values for the SMILES list *s_l*."""
    lasa_vals = rdkit_LabuteASA(s_l)
    # np.asmatrix is the function np.mat aliases.
    return np.asmatrix(lasa_vals).T
def get_xM_ensemble( s_l, ds_l = ['molw', 'lsas']):
    """
    Build a feature matrix by horizontally stacking the per-descriptor
    column matrices produced by the module-level ``get_xM_<name>()`` helpers.

    :param s_l: list of SMILES strings
    :param ds_l: descriptor names; each name ``d`` must have a matching
        module-level function ``get_xM_<d>``.
        NOTE(review): the default entry 'lsas' has no visible helper here
        (the helper is get_xM_lasa) — confirm whether get_xM_lsas exists
        elsewhere or the default is a typo.
    :return: the concatenation of the descriptor matrices along axis 1
    :raises KeyError: if a descriptor name has no matching helper
    """
    xM_l = []
    for ds in ds_l:
        # Fix: resolve the helper by name via globals() instead of eval().
        # Same dynamic dispatch, but a malformed/malicious descriptor name
        # can no longer execute arbitrary code.
        helper = globals()['get_xM_{}'.format(ds)]
        xM_l.append(helper(s_l))
    return np.concatenate(xM_l, axis=1)
def matches( s_l, p, disp = False):
    """
    Find matches in list of molecules.

    c_l, r_l = matches( s_l, p, ...)
    where r_l holds the matching positions for each molecule (as returned
    by matches_each) and c_l the corresponding per-molecule match counts.
    """
    r_l = [matches_each(s, p, disp) for s in s_l]
    c_l = [len(r) for r in r_l]
    return c_l, r_l
def estimate_accuracy4(yEv, yEv_calc, disp = False):
    """
    Score predictions with sklearn metrics.

    :param yEv: ground-truth values
    :param yEv_calc: predicted values
    :param disp: if True, print the four scores
    :return: (r^2, RMSE, mean absolute error, median absolute error)
    """
    r_sqr = metrics.r2_score( yEv, yEv_calc)
    RMSE = np.sqrt( metrics.mean_squared_error( yEv, yEv_calc))
    MAE = metrics.mean_absolute_error( yEv, yEv_calc)
    DAE = metrics.median_absolute_error( yEv, yEv_calc)
    if disp:
        print("r^2={0:.2e}, RMSE={1:.2e}, MAE={2:.2e}, DAE={3:.2e}".format( r_sqr, RMSE, MAE, DAE))
    return r_sqr, RMSE, MAE, DAE
| |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base test class for running non-stubbed tests (functional tests)
The FunctionalTest class contains helper methods for starting the API
and Registry server, grabbing the logs of each, cleaning up pidfiles,
and spinning down the servers.
"""
import atexit
import datetime
import errno
import logging
import os
import platform
import shutil
import signal
import socket
import sys
import tempfile
import time
import fixtures
from oslo_serialization import jsonutils
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
import six.moves.urllib.parse as urlparse
import testtools
from glance.common import utils
from glance.db.sqlalchemy import api as db_api
from glance import tests as glance_tests
from glance.tests import utils as test_utils
execute, get_unused_port = test_utils.execute, test_utils.get_unused_port
tracecmd_osmap = {'Linux': 'strace', 'FreeBSD': 'truss'}
class Server(object):
    """
    Class used to easily manage starting and stopping
    a server during functional test runs.
    """

    def __init__(self, test_dir, port, sock=None):
        """
        Creates a new Server object.

        :param test_dir: The directory where all test stuff is kept. This is
                         passed from the FunctionalTestCase.
        :param port: The port to start a server up on.
        """
        self.debug = True
        self.no_venv = False
        self.test_dir = test_dir
        self.bind_port = port
        self.conf_file_name = None
        # %-interpolation templates; conf_base is mandatory and must be set
        # by the subclass (see write_conf), paste_conf_base is optional.
        self.conf_base = None
        self.paste_conf_base = None
        self.exec_env = None
        self.deployment_flavor = ''
        self.show_image_direct_url = False
        self.show_multiple_locations = False
        self.property_protection_file = ''
        self.enable_v1_api = True
        self.enable_v2_api = True
        self.enable_v1_registry = True
        self.enable_v2_registry = True
        self.needs_database = False
        self.log_file = None
        self.sock = sock
        # When True, the bound socket's fd is duplicated and passed to the
        # forked server process (see start()).
        self.fork_socket = True
        self.process_pid = None
        self.server_module = None
        # Set by start(): when the process is expected to keep running,
        # stop() sends it SIGTERM.
        self.stop_kill = False
        self.use_user_token = True
        self.send_identity_credentials = False
        # NOTE(review): self.pid_file and self.server_name are used by
        # start()/write_conf() but only assigned in subclasses — confirm
        # every concrete subclass sets both.

    def write_conf(self, **kwargs):
        """
        Writes the configuration file for the server to its intended
        destination. Returns the name of the configuration file and
        the over-ridden config content (may be useful for populating
        error messages).
        """
        if not self.conf_base:
            raise RuntimeError("Subclass did not populate config_base!")

        # The templates are rendered against the instance attributes,
        # optionally overridden by the caller's kwargs.
        conf_override = self.__dict__.copy()
        if kwargs:
            conf_override.update(**kwargs)

        # A config file and paste.ini to use just for this test...we don't want
        # to trample on currently-running Glance servers, now do we?
        conf_dir = os.path.join(self.test_dir, 'etc')
        conf_filepath = os.path.join(conf_dir, "%s.conf" % self.server_name)
        if os.path.exists(conf_filepath):
            os.unlink(conf_filepath)
        paste_conf_filepath = conf_filepath.replace(".conf", "-paste.ini")
        if os.path.exists(paste_conf_filepath):
            os.unlink(paste_conf_filepath)
        utils.safe_mkdirs(conf_dir)

        def override_conf(filepath, overridden):
            # Write one rendered template out to disk.
            with open(filepath, 'w') as conf_file:
                conf_file.write(overridden)
                conf_file.flush()
                return conf_file.name

        overridden_core = self.conf_base % conf_override
        self.conf_file_name = override_conf(conf_filepath, overridden_core)

        overridden_paste = ''
        if self.paste_conf_base:
            overridden_paste = self.paste_conf_base % conf_override
            override_conf(paste_conf_filepath, overridden_paste)

        overridden = ('==Core config==\n%s\n==Paste config==\n%s' %
                      (overridden_core, overridden_paste))

        return self.conf_file_name, overridden

    def start(self, expect_exit=True, expected_exitcode=0, **kwargs):
        """
        Starts the server.

        Any kwargs passed to this method will override the configuration
        value in the conf file used in starting the servers.

        :param expect_exit: if True, wait for the forked process to exit and
            return its exit code; if False, only verify the process started
            and arrange for stop() to SIGTERM it later
        :param expected_exitcode: exit code passed through to
            test_utils.wait_for_fork when expect_exit is True
        :return: (rc, '', '') — stdout/stderr placeholders are empty
        """
        # Ensure the configuration file is written
        self.write_conf(**kwargs)

        self.create_database()

        cmd = ("%(server_module)s --config-file %(conf_file_name)s"
               % {"server_module": self.server_module,
                  "conf_file_name": self.conf_file_name})
        cmd = "%s -m %s" % (sys.executable, cmd)
        # close the sock and release the unused port closer to start time
        if self.exec_env:
            exec_env = self.exec_env.copy()
        else:
            exec_env = {}
        pass_fds = set()
        if self.sock:
            if not self.fork_socket:
                self.sock.close()
                self.sock = None
            else:
                # Duplicate the fd so the child can inherit the bound
                # socket; the child finds it via the env variable.
                fd = os.dup(self.sock.fileno())
                exec_env[utils.GLANCE_TEST_SOCKET_FD_STR] = str(fd)
                pass_fds.add(fd)
                self.sock.close()

        self.process_pid = test_utils.fork_exec(cmd,
                                                logfile=os.devnull,
                                                exec_env=exec_env,
                                                pass_fds=pass_fds)

        self.stop_kill = not expect_exit
        if self.pid_file:
            pf = open(self.pid_file, 'w')
            pf.write('%d\n' % self.process_pid)
            pf.close()
        if not expect_exit:
            rc = 0
            try:
                # Signal 0 only probes that the pid exists.
                os.kill(self.process_pid, 0)
            except OSError:
                raise RuntimeError("The process did not start")
        else:
            rc = test_utils.wait_for_fork(
                self.process_pid,
                expected_exitcode=expected_exitcode)
        # avoid an FD leak
        if self.sock:
            os.close(fd)
            self.sock = None
        return (rc, '', '')

    def reload(self, expect_exit=True, expected_exitcode=0, **kwargs):
        """
        Start and stop the service to reload

        Any kwargs passed to this method will override the configuration
        value in the conf file used in starting the servers.
        """
        self.stop()
        return self.start(expect_exit=expect_exit,
                          expected_exitcode=expected_exitcode, **kwargs)

    def create_database(self):
        """Create database if required for this server"""
        if self.needs_database:
            conf_dir = os.path.join(self.test_dir, 'etc')
            utils.safe_mkdirs(conf_dir)
            conf_filepath = os.path.join(conf_dir, 'glance-manage.conf')

            with open(conf_filepath, 'w') as conf_file:
                conf_file.write('[DEFAULT]\n')
                conf_file.write('sql_connection = %s' % self.sql_connection)
                conf_file.flush()

            glance_db_env = 'GLANCE_DB_TEST_SQLITE_FILE'
            if glance_db_env in os.environ:
                # use the empty db created and cached as a tempfile
                # instead of spending the time creating a new one
                db_location = os.environ[glance_db_env]
                os.system('cp %s %s/tests.sqlite'
                          % (db_location, self.test_dir))
            else:
                cmd = ('%s -m glance.cmd.manage --config-file %s db sync' %
                       (sys.executable, conf_filepath))
                execute(cmd, no_venv=self.no_venv, exec_env=self.exec_env,
                        expect_exit=True)

                # copy the clean db to a temp location so that it
                # can be reused for future tests
                (osf, db_location) = tempfile.mkstemp()
                os.close(osf)
                os.system('cp %s/tests.sqlite %s'
                          % (self.test_dir, db_location))
                os.environ[glance_db_env] = db_location

                # cleanup the temp file when the test suite is
                # complete
                def _delete_cached_db():
                    try:
                        os.remove(os.environ[glance_db_env])
                    except Exception:
                        glance_tests.logger.exception(
                            "Error cleaning up the file %s" %
                            os.environ[glance_db_env])

                atexit.register(_delete_cached_db)

    def stop(self):
        """
        Spin down the server.
        """
        if not self.process_pid:
            raise Exception('why is this being called? %s' % self.server_name)

        if self.stop_kill:
            os.kill(self.process_pid, signal.SIGTERM)
        rc = test_utils.wait_for_fork(self.process_pid, raise_error=False)
        return (rc, '', '')

    def dump_log(self, name):
        """Replay this server's log file into the logger called *name*."""
        log = logging.getLogger(name)
        if not self.log_file or not os.path.exists(self.log_file):
            return
        fptr = open(self.log_file, 'r')
        for line in fptr:
            log.info(line.strip())
class ApiServer(Server):
    """
    Server object that starts/stops/manages the API server
    """

    def __init__(self, test_dir, port, policy_file, delayed_delete=False,
                 pid_file=None, sock=None, **kwargs):
        super(ApiServer, self).__init__(test_dir, port, sock=sock)
        self.server_name = 'api'
        self.server_module = 'glance.cmd.%s' % self.server_name
        self.default_store = kwargs.get("default_store", "file")
        self.bind_host = "127.0.0.1"
        self.registry_host = "127.0.0.1"
        self.key_file = ""
        self.cert_file = ""
        self.metadata_encryption_key = "012345678901234567890123456789ab"
        self.image_dir = os.path.join(self.test_dir, "images")
        self.pid_file = pid_file or os.path.join(self.test_dir, "api.pid")
        self.log_file = os.path.join(self.test_dir, "api.log")
        self.image_size_cap = 1099511627776  # 1 TiB
        self.delayed_delete = delayed_delete
        self.owner_is_tenant = True
        self.workers = 0
        self.scrub_time = 5
        self.image_cache_dir = os.path.join(self.test_dir,
                                            'cache')
        self.image_cache_driver = 'sqlite'
        self.policy_file = policy_file
        self.policy_default_rule = 'default'
        self.property_protection_rule_format = 'roles'
        self.image_member_quota = 10
        self.image_property_quota = 10
        self.image_tag_quota = 10
        self.image_location_quota = 2
        self.disable_path = None

        self.needs_database = True
        default_sql_connection = 'sqlite:////%s/tests.sqlite' % self.test_dir
        self.sql_connection = os.environ.get('GLANCE_TEST_SQL_CONNECTION',
                                             default_sql_connection)
        self.data_api = kwargs.get("data_api",
                                   "glance.db.sqlalchemy.api")
        self.user_storage_quota = '0'
        self.lock_path = self.test_dir

        self.location_strategy = 'location_order'
        self.store_type_location_strategy_preference = ""

        self.send_identity_headers = False

        # Rendered by Server.write_conf() with %-interpolation against
        # self.__dict__ (plus kwargs overrides).
        self.conf_base = """[DEFAULT]
debug = %(debug)s
default_log_levels = eventlet.wsgi.server=DEBUG
bind_host = %(bind_host)s
bind_port = %(bind_port)s
key_file = %(key_file)s
cert_file = %(cert_file)s
metadata_encryption_key = %(metadata_encryption_key)s
registry_host = %(registry_host)s
registry_port = %(registry_port)s
use_user_token = %(use_user_token)s
send_identity_credentials = %(send_identity_credentials)s
log_file = %(log_file)s
image_size_cap = %(image_size_cap)d
delayed_delete = %(delayed_delete)s
owner_is_tenant = %(owner_is_tenant)s
workers = %(workers)s
scrub_time = %(scrub_time)s
send_identity_headers = %(send_identity_headers)s
image_cache_dir = %(image_cache_dir)s
image_cache_driver = %(image_cache_driver)s
data_api = %(data_api)s
sql_connection = %(sql_connection)s
show_image_direct_url = %(show_image_direct_url)s
show_multiple_locations = %(show_multiple_locations)s
user_storage_quota = %(user_storage_quota)s
enable_v1_api = %(enable_v1_api)s
enable_v2_api = %(enable_v2_api)s
lock_path = %(lock_path)s
property_protection_file = %(property_protection_file)s
property_protection_rule_format = %(property_protection_rule_format)s
image_member_quota=%(image_member_quota)s
image_property_quota=%(image_property_quota)s
image_tag_quota=%(image_tag_quota)s
image_location_quota=%(image_location_quota)s
location_strategy=%(location_strategy)s
allow_additional_image_properties = True
[oslo_policy]
policy_file = %(policy_file)s
policy_default_rule = %(policy_default_rule)s
[paste_deploy]
flavor = %(deployment_flavor)s
[store_type_location_strategy]
store_type_preference = %(store_type_location_strategy_preference)s
[glance_store]
filesystem_store_datadir=%(image_dir)s
default_store = %(default_store)s
"""
        self.paste_conf_base = """[pipeline:glance-api]
pipeline =
    cors
    healthcheck
    versionnegotiation
    gzip
    unauthenticated-context
    rootapp
[pipeline:glance-api-caching]
pipeline = cors healthcheck versionnegotiation gzip unauthenticated-context
 cache rootapp
[pipeline:glance-api-cachemanagement]
pipeline =
    cors
    healthcheck
    versionnegotiation
    gzip
    unauthenticated-context
    cache
    cache_manage
    rootapp
[pipeline:glance-api-fakeauth]
pipeline = cors healthcheck versionnegotiation gzip fakeauth context rootapp
[pipeline:glance-api-noauth]
pipeline = cors healthcheck versionnegotiation gzip context rootapp
[composite:rootapp]
paste.composite_factory = glance.api:root_app_factory
/: apiversions
/v1: apiv1app
/v2: apiv2app
[app:apiversions]
paste.app_factory = glance.api.versions:create_resource
[app:apiv1app]
paste.app_factory = glance.api.v1.router:API.factory
[app:apiv2app]
paste.app_factory = glance.api.v2.router:API.factory
[filter:healthcheck]
paste.filter_factory = oslo_middleware:Healthcheck.factory
backends = disable_by_file
disable_by_file_path = %(disable_path)s
[filter:versionnegotiation]
paste.filter_factory =
 glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory
[filter:gzip]
paste.filter_factory = glance.api.middleware.gzip:GzipMiddleware.factory
[filter:cache]
paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory
[filter:cache_manage]
paste.filter_factory =
 glance.api.middleware.cache_manage:CacheManageFilter.factory
[filter:context]
paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
[filter:unauthenticated-context]
paste.filter_factory =
 glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
[filter:fakeauth]
paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory
[filter:cors]
paste.filter_factory = oslo_middleware.cors:filter_factory
allowed_origin=http://valid.example.com
"""
class RegistryServer(Server):
    """
    Server object that starts/stops/manages the Registry server
    """

    def __init__(self, test_dir, port, policy_file, sock=None):
        super(RegistryServer, self).__init__(test_dir, port, sock=sock)
        self.server_name = 'registry'
        self.server_module = 'glance.cmd.%s' % self.server_name

        self.needs_database = True
        default_sql_connection = 'sqlite:////%s/tests.sqlite' % self.test_dir
        self.sql_connection = os.environ.get('GLANCE_TEST_SQL_CONNECTION',
                                             default_sql_connection)

        self.bind_host = "127.0.0.1"
        self.pid_file = os.path.join(self.test_dir, "registry.pid")
        self.log_file = os.path.join(self.test_dir, "registry.log")
        self.owner_is_tenant = True
        self.workers = 0
        self.api_version = 1
        self.user_storage_quota = '0'
        self.metadata_encryption_key = "012345678901234567890123456789ab"
        self.policy_file = policy_file
        self.policy_default_rule = 'default'
        self.disable_path = None

        # Rendered by Server.write_conf() with %-interpolation against
        # self.__dict__ (plus kwargs overrides).
        self.conf_base = """[DEFAULT]
debug = %(debug)s
bind_host = %(bind_host)s
bind_port = %(bind_port)s
log_file = %(log_file)s
sql_connection = %(sql_connection)s
sql_idle_timeout = 3600
api_limit_max = 1000
limit_param_default = 25
owner_is_tenant = %(owner_is_tenant)s
enable_v2_registry = %(enable_v2_registry)s
workers = %(workers)s
user_storage_quota = %(user_storage_quota)s
metadata_encryption_key = %(metadata_encryption_key)s
[oslo_policy]
policy_file = %(policy_file)s
policy_default_rule = %(policy_default_rule)s
[paste_deploy]
flavor = %(deployment_flavor)s
"""
        self.paste_conf_base = """[pipeline:glance-registry]
pipeline = healthcheck unauthenticated-context registryapp
[pipeline:glance-registry-fakeauth]
pipeline = healthcheck fakeauth context registryapp
[pipeline:glance-registry-trusted-auth]
pipeline = healthcheck context registryapp
[app:registryapp]
paste.app_factory = glance.registry.api:API.factory
[filter:healthcheck]
paste.filter_factory = oslo_middleware:Healthcheck.factory
backends = disable_by_file
disable_by_file_path = %(disable_path)s
[filter:context]
paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
[filter:unauthenticated-context]
paste.filter_factory =
 glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
[filter:fakeauth]
paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory
"""
class ScrubberDaemon(Server):
    """
    Server object that starts/stops/manages the Scrubber server
    """

    def __init__(self, test_dir, policy_file, daemon=False, **kwargs):
        # NOTE(jkoelker): Set the port to 0 since we actually don't listen
        super(ScrubberDaemon, self).__init__(test_dir, 0)
        self.server_name = 'scrubber'
        self.server_module = 'glance.cmd.%s' % self.server_name
        self.daemon = daemon

        self.registry_host = "127.0.0.1"
        self.image_dir = os.path.join(self.test_dir, "images")
        self.scrub_time = 5
        self.pid_file = os.path.join(self.test_dir, "scrubber.pid")
        self.log_file = os.path.join(self.test_dir, "scrubber.log")
        self.metadata_encryption_key = "012345678901234567890123456789ab"
        self.lock_path = self.test_dir

        default_sql_connection = 'sqlite:////%s/tests.sqlite' % self.test_dir
        self.sql_connection = os.environ.get('GLANCE_TEST_SQL_CONNECTION',
                                             default_sql_connection)
        self.policy_file = policy_file
        self.policy_default_rule = 'default'

        self.send_identity_headers = False
        self.admin_role = 'admin'

        # Rendered by Server.write_conf() with %-interpolation against
        # self.__dict__ (plus kwargs overrides).
        self.conf_base = """[DEFAULT]
debug = %(debug)s
log_file = %(log_file)s
daemon = %(daemon)s
wakeup_time = 2
scrub_time = %(scrub_time)s
registry_host = %(registry_host)s
registry_port = %(registry_port)s
metadata_encryption_key = %(metadata_encryption_key)s
lock_path = %(lock_path)s
sql_connection = %(sql_connection)s
sql_idle_timeout = 3600
send_identity_headers = %(send_identity_headers)s
admin_role = %(admin_role)s
[glance_store]
filesystem_store_datadir=%(image_dir)s
[oslo_policy]
policy_file = %(policy_file)s
policy_default_rule = %(policy_default_rule)s
"""

    def start(self, expect_exit=True, expected_exitcode=0, **kwargs):
        # In daemon mode the scrubber keeps running, so don't wait for
        # the launched process to exit.
        if 'daemon' in kwargs:
            expect_exit = False
        return super(ScrubberDaemon, self).start(
            expect_exit=expect_exit,
            expected_exitcode=expected_exitcode,
            **kwargs)
class FunctionalTest(test_utils.BaseTestCase):

    """
    Base test class for any test that wants to test the actual
    servers and clients and not just the stubbed out interfaces
    """

    # Class-level defaults shared by subclasses.
    inited = False
    disabled = False
    launched_servers = []

    def setUp(self):
        super(FunctionalTest, self).setUp()
        self.test_dir = self.useFixture(fixtures.TempDir()).path

        self.api_protocol = 'http'
        # Grab free ports (and the sockets bound to them) up front so the
        # server processes can inherit the bound sockets later.
        self.api_port, api_sock = test_utils.get_unused_port_and_socket()
        self.registry_port, reg_sock = test_utils.get_unused_port_and_socket()

        self.include_scrubber = True

        # strace/truss command for the running platform, if any.
        self.tracecmd = tracecmd_osmap.get(platform.system())

        conf_dir = os.path.join(self.test_dir, 'etc')
        utils.safe_mkdirs(conf_dir)
        self.copy_data_file('schema-image.json', conf_dir)
        self.copy_data_file('policy.json', conf_dir)
        self.copy_data_file('property-protections.conf', conf_dir)
        self.copy_data_file('property-protections-policies.conf', conf_dir)
        self.property_file_roles = os.path.join(conf_dir,
                                                'property-protections.conf')
        property_policies = 'property-protections-policies.conf'
        self.property_file_policies = os.path.join(conf_dir,
                                                   property_policies)
        self.policy_file = os.path.join(conf_dir, 'policy.json')

        self.api_server = ApiServer(self.test_dir,
                                    self.api_port,
                                    self.policy_file,
                                    sock=api_sock)

        self.registry_server = RegistryServer(self.test_dir,
                                              self.registry_port,
                                              self.policy_file,
                                              sock=reg_sock)

        self.scrubber_daemon = ScrubberDaemon(self.test_dir, self.policy_file)

        self.pid_files = [self.api_server.pid_file,
                          self.registry_server.pid_file,
                          self.scrubber_daemon.pid_file]
        self.files_to_destroy = []
        self.launched_servers = []

    def tearDown(self):
        if not self.disabled:
            self.cleanup()
            # We destroy the test data store between each test case,
            # and recreate it, which ensures that we have no side-effects
            # from the tests
            self._reset_database(self.registry_server.sql_connection)
            self._reset_database(self.api_server.sql_connection)
        super(FunctionalTest, self).tearDown()

        self.api_server.dump_log('api_server')
        self.registry_server.dump_log('registry_server')
        self.scrubber_daemon.dump_log('scrubber_daemon')

    def set_policy_rules(self, rules):
        """Overwrite the per-test policy file with the given rule dict."""
        fap = open(self.policy_file, 'w')
        fap.write(jsonutils.dumps(rules))
        fap.close()

    def _reset_database(self, conn_string):
        """Blow away the test database behind *conn_string* (mysql only)."""
        conn_pieces = urlparse.urlparse(conn_string)
        if conn_string.startswith('sqlite'):
            # We leave behind the sqlite DB for failing tests to aid
            # in diagnosis, as the file size is relatively small and
            # won't interfere with subsequent tests as it's in a per-
            # test directory (which is blown-away if the test is green)
            pass
        elif conn_string.startswith('mysql'):
            # We can execute the MySQL client to destroy and re-create
            # the MYSQL database, which is easier and less error-prone
            # than using SQLAlchemy to do this via MetaData...trust me.
            database = conn_pieces.path.strip('/')
            loc_pieces = conn_pieces.netloc.split('@')
            host = loc_pieces[1]
            auth_pieces = loc_pieces[0].split(':')
            user = auth_pieces[0]
            password = ""
            if len(auth_pieces) > 1:
                if auth_pieces[1].strip():
                    password = "-p%s" % auth_pieces[1]
            sql = ("drop database if exists %(database)s; "
                   "create database %(database)s;") % {'database': database}
            cmd = ("mysql -u%(user)s %(password)s -h%(host)s "
                   "-e\"%(sql)s\"") % {'user': user, 'password': password,
                                       'host': host, 'sql': sql}
            exitcode, out, err = execute(cmd)
            self.assertEqual(0, exitcode)

    def cleanup(self):
        """
        Makes sure anything we created or started up in the
        tests are destroyed or spun down
        """

        # NOTE(jbresnah) call stop on each of the servers instead of
        # checking the pid file.  stop() will wait until the child
        # server is dead.  This eliminates the possibility of a race
        # between a child process listening on a port actually dying
        # and a new process being started
        servers = [self.api_server,
                   self.registry_server,
                   self.scrubber_daemon]
        for s in servers:
            try:
                s.stop()
            except Exception:
                # Best effort: a server that never started raises here.
                pass

        for f in self.files_to_destroy:
            if os.path.exists(f):
                os.unlink(f)

    def start_server(self,
                     server,
                     expect_launch,
                     expect_exit=True,
                     expected_exitcode=0,
                     **kwargs):
        """
        Starts a server on an unused port.

        Any kwargs passed to this method will override the configuration
        value in the conf file used in starting the server.

        :param server: the server to launch
        :param expect_launch: true iff the server is expected to
                              successfully start
        :param expect_exit: true iff the launched process is expected
                            to exit in a timely fashion
        :param expected_exitcode: expected exitcode from the launcher
        """
        self.cleanup()

        # Start up the requested server
        exitcode, out, err = server.start(expect_exit=expect_exit,
                                          expected_exitcode=expected_exitcode,
                                          **kwargs)
        if expect_exit:
            self.assertEqual(expected_exitcode, exitcode,
                             "Failed to spin up the requested server. "
                             "Got: %s" % err)

        self.launched_servers.append(server)

        launch_msg = self.wait_for_servers([server], expect_launch)
        self.assertTrue(launch_msg is None, launch_msg)

    def start_with_retry(self, server, port_name, max_retries,
                         expect_launch=True,
                         **kwargs):
        """
        Starts a server, with retries if the server launches but
        fails to start listening on the expected port.

        :param server: the server to launch
        :param port_name: the name of the port attribute
        :param max_retries: the maximum number of attempts
        :param expect_launch: true iff the server is expected to
                              successfully start
        :param expect_exit: true iff the launched process is expected
                            to exit in a timely fashion
        """
        launch_msg = None
        for i in range(max_retries):
            exitcode, out, err = server.start(expect_exit=not expect_launch,
                                              **kwargs)
            name = server.server_name
            self.assertEqual(0, exitcode,
                             "Failed to spin up the %s server. "
                             "Got: %s" % (name, err))
            launch_msg = self.wait_for_servers([server], expect_launch)
            if launch_msg:
                # Probable port clash: stop, pick a fresh port, try again.
                server.stop()
                server.bind_port = get_unused_port()
                setattr(self, port_name, server.bind_port)
            else:
                self.launched_servers.append(server)
                break
        self.assertTrue(launch_msg is None, launch_msg)

    def start_servers(self, **kwargs):
        """
        Starts the API and Registry servers (glance-control api start
        & glance-control registry start) on unused ports.  glance-control
        should be installed into the python path

        Any kwargs passed to this method will override the configuration
        value in the conf file used in starting the servers.
        """
        self.cleanup()

        # Start up the API and default registry server

        # We start the registry server first, as the API server config
        # depends on the registry port - this ordering allows for
        # retrying the launch on a port clash
        self.start_with_retry(self.registry_server, 'registry_port', 3,
                              **kwargs)
        kwargs['registry_port'] = self.registry_server.bind_port

        self.start_with_retry(self.api_server, 'api_port', 3, **kwargs)

        if self.include_scrubber:
            exitcode, out, err = self.scrubber_daemon.start(**kwargs)
            self.assertEqual(0, exitcode,
                             "Failed to spin up the Scrubber daemon. "
                             "Got: %s" % err)

    def ping_server(self, port):
        """
        Simple ping on the port. If responsive, return True, else
        return False.

        :note We use raw sockets, not ping here, since ping uses ICMP and
        has no concept of ports...
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.connect(("127.0.0.1", port))
            return True
        except socket.error:
            return False
        finally:
            s.close()

    def ping_server_ipv6(self, port):
        """
        Simple ping on the port. If responsive, return True, else
        return False.

        :note We use raw sockets, not ping here, since ping uses ICMP and
        has no concept of ports...

        The function uses IPv6 (therefore AF_INET6 and ::1).
        """
        s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        try:
            s.connect(("::1", port))
            return True
        except socket.error:
            return False
        finally:
            s.close()

    def wait_for_servers(self, servers, expect_launch=True, timeout=30):
        """
        Tight loop, waiting for the given server port(s) to be available.
        Returns when all are pingable. There is a timeout on waiting
        for the servers to come up.

        :param servers: Glance server ports to ping
        :param expect_launch: Optional, true iff the server(s) are
                              expected to successfully start
        :param timeout: Optional, defaults to 30 seconds
        :returns: None if launch expectation is met, otherwise an
                  assertion message
        """
        now = datetime.datetime.now()
        timeout_time = now + datetime.timedelta(seconds=timeout)
        replied = []
        while (timeout_time > now):
            pinged = 0
            for server in servers:
                if self.ping_server(server.bind_port):
                    pinged += 1
                    if server not in replied:
                        replied.append(server)
            if pinged == len(servers):
                msg = 'Unexpected server launch status'
                return None if expect_launch else msg
            now = datetime.datetime.now()
            time.sleep(0.05)

        # Timed out: build a diagnostic message, optionally attaching a
        # strace/truss dump of each unresponsive server process.
        failed = list(set(servers) - set(replied))
        msg = 'Unexpected server launch status for: '
        for f in failed:
            msg += ('%s, ' % f.server_name)
            if os.path.exists(f.pid_file):
                pid = f.process_pid
                trace = f.pid_file.replace('.pid', '.trace')
                if self.tracecmd:
                    cmd = '%s -p %d -o %s' % (self.tracecmd, pid, trace)
                    try:
                        execute(cmd, raise_error=False, expect_exit=False)
                    except OSError as e:
                        if e.errno == errno.ENOENT:
                            raise RuntimeError('No executable found for "%s" '
                                               'command.' % self.tracecmd)
                        else:
                            raise
                    time.sleep(0.5)
                    if os.path.exists(trace):
                        msg += ('\n%s:\n%s\n' % (self.tracecmd,
                                                 open(trace).read()))

        self.add_log_details(failed)

        return msg if expect_launch else None

    def stop_server(self, server, name):
        """
        Called to stop a single server in a normal fashion using the
        glance-control stop method to gracefully shut the server down.

        :param server: the server to stop
        """
        # Spin down the requested server
        server.stop()

    def stop_servers(self):
        """
        Called to stop the started servers in a normal fashion. Note
        that cleanup() will stop the servers using a fairly draconian
        method of sending a SIGTERM signal to the servers. Here, we use
        the glance-control stop method to gracefully shut the server down.
        This method also asserts that the shutdown was clean, and so it
        is meant to be called during a normal test case sequence.
        """

        # Spin down the API and default registry server
        self.stop_server(self.api_server, 'API server')
        self.stop_server(self.registry_server, 'Registry server')
        if self.include_scrubber:
            self.stop_server(self.scrubber_daemon, 'Scrubber daemon')

        self._reset_database(self.registry_server.sql_connection)

    def run_sql_cmd(self, sql):
        """
        Provides a crude mechanism to run manual SQL commands for backend
        DB verification within the functional tests.
        The raw result set is returned.
        """
        engine = db_api.get_engine()
        return engine.execute(sql)

    def copy_data_file(self, file_name, dst_dir):
        """Copy a test fixture file into *dst_dir*; return its new path."""
        src_file_name = os.path.join('glance/tests/etc', file_name)
        shutil.copy(src_file_name, dst_dir)
        dst_file_name = os.path.join(dst_dir, file_name)
        return dst_file_name

    def add_log_details(self, servers=None):
        """Attach the log files of the given (or all launched) servers."""
        logs = [s.log_file for s in (servers or self.launched_servers)]
        for log in logs:
            if os.path.exists(log):
                testtools.content.attach_file(self, log)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.