import os, sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import apx
import unittest
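# The compiled programs asserted below share a common layout: a one-byte
# program opcode (OPCODE_PACK_PROG / OPCODE_UNPACK_PROG), the total data
# length as four bytes (least-significant byte first, judging by the small
# lengths used here), then one or more instruction opcodes. Array
# instructions carry the array length as two further bytes.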
class TestCompilePackProg(unittest.TestCase):
def test_packU8(self):
dataElement = apx.DataElement.UInt8(minVal=0, maxVal=3)
compiler = apx.Compiler()
prog = compiler.compilePackProg(dataElement)
self.assertIsInstance(prog, bytes)
self.assertEqual(prog, bytes([apx.OPCODE_PACK_PROG, apx.UINT8_LEN, 0, 0, 0, apx.OPCODE_PACK_U8]))
def test_packU16(self):
dataElement = apx.DataElement.UInt16()
compiler = apx.Compiler()
prog = compiler.compilePackProg(dataElement)
self.assertIsInstance(prog, bytes)
self.assertEqual(prog, bytes([apx.OPCODE_PACK_PROG, apx.UINT16_LEN, 0, 0, 0, apx.OPCODE_PACK_U16]))
def test_packU32(self):
dataElement = apx.DataElement.UInt32()
compiler = apx.Compiler()
prog = compiler.compilePackProg(dataElement)
self.assertIsInstance(prog, bytes)
self.assertEqual(prog, bytes([apx.OPCODE_PACK_PROG, apx.UINT32_LEN, 0, 0, 0, apx.OPCODE_PACK_U32]))
def test_packS8(self):
dataElement = apx.DataElement.SInt8(minVal=0, maxVal=3)
compiler = apx.Compiler()
prog = compiler.compilePackProg(dataElement)
self.assertIsInstance(prog, bytes)
self.assertEqual(prog, bytes([apx.OPCODE_PACK_PROG, apx.SINT8_LEN, 0, 0, 0, apx.OPCODE_PACK_S8]))
def test_packS16(self):
dataElement = apx.DataElement.SInt16()
compiler = apx.Compiler()
prog = compiler.compilePackProg(dataElement)
self.assertIsInstance(prog, bytes)
self.assertEqual(prog, bytes([apx.OPCODE_PACK_PROG, apx.SINT16_LEN, 0, 0, 0, apx.OPCODE_PACK_S16]))
def test_packS32(self):
dataElement = apx.DataElement.SInt32()
compiler = apx.Compiler()
prog = compiler.compilePackProg(dataElement)
self.assertIsInstance(prog, bytes)
self.assertEqual(prog, bytes([apx.OPCODE_PACK_PROG, apx.SINT32_LEN, 0, 0, 0, apx.OPCODE_PACK_S32]))
def test_packU8AR(self):
dataElement = apx.DataElement.UInt8(minVal=0, maxVal=3, arrayLen = 10)
compiler = apx.Compiler()
prog = compiler.compilePackProg(dataElement)
self.assertIsInstance(prog, bytes)
self.assertEqual(prog, bytes([apx.OPCODE_PACK_PROG, apx.UINT8_LEN * dataElement.arrayLen, 0, 0, 0, apx.OPCODE_PACK_U8AR, dataElement.arrayLen, 0]))
def test_packU16AR(self):
dataElement = apx.DataElement.UInt16(minVal=0, maxVal=3, arrayLen = 5)
compiler = apx.Compiler()
prog = compiler.compilePackProg(dataElement)
self.assertIsInstance(prog, bytes)
self.assertEqual(prog, bytes([apx.OPCODE_PACK_PROG, apx.UINT16_LEN * dataElement.arrayLen, 0, 0, 0, apx.OPCODE_PACK_U16AR, dataElement.arrayLen, 0]))
def test_packU32AR(self):
dataElement = apx.DataElement.UInt32(minVal=0, maxVal=3, arrayLen = 3)
compiler = apx.Compiler()
prog = compiler.compilePackProg(dataElement)
self.assertIsInstance(prog, bytes)
self.assertEqual(prog, bytes([apx.OPCODE_PACK_PROG, apx.UINT32_LEN * dataElement.arrayLen, 0, 0, 0, apx.OPCODE_PACK_U32AR, dataElement.arrayLen, 0]))
def test_packS8AR(self):
dataElement = apx.DataElement.SInt8(minVal=0, maxVal=3, arrayLen = 10)
compiler = apx.Compiler()
prog = compiler.compilePackProg(dataElement)
self.assertIsInstance(prog, bytes)
self.assertEqual(prog, bytes([apx.OPCODE_PACK_PROG, apx.SINT8_LEN * dataElement.arrayLen, 0, 0, 0, apx.OPCODE_PACK_S8AR, dataElement.arrayLen, 0]))
def test_packS16AR(self):
dataElement = apx.DataElement.SInt16(minVal=0, maxVal=3, arrayLen = 5)
compiler = apx.Compiler()
prog = compiler.compilePackProg(dataElement)
self.assertIsInstance(prog, bytes)
self.assertEqual(prog, bytes([apx.OPCODE_PACK_PROG, apx.SINT16_LEN * dataElement.arrayLen, 0, 0, 0, apx.OPCODE_PACK_S16AR, dataElement.arrayLen, 0]))
def test_packS32AR(self):
dataElement = apx.DataElement.SInt32(minVal=0, maxVal=3, arrayLen = 3)
compiler = apx.Compiler()
prog = compiler.compilePackProg(dataElement)
self.assertIsInstance(prog, bytes)
self.assertEqual(prog, bytes([apx.OPCODE_PACK_PROG, apx.SINT32_LEN * dataElement.arrayLen, 0, 0, 0, apx.OPCODE_PACK_S32AR, dataElement.arrayLen, 0]))
def test_packString(self):
dataElement = apx.DataElement.String(arrayLen = 20)
compiler = apx.Compiler()
prog = compiler.compilePackProg(dataElement)
self.assertIsInstance(prog, bytes)
self.assertEqual(prog, bytes([apx.OPCODE_PACK_PROG, apx.UINT8_LEN * dataElement.arrayLen, 0, 0, 0, apx.OPCODE_PACK_STR, dataElement.arrayLen, 0]))
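    # Record programs bracket their fields with OPCODE_RECORD_ENTER and
    # OPCODE_RECORD_LEAVE; each field contributes OPCODE_RECORD_SELECT, the
    # null-terminated field name, then the field's own pack instruction.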
def test_packRecord(self):
child1 = apx.DataElement.UInt16("SoundId")
child2 = apx.DataElement.UInt8("Volume", arrayLen = 4)
child3 = apx.DataElement.UInt8("Repetitions")
packLen = apx.UINT16_LEN + 4*apx.UINT8_LEN + apx.UINT8_LEN
dataElement = apx.DataElement.Record(elements = [child1, child2, child3])
compiler = apx.Compiler()
prog = compiler.compilePackProg(dataElement)
self.assertIsInstance(prog, bytes)
        expected = bytes([
            apx.OPCODE_PACK_PROG, packLen, 0, 0, 0,
            apx.OPCODE_RECORD_ENTER,
            apx.OPCODE_RECORD_SELECT])+"SoundId\0".encode('ascii')+bytes([
            apx.OPCODE_PACK_U16,
            apx.OPCODE_RECORD_SELECT])+"Volume\0".encode('ascii')+bytes([
            apx.OPCODE_PACK_U8AR, 4, 0,
            apx.OPCODE_RECORD_SELECT])+"Repetitions\0".encode('ascii')+bytes([
            apx.OPCODE_PACK_U8,
            apx.OPCODE_RECORD_LEAVE,
        ])
self.assertEqual(prog, expected)
class TestCompileUnpackProg(unittest.TestCase):
def test_unpackU8(self):
dataElement = apx.DataElement.UInt8(minVal=0, maxVal=3)
compiler = apx.Compiler()
prog = compiler.compileUnpackProg(dataElement)
self.assertIsInstance(prog, bytes)
self.assertEqual(prog, bytes([apx.OPCODE_UNPACK_PROG, apx.UINT8_LEN, 0, 0, 0, apx.OPCODE_UNPACK_U8]))
def test_unpackU16(self):
dataElement = apx.DataElement.UInt16()
compiler = apx.Compiler()
prog = compiler.compileUnpackProg(dataElement)
self.assertIsInstance(prog, bytes)
self.assertEqual(prog, bytes([apx.OPCODE_UNPACK_PROG, apx.UINT16_LEN, 0, 0, 0, apx.OPCODE_UNPACK_U16]))
def test_unpackU32(self):
dataElement = apx.DataElement.UInt32()
compiler = apx.Compiler()
prog = compiler.compileUnpackProg(dataElement)
self.assertIsInstance(prog, bytes)
self.assertEqual(prog, bytes([apx.OPCODE_UNPACK_PROG, apx.UINT32_LEN, 0, 0, 0, apx.OPCODE_UNPACK_U32]))
def test_unpackS8(self):
dataElement = apx.DataElement.SInt8(minVal=0, maxVal=3)
compiler = apx.Compiler()
prog = compiler.compileUnpackProg(dataElement)
self.assertIsInstance(prog, bytes)
self.assertEqual(prog, bytes([apx.OPCODE_UNPACK_PROG, apx.SINT8_LEN, 0, 0, 0, apx.OPCODE_UNPACK_S8]))
def test_unpackS16(self):
dataElement = apx.DataElement.SInt16()
compiler = apx.Compiler()
prog = compiler.compileUnpackProg(dataElement)
self.assertIsInstance(prog, bytes)
self.assertEqual(prog, bytes([apx.OPCODE_UNPACK_PROG, apx.SINT16_LEN, 0, 0, 0, apx.OPCODE_UNPACK_S16]))
def test_unpackS32(self):
dataElement = apx.DataElement.SInt32()
compiler = apx.Compiler()
prog = compiler.compileUnpackProg(dataElement)
self.assertIsInstance(prog, bytes)
self.assertEqual(prog, bytes([apx.OPCODE_UNPACK_PROG, apx.SINT32_LEN, 0, 0, 0, apx.OPCODE_UNPACK_S32]))
def test_unpackU8AR(self):
dataElement = apx.DataElement.UInt8(minVal=0, maxVal=3, arrayLen = 10)
compiler = apx.Compiler()
prog = compiler.compileUnpackProg(dataElement)
self.assertIsInstance(prog, bytes)
self.assertEqual(prog, bytes([apx.OPCODE_UNPACK_PROG, apx.UINT8_LEN * dataElement.arrayLen, 0, 0, 0, apx.OPCODE_UNPACK_U8AR, dataElement.arrayLen, 0]))
def test_unpackU16AR(self):
dataElement = apx.DataElement.UInt16(minVal=0, maxVal=3, arrayLen = 5)
compiler = apx.Compiler()
prog = compiler.compileUnpackProg(dataElement)
self.assertIsInstance(prog, bytes)
self.assertEqual(prog, bytes([apx.OPCODE_UNPACK_PROG, apx.UINT16_LEN * dataElement.arrayLen, 0, 0, 0, apx.OPCODE_UNPACK_U16AR, dataElement.arrayLen, 0]))
def test_unpackU32AR(self):
dataElement = apx.DataElement.UInt32(minVal=0, maxVal=3, arrayLen = 3)
compiler = apx.Compiler()
prog = compiler.compileUnpackProg(dataElement)
self.assertIsInstance(prog, bytes)
self.assertEqual(prog, bytes([apx.OPCODE_UNPACK_PROG, apx.UINT32_LEN * dataElement.arrayLen, 0, 0, 0, apx.OPCODE_UNPACK_U32AR, dataElement.arrayLen, 0]))
def test_unpackS8AR(self):
dataElement = apx.DataElement.SInt8(arrayLen = 10)
compiler = apx.Compiler()
prog = compiler.compileUnpackProg(dataElement)
self.assertIsInstance(prog, bytes)
self.assertEqual(prog, bytes([apx.OPCODE_UNPACK_PROG, apx.SINT8_LEN * dataElement.arrayLen, 0, 0, 0, apx.OPCODE_UNPACK_S8AR, dataElement.arrayLen, 0]))
def test_unpackS16AR(self):
dataElement = apx.DataElement.SInt16(arrayLen = 5)
compiler = apx.Compiler()
prog = compiler.compileUnpackProg(dataElement)
self.assertIsInstance(prog, bytes)
self.assertEqual(prog, bytes([apx.OPCODE_UNPACK_PROG, apx.SINT16_LEN * dataElement.arrayLen, 0, 0, 0, apx.OPCODE_UNPACK_S16AR, dataElement.arrayLen, 0]))
def test_unpackS32AR(self):
dataElement = apx.DataElement.SInt32(arrayLen = 3)
compiler = apx.Compiler()
prog = compiler.compileUnpackProg(dataElement)
self.assertIsInstance(prog, bytes)
self.assertEqual(prog, bytes([apx.OPCODE_UNPACK_PROG, apx.SINT32_LEN * dataElement.arrayLen, 0, 0, 0, apx.OPCODE_UNPACK_S32AR, dataElement.arrayLen, 0]))
def test_unpackRecord(self):
child1 = apx.DataElement.UInt16("SoundId")
child2 = apx.DataElement.UInt8("Volume", arrayLen = 4)
child3 = apx.DataElement.UInt8("Repetitions")
packLen = apx.UINT16_LEN + 4*apx.UINT8_LEN + apx.UINT8_LEN
dataElement = apx.DataElement.Record(elements = [child1, child2, child3])
compiler = apx.Compiler()
prog = compiler.compileUnpackProg(dataElement)
self.assertIsInstance(prog, bytes)
expected = bytes([
apx.OPCODE_UNPACK_PROG, packLen, 0, 0, 0,
apx.OPCODE_RECORD_ENTER,
apx.OPCODE_RECORD_SELECT])+"SoundId\0".encode('ascii')+bytes([
apx.OPCODE_UNPACK_U16,
apx.OPCODE_RECORD_SELECT])+"Volume\0".encode('ascii')+bytes([
apx.OPCODE_UNPACK_U8AR, 4, 0,
apx.OPCODE_RECORD_SELECT])+"Repetitions\0".encode('ascii')+bytes([
apx.OPCODE_UNPACK_U8,
apx.OPCODE_RECORD_LEAVE,
])
self.assertEqual(prog, expected)
class TestCompilerFromApxNode(unittest.TestCase):
def test_compile_require_ports(self):
compiler = apx.Compiler()
node = apx.Node('TestNode')
node.append(apx.DataType('TestType1_T', 'S(0,1000)'))
node.append(apx.DataType('TestType2_T', 'a[32]'))
port = node.append(apx.RequirePort('Signal1','C','=255'))
prog = compiler.exec(port)
expected = bytes([
apx.OPCODE_UNPACK_PROG, 1, 0, 0, 0,
apx.OPCODE_UNPACK_U8,
])
self.assertEqual(prog, expected)
port = node.append(apx.RequirePort('Signal2','T[0]','=255'))
prog = compiler.exec(port)
expected = bytes([
apx.OPCODE_UNPACK_PROG, 2, 0, 0, 0,
apx.OPCODE_UNPACK_U16,
])
self.assertEqual(prog, expected)
port = node.append(apx.RequirePort('Signal3', 'T["TestType2_T"]', '=""'))
prog = compiler.exec(port)
expected = bytes([
apx.OPCODE_UNPACK_PROG, 32, 0, 0, 0,
apx.OPCODE_UNPACK_STR, 32, 0
])
self.assertEqual(prog, expected)
def test_compile_provide_ports(self):
compiler = apx.Compiler()
node = apx.Node('TestNode')
node.append(apx.DataType('TestType1_T', 'C'))
node.append(apx.DataType('TestType2_T', 'C[10]'))
node.append(apx.DataType('TestType3_T', 'S'))
node.append(apx.DataType('TestType4_T', 'S[10]'))
node.append(apx.DataType('TestType5_T', 'L'))
node.append(apx.DataType('TestType6_T', 'L[10]'))
node.append(apx.ProvidePort('U8Signal', 'T["TestType1_T"]'))
node.append(apx.ProvidePort('U8ArraySignal', 'T["TestType2_T"]'))
node.append(apx.ProvidePort('U16Signal', 'T["TestType3_T"]'))
node.append(apx.ProvidePort('U16ArraySignal', 'T["TestType4_T"]'))
node.append(apx.ProvidePort('U32Signal', 'T["TestType5_T"]'))
node.append(apx.ProvidePort('U32ArraySignal', 'T["TestType6_T"]'))
prog = compiler.exec(node.find('U8Signal'))
expected = bytes([
apx.OPCODE_PACK_PROG, apx.UINT8_LEN, 0, 0, 0,
apx.OPCODE_PACK_U8
])
self.assertEqual(prog, expected)
prog = compiler.exec(node.find('U8ArraySignal'))
expected = bytes([
apx.OPCODE_PACK_PROG, apx.UINT8_LEN*10, 0, 0, 0,
apx.OPCODE_PACK_U8AR, 10, 0
])
self.assertEqual(prog, expected)
prog = compiler.exec(node.find('U16Signal'))
expected = bytes([
apx.OPCODE_PACK_PROG, apx.UINT16_LEN, 0, 0, 0,
apx.OPCODE_PACK_U16
])
self.assertEqual(prog, expected)
prog = compiler.exec(node.find('U16ArraySignal'))
expected = bytes([
apx.OPCODE_PACK_PROG, apx.UINT16_LEN*10, 0, 0, 0,
apx.OPCODE_PACK_U16AR, 10, 0
])
self.assertEqual(prog, expected)
prog = compiler.exec(node.find('U32Signal'))
expected = bytes([
apx.OPCODE_PACK_PROG, apx.UINT32_LEN, 0, 0, 0,
apx.OPCODE_PACK_U32
])
self.assertEqual(prog, expected)
prog = compiler.exec(node.find('U32ArraySignal'))
expected = bytes([
apx.OPCODE_PACK_PROG, apx.UINT32_LEN*10, 0, 0, 0,
apx.OPCODE_PACK_U32AR, 10, 0
])
self.assertEqual(prog, expected)
def test_compile_record_provide_port(self):
compiler = apx.Compiler()
node = apx.Node('TestNode')
node.append(apx.DataType('SoundId_T', 'S'))
node.append(apx.DataType('Volume_T', 'C'))
node.append(apx.DataType('Repetitions_T', 'C'))
node.append(apx.DataType('SoundRequest_T', '{"SoundId"T["SoundId_T"]"Volume"T["Volume_T"]"Repetitions"T["Repetitions_T"]}'))
node.append(apx.ProvidePort('SoundRequest', 'T["SoundRequest_T"]', '={65535,255,255}'))
prog = compiler.exec(node.find('SoundRequest'))
expected = bytes([
apx.OPCODE_PACK_PROG, (apx.UINT16_LEN+apx.UINT8_LEN+apx.UINT8_LEN), 0, 0, 0,
apx.OPCODE_RECORD_ENTER,
apx.OPCODE_RECORD_SELECT])+"SoundId\0".encode('ascii')+bytes([
apx.OPCODE_PACK_U16,
apx.OPCODE_RECORD_SELECT])+"Volume\0".encode('ascii')+bytes([
apx.OPCODE_PACK_U8,
apx.OPCODE_RECORD_SELECT])+"Repetitions\0".encode('ascii')+bytes([
apx.OPCODE_PACK_U8,
apx.OPCODE_RECORD_LEAVE,
])
self.assertEqual(prog, expected)
def test_compile_record_require_port(self):
compiler = apx.Compiler()
node = apx.Node('TestNode')
node.append(apx.DataType('SoundId_T', 'S'))
node.append(apx.DataType('Volume_T', 'C'))
node.append(apx.DataType('Repetitions_T', 'C'))
node.append(apx.DataType('SoundRequest_T', '{"SoundId"T["SoundId_T"]"Volume"T["Volume_T"]"Repetitions"T["Repetitions_T"]}'))
node.append(apx.RequirePort('SoundRequest', 'T["SoundRequest_T"]', '={65535,255,255}'))
prog = compiler.exec(node.find('SoundRequest'))
expected = bytes([
apx.OPCODE_UNPACK_PROG, (apx.UINT16_LEN+apx.UINT8_LEN+apx.UINT8_LEN), 0, 0, 0,
apx.OPCODE_RECORD_ENTER,
apx.OPCODE_RECORD_SELECT])+"SoundId\0".encode('ascii')+bytes([
apx.OPCODE_UNPACK_U16,
apx.OPCODE_RECORD_SELECT])+"Volume\0".encode('ascii')+bytes([
apx.OPCODE_UNPACK_U8,
apx.OPCODE_RECORD_SELECT])+"Repetitions\0".encode('ascii')+bytes([
apx.OPCODE_UNPACK_U8,
apx.OPCODE_RECORD_LEAVE,
])
self.assertEqual(prog, expected)
if __name__ == '__main__':
unittest.main()
import numpy as np
import pytest
from scipy import linalg
from sklearn.utils import check_random_state
from sklearn.utils._testing import assert_array_equal, assert_no_warnings
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_raises
from sklearn.utils._testing import assert_raise_message
from sklearn.utils._testing import assert_warns
from sklearn.utils._testing import ignore_warnings
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.discriminant_analysis import _cov
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
# Data is just 9 separable points in the plane
X6 = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y6 = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y7 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X7 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8, 3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
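# Note: 'svd' is only paired with shrinkage=None above; test_lda_predict
# checks that combining solver='svd' with shrinkage raises
# NotImplementedError.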
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct
# values for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_allclose(np.exp(y_log_proba_pred1), y_proba_pred1,
rtol=1e-6, atol=1e-6, err_msg='solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert np.any(y_pred3 != y3), 'solver %s' % solver
# Test invalid shrinkages
clf = LinearDiscriminantAnalysis(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = LinearDiscriminantAnalysis(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
@pytest.mark.parametrize("n_classes", [2, 3])
@pytest.mark.parametrize("solver", ["svd", "lsqr", "eigen"])
def test_lda_predict_proba(solver, n_classes):
def generate_dataset(n_samples, centers, covariances, random_state=None):
"""Generate a multivariate normal data given some centers and
covariances"""
rng = check_random_state(random_state)
X = np.vstack([rng.multivariate_normal(mean, cov,
size=n_samples // len(centers))
for mean, cov in zip(centers, covariances)])
y = np.hstack([[clazz] * (n_samples // len(centers))
for clazz in range(len(centers))])
return X, y
blob_centers = np.array([[0, 0], [-10, 40], [-30, 30]])[:n_classes]
blob_stds = np.array([[[10, 10], [10, 100]]] * len(blob_centers))
X, y = generate_dataset(
n_samples=90000, centers=blob_centers, covariances=blob_stds,
random_state=42
)
lda = LinearDiscriminantAnalysis(solver=solver, store_covariance=True,
shrinkage=None).fit(X, y)
# check that the empirical means and covariances are close enough to the
# one used to generate the data
assert_allclose(lda.means_, blob_centers, atol=1e-1)
assert_allclose(lda.covariance_, blob_stds[0], atol=1)
# implement the method to compute the probability given in The Elements
# of Statistical Learning (cf. p.127, Sect. 4.4.5 "Logistic Regression
# or LDA?")
precision = linalg.inv(blob_stds[0])
alpha_k = []
alpha_k_0 = []
for clazz in range(len(blob_centers) - 1):
alpha_k.append(
np.dot(precision,
(blob_centers[clazz] - blob_centers[-1])[:, np.newaxis]))
alpha_k_0.append(
np.dot(- 0.5 * (blob_centers[clazz] +
blob_centers[-1])[np.newaxis, :], alpha_k[-1]))
sample = np.array([[-22, 22]])
def discriminant_func(sample, coef, intercept, clazz):
return np.exp(intercept[clazz] + np.dot(sample, coef[clazz]))
    prob = np.array([
        float(discriminant_func(sample, alpha_k, alpha_k_0, clazz) /
              (1 + sum([discriminant_func(sample, alpha_k, alpha_k_0, j)
                        for j in range(n_classes - 1)])))
        for clazz in range(n_classes - 1)])
prob_ref = 1 - np.sum(prob)
# check the consistency of the computed probability
# all probabilities should sum to one
prob_ref_2 = float(
1 / (1 + sum([discriminant_func(sample, alpha_k, alpha_k_0, clazz)
for clazz in range(n_classes - 1)]))
)
assert prob_ref == pytest.approx(prob_ref_2)
# check that the probability of LDA are close to the theoretical
# probabilties
assert_allclose(lda.predict_proba(sample),
np.hstack([prob, prob_ref])[np.newaxis],
atol=1e-2)
def test_lda_priors():
# Test priors (negative priors)
priors = np.array([0.5, -0.5])
clf = LinearDiscriminantAnalysis(priors=priors)
msg = "priors must be non-negative"
assert_raise_message(ValueError, msg, clf.fit, X, y)
# Test that priors passed as a list are correctly handled (run to see if
# failure)
clf = LinearDiscriminantAnalysis(priors=[0.5, 0.5])
clf.fit(X, y)
# Test that priors always sum to 1
priors = np.array([0.5, 0.6])
prior_norm = np.array([0.45, 0.55])
clf = LinearDiscriminantAnalysis(priors=priors)
assert_warns(UserWarning, clf.fit, X, y)
assert_array_almost_equal(clf.priors_, prior_norm, 2)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_lsqr = LinearDiscriminantAnalysis(solver="lsqr")
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
# Test LDA transform.
clf = LinearDiscriminantAnalysis(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert X_transformed.shape[1] == 1
clf = LinearDiscriminantAnalysis(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert X_transformed.shape[1] == 1
clf = LinearDiscriminantAnalysis(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_explained_variance_ratio():
# Test if the sum of the normalized eigen vectors values equals 1,
# Also tests whether the explained_variance_ratio_ formed by the
# eigen solver is the same as the explained_variance_ratio_ formed
# by the svd solver
state = np.random.RandomState(0)
X = state.normal(loc=0, scale=100, size=(40, 20))
y = state.randint(0, 3, size=(40,))
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_eigen.fit(X, y)
assert_almost_equal(clf_lda_eigen.explained_variance_ratio_.sum(), 1.0, 3)
assert clf_lda_eigen.explained_variance_ratio_.shape == (2,), (
"Unexpected length for explained_variance_ratio_")
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_svd.fit(X, y)
assert_almost_equal(clf_lda_svd.explained_variance_ratio_.sum(), 1.0, 3)
assert clf_lda_svd.explained_variance_ratio_.shape == (2,), (
"Unexpected length for explained_variance_ratio_")
assert_array_almost_equal(clf_lda_svd.explained_variance_ratio_,
clf_lda_eigen.explained_variance_ratio_)
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = LinearDiscriminantAnalysis(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = LinearDiscriminantAnalysis(solver=solver)
# should be able to separate the data perfectly
assert clf.fit(x, y).score(x, y) == 1.0, (
'using covariance: %s' % solver)
def test_lda_store_covariance():
# Test for solver 'lsqr' and 'eigen'
# 'store_covariance' has no effect on 'lsqr' and 'eigen' solvers
for solver in ('lsqr', 'eigen'):
clf = LinearDiscriminantAnalysis(solver=solver).fit(X6, y6)
assert hasattr(clf, 'covariance_')
# Test the actual attribute:
clf = LinearDiscriminantAnalysis(solver=solver,
store_covariance=True).fit(X6, y6)
assert hasattr(clf, 'covariance_')
assert_array_almost_equal(
clf.covariance_,
np.array([[0.422222, 0.088889], [0.088889, 0.533333]])
)
    # Test for SVD solver, the default is to not set the covariance_ attribute
clf = LinearDiscriminantAnalysis(solver='svd').fit(X6, y6)
assert not hasattr(clf, 'covariance_')
# Test the actual attribute:
    clf = LinearDiscriminantAnalysis(solver='svd',
                                     store_covariance=True).fit(X6, y6)
assert hasattr(clf, 'covariance_')
assert_array_almost_equal(
clf.covariance_,
np.array([[0.422222, 0.088889], [0.088889, 0.533333]])
)
@pytest.mark.parametrize('n_features', [3, 5])
@pytest.mark.parametrize('n_classes', [5, 3])
def test_lda_dimension_warning(n_classes, n_features):
rng = check_random_state(0)
n_samples = 10
X = rng.randn(n_samples, n_features)
# we create n_classes labels by repeating and truncating a
# range(n_classes) until n_samples
y = np.tile(range(n_classes), n_samples // n_classes + 1)[:n_samples]
max_components = min(n_features, n_classes - 1)
for n_components in [max_components - 1, None, max_components]:
# if n_components <= min(n_classes - 1, n_features), no warning
lda = LinearDiscriminantAnalysis(n_components=n_components)
assert_no_warnings(lda.fit, X, y)
for n_components in [max_components + 1,
max(n_features, n_classes - 1) + 1]:
# if n_components > min(n_classes - 1, n_features), raise error.
# We test one unit higher than max_components, and then something
# larger than both n_features and n_classes - 1 to ensure the test
# works for any value of n_component
lda = LinearDiscriminantAnalysis(n_components=n_components)
msg = "n_components cannot be larger than "
with pytest.raises(ValueError, match=msg):
lda.fit(X, y)
@pytest.mark.parametrize("data_type, expected_type", [
(np.float32, np.float32),
(np.float64, np.float64),
(np.int32, np.float64),
(np.int64, np.float64)
])
def test_lda_dtype_match(data_type, expected_type):
for (solver, shrinkage) in solver_shrinkage:
clf = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)
clf.fit(X.astype(data_type), y.astype(data_type))
assert clf.coef_.dtype == expected_type
def test_lda_numeric_consistency_float32_float64():
for (solver, shrinkage) in solver_shrinkage:
clf_32 = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)
clf_32.fit(X.astype(np.float32), y.astype(np.float32))
clf_64 = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)
clf_64.fit(X.astype(np.float64), y.astype(np.float64))
# Check value consistency between types
rtol = 1e-6
assert_allclose(clf_32.coef_, clf_64.coef_, rtol=rtol)
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
assert_array_equal(y_pred, y6)
# Assure that it works with 1D data
y_pred1 = clf.fit(X7, y6).predict(X7)
assert_array_equal(y_pred1, y6)
    # Test probability estimates
y_proba_pred1 = clf.predict_proba(X7)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y6)
y_log_proba_pred1 = clf.predict_log_proba(X7)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X6, y7).predict(X6)
# QDA shouldn't be able to separate those
assert np.any(y_pred3 != y7)
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X6, y4)
def test_qda_priors():
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = QuadraticDiscriminantAnalysis(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X6, y6).predict(X6)
n_pos2 = np.sum(y_pred == 2)
assert n_pos2 > n_pos
def test_qda_store_covariance():
    # The default is to not set the covariance_ attribute
clf = QuadraticDiscriminantAnalysis().fit(X6, y6)
assert not hasattr(clf, 'covariance_')
# Test the actual attribute:
clf = QuadraticDiscriminantAnalysis(store_covariance=True).fit(X6, y6)
assert hasattr(clf, 'covariance_')
assert_array_almost_equal(
clf.covariance_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariance_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = QuadraticDiscriminantAnalysis()
with ignore_warnings():
y_pred = clf.fit(X2, y6).predict(X2)
assert np.any(y_pred != y6)
# adding a little regularization fixes the problem
clf = QuadraticDiscriminantAnalysis(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y6)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y6)
# Case n_samples_in_a_class < n_features
clf = QuadraticDiscriminantAnalysis(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
def test_covariance():
x, y = make_blobs(n_samples=100, n_features=5,
centers=1, random_state=42)
# make features correlated
x = np.dot(x, np.arange(x.shape[1] ** 2).reshape(x.shape[1], x.shape[1]))
c_e = _cov(x, 'empirical')
assert_almost_equal(c_e, c_e.T)
c_s = _cov(x, 'auto')
assert_almost_equal(c_s, c_s.T)
@pytest.mark.parametrize("solver", ['svd, lsqr', 'eigen'])
def test_raises_value_error_on_same_number_of_classes_and_samples(solver):
"""
Tests that if the number of samples equals the number
of classes, a ValueError is raised.
"""
X = np.array([[0.5, 0.6], [0.6, 0.5]])
y = np.array(["a", "b"])
clf = LinearDiscriminantAnalysis(solver=solver)
with pytest.raises(ValueError, match="The number of samples must be more"):
clf.fit(X, y)
# -*- coding: utf-8 -*-
import subprocess
from django.http import HttpResponse
from django.utils.timezone import now as timezone_now
from zerver.lib.test_helpers import (
most_recent_message,
most_recent_usermessage,
POSTRequestMock)
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.models import (
get_display_recipient,
get_realm,
get_stream,
get_client,
Recipient,
UserProfile,
UserActivity,
Realm
)
from zerver.lib.actions import (
encode_email_address,
do_create_user
)
from zerver.lib.email_mirror import (
process_message, process_stream_message, ZulipEmailForwardError,
create_missed_message_address,
get_missed_message_token_from_address,
)
from zerver.lib.digest import handle_digest_email, enqueue_emails
from zerver.lib.send_email import FromAddress
from zerver.lib.notifications import (
handle_missedmessage_emails,
)
from zerver.management.commands import email_mirror
from email.mime.text import MIMEText
import datetime
import time
import re
import ujson
import mock
import os
import sys
from six.moves import cStringIO as StringIO
from django.conf import settings
from zerver.lib.str_utils import force_str
from typing import Any, Callable, Dict, Mapping, Union, Text
class TestEmailMirrorLibrary(ZulipTestCase):
def test_get_missed_message_token(self):
# type: () -> None
def get_token(address):
# type: (Text) -> Text
with self.settings(EMAIL_GATEWAY_PATTERN="%s@example.com"):
return get_missed_message_token_from_address(address)
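        # Missed-message addresses have the form mm<32-char token>@<domain>;
        # the helper should strip the 'mm' prefix and the domain, returning
        # the bare token.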
address = 'mm' + ('x' * 32) + '@example.com'
token = get_token(address)
self.assertEqual(token, 'x' * 32)
# This next section was a bug at one point--we'd treat ordinary
# user addresses that happened to begin with "mm" as being
# the special mm+32chars tokens.
address = 'mmathers@example.com'
with self.assertRaises(ZulipEmailForwardError):
get_token(address)
        # Now test the case where our address does not match the
        # EMAIL_GATEWAY_PATTERN.
# This used to crash in an ugly way; we want to throw a proper
# exception.
address = 'alice@not-the-domain-we-were-expecting.com'
with self.assertRaises(ZulipEmailForwardError):
get_token(address)
class TestStreamEmailMessagesSuccess(ZulipTestCase):
def test_receive_stream_email_messages_success(self):
# type: () -> None
# build dummy messages for stream
# test valid incoming stream message is processed properly
user_profile = self.example_user('hamlet')
self.login(user_profile.email)
self.subscribe(user_profile, "Denmark")
stream = get_stream("Denmark", user_profile.realm)
stream_to_address = encode_email_address(stream)
incoming_valid_message = MIMEText('TestStreamEmailMessages Body') # type: Any # https://github.com/python/typeshed/issues/275
incoming_valid_message['Subject'] = 'TestStreamEmailMessages Subject'
incoming_valid_message['From'] = self.example_email('hamlet')
incoming_valid_message['To'] = stream_to_address
incoming_valid_message['Reply-to'] = self.example_email('othello')
process_message(incoming_valid_message)
# Hamlet is subscribed to this stream so should see the email message from Othello.
message = most_recent_message(user_profile)
self.assertEqual(message.content, "TestStreamEmailMessages Body")
self.assertEqual(get_display_recipient(message.recipient), stream.name)
self.assertEqual(message.topic_name(), incoming_valid_message['Subject'])
class TestStreamEmailMessagesEmptyBody(ZulipTestCase):
def test_receive_stream_email_messages_empty_body(self):
# type: () -> None
# build dummy messages for stream
# test message with empty body is not sent
user_profile = self.example_user('hamlet')
self.login(user_profile.email)
self.subscribe(user_profile, "Denmark")
stream = get_stream("Denmark", user_profile.realm)
stream_to_address = encode_email_address(stream)
headers = {}
headers['Reply-To'] = self.example_email('othello')
# empty body
incoming_valid_message = MIMEText('') # type: Any # https://github.com/python/typeshed/issues/275
incoming_valid_message['Subject'] = 'TestStreamEmailMessages Subject'
incoming_valid_message['From'] = self.example_email('hamlet')
incoming_valid_message['To'] = stream_to_address
incoming_valid_message['Reply-to'] = self.example_email('othello')
exception_message = ""
debug_info = {} # type: Dict[str, Any]
# process_message eats the exception & logs an error which can't be parsed here
# so calling process_stream_message directly
try:
process_stream_message(incoming_valid_message['To'],
incoming_valid_message['Subject'],
incoming_valid_message,
debug_info)
except ZulipEmailForwardError as e:
# empty body throws exception
exception_message = str(e)
self.assertEqual(exception_message, "Unable to find plaintext or HTML message body")
class TestMissedPersonalMessageEmailMessages(ZulipTestCase):
def test_receive_missed_personal_message_email_messages(self):
# type: () -> None
# build dummy messages for missed messages email reply
# have Hamlet send Othello a PM. Othello will reply via email
# Hamlet will receive the message.
email = self.example_email('hamlet')
self.login(email)
result = self.client_post("/json/messages", {"type": "private",
"content": "test_receive_missed_message_email_messages",
"client": "test suite",
"to": self.example_email('othello')})
self.assert_json_success(result)
user_profile = self.example_user('othello')
usermessage = most_recent_usermessage(user_profile)
# we don't want to send actual emails but we do need to create and store the
# token for looking up who did reply.
mm_address = create_missed_message_address(user_profile, usermessage.message)
incoming_valid_message = MIMEText('TestMissedMessageEmailMessages Body') # type: Any # https://github.com/python/typeshed/issues/275
incoming_valid_message['Subject'] = 'TestMissedMessageEmailMessages Subject'
incoming_valid_message['From'] = self.example_email('othello')
incoming_valid_message['To'] = mm_address
incoming_valid_message['Reply-to'] = self.example_email('othello')
process_message(incoming_valid_message)
# self.login(self.example_email("hamlet"))
# confirm that Hamlet got the message
user_profile = self.example_user('hamlet')
message = most_recent_message(user_profile)
self.assertEqual(message.content, "TestMissedMessageEmailMessages Body")
self.assertEqual(message.sender, self.example_user('othello'))
self.assertEqual(message.recipient.id, user_profile.id)
self.assertEqual(message.recipient.type, Recipient.PERSONAL)
class TestMissedHuddleMessageEmailMessages(ZulipTestCase):
def test_receive_missed_huddle_message_email_messages(self):
# type: () -> None
# build dummy messages for missed messages email reply
# have Othello send Iago and Cordelia a PM. Cordelia will reply via email
# Iago and Othello will receive the message.
email = self.example_email('othello')
self.login(email)
result = self.client_post("/json/messages", {"type": "private",
"content": "test_receive_missed_message_email_messages",
"client": "test suite",
"to": ujson.dumps([self.example_email('cordelia'),
self.example_email('iago')])})
self.assert_json_success(result)
user_profile = self.example_user('cordelia')
usermessage = most_recent_usermessage(user_profile)
# we don't want to send actual emails but we do need to create and store the
# token for looking up who did reply.
mm_address = create_missed_message_address(user_profile, usermessage.message)
incoming_valid_message = MIMEText('TestMissedHuddleMessageEmailMessages Body') # type: Any # https://github.com/python/typeshed/issues/275
incoming_valid_message['Subject'] = 'TestMissedHuddleMessageEmailMessages Subject'
incoming_valid_message['From'] = self.example_email('cordelia')
incoming_valid_message['To'] = mm_address
incoming_valid_message['Reply-to'] = self.example_email('cordelia')
process_message(incoming_valid_message)
# Confirm Iago received the message.
user_profile = self.example_user('iago')
message = most_recent_message(user_profile)
self.assertEqual(message.content, "TestMissedHuddleMessageEmailMessages Body")
self.assertEqual(message.sender, self.example_user('cordelia'))
self.assertEqual(message.recipient.type, Recipient.HUDDLE)
# Confirm Othello received the message.
user_profile = self.example_user('othello')
message = most_recent_message(user_profile)
self.assertEqual(message.content, "TestMissedHuddleMessageEmailMessages Body")
self.assertEqual(message.sender, self.example_user('cordelia'))
self.assertEqual(message.recipient.type, Recipient.HUDDLE)
class TestEmptyGatewaySetting(ZulipTestCase):
def test_missed_message(self):
# type: () -> None
email = self.example_email('othello')
self.login(email)
result = self.client_post("/json/messages", {"type": "private",
"content": "test_receive_missed_message_email_messages",
"client": "test suite",
"to": ujson.dumps([self.example_email('cordelia'),
self.example_email('iago')])})
self.assert_json_success(result)
user_profile = self.example_user('cordelia')
usermessage = most_recent_usermessage(user_profile)
with self.settings(EMAIL_GATEWAY_PATTERN=''):
mm_address = create_missed_message_address(user_profile, usermessage.message)
self.assertEqual(mm_address, FromAddress.NOREPLY)
def test_encode_email_addr(self):
# type: () -> None
stream = get_stream("Denmark", get_realm("zulip"))
with self.settings(EMAIL_GATEWAY_PATTERN=''):
test_address = encode_email_address(stream)
self.assertEqual(test_address, '')
class TestDigestEmailMessages(ZulipTestCase):
@mock.patch('zerver.lib.digest.enough_traffic')
@mock.patch('zerver.lib.digest.send_future_email')
def test_receive_digest_email_messages(self, mock_send_future_email, mock_enough_traffic):
# type: (mock.MagicMock, mock.MagicMock) -> None
# build dummy messages for missed messages email reply
# have Hamlet send Othello a PM. Othello will reply via email
# Hamlet will receive the message.
email = self.example_email('hamlet')
self.login(email)
result = self.client_post("/json/messages", {"type": "private",
"content": "test_receive_missed_message_email_messages",
"client": "test suite",
"to": self.example_email('othello')})
self.assert_json_success(result)
user_profile = self.example_user('othello')
cutoff = time.mktime(datetime.datetime(year=2016, month=1, day=1).timetuple())
handle_digest_email(user_profile.id, cutoff)
self.assertEqual(mock_send_future_email.call_count, 1)
self.assertEqual(mock_send_future_email.call_args[1]['to_user_id'], user_profile.id)
@mock.patch('zerver.lib.digest.queue_digest_recipient')
@mock.patch('zerver.lib.digest.timezone_now')
def test_inactive_users_queued_for_digest(self, mock_django_timezone, mock_queue_digest_recipient):
# type: (mock.MagicMock, mock.MagicMock) -> None
cutoff = timezone_now()
# Test Tuesday
mock_django_timezone.return_value = datetime.datetime(year=2016, month=1, day=5)
# Mock user activity for each user
realm = get_realm("zulip")
for realm in Realm.objects.filter(deactivated=False, show_digest_email=True):
for user_profile in UserProfile.objects.filter(realm=realm):
UserActivity.objects.create(
last_visit=cutoff - datetime.timedelta(days=1),
user_profile=user_profile,
count=0,
client=get_client('test_client'))
# Check that inactive users are enqueued
enqueue_emails(cutoff)
self.assertEqual(mock_queue_digest_recipient.call_count, 13)
@mock.patch('zerver.lib.digest.queue_digest_recipient')
@mock.patch('zerver.lib.digest.timezone_now')
def test_active_users_not_enqueued(self, mock_django_timezone, mock_queue_digest_recipient):
# type: (mock.MagicMock, mock.MagicMock) -> None
cutoff = timezone_now()
# A Tuesday
mock_django_timezone.return_value = datetime.datetime(year=2016, month=1, day=5)
for realm in Realm.objects.filter(deactivated=False, show_digest_email=True):
for user_profile in UserProfile.objects.filter(realm=realm):
UserActivity.objects.create(
last_visit=cutoff + datetime.timedelta(days=1),
user_profile=user_profile,
count=0,
client=get_client('test_client'))
# Check that an active user is not enqueued
enqueue_emails(cutoff)
self.assertEqual(mock_queue_digest_recipient.call_count, 0)
@mock.patch('zerver.lib.digest.queue_digest_recipient')
@mock.patch('zerver.lib.digest.timezone_now')
def test_only_enqueue_on_valid_day(self, mock_django_timezone, mock_queue_digest_recipient):
# type: (mock.MagicMock, mock.MagicMock) -> None
# Not a Tuesday
mock_django_timezone.return_value = datetime.datetime(year=2016, month=1, day=6)
# Check that digests are not sent on days other than Tuesday.
cutoff = timezone_now()
enqueue_emails(cutoff)
self.assertEqual(mock_queue_digest_recipient.call_count, 0)
@mock.patch('zerver.lib.digest.queue_digest_recipient')
@mock.patch('zerver.lib.digest.timezone_now')
def test_no_email_digest_for_bots(self, mock_django_timezone, mock_queue_digest_recipient):
# type: (mock.MagicMock, mock.MagicMock) -> None
cutoff = timezone_now()
# A Tuesday
mock_django_timezone.return_value = datetime.datetime(year=2016, month=1, day=5)
bot = do_create_user('some_bot@example.com', 'password', get_realm('zulip'), 'some_bot', '',
bot_type=UserProfile.DEFAULT_BOT)
UserActivity.objects.create(
last_visit=cutoff - datetime.timedelta(days=1),
user_profile=bot,
count=0,
client=get_client('test_client'))
# Check that bots are not sent emails
enqueue_emails(cutoff)
for arg in mock_queue_digest_recipient.call_args_list:
user = arg[0][0]
self.assertNotEqual(user.id, bot.id)
class TestReplyExtraction(ZulipTestCase):
def test_reply_is_extracted_from_plain(self):
# type: () -> None
# build dummy messages for stream
# test valid incoming stream message is processed properly
email = self.example_email('hamlet')
self.login(email)
user_profile = self.example_user('hamlet')
self.subscribe(user_profile, "Denmark")
stream = get_stream("Denmark", user_profile.realm)
stream_to_address = encode_email_address(stream)
text = """Reply
-----Original Message-----
Quote"""
incoming_valid_message = MIMEText(text) # type: Any # https://github.com/python/typeshed/issues/275
incoming_valid_message['Subject'] = 'TestStreamEmailMessages Subject'
incoming_valid_message['From'] = self.example_email('hamlet')
incoming_valid_message['To'] = stream_to_address
incoming_valid_message['Reply-to'] = self.example_email('othello')
process_message(incoming_valid_message)
# Hamlet is subscribed to this stream so should see the email message from Othello.
message = most_recent_message(user_profile)
self.assertEqual(message.content, "Reply")
def test_reply_is_extracted_from_html(self):
# type: () -> None
# build dummy messages for stream
# test valid incoming stream message is processed properly
email = self.example_email('hamlet')
self.login(email)
user_profile = self.example_user('hamlet')
self.subscribe(user_profile, "Denmark")
stream = get_stream("Denmark", user_profile.realm)
stream_to_address = encode_email_address(stream)
html = """
<html>
<body>
<p>Reply</p>
<blockquote>
<div>
On 11-Apr-2011, at 6:54 PM, Bob <bob@example.com> wrote:
</div>
<div>
Quote
</div>
</blockquote>
</body>
</html>
"""
incoming_valid_message = MIMEText(html, 'html') # type: Any # https://github.com/python/typeshed/issues/275
incoming_valid_message['Subject'] = 'TestStreamEmailMessages Subject'
incoming_valid_message['From'] = self.example_email('hamlet')
incoming_valid_message['To'] = stream_to_address
incoming_valid_message['Reply-to'] = self.example_email('othello')
process_message(incoming_valid_message)
# Hamlet is subscribed to this stream so should see the email message from Othello.
message = most_recent_message(user_profile)
self.assertEqual(message.content, 'Reply')
MAILS_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "fixtures", "email")
class TestScriptMTA(ZulipTestCase):
def test_success(self):
# type: () -> None
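        # Render a fixture email addressed to the stream and pipe it into the
        # email-mirror-postfix helper script on stdin, the way postfix would
        # deliver it.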
script = os.path.join(os.path.dirname(__file__),
'../../scripts/lib/email-mirror-postfix')
sender = self.example_email('hamlet')
stream = get_stream("Denmark", get_realm("zulip"))
stream_to_address = encode_email_address(stream)
template_path = os.path.join(MAILS_DIR, "simple.txt")
with open(template_path) as template_file:
mail_template = template_file.read()
mail = mail_template.format(stream_to_address=stream_to_address, sender=sender)
read_pipe, write_pipe = os.pipe()
os.write(write_pipe, mail.encode())
os.close(write_pipe)
subprocess.check_call(
[script, '-r', force_str(stream_to_address), '-s', settings.SHARED_SECRET, '-t'],
stdin=read_pipe)
def test_error_no_recipient(self):
# type: () -> None
script = os.path.join(os.path.dirname(__file__),
'../../scripts/lib/email-mirror-postfix')
sender = self.example_email('hamlet')
stream = get_stream("Denmark", get_realm("zulip"))
stream_to_address = encode_email_address(stream)
template_path = os.path.join(MAILS_DIR, "simple.txt")
with open(template_path) as template_file:
mail_template = template_file.read()
mail = mail_template.format(stream_to_address=stream_to_address, sender=sender)
read_pipe, write_pipe = os.pipe()
os.write(write_pipe, mail.encode())
os.close(write_pipe)
success_call = True
try:
subprocess.check_output([script, '-s', settings.SHARED_SECRET, '-t'],
stdin=read_pipe)
except subprocess.CalledProcessError as e:
self.assertEqual(
e.output,
b'5.1.1 Bad destination mailbox address: No missed message email address.\n'
)
self.assertEqual(e.returncode, 67)
success_call = False
self.assertFalse(success_call)
class TestEmailMirrorTornadoView(ZulipTestCase):
def send_private_message(self):
# type: () -> Text
email = self.example_email('othello')
self.login(email)
result = self.client_post(
"/json/messages",
{
"type": "private",
"content": "test_receive_missed_message_email_messages",
"client": "test suite",
"to": ujson.dumps([self.example_email('cordelia'), self.example_email('iago')])
})
self.assert_json_success(result)
user_profile = self.example_user('cordelia')
user_message = most_recent_usermessage(user_profile)
return create_missed_message_address(user_profile, user_message.message)
@mock.patch('zerver.lib.email_mirror.queue_json_publish')
def send_offline_message(self, to_address, sender, mock_queue_json_publish):
# type: (str, str, mock.Mock) -> HttpResponse
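        # Render the fixture email and POST it to the email-mirror endpoint
        # with the shared secret, as the postfix helper script would;
        # queue_json_publish is mocked so the test can assert on the event
        # that would have been queued without running a consumer.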
template_path = os.path.join(MAILS_DIR, "simple.txt")
with open(template_path) as template_file:
mail_template = template_file.read()
mail = mail_template.format(stream_to_address=to_address, sender=sender)
def check_queue_json_publish(queue_name, event, processor, call_consume_in_tests):
# type: (str, Union[Mapping[str, Any], str], Callable[[Any], None], bool) -> None
self.assertEqual(queue_name, "email_mirror")
self.assertEqual(event, {"rcpt_to": to_address, "message": mail})
mock_queue_json_publish.side_effect = check_queue_json_publish
request_data = {
"recipient": to_address,
"msg_text": mail
}
post_data = dict(
data=ujson.dumps(request_data),
secret=settings.SHARED_SECRET
)
return self.client_post('/email_mirror_message', post_data)
def test_success_stream(self):
# type: () -> None
stream = get_stream("Denmark", get_realm("zulip"))
stream_to_address = encode_email_address(stream)
result = self.send_offline_message(stream_to_address, self.example_email('hamlet'))
self.assert_json_success(result)
def test_error_to_stream_with_wrong_address(self):
# type: () -> None
stream = get_stream("Denmark", get_realm("zulip"))
stream_to_address = encode_email_address(stream)
stream_to_address = stream_to_address.replace("Denmark", "Wrong_stream")
result = self.send_offline_message(stream_to_address, self.example_email('hamlet'))
self.assert_json_error(
result,
"5.1.1 Bad destination mailbox address: "
"Please use the address specified in your Streams page.")
def test_success_to_private(self):
# type: () -> None
mm_address = self.send_private_message()
result = self.send_offline_message(mm_address, self.example_email('cordelia'))
self.assert_json_success(result)
def test_using_mm_address_twice(self):
# type: () -> None
mm_address = self.send_private_message()
self.send_offline_message(mm_address, self.example_email('cordelia'))
result = self.send_offline_message(mm_address, self.example_email('cordelia'))
self.assert_json_error(
result,
"5.1.1 Bad destination mailbox address: Bad or expired missed message address.")
def test_wrong_missed_email_private_message(self):
# type: () -> None
self.send_private_message()
mm_address = 'mm' + ('x' * 32) + '@testserver'
result = self.send_offline_message(mm_address, self.example_email('cordelia'))
self.assert_json_error(
result,
"5.1.1 Bad destination mailbox address: Bad or expired missed message address.")
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Cells RPCAPI
"""
from oslo.config import cfg
from nova.cells import rpcapi as cells_rpcapi
from nova import exception
from nova.openstack.common import rpc
from nova import test
CONF = cfg.CONF
CONF.import_opt('topic', 'nova.cells.opts', group='cells')
class CellsAPITestCase(test.NoDBTestCase):
"""Test case for cells.api interfaces."""
def setUp(self):
super(CellsAPITestCase, self).setUp()
self.fake_topic = 'fake_topic'
self.fake_context = 'fake_context'
self.flags(topic=self.fake_topic, enable=True, group='cells')
self.cells_rpcapi = cells_rpcapi.CellsAPI()
def _stub_rpc_method(self, rpc_method, result):
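        # Replace the named rpc function with a fake that records the
        # context, topic and message of the last call and returns a canned
        # result; _check_result() asserts against the captured call_info.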
call_info = {}
def fake_rpc_method(ctxt, topic, msg, *args, **kwargs):
call_info['context'] = ctxt
call_info['topic'] = topic
call_info['msg'] = msg
return result
self.stubs.Set(rpc, rpc_method, fake_rpc_method)
return call_info
def _check_result(self, call_info, method, args, version=None):
if version is None:
version = self.cells_rpcapi.BASE_RPC_API_VERSION
self.assertEqual(self.fake_context, call_info['context'])
self.assertEqual(self.fake_topic, call_info['topic'])
self.assertEqual(method, call_info['msg']['method'])
msg_version = call_info['msg']['version']
self.assertIsInstance(msg_version, basestring,
msg="Message version %s is not a string" %
msg_version)
self.assertEqual(version, call_info['msg']['version'])
self.assertEqual(args, call_info['msg']['args'])
def test_cast_compute_api_method(self):
fake_cell_name = 'fake_cell_name'
fake_method = 'fake_method'
fake_method_args = (1, 2)
fake_method_kwargs = {'kwarg1': 10, 'kwarg2': 20}
expected_method_info = {'method': fake_method,
'method_args': fake_method_args,
'method_kwargs': fake_method_kwargs}
expected_args = {'method_info': expected_method_info,
'cell_name': fake_cell_name,
'call': False}
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.cast_compute_api_method(self.fake_context,
fake_cell_name, fake_method,
*fake_method_args, **fake_method_kwargs)
self._check_result(call_info, 'run_compute_api_method',
expected_args)
def test_call_compute_api_method(self):
fake_cell_name = 'fake_cell_name'
fake_method = 'fake_method'
fake_method_args = (1, 2)
fake_method_kwargs = {'kwarg1': 10, 'kwarg2': 20}
fake_response = 'fake_response'
expected_method_info = {'method': fake_method,
'method_args': fake_method_args,
'method_kwargs': fake_method_kwargs}
expected_args = {'method_info': expected_method_info,
'cell_name': fake_cell_name,
'call': True}
call_info = self._stub_rpc_method('call', fake_response)
result = self.cells_rpcapi.call_compute_api_method(self.fake_context,
fake_cell_name, fake_method,
*fake_method_args, **fake_method_kwargs)
self._check_result(call_info, 'run_compute_api_method',
expected_args)
self.assertEqual(fake_response, result)
def test_schedule_run_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.schedule_run_instance(
self.fake_context, arg1=1, arg2=2, arg3=3)
expected_args = {'host_sched_kwargs': {'arg1': 1,
'arg2': 2,
'arg3': 3}}
self._check_result(call_info, 'schedule_run_instance',
expected_args)
def test_build_instances(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.build_instances(
self.fake_context, instances=['1', '2'],
image={'fake': 'image'}, arg1=1, arg2=2, arg3=3)
expected_args = {'build_inst_kwargs': {'instances': ['1', '2'],
'image': {'fake': 'image'},
'arg1': 1,
'arg2': 2,
'arg3': 3}}
self._check_result(call_info, 'build_instances',
expected_args, version='1.8')
def test_get_capacities(self):
capacity_info = {"capacity": "info"}
call_info = self._stub_rpc_method('call',
result=capacity_info)
result = self.cells_rpcapi.get_capacities(self.fake_context,
cell_name="name")
self._check_result(call_info, 'get_capacities',
{'cell_name': 'name'}, version='1.9')
self.assertEqual(capacity_info, result)
def test_instance_update_at_top(self):
fake_info_cache = {'id': 1,
'instance': 'fake_instance',
'other': 'moo'}
fake_sys_metadata = [{'id': 1,
'key': 'key1',
'value': 'value1'},
{'id': 2,
'key': 'key2',
'value': 'value2'}]
fake_instance = {'id': 2,
'security_groups': 'fake',
'instance_type': 'fake',
'volumes': 'fake',
'cell_name': 'fake',
'name': 'fake',
'metadata': 'fake',
'info_cache': fake_info_cache,
'system_metadata': fake_sys_metadata,
'other': 'meow'}
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.instance_update_at_top(
self.fake_context, fake_instance)
expected_args = {'instance': fake_instance}
self._check_result(call_info, 'instance_update_at_top',
expected_args)
def test_instance_destroy_at_top(self):
fake_instance = {'uuid': 'fake-uuid'}
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.instance_destroy_at_top(
self.fake_context, fake_instance)
expected_args = {'instance': fake_instance}
self._check_result(call_info, 'instance_destroy_at_top',
expected_args)
def test_instance_delete_everywhere(self):
fake_instance = {'uuid': 'fake-uuid'}
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.instance_delete_everywhere(
self.fake_context, fake_instance,
'fake-type')
expected_args = {'instance': fake_instance,
'delete_type': 'fake-type'}
self._check_result(call_info, 'instance_delete_everywhere',
expected_args)
def test_instance_fault_create_at_top(self):
fake_instance_fault = {'id': 2,
'other': 'meow'}
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.instance_fault_create_at_top(
self.fake_context, fake_instance_fault)
expected_args = {'instance_fault': fake_instance_fault}
self._check_result(call_info, 'instance_fault_create_at_top',
expected_args)
def test_bw_usage_update_at_top(self):
update_args = ('fake_uuid', 'fake_mac', 'fake_start_period',
'fake_bw_in', 'fake_bw_out', 'fake_ctr_in',
'fake_ctr_out')
update_kwargs = {'last_refreshed': 'fake_refreshed'}
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.bw_usage_update_at_top(
self.fake_context, *update_args, **update_kwargs)
bw_update_info = {'uuid': 'fake_uuid',
'mac': 'fake_mac',
'start_period': 'fake_start_period',
'bw_in': 'fake_bw_in',
'bw_out': 'fake_bw_out',
'last_ctr_in': 'fake_ctr_in',
'last_ctr_out': 'fake_ctr_out',
'last_refreshed': 'fake_refreshed'}
expected_args = {'bw_update_info': bw_update_info}
self._check_result(call_info, 'bw_usage_update_at_top',
expected_args)
def test_get_cell_info_for_neighbors(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.get_cell_info_for_neighbors(
self.fake_context)
self._check_result(call_info, 'get_cell_info_for_neighbors', {},
version='1.1')
self.assertEqual(result, 'fake_response')
def test_sync_instances(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.sync_instances(self.fake_context,
project_id='fake_project', updated_since='fake_time',
deleted=True)
expected_args = {'project_id': 'fake_project',
'updated_since': 'fake_time',
'deleted': True}
self._check_result(call_info, 'sync_instances', expected_args,
version='1.1')
def test_service_get_all(self):
call_info = self._stub_rpc_method('call', 'fake_response')
fake_filters = {'key1': 'val1', 'key2': 'val2'}
result = self.cells_rpcapi.service_get_all(self.fake_context,
filters=fake_filters)
expected_args = {'filters': fake_filters}
self._check_result(call_info, 'service_get_all', expected_args,
version='1.2')
self.assertEqual(result, 'fake_response')
def test_service_get_by_compute_host(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.service_get_by_compute_host(
self.fake_context, host_name='fake-host-name')
expected_args = {'host_name': 'fake-host-name'}
self._check_result(call_info, 'service_get_by_compute_host',
expected_args,
version='1.2')
self.assertEqual(result, 'fake_response')
def test_get_host_uptime(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.get_host_uptime(
self.fake_context, host_name='fake-host-name')
expected_args = {'host_name': 'fake-host-name'}
self._check_result(call_info, 'get_host_uptime',
expected_args,
version='1.17')
self.assertEqual(result, 'fake_response')
def test_service_update(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.service_update(
self.fake_context, host_name='fake-host-name',
binary='nova-api', params_to_update={'disabled': True})
expected_args = {
'host_name': 'fake-host-name',
'binary': 'nova-api',
'params_to_update': {'disabled': True}}
self._check_result(call_info, 'service_update',
expected_args,
version='1.7')
self.assertEqual(result, 'fake_response')
def test_proxy_rpc_to_manager(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.proxy_rpc_to_manager(
self.fake_context, rpc_message='fake-msg',
topic='fake-topic', call=True, timeout=-1)
expected_args = {'rpc_message': 'fake-msg',
'topic': 'fake-topic',
'call': True,
'timeout': -1}
self._check_result(call_info, 'proxy_rpc_to_manager',
expected_args,
version='1.2')
self.assertEqual(result, 'fake_response')
def test_task_log_get_all(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.task_log_get_all(self.fake_context,
task_name='fake_name',
period_beginning='fake_begin',
period_ending='fake_end',
host='fake_host',
state='fake_state')
expected_args = {'task_name': 'fake_name',
'period_beginning': 'fake_begin',
'period_ending': 'fake_end',
'host': 'fake_host',
'state': 'fake_state'}
self._check_result(call_info, 'task_log_get_all', expected_args,
version='1.3')
self.assertEqual(result, 'fake_response')
def test_compute_node_get_all(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.compute_node_get_all(self.fake_context,
hypervisor_match='fake-match')
expected_args = {'hypervisor_match': 'fake-match'}
self._check_result(call_info, 'compute_node_get_all', expected_args,
version='1.4')
self.assertEqual(result, 'fake_response')
def test_compute_node_stats(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.compute_node_stats(self.fake_context)
expected_args = {}
self._check_result(call_info, 'compute_node_stats',
expected_args, version='1.4')
self.assertEqual(result, 'fake_response')
def test_compute_node_get(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.compute_node_get(self.fake_context,
'fake_compute_id')
expected_args = {'compute_id': 'fake_compute_id'}
self._check_result(call_info, 'compute_node_get',
expected_args, version='1.4')
self.assertEqual(result, 'fake_response')
def test_actions_get(self):
fake_instance = {'uuid': 'fake-uuid', 'cell_name': 'region!child'}
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.actions_get(self.fake_context,
fake_instance)
expected_args = {'cell_name': 'region!child',
'instance_uuid': fake_instance['uuid']}
self._check_result(call_info, 'actions_get', expected_args,
version='1.5')
self.assertEqual(result, 'fake_response')
def test_actions_get_no_cell(self):
fake_instance = {'uuid': 'fake-uuid', 'cell_name': None}
self.assertRaises(exception.InstanceUnknownCell,
self.cells_rpcapi.actions_get, self.fake_context,
fake_instance)
def test_action_get_by_request_id(self):
fake_instance = {'uuid': 'fake-uuid', 'cell_name': 'region!child'}
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.action_get_by_request_id(self.fake_context,
fake_instance,
'req-fake')
expected_args = {'cell_name': 'region!child',
'instance_uuid': fake_instance['uuid'],
'request_id': 'req-fake'}
self._check_result(call_info, 'action_get_by_request_id',
expected_args, version='1.5')
self.assertEqual(result, 'fake_response')
def test_action_get_by_request_id_no_cell(self):
fake_instance = {'uuid': 'fake-uuid', 'cell_name': None}
self.assertRaises(exception.InstanceUnknownCell,
self.cells_rpcapi.action_get_by_request_id,
self.fake_context, fake_instance, 'req-fake')
def test_action_events_get(self):
fake_instance = {'uuid': 'fake-uuid', 'cell_name': 'region!child'}
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.action_events_get(self.fake_context,
fake_instance,
'fake-action')
expected_args = {'cell_name': 'region!child',
'action_id': 'fake-action'}
self._check_result(call_info, 'action_events_get', expected_args,
version='1.5')
self.assertEqual(result, 'fake_response')
def test_action_events_get_no_cell(self):
fake_instance = {'uuid': 'fake-uuid', 'cell_name': None}
self.assertRaises(exception.InstanceUnknownCell,
self.cells_rpcapi.action_events_get,
self.fake_context, fake_instance, 'fake-action')
def test_consoleauth_delete_tokens(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.consoleauth_delete_tokens(self.fake_context,
'fake-uuid')
expected_args = {'instance_uuid': 'fake-uuid'}
self._check_result(call_info, 'consoleauth_delete_tokens',
expected_args, version='1.6')
def test_validate_console_port(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.validate_console_port(self.fake_context,
'fake-uuid', 'fake-port', 'fake-type')
expected_args = {'instance_uuid': 'fake-uuid',
'console_port': 'fake-port',
'console_type': 'fake-type'}
self._check_result(call_info, 'validate_console_port',
expected_args, version='1.6')
self.assertEqual(result, 'fake_response')
def test_bdm_update_or_create_at_top(self):
fake_bdm = {'id': 2, 'other': 'meow'}
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.bdm_update_or_create_at_top(
self.fake_context, fake_bdm, create='fake-create')
expected_args = {'bdm': fake_bdm, 'create': 'fake-create'}
self._check_result(call_info, 'bdm_update_or_create_at_top',
expected_args, version='1.10')
def test_bdm_destroy_at_top(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.bdm_destroy_at_top(self.fake_context,
'fake-uuid',
device_name='fake-device',
volume_id='fake-vol')
expected_args = {'instance_uuid': 'fake-uuid',
'device_name': 'fake-device',
'volume_id': 'fake-vol'}
self._check_result(call_info, 'bdm_destroy_at_top',
expected_args, version='1.10')
def test_get_migrations(self):
call_info = self._stub_rpc_method('call', None)
filters = {'cell_name': 'ChildCell', 'status': 'confirmed'}
self.cells_rpcapi.get_migrations(self.fake_context, filters)
expected_args = {'filters': filters}
self._check_result(call_info, 'get_migrations', expected_args,
version="1.11")
def test_instance_update_from_api(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.instance_update_from_api(
self.fake_context, 'fake-instance',
expected_vm_state='exp_vm',
expected_task_state='exp_task',
admin_state_reset='admin_reset')
expected_args = {'instance': 'fake-instance',
'expected_vm_state': 'exp_vm',
'expected_task_state': 'exp_task',
'admin_state_reset': 'admin_reset'}
self._check_result(call_info, 'instance_update_from_api',
expected_args, version='1.16')
def test_start_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.start_instance(
self.fake_context, 'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'start_instance',
expected_args, version='1.12')
def test_stop_instance_cast(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.stop_instance(
self.fake_context, 'fake-instance', do_cast=True)
expected_args = {'instance': 'fake-instance',
'do_cast': True}
self._check_result(call_info, 'stop_instance',
expected_args, version='1.12')
def test_stop_instance_call(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.stop_instance(
self.fake_context, 'fake-instance', do_cast=False)
expected_args = {'instance': 'fake-instance',
'do_cast': False}
self._check_result(call_info, 'stop_instance',
expected_args, version='1.12')
self.assertEqual(result, 'fake_response')
def test_cell_create(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.cell_create(self.fake_context, 'values')
expected_args = {'values': 'values'}
self._check_result(call_info, 'cell_create',
expected_args, version='1.13')
self.assertEqual(result, 'fake_response')
def test_cell_update(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.cell_update(self.fake_context,
'cell_name', 'values')
expected_args = {'cell_name': 'cell_name',
'values': 'values'}
self._check_result(call_info, 'cell_update',
expected_args, version='1.13')
self.assertEqual(result, 'fake_response')
def test_cell_delete(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.cell_delete(self.fake_context,
'cell_name')
expected_args = {'cell_name': 'cell_name'}
self._check_result(call_info, 'cell_delete',
expected_args, version='1.13')
self.assertEqual(result, 'fake_response')
def test_cell_get(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.cell_get(self.fake_context,
'cell_name')
expected_args = {'cell_name': 'cell_name'}
self._check_result(call_info, 'cell_get',
expected_args, version='1.13')
self.assertEqual(result, 'fake_response')
def test_reboot_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.reboot_instance(
self.fake_context, 'fake-instance',
block_device_info='ignored', reboot_type='HARD')
expected_args = {'instance': 'fake-instance',
'reboot_type': 'HARD'}
self._check_result(call_info, 'reboot_instance',
expected_args, version='1.14')
def test_pause_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.pause_instance(
self.fake_context, 'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'pause_instance',
expected_args, version='1.19')
def test_unpause_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.unpause_instance(
self.fake_context, 'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'unpause_instance',
expected_args, version='1.19')
def test_suspend_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.suspend_instance(
self.fake_context, 'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'suspend_instance',
expected_args, version='1.15')
def test_resume_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.resume_instance(
self.fake_context, 'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'resume_instance',
expected_args, version='1.15')
def test_terminate_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.terminate_instance(self.fake_context,
'fake-instance', [])
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'terminate_instance',
expected_args, version='1.18')
def test_soft_delete_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.soft_delete_instance(self.fake_context,
'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'soft_delete_instance',
expected_args, version='1.18')
def test_resize_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.resize_instance(self.fake_context,
'fake-instance',
dict(cow='moo'),
'fake-hint',
'fake-flavor',
'fake-reservations')
expected_args = {'instance': 'fake-instance',
'flavor': 'fake-flavor',
'extra_instance_updates': dict(cow='moo')}
self._check_result(call_info, 'resize_instance',
expected_args, version='1.20')
def test_live_migrate_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.live_migrate_instance(self.fake_context,
'fake-instance',
'fake-host',
'fake-block',
'fake-commit')
expected_args = {'instance': 'fake-instance',
'block_migration': 'fake-block',
'disk_over_commit': 'fake-commit',
'host_name': 'fake-host'}
self._check_result(call_info, 'live_migrate_instance',
expected_args, version='1.20')
def test_revert_resize(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.revert_resize(self.fake_context,
'fake-instance',
'fake-migration',
'fake-dest',
'resvs')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'revert_resize',
expected_args, version='1.21')
def test_confirm_resize(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.confirm_resize(self.fake_context,
'fake-instance',
'fake-migration',
'fake-source',
'resvs')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'confirm_resize',
expected_args, version='1.21')
def test_reset_network(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.reset_network(self.fake_context,
'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'reset_network',
expected_args, version='1.22')
def test_inject_network_info(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.inject_network_info(self.fake_context,
'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'inject_network_info',
expected_args, version='1.23')
def test_snapshot_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.snapshot_instance(self.fake_context,
'fake-instance',
'image-id')
expected_args = {'instance': 'fake-instance',
'image_id': 'image-id'}
self._check_result(call_info, 'snapshot_instance',
expected_args, version='1.24')
def test_backup_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.backup_instance(self.fake_context,
'fake-instance',
'image-id',
'backup-type',
'rotation')
expected_args = {'instance': 'fake-instance',
'image_id': 'image-id',
'backup_type': 'backup-type',
'rotation': 'rotation'}
self._check_result(call_info, 'backup_instance',
expected_args, version='1.24')
|
|
# --------------------------------------------------------------------------- #
import logging
from abc import ABCMeta
import collections
import datetime
from cryptography import x509
from cryptography.x509.oid import NameOID, ExtensionOID
from cryptography.hazmat.primitives import serialization, hashes
from cryptography.hazmat.backends import default_backend
from idna.core import InvalidCodepoint
from arroyo.crypto import PublicKey
from arroyo.utils import file_to_bytes, bytes_to_file
from . import EncodingType
# --------------------------------------------------------------------------- #
# Typing
from typing import Union, List, Dict
from arroyo.crypto import PrivateKey
# --------------------------------------------------------------------------- #
__all__ = ["x509Cert", "x509CertSignReq"]
LOG = logging.getLogger(__name__)
# --------------------------------------------------------------------------- #
class x509Base(metaclass=ABCMeta):
"""
High-level x509 Object Abstraction Base Class.
"""
# Implementation Variables:
# _x509_obj: The underlying x509 object from Cryptography
# __encoding: The underlying variable for the encoding getter/setter
@classmethod
def from_file(cls, path: str, **kwargs) -> "x509Base":
"""
Create a new X509 Object from a given file.
:param path: The path of the key file to load.
:param kwargs: Additional key-word arguments to pass to the x509
objects's init method.
        :return: A new ``x509Base`` subclass instance representing the loaded
                 file.
:raises FileNotFoundError: If the given key file could not be found.
"""
return cls(file_to_bytes(path), **kwargs)
def __init__(self, x509_obj):
"""
Creates a new instance of a ``x509Base`` subclass.
"""
self.__encoding = None
self._x509_obj = x509_obj
def __bytes__(self):
return self.to_bytes()
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
this_bytes = self.to_bytes(encoding=EncodingType.DER)
other_bytes = other.to_bytes(encoding=EncodingType.DER)
return this_bytes == other_bytes
def __ne__(self, other):
return not self.__eq__(other)
@property
def encoding(self) -> EncodingType:
"""
Returns the certificate serialization encoding.
"""
return self.__encoding or EncodingType.DER
@encoding.setter
def encoding(self, value: EncodingType) -> None:
"""
Sets the certificate serialization encoding.
"""
try:
value = EncodingType(value)
except ValueError:
raise ValueError("Encoding must be a type of EncodingType")
self.__encoding = value
def to_bytes(self, *, encoding: EncodingType = None) -> bytes:
"""
Returns the x509 object as bytes.
By default, the value of the ``encoding`` instance attribute is used
to determine the byte serialization encoding. This behavior can be
overridden by providing an explicit `encoding` value.
:param encoding: Override the object's encoding before converting to
bytes.
:return: The bytes of the x509 object encoded with the given
encoding.
"""
encoding = encoding or self.encoding
return self._x509_obj.public_bytes(encoding)
    def to_file(self, path: str, **kwargs) -> None:
"""
Writes the x509 object to a file.
:param path: The path at which to write the new x509 object file.
:param kwargs: Additional keyword arguments to pass into the object's
`to_bytes` method.
"""
bytes_to_file(
path, self.to_bytes(**kwargs)
)
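# A minimal usage sketch for the serialization helpers above (file names are
# illustrative; loading auto-detects PEM vs. DER, see the subclasses below):
#
#     cert = x509Cert.from_file("cert.pem")   # encoding detected on load
#     cert.encoding = EncodingType.DER        # switch serialization encoding
#     cert.to_file("cert.der")                # write the DER-encoded bytes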
class x509Cert(x509Base):
"""
High level abstraction for X509 Certificates.
This class is used to hide implementation level details for how
X509 certificates are actually handled.
"""
@classmethod
def from_csr(cls, csr: "x509CertSignReq", key: PrivateKey,
issuer: "x509Cert" = None,
not_valid_before: datetime.datetime = None,
not_valid_after: datetime.datetime = None,
serial_number: int = None, is_ca: bool = False):
issuer = issuer or csr # type: x509Base
private_key = serialization.load_der_private_key(
key.to_bytes(encoding=EncodingType.DER),
None,
default_backend()
)
builder = x509.CertificateBuilder().subject_name(
csr._x509_obj.subject
).issuer_name(
issuer._x509_obj.subject
).public_key(
csr._x509_obj.public_key()
).not_valid_before(
not_valid_before or datetime.datetime.today()
).not_valid_after(
# TODO: Will probably want this to max out at the issuer's
# TODO: not_valid_after value (if provided)
not_valid_after or (
datetime.datetime.today() + datetime.timedelta(days=365 * 2))
).serial_number(
serial_number or x509.random_serial_number()
)
# Load Extensions
for e in csr._x509_obj.extensions:
builder = builder.add_extension(
e.value,
critical=e.critical
)
if is_ca:
builder = builder.add_extension(
x509.BasicConstraints(True, None),
critical=True
)
# Sign and Return a New x509Cert Instance containing the new Cert.
cert = builder.sign(private_key, hashes.SHA256(), default_backend())
crt_bytes = cert.public_bytes(EncodingType.DER)
return cls(data=crt_bytes)
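    # Hedged sketch of issuing a certificate from a CSR; ``csr`` and ``key``
    # are assumed to be existing x509CertSignReq/PrivateKey instances:
    #
    #     cert = x509Cert.from_csr(csr, key, is_ca=True)   # self-signed CA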
def __init__(self, data: bytes):
"""
Creates a new Cert object from the given bytes.
Changing the produced object will NOT change the underlying
bytes. The new object must first be exported.
:param data: The bytes of the certificate to load.
:return: A new ``Cert`` representing the loaded certificate.
:raises TypeError: If the value for ``data`` cannot be treated
as bytes.
:raises ValueError: If the given value for ``data`` cannot be properly
decoded
"""
if not isinstance(data, bytes):
raise TypeError("Value of 'data' must be bytes")
args = (data, default_backend())
# (1) Try loading as DER
try:
super().__init__(x509_obj=x509.load_der_x509_certificate(*args))
self.encoding = EncodingType.DER
return
except ValueError:
pass
# (2) Try loading as PEM
try:
super().__init__(x509_obj=x509.load_pem_x509_certificate(*args))
self.encoding = EncodingType.PEM
return
except ValueError:
pass
# Could not load - bytes not in suitable format.
raise ValueError("Could not find a suitable encoding for 'data' "
"bytes, the data may not be a valid X509 certificate")
def __contains__(self, item):
return self.public_key == item
@property
def issuer(self) -> Dict[str, str]:
issuer = {}
for name in self._x509_obj.issuer:
issuer[name.oid._name] = name.value
return issuer
@property
def public_key(self) -> PublicKey:
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Use the SubjectPublicKeyInfo format since it can be used on all
# key types.
fmt = serialization.PublicFormat.SubjectPublicKeyInfo
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
k = self._x509_obj.public_key()
data = k.public_bytes(self.encoding, fmt)
return PublicKey(data=data)
class x509CertSignReq(x509Base):
"""
High-level abstraction class for x509 Certificate Signing Requests (CSRs).
"""
@classmethod
def generate(cls, key: PrivateKey, subj_alt_dns_names: list = None, *,
CN: str = None, O: str = None, OU: str = None, L: str = None,
ST: str = None, C: str = None):
"""
Generates a new Certificate Signing Request (CSR) with the given
parameters.
:param key: Private key used to sign the CSR.
:param subj_alt_dns_names: DNS name(s) to be included in the Subject
Alternative Name (SAN).
:param CN: Common Name, typically a wildcard name.
:param O: Organization Name.
:param OU: Organizational Unit Name.
:param L: Locality or City Name.
:param ST: State or Province Name.
:param C: Country Name.
:return: A new ``x509CertSignReq`` representing the newly generated
csr.
:raises ValueError: If the given value for a certificate field is not
valid.
"""
subj_alt_dns_names = subj_alt_dns_names or list()
        if isinstance(subj_alt_dns_names, str):
            # A bare string would otherwise be iterated character by character.
            subj_alt_dns_names = [subj_alt_dns_names]
# Build the Subject Distinguished Name
dn = []
try:
if CN:
dn.append(x509.NameAttribute(NameOID.COMMON_NAME, CN))
if O:
dn.append(x509.NameAttribute(NameOID.ORGANIZATION_NAME, O))
if OU:
dn.append(
x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, OU)
)
if L:
dn.append(x509.NameAttribute(NameOID.LOCALITY_NAME, L))
if ST:
dn.append(
x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, ST)
)
if C:
dn.append(x509.NameAttribute(NameOID.COUNTRY_NAME, C))
except ValueError as e:
raise ValueError("Invalid value: {}".format(str(e)))
# Build the SAN
san = []
for name in subj_alt_dns_names:
san.append(x509.DNSName(name))
# Build the CSR Parameters
builder = x509.CertificateSigningRequestBuilder().subject_name(
x509.Name(dn)
).add_extension(
x509.SubjectAlternativeName(san),
critical=False
)
# Sign the CSR
private_key = serialization.load_der_private_key(
key.to_bytes(encoding=EncodingType.DER),
None,
default_backend()
)
try:
csr = builder.sign(private_key, hashes.SHA256(), default_backend())
except InvalidCodepoint as e:
raise ValueError("Invalid value: {}".format(str(e)))
# Serialize the CSR to Bytes
csr_bytes = csr.public_bytes(EncodingType.DER)
return cls(data=csr_bytes)
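    # Illustrative call, assuming ``key`` is an existing arroyo PrivateKey:
    #
    #     csr = x509CertSignReq.generate(key, ["www.example.com"],
    #                                    CN="example.com", O="Example Inc",
    #                                    C="US")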
def __init__(self, data: bytes):
"""
        Creates a new ``x509CertSignReq`` object from the given bytes.
Changing the produced object will NOT change the underlying
bytes. The new object must first be exported.
:param data: The bytes of the CSR to load.
:return: A new ``x509CertSignReq`` representing the loaded CSR.
:raises TypeError: If the value for ``data`` cannot be treated
as bytes.
:raises ValueError: If the given value for ``data`` cannot be properly
decoded
"""
if not isinstance(data, bytes):
raise TypeError("Value of 'data' must be bytes")
args = (data, default_backend())
# (1) Try loading as DER
try:
super().__init__(x509_obj=x509.load_der_x509_csr(*args))
self.encoding = EncodingType.DER
return
except ValueError:
pass
# (2) Try loading as PEM
try:
super().__init__(x509_obj=x509.load_pem_x509_csr(*args))
self.encoding = EncodingType.PEM
return
except ValueError:
pass
# Could not load - bytes not in suitable format.
raise ValueError("Could not find a suitable encoding for 'data' "
"bytes, the data may not be a valid X509 CSR")
def get_subj_alt_dns_names(self) -> List:
san = self._x509_obj.extensions.get_extension_for_oid(
ExtensionOID.SUBJECT_ALTERNATIVE_NAME
)
return san.value.get_values_for_type(x509.DNSName)
|
|
#!/usr/bin/python
# coding=utf-8
import api
from . import batched, parse_datetime, parse_date
from cache import SchedulesDirectCache
from common import Status, LineupMap, LineupMapList, ScheduleList, Headend, Lineup, ChangeLineupResponse, ServiceRegion
import logging
import hashlib
class SchedulesDirect(object):
def __init__(self, username, password, cache_path="./sdcache.db"):
self._logger = logging.getLogger(__name__) # type: logging.Logger
self._username = username # type: unicode
self._password = hashlib.sha1(password).hexdigest() # type: unicode
self._cache = SchedulesDirectCache(cache_path) # type: SchedulesDirectCache
self._cache.init_database()
self._force_program_refresh = False # type: bool
self._subscribed_lineups = None # type: List[Lineup]
self._token = None # type: unicode
self._status = None # type: Status
def get_token(self):
self._token = api.get_token(self._username, self._password)["token"]
def get_status(self):
self._status = Status.from_dict(api.get_status(self._token))
return self._status
def is_online(self):
return self._status.system_status.status == u"Online"
def get_service_countries(self):
"""
:return:
"""
service_region_dict = api.get_service_countries()
return [ServiceRegion.from_dict(service_region) for service_region in service_region_dict]
def get_headends_by_postal_code(self, country, postal_code):
"""
:param country:
:param postal_code:
:return:
:rtype: list[Headend]
"""
headends_dict = api.get_headends_by_postal_code(self._token, country, postal_code)
return [Headend.from_dict(headend) for headend in headends_dict]
def get_subscribed_lineups(self): # type: () -> List[Lineup]
"""
:return:
"""
if self._subscribed_lineups is not None:
return self._subscribed_lineups
lineups_dict = api.get_subscribed_lineups(self._token)
if "response" in lineups_dict and lineups_dict["response"] == "NO_LINEUPS":
self._subscribed_lineups = []
else:
lineups_dict = lineups_dict["lineups"]
self._subscribed_lineups = [Lineup.from_dict(lineup_dict) for lineup_dict in lineups_dict]
return self._subscribed_lineups
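    # Typical session sketch (credentials are placeholders):
    #
    #     sd = SchedulesDirect("user", "password")
    #     sd.get_token()
    #     sd.get_status()
    #     if sd.is_online():
    #         lineups = sd.get_subscribed_lineups()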
def add_lineup(self, lineup_id):
response = api.add_lineup(self._token, lineup_id)
self._subscribed_lineups = None
return ChangeLineupResponse.from_dict(response)
def remove_lineup(self, lineup_id):
response = api.remove_lineup(self._token, lineup_id)
self._subscribed_lineups = None
return ChangeLineupResponse.from_dict(response)
def get_lineup_map(self, lineup_id, modified=None): # type: (...) -> LineupMap
"""
:param lineup_id:
:param modified:
:return:
"""
lineup_map = self._cache.get_lineup(lineup_id, modified)
if lineup_map is None:
lineup_map = api.get_lineup(self._token, lineup_id)
self._cache.add_lineup(lineup_id, parse_datetime(lineup_map["metadata"]["modified"]), lineup_map)
return LineupMap.from_dict(lineup_map)
def get_lineup_map_list(self, lineups): # type: (...) -> LineupMapList
"""
:param lineups:
:return:
"""
lineup_map_list = LineupMapList()
for lineup in lineups:
lineup_map = self.get_lineup_map(lineup.lineup_id, lineup.modified)
lineup_map_list.append(lineup_map)
return lineup_map_list
def cache_programs(self, program_hash_list):
self._logger.debug(u"Searching for uncached programs...")
self._cache.add_program_hashes(program_hash_list)
programs_to_fetch = self._cache.get_program_delta()
#self._logger.info(u"Found %s program(s) missing from cache.", len(programs_to_fetch))
#if len(programs_to_fetch) == 0:
# return
for batch in batched(programs_to_fetch, 5000):
self._logger.info(u"Requesting %s programs from SchedulesDirect.", len(batch))
programs = api.get_programs(self._token, batch)
# remove (currently) unused cast and crew properties
for program in programs:
if "cast" in program:
del program["cast"]
if "crew" in program:
del program["crew"]
self._logger.info(u"Adding %s program(s) to program cache.", len(programs))
self._cache.add_programs(programs)
def cache_artwork(self):
artwork_to_fetch = self._cache.get_artwork_delta()
#self._logger.info(u"Found %s program artwork missing from cache.", len(artwork_to_fetch))
#if len(artwork_to_fetch) == 0:
# return
for batch in batched(artwork_to_fetch, 500):
self._logger.info(u"Requesting %s program artwork from SchedulesDirect.", len(batch))
artwork = api.get_metadata(batch)
artwork_errors = (art for art in artwork if isinstance(art[u"data"], dict))
for artwork_error in artwork_errors:
self._logger.warn(u"Artwork for %s returned %s %s", artwork_error["programID"], artwork_error["data"]["errorCode"], artwork_error["data"]["errorMessage"])
self._logger.info(u"Adding program artwork to cache.")
self._cache.add_artwork(art for art in artwork if isinstance(art[u"data"], list))
def refresh_cache(self, schedule_hash_set):
with self._cache:
changed_schedule_list = self.cache_schedules(schedule_hash_set)
if len(changed_schedule_list) != 0:
self.cache_programs(changed_schedule_list.get_program_hash_list())
self._cache.update_program_max_schedule_dates(changed_schedule_list.get_program_max_schedule_dates())
self._logger.info(u"Caching artwork...")
self.cache_artwork()
self._logger.info(u"Deleting expired schedules...")
self._cache.delete_expired_schedules()
self._logger.info(u"Deleting expired programs...")
self._cache.delete_expired_programs()
self._logger.info(u"Deleting expired artwork...")
self._cache.delete_expired_artwork()
self._logger.info(u"Compressing cache database...")
self._cache.compress_database()
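    # Sketch of a full refresh; ``station_ids`` is assumed to be collected
    # from the subscribed lineup maps:
    #
    #     hashes = sd.get_schedule_hash_list(station_ids)
    #     sd.refresh_cache(hashes)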
def get_cached_programs(self, program_ids):
return {program.program_id: program for program in self._cache.get_programs(program_ids)}
def get_cached_artwork(self, artwork_ids):
return {program_artwork.artwork_id: program_artwork for program_artwork in self._cache.get_artwork(artwork_ids)}
def get_schedule_hash_list(self, station_ids):
self._logger.info(u"Requesting schedule hashes for %s stations...", len(station_ids))
schedule_md5s_request = [{u"stationID": station} for station in station_ids]
result = api.get_schedule_md5s(self._token, schedule_md5s_request)
schedule_hash_list = [(station_id, parse_date(date), result[station_id][date]["md5"]) for station_id in result for date in result[station_id]]
return schedule_hash_list
def cache_schedules(self, schedule_hash_list): # type: (...) -> ScheduleList
"""
:param schedule_hash_list:
:return:
"""
self._cache.add_schedule_hashes(schedule_hash_list)
schedules_to_fetch = self._cache.get_schedule_delta()
#self._logger.info(u"Found %s schedule(s) missing from cache.", len(schedules_to_fetch))
schedules_request = {}
for (station_id, schedule_date) in schedules_to_fetch:
if station_id not in schedules_request:
schedules_request[station_id] = []
schedules_request[station_id].append(schedule_date.strftime("%Y-%m-%d"))
if len(schedules_request) != 0:
#self._logger.info(u"Requesting %s schedules from SchedulesDirect.", len(schedules_to_fetch))
schedules_response = api.get_schedules(self._token, [{"stationID": station_id, "date": schedules_request[station_id]} for station_id in schedules_request])
self._cache.add_schedules(schedules_response)
return ScheduleList.from_iterable(schedules_response)
return ScheduleList()
def get_cached_schedules(self, schedule_keys): # type: (...) -> ScheduleList
"""
:param schedule_keys:
:return:
"""
return ScheduleList(self._cache.get_schedules(schedule_keys))
def read_filter(self, lineup_map_list):
import os.path
import json
filter = None
if not os.path.isfile("./filter.json"):
filter = {}
for lineup_map in lineup_map_list:
lineup_id = lineup_map.lineup.lineup_id
filter[lineup_id] = {"default_import": 1}
for channel in lineup_map.channels:
filter[lineup_id][channel.channel] = {"callsign": channel.station.callsign, "import": 1}
f = open("./filter.json", "w")
json.dump(filter, f, indent=4, sort_keys=True)
f.close()
return filter
filter_changed = False
f = open("./filter.json", "r")
filter = json.load(f)
f.close()
for lineup_map in lineup_map_list:
lineup_id = lineup_map.lineup.lineup_id
if lineup_id not in filter:
filter[lineup_id] = {"default_import": 1}
filter_changed = True
elif "default_import" not in filter[lineup_id]:
filter[lineup_id]["default_import"] = 1
filter_changed = True
for channel in lineup_map.channels:
channel_num = channel.channel
                if channel_num not in filter[lineup_id]:
                    # Keep new channels consistent with the dict format used
                    # when the filter file is first generated above.
                    filter[lineup_id][channel_num] = {
                        "callsign": channel.station.callsign,
                        "import": filter[lineup_id]["default_import"]}
                    filter_changed = True
elif filter[lineup_id][channel_num]["callsign"] != channel.station.callsign:
filter[lineup_id][channel_num]["callsign"] = channel.station.callsign
filter_changed = True
if filter_changed:
f = open("./filter.json", "w")
json.dump(filter, f, indent=4, sort_keys=True)
f.close()
return filter
def manage(self):
while True:
print(u"\nManage Account Options:\n")
print(u"1. List subscribed lineups")
print(u"2. Add lineup")
print(u"3. Remove lineup")
print(u"4. List lineup channels.")
print(u"\nChoose an option or 'x' to exit.")
choice = raw_input("> ")
if choice == "x":
break
elif choice == "1":
self._list_subscribed_lineups()
elif choice == "2":
self._add_lineup()
elif choice == "3":
self._remove_lineup()
elif choice == "4":
self._list_lineup_channels()
def _list_subscribed_lineups(self):
lineups = self.get_subscribed_lineups()
print(u"\nSubscribed Lineups:\n")
for lineup in lineups:
print(u"Lineup:\t{0}".format(lineup.lineup_id))
print(u"Name:\t{0}".format(lineup.name))
print(u"Transport:\t{0}".format(lineup.transport))
print(u"Location:\t{0}".format(lineup.location))
print(u"")
def _add_lineup(self):
while True:
print(u"\nAdd Lineup\n")
print(u"Enter 3-character country/region code or 'x' to cancel:")
country_code = raw_input("> ")
if country_code == "x":
break
while True:
print(u"Enter zip/postal code or 'x' to cancel:")
postal_code = raw_input("> ")
if postal_code == "x":
break
headends = self.get_headends_by_postal_code(country_code, postal_code)
while True:
subscribed_lineups = self.get_subscribed_lineups()
subscribed_lineup_ids = [lineup.lineup_id for lineup in subscribed_lineups]
headend_lineups = [(headend, lineup) for headend in headends for lineup in headend.lineups if lineup.lineup_id not in subscribed_lineup_ids]
transport_set = {headend.type for (headend, lineup) in headend_lineups}
options = []
count = 0
for transport in transport_set:
print(u"\nTransport: {0}\n".format(transport))
for (headend, lineup) in [(headend, lineup) for (headend, lineup) in headend_lineups if headend.type == transport]:
options.append((headend, lineup))
count += 1
print(u"\t{0}. {1.name} ({2.location})".format(count, lineup, headend))
print(u"\nChoose a lineup to add or 'x' to cancel.")
choice = raw_input("> ")
if choice == "x":
break
choice = int(choice) - 1
(headend, lineup) = options[choice]
print(u"Are you sure you want to add '{0} ({1})'? (y/n)".format(lineup.name, headend.location))
if raw_input("> ") != "y":
continue
response = self.add_lineup(lineup.lineup_id)
print(u"Schedules Direct returned '{0}'.".format(response.response_status.message))
print(u"{0} lineup changes remaining.\n".format(response.changes_remaining))
def _list_lineup_channels(self):
while True:
print(u"\nList Lineup Channels\n")
subscribed_lineups = self.get_subscribed_lineups()
options = []
count = 0
for lineup in subscribed_lineups:
count += 1
options.append(lineup)
print(u"{0}. {1.name} ({1.location})".format(count, lineup))
print(u"\nChoose a lineup to list channels or 'x' to cancel.")
choice = raw_input("> ")
if choice == "x":
break
choice = int(choice) - 1
lineup = options[choice]
lineup_map = self.get_lineup_map(lineup.lineup_id)
for channel in lineup_map.channels:
print(u"{0}\t{1.callsign} '{1.name}'".format(channel.channel, channel.station))
def _remove_lineup(self):
while True:
print(u"\nRemove Lineup\n")
subscribed_lineups = self.get_subscribed_lineups()
options = []
count = 0
for lineup in subscribed_lineups:
count += 1
options.append(lineup)
print(u"{0}. {1.name} ({1.location})".format(count, lineup))
print(u"\nChoose a lineup to remove or 'x' to cancel.")
choice = raw_input("> ")
if choice == "x":
break
choice = int(choice) - 1
lineup = options[choice]
print(u"Are you sure you want to remove '{0.name} ({0.location})'? (y/n)".format(lineup))
if raw_input("> ") != "y":
continue
response = self.remove_lineup(lineup.lineup_id)
print(u"\nSchedules Direct returned '{0}'.".format(response.response_status.message))
print(u"{0} lineup changes remaining.\n".format(response.changes_remaining))
|
|
#!/usr/bin/env python
# Copyright (C) 2015 Intel Corporation
#
# Released under the MIT license (see COPYING.MIT)
# ./runtest.py -b build_data.json -a tag -f test.manifest
import sys
import os
import time
import unittest
import inspect
from functools import wraps
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__)))
sys.path.append(os.path.join(BASEDIR, "oeqa"))
from optparse import OptionParser
from oeqa.oetest import oeTest
from oeqa.oetest import oeRuntimeTest
from oeqa.oetest import TestContext as OETestContext
from oeqa.utils.sshcontrol import SSHControl
from oeqa.utils.decorators import gettag
import oeqa.utils.decorators
__tag_prefix = "tag__"
def tag(*args, **kwargs):
"""Decorator that adds attributes to classes or functions
for use with the Attribute (-a) plugin.
"""
def wrap_ob(ob):
for name in args:
setattr(ob, __tag_prefix + name, True)
for name, value in kwargs.items():
setattr(ob, __tag_prefix + name, value)
return ob
return wrap_ob
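# Example: marking a test so it can be selected with the -a/--tag option
# (the test name here is illustrative):
#
#     @tag("smoke", priority=1)
#     def test_ssh_login(self):
#         ...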
oeqa.utils.decorators.tag = tag
class FakeTarget(object):
def __init__(self, d):
self.connection = None
self.ip = None
self.port = None
self.server_ip = None
self.datetime = time.strftime('%Y%m%d%H%M%S',time.gmtime())
self.testdir = d.getVar("TEST_LOG_DIR", True)
self.pn = d.getVar("PN", True)
def exportStart(self, mainTarget=True):
if mainTarget:
self.sshlog = os.path.join(self.testdir, "ssh_target_log.%s" % self.datetime)
sshloglink = os.path.join(self.testdir, "ssh_target_log")
if os.path.lexists(sshloglink):
os.remove(sshloglink)
os.symlink(self.sshlog, sshloglink)
print("SSH log file: %s" % self.sshlog)
else:
self.sshlog = os.path.join(self.testdir, "ssh_target_log_%s_%s" % (self.ip, self.datetime))
self.connection = SSHControl(self.ip, port=self.port, logfile=self.sshlog)
def run(self, cmd, timeout=None):
return self.connection.run(cmd, timeout)
def copy_to(self, localpath, remotepath):
return self.connection.copy_to(localpath, remotepath)
def copy_from(self, remotepath, localpath):
return self.connection.copy_from(remotepath, localpath)
class MyDataDict(dict):
def getVar(self, key, unused = None):
return self.get(key, "")
class TestContext(object):
def __init__(self):
self.d = None
self.target = None
class RuntestTestContext(OETestContext):
def __init__(self, tc):
d = tc.d
self.targets = tc.targets
super(RuntestTestContext, self).__init__(d)
self.pkgmanifest = tc.pkgmanifest
self.target = tc.target
self.tagexp = tc.tagexp
self.imagefeatures = tc.imagefeatures
self.distrofeatures = tc.distrofeatures
def _get_test_suites(self):
return self.d.getVar("testslist", True)
def _get_tests_list(self, *args, **kwargs):
return self.testsuites
def _get_test_suites_required(self, *args, **kwargs):
return self.testsuites
def loadTests(self):
super(RuntestTestContext, self).loadTests()
setattr(oeRuntimeTest, "pscmd", "ps -ef" if oeTest.hasPackage("procps") else "ps")
try:
import simplejson as json
except ImportError:
import json
def setUp(self):
pass
def tearDown(self):
pass
oeRuntimeTest.setUp = setUp
oeRuntimeTest.tearDown = tearDown
def wrap_runner(runner, *wargs, **wkwargs):
@wraps(runner)
def __wrapper(*args, **kwargs):
# args and kwargs will overwrite the wargs and wkwargs
_args = list(args)
_args.extend(wargs[len(args):] if len(wargs) > len(args) else [])
kw = wkwargs.copy()
kw.update(kwargs)
return runner(*_args, **kw)
return __wrapper
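# wrap_runner() pre-binds arguments onto a runner while letting call-time
# arguments take precedence; main() below uses it to force xUnit output:
#
#     unittest.TextTestRunner = wrap_runner(xmlrunner.XMLTestRunner,
#                                           output="results-dir")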
def main():
usage = "usage: %prog [options]"
parser = OptionParser(usage=usage)
parser.add_option("-t", "--target-ip", dest="ip", action="append",
help="The IP address of the target machine. Use this to \
overwrite the value determined from TEST_TARGET_IP at build time")
parser.add_option("-s", "--server-ip", dest="server_ip",
help="The IP address of this machine. Use this to \
overwrite the value determined from TEST_SERVER_IP at build time.")
parser.add_option("-d", "--deploy-dir", dest="deploy_dir",
default=os.path.join(BASEDIR, "deploy"),
help="Full path to the package feeds, that this \
the contents of what used to be DEPLOY_DIR on the build machine. \
If not specified it will use the value specified in the json if \
that directory actually exists or it will error out.")
parser.add_option("-l", "--log-dir", dest="log_dir",
help="This sets the path for TEST_LOG_DIR. If not specified \
the current dir is used. This is used for usually creating a \
ssh log file and a scp test file.")
parser.add_option("-f", "--test-manifest", dest="tests_list",
help="The test list file")
parser.add_option("-b", "--build-data", dest="build_data",
help="The build data file.")
parser.add_option("-a", "--tag", dest="tag",
help="The tags to filter test case")
parser.add_option("-m", "--machine", dest="machine",
help="""The target machine:quark intel-corei7-64 beaglebone""")
parser.add_option("-n", "--nativearch", dest="nativearch",
help="The native arch")
parser.add_option("-x", "--xunit", dest="xunit",
help="Output directory to put results in xUnit XML format")
(options, args) = parser.parse_args()
tc = TestContext()
#inject testcase list
tclist = []
if not options.tests_list:
options.tests_list = os.path.join(os.path.dirname(__file__), "testplan", "iottest.manifest")
for each_manifest in options.tests_list.split():
with open(each_manifest, "r") as f:
tl = filter(lambda x: x and not x.startswith('#'),
[n.strip() for n in f.readlines()])
for x in tl:
tclist.append(x)
tc.testslist = tclist
    print(tc.testslist)
#add testsrequired for skipModule
tc.testsrequired = tc.testslist
deployDir = os.path.abspath(options.deploy_dir)
if not os.path.isdir(deployDir):
raise Exception("The path to DEPLOY_DIR does not exists: %s" % deployDir)
if options.machine:
machine = options.machine
else:
parser.error("Please specify target machine by -m")
if options.xunit:
try:
import xmlrunner
except Exception:
raise Exception(
"xUnit output requested but unittest-xml-reporting not installed")
unittest.TextTestRunner = wrap_runner(xmlrunner.XMLTestRunner, output=options.xunit)
if options.build_data:
build_data = options.build_data
else:
build_data = os.path.join(deployDir, "files", machine, "builddata.json")
#get build data from file
with open(build_data, "r") as f:
loaded = json.load(f)
#inject build datastore
d = MyDataDict()
if "d" in loaded:
for key in loaded["d"].keys():
d[key] = loaded["d"][key]
d["DEPLOY_DIR"], d["MACHINE"] = deployDir, machine
if options.log_dir:
d["TEST_LOG_DIR"] = os.path.abspath(options.log_dir)
else:
d["TEST_LOG_DIR"] = os.path.abspath(os.path.dirname(__file__))
    nativearch = os.popen("uname -m").read().strip()
    d["BUILD_ARCH"] = "x86_64" if not nativearch else nativearch
if options.nativearch:
d["BUILD_ARCH"] = options.nativearch
d["testslist"] = tc.testslist
setattr(tc, "d", d)
#inject build package manifest
pkgs = [pname.strip() for pname in loaded["pkgmanifest"]]
setattr(tc, "pkgmanifest", "\n".join(pkgs))
#inject target information
targets = []
targets_ip = options.ip if options.ip else ["192.168.7.2"]
server_ip = options.server_ip if options.server_ip else "192.168.7.1"
first = True
for ip in targets_ip:
target = FakeTarget(d)
buf_ip = ip.split(":", 1)
[target.ip, target.port] = buf_ip if len(buf_ip) == 2 else [buf_ip[0], "22"]
target.server_ip = server_ip
target.exportStart(first)
first = False
targets.append(target)
setattr(tc, "targets", targets)
setattr(tc, "target", targets[0])
setattr(oeRuntimeTest, "targets", targets)
#inject others
for key in loaded.keys():
if key not in ["testslist", "d", "target", "pkgmanifest"]:
setattr(tc, key, loaded[key])
setattr(tc, "tagexp", options.tag)
runner = RuntestTestContext(tc)
runner.loadTests()
runner.runTests()
return 0
if __name__ == "__main__":
ret = main()
sys.exit(ret)
|
|
"""
Methods for generating "perfect data" hessians.
"""
__docformat__ = "restructuredtext en"
import copy
import logging
logger = logging.getLogger('RxnNets.PerfectData')
import sets
import scipy
import scipy.integrate
import Dynamics
from SloppyCell import HAVE_PYPAR, my_rank, my_host, num_procs
if HAVE_PYPAR:
import pypar
def apply_func_to_traj(traj, func, only_nonderivs=False):
"""
Return a trajectory with func applied to each variable stored in the
trajectory
"""
    if only_nonderivs:
        keys = [key for key in traj.key_column.keys()
                if not isinstance(key, tuple)]
else:
keys = None
ret_traj = traj.copy_subset(keys)
for var, col in traj.key_column.items():
vals = func(traj, var)
ret_traj.values[:,col] = vals
return ret_traj
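# Illustrative call: build a log-transformed copy of a trajectory (the lambda
# is an assumption; any func(traj, var) returning per-timepoint values works):
#
#     log_traj = apply_func_to_traj(
#         traj, lambda tr, var: scipy.log(tr.get_var_traj(var)))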
def update_typical_vals(networks, int_times, rtol = 1e-9, fraction=1.0,
cutoff=1e-14):
"""
Update the typical var values for a group of networks.
Find the maximum of each variable over the integrations. In each network
the typical value is set to fraction of that maximum. If that maximum value
is less than cutoff, the typical value is set to 1.
networks List of networks to work with
int_times List of corresponding integration endpoints
(ie. [(0, 100), (0, 50)])
fraction Relative size of typical value, compared to max value over
the integrations.
rtol Relative tolerance for the integrations.
cutoff Values below this are considered to be zero
"""
max_vals = {}
for net, times in zip(networks, int_times):
traj = Dynamics.integrate(net, times, rtol=rtol, fill_traj=True)
for var_id in net.variables.keys():
curr_max = max(traj.get_var_traj(var_id))
max_vals[var_id] = max(curr_max, max_vals.get(var_id, 0))
for var_id, val in max_vals.items():
for net in networks:
if net.variables.has_key(var_id):
if val > cutoff:
net.set_var_typical_val(var_id, val*fraction)
else:
net.set_var_typical_val(var_id, 1.0)
return max_vals
def typ_val_uncert(fraction = 0.1, cutoff=1e-14):
"""
This is an uncertainty that is fraction of the variable's typical value.
"""
def sigmaFunc(traj, data_id):
sigma = traj.get_var_typical_val(data_id) * fraction
if sigma < cutoff:
logger.warn('sigma < cutoff value (%g) for variable %s! '
'Taking sigma = 1.' % (cutoff, data_id))
sigma = 1
return sigma
return sigmaFunc
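# Example pairing with discrete_data() below: 10% relative uncertainties
# (``net`` and ``params`` are assumed to already exist):
#
#     data = discrete_data(net, params, pts=20, interval=(0, 100),
#                          uncert_func=typ_val_uncert(0.1))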
def discrete_data(net, params, pts, interval, vars=None, random=False,
uncert_func=typ_val_uncert(0.1, 1e-14)):
"""
Return a set of data points for the given network generated at the given
parameters.
net Network to generate data for
params Parameters for this evaluation of the network
pts Number of data points to output
interval Integration interval
vars Variables to output data for, defaults to all species in net
random If False data points are distributed evenly over interval
If True they are spread randomly and uniformly over each
variable
uncert_func Function that takes in a trajectory and a variable id and
returns what uncertainty should be assumed for that variable,
either as a scalar or a list the same length as the trajectory.
"""
# Default for vars
if vars is None:
vars = net.species.keys()
# Assign observed times to each variable
var_times = {}
for var in vars:
if random:
var_times[var] = scipy.rand(pts) * (interval[1]-interval[0]) + interval[0]
else:
var_times[var] = scipy.linspace(interval[0], interval[1], pts)
# Create a sorted list of the unique times in the var_times dict
int_times = sets.Set(scipy.ravel(var_times.values()))
int_times.add(0)
int_times = list(int_times)
int_times.sort()
# Get the trajectory
traj = Dynamics.integrate(net, int_times, params=params, fill_traj=False)
# Build up our data dictionary
data = {}
for var, times in var_times.items():
var_data = {}
data[var] = var_data
# Calculate our uncertainties
var_uncerts = uncert_func(traj, var)
for time in times:
val = traj.get_var_val(var, time)
if scipy.isscalar(var_uncerts):
uncert = var_uncerts
else:
index = traj._get_time_index(time)
uncert = var_uncerts[index]
var_data[time] = (val, uncert)
return data
def hessian_log_params(sens_traj, data_ids=None, opt_ids=None,
fixed_sf=False, return_dict=False,
uncert_func=typ_val_uncert(1.0, 1e-14)):
"""
Calculate the "perfect data" hessian in log parameters given a sensitivity
trajectory.
sens_traj Sensitivity trajectory of Network being considered.
data_ids A sequence of variable id's to assume we have data for. If
data_ids is None, all dynamic and assigned variables will be
used.
opt_ids A sequence of parameter id's to calculate derivatives with
respect to. The hessian is (len(opt_ids) x len(opt_ids)).
If opt_ids is None, all optimizable variables are considered.
fixed_sf If True, calculate the hessian assuming fixed scale factors.
return_dict If True, returned values are (hess, hess_dict). hess_dict is a
dictionary keyed on the elements of data_ids; each corresponding
value is the hessian assuming data only on a single variable.
hess is the sum of all these hessians
uncert_func Function that takes in a trajectory and a variable id and
returns what uncertainty should be assumed for that variable,
either as a scalar or a list the same length as the trajectory.
"""
if data_ids is None:
data_ids = sens_traj.dynamicVarKeys + sens_traj.assignedVarKeys
if opt_ids is None:
opt_ids = sens_traj.optimizableVarKeys
data_sigmas = {}
for data_id in data_ids:
ds = uncert_func(sens_traj, data_id)
if scipy.isscalar(ds):
ds = scipy.zeros(len(sens_traj), scipy.float_) + ds
data_sigmas[data_id] = ds
vars_assigned = [data_ids[node::num_procs] for node in range(num_procs)]
for worker in range(1, num_procs):
logger.debug('Sending to worker %i.' % worker)
# reduce the amount we have to pickle
# The only things the worker needs in the sens_traj are those that
# refer to data_ids it has to deal with.
vars_needed = sets.Set(sens_traj.optimizableVarKeys)
vars_needed.union_update(vars_assigned[worker])
for var in vars_assigned[worker]:
vars_needed.union_update([(var, ov) for ov in opt_ids])
worker_traj = sens_traj.copy_subset(vars_needed)
# And the only uncertainties it needs have to do with those data_ids
worker_ds = dict([(var, data_sigmas[var])
for var in vars_assigned[worker]])
command = 'PerfectData.compute_sf_LMHessian_conts(sens_traj, data_ids,'\
'data_sigmas, opt_ids, fixed_sf)'
args = {'sens_traj': worker_traj, 'data_ids': vars_assigned[worker],
'data_sigmas': worker_ds, 'opt_ids': opt_ids,
'fixed_sf': fixed_sf}
pypar.send((command, args), worker)
hess_dict = compute_sf_LMHessian_conts(sens_traj, vars_assigned[0],
data_sigmas, opt_ids, fixed_sf)
for worker in range(1, num_procs):
logger.debug('Receiving from worker %i.' % worker)
hess_dict.update(pypar.receive(worker))
hess = scipy.sum(hess_dict.values(), axis=0)
if return_dict:
return hess, hess_dict
else:
return hess
def compute_sf_LMHessian_conts(sens_traj, data_ids, data_sigmas, opt_ids,
fixed_sf):
hess_dict = {}
for data_id in data_ids:
if fixed_sf:
sf_deriv = dict([(id, 0) for id in opt_ids])
else:
sf_deriv = get_sf_derivs(sens_traj, data_id, data_sigmas[data_id],
opt_ids)
hess_dict[data_id] = computeLMHessianContribution(sens_traj, data_id,
data_sigmas[data_id],
opt_ids, sf_deriv)
return hess_dict
def get_intervals(traj):
# We want to break up our integrals when events fire, so first we figure out
# when they fired by looking for duplicated times in the trajectory
times = traj.get_times()
eventIndices = scipy.compress(scipy.diff(times) == 0,
scipy.arange(len(times)))
intervals = zip([0] + list(eventIndices + 1),
list(eventIndices + 1) + [len(times)])
return intervals
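# Worked example: for timepoints [0, 1, 1, 2, 3] (an event fired at t=1),
# scipy.diff(times) == 0 at index 1, so get_intervals returns
# [(0, 2), (2, 5)] and each integral runs over a smooth piece.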
def get_sf_derivs(traj, dataId, data_sigma, optIds):
scaleFactorDerivs = {}
intTheorySq = 0
for start, end in get_intervals(traj):
y = traj.get_var_traj(dataId)[start:end]
times = traj.get_times()[start:end]
sigma = data_sigma[start:end]
value = scipy.integrate.simps((y/sigma)**2, times,
even='last')
intTheorySq += value
for optId in optIds:
optValue = abs(traj.get_var_traj(optId)[start:end])
sens = traj.get_var_traj((dataId, optId))[start:end]*optValue
numerator = scipy.integrate.simps(sens*y/sigma**2, times,
even='last')
scaleFactorDerivs.setdefault(optId, 0)
scaleFactorDerivs[optId] += -numerator
for optId in optIds:
if intTheorySq != 0:
scaleFactorDerivs[optId] /= intTheorySq
else:
scaleFactorDerivs[optId] = 0
return scaleFactorDerivs
def computeLMHessianContribution(traj, dataId, data_sigma, optIds,
scaleFactorDerivs):
LMHessian = scipy.zeros((len(optIds), len(optIds)), scipy.float_)
# We break up our integral at event firings.
for start, end in get_intervals(traj):
times = traj.timepoints[start:end]
y = traj.getVariableTrajectory(dataId)[start:end]
sigma = data_sigma[start:end]
for optIndex1, optId1 in enumerate(optIds):
# We convert our sensitivity trajectory to a sensitivity wrt the
            # log(abs()) by multiplying by the parameter value.
optValue = abs(traj.get_var_traj(optId1)[start:end])
sens1 = traj.get_var_traj((dataId, optId1))[start:end]*optValue
dB1 = scaleFactorDerivs[optId1]
for jj, optId2 in enumerate(optIds[optIndex1:]):
optIndex2 = jj + optIndex1
optValue = abs(traj.get_var_traj(optId2)[start:end])
sens2 = traj.get_var_traj((dataId, optId2))[start:end]*optValue
dB2 = scaleFactorDerivs[optId2]
integrand = (sens1 + dB1 * y) * (sens2 + dB2 * y)/ sigma**2
# We do the even='last' for speed. Otherwise, for an even number
# of points, simps does twice as much work as for an odd
# number.
                # In tests it really doesn't make much difference in accuracy.
value = scipy.integrate.simps(integrand, times,
even='last')
LMHessian[optIndex1][optIndex2] += value
if optIndex1 != optIndex2:
LMHessian[optIndex2][optIndex1] += value
LMHessian /= (traj.timepoints[-1] - traj.timepoints[0])
return LMHessian
|
|
"""
TODO:
Implement a test that proves file configs override rather than overwrite
the defaults. Unfortunately this functionality will have to be implemented
first.
"""
import os
from unittest import mock
import pytest
import requests
from pianodb.pianodb import (number_of_workers, gen_dummy_cmd, get_config,
get_track_features)
class MockPage:
def __init__(self, status_code=200, content=''):
self.status_code = status_code
self.content = content if content else """
<!-- https://www.pandora.com/great-jazz-trio/s-wonderful/take-5 -->
<div class="artist_name" title="The Great Jazz Trio">
<span>by</span>
<span itemprop="byArtist">
<a href="/great-jazz-trio" class="artist_link hash">The Great Jazz Trio</a>
</span>
</div>
<div class="album_title" title="'S Wonderful">
<span>on</span>
<a href="/great-jazz-trio/s-wonderful" itemprop="inAlbum" class="album_link hash">'S Wonderful</a>
</div>
<div class="song_features clearfix">
<h2>Features of This Track</h2>
a piano solo<br>
an acoustic bass solo<br>
a groove oriented approach<br>
vamping harmony<br>
<div style="display: none;">
unusual rhythms<br>
</div>
<p>These are just a few of the hundreds of attributes cataloged for this track by the Music Genome Project.</p>
<a href="#" class="show_more">show more</a>
</div>
"""
@mock.patch('pianodb.pianodb.multiprocessing')
def test_pianodb_number_of_workers_is_double_cpu_count_plus_one(mp):
"""
Test that ``pianodb`` determines the number of workers to be double the CPU
count plus one.
Note:
This test patches the multiprocessing.cpu_count function to return a
constant that does not depend on the actual CPU count.
"""
mp.cpu_count.return_value = 6
assert number_of_workers() == 13
def test_pianodb_can_generate_dummy_click_commands():
"""
Test that ``pianodb`` can generate dummy instances of ``Click.Command`` that
have the correct ``name``, ``help``, and ``short_help``.
"""
cmd = gen_dummy_cmd('dummy')
assert cmd.name == 'dummy'
assert cmd.help == ("This is an unimplimented pianobar eventcmd handler. "
"Calling this subcommand will do absolutely nothing.")
assert cmd.short_help == 'unimplimented pianobar eventcmd'
@mock.patch.dict(os.environ, {'HOME': '/home/cleesej'})
@mock.patch('builtins.open', create=True)
@mock.patch('tempfile.gettempdir')
@mock.patch('pianodb.pianodb.multiprocessing')
def test_pianodb_has_config_defaults(mp, tmpdir, mock_open):
"""
Test that ``pianodb`` has config defaults that are used when getting its
configuration. In the absence of an option defined in a config file the
``pianodb`` config should contain these defaults.
"""
database = '/home/cleesej/.config/pianobar/piano.db'
server_database = '/faketmp/piano.db'
# Pretend we have a CPU count of 4.
mp.cpu_count.return_value = 4
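    # With a mocked cpu_count of 4, the default worker count below is
    # 2 * 4 + 1 == 9 (see 'workers' in expected_config).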
# Pretend we have a fake temp dir.
tmpdir.return_value = '/faketmp'
# Pretend open will read a file with nothing in it.
mock_open.side_effect = [
mock.mock_open(read_data="").return_value,
]
# This is probably a good rationale for having a global default config dict.
expected_config = {
'client': {
'remote': None,
'threshold': 10,
'token': None,
'database': database,
},
'server': {
'interface': 'localhost',
'port': 8000,
'workers': 9,
'database': server_database,
}
}
# overrides: os.environ, os.path, open, multiprocessing.cpu_count
config = get_config()
assert config == expected_config
@mock.patch.dict(os.environ, {'HOME': '/home/cleesej'})
@mock.patch('builtins.open', create=True)
@mock.patch('tempfile.gettempdir')
@mock.patch('pianodb.pianodb.multiprocessing')
def test_pianodb_can_load_configs_from_optional_path(mp, tmpdir, mock_open):
"""
Test that ``pianodb`` can load a config file from a path other than
its own internal default by using the optional ``path`` argument.
"""
# Pretend we have a CPU count of 8.
mp.cpu_count.return_value = 8
# Pretend we have a fake temp dir.
    tmpdir.return_value = '/faketmp'
# Pretend open will read a file with nothing in it.
mock_open.side_effect = [
mock.mock_open(read_data="").return_value,
]
config = get_config(path='/spam/and/eggs')
mock_open.assert_called_once_with('/spam/and/eggs', 'r')
@mock.patch.dict(os.environ, {'HOME': '/home/cleesej'})
def test_pianodb_exits_fatally_without_a_config_file():
"""
Test that ``pianodb`` raises a ``SystemExit`` error with the appropriate
error message when attempting to load a nonexistent config.
"""
with pytest.raises(SystemExit) as err:
config = get_config(path='nonexistent')
assert str(err.value) == 'could not load config'
def test_pianodb_can_get_track_features(monkeypatch):
"""
Test that ``pianodb`` can extract track features from a specially formatted
web page.
"""
def _mock_page(url):
return MockPage()
monkeypatch.setattr(requests, 'get', _mock_page)
expected = [
'a piano solo',
'an acoustic bass solo',
'a groove oriented approach',
'vamping harmony',
'unusual rhythms',
]
assert get_track_features('https://fake-url.tld') == expected
def test_pianodb_track_features_empty_if_status_code_is_not_200(monkeypatch):
"""
Test that ``pianodb`` track features are empty when ``requests`` returns
a ``status_code`` that is not ``200``.
"""
def _mock_page(url):
return MockPage(status_code=418, content='teapot')
monkeypatch.setattr(requests, 'get', _mock_page)
assert get_track_features('https://fake-url.tld') == []
def test_pianodb_track_features_empty_if_requests_connection_error(monkeypatch):
"""
Test that ``pianodb`` track features are empty when ``requests`` raises a
``ConnectionError``.
"""
def _raise_connection_error(url):
raise requests.ConnectionError()
monkeypatch.setattr(requests, 'get', _raise_connection_error)
assert get_track_features('https://fake-url.tld') == []
|
|
import os
import json
import sys
import shlex
import subprocess
import logging
import itertools, collections
import re
om_script = "model2dae.mos" # name of generated Open Modelica script
class CygwinError(Exception):
"""Exception raised for dealing with Cygwin.
Attributes:
msg -- explanation of the error
"""
def __init__(self, msg):
self.msg = msg
log = logging.getLogger(__name__)  # module-level logger; error() is called outside main()
def error(msg):
    log.critical('{} -- exiting.'.format(msg))
    sys.exit(1)
def getHSalExe():
if sys.platform.startswith('win'): # windows
import _winreg
analysis_tools_key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
r'SOFTWARE\Wow6432Node\META\AnalysisTools',
0,
_winreg.KEY_READ | _winreg.KEY_WOW64_32KEY)
number_of_keys = _winreg.QueryInfoKey(analysis_tools_key)[0] # 0 means number of sub_keys
for sub_key_id in range(0, number_of_keys):
sub_key_name = _winreg.EnumKey(analysis_tools_key, sub_key_id)
if sub_key_name == "HybridSal":
sub_key = _winreg.OpenKey(analysis_tools_key, sub_key_name)
number_of_values = _winreg.QueryInfoKey(sub_key)[1]
for value_id in range(0, number_of_values):
value_tuple = _winreg.EnumValue(sub_key, value_id)
                    if value_tuple[0] == "InstallLocation": # we found what we were looking for
return os.path.join(value_tuple[1], "bin", "hsalRA.exe")
return None
def updateEnviron(log,varname,value):
if varname in os.environ:
os.environ[varname] += os.pathsep + value
log.debug(" Added {} to existing {} environment variable.".format(value,varname))
else:
os.environ.update({varname:value})
log.debug(" Created envrionment variable {}: {}".format(varname,os.environ[varname]))
def printStdOutErr(log,name,stdout,stderr):
if stdout: # not empty
print
log.info(" === start of {} STDOUT ===\n{}".format(name,stdout.strip()))
log.info(" === end of {} STDOUT ===".format(name))
if stderr: # not empty
print
log.info(" === start of {} STDERR ===\n{}".format(name,stderr.strip()))
log.info(" === end of {} STDERR ===".format(name))
def main():
# -------------------------------------------------
# set up logging
if not os.path.isdir('log'):
os.mkdir('log')
# set up logging to file - see previous section for more details
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)-8s %(message)s',
#datefmt='%m-%d %H:%M',
filename='log/HybridSal_runner.log',
filemode='w')
# define a handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# tell the handler to use simpler format
console.setFormatter(logging.Formatter('%(levelname)-8s %(message)s'))
# add the handler to the main logger
log = logging.getLogger(__name__)
log.addHandler(console)
# -------------------------------------------------
log.info("* Identify test bench for verification...")
cyphy_dir = os.path.normpath("CyPhy")
if not os.path.isdir(cyphy_dir):
error('Couldn\'t find directory {}'.format(cyphy_dir))
# read model config data
model_config = os.path.normpath(os.path.join(cyphy_dir,"model_config.json"))
if not os.path.isfile(model_config):
error('Couldn\'t find file {}'.format(model_config))
with open(model_config,'r') as f:
data = json.load(f)
log.info(" Read configuration from file {}".format(model_config))
#print json.dumps(data,indent=2)
# setup OPENMODELICALIBRARY (if any library paths are given)
lib_paths = ""
# supports relative paths and adds the package paths if exist
for lib_path in data["lib_package_paths"]:
lib_full_path = os.path.abspath(os.path.join(cyphy_dir,lib_path))
if os.path.exists(lib_full_path):
log.debug(" Found additional library path {}".format(lib_full_path))
lib_paths += lib_full_path
lib_paths += os.pathsep
else:
log.warning("This library path does not exist, so not adding to env: {}".format(lib_full_path))
if lib_paths: # not empty
if 'OPENMODELICALIBRARY' in os.environ:
os.environ['OPENMODELICALIBRARY'] += os.pathsep + lib_paths
log.debug(" Added paths to existing OPENMODELICALIBRARY environment variable.")
else:
if os.name == 'nt':
om_std_lib = os.path.join(os.environ['OPENMODELICAHOME'],"lib","omlibrary")
elif os.name == 'posix':
om_std_lib = os.sep + os.path.join("usr","lib","omlibrary")
else:
error('Only Windows and Linux are supported.')
om_lib = {'OPENMODELICALIBRARY':'{}{}{}'.format(om_std_lib,os.pathsep,lib_paths)}
os.environ.update(om_lib)
log.debug(" Created envrionment variable OPENMODELICALIBRARY: {}".format(os.environ['OPENMODELICALIBRARY']))
# write .mos file from data
model_name = data["verification_model_name"]
result_prefix = model_name.split('.')[2]
om_script_path = os.path.join(cyphy_dir,om_script)
with open(om_script_path,'w') as f:
print >> f, '''
echo(false);
setCommandLineOptions("+debug=failtrace");
setCommandLineOptions("+showErrorMessages");
loadModel(Modelica, {{"{}"}});
loadFile("{}");'''.format(data["MSL_version"],data["model_file_name"])
for name in data["lib_package_names"]:
f.write('loadModel({});\n'.format(name))
print >> f, '''dumpXMLDAE({},"optimiser",addMathMLCode=true,fileNamePrefix="{}");
getErrorString();
'''.format(model_name,result_prefix)
log.info(" Written Open Modelica script to file {}".format(om_script_path))
# -------------------------------------------------
print
log.info("* Translate test bench to DAEs using Open Modelica...")
daexml_file = os.path.abspath(os.path.join(cyphy_dir,result_prefix+".xml"))
if sys.platform.startswith('win'): # windows
omc_path = os.path.normpath(os.path.join(os.environ["OPENMODELICAHOME"], "bin", "omc.exe"))
if not os.path.isfile(omc_path):
error('Couldn\'t find Open Modelica compiler {}'.format(omc_path))
cmd = "{} {}".format(omc_path,om_script).split()
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cyphy_dir)
(stdout, stderr) = process.communicate()
printStdOutErr(log,"omc.exe",stdout,stderr)
log.debug(" Called 'omc.exe' with script {}".format(os.path.join(cyphy_dir,om_script)))
log.debug(" This should have created file {}".format(daexml_file))
else:
log.warning("Skipping omc.exe because not in Windows!")
# -------------------------------------------------
print
log.info("* Run HybridSal on DAE xml...")
if not os.path.isfile(daexml_file):
error('Couldn\'t find DAE XML file\n {}'.format(daexml_file))
log.debug(" Found DAE XML file at {}".format(daexml_file))
if sys.platform.startswith('win'):
hsal_exe = getHSalExe()
if not os.path.isfile(hsal_exe):
error('Couldn\'t find HybridSal executable at\n {}'.format(hsal_exe))
log.debug(" Found HybridSal at {}".format(hsal_exe))
# updating PATH and Java classpath... (still need to run from hsal_dir!)
hsal_dir = os.path.dirname(hsal_exe)
updateEnviron(log,'PATH',hsal_dir)
hsal2xml_jar = os.path.join(hsal_dir,'hybridsal2xml.jar')
updateEnviron(log,'CLASSPATH',hsal2xml_jar)
log.debug(" Running HybridSal on file {}".format(daexml_file))
#cmd = "{} {} {}".format(hsal_exe,daexml_file,os.path.abspath(os.path.join(cyphy_dir,"msd_prop.json"))).split() # TODO: remove this!
cmd = "{} {}".format(hsal_exe,daexml_file).split()
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=hsal_dir)
(stdout, stderr) = process.communicate()
printStdOutErr(log,"HybridSal",stdout,stderr)
# -------------------------------------------------
print
log.info("* Processing the result...")
# read manifest
summary_file = os.path.join(os.path.dirname(cyphy_dir),'testbench_manifest.json')
summary_results = {}
with open(summary_file,'r') as f:
summary_results = json.load(f)
# update status
if process.returncode: # HybridSal created a non-zero return code
log.warning('HybridSal failed with code {}'.format(process.returncode))
summary_results['Status'] = 'FAILED'
else:
summary_results['Status'] = 'OK'
if not 'FormalVerification' in summary_results:
summary_results['FormalVerification'] = []
verif_results = summary_results['FormalVerification']
hsal_results = {'Source': 'SRI', 'Result': 'UNKNOWN', 'ReqName': '-unknown-', 'Details': []}
# obtain "ReqName" entry (if it exists)
limits_file = os.path.abspath(os.path.join(cyphy_dir,"limits.json"))
if os.path.isfile(limits_file):
log.debug(" Found limits in file {}".format(limits_file))
limits = {}
with open(limits_file,'r') as f:
limits = json.load(f)
if 'LimitChecks' in limits:
log.debug(" Found limit checks")
checks = limits['LimitChecks'][0] # TODO: more checks on items in this list!
if 'ModelicaRecordName' in checks:
log.debug(" Found 'ModelicaRecordName' in limit checks")
hsal_results['ReqName'] = checks['ModelicaRecordName']
# process results
sal_output_file = os.path.abspath(os.path.join(cyphy_dir,result_prefix+"ModelResult.txt"))
with open(sal_output_file,'r') as f:
sal_output = f.read()
if 'no counterexample' in sal_output.lower():
hsal_results['Result'] = 'SUCCESS'
log.info(" Result: no counterexample")
elif 'counterexample' in sal_output.lower():
hsal_results['Result'] = 'FAIL'
# parse counterexample into data structure for "GroupBody":
group_body = []
# markers and collectors in loop:
step = None
transition = None
assigns = {}
information = ''
# patterns for parsing:
stepRE = re.compile(r'Step\s+(\d+):') # Step <N>:
assignRE = re.compile(r'---\s+(.+)\s+---') # --- <assignments title> ---
varsRE = re.compile(r'(\S+)\s+=\s+(\S+)') # <var> = <value>
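        # Hypothetical sketch of the SAL output these patterns expect:
        #   Step 0:
        #   --- System Variables (assignments) ---
        #   x = 0
        #   ------------------------------
        #   Transition Information:
        #   ...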
# loop over lines (plus a final delimiter)
sal_lines = sal_output.splitlines()
sal_lines.append('-') # last delimiter to make loop work
        iterator = iter(range(len(sal_lines)))
for i in iterator:
line = sal_lines[i]
# skip blank or certain lines:
if not line.strip() or line.startswith(('Counterexample','=','Path')):
log.debug(" -- skipping: {}".format(line))
continue
stepM = stepRE.match(line)
if stepM:
# parse "Step" information:
step = int(stepM.group(1))
transition = None
log.debug(" -- found step {}".format(step))
# consume next line:
i = next(iterator)
line = sal_lines[i]
assignM = assignRE.match(line)
if not assignM:
log.warning(" Couldn't find assignment title at {}: {}".format(i,line))
elif step is not None:
# collect assignments until reaching line that starts with "-" or end
varsM = varsRE.match(line)
if line.startswith('-'):
group_body.append({'Step':step, assignM.group(1):assigns})
assigns = {} # reset
transition = step
step = None
elif varsM:
assigns[varsM.group(1)] = varsM.group(2)
else:
log.warning(" Couldn't find var assignment at {}: {}".format(i,line))
elif transition is not None:
if line.startswith('-'):
group_body.append({'Transition':'{} -> {}'.format(transition,transition+1), 'Information':information})
transition = None
elif line.startswith('Transition Information:'):
information = '' # reset
else:
# collect transition information
information += line
else:
log.warning(" Cannot parse Counterexample line at {}: {}".format(i,line))
hsal_results['Details'].append({'GroupTitle': 'Counterexample generated by HybridSal with these steps and transitions','GroupBody': group_body})
log.info(" Result: counterexample found...\n"+sal_output)
else:
log.warning(" No result found!")
# update verification results
mapping = dict((item['Source'], item) for item in verif_results)
mapping['SRI'] = hsal_results # this will replace prior entries, if present
# write results back
summary_results['FormalVerification'] = mapping.values()
with open(summary_file,'w') as f:
json.dump(summary_results,f,indent=2)
log.info(" Summary results file written as {}".format(summary_file))
#print json.dumps(summary_results['FormalVerification'],indent=2) # TODO: remove after testing
else:
log.warning("Skipping hsalRA.exe because not in Windows!")
if __name__ == "__main__":
main()
|
|
from __future__ import unicode_literals
from django.conf.urls import patterns, url, include
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.test import TestCase
from django.utils import six
from rest_framework import HTTP_HEADER_ENCODING
from rest_framework import exceptions
from rest_framework import permissions
from rest_framework import renderers
from rest_framework.response import Response
from rest_framework import status
from rest_framework.authentication import (
BaseAuthentication,
TokenAuthentication,
BasicAuthentication,
SessionAuthentication,
)
from rest_framework.authtoken.models import Token
from rest_framework.test import APIRequestFactory, APIClient
from rest_framework.views import APIView
import base64
factory = APIRequestFactory()
class MockView(APIView):
permission_classes = (permissions.IsAuthenticated,)
def get(self, request):
return HttpResponse({'a': 1, 'b': 2, 'c': 3})
def post(self, request):
return HttpResponse({'a': 1, 'b': 2, 'c': 3})
def put(self, request):
return HttpResponse({'a': 1, 'b': 2, 'c': 3})
urlpatterns = patterns(
'',
(r'^session/$', MockView.as_view(authentication_classes=[SessionAuthentication])),
(r'^basic/$', MockView.as_view(authentication_classes=[BasicAuthentication])),
(r'^token/$', MockView.as_view(authentication_classes=[TokenAuthentication])),
(r'^auth-token/$', 'rest_framework.authtoken.views.obtain_auth_token'),
url(r'^auth/', include('rest_framework.urls', namespace='rest_framework'))
)
class BasicAuthTests(TestCase):
"""Basic authentication"""
urls = 'tests.test_authentication'
def setUp(self):
self.csrf_client = APIClient(enforce_csrf_checks=True)
self.username = 'john'
self.email = 'lennon@thebeatles.com'
self.password = 'password'
self.user = User.objects.create_user(self.username, self.email, self.password)
def test_post_form_passing_basic_auth(self):
"""Ensure POSTing json over basic auth with correct credentials passes and does not require CSRF"""
credentials = ('%s:%s' % (self.username, self.password))
base64_credentials = base64.b64encode(credentials.encode(HTTP_HEADER_ENCODING)).decode(HTTP_HEADER_ENCODING)
auth = 'Basic %s' % base64_credentials
response = self.csrf_client.post('/basic/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_post_json_passing_basic_auth(self):
"""Ensure POSTing form over basic auth with correct credentials passes and does not require CSRF"""
credentials = ('%s:%s' % (self.username, self.password))
base64_credentials = base64.b64encode(credentials.encode(HTTP_HEADER_ENCODING)).decode(HTTP_HEADER_ENCODING)
auth = 'Basic %s' % base64_credentials
response = self.csrf_client.post('/basic/', {'example': 'example'}, format='json', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_post_form_failing_basic_auth(self):
"""Ensure POSTing form over basic auth without correct credentials fails"""
response = self.csrf_client.post('/basic/', {'example': 'example'})
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_post_json_failing_basic_auth(self):
"""Ensure POSTing json over basic auth without correct credentials fails"""
response = self.csrf_client.post('/basic/', {'example': 'example'}, format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response['WWW-Authenticate'], 'Basic realm="api"')
class SessionAuthTests(TestCase):
"""User session authentication"""
urls = 'tests.test_authentication'
def setUp(self):
self.csrf_client = APIClient(enforce_csrf_checks=True)
self.non_csrf_client = APIClient(enforce_csrf_checks=False)
self.username = 'john'
self.email = 'lennon@thebeatles.com'
self.password = 'password'
self.user = User.objects.create_user(self.username, self.email, self.password)
def tearDown(self):
self.csrf_client.logout()
def test_login_view_renders_on_get(self):
"""
Ensure the login template renders for a basic GET.
cf. [#1810](https://github.com/tomchristie/django-rest-framework/pull/1810)
"""
response = self.csrf_client.get('/auth/login/')
self.assertContains(response, '<label for="id_username">Username:</label>')
def test_post_form_session_auth_failing_csrf(self):
"""
Ensure POSTing form over session authentication without CSRF token fails.
"""
self.csrf_client.login(username=self.username, password=self.password)
response = self.csrf_client.post('/session/', {'example': 'example'})
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_post_form_session_auth_passing(self):
"""
Ensure POSTing form over session authentication with logged in user and CSRF token passes.
"""
self.non_csrf_client.login(username=self.username, password=self.password)
response = self.non_csrf_client.post('/session/', {'example': 'example'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_put_form_session_auth_passing(self):
"""
Ensure PUTting form over session authentication with logged in user and CSRF token passes.
"""
self.non_csrf_client.login(username=self.username, password=self.password)
response = self.non_csrf_client.put('/session/', {'example': 'example'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_post_form_session_auth_failing(self):
"""
Ensure POSTing form over session authentication without logged in user fails.
"""
response = self.csrf_client.post('/session/', {'example': 'example'})
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
class TokenAuthTests(TestCase):
"""Token authentication"""
urls = 'tests.test_authentication'
def setUp(self):
self.csrf_client = APIClient(enforce_csrf_checks=True)
self.username = 'john'
self.email = 'lennon@thebeatles.com'
self.password = 'password'
self.user = User.objects.create_user(self.username, self.email, self.password)
self.key = 'abcd1234'
self.token = Token.objects.create(key=self.key, user=self.user)
def test_post_form_passing_token_auth(self):
"""Ensure POSTing json over token auth with correct credentials passes and does not require CSRF"""
auth = 'Token ' + self.key
response = self.csrf_client.post('/token/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_post_json_passing_token_auth(self):
"""Ensure POSTing form over token auth with correct credentials passes and does not require CSRF"""
auth = "Token " + self.key
response = self.csrf_client.post('/token/', {'example': 'example'}, format='json', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_post_json_makes_one_db_query(self):
"""Ensure that authenticating a user using a token performs only one DB query"""
auth = "Token " + self.key
def func_to_test():
return self.csrf_client.post('/token/', {'example': 'example'}, format='json', HTTP_AUTHORIZATION=auth)
self.assertNumQueries(1, func_to_test)
def test_post_form_failing_token_auth(self):
"""Ensure POSTing form over token auth without correct credentials fails"""
response = self.csrf_client.post('/token/', {'example': 'example'})
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_post_json_failing_token_auth(self):
"""Ensure POSTing json over token auth without correct credentials fails"""
response = self.csrf_client.post('/token/', {'example': 'example'}, format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_token_has_auto_assigned_key_if_none_provided(self):
"""Ensure creating a token with no key will auto-assign a key"""
self.token.delete()
token = Token.objects.create(user=self.user)
self.assertTrue(bool(token.key))
def test_generate_key_returns_string(self):
"""Ensure generate_key returns a string"""
token = Token()
key = token.generate_key()
self.assertTrue(isinstance(key, six.string_types))
def test_token_login_json(self):
"""Ensure token login view using JSON POST works."""
client = APIClient(enforce_csrf_checks=True)
response = client.post('/auth-token/',
{'username': self.username, 'password': self.password}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['token'], self.key)
def test_token_login_json_bad_creds(self):
"""Ensure token login view using JSON POST fails if bad credentials are used."""
client = APIClient(enforce_csrf_checks=True)
response = client.post('/auth-token/',
{'username': self.username, 'password': "badpass"}, format='json')
self.assertEqual(response.status_code, 400)
def test_token_login_json_missing_fields(self):
"""Ensure token login view using JSON POST fails if missing fields."""
client = APIClient(enforce_csrf_checks=True)
response = client.post('/auth-token/',
{'username': self.username}, format='json')
self.assertEqual(response.status_code, 400)
def test_token_login_form(self):
"""Ensure token login view using form POST works."""
client = APIClient(enforce_csrf_checks=True)
response = client.post('/auth-token/',
{'username': self.username, 'password': self.password})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['token'], self.key)
class IncorrectCredentialsTests(TestCase):
def test_incorrect_credentials(self):
"""
If a request contains bad authentication credentials, then
authentication should run and error, even if no permissions
are set on the view.
"""
class IncorrectCredentialsAuth(BaseAuthentication):
def authenticate(self, request):
raise exceptions.AuthenticationFailed('Bad credentials')
request = factory.get('/')
view = MockView.as_view(
authentication_classes=(IncorrectCredentialsAuth,),
permission_classes=()
)
response = view(request)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(response.data, {'detail': 'Bad credentials'})
class FailingAuthAccessedInRenderer(TestCase):
def setUp(self):
class AuthAccessingRenderer(renderers.BaseRenderer):
media_type = 'text/plain'
format = 'txt'
def render(self, data, media_type=None, renderer_context=None):
request = renderer_context['request']
if request.user.is_authenticated():
return b'authenticated'
return b'not authenticated'
class FailingAuth(BaseAuthentication):
def authenticate(self, request):
raise exceptions.AuthenticationFailed('authentication failed')
class ExampleView(APIView):
authentication_classes = (FailingAuth,)
renderer_classes = (AuthAccessingRenderer,)
def get(self, request):
return Response({'foo': 'bar'})
self.view = ExampleView.as_view()
def test_failing_auth_accessed_in_renderer(self):
"""
When authentication fails the renderer should still be able to access
`request.user` without raising an exception. Particularly relevant
to HTML responses that might reasonably access `request.user`.
"""
request = factory.get('/')
response = self.view(request)
content = response.render().content
self.assertEqual(content, b'not authenticated')
|
|
import difflib
import sqlite3
import json
from contextlib import closing
from Template import SQL
# from TX74 import TextContent # difflib import here
class Base(object):
'''universal holder for raw data
SQLite: data means a table, type = sql
JSON:
CSV
XML
'''
def __init__(self, data='', type=''):
if type == 'sql':
pass
class DataBase(object):
"""db_path can be a log_file, it creates record in sqlite in the same path"""
def __init__(self, db_path, db_type="sqlite", active=False):
self.active = active
self.sql = SQL.select_tables_in_db
if not db_path:
print('db path not selected, running in memory...')
if '.log' in db_path:
self.db_file = db_path.replace('.log', '.sqlite')
else:
self.db_file = db_path
if any(db_type in s for s in ['ctb', 'sqlite3', 'db']):
self.type = 'sqlite3'
elif any(db_type in s for s in ['xml', 'xslt', 'rss']):
self.type = 'xml'
else:
self.type = 'not_defined'
if self.active:
self.obj_conn = sqlite3.connect(db_path)
print('successfully connected to database ', db_path)
# else:
# print('database', db_path, 'without active connection')
self.obj_list = self.return_many(SQL.select_tables_in_db)
self.view_list = self.return_many(SQL.select_views_in_db)
def result_set(self, sql, just_one=True):
if self.active:
execution = self.obj_conn.execute(sql)
try:
if just_one:
return execution.fetchone()
else:
return execution.fetchall()
            except Exception:
                return None
else:
try:
conn = sqlite3.connect(self.db_file)
if just_one:
result = conn.execute(sql).fetchone()
else:
result = conn.execute(sql).fetchall()
conn.close()
return result
except sqlite3.OperationalError:
pass
#print('error on:', sql)
def execute(self, sql):
try:
if self.active:
self.obj_conn.execute(sql)
self.obj_conn.commit()
else:
conn = sqlite3.connect(self.db_file)
conn.cursor().execute(sql)
conn.commit()
conn.close()
except Exception as ex:
            print('some exception occurred:', str(ex.args), ':', sql)
def return_one(self, sql):
return self.result_set(sql, just_one=True)
def return_many(self, sql):
return self.result_set(sql, just_one=False)
def return_field_content(self, table, field, condition):
return self.return_one(SQL.select_where.format(field, table, condition))
    def object_exist(self, object_name):
        result = self.return_one(SQL.table_exist.format(object_name))
        return bool(result and result[0])
def jsonize(self, db_obj=''):
if db_obj:
columns = [x.split()[0] for x in self.object_structure(db_obj)]
json_content = [
dict(zip(columns, row)) for row in self.object_all_rows(db_obj)
]
return json.dumps(json_content)
        else:
            for obj_name, obj_type in self.obj_list:
                columns = [x.split()[0] for x in self.object_structure(obj_name)]
                json_path = self.db_file + '.' + obj_name + '.json'
                with open(json_path, 'w+') as stream:
                    print('#' * 5, 'json write file...', json_path)
                    stream.write(json.dumps([dict(zip(columns, row))
                                             for row in self.object_all_rows(obj_name)]))
def object_structure(self, object_name, object_type=''):
if not object_type:
result = self.return_one(SQL.table_structure.format(object_name, object_name))[0]
else:
result = self.return_many(SQL.table_structure_type.format(object_name, object_name, object_type))[0]
result = str(result).replace('\r\n', '').replace('\n', '') # remove line breaks first
if result.lower().startswith('create view'):
result = str(result).replace(' from ', ' FROM ').replace(' select ', ' SELECT ').replace(' as ', ' AS ')
print(result)
full_field_list = result.split(' SELECT ')[1].split(' FROM ')[0].split(',')
field_list = [field.split('.')[-1] for field in full_field_list]
elif result.lower().startswith('create table'):
field_list = result.split('(')[1].split(')')[0].split(',')
# clear the list from spaces
return [field.strip() for field in field_list]
def object_create(self, object_name):
if not self.return_one(SQL.table_exist.format(object_name)):
print('creating object: ' + object_name)
def object_all_rows(self, object_name):
return self.return_many(SQL.select.format('*', object_name))
def log_to_database(self, table_name, sql, ddl=''):
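        """Run an INSERT statement, creating table_name first if needed.

        The time stamp and object name are pulled back out of the VALUES
        clause so that already-logged rows are skipped.
        """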
if len(sql.split('VALUES (')[-1].split(','))>2:
time_stamp = sql.split('VALUES (')[-1].split(',')[-3].strip()
object_name = sql.split('VALUES (')[-1].split(',')[0].strip()
else:
time_stamp = sql.split('VALUES (')[-1].split(',')[-2].replace('"','')
object_name = sql.split('VALUES (')[0].split(',')[-1].split(')')[0]
if not self.object_exist(table_name):
if 'log' in table_name.lower():
ddl_command = SQL.table_ddl_log
elif 'dirlist' in table_name.lower():
ddl_command = SQL.table_ddl_dir
elif ddl:
ddl_command = ddl
            else:
                print('table does not exist; please submit a table DDL command')
                return
self.execute(SQL.table_ddl.format(table_name, ddl_command))
print('table ' + table_name + ' created')
if not self.return_one(SQL.log_value_exist.format(table_name, time_stamp, object_name)):
self.execute(sql)
else:
print('table {0} already contains ({1})'.format(table_name, time_stamp))
def determine_id_col(self, table, field=''):
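        """With `field` given, return its index in the table's column list;
        otherwise return the name of the first column whose definition
        mentions 'unique' or 'id'."""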
i = 0
for column in self.object_structure(table):
if field:
if '\t' in column.split(' ')[0]:
column = column.replace('\t', ' ').replace('`', '').strip()
if field == column.split(' ')[0]:
return i
else:
i += 1
elif 'unique' in column.lower() or 'id' in column.lower():
if '\t' in column.split(' ')[0]:
column = column.replace('\t', ' ').replace('`', '').strip()
return column.split(' ')[0]
def dump_it(what='', where=''):
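    """Dump the SQLite database file `what` as SQL text into file `where`."""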
try:
con = sqlite3.connect(what)
with open(where, 'w', encoding="utf-8") as f:
# PATH # 1
for line in con.iterdump():
f.write(f'{line}\n')
print(line)
# PATH # 2
#con.backup()
    except Exception as ex:
        print(ex)
def get_query_type(sql, qry_type):
"""get table name from SQL text"""
if qry_type == 'DDL':
print('CREATE/DROP/ALTER/RENAME TABLE')
elif qry_type == 'DML':
print('INSERT/UPDATE/DELETE/SELECT TABLE')
elif qry_type == 'DCL':
print('GRANT/REVOKE')
return 'sql result'
def is_data_type(field):
    """Guess a column's data type (int, float, or str) from its records."""
    # https://stackoverflow.com/questions/47072285/determine-data-type-from-csv-python
    for record in field:
try:
int(record)
except ValueError:
pass
else:
return int
try:
float(record)
except ValueError:
return str
else:
return float
def jsonize_tab(sqlite_cols, sqlite_content):
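    """Zip column names onto every row: [{'col': value, ...}, ...].
    Note: `sqlite_cols` is expected to be dict-like (it must expose .keys())."""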
# print(json.dumps(json_content))
return [dict(zip(sqlite_cols.keys(), row)) for row in sqlite_content]
def query_db(file, table='', query=''):
    '''expects sqlite file to query
    %table: returns table content, otherwise leave empty
    %query: if query submitted, returns result set'''
    dataset = None
    try:  # connect, query, and let closing() sanitize the handles
        with closing(sqlite3.connect(file)) as connection:
            print('Connect to', file, 'successfully.')
            with closing(connection.cursor()) as cursor:
                if table:
                    dataset = cursor.execute(f'SELECT * FROM {table};').fetchall()
                elif query:
                    dataset = cursor.execute(query).fetchall()
                else:
                    dataset = cursor.execute('SELECT tbl_name, type FROM sqlite_master;').fetchall()
    except sqlite3.Error as e:
        print(e)
    return dataset
def temp_connect_database(database, do_some_work=''):
# connect to database
db = DataBase(database, active=False)
if not do_some_work:
do_some_work = 'explore'
print(db.obj_list)
def compare_databases(db1, db2, concrete_table=''):
db_left = DataBase(db1)
db_right = DataBase(db2)
table_list = db_left.obj_list
if concrete_table:
one_table_list = [x for x in table_list if concrete_table in x]
table_list = one_table_list
for table in table_list:
print('*' * 100)
print('*' * 100)
if 'sqlite_sequence' in table:
continue
if not db_right.object_exist(table[0]):
            print('missing table "{0}" in {1}'.format(table[0], db2))
else:
id_col = db_left.determine_id_col(table[0])
id_col_id = db_left.determine_id_col(table[0], id_col)
print("""table - {0} - ID column identified - {1}
(index position {2})""".format(table[0], id_col, id_col_id))
for row in db_left.object_all_rows(table[0]):
where = id_col + ' = ' + str(row[id_col_id])
col_num = 0
for column in db_left.object_structure(table[0]):
if id_col in column or 'ts_' in column:
col_num += 1
continue
mirror = db_right.return_field_content(table[0], column.split(' ')[0], where)
if not mirror:
print('{0} !cannot get mirrored column: {1} for row: {2}'.format(' ' * 5, column, where))
continue
if difflib.SequenceMatcher(a=row[col_num].lower(), b=mirror[0].lower()).ratio() < 1:
try:
print('=' * 100)
print(row[col_num])
print('-' * 100)
print(mirror[0])
print('=' * 100)
except Exception as ex:
print(ex.args[0].replace('\n', ' '))
col_num += 1
if __name__ == '__main__':
dump_it('C:\\_Run\\Script\\System\\H808E.ctb', 'C:\\_Run\\Script\\Structure\\Database\\H808E.sql')
|
|
# encoding: utf-8
from __future__ import unicode_literals
import re
from django.template import Context, loader
from django.utils import datetime_safe, six
from haystack.exceptions import SearchFieldError
from haystack.utils import get_model_ct_tuple
class NOT_PROVIDED:
pass
DATETIME_REGEX = re.compile(r'^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})(T|\s+)(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2}).*?$')
# All the SearchFields variants.
class SearchField(object):
"""The base implementation of a search field."""
field_type = None
def __init__(self, model_attr=None, use_template=False, template_name=None,
document=False, indexed=True, stored=True, faceted=False,
default=NOT_PROVIDED, null=False, index_fieldname=None,
facet_class=None, boost=1.0, weight=None):
# Track what the index thinks this field is called.
self.instance_name = None
self.model_attr = model_attr
self.use_template = use_template
self.template_name = template_name
self.document = document
self.indexed = indexed
self.stored = stored
self.faceted = faceted
self._default = default
self.null = null
self.index_fieldname = index_fieldname
self.boost = weight or boost
self.is_multivalued = False
# We supply the facet_class for making it easy to create a faceted
# field based off of this field.
self.facet_class = facet_class
if self.facet_class is None:
self.facet_class = FacetCharField
self.set_instance_name(None)
def set_instance_name(self, instance_name):
self.instance_name = instance_name
if self.index_fieldname is None:
self.index_fieldname = self.instance_name
def has_default(self):
"""Returns a boolean of whether this field has a default value."""
return self._default is not NOT_PROVIDED
@property
def default(self):
"""Returns the default value for the field."""
if callable(self._default):
return self._default()
return self._default
def prepare(self, obj):
"""
Takes data from the provided object and prepares it for storage in the
index.
"""
# Give priority to a template.
if self.use_template:
return self.prepare_template(obj)
elif self.model_attr is not None:
# Check for `__` in the field for looking through the relation.
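            # e.g. a (hypothetical) model_attr of 'author__name' resolves
            # obj.author.name.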
attrs = self.model_attr.split('__')
current_object = obj
for attr in attrs:
if not hasattr(current_object, attr):
raise SearchFieldError("The model '%s' does not have a model_attr '%s'." % (repr(obj), attr))
current_object = getattr(current_object, attr, None)
if current_object is None:
if self.has_default():
current_object = self._default
                    # Fall out of the loop, since any further attempts at
                    # access will fail miserably.
break
elif self.null:
current_object = None
                    # Fall out of the loop, since any further attempts at
                    # access will fail miserably.
break
else:
raise SearchFieldError("The model '%s' has an empty model_attr '%s' and doesn't allow a default or null value." % (repr(obj), attr))
if callable(current_object):
return current_object()
return current_object
if self.has_default():
return self.default
else:
return None
def prepare_template(self, obj):
"""
Flattens an object for indexing.
This loads a template
(``search/indexes/{app_label}/{model_name}_{field_name}.txt``) and
returns the result of rendering that template. ``object`` will be in
its context.
"""
if self.instance_name is None and self.template_name is None:
raise SearchFieldError("This field requires either its instance_name variable to be populated or an explicit template_name in order to load the correct template.")
if self.template_name is not None:
template_names = self.template_name
if not isinstance(template_names, (list, tuple)):
template_names = [template_names]
else:
app_label, model_name = get_model_ct_tuple(obj)
template_names = ['search/indexes/%s/%s_%s.txt' % (app_label, model_name, self.instance_name)]
t = loader.select_template(template_names)
return t.render(Context({'object': obj}))
def convert(self, value):
"""
Handles conversion between the data found and the type of the field.
Extending classes should override this method and provide correct
data coercion.
"""
return value
class CharField(SearchField):
field_type = 'string'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetCharField
super(CharField, self).__init__(**kwargs)
def prepare(self, obj):
return self.convert(super(CharField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return six.text_type(value)
class LocationField(SearchField):
field_type = 'location'
def prepare(self, obj):
from haystack.utils.geo import ensure_point
value = super(LocationField, self).prepare(obj)
if value is None:
return None
pnt = ensure_point(value)
pnt_lng, pnt_lat = pnt.get_coords()
return "%s,%s" % (pnt_lat, pnt_lng)
def convert(self, value):
from haystack.utils.geo import ensure_point, Point
if value is None:
return None
if hasattr(value, 'geom_type'):
value = ensure_point(value)
return value
if isinstance(value, six.string_types):
lat, lng = value.split(',')
elif isinstance(value, (list, tuple)):
# GeoJSON-alike
lat, lng = value[1], value[0]
elif isinstance(value, dict):
lat = value.get('lat', 0)
lng = value.get('lon', 0)
value = Point(float(lng), float(lat))
return value
class NgramField(CharField):
field_type = 'ngram'
def __init__(self, **kwargs):
if kwargs.get('faceted') is True:
raise SearchFieldError("%s can not be faceted." % self.__class__.__name__)
super(NgramField, self).__init__(**kwargs)
class EdgeNgramField(NgramField):
field_type = 'edge_ngram'
class IntegerField(SearchField):
field_type = 'integer'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetIntegerField
super(IntegerField, self).__init__(**kwargs)
def prepare(self, obj):
return self.convert(super(IntegerField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return int(value)
class FloatField(SearchField):
field_type = 'float'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetFloatField
super(FloatField, self).__init__(**kwargs)
def prepare(self, obj):
return self.convert(super(FloatField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return float(value)
class DecimalField(SearchField):
field_type = 'string'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetDecimalField
super(DecimalField, self).__init__(**kwargs)
def prepare(self, obj):
return self.convert(super(DecimalField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return six.text_type(value)
class BooleanField(SearchField):
field_type = 'boolean'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetBooleanField
super(BooleanField, self).__init__(**kwargs)
def prepare(self, obj):
return self.convert(super(BooleanField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return bool(value)
class DateField(SearchField):
field_type = 'date'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetDateField
super(DateField, self).__init__(**kwargs)
def convert(self, value):
if value is None:
return None
if isinstance(value, six.string_types):
match = DATETIME_REGEX.search(value)
if match:
data = match.groupdict()
return datetime_safe.date(int(data['year']), int(data['month']), int(data['day']))
else:
raise SearchFieldError("Date provided to '%s' field doesn't appear to be a valid date string: '%s'" % (self.instance_name, value))
return value
class DateTimeField(SearchField):
field_type = 'datetime'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetDateTimeField
super(DateTimeField, self).__init__(**kwargs)
def convert(self, value):
if value is None:
return None
if isinstance(value, six.string_types):
match = DATETIME_REGEX.search(value)
if match:
data = match.groupdict()
return datetime_safe.datetime(int(data['year']), int(data['month']), int(data['day']), int(data['hour']), int(data['minute']), int(data['second']))
else:
raise SearchFieldError("Datetime provided to '%s' field doesn't appear to be a valid datetime string: '%s'" % (self.instance_name, value))
return value
class MultiValueField(SearchField):
field_type = 'string'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetMultiValueField
if kwargs.get('use_template') is True:
raise SearchFieldError("'%s' fields can not use templates to prepare their data." % self.__class__.__name__)
super(MultiValueField, self).__init__(**kwargs)
self.is_multivalued = True
def prepare(self, obj):
return self.convert(super(MultiValueField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return list(value)
class FacetField(SearchField):
"""
``FacetField`` is slightly different than the other fields because it can
work in conjunction with other fields as its data source.
Accepts an optional ``facet_for`` kwarg, which should be the field name
(not ``index_fieldname``) of the field it should pull data from.
"""
instance_name = None
def __init__(self, **kwargs):
handled_kwargs = self.handle_facet_parameters(kwargs)
super(FacetField, self).__init__(**handled_kwargs)
def handle_facet_parameters(self, kwargs):
if kwargs.get('faceted', False):
raise SearchFieldError("FacetField (%s) does not accept the 'faceted' argument." % self.instance_name)
if not kwargs.get('null', True):
raise SearchFieldError("FacetField (%s) does not accept False for the 'null' argument." % self.instance_name)
if not kwargs.get('indexed', True):
raise SearchFieldError("FacetField (%s) does not accept False for the 'indexed' argument." % self.instance_name)
if kwargs.get('facet_class'):
raise SearchFieldError("FacetField (%s) does not accept the 'facet_class' argument." % self.instance_name)
self.facet_for = None
self.facet_class = None
# Make sure the field is nullable.
kwargs['null'] = True
if 'facet_for' in kwargs:
self.facet_for = kwargs['facet_for']
del(kwargs['facet_for'])
return kwargs
def get_facet_for_name(self):
return self.facet_for or self.instance_name
class FacetCharField(FacetField, CharField):
pass
class FacetIntegerField(FacetField, IntegerField):
pass
class FacetFloatField(FacetField, FloatField):
pass
class FacetDecimalField(FacetField, DecimalField):
pass
class FacetBooleanField(FacetField, BooleanField):
pass
class FacetDateField(FacetField, DateField):
pass
class FacetDateTimeField(FacetField, DateTimeField):
pass
class FacetMultiValueField(FacetField, MultiValueField):
pass
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
#
import os
import sys
import Axon
import pygame
from Axon.Component import component
from Axon.Ipc import WaitComplete, producerFinished, shutdownMicroprocess
from Kamaelia.Chassis.Graphline import Graphline
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Chassis.ConnectedServer import SimpleServer
from Kamaelia.Internet.TCPClient import TCPClient
from Kamaelia.Util.Console import ConsoleEchoer
from Kamaelia.Visualisation.PhysicsGraph.chunks_to_lines import chunks_to_lines
from Kamaelia.Visualisation.PhysicsGraph.lines_to_tokenlists import lines_to_tokenlists as text_to_tokenlists
from Kamaelia.Util.NullSink import nullSinkComponent
from Kamaelia.Util.Backplane import Backplane, PublishTo, SubscribeTo
from Kamaelia.Util.Detuple import SimpleDetupler
from Kamaelia.Util.Console import ConsoleEchoer
# Ticker
from Kamaelia.UI.Pygame.Ticker import Ticker
from Kamaelia.UI.Pygame.Display import PygameDisplay
from Kamaelia.Protocol.Framing import DataChunker, DataDeChunker
#
# The following application specific components will probably be rolled
# back into the repository.
#
from Kamaelia.Apps.Whiteboard.TagFiltering import TagAndFilterWrapper, FilterAndTagWrapper
from Kamaelia.Apps.Whiteboard.TagFiltering import TagAndFilterWrapperKeepingTag, FilterAndTagWrapperKeepingTag
from Kamaelia.Apps.Whiteboard.Tokenisation import tokenlists_to_lines, lines_to_tokenlists
from Kamaelia.Apps.Whiteboard.Canvas import Canvas
from Kamaelia.Apps.Whiteboard.Painter import Painter
from Kamaelia.Apps.Whiteboard.SingleShot import OneShot
from Kamaelia.Apps.Whiteboard.CheckpointSequencer import CheckpointSequencer
from Kamaelia.Apps.Whiteboard.Entuple import Entuple
from Kamaelia.Apps.Whiteboard.Routers import Router, TwoWaySplitter, ConditionalSplitter
from Kamaelia.Apps.Whiteboard.Palette import buildPalette, colours
from Kamaelia.Apps.Whiteboard.Options import parseOptions
from Kamaelia.Apps.Whiteboard.UI import PagingControls, Eraser, ClearPage, SaveDeck, LoadDeck, ClearScribbles, Delete
from Kamaelia.Apps.Whiteboard.CommandConsole import CommandConsole
#from Kamaelia.Apps.Whiteboard.SmartBoard import SmartBoard
from Kamaelia.Apps.Whiteboard.Webcam import VideoCaptureSource
try:
from Kamaelia.Codec.Speex import SpeexEncode,SpeexDecode
except Exception, e:
print "Speex not available, using null components instead"
SpeexEncode = nullSinkComponent
SpeexDecode = nullSinkComponent
try:
from Kamaelia.Apps.Whiteboard.Audio import SoundInput
except ImportError:
print "SoundInput not available, using NullSink instead"
SoundInput = nullSinkComponent
try:
from Kamaelia.Apps.Whiteboard.Audio import SoundOutput
except ImportError:
print "SoundOutput not available, using NullSink instead"
SoundOutput = nullSinkComponent
try:
from Kamaelia.Apps.Whiteboard.Audio import RawAudioMixer
except ImportError:
print "RawAudioMixer not available, using NullSink instead"
RawAudioMixer = nullSinkComponent
notepad = None
if len(sys.argv) >1:
if os.path.exists(sys.argv[1]):
if os.path.isdir(sys.argv[1]):
notepad = sys.argv[1]
if (notepad is None) and os.path.exists("Scribbles"):
if os.path.isdir("Scribbles"):
notepad = "Scribbles"
if (notepad is None):
#N = os.path.join(os.path.expanduser("~"),"Scribbles")
N = "Scribbles"
if not os.path.exists(N):
os.makedirs(N)
if os.path.isdir(N):
notepad = N
if (notepad is None):
print "Can't figure out what to do with piccies. Exitting"
sys.exit(0)
if not os.path.exists("Decks"):
os.makedirs("Decks")
#
# Misplaced encapsulation --> Kamaelia.Apps.Whiteboard.Palette
#
colours_order = [ "black", "red", "orange", "yellow", "green", "turquoise", "blue", "purple", "darkgrey", "lightgrey" ]
num_pages = 0
for x in os.listdir(notepad):
if (os.path.splitext(x)[1] == ".png"):
num_pages += 1
#num_pages = len(os.listdir(notepad))
if (num_pages < 1):
num_pages = 1
def FilteringPubsubBackplane(backplaneID,**FilterTagWrapperOptions):
"""Sends tagged events to a backplane. Emits events not tagged by this pubsub."""
return FilterAndTagWrapper(
Pipeline(
PublishTo(backplaneID),
# well, should be to separate pipelines, this is lazier!
SubscribeTo(backplaneID),
),
**FilterTagWrapperOptions
)
def clientconnector(whiteboardBackplane="WHITEBOARD", audioBackplane="AUDIO", port=1500):
return Pipeline(
chunks_to_lines(),
lines_to_tokenlists(),
Graphline(
ROUTER = Router( ((lambda T : T[0]=="SOUND"), "audio"),
((lambda T : T[0]!="SOUND"), "whiteboard"),
),
WHITEBOARD = FilteringPubsubBackplane(whiteboardBackplane),
AUDIO = Pipeline(
SimpleDetupler(1), # remove 'SOUND' tag
SpeexDecode(3),
FilteringPubsubBackplane(audioBackplane, dontRemoveTag=True),
RawAudioMixer(),
SpeexEncode(3),
Entuple(prefix=["SOUND"],postfix=[]),
),
linkages = {
# incoming messages go to a router
("", "inbox") : ("ROUTER", "inbox"),
# distribute messages to appropriate destinations
("ROUTER", "audio") : ("AUDIO", "inbox"),
("ROUTER", "whiteboard") : ("WHITEBOARD", "inbox"),
# aggregate all output
("AUDIO", "outbox") : ("", "outbox"),
("WHITEBOARD", "outbox") : ("", "outbox"),
# shutdown routing, not sure if this will actually work, but hey!
("", "control") : ("ROUTER", "control"),
("ROUTER", "signal") : ("AUDIO", "control"),
("AUDIO", "signal") : ("WHITEBOARD", "control"),
("WHITEBOARD", "signal") : ("", "signal")
},
),
tokenlists_to_lines(),
)
class SurfaceToJpeg(Axon.Component.component):
Inboxes = ["inbox", "inbox2", "control"]
Outboxes = ["outbox", "outbox2", "signal"]
def __init__(self):
super(SurfaceToJpeg, self).__init__()
def main(self):
while (1):
while (self.dataReady("inbox")):
data = self.recv("inbox")
imagestring = pygame.image.tostring(data,"RGB")
self.send(imagestring, "outbox")
while (self.dataReady("inbox2")):
data = self.recv("inbox2")
try: # Prevent crashing with malformed received images
image = pygame.image.fromstring(data,(190,140),"RGB")
self.send(image, "outbox2")
except Exception, e:
pass
self.pause()
yield 1
def clientconnectorwc(webcamBackplane="WEBCAM", port=1501):
return Pipeline(
#chunks_to_lines(),
Graphline(
WEBCAM = FilteringPubsubBackplane(webcamBackplane),
CONVERTER = SurfaceToJpeg(),
FRAMER = DataChunker(),
CONSOLE = ConsoleEchoer(),
DEFRAMER = DataDeChunker(),
linkages = {
("", "inbox") : ("DEFRAMER", "inbox"),
("DEFRAMER", "outbox") : ("CONVERTER", "inbox2"),
("CONVERTER", "outbox2") : ("WEBCAM", "inbox"),
("WEBCAM", "outbox") : ("CONVERTER", "inbox"),
("CONVERTER", "outbox") : ("FRAMER", "inbox"),
("FRAMER", "outbox") : ("", "outbox"),
},
),
)
#/-------------------------------------------------------------------------
# Server side of the system
#
def LocalEventServer(whiteboardBackplane="WHITEBOARD", audioBackplane="AUDIO", port=1500):
def configuredClientConnector():
return clientconnector(whiteboardBackplane=whiteboardBackplane,
audioBackplane=audioBackplane,
port=port)
    return SimpleServer(protocol=configuredClientConnector, port=port)
def LocalWebcamEventServer(webcamBackplane="WEBCAM", port=1501):
def configuredClientConnector():
return clientconnectorwc(webcamBackplane=webcamBackplane,
port=port)
    return SimpleServer(protocol=configuredClientConnector, port=port)
#/-------------------------------------------------------------------------
# Client side of the system
#
def EventServerClients(rhost, rport,
whiteboardBackplane="WHITEBOARD",
audioBackplane="AUDIO"):
# plug a TCPClient into the backplane
loadingmsg = "Fetching sketch from server..."
return Graphline(
# initial messages sent to the server, and the local whiteboard
GETIMG = Pipeline(
OneShot(msg=[["GETIMG"]]),
tokenlists_to_lines()
),
BLACKOUT = OneShot(msg="CLEAR 0 0 0\r\n"
"WRITE 100 100 24 255 255 255 "+loadingmsg+"\r\n"),
NETWORK = TCPClient(host=rhost,port=rport),
APPCOMMS = clientconnector(whiteboardBackplane=whiteboardBackplane,
audioBackplane=audioBackplane),
linkages = {
("GETIMG", "outbox") : ("NETWORK", "inbox"), # Single shot out
("APPCOMMS", "outbox") : ("NETWORK", "inbox"), # Continuous out
("BLACKOUT", "outbox") : ("APPCOMMS", "inbox"), # Single shot in
("NETWORK", "outbox") : ("APPCOMMS", "inbox"), # Continuous in
}
)
def WebcamEventServerClients(rhost, rport,
webcamBackplane="WEBCAM"):
# plug a TCPClient into the backplane
return Graphline(
NETWORK = TCPClient(host=rhost,port=rport),
APPCOMMS = clientconnectorwc(webcamBackplane=webcamBackplane),
linkages = {
("APPCOMMS", "outbox") : ("NETWORK", "inbox"), # Continuous out
("NETWORK", "outbox") : ("APPCOMMS", "inbox"), # Continuous in
}
)
#/-------------------------------------------------------------------------
class LocalPageEventsFilter(ConditionalSplitter): # This is a data tap/siphon/demuxer
def condition(self, data):
return (data == [["prev"]]) or (data == [["next"]])
def true(self,data):
self.send((data[0][0], "local"), "true")
SLIDESPEC = notepad+"/slide.%d.png"
def makeBasicSketcher(left=0,top=0,width=1024,height=768):
return Graphline( CANVAS = Canvas( position=(left,top+32+1),size=(width-192,(height-(32+15)-1)),bgcolour=(255,255,255),notepad=notepad ),
PAINTER = Painter(),
PALETTE = buildPalette( cols=colours, order=colours_order, topleft=(left+64,top), size=32 ),
ERASER = Eraser(left,top),
CLEAR = ClearPage(left+(64*5)+32*len(colours)+1,top),
SAVEDECK = SaveDeck(left+(64*8)+32*len(colours)+1,top),
LOADDECK = LoadDeck(left+(64*7)+32*len(colours)+1,top),
# SMARTBOARD = SmartBoard(),
DELETE = Delete(left+(64*6)+32*len(colours)+1,top),
CLOSEDECK = ClearScribbles(left+(64*9)+32*len(colours)+1,top),
# QUIT = Quit(left+(64*10)+32*len(colours)+1,top),
PAGINGCONTROLS = PagingControls(left+64+32*len(colours)+1,top),
#LOCALPAGINGCONTROLS = LocalPagingControls(left+(64*6)+32*len(colours),top),
LOCALPAGEEVENTS = LocalPageEventsFilter(),
HISTORY = CheckpointSequencer(lambda X: [["LOAD", SLIDESPEC % (X,)]],
lambda X: [["SAVE", SLIDESPEC % (X,)]],
lambda X: [["CLEAR"]],
initial = 1,
highest = num_pages,
notepad = notepad,
),
PAINT_SPLITTER = TwoWaySplitter(),
#LOCALEVENT_SPLITTER = TwoWaySplitter(),
DEBUG = ConsoleEchoer(),
TICKER = Ticker(position=(left,top+height-15),background_colour=(220,220,220),text_colour=(0,0,0),text_height=(17),render_right=(width),render_bottom=(15)),
linkages = {
("CANVAS", "eventsOut") : ("PAINTER", "inbox"),
("PALETTE", "outbox") : ("PAINTER", "colour"),
("ERASER", "outbox") : ("PAINTER", "erase"),
("PAINTER", "outbox") : ("PAINT_SPLITTER", "inbox"),
("CLEAR","outbox") : ("PAINT_SPLITTER", "inbox"),
("PAINT_SPLITTER", "outbox") : ("CANVAS", "inbox"),
("PAINT_SPLITTER", "outbox2") : ("", "outbox"), # send to network
("SAVEDECK", "outbox") : ("CANVAS", "inbox"),
("LOADDECK", "outbox") : ("CANVAS", "inbox"),
("CLOSEDECK", "outbox") : ("CANVAS", "inbox"),
("DELETE", "outbox") : ("CANVAS", "inbox"),
# ("QUIT", "outbox") : ("CANVAS", "inbox"),
#("LOCALPAGINGCONTROLS","outbox") : ("LOCALEVENT_SPLITTER", "inbox"),
#("LOCALEVENT_SPLITTER", "outbox2"): ("", "outbox"), # send to network
#("LOCALEVENT_SPLITTER", "outbox") : ("LOCALPAGEEVENTS", "inbox"),
("", "inbox") : ("LOCALPAGEEVENTS", "inbox"),
("LOCALPAGEEVENTS", "false") : ("CANVAS", "inbox"),
("LOCALPAGEEVENTS", "true") : ("HISTORY", "inbox"),
("PAGINGCONTROLS","outbox") : ("HISTORY", "inbox"),
("HISTORY","outbox") : ("CANVAS", "inbox"),
("CANVAS", "outbox") : ("", "outbox"),
("CANVAS","surfacechanged") : ("HISTORY", "inbox"),
("CANVAS", "toTicker") : ("TICKER", "inbox"),
("CANVAS", "toHistory") : ("HISTORY", "inbox"),
# ("SMARTBOARD", "colour") : ("PAINTER", "colour"),
# ("SMARTBOARD", "erase") : ("PAINTER", "erase"),
# ("SMARTBOARD", "toTicker") : ("TICKER", "inbox"),
},
)
class ProperSurfaceDisplayer(Axon.Component.component):
Inboxes = ["inbox", "control", "callback"]
Outboxes= ["outbox", "signal", "display_signal"]
displaysize = (640, 480)
def __init__(self, **argd):
super(ProperSurfaceDisplayer, self).__init__(**argd)
# Per-instance state: class-level mutable lists would be shared between
# the local and remote webcam displayer instances.
self.remotecams = [0,0,0,0]
self.remotecamcount = [25,25,25,25]
self.disprequest = { "DISPLAYREQUEST" : True,
"callback" : (self,"callback"),
"size": self.displaysize,
"position" : self.position,
"bgcolour" : self.bgcolour}
def pygame_display_flip(self):
self.send({"REDRAW":True, "surface":self.display}, "display_signal")
def getDisplay(self):
displayservice = PygameDisplay.getDisplayService()
self.link((self,"display_signal"), displayservice)
self.send(self.disprequest, "display_signal")
while not self.dataReady("callback"):
self.pause()
yield 1
self.display = self.recv("callback")
self.display.fill( (self.bgcolour) )
def main(self):
yield Axon.Ipc.WaitComplete(self.getDisplay())
if 1: # pointless instruction
# initialise five webcam windows
if (self.webcam == 1):
snapshot = "No Local Camera"
font = pygame.font.Font(None,22)
self.display.fill( (0,0,0) )
snapshot = font.render(snapshot, False, (255,255,255))
self.display.blit(snapshot, (34,56))
self.pygame_display_flip()
elif (self.webcam == 2):
snapshot = "No Remote Camera"
font = pygame.font.Font(None,22)
self.display.fill( (0,0,0),pygame.Rect(0,0,190,140*4))
snapshot = font.render(snapshot, False, (255,255,255))
self.display.blit(snapshot, (25,56))
self.display.blit(snapshot, (25,56+140*1+1))
self.display.blit(snapshot, (25,56+140*2+2))
self.display.blit(snapshot, (25,56+140*3+3))
self.pygame_display_flip()
while 1:
if (self.webcam):
while self.dataReady("inbox"):
snapshot = self.recv("inbox")
if (self.webcam == 1):
#snapshot=snapshot.convert()
self.display.blit(snapshot, (0,0))
self.pygame_display_flip()
elif (self.webcam == 2):
# remove tag
tag = snapshot[0]
data = snapshot[1]
# check whether this tag already has a cam window
pretagged = tag in self.remotecams
# allocate the tag to a free cam window if not
if not pretagged:
if (self.remotecams[0] == 0):
self.remotecams[0] = tag
elif ((self.remotecams[1] == 0)):
self.remotecams[1] = tag
elif ((self.remotecams[2] == 0)):
self.remotecams[2] = tag
elif ((self.remotecams[3] == 0)):
self.remotecams[3] = tag
# publish cam pic to its window if one is available
iteration = 0
for x in self.remotecams:
if (self.remotecams[iteration] == tag):
offset = (140 * iteration + iteration * 1)
self.display.blit(data, (0,0+offset))
self.remotecamcount[iteration] = 25 # reset cam count to prevent 'no remote cam'
iteration += 1
# Reset remote cameras where clients have disconnected (remotecamcount = 0)
iteration = 0
for x in self.remotecamcount:
if (self.remotecamcount[iteration] == 0):
snapshot = "No Remote Camera"
font = pygame.font.Font(None,22)
offset = (iteration * 140 + iteration * 1)
self.display.fill( (0,0,0),pygame.Rect(0,offset,190,140))
snapshot = font.render(snapshot, False, (255,255,255))
self.display.blit(snapshot, (25,56+offset))
self.remotecams[iteration] = 0
elif (self.remotecamcount[iteration] > 0):
self.remotecamcount[iteration] -= 1
iteration += 1
self.pygame_display_flip()
while not self.anyReady():
self.pause()
yield 1
yield 1
else:
self.pause()
yield 1
if __name__=="__main__":
left = 0
top = 0
width = 1024
height = 768
BACKGROUND = ProperSurfaceDisplayer(displaysize = (1024, 768), position = (0, 0), bgcolour=(0,0,0), webcam = 0).activate()
mainsketcher = \
Graphline( SKETCHER = makeBasicSketcher(left,top+1,width,height-1),
CONSOLE = CommandConsole(),
linkages = { ('','inbox'):('SKETCHER','inbox'),
('SKETCHER','outbox'):('','outbox'),
('CONSOLE','outbox'):('SKETCHER','inbox'),
}
)
camera = Graphline( LOCALWEBCAM = VideoCaptureSource(),
WCCANVAS = ProperSurfaceDisplayer(displaysize = (190, 140), position = (1024-191,32+2), bgcolour=(0,0,0), webcam = 1),
REMWCCANVAS = ProperSurfaceDisplayer(displaysize = (190, 140*4+4), position = (1024-191,32+140+3), bgcolour=(0,0,0), webcam = 2),
CAM_SPLITTER = TwoWaySplitter(),
CONSOLE = ConsoleEchoer(),
linkages = { ('','inbox'):('REMWCCANVAS','inbox'),
('LOCALWEBCAM','outbox'):('CAM_SPLITTER','inbox'),
('CAM_SPLITTER','outbox2'):('WCCANVAS','inbox'),
('CAM_SPLITTER','outbox'):('','outbox'),
}
)
# primary whiteboard
Pipeline( SubscribeTo("WHITEBOARD"),
TagAndFilterWrapper(mainsketcher),
PublishTo("WHITEBOARD")
).activate()
# primary sound IO - tagged and filtered, so can't hear self
Pipeline( SubscribeTo("AUDIO"),
TagAndFilterWrapperKeepingTag(
Pipeline(
RawAudioMixer(),
SoundOutput(),
######
SoundInput(),
),
),
PublishTo("AUDIO"),
).activate()
# primary webcam - capture > to jpeg > framing > backplane > TCPC > Deframing > etc
Pipeline( SubscribeTo("WEBCAM"),
TagAndFilterWrapperKeepingTag(camera),
PublishTo("WEBCAM"),
).activate()
rhost, rport, serveport = parseOptions()
# setup a server, if requested
if serveport:
LocalEventServer("WHITEBOARD", "AUDIO", port=serveport).activate()
LocalWebcamEventServer("WEBCAM", port=(serveport+1)).activate()
# connect to remote host & port, if requested
if rhost and rport:
EventServerClients(rhost, rport, "WHITEBOARD", "AUDIO").activate()
WebcamEventServerClients(rhost, (rport + 1), "WEBCAM").activate()
# sys.path.append("../Introspection")
# from Profiling import FormattedProfiler
#
# Pipeline(FormattedProfiler( 20.0, 1.0),
# ConsoleEchoer()
# ).activate()
Backplane("WHITEBOARD").activate()
Backplane("WEBCAM").activate()
Backplane("AUDIO").run()
|
|
# Copyright 2016 United States Government as represented by the Administrator
# of the National Aeronautics and Space Administration. All Rights Reserved.
#
# Portion of this code is Copyright Geoscience Australia, Licensed under the
# Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License
# at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# The CEOS 2 platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import numpy as np
import xarray as xr
import datacube
import dc_utilities as utilities
# Command line tool imports
import argparse
import os
import collections
import gdal
from datetime import datetime
# Author: KMF
# Creation date: 2016-06-14
def create_mosaic(dataset_in, clean_mask=None, no_data=-9999):
"""
Description:
Creates a mosaic of the input dataset, preferring the most recent clean pixel and falling back to older acquisitions. If no clean mask is given,
the 'cf_mask' variable must be included in the input dataset, as it will be used
to create a clean mask
-----
Inputs:
dataset_in (xarray.Dataset) - dataset retrieved from the Data Cube; should contain
coordinates: time, latitude, longitude
variables: variables to be mosaicked
If user does not provide a clean_mask, dataset_in must also include the cf_mask
variable
Optional Inputs:
clean_mask (nd numpy array with dtype boolean) - true for values user considers clean;
if user does not provide a clean mask, one will be created using cfmask
no_data (int/float) - no data pixel value; default: -9999
Output:
dataset_out (xarray.Dataset) - mosaicked data with
coordinates: latitude, longitude
variables: same as dataset_in
"""
# Create clean_mask from cfmask if none given; test 'is None' because the
# truth value of a numpy boolean array is ambiguous
if clean_mask is None:
cfmask = dataset_in.cf_mask
clean_mask = utilities.create_cfmask_clean_mask(cfmask)
data_vars = dataset_in.data_vars # Dict object with key as the name of the variable
# and each value as the DataArray of that variable
mosaic = collections.OrderedDict() # Dict to contain variable names as keys and
# numpy arrays containing mosaicked data
for key in data_vars:
# Get raw data for current variable and mask the data
data = data_vars[key].values
masked = np.full(data.shape, no_data)
masked[clean_mask] = data[clean_mask]
out = np.full(masked.shape[1:], no_data)
# Mosaic current variable (most recent - oldest)
for index in reversed(range(len(clean_mask))):
swap = np.reshape(np.in1d(out.reshape(-1), [no_data]),
out.shape)
out[swap] = masked[index][swap]
mosaic[key] = (['latitude', 'longitude'], out)
latitude = dataset_in.latitude
longitude = dataset_in.longitude
dataset_out = xr.Dataset(mosaic,
coords={'latitude': latitude,
'longitude': longitude})
return dataset_out
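# Minimal usage sketch (variable names are illustrative; assumes dataset_in
# was loaded via dc.load(..., measurements=[..., 'cf_mask'])):
#
#   mosaic_ds = create_mosaic(dataset_in)
#   red_band = mosaic_ds.red.values  # 2D (latitude, longitude) array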
def main(platform, product_type, min_lon, max_lon, min_lat, max_lat,
red, green, blue, start_date, end_date, dc_config):
"""
Description:
Command-line mosaicking tool - creates a true color mosaic from the
data retrieved by the Data Cube and saves a GeoTIFF of the results
Assumptions:
The command-line tool assumes there is a measurement called cf_mask
Inputs:
platform (str)
product_type (str)
min_lon (str)
max_lon (str)
min_lat (str)
max_lat (str)
red (str)
green (str)
blue (str)
start_date (str)
end_date (str)
dc_config (str)
"""
# Initialize data cube object
dc = datacube.Datacube(config=dc_config,
app='dc-mosaicker')
# Validate arguments
products = dc.list_products()
platform_names = set([product[6] for product in products.values])
if platform not in platform_names:
print 'ERROR: Invalid platform.'
print 'Valid platforms are:'
for name in platform_names:
print name
return
product_names = [product[0] for product in products.values]
if product_type not in product_names:
print 'ERROR: Invalid product type.'
print 'Valid product types are:'
for name in product_names:
print name
return
measurements = dc.list_measurements()
index_1 = measurements.keys()[0] # Doesn't matter what the actual value is,
# just need to get into the next layer of the
# DataFrame.. better way?
bands = set(measurements[index_1][product_type].keys())
if not set([red, green, blue]).issubset(bands):
print 'ERROR: Invalid band selection.'
print 'Valid bands are:'
for band in bands:
print band
return
try:
min_lon = float(min_lon)
max_lon = float(max_lon)
min_lat = float(min_lat)
max_lat = float(max_lat)
except ValueError:
print 'ERROR: Longitudes/Latitudes must be float values'
return
try:
start_date_str = start_date
end_date_str = end_date
start_date = datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.strptime(end_date, '%Y-%m-%d')
except ValueError:
print 'ERROR: Invalid date format. Date format: YYYY-MM-DD'
return
if not os.path.exists(dc_config):
print 'ERROR: Invalid file path for dc_config'
return
# Retrieve data from Data Cube
dataset_in = dc.load(platform=platform,
product=product_type,
time=(start_date, end_date),
lon=(min_lon, max_lon),
lat=(min_lat, max_lat),
measurements=[red, green, blue, 'cf_mask'])
# Get information needed for saving as GeoTIFF
# Spatial ref
crs = dataset_in.crs
spatial_ref = utilities.get_spatial_ref(crs)
# Upper left coordinates
ul_lon = dataset_in.longitude.values[0]
ul_lat = dataset_in.latitude.values[0]
# Resolution
products = dc.list_products()
resolution = products.resolution[products.name == product_type]
lon_dist = resolution.values[0][1]
lat_dist = resolution.values[0][0]
# Rotation
lon_rtn = 0
lat_rtn = 0
geotransform = (ul_lon, lon_dist, lon_rtn, ul_lat, lat_rtn, lat_dist)
mosaic = create_mosaic(dataset_in)
out_file = ( str(min_lon) + '_' + str(min_lat) + '_'
+ start_date_str + '_' + end_date_str
+ '_mosaic.tif' )
utilities.save_to_geotiff(out_file, gdal.GDT_Float32, mosaic, geotransform, spatial_ref)
if __name__ == '__main__':
start_time = datetime.now()
parser = argparse.ArgumentParser()
parser.add_argument('platform', help='Data platform; example: LANDSAT_7')
parser.add_argument('product', help='Product type; example: ls7_ledaps')
parser.add_argument('min_lon', help='Minimum longitude')
parser.add_argument('max_lon', help='Maximum longitude')
parser.add_argument('min_lat', help='Minimum latitude')
parser.add_argument('max_lat', help='Maximum latitude')
parser.add_argument('start_date', help='Start date; format: YYYY-MM-DD')
parser.add_argument('end_date', help='End date; format: YYYY-MM-DD')
parser.add_argument('red', nargs='?', default='red',
help='Band to map to the red color channel')
parser.add_argument('green', nargs='?', default='green',
help='Band to map to the green color channel')
parser.add_argument('blue', nargs='?', default='blue',
help='Band to map to the blue color channel')
parser.add_argument('dc_config', nargs='?', default='~/.datacube.conf',
help='Datacube configuration path; default: ~/.datacube.conf')
args = parser.parse_args()
main(args.platform, args.product,
args.min_lon, args.max_lon,
args.min_lat, args.max_lat,
args.red, args.green, args.blue,
args.start_date, args.end_date,
args.dc_config)
end_time = datetime.now()
print 'Execution time: ' + str(end_time - start_time)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library of dtypes (Tensor element types)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import builtins
from tensorflow.core.framework import types_pb2
# pywrap_tensorflow must be imported prior to _dtypes for the MacOS linker
# to resolve the protobufs properly.
# pylint: disable=unused-import,g-bad-import-order
from tensorflow.python import pywrap_tensorflow
from tensorflow.python import _dtypes
from tensorflow.python.util.tf_export import tf_export
_np_bfloat16 = pywrap_tensorflow.TF_bfloat16_type()
# pylint: disable=slots-on-old-class
@tf_export("dtypes.DType", "DType")
class DType(_dtypes.DType):
"""Represents the type of the elements in a `Tensor`.
The following `DType` objects are defined:
* `tf.float16`: 16-bit half-precision floating-point.
* `tf.float32`: 32-bit single-precision floating-point.
* `tf.float64`: 64-bit double-precision floating-point.
* `tf.bfloat16`: 16-bit truncated floating-point.
* `tf.complex64`: 64-bit single-precision complex.
* `tf.complex128`: 128-bit double-precision complex.
* `tf.int8`: 8-bit signed integer.
* `tf.uint8`: 8-bit unsigned integer.
* `tf.uint16`: 16-bit unsigned integer.
* `tf.uint32`: 32-bit unsigned integer.
* `tf.uint64`: 64-bit unsigned integer.
* `tf.int16`: 16-bit signed integer.
* `tf.int32`: 32-bit signed integer.
* `tf.int64`: 64-bit signed integer.
* `tf.bool`: Boolean.
* `tf.string`: String.
* `tf.qint8`: Quantized 8-bit signed integer.
* `tf.quint8`: Quantized 8-bit unsigned integer.
* `tf.qint16`: Quantized 16-bit signed integer.
* `tf.quint16`: Quantized 16-bit unsigned integer.
* `tf.qint32`: Quantized 32-bit signed integer.
* `tf.resource`: Handle to a mutable resource.
* `tf.variant`: Values of arbitrary types.
The `tf.as_dtype()` function converts numpy types and string type
names to a `DType` object.
"""
__slots__ = ()
@property
def _is_ref_dtype(self):
"""Returns `True` if this `DType` represents a reference type."""
return self._type_enum > 100
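# (In types.proto the *_REF enum values are the corresponding base values
# plus 100, e.g. DT_FLOAT = 1 and DT_FLOAT_REF = 101; the +100/-100
# arithmetic in _as_ref and base_dtype below relies on this.)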
@property
def _as_ref(self):
"""Returns a reference `DType` based on this `DType`."""
if self._is_ref_dtype:
return self
else:
return _INTERN_TABLE[self._type_enum + 100]
@property
def base_dtype(self):
"""Returns a non-reference `DType` based on this `DType`."""
if self._is_ref_dtype:
return _INTERN_TABLE[self._type_enum - 100]
else:
return self
@property
def real_dtype(self):
"""Returns the dtype correspond to this dtype's real part."""
base = self.base_dtype
if base == complex64:
return float32
elif base == complex128:
return float64
else:
return self
@property
def as_numpy_dtype(self):
"""Returns a `numpy.dtype` based on this `DType`."""
return _TF_TO_NP[self._type_enum]
@property
def min(self):
"""Returns the minimum representable value in this data type.
Raises:
TypeError: if this is a non-numeric, unordered, or quantized type.
"""
if (self.is_quantized or
self.base_dtype in (bool, string, complex64, complex128)):
raise TypeError("Cannot find minimum value of %s." % self)
# There is no simple way to get the min value of a dtype, so we have to
# check float and int types separately
try:
return np.finfo(self.as_numpy_dtype).min
except: # bare except, since the exceptions finfo can raise are not documented
try:
return np.iinfo(self.as_numpy_dtype).min
except:
if self.base_dtype == bfloat16:
return _np_bfloat16(float.fromhex("-0x1.FEp127"))
raise TypeError("Cannot find minimum value of %s." % self)
@property
def max(self):
"""Returns the maximum representable value in this data type.
Raises:
TypeError: if this is a non-numeric, unordered, or quantized type.
"""
if (self.is_quantized or
self.base_dtype in (bool, string, complex64, complex128)):
raise TypeError("Cannot find maximum value of %s." % self)
# There is no simple way to get the max value of a dtype, so we have to
# check float and int types separately
try:
return np.finfo(self.as_numpy_dtype).max
except: # bare except, since the exceptions finfo can raise are not documented
try:
return np.iinfo(self.as_numpy_dtype).max
except:
if self.base_dtype == bfloat16:
return _np_bfloat16(float.fromhex("0x1.FEp127"))
raise TypeError("Cannot find maximum value of %s." % self)
@property
def limits(self, clip_negative=True):
"""Return intensity limits, i.e.
(min, max) tuple, of the dtype.
Args:
clip_negative : bool, optional If True, clip the negative range (i.e.
return 0 for min intensity) even if the image dtype allows negative
values. Returns
min, max : tuple Lower and upper intensity limits.
"""
min, max = dtype_range[self.as_numpy_dtype] # pylint: disable=redefined-builtin
if clip_negative:
min = 0 # pylint: disable=redefined-builtin
return min, max
def is_compatible_with(self, other):
"""Returns True if the `other` DType will be converted to this DType.
The conversion rules are as follows:
```python
DType(T) .is_compatible_with(DType(T)) == True
```
Args:
other: A `DType` (or object that may be converted to a `DType`).
Returns:
True if a Tensor of the `other` `DType` will be implicitly converted to
this `DType`.
"""
other = as_dtype(other)
return self._type_enum in (other.as_datatype_enum,
other.base_dtype.as_datatype_enum)
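# For example, float32.is_compatible_with(float32_ref) is True (the ref
# type's base dtype matches), while float32_ref.is_compatible_with(float32)
# is False.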
def __eq__(self, other):
"""Returns True iff this DType refers to the same type as `other`."""
if other is None:
return False
if type(other) != DType: # pylint: disable=unidiomatic-typecheck
try:
other = as_dtype(other)
except TypeError:
return False
return self._type_enum == other._type_enum # pylint: disable=protected-access
def __ne__(self, other):
"""Returns True iff self != other."""
return not self.__eq__(other)
# "If a class that overrides __eq__() needs to retain the implementation
# of __hash__() from a parent class, the interpreter must be told this
# explicitly by setting __hash__ = <ParentClass>.__hash__."
# TODO(slebedev): Remove once __eq__ and __ne__ are implemented in C++.
__hash__ = _dtypes.DType.__hash__
def __reduce__(self):
return as_dtype, (self.name,)
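# Pickling therefore round-trips through the dtype's name:
# pickle.loads(pickle.dumps(float32)) returns the interned float32 object.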
# pylint: enable=slots-on-old-class
# Define data type range of numpy dtype
dtype_range = {
np.bool_: (False, True),
np.bool8: (False, True),
np.uint8: (0, 255),
np.uint16: (0, 65535),
np.int8: (-128, 127),
np.int16: (-32768, 32767),
np.int64: (-2**63, 2**63 - 1),
np.uint64: (0, 2**64 - 1),
np.int32: (-2**31, 2**31 - 1),
np.uint32: (0, 2**32 - 1),
np.float32: (-1, 1),
np.float64: (-1, 1)
}
# Define standard wrappers for the types_pb2.DataType enum.
resource = DType(types_pb2.DT_RESOURCE)
tf_export("dtypes.resource", "resource").export_constant(__name__, "resource")
variant = DType(types_pb2.DT_VARIANT)
tf_export("dtypes.variant", "variant").export_constant(__name__, "variant")
float16 = DType(types_pb2.DT_HALF)
tf_export("dtypes.float16", "float16").export_constant(__name__, "float16")
half = float16
tf_export("dtypes.half", "half").export_constant(__name__, "half")
float32 = DType(types_pb2.DT_FLOAT)
tf_export("dtypes.float32", "float32").export_constant(__name__, "float32")
float64 = DType(types_pb2.DT_DOUBLE)
tf_export("dtypes.float64", "float64").export_constant(__name__, "float64")
double = float64
tf_export("dtypes.double", "double").export_constant(__name__, "double")
int32 = DType(types_pb2.DT_INT32)
tf_export("dtypes.int32", "int32").export_constant(__name__, "int32")
uint8 = DType(types_pb2.DT_UINT8)
tf_export("dtypes.uint8", "uint8").export_constant(__name__, "uint8")
uint16 = DType(types_pb2.DT_UINT16)
tf_export("dtypes.uint16", "uint16").export_constant(__name__, "uint16")
uint32 = DType(types_pb2.DT_UINT32)
tf_export("dtypes.uint32", "uint32").export_constant(__name__, "uint32")
uint64 = DType(types_pb2.DT_UINT64)
tf_export("dtypes.uint64", "uint64").export_constant(__name__, "uint64")
int16 = DType(types_pb2.DT_INT16)
tf_export("dtypes.int16", "int16").export_constant(__name__, "int16")
int8 = DType(types_pb2.DT_INT8)
tf_export("dtypes.int8", "int8").export_constant(__name__, "int8")
string = DType(types_pb2.DT_STRING)
tf_export("dtypes.string", "string").export_constant(__name__, "string")
complex64 = DType(types_pb2.DT_COMPLEX64)
tf_export("dtypes.complex64",
"complex64").export_constant(__name__, "complex64")
complex128 = DType(types_pb2.DT_COMPLEX128)
tf_export("dtypes.complex128",
"complex128").export_constant(__name__, "complex128")
int64 = DType(types_pb2.DT_INT64)
tf_export("dtypes.int64", "int64").export_constant(__name__, "int64")
bool = DType(types_pb2.DT_BOOL) # pylint: disable=redefined-builtin
tf_export("dtypes.bool", "bool").export_constant(__name__, "bool")
qint8 = DType(types_pb2.DT_QINT8)
tf_export("dtypes.qint8", "qint8").export_constant(__name__, "qint8")
quint8 = DType(types_pb2.DT_QUINT8)
tf_export("dtypes.quint8", "quint8").export_constant(__name__, "quint8")
qint16 = DType(types_pb2.DT_QINT16)
tf_export("dtypes.qint16", "qint16").export_constant(__name__, "qint16")
quint16 = DType(types_pb2.DT_QUINT16)
tf_export("dtypes.quint16", "quint16").export_constant(__name__, "quint16")
qint32 = DType(types_pb2.DT_QINT32)
tf_export("dtypes.qint32", "qint32").export_constant(__name__, "qint32")
resource_ref = DType(types_pb2.DT_RESOURCE_REF)
variant_ref = DType(types_pb2.DT_VARIANT_REF)
bfloat16 = DType(types_pb2.DT_BFLOAT16)
tf_export("dtypes.bfloat16", "bfloat16").export_constant(__name__, "bfloat16")
float16_ref = DType(types_pb2.DT_HALF_REF)
half_ref = float16_ref
float32_ref = DType(types_pb2.DT_FLOAT_REF)
float64_ref = DType(types_pb2.DT_DOUBLE_REF)
double_ref = float64_ref
int32_ref = DType(types_pb2.DT_INT32_REF)
uint32_ref = DType(types_pb2.DT_UINT32_REF)
uint8_ref = DType(types_pb2.DT_UINT8_REF)
uint16_ref = DType(types_pb2.DT_UINT16_REF)
int16_ref = DType(types_pb2.DT_INT16_REF)
int8_ref = DType(types_pb2.DT_INT8_REF)
string_ref = DType(types_pb2.DT_STRING_REF)
complex64_ref = DType(types_pb2.DT_COMPLEX64_REF)
complex128_ref = DType(types_pb2.DT_COMPLEX128_REF)
int64_ref = DType(types_pb2.DT_INT64_REF)
uint64_ref = DType(types_pb2.DT_UINT64_REF)
bool_ref = DType(types_pb2.DT_BOOL_REF)
qint8_ref = DType(types_pb2.DT_QINT8_REF)
quint8_ref = DType(types_pb2.DT_QUINT8_REF)
qint16_ref = DType(types_pb2.DT_QINT16_REF)
quint16_ref = DType(types_pb2.DT_QUINT16_REF)
qint32_ref = DType(types_pb2.DT_QINT32_REF)
bfloat16_ref = DType(types_pb2.DT_BFLOAT16_REF)
# Maintain an intern table so that we don't have to create a large
# number of small objects.
_INTERN_TABLE = {
types_pb2.DT_HALF: float16,
types_pb2.DT_FLOAT: float32,
types_pb2.DT_DOUBLE: float64,
types_pb2.DT_INT32: int32,
types_pb2.DT_UINT8: uint8,
types_pb2.DT_UINT16: uint16,
types_pb2.DT_UINT32: uint32,
types_pb2.DT_UINT64: uint64,
types_pb2.DT_INT16: int16,
types_pb2.DT_INT8: int8,
types_pb2.DT_STRING: string,
types_pb2.DT_COMPLEX64: complex64,
types_pb2.DT_COMPLEX128: complex128,
types_pb2.DT_INT64: int64,
types_pb2.DT_BOOL: bool,
types_pb2.DT_QINT8: qint8,
types_pb2.DT_QUINT8: quint8,
types_pb2.DT_QINT16: qint16,
types_pb2.DT_QUINT16: quint16,
types_pb2.DT_QINT32: qint32,
types_pb2.DT_BFLOAT16: bfloat16,
types_pb2.DT_RESOURCE: resource,
types_pb2.DT_VARIANT: variant,
types_pb2.DT_HALF_REF: float16_ref,
types_pb2.DT_FLOAT_REF: float32_ref,
types_pb2.DT_DOUBLE_REF: float64_ref,
types_pb2.DT_INT32_REF: int32_ref,
types_pb2.DT_UINT32_REF: uint32_ref,
types_pb2.DT_UINT8_REF: uint8_ref,
types_pb2.DT_UINT16_REF: uint16_ref,
types_pb2.DT_INT16_REF: int16_ref,
types_pb2.DT_INT8_REF: int8_ref,
types_pb2.DT_STRING_REF: string_ref,
types_pb2.DT_COMPLEX64_REF: complex64_ref,
types_pb2.DT_COMPLEX128_REF: complex128_ref,
types_pb2.DT_INT64_REF: int64_ref,
types_pb2.DT_UINT64_REF: uint64_ref,
types_pb2.DT_BOOL_REF: bool_ref,
types_pb2.DT_QINT8_REF: qint8_ref,
types_pb2.DT_QUINT8_REF: quint8_ref,
types_pb2.DT_QINT16_REF: qint16_ref,
types_pb2.DT_QUINT16_REF: quint16_ref,
types_pb2.DT_QINT32_REF: qint32_ref,
types_pb2.DT_BFLOAT16_REF: bfloat16_ref,
types_pb2.DT_RESOURCE_REF: resource_ref,
types_pb2.DT_VARIANT_REF: variant_ref,
}
# Standard mappings between types_pb2.DataType values and string names.
_TYPE_TO_STRING = {
types_pb2.DT_HALF: "float16",
types_pb2.DT_FLOAT: "float32",
types_pb2.DT_DOUBLE: "float64",
types_pb2.DT_INT32: "int32",
types_pb2.DT_UINT8: "uint8",
types_pb2.DT_UINT16: "uint16",
types_pb2.DT_UINT32: "uint32",
types_pb2.DT_UINT64: "uint64",
types_pb2.DT_INT16: "int16",
types_pb2.DT_INT8: "int8",
types_pb2.DT_STRING: "string",
types_pb2.DT_COMPLEX64: "complex64",
types_pb2.DT_COMPLEX128: "complex128",
types_pb2.DT_INT64: "int64",
types_pb2.DT_BOOL: "bool",
types_pb2.DT_QINT8: "qint8",
types_pb2.DT_QUINT8: "quint8",
types_pb2.DT_QINT16: "qint16",
types_pb2.DT_QUINT16: "quint16",
types_pb2.DT_QINT32: "qint32",
types_pb2.DT_BFLOAT16: "bfloat16",
types_pb2.DT_RESOURCE: "resource",
types_pb2.DT_VARIANT: "variant",
types_pb2.DT_HALF_REF: "float16_ref",
types_pb2.DT_FLOAT_REF: "float32_ref",
types_pb2.DT_DOUBLE_REF: "float64_ref",
types_pb2.DT_INT32_REF: "int32_ref",
types_pb2.DT_UINT32_REF: "uint32_ref",
types_pb2.DT_UINT8_REF: "uint8_ref",
types_pb2.DT_UINT16_REF: "uint16_ref",
types_pb2.DT_INT16_REF: "int16_ref",
types_pb2.DT_INT8_REF: "int8_ref",
types_pb2.DT_STRING_REF: "string_ref",
types_pb2.DT_COMPLEX64_REF: "complex64_ref",
types_pb2.DT_COMPLEX128_REF: "complex128_ref",
types_pb2.DT_INT64_REF: "int64_ref",
types_pb2.DT_UINT64_REF: "uint64_ref",
types_pb2.DT_BOOL_REF: "bool_ref",
types_pb2.DT_QINT8_REF: "qint8_ref",
types_pb2.DT_QUINT8_REF: "quint8_ref",
types_pb2.DT_QINT16_REF: "qint16_ref",
types_pb2.DT_QUINT16_REF: "quint16_ref",
types_pb2.DT_QINT32_REF: "qint32_ref",
types_pb2.DT_BFLOAT16_REF: "bfloat16_ref",
types_pb2.DT_RESOURCE_REF: "resource_ref",
types_pb2.DT_VARIANT_REF: "variant_ref",
}
_STRING_TO_TF = {
value: _INTERN_TABLE[key] for key, value in _TYPE_TO_STRING.items()
}
# Add non-canonical aliases.
_STRING_TO_TF["half"] = float16
_STRING_TO_TF["half_ref"] = float16_ref
_STRING_TO_TF["float"] = float32
_STRING_TO_TF["float_ref"] = float32_ref
_STRING_TO_TF["double"] = float64
_STRING_TO_TF["double_ref"] = float64_ref
# Numpy representation for quantized dtypes.
#
# These are magic strings that are used in the swig wrapper to identify
# quantized types.
# TODO(mrry,keveman): Investigate Numpy type registration to replace this
# hard-coding of names.
_np_qint8 = np.dtype([("qint8", np.int8)])
_np_quint8 = np.dtype([("quint8", np.uint8)])
_np_qint16 = np.dtype([("qint16", np.int16)])
_np_quint16 = np.dtype([("quint16", np.uint16)])
_np_qint32 = np.dtype([("qint32", np.int32)])
# _np_bfloat16 is defined by a module import.
# Custom struct dtype for directly-fed ResourceHandles of supported type(s).
np_resource = np.dtype([("resource", np.ubyte)])
# Standard mappings between types_pb2.DataType values and numpy.dtypes.
_NP_TO_TF = {
np.float16: float16,
np.float32: float32,
np.float64: float64,
np.int32: int32,
np.int64: int64,
np.uint8: uint8,
np.uint16: uint16,
np.uint32: uint32,
np.uint64: uint64,
np.int16: int16,
np.int8: int8,
np.complex64: complex64,
np.complex128: complex128,
np.object_: string,
np.string_: string,
np.unicode_: string,
np.bool_: bool,
_np_qint8: qint8,
_np_quint8: quint8,
_np_qint16: qint16,
_np_quint16: quint16,
_np_qint32: qint32,
_np_bfloat16: bfloat16,
}
# Map (some) NumPy platform dtypes to TF ones using their fixed-width
# synonyms. Note that platform dtypes are not always simple aliases,
# i.e. reference equality is not guaranteed. See e.g. numpy/numpy#9799.
for pdt in [
np.intc,
np.uintc,
np.int_,
np.uint,
np.longlong,
np.ulonglong,
]:
if pdt not in _NP_TO_TF:
_NP_TO_TF[pdt] = next(
_NP_TO_TF[dt] for dt in _NP_TO_TF if dt == pdt().dtype)
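# For example, on a typical 64-bit Linux build np.int_ shares its dtype with
# np.int64, so it resolves to int64 through the fixed-width entry above.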
TF_VALUE_DTYPES = set(_NP_TO_TF.values())
_TF_TO_NP = {
types_pb2.DT_HALF:
np.float16,
types_pb2.DT_FLOAT:
np.float32,
types_pb2.DT_DOUBLE:
np.float64,
types_pb2.DT_INT32:
np.int32,
types_pb2.DT_UINT8:
np.uint8,
types_pb2.DT_UINT16:
np.uint16,
types_pb2.DT_UINT32:
np.uint32,
types_pb2.DT_UINT64:
np.uint64,
types_pb2.DT_INT16:
np.int16,
types_pb2.DT_INT8:
np.int8,
# NOTE(touts): For strings we use np.object as it supports variable length
# strings.
types_pb2.DT_STRING:
np.object,
types_pb2.DT_COMPLEX64:
np.complex64,
types_pb2.DT_COMPLEX128:
np.complex128,
types_pb2.DT_INT64:
np.int64,
types_pb2.DT_BOOL:
np.bool,
types_pb2.DT_QINT8:
_np_qint8,
types_pb2.DT_QUINT8:
_np_quint8,
types_pb2.DT_QINT16:
_np_qint16,
types_pb2.DT_QUINT16:
_np_quint16,
types_pb2.DT_QINT32:
_np_qint32,
types_pb2.DT_BFLOAT16:
_np_bfloat16,
# Ref types
types_pb2.DT_HALF_REF:
np.float16,
types_pb2.DT_FLOAT_REF:
np.float32,
types_pb2.DT_DOUBLE_REF:
np.float64,
types_pb2.DT_INT32_REF:
np.int32,
types_pb2.DT_UINT32_REF:
np.uint32,
types_pb2.DT_UINT8_REF:
np.uint8,
types_pb2.DT_UINT16_REF:
np.uint16,
types_pb2.DT_INT16_REF:
np.int16,
types_pb2.DT_INT8_REF:
np.int8,
types_pb2.DT_STRING_REF:
np.object,
types_pb2.DT_COMPLEX64_REF:
np.complex64,
types_pb2.DT_COMPLEX128_REF:
np.complex128,
types_pb2.DT_INT64_REF:
np.int64,
types_pb2.DT_UINT64_REF:
np.uint64,
types_pb2.DT_BOOL_REF:
np.bool,
types_pb2.DT_QINT8_REF:
_np_qint8,
types_pb2.DT_QUINT8_REF:
_np_quint8,
types_pb2.DT_QINT16_REF:
_np_qint16,
types_pb2.DT_QUINT16_REF:
_np_quint16,
types_pb2.DT_QINT32_REF:
_np_qint32,
types_pb2.DT_BFLOAT16_REF:
_np_bfloat16,
}
_QUANTIZED_DTYPES_NO_REF = frozenset([qint8, quint8, qint16, quint16, qint32])
_QUANTIZED_DTYPES_REF = frozenset(
[qint8_ref, quint8_ref, qint16_ref, quint16_ref, qint32_ref])
QUANTIZED_DTYPES = _QUANTIZED_DTYPES_REF.union(_QUANTIZED_DTYPES_NO_REF)
tf_export(
"dtypes.QUANTIZED_DTYPES",
v1=["dtypes.QUANTIZED_DTYPES",
"QUANTIZED_DTYPES"]).export_constant(__name__, "QUANTIZED_DTYPES")
_PYTHON_TO_TF = {
builtins.float: float32,
builtins.bool: bool,
builtins.object: string
}
_ANY_TO_TF = {}
_ANY_TO_TF.update(_INTERN_TABLE)
_ANY_TO_TF.update(_STRING_TO_TF)
_ANY_TO_TF.update(_PYTHON_TO_TF)
_ANY_TO_TF.update(_NP_TO_TF)
# Ensure no collisions.
assert len(_ANY_TO_TF) == sum(
len(d) for d in [_INTERN_TABLE, _STRING_TO_TF, _PYTHON_TO_TF, _NP_TO_TF])
@tf_export("dtypes.as_dtype", "as_dtype")
def as_dtype(type_value):
"""Converts the given `type_value` to a `DType`.
Args:
type_value: A value that can be converted to a `tf.DType` object. This may
currently be a `tf.DType` object, a [`DataType`
enum](https://www.tensorflow.org/code/tensorflow/core/framework/types.proto),
a string type name, or a `numpy.dtype`.
Returns:
A `DType` corresponding to `type_value`.
Raises:
TypeError: If `type_value` cannot be converted to a `DType`.
"""
if isinstance(type_value, DType):
return type_value
if isinstance(type_value, np.dtype):
try:
return _NP_TO_TF[type_value.type]
except KeyError:
pass
try:
return _ANY_TO_TF[type_value]
except KeyError:
pass
raise TypeError("Cannot convert value %r to a TensorFlow DType." %
(type_value,))
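# Illustrative conversions (each returns the interned DType object):
#   as_dtype("float32") is float32        # True
#   as_dtype(np.int64) is int64           # True
#   as_dtype(types_pb2.DT_BOOL) is bool   # True (the module-level bool above)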
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.exceptions import DeserializationError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class VirtualNetworksOperations(object):
"""VirtualNetworksOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2016-12-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-12-01"
self.config = config
def _delete_initial(
self, resource_group_name, virtual_network_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, virtual_network_name, custom_headers=None, raw=False, **operation_config):
"""Deletes the specified virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns None or
ClientRawResponse if raw=true
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def get(
self, resource_group_name, virtual_network_name, expand=None, custom_headers=None, raw=False, **operation_config):
"""Gets the specified virtual network by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param expand: Expands referenced resources.
:type expand: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: VirtualNetwork or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2016_12_01.models.VirtualNetwork or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetwork', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def _create_or_update_initial(
self, resource_group_name, virtual_network_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'VirtualNetwork')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetwork', response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetwork', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, virtual_network_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a virtual network in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param parameters: Parameters supplied to the create or update virtual
network operation
:type parameters:
~azure.mgmt.network.v2016_12_01.models.VirtualNetwork
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns
VirtualNetwork or ClientRawResponse if raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2016_12_01.models.VirtualNetwork]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = self._deserialize('VirtualNetwork', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def list_all(
self, custom_headers=None, raw=False, **operation_config):
"""Gets all virtual networks in a subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of VirtualNetwork
:rtype:
~azure.mgmt.network.v2016_12_01.models.VirtualNetworkPaged[~azure.mgmt.network.v2016_12_01.models.VirtualNetwork]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualNetworks'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.VirtualNetworkPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.VirtualNetworkPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all virtual networks in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of VirtualNetwork
:rtype:
~azure.mgmt.network.v2016_12_01.models.VirtualNetworkPaged[~azure.mgmt.network.v2016_12_01.models.VirtualNetwork]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.VirtualNetworkPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.VirtualNetworkPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def check_ip_address_availability(
self, resource_group_name, virtual_network_name, ip_address=None, custom_headers=None, raw=False, **operation_config):
"""Checks whether a private IP address is available for use.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param ip_address: The private IP address to be verified.
:type ip_address: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: IPAddressAvailabilityResult or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.network.v2016_12_01.models.IPAddressAvailabilityResult or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/CheckIPAddressAvailability'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if ip_address is not None:
query_parameters['ipAddress'] = self._serialize.query("ip_address", ip_address, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('IPAddressAvailabilityResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
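# Usage sketch (hypothetical setup; this operations class is normally reached
# through a NetworkManagementClient from azure.mgmt.network rather than
# instantiated directly):
#
#   network_client = NetworkManagementClient(credentials, subscription_id)
#   for vnet in network_client.virtual_networks.list('my-resource-group'):
#       print(vnet.name)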
|
|
from collections import namedtuple
from itertools import groupby
from pathlib import Path
from typing import List, Optional
from urllib.parse import unquote, urlparse
from loguru import logger
from flexget import plugin
from flexget.components.ftp.sftp_client import SftpClient, SftpError
from flexget.config_schema import one_or_more
from flexget.entry import Entry
from flexget.event import event
from flexget.task import Task
from flexget.utils.template import RenderError, render_from_entry
logger = logger.bind(name='sftp')
# Constants
DEFAULT_SFTP_PORT: int = 22
DEFAULT_CONNECT_TRIES: int = 3
DEFAULT_SOCKET_TIMEOUT_SEC: int = 15
SftpConfig = namedtuple(
'SftpConfig', ['host', 'port', 'username', 'password', 'private_key', 'private_key_pass']
)
class SftpList:
"""
Generate entries from SFTP. This plugin requires the pysftp Python module and its dependencies.
Configuration:
host: Host to connect to.
port: Port the remote SSH server is listening on (default 22).
username: Username to log in as.
password: The password to use. Optional if a private key is provided.
private_key: Path to the private key (if any) to log into the SSH server.
private_key_pass: Password for the private key (if needed).
recursive: Indicates whether the listing should be recursive.
get_size: Indicates whether to calculate the size of the remote file/directory.
WARNING: This can be very slow when computing the size of directories!
files_only: Indicates whether to omit directories from the results.
dirs: List of directories to generate entries from.
socket_timeout_sec: Socket timeout in seconds (default 15 seconds).
connection_tries: Number of times to attempt to connect before failing (default 3).
Example:
sftp_list:
host: example.com
username: Username
private_key: /Users/username/.ssh/id_rsa
recursive: False
get_size: True
files_only: False
dirs:
- '/path/to/list/'
- '/another/path/'
"""
schema = {
'type': 'object',
'properties': {
'host': {'type': 'string'},
'username': {'type': 'string'},
'password': {'type': 'string'},
'port': {'type': 'integer', 'default': DEFAULT_SFTP_PORT},
'files_only': {'type': 'boolean', 'default': True},
'recursive': {'type': 'boolean', 'default': False},
'get_size': {'type': 'boolean', 'default': True},
'private_key': {'type': 'string'},
'private_key_pass': {'type': 'string'},
'dirs': one_or_more({'type': 'string'}),
'socket_timeout_sec': {'type': 'integer', 'default': DEFAULT_SOCKET_TIMEOUT_SEC},
'connection_tries': {'type': 'integer', 'default': DEFAULT_CONNECT_TRIES},
},
'additionalProperties': False,
'required': ['host', 'username'],
}
@staticmethod
def prepare_config(config: dict) -> dict:
"""
Sets defaults for the provided configuration
"""
config.setdefault('password', None)
config.setdefault('private_key', None)
config.setdefault('private_key_pass', None)
config.setdefault('dirs', ['.'])
return config
@classmethod
def on_task_input(cls, task: Task, config: dict) -> List[Entry]:
"""
Input task handler
"""
config = cls.prepare_config(config)
files_only: bool = config['files_only']
recursive: bool = config['recursive']
get_size: bool = config['get_size']
socket_timeout_sec: int = config['socket_timeout_sec']
connection_tries: int = config['connection_tries']
directories: List[str] = []
if isinstance(config['dirs'], list):
directories.extend(config['dirs'])
else:
directories.append(config['dirs'])
sftp_config: SftpConfig = task_config_to_sftp_config(config)
sftp: SftpClient = sftp_connect(sftp_config, socket_timeout_sec, connection_tries)
entries: List[Entry] = sftp.list_directories(directories, recursive, get_size, files_only)
sftp.close()
return entries
class SftpDownload:
"""
Download files from a SFTP server. This plugin requires the pysftp Python module and its
dependencies.
Configuration:
to: Destination path; supports Jinja2 templating on the input entry. Fields such
as series_name must be populated prior to input into this plugin using
metainfo_series or similar.
recursive: Indicates whether to download directory contents recursively.
        delete_origin: Indicates whether to delete the remote file(s) once they've been downloaded.
        socket_timeout_sec: Socket timeout in seconds (default 15 seconds).
connection_tries: Number of times to attempt to connect before failing (default 3).
Example:
sftp_download:
to: '/Volumes/External/Drobo/downloads'
delete_origin: False
"""
schema = {
'type': 'object',
'properties': {
'to': {'type': 'string', 'format': 'path'},
'recursive': {'type': 'boolean', 'default': True},
'delete_origin': {'type': 'boolean', 'default': False},
'socket_timeout_sec': {'type': 'integer', 'default': DEFAULT_SOCKET_TIMEOUT_SEC},
'connection_tries': {'type': 'integer', 'default': DEFAULT_CONNECT_TRIES},
},
'required': ['to'],
'additionalProperties': False,
}
@classmethod
def download_entry(cls, entry: Entry, config: dict, sftp: SftpClient) -> None:
"""
Downloads the file(s) described in entry
"""
path: str = unquote(urlparse(entry['url']).path) or '.'
delete_origin: bool = config['delete_origin']
recursive: bool = config['recursive']
to: str = config['to']
try:
sftp.download(path, to, recursive, delete_origin)
except SftpError as e:
            entry.fail(str(e))  # type: ignore
@classmethod
def on_task_output(cls, task: Task, config: dict) -> None:
"""Register this as an output plugin"""
@classmethod
def on_task_download(cls, task: Task, config: dict) -> None:
"""
Task handler for sftp_download plugin
"""
socket_timeout_sec: int = config['socket_timeout_sec']
connection_tries: int = config['connection_tries']
# Download entries by host so we can reuse the connection
for sftp_config, entries in groupby(task.accepted, cls._get_sftp_config):
if not sftp_config:
continue
error_message: Optional[str] = None
sftp: Optional[SftpClient] = None
try:
sftp = sftp_connect(sftp_config, socket_timeout_sec, connection_tries)
except Exception as e:
error_message = f'Failed to connect to {sftp_config.host} ({e})'
for entry in entries:
if sftp:
cls.download_entry(entry, config, sftp)
else:
entry.fail(error_message)
if sftp:
sftp.close()
@classmethod
def _get_sftp_config(cls, entry: Entry):
"""
        Parses an entry's URL and returns a hashable SftpConfig, or None if the URL scheme is not sftp
"""
# parse url
parsed = urlparse(entry['url'])
host: str = parsed.hostname
username: str = parsed.username
password: str = parsed.password
port: int = parsed.port or DEFAULT_SFTP_PORT
# get private key info if it exists
private_key: str = entry.get('private_key')
private_key_pass: str = entry.get('private_key_pass')
config: Optional[SftpConfig] = None
if parsed.scheme == 'sftp':
config = SftpConfig(host, port, username, password, private_key, private_key_pass)
else:
logger.warning('Scheme does not match SFTP: {}', entry['url'])
return config
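    # Illustrative example for _get_sftp_config (hypothetical URL): an entry
    # whose URL is 'sftp://user:secret@example.com:2222/remote/file.mkv'
    # yields SftpConfig(host='example.com', port=2222, username='user',
    # password='secret', private_key=None, private_key_pass=None); any
    # non-sftp scheme yields None and logs a warning.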
class SftpUpload:
"""
Upload files to a SFTP server. This plugin requires the pysftp Python module and its
dependencies.
    Configuration:
        host: Host to connect to.
port: Port the remote SSH server is listening on. Defaults to port 22.
username: Username to log in as
password: The password to use. Optional if a private key is provided.
private_key: Path to the private key (if any) to log into the SSH server
private_key_pass: Password for the private key (if needed)
to: Path to upload the file to; supports Jinja2 templating on the input entry. Fields such
as series_name must be populated prior to input into this plugin using
metainfo_series or similar.
delete_origin: Indicates whether to delete the original file after a successful
upload.
        socket_timeout_sec: Socket timeout in seconds (default 15 seconds).
connection_tries: Number of times to attempt to connect before failing (default 3).
Example:
        sftp_upload:
host: example.com
username: Username
private_key: /Users/username/.ssh/id_rsa
to: /TV/{{series_name}}/Series {{series_season}}
delete_origin: False
"""
schema = {
'type': 'object',
'properties': {
'host': {'type': 'string'},
'username': {'type': 'string'},
'password': {'type': 'string'},
'port': {'type': 'integer', 'default': DEFAULT_SFTP_PORT},
'private_key': {'type': 'string'},
'private_key_pass': {'type': 'string'},
'to': {'type': 'string'},
'delete_origin': {'type': 'boolean', 'default': False},
'socket_timeout_sec': {'type': 'integer', 'default': DEFAULT_SOCKET_TIMEOUT_SEC},
'connection_tries': {'type': 'integer', 'default': DEFAULT_CONNECT_TRIES},
},
        'additionalProperties': False,
'required': ['host', 'username'],
}
@staticmethod
def prepare_config(config: dict) -> dict:
"""
Sets defaults for the provided configuration
"""
config.setdefault('password', None)
config.setdefault('private_key', None)
config.setdefault('private_key_pass', None)
config.setdefault('to', None)
return config
@classmethod
def handle_entry(cls, entry: Entry, sftp: SftpClient, config: dict):
to: str = config['to']
location: str = entry['location']
delete_origin: bool = config['delete_origin']
if to:
try:
to = render_from_entry(to, entry)
except RenderError as e:
logger.error('Could not render path: {}', to)
entry.fail(str(e)) # type: ignore
return
try:
sftp.upload(location, to)
except SftpError as e:
entry.fail(str(e)) # type: ignore
if delete_origin and Path(location).is_file():
try:
Path(location).unlink()
except Exception as e:
logger.warning('Failed to delete file {} ({})', location, e) # type: ignore
@classmethod
def on_task_output(cls, task: Task, config: dict) -> None:
"""Uploads accepted entries to the specified SFTP server."""
config = cls.prepare_config(config)
socket_timeout_sec: int = config['socket_timeout_sec']
connection_tries: int = config['connection_tries']
sftp_config: SftpConfig = task_config_to_sftp_config(config)
sftp = sftp_connect(sftp_config, socket_timeout_sec, connection_tries)
for entry in task.accepted:
if sftp:
logger.debug('Uploading file: {}', entry['location'])
cls.handle_entry(entry, sftp, config)
else:
entry.fail('SFTP connection failed.')
def task_config_to_sftp_config(config: dict) -> SftpConfig:
"""
    Creates an SftpConfig from a FlexGet task config dict
"""
    host: str = config['host']
port: int = config['port']
username: str = config['username']
password: str = config['password']
private_key: str = config['private_key']
private_key_pass: str = config['private_key_pass']
return SftpConfig(host, port, username, password, private_key, private_key_pass)
def sftp_connect(
sftp_config: SftpConfig, socket_timeout_sec: int, connection_tries: int
) -> SftpClient:
sftp_client: SftpClient = SftpClient(
host=sftp_config.host,
username=sftp_config.username,
private_key=sftp_config.private_key,
password=sftp_config.password,
port=sftp_config.port,
private_key_pass=sftp_config.private_key_pass,
connection_tries=connection_tries,
)
sftp_client.set_socket_timeout(socket_timeout_sec)
return sftp_client
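# Minimal usage sketch (assuming a config dict already run through
# prepare_config, with the schema defaults shown above):
#
#   sftp_config = task_config_to_sftp_config(config)
#   client = sftp_connect(sftp_config, config['socket_timeout_sec'],
#                         config['connection_tries'])
#   try:
#       ...  # e.g. client.list_directories(...), client.download(...)
#   finally:
#       client.close()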
@event('plugin.register')
def register_plugin() -> None:
plugin.register(SftpList, 'sftp_list', api_ver=2)
plugin.register(SftpDownload, 'sftp_download', api_ver=2)
plugin.register(SftpUpload, 'sftp_upload', api_ver=2)
|
|
"""Test the Z-Wave over MQTT config flow."""
from unittest.mock import patch
import pytest
from homeassistant import config_entries
from homeassistant.components.hassio.handler import HassioAPIError
from homeassistant.components.ozw.config_flow import TITLE
from homeassistant.components.ozw.const import DOMAIN
from tests.common import MockConfigEntry
ADDON_DISCOVERY_INFO = {
"addon": "OpenZWave",
"host": "host1",
"port": 1234,
"username": "name1",
"password": "pass1",
}
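# Illustrative discovery payload: the keys mirror what Supervisor discovery
# sends for the OpenZWave add-on; the values are fakes consumed by the
# SOURCE_HASSIO flow tests below.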
@pytest.fixture(name="supervisor")
def mock_supervisor_fixture():
"""Mock Supervisor."""
with patch("homeassistant.components.hassio.is_hassio", return_value=True):
yield
@pytest.fixture(name="addon_info")
def mock_addon_info():
"""Mock Supervisor add-on info."""
with patch("homeassistant.components.hassio.async_get_addon_info") as addon_info:
addon_info.return_value = {}
yield addon_info
@pytest.fixture(name="addon_running")
def mock_addon_running(addon_info):
"""Mock add-on already running."""
addon_info.return_value["state"] = "started"
return addon_info
@pytest.fixture(name="addon_installed")
def mock_addon_installed(addon_info):
"""Mock add-on already installed but not running."""
addon_info.return_value["state"] = "stopped"
addon_info.return_value["version"] = "1.0"
return addon_info
@pytest.fixture(name="addon_options")
def mock_addon_options(addon_info):
"""Mock add-on options."""
addon_info.return_value["options"] = {}
return addon_info.return_value["options"]
@pytest.fixture(name="set_addon_options")
def mock_set_addon_options():
"""Mock set add-on options."""
with patch(
"homeassistant.components.hassio.async_set_addon_options"
) as set_options:
yield set_options
@pytest.fixture(name="install_addon")
def mock_install_addon():
"""Mock install add-on."""
with patch("homeassistant.components.hassio.async_install_addon") as install_addon:
yield install_addon
@pytest.fixture(name="start_addon")
def mock_start_addon():
"""Mock start add-on."""
with patch("homeassistant.components.hassio.async_start_addon") as start_addon:
yield start_addon
async def test_user_not_supervisor_create_entry(hass, mqtt):
"""Test the user step creates an entry not on Supervisor."""
with patch(
"homeassistant.components.ozw.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == TITLE
assert result["data"] == {
"usb_path": None,
"network_key": None,
"use_addon": False,
"integration_created_addon": False,
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_mqtt_not_setup(hass):
"""Test that mqtt is required."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "abort"
assert result["reason"] == "mqtt_required"
async def test_one_instance_allowed(hass):
"""Test that only one instance is allowed."""
entry = MockConfigEntry(domain=DOMAIN, data={}, title=TITLE)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "abort"
assert result["reason"] == "single_instance_allowed"
async def test_not_addon(hass, supervisor, mqtt):
"""Test opting out of add-on on Supervisor."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.ozw.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"use_addon": False}
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == TITLE
assert result["data"] == {
"usb_path": None,
"network_key": None,
"use_addon": False,
"integration_created_addon": False,
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_addon_running(hass, supervisor, addon_running, addon_options):
"""Test add-on already running on Supervisor."""
addon_options["device"] = "/test"
addon_options["network_key"] = "abc123"
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.ozw.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"use_addon": True}
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == TITLE
assert result["data"] == {
"usb_path": "/test",
"network_key": "abc123",
"use_addon": True,
"integration_created_addon": False,
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_addon_info_failure(hass, supervisor, addon_info):
"""Test add-on info failure."""
addon_info.side_effect = HassioAPIError()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"use_addon": True}
)
assert result["type"] == "abort"
assert result["reason"] == "addon_info_failed"
async def test_addon_installed(
hass, supervisor, addon_installed, addon_options, set_addon_options, start_addon
):
"""Test add-on already installed but not running on Supervisor."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"use_addon": True}
)
with patch(
"homeassistant.components.ozw.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"usb_path": "/test", "network_key": "abc123"}
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == TITLE
assert result["data"] == {
"usb_path": "/test",
"network_key": "abc123",
"use_addon": True,
"integration_created_addon": False,
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_set_addon_config_failure(
hass, supervisor, addon_installed, addon_options, set_addon_options
):
"""Test add-on set config failure."""
set_addon_options.side_effect = HassioAPIError()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"use_addon": True}
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"usb_path": "/test", "network_key": "abc123"}
)
assert result["type"] == "abort"
assert result["reason"] == "addon_set_config_failed"
async def test_start_addon_failure(
hass, supervisor, addon_installed, addon_options, set_addon_options, start_addon
):
"""Test add-on start failure."""
start_addon.side_effect = HassioAPIError()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"use_addon": True}
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"usb_path": "/test", "network_key": "abc123"}
)
assert result["type"] == "form"
assert result["errors"] == {"base": "addon_start_failed"}
async def test_addon_not_installed(
hass,
supervisor,
addon_installed,
install_addon,
addon_options,
set_addon_options,
start_addon,
):
"""Test add-on not installed."""
addon_installed.return_value["version"] = None
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"use_addon": True}
)
assert result["type"] == "progress"
# Make sure the flow continues when the progress task is done.
await hass.async_block_till_done()
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "form"
assert result["step_id"] == "start_addon"
with patch(
"homeassistant.components.ozw.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"usb_path": "/test", "network_key": "abc123"}
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == TITLE
assert result["data"] == {
"usb_path": "/test",
"network_key": "abc123",
"use_addon": True,
"integration_created_addon": True,
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_install_addon_failure(hass, supervisor, addon_installed, install_addon):
"""Test add-on install failure."""
addon_installed.return_value["version"] = None
install_addon.side_effect = HassioAPIError()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"use_addon": True}
)
assert result["type"] == "progress"
# Make sure the flow continues when the progress task is done.
await hass.async_block_till_done()
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "abort"
assert result["reason"] == "addon_install_failed"
async def test_supervisor_discovery(hass, supervisor, addon_running, addon_options):
"""Test flow started from Supervisor discovery."""
addon_options["device"] = "/test"
addon_options["network_key"] = "abc123"
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_HASSIO},
data=ADDON_DISCOVERY_INFO,
)
with patch(
"homeassistant.components.ozw.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == TITLE
assert result["data"] == {
"usb_path": "/test",
"network_key": "abc123",
"use_addon": True,
"integration_created_addon": False,
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_clean_discovery_on_user_create(
hass, supervisor, addon_running, addon_options
):
"""Test discovery flow is cleaned up when a user flow is finished."""
addon_options["device"] = "/test"
addon_options["network_key"] = "abc123"
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_HASSIO},
data=ADDON_DISCOVERY_INFO,
)
assert result["type"] == "form"
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.ozw.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"use_addon": False}
)
await hass.async_block_till_done()
assert len(hass.config_entries.flow.async_progress()) == 0
assert result["type"] == "create_entry"
assert result["title"] == TITLE
assert result["data"] == {
"usb_path": None,
"network_key": None,
"use_addon": False,
"integration_created_addon": False,
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_abort_discovery_with_user_flow(
hass, supervisor, addon_running, addon_options
):
"""Test discovery flow is aborted if a user flow is in progress."""
await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_HASSIO},
data=ADDON_DISCOVERY_INFO,
)
assert result["type"] == "abort"
assert result["reason"] == "already_in_progress"
assert len(hass.config_entries.flow.async_progress()) == 1
async def test_abort_discovery_with_existing_entry(
hass, supervisor, addon_running, addon_options
):
"""Test discovery flow is aborted if an entry already exists."""
entry = MockConfigEntry(domain=DOMAIN, data={}, title=TITLE, unique_id=DOMAIN)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_HASSIO},
data=ADDON_DISCOVERY_INFO,
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_discovery_addon_not_running(
hass, supervisor, addon_installed, addon_options, set_addon_options, start_addon
):
"""Test discovery with add-on already installed but not running."""
addon_options["device"] = None
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_HASSIO},
data=ADDON_DISCOVERY_INFO,
)
assert result["step_id"] == "hassio_confirm"
assert result["type"] == "form"
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["step_id"] == "start_addon"
assert result["type"] == "form"
async def test_discovery_addon_not_installed(
hass, supervisor, addon_installed, install_addon, addon_options
):
"""Test discovery with add-on not installed."""
addon_installed.return_value["version"] = None
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_HASSIO},
data=ADDON_DISCOVERY_INFO,
)
assert result["step_id"] == "hassio_confirm"
assert result["type"] == "form"
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["step_id"] == "install_addon"
assert result["type"] == "progress"
await hass.async_block_till_done()
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "form"
assert result["step_id"] == "start_addon"
|
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from testrunner import testhelp
from conary_test import rephelp
from conary_test import dbstoretest
from conary.repository import errors
from conary.repository.netrepos import netauth
from conary.repository.netrepos.auth_tokens import AuthToken
from conary.repository.netrepos.trovestore import TroveStore
from conary.server import schema
from conary import sqlite3
from conary import versions
from conary.deps import deps
class NetAuthTest(dbstoretest.DBStoreTestBase):
def _setupDB(self):
db = self.getDB()
schema.createSchema(db)
schema.setupTempTables(db)
return db
def _addUserRole(self, na, username, password):
na.addRole(username)
na.addUser(username, password)
na.addRoleMember(username, username)
def testManageAcls(self):
db = self._setupDB()
ts = TroveStore(db)
na = netauth.NetworkAuthorization(db, "conary.rpath.com")
authToken = ("testuser", "testpass", [ (None, None) ], None )
self._addUserRole(na, "testuser", "testpass")
na.addAcl("testuser", None, None, write = True)
## TODO Test the trove/label aspects of ACL management
na.deleteAcl("testuser", None, None)
#If the delete above failed, this will throw an exception
na.addAcl("testuser", None, None)
assert(na.authCheck(authToken, admin=False) == True)
assert(na.authCheck(authToken, admin=True) == False)
        # Now give the user admin rights
na.setAdmin("testuser", True)
assert(na.authCheck(authToken, admin=True) == True)
def testNetAuth(self):
db = self._setupDB()
ts = TroveStore(db)
na = netauth.NetworkAuthorization(db, "conary.rpath.com")
self._addUserRole(na, "testuser", "testpass")
na.addAcl("testuser", None, None, write = True, remove = True)
self._addUserRole(na, "luser", "luserpass")
na.addAcl("luser", None, None)
self._addUserRole(na, "root", "rootpass")
na.addAcl("root", None, None, write = True, remove = True)
na.setAdmin("root", True)
authToken = ("testuser", "testpass", [ (None, None) ], None )
badAuthToken = ("testuser", "testPass", [ (None, None) ], None )
luserToken = ("luser", "luserpass", [ (None, None) ], None )
badLuserToken = ("luser", "luserfoo", [ (None, None) ], None )
rootToken = ("root", "rootpass", [ (None, None) ], None )
assert(na.check(authToken, write=False) != False)
assert(na.check(authToken, write=True) != False)
assert(na.check(badAuthToken, write=False) != True)
assert(na.check(badAuthToken, write=True) != True)
assert(na.check(luserToken, write=False) != False)
assert(na.check(luserToken, write=True) != True)
assert(na.authCheck(rootToken, admin=True) != False)
assert(na.authCheck(luserToken, admin=True) != True)
assert(na.check(rootToken, remove=True) != False)
assert(na.check(luserToken, remove=True) != True)
assert(na.check(authToken, remove=True) != False)
assert(na.check(authToken) != False)
assert(na.check(badAuthToken) != True)
# Shim clients, with a ValidPasswordToken
entitlements = [(None, None)]
authShim = ("testuser", netauth.ValidPasswordToken, entitlements, None)
luserShim = ("luser", netauth.ValidPasswordToken, entitlements, None)
badShim = ("nobody", netauth.ValidPasswordToken, entitlements, None)
assert na.check(authShim, write=False)
assert na.check(authShim, write=True)
assert na.check(authShim, remove=True)
assert na.check(luserShim, write=False)
assert not na.check(luserShim, write=True)
assert not na.check(luserShim, remove=True)
assert not na.check(badShim, write=False)
# Shim clients, with a ValidUser()
authRoleShim = (netauth.ValidUser('testuser'), None, entitlements, None)
luserRoleShim = (netauth.ValidUser('luser'), None, entitlements, None)
badRoleShim = (netauth.ValidUser('nobody'), None, entitlements, None)
assert na.check(authRoleShim, write=False)
assert na.check(authRoleShim, write=True)
assert na.check(authRoleShim, remove=True)
assert na.check(luserRoleShim, write=False)
assert not na.check(luserRoleShim, write=True)
assert not na.check(luserRoleShim, remove=True)
assert not na.check(badRoleShim, write=False)
try:
na.addAcl("testuser", None, None, write = True)
except errors.PermissionAlreadyExists:
pass
else:
self.fail("PermissionAlreadyExists exception expected")
try:
na.addUser("teStUseR", 'TeStPaSs')
except (errors.UserAlreadyExists, errors.RoleAlreadyExists):
pass
else:
self.fail("UserAlreadyExists or RoleAlreadyExists exception expected")
try:
na.addRole("lUseR")
except errors.RoleAlreadyExists:
pass
else:
self.fail("RoleAlreadyExists exception expected")
def testDeleteAuth(self):
db = self._setupDB()
ts = TroveStore(db)
na = netauth.NetworkAuthorization(db, "conary.rpath.com")
# schema creation creates these for us - we need to get rid of them for this test
for user in na.userAuth.getUserList():
na.deleteUserByName(user)
for group in na.getRoleList():
na.deleteRoleByName(group)
self._addUserRole(na, "deluser1", "delpass")
na.addAcl("deluser1", None, None)
self._addUserRole(na, "deluser2", "delpass")
na.addAcl("deluser2", None, None, write = True)
na.addRoleMember("deluser2", "deluser1")
# Create another group and add deluser1 to said group
na.addRole('delgroup1')
na.addAcl('delgroup1', None, None, write = True)
na.setAdmin('delgroup1', True)
na.addRoleMember("delgroup1", "deluser1")
na.addRole('delgroup2')
na.addAcl('delgroup2', None, None, write = True)
na.setAdmin('delgroup2', True)
na.addRoleMember("delgroup2", "deluser1")
na.addRoleMember("delgroup2", "deluser2")
self.assertEqual(na.getRoles('deluser1'),
['deluser1', 'deluser2', 'delgroup1', 'delgroup2'])
self.assertEqual(na.getRoles('deluser2'),
['deluser2', 'delgroup2'])
# Delete user2 and see if group2 still lists user2
na.deleteUserByName('deluser2')
self.assertEqual(list(na.getRoleMembers('delgroup2')),
['deluser1'] )
self.assertEqual(list(na.userAuth.getRolesByUser('deluser1')),
[ 'deluser1', 'deluser2', 'delgroup1', 'delgroup2' ])
na.deleteRole('delgroup1')
self.assertEqual(list(na.userAuth.getRolesByUser('deluser1')),
[ 'deluser1', 'deluser2', 'delgroup2' ])
# because deluser1 will have no acl, it should go too
na.deleteAcl("deluser1", None, None)
na.deleteUserByName('deluser1')
self.assertEqual(list(na.getRoleMembers('delgroup2')), [])
na.deleteRole('delgroup2')
self.assertEqual(na.getRoleList(), ['deluser2'] )
na.deleteRole('deluser2')
try:
na.deleteUserByName("nonexistentUser")
except errors.UserNotFound:
pass
else:
self.fail("UserNotFound exception expected")
#try adding a user to make sure that it happens successfully
self._addUserRole(na, 'user1afterdel', 'testpass')
#delete the group, but not the user
na.deleteRole('user1afterdel')
self.assertEqual(na.getRoleList(), [] )
self.assertEqual(list(na.userAuth.getUserList()),
[('user1afterdel')])
self._addUserRole(na, 'user2afterdel', 'testpass')
na.addAcl("user2afterdel", None, None)
na.deleteUserByName('user1afterdel')
def testChangePassword(self):
db = self._setupDB()
na = netauth.NetworkAuthorization(db, "conary.rpath.com")
self._addUserRole(na, "testuser", "testpass")
na.addAcl("testuser", None, None)
authToken = ("testuser", "testpass", [ (None, None) ], None )
authToken2 = ("testuser", "newpass", [ (None, None) ], None )
assert(na.check(authToken) != False)
na.changePassword("testuser", "newpass")
assert(na.check(authToken) != True)
assert(na.check(authToken2) != False)
def testNetAuthQueries(self):
db = self._setupDB()
na = netauth.NetworkAuthorization(db, "conary.rpath.com")
# schema creation creates these for us - we need to get rid of them for this test
for user in na.userAuth.getUserList():
na.deleteUserByName(user)
for group in na.getRoleList():
na.deleteRoleByName(group)
self._addUserRole(na, "testuser", "testpass")
na.addAcl("testuser", None, "conary.rpath.com@rpl:linux", write = True)
users = na.userAuth.getUserList()
groups = na.getRoleList()
assert(users == [('testuser')])
assert(groups == [('testuser')])
groupsByUser = list(na.userAuth.getRolesByUser(users[0]))
perms = list(na.iterPermsByRole(groupsByUser[0]))
assert(groupsByUser == [('testuser')])
assert(perms == [("conary.rpath.com@rpl:linux", 'ALL', 1, 0)])
dictperms = na.getPermsByRole(groupsByUser[0])
assert perms[0][0] == dictperms[0]['label']
assert perms[0][1] == dictperms[0]['item']
assert perms[0][2] == dictperms[0]['canWrite']
assert perms[0][3] == dictperms[0]['canRemove']
def testAddRole(self):
db = self._setupDB()
na = netauth.NetworkAuthorization(db, "conary.rpath.com")
na.addUser("testuser", "testpass")
na.addRole("testgroup")
na.addRoleMember("testgroup", "testuser")
groupsByUser = list(na.userAuth.getRolesByUser("testuser"))
assert(groupsByUser == [ "testgroup" ])
def testManageRole(self):
db = self._setupDB()
na = netauth.NetworkAuthorization(db, "conary.rpath.com")
self._addUserRole(na, "testuser", "testpass")
self._addUserRole(na, "testuser1", "testpass")
na.addRole("testgroup")
na.renameRole("testuser", "renamedgrp")
assert(list(na.userAuth.getRolesByUser("testuser")) ==
[ "renamedgrp" ])
#Should do nothing, but definitely not throw an exception
na.renameRole("testuser", "renamedgrp")
#This will just change the case of the groupname
na.renameRole("testuser", "reNameDgrp")
try:
na.renameRole("testgroup", "renamedgrp")
except errors.RoleAlreadyExists:
db.rollback()
else:
self.fail("RoleAlreadyExists exception expected")
try:
na.renameRole("testgroup", "reNameDgrp")
except errors.RoleAlreadyExists:
db.rollback()
else:
self.fail("RoleAlreadyExists exception expected")
na.updateRoleMembers("testgroup", [ 'testuser', 'testuser1' ])
assert(list(na.getRoleMembers("testgroup")) ==
['testuser', 'testuser1'])
na.updateRoleMembers("testgroup", [])
assert(list(na.getRoleMembers("testgroup")) == [])
@testhelp.context('entitlements')
def testManageEntitlements(self):
db = self._setupDB()
na = netauth.NetworkAuthorization(db, "conary.rpath.com")
self._addUserRole(na, "normal", "normalpass")
na.addAcl("normal", None, None, write = True)
normalToken = ("normal", "normalpass", [], None )
self._addUserRole(na, "owner", "ownerpass")
na.addAcl("owner", None, None, write = True)
ownerToken = ("owner", "ownerpass", [], None )
self._addUserRole(na, "root", "rootpass")
na.addAcl("root", None, None, write = True)
na.setAdmin("root", True)
rootToken = ("root", "rootpass", [], None )
na.addRole("specialReads")
na.addRole("entOwner")
na.addRoleMember("entOwner", "owner")
self.assertRaises(errors.InsufficientPermission, na.addEntitlementClass,
normalToken, "cust1", "specialReads")
self.assertRaises(errors.InsufficientPermission, na.addEntitlementClass,
(rootToken[0], "foo", [], None ),
"cust1", "specialReads")
self.assertRaises(errors.RoleNotFound, na.addEntitlementClass,
rootToken, "cust1", "unknownRole")
na.addEntitlementClass(rootToken, "cust1", "specialReads")
self.assertRaises(errors.EntitlementClassAlreadyExists, na.addEntitlementClass,
rootToken, "cust1", "specialReads")
self.assertRaises(errors.InsufficientPermission,
na.addEntitlementClassOwner,
normalToken, "specialReads", "cust1")
na.addEntitlementClassOwner(rootToken, "entOwner", "cust1")
self.assertRaises(errors.InsufficientPermission, na.addEntitlementKey,
normalToken, "cust1", "ENTITLEMENT0")
na.addEntitlementKey(rootToken, "cust1", "ENTITLEMENT0")
na.addEntitlementKey(ownerToken, "cust1", "ENTITLEMENT1")
self.assertRaises(errors.EntitlementKeyAlreadyExists, na.addEntitlementKey,
ownerToken, "cust1", "ENTITLEMENT1")
self.assertRaises(errors.InsufficientPermission, na.iterEntitlementKeys,
normalToken, "cust1")
l1 = sorted(na.iterEntitlementKeys(rootToken, "cust1"))
l2 = sorted(na.iterEntitlementKeys(ownerToken, "cust1"))
self.assertEqual(l1, l2)
self.assertEqual(l1, [ 'ENTITLEMENT0', 'ENTITLEMENT1' ])
def testCheckTrove(self):
db = self._setupDB()
na = netauth.NetworkAuthorization(db, "conary.rpath.com")
assert(na.checkTrove("foo", "foo"))
assert(na.checkTrove("^foo$", "foo"))
assert(not na.checkTrove("foo", "barfoo"))
assert(not na.checkTrove("foo", "foobar"))
assert(na.checkTrove("foo.*", "foo:runtime"))
def testNetAuthCheck(self):
db = self._setupDB()
na = netauth.NetworkAuthorization(db, "conary.rpath.com")
self._addUserRole(na, "testuser", "testpass")
na.addAcl("testuser", ".*:runtime", "conary.rpath.com@label:1")
tu = ("testuser", "testpass", [ (None, None) ], None )
v1 = versions.VersionFromString("/conary.rpath.com@label:1/1-1")
v2 = versions.VersionFromString("/conary.rpath.com@label:2/1-1")
v3 = versions.VersionFromString("/conary.rpath.com@label:3/1-1")
assert(na.check(tu, label=v1.branch().label(), trove="foo:runtime"))
assert(not na.check(tu, label=v1.branch().label(), trove="foo:runtime", write=True))
assert(not na.check(tu, label=v1.branch().label(), trove="foo:lib"))
assert(not na.check(tu, label=v1.branch().label(), trove="fooruntime"))
assert(not na.check(tu, label=v2.branch().label(), trove="foo:runtime"))
# try old format of the authTokens
assert(na.check(("testuser", "testpass", None, None),
label=v1.branch().label(), trove="foo:runtime"))
assert(na.check(("testuser", "testpass", [ (None, None) ] ),
label=v1.branch().label(), trove="foo:runtime"))
self._addUserRole(na, "gooduser", "goodpass")
gu = ("gooduser", "goodpass", [ (None, None) ], None )
na.addAcl("gooduser", None, None)
na.addAcl("gooduser", ".*:devel", None, write=True)
na.addAcl("gooduser", ".*:test", "conary.rpath.com@label:1", write=True)
na.addAcl("gooduser", None, "conary.rpath.com@label:2", write=True, remove=True)
assert(na.check(gu, label=v1.branch().label(), trove="foo"))
assert(na.check(gu, label=v2.branch().label(), trove="foo"))
assert(na.check(gu, label=v3.branch().label(), trove="foo"))
assert(na.check(gu, label=v1.branch().label(), trove="foo:devel", write=True))
assert(na.check(gu, label=v2.branch().label(), trove="bar:devel", write=True))
assert(na.check(gu, label=v3.branch().label(), trove="baz:devel", write=True))
assert(na.check(gu, label=v1.branch().label(), trove="foo:test", write=True))
assert(na.check(gu, label=v2.branch().label(), trove="foo:test", write=True))
assert(not na.check(gu, label=v3.branch().label(), trove="foo:test", write=True))
assert(not na.check(gu, label=v1.branch().label(), trove="foo:runtime", write=True))
self.assertEqual(na.commitCheck(gu, [("foo:devel",v1),("bar:devel",v2),("baz:devel",v3)]),
[True]*3)
self.assertEqual(na.commitCheck(gu, [("foo:test",v1),("bar:test",v1),("baz:test",v1)]),
[True]*3)
self.assertEqual(na.commitCheck(gu, [("foo:junk",v2),("bar:test",v2),("baz:lib",v2)]),
[True]*3)
self.assertEqual(na.commitCheck(gu, [("foo:test",v1),("bar:test",v1),("baz:lib",v1)]),
[True,True,False])
self._addUserRole(na, "zerouser", "zeropass")
zu = ("zerouser", "zeropass", [ (None, None) ], None )
assert(not na.check(zu, label=v1.branch().label(), trove="foo"))
assert(not na.check(zu, label=v2.branch().label(), trove="ALL"))
assert(not na.check(zu, label=v3.branch().label(), trove="foo:runtime"))
self.assertEqual(na.commitCheck(zu, [("foo", v1)]), [False])
# Try the shim bypass token
bypass = ("gooduser", netauth.ValidPasswordToken,
[ (None, None) ],None )
self.assertTrue(na.check(bypass,
label=v1.branch().label(), trove="foo"))
self.assertTrue(na.check(bypass,
label=v2.branch().label(), trove="foo:devel", write=True))
self.assertTrue(na.check(bypass,
label=v3.branch().label(), trove="foo:runtime"))
bypass_zero = ("zerouser", netauth.ValidPasswordToken,
[ (None, None) ],None )
self.assertFalse(na.check(bypass_zero,
label=v1.branch().label(), trove="foo"))
self.assertFalse(na.check(bypass_zero,
label=v2.branch().label(), trove="foo:devel", write=True))
self.assertFalse(na.check(bypass_zero,
label=v3.branch().label(), trove="foo:runtime"))
def testInvalidNames(self):
db = self.getDB()
schema.createSchema(db)
na = netauth.NetworkAuthorization(db, "conary.rpath.com")
try:
na.addUser("test user", "testpass")
except errors.InvalidName, e:
self.assertEqual(str(e), 'InvalidName: test user')
try:
na.addRole("test group")
except errors.InvalidName, e:
self.assertEqual(str(e), 'InvalidName: test group')
def testInvalidEntitlementClass(self):
db = self.getDB()
schema.createSchema(db)
na = netauth.NetworkAuthorization(db, "conary.rpath.com")
self._addUserRole(na, "root", "rootpass")
na.setAdmin("root", True)
self.assertRaises(errors.UnknownEntitlementClass,
na.addEntitlementKey,
("root", "rootpass", None, None), "group", "1234")
def testRoleFilters(self):
db = self._setupDB()
na = netauth.NetworkAuthorization(db, "conary.rpath.com")
self._addUserRole(na, "testuser", "testpass")
roleId = na._getRoleIdByName('testuser')
geoip = {
'1.2.3.4': deps.parseFlavor('country.XC'),
'5.6.7.8': deps.parseFlavor('country.XB'),
}
na.geoIp.getFlags = lambda x: geoip[x]
na.setRoleFilters({'testuser': (
deps.parseFlavor('!country.XA,!country.XB'), None)})
self.assertEqual(na.getRoleFilters(['testuser']),
{'testuser': (
deps.parseFlavor('!country.XA,!country.XB'), deps.Flavor())})
token = AuthToken('testuser', 'testpass', remote_ip='1.2.3.4')
self.assertEqual(
na.getAuthRoles(db.cursor(), token), set([roleId]))
token = AuthToken('testuser', 'testpass', remote_ip='5.6.7.8')
level = netauth.log.level
netauth.log.setLevel(100)
try:
self.assertRaises(errors.InsufficientPermission,
na.getAuthRoles, db.cursor(), token)
finally:
netauth.log.setLevel(level)
class NetAuthTest2(rephelp.RepositoryHelper):
def _setupDB(self):
self.openRepository()
db = self.servers.servers[0].reposDB.connect()
schema.setupTempTables(db)
return db
def _addUserRole(self, na, username, password):
na.addRole(username)
na.addUser(username, password)
na.addRoleMember(username, username)
def testBatchCheck(self):
if sqlite3.sqlite_version_info() < (3,7,0):
raise testhelp.SkipTestException("buggy sqlite; use embedded sqlite")
self.openRepository()
db = self._setupDB()
na = netauth.NetworkAuthorization(db, "localhost")
db.transaction()
self._addUserRole(na, "ro", "ro")
na.addAcl("ro", "foo:.*", label=None, write=False)
ro = ("ro", "ro", [ (None, None) ], None )
self._addUserRole(na, "rw", "rw")
na.addAcl("rw", "foo:.*", label=None, write=True)
rw = ("rw", "rw", [ (None, None) ], None )
self._addUserRole(na, "mixed", "mixed")
na.addAcl("mixed", "foo:.*", label=None, write=False)
na.addAcl("mixed", "foo:runtime", label=None, write=True)
mixed = ("mixed", "mixed", [ (None, None) ], None )
db.commit()
fr = self.addComponent("foo:runtime")
fd = self.addComponent("foo:devel")
troveList = [ (fr.getName(), fr.getVersion().asString(), fr.getFlavor().freeze()),
(fd.getName(), fd.getVersion().asString(), fd.getFlavor().freeze())]
self.assertEqual(na.batchCheck(ro, troveList), [True,True])
self.assertEqual(na.batchCheck(ro, troveList, write=True), [False,False])
self.assertEqual(na.batchCheck(rw, troveList), [True,True])
self.assertEqual(na.batchCheck(rw, troveList, write=True), [True,True])
self.assertEqual(na.batchCheck(mixed, troveList), [True,True])
self.assertEqual(na.batchCheck(mixed, troveList, write=True), [True,False])
|
|
# Copyright 2013-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
from __future__ import print_function
import boto
import argparse
import json
import threading
import time
import datetime
from argparse import RawTextHelpFormatter
from random import choice
from string import lowercase
from boto.kinesis.exceptions import ResourceNotFoundException
# To preclude inclusion of aws keys into this code, you may temporarily add
# your AWS credentials to the file:
# ~/.boto
# as follows:
# [Credentials]
# aws_access_key_id = <your access key>
# aws_secret_access_key = <your secret key>
make_string = lambda x: "".join(choice(lowercase) for i in range(x))
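# For example (output varies per call), make_string(8) might return
# 'qjzxkwpa': eight pseudo-randomly chosen lowercase ASCII letters.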
def get_or_create_stream(stream_name, shard_count):
stream = None
try:
stream = kinesis.describe_stream(stream_name)
print (json.dumps(stream, sort_keys=True, indent=2,
separators=(',', ': ')))
except ResourceNotFoundException as rnfe:
while (stream is None) or ('ACTIVE' not in stream['StreamDescription']['StreamStatus']):
if stream is None:
                print('Could not find ACTIVE stream: {0}; trying to create it.'.format(
                    stream_name))
kinesis.create_stream(stream_name, shard_count)
else:
print ("Stream status: %s" % stream['StreamDescription']['StreamStatus'])
time.sleep(1)
stream = kinesis.describe_stream(stream_name)
return stream
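# Illustrative call: get_or_create_stream('my-stream', shard_count=1) polls
# describe_stream() once per second until the stream reports ACTIVE status,
# creating the stream first if the initial describe raised
# ResourceNotFoundException.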
def sum_posts(kinesis_actors):
"""Sum all posts across an array of KinesisPosters
"""
total_records = 0
for actor in kinesis_actors:
total_records += actor.total_records
return total_records
class KinesisPoster(threading.Thread):
"""The Poster thread that repeatedly posts records to shards in a given
Kinesis stream.
"""
def __init__(self, stream_name, partition_key, poster_time=30, quiet=False,
name=None, group=None, filename=None, args=(), kwargs={}):
super(KinesisPoster, self).__init__(name=name, group=group,
args=args, kwargs=kwargs)
self._pending_records = []
self.stream_name = stream_name
self.partition_key = partition_key
self.quiet = quiet
self.default_records = [
make_string(100), make_string(1000), make_string(500),
make_string(5000), make_string(10), make_string(750),
make_string(10), make_string(2000), make_string(500)
]
self.poster_time = poster_time
self.total_records = 0
self.file_contents = None
if filename is not None:
print('~> opening file:{0}'.format(filename))
with open(filename, 'r') as content_file:
self.file_contents = content_file.read(40000)
def add_records(self, records):
""" Add given records to the Poster's pending records list.
"""
print('~> adding records:{0}'.format(records))
        # Treat ``records`` uniformly as a sequence of whole records to queue.
        self._pending_records.extend(records)
def put_all_records(self):
"""Put all pending records in the Kinesis stream."""
precs = self._pending_records
self._pending_records = []
self.put_records(precs)
self.total_records += len(precs)
return len(precs)
def put_file_contents(self):
if self.file_contents:
response = kinesis.put_record(
stream_name=self.stream_name,
data=self.file_contents, partition_key=self.partition_key)
self.total_records += 1
if self.quiet is False:
print ("-= put seqNum:", response['SequenceNumber'])
def put_records(self, records):
"""Put the given records in the Kinesis stream."""
for record in records:
response = kinesis.put_record(
stream_name=self.stream_name,
data=record, partition_key=self.partition_key)
if self.quiet is False:
print ("-= put seqNum:", response['SequenceNumber'])
def run(self):
start = datetime.datetime.now()
finish = start + datetime.timedelta(seconds=self.poster_time)
while finish > datetime.datetime.now():
if self.file_contents:
self.put_file_contents()
else:
self.add_records(self.default_records)
records_put = self.put_all_records()
if self.quiet is False:
print(' Total Records Put:', self.total_records)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='''Create or attach to a Kinesis stream and put records in the stream''',
formatter_class=RawTextHelpFormatter)
parser.add_argument('stream_name',
help='''the name of the Kinesis stream to either connect with or create''')
parser.add_argument('--region', type=str, default='us-east-1',
help='''the name of the Kinesis region to connect with [default: us-east-1]''')
parser.add_argument('--shard_count', type=int, default=1,
help='''the number of shards to create in the stream, if creating [default: 1]''')
parser.add_argument('--partition_key', default='PyKinesisExample',
help='''the partition key to use when communicating records to the
stream [default: 'PyKinesisExample-##']''')
parser.add_argument('--poster_count', type=int, default=2,
                        help='''the number of poster threads [default: 2]''')
parser.add_argument('--poster_time', type=int, default=30,
help='''how many seconds the poster threads should put records into
the stream [default: 30]''')
parser.add_argument('--record_file', type=str, default=None,
help='''the file whose contents to use as a record''')
parser.add_argument('--quiet', action='store_true', default=False,
help='''reduce console output to just initialization info''')
parser.add_argument('--delete_stream', action='store_true', default=False,
help='''delete the Kinesis stream matching the given stream_name''')
parser.add_argument('--describe_only', action='store_true', default=False,
help='''only describe the Kinesis stream matching the given stream_name''')
threads = []
args = parser.parse_args()
kinesis = boto.kinesis.connect_to_region(region_name = args.region)
if (args.delete_stream):
# delete the given Kinesis stream name
kinesis.delete_stream(stream_name=args.stream_name)
else:
start_time = datetime.datetime.now()
if args.describe_only is True:
# describe the given Kinesis stream name
stream = kinesis.describe_stream(args.stream_name)
print (json.dumps(stream, sort_keys=True, indent=2,
separators=(',', ': ')))
else:
stream = get_or_create_stream(args.stream_name, args.shard_count)
# Create a KinesisPoster thread up to the poster_count value
for pid in xrange(args.poster_count):
# create poster name per poster thread
poster_name = 'shard_poster:%s' % pid
# create partition key per poster thread
part_key = args.partition_key + '-' + str(pid)
poster = KinesisPoster(
stream_name=args.stream_name,
partition_key=part_key, # poster's partition key
poster_time=args.poster_time,
name=poster_name, # thread name
filename=args.record_file,
quiet=args.quiet)
poster.daemon = True
threads.append(poster)
print ('starting: ', poster_name)
poster.start()
# Wait for all threads to complete
for t in threads:
t.join()
finish_time = datetime.datetime.now()
duration = (finish_time - start_time).total_seconds()
total_records = sum_posts(threads)
print ("-=> Exiting Poster Main <=-")
print (" Total Records:", total_records)
print (" Total Time:", duration)
print (" Records / sec:", total_records / duration)
|
|
import time
from collections import namedtuple
from datetime import timedelta
import requests
from django.contrib.sessions.models import Session
from django.db import connection
from django.db.models import Count
from django.db.models import Sum
from django.db.utils import OperationalError
from django.utils import timezone
from kolibri.core.analytics import SUPPORTED_OS
from kolibri.core.content.models import ChannelMetadata
from kolibri.core.logger.models import ContentSessionLog
from kolibri.core.logger.models import UserSessionLog
from kolibri.utils.server import NotRunning
from kolibri.utils.server import PID_FILE
try:
import kolibri.utils.pskolibri as psutil
except NotImplementedError:
# This module can't work on this OS
psutil = None
def get_db_info():
"""
Returns information about the sessions and users the current
Kolibri server has in use
"""
# Users information
active_sessions = "unknown"
active_users = active_users_minute = None
try:
connection.ensure_connection()
# Sessions active in the last 10 minutes (includes guest accesses):
active_sessions = str(
Session.objects.filter(expire_date__gte=timezone.now()).count()
)
last_ten_minutes = timezone.now() - timedelta(minutes=10)
last_minute = timezone.now() - timedelta(minutes=1)
# Active logged users:
active_users = str(
UserSessionLog.objects.filter(
last_interaction_timestamp__gte=last_ten_minutes
).count()
)
# Logged users with activity in the last minute:
active_users_minute = str(
UserSessionLog.objects.filter(
last_interaction_timestamp__gte=last_minute
).count()
)
except OperationalError:
print("Database unavailable, impossible to retrieve users and sessions info")
return (active_sessions, active_users, active_users_minute)
def get_channels_usage_info():
"""
Scan the channels Kolibri has installed, getting information on how many times
their resources have been accessed and how long they have been used
:returns: List containing namedtuples, with each channel: id, name, accesses and time spent
"""
channels_info = []
ChannelsInfo = namedtuple("ChannelsInfo", "id name accesses time_spent")
try:
connection.ensure_connection()
channels = ChannelMetadata.objects.values("id", "name")
channel_stats = ContentSessionLog.objects.values("channel_id").annotate(
time_spent=Sum("time_spent"), total=Count("channel_id")
)
for channel in channels:
stats = channel_stats.filter(channel_id=channel["id"])
if stats:
channels_info.append(
ChannelsInfo(
id=channel["id"],
name=channel["name"],
accesses=str(stats[0]["total"]),
time_spent="{:.2f} s".format(stats[0]["time_spent"]),
)
)
else:
channels_info.append(
ChannelsInfo(
id=channel["id"],
name=channel["name"],
accesses="0",
time_spent="0.00 s",
)
)
except OperationalError:
print("Database unavailable, impossible to retrieve channels usage info")
return channels_info
def get_requests_info():
"""
Returns timing information on some Kolibri pages that can be hit without credentials
:returns: tuple of strings containing time in seconds when requesting
- Kolibri homepage
- Kolibri recommended channels
- Kolibri channels list
"""
def format_url(url, base_url):
formatted = "{base_url}{url}&contentCacheKey={cache}".format(
base_url=base_url, url=url, cache=time.time()
)
return formatted
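    # e.g. (illustrative) format_url('/api/x?y=1', 'http://localhost:8080')
    # -> 'http://localhost:8080/api/x?y=1&contentCacheKey=1590000000.0',
    # where the current time.time() acts as a cache buster.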
_, port = get_kolibri_process_info()
if port:
base_url = "http://localhost:{}".format(port)
homepage_time = "{:.2f} s".format(
requests.get(base_url).elapsed.total_seconds()
)
recommended_url = format_url(
"/api/content/contentnode_slim/popular/?include_coach_content=false",
base_url,
)
recommended_time = "{:.2f} s".format(
requests.get(recommended_url).elapsed.total_seconds()
)
channels_url = format_url("/api/content/channel/?available=true", base_url)
channels_time = "{:.2f} s".format(
requests.get(channels_url).elapsed.total_seconds()
)
else:
homepage_time = recommended_time = channels_time = None
return (homepage_time, recommended_time, channels_time)
def get_machine_info():
"""
Gets information on the memory, cpu and processes in the server
:returns: tuple of strings containing cpu percentage, used memory, free memory and number of active processes
"""
if not SUPPORTED_OS:
return (None, None, None, None)
used_cpu = str(psutil.cpu_percent())
used_memory = str(psutil.virtual_memory().used / pow(10, 6)) # In Megabytes
total_memory = str(psutil.virtual_memory().total / pow(10, 6)) # In Megabytes
total_processes = str(len(psutil.pids()))
return (used_cpu, used_memory, total_memory, total_processes)
def get_kolibri_process_info():
"""
Return information on the Kolibri process running in the machine
:returns: tuple of integers containing PID and TCP Port of
the running (if any) Kolibri server in this same machine
"""
kolibri_pid = None
kolibri_port = None
try:
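        # The PID file is expected to hold the server PID on its first line
        # and the TCP port on its second line, hence the two readline() calls.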
with open(PID_FILE, "r") as f:
kolibri_pid = int(f.readline())
kolibri_port = int(f.readline())
except IOError:
pass # Kolibri PID file does not exist
except ValueError:
pass # corrupted Kolibri PID file
return (kolibri_pid, kolibri_port)
def get_kolibri_process_cmd():
"""
Retrieve from the OS the command line executed to run Kolibri server
:returns: tuple with command line and its arguments
"""
if not SUPPORTED_OS:
return None
kolibri_pid, _ = get_kolibri_process_info()
try:
kolibri_proc = psutil.Process(kolibri_pid)
except psutil.NoSuchProcess:
# Kolibri server is not running
raise NotRunning(0)
return kolibri_proc.cmdline()
def get_kolibri_use(development=False):
"""
Gets information on the memory and cpu usage of the current Kolibri process
:returns: tuple of strings containing cpu percentage and virtual memory used (in Mb)
"""
if not SUPPORTED_OS:
return (None, None)
kolibri_mem = kolibri_cpu = "None"
kolibri_pid, _ = get_kolibri_process_info()
if kolibri_pid:
try:
kolibri_proc = psutil.Process(kolibri_pid)
kolibri_mem = str(kolibri_proc.memory_info().rss / pow(10, 6))
kolibri_cpu = str(kolibri_proc.cpu_percent())
except psutil.NoSuchProcess:
# Kolibri server is not running
raise NotRunning(0)
return (kolibri_cpu, kolibri_mem)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Dell Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Rajesh Mohan, Rajesh_Mohan3@Dell.com, DELL Inc.
import mock
from mock import call
from oslo.config import cfg
from neutron.agent.common import config as a_cfg
import neutron.services.firewall.drivers.linux.iptables_fwaas as fwaas
from neutron.tests import base
from neutron.tests.unit import test_api_v2
_uuid = test_api_v2._uuid
FAKE_SRC_PREFIX = '10.0.0.0/24'
FAKE_DST_PREFIX = '20.0.0.0/24'
FAKE_PROTOCOL = 'tcp'
FAKE_SRC_PORT = 5000
FAKE_DST_PORT = 22
FAKE_FW_ID = 'fake-fw-uuid'
class IptablesFwaasTestCase(base.BaseTestCase):
def setUp(self):
super(IptablesFwaasTestCase, self).setUp()
cfg.CONF.register_opts(a_cfg.ROOT_HELPER_OPTS, 'AGENT')
self.utils_exec_p = mock.patch(
'neutron.agent.linux.utils.execute')
self.utils_exec = self.utils_exec_p.start()
self.addCleanup(self.utils_exec_p.stop)
self.iptables_cls_p = mock.patch(
'neutron.agent.linux.iptables_manager.IptablesManager')
self.iptables_cls_p.start()
self.addCleanup(self.iptables_cls_p.stop)
self.firewall = fwaas.IptablesFwaasDriver()
def _fake_rules_v4(self, fwid, apply_list):
rule_list = []
rule1 = {'enabled': True,
'action': 'allow',
'ip_version': 4,
'protocol': 'tcp',
'destination_port': '80',
'source_ip_address': '10.24.4.2'}
rule2 = {'enabled': True,
'action': 'deny',
'ip_version': 4,
'protocol': 'tcp',
'destination_port': '22'}
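        # Chain names are truncated to 11 characters below, mirroring how the
        # fwaas driver derives per-firewall iptables chain names.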
ingress_chain = ('iv4%s' % fwid)[:11]
egress_chain = ('ov4%s' % fwid)[:11]
for router_info_inst in apply_list:
v4filter_inst = router_info_inst.iptables_manager.ipv4['filter']
v4filter_inst.chains.append(ingress_chain)
v4filter_inst.chains.append(egress_chain)
rule_list.append(rule1)
rule_list.append(rule2)
return rule_list
def _fake_firewall_no_rule(self):
rule_list = []
fw_inst = {'id': FAKE_FW_ID,
'admin_state_up': True,
'tenant_id': 'tenant-uuid',
'firewall_rule_list': rule_list}
return fw_inst
def _fake_firewall(self, rule_list):
fw_inst = {'id': FAKE_FW_ID,
'admin_state_up': True,
'tenant_id': 'tenant-uuid',
'firewall_rule_list': rule_list}
return fw_inst
def _fake_firewall_with_admin_down(self, rule_list):
fw_inst = {'id': FAKE_FW_ID,
'admin_state_up': False,
'tenant_id': 'tenant-uuid',
'firewall_rule_list': rule_list}
return fw_inst
def _fake_apply_list(self, router_count=1):
apply_list = []
while router_count > 0:
iptables_inst = mock.Mock()
v4filter_inst = mock.Mock()
v6filter_inst = mock.Mock()
v4filter_inst.chains = []
v6filter_inst.chains = []
iptables_inst.ipv4 = {'filter': v4filter_inst}
iptables_inst.ipv6 = {'filter': v6filter_inst}
router_info_inst = mock.Mock()
router_info_inst.iptables_manager = iptables_inst
apply_list.append(router_info_inst)
router_count -= 1
return apply_list
def _setup_firewall_with_rules(self, func, router_count=1):
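        """Run ``func`` against fake routers and assert the expected iptables calls."""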
apply_list = self._fake_apply_list(router_count=router_count)
rule_list = self._fake_rules_v4(FAKE_FW_ID, apply_list)
firewall = self._fake_firewall(rule_list)
func(apply_list, firewall)
invalid_rule = '-m state --state INVALID -j DROP'
est_rule = '-m state --state ESTABLISHED,RELATED -j ACCEPT'
rule1 = '-p tcp --dport 80 -s 10.24.4.2 -j ACCEPT'
rule2 = '-p tcp --dport 22 -j DROP'
ingress_chain = 'iv4%s' % firewall['id']
egress_chain = 'ov4%s' % firewall['id']
bname = fwaas.iptables_manager.binary_name
ipt_mgr_ichain = '%s-%s' % (bname, ingress_chain[:11])
ipt_mgr_echain = '%s-%s' % (bname, egress_chain[:11])
for router_info_inst in apply_list:
v4filter_inst = router_info_inst.iptables_manager.ipv4['filter']
calls = [call.ensure_remove_chain('iv4fake-fw-uuid'),
call.ensure_remove_chain('ov4fake-fw-uuid'),
call.ensure_remove_chain('fwaas-default-policy'),
call.add_chain('fwaas-default-policy'),
call.add_rule('fwaas-default-policy', '-j DROP'),
call.add_chain(ingress_chain),
call.add_rule(ingress_chain, invalid_rule),
call.add_rule(ingress_chain, est_rule),
call.add_chain(egress_chain),
call.add_rule(egress_chain, invalid_rule),
call.add_rule(egress_chain, est_rule),
call.add_rule(ingress_chain, rule1),
call.add_rule(egress_chain, rule1),
call.add_rule(ingress_chain, rule2),
call.add_rule(egress_chain, rule2),
call.add_rule('FORWARD',
'-o qr-+ -j %s' % ipt_mgr_ichain),
call.add_rule('FORWARD',
'-i qr-+ -j %s' % ipt_mgr_echain),
call.add_rule('FORWARD',
'-o qr-+ -j %s-fwaas-defau' % bname),
call.add_rule('FORWARD',
'-i qr-+ -j %s-fwaas-defau' % bname)]
v4filter_inst.assert_has_calls(calls)
def test_create_firewall_no_rules(self):
apply_list = self._fake_apply_list()
firewall = self._fake_firewall_no_rule()
self.firewall.create_firewall(apply_list, firewall)
invalid_rule = '-m state --state INVALID -j DROP'
est_rule = '-m state --state ESTABLISHED,RELATED -j ACCEPT'
bname = fwaas.iptables_manager.binary_name
for ip_version in (4, 6):
ingress_chain = ('iv%s%s' % (ip_version, firewall['id']))
egress_chain = ('ov%s%s' % (ip_version, firewall['id']))
calls = [call.ensure_remove_chain('iv%sfake-fw-uuid' % ip_version),
call.ensure_remove_chain('ov%sfake-fw-uuid' % ip_version),
call.ensure_remove_chain('fwaas-default-policy'),
call.add_chain('fwaas-default-policy'),
call.add_rule('fwaas-default-policy', '-j DROP'),
call.add_chain(ingress_chain),
call.add_rule(ingress_chain, invalid_rule),
call.add_rule(ingress_chain, est_rule),
call.add_chain(egress_chain),
call.add_rule(egress_chain, invalid_rule),
call.add_rule(egress_chain, est_rule),
call.add_rule('FORWARD',
'-o qr-+ -j %s-fwaas-defau' % bname),
call.add_rule('FORWARD',
'-i qr-+ -j %s-fwaas-defau' % bname)]
if ip_version == 4:
v4filter_inst = apply_list[0].iptables_manager.ipv4['filter']
v4filter_inst.assert_has_calls(calls)
else:
v6filter_inst = apply_list[0].iptables_manager.ipv6['filter']
v6filter_inst.assert_has_calls(calls)
def test_create_firewall_with_rules(self):
self._setup_firewall_with_rules(self.firewall.create_firewall)
def test_create_firewall_with_rules_two_routers(self):
self._setup_firewall_with_rules(self.firewall.create_firewall,
router_count=2)
def test_update_firewall_with_rules(self):
self._setup_firewall_with_rules(self.firewall.update_firewall)
def test_delete_firewall(self):
apply_list = self._fake_apply_list()
firewall = self._fake_firewall_no_rule()
self.firewall.delete_firewall(apply_list, firewall)
ingress_chain = 'iv4%s' % firewall['id']
egress_chain = 'ov4%s' % firewall['id']
calls = [call.ensure_remove_chain(ingress_chain),
call.ensure_remove_chain(egress_chain),
call.ensure_remove_chain('fwaas-default-policy')]
apply_list[0].iptables_manager.ipv4['filter'].assert_has_calls(calls)
def test_create_firewall_with_admin_down(self):
apply_list = self._fake_apply_list()
rule_list = self._fake_rules_v4(FAKE_FW_ID, apply_list)
firewall = self._fake_firewall_with_admin_down(rule_list)
self.firewall.create_firewall(apply_list, firewall)
calls = [call.ensure_remove_chain('iv4fake-fw-uuid'),
call.ensure_remove_chain('ov4fake-fw-uuid'),
call.ensure_remove_chain('fwaas-default-policy'),
call.add_chain('fwaas-default-policy'),
call.add_rule('fwaas-default-policy', '-j DROP')]
apply_list[0].iptables_manager.ipv4['filter'].assert_has_calls(calls)
|
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""GYP backend that generates Eclipse CDT settings files.
This backend DOES NOT generate Eclipse CDT projects. Instead, it generates XML
files that can be imported into an Eclipse CDT project. The XML file contains a
list of include paths and symbols (i.e. defines).
Because a full .cproject definition is not created by this generator, it's not
possible to properly define the include dirs and symbols for each file
individually. Instead, one set of includes/symbols is generated for the entire
project. This works fairly well (and is a vast improvement in general), but may
still result in a few indexer issues here and there.
This generator has no automated tests, so expect it to be broken.
"""
from xml.sax.saxutils import escape
import os.path
import subprocess
import gyp
import gyp.common
import gyp.msvs_emulation
import shlex
generator_wants_static_library_dependencies_adjusted = False
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'PRODUCT_DIR', 'LIB_DIR', 'SHARED_LIB_DIR']:
# Some gyp steps fail if these are empty(!).
generator_default_variables[dirname] = 'dir'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
# Include dirs will occasionally use the SHARED_INTERMEDIATE_DIR variable as
# part of the path when dealing with generated headers. This value will be
# replaced dynamically for each configuration.
generator_default_variables['SHARED_INTERMEDIATE_DIR'] = \
'$SHARED_INTERMEDIATE_DIR'
def CalculateVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
for key, val in generator_flags.items():
default_variables.setdefault(key, val)
flavor = gyp.common.GetFlavor(params)
default_variables.setdefault('OS', flavor)
if flavor == 'win':
# Copy additional generator configuration data from VS, which is shared
# by the Eclipse generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
if generator_flags.get('adjust_static_libraries', False):
global generator_wants_static_library_dependencies_adjusted
generator_wants_static_library_dependencies_adjusted = True
def GetAllIncludeDirectories(target_list, target_dicts,
shared_intermediate_dirs, config_name, params):
"""Calculate the set of include directories to be used.
Returns:
A list including all the include_dir's specified for every target followed
by any include directories that were added as cflag compiler options.
"""
gyp_includes_set = set()
compiler_includes_list = []
flavor = gyp.common.GetFlavor(params)
if flavor == 'win':
generator_flags = params.get('generator_flags', {})
for target_name in target_list:
target = target_dicts[target_name]
if config_name in target['configurations']:
config = target['configurations'][config_name]
# Look for any include dirs that were explicitly added via cflags. This
# may be done in gyp files to force certain includes to come at the end.
# TODO(jgreenwald): Change the gyp files to not abuse cflags for this, and
# remove this.
if flavor == 'win':
msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
cflags = msvs_settings.GetCflags(config_name)
else:
cflags = config['cflags']
for cflag in cflags:
include_dir = ''
if cflag.startswith('-I'):
include_dir = cflag[2:]
        if include_dir and include_dir not in compiler_includes_list:
compiler_includes_list.append(include_dir)
# Find standard gyp include dirs.
      if 'include_dirs' in config:
include_dirs = config['include_dirs']
for shared_intermediate_dir in shared_intermediate_dirs:
for include_dir in include_dirs:
include_dir = include_dir.replace('$SHARED_INTERMEDIATE_DIR',
shared_intermediate_dir)
if not os.path.isabs(include_dir):
base_dir = os.path.dirname(target_name)
include_dir = base_dir + '/' + include_dir
include_dir = os.path.abspath(include_dir)
            if include_dir not in gyp_includes_set:
gyp_includes_set.add(include_dir)
# Generate a list that has all the include dirs.
all_includes_list = list(gyp_includes_set)
all_includes_list.sort()
for compiler_include in compiler_includes_list:
    if compiler_include not in gyp_includes_set:
all_includes_list.append(compiler_include)
# All done.
return all_includes_list
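# Note on ordering: gyp-declared include dirs are deduplicated and sorted,
# while cflag-derived dirs keep their command-line order and are appended at
# the end (some gyp files rely on them coming last).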
def GetCompilerPath(target_list, target_dicts, data):
"""Determine a command that can be used to invoke the compiler.
Returns:
If this is a gyp project that has explicit make settings, try to determine
the compiler from that. Otherwise, see if a compiler was specified via the
CC_target environment variable.
"""
# First, see if the compiler is configured in make's settings.
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings_dict = data[build_file].get('make_global_settings', {})
for key, value in make_global_settings_dict:
if key in ['CC', 'CXX']:
return value
# Check to see if the compiler was specified as an environment variable.
for key in ['CC_target', 'CC', 'CXX']:
compiler = os.environ.get(key)
if compiler:
return compiler
return 'gcc'
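# Usage sketch (hypothetical invocation): running
#   CC_target=arm-linux-gnueabihf-gcc gyp -f eclipse ...
# makes this return the cross-compiler instead of falling back to 'gcc'.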
def GetAllDefines(target_list, target_dicts, data, config_name, params):
"""Calculate the defines for a project.
Returns:
A dict that includes explict defines declared in gyp files along with all of
the default defines that the compiler uses.
"""
# Get defines declared in the gyp files.
all_defines = {}
flavor = gyp.common.GetFlavor(params)
if flavor == 'win':
generator_flags = params.get('generator_flags', {})
for target_name in target_list:
target = target_dicts[target_name]
if flavor == 'win':
msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
extra_defines = msvs_settings.GetComputedDefines(config_name)
else:
extra_defines = []
if config_name in target['configurations']:
config = target['configurations'][config_name]
target_defines = config['defines']
else:
target_defines = []
for define in target_defines + extra_defines:
split_define = define.split('=', 1)
if len(split_define) == 1:
split_define.append('1')
if split_define[0].strip() in all_defines:
# Already defined
continue
all_defines[split_define[0].strip()] = split_define[1].strip()
# Get default compiler defines (if possible).
if flavor == 'win':
return all_defines # Default defines already processed in the loop above.
cc_target = GetCompilerPath(target_list, target_dicts, data)
if cc_target:
command = shlex.split(cc_target)
command.extend(['-E', '-dM', '-'])
cpp_proc = subprocess.Popen(args=command, cwd='.',
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
cpp_output = cpp_proc.communicate()[0]
cpp_lines = cpp_output.split('\n')
for cpp_line in cpp_lines:
if not cpp_line.strip():
continue
cpp_line_parts = cpp_line.split(' ', 2)
key = cpp_line_parts[1]
if len(cpp_line_parts) >= 3:
val = cpp_line_parts[2]
else:
val = '1'
all_defines[key] = val
return all_defines
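# For reference, each line of `cc -E -dM -` has the form "#define NAME VALUE";
# the loop above folds those into all_defines, mapping bare defines to '1'.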
def WriteIncludePaths(out, eclipse_langs, include_dirs):
"""Write the includes section of a CDT settings export file."""
out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' \
'settingswizards.IncludePaths">\n')
out.write(' <language name="holder for library settings"></language>\n')
for lang in eclipse_langs:
out.write(' <language name="%s">\n' % lang)
for include_dir in include_dirs:
out.write(' <includepath workspace_path="false">%s</includepath>\n' %
include_dir)
out.write(' </language>\n')
out.write(' </section>\n')
def WriteMacros(out, eclipse_langs, defines):
"""Write the macros section of a CDT settings export file."""
out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' \
'settingswizards.Macros">\n')
out.write(' <language name="holder for library settings"></language>\n')
for lang in eclipse_langs:
out.write(' <language name="%s">\n' % lang)
    for key in sorted(defines):
out.write(' <macro><name>%s</name><value>%s</value></macro>\n' %
(escape(key), escape(defines[key])))
out.write(' </language>\n')
out.write(' </section>\n')
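# Illustrative sketch of the XML these two writers emit (values hypothetical):
#   <section name="org.eclipse.cdt.internal.ui.wizards.settingswizards.Macros">
#     <language name="GNU C++">
#       <macro><name>DEBUG</name><value>1</value></macro>
#     </language>
#   </section>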
def GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name):
options = params['options']
generator_flags = params.get('generator_flags', {})
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.join(generator_flags.get('output_dir', 'out'),
config_name)
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
# Ninja uses out/Debug/gen while make uses out/Debug/obj/gen as the
# SHARED_INTERMEDIATE_DIR. Include both possible locations.
shared_intermediate_dirs = [os.path.join(toplevel_build, 'obj', 'gen'),
os.path.join(toplevel_build, 'gen')]
out_name = os.path.join(toplevel_build, 'eclipse-cdt-settings.xml')
gyp.common.EnsureDirExists(out_name)
out = open(out_name, 'w')
out.write('<?xml version="1.0" encoding="UTF-8"?>\n')
out.write('<cdtprojectproperties>\n')
eclipse_langs = ['C++ Source File', 'C Source File', 'Assembly Source File',
'GNU C++', 'GNU C', 'Assembly']
include_dirs = GetAllIncludeDirectories(target_list, target_dicts,
shared_intermediate_dirs, config_name,
params)
WriteIncludePaths(out, eclipse_langs, include_dirs)
defines = GetAllDefines(target_list, target_dicts, data, config_name, params)
WriteMacros(out, eclipse_langs, defines)
out.write('</cdtprojectproperties>\n')
out.close()
def GenerateOutput(target_list, target_dicts, data, params):
"""Generate an XML settings file that can be imported into a CDT project."""
if params['options'].generator_output:
    raise NotImplementedError("--generator_output not implemented for eclipse")
user_config = params.get('generator_flags', {}).get('config', None)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data, params,
user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name)
|
|
# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of astroid.
#
# astroid is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# astroid is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with astroid. If not, see <http://www.gnu.org/licenses/>.
"""this module contains a set of functions to handle inference on astroid trees
"""
__doctype__ = "restructuredtext en"
from itertools import chain
from astroid import nodes
from astroid.manager import AstroidManager
from astroid.exceptions import (AstroidError, InferenceError, NoDefault,
NotFoundError, UnresolvableName)
from astroid.bases import (YES, Instance, InferenceContext,
_infer_stmts, copy_context, path_wrapper,
raise_if_nothing_infered)
from astroid.protocols import (
_arguments_infer_argname,
BIN_OP_METHOD, UNARY_OP_METHOD)
MANAGER = AstroidManager()
class CallContext(object):
"""when inferring a function call, this class is used to remember values
    given as arguments
"""
def __init__(self, args, starargs, dstarargs):
self.args = []
self.nargs = {}
for arg in args:
if isinstance(arg, nodes.Keyword):
self.nargs[arg.arg] = arg.value
else:
self.args.append(arg)
self.starargs = starargs
self.dstarargs = dstarargs
def infer_argument(self, funcnode, name, context):
"""infer a function argument value according to the call context"""
# 1. search in named keywords
try:
return self.nargs[name].infer(context)
except KeyError:
# Function.args.args can be None in astroid (means that we don't have
# information on argnames)
argindex = funcnode.args.find_argname(name)[0]
if argindex is not None:
# 2. first argument of instance/class method
if argindex == 0 and funcnode.type in ('method', 'classmethod'):
if context.boundnode is not None:
boundnode = context.boundnode
else:
# XXX can do better ?
boundnode = funcnode.parent.frame()
if funcnode.type == 'method':
if not isinstance(boundnode, Instance):
boundnode = Instance(boundnode)
return iter((boundnode,))
if funcnode.type == 'classmethod':
return iter((boundnode,))
# if we have a method, extract one position
# from the index, so we'll take in account
# the extra parameter represented by `self` or `cls`
if funcnode.type in ('method', 'classmethod'):
argindex -= 1
# 2. search arg index
try:
return self.args[argindex].infer(context)
except IndexError:
pass
# 3. search in *args (.starargs)
if self.starargs is not None:
its = []
for infered in self.starargs.infer(context):
if infered is YES:
its.append((YES,))
continue
try:
its.append(infered.getitem(argindex, context).infer(context))
except (InferenceError, AttributeError):
its.append((YES,))
except (IndexError, TypeError):
continue
if its:
return chain(*its)
# 4. XXX search in **kwargs (.dstarargs)
if self.dstarargs is not None:
its = []
for infered in self.dstarargs.infer(context):
if infered is YES:
its.append((YES,))
continue
try:
its.append(infered.getitem(name, context).infer(context))
except (InferenceError, AttributeError):
its.append((YES,))
except (IndexError, TypeError):
continue
if its:
return chain(*its)
# 5. */** argument, (Tuple or Dict)
            if name == funcnode.args.vararg:
                return iter((nodes.const_factory(()),))
            if name == funcnode.args.kwarg:
                return iter((nodes.const_factory({}),))
# 6. return default value if any
try:
return funcnode.args.default_value(name).infer(context)
except NoDefault:
raise InferenceError(name)
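# Example (a sketch, not from the code above): for `def f(a, *args): ...`
# called as `f(1, x=2)`, infer_argument(f, 'a', ctx) resolves the Const 1 by
# positional lookup, while 'args' falls back to an empty Tuple in step 5.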
# .infer method ###############################################################
def infer_end(self, context=None):
"""inference's end for node such as Module, Class, Function, Const...
"""
yield self
nodes.Module._infer = infer_end
nodes.Class._infer = infer_end
nodes.Function._infer = infer_end
nodes.Lambda._infer = infer_end
nodes.Const._infer = infer_end
nodes.List._infer = infer_end
nodes.Tuple._infer = infer_end
nodes.Dict._infer = infer_end
nodes.Set._infer = infer_end
def _higher_function_scope(node):
""" Search for the first function which encloses the given
scope. This can be used for looking up in that function's
scope, in case looking up in a lower scope for a particular
name fails.
:param node: A scope node.
:returns:
``None``, if no parent function scope was found,
otherwise an instance of :class:`astroid.scoped_nodes.Function`,
which encloses the given node.
"""
current = node
while current.parent and not isinstance(current.parent, nodes.Function):
current = current.parent
if current and current.parent:
return current.parent
def infer_name(self, context=None):
"""infer a Name: use name lookup rules"""
frame, stmts = self.lookup(self.name)
if not stmts:
# Try to see if the name is enclosed in a nested function
# and use the higher (first function) scope for searching.
# TODO: should this be promoted to other nodes as well?
parent_function = _higher_function_scope(self.scope())
if parent_function:
_, stmts = parent_function.lookup(self.name)
if not stmts:
raise UnresolvableName(self.name)
context = context.clone()
context.lookupname = self.name
return _infer_stmts(stmts, context, frame)
nodes.Name._infer = path_wrapper(infer_name)
nodes.AssName.infer_lhs = infer_name # won't work with a path wrapper
def infer_callfunc(self, context=None):
"""infer a CallFunc node by trying to guess what the function returns"""
callcontext = context.clone()
callcontext.callcontext = CallContext(self.args, self.starargs, self.kwargs)
callcontext.boundnode = None
for callee in self.func.infer(context):
if callee is YES:
yield callee
continue
try:
if hasattr(callee, 'infer_call_result'):
for infered in callee.infer_call_result(self, callcontext):
yield infered
except InferenceError:
## XXX log error ?
continue
nodes.CallFunc._infer = path_wrapper(raise_if_nothing_infered(infer_callfunc))
def infer_import(self, context=None, asname=True):
"""infer an Import node: return the imported module/object"""
name = context.lookupname
if name is None:
raise InferenceError()
if asname:
yield self.do_import_module(self.real_name(name))
else:
yield self.do_import_module(name)
nodes.Import._infer = path_wrapper(infer_import)
def infer_name_module(self, name):
context = InferenceContext()
context.lookupname = name
return self.infer(context, asname=False)
nodes.Import.infer_name_module = infer_name_module
def infer_from(self, context=None, asname=True):
"""infer a From nodes: return the imported module/object"""
name = context.lookupname
if name is None:
raise InferenceError()
if asname:
name = self.real_name(name)
module = self.do_import_module()
try:
context = copy_context(context)
context.lookupname = name
return _infer_stmts(module.getattr(name, ignore_locals=module is self.root()), context)
except NotFoundError:
raise InferenceError(name)
nodes.From._infer = path_wrapper(infer_from)
def infer_getattr(self, context=None):
"""infer a Getattr node by using getattr on the associated object"""
for owner in self.expr.infer(context):
if owner is YES:
yield owner
continue
try:
context.boundnode = owner
for obj in owner.igetattr(self.attrname, context):
yield obj
context.boundnode = None
except (NotFoundError, InferenceError):
context.boundnode = None
except AttributeError:
# XXX method / function
context.boundnode = None
nodes.Getattr._infer = path_wrapper(raise_if_nothing_infered(infer_getattr))
nodes.AssAttr.infer_lhs = raise_if_nothing_infered(infer_getattr) # won't work with a path wrapper
def infer_global(self, context=None):
if context.lookupname is None:
raise InferenceError()
try:
return _infer_stmts(self.root().getattr(context.lookupname), context)
except NotFoundError:
raise InferenceError()
nodes.Global._infer = path_wrapper(infer_global)
def infer_subscript(self, context=None):
"""infer simple subscription such as [1,2,3][0] or (1,2,3)[-1]"""
value = next(self.value.infer(context))
if value is YES:
yield YES
return
index = next(self.slice.infer(context))
if index is YES:
yield YES
return
if isinstance(index, nodes.Const):
try:
assigned = value.getitem(index.value, context)
except AttributeError:
raise InferenceError()
except (IndexError, TypeError):
yield YES
return
# Prevent inferring if the infered subscript
# is the same as the original subscripted object.
if self is assigned:
yield YES
return
for infered in assigned.infer(context):
yield infered
else:
raise InferenceError()
nodes.Subscript._infer = path_wrapper(infer_subscript)
nodes.Subscript.infer_lhs = raise_if_nothing_infered(infer_subscript)
def infer_unaryop(self, context=None):
for operand in self.operand.infer(context):
try:
yield operand.infer_unary_op(self.op)
except TypeError:
continue
except AttributeError:
meth = UNARY_OP_METHOD[self.op]
if meth is None:
yield YES
else:
try:
                    # XXX just assume that if the type implements meth, the
                    # returned type will be the same
operand.getattr(meth)
yield operand
except GeneratorExit:
raise
except:
yield YES
nodes.UnaryOp._infer = path_wrapper(infer_unaryop)
def _infer_binop(operator, operand1, operand2, context, failures=None):
if operand1 is YES:
yield operand1
return
try:
for valnode in operand1.infer_binary_op(operator, operand2, context):
yield valnode
except AttributeError:
try:
            # XXX just assume that if the type implements meth, the
            # returned type will be the same
operand1.getattr(BIN_OP_METHOD[operator])
yield operand1
except:
if failures is None:
yield YES
else:
failures.append(operand1)
def infer_binop(self, context=None):
failures = []
for lhs in self.left.infer(context):
for val in _infer_binop(self.op, lhs, self.right, context, failures):
yield val
for lhs in failures:
for rhs in self.right.infer(context):
for val in _infer_binop(self.op, rhs, lhs, context):
yield val
nodes.BinOp._infer = path_wrapper(infer_binop)
def infer_arguments(self, context=None):
name = context.lookupname
if name is None:
raise InferenceError()
return _arguments_infer_argname(self, name, context)
nodes.Arguments._infer = infer_arguments
def infer_ass(self, context=None):
"""infer a AssName/AssAttr: need to inspect the RHS part of the
assign node
"""
stmt = self.statement()
if isinstance(stmt, nodes.AugAssign):
return stmt.infer(context)
stmts = list(self.assigned_stmts(context=context))
return _infer_stmts(stmts, context)
nodes.AssName._infer = path_wrapper(infer_ass)
nodes.AssAttr._infer = path_wrapper(infer_ass)
def infer_augassign(self, context=None):
failures = []
for lhs in self.target.infer_lhs(context):
for val in _infer_binop(self.op, lhs, self.value, context, failures):
yield val
for lhs in failures:
for rhs in self.value.infer(context):
for val in _infer_binop(self.op, rhs, lhs, context):
yield val
nodes.AugAssign._infer = path_wrapper(infer_augassign)
# no infer method on DelName and DelAttr (expected InferenceError)
def infer_empty_node(self, context=None):
if not self.has_underlying_object():
yield YES
else:
try:
for infered in MANAGER.infer_ast_from_something(self.object,
context=context):
yield infered
except AstroidError:
yield YES
nodes.EmptyNode._infer = path_wrapper(infer_empty_node)
def infer_index(self, context=None):
return self.value.infer(context)
nodes.Index._infer = infer_index
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from future import standard_library
standard_library.install_aliases()
try:
import oauth2 as oauth
except ImportError:
oauth = None
import cgi
import logging
import sys
from datetime import datetime
from axes.decorators import axes_dispatch
import django.contrib.auth.views
from django.core.exceptions import SuspiciousOperation
from django.contrib.auth import login, get_backends, authenticate
from django.contrib.sessions.models import Session
from django.http import HttpResponseRedirect
from django.urls import reverse
from hadoop.fs.exceptions import WebHdfsException
from notebook.connectors.base import get_api
from useradmin.models import get_profile, UserProfile, User, Group
from useradmin.views import ensure_home_directory, require_change_password
from desktop.auth import forms as auth_forms
from desktop.auth.backend import OIDCBackend
from desktop.auth.forms import ImpersonationAuthenticationForm, OrganizationUserCreationForm, OrganizationAuthenticationForm
from desktop.conf import OAUTH, ENABLE_ORGANIZATIONS, SESSION
from desktop.lib import fsmanager
from desktop.lib.django_util import render, login_notrequired, JsonResponse
from desktop.lib.exceptions_renderable import PopupException
from desktop.log.access import access_log, access_warn, last_access_map
from desktop.views import samlgroup_check
from desktop.settings import LOAD_BALANCER_COOKIE
if sys.version_info[0] > 2:
from urllib.parse import urlencode as urllib_urlencode
from django.utils.translation import gettext as _
else:
from urllib import urlencode as urllib_urlencode
from django.utils.translation import ugettext as _
LOG = logging.getLogger(__name__)
def get_current_users():
"""Return dictionary of User objects and
a dictionary of the user's IP address and last access time"""
current_users = {}
for session in Session.objects.all():
try:
uid = session.get_decoded().get(django.contrib.auth.SESSION_KEY)
except SuspiciousOperation:
# If secret_key changed, this resolution won't work.
uid = None
if uid is not None:
try:
userobj = User.objects.get(pk=uid)
current_users[userobj] = last_access_map.get(userobj.username, {})
except User.DoesNotExist:
LOG.debug("User with id=%d does not exist" % uid)
return current_users
def first_login_ever():
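  """Return True if any auth backend reports that no user has ever logged in."""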
backends = get_backends()
for backend in backends:
if hasattr(backend, 'is_first_login_ever') and backend.is_first_login_ever():
return True
return False
# We want unique method name to represent HUE-3 vs HUE-4 method call.
# This is required because of reverse('desktop.auth.views.dt_login') below which needs uniqueness to work correctly.
@login_notrequired
def dt_login_old(request, from_modal=False):
return dt_login(request, from_modal)
@login_notrequired
@axes_dispatch
def dt_login(request, from_modal=False):
if request.method == 'GET':
redirect_to = request.GET.get('next', '/')
else:
redirect_to = request.POST.get('next', '/')
is_first_login_ever = first_login_ever()
backend_names = auth_forms.get_backend_names()
is_active_directory = auth_forms.is_active_directory()
is_ldap_option_selected = 'server' not in request.POST or request.POST.get('server') == 'LDAP' or \
request.POST.get('server') in auth_forms.get_ldap_server_keys()
if is_active_directory and is_ldap_option_selected:
UserCreationForm = auth_forms.LdapUserCreationForm
AuthenticationForm = auth_forms.LdapAuthenticationForm
else:
UserCreationForm = auth_forms.UserCreationForm
if 'ImpersonationBackend' in backend_names:
AuthenticationForm = ImpersonationAuthenticationForm
else:
AuthenticationForm = auth_forms.AuthenticationForm
if ENABLE_ORGANIZATIONS.get():
UserCreationForm = OrganizationUserCreationForm
AuthenticationForm = OrganizationAuthenticationForm
if request.method == 'POST':
request.audit = {
'operation': 'USER_LOGIN',
'username': request.POST.get('username', request.POST.get('email'))
}
# For first login, need to validate user info!
    first_user_form = UserCreationForm(data=request.POST) if is_first_login_ever else None
first_user = first_user_form and first_user_form.is_valid()
if first_user or not is_first_login_ever:
auth_form = AuthenticationForm(request=request, data=request.POST)
if auth_form.is_valid():
# Must login by using the AuthenticationForm. It provides 'backend' on the User object.
user = auth_form.get_user()
userprofile = get_profile(user)
login(request, user)
        # If the test cookie exists, it should be deleted
if request.session.test_cookie_worked():
request.session.delete_test_cookie()
if request.fs is None:
request.fs = fsmanager.get_filesystem(request.fs_ref)
try:
ensure_home_directory(request.fs, user)
except (IOError, WebHdfsException) as e:
LOG.error('Could not create home directory at login for %s.' % user, exc_info=e)
if require_change_password(userprofile):
return HttpResponseRedirect('/hue' + reverse('useradmin:useradmin.views.edit_user', kwargs={'username': user.username}))
userprofile.first_login = False
userprofile.last_activity = datetime.now()
if userprofile.creation_method == UserProfile.CreationMethod.EXTERNAL: # This is to fix a bug in Hue 4.3
userprofile.creation_method = UserProfile.CreationMethod.EXTERNAL.name
userprofile.update_data({'auth_backend': user.backend})
userprofile.save()
msg = 'Successful login for user: %s' % user.username
request.audit['operationText'] = msg
access_warn(request, msg)
if from_modal or request.GET.get('fromModal', 'false') == 'true':
return JsonResponse({'auth': True})
else:
return HttpResponseRedirect(redirect_to)
else:
request.audit['allowed'] = False
msg = 'Failed login for user: %s' % request.POST.get('username', request.POST.get('email'))
request.audit['operationText'] = msg
access_warn(request, msg)
if from_modal or request.GET.get('fromModal', 'false') == 'true':
return JsonResponse({'auth': False})
else:
first_user_form = None
auth_form = AuthenticationForm()
# SAML/OIDC user is already authenticated in djangosaml2.views.login
if hasattr(request, 'fs') and (
'KnoxSpnegoDjangoBackend' in backend_names or 'SpnegoDjangoBackend' in backend_names or 'OIDCBackend' in backend_names or
'SAML2Backend' in backend_names
) and request.user.is_authenticated:
if request.fs is None:
request.fs = fsmanager.get_filesystem(request.fs_ref)
try:
ensure_home_directory(request.fs, request.user)
except (IOError, WebHdfsException) as e:
      LOG.error('Could not create home directory for %s user %s.' % ('OIDC' if 'OIDCBackend' in backend_names else 'SAML', request.user), exc_info=e)
if request.user.is_authenticated and not from_modal:
return HttpResponseRedirect(redirect_to)
if is_active_directory and not is_ldap_option_selected and \
request.method == 'POST' and request.user.username != request.POST.get('username'):
# local user login failed, give the right auth_form with 'server' field
auth_form = auth_forms.LdapAuthenticationForm()
  if not from_modal and SESSION.ENABLE_TEST_COOKIE.get():
request.session.set_test_cookie()
if 'SAML2Backend' in backend_names:
request.session['samlgroup_permitted_flag'] = samlgroup_check(request)
renderable_path = 'login.mako'
if from_modal:
renderable_path = 'login_modal.mako'
response = render(renderable_path, request, {
'action': reverse('desktop_auth_views_dt_login'),
'form': first_user_form or auth_form,
'next': redirect_to,
'first_login_ever': is_first_login_ever,
'login_errors': request.method == 'POST',
'backend_names': backend_names,
'active_directory': is_active_directory,
'user': request.user
})
if not request.user.is_authenticated:
response.delete_cookie(LOAD_BALANCER_COOKIE) # Note: might be re-balanced to another Hue on login.
return response
def dt_logout(request, next_page=None):
"""Log out the user"""
username = request.user.get_username()
request.audit = {
'username': username,
'operation': 'USER_LOGOUT',
'operationText': 'Logged out user: %s' % username
}
# Close Impala session on logout
session_app = "impala"
if request.user.has_hue_permission(action='access', app=session_app):
session = {"type": session_app, "sourceMethod": " dt_logout"}
try:
get_api(request, session).close_session(session)
except PopupException as e:
LOG.warning("Error closing %s session: %s" % (session_app, e.message.encode('utf-8')))
except Exception as e:
LOG.warning("Error closing %s session: %s" % (session_app, e))
backends = get_backends()
if backends:
for backend in backends:
if hasattr(backend, 'logout'):
try:
response = backend.logout(request, next_page)
if response:
return response
except Exception as e:
LOG.warning('Potential error on logout for user: %s with exception: %s' % (username, e))
if len([backend for backend in backends if hasattr(backend, 'logout')]) == len(backends):
LOG.warning("Failed to log out from all backends for user: %s" % (username))
response = django.contrib.auth.views.LogoutView.as_view(next_page=next_page)(request)
response.delete_cookie(LOAD_BALANCER_COOKIE)
return response
def profile(request):
"""
Dumps JSON for user-profile information.
"""
return render(None, request, _profile_dict(request.user))
def _profile_dict(user):
return dict(
username=user.username,
first_name=user.first_name,
last_name=user.last_name,
last_login=str(user.last_login), # datetime object needs to be converted
email=user.email
)
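# Example payload (illustrative values only):
#   {"username": "admin", "first_name": "", "last_name": "",
#    "last_login": "2023-01-01 00:00:00", "email": "admin@example.com"}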
# OAuth is based on Twitter as example.
@login_notrequired
def oauth_login(request):
assert oauth is not None
consumer = oauth.Consumer(OAUTH.CONSUMER_KEY.get(), OAUTH.CONSUMER_SECRET.get())
client = oauth.Client(consumer)
resp, content = client.request(
OAUTH.REQUEST_TOKEN_URL.get(), "POST", body=urllib_urlencode({
'oauth_callback': 'http://' + request.get_host() + '/login/oauth_authenticated/'
}))
if resp['status'] != '200':
raise Exception(_("Invalid response from OAuth provider: %s") % resp)
request.session['request_token'] = dict(cgi.parse_qsl(content))
url = "%s?oauth_token=%s" % (OAUTH.AUTHENTICATE_URL.get(), request.session['request_token']['oauth_token'])
return HttpResponseRedirect(url)
@login_notrequired
def oauth_authenticated(request):
consumer = oauth.Consumer(OAUTH.CONSUMER_KEY.get(), OAUTH.CONSUMER_SECRET.get())
token = oauth.Token(request.session['request_token']['oauth_token'], request.session['request_token']['oauth_token_secret'])
client = oauth.Client(consumer, token)
resp, content = client.request(OAUTH.ACCESS_TOKEN_URL.get(), "GET")
if resp['status'] != '200':
raise Exception(_("Invalid response from OAuth provider: %s") % resp)
access_token = dict(cgi.parse_qsl(content))
user = authenticate(access_token=access_token)
login(request, user)
redirect_to = request.GET.get('next', '/')
return HttpResponseRedirect(redirect_to)
@login_notrequired
def oidc_failed(request):
if request.user.is_authenticated:
return HttpResponseRedirect('/')
access_warn(request, "401 Unauthorized by oidc")
return render("oidc_failed.mako", request, dict(uri=request.build_absolute_uri()), status=401)
|
|
from OpenGLCffi.GL import params
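# Stubs for the direct-state-access entry points introduced with OpenGL 4.5
# (ARB_direct_state_access). The @params decorator carries each entry point's
# API and argument-name metadata for the cffi binding layer, hence the
# intentionally empty bodies.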
@params(api='gl', prms=['n', 'ids'])
def glCreateTransformFeedbacks(n, ids):
pass
@params(api='gl', prms=['xfb', 'index', 'buffer'])
def glTransformFeedbackBufferBase(xfb, index, buffer):
pass
@params(api='gl', prms=['xfb', 'index', 'buffer', 'offset', 'size'])
def glTransformFeedbackBufferRange(xfb, index, buffer, offset, size):
pass
@params(api='gl', prms=['xfb', 'pname', 'param'])
def glGetTransformFeedbackiv(xfb, pname, param):
pass
@params(api='gl', prms=['xfb', 'pname', 'index', 'param'])
def glGetTransformFeedbacki_v(xfb, pname, index, param):
pass
@params(api='gl', prms=['xfb', 'pname', 'index', 'param'])
def glGetTransformFeedbacki64_v(xfb, pname, index, param):
pass
@params(api='gl', prms=['n', 'buffers'])
def glCreateBuffers(n, buffers):
pass
@params(api='gl', prms=['buffer', 'size', 'data', 'flags'])
def glNamedBufferStorage(buffer, size, data, flags):
pass
@params(api='gl', prms=['buffer', 'size', 'data', 'usage'])
def glNamedBufferData(buffer, size, data, usage):
pass
@params(api='gl', prms=['buffer', 'offset', 'size', 'data'])
def glNamedBufferSubData(buffer, offset, size, data):
pass
@params(api='gl', prms=['readBuffer', 'writeBuffer', 'readOffset', 'writeOffset', 'size'])
def glCopyNamedBufferSubData(readBuffer, writeBuffer, readOffset, writeOffset, size):
pass
@params(api='gl', prms=['buffer', 'internalformat', 'format', 'type', 'data'])
def glClearNamedBufferData(buffer, internalformat, format, type, data):
pass
@params(api='gl', prms=['buffer', 'internalformat', 'offset', 'size', 'format', 'type', 'data'])
def glClearNamedBufferSubData(buffer, internalformat, offset, size, format, type, data):
pass
@params(api='gl', prms=['buffer', 'access'])
def glMapNamedBuffer(buffer, access):
pass
@params(api='gl', prms=['buffer', 'offset', 'length', 'access'])
def glMapNamedBufferRange(buffer, offset, length, access):
pass
@params(api='gl', prms=['buffer'])
def glUnmapNamedBuffer(buffer):
pass
@params(api='gl', prms=['buffer', 'offset', 'length'])
def glFlushMappedNamedBufferRange(buffer, offset, length):
pass
@params(api='gl', prms=['buffer', 'pname', 'params'])
def glGetNamedBufferParameteriv(buffer, pname, params):
pass
@params(api='gl', prms=['buffer', 'pname', 'params'])
def glGetNamedBufferParameteri64v(buffer, pname, params):
pass
@params(api='gl', prms=['buffer', 'pname', 'params'])
def glGetNamedBufferPointerv(buffer, pname, params):
pass
@params(api='gl', prms=['buffer', 'offset', 'size', 'data'])
def glGetNamedBufferSubData(buffer, offset, size, data):
pass
@params(api='gl', prms=['n', 'framebuffers'])
def glCreateFramebuffers(n, framebuffers):
pass
@params(api='gl', prms=['framebuffer', 'attachment', 'renderbuffertarget', 'renderbuffer'])
def glNamedFramebufferRenderbuffer(framebuffer, attachment, renderbuffertarget, renderbuffer):
pass
@params(api='gl', prms=['framebuffer', 'pname', 'param'])
def glNamedFramebufferParameteri(framebuffer, pname, param):
pass
@params(api='gl', prms=['framebuffer', 'attachment', 'texture', 'level'])
def glNamedFramebufferTexture(framebuffer, attachment, texture, level):
pass
@params(api='gl', prms=['framebuffer', 'attachment', 'texture', 'level', 'layer'])
def glNamedFramebufferTextureLayer(framebuffer, attachment, texture, level, layer):
pass
@params(api='gl', prms=['framebuffer', 'buf'])
def glNamedFramebufferDrawBuffer(framebuffer, buf):
pass
@params(api='gl', prms=['framebuffer', 'n', 'bufs'])
def glNamedFramebufferDrawBuffers(framebuffer, n, bufs):
pass
@params(api='gl', prms=['framebuffer', 'src'])
def glNamedFramebufferReadBuffer(framebuffer, src):
pass
@params(api='gl', prms=['framebuffer', 'numAttachments', 'attachments'])
def glInvalidateNamedFramebufferData(framebuffer, numAttachments, attachments):
pass
@params(api='gl', prms=['framebuffer', 'numAttachments', 'attachments', 'x', 'y', 'width', 'height'])
def glInvalidateNamedFramebufferSubData(framebuffer, numAttachments, attachments, x, y, width, height):
pass
@params(api='gl', prms=['framebuffer', 'buffer', 'drawbuffer', 'value'])
def glClearNamedFramebufferiv(framebuffer, buffer, drawbuffer, value):
pass
@params(api='gl', prms=['framebuffer', 'buffer', 'drawbuffer', 'value'])
def glClearNamedFramebufferuiv(framebuffer, buffer, drawbuffer, value):
pass
@params(api='gl', prms=['framebuffer', 'buffer', 'drawbuffer', 'value'])
def glClearNamedFramebufferfv(framebuffer, buffer, drawbuffer, value):
pass
@params(api='gl', prms=['framebuffer', 'buffer', 'drawbuffer', 'depth', 'stencil'])
def glClearNamedFramebufferfi(framebuffer, buffer, drawbuffer, depth, stencil):
pass
@params(api='gl', prms=['readFramebuffer', 'drawFramebuffer', 'srcX0', 'srcY0', 'srcX1', 'srcY1', 'dstX0', 'dstY0', 'dstX1', 'dstY1', 'mask', 'filter'])
def glBlitNamedFramebuffer(readFramebuffer, drawFramebuffer, srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter):
pass
@params(api='gl', prms=['framebuffer', 'target'])
def glCheckNamedFramebufferStatus(framebuffer, target):
pass
@params(api='gl', prms=['framebuffer', 'pname', 'param'])
def glGetNamedFramebufferParameteriv(framebuffer, pname, param):
pass
@params(api='gl', prms=['framebuffer', 'attachment', 'pname', 'params'])
def glGetNamedFramebufferAttachmentParameteriv(framebuffer, attachment, pname, params):
pass
@params(api='gl', prms=['n', 'renderbuffers'])
def glCreateRenderbuffers(n, renderbuffers):
pass
@params(api='gl', prms=['renderbuffer', 'internalformat', 'width', 'height'])
def glNamedRenderbufferStorage(renderbuffer, internalformat, width, height):
pass
@params(api='gl', prms=['renderbuffer', 'samples', 'internalformat', 'width', 'height'])
def glNamedRenderbufferStorageMultisample(renderbuffer, samples, internalformat, width, height):
pass
@params(api='gl', prms=['renderbuffer', 'pname', 'params'])
def glGetNamedRenderbufferParameteriv(renderbuffer, pname, params):
pass
@params(api='gl', prms=['target', 'n', 'textures'])
def glCreateTextures(target, n, textures):
pass
@params(api='gl', prms=['texture', 'internalformat', 'buffer'])
def glTextureBuffer(texture, internalformat, buffer):
pass
@params(api='gl', prms=['texture', 'internalformat', 'buffer', 'offset', 'size'])
def glTextureBufferRange(texture, internalformat, buffer, offset, size):
pass
@params(api='gl', prms=['texture', 'levels', 'internalformat', 'width'])
def glTextureStorage1D(texture, levels, internalformat, width):
pass
@params(api='gl', prms=['texture', 'levels', 'internalformat', 'width', 'height'])
def glTextureStorage2D(texture, levels, internalformat, width, height):
pass
@params(api='gl', prms=['texture', 'levels', 'internalformat', 'width', 'height', 'depth'])
def glTextureStorage3D(texture, levels, internalformat, width, height, depth):
pass
@params(api='gl', prms=['texture', 'samples', 'internalformat', 'width', 'height', 'fixedsamplelocations'])
def glTextureStorage2DMultisample(texture, samples, internalformat, width, height, fixedsamplelocations):
pass
@params(api='gl', prms=['texture', 'samples', 'internalformat', 'width', 'height', 'depth', 'fixedsamplelocations'])
def glTextureStorage3DMultisample(texture, samples, internalformat, width, height, depth, fixedsamplelocations):
pass
@params(api='gl', prms=['texture', 'level', 'xoffset', 'width', 'format', 'type', 'pixels'])
def glTextureSubImage1D(texture, level, xoffset, width, format, type, pixels):
pass
@params(api='gl', prms=['texture', 'level', 'xoffset', 'yoffset', 'width', 'height', 'format', 'type', 'pixels'])
def glTextureSubImage2D(texture, level, xoffset, yoffset, width, height, format, type, pixels):
pass
@params(api='gl', prms=['texture', 'level', 'xoffset', 'yoffset', 'zoffset', 'width', 'height', 'depth', 'format', 'type', 'pixels'])
def glTextureSubImage3D(texture, level, xoffset, yoffset, zoffset, width, height, depth, format, type, pixels):
pass
@params(api='gl', prms=['texture', 'level', 'xoffset', 'width', 'format', 'imageSize', 'data'])
def glCompressedTextureSubImage1D(texture, level, xoffset, width, format, imageSize, data):
pass
@params(api='gl', prms=['texture', 'level', 'xoffset', 'yoffset', 'width', 'height', 'format', 'imageSize', 'data'])
def glCompressedTextureSubImage2D(texture, level, xoffset, yoffset, width, height, format, imageSize, data):
pass
@params(api='gl', prms=['texture', 'level', 'xoffset', 'yoffset', 'zoffset', 'width', 'height', 'depth', 'format', 'imageSize', 'data'])
def glCompressedTextureSubImage3D(texture, level, xoffset, yoffset, zoffset, width, height, depth, format, imageSize, data):
pass
@params(api='gl', prms=['texture', 'level', 'xoffset', 'x', 'y', 'width'])
def glCopyTextureSubImage1D(texture, level, xoffset, x, y, width):
pass
@params(api='gl', prms=['texture', 'level', 'xoffset', 'yoffset', 'x', 'y', 'width', 'height'])
def glCopyTextureSubImage2D(texture, level, xoffset, yoffset, x, y, width, height):
pass
@params(api='gl', prms=['texture', 'level', 'xoffset', 'yoffset', 'zoffset', 'x', 'y', 'width', 'height'])
def glCopyTextureSubImage3D(texture, level, xoffset, yoffset, zoffset, x, y, width, height):
pass
@params(api='gl', prms=['texture', 'pname', 'param'])
def glTextureParameterf(texture, pname, param):
pass
@params(api='gl', prms=['texture', 'pname', 'param'])
def glTextureParameterfv(texture, pname, param):
pass
@params(api='gl', prms=['texture', 'pname', 'param'])
def glTextureParameteri(texture, pname, param):
pass
@params(api='gl', prms=['texture', 'pname', 'params'])
def glTextureParameterIiv(texture, pname, params):
pass
@params(api='gl', prms=['texture', 'pname', 'params'])
def glTextureParameterIuiv(texture, pname, params):
pass
@params(api='gl', prms=['texture', 'pname', 'param'])
def glTextureParameteriv(texture, pname, param):
pass
@params(api='gl', prms=['texture'])
def glGenerateTextureMipmap(texture):
pass
@params(api='gl', prms=['unit', 'texture'])
def glBindTextureUnit(unit, texture):
pass
@params(api='gl', prms=['texture', 'level', 'format', 'type', 'bufSize', 'pixels'])
def glGetTextureImage(texture, level, format, type, bufSize, pixels):
pass
@params(api='gl', prms=['texture', 'level', 'bufSize', 'pixels'])
def glGetCompressedTextureImage(texture, level, bufSize, pixels):
pass
@params(api='gl', prms=['texture', 'level', 'pname', 'params'])
def glGetTextureLevelParameterfv(texture, level, pname, params):
pass
@params(api='gl', prms=['texture', 'level', 'pname', 'params'])
def glGetTextureLevelParameteriv(texture, level, pname, params):
pass
@params(api='gl', prms=['texture', 'pname', 'params'])
def glGetTextureParameterfv(texture, pname, params):
pass
@params(api='gl', prms=['texture', 'pname', 'params'])
def glGetTextureParameterIiv(texture, pname, params):
pass
@params(api='gl', prms=['texture', 'pname', 'params'])
def glGetTextureParameterIuiv(texture, pname, params):
pass
@params(api='gl', prms=['texture', 'pname', 'params'])
def glGetTextureParameteriv(texture, pname, params):
pass
@params(api='gl', prms=['n', 'arrays'])
def glCreateVertexArrays(n, arrays):
pass
@params(api='gl', prms=['vaobj', 'index'])
def glDisableVertexArrayAttrib(vaobj, index):
pass
@params(api='gl', prms=['vaobj', 'index'])
def glEnableVertexArrayAttrib(vaobj, index):
pass
@params(api='gl', prms=['vaobj', 'buffer'])
def glVertexArrayElementBuffer(vaobj, buffer):
pass
@params(api='gl', prms=['vaobj', 'bindingindex', 'buffer', 'offset', 'stride'])
def glVertexArrayVertexBuffer(vaobj, bindingindex, buffer, offset, stride):
pass
@params(api='gl', prms=['vaobj', 'first', 'count', 'buffers', 'offsets', 'strides'])
def glVertexArrayVertexBuffers(vaobj, first, count, buffers, offsets, strides):
pass
@params(api='gl', prms=['vaobj', 'attribindex', 'bindingindex'])
def glVertexArrayAttribBinding(vaobj, attribindex, bindingindex):
pass
@params(api='gl', prms=['vaobj', 'attribindex', 'size', 'type', 'normalized', 'relativeoffset'])
def glVertexArrayAttribFormat(vaobj, attribindex, size, type, normalized, relativeoffset):
pass
@params(api='gl', prms=['vaobj', 'attribindex', 'size', 'type', 'relativeoffset'])
def glVertexArrayAttribIFormat(vaobj, attribindex, size, type, relativeoffset):
pass
@params(api='gl', prms=['vaobj', 'attribindex', 'size', 'type', 'relativeoffset'])
def glVertexArrayAttribLFormat(vaobj, attribindex, size, type, relativeoffset):
pass
@params(api='gl', prms=['vaobj', 'bindingindex', 'divisor'])
def glVertexArrayBindingDivisor(vaobj, bindingindex, divisor):
pass
@params(api='gl', prms=['vaobj', 'pname', 'param'])
def glGetVertexArrayiv(vaobj, pname, param):
pass
@params(api='gl', prms=['vaobj', 'index', 'pname', 'param'])
def glGetVertexArrayIndexediv(vaobj, index, pname, param):
pass
@params(api='gl', prms=['vaobj', 'index', 'pname', 'param'])
def glGetVertexArrayIndexed64iv(vaobj, index, pname, param):
pass
@params(api='gl', prms=['n', 'samplers'])
def glCreateSamplers(n, samplers):
pass
@params(api='gl', prms=['n', 'pipelines'])
def glCreateProgramPipelines(n, pipelines):
pass
@params(api='gl', prms=['target', 'n', 'ids'])
def glCreateQueries(target, n, ids):
pass
@params(api='gl', prms=['id', 'buffer', 'pname', 'offset'])
def glGetQueryBufferObjecti64v(id, buffer, pname, offset):
pass
@params(api='gl', prms=['id', 'buffer', 'pname', 'offset'])
def glGetQueryBufferObjectiv(id, buffer, pname, offset):
pass
@params(api='gl', prms=['id', 'buffer', 'pname', 'offset'])
def glGetQueryBufferObjectui64v(id, buffer, pname, offset):
pass
@params(api='gl', prms=['id', 'buffer', 'pname', 'offset'])
def glGetQueryBufferObjectuiv(id, buffer, pname, offset):
pass
|
|
from panda3d.core import *
from panda3d.direct import *
from DistributedNPCToonBase import *
from direct.gui.DirectGui import *
from panda3d.core import *
from panda3d.direct import *
import NPCToons
from direct.task.Task import Task
from toontown.toonbase import TTLocalizer
from toontown.pets import PetshopGUI
from toontown.hood import ZoneUtil
from toontown.toontowngui import TeaserPanel
from otp.nametag.NametagConstants import *
class DistributedNPCPetclerk(DistributedNPCToonBase):
def __init__(self, cr):
DistributedNPCToonBase.__init__(self, cr)
self.isLocalToon = 0
self.av = None
self.button = None
self.popupInfo = None
self.petshopGui = None
self.petSeeds = None
self.waitingForPetSeeds = False
self.npcType = 'Pet Clerk'
return
def disable(self):
self.ignoreAll()
taskMgr.remove(self.uniqueName('popupPetshopGUI'))
taskMgr.remove(self.uniqueName('lerpCamera'))
if self.popupInfo:
self.popupInfo.destroy()
self.popupInfo = None
if self.petshopGui:
self.petshopGui.destroy()
self.petshopGui = None
self.av = None
if self.isLocalToon:
base.localAvatar.posCamera(0, 0)
DistributedNPCToonBase.disable(self)
return
def generate(self):
DistributedNPCToonBase.generate(self)
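        # Messenger event names shared with the PetshopGUI panel (wired up in popupPetshopGUI).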
self.eventDict = {}
self.eventDict['guiDone'] = 'guiDone'
self.eventDict['petAdopted'] = 'petAdopted'
self.eventDict['petReturned'] = 'petReturned'
self.eventDict['fishSold'] = 'fishSold'
def getCollSphereRadius(self):
return 4.0
def allowedToEnter(self):
if hasattr(base, 'ttAccess') and base.ttAccess and base.ttAccess.canAccess():
return True
return False
def handleOkTeaser(self):
self.dialog.destroy()
del self.dialog
place = base.cr.playGame.getPlace()
if place:
place.fsm.request('walk')
def handleCollisionSphereEnter(self, collEntry):
if self.allowedToEnter():
base.cr.playGame.getPlace().fsm.request('purchase')
self.sendUpdate('avatarEnter', [])
else:
place = base.cr.playGame.getPlace()
if place:
place.fsm.request('stopped')
self.dialog = TeaserPanel.TeaserPanel(pageName='tricks', doneFunc=self.handleOkTeaser)
def __handleUnexpectedExit(self):
self.notify.warning('unexpected exit')
self.av = None
return
def resetPetshopClerk(self):
self.ignoreAll()
taskMgr.remove(self.uniqueName('popupPetshopGUI'))
taskMgr.remove(self.uniqueName('lerpCamera'))
if self.petshopGui:
self.petshopGui.destroy()
self.petshopGui = None
self.show()
self.startLookAround()
self.detectAvatars()
self.clearMat()
if self.isLocalToon:
self.freeAvatar()
self.petSeeds = None
self.waitingForPetSeeds = False
return Task.done
def ignoreEventDict(self):
for event in self.eventDict.values():
self.ignore(event)
def setPetSeeds(self, petSeeds):
self.petSeeds = petSeeds
if self.waitingForPetSeeds:
self.waitingForPetSeeds = False
self.popupPetshopGUI(None)
return
def setMovie(self, mode, npcId, avId, extraArgs, timestamp):
timeStamp = ClockDelta.globalClockDelta.localElapsedTime(timestamp)
self.remain = NPCToons.CLERK_COUNTDOWN_TIME - timeStamp
self.npcId = npcId
self.isLocalToon = avId == base.localAvatar.doId
if mode == NPCToons.SELL_MOVIE_CLEAR:
return
if mode == NPCToons.SELL_MOVIE_TIMEOUT:
taskMgr.remove(self.uniqueName('lerpCamera'))
if self.isLocalToon:
self.ignoreEventDict()
if self.popupInfo:
self.popupInfo.reparentTo(hidden)
if self.petshopGui:
self.petshopGui.destroy()
self.petshopGui = None
self.setChatAbsolute(TTLocalizer.STOREOWNER_TOOKTOOLONG, CFSpeech | CFTimeout)
self.resetPetshopClerk()
elif mode == NPCToons.SELL_MOVIE_START:
self.av = base.cr.doId2do.get(avId)
if self.av is None:
                self.notify.warning('Avatar %d not found in doId2do' % avId)
return
else:
self.accept(self.av.uniqueName('disable'), self.__handleUnexpectedExit)
self.setupAvatars(self.av)
if self.isLocalToon:
camera.wrtReparentTo(render)
                seq = Sequence(camera.posQuatInterval(1, Vec3(-5, 9, self.getHeight() - 0.5), Vec3(-150, -2, 0), other=self, blendType='easeOut', name=self.uniqueName('lerpCamera')))
seq.start()
if self.isLocalToon:
taskMgr.doMethodLater(1.0, self.popupPetshopGUI, self.uniqueName('popupPetshopGUI'))
elif mode == NPCToons.SELL_MOVIE_COMPLETE:
self.setChatAbsolute(TTLocalizer.STOREOWNER_THANKSFISH_PETSHOP, CFSpeech | CFTimeout)
self.resetPetshopClerk()
elif mode == NPCToons.SELL_MOVIE_PETRETURNED:
self.setChatAbsolute(TTLocalizer.STOREOWNER_PETRETURNED, CFSpeech | CFTimeout)
self.resetPetshopClerk()
elif mode == NPCToons.SELL_MOVIE_PETADOPTED:
self.setChatAbsolute(TTLocalizer.STOREOWNER_PETADOPTED, CFSpeech | CFTimeout)
self.resetPetshopClerk()
elif mode == NPCToons.SELL_MOVIE_PETCANCELED:
self.setChatAbsolute(TTLocalizer.STOREOWNER_PETCANCELED, CFSpeech | CFTimeout)
self.resetPetshopClerk()
elif mode == NPCToons.SELL_MOVIE_TROPHY:
self.av = base.cr.doId2do.get(avId)
if self.av is None:
                self.notify.warning('Avatar %d not found in doId2do' % avId)
return
else:
numFish, totalNumFish = extraArgs
self.setChatAbsolute(TTLocalizer.STOREOWNER_TROPHY % (numFish, totalNumFish), CFSpeech | CFTimeout)
self.resetPetshopClerk()
elif mode == NPCToons.SELL_MOVIE_NOFISH:
self.setChatAbsolute(TTLocalizer.STOREOWNER_NOFISH, CFSpeech | CFTimeout)
self.resetPetshopClerk()
elif mode == NPCToons.SELL_MOVIE_NO_MONEY:
self.notify.warning('SELL_MOVIE_NO_MONEY should not be called')
self.resetPetshopClerk()
return
def __handlePetAdopted(self, whichPet, nameIndex):
if config.GetBool('want-qa-regression', 0):
self.notify.info('QA-REGRESSION: ADOPTADOOLE: Adopt a doodle.')
base.cr.removePetFromFriendsMap()
self.ignore(self.eventDict['petAdopted'])
self.sendUpdate('petAdopted', [whichPet, nameIndex])
def __handlePetReturned(self):
base.cr.removePetFromFriendsMap()
self.ignore(self.eventDict['petReturned'])
self.sendUpdate('petReturned')
def __handleFishSold(self):
self.ignore(self.eventDict['fishSold'])
self.sendUpdate('fishSold')
def __handleGUIDone(self, bTimedOut = False):
self.ignore(self.eventDict['guiDone'])
self.petshopGui.destroy()
self.petshopGui = None
if not bTimedOut:
self.sendUpdate('transactionDone')
return
def popupPetshopGUI(self, task):
if not self.petSeeds:
self.waitingForPetSeeds = True
return
self.setChatAbsolute('', CFSpeech)
self.acceptOnce(self.eventDict['guiDone'], self.__handleGUIDone)
self.acceptOnce(self.eventDict['petAdopted'], self.__handlePetAdopted)
self.acceptOnce(self.eventDict['petReturned'], self.__handlePetReturned)
self.acceptOnce(self.eventDict['fishSold'], self.__handleFishSold)
self.petshopGui = PetshopGUI.PetshopGUI(self.eventDict, self.petSeeds)
|
|
# Copyright 2011 Denali Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from http import HTTPStatus
from unittest import mock
from urllib import parse as urllib
import ddt
from oslo_config import cfg
import pytz
import webob
from cinder.api import common
from cinder.api.v2 import snapshots
from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder.objects import fields
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder.tests.unit.api import fakes
from cinder.tests.unit.api.v2 import fakes as v2_fakes
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit import test
from cinder.tests.unit import utils
from cinder import volume
CONF = cfg.CONF
UUID = '00000000-0000-0000-0000-000000000001'
INVALID_UUID = '00000000-0000-0000-0000-000000000002'
def _get_default_snapshot_param():
return {
'id': UUID,
'volume_id': fake.VOLUME_ID,
'status': fields.SnapshotStatus.AVAILABLE,
'volume_size': 100,
'created_at': None,
'updated_at': None,
'user_id': 'bcb7746c7a41472d88a1ffac89ba6a9b',
'project_id': '7ffe17a15c724e2aa79fc839540aec15',
'display_name': 'Default name',
'display_description': 'Default description',
'deleted': None,
'volume': {'availability_zone': 'test_zone'}
}
def fake_snapshot_delete(self, context, snapshot):
if snapshot['id'] != UUID:
raise exception.SnapshotNotFound(snapshot['id'])
def fake_snapshot_get(self, context, snapshot_id):
if snapshot_id != UUID:
raise exception.SnapshotNotFound(snapshot_id)
param = _get_default_snapshot_param()
return param
def fake_snapshot_get_all(self, context, search_opts=None):
param = _get_default_snapshot_param()
return [param]
@ddt.ddt
class SnapshotApiTest(test.TestCase):
def setUp(self):
super(SnapshotApiTest, self).setUp()
self.mock_object(scheduler_rpcapi.SchedulerAPI, 'create_snapshot')
self.controller = snapshots.SnapshotsController()
self.ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
def test_snapshot_create(self):
volume = utils.create_volume(self.ctx, volume_type_id=None)
snapshot_name = 'Snapshot Test Name'
snapshot_description = 'Snapshot Test Desc'
snapshot = {
"volume_id": volume.id,
"force": False,
"name": snapshot_name,
"description": snapshot_description
}
body = dict(snapshot=snapshot)
req = fakes.HTTPRequest.blank('/v3/snapshots')
resp_dict = self.controller.create(req, body=body)
self.assertIn('snapshot', resp_dict)
self.assertEqual(snapshot_name, resp_dict['snapshot']['name'])
self.assertEqual(snapshot_description,
resp_dict['snapshot']['description'])
self.assertIn('updated_at', resp_dict['snapshot'])
db.volume_destroy(self.ctx, volume.id)
def test_snapshot_create_with_null_validate(self):
volume = utils.create_volume(self.ctx, volume_type_id=None)
snapshot = {
"volume_id": volume.id,
"force": False,
"name": None,
"description": None
}
body = dict(snapshot=snapshot)
req = fakes.HTTPRequest.blank('/v3/snapshots')
resp_dict = self.controller.create(req, body=body)
self.assertIn('snapshot', resp_dict)
self.assertIsNone(resp_dict['snapshot']['name'])
self.assertIsNone(resp_dict['snapshot']['description'])
db.volume_destroy(self.ctx, volume.id)
@ddt.data(True, 'y', 'true', 'yes', '1', 'on')
def test_snapshot_create_force(self, force_param):
volume = utils.create_volume(self.ctx, status='in-use',
volume_type_id=None)
snapshot_name = 'Snapshot Test Name'
snapshot_description = 'Snapshot Test Desc'
snapshot = {
"volume_id": volume.id,
"force": force_param,
"name": snapshot_name,
"description": snapshot_description
}
body = dict(snapshot=snapshot)
req = fakes.HTTPRequest.blank('/v3/snapshots')
resp_dict = self.controller.create(req, body=body)
self.assertIn('snapshot', resp_dict)
self.assertEqual(snapshot_name,
resp_dict['snapshot']['name'])
self.assertEqual(snapshot_description,
resp_dict['snapshot']['description'])
self.assertIn('updated_at', resp_dict['snapshot'])
db.volume_destroy(self.ctx, volume.id)
@ddt.data(False, 'n', 'false', 'No', '0', 'off')
def test_snapshot_create_force_failure(self, force_param):
volume = utils.create_volume(self.ctx, status='in-use',
volume_type_id=None)
snapshot_name = 'Snapshot Test Name'
snapshot_description = 'Snapshot Test Desc'
snapshot = {
"volume_id": volume.id,
"force": force_param,
"name": snapshot_name,
"description": snapshot_description
}
body = dict(snapshot=snapshot)
req = fakes.HTTPRequest.blank('/v3/snapshots')
self.assertRaises(exception.InvalidVolume,
self.controller.create,
req,
body=body)
db.volume_destroy(self.ctx, volume.id)
@ddt.data("**&&^^%%$$##@@", '-1', 2, '01', 'falSE', 0, 'trUE', 1,
"1 ")
def test_snapshot_create_invalid_force_param(self, force_param):
volume = utils.create_volume(self.ctx, status='available',
volume_type_id=None)
snapshot_name = 'Snapshot Test Name'
snapshot_description = 'Snapshot Test Desc'
snapshot = {
"volume_id": volume.id,
"force": force_param,
"name": snapshot_name,
"description": snapshot_description
}
body = dict(snapshot=snapshot)
req = fakes.HTTPRequest.blank('/v3/snapshots')
self.assertRaises(exception.ValidationError,
self.controller.create,
req,
body=body)
db.volume_destroy(self.ctx, volume.id)
def test_snapshot_create_without_volume_id(self):
snapshot_name = 'Snapshot Test Name'
snapshot_description = 'Snapshot Test Desc'
body = {
"snapshot": {
"force": True,
"name": snapshot_name,
"description": snapshot_description
}
}
req = fakes.HTTPRequest.blank('/v3/snapshots')
self.assertRaises(exception.ValidationError,
self.controller.create, req, body=body)
@ddt.data({"snapshot": {"description": " sample description",
"name": " test"}},
{"snapshot": {"description": "sample description ",
"name": "test "}},
{"snapshot": {"description": " sample description ",
"name": " test name "}})
def test_snapshot_create_with_leading_trailing_spaces(self, body):
volume = utils.create_volume(self.ctx, volume_type_id=None)
body['snapshot']['volume_id'] = volume.id
req = fakes.HTTPRequest.blank('/v3/snapshots')
resp_dict = self.controller.create(req, body=body)
self.assertEqual(body['snapshot']['name'].strip(),
resp_dict['snapshot']['name'])
self.assertEqual(body['snapshot']['description'].strip(),
resp_dict['snapshot']['description'])
db.volume_destroy(self.ctx, volume.id)
@mock.patch.object(volume.api.API, "update_snapshot",
side_effect=v2_fakes.fake_snapshot_update)
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
@mock.patch('cinder.db.volume_get')
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_snapshot_update(
self, snapshot_get_by_id, volume_get,
snapshot_metadata_get, update_snapshot):
snapshot = {
'id': UUID,
'volume_id': fake.VOLUME_ID,
'status': fields.SnapshotStatus.AVAILABLE,
'created_at': "2014-01-01 00:00:00",
'volume_size': 100,
'display_name': 'Default name',
'display_description': 'Default description',
'expected_attrs': ['metadata'],
}
snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctx, **snapshot)
fake_volume_obj = fake_volume.fake_volume_obj(self.ctx)
snapshot_get_by_id.return_value = snapshot_obj
volume_get.return_value = fake_volume_obj
updates = {
"name": "Updated Test Name",
}
body = {"snapshot": updates}
req = fakes.HTTPRequest.blank('/v3/snapshots/%s' % UUID)
req.environ['cinder.context'] = self.ctx
res_dict = self.controller.update(req, UUID, body=body)
expected = {
'snapshot': {
'id': UUID,
'volume_id': fake.VOLUME_ID,
'status': fields.SnapshotStatus.AVAILABLE,
'size': 100,
'created_at': datetime.datetime(2014, 1, 1, 0, 0, 0,
tzinfo=pytz.utc),
'updated_at': None,
'name': u'Updated Test Name',
'description': u'Default description',
'metadata': {},
}
}
self.assertEqual(expected, res_dict)
self.assertEqual(2, len(self.notifier.notifications))
@mock.patch.object(volume.api.API, "update_snapshot",
side_effect=v2_fakes.fake_snapshot_update)
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
@mock.patch('cinder.db.volume_get')
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_snapshot_update_with_null_validate(
self, snapshot_get_by_id, volume_get,
snapshot_metadata_get, update_snapshot):
snapshot = {
'id': UUID,
'volume_id': fake.VOLUME_ID,
'status': fields.SnapshotStatus.AVAILABLE,
'created_at': "2014-01-01 00:00:00",
'volume_size': 100,
'name': 'Default name',
'description': 'Default description',
'expected_attrs': ['metadata'],
}
snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctx, **snapshot)
fake_volume_obj = fake_volume.fake_volume_obj(self.ctx)
snapshot_get_by_id.return_value = snapshot_obj
volume_get.return_value = fake_volume_obj
updates = {
"name": None,
"description": None,
}
body = {"snapshot": updates}
req = fakes.HTTPRequest.blank('/v3/snapshots/%s' % UUID)
req.environ['cinder.context'] = self.ctx
res_dict = self.controller.update(req, UUID, body=body)
self.assertEqual(fields.SnapshotStatus.AVAILABLE,
res_dict['snapshot']['status'])
self.assertIsNone(res_dict['snapshot']['name'])
self.assertIsNone(res_dict['snapshot']['description'])
def test_snapshot_update_missing_body(self):
body = {}
req = fakes.HTTPRequest.blank('/v3/snapshots/%s' % UUID)
self.assertRaises(exception.ValidationError,
self.controller.update, req, UUID, body=body)
def test_snapshot_update_invalid_body(self):
body = {'name': 'missing top level snapshot key'}
req = fakes.HTTPRequest.blank('/v3/snapshots/%s' % UUID)
self.assertRaises(exception.ValidationError,
self.controller.update, req, UUID, body=body)
def test_snapshot_update_not_found(self):
self.mock_object(volume.api.API, "get_snapshot", fake_snapshot_get)
updates = {
"name": "Updated Test Name",
}
body = {"snapshot": updates}
req = fakes.HTTPRequest.blank('/v3/snapshots/not-the-uuid')
self.assertRaises(exception.SnapshotNotFound, self.controller.update,
req, 'not-the-uuid', body=body)
@mock.patch.object(volume.api.API, "update_snapshot",
side_effect=v2_fakes.fake_snapshot_update)
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
@mock.patch('cinder.db.volume_get')
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_snapshot_update_with_leading_trailing_spaces(
self, snapshot_get_by_id, volume_get,
snapshot_metadata_get, update_snapshot):
snapshot = {
'id': UUID,
'volume_id': fake.VOLUME_ID,
'status': fields.SnapshotStatus.AVAILABLE,
'created_at': "2018-01-14 00:00:00",
'volume_size': 100,
'display_name': 'Default name',
'display_description': 'Default description',
'expected_attrs': ['metadata'],
}
snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctx, **snapshot)
fake_volume_obj = fake_volume.fake_volume_obj(self.ctx)
snapshot_get_by_id.return_value = snapshot_obj
volume_get.return_value = fake_volume_obj
updates = {
"name": " test ",
"description": " test "
}
body = {"snapshot": updates}
req = fakes.HTTPRequest.blank('/v3/snapshots/%s' % UUID)
req.environ['cinder.context'] = self.ctx
res_dict = self.controller.update(req, UUID, body=body)
expected = {
'snapshot': {
'id': UUID,
'volume_id': fake.VOLUME_ID,
'status': fields.SnapshotStatus.AVAILABLE,
'size': 100,
'created_at': datetime.datetime(2018, 1, 14, 0, 0, 0,
tzinfo=pytz.utc),
'updated_at': None,
'name': u'test',
'description': u'test',
'metadata': {},
}
}
self.assertEqual(expected, res_dict)
self.assertEqual(2, len(self.notifier.notifications))
@mock.patch.object(volume.api.API, "delete_snapshot",
side_effect=v2_fakes.fake_snapshot_update)
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
@mock.patch('cinder.objects.Volume.get_by_id')
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_snapshot_delete(self, snapshot_get_by_id, volume_get_by_id,
snapshot_metadata_get, delete_snapshot):
snapshot = {
'id': UUID,
'volume_id': fake.VOLUME_ID,
'status': fields.SnapshotStatus.AVAILABLE,
'volume_size': 100,
'display_name': 'Default name',
'display_description': 'Default description',
'expected_attrs': ['metadata'],
}
snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctx, **snapshot)
fake_volume_obj = fake_volume.fake_volume_obj(self.ctx)
snapshot_get_by_id.return_value = snapshot_obj
volume_get_by_id.return_value = fake_volume_obj
snapshot_id = UUID
req = fakes.HTTPRequest.blank('/v3/snapshots/%s' % snapshot_id)
req.environ['cinder.context'] = self.ctx
resp = self.controller.delete(req, snapshot_id)
self.assertEqual(HTTPStatus.ACCEPTED, resp.status_int)
def test_snapshot_delete_invalid_id(self):
self.mock_object(volume.api.API, "delete_snapshot",
fake_snapshot_delete)
snapshot_id = INVALID_UUID
req = fakes.HTTPRequest.blank('/v3/snapshots/%s' % snapshot_id)
self.assertRaises(exception.SnapshotNotFound, self.controller.delete,
req, snapshot_id)
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
@mock.patch('cinder.objects.Volume.get_by_id')
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_snapshot_show(self, snapshot_get_by_id, volume_get_by_id,
snapshot_metadata_get):
snapshot = {
'id': UUID,
'volume_id': fake.VOLUME_ID,
'status': fields.SnapshotStatus.AVAILABLE,
'volume_size': 100,
'display_name': 'Default name',
'display_description': 'Default description',
'expected_attrs': ['metadata'],
}
snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctx, **snapshot)
fake_volume_obj = fake_volume.fake_volume_obj(self.ctx)
snapshot_get_by_id.return_value = snapshot_obj
volume_get_by_id.return_value = fake_volume_obj
req = fakes.HTTPRequest.blank('/v3/snapshots/%s' % UUID)
req.environ['cinder.context'] = self.ctx
resp_dict = self.controller.show(req, UUID)
self.assertIn('snapshot', resp_dict)
self.assertEqual(UUID, resp_dict['snapshot']['id'])
self.assertIn('updated_at', resp_dict['snapshot'])
def test_snapshot_show_invalid_id(self):
snapshot_id = INVALID_UUID
req = fakes.HTTPRequest.blank('/v3/snapshots/%s' % snapshot_id)
self.assertRaises(exception.SnapshotNotFound,
self.controller.show, req, snapshot_id)
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
@mock.patch('cinder.objects.Volume.get_by_id')
@mock.patch('cinder.objects.Snapshot.get_by_id')
@mock.patch('cinder.volume.api.API.get_all_snapshots')
def test_snapshot_detail(self, get_all_snapshots, snapshot_get_by_id,
volume_get_by_id, snapshot_metadata_get):
snapshot = {
'id': UUID,
'volume_id': fake.VOLUME_ID,
'status': fields.SnapshotStatus.AVAILABLE,
'volume_size': 100,
'display_name': 'Default name',
'display_description': 'Default description',
'expected_attrs': ['metadata']
}
ctx = context.RequestContext(fake.PROJECT_ID, fake.USER_ID, True)
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
fake_volume_obj = fake_volume.fake_volume_obj(ctx)
snapshot_get_by_id.return_value = snapshot_obj
volume_get_by_id.return_value = fake_volume_obj
snapshots = objects.SnapshotList(objects=[snapshot_obj])
get_all_snapshots.return_value = snapshots
req = fakes.HTTPRequest.blank('/v3/snapshots/detail')
resp_dict = self.controller.detail(req)
self.assertIn('snapshots', resp_dict)
resp_snapshots = resp_dict['snapshots']
self.assertEqual(1, len(resp_snapshots))
self.assertIn('updated_at', resp_snapshots[0])
resp_snapshot = resp_snapshots.pop()
self.assertEqual(UUID, resp_snapshot['id'])
@mock.patch.object(db, 'snapshot_get_all_by_project',
v2_fakes.fake_snapshot_get_all_by_project)
@mock.patch.object(db, 'snapshot_get_all',
v2_fakes.fake_snapshot_get_all)
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
def test_admin_list_snapshots_limited_to_project(self,
snapshot_metadata_get):
req = fakes.HTTPRequest.blank('/v3/%s/snapshots' % fake.PROJECT_ID,
use_admin_context=True)
res = self.controller.index(req)
self.assertIn('snapshots', res)
self.assertEqual(1, len(res['snapshots']))
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
def test_list_snapshots_with_limit_and_offset(self,
snapshot_metadata_get):
def list_snapshots_with_limit_and_offset(snaps, is_admin):
req = fakes.HTTPRequest.blank('/v3/%s/snapshots?limit=1'
'&offset=1' % fake.PROJECT_ID,
use_admin_context=is_admin)
res = self.controller.index(req)
self.assertIn('snapshots', res)
self.assertEqual(1, len(res['snapshots']))
self.assertEqual(snaps[1].id, res['snapshots'][0]['id'])
self.assertIn('updated_at', res['snapshots'][0])
# Test that we get an empty list with an offset greater than the
# number of items
req = fakes.HTTPRequest.blank('/v3/snapshots?limit=1&offset=3')
self.assertEqual({'snapshots': []}, self.controller.index(req))
volume, snaps = self._create_db_snapshots(3)
# admin case
list_snapshots_with_limit_and_offset(snaps, is_admin=True)
# non-admin case
list_snapshots_with_limit_and_offset(snaps, is_admin=False)
@mock.patch.object(db, 'snapshot_get_all_by_project')
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
def test_list_snapshots_with_wrong_limit_and_offset(self,
mock_metadata_get,
mock_snapshot_get_all):
"""Test list with negative and non numeric limit and offset."""
mock_snapshot_get_all.return_value = []
# Negative limit
req = fakes.HTTPRequest.blank('/v3/snapshots?limit=-1&offset=1')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index,
req)
# Non numeric limit
req = fakes.HTTPRequest.blank('/v3/snapshots?limit=a&offset=1')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index,
req)
# Negative offset
req = fakes.HTTPRequest.blank('/v3/snapshots?limit=1&offset=-1')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index,
req)
# Non numeric offset
req = fakes.HTTPRequest.blank('/v3/snapshots?limit=1&offset=a')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index,
req)
# Test that we get an exception HTTPBadRequest(400) with an offset
# greater than the maximum offset value.
url = '/v3/snapshots?limit=1&offset=323245324356534235'
req = fakes.HTTPRequest.blank(url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def _assert_list_next(self, expected_query=None, project=fake.PROJECT_ID,
**kwargs):
"""Check a page of snapshots list."""
# Since we are accessing the v2 API directly we don't need to specify
# v2 in the request path; if we did, we'd get /v3/v2 links back
request_path = '/v3/%s/snapshots' % project
expected_path = request_path
# Construct the query if there are kwargs
if kwargs:
request_str = request_path + '?' + urllib.urlencode(kwargs)
else:
request_str = request_path
# Make the request
req = fakes.HTTPRequest.blank(request_str)
res = self.controller.index(req)
# We only expect to have a next link if there is an actual expected
# query.
if expected_query:
# We must have the links
self.assertIn('snapshots_links', res)
links = res['snapshots_links']
# Must be a list of links, even if we only get 1 back
self.assertIsInstance(links, list)
next_link = links[0]
# rel entry must be next
self.assertIn('rel', next_link)
self.assertIn('next', next_link['rel'])
# href entry must have the right path
self.assertIn('href', next_link)
href_parts = urllib.urlparse(next_link['href'])
self.assertEqual(expected_path, href_parts.path)
# And the query from the next link must match what we were
# expecting
params = urllib.parse_qs(href_parts.query)
self.assertDictEqual(expected_query, params)
# Make sure we don't have links if we were not expecting them
else:
self.assertNotIn('snapshots_links', res)
def _create_db_snapshots(self, num_snaps):
volume = utils.create_volume(self.ctx, volume_type_id=None)
snaps = [utils.create_snapshot(self.ctx,
volume.id,
display_name='snap' + str(i))
for i in range(num_snaps)]
self.addCleanup(db.volume_destroy, self.ctx, volume.id)
for snap in snaps:
self.addCleanup(db.snapshot_destroy, self.ctx, snap.id)
snaps.reverse()
return volume, snaps
def test_list_snapshots_next_link_default_limit(self):
"""Test that snapshot list pagination is limited by osapi_max_limit."""
volume, snaps = self._create_db_snapshots(3)
# NOTE(geguileo): Since cinder.api.common.limited has already been
# imported, its max_limit argument already has a default value of
# 1000, so it doesn't matter that we change it to 2. That's why we
# need to mock it and pass it the current value. We still need to set
# the default value because other sections of the code use it, for
# example _get_collection_links
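# A minimal sketch of the gotcha described above (names here are
# illustrative only, not part of this test): a default argument value
# is evaluated once, when the function is defined, so a later
# CONF.set_default() call cannot change it:
#
#     def limited(params, max_limit=CONF.osapi_max_limit):
#         return params[:max_limit]  # max_limit frozen at import time
#
# Hence the wrappers below re-read CONF.osapi_max_limit at call time
# and pass it through explicitly.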
CONF.set_default('osapi_max_limit', 2)
def get_pagination_params(params, max_limit=CONF.osapi_max_limit,
original_call=common.get_pagination_params):
return original_call(params, max_limit)
def _get_limit_param(params, max_limit=CONF.osapi_max_limit,
original_call=common._get_limit_param):
return original_call(params, max_limit)
with mock.patch.object(common, 'get_pagination_params',
get_pagination_params), \
mock.patch.object(common, '_get_limit_param',
_get_limit_param):
# The link from the first page should link to the second
self._assert_list_next({'marker': [snaps[1].id]})
# Second page should have no next link
self._assert_list_next(marker=snaps[1].id)
def test_list_snapshots_next_link_with_limit(self):
"""Test snapshot list pagination with specific limit."""
volume, snaps = self._create_db_snapshots(2)
# The link from the first page should link to the second
self._assert_list_next({'limit': ['1'], 'marker': [snaps[0].id]},
limit=1)
# Even though there are no more elements, we should get a next element
# per specification.
expected = {'limit': ['1'], 'marker': [snaps[1].id]}
self._assert_list_next(expected, limit=1, marker=snaps[0].id)
# When we go beyond the number of elements there should be no more
# next links
self._assert_list_next(limit=1, marker=snaps[1].id)
@mock.patch.object(db, 'snapshot_get_all_by_project',
v2_fakes.fake_snapshot_get_all_by_project)
@mock.patch.object(db, 'snapshot_get_all',
v2_fakes.fake_snapshot_get_all)
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
def test_admin_list_snapshots_all_tenants(self, snapshot_metadata_get):
req = fakes.HTTPRequest.blank('/v3/%s/snapshots?all_tenants=1' %
fake.PROJECT_ID,
use_admin_context=True)
res = self.controller.index(req)
self.assertIn('snapshots', res)
self.assertEqual(3, len(res['snapshots']))
@mock.patch.object(db, 'snapshot_get_all')
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
def test_admin_list_snapshots_by_tenant_id(self, snapshot_metadata_get,
snapshot_get_all):
def get_all(context, filters=None, marker=None, limit=None,
sort_keys=None, sort_dirs=None, offset=None):
if 'project_id' in filters and 'tenant1' in filters['project_id']:
return [v2_fakes.fake_snapshot(fake.VOLUME_ID,
tenant_id='tenant1')]
else:
return []
snapshot_get_all.side_effect = get_all
req = fakes.HTTPRequest.blank('/v3/%s/snapshots?all_tenants=1'
'&project_id=tenant1' % fake.PROJECT_ID,
use_admin_context=True)
res = self.controller.index(req)
self.assertIn('snapshots', res)
self.assertEqual(1, len(res['snapshots']))
@mock.patch.object(db, 'snapshot_get_all_by_project',
v2_fakes.fake_snapshot_get_all_by_project)
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
def test_all_tenants_non_admin_gets_all_tenants(self,
snapshot_metadata_get):
req = fakes.HTTPRequest.blank('/v3/%s/snapshots?all_tenants=1' %
fake.PROJECT_ID)
res = self.controller.index(req)
self.assertIn('snapshots', res)
self.assertEqual(1, len(res['snapshots']))
@mock.patch.object(db, 'snapshot_get_all_by_project',
v2_fakes.fake_snapshot_get_all_by_project)
@mock.patch.object(db, 'snapshot_get_all',
v2_fakes.fake_snapshot_get_all)
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
def test_non_admin_get_by_project(self, snapshot_metadata_get):
req = fakes.HTTPRequest.blank('/v3/%s/snapshots' % fake.PROJECT_ID)
res = self.controller.index(req)
self.assertIn('snapshots', res)
self.assertEqual(1, len(res['snapshots']))
def _create_snapshot_bad_body(self, body):
req = fakes.HTTPRequest.blank('/v3/%s/snapshots' % fake.PROJECT_ID)
req.method = 'POST'
self.assertRaises(exception.ValidationError,
self.controller.create, req, body=body)
def test_create_no_body(self):
self._create_snapshot_bad_body(body=None)
def test_create_missing_snapshot(self):
body = {'foo': {'a': 'b'}}
self._create_snapshot_bad_body(body=body)
def test_create_malformed_entity(self):
body = {'snapshot': 'string'}
self._create_snapshot_bad_body(body=body)
|
|
"""Models and database functions for paverave project."""
# import heapq
# import time
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.orm import relationship
# from sqlalchemy import Table, Column, Integer, ForeignKey
# Do I need both of these?
# from sqlalchemy import DateTime
import datetime
# import correlation
# This is the connection to the PostgreSQL database; we're getting this through
# the Flask-SQLAlchemy helper library. On this, we can find the `session`
# object, where we do most of our interactions (like committing, etc.)
db = SQLAlchemy()
##############################################################################
# Model definitions
class User(db.Model):
"""User of PaveRave site."""
__tablename__ = "users"
# TODO: start ids at high number?
user_id = db.Column(db.Integer, autoincrement=True, primary_key=True)
# TODO: add email format check and account verification with email link
email = db.Column(db.String(64), unique=True, nullable=False)
# TODO: secure pwd and add min requirements
password = db.Column(db.String(255), nullable=False)
username = db.Column(db.String(64), unique=True, nullable=False)
date_added = db.Column(db.DateTime, default=datetime.datetime.utcnow, nullable=False)
date_modified = db.Column(db.DateTime)
date_removed = db.Column(db.DateTime)
profile_url = db.Column(db.String(255))
profile_picture_url = db.Column(db.String(255))
# Define relationship to Vehicle db through UserVehicle
vehicles = relationship("UserVehicle", back_populates="user")
# Define relationship to user db through UserVehicles
comments = relationship("CommentUpvote", back_populates="user")
def __repr__(self):
"""Provide helpful representation when printed."""
return "<User user_id=%s email=%s username = %s>" % (self.user_id, self.email, self.username)
class UserVehicle(db.Model): # do we need this once users claim vehicles? Or just add to owner field in vehicle table?
"""Association table for users and vehicles on paverave site because vehicles can exist without user."""
__tablename__ = "uservehicles"
# Q: do we need id for association table?
# A: (yes, there can be multiple users per vehicle, and multiple vehicles per user.)
user_vehicle_id = db.Column(db.Integer, autoincrement=True, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('users.user_id'))
vehicle_plate = db.Column(db.String(64), db.ForeignKey('vehicles.vehicle_plate'))
date_linked = db.Column(db.DateTime, default=datetime.datetime.utcnow, nullable=False)
date_unlinked = db.Column(db.DateTime)
# from Association object section here: http://docs.sqlalchemy.org/en/latest/orm/basic_relationships.html
vehicle = relationship("Vehicle", back_populates="users")
user = relationship("User", back_populates="vehicles")
def __repr__(self):
"""Provide helpful representation when printed."""
return "<UserVehicle user_vehicle_id=%d user_id=%d vehicle_plate=%d>" % (self.user_vehicle_id, self.user_id, self.vehicle_plate)
class Vehicle(db.Model):
"""Vehicle on PaveRave site."""
__tablename__ = "vehicles"
# TODO: Beta version, allow users to add more vehicle info, incl pics
vehicle_plate = db.Column(db.String(64), primary_key=True) # license plate
# TODO: Beta version, add region and country plate formats including symbols.
vtype = db.Column(db.String(64)) # car, truck, motorcycle, semi, plane, boat, etc.
vstyle = db.Column(db.String(64)) # sedan, coupe, flatbed, hazmat, sport, etc.
make = db.Column(db.String(64))
model = db.Column(db.String(64))
color = db.Column(db.String(64))
user_id_adder = db.Column(db.Integer, nullable=False) # user adding car
date_added = db.Column(db.DateTime, default=datetime.datetime.utcnow, nullable=False)
date_modified = db.Column(db.DateTime, default=datetime.datetime.utcnow, nullable=False)
# TODO: move owner to UserVehicles association table?
user_id_owner = db.Column(db.Integer)
vpic_url = db.Column(db.String(255)) # TODO: beta, add ability to add vehicle photo url
# Define relationship to user db through UserVehicles
users = relationship("UserVehicle", back_populates="vehicle")
def __repr__(self):
"""Provide helpful representation when printed."""
return "<Vehicle vehicle_plate=%d>" % (self.vehicle_plate)
class Post(db.Model):
"""Post on PaveRave site by a user about vehicle."""
__tablename__ = "posts"
post_id = db.Column(db.Integer, autoincrement=True, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('users.user_id'))
vehicle_plate = db.Column(db.String(64), db.ForeignKey('vehicles.vehicle_plate'))
event_date = db.Column(db.DateTime, default=datetime.datetime.utcnow, nullable=False)
ptype = db.Column(db.String(64), nullable=False)
location = db.Column(db.String(255))
latitude = db.Column(db.Numeric(precision=9, scale=6))
longitude = db.Column(db.Numeric(precision=9, scale=6))
topic = db.Column(db.String(255), nullable=False)
date_added = db.Column(db.DateTime, default=datetime.datetime.utcnow, nullable=False)
date_modified = db.Column(db.DateTime)
date_removed = db.Column(db.DateTime)
comment_count = db.Column(db.Integer, default=0)
# Define relationship to users db
user = db.relationship("User", backref=db.backref("paverave", order_by=user_id))
# Define relationship to vehicles db
vehicle = db.relationship("Vehicle", backref=db.backref("paverave", order_by=vehicle_plate))
def __repr__(self):
"""Provide helpful representation when printed."""
return "<Roadrate post_id=%s user_id=%s vehicle_plate=%s event_date=%s ptype=%s latitude=%s longitude=%s topic=%s>" % (
self.post_id, self.user_id, self.vehicle_plate, self.event_date, self.ptype, self.latitude, self.longitude, self.topic)
class Comment(db.Model):
"""Comments on a post on PaveRave site."""
# TODO: create commentPings association table to enable user pings in jquery-comments
__tablename__ = "comments"
comment_id = db.Column(db.Integer, autoincrement=True, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('users.user_id'))
post_id = db.Column(db.Integer, db.ForeignKey('posts.post_id'))
cid = db.Column(db.String(64), nullable=False) # used by jquery-comments for position in thread
parent = db.Column(db.String(64), default="null", nullable=False) # null means first comment in thread for jquery-comments
date_created = db.Column(db.DateTime, default=datetime.datetime.utcnow, nullable=False)
date_modified = db.Column(db.DateTime)
# Either content or fileURL must be present for jquery_comments
content = db.Column(db.String(255), nullable=False)
file_url = db.Column(db.String(255))
pings = db.Column(db.String(255))
upvotes = db.Column(db.Integer, default=0, nullable=False)
date_removed = db.Column(db.DateTime)
# Define relationship to posts db
post = db.relationship("Post", backref=db.backref("paverave", order_by=post_id))
# Define relationship to user db through UserVehicles
users = relationship("CommentUpvote", back_populates="comment")
def __repr__(self):
"""Provide helpful representation when printed."""
return "<Roadrate comment_id=%d user_id=%d post_id=%d cid=%s parent=%s date_created=%s date_modified=%s content=%s upvotes=%d>" % (
self.comment_id, self.user_id, self.post_id, self.cid, self.parent, self.date_created, self.content, self.upvotes)
class CommentUpvote(db.Model):
"""Upvotes on a comment on a post on the PaveRave site."""
__tablename__ = "commentupvotes"
upvote_id = db.Column(db.Integer, autoincrement=True, primary_key=True)
comment_id = db.Column(db.Integer, db.ForeignKey('comments.comment_id'))
user_id = db.Column(db.Integer, db.ForeignKey('users.user_id'))
# # Define relationship to comments table
# comment = db.relationship("Comment", backref=db.backref("paverave", order_by=comment_id))
# # Define relationship to users table
# user = db.relationship("User", backref=db.backref("paverave", order_by=user_id))
comment = relationship("Comment", back_populates="users")
user = relationship("User", back_populates="comments")
def __repr__(self):
"""Provide helpful representation when printed."""
return "<Roadrate upvote_id=%d comment_id=%s user_id=%d>" % (
self.upvote_id, self.comment_id, self.user_id)
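# A minimal usage sketch of the shared `db.session` (the values below
# are illustrative only; assumes connect_to_db() further down has been
# called against a real database first):
def seed_example_user():
    """Create and commit one example user via the shared session."""
    user = User(email="rider@example.com",
                password="not-a-real-hash",  # see TODO on secure passwords
                username="rider")
    db.session.add(user)
    db.session.commit()
    return user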
##############################################################################
# Helper functions
# def connect_to_db(app):
# """Connect the database to our Flask app."""
# # Configure to use our PostgreSQL database
# app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///paverave'
def connect_to_db(app, db_uri=None):
"""Connect our application to our database."""
app.config['SQLALCHEMY_DATABASE_URI'] = db_uri or 'postgresql:///paverave'
# app.config['SQLALCHEMY_ECHO'] = True
db.app = app
db.init_app(app)
print "Connected to DB."
if __name__ == "__main__":
# As a convenience, if we run this module interactively, it will leave
# you in a state of being able to work with the database directly.
from server import app
connect_to_db(app)
print "Connected to DB."
# db.create_all()
|
|
#! /usr/bin/env python
import os
import logging
import ConfigParser
from pbcore.io.FastaIO import FastaReader, FastaWriter
from pbphase.AmpliconAnalyzer import AmpliconAnalyzer
from pbhla.log import initialize_logger
from pbhla.arguments import args, parse_args
from pbhla.fofn import ( create_baxh5_fofn,
write_sequence_fofn )
from pbhla.separate_sequences import ( separate_sequences,
separate_listed_sequences,
separate_aligned_sequences )
from pbhla.fasta.utils import ( write_fasta,
fasta_size )
from pbhla.fasta.rename import rename_fofn
from pbhla.io.extract_subreads import extract_subreads
from pbhla.io.FofnIO import FofnReader
from pbhla.amplicon_analysis.chimeras import ChimeraDetector
from pbhla.dictionary import create_m1_reference
from pbhla.references.fofn import parse_reference_fofn
from pbhla.phasing.combine import ( combine_amp_analysis,
combine_resequencing,
combine_fastq )
from pbhla.external.HbarTools import HbarRunner
from pbhla.external.utils import align_best_reference
from pbhla.resequencing.Resequencer import Resequencer
from pbhla.utilities.rename_fastq import rename_resequencing
from pbhla.utilities.filter_fastq import filter_fastq
from pbhla.typing.sequences import type_sequences
from pbhla.fasta.rename_subreads import write_renaming_key
from pbhla.utils import *
log = logging.getLogger()
class HlaPipeline( object ):
def __init__( self ):
# Parse the options
parse_args()
self._config = ConfigParser.SafeConfigParser()
self._config.read( args.config_file )
# Initialize output folder and sub-folders
self.subfolders = _initialize_folders( args.output )
# Initialize logging
log_file = os.path.join( args.output, "HLA_Pipeline.log" )
initialize_logger( log, log_file=log_file )
@property
def loci(self):
sections = [s for s in self._config.sections() if s != "Global"]
return [s[4:] if s.startswith('HLA-') else s for s in sections]
def __getattr__(self, item):
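"""
Proxy missing attributes to the config file: int, float and boolean
options are read from the DEFAULT section with the typed getters,
everything else from the Global section as a plain string
"""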
if item in ['min_read_length', 'max_read_length', 'min_num_reads', 'min_consensus_length', 'max_count']:
return self._config.getint('DEFAULT', item)
elif item in ['min_read_score', 'min_snr']:
return self._config.getfloat('DEFAULT', item)
elif item in ['clustering']:
return self._config.getboolean('DEFAULT', item)
else:
return self._config.get('Global', item)
def config(self, domain, item):
domain = self._check_domain( domain )
return self._config.get( domain, item )
def _check_domain(self, domain):
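"""
Resolve a locus name to its config section, trying the bare name
(e.g. "A"), then "HLA-A", then "HLA-A1"; returns None if no match
"""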
hla_domain = 'HLA-%s' % domain
numbered_domain = hla_domain + '1'
if domain in self._config.sections():
return domain
elif hla_domain in self._config.sections():
return hla_domain
elif numbered_domain in self._config.sections():
return numbered_domain
else:
return None
def to_be_phased(self, domain):
domain = self._check_domain( domain )
if domain:
if self._config.getboolean( domain, 'use_amp_analysis' ):
log.info("AmpliconAnalysis enabled for %s" % domain)
return True
else:
log.info("AmpliconAnalysis disabled for %s" % domain)
return False
log.info("No configuration detected for %s" % domain)
return False
def to_be_resequenced(self, domain):
domain = self._check_domain( domain )
print "Reseq", domain
if domain:
if self._config.getboolean( domain, 'use_resequencing' ):
log.info("Resequencing enabled for %s" % domain)
return True
else:
log.info("resequencing disabled for %s" % domain)
return False
log.info("No configuration detected for %s" % domain)
return False
def get_filepath(self, folder, filename):
return os.path.join( self.subfolders[folder], filename )
def run( self ):
baxh5 = _create_baxh5_fofn( args.input_file, args.output )
subread_fofn = self.extract_subread_data()
# First we assemble the supplied data via HGAP / HBAR
input_fofn = self.create_hbar_input_fofn( subread_fofn )
contig_file = self.run_hbar_assembly( input_fofn )
renamed_subreads = self.export_hbar_subreads()
renaming_key = self.create_renaming_key( subread_fofn, renamed_subreads )
# Second align the subreads and contigs to various references ...
subread_contig_dict = self.align_subreads_to_contigs( renamed_subreads, contig_file )
contig_genome_dict = self.align_contigs_to_genome( contig_file )
hla_reference, metadata, loci = self.parse_reference()
contig_reference_dict = self.align_contigs_to_reference( contig_file, hla_reference )
contig_locus_dict = cross_ref_dict( contig_reference_dict, loci )
subread_locus_dict = cross_ref_dict( subread_contig_dict, contig_locus_dict )
# ... in order to separate the on-target from off-target sequences ...
on_target = self.find_on_target_contigs( contig_genome_dict, contig_locus_dict )
hla_contigs = self.separate_hla_contigs( contig_file, on_target )
hla_subreads = self.separate_hla_subreads( renamed_subreads, subread_contig_dict, on_target )
# ... and the different loci and contigs from each other
contig_subread_fofn = self.separate_subreads_by_contig( hla_subreads, subread_contig_dict )
locus_subread_fofn = self.separate_subreads_by_locus( hla_subreads, subread_locus_dict )
locus_contig_fofn = self.separate_contigs_by_locus( hla_contigs, contig_locus_dict )
# Rename the subreads for each locus
renamed_subread_fofn = self.rename_subread_files( locus_subread_fofn, renaming_key )
# Use the renamed subreads to Phase with AA and remove AA-chimeras
self.run_amp_analysis( renamed_subread_fofn )
phasing_results = self.combine_phasing_results()
#good_results = self.remove_chimeras( phasing_results )
# Use the renamed subreads to resequence any other loci/HBAR contigs
self.run_resequencing( baxh5, renamed_subread_fofn, locus_contig_fofn )
reseq_results = self.combine_resequencing_results()
renamed_results = self.rename_resequencing_results( reseq_results )
# Combine the results from AA and resequencing and type the results
combined_results = self.combine_phasing_and_reseq( phasing_results, renamed_results )
filtered_results = self.filter_combined_results( combined_results )
typing_sequences = self.copy_sequences_for_typing( filtered_results )
self.type_hla_sequences( typing_sequences )
def extract_subread_data( self ):
"""
Extract subreads meeting the DEFAULT requirements for analysis
"""
log.info('Looking for raw subread data')
# Dump all valid reads from the above files
subread_fofn = self.get_filepath( "subreads", "all_subreads.fofn" )
if valid_file( subread_fofn ):
log.info("Using existing subread fofn\n")
return subread_fofn
log.info('No subread data found, extracting from input file(s)')
extract_subreads( args.input_file,
subread_fofn,
min_length=self.min_read_length,
max_length=self.max_read_length,
min_score=self.min_read_score,
min_snr=self.min_snr,
max_count=self.max_count )
check_output_file( subread_fofn )
log.info('Finished extracting subread data from input\n')
return subread_fofn
def create_hbar_input_fofn( self, subread_fofn ):
"""
Create an input FOFN for HBAR pointing to the raw subread data
"""
log.info('Looking for HBAR input FOFN')
input_fofn = self.get_filepath( 'HBAR', 'input.fofn' )
copy_file( subread_fofn, input_fofn )
log.info('Finished creating input fofn\n')
return input_fofn
def run_hbar_assembly( self, input_fofn ):
"""
Run HBAR to assemble rough contigs from the raw HLA subreads
"""
log.info('Looking for HBAR-assembled HLA contigs')
output = self.get_filepath( "HBAR", "3-CA/9-terminator/asm.utg.fasta" )
contig_file = self.get_filepath( "references", "all_contigs.fasta" )
log.debug("HBAR input FOFN: %s" % input_fofn)
if valid_file( output ):
log.info("Using existing HBAR contig file")
else: # Run HGAP
log.info("No HBAR contig file found, initializing HbarRunner")
hbar = HbarRunner( input_fofn,
self.subfolders["HBAR"],
min_length=self.min_read_length,
min_score=self.min_read_score )
hbar()
# Copy the contig file to a more convenient location
check_output_file( output )
copy_file( output, contig_file )
log.info('Finished assembling subreads data with HBAR\n')
return contig_file
def export_hbar_subreads(self):
"""
Export the HBAR-renamed subread data for downstream analysis
"""
log.info('Looking for exported renamed subreads from HBAR')
renamed_subreads = self.get_filepath( 'subreads', 'renamed_subreads.fasta' )
if valid_file( renamed_subreads ):
log.info('Using existing HBAR-exported subread file\n')
return renamed_subreads
log.info("No renamed subread file found, exporting from HBAR output...")
fasta_folder = self.get_filepath( 'HBAR', '0-fasta_files' )
with FastaWriter( renamed_subreads ) as handle:
for entry in os.listdir( fasta_folder ):
if entry.endswith('_q.fa'):
hbar_fasta = os.path.join( fasta_folder, entry )
for record in FastaReader( hbar_fasta ):
handle.writeRecord( record )
check_output_file( renamed_subreads )
log.info('Finished exporting HBAR-renamed subreads\n')
return renamed_subreads
def create_renaming_key(self, subread_fofn, renamed_subreads ):
"""
Create a key for translating HBAR subread names to canonical PacBio names
"""
log.info("Looking for Raw<--->HBAR subread renaming key")
renaming_key = self.get_filepath( 'subreads', 'renaming_key.txt' )
if valid_file( renaming_key ):
log.info('Using existing subread renaming key\n')
return renaming_key
log.info("No subread renaming key round, creating one...")
write_renaming_key( subread_fofn, renamed_subreads, renaming_key )
check_output_file( renaming_key )
log.info("Finished creating subread renaming key\n")
return renaming_key
def align_subreads_to_contigs(self, subread_file, contig_file ):
"""
Align the subreads to the contigs assembled by HBAR
"""
log.info("Looking for Subread-to-Contig alignment data")
subread_contig_align = self.get_filepath( 'alignments', 'subreads_to_contigs.m1' )
if valid_file( subread_contig_align ):
log.info("Using existing Subread->Contig alignment file\n")
else:
log.info("No Subread->Contig alignment found, creating...")
align_best_reference( subread_file, contig_file, output=subread_contig_align )
check_output_file( subread_contig_align )
log.info("Finished aligning subreads to the HBAR contigs\n")
return create_m1_reference( subread_contig_align )
def align_contigs_to_genome(self, contig_file):
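"""
Align the HBAR contigs to the human genomic reference
"""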
log.info("Looking for Contig-to-Genome alignment data")
contig_genome_align = self.get_filepath( 'alignments', 'contigs_to_genome.m1' )
if valid_file( contig_genome_align ):
log.info("Using existing Contig->Genome alignment file\n")
else:
log.info("No Contig->Genome alignment found, creating...")
align_best_reference( contig_file, self.human_reference, output=contig_genome_align )
check_output_file( contig_genome_align )
log.info("Finished aligning contigs to the genomic reference\n")
return create_m1_reference( contig_genome_align )
def parse_reference(self):
"""
Parse HLA data from the configured reference FOFN
"""
log.info("Parsing the supplied FOFN of HLA reference data")
hla_reference_seqs = self.get_filepath( "references", "HLA_references.fasta" )
sequences, metadata, loci = parse_reference_fofn( self.hla_reference )
log.info("Writing collected HLA reference sequences to file")
write_fasta( sequences, hla_reference_seqs )
check_output_file( hla_reference_seqs )
log.info("Finished parsing the HLA reference data\n")
return hla_reference_seqs, metadata, loci
def align_contigs_to_reference(self, contig_file, reference_file):
"""
Align HBAR contigs to an HLA reference Fasta
"""
log.info("Looking for Contig-to-Reference alignment data")
contig_reference_align = self.get_filepath( 'alignments', 'contigs_to_reference.m1' )
if valid_file( contig_reference_align ):
log.info("Using an existing Contig->Reference alignment file\n")
else:
log.info("No Contig->Reference alignment found, creating...")
align_best_reference( contig_file, reference_file, output=contig_reference_align )
check_output_file( contig_reference_align )
log.info("Finished aligning contigs to the HLA reference data\n")
return create_m1_reference( contig_reference_align )
def find_on_target_contigs( self, contig_genome, contig_loci ):
"""
Identify on-target contigs based on their genomic and reference alignments
"""
log.info('Identifying on-target contigs from alignment data')
on_target_contig_ids = [c for c in contig_loci if contig_genome[c] == 'chr6']
log.info('Finished identifying on-target contigs\n')
return on_target_contig_ids
def separate_hla_contigs( self, contig_file, on_target_ids ):
"""
Separate the ON/OFF target contig sequences from each other
"""
log.info('Looking for separated on/off target contig data')
hla_contigs = self.get_filepath( 'references', 'hla_contigs.fasta' )
other_contigs = self.get_filepath( 'references', 'other_contigs.fasta' )
if valid_file( hla_contigs ):
log.info("Using existing separated contig files\n")
return hla_contigs
else:
log.info('No separated contig files found, creating...')
separate_listed_sequences( contig_file, on_target_ids, hla_contigs, other_contigs )
log.info('Finished separating on/off-target contigs\n')
return hla_contigs
def separate_hla_subreads(self, subread_file, subread_contigs, on_target_ids ):
"""
Separate the ON/OFF target subreads based on their aligned contig
"""
log.info('Looking for separated on/off target subread data')
hla_subreads = self.get_filepath( 'subreads', 'hla_subreads.fasta' )
other_subreads = self.get_filepath( 'subreads', 'other_subreads.fasta' )
if valid_file( hla_subreads ):
log.info("Using existing separated subread files\n")
return hla_subreads
else:
log.info('No separated subread files found, creating...')
separate_aligned_sequences( subread_file, subread_contigs, on_target_ids,
hla_subreads, other_subreads )
log.info('Finished separating on/off-target subreads\n')
return hla_subreads
def separate_subreads_by_contig( self, hla_subreads, subread_contigs ):
"""
Separate the on-target subreads by the HBAR contig to which they align
"""
log.info("Looking for contig-specific subread files")
contig_subread_fofn = self.get_filepath( "subreads", "Contig_Subread_Files.fofn" )
if valid_file( contig_subread_fofn ):
log.info("Using existing contig subread files and FOFN\n")
return contig_subread_fofn
log.info("No contig subread files found, creating...")
subread_prefix = self.get_filepath( "subreads", "Contig" )
contig_files = separate_sequences( hla_subreads, subread_contigs, subread_prefix )
write_sequence_fofn( contig_files, contig_subread_fofn )
log.info('Finished separating subreads by contig\n')
return contig_subread_fofn
def separate_subreads_by_locus( self, hla_subreads, subread_loci ):
"""
Separate the on-target subreads by the locus to which their HBAR contig aligns
"""
log.info("Looking for locus-specific subread files")
locus_subread_fofn = self.get_filepath( "subreads", "Locus_Subread_Files.fofn" )
if valid_file( locus_subread_fofn ):
log.info("Using existing locus subread files and FOFN\n")
return locus_subread_fofn
log.info("No locus subread files found, creating...")
subread_prefix = self.get_filepath( "subreads", "Locus" )
locus_files = separate_sequences( hla_subreads, subread_loci, subread_prefix )
write_sequence_fofn( locus_files, locus_subread_fofn )
log.info('Finished separating subreads by locus\n')
return locus_subread_fofn
def separate_contigs_by_locus( self, hla_contigs, contig_loci ):
"""
Separate the on-target contigs by the locus to which they align
"""
log.info("Looking for locus-specific contig files")
locus_contig_fofn = self.get_filepath( "references", "Locus_Contig_Files.fofn" )
if valid_file( locus_contig_fofn ):
log.info("Using existing locus contig files and FOFN\n")
return locus_contig_fofn
log.info("No locus subread files found, creating...")
file_prefix = self.get_filepath( "references", "Locus" )
locus_files = separate_sequences( hla_contigs, contig_loci, file_prefix )
write_sequence_fofn( locus_files, locus_contig_fofn )
log.info('Finished separating contigs by locus\n')
return locus_contig_fofn
def rename_subread_files( self, subread_fofn, renaming_key ):
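"""
Rename the subread files from each locus using the HBAR renaming key
"""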
log.info("Looking for FOFN of renamed subread files")
renamed_fofn = '.'.join(subread_fofn.split('.')[:-1]) + '_renamed.fofn'
if valid_file( renamed_fofn ):
log.info("Using existing FOFN of renamed subread files\n")
return renamed_fofn
log.info("No renamed FOFN found, creating...")
rename_fofn( subread_fofn, renamed_fofn, renaming_key )
log.info("Finished renaming subread files\n")
return renamed_fofn
def run_amp_analysis( self, renamed_fofn ):
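"""
Phase the subreads from each configured locus with AmpliconAnalysis
"""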
log.info("Looking for phased AmpliconAnalysis results")
analyzer = AmpliconAnalyzer( self.smrt_path, args.nproc, 'AmpliconAnalysis' )
for subread_file in FofnReader( renamed_fofn ):
# Check if the source of the current file has a configuration
source = get_file_source( subread_file )
log.debug("Subread file: %s" % subread_file)
log.debug("File source: %s" % source)
if not self.to_be_phased( source ):
continue
# For sources with a configuration, run AA if output DNE
output_folder = self.get_filepath( 'phasing', source )
output_file = os.path.join( output_folder, 'amplicon_analysis.fastq' )
if os.path.exists( output_file ):
log.info('AmpliconAnalyzer output detected for "%s", skipping...' % source)
else:
log.info('Phasing subreads from "%s"' % source)
analyzer_args = {'whiteList': subread_file,
'sampleName': '_' + source,
'minLength': self.config(source, 'min_read_length'),
'minReadScore': self.config(source, 'min_read_score'),
'minSnr': self.config(source, 'min_snr'),
'maxPhasingReads': self.config(source, 'max_phasing_reads'),
'noClustering': self.config(source, 'disable_clustering')}
analyzer.run( args.input_file, output_folder, analyzer_args )
log.info('Finished phasing subreads with Amplicon Analysis\n')
def combine_phasing_results( self ):
log.info("Looking for the combined output from Amplicon Assembly")
combined_file = self.get_filepath( 'references', 'amp_analysis_consensus.fastq')
if valid_file( combined_file ):
log.info("Using existing combined consensus file\n")
return combined_file
log.info("No combined consensus file found, creating...")
combine_amp_analysis( self.subfolders['phasing'], combined_file )
log.info('Finished combining AmpliconAnalysis results\n')
return combined_file
def run_resequencing( self, baxh5_fofn, renamed_fofn, reference_fofn ):
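"""
Resequence the HBAR contigs for each configured locus
"""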
log.info("Looking for phased AmpliconAnalysis results")
log.debug("SMRT Analysis path: %s" % self.smrt_path)
resequencer = Resequencer( self.smrt_path, args.nproc )
subread_handle = FofnReader( renamed_fofn )
reference_handle = FofnReader( reference_fofn )
for subreads, reference in zip(subread_handle, reference_handle):
# Check if the source of the current file has a configuration
source = get_file_source( subreads )
if not self.to_be_resequenced( source ):
continue
# For sources with a configuration, run AA if output DNE
output_folder = self.get_filepath( 'resequencing', source )
if os.path.exists( output_folder ):
log.info('Resequencing output detected for "%s", skipping...' % source)
else:
log.info('Resequencing HBAR contigs from "%s"' % source)
resequencer( baxh5_fofn, subreads, reference, output=output_folder )
log.info('Finished resequencing HBAR contigs\n')
def combine_resequencing_results( self ):
log.info("Looking for the combined output from Amplicon Assembly")
combined_file = self.get_filepath( 'references', 'resequencing_consensus.fastq')
if valid_file( combined_file ):
log.info("Using existing combined consensus file\n")
return combined_file
log.info("No combined consensus file found, creating...")
combine_resequencing( self.subfolders['resequencing'], combined_file )
log.info('Finished combining Resequencing results\n')
return combined_file
def remove_chimeras( self, sequence_file ):
log.info("Looking for Chimera-filtered AmpliconAnalysis results")
non_chimera_file = '.'.join( sequence_file.split('.')[:-1] ) + '.good.fastq'
if valid_file( non_chimera_file ):
log.info("Using existing Chimera-filtered file\n")
return non_chimera_file
log.info("No Chimera-filtered file found, creating...")
cd = ChimeraDetector()
cd.run( sequence_file )
check_output_file( non_chimera_file )
log.info("Finished removing chimeric sequences\n")
return non_chimera_file
def rename_resequencing_results( self, input_file ):
log.info("Looking for renamed Resequencing consensus file")
renamed_file = self.get_filepath( 'references', 'resequencing_consensus.renamed.fastq')
if valid_file( renamed_file ):
log.info("Using existing renamed consensus file\n")
return renamed_file
if not valid_file( input_file ):
log.info("No valid resequencing output detected, skipping...")
return input_file
log.info("No renamed consensus file found, creating...")
rename_resequencing( input_file, renamed_file )
check_output_file( renamed_file )
log.info('Finished renaming Resequencing results\n')
return renamed_file
def combine_phasing_and_reseq( self, good_results, reseq_results ):
log.info("Looking for combined Phasing and Resequencing results")
combined_results = self.get_filepath("references", "combined_consensus.fastq")
if valid_file( combined_results ):
log.info("Using existing combined consensus file\n")
return combined_results
if valid_file( reseq_results ):
log.info("No combined consensus file found, creating...")
combine_fastq( [good_results, reseq_results], combined_results )
check_output_file( combined_results )
else:
log.info("No resequencing output to combine, using only Phasing results...")
copy_file( good_results, combined_results )
log.info("Finished combining Phasing and Resequencing results\n")
return combined_results
def filter_combined_results( self, combined_results ):
log.info("Looking for filtered consensus results")
filtered_results = self.get_filepath("references", "filtered_consensus.fastq")
if valid_file( filtered_results ):
log.info("Using existing filtered consensus file\n")
return filtered_results
log.info("No filtered consensus file found, creating...")
filter_fastq( combined_results, filtered_results,
min_length=self.min_consensus_length,
min_num_reads=self.min_num_reads)
check_output_file( filtered_results )
log.info("Finished filtering combined consensus results\n")
return filtered_results
def copy_sequences_for_typing(self, sequence_file):
log.info("Looking for a sequence file to use in HLA-typing")
typing_sequences = self.get_filepath('typing', 'consensus_sequences.fastq')
if valid_file( typing_sequences ):
log.info("Using existing typing sequences file")
return typing_sequences
log.info("No sequence file for typing found, creating...")
copy_file( sequence_file, typing_sequences )
log.info("Finished copying sequences for HLA-typing\n")
return typing_sequences
def type_hla_sequences(self, sequence_file ):
log.debug("Loci to be typed: %s" % self.loci)
log.info('Typing the selected HLA consensus sequences')
typing = type_sequences( sequence_file,
grouping='locus',
exon_fofn=self.exon_reference,
genomic_reference=self.locus_reference,
cDNA_reference=self.cDNA_reference,
loci=self.loci)
check_output_file( typing )
log.info('Finished typing the selected HLA sequences\n')
return typing
def _initialize_folders( output ):
"""
Create the Main and Sub Output directories
"""
# Create Main
log.info("Creating output directories")
output = os.path.abspath( output )
create_directory( output )
# Create sub-directories
subfolders = {}
for dir_name in ['HBAR', 'references', 'subreads', 'results',
'alignments', 'phasing', 'typing', 'resequencing']:
sub_dir = os.path.join( output, dir_name )
create_directory( sub_dir )
subfolders[dir_name] = sub_dir
return subfolders
def _create_baxh5_fofn( input_file, output ):
"""
Convert any BasH5 input files to BaxH5 to avoid file-type problems
"""
log.info('Creating FOFN of Bax.H5 files')
baxh5_fofn = os.path.join( output, 'baxh5.fofn' )
if valid_file( baxh5_fofn ):
log.info("Using existing Bax.H5 FOFN file")
return baxh5_fofn
log.info("No existing Bax.H5 fofn found")
create_baxh5_fofn( input_file, baxh5_fofn )
check_output_file( baxh5_fofn )
log.info('Finished writing Bax.H5 fofn file\n')
return baxh5_fofn
if __name__ == '__main__':
HlaPipeline().run()
|
|
from __future__ import print_function
from dynamicserialize.dstypes.com.raytheon.uf.common.dataquery.requests import RequestConstraint
from shapely.geometry import box, Point
from awips.dataaccess import DataAccessLayer as DAL
from awips.ThriftClient import ThriftRequestException
from awips.test.dafTests import baseDafTestCase
from awips.test.dafTests import params
import unittest
#
# Test DAF support for grid data
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 01/19/16 4795 mapeters Initial Creation.
# 04/11/16 5548 tgurney Cleanup
# 04/18/16 5548 tgurney More cleanup
# 06/09/16 5587 tgurney Typo in id values test
# 07/06/16 5728 mapeters Add advanced query tests
# 08/03/16 5728 mapeters Add additional identifiers to testGetDataWith*
# tests to shorten run time and prevent EOFError
# 10/13/16 5942 bsteffen Test envelopes
# 11/08/16 5985 tgurney Skip certain tests when no
# data is available
# 12/07/16 5981 tgurney Parameterize
# 01/06/17 5981 tgurney Skip envelope test when no
# data is available
#
class GridTestCase(baseDafTestCase.DafTestCase):
"""Test DAF support for grid data"""
datatype = 'grid'
model = 'GFS160'
def testGetAvailableParameters(self):
req = DAL.newDataRequest(self.datatype)
req.addIdentifier('info.datasetId', self.model)
self.runParametersTest(req)
def testGetAvailableLocations(self):
req = DAL.newDataRequest(self.datatype)
req.addIdentifier('info.datasetId', self.model)
self.runLocationsTest(req)
def testGetAvailableLevels(self):
req = DAL.newDataRequest(self.datatype)
req.addIdentifier('info.datasetId', self.model)
self.runLevelsTest(req)
def testGetAvailableTimes(self):
req = DAL.newDataRequest(self.datatype)
req.addIdentifier('info.datasetId', self.model)
req.setLevels('2FHAG')
self.runTimesTest(req)
def testGetGridData(self):
req = DAL.newDataRequest(self.datatype)
req.addIdentifier('info.datasetId', self.model)
req.setLevels('2FHAG')
req.setParameters('T')
self.runGridDataTest(req)
def testGetIdentifierValues(self):
req = DAL.newDataRequest(self.datatype)
req.addIdentifier('info.datasetId', 'ENSEMBLE')
req.setLevels('2FHAG')
req.setParameters('T')
idValues = DAL.getIdentifierValues(req, 'info.ensembleId')
self.assertTrue(hasattr(idValues, '__iter__'))
if idValues:
self.assertIn('ctl1', idValues)
self.assertIn('p1', idValues)
self.assertIn('n1', idValues)
else:
raise unittest.SkipTest("no data available")
def testGetInvalidIdentifierValuesThrowsException(self):
self.runInvalidIdValuesTest()
def testGetNonexistentIdentifierValuesThrowsException(self):
self.runNonexistentIdValuesTest()
def testGetDataWithEnvelope(self):
req = DAL.newDataRequest(self.datatype)
req.addIdentifier('info.datasetId', self.model)
req.setLevels('2FHAG')
req.setParameters('T')
req.setEnvelope(params.ENVELOPE)
gridData = self.runGridDataTest(req)
if len(gridData) == 0:
raise unittest.SkipTest("No data available")
lons, lats = gridData[0].getLatLonCoords()
lons = lons.reshape(-1)
lats = lats.reshape(-1)
# Ensure all points are within one degree of the original box
# to allow slight margin of error for reprojection distortion.
testEnv = box(params.ENVELOPE.bounds[0] - 1, params.ENVELOPE.bounds[1] - 1,
params.ENVELOPE.bounds[2] + 1, params.ENVELOPE.bounds[3] + 1 )
for i in range(len(lons)):
self.assertTrue(testEnv.contains(Point(lons[i], lats[i])))
def _runConstraintTest(self, key, operator, value):
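# Build a grid request constrained on `key` while pinning the model,
# master level, and level-two value, so the returned records vary only
# with the constraint under test.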
req = DAL.newDataRequest(self.datatype)
constraint = RequestConstraint.new(operator, value)
req.addIdentifier(key, constraint)
req.addIdentifier('info.datasetId', self.model)
req.addIdentifier('info.level.masterLevel.name', 'FHAG')
req.addIdentifier('info.level.leveltwovalue', 3000.0)
req.setParameters('T')
return self.runGridDataTest(req)
def testGetDataWithEqualsString(self):
gridData = self._runConstraintTest('info.level.levelonevalue', '=', '2000.0')
for record in gridData:
self.assertEqual(record.getAttribute('info.level.levelonevalue'), 2000.0)
def testGetDataWithEqualsUnicode(self):
gridData = self._runConstraintTest('info.level.levelonevalue', '=', u'2000.0')
for record in gridData:
self.assertEqual(record.getAttribute('info.level.levelonevalue'), 2000.0)
def testGetDataWithEqualsInt(self):
gridData = self._runConstraintTest('info.level.levelonevalue', '=', 2000)
for record in gridData:
self.assertEqual(record.getAttribute('info.level.levelonevalue'), 2000)
def testGetDataWithEqualsLong(self):
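# long and int are unified on Python 3, so this mirrors the int test
# with a plain int value.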
gridData = self._runConstraintTest('info.level.levelonevalue', '=', 2000)
for record in gridData:
self.assertEqual(record.getAttribute('info.level.levelonevalue'), 2000)
def testGetDataWithEqualsFloat(self):
gridData = self._runConstraintTest('info.level.levelonevalue', '=', 2000.0)
for record in gridData:
self.assertEqual(round(record.getAttribute('info.level.levelonevalue'), 1), 2000.0)
def testGetDataWithEqualsNone(self):
gridData = self._runConstraintTest('info.level.levelonevalue', '=', None)
for record in gridData:
self.assertIsNone(record.getAttribute('info.level.levelonevalue'))
def testGetDataWithNotEquals(self):
gridData = self._runConstraintTest('info.level.levelonevalue', '!=', 2000.0)
for record in gridData:
self.assertNotEqual(record.getAttribute('info.level.levelonevalue'), 2000.0)
def testGetDataWithNotEqualsNone(self):
gridData = self._runConstraintTest('info.level.levelonevalue', '!=', None)
for record in gridData:
self.assertIsNotNone(record.getAttribute('info.level.levelonevalue'))
def testGetDataWithGreaterThan(self):
gridData = self._runConstraintTest('info.level.levelonevalue', '>', 2000.0)
for record in gridData:
self.assertGreater(record.getAttribute('info.level.levelonevalue'), 2000.0)
def testGetDataWithLessThan(self):
gridData = self._runConstraintTest('info.level.levelonevalue', '<', 2000.0)
for record in gridData:
self.assertLess(record.getAttribute('info.level.levelonevalue'), 2000.0)
def testGetDataWithGreaterThanEquals(self):
gridData = self._runConstraintTest('info.level.levelonevalue', '>=', 2000.0)
for record in gridData:
self.assertGreaterEqual(record.getAttribute('info.level.levelonevalue'), 2000.0)
def testGetDataWithLessThanEquals(self):
gridData = self._runConstraintTest('info.level.levelonevalue', '<=', 2000.0)
for record in gridData:
self.assertLessEqual(record.getAttribute('info.level.levelonevalue'), 2000.0)
def testGetDataWithInList(self):
collection = [2000.0, 1000.0]
gridData = self._runConstraintTest('info.level.levelonevalue', 'in', collection)
for record in gridData:
self.assertIn(record.getAttribute('info.level.levelonevalue'), collection)
def testGetDataWithNotInList(self):
collection = [2000.0, 1000.0]
gridData = self._runConstraintTest('info.level.levelonevalue', 'not in', collection)
for record in gridData:
self.assertNotIn(record.getAttribute('info.level.levelonevalue'), collection)
def testGetDataWithInvalidConstraintTypeThrowsException(self):
with self.assertRaises(ValueError):
self._runConstraintTest('info.level.levelonevalue', 'junk', '2000.0')
def testGetDataWithInvalidConstraintValueThrowsException(self):
with self.assertRaises(TypeError):
self._runConstraintTest('info.level.levelonevalue', '=', {})
def testGetDataWithEmptyInConstraintThrowsException(self):
with self.assertRaises(ValueError):
self._runConstraintTest('info.level.levelonevalue', 'in', [])
def testGetDataWithLevelOneAndLevelTwoConstraints(self):
req = DAL.newDataRequest(self.datatype)
levelOneConstraint = RequestConstraint.new('>=', 2000.0)
req.addIdentifier('info.level.levelonevalue', levelOneConstraint)
levelTwoConstraint = RequestConstraint.new('in', (4000.0, 5000.0))
req.addIdentifier('info.level.leveltwovalue', levelTwoConstraint)
req.addIdentifier('info.datasetId', self.model)
req.addIdentifier('info.level.masterLevel.name', 'FHAG')
req.setParameters('T')
gridData = self.runGridDataTest(req)
for record in gridData:
self.assertGreaterEqual(record.getAttribute('info.level.levelonevalue'), 2000.0)
self.assertIn(record.getAttribute('info.level.leveltwovalue'), (4000.0, 5000.0))
def testGetDataWithMasterLevelNameInConstraint(self):
req = DAL.newDataRequest(self.datatype)
masterLevelConstraint = RequestConstraint.new('in', ('FHAG', 'K'))
req.addIdentifier('info.level.masterLevel.name', masterLevelConstraint)
req.addIdentifier('info.level.levelonevalue', 2000.0)
req.addIdentifier('info.level.leveltwovalue', 3000.0)
req.addIdentifier('info.datasetId', 'GFS160')
req.setParameters('T')
gridData = self.runGridDataTest(req)
for record in gridData:
self.assertIn(record.getAttribute('info.level.masterLevel.name'), ('FHAG', 'K'))
def testGetDataWithDatasetIdInConstraint(self):
req = DAL.newDataRequest(self.datatype)
# gfs160 is alias for GFS160 in this namespace
req.addIdentifier('namespace', 'gfeParamInfo')
datasetIdConstraint = RequestConstraint.new('in', ('gfs160', 'HRRR'))
req.addIdentifier('info.datasetId', datasetIdConstraint)
req.addIdentifier('info.level.masterLevel.name', 'FHAG')
req.addIdentifier('info.level.levelonevalue', 2000.0)
req.addIdentifier('info.level.leveltwovalue', 3000.0)
req.setParameters('T')
gridData = self.runGridDataTest(req, testSameShape=False)
for record in gridData:
self.assertIn(record.getAttribute('info.datasetId'), ('gfs160', 'HRRR'))
def testGetDataWithMasterLevelNameLessThanEqualsConstraint(self):
req = DAL.newDataRequest(self.datatype)
masterLevelConstraint = RequestConstraint.new('<=', 'K')
req.addIdentifier('info.level.masterLevel.name', masterLevelConstraint)
req.addIdentifier('info.level.levelonevalue', 2000.0)
req.addIdentifier('info.level.leveltwovalue', 3000.0)
req.addIdentifier('info.datasetId', 'GFS160')
req.setParameters('T')
gridData = self.runGridDataTest(req)
for record in gridData:
self.assertLessEqual(record.getAttribute('info.level.masterLevel.name'), 'K')
def testGetDataWithComplexConstraintAndNamespaceThrowsException(self):
req = DAL.newDataRequest(self.datatype)
req.addIdentifier('namespace', 'grib')
masterLevelConstraint = RequestConstraint.new('<=', 'K')
req.addIdentifier('info.level.masterLevel.name', masterLevelConstraint)
req.addIdentifier('info.datasetId', 'GFS160')
req.setParameters('T')
with self.assertRaises(ThriftRequestException) as cm:
self.runGridDataTest(req)
self.assertIn('IncompatibleRequestException', str(cm.exception))
self.assertIn('info.level.masterLevel.name', str(cm.exception))
|
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# (c) 2012 Mike Lewis
import logging; log = logging.getLogger(__name__)
try:
import simplejson as json
except ImportError:
import json
import base64
import hashlib
import hmac
import inspect
import string
import time
import urllib
# 3rd party libraries that might not be present during initial install
# but we need to import for the version #
try:
import httplib2
import poster
except ImportError:
pass
__version__ = '20120430'
__author__ = u'Mike Lewis'
API_ENDPOINT = 'http://api.singleplatform.co'
# Number of times to retry http requests
NUM_REQUEST_RETRIES = 3
# Generic SinglePlatform exception
class SinglePlatformException(Exception): pass
error_types = {
}
def b64_key_to_binary(key):
"""Convert a base64 encoded key to binary"""
padding_factor = (4 - len(key) % 4) % 4
key += "=" * padding_factor
return base64.b64decode(unicode(key).translate(dict(zip(map(ord, u'-_'), u'+/'))))
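# Illustrative example (assumed URL-safe input): '3q2-7w' gains '=='
# padding and maps '-'/'_' back to '+'/'/', decoding to '\xde\xad\xbe\xef'.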
class SinglePlatform(object):
"""SinglePlatform API wrapper"""
def __init__(self, client_id, signing_key, api_key=None):
"""Sets up the api object"""
binary_key = b64_key_to_binary(signing_key)
# Set up endpoints
self.base_requester = self.Requester(client_id, binary_key, api_key)
# Dynamically enable endpoints
self._attach_endpoints()
def _attach_endpoints(self):
"""Dynamically attach endpoint callables to this client"""
for name, endpoint in inspect.getmembers(self):
if inspect.isclass(endpoint) and issubclass(endpoint, self._Endpoint) and (endpoint is not self._Endpoint):
endpoint_instance = endpoint(self.base_requester)
setattr(self, endpoint_instance.endpoint, endpoint_instance)
class Requester(object):
"""Api requesting object"""
def __init__(self, client_id, binary_key, api_key=None):
"""Sets up the api object"""
self.api_key = api_key
self.client_id = client_id
self.binary_key = binary_key
def GET(self, path, params=None):
"""GET request that returns processed data"""
if not params: params = {}
# Attach the client id
params['client'] = self.client_id
# Get the uri and it's corresponding signature
relative_uri = self.build_uri(path, params)
params['sig'] = self.sign_uri(relative_uri)
# Include the API key if provided
if self.api_key:
params['apiKey'] = self.api_key
# Make the request, including the sig
final_uri = u'{API_ENDPOINT}{signed_uri}'.format(
API_ENDPOINT=API_ENDPOINT,
signed_uri=self.build_uri(path, params)
)
log.debug(u'GET url: {0}'.format(final_uri))
return _request_with_retry(final_uri)
def build_uri(self, path, params=None):
"""Construct a url to use"""
_params = {}
if params:
_params.update(params)
return '{path}?{params}'.format(
path=path,
params=urllib.urlencode(_params)
)
def sign_uri(self, uri):
"""Sign this uri"""
digest = hmac.new(self.binary_key, uri, hashlib.sha1).digest()
digest = base64.b64encode(digest)
digest = digest.translate(string.maketrans('+/', '-_'))
return digest.rstrip('=')
class _Endpoint(object):
"""Generic endpoint class"""
def __init__(self, requester):
"""Stores the request function for retrieving data"""
self.requester = requester
def _expanded_path(self, path=None):
"""Gets the expanded path, given this endpoint"""
return '/{expanded_path}'.format(
expanded_path='/'.join(p for p in (self.endpoint, path) if p)
)
def GET(self, path=None, *args, **kwargs):
"""Use the requester to get the data"""
return self.requester.GET(self._expanded_path(path), *args, **kwargs)
class Restaurants(_Endpoint):
"""Restaurant specific endpoint"""
endpoint = 'restaurants'
def search(self, params):
"""https://singleplatform.jira.com/wiki/display/PubDocs/SinglePlatform+Publisher+Integration#SinglePlatformPublisherIntegration-URIrestaurantssearch"""
return self.GET('search', params)
def location(self, LOCATION):
"""https://singleplatform.jira.com/wiki/display/PubDocs/SinglePlatform+Publisher+Integration#SinglePlatformPublisherIntegration-URIrestaurantsLOCATION"""
return self.GET('{LOCATION}'.format(LOCATION=LOCATION))
def menu(self, LOCATION):
"""https://singleplatform.jira.com/wiki/display/PubDocs/SinglePlatform+Publisher+Integration#SinglePlatformPublisherIntegration-URIrestaurantsLOCATIONmenu"""
return self.GET('{LOCATION}/menu'.format(LOCATION=LOCATION))
def shortmenu(self, LOCATION):
"""https://singleplatform.jira.com/wiki/display/PubDocs/SinglePlatform+Publisher+Integration#SinglePlatformPublisherIntegration-URIrestaurantsLOCATIONshortmenu"""
return self.GET('{LOCATION}/shortmenu'.format(LOCATION=LOCATION))
"""
Network helper functions
"""
def _request_with_retry(url, data=None):
"""Tries to load data from an endpoint using retries"""
for i in xrange(NUM_REQUEST_RETRIES):
try:
return _process_request_with_httplib2(url, data)
except SinglePlatformException, e:
# Some errors don't bear repeating
if e.__class__ in []: raise
if ((i + 1) == NUM_REQUEST_RETRIES): raise
time.sleep(1)
def _process_request_with_httplib2(url, data=None):
"""Make the request and handle exception processing"""
try:
h = httplib2.Http()
if data:
datagen, headers = poster.encode.multipart_encode(data)
data = ''.join(datagen)
method = 'POST'
else:
headers = {}
method = 'GET'
headers['Accept'] = u'application/json'
response, body = h.request(url, method, headers=headers, body=data)
data = _json_to_data(body)
# Default case, Got proper response
if response.status == 200:
return data
return _check_response(data)
except httplib2.HttpLib2Error, e:
log.error(e)
raise SinglePlatformException(u'Error connecting with SinglePlatform API')
def _json_to_data(s):
"""Convert a response string to data"""
try:
return json.loads(s)
except ValueError, e:
log.error('Invalid response: {0}'.format(e))
raise SinglePlatformException(e)
def _check_response(data):
"""Processes the response data"""
if data.get('ok') == u'true': return data
exc = error_types.get(data.get('status'))
if exc:
raise exc(data.get('status'))
else:
log.error(u'Unknown error type: {0}'.format(data.get('status')))
raise SinglePlatformException(data.get('status'))
|
|
from micawber import *
from micawber.test_utils import test_pr, test_cache, test_pr_cache, TestProvider, BaseTestCase
class ProviderTestCase(BaseTestCase):
def test_register_unregister(self):
pr = ProviderRegistry()
provider1 = TestProvider('link')
provider2 = TestProvider('link')
pr.register('1', provider1)
pr.register('2', provider1)
pr.register('3', provider2)
pr.unregister('2')
self.assertEqual(len(pr._registry), 2)
# A second unregister of an already-removed key raises KeyError.
self.assertRaises(KeyError, pr.unregister, '2')
self.assertEqual(pr.provider_for_url('1'), provider1)
self.assertEqual(pr.provider_for_url('2'), None)
self.assertEqual(pr.provider_for_url('3'), provider2)
pr.unregister('1')
pr.unregister('3')
self.assertEqual(len(pr._registry), 0)
for test_regex in ['1', '2', '3']:
self.assertEqual(pr.provider_for_url(test_regex), None)
def test_multiple_matches(self):
pr = ProviderRegistry()
provider1 = TestProvider('link')
provider2 = TestProvider('link')
pr.register('1(\d+)', provider1)
pr.register('1\d+', provider2)
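# When several registered patterns match, the most recently registered
# provider takes precedence.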
self.assertEqual(pr.provider_for_url('11'), provider2)
pr.unregister('1\d+')
self.assertEqual(pr.provider_for_url('11'), provider1)
def test_provider_matching(self):
provider = test_pr.provider_for_url('http://link-test1')
self.assertFalse(provider is None)
self.assertEqual(provider.endpoint, 'link')
provider = test_pr.provider_for_url('http://photo-test1')
self.assertFalse(provider is None)
self.assertEqual(provider.endpoint, 'photo')
provider = test_pr.provider_for_url('http://video-test1')
self.assertFalse(provider is None)
self.assertEqual(provider.endpoint, 'video')
provider = test_pr.provider_for_url('http://rich-test1')
self.assertFalse(provider is None)
self.assertEqual(provider.endpoint, 'rich')
provider = test_pr.provider_for_url('http://none-test1')
self.assertTrue(provider is None)
def test_provider(self):
resp = test_pr.request('http://link-test1')
self.assertEqual(resp, {'title': 'test1', 'type': 'link', 'url': 'http://link-test1'})
resp = test_pr.request('http://photo-test2')
self.assertEqual(resp, {'title': 'ptest2', 'type': 'photo', 'url': 'test2.jpg'})
resp = test_pr.request('http://video-test1')
self.assertEqual(resp, {'title': 'vtest1', 'type': 'video', 'html': '<test1>video</test1>', 'url': 'http://video-test1'})
resp = test_pr.request('http://link-test1', width=100)
self.assertEqual(resp, {'title': 'test1', 'type': 'link', 'url': 'http://link-test1', 'width': 99})
self.assertRaises(ProviderException, test_pr.request, 'http://not-here')
self.assertRaises(ProviderException, test_pr.request, 'http://link-test3')
def test_caching(self):
resp = test_pr_cache.request('http://link-test1')
self.assertCached('http://link-test1', resp)
# check that it's the same as what we tested in the previous case
resp2 = test_pr.request('http://link-test1')
self.assertEqual(resp, resp2)
resp = test_pr_cache.request('http://photo-test2')
self.assertCached('http://photo-test2', resp)
resp = test_pr_cache.request('http://video-test1')
self.assertCached('http://video-test1', resp)
self.assertEqual(len(test_cache._cache), 3)
def test_caching_params(self):
resp = test_pr_cache.request('http://link-test1')
self.assertCached('http://link-test1', resp)
resp_p = test_pr_cache.request('http://link-test1', width=100)
self.assertCached('http://link-test1', resp_p, width=100)
self.assertFalse(resp == resp_p)
def test_invalid_json(self):
pr = ProviderRegistry()
class BadProvider(Provider):
def fetch(self, url):
return 'bad'
pr.register('http://bad', BadProvider('link'))
self.assertRaises(InvalidResponseException, pr.request, 'http://bad')
class ParserTestCase(BaseTestCase):
def test_parse_text_full(self):
for url, expected in self.full_pairs.items():
parsed = parse_text_full(url, test_pr)
self.assertHTMLEqual(parsed, expected)
# parse_text_full will replace even inline content
for url, expected in self.full_pairs.items():
parsed = parse_text_full('this is inline: %s' % url, test_pr)
self.assertHTMLEqual(parsed, 'this is inline: %s' % expected)
for url, expected in self.full_pairs.items():
parsed = parse_html('<p>%s</p>' % url, test_pr)
self.assertHTMLEqual(parsed, '<p>%s</p>' % expected)
def test_parse_text(self):
for url, expected in self.inline_pairs.items():
parsed = parse_text('this is inline: %s' % url, test_pr)
self.assertHTMLEqual(parsed, 'this is inline: %s' % expected)
# if the link comes on its own line it gets included in full
for url, expected in self.full_pairs.items():
parsed = parse_text(url, test_pr)
self.assertHTMLEqual(parsed, expected)
# links inside block tags will render as inline
frame = '<p>Testing %s</p>'
for url, expected in self.inline_pairs.items():
parsed = parse_html(frame % (url), test_pr)
self.assertHTMLEqual(parsed, frame % (expected))
# links inside <a> tags won't change at all
frame = '<p><a href="%s">%s</a></p>'
for url, expected in self.inline_pairs.items():
parsed = parse_html(frame % (url, url), test_pr)
self.assertHTMLEqual(parsed, frame % (url, url))
# links within tags inside <a> tags are left unchanged too
frame = '<p><a href="%s"><span>%s</span></a></p>'
for url, expected in self.inline_pairs.items():
parsed = parse_html(frame % (url, url), test_pr)
self.assertHTMLEqual(parsed, frame % (url, url))
def test_multiline(self):
for url, expected in self.full_pairs.items():
expected_inline = self.inline_pairs[url]
frame = 'this is inline: %s\n%s\nand yet another %s'
test_str = frame % (url, url, url)
parsed = parse_text(test_str, test_pr)
self.assertHTMLEqual(parsed, frame % (expected_inline, expected, expected_inline))
for url, expected in self.full_pairs.items():
expected_inline = self.inline_pairs[url]
frame = '%s\nthis is inline: %s\n%s'
test_str = frame % (url, url, url)
parsed = parse_text(test_str, test_pr)
self.assertHTMLEqual(parsed, frame % (expected, expected_inline, expected))
# test mixing multiline with p tags
for url, expected in self.full_pairs.items():
expected_inline = self.inline_pairs[url]
frame = '<p>%s</p>\n<p>this is inline: %s</p>\n<p>\n%s\n</p><p>last test\n%s\n</p>'
test_str = frame % (url, url, url, url)
parsed = parse_html(test_str, test_pr)
self.assertHTMLEqual(parsed, frame % (expected, expected_inline, expected, expected_inline))
for url, expected in self.full_pairs.items():
expected_inline = self.inline_pairs[url]
frame = '<p><a href="#foo">%s</a></p>\n<p>this is inline: %s</p>\n<p>last test\n%s\n</p>'
test_str = frame % (url, url, url)
parsed = parse_html(test_str, test_pr)
self.assertHTMLEqual(parsed, frame % (url, expected_inline, expected_inline))
def test_multiline_full(self):
for url, expected in self.full_pairs.items():
frame = 'this is inline: %s\n%s\nand yet another %s'
test_str = frame % (url, url, url)
parsed = parse_text_full(test_str, test_pr)
self.assertHTMLEqual(parsed, frame % (expected, expected, expected))
def test_urlize(self):
blank = 'http://fapp.io/foo/'
blank_e = '<a href="http://fapp.io/foo/">http://fapp.io/foo/</a>'
for url, expected in self.full_pairs.items():
expected_inline = self.inline_pairs[url]
frame = 'test %s\n%s\n%s\nand finally %s'
test_str = frame % (url, blank, url, blank)
parsed = parse_text(test_str, test_pr)
self.assertHTMLEqual(parsed, frame % (expected_inline, blank_e, expected, blank_e))
parsed = parse_text(test_str, test_pr, urlize_all=False)
self.assertHTMLEqual(parsed, frame % (expected_inline, blank, expected, blank))
parsed = parse_text_full(test_str, test_pr)
self.assertHTMLEqual(parsed, frame % (expected, blank_e, expected, blank_e))
parsed = parse_text_full(test_str, test_pr, urlize_all=False)
self.assertHTMLEqual(parsed, frame % (expected, blank, expected, blank))
parsed = parse_html(test_str, test_pr)
self.assertHTMLEqual(parsed, frame % (expected_inline, blank_e, expected_inline, blank_e))
parsed = parse_html(test_str, test_pr, urlize_all=False)
self.assertHTMLEqual(parsed, frame % (expected_inline, blank, expected_inline, blank))
frame = '<p>test %s</p>\n<a href="foo">%s</a>\n<a href="foo2">%s</a>\n<p>and finally %s</p>'
test_str = frame % (url, blank, url, blank)
parsed = parse_html(test_str, test_pr)
self.assertHTMLEqual(parsed, frame % (expected_inline, blank, url, blank_e))
parsed = parse_html(test_str, test_pr, urlize_all=False)
self.assertHTMLEqual(parsed, frame % (expected_inline, blank, url, blank))
def test_extract(self):
blank = 'http://fapp.io/foo/'
frame = 'test %s\n%s\n%s\n%s at last'
frame_html = '<p>test %s</p><p><a href="foo">%s</a> %s</p><p>%s</p>'
for url, expected in self.data_pairs.items():
all_urls, extracted = extract(frame % (url, blank, url, blank), test_pr)
self.assertEqual(all_urls, [url, blank])
if 'url' not in expected:
expected['url'] = url
if 'title' not in expected:
expected['title'] = expected['url']
self.assertEqual(extracted, {url: expected})
all_urls, extracted = extract_html(frame_html % (url, url, blank, blank), test_pr)
self.assertEqual(all_urls, [url, blank])
if 'url' not in expected:
expected['url'] = url
self.assertEqual(extracted, {url: expected})
def test_outside_of_markup(self):
frame = '%s<p>testing</p>'
for url, expected in self.full_pairs.items():
parsed = parse_html(frame % (url), test_pr)
self.assertHTMLEqual(parsed, frame % (expected))
def test_html_entities(self):
frame_html = '<p>test %s</p><p><a href="foo">%s</a></p>'
for url, expected in self.data_pairs.items():
esc_url = url.replace('&', '&amp;')
all_urls, extracted = extract_html(frame_html % (esc_url, esc_url), test_pr)
self.assertEqual(all_urls, [url])
if 'url' not in expected:
expected['url'] = url
if 'title' not in expected:
expected['title'] = expected['url']
self.assertEqual(extracted, {url: expected})
rendered = parse_html('<p>%s</p>' % esc_url, test_pr)
self.assertHTMLEqual(rendered, '<p>%s</p>' % self.full_pairs[url])
|
|
import math
import numpy
import re
import StringIO
import sys
# Script to translate from TLM flat file to U-FLAME lattice.
#
# type name length [m] type [-18: Stripper, -21: Corrector, -28: BPM, -30: Matrix]
# mark LS1_CA01:BPM_D1129 0.000000 -28.000000
#
# type name length [m] aper [m]
# drift DRIFT 0.072000 0.020000
#
# type name length [m] aper [m] B [T]
# solenoid LS1_CA01:SOL1_D1131 0.100000 0.020000 5.340000
#
# type name length [m] aper [m] phi [deg] beta*gamma type phi1 [deg] phi2 [deg]
# dipole FS1_CSS:DH_D2163 0.060000 0.020000 -1.000000 0.190370 400.000000 0.000000 -5.000000
#
# type name length [m] aper [m] B2 [T/m]
# quadpole FS1_CSS:QH_D2194 0.250000 0.025000 3.459800
#
# type name length [m] aper [m] f [MHz] scl fact phi [deg]
# rfcavity LS1_CA01:CAV1_D1127 0.240000 0.017000 80.500000 0.640000 -35.000000
#
# type name length [m] aper [m] phi [deg] beta cyl(0)/spher(1) hor(0)/ver(1)...
# ebend EB1 1.0 0.02 90.0 0.00506953 1 1...
# X fringe Y fringe voltage asym fact
# 0.0 0.0 0.0
#
# type name length [m] aper [m] voltage [V] electrode radius [m]
# equad QE1H 0.1034 0.02 -358.574 0.0746
#
# Mis-alignment:
#
# dx [m] dy [m] pitch [rad] yaw [rad] tilt [rad]
def get_misalign(tokens, ind):
return ' dx = %s, dy = %s, pitch = %s, yaw = %s, tilt = %s;' \
% (tokens[ind], tokens[ind+1], tokens[ind+2], tokens[ind+3], tokens[ind+4])
def get_index(tokens, token):
return tokens.index(token) if token in tokens else None
def marker(line, tokens):
global beam_line, n_marker, add_ind
if float(tokens[2]) != 0:
print '*** marker with non-zero length:', tokens[1]
exit(1)
if add_ind: n_marker += 1; tokens[1] += '_%d' % (n_marker)
beam_line.append(tokens[1])
return '%s: marker;' % (tokens[1])
def drift(line, tokens):
global beam_line, n_drift
if add_ind: n_drift += 1; tokens[1] += '_%d' % (n_drift)
beam_line.append(tokens[1])
return '%s: drift, L = %s, aper = %s;' % (tokens[1], tokens[2], tokens[3])
def sbend(line, tokens):
global beam_line, n_sbend, add_ind
if add_ind: n_sbend += 1; tokens[1] += '_%d' % (n_sbend)
beam_line.append(tokens[1])
str = '%s: sbend, L = %s, phi = %s, phi1 = %s, phi2 = %s, bg = %s, aper = %s;' \
% (tokens[1], tokens[2], tokens[4], tokens[7], tokens[8], tokens[5], tokens[3])
# str += get_misalign(tokens, 9)
return str
def solenoid(line, tokens):
global beam_line, n_solenoid, add_ind
if add_ind: n_solenoid += 1; tokens[1] += '_%d' % (n_solenoid)
beam_line.append(tokens[1])
str = '%s: solenoid, L = %s, B = %s, aper = %s,\n' \
% (tokens[1], tokens[2], tokens[4], tokens[3])
str += get_misalign(tokens, 5)
return str
def quadrupole(line, tokens):
global beam_line, n_quad, add_ind
if add_ind: n_quad += 1; tokens[1] += '_%d' % (n_quad)
beam_line.append(tokens[1])
str = '%s: quadrupole, L = %s, B2 = %s, aper = %s,\n' \
% (tokens[1], tokens[2], tokens[4], tokens[3])
str += get_misalign(tokens, 5)
return str
def rfcavity(line, tokens):
global beam_line, n_cavity, add_ind
if add_ind: n_cavity += 1; tokens[1] += '_%d' % (n_cavity)
beam_line.append(tokens[1])
str = '%s: rfcavity, cavtype = \"0.041QWR\", L = %s, f = %se6, phi = %s,\n scl_fac = %s,' \
' aper = %s,\n' \
% (tokens[1], tokens[2], tokens[4], tokens[6], tokens[5], tokens[3])
str += get_misalign(tokens, 7)
return str
def edipole(line, tokens):
global beam_line, n_edipole, add_ind
if add_ind: n_edipole += 1; tokens[1] += '_%d' % (n_edipole)
beam_line.append(tokens[1])
str = '%s: edipole, L = %s, phi = %s, x_frng = %s, y_frng = %s, beta = %s,' \
' spher = %s, asym_fac = %s, aper = %s;' \
% (tokens[1], tokens[2], tokens[4], tokens[7], tokens[8],
tokens[5], tokens[6], tokens[9], tokens[3])
# str += get_misalign(tokens, 10)
return str
def equad(line, tokens):
global beam_line, n_equad, add_ind
if add_ind: n_equad += 1; tokens[1] += '_%d' % (n_equad)
beam_line.append(tokens[1])
str = '%s: equad, L = %s, V = %s, radius = %s, aper = %s;' \
% (tokens[1], tokens[2], tokens[4], tokens[5], tokens[3])
# str += get_misalign(tokens, 7)
return str
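# Dispatch table mapping each TLM element keyword to the emitter that
# renders the corresponding lattice element definition.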
tlm_dict = {
'mark' : marker,
'drift' : drift,
'solenoid' : solenoid,
'dipole' : sbend,
'quadpole' : quadrupole,
'combquad' : quadrupole,
'rfcavity' : rfcavity,
'ebend' : edipole,
'equad' : equad,
}
def parse_definition(line, tokens):
n_elem = 10; # No of elements per line.
for k in range(len(tokens)):
# Remove white space; unless a string.
if not tokens[k].startswith('"'):
tokens[k] = re.sub('[\s]', '', tokens[k])
try:
str = tlm_dict[tokens[0]](line, tokens)
except KeyError:
print '\n*** undefined token: ', tokens[0]
print line
print tokens
exit(1)
return str
def parse_line(line, outf):
line_lc = line.lower()
if not line_lc.rstrip():
# Blank line.
outf.write('\n')
elif line_lc.startswith('#'):
# Comment.
outf.write('{ %s }\n' % (line.strip('#')))
else:
# Definition.
# tokens = re.split(r'[ ]', line_lc)
tokens = re.split(r'\s+', line_lc)
# Replace ':' with '_' in name.
tokens[1] = tokens[1].replace(':', '_', 1)
outf.write('%s\n' % (parse_definition(line_lc, tokens)))
def prt_decl(outf):
outf.write('# Beam envelope simulation.\n')
outf.write('\nsim_type = "MomentMatrix";\n\n')
def transl_file(file_name):
global beam_line
str = file_name.split('.')[0]+'.lat'
inf = open(file_name, 'r')
outf = open(str, 'w')
prt_decl(outf)
line = inf.readline()
while line:
line = line.strip('\r\n')
while line.endswith('&'):
# Line
line = line.strip('&')
line += (inf.readline()).strip('\r\n')
parse_line(line, outf)
line = inf.readline()
outf.write('\ncell: LINE = (\n S,\n')
n = len(beam_line); n_max = 8;
outf.write(' ')
for k in range(n):
outf.write(beam_line[k]);
if (k+1 != n): outf.write(', ')
if (k+1) % n_max == 0: outf.write('\n'); outf.write(' ')
if n % n_max != 0: outf.write('\n')
outf.write(');\n')
outf.write('\nUSE: cell;\n')
home_dir = ''
n_marker = 0; n_drift = 0; n_sbend = 0; n_solenoid = 0
n_quad = 0; n_cavity = 0; n_ecavity = 0; n_edipole = 0; n_equad = 0
add_ind = True
beam_line = [];
transl_file(home_dir+sys.argv[1])
|
|
# Author: Jonas Latt, jonas.latt@flowkit.com
class Cpp2d:
def __init__(self, lattice):
self.lattice = lattice
def collide_boundaries(self):
return {
'var': ['fin', 'nx', 'ny', 'c', 't', 'q', 'wall', 'force', 'omega'],
'code': """
blitz::Array<double,1> feq(q);
bool useObstacles = wall.numElements() > 0;
bool useForce = force.numElements() > 0;
for (int x=0; x<nx; ++x) {{
int dy = x==0 || x==nx-1 ? 1 : ny-1;
for (int y=0; y<ny; y+=dy) {{
{collide_cell}
}}
}}
""".format(collide_cell=self._collide_cell()) }
def inlet_outlet(self):
return {
'var': ['fin', 'nx', 'ny', 'c', 't', 'q', 'bdvel',
'wall', 'use_inlet', 'use_outlet'],
'code': """
blitz::Array<double,1> feq(q);
bool useObstacles = wall.numElements() > 0;
for (int y=0; y<ny; ++y) {{
int x=0;
{inlet}
}}
for (int y=0; y<ny; ++y) {{
int x=nx-1;
{outlet}
}}
""".format(collide_cell=self._collide_cell(),
inlet=self._inlet(), outlet=self._outlet()) }
def collide_bulk_and_stream(self):
return {
'var': ['fin', 'nx', 'ny', 'c', 't', 'q', 'wall', 'force', 'omega'],
'code': """
blitz::Array<double,1> feq(q);
bool useObstacles = wall.numElements() > 0;
bool useForce = force.numElements() > 0;
for (int x=1; x<nx-1; ++x) {{
for (int y=1; y<ny-1; ++y) {{
{collide_cell}
{bulkstream}
}}
}}
for (int x=0; x<nx; ++x) {{
int dy = x==0 || x==nx-1 ? 1 : ny-1;
for (int y=0; y<ny; y+=dy) {{
{bdstream}
}}
}}
""".format(collide_cell=self._collide_cell(),
bulkstream=self._bulkstream(), bdstream=self._bdstream()) }
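# The *_excess helpers below generate code for populations that stream past
# the domain edge: num_excess counts them per 3x3 block of exit directions,
# get_excess copies them into the flat buffer ftmp, and put_excess writes
# them back at the periodically wrapped target coordinates.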
def num_excess(self):
return {
'var': ['numexcess', 'nx', 'ny', 'q', 'c'],
'code': self._excess_templ().format(bulk="""
numexcess(bi)++;
""")}
def get_excess(self):
return {
'var': ['fin', 'ftmp', 'ofs', 'nx', 'ny', 'q', 'c'],
'code': self._excess_templ().format(bulk="""
int abs_ind = ofs(bi)+ind(bi);
ftmp(abs_ind) = fin(x,y, q-1-i);
ind(bi)++;
""")}
def put_excess(self):
return {
'var': ['fin', 'ftmp', 'ofs', 'nx', 'ny', 'q', 'c'],
'code': self._excess_templ().format(bulk="""
if (bx==-1) xx+=nx; if (bx==+1) xx-=nx;
if (by==-1) yy+=ny; if (by==+1) yy-=ny;
int abs_ind = ofs(bi)+ind(bi);
fin(xx,yy, i) = ftmp(abs_ind);
ind(bi)++;
""")}
def _macroscopic(self):
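# Emit unrolled C++ computing the cell density (sum over all populations)
# and velocity (net momentum along each axis divided by density); the sums
# are expanded at generation time from the lattice velocity set c.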
q, c = self.lattice.q, self.lattice.c
fxy = lambda i: "fin(x,y,{0})".format(i)
return """
double rho = {rho};
double u0 = ({u0_left} - ({u0_right}))/rho;
double u1 = ({u1_left} - ({u1_right}))/rho;
""".format(
rho= "+".join([fxy(i) for i in range(q)]),
u0_left= "+".join([fxy(i) for i in range(q) if c[i,0] > 0]),
u0_right="+".join([fxy(i) for i in range(q) if c[i,0] < 0]),
u1_left= "+".join([fxy(i) for i in range(q) if c[i,1] > 0]),
u1_right="+".join([fxy(i) for i in range(q) if c[i,1] < 0]))
def _equilibrium(self):
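# Second-order BGK equilibrium, feq_i = rho*t_i*(1 + 3 c_i.u
# + 9/2 (c_i.u)^2 - 3/2 |u|^2); 'cu' holds 3 c_i.u, and the rest
# population (index q//2, zero velocity) keeps only the 1 - usqr term.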
q, c = self.lattice.q, self.lattice.c
def ci_dot_u(i):
return "".join([(["-", "", "+"][c[i,d] + 1] + "u{d}").format(d=d)
for d in range(2) if c[i,d] != 0 ])
eq1 = """
double usqr = 3./2.*(u0*u0+u1*u1);
double cu;
"""
eq2_template = """
cu = 3.0 * ({ci_dot_u});
feq({pop}) = rho*t({pop})*(1.+cu+0.5*cu*cu-usqr);
"""
eq2 = "".join([eq2_template.format(pop=i, ci_dot_u=ci_dot_u(i))
for i in range(q) if i != q//2 ])
eq3 = """
feq({i0}) = rho*t({i0})*(1.-usqr);
""".format(i0=q//2)
return eq1 + eq2 + eq3
def _collision(self):
q, c = self.lattice.q, self.lattice.c
def ci_dot_f(i):
return "".join([(["-", "", "+"][c[i,d] + 1] + "force(x,y,{d})").format(d=d)
for d in range(2) if c[i,d] != 0 ])
add_force_template = """
fin(x,y,{pop}) += 3.0*t({pop})*{ci_dot_f};
"""
add_force = "".join([add_force_template.format(pop=i, ci_dot_f=ci_dot_f(i))
for i in range(q) if i != q//2 ])
return """
if (!(useObstacles && wall(x,y))) {{
for (int i=0; i<q; ++i) {{
fin(x,y,i) *= 1.-omega;
fin(x,y,i) += omega*feq(i);
}}
if (useForce) {{
{add_force}
}}
for (int i=0; i<q/2; ++i) {{
std::swap(fin(x,y,i),fin(x,y,q-1-i));
}}
}}
""".format(add_force = add_force)
def _collide_cell(self):
return self._macroscopic() + self._equilibrium() + self._collision()
def _outlet(self):
return """
if (useObstacles && wall(x,y)) continue;
if (use_outlet) {{
for (int i=0; i<q; ++i) {{
if (c(i,0)==-1) fin(x,y,i) = fin(x-1,y,i);
}}
}}
""".format(collide_cell=self._collide_cell())
def _inlet(self):
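# Velocity inlet: recover density from the known populations
# (rho = (middle + 2*left)/(1 - u0), Zou/He style) and apply
# non-equilibrium bounce-back to the unknown incoming populations.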
return """
if (useObstacles && wall(x,y)) continue;
if (use_inlet) {{
double u0 = bdvel(0,y,0);
double u1 = bdvel(0,y,1);
double rhoMiddle = 0., rhoLeft = 0.;
for (int i=0; i<q; ++i) {{
if (c(i,0)==-1) rhoLeft += fin(x,y,i);
else if (c(i,0)==0) rhoMiddle += fin(x,y,i);
}}
double rho = 1./(1.-u0)*(rhoMiddle+2.*rhoLeft);
{equilibrium}
for (int i=0; i<q; ++i) {{
if (c(i,0)==1)
fin(x,y,i) = fin(x,y,q-1-i) + feq(i) - feq(q-1-i);
}}
}}
""".format(equilibrium=self._equilibrium(), collision=self._collision(),
collide_cell=self._collide_cell())
def _bulkstream(self):
return """
for (int i=0; i<q/2; ++i) {{
int xx = x+c(i,0);
int yy = y+c(i,1);
std::swap(fin(xx,yy,i),fin(x,y,q-1-i));
}}
"""
def _bdstream(self):
return """
for (int i=0; i<q/2; ++i) {{
int xx = x+c(i,0);
int yy = y+c(i,1);
if (xx<0) xx=nx-1; if (xx>=nx) xx=0;
if (yy<0) yy=ny-1; if (yy>=ny) yy=0;
std::swap(fin(xx,yy,i),fin(x,y,q-1-i));
}}
"""
def _excess_templ(self):
return """
blitz::Array<size_t,1> ind(9);
for(int i=0; i<9; ++i) ind(i)=0;
for (int x=0; x<nx; ++x) {{
int dy = x==0 || x==nx-1 ? 1 : ny-1;
for (int y=0; y<ny; y+=dy) {{
for (int i=0; i<q; ++i) {{
int xx = x+c(i,0); int yy = y+c(i,1);
int bx=0, by=0;
if (xx<0) bx=-1; if (xx>=nx) bx=+1;
if (yy<0) by=-1; if (yy>=ny) by=+1;
if(bx!=0 || by!=0) {{
int bi = by+1 + 3*(bx+1);
{bulk}
}}
}}
}}
}}
"""
class Cpp3d:
def __init__(self, lattice):
self.lattice = lattice
def collide_boundaries(self):
return {
'var': ['fin', 'nx', 'ny', 'nz', 'c', 't', 'q',
'wall', 'force', 'omega'],
'code': """
blitz::Array<double,1> feq(q);
bool useObstacles = wall.numElements() > 0;
bool useForce = force.numElements() > 0;
for (int x=0; x<nx; ++x) {{
for (int y=0; y<ny; ++y) {{
int dz = x==0 || x==nx-1 || y==0 || y==ny-1 ? 1 : nz-1;
for (int z=0; z<nz; z+=dz) {{
{collide_cell}
}}
}}
}}
""".format(collide_cell=self._collide_cell()) }
def inlet_outlet(self):
return {
'var': ['fin', 'nx', 'ny', 'nz', 'c', 't', 'q', 'bdvel',
'wall', 'use_inlet', 'use_outlet'],
'code': """
blitz::Array<double,1> feq(q);
bool useObstacles = wall.numElements() > 0;
for (int y=0; y<ny; ++y) {{
for (int z=0; z<nz; ++z) {{
int x=0;
{inlet}
}}
}}
for (int y=0; y<ny; ++y) {{
for (int z=0; z<nz; ++z) {{
int x=nx-1;
{outlet}
}}
}}
""".format(collide_cell=self._collide_cell(),
inlet=self._inlet(), outlet=self._outlet()) }
def collide_bulk_and_stream(self):
return {
'var': ['fin', 'nx', 'ny', 'nz', 'c', 't', 'q', 'wall', 'force', 'omega'],
'code': """
blitz::Array<double,1> feq(q);
bool useObstacles = wall.numElements() > 0;
bool useForce = force.numElements() > 0;
for (int x=1; x<nx-1; ++x) {{
for (int y=1; y<ny-1; ++y) {{
for (int z=1; z<nz-1; ++z) {{
{collide_cell}
{bulkstream}
}}
}}
}}
for (int x=0; x<nx; ++x) {{
for (int y=0; y<ny; ++y) {{
int dz = x==0 || x==nx-1 || y==0 || y==ny-1 ? 1 : nz-1;
for (int z=0; z<nz; z+=dz) {{
{bdstream}
}}
}}
}}
""".format(collide_cell=self._collide_cell(),
bulkstream=self._bulkstream(), bdstream=self._bdstream()) }
def num_excess(self):
return {
'var': ['numexcess', 'nx', 'ny', 'nz', 'q', 'c'],
'code': self._excess_templ().format(bulk="""
numexcess(bi)++;
""") }
def get_excess(self):
return {
'var': ['fin', 'ftmp', 'ofs', 'nx', 'ny', 'nz', 'q', 'c'],
'code': self._excess_templ().format(bulk="""
int abs_ind = ofs(bi)+ind(bi);
ftmp(abs_ind) = fin(x,y,z, q-1-i);
ind(bi)++;
""") }
def put_excess(self):
return {
'var': ['fin', 'ftmp', 'ofs', 'nx', 'ny', 'nz', 'q', 'c'],
'code': self._excess_templ().format(bulk="""
if (bx==-1) xx+=nx; if (bx==+1) xx-=nx;
if (by==-1) yy+=ny; if (by==+1) yy-=ny;
if (bz==-1) zz+=nz; if (bz==+1) zz-=nz;
int abs_ind = ofs(bi)+ind(bi);
fin(xx,yy,zz, i) = ftmp(abs_ind);
ind(bi)++;
""") }
def _macroscopic(self):
q, c = self.lattice.q, self.lattice.c
fxy = lambda i: "fin(x,y,z,{0})".format(i)
return """
double rho = {rho};
double u0 = ({u0_left} - ({u0_right}))/rho;
double u1 = ({u1_left} - ({u1_right}))/rho;
double u2 = ({u2_left} - ({u2_right}))/rho;
""".format(
rho= "+".join([fxy(i) for i in range(q)]),
u0_left= "+".join([fxy(i) for i in range(q) if c[i,0] > 0]),
u0_right="+".join([fxy(i) for i in range(q) if c[i,0] < 0]),
u1_left= "+".join([fxy(i) for i in range(q) if c[i,1] > 0]),
u1_right="+".join([fxy(i) for i in range(q) if c[i,1] < 0]),
u2_left= "+".join([fxy(i) for i in range(q) if c[i,2] > 0]),
u2_right="+".join([fxy(i) for i in range(q) if c[i,2] < 0]))
def _equilibrium(self):
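# Same second-order BGK equilibrium as in Cpp2d, extended to three
# velocity components.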
q, c = self.lattice.q, self.lattice.c
def ci_dot_u(i):
return "".join([(["-", "", "+"][c[i,d] + 1] + "u{d}").format(d=d)
for d in range(3) if c[i,d] != 0 ])
eq1 = """
double usqr = 3./2.*(u0*u0+u1*u1+u2*u2);
double cu;
"""
eq2_template = """
cu = 3.0 * ({ci_dot_u});
feq({pop}) = rho*t({pop})*(1.+cu+0.5*cu*cu-usqr);
"""
eq2 = "".join([eq2_template.format(pop=i, ci_dot_u=ci_dot_u(i))
for i in range(q) if i != q//2 ])
eq3 = """
feq({i0}) = rho*t({i0})*(1.-usqr);
""".format(i0=q//2)
return eq1 + eq2 + eq3
def _collision(self):
q, c = self.lattice.q, self.lattice.c
def ci_dot_f(i):
return "".join([(["-", "", "+"][c[i,d] + 1] + "force(x,y,z,{d})").format(d=d)
for d in range(3) if c[i,d] != 0 ])
add_force_template = """
fin(x,y,z,{pop}) += 3.0*t({pop})*{ci_dot_f};
"""
add_force = "".join([add_force_template.format(pop=i, ci_dot_f=ci_dot_f(i))
for i in range(q) if i != q//2 ])
return """
if (!(useObstacles && wall(x,y,z))) {{
for (int i=0; i<q; ++i) {{
fin(x,y,z,i) *= 1.-omega;
fin(x,y,z,i) += omega*feq(i);
}}
if (useForce) {{
{add_force}
}}
for (int i=0; i<q/2; ++i) {{
std::swap(fin(x,y,z,i),fin(x,y,z,q-1-i));
}}
}}
""".format(add_force = add_force)
def _collide_cell(self):
return self._macroscopic() + self._equilibrium() + self._collision()
def _outlet(self):
return """
if (useObstacles && wall(x,y,z)) continue;
if (use_outlet) {{
for (int i=0; i<q; ++i) {{
if (c(i,0)==-1) fin(x,y,z,i) = fin(x-1,y,z,i);
}}
}}
""".format(collide_cell=self._collide_cell())
def _inlet(self):
return """
if (useObstacles && wall(x,y,z)) continue;
if (use_inlet) {{
double u0 = bdvel(0,y,z,0);
double u1 = bdvel(0,y,z,1);
double u2 = bdvel(0,y,z,2);
double rhoMiddle = 0., rhoLeft = 0.;
for (int i=0; i<q; ++i) {{
if (c(i,0)==-1) rhoLeft += fin(x,y,z,i);
else if (c(i,0)==0) rhoMiddle += fin(x,y,z,i);
}}
double rho = 1./(1.-u0)*(rhoMiddle+2.*rhoLeft);
{equilibrium}
for (int i=0; i<q; ++i) {{
if (c(i,0)==1)
fin(x,y,z,i) = fin(x,y,z,q-1-i) + feq(i) - feq(q-1-i);
}}
}}
""".format(equilibrium=self._equilibrium(), collision=self._collision(),
collide_cell=self._collide_cell())
def _bulkstream(self):
return """
for (int i=0; i<q/2; ++i) {{
int xx = x+c(i,0);
int yy = y+c(i,1);
int zz = z+c(i,2);
std::swap(fin(xx,yy,zz,i),fin(x,y,z,q-1-i));
}}
"""
def _bdstream(self):
return """
for (int i=0; i<q/2; ++i) {{
int xx = x+c(i,0);
int yy = y+c(i,1);
int zz = z+c(i,2);
if (xx<0) xx=nx-1; if (xx>=nx) xx=0;
if (yy<0) yy=ny-1; if (yy>=ny) yy=0;
if (zz<0) zz=nz-1; if (zz>=nz) zz=0;
std::swap(fin(xx,yy,zz,i),fin(x,y,z,q-1-i));
}}
"""
def _excess_templ(self):
return """
blitz::Array<size_t,1> ind(27);
for(int i=0; i<27; ++i) ind(i)=0;
for (int x=0; x<nx; ++x) {{
for (int y=0; y<ny; ++y) {{
int dz = (x==0 || x==nx-1 || y==0 || y==ny-1) ? 1 : (nz-1);
for (int z=0; z<nz; z+=dz) {{
for (int i=0; i<q; ++i) {{
int xx = x+c(i,0); int yy = y+c(i,1); int zz = z+c(i,2);
int bx=0, by=0, bz=0;
if (xx<0) bx=-1; if (xx>=nx) bx=+1;
if (yy<0) by=-1; if (yy>=ny) by=+1;
if (zz<0) bz=-1; if (zz>=nz) bz=+1;
if(bx!=0 || by!=0 || bz!=0) {{
int bi = bz+1 + 3*(by+1 +3*(bx+1));
{bulk}
}}
}}
}}
}}
}}
"""
|
|
"""Tests for the Z-Wave init."""
import asyncio
from collections import OrderedDict
from datetime import datetime
import unittest
from unittest.mock import MagicMock, patch
import pytest
from pytz import utc
import voluptuous as vol
from homeassistant.bootstrap import async_setup_component
from homeassistant.components import zwave
from homeassistant.components.zwave import (
CONF_DEVICE_CONFIG_GLOB,
CONFIG_SCHEMA,
DATA_NETWORK,
const,
)
from homeassistant.components.zwave.binary_sensor import get_device
from homeassistant.const import ATTR_ENTITY_ID, EVENT_HOMEASSISTANT_START
from homeassistant.helpers.entity_registry import async_get_registry
from homeassistant.helpers.device_registry import async_get_registry as get_dev_reg
from homeassistant.setup import setup_component
from tests.common import (
async_fire_time_changed,
get_test_home_assistant,
mock_coro,
mock_registry,
)
from tests.mock.zwave import MockEntityValues, MockNetwork, MockNode, MockValue
async def test_valid_device_config(hass, mock_openzwave):
"""Test valid device config."""
device_config = {"light.kitchen": {"ignored": "true"}}
result = await async_setup_component(
hass, "zwave", {"zwave": {"device_config": device_config}}
)
await hass.async_block_till_done()
assert result
async def test_invalid_device_config(hass, mock_openzwave):
"""Test invalid device config."""
device_config = {"light.kitchen": {"some_ignored": "true"}}
result = await async_setup_component(
hass, "zwave", {"zwave": {"device_config": device_config}}
)
await hass.async_block_till_done()
assert not result
def test_config_access_error():
"""Test threading error accessing config values."""
node = MagicMock()
def side_effect():
raise RuntimeError
node.values.values.side_effect = side_effect
result = zwave.get_config_value(node, 1)
assert result is None
async def test_network_options(hass, mock_openzwave):
"""Test network options."""
result = await async_setup_component(
hass,
"zwave",
{"zwave": {"usb_path": "mock_usb_path", "config_path": "mock_config_path"}},
)
await hass.async_block_till_done()
assert result
network = hass.data[zwave.DATA_NETWORK]
assert network.options.device == "mock_usb_path"
assert network.options.config_path == "mock_config_path"
async def test_network_key_validation(hass, mock_openzwave):
"""Test network key validation."""
test_values = [
(
"0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, "
"0x0C, 0x0D, 0x0E, 0x0F, 0x10"
),
(
"0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,"
"0x0E,0x0F,0x10"
),
]
for value in test_values:
result = zwave.CONFIG_SCHEMA({"zwave": {"network_key": value}})
assert result["zwave"]["network_key"] == value
async def test_erroneous_network_key_fails_validation(hass, mock_openzwave):
"""Test that an erroneous network key fails validation."""
test_values = [
(
"0x 01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, "
"0x0C, 0x0D, 0x0E, 0x0F, 0x10"
),
(
"0X01,0X02,0X03,0X04,0X05,0X06,0X07,0X08,0X09,0X0A,0X0B,0X0C,0X0D,"
"0X0E,0X0F,0X10"
),
"invalid",
"1234567",
1234567,
]
for value in test_values:
with pytest.raises(vol.Invalid):
zwave.CONFIG_SCHEMA({"zwave": {"network_key": value}})
async def test_auto_heal_midnight(hass, mock_openzwave):
"""Test network auto-heal at midnight."""
await async_setup_component(hass, "zwave", {"zwave": {"autoheal": True}})
await hass.async_block_till_done()
network = hass.data[zwave.DATA_NETWORK]
assert not network.heal.called
time = utc.localize(datetime(2017, 5, 6, 0, 0, 0))
async_fire_time_changed(hass, time)
await hass.async_block_till_done()
assert network.heal.called
assert len(network.heal.mock_calls) == 1
async def test_auto_heal_disabled(hass, mock_openzwave):
"""Test network auto-heal disabled."""
await async_setup_component(hass, "zwave", {"zwave": {"autoheal": False}})
await hass.async_block_till_done()
network = hass.data[zwave.DATA_NETWORK]
assert not network.heal.called
time = utc.localize(datetime(2017, 5, 6, 0, 0, 0))
async_fire_time_changed(hass, time)
await hass.async_block_till_done()
assert not network.heal.called
async def test_setup_platform(hass, mock_openzwave):
"""Test invalid device config."""
mock_device = MagicMock()
hass.data[DATA_NETWORK] = MagicMock()
hass.data[zwave.DATA_DEVICES] = {456: mock_device}
async_add_entities = MagicMock()
result = await zwave.async_setup_platform(hass, None, async_add_entities, None)
assert not result
assert not async_add_entities.called
result = await zwave.async_setup_platform(
hass, None, async_add_entities, {const.DISCOVERY_DEVICE: 123}
)
assert not result
assert not async_add_entities.called
result = await zwave.async_setup_platform(
hass, None, async_add_entities, {const.DISCOVERY_DEVICE: 456}
)
assert result
assert async_add_entities.called
assert len(async_add_entities.mock_calls) == 1
assert async_add_entities.mock_calls[0][1][0] == [mock_device]
async def test_zwave_ready_wait(hass, mock_openzwave):
"""Test that zwave continues after waiting for network ready."""
# Initialize zwave
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
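# The patched sleep() records each requested delay and utcnow() derives the
# time from that count, so the network-ready wait runs to its timeout
# deterministically without real delays.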
sleeps = []
def utcnow():
return datetime.fromtimestamp(len(sleeps))
asyncio_sleep = asyncio.sleep
async def sleep(duration, loop=None):
if duration > 0:
sleeps.append(duration)
await asyncio_sleep(0)
with patch("homeassistant.components.zwave.dt_util.utcnow", new=utcnow):
with patch("asyncio.sleep", new=sleep):
with patch.object(zwave, "_LOGGER") as mock_logger:
hass.data[DATA_NETWORK].state = MockNetwork.STATE_STARTED
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
assert len(sleeps) == const.NETWORK_READY_WAIT_SECS
assert mock_logger.warning.called
assert len(mock_logger.warning.mock_calls) == 1
assert (
mock_logger.warning.mock_calls[0][1][1]
== const.NETWORK_READY_WAIT_SECS
)
async def test_device_entity(hass, mock_openzwave):
"""Test device entity base class."""
node = MockNode(node_id="10", name="Mock Node")
value = MockValue(
data=False,
node=node,
instance=2,
object_id="11",
label="Sensor",
command_class=const.COMMAND_CLASS_SENSOR_BINARY,
)
power_value = MockValue(
data=50.123456, node=node, precision=3, command_class=const.COMMAND_CLASS_METER
)
values = MockEntityValues(primary=value, power=power_value)
device = zwave.ZWaveDeviceEntity(values, "zwave")
device.hass = hass
device.value_added()
device.update_properties()
await hass.async_block_till_done()
assert not device.should_poll
assert device.unique_id == "10-11"
assert device.name == "Mock Node Sensor"
assert device.device_state_attributes[zwave.ATTR_POWER] == 50.123
async def test_node_removed(hass, mock_openzwave):
"""Test node removed in base class."""
# Create a mock node & node entity
node = MockNode(node_id="10", name="Mock Node")
value = MockValue(
data=False,
node=node,
instance=2,
object_id="11",
label="Sensor",
command_class=const.COMMAND_CLASS_SENSOR_BINARY,
)
power_value = MockValue(
data=50.123456, node=node, precision=3, command_class=const.COMMAND_CLASS_METER
)
values = MockEntityValues(primary=value, power=power_value)
device = zwave.ZWaveDeviceEntity(values, "zwave")
device.hass = hass
device.entity_id = "zwave.mock_node"
device.value_added()
device.update_properties()
await hass.async_block_till_done()
# Save it to the entity registry
registry = mock_registry(hass)
registry.async_get_or_create("zwave", "zwave", device.unique_id)
device.entity_id = registry.async_get_entity_id("zwave", "zwave", device.unique_id)
# Create dummy entity registry entries for other integrations
hue_entity = registry.async_get_or_create("light", "hue", 1234)
zha_entity = registry.async_get_or_create("sensor", "zha", 5678)
# Verify our Z-Wave entity is registered
assert registry.async_is_registered(device.entity_id)
# Remove it
entity_id = device.entity_id
await device.node_removed()
# Verify registry entry for our Z-Wave node is gone
assert not registry.async_is_registered(entity_id)
# Verify registry entries for our other entities remain
assert registry.async_is_registered(hue_entity.entity_id)
assert registry.async_is_registered(zha_entity.entity_id)
async def test_node_discovery(hass, mock_openzwave):
"""Test discovery of a node."""
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_NODE_ADDED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
assert len(mock_receivers) == 1
node = MockNode(node_id=14)
hass.async_add_job(mock_receivers[0], node)
await hass.async_block_till_done()
assert hass.states.get("zwave.mock_node").state == "unknown"
async def test_unparsed_node_discovery(hass, mock_openzwave):
"""Test discovery of a node."""
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_NODE_ADDED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
assert len(mock_receivers) == 1
node = MockNode(node_id=14, manufacturer_name=None, name=None, is_ready=False)
sleeps = []
def utcnow():
return datetime.fromtimestamp(len(sleeps))
asyncio_sleep = asyncio.sleep
async def sleep(duration, loop=None):
if duration > 0:
sleeps.append(duration)
await asyncio_sleep(0)
with patch("homeassistant.components.zwave.dt_util.utcnow", new=utcnow):
with patch("asyncio.sleep", new=sleep):
with patch.object(zwave, "_LOGGER") as mock_logger:
hass.async_add_job(mock_receivers[0], node)
await hass.async_block_till_done()
assert len(sleeps) == const.NODE_READY_WAIT_SECS
assert mock_logger.warning.called
assert len(mock_logger.warning.mock_calls) == 1
assert mock_logger.warning.mock_calls[0][1][1:] == (
14,
const.NODE_READY_WAIT_SECS,
)
assert hass.states.get("zwave.unknown_node_14").state == "unknown"
async def test_node_ignored(hass, mock_openzwave):
"""Test discovery of a node."""
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_NODE_ADDED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(
hass,
"zwave",
{"zwave": {"device_config": {"zwave.mock_node": {"ignored": True}}}},
)
await hass.async_block_till_done()
assert len(mock_receivers) == 1
node = MockNode(node_id=14)
hass.async_add_job(mock_receivers[0], node)
await hass.async_block_till_done()
assert hass.states.get("zwave.mock_node") is None
async def test_value_discovery(hass, mock_openzwave):
"""Test discovery of a node."""
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_VALUE_ADDED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
assert len(mock_receivers) == 1
node = MockNode(node_id=11, generic=const.GENERIC_TYPE_SENSOR_BINARY)
value = MockValue(
data=False,
node=node,
index=12,
instance=13,
command_class=const.COMMAND_CLASS_SENSOR_BINARY,
type=const.TYPE_BOOL,
genre=const.GENRE_USER,
)
hass.async_add_job(mock_receivers[0], node, value)
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.mock_node_mock_value").state == "off"
async def test_value_entities(hass, mock_openzwave):
"""Test discovery of a node."""
mock_receivers = {}
def mock_connect(receiver, signal, *args, **kwargs):
mock_receivers[signal] = receiver
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
zwave_network = hass.data[DATA_NETWORK]
zwave_network.state = MockNetwork.STATE_READY
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
assert mock_receivers
hass.async_add_job(mock_receivers[MockNetwork.SIGNAL_ALL_NODES_QUERIED])
node = MockNode(node_id=11, generic=const.GENERIC_TYPE_SENSOR_BINARY)
zwave_network.nodes = {node.node_id: node}
value = MockValue(
data=False,
node=node,
index=12,
instance=1,
command_class=const.COMMAND_CLASS_SENSOR_BINARY,
type=const.TYPE_BOOL,
genre=const.GENRE_USER,
)
node.values = {"primary": value, value.value_id: value}
value2 = MockValue(
data=False,
node=node,
index=12,
instance=2,
label="Mock Value B",
command_class=const.COMMAND_CLASS_SENSOR_BINARY,
type=const.TYPE_BOOL,
genre=const.GENRE_USER,
)
node.values[value2.value_id] = value2
hass.async_add_job(mock_receivers[MockNetwork.SIGNAL_NODE_ADDED], node)
hass.async_add_job(mock_receivers[MockNetwork.SIGNAL_VALUE_ADDED], node, value)
hass.async_add_job(mock_receivers[MockNetwork.SIGNAL_VALUE_ADDED], node, value2)
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.mock_node_mock_value").state == "off"
assert hass.states.get("binary_sensor.mock_node_mock_value_b").state == "off"
ent_reg = await async_get_registry(hass)
dev_reg = await get_dev_reg(hass)
entry = ent_reg.async_get("zwave.mock_node")
assert entry is not None
assert entry.unique_id == "node-{}".format(node.node_id)
node_dev_id = entry.device_id
entry = ent_reg.async_get("binary_sensor.mock_node_mock_value")
assert entry is not None
assert entry.unique_id == "{}-{}".format(node.node_id, value.object_id)
assert entry.name is None
assert entry.device_id == node_dev_id
entry = ent_reg.async_get("binary_sensor.mock_node_mock_value_b")
assert entry is not None
assert entry.unique_id == "{}-{}".format(node.node_id, value2.object_id)
assert entry.name is None
assert entry.device_id != node_dev_id
device_id_b = entry.device_id
device = dev_reg.async_get(node_dev_id)
assert device is not None
assert device.name == node.name
old_device = device
device = dev_reg.async_get(device_id_b)
assert device is not None
assert device.name == "{} ({})".format(node.name, value2.instance)
# test renaming without updating
await hass.services.async_call(
"zwave",
"rename_node",
{const.ATTR_NODE_ID: node.node_id, const.ATTR_NAME: "Demo Node"},
)
await hass.async_block_till_done()
assert node.name == "Demo Node"
entry = ent_reg.async_get("zwave.mock_node")
assert entry is not None
entry = ent_reg.async_get("binary_sensor.mock_node_mock_value")
assert entry is not None
entry = ent_reg.async_get("binary_sensor.mock_node_mock_value_b")
assert entry is not None
device = dev_reg.async_get(node_dev_id)
assert device is not None
assert device.id == old_device.id
assert device.name == node.name
device = dev_reg.async_get(device_id_b)
assert device is not None
assert device.name == "{} ({})".format(node.name, value2.instance)
# test renaming
await hass.services.async_call(
"zwave",
"rename_node",
{
const.ATTR_NODE_ID: node.node_id,
const.ATTR_UPDATE_IDS: True,
const.ATTR_NAME: "New Node",
},
)
await hass.async_block_till_done()
assert node.name == "New Node"
entry = ent_reg.async_get("zwave.new_node")
assert entry is not None
assert entry.unique_id == "node-{}".format(node.node_id)
entry = ent_reg.async_get("binary_sensor.new_node_mock_value")
assert entry is not None
assert entry.unique_id == "{}-{}".format(node.node_id, value.object_id)
device = dev_reg.async_get(node_dev_id)
assert device is not None
assert device.id == old_device.id
assert device.name == node.name
device = dev_reg.async_get(device_id_b)
assert device is not None
assert device.name == "{} ({})".format(node.name, value2.instance)
await hass.services.async_call(
"zwave",
"rename_value",
{
const.ATTR_NODE_ID: node.node_id,
const.ATTR_VALUE_ID: value.object_id,
const.ATTR_UPDATE_IDS: True,
const.ATTR_NAME: "New Label",
},
)
await hass.async_block_till_done()
entry = ent_reg.async_get("binary_sensor.new_node_new_label")
assert entry is not None
assert entry.unique_id == "{}-{}".format(node.node_id, value.object_id)
async def test_value_discovery_existing_entity(hass, mock_openzwave):
"""Test discovery of a node."""
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_VALUE_ADDED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
assert len(mock_receivers) == 1
node = MockNode(node_id=11, generic=const.GENERIC_TYPE_THERMOSTAT)
thermostat_mode = MockValue(
data="Heat",
data_items=["Off", "Heat"],
node=node,
command_class=const.COMMAND_CLASS_THERMOSTAT_MODE,
genre=const.GENRE_USER,
)
setpoint_heating = MockValue(
data=22.0,
node=node,
command_class=const.COMMAND_CLASS_THERMOSTAT_SETPOINT,
index=1,
genre=const.GENRE_USER,
)
hass.async_add_job(mock_receivers[0], node, thermostat_mode)
await hass.async_block_till_done()
    def mock_update(self):
        # Bypass the debounced scheduler and push a state update immediately.
        self.hass.add_job(self.async_update_ha_state)
with patch.object(
zwave.node_entity.ZWaveBaseEntity, "maybe_schedule_update", new=mock_update
):
hass.async_add_job(mock_receivers[0], node, setpoint_heating)
await hass.async_block_till_done()
assert (
hass.states.get("climate.mock_node_mock_value").attributes["temperature"]
== 22.0
)
assert (
hass.states.get("climate.mock_node_mock_value").attributes[
"current_temperature"
]
is None
)
with patch.object(
zwave.node_entity.ZWaveBaseEntity, "maybe_schedule_update", new=mock_update
):
temperature = MockValue(
data=23.5,
node=node,
index=1,
command_class=const.COMMAND_CLASS_SENSOR_MULTILEVEL,
genre=const.GENRE_USER,
units="C",
)
hass.async_add_job(mock_receivers[0], node, temperature)
await hass.async_block_till_done()
assert (
hass.states.get("climate.mock_node_mock_value").attributes["temperature"]
== 22.0
)
assert (
hass.states.get("climate.mock_node_mock_value").attributes[
"current_temperature"
]
== 23.5
)
async def test_power_schemes(hass, mock_openzwave):
"""Test power attribute."""
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_VALUE_ADDED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
assert len(mock_receivers) == 1
node = MockNode(node_id=11, generic=const.GENERIC_TYPE_SWITCH_BINARY)
switch = MockValue(
data=True,
node=node,
index=12,
instance=13,
command_class=const.COMMAND_CLASS_SWITCH_BINARY,
genre=const.GENRE_USER,
type=const.TYPE_BOOL,
)
hass.async_add_job(mock_receivers[0], node, switch)
await hass.async_block_till_done()
assert hass.states.get("switch.mock_node_mock_value").state == "on"
assert (
"power_consumption"
not in hass.states.get("switch.mock_node_mock_value").attributes
)
    def mock_update(self):
        # Bypass the debounced scheduler and push a state update immediately.
        self.hass.add_job(self.async_update_ha_state)
with patch.object(
zwave.node_entity.ZWaveBaseEntity, "maybe_schedule_update", new=mock_update
):
power = MockValue(
data=23.5,
node=node,
index=const.INDEX_SENSOR_MULTILEVEL_POWER,
instance=13,
command_class=const.COMMAND_CLASS_SENSOR_MULTILEVEL,
)
hass.async_add_job(mock_receivers[0], node, power)
await hass.async_block_till_done()
assert (
hass.states.get("switch.mock_node_mock_value").attributes["power_consumption"]
== 23.5
)
async def test_network_complete(hass, mock_openzwave):
    """Test zwave network complete event (all nodes queried)."""
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_ALL_NODES_QUERIED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
assert len(mock_receivers) == 1
events = []
def listener(event):
events.append(event)
hass.bus.async_listen(const.EVENT_NETWORK_COMPLETE, listener)
hass.async_add_job(mock_receivers[0])
await hass.async_block_till_done()
assert len(events) == 1
async def test_network_ready(hass, mock_openzwave):
    """Test zwave network ready event (awake nodes queried)."""
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_AWAKE_NODES_QUERIED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
assert len(mock_receivers) == 1
events = []
def listener(event):
events.append(event)
hass.bus.async_listen(const.EVENT_NETWORK_READY, listener)
hass.async_add_job(mock_receivers[0])
await hass.async_block_till_done()
assert len(events) == 1
async def test_network_complete_some_dead(hass, mock_openzwave):
"""Test Node network complete some dead event."""
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_ALL_NODES_QUERIED_SOME_DEAD:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
assert len(mock_receivers) == 1
events = []
def listener(event):
events.append(event)
hass.bus.async_listen(const.EVENT_NETWORK_COMPLETE_SOME_DEAD, listener)
hass.async_add_job(mock_receivers[0])
await hass.async_block_till_done()
assert len(events) == 1
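# Hedged refactoring sketch (not part of the original test suite): the
# mock_connect/mock_receivers boilerplate repeated in the tests above could be
# factored into a small helper like this one. The helper name is an
# assumption, not an existing Home Assistant utility.
def _capture_receivers(signal_name):
    """Return (receivers, connect); `connect` records handlers for signal_name."""
    receivers = []
    def connect(receiver, signal, *args, **kwargs):
        # pydispatch passes the signal name positionally; keep only the
        # handlers registered for the signal under test.
        if signal == signal_name:
            receivers.append(receiver)
    return receivers, connect
# Usage sketch: receivers, connect = _capture_receivers(MockNetwork.SIGNAL_NODE_ADDED),
# then patch("pydispatch.dispatcher.connect", new=connect) exactly as above.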
class TestZWaveDeviceEntityValues(unittest.TestCase):
"""Tests for the ZWaveDeviceEntityValues helper."""
@pytest.fixture(autouse=True)
def set_mock_openzwave(self, mock_openzwave):
"""Use the mock_openzwave fixture for this class."""
self.mock_openzwave = mock_openzwave
def setUp(self):
"""Initialize values for this testcase class."""
self.hass = get_test_home_assistant()
self.hass.start()
self.registry = mock_registry(self.hass)
setup_component(self.hass, "zwave", {"zwave": {}})
self.hass.block_till_done()
self.node = MockNode()
self.mock_schema = {
const.DISC_COMPONENT: "mock_component",
const.DISC_VALUES: {
const.DISC_PRIMARY: {const.DISC_COMMAND_CLASS: ["mock_primary_class"]},
"secondary": {const.DISC_COMMAND_CLASS: ["mock_secondary_class"]},
"optional": {
const.DISC_COMMAND_CLASS: ["mock_optional_class"],
const.DISC_OPTIONAL: True,
},
},
}
self.primary = MockValue(
command_class="mock_primary_class", node=self.node, value_id=1000
)
self.secondary = MockValue(command_class="mock_secondary_class", node=self.node)
self.duplicate_secondary = MockValue(
command_class="mock_secondary_class", node=self.node
)
self.optional = MockValue(command_class="mock_optional_class", node=self.node)
self.no_match_value = MockValue(command_class="mock_bad_class", node=self.node)
self.entity_id = "mock_component.mock_node_mock_value"
self.zwave_config = {"zwave": {}}
self.device_config = {self.entity_id: {}}
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
@patch.object(zwave, "import_module")
@patch.object(zwave, "discovery")
def test_entity_discovery(self, discovery, import_module):
"""Test the creation of a new entity."""
discovery.async_load_platform.return_value = mock_coro()
mock_platform = MagicMock()
import_module.return_value = mock_platform
mock_device = MagicMock()
mock_device.name = "test_device"
mock_platform.get_device.return_value = mock_device
values = zwave.ZWaveDeviceEntityValues(
hass=self.hass,
schema=self.mock_schema,
primary_value=self.primary,
zwave_config=self.zwave_config,
device_config=self.device_config,
registry=self.registry,
)
assert values.primary is self.primary
assert len(list(values)) == 3
assert sorted(list(values), key=lambda a: id(a)) == sorted(
[self.primary, None, None], key=lambda a: id(a)
)
assert not discovery.async_load_platform.called
values.check_value(self.secondary)
self.hass.block_till_done()
assert values.secondary is self.secondary
assert len(list(values)) == 3
assert sorted(list(values), key=lambda a: id(a)) == sorted(
[self.primary, self.secondary, None], key=lambda a: id(a)
)
assert discovery.async_load_platform.called
assert len(discovery.async_load_platform.mock_calls) == 1
args = discovery.async_load_platform.mock_calls[0][1]
assert args[0] == self.hass
assert args[1] == "mock_component"
assert args[2] == "zwave"
assert args[3] == {const.DISCOVERY_DEVICE: mock_device.unique_id}
assert args[4] == self.zwave_config
discovery.async_load_platform.reset_mock()
values.check_value(self.optional)
values.check_value(self.duplicate_secondary)
values.check_value(self.no_match_value)
self.hass.block_till_done()
assert values.optional is self.optional
assert len(list(values)) == 3
assert sorted(list(values), key=lambda a: id(a)) == sorted(
[self.primary, self.secondary, self.optional], key=lambda a: id(a)
)
assert not discovery.async_load_platform.called
assert values._entity.value_added.called
assert len(values._entity.value_added.mock_calls) == 1
assert values._entity.value_changed.called
assert len(values._entity.value_changed.mock_calls) == 1
@patch.object(zwave, "import_module")
@patch.object(zwave, "discovery")
def test_entity_existing_values(self, discovery, import_module):
"""Test the loading of already discovered values."""
discovery.async_load_platform.return_value = mock_coro()
mock_platform = MagicMock()
import_module.return_value = mock_platform
mock_device = MagicMock()
mock_device.name = "test_device"
mock_platform.get_device.return_value = mock_device
self.node.values = {
self.primary.value_id: self.primary,
self.secondary.value_id: self.secondary,
self.optional.value_id: self.optional,
self.no_match_value.value_id: self.no_match_value,
}
values = zwave.ZWaveDeviceEntityValues(
hass=self.hass,
schema=self.mock_schema,
primary_value=self.primary,
zwave_config=self.zwave_config,
device_config=self.device_config,
registry=self.registry,
)
self.hass.block_till_done()
assert values.primary is self.primary
assert values.secondary is self.secondary
assert values.optional is self.optional
assert len(list(values)) == 3
assert sorted(list(values), key=lambda a: id(a)) == sorted(
[self.primary, self.secondary, self.optional], key=lambda a: id(a)
)
assert discovery.async_load_platform.called
assert len(discovery.async_load_platform.mock_calls) == 1
args = discovery.async_load_platform.mock_calls[0][1]
assert args[0] == self.hass
assert args[1] == "mock_component"
assert args[2] == "zwave"
assert args[3] == {const.DISCOVERY_DEVICE: mock_device.unique_id}
assert args[4] == self.zwave_config
assert not self.primary.enable_poll.called
@patch.object(zwave, "import_module")
@patch.object(zwave, "discovery")
def test_node_schema_mismatch(self, discovery, import_module):
"""Test node schema mismatch."""
self.node.generic = "no_match"
self.node.values = {
self.primary.value_id: self.primary,
self.secondary.value_id: self.secondary,
}
self.mock_schema[const.DISC_GENERIC_DEVICE_CLASS] = ["generic_match"]
values = zwave.ZWaveDeviceEntityValues(
hass=self.hass,
schema=self.mock_schema,
primary_value=self.primary,
zwave_config=self.zwave_config,
device_config=self.device_config,
registry=self.registry,
)
values._check_entity_ready()
self.hass.block_till_done()
assert not discovery.async_load_platform.called
@patch.object(zwave, "import_module")
@patch.object(zwave, "discovery")
def test_entity_workaround_component(self, discovery, import_module):
"""Test component workaround."""
discovery.async_load_platform.return_value = mock_coro()
mock_platform = MagicMock()
import_module.return_value = mock_platform
mock_device = MagicMock()
mock_device.name = "test_device"
mock_platform.get_device.return_value = mock_device
self.node.manufacturer_id = "010f"
self.node.product_type = "0b00"
self.primary.command_class = const.COMMAND_CLASS_SENSOR_ALARM
self.entity_id = "binary_sensor.mock_node_mock_value"
self.device_config = {self.entity_id: {}}
self.mock_schema = {
const.DISC_COMPONENT: "mock_component",
const.DISC_VALUES: {
const.DISC_PRIMARY: {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_BINARY]
}
},
}
with patch.object(zwave, "async_dispatcher_send") as mock_dispatch_send:
values = zwave.ZWaveDeviceEntityValues(
hass=self.hass,
schema=self.mock_schema,
primary_value=self.primary,
zwave_config=self.zwave_config,
device_config=self.device_config,
registry=self.registry,
)
values._check_entity_ready()
self.hass.block_till_done()
assert mock_dispatch_send.called
assert len(mock_dispatch_send.mock_calls) == 1
args = mock_dispatch_send.mock_calls[0][1]
assert args[1] == "zwave_new_binary_sensor"
@patch.object(zwave, "import_module")
@patch.object(zwave, "discovery")
def test_entity_workaround_ignore(self, discovery, import_module):
"""Test ignore workaround."""
self.node.manufacturer_id = "010f"
self.node.product_type = "0301"
self.primary.command_class = const.COMMAND_CLASS_SWITCH_BINARY
self.mock_schema = {
const.DISC_COMPONENT: "mock_component",
const.DISC_VALUES: {
const.DISC_PRIMARY: {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_BINARY]
}
},
}
values = zwave.ZWaveDeviceEntityValues(
hass=self.hass,
schema=self.mock_schema,
primary_value=self.primary,
zwave_config=self.zwave_config,
device_config=self.device_config,
registry=self.registry,
)
values._check_entity_ready()
self.hass.block_till_done()
assert not discovery.async_load_platform.called
@patch.object(zwave, "import_module")
@patch.object(zwave, "discovery")
def test_entity_config_ignore(self, discovery, import_module):
"""Test ignore config."""
self.node.values = {
self.primary.value_id: self.primary,
self.secondary.value_id: self.secondary,
}
self.device_config = {self.entity_id: {zwave.CONF_IGNORED: True}}
values = zwave.ZWaveDeviceEntityValues(
hass=self.hass,
schema=self.mock_schema,
primary_value=self.primary,
zwave_config=self.zwave_config,
device_config=self.device_config,
registry=self.registry,
)
values._check_entity_ready()
self.hass.block_till_done()
assert not discovery.async_load_platform.called
@patch.object(zwave, "import_module")
@patch.object(zwave, "discovery")
def test_entity_config_ignore_with_registry(self, discovery, import_module):
"""Test ignore config.
The case when the device is in entity registry.
"""
self.node.values = {
self.primary.value_id: self.primary,
self.secondary.value_id: self.secondary,
}
self.device_config = {"mock_component.registry_id": {zwave.CONF_IGNORED: True}}
with patch.object(self.registry, "async_schedule_save"):
self.registry.async_get_or_create(
"mock_component",
zwave.DOMAIN,
"567-1000",
suggested_object_id="registry_id",
)
zwave.ZWaveDeviceEntityValues(
hass=self.hass,
schema=self.mock_schema,
primary_value=self.primary,
zwave_config=self.zwave_config,
device_config=self.device_config,
registry=self.registry,
)
self.hass.block_till_done()
assert not discovery.async_load_platform.called
@patch.object(zwave, "import_module")
@patch.object(zwave, "discovery")
def test_entity_platform_ignore(self, discovery, import_module):
"""Test platform ignore device."""
self.node.values = {
self.primary.value_id: self.primary,
self.secondary.value_id: self.secondary,
}
platform = MagicMock()
import_module.return_value = platform
platform.get_device.return_value = None
zwave.ZWaveDeviceEntityValues(
hass=self.hass,
schema=self.mock_schema,
primary_value=self.primary,
zwave_config=self.zwave_config,
device_config=self.device_config,
registry=self.registry,
)
self.hass.block_till_done()
assert not discovery.async_load_platform.called
@patch.object(zwave, "import_module")
@patch.object(zwave, "discovery")
def test_config_polling_intensity(self, discovery, import_module):
"""Test polling intensity."""
mock_platform = MagicMock()
import_module.return_value = mock_platform
mock_device = MagicMock()
mock_device.name = "test_device"
mock_platform.get_device.return_value = mock_device
self.node.values = {
self.primary.value_id: self.primary,
self.secondary.value_id: self.secondary,
}
self.device_config = {self.entity_id: {zwave.CONF_POLLING_INTENSITY: 123}}
values = zwave.ZWaveDeviceEntityValues(
hass=self.hass,
schema=self.mock_schema,
primary_value=self.primary,
zwave_config=self.zwave_config,
device_config=self.device_config,
registry=self.registry,
)
values._check_entity_ready()
self.hass.block_till_done()
assert discovery.async_load_platform.called
assert self.primary.enable_poll.called
assert len(self.primary.enable_poll.mock_calls) == 1
assert self.primary.enable_poll.mock_calls[0][1][0] == 123
class TestZwave(unittest.TestCase):
"""Test zwave init."""
def test_device_config_glob_is_ordered(self):
"""Test that device_config_glob preserves order."""
conf = CONFIG_SCHEMA({"zwave": {CONF_DEVICE_CONFIG_GLOB: OrderedDict()}})
assert isinstance(conf["zwave"][CONF_DEVICE_CONFIG_GLOB], OrderedDict)
class TestZWaveServices(unittest.TestCase):
"""Tests for zwave services."""
@pytest.fixture(autouse=True)
def set_mock_openzwave(self, mock_openzwave):
"""Use the mock_openzwave fixture for this class."""
self.mock_openzwave = mock_openzwave
def setUp(self):
"""Initialize values for this testcase class."""
self.hass = get_test_home_assistant()
self.hass.start()
# Initialize zwave
setup_component(self.hass, "zwave", {"zwave": {}})
self.hass.block_till_done()
self.zwave_network = self.hass.data[DATA_NETWORK]
self.zwave_network.state = MockNetwork.STATE_READY
self.hass.bus.fire(EVENT_HOMEASSISTANT_START)
self.hass.block_till_done()
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.services.call("zwave", "stop_network", {})
self.hass.block_till_done()
self.hass.stop()
def test_add_node(self):
"""Test zwave add_node service."""
self.hass.services.call("zwave", "add_node", {})
self.hass.block_till_done()
assert self.zwave_network.controller.add_node.called
assert len(self.zwave_network.controller.add_node.mock_calls) == 1
assert len(self.zwave_network.controller.add_node.mock_calls[0][1]) == 0
def test_add_node_secure(self):
"""Test zwave add_node_secure service."""
self.hass.services.call("zwave", "add_node_secure", {})
self.hass.block_till_done()
assert self.zwave_network.controller.add_node.called
assert len(self.zwave_network.controller.add_node.mock_calls) == 1
assert self.zwave_network.controller.add_node.mock_calls[0][1][0] is True
def test_remove_node(self):
"""Test zwave remove_node service."""
self.hass.services.call("zwave", "remove_node", {})
self.hass.block_till_done()
assert self.zwave_network.controller.remove_node.called
assert len(self.zwave_network.controller.remove_node.mock_calls) == 1
def test_cancel_command(self):
"""Test zwave cancel_command service."""
self.hass.services.call("zwave", "cancel_command", {})
self.hass.block_till_done()
assert self.zwave_network.controller.cancel_command.called
assert len(self.zwave_network.controller.cancel_command.mock_calls) == 1
def test_heal_network(self):
"""Test zwave heal_network service."""
self.hass.services.call("zwave", "heal_network", {})
self.hass.block_till_done()
assert self.zwave_network.heal.called
assert len(self.zwave_network.heal.mock_calls) == 1
def test_soft_reset(self):
"""Test zwave soft_reset service."""
self.hass.services.call("zwave", "soft_reset", {})
self.hass.block_till_done()
assert self.zwave_network.controller.soft_reset.called
assert len(self.zwave_network.controller.soft_reset.mock_calls) == 1
def test_test_network(self):
"""Test zwave test_network service."""
self.hass.services.call("zwave", "test_network", {})
self.hass.block_till_done()
assert self.zwave_network.test.called
assert len(self.zwave_network.test.mock_calls) == 1
def test_stop_network(self):
"""Test zwave stop_network service."""
with patch.object(self.hass.bus, "fire") as mock_fire:
self.hass.services.call("zwave", "stop_network", {})
self.hass.block_till_done()
assert self.zwave_network.stop.called
assert len(self.zwave_network.stop.mock_calls) == 1
assert mock_fire.called
assert len(mock_fire.mock_calls) == 1
assert mock_fire.mock_calls[0][1][0] == const.EVENT_NETWORK_STOP
def test_rename_node(self):
"""Test zwave rename_node service."""
self.zwave_network.nodes = {11: MagicMock()}
self.hass.services.call(
"zwave",
"rename_node",
{const.ATTR_NODE_ID: 11, const.ATTR_NAME: "test_name"},
)
self.hass.block_till_done()
assert self.zwave_network.nodes[11].name == "test_name"
def test_rename_value(self):
"""Test zwave rename_value service."""
node = MockNode(node_id=14)
value = MockValue(index=12, value_id=123456, label="Old Label")
node.values = {123456: value}
self.zwave_network.nodes = {11: node}
assert value.label == "Old Label"
self.hass.services.call(
"zwave",
"rename_value",
{
const.ATTR_NODE_ID: 11,
const.ATTR_VALUE_ID: 123456,
const.ATTR_NAME: "New Label",
},
)
self.hass.block_till_done()
assert value.label == "New Label"
def test_set_poll_intensity_enable(self):
"""Test zwave set_poll_intensity service, successful set."""
node = MockNode(node_id=14)
value = MockValue(index=12, value_id=123456, poll_intensity=0)
node.values = {123456: value}
self.zwave_network.nodes = {11: node}
assert value.poll_intensity == 0
self.hass.services.call(
"zwave",
"set_poll_intensity",
{
const.ATTR_NODE_ID: 11,
const.ATTR_VALUE_ID: 123456,
const.ATTR_POLL_INTENSITY: 4,
},
)
self.hass.block_till_done()
enable_poll = value.enable_poll
assert value.enable_poll.called
assert len(enable_poll.mock_calls) == 2
assert enable_poll.mock_calls[0][1][0] == 4
def test_set_poll_intensity_enable_failed(self):
"""Test zwave set_poll_intensity service, failed set."""
node = MockNode(node_id=14)
value = MockValue(index=12, value_id=123456, poll_intensity=0)
value.enable_poll.return_value = False
node.values = {123456: value}
self.zwave_network.nodes = {11: node}
assert value.poll_intensity == 0
self.hass.services.call(
"zwave",
"set_poll_intensity",
{
const.ATTR_NODE_ID: 11,
const.ATTR_VALUE_ID: 123456,
const.ATTR_POLL_INTENSITY: 4,
},
)
self.hass.block_till_done()
enable_poll = value.enable_poll
assert value.enable_poll.called
assert len(enable_poll.mock_calls) == 1
def test_set_poll_intensity_disable(self):
"""Test zwave set_poll_intensity service, successful disable."""
node = MockNode(node_id=14)
value = MockValue(index=12, value_id=123456, poll_intensity=4)
node.values = {123456: value}
self.zwave_network.nodes = {11: node}
assert value.poll_intensity == 4
self.hass.services.call(
"zwave",
"set_poll_intensity",
{
const.ATTR_NODE_ID: 11,
const.ATTR_VALUE_ID: 123456,
const.ATTR_POLL_INTENSITY: 0,
},
)
self.hass.block_till_done()
disable_poll = value.disable_poll
assert value.disable_poll.called
assert len(disable_poll.mock_calls) == 2
def test_set_poll_intensity_disable_failed(self):
"""Test zwave set_poll_intensity service, failed disable."""
node = MockNode(node_id=14)
value = MockValue(index=12, value_id=123456, poll_intensity=4)
value.disable_poll.return_value = False
node.values = {123456: value}
self.zwave_network.nodes = {11: node}
assert value.poll_intensity == 4
self.hass.services.call(
"zwave",
"set_poll_intensity",
{
const.ATTR_NODE_ID: 11,
const.ATTR_VALUE_ID: 123456,
const.ATTR_POLL_INTENSITY: 0,
},
)
self.hass.block_till_done()
disable_poll = value.disable_poll
assert value.disable_poll.called
assert len(disable_poll.mock_calls) == 1
def test_remove_failed_node(self):
"""Test zwave remove_failed_node service."""
self.hass.services.call("zwave", "remove_failed_node", {const.ATTR_NODE_ID: 12})
self.hass.block_till_done()
remove_failed_node = self.zwave_network.controller.remove_failed_node
assert remove_failed_node.called
assert len(remove_failed_node.mock_calls) == 1
assert remove_failed_node.mock_calls[0][1][0] == 12
def test_replace_failed_node(self):
"""Test zwave replace_failed_node service."""
self.hass.services.call(
"zwave", "replace_failed_node", {const.ATTR_NODE_ID: 13}
)
self.hass.block_till_done()
replace_failed_node = self.zwave_network.controller.replace_failed_node
assert replace_failed_node.called
assert len(replace_failed_node.mock_calls) == 1
assert replace_failed_node.mock_calls[0][1][0] == 13
def test_set_config_parameter(self):
"""Test zwave set_config_parameter service."""
value_byte = MockValue(
index=12,
command_class=const.COMMAND_CLASS_CONFIGURATION,
type=const.TYPE_BYTE,
)
value_list = MockValue(
index=13,
command_class=const.COMMAND_CLASS_CONFIGURATION,
type=const.TYPE_LIST,
data_items=["item1", "item2", "item3"],
)
value_button = MockValue(
index=14,
command_class=const.COMMAND_CLASS_CONFIGURATION,
type=const.TYPE_BUTTON,
)
value_list_int = MockValue(
index=15,
command_class=const.COMMAND_CLASS_CONFIGURATION,
type=const.TYPE_LIST,
data_items=["1", "2", "3"],
)
value_bool = MockValue(
index=16,
command_class=const.COMMAND_CLASS_CONFIGURATION,
type=const.TYPE_BOOL,
)
node = MockNode(node_id=14)
node.get_values.return_value = {
12: value_byte,
13: value_list,
14: value_button,
15: value_list_int,
16: value_bool,
}
self.zwave_network.nodes = {14: node}
# Byte
self.hass.services.call(
"zwave",
"set_config_parameter",
{
const.ATTR_NODE_ID: 14,
const.ATTR_CONFIG_PARAMETER: 12,
const.ATTR_CONFIG_VALUE: 7,
},
)
self.hass.block_till_done()
assert value_byte.data == 7
# List
self.hass.services.call(
"zwave",
"set_config_parameter",
{
const.ATTR_NODE_ID: 14,
const.ATTR_CONFIG_PARAMETER: 13,
const.ATTR_CONFIG_VALUE: "item3",
},
)
self.hass.block_till_done()
assert value_list.data == "item3"
# Button
self.hass.services.call(
"zwave",
"set_config_parameter",
{
const.ATTR_NODE_ID: 14,
const.ATTR_CONFIG_PARAMETER: 14,
const.ATTR_CONFIG_VALUE: True,
},
)
self.hass.block_till_done()
assert self.zwave_network.manager.pressButton.called
assert self.zwave_network.manager.releaseButton.called
# List of Ints
self.hass.services.call(
"zwave",
"set_config_parameter",
{
const.ATTR_NODE_ID: 14,
const.ATTR_CONFIG_PARAMETER: 15,
const.ATTR_CONFIG_VALUE: 3,
},
)
self.hass.block_till_done()
assert value_list_int.data == "3"
# Boolean Truthy
self.hass.services.call(
"zwave",
"set_config_parameter",
{
const.ATTR_NODE_ID: 14,
const.ATTR_CONFIG_PARAMETER: 16,
const.ATTR_CONFIG_VALUE: "True",
},
)
self.hass.block_till_done()
assert value_bool.data == 1
# Boolean Falsy
self.hass.services.call(
"zwave",
"set_config_parameter",
{
const.ATTR_NODE_ID: 14,
const.ATTR_CONFIG_PARAMETER: 16,
const.ATTR_CONFIG_VALUE: "False",
},
)
self.hass.block_till_done()
assert value_bool.data == 0
# Different Parameter Size
self.hass.services.call(
"zwave",
"set_config_parameter",
{
const.ATTR_NODE_ID: 14,
const.ATTR_CONFIG_PARAMETER: 19,
const.ATTR_CONFIG_VALUE: 0x01020304,
const.ATTR_CONFIG_SIZE: 4,
},
)
self.hass.block_till_done()
assert node.set_config_param.called
assert len(node.set_config_param.mock_calls) == 1
assert node.set_config_param.mock_calls[0][1][0] == 19
assert node.set_config_param.mock_calls[0][1][1] == 0x01020304
assert node.set_config_param.mock_calls[0][1][2] == 4
node.set_config_param.reset_mock()
def test_print_config_parameter(self):
"""Test zwave print_config_parameter service."""
value1 = MockValue(
index=12, command_class=const.COMMAND_CLASS_CONFIGURATION, data=1234
)
value2 = MockValue(
index=13, command_class=const.COMMAND_CLASS_CONFIGURATION, data=2345
)
node = MockNode(node_id=14)
node.values = {12: value1, 13: value2}
self.zwave_network.nodes = {14: node}
with patch.object(zwave, "_LOGGER") as mock_logger:
self.hass.services.call(
"zwave",
"print_config_parameter",
{const.ATTR_NODE_ID: 14, const.ATTR_CONFIG_PARAMETER: 13},
)
self.hass.block_till_done()
assert mock_logger.info.called
assert len(mock_logger.info.mock_calls) == 1
assert mock_logger.info.mock_calls[0][1][1] == 13
assert mock_logger.info.mock_calls[0][1][2] == 14
assert mock_logger.info.mock_calls[0][1][3] == 2345
def test_print_node(self):
"""Test zwave print_node_parameter service."""
node = MockNode(node_id=14)
self.zwave_network.nodes = {14: node}
with self.assertLogs(level="DEBUG") as mock_logger:
self.hass.services.call("zwave", "print_node", {const.ATTR_NODE_ID: 14})
self.hass.block_till_done()
assert "FOUND NODE " in mock_logger.output[1]
def test_set_wakeup(self):
"""Test zwave set_wakeup service."""
value = MockValue(index=12, command_class=const.COMMAND_CLASS_WAKE_UP)
node = MockNode(node_id=14)
node.values = {12: value}
node.get_values.return_value = node.values
self.zwave_network.nodes = {14: node}
self.hass.services.call(
"zwave", "set_wakeup", {const.ATTR_NODE_ID: 14, const.ATTR_CONFIG_VALUE: 15}
)
self.hass.block_till_done()
assert value.data == 15
node.can_wake_up_value = False
self.hass.services.call(
"zwave", "set_wakeup", {const.ATTR_NODE_ID: 14, const.ATTR_CONFIG_VALUE: 20}
)
self.hass.block_till_done()
assert value.data == 15
def test_reset_node_meters(self):
"""Test zwave reset_node_meters service."""
value = MockValue(
instance=1, index=8, data=99.5, command_class=const.COMMAND_CLASS_METER
)
reset_value = MockValue(
instance=1, index=33, command_class=const.COMMAND_CLASS_METER
)
node = MockNode(node_id=14)
node.values = {8: value, 33: reset_value}
node.get_values.return_value = node.values
self.zwave_network.nodes = {14: node}
self.hass.services.call(
"zwave",
"reset_node_meters",
{const.ATTR_NODE_ID: 14, const.ATTR_INSTANCE: 2},
)
self.hass.block_till_done()
assert not self.zwave_network.manager.pressButton.called
assert not self.zwave_network.manager.releaseButton.called
self.hass.services.call("zwave", "reset_node_meters", {const.ATTR_NODE_ID: 14})
self.hass.block_till_done()
assert self.zwave_network.manager.pressButton.called
(value_id,) = self.zwave_network.manager.pressButton.mock_calls.pop(0)[1]
assert value_id == reset_value.value_id
assert self.zwave_network.manager.releaseButton.called
(value_id,) = self.zwave_network.manager.releaseButton.mock_calls.pop(0)[1]
assert value_id == reset_value.value_id
def test_add_association(self):
"""Test zwave change_association service."""
ZWaveGroup = self.mock_openzwave.group.ZWaveGroup
group = MagicMock()
ZWaveGroup.return_value = group
value = MockValue(index=12, command_class=const.COMMAND_CLASS_WAKE_UP)
node = MockNode(node_id=14)
node.values = {12: value}
node.get_values.return_value = node.values
self.zwave_network.nodes = {14: node}
self.hass.services.call(
"zwave",
"change_association",
{
const.ATTR_ASSOCIATION: "add",
const.ATTR_NODE_ID: 14,
const.ATTR_TARGET_NODE_ID: 24,
const.ATTR_GROUP: 3,
const.ATTR_INSTANCE: 5,
},
)
self.hass.block_till_done()
assert ZWaveGroup.called
assert len(ZWaveGroup.mock_calls) == 2
assert ZWaveGroup.mock_calls[0][1][0] == 3
assert ZWaveGroup.mock_calls[0][1][2] == 14
assert group.add_association.called
assert len(group.add_association.mock_calls) == 1
assert group.add_association.mock_calls[0][1][0] == 24
assert group.add_association.mock_calls[0][1][1] == 5
def test_remove_association(self):
"""Test zwave change_association service."""
ZWaveGroup = self.mock_openzwave.group.ZWaveGroup
group = MagicMock()
ZWaveGroup.return_value = group
value = MockValue(index=12, command_class=const.COMMAND_CLASS_WAKE_UP)
node = MockNode(node_id=14)
node.values = {12: value}
node.get_values.return_value = node.values
self.zwave_network.nodes = {14: node}
self.hass.services.call(
"zwave",
"change_association",
{
const.ATTR_ASSOCIATION: "remove",
const.ATTR_NODE_ID: 14,
const.ATTR_TARGET_NODE_ID: 24,
const.ATTR_GROUP: 3,
const.ATTR_INSTANCE: 5,
},
)
self.hass.block_till_done()
assert ZWaveGroup.called
assert len(ZWaveGroup.mock_calls) == 2
assert ZWaveGroup.mock_calls[0][1][0] == 3
assert ZWaveGroup.mock_calls[0][1][2] == 14
assert group.remove_association.called
assert len(group.remove_association.mock_calls) == 1
assert group.remove_association.mock_calls[0][1][0] == 24
assert group.remove_association.mock_calls[0][1][1] == 5
def test_refresh_entity(self):
"""Test zwave refresh_entity service."""
node = MockNode()
value = MockValue(
data=False, node=node, command_class=const.COMMAND_CLASS_SENSOR_BINARY
)
power_value = MockValue(
data=50, node=node, command_class=const.COMMAND_CLASS_METER
)
values = MockEntityValues(primary=value, power=power_value)
device = get_device(node=node, values=values, node_config={})
device.hass = self.hass
device.entity_id = "binary_sensor.mock_entity_id"
self.hass.add_job(device.async_added_to_hass())
self.hass.block_till_done()
self.hass.services.call(
"zwave", "refresh_entity", {ATTR_ENTITY_ID: "binary_sensor.mock_entity_id"}
)
self.hass.block_till_done()
assert node.refresh_value.called
assert len(node.refresh_value.mock_calls) == 2
assert sorted(
[
node.refresh_value.mock_calls[0][1][0],
node.refresh_value.mock_calls[1][1][0],
]
) == sorted([value.value_id, power_value.value_id])
def test_refresh_node(self):
"""Test zwave refresh_node service."""
node = MockNode(node_id=14)
self.zwave_network.nodes = {14: node}
self.hass.services.call("zwave", "refresh_node", {const.ATTR_NODE_ID: 14})
self.hass.block_till_done()
assert node.refresh_info.called
assert len(node.refresh_info.mock_calls) == 1
def test_set_node_value(self):
"""Test zwave set_node_value service."""
value = MockValue(index=12, command_class=const.COMMAND_CLASS_INDICATOR, data=4)
node = MockNode(node_id=14, command_classes=[const.COMMAND_CLASS_INDICATOR])
node.values = {12: value}
node.get_values.return_value = node.values
self.zwave_network.nodes = {14: node}
self.hass.services.call(
"zwave",
"set_node_value",
{
const.ATTR_NODE_ID: 14,
const.ATTR_VALUE_ID: 12,
const.ATTR_CONFIG_VALUE: 2,
},
)
self.hass.block_till_done()
assert self.zwave_network.nodes[14].values[12].data == 2
def test_refresh_node_value(self):
"""Test zwave refresh_node_value service."""
node = MockNode(
node_id=14,
command_classes=[const.COMMAND_CLASS_INDICATOR],
network=self.zwave_network,
)
value = MockValue(
node=node, index=12, command_class=const.COMMAND_CLASS_INDICATOR, data=2
)
value.refresh = MagicMock()
node.values = {12: value}
node.get_values.return_value = node.values
self.zwave_network.nodes = {14: node}
self.hass.services.call(
"zwave",
"refresh_node_value",
{const.ATTR_NODE_ID: 14, const.ATTR_VALUE_ID: 12},
)
self.hass.block_till_done()
assert value.refresh.called
def test_heal_node(self):
"""Test zwave heal_node service."""
node = MockNode(node_id=19)
self.zwave_network.nodes = {19: node}
self.hass.services.call("zwave", "heal_node", {const.ATTR_NODE_ID: 19})
self.hass.block_till_done()
assert node.heal.called
assert len(node.heal.mock_calls) == 1
def test_test_node(self):
"""Test the zwave test_node service."""
node = MockNode(node_id=19)
self.zwave_network.nodes = {19: node}
self.hass.services.call("zwave", "test_node", {const.ATTR_NODE_ID: 19})
self.hass.block_till_done()
assert node.test.called
assert len(node.test.mock_calls) == 1
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=
"""Parallelization utility optimizer."""
__all__ = ['split_data', 'split_and_load', 'clip_global_norm',
'check_sha1', 'download']
import os
import hashlib
import warnings
try:
    import requests
except ImportError:
    # Allow the module to import without `requests`; any later use (e.g.
    # requests.get in download()) then fails with an AttributeError.
    class requests_failed_to_import(object):
        pass
    requests = requests_failed_to_import
import numpy as np
from .. import ndarray
def split_data(data, num_slice, batch_axis=0, even_split=True):
"""Splits an NDArray into `num_slice` slices along `batch_axis`.
Usually used for data parallelism where each slices is sent
to one device (i.e. GPU).
Parameters
----------
data : NDArray
A batch of data.
num_slice : int
Number of desired slices.
batch_axis : int, default 0
The axis along which to slice.
even_split : bool, default True
Whether to force all slices to have the same number of elements.
If `True`, an error will be raised when `num_slice` does not evenly
divide `data.shape[batch_axis]`.
Returns
-------
list of NDArray
Return value is a list even if `num_slice` is 1.
"""
size = data.shape[batch_axis]
if size < num_slice:
raise ValueError(
"Too many slices for data with shape %s. Arguments are " \
"num_slice=%d and batch_axis=%d."%(str(data.shape), num_slice, batch_axis))
if even_split and size % num_slice != 0:
raise ValueError(
"data with shape %s cannot be evenly split into %d slices along axis %d. " \
"Use a batch size that's multiple of %d or set even_split=False to allow " \
"uneven partitioning of data."%(
str(data.shape), num_slice, batch_axis, num_slice))
step = size // num_slice
if batch_axis == 0:
slices = [data[i*step:(i+1)*step] if i < num_slice - 1 else data[i*step:size]
for i in range(num_slice)]
elif even_split:
slices = ndarray.split(data, num_outputs=num_slice, axis=batch_axis)
else:
slices = [ndarray.slice_axis(data, batch_axis, i*step, (i+1)*step)
if i < num_slice - 1 else
ndarray.slice_axis(data, batch_axis, i*step, size)
for i in range(num_slice)]
return slices
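# Hedged usage sketch (not part of the original module): demonstrates the
# uneven-split behaviour of `split_data` documented above. It is only a
# definition; nothing runs at import time.
def _demo_split_data():
    """Illustrative only: split 10 rows into 3 slices with even_split=False."""
    data = ndarray.arange(20).reshape((10, 2))
    slices = split_data(data, num_slice=3, even_split=False)
    # step = 10 // 3 = 3, so the slice shapes are (3, 2), (3, 2), (4, 2):
    # the last slice absorbs the remainder rows.
    return [s.shape for s in slices]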
def split_and_load(data, ctx_list, batch_axis=0, even_split=True):
"""Splits an NDArray into `len(ctx_list)` slices along `batch_axis` and loads
each slice to one context in `ctx_list`.
Parameters
----------
data : NDArray
A batch of data.
ctx_list : list of Context
A list of Contexts.
batch_axis : int, default 0
The axis along which to slice.
even_split : bool, default True
Whether to force all slices to have the same number of elements.
Returns
-------
list of NDArray
Each corresponds to a context in `ctx_list`.
"""
if not isinstance(data, ndarray.NDArray):
data = ndarray.array(data, ctx=ctx_list[0])
if len(ctx_list) == 1:
return [data.as_in_context(ctx_list[0])]
slices = split_data(data, len(ctx_list), batch_axis, even_split)
return [i.as_in_context(ctx) for i, ctx in zip(slices, ctx_list)]
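# Hedged usage sketch (not original): shards a batch across two CPU contexts.
# The relative import assumes an mxnet-style layout where a sibling `context`
# module provides `cpu`; adjust if the package differs.
def _demo_split_and_load():
    """Illustrative only: load each slice onto its own context."""
    from ..context import cpu  # assumption: mxnet-style context module
    data = ndarray.arange(8).reshape((4, 2))
    parts = split_and_load(data, ctx_list=[cpu(0), cpu(1)])
    # Two slices of shape (2, 2), one per context in ctx_list.
    return [(p.shape, p.context) for p in parts]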
def clip_global_norm(arrays, max_norm):
    """Rescales NDArrays in place so that their global 2-norm (the square root
    of the sum of their squared 2-norms) is no larger than `max_norm`.
    Returns the global norm computed before rescaling.
    """
assert len(arrays) > 0
ctx = arrays[0].context
total_norm = ndarray.add_n(*[ndarray.dot(x, x).as_in_context(ctx)
for x in (arr.reshape((-1,)) for arr in arrays)])
total_norm = ndarray.sqrt(total_norm).asscalar()
if not np.isfinite(total_norm):
warnings.warn(UserWarning('nan or inf is detected. Clipping results will be undefined.'),
stacklevel=2)
scale = max_norm / (total_norm + 1e-8)
if scale < 1.0:
for arr in arrays:
arr *= scale
return total_norm
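# Hedged usage sketch (not original): clipping two "gradient" arrays to a
# global 2-norm of 1.0, mirroring typical use after a backward pass.
def _demo_clip_global_norm():
    """Illustrative only: rescale arrays in place, return the pre-clip norm."""
    grads = [ndarray.array([3.0, 4.0]), ndarray.array([0.0])]
    total = clip_global_norm(grads, max_norm=1.0)
    # total == sqrt(3^2 + 4^2 + 0^2) == 5.0, so each array was scaled by
    # roughly 1/5 and the global norm is now about 1.0.
    return total, grads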
def _indent(s_, numSpaces):
    """Indent every line of string `s_` after the first by `numSpaces` spaces.
    """
s = s_.split('\n')
if len(s) == 1:
return s_
first = s.pop(0)
s = [first] + [(numSpaces * ' ') + line for line in s]
s = '\n'.join(s)
return s
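# Hedged example (not original): shows why the first line stays unindented.
def _demo_indent():
    """Illustrative only: indent the continuation lines of a two-line string."""
    # _indent('a\nb', 4) == 'a\n    b'; the first line is kept as-is so the
    # result can be appended directly after an existing prefix such as 'key='.
    return _indent('a\nb', 4)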
def check_sha1(filename, sha1_hash):
"""Check whether the sha1 hash of the file content matches the expected hash.
Parameters
----------
filename : str
Path to the file.
sha1_hash : str
Expected sha1 hash in hexadecimal digits.
Returns
-------
bool
Whether the file content matches the expected hash.
"""
sha1 = hashlib.sha1()
with open(filename, 'rb') as f:
while True:
data = f.read(1048576)
if not data:
break
sha1.update(data)
return sha1.hexdigest() == sha1_hash
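# Hedged usage sketch (not original): round-trips a temp file through
# check_sha1. The expected digest is recomputed rather than hard-coded so the
# demo stays self-checking.
def _demo_check_sha1():
    """Illustrative only: verify a small file against its own sha1."""
    import tempfile
    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write(b'hello')
        fname = f.name
    expected = hashlib.sha1(b'hello').hexdigest()
    ok = check_sha1(fname, expected)
    os.remove(fname)
    return ok  # True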
def download(url, path=None, overwrite=False, sha1_hash=None):
    """Download a file from a given URL.
Parameters
----------
url : str
URL to download
    path : str, optional
        Destination path to store the downloaded file. By default stores to the
        current directory with the same name as in the url.
    overwrite : bool, optional
        Whether to overwrite the destination file if it already exists.
sha1_hash : str, optional
Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified
but doesn't match.
Returns
-------
str
The file path of the downloaded file.
"""
if path is None:
fname = url.split('/')[-1]
else:
path = os.path.expanduser(path)
if os.path.isdir(path):
fname = os.path.join(path, url.split('/')[-1])
else:
fname = path
if overwrite or not os.path.exists(fname) or (sha1_hash and not check_sha1(fname, sha1_hash)):
dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname)))
if not os.path.exists(dirname):
os.makedirs(dirname)
print('Downloading %s from %s...'%(fname, url))
r = requests.get(url, stream=True)
if r.status_code != 200:
raise RuntimeError("Failed downloading url %s"%url)
with open(fname, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
if sha1_hash and not check_sha1(fname, sha1_hash):
raise UserWarning('File {} is downloaded but the content hash does not match. ' \
'The repo may be outdated or download may be incomplete. ' \
'If the "repo_url" is overridden, consider switching to ' \
'the default repo.'.format(fname))
return fname
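# Hedged usage sketch (not original): the URL below is a placeholder, not a
# real dataset location. With sha1_hash supplied, a stale or corrupt local
# copy is detected via check_sha1 and re-downloaded.
def _demo_download():
    """Illustrative only: fetch a file into ./downloads."""
    url = 'https://example.com/data.txt'  # placeholder URL (assumption)
    fname = download(url, path='./downloads', overwrite=False)
    return fname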
def _get_repo_url():
"""Return the base URL for Gluon dataset and model repository."""
default_repo = 'https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/'
repo_url = os.environ.get('MXNET_GLUON_REPO', default_repo)
if repo_url[-1] != '/':
repo_url = repo_url+'/'
return repo_url
def _get_repo_file_url(namespace, filename):
"""Return the URL for hosted file in Gluon repository.
Parameters
----------
namespace : str
Namespace of the file.
filename : str
        Name of the file.
"""
return '{base_url}{namespace}/{filename}'.format(base_url=_get_repo_url(),
namespace=namespace,
filename=filename)
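# Hedged example (not original): _get_repo_file_url('gluon/dataset', 'x.zip')
# returns '<repo>gluon/dataset/x.zip', where <repo> comes from the
# MXNET_GLUON_REPO environment variable (defaulting to the S3 bucket above)
# and is normalized to end with '/'.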
def _brief_print_list(lst, limit=7):
    """Return a string of at most `limit` elements of a list, eliding the middle."""
lst = list(lst)
if len(lst) > limit:
return _brief_print_list(lst[:limit//2], limit) + ', ..., ' + \
_brief_print_list(lst[-limit//2:], limit)
return ', '.join(["'%s'"%str(i) for i in lst])
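# Hedged example (not original): with the default limit=7, a 10-element list
# keeps the first 3 items (limit//2) and the last 4 (-limit//2 floors to -4):
# _brief_print_list(range(10)) == "'0', '1', '2', ..., '6', '7', '8', '9'"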
|
|
# Copyright 2020 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import copy
import datetime
import fnmatch
import logging
import time
from typing import Any
from typing import Dict
from typing import Iterable
from typing import List
from typing import Optional
from typing import Set
from typing import Tuple
from typing import Union
from typing import overload
from cloudaux.aws.iam import get_role_inline_policies
from dateutil.parser import parse as ts_parse
from pydantic import BaseModel
from pydantic import Field
from pydantic import PrivateAttr
from pydantic import root_validator
from pydantic import validator
from repokid import CONFIG
from repokid.datasource.access_advisor import AccessAdvisorDatasource
from repokid.datasource.iam import IAMDatasource
from repokid.exceptions import DynamoDBError
from repokid.exceptions import DynamoDBMaxItemSizeError
from repokid.exceptions import IAMActionError
from repokid.exceptions import IAMError
from repokid.exceptions import IntegrityError
from repokid.exceptions import MissingRepoableServices
from repokid.exceptions import ModelError
from repokid.exceptions import NotFoundError
from repokid.exceptions import RoleNotFoundError
from repokid.exceptions import RoleStoreError
from repokid.hooks import call_hooks
from repokid.types import IAMEntry
from repokid.types import RepokidConfig
from repokid.types import RepokidHooks
from repokid.utils.dynamo import get_role_by_arn
from repokid.utils.dynamo import get_role_by_id
from repokid.utils.dynamo import set_role_data
from repokid.utils.iam import delete_policy
from repokid.utils.iam import inline_policies_size_exceeds_maximum
from repokid.utils.iam import replace_policies
from repokid.utils.iam import update_repoed_description
from repokid.utils.logging import log_deleted_and_repoed_policies
from repokid.utils.permissions import convert_repoable_perms_to_perms_and_services
from repokid.utils.permissions import find_newly_added_permissions
from repokid.utils.permissions import get_permissions_in_policy
from repokid.utils.permissions import get_repoable_permissions
from repokid.utils.permissions import get_repoed_policy
from repokid.utils.permissions import get_services_and_permissions_from_repoable
logger = logging.getLogger("repokid")
def to_camel(string: str) -> str:
return "".join(word.capitalize() for word in string.split("_"))
class Role(BaseModel):
aa_data: Optional[List[Dict[str, Any]]] = Field(alias="AAData")
account: str = Field(default="")
active: Optional[bool] = Field()
arn: str = Field(default="")
assume_role_policy_document: Dict[str, Any] = Field(default={})
create_date: Optional[datetime.datetime] = Field()
disqualified_by: List[str] = Field(default=[])
last_updated: Optional[datetime.datetime] = Field()
no_repo_permissions: Dict[str, int] = Field(default={})
opt_out: Dict[str, Any] = Field(default={})
policies: List[Dict[str, Any]] = Field(default=[])
refreshed: Optional[str] = Field()
repoable_permissions: int = Field(default=0)
repoable_services: List[str] = Field(default=[])
repoed: Optional[str] = Field()
repo_scheduled: int = Field(default=0)
role_id: str = Field(default="")
role_name: str = Field(default="")
scheduled_perms: List[str] = Field(default=[])
stats: List[Dict[str, Any]] = Field(default=[])
tags: List[Dict[str, Any]] = Field(default=[])
total_permissions: Optional[int] = Field()
config: Union[RepokidConfig, None] = Field(default=CONFIG)
_dirty: bool = PrivateAttr(default=False)
_keys: Set[str] = {
"role_id",
"account",
}
_meta: Set[str] = {
"config",
"_dirty",
"_updated_fields",
}
# TODO: read exclude_new_permissions_for_days from config
_no_repo_secs: int = 24 * 60 * 60 * 14
_updated_fields: Set[str] = PrivateAttr(default_factory=set)
class Config:
alias_generator = to_camel
allow_population_by_field_name = True
arbitrary_types_allowed = True
underscore_attrs_are_private = True
def __eq__(self, other: object) -> bool:
if not self.arn:
return False
return self.arn == other
def __hash__(self) -> int:
return hash(self.arn)
def __repr__(self) -> str:
return f"<{self.arn}>"
@root_validator(pre=True)
def derive_from_arn(cls, values: Dict[str, Any]) -> Dict[str, Any]:
arn = values.get("arn") or values.get("Arn")
account = values.get("account") or values.get("Account")
role_name = values.get("role_name") or values.get("RoleName")
if arn and not account:
values["account"] = arn.split(":")[4]
if arn and not role_name:
values["role_name"] = arn.split("/")[-1]
if account and role_name and not arn:
values["arn"] = f"arn:aws:iam::{account}:role/{role_name}"
return values
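    # Hedged walk-through (comment only, not original code): for
    # arn="arn:aws:iam::123456789012:role/service-role/my-role" the validator
    # derives account="123456789012" (colon-delimited field 4) and
    # role_name="my-role" (text after the last "/"); given only account and
    # role_name it reconstructs a path-less ARN.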
@validator("create_date")
def datetime_normalize(
cls, v: Union[datetime.datetime, str]
) -> Optional[datetime.datetime]:
if isinstance(v, datetime.datetime):
return datetime.datetime.fromtimestamp(v.timestamp())
elif isinstance(v, str):
return ts_parse(v)
else:
return None
@validator("config")
def fix_none_config(cls, v: Optional[RepokidConfig]) -> RepokidConfig:
if v is None:
return CONFIG
return v
def add_policy_version(
self,
policy: Dict[str, Any],
source: str = "Scan",
store: bool = True,
add_no_repo: bool = True,
) -> bool:
if not policy:
logger.debug("no policy provided, not adding")
return False
if self.policies:
last_policy = self.policies[-1]["Policy"]
last_source = self.policies[-1]["Source"]
if policy == last_policy and source == last_source:
# we're already up to date, so this is a noop
return False
policy_entry = {
"Source": source,
"Discovered": datetime.datetime.now().isoformat(),
"Policy": policy,
}
self.policies.append(policy_entry)
if add_no_repo:
self._calculate_no_repo_permissions()
if store:
self.store(fields=["Policies", "NoRepoPermissions"])
return True
def _remove_oldest_policy_version(self) -> None:
if len(self.policies) > 0:
removed = self.policies.pop(0)
source = removed.get("Source")
discovered = removed.get("Discovered")
logger.info(
f"removing policy discovered by {source} on {discovered} from {self.role_name}"
)
self._updated_fields.add("policies")
def calculate_repo_scores(self, minimum_age: int, hooks: RepokidHooks) -> None:
(
all_permissions,
eligible_permissions,
) = self.get_permissions_for_policy_version()
self.total_permissions = len(all_permissions)
if self.disqualified_by or not self.aa_data:
self.repoable_permissions = 0
self.repoable_services = []
return
repoable_permissions = get_repoable_permissions(
self.account,
self.arn,
self.role_name,
all_permissions,
self.aa_data,
self.no_repo_permissions,
self.role_id,
minimum_age,
hooks,
)
(
repoable_permissions_set,
repoable_services_set,
) = convert_repoable_perms_to_perms_and_services(
all_permissions, repoable_permissions
)
# combine repoable services and permissions, convert to list, then sort
repoable_services_list = list(
repoable_services_set.union(repoable_permissions_set)
)
self.repoable_services = sorted(repoable_services_list)
self.repoable_permissions = len(repoable_permissions)
def get_permissions_for_policy_version(
self, selection: int = -1, warn_unknown_perms: bool = False
) -> Tuple[Set[str], Set[str]]:
if not self.policies:
return set(), set()
return get_permissions_in_policy(
self.policies[selection]["Policy"], warn_unknown_perms=warn_unknown_perms
)
def _calculate_no_repo_permissions(self) -> None:
if not self.policies:
return
try:
previous_policy = self.policies[-2]
except IndexError:
previous_policy = {}
new_policy = self.policies[-1]
try:
newly_added_permissions = find_newly_added_permissions(
previous_policy.get("Policy", {}),
new_policy.get("Policy", {}),
minimize=True,
)
except IAMActionError:
logger.error(
"failed to calculate no-repo permissions for %s",
self.arn,
exc_info=True,
)
return
current_time = int(time.time())
# iterate through a copy of self.no_repo_permissions and remove expired items from
# the source dict
for permission, expiration in copy.copy(self.no_repo_permissions).items():
if current_time > expiration:
self.no_repo_permissions.pop(permission)
expire_time = current_time + self._no_repo_secs
existing_no_repo = self.no_repo_permissions.keys()
for permission in newly_added_permissions:
if not fnmatch.filter(existing_no_repo, permission):
self.no_repo_permissions[permission] = expire_time
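    # Hedged walk-through (comment only): a permission present in the newest
    # policy version but not the previous one is recorded in
    # no_repo_permissions with an expiry of now + _no_repo_secs (14 days),
    # shielding just-granted permissions from being repoed. Expired entries
    # are pruned on every recalculation, and fnmatch keeps duplicates out
    # when an existing entry already matches the new permission.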
def get_repoed_policy(
self, scheduled: bool = False
) -> Tuple[Dict[str, Any], List[str]]:
if not self.repoable_services:
raise MissingRepoableServices("role must be updated")
if scheduled:
permissions, services = get_services_and_permissions_from_repoable(
self.scheduled_perms
)
repoable = [
p
for p in self.repoable_services
if p in self.scheduled_perms or p.split(":")[0] in services
]
else:
repoable = self.repoable_services
repoed_policies, deleted_policy_names = get_repoed_policy(
self.policies[-1]["Policy"], set(repoable)
)
return repoed_policies, deleted_policy_names
def is_eligible_for_repo(self) -> Tuple[bool, str]:
if len(self.disqualified_by) > 0:
return False, f"disqualified by {', '.join(self.disqualified_by)}"
if not self.aa_data:
return False, "no Access Advisor data available"
if not self.repoable_permissions and not self.scheduled_perms:
return False, "no repoable permissions"
stale_aa_services = self._stale_aa_services()
if stale_aa_services:
return (
False,
f"stale Access Advisor data for {', '.join(stale_aa_services)}",
)
return True, ""
def _stale_aa_services(self) -> List[str]:
thresh = datetime.datetime.now() - datetime.timedelta(
days=self.config["repo_requirements"]["oldest_aa_data_days"] # type: ignore
)
stale_services = []
if self.aa_data:
for service in self.aa_data:
if ts_parse(service["lastUpdated"], ignoretz=True) < thresh:
stale_services.append(service["serviceName"])
return stale_services
def _update_opt_out(self) -> None:
if self.opt_out and int(self.opt_out["expire"]) < int(time.time()):
self.opt_out = {}
def _update_refreshed(self) -> None:
self.refreshed = datetime.datetime.now().isoformat()
def _update_tags(self, tags: List[Dict[str, str]]) -> None:
self.tags = tags
def update(
self, values: Dict[str, Any], store: bool = True, dirty: bool = True
) -> None:
self._dirty = dirty
self._updated_fields.update(values.keys())
temp_role = Role(**values)
role_data = temp_role.dict(
exclude_unset=True, exclude={"config", "_dirty", "_updated_fields"}
)
for k, v in role_data.items():
setattr(self, k, v)
if store:
fields = list(values.keys())
self.store(fields=fields)
self._dirty = False
def fetch_aa_data(self) -> None:
if not self.arn:
raise ModelError(
"missing arn on Role instance, cannot retrieve Access Advisor data"
)
aardvark_data = AccessAdvisorDatasource()
if self.account:
# We'll go ahead and seed this whole account
aardvark_data.seed(self.account)
try:
self.aa_data = aardvark_data.get(self.arn)
except NotFoundError:
self.aa_data = []
def _fetch_iam_data(self) -> IAMEntry:
iam_datasource = IAMDatasource()
role_data = iam_datasource.get(self.arn)
role_id = role_data.get("RoleId")
if role_id:
self.role_id = role_id
create_date = role_data.get("CreateDate")
if create_date:
self.create_date = create_date
return role_data
def fetch(
self,
fields: Optional[List[str]] = None,
update: bool = True,
fetch_aa_data: bool = False,
) -> None:
if self._dirty:
raise IntegrityError(
"role object has unsaved modifications, fetching may overwrite changes"
)
if self.role_id:
stored_role_data = get_role_by_id(self.role_id, fields=fields)
elif self.arn:
stored_role_data = get_role_by_arn(self.arn, fields=fields)
else:
# TODO: we can pull role_name and account from an ARN, support that too
raise ModelError(
"missing role_id or role_name and account on Role instance"
)
if update:
self.update(stored_role_data, store=False, dirty=False)
            self._updated_fields -= set(stored_role_data.keys())
if fetch_aa_data:
self.fetch_aa_data()
def mark_inactive(self, store: bool = False) -> None:
self.active = False
if store:
self.store(fields=["active"])
def store(self, fields: Optional[List[str]] = None) -> None:
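        """Persist this role to DynamoDB.

        Refuses to overwrite a role that was updated since our last fetch,
        retries transient failures, and trims old policy versions if the item
        exceeds the DynamoDB size limit.
        """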
create = False
try:
remote_role_data = Role(role_id=self.role_id, arn=self.arn)
remote_role_data.fetch(fields=["LastUpdated"])
if (
remote_role_data.last_updated
and self.last_updated
and remote_role_data.last_updated > self.last_updated
):
# Fetch the rest of the role data for debugging
remote_role_data.fetch()
logger.warning(
"role has been updated since last fetch: stored %s, local %s",
remote_role_data.last_updated,
self.last_updated,
extra={
"stored_role": remote_role_data.dict(),
"local_role": self.dict(),
},
)
raise IntegrityError("stored role has been updated since last fetch")
except RoleNotFoundError:
create = True
self.last_updated = datetime.datetime.now()
set_role_data_args: Dict[str, Any] = {
"by_alias": True,
}
# If fields are specified, we need to add last_updated to make sure it gets set
if fields:
include_fields = set(fields)
include_fields.add("last_updated")
set_role_data_args["include"] = include_fields
# Exclude key fields unless this is a newly-created item. Key fields cannot be included
# in DynamoDB update calls.
exclude_fields = self._meta
if not create:
exclude_fields.update(self._keys)
set_role_data_args["exclude"] = exclude_fields
attempts = 0
max_retries = 3
while attempts < max_retries:
try:
set_role_data(
self.role_id,
self.dict(**set_role_data_args),
create=create,
)
self._updated_fields = (
self._updated_fields - set(fields) if fields else set()
)
# model is still dirty if we haven't stored all updated fields
self._dirty = len(self._updated_fields) > 0
return
except DynamoDBMaxItemSizeError:
logger.info(
"role %s too big for DynamoDB, removing oldest policy version",
self.role_name,
)
self._remove_oldest_policy_version()
attempts += 1
continue
except DynamoDBError:
logger.info(
"failed attempt %d to store role %s in DynamoDB",
attempts,
self.role_name,
exc_info=True,
)
attempts += 1
continue
# If we've made it this far, the role was not stored
raise RoleStoreError(f"failed to store {self.arn} in DynamoDB")
def gather_role_data(
self,
hooks: RepokidHooks,
        current_policies: Optional[Dict[str, Any]] = None,
config: Optional[RepokidConfig] = None,
source: str = "Scan",
add_no_repo: bool = True,
store: bool = True,
) -> None:
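        """Fetch stored, IAM, and Access Advisor data and recalculate repo state."""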
config = config or CONFIG
try:
self.fetch()
except RoleNotFoundError as e:
# we don't have this role in DynamoDB yet, but that's okay
logger.debug("%s, will be created", e)
self.active = True
self.fetch_aa_data()
iam_data = self._fetch_iam_data()
if not current_policies:
current_policies = iam_data.get("RolePolicyList", {}) or iam_data.get(
"InlinePolicies", {}
)
policy_added = self.add_policy_version(
current_policies, source=source, store=False, add_no_repo=add_no_repo
)
if policy_added and add_no_repo:
self._calculate_no_repo_permissions()
self._update_tags(iam_data.get("Tags", []))
self._update_opt_out()
self._update_refreshed()
minimum_age = config["filter_config"]["AgeFilter"]["minimum_age"]
self.calculate_repo_scores(minimum_age, hooks)
self.calculate_stats(source=source, store=False)
if store:
self.store()
def calculate_stats(self, source: str = "Scan", store: bool = True) -> None:
new_stats = {
"Date": datetime.datetime.now().isoformat(),
"DisqualifiedBy": self.disqualified_by,
"PermissionsCount": self.total_permissions,
"RepoablePermissionsCount": self.repoable_permissions,
"Source": source,
}
try:
cur_stats = self.stats[-1]
except IndexError:
cur_stats = {
"DisqualifiedBy": [],
"PermissionsCount": 0,
"RepoablePermissionsCount": 0,
}
check_fields = [
"DisqualifiedBy",
"PermissionsCount",
"RepoablePermissionsCount",
]
changed = any(
[new_stats.get(item) != cur_stats.get(item) for item in check_fields]
)
if changed:
self.stats.append(new_stats)
if store:
self.store(fields=["stats"])
def remove_permissions(
self, permissions: List[str], hooks: RepokidHooks, commit: bool = False
) -> None:
"""Remove the list of permissions from the provided role.
Args:
account_number (string)
permissions (list<string>)
role (Role object)
role_id (string)
commit (bool)
Returns:
None
"""
(
repoed_policies,
deleted_policy_names,
) = get_repoed_policy(self.policies[-1]["Policy"], set(permissions))
if inline_policies_size_exceeds_maximum(repoed_policies):
logger.error(
"Policies would exceed the AWS size limit after repo for role: {} in account {}. "
"Please manually minify.".format(self.role_name, self.account)
)
return
if not commit:
log_deleted_and_repoed_policies(
deleted_policy_names, repoed_policies, self.role_name, self.account
)
return
conn = self.config["connection_iam"] # type: ignore
conn["account_number"] = self.account
for name in deleted_policy_names:
try:
delete_policy(name, self.role_name, self.account, conn)
except IAMError as e:
logger.error(e)
if repoed_policies:
try:
replace_policies(repoed_policies, self.role_name, self.account, conn)
except IAMError as e:
logger.error(e)
current_policies = get_role_inline_policies(self.dict(), **conn) or {}
self.add_policy_version(current_policies, "Repo")
self.repoed = datetime.datetime.now(tz=datetime.timezone.utc).isoformat()
update_repoed_description(self.role_name, conn)
self.gather_role_data(
hooks,
current_policies=current_policies,
source="ManualPermissionRepo",
add_no_repo=False,
)
logger.info(
"Successfully removed {permissions} from role: {role} in account {account_number}".format(
permissions=permissions,
role=self.role_name,
account_number=self.account,
)
)
def repo(
self, hooks: RepokidHooks, commit: bool = False, scheduled: bool = False
) -> List[str]:
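        """Repo this role and return a list of error strings (empty on success)."""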
errors: List[str] = []
eligible, reason = self.is_eligible_for_repo()
if not eligible:
errors.append(f"Role {self.role_name} not eligible for repo: {reason}")
return errors
self.calculate_repo_scores(
self.config["filter_config"]["AgeFilter"]["minimum_age"], hooks # type: ignore
)
try:
repoed_policies, deleted_policy_names = self.get_repoed_policy(
scheduled=scheduled
)
except MissingRepoableServices as e:
errors.append(f"Role {self.role_name} cannot be repoed: {e}")
return errors
if inline_policies_size_exceeds_maximum(repoed_policies):
error = (
"Policies would exceed the AWS size limit after repo for role: {} in account {}. "
"Please manually minify.".format(self.role_name, self.account)
)
logger.error(error)
errors.append(error)
self.repo_scheduled = 0
self.scheduled_perms = []
self.store(["repo_scheduled", "scheduled_perms"])
return errors
if not commit:
log_deleted_and_repoed_policies(
deleted_policy_names, repoed_policies, self.role_name, self.account
)
return errors
conn = self.config["connection_iam"] # type: ignore
conn["account_number"] = self.account
for name in deleted_policy_names:
try:
delete_policy(name, self.role_name, self.account, conn)
except IAMError as e:
logger.error(e)
errors.append(str(e))
if repoed_policies:
try:
replace_policies(repoed_policies, self.role_name, self.account, conn)
except IAMError as e:
logger.error(e)
errors.append(str(e))
current_policies = (
get_role_inline_policies(self.dict(by_alias=True), **conn) or {}
)
self.add_policy_version(current_policies, source="Repo")
# regardless of whether we're successful we want to unschedule the repo
self.repo_scheduled = 0
self.scheduled_perms = []
call_hooks(hooks, "AFTER_REPO", {"role": self, "errors": errors})
if not errors:
# repos will stay scheduled until they are successful
self.repoed = datetime.datetime.now(tz=datetime.timezone.utc).isoformat()
update_repoed_description(self.role_name, conn)
logger.info(
"Successfully repoed role: {} in account {}".format(
self.role_name, self.account
)
)
try:
self.store()
except RoleStoreError:
logger.exception("failed to store role after repo", exc_info=True)
return errors
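# Illustrative sketch (not part of Repokid): the retry loop used by
# Role.store above, reduced to a standalone helper. The exception classes and
# the default of three attempts mirror the method; the helper name and its
# callable arguments are hypothetical.
def _store_with_retries_sketch(write, shrink, max_retries: int = 3) -> None:
    attempts = 0
    while attempts < max_retries:
        try:
            write()  # e.g. a set_role_data call
            return
        except DynamoDBMaxItemSizeError:
            shrink()  # e.g. drop the oldest stored policy version
            attempts += 1
        except DynamoDBError:
            attempts += 1
    raise RoleStoreError("failed to store item after retries")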
class RoleList(object):
def __init__(
self, role_object_list: List[Role], config: Optional[RepokidConfig] = None
):
self.config = config or CONFIG
self.roles: List[Role] = role_object_list
@overload
def __getitem__(self, index: slice) -> RoleList:
# type info for retrieving a slice of contained roles
...
@overload
def __getitem__(self, index: int) -> Role:
# type info for retrieving a single contained role
...
def __getitem__(self, index: Union[int, slice]) -> Union[Role, RoleList]:
if isinstance(index, slice):
# return a RoleList if the call was for a slice of contained roles
return RoleList(self.roles[index])
return self.roles[index]
def __len__(self) -> int:
return len(self.roles)
def __repr__(self) -> str:
return str([role.arn for role in self.roles])
def __eq__(self, other: object) -> bool:
if not isinstance(other, RoleList):
return False
return repr(self) == repr(other)
def __iter__(self) -> RoleList:
self._iter_index = 0
self._len = len(self)
return self
def __next__(self) -> Role:
if self._iter_index < self._len:
result = self[self._iter_index]
self._iter_index += 1
return result
else:
raise StopIteration
@classmethod
def from_ids(
cls,
id_list: Iterable[str],
fetch: bool = True,
fetch_aa_data: bool = True,
fields: Optional[List[str]] = None,
config: Optional[RepokidConfig] = None,
) -> RoleList:
role_list = cls(
[Role(role_id=role_id, config=config) for role_id in id_list], config=config
)
if fetch:
role_list.fetch_all(fetch_aa_data=fetch_aa_data, fields=fields)
return role_list
@classmethod
def from_arns(
cls,
arn_list: Iterable[str],
fetch: bool = True,
fetch_aa_data: bool = True,
fields: Optional[List[str]] = None,
config: Optional[RepokidConfig] = None,
) -> RoleList:
role_list = cls(
[Role(arn=arn, config=config) for arn in arn_list], config=config
)
if fetch:
role_list.fetch_all(fetch_aa_data=fetch_aa_data, fields=fields)
return role_list
def append(self, role: Role) -> None:
if not isinstance(role, Role):
raise AttributeError("cannot add non-Role to RoleList")
self.roles.append(role)
def role_id_list(self) -> List[str]:
return [role.role_id for role in self.roles]
def get_active(self) -> RoleList:
return self.filter(active=True)
def get_by_id(self, role_id: str) -> Optional[Role]:
try:
return self.filter(role_id=role_id)[0]
except IndexError:
return None
def get_scheduled(self) -> RoleList:
cur_time = int(time.time())
return RoleList(
[
role
for role in self.roles
if (role.repo_scheduled and cur_time > role.repo_scheduled)
]
)
def filter(self, **kwargs: Any) -> RoleList:
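        """Return a new RoleList of roles whose attributes equal all given kwargs."""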
roles = self.roles
for arg, value in kwargs.items():
roles = [role for role in roles if getattr(role, arg, None) == value]
return RoleList(roles)
def store(self, fields: Optional[List[str]] = None) -> None:
for role in self.roles:
logger.info("storing role %s", role.arn)
try:
role.store(fields=fields)
            except (RoleStoreError, IntegrityError) as e:
                logger.error(
                    "could not store role %s: %s", role.arn, e, exc_info=True
                )
def update_stats(self, source: str = "Scan", store: bool = True) -> None:
for role in self.roles:
role.calculate_stats(source=source, store=store)
def fetch_all(
self, fetch_aa_data: bool = False, fields: Optional[List[str]] = None
) -> None:
for role in self.roles:
try:
role.fetch(fetch_aa_data=fetch_aa_data, fields=fields)
except RoleNotFoundError as e:
logger.info(e)
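# Usage sketch (illustrative): build a RoleList from ARNs and dry-run a repo
# across the active roles. The ARN and the ``hooks`` argument are placeholders.
def _repo_active_roles_sketch(hooks: RepokidHooks) -> None:
    roles = RoleList.from_arns(["arn:aws:iam::123456789012:role/example"])
    for role in roles.get_active():
        eligible, reason = role.is_eligible_for_repo()
        if not eligible:
            logger.info("skipping %s: %s", role.arn, reason)
            continue
        errors = role.repo(hooks, commit=False)  # set commit=True to apply
        if errors:
            logger.warning("dry run reported errors for %s: %s", role.arn, errors)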
"""Tests for the Z-Wave init."""
import asyncio
from collections import OrderedDict
from datetime import datetime
import unittest
import pytest
from pytz import utc
import voluptuous as vol
from homeassistant.bootstrap import async_setup_component
from homeassistant.components import zwave
from homeassistant.components.zwave import (
CONF_DEVICE_CONFIG_GLOB,
CONFIG_SCHEMA,
DATA_NETWORK,
const,
)
from homeassistant.components.zwave.binary_sensor import get_device
from homeassistant.const import ATTR_ENTITY_ID, EVENT_HOMEASSISTANT_START
from homeassistant.helpers.device_registry import async_get_registry as get_dev_reg
from homeassistant.helpers.entity_registry import async_get_registry
from homeassistant.setup import setup_component
from tests.async_mock import MagicMock, patch
from tests.common import async_fire_time_changed, get_test_home_assistant, mock_registry
from tests.mock.zwave import MockEntityValues, MockNetwork, MockNode, MockValue
@pytest.fixture(autouse=True)
def mock_storage(hass_storage):
"""Autouse hass_storage for the TestCase tests."""
async def test_valid_device_config(hass, mock_openzwave):
"""Test valid device config."""
device_config = {"light.kitchen": {"ignored": "true"}}
result = await async_setup_component(
hass, "zwave", {"zwave": {"device_config": device_config}}
)
await hass.async_block_till_done()
assert result
async def test_invalid_device_config(hass, mock_openzwave):
"""Test invalid device config."""
device_config = {"light.kitchen": {"some_ignored": "true"}}
result = await async_setup_component(
hass, "zwave", {"zwave": {"device_config": device_config}}
)
await hass.async_block_till_done()
assert not result
def test_config_access_error():
"""Test threading error accessing config values."""
node = MagicMock()
def side_effect():
raise RuntimeError
node.values.values.side_effect = side_effect
result = zwave.get_config_value(node, 1)
assert result is None
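# Illustrative analogue (hypothetical helper, not the real implementation) of
# the contract exercised above: configuration lookups swallow a RuntimeError
# raised while reading node values and report "no value" instead of crashing.
def _safe_config_value(node, param):
    try:
        return next(
            value.data
            for value in node.values.values()
            if value.command_class == const.COMMAND_CLASS_CONFIGURATION
            and value.index == param
        )
    except (RuntimeError, StopIteration):
        return None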
async def test_network_options(hass, mock_openzwave):
"""Test network options."""
result = await async_setup_component(
hass,
"zwave",
{"zwave": {"usb_path": "mock_usb_path", "config_path": "mock_config_path"}},
)
await hass.async_block_till_done()
assert result
network = hass.data[zwave.DATA_NETWORK]
assert network.options.device == "mock_usb_path"
assert network.options.config_path == "mock_config_path"
async def test_network_key_validation(hass, mock_openzwave):
"""Test network key validation."""
test_values = [
(
"0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, "
"0x0C, 0x0D, 0x0E, 0x0F, 0x10"
),
(
"0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,"
"0x0E,0x0F,0x10"
),
]
for value in test_values:
result = zwave.CONFIG_SCHEMA({"zwave": {"network_key": value}})
assert result["zwave"]["network_key"] == value
async def test_erronous_network_key_fails_validation(hass, mock_openzwave):
"""Test failing erroneous network key validation."""
test_values = [
(
"0x 01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, "
"0x0C, 0x0D, 0x0E, 0x0F, 0x10"
),
(
"0X01,0X02,0X03,0X04,0X05,0X06,0X07,0X08,0X09,0X0A,0X0B,0X0C,0X0D,"
"0X0E,0X0F,0X10"
),
"invalid",
"1234567",
1234567,
]
for value in test_values:
with pytest.raises(vol.Invalid):
zwave.CONFIG_SCHEMA({"zwave": {"network_key": value}})
async def test_auto_heal_midnight(hass, mock_openzwave, legacy_patchable_time):
"""Test network auto-heal at midnight."""
await async_setup_component(hass, "zwave", {"zwave": {"autoheal": True}})
await hass.async_block_till_done()
network = hass.data[zwave.DATA_NETWORK]
assert not network.heal.called
time = utc.localize(datetime(2017, 5, 6, 0, 0, 0))
async_fire_time_changed(hass, time)
await hass.async_block_till_done()
await hass.async_block_till_done()
assert network.heal.called
assert len(network.heal.mock_calls) == 1
async def test_auto_heal_disabled(hass, mock_openzwave):
"""Test network auto-heal disabled."""
await async_setup_component(hass, "zwave", {"zwave": {"autoheal": False}})
await hass.async_block_till_done()
network = hass.data[zwave.DATA_NETWORK]
assert not network.heal.called
time = utc.localize(datetime(2017, 5, 6, 0, 0, 0))
async_fire_time_changed(hass, time)
await hass.async_block_till_done()
assert not network.heal.called
async def test_setup_platform(hass, mock_openzwave):
"""Test invalid device config."""
mock_device = MagicMock()
hass.data[DATA_NETWORK] = MagicMock()
hass.data[zwave.DATA_DEVICES] = {456: mock_device}
async_add_entities = MagicMock()
result = await zwave.async_setup_platform(hass, None, async_add_entities, None)
assert not result
assert not async_add_entities.called
result = await zwave.async_setup_platform(
hass, None, async_add_entities, {const.DISCOVERY_DEVICE: 123}
)
assert not result
assert not async_add_entities.called
result = await zwave.async_setup_platform(
hass, None, async_add_entities, {const.DISCOVERY_DEVICE: 456}
)
assert result
assert async_add_entities.called
assert len(async_add_entities.mock_calls) == 1
assert async_add_entities.mock_calls[0][1][0] == [mock_device]
async def test_zwave_ready_wait(hass, mock_openzwave):
"""Test that zwave continues after waiting for network ready."""
# Initialize zwave
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
sleeps = []
def utcnow():
return datetime.fromtimestamp(len(sleeps))
asyncio_sleep = asyncio.sleep
async def sleep(duration, loop=None):
if duration > 0:
sleeps.append(duration)
await asyncio_sleep(0)
with patch("homeassistant.components.zwave.dt_util.utcnow", new=utcnow):
with patch("asyncio.sleep", new=sleep):
with patch.object(zwave, "_LOGGER") as mock_logger:
hass.data[DATA_NETWORK].state = MockNetwork.STATE_STARTED
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
assert len(sleeps) == const.NETWORK_READY_WAIT_SECS
assert mock_logger.warning.called
assert len(mock_logger.warning.mock_calls) == 1
assert (
mock_logger.warning.mock_calls[0][1][1]
== const.NETWORK_READY_WAIT_SECS
)
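# The fake-clock pattern above also appears in test_unparsed_node_discovery
# below; this illustrative helper (hypothetical, not wired into the suite)
# captures it: each fake sleep advances a fake utcnow by one second without
# any real delay.
def _make_fake_clock():
    sleeps = []

    def utcnow():
        # one fake second elapses per recorded sleep call
        return datetime.fromtimestamp(len(sleeps))

    asyncio_sleep = asyncio.sleep

    async def sleep(duration, loop=None):
        if duration > 0:
            sleeps.append(duration)
        await asyncio_sleep(0)  # yield to the event loop without waiting

    return sleeps, utcnow, sleep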
async def test_device_entity(hass, mock_openzwave):
"""Test device entity base class."""
node = MockNode(node_id="10", name="Mock Node")
value = MockValue(
data=False,
node=node,
instance=2,
object_id="11",
label="Sensor",
command_class=const.COMMAND_CLASS_SENSOR_BINARY,
)
power_value = MockValue(
data=50.123456, node=node, precision=3, command_class=const.COMMAND_CLASS_METER
)
values = MockEntityValues(primary=value, power=power_value)
device = zwave.ZWaveDeviceEntity(values, "zwave")
device.hass = hass
device.value_added()
device.update_properties()
await hass.async_block_till_done()
assert not device.should_poll
assert device.unique_id == "10-11"
assert device.name == "Mock Node Sensor"
assert device.device_state_attributes[zwave.ATTR_POWER] == 50.123
async def test_node_removed(hass, mock_openzwave):
"""Test node removed in base class."""
# Create a mock node & node entity
node = MockNode(node_id="10", name="Mock Node")
value = MockValue(
data=False,
node=node,
instance=2,
object_id="11",
label="Sensor",
command_class=const.COMMAND_CLASS_SENSOR_BINARY,
)
power_value = MockValue(
data=50.123456, node=node, precision=3, command_class=const.COMMAND_CLASS_METER
)
values = MockEntityValues(primary=value, power=power_value)
device = zwave.ZWaveDeviceEntity(values, "zwave")
device.hass = hass
device.entity_id = "zwave.mock_node"
device.value_added()
device.update_properties()
await hass.async_block_till_done()
# Save it to the entity registry
registry = mock_registry(hass)
registry.async_get_or_create("zwave", "zwave", device.unique_id)
device.entity_id = registry.async_get_entity_id("zwave", "zwave", device.unique_id)
# Create dummy entity registry entries for other integrations
hue_entity = registry.async_get_or_create("light", "hue", 1234)
zha_entity = registry.async_get_or_create("sensor", "zha", 5678)
# Verify our Z-Wave entity is registered
assert registry.async_is_registered(device.entity_id)
# Remove it
entity_id = device.entity_id
await device.node_removed()
# Verify registry entry for our Z-Wave node is gone
assert not registry.async_is_registered(entity_id)
# Verify registry entries for our other entities remain
assert registry.async_is_registered(hue_entity.entity_id)
assert registry.async_is_registered(zha_entity.entity_id)
async def test_node_discovery(hass, mock_openzwave):
"""Test discovery of a node."""
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_NODE_ADDED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
assert len(mock_receivers) == 1
node = MockNode(node_id=14)
await hass.async_add_executor_job(mock_receivers[0], node)
await hass.async_block_till_done()
assert hass.states.get("zwave.mock_node").state == "unknown"
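# The receiver-capture pattern repeated throughout these tests, shown once as
# an illustrative helper (hypothetical, not used by the suite): it returns a
# list plus a fake ``dispatcher.connect`` that records receivers registered
# for one signal of interest.
def _capture_receivers(signal_of_interest):
    receivers = []

    def mock_connect(receiver, signal, *args, **kwargs):
        if signal == signal_of_interest:
            receivers.append(receiver)

    return receivers, mock_connect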
async def test_unparsed_node_discovery(hass, mock_openzwave):
"""Test discovery of a node."""
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_NODE_ADDED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
assert len(mock_receivers) == 1
node = MockNode(node_id=14, manufacturer_name=None, name=None, is_ready=False)
sleeps = []
def utcnow():
return datetime.fromtimestamp(len(sleeps))
asyncio_sleep = asyncio.sleep
async def sleep(duration, loop=None):
if duration > 0:
sleeps.append(duration)
await asyncio_sleep(0)
with patch("homeassistant.components.zwave.dt_util.utcnow", new=utcnow):
with patch("asyncio.sleep", new=sleep):
with patch.object(zwave, "_LOGGER") as mock_logger:
await hass.async_add_executor_job(mock_receivers[0], node)
await hass.async_block_till_done()
assert len(sleeps) == const.NODE_READY_WAIT_SECS
assert mock_logger.warning.called
assert len(mock_logger.warning.mock_calls) == 1
assert mock_logger.warning.mock_calls[0][1][1:] == (
14,
const.NODE_READY_WAIT_SECS,
)
assert hass.states.get("zwave.unknown_node_14").state == "unknown"
async def test_node_ignored(hass, mock_openzwave):
"""Test discovery of a node."""
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_NODE_ADDED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(
hass,
"zwave",
{"zwave": {"device_config": {"zwave.mock_node": {"ignored": True}}}},
)
await hass.async_block_till_done()
assert len(mock_receivers) == 1
node = MockNode(node_id=14)
await hass.async_add_executor_job(mock_receivers[0], node)
await hass.async_block_till_done()
assert hass.states.get("zwave.mock_node") is None
async def test_value_discovery(hass, mock_openzwave):
"""Test discovery of a node."""
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_VALUE_ADDED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
assert len(mock_receivers) == 1
node = MockNode(node_id=11, generic=const.GENERIC_TYPE_SENSOR_BINARY)
value = MockValue(
data=False,
node=node,
index=12,
instance=13,
command_class=const.COMMAND_CLASS_SENSOR_BINARY,
type=const.TYPE_BOOL,
genre=const.GENRE_USER,
)
await hass.async_add_executor_job(mock_receivers[0], node, value)
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.mock_node_mock_value").state == "off"
async def test_value_entities(hass, mock_openzwave):
"""Test discovery of a node."""
mock_receivers = {}
def mock_connect(receiver, signal, *args, **kwargs):
mock_receivers[signal] = receiver
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
zwave_network = hass.data[DATA_NETWORK]
zwave_network.state = MockNetwork.STATE_READY
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
assert mock_receivers
await hass.async_add_executor_job(
mock_receivers[MockNetwork.SIGNAL_ALL_NODES_QUERIED]
)
node = MockNode(node_id=11, generic=const.GENERIC_TYPE_SENSOR_BINARY)
zwave_network.nodes = {node.node_id: node}
value = MockValue(
data=False,
node=node,
index=12,
instance=1,
command_class=const.COMMAND_CLASS_SENSOR_BINARY,
type=const.TYPE_BOOL,
genre=const.GENRE_USER,
)
node.values = {"primary": value, value.value_id: value}
value2 = MockValue(
data=False,
node=node,
index=12,
instance=2,
label="Mock Value B",
command_class=const.COMMAND_CLASS_SENSOR_BINARY,
type=const.TYPE_BOOL,
genre=const.GENRE_USER,
)
node.values[value2.value_id] = value2
await hass.async_add_executor_job(
mock_receivers[MockNetwork.SIGNAL_NODE_ADDED], node
)
await hass.async_add_executor_job(
mock_receivers[MockNetwork.SIGNAL_VALUE_ADDED], node, value
)
await hass.async_add_executor_job(
mock_receivers[MockNetwork.SIGNAL_VALUE_ADDED], node, value2
)
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.mock_node_mock_value").state == "off"
assert hass.states.get("binary_sensor.mock_node_mock_value_b").state == "off"
ent_reg = await async_get_registry(hass)
dev_reg = await get_dev_reg(hass)
entry = ent_reg.async_get("zwave.mock_node")
assert entry is not None
assert entry.unique_id == f"node-{node.node_id}"
node_dev_id = entry.device_id
entry = ent_reg.async_get("binary_sensor.mock_node_mock_value")
assert entry is not None
assert entry.unique_id == f"{node.node_id}-{value.object_id}"
assert entry.name is None
assert entry.device_id == node_dev_id
entry = ent_reg.async_get("binary_sensor.mock_node_mock_value_b")
assert entry is not None
assert entry.unique_id == f"{node.node_id}-{value2.object_id}"
assert entry.name is None
assert entry.device_id != node_dev_id
device_id_b = entry.device_id
device = dev_reg.async_get(node_dev_id)
assert device is not None
assert device.name == node.name
old_device = device
device = dev_reg.async_get(device_id_b)
assert device is not None
assert device.name == f"{node.name} ({value2.instance})"
# test renaming without updating
await hass.services.async_call(
"zwave",
"rename_node",
{const.ATTR_NODE_ID: node.node_id, const.ATTR_NAME: "Demo Node"},
)
await hass.async_block_till_done()
assert node.name == "Demo Node"
entry = ent_reg.async_get("zwave.mock_node")
assert entry is not None
entry = ent_reg.async_get("binary_sensor.mock_node_mock_value")
assert entry is not None
entry = ent_reg.async_get("binary_sensor.mock_node_mock_value_b")
assert entry is not None
device = dev_reg.async_get(node_dev_id)
assert device is not None
assert device.id == old_device.id
assert device.name == node.name
device = dev_reg.async_get(device_id_b)
assert device is not None
assert device.name == f"{node.name} ({value2.instance})"
# test renaming
await hass.services.async_call(
"zwave",
"rename_node",
{
const.ATTR_NODE_ID: node.node_id,
const.ATTR_UPDATE_IDS: True,
const.ATTR_NAME: "New Node",
},
)
await hass.async_block_till_done()
assert node.name == "New Node"
entry = ent_reg.async_get("zwave.new_node")
assert entry is not None
assert entry.unique_id == f"node-{node.node_id}"
entry = ent_reg.async_get("binary_sensor.new_node_mock_value")
assert entry is not None
assert entry.unique_id == f"{node.node_id}-{value.object_id}"
device = dev_reg.async_get(node_dev_id)
assert device is not None
assert device.id == old_device.id
assert device.name == node.name
device = dev_reg.async_get(device_id_b)
assert device is not None
assert device.name == f"{node.name} ({value2.instance})"
await hass.services.async_call(
"zwave",
"rename_value",
{
const.ATTR_NODE_ID: node.node_id,
const.ATTR_VALUE_ID: value.object_id,
const.ATTR_UPDATE_IDS: True,
const.ATTR_NAME: "New Label",
},
)
await hass.async_block_till_done()
entry = ent_reg.async_get("binary_sensor.new_node_new_label")
assert entry is not None
assert entry.unique_id == f"{node.node_id}-{value.object_id}"
async def test_value_discovery_existing_entity(hass, mock_openzwave):
"""Test discovery of a node."""
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_VALUE_ADDED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
assert len(mock_receivers) == 1
node = MockNode(
node_id=11,
generic=const.GENERIC_TYPE_THERMOSTAT,
specific=const.SPECIFIC_TYPE_THERMOSTAT_GENERAL_V2,
)
thermostat_mode = MockValue(
data="Heat",
data_items=["Off", "Heat"],
node=node,
command_class=const.COMMAND_CLASS_THERMOSTAT_MODE,
genre=const.GENRE_USER,
)
setpoint_heating = MockValue(
data=22.0,
node=node,
command_class=const.COMMAND_CLASS_THERMOSTAT_SETPOINT,
index=1,
genre=const.GENRE_USER,
)
await hass.async_add_executor_job(mock_receivers[0], node, thermostat_mode)
await hass.async_block_till_done()
def mock_update(self):
self.hass.add_job(self.async_update_ha_state)
with patch.object(
zwave.node_entity.ZWaveBaseEntity, "maybe_schedule_update", new=mock_update
):
await hass.async_add_executor_job(mock_receivers[0], node, setpoint_heating)
await hass.async_block_till_done()
assert (
hass.states.get("climate.mock_node_mock_value").attributes["temperature"]
== 22.0
)
assert (
hass.states.get("climate.mock_node_mock_value").attributes[
"current_temperature"
]
is None
)
with patch.object(
zwave.node_entity.ZWaveBaseEntity, "maybe_schedule_update", new=mock_update
):
temperature = MockValue(
data=23.5,
node=node,
index=1,
command_class=const.COMMAND_CLASS_SENSOR_MULTILEVEL,
genre=const.GENRE_USER,
units="C",
)
await hass.async_add_executor_job(mock_receivers[0], node, temperature)
await hass.async_block_till_done()
assert (
hass.states.get("climate.mock_node_mock_value").attributes["temperature"]
== 22.0
)
assert (
hass.states.get("climate.mock_node_mock_value").attributes[
"current_temperature"
]
== 23.5
)
async def test_value_discovery_legacy_thermostat(hass, mock_openzwave):
"""Test discovery of a node. Special case for legacy thermostats."""
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_VALUE_ADDED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
assert len(mock_receivers) == 1
node = MockNode(
node_id=11,
generic=const.GENERIC_TYPE_THERMOSTAT,
specific=const.SPECIFIC_TYPE_SETPOINT_THERMOSTAT,
)
setpoint_heating = MockValue(
data=22.0,
node=node,
command_class=const.COMMAND_CLASS_THERMOSTAT_SETPOINT,
index=1,
genre=const.GENRE_USER,
)
await hass.async_add_executor_job(mock_receivers[0], node, setpoint_heating)
await hass.async_block_till_done()
assert (
hass.states.get("climate.mock_node_mock_value").attributes["temperature"]
== 22.0
)
async def test_power_schemes(hass, mock_openzwave):
"""Test power attribute."""
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_VALUE_ADDED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
assert len(mock_receivers) == 1
node = MockNode(node_id=11, generic=const.GENERIC_TYPE_SWITCH_BINARY)
switch = MockValue(
data=True,
node=node,
index=12,
instance=13,
command_class=const.COMMAND_CLASS_SWITCH_BINARY,
genre=const.GENRE_USER,
type=const.TYPE_BOOL,
)
await hass.async_add_executor_job(mock_receivers[0], node, switch)
await hass.async_block_till_done()
assert hass.states.get("switch.mock_node_mock_value").state == "on"
assert (
"power_consumption"
not in hass.states.get("switch.mock_node_mock_value").attributes
)
def mock_update(self):
self.hass.add_job(self.async_update_ha_state)
with patch.object(
zwave.node_entity.ZWaveBaseEntity, "maybe_schedule_update", new=mock_update
):
power = MockValue(
data=23.5,
node=node,
index=const.INDEX_SENSOR_MULTILEVEL_POWER,
instance=13,
command_class=const.COMMAND_CLASS_SENSOR_MULTILEVEL,
genre=const.GENRE_USER, # to avoid exception
)
await hass.async_add_executor_job(mock_receivers[0], node, power)
await hass.async_block_till_done()
assert (
hass.states.get("switch.mock_node_mock_value").attributes["power_consumption"]
== 23.5
)
async def test_network_ready(hass, mock_openzwave):
"""Test Node network ready event."""
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_ALL_NODES_QUERIED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
assert len(mock_receivers) == 1
events = []
def listener(event):
events.append(event)
hass.bus.async_listen(const.EVENT_NETWORK_COMPLETE, listener)
await hass.async_add_executor_job(mock_receivers[0])
await hass.async_block_till_done()
assert len(events) == 1
async def test_network_complete(hass, mock_openzwave):
"""Test Node network complete event."""
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_AWAKE_NODES_QUERIED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
assert len(mock_receivers) == 1
events = []
def listener(event):
events.append(event)
hass.bus.async_listen(const.EVENT_NETWORK_READY, listener)
await hass.async_add_executor_job(mock_receivers[0])
await hass.async_block_till_done()
assert len(events) == 1
async def test_network_complete_some_dead(hass, mock_openzwave):
"""Test Node network complete some dead event."""
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_ALL_NODES_QUERIED_SOME_DEAD:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
assert len(mock_receivers) == 1
events = []
def listener(event):
events.append(event)
hass.bus.async_listen(const.EVENT_NETWORK_COMPLETE_SOME_DEAD, listener)
await hass.async_add_executor_job(mock_receivers[0])
await hass.async_block_till_done()
assert len(events) == 1
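# The three network-event tests above share one shape; a parametrized version
# is sketched here for illustration only (the underscore prefix keeps pytest
# from collecting it).
@pytest.mark.parametrize(
    "signal, event",
    [
        (MockNetwork.SIGNAL_ALL_NODES_QUERIED, const.EVENT_NETWORK_COMPLETE),
        (MockNetwork.SIGNAL_AWAKE_NODES_QUERIED, const.EVENT_NETWORK_READY),
        (
            MockNetwork.SIGNAL_ALL_NODES_QUERIED_SOME_DEAD,
            const.EVENT_NETWORK_COMPLETE_SOME_DEAD,
        ),
    ],
)
async def _network_event_sketch(hass, mock_openzwave, signal, event):
    receivers = []

    def mock_connect(receiver, sig, *args, **kwargs):
        if sig == signal:
            receivers.append(receiver)

    with patch("pydispatch.dispatcher.connect", new=mock_connect):
        await async_setup_component(hass, "zwave", {"zwave": {}})
        await hass.async_block_till_done()
    events = []
    hass.bus.async_listen(event, events.append)
    await hass.async_add_executor_job(receivers[0])
    await hass.async_block_till_done()
    assert len(events) == 1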
async def test_entity_discovery(
hass, mock_discovery, mock_import_module, mock_values, mock_openzwave
):
"""Test the creation of a new entity."""
(node, value_class, mock_schema) = mock_values
registry = mock_registry(hass)
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_VALUE_ADDED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
assert len(mock_receivers) == 1
entity_id = "mock_component.mock_node_mock_value"
zwave_config = {"zwave": {}}
device_config = {entity_id: {}}
with patch.object(zwave, "discovery", mock_discovery):
values = zwave.ZWaveDeviceEntityValues(
hass=hass,
schema=mock_schema,
primary_value=value_class.primary,
zwave_config=zwave_config,
device_config=device_config,
registry=registry,
)
assert not mock_discovery.async_load_platform.called
assert values.primary is value_class.primary
assert len(list(values)) == 3
assert sorted(list(values), key=lambda a: id(a)) == sorted(
[value_class.primary, None, None], key=lambda a: id(a)
)
with patch.object(zwave, "discovery", mock_discovery), patch.object(
zwave, "import_module", mock_import_module
):
values.check_value(value_class.secondary)
await hass.async_block_till_done()
assert mock_discovery.async_load_platform.called
assert len(mock_discovery.async_load_platform.mock_calls) == 1
args = mock_discovery.async_load_platform.mock_calls[0][1]
assert args[0] == hass
assert args[1] == "mock_component"
assert args[2] == "zwave"
assert args[3] == {
const.DISCOVERY_DEVICE: mock_import_module().get_device().unique_id
}
assert args[4] == zwave_config
assert values.secondary is value_class.secondary
assert len(list(values)) == 3
assert sorted(list(values), key=lambda a: id(a)) == sorted(
[value_class.primary, value_class.secondary, None], key=lambda a: id(a)
)
mock_discovery.async_load_platform.reset_mock()
with patch.object(zwave, "discovery", mock_discovery):
values.check_value(value_class.optional)
values.check_value(value_class.duplicate_secondary)
values.check_value(value_class.no_match_value)
await hass.async_block_till_done()
assert not mock_discovery.async_load_platform.called
assert values.optional is value_class.optional
assert len(list(values)) == 3
assert sorted(list(values), key=lambda a: id(a)) == sorted(
[value_class.primary, value_class.secondary, value_class.optional],
key=lambda a: id(a),
)
assert values._entity.value_added.called
assert len(values._entity.value_added.mock_calls) == 1
assert values._entity.value_changed.called
assert len(values._entity.value_changed.mock_calls) == 1
async def test_entity_existing_values(
hass, mock_discovery, mock_import_module, mock_values, mock_openzwave
):
"""Test the loading of already discovered values."""
(node, value_class, mock_schema) = mock_values
registry = mock_registry(hass)
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_VALUE_ADDED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
entity_id = "mock_component.mock_node_mock_value"
zwave_config = {"zwave": {}}
device_config = {entity_id: {}}
node.values = {
value_class.primary.value_id: value_class.primary,
value_class.secondary.value_id: value_class.secondary,
value_class.optional.value_id: value_class.optional,
value_class.no_match_value.value_id: value_class.no_match_value,
}
with patch.object(zwave, "discovery", mock_discovery), patch.object(
zwave, "import_module", mock_import_module
):
values = zwave.ZWaveDeviceEntityValues(
hass=hass,
schema=mock_schema,
primary_value=value_class.primary,
zwave_config=zwave_config,
device_config=device_config,
registry=registry,
)
await hass.async_block_till_done()
assert mock_discovery.async_load_platform.called
assert len(mock_discovery.async_load_platform.mock_calls) == 1
args = mock_discovery.async_load_platform.mock_calls[0][1]
assert args[0] == hass
assert args[1] == "mock_component"
assert args[2] == "zwave"
assert args[3] == {
const.DISCOVERY_DEVICE: mock_import_module().get_device().unique_id
}
assert args[4] == zwave_config
assert not value_class.primary.enable_poll.called
assert values.primary is value_class.primary
assert values.secondary is value_class.secondary
assert values.optional is value_class.optional
assert len(list(values)) == 3
assert sorted(list(values), key=lambda a: id(a)) == sorted(
[value_class.primary, value_class.secondary, value_class.optional],
key=lambda a: id(a),
)
async def test_node_schema_mismatch(hass, mock_discovery, mock_values, mock_openzwave):
"""Test node schema mismatch."""
(node, value_class, mock_schema) = mock_values
registry = mock_registry(hass)
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_VALUE_ADDED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
entity_id = "mock_component.mock_node_mock_value"
zwave_config = {"zwave": {}}
device_config = {entity_id: {}}
node.generic = "no_match"
node.values = {
value_class.primary.value_id: value_class.primary,
value_class.secondary.value_id: value_class.secondary,
}
mock_schema[const.DISC_GENERIC_DEVICE_CLASS] = ["generic_match"]
with patch.object(zwave, "discovery", mock_discovery):
values = zwave.ZWaveDeviceEntityValues(
hass=hass,
schema=mock_schema,
primary_value=value_class.primary,
zwave_config=zwave_config,
device_config=device_config,
registry=registry,
)
values._check_entity_ready()
await hass.async_block_till_done()
assert not mock_discovery.async_load_platform.called
async def test_entity_workaround_component(
hass, mock_discovery, mock_import_module, mock_values, mock_openzwave
):
"""Test component workaround."""
(node, value_class, mock_schema) = mock_values
registry = mock_registry(hass)
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_VALUE_ADDED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
node.manufacturer_id = "010f"
node.product_type = "0b00"
value_class.primary.command_class = const.COMMAND_CLASS_SENSOR_ALARM
entity_id = "binary_sensor.mock_node_mock_value"
zwave_config = {"zwave": {}}
device_config = {entity_id: {}}
mock_schema = {
const.DISC_COMPONENT: "mock_component",
const.DISC_VALUES: {
const.DISC_PRIMARY: {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_BINARY]
}
},
}
with patch.object(
zwave, "async_dispatcher_send"
) as mock_dispatch_send, patch.object(
zwave, "discovery", mock_discovery
), patch.object(
zwave, "import_module", mock_import_module
):
values = zwave.ZWaveDeviceEntityValues(
hass=hass,
schema=mock_schema,
primary_value=value_class.primary,
zwave_config=zwave_config,
device_config=device_config,
registry=registry,
)
values._check_entity_ready()
await hass.async_block_till_done()
assert mock_dispatch_send.called
assert len(mock_dispatch_send.mock_calls) == 1
args = mock_dispatch_send.mock_calls[0][1]
assert args[1] == "zwave_new_binary_sensor"
async def test_entity_workaround_ignore(
hass, mock_discovery, mock_values, mock_openzwave
):
"""Test ignore workaround."""
(node, value_class, mock_schema) = mock_values
registry = mock_registry(hass)
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_VALUE_ADDED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
entity_id = "mock_component.mock_node_mock_value"
zwave_config = {"zwave": {}}
device_config = {entity_id: {}}
node.manufacturer_id = "010f"
node.product_type = "0301"
value_class.primary.command_class = const.COMMAND_CLASS_SWITCH_BINARY
mock_schema = {
const.DISC_COMPONENT: "mock_component",
const.DISC_VALUES: {
const.DISC_PRIMARY: {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_BINARY]
}
},
}
with patch.object(zwave, "discovery", mock_discovery):
values = zwave.ZWaveDeviceEntityValues(
hass=hass,
schema=mock_schema,
primary_value=value_class.primary,
zwave_config=zwave_config,
device_config=device_config,
registry=registry,
)
values._check_entity_ready()
await hass.async_block_till_done()
assert not mock_discovery.async_load_platform.called
async def test_entity_config_ignore(hass, mock_discovery, mock_values, mock_openzwave):
"""Test ignore config."""
(node, value_class, mock_schema) = mock_values
registry = mock_registry(hass)
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_VALUE_ADDED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
entity_id = "mock_component.mock_node_mock_value"
zwave_config = {"zwave": {}}
device_config = {entity_id: {}}
node.values = {
value_class.primary.value_id: value_class.primary,
value_class.secondary.value_id: value_class.secondary,
}
device_config = {entity_id: {zwave.CONF_IGNORED: True}}
with patch.object(zwave, "discovery", mock_discovery):
values = zwave.ZWaveDeviceEntityValues(
hass=hass,
schema=mock_schema,
primary_value=value_class.primary,
zwave_config=zwave_config,
device_config=device_config,
registry=registry,
)
values._check_entity_ready()
await hass.async_block_till_done()
assert not mock_discovery.async_load_platform.called
async def test_entity_config_ignore_with_registry(
hass, mock_discovery, mock_values, mock_openzwave
):
"""Test ignore config.
The case when the device is in entity registry.
"""
(node, value_class, mock_schema) = mock_values
registry = mock_registry(hass)
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_VALUE_ADDED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
entity_id = "mock_component.mock_node_mock_value"
zwave_config = {"zwave": {}}
device_config = {entity_id: {}}
node.values = {
value_class.primary.value_id: value_class.primary,
value_class.secondary.value_id: value_class.secondary,
}
device_config = {"mock_component.registry_id": {zwave.CONF_IGNORED: True}}
with patch.object(registry, "async_schedule_save"):
registry.async_get_or_create(
"mock_component",
zwave.DOMAIN,
"567-1000",
suggested_object_id="registry_id",
)
with patch.object(zwave, "discovery", mock_discovery):
zwave.ZWaveDeviceEntityValues(
hass=hass,
schema=mock_schema,
primary_value=value_class.primary,
zwave_config=zwave_config,
device_config=device_config,
registry=registry,
)
await hass.async_block_till_done()
assert not mock_discovery.async_load_platform.called
async def test_entity_platform_ignore(
hass, mock_discovery, mock_values, mock_openzwave
):
"""Test platform ignore device."""
(node, value_class, mock_schema) = mock_values
registry = mock_registry(hass)
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_VALUE_ADDED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
entity_id = "mock_component.mock_node_mock_value"
zwave_config = {"zwave": {}}
device_config = {entity_id: {}}
node.values = {
value_class.primary.value_id: value_class.primary,
value_class.secondary.value_id: value_class.secondary,
}
import_module = MagicMock()
platform = MagicMock()
import_module.return_value = platform
platform.get_device.return_value = None
with patch.object(zwave, "discovery", mock_discovery), patch.object(
zwave, "import_module", import_module
):
zwave.ZWaveDeviceEntityValues(
hass=hass,
schema=mock_schema,
primary_value=value_class.primary,
zwave_config=zwave_config,
device_config=device_config,
registry=registry,
)
await hass.async_block_till_done()
assert not mock_discovery.async_load_platform.called
async def test_config_polling_intensity(
hass, mock_discovery, mock_import_module, mock_values, mock_openzwave
):
"""Test polling intensity."""
(node, value_class, mock_schema) = mock_values
registry = mock_registry(hass)
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_VALUE_ADDED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
entity_id = "mock_component.mock_node_mock_value"
zwave_config = {"zwave": {}}
device_config = {entity_id: {}}
node.values = {
value_class.primary.value_id: value_class.primary,
value_class.secondary.value_id: value_class.secondary,
}
device_config = {entity_id: {zwave.CONF_POLLING_INTENSITY: 123}}
with patch.object(zwave, "discovery", mock_discovery), patch.object(
zwave, "import_module", mock_import_module
):
values = zwave.ZWaveDeviceEntityValues(
hass=hass,
schema=mock_schema,
primary_value=value_class.primary,
zwave_config=zwave_config,
device_config=device_config,
registry=registry,
)
values._check_entity_ready()
await hass.async_block_till_done()
assert mock_discovery.async_load_platform.called
assert value_class.primary.enable_poll.called
assert len(value_class.primary.enable_poll.mock_calls) == 1
assert value_class.primary.enable_poll.mock_calls[0][1][0] == 123
class TestZwave(unittest.TestCase):
"""Test zwave init."""
def test_device_config_glob_is_ordered(self):
"""Test that device_config_glob preserves order."""
conf = CONFIG_SCHEMA({"zwave": {CONF_DEVICE_CONFIG_GLOB: OrderedDict()}})
assert isinstance(conf["zwave"][CONF_DEVICE_CONFIG_GLOB], OrderedDict)
class TestZWaveServices(unittest.TestCase):
"""Tests for zwave services."""
@pytest.fixture(autouse=True)
def set_mock_openzwave(self, mock_openzwave):
"""Use the mock_openzwave fixture for this class."""
self.mock_openzwave = mock_openzwave
def setUp(self):
"""Initialize values for this testcase class."""
self.hass = get_test_home_assistant()
self.hass.start()
# Initialize zwave
setup_component(self.hass, "zwave", {"zwave": {}})
self.hass.block_till_done()
self.zwave_network = self.hass.data[DATA_NETWORK]
self.zwave_network.state = MockNetwork.STATE_READY
self.hass.bus.fire(EVENT_HOMEASSISTANT_START)
self.hass.block_till_done()
self.addCleanup(self.tear_down_cleanup)
def tear_down_cleanup(self):
"""Stop everything that was started."""
self.hass.services.call("zwave", "stop_network", {})
self.hass.block_till_done()
self.hass.stop()
def test_add_node(self):
"""Test zwave add_node service."""
self.hass.services.call("zwave", "add_node", {})
self.hass.block_till_done()
assert self.zwave_network.controller.add_node.called
assert len(self.zwave_network.controller.add_node.mock_calls) == 1
assert len(self.zwave_network.controller.add_node.mock_calls[0][1]) == 0
def test_add_node_secure(self):
"""Test zwave add_node_secure service."""
self.hass.services.call("zwave", "add_node_secure", {})
self.hass.block_till_done()
assert self.zwave_network.controller.add_node.called
assert len(self.zwave_network.controller.add_node.mock_calls) == 1
assert self.zwave_network.controller.add_node.mock_calls[0][1][0] is True
def test_remove_node(self):
"""Test zwave remove_node service."""
self.hass.services.call("zwave", "remove_node", {})
self.hass.block_till_done()
assert self.zwave_network.controller.remove_node.called
assert len(self.zwave_network.controller.remove_node.mock_calls) == 1
def test_cancel_command(self):
"""Test zwave cancel_command service."""
self.hass.services.call("zwave", "cancel_command", {})
self.hass.block_till_done()
assert self.zwave_network.controller.cancel_command.called
assert len(self.zwave_network.controller.cancel_command.mock_calls) == 1
def test_heal_network(self):
"""Test zwave heal_network service."""
self.hass.services.call("zwave", "heal_network", {})
self.hass.block_till_done()
assert self.zwave_network.heal.called
assert len(self.zwave_network.heal.mock_calls) == 1
def test_soft_reset(self):
"""Test zwave soft_reset service."""
self.hass.services.call("zwave", "soft_reset", {})
self.hass.block_till_done()
assert self.zwave_network.controller.soft_reset.called
assert len(self.zwave_network.controller.soft_reset.mock_calls) == 1
def test_test_network(self):
"""Test zwave test_network service."""
self.hass.services.call("zwave", "test_network", {})
self.hass.block_till_done()
assert self.zwave_network.test.called
assert len(self.zwave_network.test.mock_calls) == 1
def test_stop_network(self):
"""Test zwave stop_network service."""
with patch.object(self.hass.bus, "fire") as mock_fire:
self.hass.services.call("zwave", "stop_network", {})
self.hass.block_till_done()
assert self.zwave_network.stop.called
assert len(self.zwave_network.stop.mock_calls) == 1
assert mock_fire.called
assert len(mock_fire.mock_calls) == 1
assert mock_fire.mock_calls[0][1][0] == const.EVENT_NETWORK_STOP
def test_rename_node(self):
"""Test zwave rename_node service."""
self.zwave_network.nodes = {11: MagicMock()}
self.hass.services.call(
"zwave",
"rename_node",
{const.ATTR_NODE_ID: 11, const.ATTR_NAME: "test_name"},
)
self.hass.block_till_done()
assert self.zwave_network.nodes[11].name == "test_name"
def test_rename_value(self):
"""Test zwave rename_value service."""
node = MockNode(node_id=14)
value = MockValue(index=12, value_id=123456, label="Old Label")
node.values = {123456: value}
self.zwave_network.nodes = {11: node}
assert value.label == "Old Label"
self.hass.services.call(
"zwave",
"rename_value",
{
const.ATTR_NODE_ID: 11,
const.ATTR_VALUE_ID: 123456,
const.ATTR_NAME: "New Label",
},
)
self.hass.block_till_done()
assert value.label == "New Label"
def test_set_poll_intensity_enable(self):
"""Test zwave set_poll_intensity service, successful set."""
node = MockNode(node_id=14)
value = MockValue(index=12, value_id=123456, poll_intensity=0)
node.values = {123456: value}
self.zwave_network.nodes = {11: node}
assert value.poll_intensity == 0
self.hass.services.call(
"zwave",
"set_poll_intensity",
{
const.ATTR_NODE_ID: 11,
const.ATTR_VALUE_ID: 123456,
const.ATTR_POLL_INTENSITY: 4,
},
)
self.hass.block_till_done()
enable_poll = value.enable_poll
assert value.enable_poll.called
assert len(enable_poll.mock_calls) == 2
assert enable_poll.mock_calls[0][1][0] == 4
def test_set_poll_intensity_enable_failed(self):
"""Test zwave set_poll_intensity service, failed set."""
node = MockNode(node_id=14)
value = MockValue(index=12, value_id=123456, poll_intensity=0)
value.enable_poll.return_value = False
node.values = {123456: value}
self.zwave_network.nodes = {11: node}
assert value.poll_intensity == 0
self.hass.services.call(
"zwave",
"set_poll_intensity",
{
const.ATTR_NODE_ID: 11,
const.ATTR_VALUE_ID: 123456,
const.ATTR_POLL_INTENSITY: 4,
},
)
self.hass.block_till_done()
enable_poll = value.enable_poll
assert value.enable_poll.called
assert len(enable_poll.mock_calls) == 1
def test_set_poll_intensity_disable(self):
"""Test zwave set_poll_intensity service, successful disable."""
node = MockNode(node_id=14)
value = MockValue(index=12, value_id=123456, poll_intensity=4)
node.values = {123456: value}
self.zwave_network.nodes = {11: node}
assert value.poll_intensity == 4
self.hass.services.call(
"zwave",
"set_poll_intensity",
{
const.ATTR_NODE_ID: 11,
const.ATTR_VALUE_ID: 123456,
const.ATTR_POLL_INTENSITY: 0,
},
)
self.hass.block_till_done()
disable_poll = value.disable_poll
assert value.disable_poll.called
assert len(disable_poll.mock_calls) == 2
def test_set_poll_intensity_disable_failed(self):
"""Test zwave set_poll_intensity service, failed disable."""
node = MockNode(node_id=14)
value = MockValue(index=12, value_id=123456, poll_intensity=4)
value.disable_poll.return_value = False
node.values = {123456: value}
self.zwave_network.nodes = {11: node}
assert value.poll_intensity == 4
self.hass.services.call(
"zwave",
"set_poll_intensity",
{
const.ATTR_NODE_ID: 11,
const.ATTR_VALUE_ID: 123456,
const.ATTR_POLL_INTENSITY: 0,
},
)
self.hass.block_till_done()
disable_poll = value.disable_poll
assert value.disable_poll.called
assert len(disable_poll.mock_calls) == 1
def test_remove_failed_node(self):
"""Test zwave remove_failed_node service."""
self.hass.services.call("zwave", "remove_failed_node", {const.ATTR_NODE_ID: 12})
self.hass.block_till_done()
remove_failed_node = self.zwave_network.controller.remove_failed_node
assert remove_failed_node.called
assert len(remove_failed_node.mock_calls) == 1
assert remove_failed_node.mock_calls[0][1][0] == 12
def test_replace_failed_node(self):
"""Test zwave replace_failed_node service."""
self.hass.services.call(
"zwave", "replace_failed_node", {const.ATTR_NODE_ID: 13}
)
self.hass.block_till_done()
replace_failed_node = self.zwave_network.controller.replace_failed_node
assert replace_failed_node.called
assert len(replace_failed_node.mock_calls) == 1
assert replace_failed_node.mock_calls[0][1][0] == 13
def test_set_config_parameter(self):
"""Test zwave set_config_parameter service."""
value_byte = MockValue(
index=12,
command_class=const.COMMAND_CLASS_CONFIGURATION,
type=const.TYPE_BYTE,
)
value_list = MockValue(
index=13,
command_class=const.COMMAND_CLASS_CONFIGURATION,
type=const.TYPE_LIST,
data_items=["item1", "item2", "item3"],
)
value_button = MockValue(
index=14,
command_class=const.COMMAND_CLASS_CONFIGURATION,
type=const.TYPE_BUTTON,
)
value_list_int = MockValue(
index=15,
command_class=const.COMMAND_CLASS_CONFIGURATION,
type=const.TYPE_LIST,
data_items=["1", "2", "3"],
)
value_bool = MockValue(
index=16,
command_class=const.COMMAND_CLASS_CONFIGURATION,
type=const.TYPE_BOOL,
)
node = MockNode(node_id=14)
node.get_values.return_value = {
12: value_byte,
13: value_list,
14: value_button,
15: value_list_int,
16: value_bool,
}
self.zwave_network.nodes = {14: node}
# Byte
self.hass.services.call(
"zwave",
"set_config_parameter",
{
const.ATTR_NODE_ID: 14,
const.ATTR_CONFIG_PARAMETER: 12,
const.ATTR_CONFIG_VALUE: 7,
},
)
self.hass.block_till_done()
assert value_byte.data == 7
# List
self.hass.services.call(
"zwave",
"set_config_parameter",
{
const.ATTR_NODE_ID: 14,
const.ATTR_CONFIG_PARAMETER: 13,
const.ATTR_CONFIG_VALUE: "item3",
},
)
self.hass.block_till_done()
assert value_list.data == "item3"
# Button
self.hass.services.call(
"zwave",
"set_config_parameter",
{
const.ATTR_NODE_ID: 14,
const.ATTR_CONFIG_PARAMETER: 14,
const.ATTR_CONFIG_VALUE: True,
},
)
self.hass.block_till_done()
assert self.zwave_network.manager.pressButton.called
assert self.zwave_network.manager.releaseButton.called
# List of Ints
self.hass.services.call(
"zwave",
"set_config_parameter",
{
const.ATTR_NODE_ID: 14,
const.ATTR_CONFIG_PARAMETER: 15,
const.ATTR_CONFIG_VALUE: 3,
},
)
self.hass.block_till_done()
assert value_list_int.data == "3"
# Boolean Truthy
self.hass.services.call(
"zwave",
"set_config_parameter",
{
const.ATTR_NODE_ID: 14,
const.ATTR_CONFIG_PARAMETER: 16,
const.ATTR_CONFIG_VALUE: "True",
},
)
self.hass.block_till_done()
assert value_bool.data == 1
# Boolean Falsy
self.hass.services.call(
"zwave",
"set_config_parameter",
{
const.ATTR_NODE_ID: 14,
const.ATTR_CONFIG_PARAMETER: 16,
const.ATTR_CONFIG_VALUE: "False",
},
)
self.hass.block_till_done()
assert value_bool.data == 0
# Different Parameter Size
self.hass.services.call(
"zwave",
"set_config_parameter",
{
const.ATTR_NODE_ID: 14,
const.ATTR_CONFIG_PARAMETER: 19,
const.ATTR_CONFIG_VALUE: 0x01020304,
const.ATTR_CONFIG_SIZE: 4,
},
)
self.hass.block_till_done()
assert node.set_config_param.called
assert len(node.set_config_param.mock_calls) == 1
assert node.set_config_param.mock_calls[0][1][0] == 19
assert node.set_config_param.mock_calls[0][1][1] == 0x01020304
assert node.set_config_param.mock_calls[0][1][2] == 4
node.set_config_param.reset_mock()
def test_print_config_parameter(self):
"""Test zwave print_config_parameter service."""
value1 = MockValue(
index=12, command_class=const.COMMAND_CLASS_CONFIGURATION, data=1234
)
value2 = MockValue(
index=13, command_class=const.COMMAND_CLASS_CONFIGURATION, data=2345
)
node = MockNode(node_id=14)
node.values = {12: value1, 13: value2}
self.zwave_network.nodes = {14: node}
with patch.object(zwave, "_LOGGER") as mock_logger:
self.hass.services.call(
"zwave",
"print_config_parameter",
{const.ATTR_NODE_ID: 14, const.ATTR_CONFIG_PARAMETER: 13},
)
self.hass.block_till_done()
assert mock_logger.info.called
assert len(mock_logger.info.mock_calls) == 1
assert mock_logger.info.mock_calls[0][1][1] == 13
assert mock_logger.info.mock_calls[0][1][2] == 14
assert mock_logger.info.mock_calls[0][1][3] == 2345
def test_print_node(self):
"""Test zwave print_node_parameter service."""
node = MockNode(node_id=14)
self.zwave_network.nodes = {14: node}
with self.assertLogs(level="DEBUG") as mock_logger:
self.hass.services.call("zwave", "print_node", {const.ATTR_NODE_ID: 14})
self.hass.block_till_done()
assert "FOUND NODE " in mock_logger.output[1]
def test_set_wakeup(self):
"""Test zwave set_wakeup service."""
value = MockValue(index=12, command_class=const.COMMAND_CLASS_WAKE_UP)
node = MockNode(node_id=14)
node.values = {12: value}
node.get_values.return_value = node.values
self.zwave_network.nodes = {14: node}
self.hass.services.call(
"zwave", "set_wakeup", {const.ATTR_NODE_ID: 14, const.ATTR_CONFIG_VALUE: 15}
)
self.hass.block_till_done()
assert value.data == 15
node.can_wake_up_value = False
self.hass.services.call(
"zwave", "set_wakeup", {const.ATTR_NODE_ID: 14, const.ATTR_CONFIG_VALUE: 20}
)
self.hass.block_till_done()
assert value.data == 15
def test_reset_node_meters(self):
"""Test zwave reset_node_meters service."""
value = MockValue(
instance=1, index=8, data=99.5, command_class=const.COMMAND_CLASS_METER
)
reset_value = MockValue(
instance=1, index=33, command_class=const.COMMAND_CLASS_METER
)
node = MockNode(node_id=14)
node.values = {8: value, 33: reset_value}
node.get_values.return_value = node.values
self.zwave_network.nodes = {14: node}
self.hass.services.call(
"zwave",
"reset_node_meters",
{const.ATTR_NODE_ID: 14, const.ATTR_INSTANCE: 2},
)
self.hass.block_till_done()
assert not self.zwave_network.manager.pressButton.called
assert not self.zwave_network.manager.releaseButton.called
self.hass.services.call("zwave", "reset_node_meters", {const.ATTR_NODE_ID: 14})
self.hass.block_till_done()
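        # A bare reset (no instance filter) should press and release the
        # meter reset button value (index 33).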
assert self.zwave_network.manager.pressButton.called
(value_id,) = self.zwave_network.manager.pressButton.mock_calls.pop(0)[1]
assert value_id == reset_value.value_id
assert self.zwave_network.manager.releaseButton.called
(value_id,) = self.zwave_network.manager.releaseButton.mock_calls.pop(0)[1]
assert value_id == reset_value.value_id
def test_add_association(self):
"""Test zwave change_association service."""
ZWaveGroup = self.mock_openzwave.group.ZWaveGroup
group = MagicMock()
ZWaveGroup.return_value = group
value = MockValue(index=12, command_class=const.COMMAND_CLASS_WAKE_UP)
node = MockNode(node_id=14)
node.values = {12: value}
node.get_values.return_value = node.values
self.zwave_network.nodes = {14: node}
self.hass.services.call(
"zwave",
"change_association",
{
const.ATTR_ASSOCIATION: "add",
const.ATTR_NODE_ID: 14,
const.ATTR_TARGET_NODE_ID: 24,
const.ATTR_GROUP: 3,
const.ATTR_INSTANCE: 5,
},
)
self.hass.block_till_done()
assert ZWaveGroup.called
assert len(ZWaveGroup.mock_calls) == 2
assert ZWaveGroup.mock_calls[0][1][0] == 3
assert ZWaveGroup.mock_calls[0][1][2] == 14
assert group.add_association.called
assert len(group.add_association.mock_calls) == 1
assert group.add_association.mock_calls[0][1][0] == 24
assert group.add_association.mock_calls[0][1][1] == 5
def test_remove_association(self):
"""Test zwave change_association service."""
ZWaveGroup = self.mock_openzwave.group.ZWaveGroup
group = MagicMock()
ZWaveGroup.return_value = group
value = MockValue(index=12, command_class=const.COMMAND_CLASS_WAKE_UP)
node = MockNode(node_id=14)
node.values = {12: value}
node.get_values.return_value = node.values
self.zwave_network.nodes = {14: node}
self.hass.services.call(
"zwave",
"change_association",
{
const.ATTR_ASSOCIATION: "remove",
const.ATTR_NODE_ID: 14,
const.ATTR_TARGET_NODE_ID: 24,
const.ATTR_GROUP: 3,
const.ATTR_INSTANCE: 5,
},
)
self.hass.block_till_done()
assert ZWaveGroup.called
assert len(ZWaveGroup.mock_calls) == 2
assert ZWaveGroup.mock_calls[0][1][0] == 3
assert ZWaveGroup.mock_calls[0][1][2] == 14
assert group.remove_association.called
assert len(group.remove_association.mock_calls) == 1
assert group.remove_association.mock_calls[0][1][0] == 24
assert group.remove_association.mock_calls[0][1][1] == 5
def test_refresh_entity(self):
"""Test zwave refresh_entity service."""
node = MockNode()
value = MockValue(
data=False, node=node, command_class=const.COMMAND_CLASS_SENSOR_BINARY
)
power_value = MockValue(
data=50, node=node, command_class=const.COMMAND_CLASS_METER
)
values = MockEntityValues(primary=value, power=power_value)
device = get_device(node=node, values=values, node_config={})
device.hass = self.hass
device.entity_id = "binary_sensor.mock_entity_id"
self.hass.add_job(device.async_added_to_hass())
self.hass.block_till_done()
self.hass.services.call(
"zwave", "refresh_entity", {ATTR_ENTITY_ID: "binary_sensor.mock_entity_id"}
)
self.hass.block_till_done()
assert node.refresh_value.called
assert len(node.refresh_value.mock_calls) == 2
assert (
sorted(
[
node.refresh_value.mock_calls[0][1][0],
node.refresh_value.mock_calls[1][1][0],
]
)
== sorted([value.value_id, power_value.value_id])
)
def test_refresh_node(self):
"""Test zwave refresh_node service."""
node = MockNode(node_id=14)
self.zwave_network.nodes = {14: node}
self.hass.services.call("zwave", "refresh_node", {const.ATTR_NODE_ID: 14})
self.hass.block_till_done()
assert node.refresh_info.called
assert len(node.refresh_info.mock_calls) == 1
def test_set_node_value(self):
"""Test zwave set_node_value service."""
value = MockValue(index=12, command_class=const.COMMAND_CLASS_INDICATOR, data=4)
node = MockNode(node_id=14, command_classes=[const.COMMAND_CLASS_INDICATOR])
node.values = {12: value}
node.get_values.return_value = node.values
self.zwave_network.nodes = {14: node}
self.hass.services.call(
"zwave",
"set_node_value",
{
const.ATTR_NODE_ID: 14,
const.ATTR_VALUE_ID: 12,
const.ATTR_CONFIG_VALUE: 2,
},
)
self.hass.block_till_done()
assert self.zwave_network.nodes[14].values[12].data == 2
def test_set_node_value_with_long_id_and_text_value(self):
"""Test zwave set_node_value service."""
value = MockValue(
index=87512398541236578,
command_class=const.COMMAND_CLASS_SWITCH_COLOR,
data="#ff0000",
)
node = MockNode(node_id=14, command_classes=[const.COMMAND_CLASS_SWITCH_COLOR])
node.values = {87512398541236578: value}
node.get_values.return_value = node.values
self.zwave_network.nodes = {14: node}
self.hass.services.call(
"zwave",
"set_node_value",
{
const.ATTR_NODE_ID: 14,
const.ATTR_VALUE_ID: "87512398541236578",
const.ATTR_CONFIG_VALUE: "#00ff00",
},
)
self.hass.block_till_done()
assert self.zwave_network.nodes[14].values[87512398541236578].data == "#00ff00"
def test_refresh_node_value(self):
"""Test zwave refresh_node_value service."""
node = MockNode(
node_id=14,
command_classes=[const.COMMAND_CLASS_INDICATOR],
network=self.zwave_network,
)
value = MockValue(
node=node, index=12, command_class=const.COMMAND_CLASS_INDICATOR, data=2
)
value.refresh = MagicMock()
node.values = {12: value}
node.get_values.return_value = node.values
self.zwave_network.nodes = {14: node}
self.hass.services.call(
"zwave",
"refresh_node_value",
{const.ATTR_NODE_ID: 14, const.ATTR_VALUE_ID: 12},
)
self.hass.block_till_done()
assert value.refresh.called
def test_heal_node(self):
"""Test zwave heal_node service."""
node = MockNode(node_id=19)
self.zwave_network.nodes = {19: node}
self.hass.services.call("zwave", "heal_node", {const.ATTR_NODE_ID: 19})
self.hass.block_till_done()
assert node.heal.called
assert len(node.heal.mock_calls) == 1
def test_test_node(self):
"""Test the zwave test_node service."""
node = MockNode(node_id=19)
self.zwave_network.nodes = {19: node}
self.hass.services.call("zwave", "test_node", {const.ATTR_NODE_ID: 19})
self.hass.block_till_done()
assert node.test.called
assert len(node.test.mock_calls) == 1
|
|
#!/usr/bin/python
""" Script for computing observables from a transport+hydro hybrid output """
import argparse
import os.path
import math
import array
from collections import defaultdict
from hic import flow
from hybrid_analysis.event_selection import centrality_filters as cf
from hybrid_analysis.file_reader import hybrid_reader as reader
from hybrid_analysis.multiplicity import distributions as mlt
from hybrid_analysis.multiplicity import counters
from hybrid_analysis.v_n import cumulants as cumu
from hybrid_analysis.v_n import eventplane as ep
# Initialization
events = 0
# Nch at |eta| < 0.5
# To be compared with STAR data
# PRC79, 034909 (2009)
nch_etacut = 0.5
nch_mid = 0
# Integrated yields
# To be compared with PHOBOS data
# PRC75, 024910 (2007)
midy_min = -0.1
midy_max = 0.4
integrated_p = 0.0
integrated_pbar = 0.0
# mean pT
# To be compared with STAR data
# PRC79, 034909 (2009)
meanpt_deltay = 0.2
ptsums = {}
particleidlist = [211, -211, 321, -321, 2212, -2212]
for particletype in particleidlist:
ptsums[particletype] = (0.0, 0)
# pT spectra
# To be compared with PHOBOS data
# PRC75, 024910 (2007)
# Rapidity point
ypoint = 0.8
deltay = 0.1
spectraptpoints = [0.25, 0.30, 0.35, 0.40, 0.50, 0.55, 0.60, 0.70,
1.0, 1.2, 1.55, 1.85, 2.2]
spectraptbinw = 0.05
dndptsums = {}
for particletype in particleidlist:
dndptsum = defaultdict(float)
dndptsums[particletype] = dndptsum
# Pseudorapidity distribution
# To be compared with PHOBOS data
# PRC83, 024913 (2011)
deltaeta = 0.2
etamin = -5.3
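# Number of bins spanning [etamin, -etamin]; the +0.5 rounds to the nearest integer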
etabins = int(-2 * etamin / deltaeta + 0.5)
nchetapoints = [(etamin + deltaeta * i) for i in range(0, etabins+1)]
dndetasum = defaultdict(float)
# Flow analysis
# To be compared with STAR data
# PRC86, 054908 (2012)
cumulant_etacut = 1.0
flowptpoints = [0.26, 0.44, 0.64, 0.84, 1.04, 1.24, 1.44, 1.64, 1.86]
flowptbinw = 0.2
qcharges = {}
qphis = {}
for ptpoint in flowptpoints:
qcharges[ptpoint] = []
qphis[ptpoint] = []
vn_event_etacut = 0.3
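# Eight running sums filled by ep.v2v3event and read back by ep.v2v3mean
# (slot layout is defined in hybrid_analysis.v_n.eventplane)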
vn_event_sums = array.array('d', [0.0]*8)
observables = ["nch_mid", "np_integ", "meanpt", "dndpt", "dndeta", "v24", "v2ep"]
# parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("-b", "--impact", type=float, nargs=2,
metavar=('bmin', 'bmax'),
help="impact parameter range")
parser.add_argument("-n", "--npart", type=int, nargs=2,
metavar=('nmin', 'nmax'),
help="participant range")
parser.add_argument("-o", "--only", nargs=1, action='append',
choices=observables,
help="run only listed parts of analysis")
parser.add_argument("-x", "--exclude", nargs=1, action='append',
choices=observables,
help="exclude listed parts of analysis")
parser.add_argument("datapath",
help="path to datafiles")
args = parser.parse_args()
# Centrality filtering
datafiles = []
if args.impact:
bmin = 0.0
bmax = 0.0
if args.impact[0] < args.impact[1]:
bmin = args.impact[0]
bmax = args.impact[1]
else:
bmin = args.impact[1]
bmax = args.impact[0]
print "Impact parameter range:", bmin, bmax
cfilter = cf.CentralityFilter(args.datapath, b_min=bmin, b_max=bmax)
datafiles = cfilter.filter_events()
elif args.npart:
npmin = 0.0
npmax = 0.0
if args.npart[0] < args.npart[1]:
npmin = args.npart[0]
npmax = args.npart[1]
else:
npmin = args.npart[1]
npmax = args.npart[0]
print "Npart range:", npmin, npmax
cfilter = cf.CentralityFilter(args.datapath, npart_min=npmin,
npart_max=npmax)
datafiles = cfilter.filter_events()
analysis = set()
for obs in observables:
# Because 'append', args.only and args.exclude
# both return a list of 1-element lists
if args.only and [obs] in args.only:
analysis.add(obs)
elif args.exclude and [obs] not in args.exclude:
analysis.add(obs)
elif not args.only and not args.exclude:
analysis.add(obs)
# Data analysis
files = 0
skipped_files = 0
for datafile in datafiles:
if files%100 == 0:
print "Files read:", files
files += 1
if not os.path.isfile(datafile):
skipped_files += 1
continue
with open(datafile, 'r') as f:
reading = True
while reading:
particlelist = reader.next_text_event(f)
if not particlelist:
reading = False
continue
events += 1
if "nch_mid" in analysis:
nch_mid += sum([1 for x in particlelist
if (abs(x.charge) > 0
and abs(x.pseudorap) < nch_etacut)])
if "np_integ" in analysis:
integrated_p += sum([1 for x in particlelist
if (x.ptype == 2212
and x.rap > midy_min
and x.rap < midy_max)])
integrated_pbar += sum([1 for x in particlelist
if (x.ptype == -2212
and x.rap > midy_min
and x.rap < midy_max)])
if "meanpt" in analysis:
counters.ptcount(particlelist, particleidlist,
ptsums, deltay=meanpt_deltay)
if "dndpt" in analysis:
mlt.ptdistr(particlelist, particleidlist, spectraptpoints,
spectraptbinw, ypoint, deltay, dndptsums)
if "dndeta" in analysis:
mlt.etadistr(particlelist, nchetapoints, deltaeta, dndetasum)
if "v24" in analysis:
for ptpoint in flowptpoints:
minpt = ptpoint - flowptbinw / 2.0
maxpt = ptpoint + flowptbinw / 2.0
(ncharges, phis) = cumu.charged_phis(particlelist,
ptmin=minpt,
ptmax=maxpt,
etacut=cumulant_etacut)
qcharges[ptpoint].append(ncharges)
qphis[ptpoint].append(phis)
if "v2ep" in analysis:
ep.v2v3event(particlelist, vn_event_sums,
ptmin=0.2, ptmax=2.0, etacut=vn_event_etacut)
print "Attempted to read", files, "files in total, failures:", skipped_files
# Analysis output
if "nch_mid" in analysis:
print "Nch for |eta| <", nch_etacut
    print float(nch_mid) / events  # float() avoids Python 2 integer division
if "np_integ" in analysis:
yrange = midy_max - midy_min
print "Integrated yields at midrapidity:",
print midy_min, "< y <", midy_max
print "Proton Antiproton"
print integrated_p / yrange / events, integrated_pbar / yrange / events
if "meanpt" in analysis:
print "<pT> at |y| <", meanpt_deltay / 2
for ptype in ptsums:
print ptype
try:
print ptsums[ptype][0] / ptsums[ptype][1]
except ZeroDivisionError:
print 0.0
if "dndpt" in analysis:
print "dn/dpT at y =", ypoint
for ptype in dndptsums:
print ptype
for ptpoint in spectraptpoints:
print ptpoint,
            print dndptsums[ptype][ptpoint] / events / deltay / spectraptbinw / (2 * math.pi)
if "dndeta" in analysis:
print "Average Nch:", sum(dndetasum.values()) * deltaeta / events
for eta in nchetapoints:
print eta, dndetasum[eta] / events / deltaeta
if "v24" in analysis:
print "Flow cumulant analysis for pseudorapidity <", cumulant_etacut
print "pT v2{2} v2{4}"
for ptpoint in flowptpoints:
q2_list = []
q4_list = []
for event in qphis[ptpoint]:
q2_list.append(flow.qn(event, 2))
q4_list.append(flow.qn(event, 4))
vnk = flow.Cumulant(qcharges[ptpoint], q2=q2_list, q4=q4_list)
v22 = vnk.flow(2, 2)
v24 = vnk.flow(2, 4)
print ptpoint, v22, v24
if "v2ep" in analysis:
print "Flow event plane analysis for pseudorapidity <", vn_event_etacut
ep.v2v3mean(vn_event_sums, events)
|
|
from toolz import curry
import numpy as np
import pandas as pd
from nfldata.common import process_time_col
from nfldata.lookup import score_before_time
offense_team_stat_columns = [
'rushing_att',
'rushing_yds',
'rushing_tds',
'rushing_twoptm',
'fumbles_lost',
'passing_att',
'passing_cmp',
'passing_incmp',
'passing_int',
'passing_sk',
'passing_yds',
'passing_tds',
'passing_twoptm',
'kicking_xpa',
'kicking_xpmade',
'kicking_fga',
'kicking_fgmissed',
'kicking_fgm',
]
defense_team_stat_columns = [
'defense_int',
'defense_int_tds',
'defense_frec',
'defense_frec_tds',
'defense_misc_tds',
'defense_puntblk',
'defense_fgblk',
'defense_safe',
'defense_sk',
]
special_team_stat_columns = [
'kickret_tds',
'puntret_tds',
]
def player_stats_by_game(connection, include_preseason=False):
sum_columns = [
'fumbles_lost',
'kicking_fga',
'kicking_fgm',
'kicking_xpa',
'kicking_xpmade',
'kickret_tds',
'passing_att',
'passing_cmp',
'passing_incmp',
'passing_int',
'passing_sk',
'passing_tds',
'passing_twoptm',
'passing_yds',
'puntret_tds',
'receiving_rec',
'receiving_tar',
'receiving_tds',
'receiving_twoptm',
'receiving_yds',
'rushing_att',
'rushing_tds',
'rushing_twoptm',
'rushing_yds',
]
positions = [
'FB',
'K',
'QB',
'RB',
'TE',
'WR',
'UNK',
]
query = """
SELECT player_id, position, play_player.team, gsis_id, {}
FROM play_player
INNER JOIN player USING(player_id)
{}
WHERE position IN %(positions)s
{}
GROUP BY player_id, position, play_player.team, gsis_id
""".format(
', '.join(_sum_query(col) for col in sum_columns),
'' if include_preseason else 'INNER JOIN game USING(gsis_id)',
'' if include_preseason else "AND season_type != 'Preseason'",
)
return pd.read_sql_query(
query, connection,
params=dict(positions=tuple(positions)),
index_col=['gsis_id', 'player_id'],
).sort_index()
def team_stats_by_drive(connection, include_preseason=False):
sum_columns_sql = ', '.join(_sum_query(col) for col in offense_team_stat_columns)
team_sums = pd.read_sql_query(
"""SELECT gsis_id, drive_id, {}
FROM drive
INNER JOIN agg_play USING(gsis_id, drive_id)
{}
GROUP BY gsis_id, drive_id
""".format(
sum_columns_sql,
'' if include_preseason else """
INNER JOIN game USING(gsis_id)
WHERE season_type != 'Preseason'
"""
),
connection,
index_col=['gsis_id', 'drive_id'],
).sort_index()
drive = pd.read_sql_table(
'drive', connection,
index_col=['gsis_id', 'drive_id'],
).sort_index()
drive['team'] = drive['pos_team']
del drive['pos_team']
for col in ['start_field', 'end_field', 'pos_time']:
drive.loc[~drive[col].isnull(), col] = _de_parenthesize(drive.loc[~drive[col].isnull(), col])
for time_type in ['start', 'end']:
drive[time_type + '_quarter'], drive[time_type + '_time'] = process_time_col(drive[time_type + '_time'])
drive = (pd.concat([drive, team_sums], axis=1, join='inner')
.reset_index()
.set_index(['gsis_id', 'team', 'drive_id'])
.sort_index()
)
drive['offense_score'] = 0
drive['defense_score'] = 0
for name, row in drive.iterrows():
gsis_id, team, drive_id = name
scores = score_before_time(connection, gsis_id, row['start_quarter'], row['start_time'])
drive.loc[name, 'offense_score'] = scores[team]
drive.loc[name, 'defense_score'] = scores[scores.index != team][0]
return drive
def team_stats_by_game(connection, include_preseason=False):
team_stat_columns = offense_team_stat_columns + defense_team_stat_columns + special_team_stat_columns
sum_columns_sql = ', '.join(_sum_query(column) for column in team_stat_columns)
team_sums = pd.read_sql_query(
"""SELECT gsis_id, team, {}
FROM play_player
GROUP BY gsis_id, team
""".format(sum_columns_sql),
connection,
index_col=['gsis_id', 'team'],
).sort_index()
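    # Derived totals: passing plays include sacks; blocked kicks, return TDs
    # and defensive TDs are each collapsed into a single column (drop=True
    # removes the source columns afterwards)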
    sum_cols = _sum_cols(team_sums)
    team_sums['passing_plays'] = sum_cols(['passing_att', 'passing_sk'])
    team_sums['offense_plays'] = sum_cols(['passing_plays', 'rushing_att'])
    team_sums['defense_blk'] = sum_cols(['defense_puntblk', 'defense_fgblk'], drop=True)
    team_sums['defense_ret_tds'] = sum_cols(['kickret_tds', 'puntret_tds'], drop=True)
    team_sums['defense_tds'] = sum_cols(['defense_misc_tds', 'defense_frec_tds', 'defense_int_tds'], drop=True)
games = pd.melt(
pd.read_sql_table(
'game', connection,
columns=['gsis_id', 'start_time', 'week', 'season_year', 'season_type', 'home_team', 'away_team'],
),
id_vars=['gsis_id', 'start_time', 'season_type', 'season_year', 'week'],
value_vars=['home_team', 'away_team'],
value_name='team',
var_name='home',
)
games['home'] = games['home'] == 'home_team'
games = games.set_index(['gsis_id', 'team']).sort_index()
game_data = (pd.concat([games, team_sums], axis=1)
.reset_index()
.set_index(['gsis_id', 'home'])
.sort_index()
)
if not include_preseason:
game_data.drop(game_data.index[game_data['season_type'] == 'Preseason'], axis=0, inplace=True)
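    # Box-score points scored on offense: touchdowns are worth 6,
    # two-point conversions 2, field goals 3 and extra points 1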
game_data['offense_pts'] = (
game_data[
['passing_tds', 'rushing_tds', 'passing_twoptm', 'rushing_twoptm', 'kicking_fgm', 'kicking_xpmade']
] @ np.array([6, 6, 2, 2, 3, 1])
)
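    # Points allowed: flip the home/away rows within each game so every
    # team picks up its opponent's offense_pts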
    game_data['defense_ptsa'] = (
        game_data['offense_pts']
        .sort_index(level='home', ascending=False)
        .sort_index(level='gsis_id', sort_remaining=False)
        .values
    )
return game_data
def _de_parenthesize(series, type_=int):
    return series.str.strip('()').astype(type_)
def _sum_query(col):
return 'sum({0}) AS {0}'.format(col)
@curry
def _sum_cols(df, cols, drop=False):
series = df[cols].sum(axis=1)
if drop:
df.drop(cols, axis=1, inplace=True)
return series
|
|
#!/usr/bin/env python
# coding=utf-8
##########################################################################
from __future__ import print_function
import configobj
import optparse
import os
import shutil
import sys
import tempfile
import traceback
sys.path.append(os.path.abspath(
os.path.join(os.path.dirname(__file__), 'src')))
def getIncludePaths(path):
for f in os.listdir(path):
cPath = os.path.abspath(os.path.join(path, f))
if os.path.isfile(cPath) and len(f) > 3 and f.endswith('.py'):
sys.path.append(os.path.dirname(cPath))
elif os.path.isdir(cPath):
getIncludePaths(cPath)
collectors = {}
def getCollectors(path):
for f in os.listdir(path):
cPath = os.path.abspath(os.path.join(path, f))
if os.path.isfile(cPath) and len(f) > 3 and f.endswith('.py'):
modname = f[:-3]
if modname.startswith('Test'):
continue
if modname.startswith('test'):
continue
try:
# Import the module
module = __import__(modname, globals(), locals(), ['*'])
# Find the name
for attr in dir(module):
if not attr.endswith('Collector'):
continue
cls = getattr(module, attr)
if cls.__module__ != modname:
continue
if cls.__name__ not in collectors:
collectors[cls.__name__] = module
except Exception:
print("Failed to import module: %s. %s" % (
modname, traceback.format_exc()))
collectors[modname] = False
elif os.path.isdir(cPath):
getCollectors(cPath)
handlers = {}
def getHandlers(path, name=None):
for f in os.listdir(path):
cPath = os.path.abspath(os.path.join(path, f))
if os.path.isfile(cPath) and len(f) > 3 and f.endswith('.py'):
modname = f[:-3]
            if name and f != "%s.py" % name:
                continue
try:
# Import the module
module = __import__(modname, globals(), locals(), ['*'])
# Find the name
for attr in dir(module):
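                    # Keep classes named *Handler, but skip the Handler
                    # base class itself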
if ((not attr.endswith('Handler') or
attr.startswith('Handler'))):
continue
cls = getattr(module, attr)
if cls.__name__ not in handlers:
handlers[cls.__name__] = module
except Exception:
print("Failed to import module: %s. %s" % (
modname, traceback.format_exc()))
handlers[modname] = False
elif os.path.isdir(cPath):
getHandlers(cPath)
def writeDocHeader(docFile):
docFile.write("<!--")
docFile.write("This file was generated from the python source\n")
docFile.write("Please edit the source to make changes\n")
docFile.write("-->\n")
def writeDocString(docFile, name, doc):
docFile.write("%s\n" % (name))
docFile.write("=====\n")
    if doc is None:
        print("No __doc__ string for %s!" % name)
        doc = ''
    docFile.write("%s\n" % doc)
def writeDocOptionsHeader(docFile):
docFile.write("#### Options\n")
docFile.write("\n")
docFile.write("Setting | Default | Description | Type\n")
docFile.write("--------|---------|-------------|-----\n")
def writeDocOptions(docFile, options, default_options):
for option in sorted(options.keys()):
defaultOption = ''
defaultOptionType = ''
if option in default_options:
defaultOptionType = default_options[option].__class__.__name__
if isinstance(default_options[option], list):
defaultOption = ', '.join(map(str, default_options[option]))
defaultOption += ','
else:
defaultOption = str(default_options[option])
docFile.write("%s | %s | %s | %s\n"
% (option,
defaultOption,
options[option].replace("\n", '<br>\n'),
defaultOptionType))
def writeDoc(items, type_name, doc_path):
    for item in sorted(items.keys()):
# Skip configuring the basic item object
if item == type_name:
continue
if item.startswith('Test'):
continue
print("Processing %s..." % (item))
if not hasattr(items[item], item):
continue
cls = getattr(items[item], item)
item_options = None
default_options = None
try:
tmpfile = None
if type_name is "Collector":
obj = cls(config=config, handlers={})
elif type_name is "Handler":
tmpfile = tempfile.mkstemp()
obj = cls({'log_file': tmpfile[1]})
item_options = obj.get_default_config_help()
default_options = obj.get_default_config()
if type_name is "Handler":
os.remove(tmpfile[1])
except Exception as e:
print("Caught Exception {}".format(e))
docFile = open(os.path.join(doc_path, item + ".md"), 'w')
writeDocHeader(docFile)
writeDocString(docFile, item, items[item].__doc__)
writeDocOptionsHeader(docFile)
if item_options:
writeDocOptions(docFile, item_options, default_options)
if type_name is "Collector":
docFile.write("\n")
docFile.write("#### Example Output\n")
docFile.write("\n")
docFile.write("```\n")
docFile.write("__EXAMPLESHERE__\n")
docFile.write("```\n")
docFile.write("\n")
docFile.close()
##########################################################################
if __name__ == "__main__":
# Initialize Options
parser = optparse.OptionParser()
parser.add_option("-c", "--configfile",
dest="configfile",
default="/etc/diamond/diamond.conf",
help="Path to the config file")
parser.add_option("-C", "--collector",
dest="collector",
default=None,
help="Configure a single collector")
parser.add_option("-H", "--handler",
dest="handler",
default=None,
help="Configure a single handler")
parser.add_option("-p", "--print",
action="store_true",
dest="dump",
default=False,
help="Just print the defaults")
# Parse Command Line Args
(options, args) = parser.parse_args()
# Initialize Config
if os.path.exists(options.configfile):
config = configobj.ConfigObj(os.path.abspath(options.configfile))
else:
print("ERROR: Config file: %s does not exist." % (
options.configfile), file=sys.stderr)
print(("Please run python config.py -c /path/to/diamond.conf"),
file=sys.stderr)
parser.print_help(sys.stderr)
sys.exit(1)
docs_path = os.path.abspath(os.path.join(
os.path.dirname(__file__), 'docs'))
if options.collector or (not options.collector and not options.handler):
collector_path = config['server']['collectors_path']
collectors_doc_path = os.path.join(docs_path, "collectors")
getIncludePaths(collector_path)
if options.collector:
single_collector_path = os.path.join(collector_path,
options.collector)
getCollectors(single_collector_path)
else:
# Ugly hack for snmp collector overrides
getCollectors(os.path.join(collector_path, 'snmp'))
getCollectors(collector_path)
shutil.rmtree(collectors_doc_path)
os.mkdir(collectors_doc_path)
writeDoc(collectors, "Collector", collectors_doc_path)
if options.handler or (not options.collector and not options.handler):
handler_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
'src',
'diamond',
'handler'))
handlers_doc_path = os.path.join(docs_path, "handlers")
getIncludePaths(handler_path)
if options.handler:
getHandlers(handler_path, name=options.handler)
else:
getHandlers(handler_path)
shutil.rmtree(handlers_doc_path)
os.mkdir(handlers_doc_path)
writeDoc(handlers, "Handler", handlers_doc_path)
|
|
"""
flickr.py
Copyright 2004 James Clarke <james@jamesclarke.info>
THIS SOFTWARE IS SUPPLIED WITHOUT WARRANTY OF ANY KIND, AND MAY BE
COPIED, MODIFIED OR DISTRIBUTED IN ANY WAY, AS LONG AS THIS NOTICE
AND ACKNOWLEDGEMENT OF AUTHORSHIP REMAIN.
This TODO list may not include recent API changes.
TODO (see TODO comments too):
* flickr.blogs, flickr.contacts, flickr.urls
* groups
* flickr.groups.browse
* flickr.groups.getActiveList
* flickr.groups.pools.add
* flickr.groups.pools.remove
* photosets
* flickr.photosets.delete
* flickr.photosets.editMeta
* flickr.photosets.orderSets
* favorites
* flickr.favorites.add
* flickr.favorites.remove
* photos
* flickr.photos.getContactsPhotos
* flickr.photos.getPerms
* flickr.photos.setPerms
* flickr.photos.getCounts
* flickr.photos.getUntagged
* notes for photos
"""
__author__ = "James Clarke <james@jamesclarke.info>"
__version__ = "$Rev: 18 $"
__date__ = "$Date: 2004-11-13 09:26:24 +0000 (Sat, 13 Nov 2004) $"
__copyright__ = "Copyright 2004 James Clarke"
from urllib import urlencode, urlopen
from xml.dom import minidom
import sys
HOST = 'http://flickr.com'
API = '/services/rest'
API_KEY = 'your api key here'
#Set email and password for auth
email = None
password = None
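#Example usage (hypothetical username; requires a real API_KEY above):
# user = people_findByUsername('someuser')
# for photo in people_getPublicPhotos(user.id, per_page='5'):
#     print photo.title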
class FlickrError(Exception): pass
class Photo(object):
"""Represents a Flickr Photo."""
#XXX: Hopefully None wont cause problems
def __init__(self, id, owner=None, dateuploaded=None, \
title=None, description=None, ispublic=None, \
isfriend=None, isfamily=None, cancomment=None, \
canaddmeta=None, comments=None, tags=None):
"""Must specify id, rest is optional."""
self.__loaded = False
self.__id = id
self.__owner = owner
self.__dateuploaded = dateuploaded
self.__title = title
self.__description = description
self.__ispublic = ispublic
self.__isfriend = isfriend
self.__isfamily = isfamily
self.__cancomment = cancomment
self.__canaddmeta = canaddmeta
self.__comments = comments
self.__tags = tags
#property mojo, ugly
#make everything read only
#TODO: maybe make title/description modifable and have the setters
# call setMeta. Will result in two API calls instead of one
# if we change both title and description. Cleaner though!
id = property(lambda self: self._general_getattr('id'))
owner = property(lambda self: self._general_getattr('owner'))
dateuploaded = property(lambda self: \
self._general_getattr('dateuploaded'))
title = property(lambda self: self._general_getattr('title'))
description = property(lambda self: \
self._general_getattr('description'))
ispublic = property(lambda self: self._general_getattr('ispublic'))
isfriend = property(lambda self: self._general_getattr('isfriend'))
    isfamily = property(lambda self: self._general_getattr('isfamily'))
cancomment = property(lambda self: \
self._general_getattr('cancomment'))
canaddmeta = property(lambda self: \
self._general_getattr('canaddmeta'))
comments = property(lambda self: self._general_getattr('comments'))
tags = property(lambda self: self._general_getattr('tags'))
permcomment = property(lambda self: self._general_getattr('permcomment'))
permaddmeta = property(lambda self: self._general_getattr('permaddmeta'))
#XXX: I don't like this bit
# It would be nicer if I could pass the var (self.__id) into here
# But since _load_properties() modifies self.__id then the var
# is out of date when I return it.
def _general_getattr(self, var):
"""Generic get attribute function."""
if getattr(self, "_%s__%s" % (self.__class__.__name__, var)) is None \
and not self.__loaded:
self._load_properties()
return getattr(self, "_%s__%s" % (self.__class__.__name__, var))
#XXX: This is the one I like but it doesn't work
# here var is self.__id not 'id'
#def _general_getattr(self, var):
# if var is None and not self.__loaded:
# self._load_properties()
# return var
def _load_properties(self):
"""Loads the properties from Flickr."""
method = 'flickr.photos.getInfo'
data = _doget(method, photo_id=self.id)
self.__loaded = True
photo = data.rsp.photo
self.__dateuploaded = photo.dateuploaded
owner = photo.owner
self.__owner = User(owner.nsid, username=owner.username,\
realname=owner.realname,\
location=owner.location)
self.__title = photo.title.text
self.__description = photo.description.text
self.__ispublic = photo.visibility.ispublic
self.__isfriend = photo.visibility.isfriend
self.__isfamily = photo.visibility.isfamily
self.__cancomment = photo.editability.cancomment
self.__canaddmeta = photo.editability.canaddmeta
self.__comments = photo.comments.text
self.__permcomment = photo.permissions.permcomment
self.__permaddmeta = photo.permissions.permaddmeta
#TODO: Implement Notes?
self.__tags = [tag.text for tag in photo.tags.tag]
def __str__(self):
return '<Flickr Photo %s>' % self.id
def setTags(self, tags):
"""Set the tags for current photo to list tags.
(flickr.photos.settags)
"""
method = 'flickr.photos.setTags'
tags = uniq(tags)
_doget(method, auth=True, photo_id=self.id, tags=tags)
self.__tags = tags
def addTags(self, tags):
"""Adds the list of tags to current tags. (flickr.photos.addtags)
"""
method = 'flickr.photos.addTags'
if isinstance(tags, list):
tags = uniq(tags)
_doget(method, auth=True, photo_id=self.id, tags=tags)
#add new tags to old tags
try:
self.tags.extend(tags)
except TypeError:
self.tags.append(tags)
self.__tags = uniq(self.tags)
def setMeta(self, title=None, description=None):
"""Set metadata for photo. (flickr.photos.setMeta)"""
method = 'flickr.photos.setMeta'
if title is None:
title = self.title
if description is None:
description = self.description
_doget(method, auth=True, title=title, \
description=description, photo_id=self.id)
self.__title = title
self.__description = description
#TODO: I'm not too sure about this function, I would like a method
# to return all sizes but unsure on the data structure
def getURL(self, size='Medium', urlType='url'):
"""Retrieves a url for the photo. (flickr.photos.getSizes)
urlType - 'url' or 'source'
'url' - flickr page of photo
'source' - image file
"""
method = 'flickr.photos.getSizes'
data = _doget(method, photo_id=self.id)
for psize in data.rsp.sizes.size:
if psize.label == size:
return getattr(psize, urlType)
raise FlickrError, "No URL found"
class Photoset(object):
"""A Flickr photoset."""
def __init__(self, id, title, primary, photos=0, description=''):
self.__id = id
self.__title = title
self.__primary = primary
self.__description = description
self.__n = photos
id = property(lambda self: self.__id)
title = property(lambda self: self.__title)
description = property(lambda self: self.__description)
primary = property(lambda self: self.__primary)
def __len__(self):
return self.__n
def __str__(self):
return '<Flickr Photoset %s>' % self.id
def getPhotos(self):
"""Returns list of Photos."""
method = 'flickr.photosets.getPhotos'
data = _doget(method, photoset_id=self.id)
photos = data.rsp.photoset.photo
p = []
for photo in photos:
p.append(Photo(photo.id))
return p
def editPhotos(self, photos, primary=None):
"""Edit the photos in this set.
photos - photos for set
primary - primary photo (if None will used current)
"""
method = 'flickr.photosets.editPhotos'
if primary is None:
primary = self.primary
ids = [photo.id for photo in photos]
if primary.id not in ids:
ids.append(primary.id)
_doget(method, auth=True, photoset_id=self.id,\
primary_photo_id=primary.id,
photo_ids=ids)
self.__n = len(ids)
def create(cls, photo, title, description=''):
"""Create a new photoset.
photo - primary photo
"""
if not isinstance(photo, Photo):
raise TypeError, "Photo expected"
method = 'flickr.photosets.create'
data = _doget(method, auth=True, title=title,\
description=description,\
primary_photo_id=photo.id)
set = Photoset(data.rsp.photoset.id, title, Photo(photo.id),
photos=1, description=description)
return set
create = classmethod(create)
class User(object):
"""A Flickr user."""
def __init__(self, id, username=None, isadmin=None, ispro=None, \
realname=None, location=None, firstdate=None, count=None):
"""id required, rest optional."""
self.__loaded = False #so we don't keep loading data
self.__id = id
self.__username = username
self.__isadmin = isadmin
self.__ispro = ispro
self.__realname = realname
self.__location = location
self.__photos_firstdate = firstdate
self.__photos_count = count
#property fu
id = property(lambda self: self._general_getattr('id'))
username = property(lambda self: self._general_getattr('username'))
isadmin = property(lambda self: self._general_getattr('isadmin'))
ispro = property(lambda self: self._general_getattr('ispro'))
realname = property(lambda self: self._general_getattr('realname'))
location = property(lambda self: self._general_getattr('location'))
photos_firstdate = property(lambda self: \
self._general_getattr('photos_firstdate'))
photos_count = property(lambda self: \
self._general_getattr('photos_count'))
def _general_getattr(self, var):
"""Generic get attribute function."""
if getattr(self, "_%s__%s" % (self.__class__.__name__, var)) is None \
and not self.__loaded:
self._load_properties()
return getattr(self, "_%s__%s" % (self.__class__.__name__, var))
def _load_properties(self):
"""Load User properties from Flickr."""
method = 'flickr.people.getInfo'
data = _doget(method, user_id=self.__id)
self.__loaded = True
person = data.rsp.person
self.__isadmin = person.isadmin
self.__ispro = person.ispro
self.__username = person.username.text
self.__realname = person.realname.text
self.__location = person.location.text
self.__photos_firstdate = person.photos.firstdate.text
self.__photos_count = person.photos.count.text
def __str__(self):
return '<Flickr User %s>' % self.id
def getPhotosets(self):
"""Returns a list of Photosets."""
method = 'flickr.photosets.getList'
data = _doget(method, user_id=self.id)
sets = []
for photoset in data.rsp.photosets.photoset:
sets.append(Photoset(photoset.id, photoset.title,\
Photo(photoset.primary),\
description=photoset.description,
photos=photoset.photos))
return sets
class Group(object):
"""Flickr Group Pool"""
def __init__(self, id, name=None, members=None, online=None,\
privacy=None, chatid=None, chatcount=None):
self.__loaded = False
self.__id = id
self.__name = name
self.__members = members
self.__online = online
self.__privacy = privacy
self.__chatid = chatid
self.__chatcount = chatcount
id = property(lambda self: self._general_getattr('id'))
name = property(lambda self: self._general_getattr('name'))
members = property(lambda self: self._general_getattr('members'))
online = property(lambda self: self._general_getattr('online'))
privacy = property(lambda self: self._general_getattr('privacy'))
chatid = property(lambda self: self._general_getattr('chatid'))
chatcount = property(lambda self: self._general_getattr('chatcount'))
def _general_getattr(self, var):
"""Generic get attribute function."""
if getattr(self, "_%s__%s" % (self.__class__.__name__, var)) is None \
and not self.__loaded:
self._load_properties()
return getattr(self, "_%s__%s" % (self.__class__.__name__, var))
def _load_properties(self):
"""Loads the properties from Flickr."""
method = 'flickr.groups.getInfo'
data = _doget(method, group_id=self.id)
self.__loaded = True
group = data.rsp.group
        self.__name = group.name.text
        self.__members = group.members.text
        self.__online = group.online.text
        self.__privacy = group.privacy.text
        self.__chatid = group.chatid.text
        self.__chatcount = group.chatcount.text
def __str__(self):
return '<Flickr Group %s>' % self.id
def getPhotos(self, tags='', per_page='', page=''):
"""Get a list of photo objects for this group"""
method = 'flickr.groups.pools.getPhotos'
data = _doget(method, group_id=self.id, tags=tags,\
per_page=per_page, page=page)
photos = []
for photo in data.rsp.photos.photo:
photos.append(_parse_photo(photo))
return photos
#Flickr API methods
#see api docs http://www.flickr.com/services/api/
#for details of each param
#XXX: Could just use photo.tags (as you'd already have Photo object)
def tags_getListPhoto(id):
method = 'flickr.tags.getListPhoto'
data = _doget(method, photo_id=id)
return [tag.text for tag in data.rsp.photo.tags.tag]
#XXX: Should be in User as User.tags
def tags_getListUser(id=''):
method = 'flickr.tags.getListUser'
data = _doget(method, user_id=id)
return [tag.text for tag in data.rsp.who.tags.tag]
#XXX: Could be in User
def tags_getListUserPopular(id='', count=''):
#TODO: handle count? data.rsp.who.tags.tag.count
method = 'flickr.tags.getListUserPopular'
data = _doget(method, user_id=id, count=count)
return [tag.text for tag in data.rsp.who.tags.tag]
#XXX: Could be Photo.search(cls)
def photos_search(user_id='', auth=False, tags='', tag_mode='', text='',\
min_upload_date='', max_upload_date='',\
per_page='', page=''):
"""Returns a list of Photo objects.
If auth=True then will auth the user. Can see private etc
"""
method = 'flickr.photos.search'
data = _doget(method, auth=auth, user_id=user_id, \
tags=tags, tag_mode=tag_mode, text=text,\
min_upload_date=min_upload_date,\
max_upload_date=max_upload_date, per_page=per_page,\
page=page)
photos = []
for photo in data.rsp.photos.photo:
photos.append(_parse_photo(photo))
return photos
#XXX: Could be class method in User
def people_findByEmail(email):
"""Returns User object."""
method = 'flickr.people.findByEmail'
data = _doget(method, find_email=email)
user = User(data.rsp.user.id, username=data.rsp.user.username.text)
return user
def people_findByUsername(username):
"""Returns User object."""
method = 'flickr.people.findByUsername'
data = _doget(method, username=username)
user = User(data.rsp.user.id, username=data.rsp.user.username.text)
return user
#XXX: Should probably be in User as a list User.public
def people_getPublicPhotos(user_id, per_page='', page=''):
"""Returns list of Photo objects."""
method = 'flickr.people.getPublicPhotos'
data = _doget(method, user_id=user_id, per_page=per_page, page=page)
photos = []
for photo in data.rsp.photos.photo:
photos.append(_parse_photo(photo))
return photos
#XXX: Should probably be in User as User.favorites
def favorites_getList(user_id='', per_page='', page=''):
"""Returns list of Photo objects."""
method = 'flickr.favorites.getList'
data = _doget(method, auth=True, user_id=user_id, per_page=per_page,\
page=page)
photos = []
for photo in data.rsp.photos.photo:
photos.append(_parse_photo(photo))
return photos
def test_login():
method = 'flickr.test.login'
data = _doget(method, auth=True)
user = User(data.rsp.user.id, username=data.rsp.user.username.text)
return user
def test_echo():
method = 'flickr.test.echo'
data = _doget(method)
return data.rsp.stat
#useful methods
def _doget(method, auth=False, **params):
#uncomment to check you aren't killing the flickr server
#print "***** do get %s" % method
#convert lists to strings with ',' between items
for (key, value) in params.items():
if isinstance(value, list):
params[key] = ','.join([item for item in value])
url = '%s%s/?api_key=%s&method=%s&%s'% \
(HOST, API, API_KEY, method, urlencode(params))
if auth:
url = url + '&email=%s&password=%s' % (email, password)
#another useful debug print statement
#print url
    f = urlopen(url).readlines()
    rawxml = "\n".join(f)
    #print rawxml
    xml = minidom.parseString(rawxml)
data = unmarshal(xml)
if not data.rsp.stat == 'ok':
msg = "ERROR [%s]: %s" % (data.rsp.err.code, data.rsp.err.msg)
raise FlickrError, msg
return data
def _parse_photo(photo):
"""Create a Photo object from photo data."""
owner = User(photo.owner)
title = photo.title
ispublic = photo.ispublic
isfriend = photo.isfriend
isfamily = photo.isfamily
p = Photo(photo.id, owner=owner, title=title, ispublic=ispublic,\
isfriend=isfriend, isfamily=isfamily)
return p
#stolen methods
class Bag: pass
#unmarshal taken and modified from pyamazon.py
#makes the xml easy to work with
def unmarshal(element):
rc = Bag()
if isinstance(element, minidom.Element):
for key in element.attributes.keys():
setattr(rc, key, element.attributes[key].value)
childElements = [e for e in element.childNodes \
if isinstance(e, minidom.Element)]
if childElements:
for child in childElements:
key = child.tagName
if hasattr(rc, key):
                if type(getattr(rc, key)) != type([]):
setattr(rc, key, [getattr(rc, key)])
setattr(rc, key, getattr(rc, key) + [unmarshal(child)])
elif isinstance(child, minidom.Element) and \
(child.tagName == 'Details'):
# make the first Details element a key
setattr(rc,key,[unmarshal(child)])
#dbg: because otherwise 'hasattr' only tests
#dbg: on the second occurence: if there's a
#dbg: single return to a query, it's not a
#dbg: list. This module should always
#dbg: return a list of Details objects.
else:
setattr(rc, key, unmarshal(child))
else:
#jec: we'll have the main part of the element stored in .text
#jec: will break if tag <text> is also present
text = "".join([e.data for e in element.childNodes \
if isinstance(e, minidom.Text)])
setattr(rc, 'text', text)
return rc
#unique items from a list from the cookbook
def uniq(alist): # Fastest without order preserving
set = {}
map(set.__setitem__, alist, [])
return set.keys()
if __name__ == '__main__':
print test_echo()
|
|
"""Support for representing current time of the day as binary sensors."""
from datetime import datetime, timedelta
import logging
import pytz
import voluptuous as vol
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorDevice
from homeassistant.const import (
CONF_AFTER,
CONF_BEFORE,
CONF_NAME,
SUN_EVENT_SUNRISE,
SUN_EVENT_SUNSET,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.helpers.sun import get_astral_event_date, get_astral_event_next
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
ATTR_AFTER = "after"
ATTR_BEFORE = "before"
ATTR_NEXT_UPDATE = "next_update"
CONF_AFTER_OFFSET = "after_offset"
CONF_BEFORE_OFFSET = "before_offset"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_AFTER): vol.Any(cv.time, vol.All(vol.Lower, cv.sun_event)),
vol.Required(CONF_BEFORE): vol.Any(cv.time, vol.All(vol.Lower, cv.sun_event)),
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_AFTER_OFFSET, default=timedelta(0)): cv.time_period,
vol.Optional(CONF_BEFORE_OFFSET, default=timedelta(0)): cv.time_period,
}
)
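# A minimal YAML sketch of this platform's configuration (illustrative
# values; the platform key "tod" is assumed, not taken from this file):
#
# binary_sensor:
#   - platform: tod
#     name: Evening
#     after: sunset
#     before: "23:00"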
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the ToD sensors."""
if hass.config.time_zone is None:
_LOGGER.error("Timezone is not set in Home Assistant configuration")
return
after = config[CONF_AFTER]
after_offset = config[CONF_AFTER_OFFSET]
before = config[CONF_BEFORE]
before_offset = config[CONF_BEFORE_OFFSET]
name = config[CONF_NAME]
sensor = TodSensor(name, after, after_offset, before, before_offset)
async_add_entities([sensor])
def is_sun_event(event):
"""Return true if event is sun event not time."""
return event in (SUN_EVENT_SUNRISE, SUN_EVENT_SUNSET)
class TodSensor(BinarySensorDevice):
"""Time of the Day Sensor."""
def __init__(self, name, after, after_offset, before, before_offset):
"""Init the ToD Sensor..."""
self._name = name
self._time_before = self._time_after = self._next_update = None
self._after_offset = after_offset
self._before_offset = before_offset
self._before = before
self._after = after
@property
def should_poll(self):
"""Sensor does not need to be polled."""
return False
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def after(self):
"""Return the timestamp for the beginning of the period."""
return self._time_after
@property
def before(self):
"""Return the timestamp for the end of the period."""
return self._time_before
@property
def is_on(self):
"""Return True is sensor is on."""
if self.after < self.before:
return self.after <= self.current_datetime < self.before
return False
@property
def current_datetime(self):
"""Return local current datetime according to hass configuration."""
return dt_util.utcnow()
@property
def next_update(self):
"""Return the next update point in the UTC time."""
return self._next_update
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
return {
ATTR_AFTER: self.after.astimezone(self.hass.config.time_zone).isoformat(),
ATTR_BEFORE: self.before.astimezone(self.hass.config.time_zone).isoformat(),
ATTR_NEXT_UPDATE: self.next_update.astimezone(
self.hass.config.time_zone
).isoformat(),
}
def _naive_time_to_utc_datetime(self, naive_time):
"""Convert naive time from config to utc_datetime with current day."""
# get the current local date from utc time
current_local_date = self.current_datetime.astimezone(
self.hass.config.time_zone
).date()
        # calculate the UTC datetime corresponding to the local time
utc_datetime = self.hass.config.time_zone.localize(
datetime.combine(current_local_date, naive_time)
).astimezone(tz=pytz.UTC)
return utc_datetime
    def _calculate_initial_boundary_time(self):
        """Calculate internal absolute time boundaries."""
nowutc = self.current_datetime
# If after value is a sun event instead of absolute time
if is_sun_event(self._after):
# Calculate the today's event utc time or
# if not available take next
after_event_date = get_astral_event_date(
self.hass, self._after, nowutc
) or get_astral_event_next(self.hass, self._after, nowutc)
else:
# Convert local time provided to UTC today
# datetime.combine(date, time, tzinfo) is not supported
# in python 3.5. The self._after is provided
# with hass configured TZ not system wide
after_event_date = self._naive_time_to_utc_datetime(self._after)
self._time_after = after_event_date
# If before value is a sun event instead of absolute time
if is_sun_event(self._before):
# Calculate the today's event utc time or if not available take
# next
before_event_date = get_astral_event_date(
self.hass, self._before, nowutc
) or get_astral_event_next(self.hass, self._before, nowutc)
# Before is earlier than after
if before_event_date < after_event_date:
# Take next day for before
before_event_date = get_astral_event_next(
self.hass, self._before, after_event_date
)
else:
# Convert local time provided to UTC today, see above
before_event_date = self._naive_time_to_utc_datetime(self._before)
# It is safe to add timedelta days=1 to UTC as there is no DST
if before_event_date < after_event_date + self._after_offset:
before_event_date += timedelta(days=1)
self._time_before = before_event_date
# Add offset to utc boundaries according to the configuration
self._time_after += self._after_offset
self._time_before += self._before_offset
def _turn_to_next_day(self):
"""Turn to to the next day."""
if is_sun_event(self._after):
self._time_after = get_astral_event_next(
self.hass, self._after, self._time_after - self._after_offset
)
self._time_after += self._after_offset
else:
# Offset is already there
self._time_after += timedelta(days=1)
if is_sun_event(self._before):
self._time_before = get_astral_event_next(
self.hass, self._before, self._time_before - self._before_offset
)
self._time_before += self._before_offset
else:
# Offset is already there
self._time_before += timedelta(days=1)
async def async_added_to_hass(self):
"""Call when entity about to be added to Home Assistant."""
        self._calculate_initial_boundary_time()
self._calculate_next_update()
self._point_in_time_listener(dt_util.now())
def _calculate_next_update(self):
"""Datetime when the next update to the state."""
now = self.current_datetime
if now < self.after:
self._next_update = self.after
return
if now < self.before:
self._next_update = self.before
return
self._turn_to_next_day()
self._next_update = self.after
@callback
def _point_in_time_listener(self, now):
"""Run when the state of the sensor should be updated."""
self._calculate_next_update()
self.async_schedule_update_ha_state()
async_track_point_in_utc_time(
self.hass, self._point_in_time_listener, self.next_update
)
|
|
# coding: utf-8
"""
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
The version of the OpenAPI document: 1.1.2-pre.0
Contact: blah@cliffano.com
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class GithubRepositorypermissions(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'admin': 'bool',
'push': 'bool',
'pull': 'bool',
'_class': 'str'
}
attribute_map = {
'admin': 'admin',
'push': 'push',
'pull': 'pull',
'_class': '_class'
}
def __init__(self, admin=None, push=None, pull=None, _class=None, local_vars_configuration=None): # noqa: E501
"""GithubRepositorypermissions - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._admin = None
self._push = None
self._pull = None
self.__class = None
self.discriminator = None
if admin is not None:
self.admin = admin
if push is not None:
self.push = push
if pull is not None:
self.pull = pull
if _class is not None:
self._class = _class
@property
def admin(self):
"""Gets the admin of this GithubRepositorypermissions. # noqa: E501
:return: The admin of this GithubRepositorypermissions. # noqa: E501
:rtype: bool
"""
return self._admin
@admin.setter
def admin(self, admin):
"""Sets the admin of this GithubRepositorypermissions.
:param admin: The admin of this GithubRepositorypermissions. # noqa: E501
:type admin: bool
"""
self._admin = admin
@property
def push(self):
"""Gets the push of this GithubRepositorypermissions. # noqa: E501
:return: The push of this GithubRepositorypermissions. # noqa: E501
:rtype: bool
"""
return self._push
@push.setter
def push(self, push):
"""Sets the push of this GithubRepositorypermissions.
:param push: The push of this GithubRepositorypermissions. # noqa: E501
:type push: bool
"""
self._push = push
@property
def pull(self):
"""Gets the pull of this GithubRepositorypermissions. # noqa: E501
:return: The pull of this GithubRepositorypermissions. # noqa: E501
:rtype: bool
"""
return self._pull
@pull.setter
def pull(self, pull):
"""Sets the pull of this GithubRepositorypermissions.
:param pull: The pull of this GithubRepositorypermissions. # noqa: E501
:type pull: bool
"""
self._pull = pull
@property
def _class(self):
"""Gets the _class of this GithubRepositorypermissions. # noqa: E501
:return: The _class of this GithubRepositorypermissions. # noqa: E501
:rtype: str
"""
return self.__class
@_class.setter
def _class(self, _class):
"""Sets the _class of this GithubRepositorypermissions.
:param _class: The _class of this GithubRepositorypermissions. # noqa: E501
:type _class: str
"""
self.__class = _class
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GithubRepositorypermissions):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, GithubRepositorypermissions):
return True
return self.to_dict() != other.to_dict()
|
|
import pytest
from . import testUtil as ipwb_test
from ipwb import replay
from time import sleep
import requests
import urllib
# Successful retrieval
# Accurate retrieval
# Comprehensive retrieval of sub-resources
@pytest.mark.parametrize("warc,lookup,has_md_header", [
('HTTP404.warc', 'memento/20200202100000/memento.us/', True),
('HTTP404.warc', 'memento/20200202100000/memento.ca/', False),
('HTTP404.warc', 'loremipsum', False)])
def test_replay_404(warc, lookup, has_md_header):
ipwb_test.start_replay(warc)
resp = requests.get(f'http://localhost:5000/{lookup}',
allow_redirects=False)
assert resp.status_code == 404
if has_md_header:
assert 'Memento-Datetime' in resp.headers
else:
assert 'Memento-Datetime' not in resp.headers
ipwb_test.stop_replay()
@pytest.mark.parametrize("warc,lookup,status,location", [
('salam-home.warc', 'memento/*/cs.odu.edu/~salam/', 302,
'/memento/20160305192247/cs.odu.edu/~salam/'),
('1memento.warc', 'memento/*/memento.us', 302,
'/memento/20130202100000/memento.us/'),
('2mementos.warc', 'memento/*/memento.us', 200, None),
('salam-home.warc', 'memento/*/?url=cs.odu.edu/~salam/', 301,
'/memento/*/cs.odu.edu/~salam/'),
('1memento.warc', 'memento/*/?url=memento.us', 301,
'/memento/*/memento.us'),
('2mementos.warc', 'memento/*/?url=memento.us', 301,
'/memento/*/memento.us'),
('2mementos_queryString.warc',
'/memento/20130202100000/memento.us/' +
'index.php?anotherval=ipsum&someval=lorem', 200, None),
])
def test_replay_search(warc, lookup, status, location):
ipwb_test.start_replay(warc)
resp = requests.get(f'http://localhost:5000/{lookup}',
allow_redirects=False)
assert resp.status_code == status
if location is not None: # Allow for checks w/o redirects
assert resp.headers.get('location') == location
ipwb_test.stop_replay()
def test_replay_dated_memento():
ipwb_test.start_replay('salam-home.warc')
url = 'http://localhost:5000/memento/{}/cs.odu.edu/~salam/'
dest = '/memento/20160305192247/cs.odu.edu/~salam/'
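    # Digit strings that cannot be valid datetimes (truncated or with
    # out-of-range fields) are rejected outright with HTTP 400.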
invalid_dts = [
'18',
'20181',
'201800',
'20180132',
'2018010226',
'20181301000000',
'20180932000000',
'20180230000000',
'20180102263127',
]
for dt in invalid_dts:
resp = requests.get(url.format(dt), allow_redirects=False)
assert resp.status_code == 400
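    # Values containing non-digit characters are not treated as datetimes,
    # so these lookups fall through to a plain 404.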
    typo_dts = [
'foo',
'201l',
'2018010100000O',
'20181126134257.123',
]
    for dt in typo_dts:
resp = requests.get(url.format(dt), allow_redirects=False)
assert resp.status_code == 404
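    # Valid datetime prefixes at any precision redirect (302) to the
    # closest memento.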
valid_dts = [
'2018',
'201811',
'20181126',
'2018112613',
'201811261342',
'20181126134257',
]
for dt in valid_dts:
resp = requests.get(url.format(dt), allow_redirects=False)
assert resp.status_code == 302
assert resp.headers.get('location') == dest
resp = requests.get(url.format('20160305192247'), allow_redirects=False)
assert resp.status_code == 200
ipwb_test.stop_replay()
@pytest.mark.parametrize("warc,index,tmformat,urim", [
('5mementos.warc', '5mementos.cdxj', 'cdxj', 'memento.us'),
('5mementos.warc', '5mementos.link', 'link', 'memento.us')
])
def test_generate_timemap(warc, index, tmformat, urim):
ipwb_test.start_replay(warc)
resp = requests.get(f'http://localhost:5000/timemap/{tmformat}/{urim}',
allow_redirects=False)
    with open(f'samples/indexes/{index}', 'r') as index_file:
        assert index_file.read().encode('utf-8') == resp.content
ipwb_test.stop_replay()
@pytest.mark.skip(reason='not implemented')
def test_retrieveWARCRecord_fromIPFSHash():
pass
@pytest.mark.skip(reason='not implemented')
def test_retrieveWARCRecord_fromLocalCDXJFile():
pass
@pytest.mark.skip(reason='not implemented')
def test_retrieveWARCRecord_fromRemoteCDXJFile_ByIPFSHash():
pass
@pytest.mark.skip(reason='not implemented')
def test_retrieveWARCRecord_fromRemoteCDXJFile_ByHTTP():
pass
@pytest.mark.skip(reason='not implemented')
def test_retrieveWARCRecord_fromRemoteCDXJFile_ByHTTPS():
pass
@pytest.mark.skip(reason='not implemented')
def test_retrieveWARCRecord_fromRemoteCDXJFile_ByFTP():
pass
@pytest.mark.skip(reason='not implemented')
def test_retrieveWARCRecord_fromRemoteCDXJFile_ByBitTorrentMagnetLink():
pass
@pytest.mark.skip(reason='not implemented')
def test_retrieveWARCRecord_fromRemoteCDXJFile_BySMB():
pass
@pytest.mark.skip(reason='not implemented')
def test_accuracy_retrievedContent_vsWARC():
pass
@pytest.mark.skip(reason='not implemented')
def test_availability_subResources():
pass
@pytest.mark.skip(reason='not implemented')
def test_inclusionInWebpage_selectResources():
pass
@pytest.mark.skip(reason='not implemented')
def test_exclusionInWebpage_selectIrrelevantResources():
pass
@pytest.mark.skip(reason='not implemented')
def test_fileImport_nonCDXJ(): # Fail w/ friendly message when non-cdxj
pass
@pytest.mark.skip(reason='not implemented')
def test_helpWithoutDaemon(): # See #244
pass
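# The daemon is expected on port 5001; a 404 for the root path still proves
# the server came up.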
def test_unit_command_daemon():
replay.command_daemon('start')
sleep(10)
try:
urllib.request.urlopen('http://localhost:5001')
except urllib.error.HTTPError as e:
assert e.code == 404
    except Exception as e:
        assert False, f'unexpected exception: {e}'
@pytest.mark.parametrize("expected,input", [
(True, 'http://example.com'),
(True, 'https://example.com'),
(True, 'HTTP://EXAMPLE.COM'),
(True, 'HTTPS://EXAMPLE.COM'),
(True, 'http://example.com/'),
(True, 'http://example.com/foo.bar'),
(True, 'https://www.example.com/foo?a=b&c=d'),
(False, ''),
(False, 'foo'),
(False, 'foo/bar.baz'),
(False, 'foo?a=b&c=d'),
(False, '/'),
(False, '/foo'),
(False, '/foo/bar.baz'),
(False, '/foo?a=b&c=d'),
(False, './'),
(False, './foo'),
(False, './foo/bar.baz'),
(False, './foo?a=b&c=d'),
(False, '../'),
(False, '../foo'),
(False, '../foo/bar.baz'),
(False, '../foo?a=b&c=d'),
(False, '../../'),
(False, '../../foo'),
(False, '../../foo/bar.baz'),
(False, '../../foo?a=b&c=d'),
(False, 'ftp://example.com'),
(False, 'httpd://example.com'),
(False, 'http//example.com'),
(False, 'http:/example.com'),
(False, 'http:example.com'),
(False, 'http.example.com'),
(False, 'http-bin.com'),
])
def test_is_uri(expected, uri):
    assert expected == bool(replay.is_uri(uri))
# TODO: Have unit tests for each function in replay.py
|
|
# python3
# coding=utf-8
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for plugins.pipeline_plugins.hooks.bq_hook."""
import datetime
import json
import unittest
from unittest import mock
from airflow import exceptions
from airflow.contrib.hooks import bigquery_hook
import freezegun
from plugins.pipeline_plugins.hooks import monitoring_hook
from plugins.pipeline_plugins.utils import errors
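# freezegun pins "now" for the whole class, so tests that let the hook
# generate its own timestamp can compute the identical value to assert on.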
@freezegun.freeze_time('2020-11-01')
class MonitoringHookTest(unittest.TestCase):
def setUp(self):
super().setUp()
self.dag_name = 'dag'
self.project_id = 'test_project'
self.dataset_id = 'test_dataset'
self.table_id = 'test_table'
self.conn_id = 'test_conn'
self.expected_run_row = {
'dag_name': self.dag_name,
'timestamp': '20201103180000',
'type_id': monitoring_hook.MonitoringEntityMap.RUN.value,
'location': 'https://input/resource',
'position': '',
'info': ''}
self.expected_blob_row = {
'dag_name': self.dag_name,
'timestamp': '20201103180000',
'type_id': monitoring_hook.MonitoringEntityMap.BLOB.value,
'location': 'https://input/resource',
'position': '3000',
'info': '1500'}
self.expected_event_row = {
'dag_name': self.dag_name,
'timestamp': '20201103180000',
'type_id': 50,
'location': 'https://input/resource',
'position': '60',
'info': json.dumps({'a': 1})}
self.expected_retry_row = {
'dag_name': self.dag_name,
'timestamp': '20201103180000',
'type_id': monitoring_hook.MonitoringEntityMap.RETRY.value,
'location': 'https://input/resource',
'position': '',
'info': ''}
self.mock_conn_obj = mock.MagicMock()
self.mock_cursor_obj = mock.MagicMock()
self.mock_cursor_obj.project_id = self.project_id
self.mock_conn_obj.cursor = mock.MagicMock(
return_value=self.mock_cursor_obj)
self.mock_cursor_obj.create_empty_table = mock.MagicMock()
self.mock_cursor_obj.create_empty_dataset = mock.MagicMock()
self.mock_cursor_obj.insert_all = mock.MagicMock()
self.original_get_conn = monitoring_hook.MonitoringHook.get_conn
monitoring_hook.MonitoringHook.get_conn = mock.MagicMock(
return_value=self.mock_conn_obj)
self.original_bigquery_hook_init = bigquery_hook.BigQueryHook.__init__
bigquery_hook.BigQueryHook.__init__ = mock.MagicMock()
with mock.patch(
'plugins.pipeline_plugins.hooks.monitoring_hook.'
'MonitoringHook._create_monitoring_dataset_and_table_if_not_exist'):
self.hook = monitoring_hook.MonitoringHook(
bq_conn_id=self.conn_id,
monitoring_dataset=self.dataset_id,
monitoring_table=self.table_id)
self.hook.get_conn = mock.MagicMock(return_value=self.mock_conn_obj)
def tearDown(self):
super().tearDown()
bigquery_hook.BigQueryHook.__init__ = self.original_bigquery_hook_init
monitoring_hook.MonitoringHook.get_conn = self.original_get_conn
def test_init(self):
self.mock_cursor_obj.get_dataset.side_effect = exceptions.AirflowException()
monitoring_hook.MonitoringHook.table_exists = mock.MagicMock(
return_value=True)
monitoring_hook.MonitoringHook(
bq_conn_id='test_conn',
monitoring_dataset=self.dataset_id,
monitoring_table=self.table_id)
self.mock_cursor_obj.get_dataset.assert_called_with(
project_id=self.project_id, dataset_id=self.dataset_id)
monitoring_hook.MonitoringHook.table_exists.assert_called_with(
project_id=self.project_id, dataset_id=self.dataset_id,
table_id=self.table_id)
def test_init_create_monitoring_dataset_and_table(self):
self.mock_cursor_obj.get_dataset.side_effect = exceptions.AirflowException()
monitoring_hook.MonitoringHook.table_exists = mock.MagicMock(
return_value=False)
monitoring_hook.MonitoringHook(
bq_conn_id='test_conn',
monitoring_dataset=self.dataset_id,
monitoring_table=self.table_id)
self.mock_cursor_obj.create_empty_table.assert_called_with(
project_id=self.project_id, dataset_id=self.dataset_id,
table_id=self.table_id,
schema_fields=monitoring_hook._LOG_SCHEMA_FIELDS)
self.mock_cursor_obj.create_empty_dataset.assert_called_with(
project_id=self.project_id, dataset_id=self.dataset_id)
self.mock_cursor_obj.get_dataset.assert_called_with(
project_id=self.project_id, dataset_id=self.dataset_id)
monitoring_hook.MonitoringHook.table_exists.assert_called_with(
project_id=self.project_id, dataset_id=self.dataset_id,
table_id=self.table_id)
def test_init_handles_bigquery_create_empty_dataset_errors(self):
self.mock_cursor_obj.get_dataset.side_effect = exceptions.AirflowException()
self.mock_cursor_obj.create_empty_dataset.side_effect = (
exceptions.AirflowException())
with self.assertRaises(errors.MonitoringDatabaseError):
monitoring_hook.MonitoringHook(
bq_conn_id='test_conn',
monitoring_dataset=self.dataset_id,
monitoring_table=self.table_id)
def test_init_handles_bigquery_create_empty_table_errors(self):
self.mock_cursor_obj.create_empty_table.side_effect = (
exceptions.AirflowException())
monitoring_hook.MonitoringHook.table_exists = mock.MagicMock(
return_value=False)
with self.assertRaises(errors.MonitoringDatabaseError):
monitoring_hook.MonitoringHook(
bq_conn_id='test_conn',
monitoring_dataset=self.dataset_id,
monitoring_table=self.table_id)
def test_get_location(self):
location = self.hook.get_location()
self.assertEqual(location,
(f'bq://{self.hook.project_id}.'
f'{self.hook.dataset_id}.{self.hook.table_id}'))
def test_store_run(self):
self.hook.store_run(dag_name=self.expected_run_row['dag_name'],
timestamp=self.expected_run_row['timestamp'],
location=self.expected_run_row['location'])
self.mock_cursor_obj.insert_all.assert_called_once_with(
project_id=self.project_id, dataset_id=self.dataset_id,
table_id=self.table_id, rows=[{'json': self.expected_run_row}])
def test_store_run_creates_timestamp_when_none_provided(self):
self.expected_run_row['timestamp'] = (monitoring_hook.
_generate_zone_aware_timestamp())
self.hook.store_run(dag_name=self.expected_run_row['dag_name'],
location=self.expected_run_row['location'])
self.mock_cursor_obj.insert_all.assert_called_once_with(
project_id=self.project_id, dataset_id=self.dataset_id,
table_id=self.table_id, rows=[{'json': self.expected_run_row}])
def test_store_run_handles_storing_error(self):
self.mock_cursor_obj.insert_all.side_effect = exceptions.AirflowException()
self.expected_run_row['timestamp'] = (monitoring_hook.
_generate_zone_aware_timestamp())
with self.assertRaises(errors.MonitoringAppendLogError):
self.hook.store_run(dag_name=self.expected_run_row['dag_name'],
location=self.expected_run_row['location'])
def test_store_blob(self):
self.hook.store_blob(dag_name=self.expected_blob_row['dag_name'],
timestamp=self.expected_blob_row['timestamp'],
location=self.expected_blob_row['location'],
position=self.expected_blob_row['position'],
num_rows=self.expected_blob_row['info'])
self.mock_cursor_obj.insert_all.assert_called_once_with(
project_id=self.project_id, dataset_id=self.dataset_id,
table_id=self.table_id, rows=[{'json': self.expected_blob_row}])
def test_store_blobs_creates_timestamp_when_none_provided(self):
self.expected_blob_row['timestamp'] = (monitoring_hook.
_generate_zone_aware_timestamp())
self.hook.store_blob(dag_name=self.expected_blob_row['dag_name'],
location=self.expected_blob_row['location'],
position=self.expected_blob_row['position'],
num_rows=self.expected_blob_row['info'])
self.mock_cursor_obj.insert_all.assert_called_once_with(
project_id=self.project_id, dataset_id=self.dataset_id,
table_id=self.table_id, rows=[{'json': self.expected_blob_row}])
def test_store_blob_handles_storing_error(self):
self.mock_cursor_obj.insert_all.side_effect = exceptions.AirflowException()
self.expected_blob_row['timestamp'] = (monitoring_hook.
_generate_zone_aware_timestamp())
with self.assertRaises(errors.MonitoringAppendLogError):
self.hook.store_blob(dag_name=self.expected_blob_row['dag_name'],
location=self.expected_blob_row['location'],
position=self.expected_blob_row['position'],
num_rows=self.expected_blob_row['info'])
def test_store_events(self):
expected_event = (self.expected_event_row['position'],
json.loads(self.expected_event_row['info']),
self.expected_event_row['type_id'])
self.expected_event_row['info'] = json.dumps(expected_event[1])
self.hook.store_events(dag_name=self.expected_event_row['dag_name'],
timestamp=self.expected_event_row['timestamp'],
location=self.expected_event_row['location'],
id_event_error_tuple_list=[expected_event])
self.mock_cursor_obj.insert_all.assert_called_once_with(
project_id=self.project_id, dataset_id=self.dataset_id,
table_id=self.table_id, rows=[{'json': self.expected_event_row}])
def test_store_events_creates_timestamp_when_none_provided(self):
expected_event = (self.expected_event_row['position'],
json.loads(self.expected_event_row['info']),
self.expected_event_row['type_id'])
self.expected_event_row['timestamp'] = (monitoring_hook.
_generate_zone_aware_timestamp())
self.hook.store_events(dag_name=self.expected_event_row['dag_name'],
location=self.expected_event_row['location'],
id_event_error_tuple_list=[expected_event])
self.mock_cursor_obj.insert_all.assert_called_once_with(
project_id=self.project_id, dataset_id=self.dataset_id,
table_id=self.table_id, rows=[{'json': self.expected_event_row}])
def test_store_events_handles_storing_error(self):
self.mock_cursor_obj.insert_all.side_effect = exceptions.AirflowException()
expected_event = (self.expected_event_row['position'],
json.loads(self.expected_event_row['info']),
self.expected_event_row['type_id'])
self.expected_event_row['timestamp'] = (monitoring_hook.
_generate_zone_aware_timestamp())
with self.assertRaises(errors.MonitoringAppendLogError):
self.hook.store_events(dag_name=self.expected_event_row['dag_name'],
location=self.expected_event_row['location'],
id_event_error_tuple_list=[expected_event])
def test_store_retry(self):
self.hook.store_retry(dag_name=self.expected_retry_row['dag_name'],
timestamp=self.expected_retry_row['timestamp'],
location=self.expected_retry_row['location'])
self.mock_cursor_obj.insert_all.assert_called_once_with(
project_id=self.project_id, dataset_id=self.dataset_id,
table_id=self.table_id, rows=[{'json': self.expected_retry_row}])
def test_store_retry_creates_timestamp_when_none_provided(self):
self.expected_retry_row['timestamp'] = (monitoring_hook.
_generate_zone_aware_timestamp())
self.hook.store_retry(dag_name=self.expected_retry_row['dag_name'],
location=self.expected_retry_row['location'])
self.mock_cursor_obj.insert_all.assert_called_once_with(
project_id=self.project_id, dataset_id=self.dataset_id,
table_id=self.table_id, rows=[{'json': self.expected_retry_row}])
def test_store_retry_handles_storing_error(self):
self.mock_cursor_obj.insert_all.side_effect = exceptions.AirflowException()
self.expected_retry_row['timestamp'] = (monitoring_hook.
_generate_zone_aware_timestamp())
with self.assertRaises(errors.MonitoringAppendLogError):
self.hook.store_retry(dag_name=self.expected_retry_row['dag_name'],
location=self.expected_retry_row['location'])
def test_generate_processed_blobs_position_ranges(self):
self.mock_cursor_obj.execute = mock.MagicMock()
self.mock_cursor_obj.fetchone.side_effect = [('0', '1000'),
('1000', '1'), None]
gen = self.hook.generate_processed_blobs_ranges()
self.assertTupleEqual(('0', '1000'), next(gen))
self.assertTupleEqual(('1000', '1'), next(gen))
with self.assertRaises(StopIteration):
next(gen)
self.mock_cursor_obj.execute.assert_called_once()
def test_events_blobs_generator(self):
self.mock_cursor_obj.execute = mock.MagicMock()
self.mock_cursor_obj.fetchone.side_effect = [['{"a": "1"}'], ['{"b": 2}'],
None]
gen = self.hook.events_blobs_generator()
blb = next(gen)
self.assertListEqual([{'a': '1'}, {'b': 2}], blb.events)
self.mock_cursor_obj.execute.assert_called_once()
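  # events_blobs_generator pages rows in chunks of _DEFAULT_PAGE_SIZE; one
  # row past a full page should spill into a second blob.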
def test_events_blobs_generator_2_blobs(self):
self.mock_cursor_obj.execute = mock.MagicMock()
self.mock_cursor_obj.fetchone.side_effect = (
[['{"a": "1"}']] * (monitoring_hook._DEFAULT_PAGE_SIZE + 1) +
[['{"b": 2}'], None])
gen = self.hook.events_blobs_generator()
blb = next(gen)
self.assertListEqual(
[{'a': '1'}]*(monitoring_hook._DEFAULT_PAGE_SIZE), blb.events)
blb = next(gen)
self.assertListEqual([{'a': '1'}, {'b': 2}], blb.events)
self.mock_cursor_obj.execute.assert_called_once()
def test_events_blobs_generator_exactly_page_size(self):
self.mock_cursor_obj.execute = mock.MagicMock()
self.mock_cursor_obj.fetchone.side_effect = (
[['{"a": "1"}']]*(monitoring_hook._DEFAULT_PAGE_SIZE) + [None])
gen = self.hook.events_blobs_generator()
blb = next(gen)
self.assertListEqual(
[{'a': '1'}]*(monitoring_hook._DEFAULT_PAGE_SIZE), blb.events)
self.mock_cursor_obj.execute.assert_called_once()
def test_events_blobs_generator_retry(self):
self.mock_cursor_obj.execute = mock.MagicMock()
self.mock_cursor_obj.fetchone.side_effect = [['{"a": "1"}'], ['{"b": 2}'],
None]
with mock.patch.object(monitoring_hook.MonitoringHook, 'store_retry',
autospec=True):
gen = self.hook.events_blobs_generator()
next(gen)
self.hook.store_retry.assert_called_once()
def test_events_blobs_generator_no_retry(self):
self.mock_cursor_obj.execute = mock.MagicMock()
self.mock_cursor_obj.fetchone.side_effect = [['{"a": "1"}'], ['{"b": 2}'],
None]
with mock.patch.object(monitoring_hook.MonitoringHook, 'store_retry',
autospec=True):
self.hook.enable_monitoring = False
gen = self.hook.events_blobs_generator()
next(gen)
self.hook.store_retry.assert_not_called()
def test_cleanup_by_days_to_live(self):
time_to_live = 1
self.hook.dag_name = 'bq_to_cm_dag'
cutoff_timestamp = (datetime.datetime.utcnow() - datetime.timedelta(
days=time_to_live)).isoformat() + 'Z'
cleanup_sql = (f'DELETE FROM `{self.dataset_id}.{self.table_id}` WHERE '
f'`timestamp`<%(cutoff_timestamp)s AND '
'dag_name="bq_to_cm_dag"')
params = {'cutoff_timestamp': cutoff_timestamp}
self.mock_cursor_obj.execute = mock.MagicMock()
self.hook.cleanup_by_days_to_live(days_to_live=time_to_live)
self.mock_cursor_obj.execute.assert_called_once_with(cleanup_sql, params)
def test_cleanup_by_days_to_live_running_from_cleanup_dag(self):
"""Asserts SQL has no DAG name filter when running from cleanup DAG."""
time_to_live = 1
self.hook.dag_name = 'tcrm_monitoring_cleanup'
cutoff_timestamp = (datetime.datetime.utcnow() - datetime.timedelta(
days=time_to_live)).isoformat() + 'Z'
cleanup_sql = (f'DELETE FROM `{self.dataset_id}.{self.table_id}` WHERE '
f'`timestamp`<%(cutoff_timestamp)s')
params = {'cutoff_timestamp': cutoff_timestamp}
self.mock_cursor_obj.execute = mock.MagicMock()
self.hook.cleanup_by_days_to_live(days_to_live=time_to_live)
self.mock_cursor_obj.execute.assert_called_once_with(cleanup_sql, params)
def test_cleanup_by_days_to_live_with_no_ttl_raises_error(self):
with self.assertRaises(errors.MonitoringCleanupError):
self.hook.cleanup_by_days_to_live(days_to_live=None)
def test_cleanup_by_days_to_live_with_ttl_less_than_one_raises_error(self):
with self.assertRaises(errors.MonitoringCleanupError):
self.hook.cleanup_by_days_to_live(days_to_live=-1)
if __name__ == '__main__':
unittest.main()
|
|
# $Id: universal.py 6112 2009-09-03 07:27:59Z milde $
# Authors: David Goodger <goodger@python.org>; Ueli Schlaepfer
# Copyright: This module has been placed in the public domain.
"""
Transforms needed by most or all documents:
- `Decorations`: Generate a document's header & footer.
- `Messages`: Placement of system messages stored in
`nodes.document.transform_messages`.
- `TestMessages`: Like `Messages`, used on test runs.
- `FinalReferences`: Resolve remaining references.
"""
__docformat__ = 'reStructuredText'
import re
import sys
import time
from docutils import nodes, utils
from docutils.transforms import TransformError, Transform
class Decorations(Transform):
"""
Populate a document's decoration element (header, footer).
"""
default_priority = 820
def apply(self):
header_nodes = self.generate_header()
if header_nodes:
decoration = self.document.get_decoration()
header = decoration.get_header()
header.extend(header_nodes)
footer_nodes = self.generate_footer()
if footer_nodes:
decoration = self.document.get_decoration()
footer = decoration.get_footer()
footer.extend(footer_nodes)
def generate_header(self):
return None
def generate_footer(self):
# @@@ Text is hard-coded for now.
# Should be made dynamic (language-dependent).
settings = self.document.settings
if settings.generator or settings.datestamp or settings.source_link \
or settings.source_url:
text = []
if settings.source_link and settings._source \
or settings.source_url:
if settings.source_url:
source = settings.source_url
else:
source = utils.relative_path(settings._destination,
settings._source)
text.extend([
nodes.reference('', 'View document source',
refuri=source),
nodes.Text('.\n')])
if settings.datestamp:
datestamp = time.strftime(settings.datestamp, time.gmtime())
text.append(nodes.Text('Generated on: ' + datestamp + '.\n'))
if settings.generator:
text.extend([
nodes.Text('Generated by '),
nodes.reference('', 'Docutils', refuri=
'http://docutils.sourceforge.net/'),
nodes.Text(' from '),
nodes.reference('', 'reStructuredText', refuri='http://'
'docutils.sourceforge.net/rst.html'),
nodes.Text(' source.\n')])
return [nodes.paragraph('', '', *text)]
else:
return None
class ExposeInternals(Transform):
"""
Expose internal attributes if ``expose_internals`` setting is set.
"""
default_priority = 840
def not_Text(self, node):
return not isinstance(node, nodes.Text)
def apply(self):
if self.document.settings.expose_internals:
for node in self.document.traverse(self.not_Text):
for att in self.document.settings.expose_internals:
value = getattr(node, att, None)
if value is not None:
node['internal:' + att] = value
class Messages(Transform):
"""
Place any system messages generated after parsing into a dedicated section
of the document.
"""
default_priority = 860
def apply(self):
unfiltered = self.document.transform_messages
threshold = self.document.reporter.report_level
messages = []
for msg in unfiltered:
if msg['level'] >= threshold and not msg.parent:
messages.append(msg)
if messages:
section = nodes.section(classes=['system-messages'])
# @@@ get this from the language module?
section += nodes.title('', 'Docutils System Messages')
section += messages
self.document.transform_messages[:] = []
self.document += section
class FilterMessages(Transform):
"""
Remove system messages below verbosity threshold.
"""
default_priority = 870
def apply(self):
for node in self.document.traverse(nodes.system_message):
if node['level'] < self.document.reporter.report_level:
node.parent.remove(node)
class TestMessages(Transform):
"""
Append all post-parse system messages to the end of the document.
Used for testing purposes.
"""
default_priority = 880
def apply(self):
for msg in self.document.transform_messages:
if not msg.parent:
self.document += msg
class StripComments(Transform):
"""
Remove comment elements from the document tree (only if the
``strip_comments`` setting is enabled).
"""
default_priority = 740
def apply(self):
if self.document.settings.strip_comments:
for node in self.document.traverse(nodes.comment):
node.parent.remove(node)
class StripClassesAndElements(Transform):
"""
Remove from the document tree all elements with classes in
`self.document.settings.strip_elements_with_classes` and all "classes"
attribute values in `self.document.settings.strip_classes`.
"""
default_priority = 420
def apply(self):
if not (self.document.settings.strip_elements_with_classes
or self.document.settings.strip_classes):
return
# prepare dicts for lookup (not sets, for Python 2.2 compatibility):
self.strip_elements = dict(
[(key, None)
for key in (self.document.settings.strip_elements_with_classes
or [])])
self.strip_classes = dict(
[(key, None) for key in (self.document.settings.strip_classes
or [])])
for node in self.document.traverse(self.check_classes):
node.parent.remove(node)
def check_classes(self, node):
if isinstance(node, nodes.Element):
for class_value in node['classes'][:]:
if class_value in self.strip_classes:
node['classes'].remove(class_value)
if class_value in self.strip_elements:
return 1
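# A minimal sketch (assuming a standard docutils install) showing that these
# transforms are driven by ordinary settings; here ``strip_comments``
# exercises StripComments:
if __name__ == '__main__':
    from docutils.core import publish_string
    out = publish_string('.. a comment\n\nBody text.',
                         writer_name='pseudoxml',
                         settings_overrides={'strip_comments': True})
    print(out.decode('utf-8'))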
|
|
#
# Copyright (c) 2007 Red Hat
#
# Written by Mauricio Teixeira <mteixeira@webset.net>
#
# This file is part of Smart Package Manager.
#
# Smart Package Manager is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# Smart Package Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Smart Package Manager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import posixpath
import os
import ConfigParser
import re
# be compatible with 2.3
import sys
if sys.version_info < (2, 4):
from sets import Set as set
from smart.channel import *
from smart import *
YUM_REPOS_DIR = "/etc/yum.repos.d/"
def _getbasearch():
"""
Get system "base" architecture.
"""
try:
import rpmUtils.arch # from yum
return rpmUtils.arch.getBaseArch()
except ImportError:
return None
def _getreleasever():
"""
Get system release and version.
"""
try:
import rpm
import rpmUtils.transaction
except ImportError:
return None
rpmroot = sysconf.get("rpm-root", "/")
ts = rpmUtils.transaction.initReadOnlyTransaction(root=rpmroot)
if hasattr(rpm, '_RPMVSF_NOSIGNATURES') and hasattr(rpm, '_RPMVSF_NODIGESTS'):
ts.pushVSFlags(~(rpm._RPMVSF_NOSIGNATURES|rpm._RPMVSF_NODIGESTS))
releasever = None
# HACK: we're hard-coding the most used distros, will add more if needed
idx = ts.dbMatch('provides', 'fedora-release')
if idx.count() == 0:
idx = ts.dbMatch('provides', 'redhat-release')
if idx.count() != 0:
hdr = idx.next()
releasever = str(hdr['version'])
del hdr
del idx
del ts
return releasever
BASEARCH = _getbasearch()
RELEASEVER = _getreleasever()
def _replaceStrings(txt):
"""
Replace some predefined strings that may appear in the repo file.
"""
retxt = re.sub("\$basearch", "%s" % BASEARCH, txt)
retxt = re.sub("\$releasever", "%s" % RELEASEVER, retxt)
return retxt
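# For example (hypothetical mirror URL): with BASEARCH == "x86_64" and
# RELEASEVER == "14", "http://mirror/$releasever/$basearch/os/" becomes
# "http://mirror/14/x86_64/os/".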
def _findBaseUrl(mirrorlist, repo):
"""
    Fetches the first suggested mirror from the mirrorlist and uses it as
    the baseurl.
"""
import urllib
    fp = urllib.urlopen(mirrorlist)
    baseurl = None
    while 1:
        line = fp.readline()
if line.startswith("#"):
continue
elif (line.startswith("http:") or line.startswith("https:") or
line.startswith("ftp:") or line.startswith("file:")):
baseurl = line
break
elif not line:
break
return baseurl
def _searchComments(repofile, repo):
"""
    Hack to find the commented-out baseurl line when the mirrorlist is
    missing or unusable.
"""
section = None
baseurl = None
    fp = open(repofile)
    while 1:
        line = fp.readline()
if not line:
break
line = line.strip()
if line.startswith("[") and line.endswith("]"):
section = line.strip("[]")
continue
elif section == repo and line.startswith("#baseurl="):
baseurl = _replaceStrings(line[9:])
break
    fp.close()
return baseurl
def _loadRepoFile(filename):
"""
Loads each repository file information.
"""
# The computed aliases we have seen in the given file
seen = set()
repofile = ConfigParser.ConfigParser()
repofile.read(filename)
for repo in repofile.sections():
# Iterate through each repo found in file
alias = "yumsync-%s" % repo
name = _replaceStrings(repofile.get(repo, 'name'))
baseurl = None
mirrorlist = None
# Some repos have baseurl, some have mirrorlist
if repofile.has_option(repo, 'baseurl'):
baseurl = _replaceStrings(repofile.get(repo, 'baseurl'))
if baseurl.find("\n") >= 0: baseurl = baseurl.splitlines()[1]
if baseurl == "file:///media/cdrom/": baseurl = "localmedia://"
if baseurl == "file:///media/cdrecorder/": baseurl = "localmedia://"
else:
# baseurl is required for rpm-md channels
baseurl = _searchComments(filename, repo)
if repofile.has_option(repo, 'mirrorlist'):
mirrorlist = _replaceStrings(repofile.get(repo, 'mirrorlist'))
if not baseurl:
baseurl = _findBaseUrl(mirrorlist, repo)
if baseurl is None and mirrorlist is None:
iface.warning(_("Yum channel %s does not contain baseurl or " \
"mirrorlist addresses. Not syncing.") % repo)
            continue
        if repofile.has_option(repo, 'enabled'):
            disabled = not repofile.getboolean(repo, 'enabled')
        else:
            disabled = False
        data = {"type": "rpm-md",
                "name": name,
                "baseurl": baseurl,
                "disabled": disabled}
if mirrorlist:
data["mirrorlist"] = mirrorlist
seen.add(alias)
try:
createChannel(alias, data)
except Error, e:
iface.error(_("While using %s: %s") % (filename, e))
else:
# Store it persistently.
sysconf.set(("channels", alias), data)
return seen
def syncYumRepos(reposdir, force=None):
"""
Sync Smart channels based on Yum repositories.
"""
seen = set()
if os.path.isdir(reposdir):
for entry in os.listdir(reposdir):
if entry.endswith(".repo"):
filepath = os.path.join(reposdir, entry)
if os.path.isfile(filepath):
seen.update(_loadRepoFile(filepath))
# Delete the entries which were not seen in current files.
channels = sysconf.get("channels")
for alias in sysconf.keys("channels"):
if alias.startswith("yumsync-") and alias not in seen:
sysconf.remove(("channels", alias))
if not sysconf.getReadOnly():
if sysconf.get("sync-yum-repos",False):
syncYumRepos(sysconf.get("yum-repos-dir", YUM_REPOS_DIR))
# vim:ts=4:sw=4:et
|
|
# -*- coding: utf-8 -*-
#
# electrical documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 22 15:47:53 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_bootstrap_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinxcontrib.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames (source_suffix is set near the end of this file).
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General information about the project.
project = u'corr'
copyright = u'2019 US NIST MGI'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.2"
# The full version, including alpha/beta/rc tags.
release = "0.2"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
autoclass_content = 'both'
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'sphinxdoc'
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# ("Storage", "rst/corr-db/README.html", True),
# ("API", "rst/corr-api/README.html", True),
# ("Cloud", "rst/corr-cloud/README.html", True),
# ("Frontend", "rst/corr-view/README.html", True),
html_theme_options = {
'navbar_title': "*",
'navbar_links': [
("launch", "rst/LAUNCH.html", True),
("use", "rst/USE.html", True),
("contribute", "https://github.com/usnistgov/corr", True),
("corr.nist.gov", "https://corr.nist.gov", True),
],
'navbar_pagenav': False,
'navbar_sidebarrel': False,
'globaltoc_depth': 1,
'source_link_position': '',
'bootswatch_theme': 'cosmo'
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "CoRR"
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "CoRR"
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logo.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'corrdoc'
# -- Options for LaTeX output --------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'corr.tex', u'CoRR Documentation',
u'Faical Yannick P. Congo', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'corr', u'CoRR Documentation',
[u'Faical Yannick P. Congo'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'corr', u'CoRR Documentation',
u'Faical Yannick P. Congo', 'corr', 'Cloud of Reproducible Records',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
from recommonmark.parser import CommonMarkParser
from recommonmark.transform import AutoStructify
source_parsers = {'.md' : CommonMarkParser}
source_suffix = ['.rst', '.md']
def url_resolver(url):
"""Resolve url for both documentation and Github online.
If the url is an IPython notebook links to the correct path.
Args:
url: the path to the link (not always a full url)
Returns:
a local url to either the documentation or the Github
"""
if url[-6:] == '.ipynb':
return url[4:-6] + '.html'
else:
return url
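# For example, url_resolver('rst/demo.ipynb') returns 'demo.html': the
# 'rst/' prefix and '.ipynb' suffix are stripped and '.html' is appended.
# Anything else passes through unchanged.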
def setup(app):
app.add_config_value('recommonmark_config', {
'url_resolver': url_resolver,
'auto_toc_tree_section': 'Contents',
}, True)
app.add_transform(AutoStructify)
app.add_stylesheet('corr.css')
import shutil, os, glob
rst_directory = 'rst'
# db_directory = 'rst/corr-db'
# api_directory = 'rst/corr-api'
# cloud_directory = 'rst/corr-cloud'
# view_directory = 'rst/corr-view'
for directory in [rst_directory]:#, db_directory, api_directory, cloud_directory, view_directory]:
if not os.path.exists(directory):
os.makedirs(directory)
# 'corr-db/README.md',
# 'corr-api/README.md',
# 'corr-cloud/README.md',
# 'corr-view/README.md',
files_to_copy = (
'README.md',
'LAUNCH.md',
'USE.md'
)
print("+"*24)
print(files_to_copy)
for fpath in files_to_copy:
for fpath_glob in glob.glob(os.path.join('..', fpath)):
fpath_glob_ = '/'.join(fpath_glob.split('/')[1:])
print("{} -> {}".format(fpath_glob, os.path.join(rst_directory, fpath_glob_)))
shutil.copy(fpath_glob, os.path.join(rst_directory, fpath_glob_))
|
|
import json
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.test.utils import override_settings
from django.contrib.sites.models import Site
from allauth.socialaccount.models import SocialApp
from allauth.socialaccount.providers.facebook.provider import GRAPH_API_URL
import responses
from rest_framework import status
from .test_base import BaseAPITestCase
@override_settings(ROOT_URLCONF="tests.urls")
class TestSocialAuth(TestCase, BaseAPITestCase):
USERNAME = 'person'
PASS = 'person'
EMAIL = "person1@world.com"
REGISTRATION_DATA = {
"username": USERNAME,
"password1": PASS,
"password2": PASS,
"email": EMAIL
}
def setUp(self):
self.init()
social_app = SocialApp.objects.create(
provider='facebook',
name='Facebook',
client_id='123123123',
secret='321321321',
)
twitter_social_app = SocialApp.objects.create(
provider='twitter',
name='Twitter',
client_id='11223344',
secret='55667788',
)
site = Site.objects.get_current()
social_app.sites.add(site)
twitter_social_app.sites.add(site)
self.graph_api_url = GRAPH_API_URL + '/me'
self.twitter_url = 'http://twitter.com/foobarme'
@responses.activate
def test_failed_social_auth(self):
# fake response
responses.add(
responses.GET,
self.graph_api_url,
body='',
status=400,
content_type='application/json'
)
payload = {
'access_token': 'abc123'
}
self.post(self.fb_login_url, data=payload, status_code=400)
@responses.activate
def test_social_auth(self):
# fake response for facebook call
resp_body = {
"id": "123123123123",
"first_name": "John",
"gender": "male",
"last_name": "Smith",
"link": "https://www.facebook.com/john.smith",
"locale": "en_US",
"name": "John Smith",
"timezone": 2,
"updated_time": "2014-08-13T10:14:38+0000",
"username": "john.smith",
"verified": True
}
responses.add(
responses.GET,
self.graph_api_url,
body=json.dumps(resp_body),
status=200,
content_type='application/json'
)
users_count = get_user_model().objects.all().count()
payload = {
'access_token': 'abc123'
}
self.post(self.fb_login_url, data=payload, status_code=200)
self.assertIn('key', self.response.json.keys())
self.assertEqual(get_user_model().objects.all().count(), users_count + 1)
# make sure that second request will not create a new user
self.post(self.fb_login_url, data=payload, status_code=200)
self.assertIn('key', self.response.json.keys())
self.assertEqual(get_user_model().objects.all().count(), users_count + 1)
def _twitter_social_auth(self):
# fake response for twitter call
resp_body = {
"id": "123123123123",
}
responses.add(
responses.GET,
'https://api.twitter.com/1.1/account/verify_credentials.json',
body=json.dumps(resp_body),
status=200,
content_type='application/json'
)
users_count = get_user_model().objects.all().count()
payload = {
'access_token': 'abc123',
'token_secret': '1111222233334444'
}
self.post(self.tw_login_url, data=payload)
self.assertIn('key', self.response.json.keys())
self.assertEqual(get_user_model().objects.all().count(), users_count + 1)
# make sure that second request will not create a new user
self.post(self.tw_login_url, data=payload, status_code=200)
self.assertIn('key', self.response.json.keys())
self.assertEqual(get_user_model().objects.all().count(), users_count + 1)
@responses.activate
@override_settings(SOCIALACCOUNT_AUTO_SIGNUP=True)
def test_twitter_social_auth(self):
self._twitter_social_auth()
@responses.activate
@override_settings(SOCIALACCOUNT_AUTO_SIGNUP=False)
    def test_twitter_social_auth_without_auto_signup(self):
self._twitter_social_auth()
@responses.activate
def test_twitter_social_auth_request_error(self):
# fake response for twitter call
resp_body = {
"id": "123123123123",
}
responses.add(
responses.GET,
'https://api.twitter.com/1.1/account/verify_credentials.json',
body=json.dumps(resp_body),
status=400,
content_type='application/json'
)
users_count = get_user_model().objects.all().count()
payload = {
'access_token': 'abc123',
'token_secret': '1111222233334444'
}
self.post(self.tw_login_url, data=payload, status_code=400)
self.assertNotIn('key', self.response.json.keys())
self.assertEqual(get_user_model().objects.all().count(), users_count)
@responses.activate
def test_twitter_social_auth_no_view_in_context(self):
# fake response for twitter call
resp_body = {
"id": "123123123123",
}
responses.add(
responses.GET,
'https://api.twitter.com/1.1/account/verify_credentials.json',
body=json.dumps(resp_body),
status=400,
content_type='application/json'
)
users_count = get_user_model().objects.all().count()
payload = {
'access_token': 'abc123',
'token_secret': '1111222233334444'
}
self.post(self.tw_login_no_view_url, data=payload, status_code=400)
self.assertEqual(get_user_model().objects.all().count(), users_count)
@responses.activate
def test_twitter_social_auth_no_adapter(self):
# fake response for twitter call
resp_body = {
"id": "123123123123",
}
responses.add(
responses.GET,
'https://api.twitter.com/1.1/account/verify_credentials.json',
body=json.dumps(resp_body),
status=400,
content_type='application/json'
)
users_count = get_user_model().objects.all().count()
payload = {
'access_token': 'abc123',
'token_secret': '1111222233334444'
}
self.post(self.tw_login_no_adapter_url, data=payload, status_code=400)
self.assertEqual(get_user_model().objects.all().count(), users_count)
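    # End-to-end sketch: register and verify an email address, log out, then
    # log in through the Facebook endpoint whose profile carries the same,
    # already-verified email.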
@responses.activate
@override_settings(
ACCOUNT_EMAIL_VERIFICATION='mandatory',
ACCOUNT_EMAIL_REQUIRED=True,
REST_SESSION_LOGIN=False,
ACCOUNT_EMAIL_CONFIRMATION_HMAC=False
)
def test_edge_case(self):
resp_body = {
"id": "123123123123",
"first_name": "John",
"gender": "male",
"last_name": "Smith",
"link": "https://www.facebook.com/john.smith",
"locale": "en_US",
"name": "John Smith",
"timezone": 2,
"updated_time": "2014-08-13T10:14:38+0000",
"username": "john.smith",
"verified": True,
"email": self.EMAIL
}
responses.add(
responses.GET,
self.graph_api_url,
body=json.dumps(resp_body),
status=200,
content_type='application/json'
)
# test empty payload
self.post(self.register_url, data={}, status_code=400)
self.post(
self.register_url,
data=self.REGISTRATION_DATA,
status_code=201
)
new_user = get_user_model().objects.latest('id')
self.assertEqual(new_user.username, self.REGISTRATION_DATA['username'])
# verify email
email_confirmation = new_user.emailaddress_set.get(email=self.EMAIL)\
.emailconfirmation_set.order_by('-created')[0]
self.post(
self.veirfy_email_url,
data={"key": email_confirmation.key},
status_code=status.HTTP_200_OK
)
self._login()
self._logout()
payload = {
'access_token': 'abc123'
}
self.post(self.fb_login_url, data=payload, status_code=200)
self.assertIn('key', self.response.json.keys())
@responses.activate
@override_settings(
REST_USE_JWT=True
)
def test_jwt(self):
resp_body = '{"id":"123123123123","first_name":"John","gender":"male","last_name":"Smith","link":"https:\\/\\/www.facebook.com\\/john.smith","locale":"en_US","name":"John Smith","timezone":2,"updated_time":"2014-08-13T10:14:38+0000","username":"john.smith","verified":true}' # noqa
responses.add(
responses.GET,
self.graph_api_url,
body=resp_body,
status=200,
content_type='application/json'
)
users_count = get_user_model().objects.all().count()
payload = {
'access_token': 'abc123'
}
self.post(self.fb_login_url, data=payload, status_code=200)
self.assertIn('token', self.response.json.keys())
self.assertIn('user', self.response.json.keys())
self.assertEqual(get_user_model().objects.all().count(), users_count + 1)
|
|
#!/usr/bin/env python
#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
#
# scramble-tree.py: (See scramble-tree.py --help.)
#
# Makes multiple random file changes to a directory tree, for testing.
#
# This script will add some new files, remove some existing files, add
# text to some existing files, and delete text from some existing
# files. It will also leave some files completely untouched.
#
# The exact set of changes made is always the same for identical trees,
# where "identical" means the names of files and directories are the
# same, and they are arranged in the same tree structure (the actual
# contents of files may differ). If two trees are not identical, the sets of
# changes scramble-tree.py will make may differ arbitrarily.
#
# Directories named .svn/ and CVS/ are ignored.
#
# Example scenario, starting with a pristine Subversion working copy:
#
# $ ls
# foo/
# $ svn st foo
# $ cp -r foo bar
# $ svn st bar
# $ scramble-tree.py foo
# $ svn st foo
# [... see lots of scary status output ...]
# $ scramble-tree.py bar
# [... see the exact same scary status output ...]
# $ scramble-tree.py foo
# [... see a new bunch of scary status output ...]
# $
import os
import sys
import getopt
try:
my_getopt = getopt.gnu_getopt
except AttributeError:
my_getopt = getopt.getopt
import random
from hashlib import md5 as hashlib_md5
import base64
class VCActions:
def __init__(self):
pass
def add_file(self, path):
"""Add an existing file to version control."""
pass
def remove_file(self, path):
"""Remove an existing file from version control, and delete it."""
pass
class NoVCActions(VCActions):
def remove_file(self, path):
os.unlink(path)
class CVSActions(VCActions):
def add_file(self, path):
cwd = os.getcwd()
try:
dirname, basename = os.path.split(path)
os.chdir(os.path.join(cwd, dirname))
os.system('cvs -Q add -m "Adding file to repository" "%s"' % (basename))
finally:
os.chdir(cwd)
def remove_file(self, path):
cwd = os.getcwd()
try:
dirname, basename = os.path.split(path)
os.chdir(os.path.join(cwd, dirname))
os.system('cvs -Q rm -f "%s"' % (basename))
finally:
os.chdir(cwd)
class SVNActions(VCActions):
def add_file(self, path):
os.system('svn add --quiet "%s"' % (path))
def remove_file(self, path):
os.remove(path)
os.system('svn rm --quiet --force "%s"' % (path))
class hashDir:
"""Given a directory, creates a string containing all directories
and files under that directory (sorted alphanumerically) and makes a
base64-encoded md5 hash of the resulting string. Call
hashDir.gen_seed() to generate a seed value for this tree."""
def __init__(self, rootdir):
self.allfiles = []
for dirpath, dirs, files in os.walk(rootdir):
self.walker_callback(len(rootdir), dirpath, dirs + files)
def gen_seed(self):
# Return a base64-encoded (kinda ... strip the '==\n' from the
# end) MD5 hash of sorted tree listing.
self.allfiles.sort()
return base64.encodestring(hashlib_md5(''.join(self.allfiles)).digest())[:-3]
def walker_callback(self, baselen, dirname, fnames):
if ((dirname == '.svn') or (dirname == 'CVS')):
return
self.allfiles.append(dirname[baselen:])
for filename in fnames:
path = os.path.join(dirname, filename)
if not os.path.isdir(path):
self.allfiles.append(path[baselen:])
class Scrambler:
def __init__(self, seed, vc_actions, dry_run, quiet):
if not quiet:
print('SEED: ' + seed)
self.rand = random.Random(seed)
self.vc_actions = vc_actions
self.dry_run = dry_run
self.quiet = quiet
self.ops = [] ### ["add" | "munge", path]
self.greeking = """
======================================================================
This is some text that was inserted into this file by the lovely and
talented scramble-tree.py script.
======================================================================
"""
### Helpers
  def shrink_list(self, items, remove_count):
    """Randomly delete REMOVE_COUNT elements of ITEMS, in place."""
    if len(items) <= remove_count:
      return []
    for i in range(remove_count):
      # Pick a uniformly random index over the current list length.
      j = self.rand.randrange(len(items))
      del items[j]
    return items
  def _make_new_file(self, dirname):
    for i in range(99999):
      path = os.path.join(dirname, "newfile.%05d.txt" % i)
      if not os.path.exists(path):
        open(path, 'w').write(self.greeking)
        return path
    raise Exception("Ran out of unique new filenames in directory '%s'"
                    % dirname)
### File Mungers
def _mod_append_to_file(self, path):
if not self.quiet:
print('append_to_file: %s' % path)
if self.dry_run:
return
fh = open(path, "a")
fh.write(self.greeking)
fh.close()
def _mod_remove_from_file(self, path):
if not self.quiet:
print('remove_from_file: %s' % path)
if self.dry_run:
return
lines = self.shrink_list(open(path, "r").readlines(), 5)
open(path, "w").writelines(lines)
def _mod_delete_file(self, path):
if not self.quiet:
print('delete_file: %s' % path)
if self.dry_run:
return
self.vc_actions.remove_file(path)
### Public Interfaces
def get_randomizer(self):
return self.rand
def schedule_munge(self, path):
self.ops.append(tuple(["munge", path]))
def schedule_addition(self, dir):
self.ops.append(tuple(["add", dir]))
def enact(self, limit):
num_ops = len(self.ops)
if limit == 0:
return
elif limit > 0 and limit <= num_ops:
self.ops = self.shrink_list(self.ops, num_ops - limit)
for op, path in self.ops:
if op == "add":
path = self._make_new_file(path)
if not self.quiet:
print("add_file: %s" % path)
if self.dry_run:
return
self.vc_actions.add_file(path)
elif op == "munge":
file_mungers = [self._mod_append_to_file,
self._mod_append_to_file,
self._mod_append_to_file,
self._mod_remove_from_file,
self._mod_remove_from_file,
self._mod_remove_from_file,
self._mod_delete_file,
]
self.rand.choice(file_mungers)(path)
def usage(retcode=255):
print('Usage: %s [OPTIONS] DIRECTORY' % (sys.argv[0]))
print('')
print('Options:')
print(' --help, -h : Show this usage message.')
print(' --seed ARG : Use seed ARG to scramble the tree.')
print(' --use-svn : Use Subversion (as "svn") to perform file additions')
print(' and removals.')
print(' --use-cvs : Use CVS (as "cvs") to perform file additions')
print(' and removals.')
print(' --dry-run : Don\'t actually change the disk.')
print(' --limit N : Limit the scrambling to a maximum of N operations.')
print(' --quiet, -q : Run in stealth mode!')
sys.exit(retcode)
def walker_callback(scrambler, dirname, fnames):
if ((dirname.find('.svn') != -1) or dirname.find('CVS') != -1):
return
rand = scrambler.get_randomizer()
if rand.randrange(5) == 1:
scrambler.schedule_addition(dirname)
for filename in fnames:
path = os.path.join(dirname, filename)
if not os.path.isdir(path) and rand.randrange(3) == 1:
scrambler.schedule_munge(path)
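# Rough odds per tree walk: each directory has a 1-in-5 chance of gaining a
# new file, and each file a 1-in-3 chance of being scheduled for munging.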
def main():
seed = None
vc_actions = NoVCActions()
dry_run = 0
quiet = 0
limit = None
# Mm... option parsing.
optlist, args = my_getopt(sys.argv[1:], "hq",
['seed=', 'use-svn', 'use-cvs',
'help', 'quiet', 'dry-run', 'limit='])
for opt, arg in optlist:
if opt == '--help' or opt == '-h':
usage(0)
if opt == '--seed':
seed = arg
if opt == '--use-svn':
vc_actions = SVNActions()
if opt == '--use-cvs':
vc_actions = CVSActions()
if opt == '--dry-run':
dry_run = 1
if opt == '--limit':
limit = int(arg)
if opt == '--quiet' or opt == '-q':
quiet = 1
# We need at least a path to work with, here.
argc = len(args)
  if argc != 1:
    usage()
rootdir = args[0]
  # If a seed wasn't provided, calculate one.
if seed is None:
seed = hashDir(rootdir).gen_seed()
scrambler = Scrambler(seed, vc_actions, dry_run, quiet)
for dirpath, dirs, files in os.walk(rootdir):
walker_callback(scrambler, dirpath, dirs + files)
scrambler.enact(limit)
if __name__ == '__main__':
main()
|
|
# Copyright (c) 2010-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous utility functions for use in generating responses.
Why not swift.common.utils, you ask? Because this way we can import things
from swob in here without creating circular imports.
"""
import hashlib
import itertools
import sys
import time
import six
from six.moves.urllib.parse import unquote
from swift import gettext_ as _
from swift.common.storage_policy import POLICIES
from swift.common.constraints import FORMAT2CONTENT_TYPE
from swift.common.exceptions import ListingIterError, SegmentError
from swift.common.http import is_success
from swift.common.swob import (HTTPBadRequest, HTTPNotAcceptable,
HTTPServiceUnavailable, Range)
from swift.common.utils import split_path, validate_device_partition, \
close_if_possible, maybe_multipart_byteranges_to_document_iters
from swift.common.wsgi import make_subrequest
def get_param(req, name, default=None):
"""
    Get parameters from an HTTP request, ensuring proper handling of UTF-8
encoding.
:param req: request object
:param name: parameter name
:param default: result to return if the parameter is not found
:returns: HTTP request parameter value
(as UTF-8 encoded str, not unicode object)
:raises: HTTPBadRequest if param not valid UTF-8 byte sequence
"""
value = req.params.get(name, default)
if value and not isinstance(value, six.text_type):
try:
value.decode('utf8') # Ensure UTF8ness
except UnicodeDecodeError:
raise HTTPBadRequest(
request=req, content_type='text/plain',
body='"%s" parameter not valid UTF-8' % name)
return value
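# Illustrative sketch (not part of the original module): with a swob request
# built from a blank environ, get_param behaves roughly like this:
#
#     >>> from swift.common.swob import Request
#     >>> req = Request.blank('/v1/a/c?format=json')
#     >>> get_param(req, 'format')
#     'json'
#     >>> get_param(req, 'marker', default='')
#     ''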
def get_listing_content_type(req):
"""
Determine the content type to use for an account or container listing
response.
:param req: request object
:returns: content type as a string (e.g. text/plain, application/json)
:raises: HTTPNotAcceptable if the requested content type is not acceptable
:raises: HTTPBadRequest if the 'format' query param is provided and
not valid UTF-8
"""
query_format = get_param(req, 'format')
if query_format:
req.accept = FORMAT2CONTENT_TYPE.get(
query_format.lower(), FORMAT2CONTENT_TYPE['plain'])
out_content_type = req.accept.best_match(
['text/plain', 'application/json', 'application/xml', 'text/xml'])
if not out_content_type:
raise HTTPNotAcceptable(request=req)
return out_content_type
def get_name_and_placement(request, minsegs=1, maxsegs=None,
rest_with_last=False):
"""
Utility function to split and validate the request path and storage
policy. The storage policy index is extracted from the headers of
the request and converted to a StoragePolicy instance. The
remaining args are passed through to
:meth:`split_and_validate_path`.
:returns: a list, result of :meth:`split_and_validate_path` with
the BaseStoragePolicy instance appended on the end
:raises: HTTPServiceUnavailable if the path is invalid or no policy exists
with the extracted policy_index.
"""
policy_index = request.headers.get('X-Backend-Storage-Policy-Index')
policy = POLICIES.get_by_index(policy_index)
if not policy:
raise HTTPServiceUnavailable(
body=_("No policy with index %s") % policy_index,
request=request, content_type='text/plain')
results = split_and_validate_path(request, minsegs=minsegs,
maxsegs=maxsegs,
rest_with_last=rest_with_last)
results.append(policy)
return results
def split_and_validate_path(request, minsegs=1, maxsegs=None,
rest_with_last=False):
"""
Utility function to split and validate the request path.
:returns: result of :meth:`~swift.common.utils.split_path` if
everything's okay
:raises: HTTPBadRequest if something's not okay
"""
try:
segs = split_path(unquote(request.path),
minsegs, maxsegs, rest_with_last)
validate_device_partition(segs[0], segs[1])
return segs
except ValueError as err:
raise HTTPBadRequest(body=str(err), request=request,
content_type='text/plain')
def is_user_meta(server_type, key):
"""
Tests if a header key starts with and is longer than the user
metadata prefix for given server type.
:param server_type: type of backend server i.e. [account|container|object]
:param key: header key
:returns: True if the key satisfies the test, False otherwise
"""
if len(key) <= 8 + len(server_type):
return False
return key.lower().startswith(get_user_meta_prefix(server_type))
def is_sys_meta(server_type, key):
"""
Tests if a header key starts with and is longer than the system
metadata prefix for given server type.
:param server_type: type of backend server i.e. [account|container|object]
:param key: header key
:returns: True if the key satisfies the test, False otherwise
"""
if len(key) <= 11 + len(server_type):
return False
return key.lower().startswith(get_sys_meta_prefix(server_type))
def is_sys_or_user_meta(server_type, key):
"""
Tests if a header key starts with and is longer than the user or system
metadata prefix for given server type.
:param server_type: type of backend server i.e. [account|container|object]
:param key: header key
:returns: True if the key satisfies the test, False otherwise
"""
return is_user_meta(server_type, key) or is_sys_meta(server_type, key)
def strip_user_meta_prefix(server_type, key):
"""
Removes the user metadata prefix for a given server type from the start
of a header key.
:param server_type: type of backend server i.e. [account|container|object]
:param key: header key
:returns: stripped header key
"""
return key[len(get_user_meta_prefix(server_type)):]
def strip_sys_meta_prefix(server_type, key):
"""
Removes the system metadata prefix for a given server type from the start
of a header key.
:param server_type: type of backend server i.e. [account|container|object]
:param key: header key
:returns: stripped header key
"""
return key[len(get_sys_meta_prefix(server_type)):]
def get_user_meta_prefix(server_type):
"""
Returns the prefix for user metadata headers for given server type.
This prefix defines the namespace for headers that will be persisted
by backend servers.
:param server_type: type of backend server i.e. [account|container|object]
:returns: prefix string for server type's user metadata headers
"""
return 'x-%s-%s-' % (server_type.lower(), 'meta')
def get_sys_meta_prefix(server_type):
"""
Returns the prefix for system metadata headers for given server type.
This prefix defines the namespace for headers that will be persisted
by backend servers.
:param server_type: type of backend server i.e. [account|container|object]
:returns: prefix string for server type's system metadata headers
"""
return 'x-%s-%s-' % (server_type.lower(), 'sysmeta')
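# Illustrative sketch (not part of the original module): the prefix helpers
# and the predicates above compose like this:
#
#     >>> get_user_meta_prefix('object')
#     'x-object-meta-'
#     >>> is_user_meta('object', 'X-Object-Meta-Color')
#     True
#     >>> strip_user_meta_prefix('object', 'x-object-meta-color')
#     'color'
#     >>> get_sys_meta_prefix('container')
#     'x-container-sysmeta-'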
def remove_items(headers, condition):
"""
Removes items from a dict whose keys satisfy
the given condition.
:param headers: a dict of headers
:param condition: a function that will be passed the header key as a
single argument and should return True if the header
is to be removed.
:returns: a dict, possibly empty, of headers that have been removed
"""
removed = {}
    # materialize the matching keys first; popping from the dict while a
    # lazy filter is still iterating it would raise under Python 3
    keys = list(filter(condition, headers))
removed.update((key, headers.pop(key)) for key in keys)
return removed
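# Illustrative sketch (not part of the original module): stripping all user
# metadata out of a header dict in one pass:
#
#     >>> headers = {'Content-Length': '42', 'X-Object-Meta-Color': 'red'}
#     >>> remove_items(headers, lambda k: is_user_meta('object', k))
#     {'X-Object-Meta-Color': 'red'}
#     >>> headers
#     {'Content-Length': '42'}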
def copy_header_subset(from_r, to_r, condition):
"""
Will copy desired subset of headers from from_r to to_r.
:param from_r: a swob Request or Response
:param to_r: a swob Request or Response
:param condition: a function that will be passed the header key as a
single argument and should return True if the header
is to be copied.
"""
for k, v in from_r.headers.items():
if condition(k):
to_r.headers[k] = v
class SegmentedIterable(object):
"""
Iterable that returns the object contents for a large object.
:param req: original request object
:param app: WSGI application from which segments will come
    :param listing_iter: iterable yielding the object segments to fetch,
                         along with the byte subranges to fetch. Each item
                         is a 5-tuple (object-path, object-etag, object-size,
                         first-byte, last-byte); etag and size may be None
                         if unknown, and first-byte/last-byte may be None
                         to fetch the whole segment.
:param max_get_time: maximum permitted duration of a GET request (seconds)
:param logger: logger object
:param swift_source: value of swift.source in subrequest environ
(just for logging)
:param ua_suffix: string to append to user-agent.
:param name: name of manifest (used in logging only)
:param response_body_length: optional response body length for
the response being sent to the client.
"""
def __init__(self, req, app, listing_iter, max_get_time,
logger, ua_suffix, swift_source,
name='<not specified>', response_body_length=None):
self.req = req
self.app = app
self.listing_iter = listing_iter
self.max_get_time = max_get_time
self.logger = logger
self.ua_suffix = " " + ua_suffix
self.swift_source = swift_source
self.name = name
self.response_body_length = response_body_length
self.peeked_chunk = None
self.app_iter = self._internal_iter()
self.validated_first_segment = False
self.current_resp = None
def _coalesce_requests(self):
start_time = time.time()
pending_req = None
pending_etag = None
pending_size = None
try:
for seg_path, seg_etag, seg_size, first_byte, last_byte \
in self.listing_iter:
first_byte = first_byte or 0
go_to_end = last_byte is None or (
seg_size is not None and last_byte == seg_size - 1)
if time.time() - start_time > self.max_get_time:
raise SegmentError(
'ERROR: While processing manifest %s, '
'max LO GET time of %ds exceeded' %
(self.name, self.max_get_time))
# The "multipart-manifest=get" query param ensures that the
# segment is a plain old object, not some flavor of large
# object; therefore, its etag is its MD5sum and hence we can
# check it.
path = seg_path + '?multipart-manifest=get'
seg_req = make_subrequest(
self.req.environ, path=path, method='GET',
headers={'x-auth-token': self.req.headers.get(
'x-auth-token')},
agent=('%(orig)s ' + self.ua_suffix),
swift_source=self.swift_source)
seg_req_rangeval = None
if first_byte != 0 or not go_to_end:
seg_req_rangeval = "%s-%s" % (
first_byte, '' if go_to_end else last_byte)
seg_req.headers['Range'] = "bytes=" + seg_req_rangeval
# We can only coalesce if paths match and we know the segment
# size (so we can check that the ranges will be allowed)
if pending_req and pending_req.path == seg_req.path and \
seg_size is not None:
# Make a new Range object so that we don't goof up the
# existing one in case of invalid ranges. Note that a
# range set with too many individual byteranges is
# invalid, so we can combine N valid byteranges and 1
# valid byterange and get an invalid range set.
if pending_req.range:
new_range_str = str(pending_req.range)
else:
new_range_str = "bytes=0-%d" % (seg_size - 1)
if seg_req.range:
new_range_str += "," + seg_req_rangeval
else:
new_range_str += ",0-%d" % (seg_size - 1)
if Range(new_range_str).ranges_for_length(seg_size):
# Good news! We can coalesce the requests
pending_req.headers['Range'] = new_range_str
continue
# else, Too many ranges, or too much backtracking, or ...
if pending_req:
yield pending_req, pending_etag, pending_size
pending_req = seg_req
pending_etag = seg_etag
pending_size = seg_size
except ListingIterError:
e_type, e_value, e_traceback = sys.exc_info()
if time.time() - start_time > self.max_get_time:
raise SegmentError(
'ERROR: While processing manifest %s, '
'max LO GET time of %ds exceeded' %
(self.name, self.max_get_time))
if pending_req:
yield pending_req, pending_etag, pending_size
six.reraise(e_type, e_value, e_traceback)
if time.time() - start_time > self.max_get_time:
raise SegmentError(
'ERROR: While processing manifest %s, '
'max LO GET time of %ds exceeded' %
(self.name, self.max_get_time))
if pending_req:
yield pending_req, pending_etag, pending_size
def _internal_iter(self):
bytes_left = self.response_body_length
try:
for seg_req, seg_etag, seg_size in self._coalesce_requests():
seg_resp = seg_req.get_response(self.app)
if not is_success(seg_resp.status_int):
close_if_possible(seg_resp.app_iter)
raise SegmentError(
'ERROR: While processing manifest %s, '
'got %d while retrieving %s' %
(self.name, seg_resp.status_int, seg_req.path))
elif ((seg_etag and (seg_resp.etag != seg_etag)) or
(seg_size and (seg_resp.content_length != seg_size) and
not seg_req.range)):
# The content-length check is for security reasons. Seems
# possible that an attacker could upload a >1mb object and
# then replace it with a much smaller object with same
# etag. Then create a big nested SLO that calls that
# object many times which would hammer our obj servers. If
# this is a range request, don't check content-length
# because it won't match.
close_if_possible(seg_resp.app_iter)
raise SegmentError(
'Object segment no longer valid: '
'%(path)s etag: %(r_etag)s != %(s_etag)s or '
'%(r_size)s != %(s_size)s.' %
{'path': seg_req.path, 'r_etag': seg_resp.etag,
'r_size': seg_resp.content_length,
's_etag': seg_etag,
's_size': seg_size})
else:
self.current_resp = seg_resp
seg_hash = None
if seg_resp.etag and not seg_req.headers.get('Range'):
                    # Only calculate the MD5 if we can use it to validate
seg_hash = hashlib.md5()
document_iters = maybe_multipart_byteranges_to_document_iters(
seg_resp.app_iter,
seg_resp.headers['Content-Type'])
for chunk in itertools.chain.from_iterable(document_iters):
if seg_hash:
seg_hash.update(chunk)
if bytes_left is None:
yield chunk
elif bytes_left >= len(chunk):
yield chunk
bytes_left -= len(chunk)
else:
yield chunk[:bytes_left]
bytes_left -= len(chunk)
close_if_possible(seg_resp.app_iter)
raise SegmentError(
'Too many bytes for %(name)s; truncating in '
'%(seg)s with %(left)d bytes left' %
{'name': self.name, 'seg': seg_req.path,
'left': bytes_left})
close_if_possible(seg_resp.app_iter)
if seg_hash and seg_hash.hexdigest() != seg_resp.etag:
raise SegmentError(
"Bad MD5 checksum in %(name)s for %(seg)s: headers had"
" %(etag)s, but object MD5 was actually %(actual)s" %
{'seg': seg_req.path, 'etag': seg_resp.etag,
'name': self.name, 'actual': seg_hash.hexdigest()})
if bytes_left:
raise SegmentError(
'Not enough bytes for %s; closing connection' % self.name)
except (ListingIterError, SegmentError):
self.logger.exception(_('ERROR: An error occurred '
'while retrieving segments'))
raise
finally:
if self.current_resp:
close_if_possible(self.current_resp.app_iter)
def app_iter_range(self, *a, **kw):
"""
swob.Response will only respond with a 206 status in certain cases; one
of those is if the body iterator responds to .app_iter_range().
However, this object (or really, its listing iter) is smart enough to
handle the range stuff internally, so we just no-op this out for swob.
"""
return self
def validate_first_segment(self):
"""
Start fetching object data to ensure that the first segment (if any) is
valid. This is to catch cases like "first segment is missing" or
"first segment's etag doesn't match manifest".
Note: this does not validate that you have any segments. A
zero-segment large object is not erroneous; it is just empty.
"""
if self.validated_first_segment:
return
self.validated_first_segment = True
try:
self.peeked_chunk = next(self.app_iter)
except StopIteration:
pass
def __iter__(self):
if self.peeked_chunk is not None:
pc = self.peeked_chunk
self.peeked_chunk = None
return itertools.chain([pc], self.app_iter)
else:
return self.app_iter
def close(self):
"""
        Called when the client disconnects. Ensure that the connection to
        the backend server is closed.
"""
close_if_possible(self.app_iter)
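# Illustrative sketch (assumption, not in the original source): a middleware
# serving a large object would typically build the listing iterable of
# 5-tuples and hand the SegmentedIterable to swob, roughly like:
#
#     segments = iter([('/v1/AUTH_a/c/seg_00', None, None, None, None)])
#     si = SegmentedIterable(req, self.app, segments, max_get_time=86400,
#                            logger=self.logger,
#                            ua_suffix='SLO MultipartGET',
#                            swift_source='SLO', name='a/c/manifest')
#     si.validate_first_segment()  # raise early on a broken first segment
#     resp.app_iter = si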
|
|
# Copyright 2002-2009, Distributed Systems Architecture Group, Universidad
# Complutense de Madrid (dsa-research.org)
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OpenNebula.org test suite.
"""
__docformat__ = 'epytext'
import unittest
import sys
from libcloud.utils.py3 import httplib
from libcloud.compute.base import Node, NodeImage, NodeSize, NodeState
from libcloud.compute.drivers.opennebula import *
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.common.types import InvalidCredsError
from libcloud.test import MockResponse, MockHttp
from libcloud.test.compute import TestCaseMixin
from libcloud.test.secrets import OPENNEBULA_PARAMS
class OpenNebulaCaseMixin(TestCaseMixin):
def test_reboot_node_response(self):
pass
class OpenNebula_ResponseTests(unittest.TestCase):
XML = """<?xml version="1.0" encoding="UTF-8"?><root/>"""
def test_unauthorized_response(self):
http_response = MockResponse(httplib.UNAUTHORIZED,
OpenNebula_ResponseTests.XML,
headers={'content-type':
'application/xml'})
        self.assertRaises(
            InvalidCredsError,
            lambda: OpenNebulaResponse(http_response, None).parse_body())
class OpenNebula_1_4_Tests(unittest.TestCase, OpenNebulaCaseMixin):
"""
OpenNebula.org test suite for OpenNebula v1.4.
"""
def setUp(self):
"""
Setup test environment.
"""
OpenNebulaNodeDriver.connectionCls.conn_classes = (
OpenNebula_1_4_MockHttp, OpenNebula_1_4_MockHttp)
self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('1.4',))
def test_create_node(self):
"""
Test create_node functionality.
"""
image = NodeImage(id=5, name='Ubuntu 9.04 LAMP', driver=self.driver)
size = NodeSize(id=1, name='small', ram=None, disk=None,
bandwidth=None, price=None, driver=self.driver)
networks = list()
networks.append(OpenNebulaNetwork(id=5, name='Network 5',
address='192.168.0.0', size=256, driver=self.driver))
networks.append(OpenNebulaNetwork(id=15, name='Network 15',
address='192.168.1.0', size=256, driver=self.driver))
node = self.driver.create_node(name='Compute 5', image=image,
size=size, networks=networks)
self.assertEqual(node.id, '5')
self.assertEqual(node.name, 'Compute 5')
self.assertEqual(node.state,
OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE'])
self.assertEqual(node.public_ips[0].name, None)
self.assertEqual(node.public_ips[0].id, '5')
self.assertEqual(node.public_ips[0].address, '192.168.0.1')
self.assertEqual(node.public_ips[0].size, 1)
self.assertEqual(node.public_ips[1].name, None)
self.assertEqual(node.public_ips[1].id, '15')
self.assertEqual(node.public_ips[1].address, '192.168.1.1')
self.assertEqual(node.public_ips[1].size, 1)
self.assertEqual(node.private_ips, [])
self.assertEqual(node.image.id, '5')
self.assertEqual(node.image.extra['dev'], 'sda1')
def test_destroy_node(self):
"""
Test destroy_node functionality.
"""
node = Node(5, None, None, None, None, self.driver)
ret = self.driver.destroy_node(node)
self.assertTrue(ret)
def test_list_nodes(self):
"""
Test list_nodes functionality.
"""
nodes = self.driver.list_nodes()
self.assertEqual(len(nodes), 3)
node = nodes[0]
self.assertEqual(node.id, '5')
self.assertEqual(node.name, 'Compute 5')
self.assertEqual(node.state,
OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE'])
self.assertEqual(node.public_ips[0].id, '5')
self.assertEqual(node.public_ips[0].name, None)
self.assertEqual(node.public_ips[0].address, '192.168.0.1')
self.assertEqual(node.public_ips[0].size, 1)
self.assertEqual(node.public_ips[1].id, '15')
self.assertEqual(node.public_ips[1].name, None)
self.assertEqual(node.public_ips[1].address, '192.168.1.1')
self.assertEqual(node.public_ips[1].size, 1)
self.assertEqual(node.private_ips, [])
self.assertEqual(node.image.id, '5')
self.assertEqual(node.image.extra['dev'], 'sda1')
node = nodes[1]
self.assertEqual(node.id, '15')
self.assertEqual(node.name, 'Compute 15')
self.assertEqual(node.state,
OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE'])
self.assertEqual(node.public_ips[0].id, '5')
self.assertEqual(node.public_ips[0].name, None)
self.assertEqual(node.public_ips[0].address, '192.168.0.2')
self.assertEqual(node.public_ips[0].size, 1)
self.assertEqual(node.public_ips[1].id, '15')
self.assertEqual(node.public_ips[1].name, None)
self.assertEqual(node.public_ips[1].address, '192.168.1.2')
self.assertEqual(node.public_ips[1].size, 1)
self.assertEqual(node.private_ips, [])
self.assertEqual(node.image.id, '15')
self.assertEqual(node.image.extra['dev'], 'sda1')
node = nodes[2]
self.assertEqual(node.id, '25')
self.assertEqual(node.name, 'Compute 25')
self.assertEqual(node.state,
NodeState.UNKNOWN)
self.assertEqual(node.public_ips[0].id, '5')
self.assertEqual(node.public_ips[0].name, None)
self.assertEqual(node.public_ips[0].address, '192.168.0.3')
self.assertEqual(node.public_ips[0].size, 1)
self.assertEqual(node.public_ips[1].id, '15')
self.assertEqual(node.public_ips[1].name, None)
self.assertEqual(node.public_ips[1].address, '192.168.1.3')
self.assertEqual(node.public_ips[1].size, 1)
self.assertEqual(node.private_ips, [])
self.assertEqual(node.image, None)
def test_list_images(self):
"""
Test list_images functionality.
"""
images = self.driver.list_images()
self.assertEqual(len(images), 2)
image = images[0]
self.assertEqual(image.id, '5')
self.assertEqual(image.name, 'Ubuntu 9.04 LAMP')
self.assertEqual(image.extra['size'], '2048')
self.assertEqual(image.extra['url'],
'file:///images/ubuntu/jaunty.img')
image = images[1]
self.assertEqual(image.id, '15')
self.assertEqual(image.name, 'Ubuntu 9.04 LAMP')
self.assertEqual(image.extra['size'], '2048')
self.assertEqual(image.extra['url'],
'file:///images/ubuntu/jaunty.img')
def test_list_sizes(self):
"""
Test list_sizes functionality.
"""
sizes = self.driver.list_sizes()
self.assertEqual(len(sizes), 3)
size = sizes[0]
self.assertEqual(size.id, '1')
self.assertEqual(size.name, 'small')
self.assertEqual(size.ram, None)
self.assertEqual(size.disk, None)
self.assertEqual(size.bandwidth, None)
self.assertEqual(size.price, None)
size = sizes[1]
self.assertEqual(size.id, '2')
self.assertEqual(size.name, 'medium')
self.assertEqual(size.ram, None)
self.assertEqual(size.disk, None)
self.assertEqual(size.bandwidth, None)
self.assertEqual(size.price, None)
size = sizes[2]
self.assertEqual(size.id, '3')
self.assertEqual(size.name, 'large')
self.assertEqual(size.ram, None)
self.assertEqual(size.disk, None)
self.assertEqual(size.bandwidth, None)
self.assertEqual(size.price, None)
def test_list_locations(self):
"""
Test list_locations functionality.
"""
locations = self.driver.list_locations()
self.assertEqual(len(locations), 1)
location = locations[0]
self.assertEqual(location.id, '0')
self.assertEqual(location.name, '')
self.assertEqual(location.country, '')
def test_ex_list_networks(self):
"""
Test ex_list_networks functionality.
"""
networks = self.driver.ex_list_networks()
self.assertEqual(len(networks), 2)
network = networks[0]
self.assertEqual(network.id, '5')
self.assertEqual(network.name, 'Network 5')
self.assertEqual(network.address, '192.168.0.0')
self.assertEqual(network.size, '256')
network = networks[1]
self.assertEqual(network.id, '15')
self.assertEqual(network.name, 'Network 15')
self.assertEqual(network.address, '192.168.1.0')
self.assertEqual(network.size, '256')
def test_ex_node_action(self):
"""
Test ex_node_action functionality.
"""
node = Node(5, None, None, None, None, self.driver)
ret = self.driver.ex_node_action(node, ACTION.STOP)
self.assertTrue(ret)
class OpenNebula_2_0_Tests(unittest.TestCase, OpenNebulaCaseMixin):
"""
OpenNebula.org test suite for OpenNebula v2.0 through v2.2.
"""
def setUp(self):
"""
Setup test environment.
"""
OpenNebulaNodeDriver.connectionCls.conn_classes = (
OpenNebula_2_0_MockHttp, OpenNebula_2_0_MockHttp)
self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('2.0',))
def test_create_node(self):
"""
Test create_node functionality.
"""
image = NodeImage(id=5, name='Ubuntu 9.04 LAMP', driver=self.driver)
size = OpenNebulaNodeSize(id=1, name='small', ram=1024, cpu=1,
disk=None, bandwidth=None, price=None,
driver=self.driver)
networks = list()
networks.append(OpenNebulaNetwork(id=5, name='Network 5',
address='192.168.0.0', size=256, driver=self.driver))
networks.append(OpenNebulaNetwork(id=15, name='Network 15',
address='192.168.1.0', size=256, driver=self.driver))
context = {'hostname': 'compute-5'}
node = self.driver.create_node(name='Compute 5', image=image,
size=size, networks=networks,
context=context)
self.assertEqual(node.id, '5')
self.assertEqual(node.name, 'Compute 5')
self.assertEqual(node.state,
OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE'])
self.assertEqual(node.public_ips[0].id, '5')
self.assertEqual(node.public_ips[0].name, 'Network 5')
self.assertEqual(node.public_ips[0].address, '192.168.0.1')
self.assertEqual(node.public_ips[0].size, 1)
self.assertEqual(node.public_ips[0].extra['mac'], '02:00:c0:a8:00:01')
self.assertEqual(node.public_ips[1].id, '15')
self.assertEqual(node.public_ips[1].name, 'Network 15')
self.assertEqual(node.public_ips[1].address, '192.168.1.1')
self.assertEqual(node.public_ips[1].size, 1)
self.assertEqual(node.public_ips[1].extra['mac'], '02:00:c0:a8:01:01')
self.assertEqual(node.private_ips, [])
self.assertTrue(len([size for size in self.driver.list_sizes() \
if size.id == node.size.id]) == 1)
self.assertEqual(node.image.id, '5')
self.assertEqual(node.image.name, 'Ubuntu 9.04 LAMP')
self.assertEqual(node.image.extra['type'], 'DISK')
self.assertEqual(node.image.extra['target'], 'hda')
context = node.extra['context']
self.assertEqual(context['hostname'], 'compute-5')
def test_destroy_node(self):
"""
Test destroy_node functionality.
"""
node = Node(5, None, None, None, None, self.driver)
ret = self.driver.destroy_node(node)
self.assertTrue(ret)
def test_list_nodes(self):
"""
Test list_nodes functionality.
"""
nodes = self.driver.list_nodes()
self.assertEqual(len(nodes), 3)
node = nodes[0]
self.assertEqual(node.id, '5')
self.assertEqual(node.name, 'Compute 5')
self.assertEqual(node.state,
OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE'])
self.assertEqual(node.public_ips[0].id, '5')
self.assertEqual(node.public_ips[0].name, 'Network 5')
self.assertEqual(node.public_ips[0].address, '192.168.0.1')
self.assertEqual(node.public_ips[0].size, 1)
self.assertEqual(node.public_ips[0].extra['mac'], '02:00:c0:a8:00:01')
self.assertEqual(node.public_ips[1].id, '15')
self.assertEqual(node.public_ips[1].name, 'Network 15')
self.assertEqual(node.public_ips[1].address, '192.168.1.1')
self.assertEqual(node.public_ips[1].size, 1)
self.assertEqual(node.public_ips[1].extra['mac'], '02:00:c0:a8:01:01')
self.assertEqual(node.private_ips, [])
self.assertTrue(len([size for size in self.driver.list_sizes() \
if size.id == node.size.id]) == 1)
self.assertEqual(node.size.id, '1')
self.assertEqual(node.size.name, 'small')
self.assertEqual(node.size.ram, 1024)
self.assertTrue(node.size.cpu is None or isinstance(node.size.cpu,
int))
self.assertTrue(node.size.vcpu is None or isinstance(node.size.vcpu,
int))
self.assertEqual(node.size.cpu, 1)
self.assertEqual(node.size.vcpu, None)
self.assertEqual(node.size.disk, None)
self.assertEqual(node.size.bandwidth, None)
self.assertEqual(node.size.price, None)
self.assertTrue(len([image for image in self.driver.list_images() \
if image.id == node.image.id]) == 1)
self.assertEqual(node.image.id, '5')
self.assertEqual(node.image.name, 'Ubuntu 9.04 LAMP')
self.assertEqual(node.image.extra['type'], 'DISK')
self.assertEqual(node.image.extra['target'], 'hda')
context = node.extra['context']
self.assertEqual(context['hostname'], 'compute-5')
node = nodes[1]
self.assertEqual(node.id, '15')
self.assertEqual(node.name, 'Compute 15')
self.assertEqual(node.state,
OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE'])
self.assertEqual(node.public_ips[0].id, '5')
self.assertEqual(node.public_ips[0].name, 'Network 5')
self.assertEqual(node.public_ips[0].address, '192.168.0.2')
self.assertEqual(node.public_ips[0].size, 1)
self.assertEqual(node.public_ips[0].extra['mac'], '02:00:c0:a8:00:02')
self.assertEqual(node.public_ips[1].id, '15')
self.assertEqual(node.public_ips[1].name, 'Network 15')
self.assertEqual(node.public_ips[1].address, '192.168.1.2')
self.assertEqual(node.public_ips[1].size, 1)
self.assertEqual(node.public_ips[1].extra['mac'], '02:00:c0:a8:01:02')
self.assertEqual(node.private_ips, [])
self.assertTrue(len([size for size in self.driver.list_sizes() \
if size.id == node.size.id]) == 1)
self.assertEqual(node.size.id, '1')
self.assertEqual(node.size.name, 'small')
self.assertEqual(node.size.ram, 1024)
self.assertTrue(node.size.cpu is None or isinstance(node.size.cpu,
int))
self.assertTrue(node.size.vcpu is None or isinstance(node.size.vcpu,
int))
self.assertEqual(node.size.cpu, 1)
self.assertEqual(node.size.vcpu, None)
self.assertEqual(node.size.disk, None)
self.assertEqual(node.size.bandwidth, None)
self.assertEqual(node.size.price, None)
self.assertTrue(len([image for image in self.driver.list_images() \
if image.id == node.image.id]) == 1)
self.assertEqual(node.image.id, '15')
self.assertEqual(node.image.name, 'Ubuntu 9.04 LAMP')
self.assertEqual(node.image.extra['type'], 'DISK')
self.assertEqual(node.image.extra['target'], 'hda')
context = node.extra['context']
self.assertEqual(context['hostname'], 'compute-15')
node = nodes[2]
self.assertEqual(node.id, '25')
self.assertEqual(node.name, 'Compute 25')
self.assertEqual(node.state,
NodeState.UNKNOWN)
self.assertEqual(node.public_ips[0].id, '5')
self.assertEqual(node.public_ips[0].name, 'Network 5')
self.assertEqual(node.public_ips[0].address, '192.168.0.3')
self.assertEqual(node.public_ips[0].size, 1)
self.assertEqual(node.public_ips[0].extra['mac'], '02:00:c0:a8:00:03')
self.assertEqual(node.public_ips[1].id, '15')
self.assertEqual(node.public_ips[1].name, 'Network 15')
self.assertEqual(node.public_ips[1].address, '192.168.1.3')
self.assertEqual(node.public_ips[1].size, 1)
self.assertEqual(node.public_ips[1].extra['mac'], '02:00:c0:a8:01:03')
self.assertEqual(node.private_ips, [])
self.assertEqual(node.size, None)
self.assertEqual(node.image, None)
context = node.extra['context']
self.assertEqual(context, {})
def test_list_images(self):
"""
Test list_images functionality.
"""
images = self.driver.list_images()
self.assertEqual(len(images), 2)
image = images[0]
self.assertEqual(image.id, '5')
self.assertEqual(image.name, 'Ubuntu 9.04 LAMP')
self.assertEqual(image.extra['description'],
'Ubuntu 9.04 LAMP Description')
self.assertEqual(image.extra['type'], 'OS')
self.assertEqual(image.extra['size'], '2048')
image = images[1]
self.assertEqual(image.id, '15')
self.assertEqual(image.name, 'Ubuntu 9.04 LAMP')
self.assertEqual(image.extra['description'],
'Ubuntu 9.04 LAMP Description')
self.assertEqual(image.extra['type'], 'OS')
self.assertEqual(image.extra['size'], '2048')
def test_list_sizes(self):
"""
Test list_sizes functionality.
"""
sizes = self.driver.list_sizes()
self.assertEqual(len(sizes), 4)
size = sizes[0]
self.assertEqual(size.id, '1')
self.assertEqual(size.name, 'small')
self.assertEqual(size.ram, 1024)
self.assertTrue(size.cpu is None or isinstance(size.cpu, int))
self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int))
self.assertEqual(size.cpu, 1)
self.assertEqual(size.vcpu, None)
self.assertEqual(size.disk, None)
self.assertEqual(size.bandwidth, None)
self.assertEqual(size.price, None)
size = sizes[1]
self.assertEqual(size.id, '2')
self.assertEqual(size.name, 'medium')
self.assertEqual(size.ram, 4096)
self.assertTrue(size.cpu is None or isinstance(size.cpu, int))
self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int))
self.assertEqual(size.cpu, 4)
self.assertEqual(size.vcpu, None)
self.assertEqual(size.disk, None)
self.assertEqual(size.bandwidth, None)
self.assertEqual(size.price, None)
size = sizes[2]
self.assertEqual(size.id, '3')
self.assertEqual(size.name, 'large')
self.assertEqual(size.ram, 8192)
self.assertTrue(size.cpu is None or isinstance(size.cpu, int))
self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int))
self.assertEqual(size.cpu, 8)
self.assertEqual(size.vcpu, None)
self.assertEqual(size.disk, None)
self.assertEqual(size.bandwidth, None)
self.assertEqual(size.price, None)
size = sizes[3]
self.assertEqual(size.id, '4')
self.assertEqual(size.name, 'custom')
self.assertEqual(size.ram, 0)
self.assertEqual(size.cpu, 0)
self.assertEqual(size.vcpu, None)
self.assertEqual(size.disk, None)
self.assertEqual(size.bandwidth, None)
self.assertEqual(size.price, None)
def test_list_locations(self):
"""
Test list_locations functionality.
"""
locations = self.driver.list_locations()
self.assertEqual(len(locations), 1)
location = locations[0]
self.assertEqual(location.id, '0')
self.assertEqual(location.name, '')
self.assertEqual(location.country, '')
def test_ex_list_networks(self):
"""
Test ex_list_networks functionality.
"""
networks = self.driver.ex_list_networks()
self.assertEqual(len(networks), 2)
network = networks[0]
self.assertEqual(network.id, '5')
self.assertEqual(network.name, 'Network 5')
self.assertEqual(network.address, '192.168.0.0')
self.assertEqual(network.size, '256')
network = networks[1]
self.assertEqual(network.id, '15')
self.assertEqual(network.name, 'Network 15')
self.assertEqual(network.address, '192.168.1.0')
self.assertEqual(network.size, '256')
class OpenNebula_3_0_Tests(unittest.TestCase, OpenNebulaCaseMixin):
"""
OpenNebula.org test suite for OpenNebula v3.0.
"""
def setUp(self):
"""
Setup test environment.
"""
OpenNebulaNodeDriver.connectionCls.conn_classes = (
OpenNebula_3_0_MockHttp, OpenNebula_3_0_MockHttp)
self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('3.0',))
def test_ex_list_networks(self):
"""
Test ex_list_networks functionality.
"""
networks = self.driver.ex_list_networks()
self.assertEqual(len(networks), 2)
network = networks[0]
self.assertEqual(network.id, '5')
self.assertEqual(network.name, 'Network 5')
self.assertEqual(network.address, '192.168.0.0')
self.assertEqual(network.size, '256')
self.assertEqual(network.extra['public'], 'YES')
network = networks[1]
self.assertEqual(network.id, '15')
self.assertEqual(network.name, 'Network 15')
self.assertEqual(network.address, '192.168.1.0')
self.assertEqual(network.size, '256')
self.assertEqual(network.extra['public'], 'NO')
def test_ex_node_set_save_name(self):
"""
        Test ex_node_set_save_name functionality.
"""
image = NodeImage(id=5, name='Ubuntu 9.04 LAMP', driver=self.driver)
node = Node(5, None, None, None, None, self.driver, image=image)
ret = self.driver.ex_node_set_save_name(node, 'test')
self.assertTrue(ret)
class OpenNebula_3_2_Tests(unittest.TestCase, OpenNebulaCaseMixin):
"""
OpenNebula.org test suite for OpenNebula v3.2.
"""
def setUp(self):
"""
Setup test environment.
"""
OpenNebulaNodeDriver.connectionCls.conn_classes = (
OpenNebula_3_2_MockHttp, OpenNebula_3_2_MockHttp)
self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('3.2',))
def test_reboot_node(self):
"""
Test reboot_node functionality.
"""
image = NodeImage(id=5, name='Ubuntu 9.04 LAMP', driver=self.driver)
node = Node(5, None, None, None, None, self.driver, image=image)
ret = self.driver.reboot_node(node)
self.assertTrue(ret)
def test_list_sizes(self):
"""
        Test list_sizes functionality.
"""
sizes = self.driver.list_sizes()
self.assertEqual(len(sizes), 3)
size = sizes[0]
self.assertEqual(size.id, '1')
self.assertEqual(size.name, 'small')
self.assertEqual(size.ram, 1024)
self.assertTrue(size.cpu is None or isinstance(size.cpu, float))
self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int))
self.assertEqual(size.cpu, 1)
self.assertEqual(size.vcpu, None)
self.assertEqual(size.disk, None)
self.assertEqual(size.bandwidth, None)
self.assertEqual(size.price, None)
size = sizes[1]
self.assertEqual(size.id, '2')
self.assertEqual(size.name, 'medium')
self.assertEqual(size.ram, 4096)
self.assertTrue(size.cpu is None or isinstance(size.cpu, float))
self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int))
self.assertEqual(size.cpu, 4)
self.assertEqual(size.vcpu, None)
self.assertEqual(size.disk, None)
self.assertEqual(size.bandwidth, None)
self.assertEqual(size.price, None)
size = sizes[2]
self.assertEqual(size.id, '3')
self.assertEqual(size.name, 'large')
self.assertEqual(size.ram, 8192)
self.assertTrue(size.cpu is None or isinstance(size.cpu, float))
self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int))
self.assertEqual(size.cpu, 8)
self.assertEqual(size.vcpu, None)
self.assertEqual(size.disk, None)
self.assertEqual(size.bandwidth, None)
self.assertEqual(size.price, None)
class OpenNebula_1_4_MockHttp(MockHttp):
"""
Mock HTTP server for testing v1.4 of the OpenNebula.org compute driver.
"""
fixtures = ComputeFileFixtures('opennebula_1_4')
def _compute(self, method, url, body, headers):
"""
Compute pool resources.
"""
if method == 'GET':
body = self.fixtures.load('computes.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'POST':
body = self.fixtures.load('compute_5.xml')
return (httplib.CREATED, body, {},
httplib.responses[httplib.CREATED])
def _storage(self, method, url, body, headers):
"""
Storage pool resources.
"""
if method == 'GET':
body = self.fixtures.load('storage.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'POST':
body = self.fixtures.load('disk_5.xml')
return (httplib.CREATED, body, {},
httplib.responses[httplib.CREATED])
def _network(self, method, url, body, headers):
"""
Network pool resources.
"""
if method == 'GET':
body = self.fixtures.load('networks.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'POST':
body = self.fixtures.load('network_5.xml')
return (httplib.CREATED, body, {},
httplib.responses[httplib.CREATED])
def _compute_5(self, method, url, body, headers):
"""
Compute entry resource.
"""
if method == 'GET':
body = self.fixtures.load('compute_5.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'PUT':
body = ""
return (httplib.ACCEPTED, body, {},
httplib.responses[httplib.ACCEPTED])
if method == 'DELETE':
body = ""
return (httplib.OK, body, {},
httplib.responses[httplib.OK])
def _compute_15(self, method, url, body, headers):
"""
Compute entry resource.
"""
if method == 'GET':
body = self.fixtures.load('compute_15.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'PUT':
body = ""
return (httplib.ACCEPTED, body, {},
httplib.responses[httplib.ACCEPTED])
if method == 'DELETE':
body = ""
return (httplib.OK, body, {},
httplib.responses[httplib.OK])
def _compute_25(self, method, url, body, headers):
"""
Compute entry resource.
"""
if method == 'GET':
body = self.fixtures.load('compute_25.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'PUT':
body = ""
return (httplib.ACCEPTED, body, {},
httplib.responses[httplib.ACCEPTED])
if method == 'DELETE':
body = ""
return (httplib.OK, body, {},
httplib.responses[httplib.OK])
def _storage_5(self, method, url, body, headers):
"""
Storage entry resource.
"""
if method == 'GET':
body = self.fixtures.load('disk_5.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'DELETE':
body = ""
return (httplib.OK, body, {},
httplib.responses[httplib.OK])
def _storage_15(self, method, url, body, headers):
"""
Storage entry resource.
"""
if method == 'GET':
body = self.fixtures.load('disk_15.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'DELETE':
body = ""
return (httplib.OK, body, {},
httplib.responses[httplib.OK])
def _network_5(self, method, url, body, headers):
"""
Network entry resource.
"""
if method == 'GET':
body = self.fixtures.load('network_5.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'DELETE':
body = ""
return (httplib.OK, body, {},
httplib.responses[httplib.OK])
def _network_15(self, method, url, body, headers):
"""
Network entry resource.
"""
if method == 'GET':
body = self.fixtures.load('network_15.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'DELETE':
body = ""
return (httplib.OK, body, {},
httplib.responses[httplib.OK])
class OpenNebula_2_0_MockHttp(MockHttp):
"""
Mock HTTP server for testing v2.0 through v3.2 of the OpenNebula.org
compute driver.
"""
fixtures = ComputeFileFixtures('opennebula_2_0')
def _compute(self, method, url, body, headers):
"""
Compute pool resources.
"""
if method == 'GET':
body = self.fixtures.load('compute_collection.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'POST':
body = self.fixtures.load('compute_5.xml')
return (httplib.CREATED, body, {},
httplib.responses[httplib.CREATED])
def _storage(self, method, url, body, headers):
"""
Storage pool resources.
"""
if method == 'GET':
body = self.fixtures.load('storage_collection.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'POST':
body = self.fixtures.load('storage_5.xml')
return (httplib.CREATED, body, {},
httplib.responses[httplib.CREATED])
def _network(self, method, url, body, headers):
"""
Network pool resources.
"""
if method == 'GET':
body = self.fixtures.load('network_collection.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'POST':
body = self.fixtures.load('network_5.xml')
return (httplib.CREATED, body, {},
httplib.responses[httplib.CREATED])
def _compute_5(self, method, url, body, headers):
"""
Compute entry resource.
"""
if method == 'GET':
body = self.fixtures.load('compute_5.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'PUT':
body = ""
return (httplib.ACCEPTED, body, {},
httplib.responses[httplib.ACCEPTED])
if method == 'DELETE':
body = ""
return (httplib.NO_CONTENT, body, {},
httplib.responses[httplib.NO_CONTENT])
def _compute_15(self, method, url, body, headers):
"""
Compute entry resource.
"""
if method == 'GET':
body = self.fixtures.load('compute_15.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'PUT':
body = ""
return (httplib.ACCEPTED, body, {},
httplib.responses[httplib.ACCEPTED])
if method == 'DELETE':
body = ""
return (httplib.NO_CONTENT, body, {},
httplib.responses[httplib.NO_CONTENT])
def _compute_25(self, method, url, body, headers):
"""
Compute entry resource.
"""
if method == 'GET':
body = self.fixtures.load('compute_25.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'PUT':
body = ""
return (httplib.ACCEPTED, body, {},
httplib.responses[httplib.ACCEPTED])
if method == 'DELETE':
body = ""
return (httplib.NO_CONTENT, body, {},
httplib.responses[httplib.NO_CONTENT])
def _storage_5(self, method, url, body, headers):
"""
Storage entry resource.
"""
if method == 'GET':
body = self.fixtures.load('storage_5.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'DELETE':
body = ""
return (httplib.NO_CONTENT, body, {},
httplib.responses[httplib.NO_CONTENT])
def _storage_15(self, method, url, body, headers):
"""
Storage entry resource.
"""
if method == 'GET':
body = self.fixtures.load('storage_15.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'DELETE':
body = ""
return (httplib.NO_CONTENT, body, {},
httplib.responses[httplib.NO_CONTENT])
def _network_5(self, method, url, body, headers):
"""
Network entry resource.
"""
if method == 'GET':
body = self.fixtures.load('network_5.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'DELETE':
body = ""
return (httplib.NO_CONTENT, body, {},
httplib.responses[httplib.NO_CONTENT])
def _network_15(self, method, url, body, headers):
"""
Network entry resource.
"""
if method == 'GET':
body = self.fixtures.load('network_15.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'DELETE':
body = ""
return (httplib.NO_CONTENT, body, {},
httplib.responses[httplib.NO_CONTENT])
class OpenNebula_3_0_MockHttp(OpenNebula_2_0_MockHttp):
"""
Mock HTTP server for testing v3.0 of the OpenNebula.org compute driver.
"""
fixtures_3_0 = ComputeFileFixtures('opennebula_3_0')
def _network(self, method, url, body, headers):
"""
Network pool resources.
"""
if method == 'GET':
body = self.fixtures_3_0.load('network_collection.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'POST':
body = self.fixtures.load('network_5.xml')
return (httplib.CREATED, body, {},
httplib.responses[httplib.CREATED])
def _network_5(self, method, url, body, headers):
"""
Network entry resource.
"""
if method == 'GET':
body = self.fixtures_3_0.load('network_5.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'DELETE':
body = ""
return (httplib.NO_CONTENT, body, {},
httplib.responses[httplib.NO_CONTENT])
def _network_15(self, method, url, body, headers):
"""
Network entry resource.
"""
if method == 'GET':
body = self.fixtures_3_0.load('network_15.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'DELETE':
body = ""
return (httplib.NO_CONTENT, body, {},
httplib.responses[httplib.NO_CONTENT])
class OpenNebula_3_2_MockHttp(OpenNebula_3_0_MockHttp):
"""
Mock HTTP server for testing v3.2 of the OpenNebula.org compute driver.
"""
fixtures_3_2 = ComputeFileFixtures('opennebula_3_2')
def _compute_5(self, method, url, body, headers):
"""
Compute entry resource.
"""
if method == 'GET':
body = self.fixtures.load('compute_5.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'PUT':
body = ""
return (httplib.ACCEPTED, body, {},
httplib.responses[httplib.ACCEPTED])
if method == 'DELETE':
body = ""
return (httplib.NO_CONTENT, body, {},
httplib.responses[httplib.NO_CONTENT])
def _instance_type(self, method, url, body, headers):
"""
Instance type pool.
"""
if method == 'GET':
body = self.fixtures_3_2.load('instance_type_collection.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if __name__ == '__main__':
sys.exit(unittest.main())
|
|
import time
import datetime
from dateutil.parser import parse as datetime_parse
from tzlocal import get_localzone
import pytz
import os
import simplejson as json
import psycopg2
from psycopg2.extensions import AsIs
import argparse
import shapely
from shapely.geometry import mapping, shape, Point
from .pgpointcloud import (
DATA_TYPE_MAPPING,
build_pc_dimension, build_pc_schema, add_pc_schema,
create_pcpatch_table, create_temp_table,
insert_pcpoints, copy_pcpoints, insert_pcpatches, make_wkb_point
)
from .pgpointcloud_utils import PcRunTimeException, PcInvalidArgException
COORDINATES = ['X', 'Y', 'Z']
OVERRIDE_INPUT_FORMAT = [
['date', datetime_parse],
['time', datetime_parse],
['datetime', datetime_parse],
]
Config = {
'input_file': None,
'dsn': None,
'metadata': None,
'group_by': [],
'ignore': [],
'srid': None,
'pcid': None,
'table_name': None,
'table_action': None,
'date': [],
'time': [],
'datetime': [],
'timezone': get_localzone(),
'copy_mode': False,
'buffer_size': 1000,
'patch_size': 400
}
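# Illustrative sketch (assumption, not in the original source): callers are
# expected to clone Config with their own settings and hand the result to
# geojson_to_pgpointcloud(), e.g.:
#
#     geojson_to_pgpointcloud(dict(
#         Config,
#         input_file='points.geojson',
#         dsn='dbname=gis host=localhost',
#         table_name='public.points',
#     ))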
DSIn = None
DBConn = None
def open_input_file(f):
global DSIn
DSIn = json.load(open(f, 'r'))
if DSIn is None or DSIn.get('type', None) != 'FeatureCollection':
raise PcInvalidArgException(
message='Invalid input file'
)
return DSIn
def open_db_connection(dsn):
return psycopg2.connect(dsn)
def interpret_fields(layer):
def add_coordinate(dimensions, coord):
field_type = float
dimensions.append({
'index': None,
'name': coord,
'type': {
'source': field_type,
'dest': DATA_TYPE_MAPPING[field_type]
}
})
fields = {
'group_by': [],
'ignore': [],
'dimension': [],
'overrides': {}
}
if len(layer) < 1:
raise PcRunTimeException(
message='Layer has no fields'
)
add_coordinate(fields['dimension'], 'X')
add_coordinate(fields['dimension'], 'Y')
add_coordinate(fields['dimension'], 'Z')
# use the first feature
feat = layer[0]
properties = feat['properties']
keys = properties.keys()
keys.sort()
group_by = Config.get('group_by', [])
ignore = Config.get('ignore', [])
# date, time, datetime overrides
overrides = {}
for override_field, override_callback in OVERRIDE_INPUT_FORMAT:
column = Config.get(override_field, [])
overrides[override_field] = {
'column': column,
'callback': override_callback
}
# loop over each field
for idx in xrange(len(keys)):
key = keys[idx]
field_info = {
'index': idx,
'name': key,
}
field_type = type(properties[key])
# field in ignore list
if field_info['name'] in ignore:
fields['ignore'].append(field_info)
continue
# user-defined group_by list and field in that list
if group_by and field_info['name'] in group_by:
# add field to internal group_by list
fields['group_by'].append(field_info)
# field is string format
elif field_type == str:
# field not in user-defined group_by list
if not group_by:
# field in override
found = False
for treat_as, details in overrides.iteritems():
if field_info['name'] in details['column']:
found = True
break
if found:
if treat_as == 'date':
source = datetime.date
elif treat_as == 'time':
source = datetime.time
elif treat_as == 'datetime':
source = datetime.datetime
dest = DATA_TYPE_MAPPING[source]
field_info['type'] = {
'treat_as': treat_as,
'callback': details['callback'],
'source': source,
'dest': dest,
}
fields['dimension'].append(field_info)
# default is to add to group_by
else:
fields['group_by'].append(field_info)
# add field to internal ignore list
else:
fields['ignore'].append(field_info)
# field is supported
elif field_type in DATA_TYPE_MAPPING:
field_info['type'] = {
'source': field_type,
'dest': DATA_TYPE_MAPPING[field_type],
}
fields['dimension'].append(field_info)
# unknown field, add to internal ignore list
else:
fields['ignore'].append(field_info)
for key, indices in fields.iteritems():
if len(indices) < 1:
continue
if key == 'ignore':
label = 'ignored'
elif key == 'group_by':
label = 'grouped'
else:
continue
print "The following fields will be %s:" % label
for fld in indices:
print " %s" % fld['name']
return fields
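# Illustrative sketch (assumption): for a first feature whose properties are
# {'speed': 1.5, 'label': 'a', 'ts': '2014-01-01T00:00:00Z'} with
# Config['datetime'] == ['ts'] and no group_by/ignore settings,
# interpret_fields() yields X/Y/Z plus 'speed' and 'ts' as dimensions (the
# latter via the datetime override) and 'label' as a group_by field, since
# bare strings default to grouping.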
# TODO: commented out until GeoJSON CRS handling is actually hooked up
'''
def guess_layer_spatial_ref(layer):
extent = layer.GetExtent()
# is decimal degree?
# -180 <= X <= 180
# -90 <= X <= 90
if (
extent[0] >= -180. and
extent[1] <= 180. and
extent[2] >= -90. and
extent[3] <= 90.
):
# assume WGS84
return 4326
# cannot guess, return zero
return 0
'''
def _get_postgis_srid(proj4):
try:
cursor = DBConn.cursor()
cursor.execute("""
SELECT
srid
FROM spatial_ref_sys
WHERE proj4text = %s
""", [proj4])
if cursor.rowcount > 0:
srid = cursor.fetchone()[0]
else:
srid = 0
except psycopg2.Error:
DBConn.rollback()
return 0
finally:
cursor.close()
return srid
def get_layer_srid(layer):
srid = Config.get('srid', None)
if srid is not None:
return srid
# initial GeoJSON spec assumed WGS84 (EPSG:4326)
return 4326
# TODO: add support for GeoJSON CRS
'''
srs = layer.GetSpatialRef()
# no spatial reference system, attempt to guess
if not srs:
return guess_layer_spatial_ref(layer)
# invalid spatial reference system
elif srs.Validate() != 0:
return 0
# try to get the PostGIS SRID
srid = _get_postgis_srid(srs.ExportToProj4())
return srid
'''
def extract_group(feat, fields):
    num_group_by = len(fields['group_by'])
    group_list = []
    group_dict = {}
    # features here are plain GeoJSON dicts, so grouped values come from the
    # feature's properties by name (GetField is an OGR-ism left over from
    # the previous implementation)
    properties = feat['properties']
    for idx in xrange(num_group_by):
        group_list.append(properties[fields['group_by'][idx]['name']])
        group_dict[fields['group_by'][idx]['name']] = group_list[-1]
    return group_dict
def convert_date_to_seconds(the_date):
    '''
    convert date to number of seconds UTC from UNIX epoch
    value is a decimal to capture milliseconds
    '''
    # datetime.datetime is a subclass of datetime.date, so test for it
    # first; otherwise aware datetimes would never be normalized to UTC
    if isinstance(the_date, datetime.datetime):
        if the_date.tzinfo is not None:
            the_date = the_date.astimezone(pytz.UTC)
        the_date = the_date.date()
    # no can do
    elif not isinstance(the_date, datetime.date):
        raise PcInvalidArgException(
            message='Value cannot be coerced into a Date object'
        )
    the_date = datetime.datetime.combine(
        the_date,
        datetime.time(0, 0, 0, tzinfo=pytz.UTC)
    )
    return (
        the_date -
        datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC)
    ).total_seconds()
def convert_time_to_seconds(the_time):
    '''
    convert time to number of seconds UTC from 00:00:00 UTC
    value is a decimal to capture milliseconds
    '''
    if not isinstance(the_time, datetime.time):
        # if datetime, convert to a time that keeps its timezone
        if isinstance(the_time, datetime.datetime):
            the_time = the_time.timetz()
        # no can do
        else:
            raise PcInvalidArgException(
                message='Value cannot be coerced into a Time object'
            )
    if the_time.tzinfo is None:
        raise PcInvalidArgException(
            message='Time has no timezone'
        )
    # datetime.time supports neither astimezone() nor subtraction, so
    # anchor the time to the epoch date and diff against midnight UTC
    anchored = datetime.datetime.combine(
        datetime.date(1970, 1, 1), the_time
    ).astimezone(pytz.UTC)
    return (
        anchored -
        datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC)
    ).total_seconds()
def convert_datetime_to_seconds(the_datetime):
'''
convert datetime to number of seconds UTC from UNIX epoch
value is a decimal to capture milliseconds
'''
return (
the_datetime.astimezone(pytz.UTC) -
datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC)
).total_seconds()
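# Illustrative sketch (not part of the original module): all three
# converters normalize to UTC before differencing against the epoch:
#
#     >>> convert_date_to_seconds(datetime.date(1970, 1, 2))
#     86400.0
#     >>> convert_datetime_to_seconds(
#     ...     datetime.datetime(1970, 1, 1, 1, 0, 0, tzinfo=pytz.UTC))
#     3600.0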
def build_pcpoint_from_feature(feat, fields, struct_format=False):
geom = shape(feat['geometry'])
if not isinstance(geom, Point):
geom = geom.centroid
localtz = Config.get('timezone')
if localtz is None:
localtz = get_localzone()
properties = feat['properties']
vals = []
frmt = []
for dimension in fields['dimension']:
if struct_format:
frmt.append(dimension['type']['dest']['struct'])
# x, y, z dimension
if dimension['name'] in COORDINATES:
try:
vals.append(getattr(geom, dimension['name'].lower()))
except shapely.geos.DimensionError:
vals.append(0.)
# processing override
elif 'treat_as' in dimension['type']:
val = properties[dimension['name']]
treat_as = dimension['type']['treat_as']
callback = dimension['type']['callback']
if treat_as == 'date':
val = callback(val)
vals.append(
convert_date_to_seconds(
val.date()
)
)
elif treat_as == 'time':
val = callback(val).time()
if val.tzinfo is None:
val = localtz.localize(val)
vals.append(
convert_time_to_seconds(
val
)
)
elif treat_as == 'datetime':
val = callback(val)
if val.tzinfo is None:
val = localtz.localize(val)
vals.append(
convert_datetime_to_seconds(
val
)
)
# standard behavior
else:
val = properties[dimension['name']]
# cast data if needed
func = dimension['type']['dest'].get('cast', None)
if func is not None:
val = func(val)
vals.append(val)
if struct_format:
        return vals, ' '.join(frmt)
else:
return vals
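# Illustrative sketch (assumption; the exact struct codes come from
# DATA_TYPE_MAPPING): a 2D point feature at (1.0, 2.0) with one float
# property 'speed' would come back roughly as
#
#     vals, frmt = build_pcpoint_from_feature(feat, fields, True)
#     # vals -> [1.0, 2.0, 0.0, 1.5]   (Z defaults to 0. for 2D points)
#     # frmt -> 'd d d d'              (hypothetical struct codes)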
def import_layer(layer, file_table, pcid, fields):
buffer_size = Config.get('buffer_size')
copy_mode = Config.get('copy_mode')
num_features = len(layer)
# create temporary table for layer
temp_table = create_temp_table(DBConn)
frmt = None
wkb_set = []
# iterate over features
for idx in xrange(num_features):
feat = layer[idx]
# get group
group = extract_group(feat, fields)
# build pcpoint values
if frmt is None:
vals, frmt = build_pcpoint_from_feature(feat, fields, True)
else:
vals = build_pcpoint_from_feature(feat, fields)
# make wkb of pcpoint
wkb_set.append(make_wkb_point(pcid, frmt, vals))
if len(wkb_set) >= buffer_size:
if copy_mode is True:
copy_pcpoints(DBConn, temp_table, wkb_set, group)
else:
insert_pcpoints(DBConn, temp_table, wkb_set, group)
wkb_set = []
    if len(wkb_set) > 0:
        # final flush of any remaining points, honoring copy_mode just
        # like the buffered flushes above
        if copy_mode is True:
            copy_pcpoints(DBConn, temp_table, wkb_set, group)
        else:
            insert_pcpoints(DBConn, temp_table, wkb_set, group)
        wkb_set = []
file_name = Config.get('input_file', None)
if file_name:
file_name = os.path.basename(file_name)
# build patches for layer by distinct group
insert_pcpatches(
DBConn,
file_table,
temp_table,
layer,
Config.get('metadata', None),
file_name,
max_points_per_patch=Config.get('patch_size', 400)
)
return True
def get_pcid(layer, fields):
# process fields
# find what to group by, what to ignore, what to process
if not fields['dimension']:
return
# get SRID of layer
srid = get_layer_srid(layer)
# specified pcid
pcid = Config.get('pcid', None)
if pcid is None:
# build pointcloud schema
pc_schema = build_pc_schema(fields)
retry = 0
pcid = None
while pcid is None and retry < 5:
# add schema to database
pcid = add_pc_schema(DBConn, pc_schema, srid)
time.sleep(1)
retry += 1
if pcid is None:
raise PcRunTimeException(
message='Cannot create pointcloud schema'
)
return pcid
def convert_layer(layer, pcid, fields, file_name, file_table):
# do the actual import
import_layer(layer, file_table, pcid, fields)
print 'File "%s" has been imported into Table "%s" with PCID "%s"' % (
file_name,
file_table,
pcid
)
def convert_file():
layer = DSIn['features']
if not layer:
raise PcRunTimeException(
message='Input file has no layer'
)
fields = interpret_fields(layer)
pcid = get_pcid(layer, fields)
metadata = DSIn.get('properties', None)
if metadata and not Config.get('metadata', None):
Config['metadata'] = metadata
file_name = Config.get('input_file', None)
table_name = Config.get('table_name', None)
if table_name is None:
table_name = '"' + os.path.splitext(os.path.basename(file_name))[0] + '"'
else:
# qualify
table_name = '"' + '"."'.join(table_name.split('.', 1)) + '"'
table_action = Config.get('table_action', 'c')
if table_action is None:
table_action = 'c'
table_action = table_action[0]
create_pcpatch_table(
DBConn,
table_name,
table_action
)
convert_layer(layer, pcid, fields, file_name, table_name)
def geojson_to_pgpointcloud(config):
global Config
global DSIn
global DBConn
Config = config
DSIn = open_input_file(Config.get('input_file', None))
DBConn = open_db_connection(Config.get('dsn', None))
try:
convert_file()
DBConn.commit()
except:
DBConn.rollback()
raise
finally:
DBConn.close()
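# Example invocation (sketch; the keys mirror the Config lookups above,
# the values are hypothetical):
#
# geojson_to_pgpointcloud({
#     'input_file': 'points.geojson',
#     'dsn': 'dbname=pointclouds user=postgres',
#     'table_name': 'public.my_patches',
#     'buffer_size': 1000,
#     'copy_mode': True,
#     'patch_size': 400,
# })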
|
|
from django import forms
from django.db.models import get_model
from lighthouse.models.lapack_eigen import *
from lighthouse.models.lapack_choiceDict import *
#####------- Allow disabling options in a RadioSelect widget ----------#####
from django.utils.safestring import mark_safe
from django.utils.encoding import force_unicode
class CustomRadioRenderer(forms.widgets.RadioFieldRenderer):
def render(self):
""" Disable some radio buttons based on disableList """
if self.disable == []:
return mark_safe(u'<ul>\n%s\n</ul>' % u'\n'.join([u'<li>%s</li>' % force_unicode(w) for w in self]))
else:
midList = []
for x, wid in enumerate(self):
if self.disable[x] == True:
wid.attrs['disabled'] = True
midList.append(wid)
return mark_safe(u'<ul>\n%s\n</ul>' % u'\n'.join([u'<li>%s</li>' % w for w in midList]))
class CustomRadioSelect(forms.widgets.RadioSelect):
renderer = CustomRadioRenderer
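# Usage sketch: the form sets `renderer.disable` to a list of booleans, one
# per choice, before rendering; see the commented-out storageTypeForm below
# for how a disableList would be wired up.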
###############################################
######-------- For Guided Search --------######
###############################################
##---- problem form ---- ##
class problemForm(forms.Form):
eigen_problem = forms.ChoiceField(label='Which of the following problems would you like to compute?',
widget=forms.RadioSelect(),
choices=EIGENPROBLEM_CHOICES
)
##---- standard/generalized form ---##
class standardGeneralizedForm(forms.Form):
eigen_standardGeneralized = forms.ChoiceField(label='Is the problem standard or generalized?',
widget=forms.RadioSelect(),
choices=STANDARD_CHOICES,
)
##---- complex form ----##
class complexNumberForm(forms.Form):
eigen_complexNumber = forms.ChoiceField(label='Does your matrix have any complex numbers?',
widget=forms.RadioSelect(),
choices=NOYES_CHOICES
)
##---- matrix type form ----##
class matrixTypeForm(forms.Form):
eigen_matrixType = forms.ChoiceField(label='What is the type of your matrix?', choices=[], widget=forms.RadioSelect())
def __init__(self, request, *args, **kwargs):
super(matrixTypeForm, self).__init__(*args, **kwargs)
self.fields['eigen_matrixType'].choices = request.session['Routines'].values_list('matrixType', 'matrixType').distinct()
##--- display full names for semidefinite, SPD and HPD ---##
for i, item in enumerate(self.fields['eigen_matrixType'].choices):
if 'SPD' in item:
self.fields['eigen_matrixType'].choices[i] = (u'SPD', u'real symmetric positive definite (SPD)')
elif 'HPD' in item:
self.fields['eigen_matrixType'].choices[i] = (u'HPD', u'complex Hermitian positive definite (HPD)')
##--- order choices by string length ---##
self.fields['eigen_matrixType'].choices.sort(key=lambda k:len(k[1]))
if len(self.fields['eigen_matrixType'].choices) == 1:
selected = self.fields['eigen_matrixType'].choices[0][1]
self.fields['eigen_matrixType'].label = 'Given your selections, the LAPACK subroutines only support %s matrices. Do you wish to continue the search?'%selected
self.fields['eigen_matrixType'].choices = (
(selected, u'yes, continue'),
(u'stop', u'no, stop the search'),)
##---- storage type form ----##
class storageTypeForm(forms.Form):
eigen_storageType = forms.ChoiceField(label='How is your matrix stored?', choices=[], widget=forms.RadioSelect())
def __init__(self, request, *args, **kwargs):
super(storageTypeForm, self).__init__(*args, **kwargs)
self.fields['eigen_storageType'].choices = request.session['Routines'].values_list('storageType', 'storageType').distinct()
disableList = []
##--- handle the choice full/packed/band/tridiagonal ---##
if (u'full/packed/band/tridiagonal', u'full/packed/band/tridiagonal') in self.fields['eigen_storageType'].choices:
for item in 'full/packed/band/tridiagonal'.split('/'):
                self.fields['eigen_storageType'].choices = self.fields['eigen_storageType'].choices + [(item.decode('unicode-escape'), item.decode('unicode-escape'))]
self.fields['eigen_storageType'].choices.remove((u'full/packed/band/tridiagonal', u'full/packed/band/tridiagonal'))
self.fields['eigen_storageType'].choices = list(set(self.fields['eigen_storageType'].choices))
##--- order choices by string length ---##
self.fields['eigen_storageType'].choices.sort(key=lambda k:len(k[1]))
##--- if there is only one choice, show the others but disable them ---##
if len(self.fields['eigen_storageType'].choices) == 1:
selected = self.fields['eigen_storageType'].choices[0][1]
self.fields['eigen_storageType'].label = 'Given your selections, the LAPACK subroutines only support %s storage matrices. Do you wish to continue the search?'%selected
self.fields['eigen_storageType'].choices = (
(selected, u'yes, continue'),
(u'stop', u'no, stop the search'),)
##---- storage type form with disabled buttons --> NOT used ----##
#class storageTypeForm(forms.Form):
# eigen_storageType = forms.ChoiceField(label='How is your matrix stored?', choices=[], widget=CustomRadioSelect())
# def __init__(self, request, *args, **kwargs):
# super(storageTypeForm, self).__init__(*args, **kwargs)
# self.fields['eigen_storageType'].choices = request.session['Routines'].values_list('storageType', 'storageType').distinct()
# disableList = []
#
# ##--- handle the choice full/packed/band/tridiagonal ---##
# if (u'full/packed/band/tridiagonal', u'full/packed/band/tridiagonal') in self.fields['eigen_storageType'].choices:
# for item in 'full/packed/band/tridiagonal'.split('/'):
# self.fields['eigen_storageType'].choices = self.fields['eigen_storageType'].choices+ [(item.decode('unicode-escape'), item.decode('unicode-escape')),]
# self.fields['eigen_storageType'].choices.remove((u'full/packed/band/tridiagonal', u'full/packed/band/tridiagonal'))
# self.fields['eigen_storageType'].choices = list(set(self.fields['eigen_storageType'].choices))
#
# ##--- order choices by string length ---##
# self.fields['eigen_storageType'].choices.sort(key=lambda k:len(k[1]))
#
# ##--- if there is only one choice, show the others but disable them ---##
# if len(self.fields['eigen_storageType'].choices) == 1:
# selected = self.fields['eigen_storageType'].choices[0][1]
# self.fields['eigen_storageType'].choices = (
# (u'full', u'full'),
# (u'band', u'band'),
# (u'packed', u'packed'),
# )
# self.fields['eigen_storageType'].initial = selected
# for item in self.fields['eigen_storageType'].choices:
# if item[1] != selected:
# disableList.append(True)
# else:
# disableList.append(False)
#
# self.fields['eigen_storageType'].widget.renderer.disable = disableList
#
##--- selected eigenvalue form ---##
class selectedEVForm(forms.Form):
eigen_selectedEV = forms.ChoiceField(label='Do you only need eigenvalues within a specific range?',
widget=forms.RadioSelect(),
choices=NOYES_CHOICES)
# if len(self.fields['eigen_selectedEV'].choices) == 1:
# selected = self.fields['eigen_selectedEV'].choices[0][1]
##--- eigenvectors form ---##
class eigenvectorForm(forms.Form):
eigen_eigenvector = forms.ChoiceField(label='Do you need eigenvectors?',
widget=forms.RadioSelect(),
choices=NOYES_CHOICES
)
##--- eigenvectors or Schur form ---##
class schurForm(forms.Form):
eigen_schur = forms.ChoiceField(label='In addition to eigenvalues, do you need other properties such as Schur form, Schur vectors, and sorted eigenvalues?',
widget=forms.RadioSelect(),
choices=NOYES_CHOICES
)
##--- condition number form ---##
class cndNumberForm(forms.Form):
eigen_cndNumber = forms.ChoiceField(label='Do you need a reciprocal condition number?',
widget=forms.RadioSelect(),
choices=NOYES_CHOICES
)
##--- precision form ---##
class singleDoubleForm(forms.Form):
eigen_singleDouble = forms.ChoiceField(label='Would you like to use single or double precision?',
widget=forms.RadioSelect(),
choices=SINGLEDOUBLE_CHOICES
)
#################################################
######-------- For advanced Search --------######
#################################################
class advancedSearchMenuForm(forms.Form):
advancedSearchMenu = forms.MultipleChoiceField(
label = "Which of the following routine categories of eigen problems would you like to search?",
widget=forms.CheckboxSelectMultiple(),
choices=EIGENMENU_CHOICES
)
##--- for driver standard ---##
class driver_standard_sh_Form(forms.Form):
driver_standard_sh_function = mark_safe('solve a standard eigenproblem')
driver_standard_sh_complexNumber = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=NOYES_CHOICES)
driver_standard_sh_matrixType = forms.MultipleChoiceField(
widget=forms.CheckboxSelectMultiple(),
choices=((u'symmetric', u'symmetric'), (u'Hermitian', u'Hermitian'),)
)
driver_standard_sh_storageType = forms.MultipleChoiceField(
widget=forms.CheckboxSelectMultiple(),
choices=((u'full', u'full'), (u'packed', u'packed'), (u'band', u'band'), (u'tridiagonal', u'tridiagonal'))
)
driver_standard_sh_selectedEV = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=NOYES_CHOICES)
driver_standard_sh_method = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=METHOD_dssh_CHOICES)
driver_standard_sh_cndNumber = 'N/A'
driver_standard_sh_singleDouble = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=SINGLEDOUBLE_CHOICES)
class driver_standard_g_Form(forms.Form):
driver_standard_g_function = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=FUNCTION_dsg_CHOICES)
driver_standard_g_complexNumber = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=NOYES_CHOICES)
driver_standard_g_matrixType = 'general'
driver_standard_g_storageType = 'full'
driver_standard_g_selectedEV = 'N/A'
driver_standard_g_method = 'QL/QR'
driver_standard_g_cndNumber = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=NOYES_CHOICES)
driver_standard_g_singleDouble = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=SINGLEDOUBLE_CHOICES)
##--- for driver generalized ---##
class driver_generalized_sh_Form(forms.Form):
driver_generalized_sh_function = mark_safe('solve a generalized eigenproblem')
driver_generalized_sh_complexNumber = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=NOYES_CHOICES)
driver_generalized_sh_matrixType = forms.MultipleChoiceField(
widget=forms.CheckboxSelectMultiple(),
choices=((u'symmetric', u'symmetric'), (u'Hermitian', u'Hermitian'),)
)
driver_generalized_sh_storageType = forms.MultipleChoiceField(
widget=forms.CheckboxSelectMultiple(),
choices=((u'full', u'full'), (u'packed', u'packed'), (u'band', u'band'))
)
driver_generalized_sh_selectedEV = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=NOYES_CHOICES)
driver_generalized_sh_method = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=METHOD_dgsh_CHOICES)
driver_generalized_sh_cndNumber = 'N/A'
driver_generalized_sh_singleDouble = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=SINGLEDOUBLE_CHOICES)
class driver_generalized_g_Form(forms.Form):
driver_generalized_g_function = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=FUNCTION_dgg_CHOICES)
driver_generalized_g_complexNumber = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=NOYES_CHOICES)
driver_generalized_g_matrixType = 'general'
driver_generalized_g_storageType = 'full'
driver_generalized_g_selectedEV = 'N/A'
driver_generalized_g_method = 'QZ and back transformation'
driver_generalized_g_cndNumber = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=NOYES_CHOICES)
driver_generalized_g_singleDouble = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=SINGLEDOUBLE_CHOICES)
##--- for computational standard ---##
class computational_standard_sh_Form(forms.Form):
computational_standard_sh_function = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=FUNCTION_cssh_CHOICES)
computational_standard_sh_complexNumber = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=NOYES_CHOICES)
computational_standard_sh_matrixType = forms.MultipleChoiceField(
widget=forms.CheckboxSelectMultiple(),
choices=((u'symmetric', u'symmetric'), (u'Hermitian', u'Hermitian'),)
)
computational_standard_sh_storageType = forms.MultipleChoiceField(
widget=forms.CheckboxSelectMultiple(),
choices=((u'full', u'full'), (u'packed', u'packed'), (u'band', u'band'), (u'tridiagonal', u'tridiagonal'))
)
computational_standard_sh_selectedEV = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=NOYES_CHOICES)
computational_standard_sh_method = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=METHOD_cssh_CHOICES)
computational_standard_sh_cndNumber = 'N/A'
computational_standard_sh_singleDouble = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=SINGLEDOUBLE_CHOICES)
class computational_standard_g_Form(forms.Form):
computational_standard_g_function = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=FUNCTION_csg_CHOICES)
computational_standard_g_complexNumber = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=NOYES_CHOICES)
computational_standard_g_matrixType = forms.MultipleChoiceField(
widget=forms.CheckboxSelectMultiple(),
choices=((u'upper Hessenberg', u'upper Hessenberg'), (u'upper quasi-triangular', u'upper quasi-triangular'), (u'general', u'general'))
)
computational_standard_g_storageType = 'full'
computational_standard_g_selectedEV = 'N/A'
computational_standard_g_method = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=METHOD_csg_CHOICES)
computational_standard_g_cndNumber = 'N/A'
computational_standard_g_singleDouble = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=SINGLEDOUBLE_CHOICES)
##--- for computational generalized ---##
class computational_generalized_sh_Form(forms.Form):
computational_generalized_sh_function = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=FUNCTION_cgsh_CHOICES)
computational_generalized_sh_complexNumber = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=NOYES_CHOICES)
computational_generalized_sh_matrixType = forms.MultipleChoiceField(
widget=forms.CheckboxSelectMultiple(),
choices=((u'symmetric', u'symmetric'), (u'Hermitian', u'Hermitian'),)
)
computational_generalized_sh_storageType = forms.MultipleChoiceField(
widget=forms.CheckboxSelectMultiple(),
choices=((u'full', u'full'), (u'packed', u'packed'), (u'band', u'band'))
)
computational_generalized_sh_selectedEV = 'N/A'
computational_generalized_sh_cndNumber = 'N/A'
computational_generalized_sh_singleDouble = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=SINGLEDOUBLE_CHOICES)
class computational_generalized_g_Form(forms.Form):
computational_generalized_g_function = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=FUNCTION_cgg_CHOICES)
computational_generalized_g_complexNumber = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=NOYES_CHOICES)
computational_generalized_g_storageType = 'full'
computational_generalized_g_selectedEV = 'N/A'
computational_generalized_g_cndNumber = 'N/A'
computational_generalized_g_singleDouble = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=SINGLEDOUBLE_CHOICES)
|
|
import copy
prefixes = ['mis', 'im', 'un', 'dis']
opposites = {
"absent": "present",
"abundant": "scarce",
"accept": "decline, refuse",
"accurate": "inaccurate",
"admit": "deny",
"advantage": "disadvantage",
"against": "for",
"agree": "disagree",
"alive": "dead",
"all": "none, nothing",
"ally": "enemy",
"always": "never",
"ancient": "modern",
"answer": "question",
"antonym": "synonym",
"apart": "together",
"appear": "disappear, vanish",
"approve": "disapprove",
"arrive": "depart",
"artificial": "natural",
"ascend": "descend",
"attic": "cellar",
"attractive": "repulsive",
"awake": "asleep",
"backward": "forward",
"bad": "good",
"beautiful": "ugly",
"before": "after",
"begin": "end",
"below": "above",
"bent": "straight",
"best": "worst",
"better": "worse, worst",
"big": "little, small",
"black": "white",
"blame": "praise",
"bless": "curse",
"bitter": "sweet",
"borrow": "lend",
"bottom": "top",
"boy": "girl",
"brave": "cowardly",
"build": "destroy",
"bold": "meek, timid",
"borrow": "lend",
"bound": "unbound, free",
"boundless": "limited",
"bright": "dim, dull",
"brighten": "fade",
"broad": "narrow",
"calm": "windy, troubled",
"can": "cannot, can't",
"capable": "incapable",
"captive": "free",
"careful": "careless",
"cheap": "expensive",
"cheerful": "sad, discouraged, dreary",
"clear": "cloudy, opaque",
"clever": "stupid",
"clockwise": "counterclockwise",
"close": "far, distant",
"closed": "ajar, open",
"clumsy": "graceful",
"cold": "hot",
"combine": "separate",
"come": "go",
"comfort": "discomfort",
"common": "rare",
"conceal": "reveal",
"contract": "expand",
"cool": "warm",
"correct": "incorrect, wrong",
"courage": "cowardice",
"create": "destroy",
"crooked": "straight",
"cruel": "kind",
"compulsory": "voluntary",
"courteous": "discourteous, rude",
"dangerous": "safe",
"dark": "light",
"day": "night",
"daytime": "nighttime",
"dead": "alive",
"decline": "accept, increase",
"decrease": "increase",
"deep": "shallow",
"definite": "indefinite",
"demand": "supply",
"despair": "hope",
"dim": "bright",
"disappear": "appear",
"discourage": "encourage",
"diseased": "healthy",
"down": "up",
"downwards": "upwards",
"dreary": "cheerful",
"dry": "moist, wet",
"dull": "bright, shiny",
"dusk": "dawn",
"early": "late",
"east": "west",
"easy": "hard, difficult",
"empty": "full",
"encourage": "discourage",
"end": "begin, start",
"enter": "exit",
"even": "odd",
"expand": "contract",
"export": "import",
"exterior": "interior",
"external": "internal",
"fade": "brighten",
"fail": "succeed",
"false": "true",
"famous": "unknown",
"far": "near",
"fast": "slow",
"fat": "thin",
"feeble": "strong, powerful",
"few": "many",
"find": "lose",
"first": "last",
"float": "sink",
"foolish": "wise",
"fore": "aft",
"free": "bound, captive",
"fold": "unfold",
"forget": "remember",
"found": "lost",
"fresh": "stale",
"frequent": "seldom",
"friend": "enemy",
"for": "against",
"fortunate": "unfortunate",
"full": "empty",
"generous": "stingy",
"gentle": "rough",
"get": "give",
"giant": "tiny, small, dwarf",
"girl": "boy",
"give": "receive, take",
"glad": "sad, sorry",
"gloomy": "cheerful",
"go": "stop",
"good": "bad, evil",
"grant": "refuse",
"great": "tiny, small, unimportant",
"grow": "shrink",
"guest": "host",
"guilty": "innocent",
"happy": "sad",
"hard": "easy",
"hard": "soft",
"harmful": "harmless",
"harsh": "mild",
"hate": "love",
"haves": "have-nots",
"healthy": "diseased, ill, sick",
"heaven": "hell",
"heavy": "light",
"help": "hinder",
"here": "there",
"hero": "coward",
"high": "low",
"hill": "valley",
"hinder": "help",
"honest": "dishonest",
"horizontal": "vertical",
"hot": "cold",
"humble": "proud",
"ill": "healthy, well",
"immense": "tiny, small",
"important": "trivial",
"in": "out",
"include": "exclude",
"increase": "decrease",
"inferior": "superior",
"inhale": "exhale",
"inner": "outer",
"inside": "outside",
"intelligent": "stupid, unintelligent",
"interesting": "boring",
"interior": "exterior",
"interesting": "dull, uninteresting",
"internal": "external",
"intentional": "accidental",
"join": "separate",
"junior": "senior",
"just": "unjust",
"justice": "injustice",
"knowledge": "ignorance",
"known": "unknown",
"landlord": "tenant",
"large": "small",
"last": "first",
"laugh": "cry",
"lawful": "unlawful, illegal",
"lazy": "industrious",
"leader": "follower",
"left": "right",
"lend": "borrow",
"lengthen": "shorten",
"lenient": "strict",
"left": "right",
"less": "more",
"light": "dark, heavy",
"like": "dislike, hate",
"likely": "unlikely",
"limited": "boundless",
"little": "big",
"long": "short",
"loose": "tight",
"lose": "find",
"loss": "win",
"loud": "quiet",
"love": "hate",
"low": "high",
"loyal": "disloyal",
"mad": "happy, sane",
"major": "minor",
"many": "few",
"mature": "immature",
"maximum": "minimum",
"melt": "freeze",
"merry": "sad",
"messy": "neat",
"minor": "major",
"minority": "majority",
"miser": "spendthrift",
"misunderstand": "understand",
"more": "less",
"nadir": "zenith",
"narrow": "wide",
"near": "far, distant",
"neat": "messy, untidy",
"never": "always",
"new": "old",
"night": "day",
"nighttime": "daytime",
"no": "yes",
"noisy": "quiet",
"none": "some",
"north": "south",
"obedient": "disobedient",
"odd": "even",
"offer": "refuse",
"old": "young",
"old": "new",
"on": "off",
"open": "closed, shut",
"opposite": "same, similar",
"optimist": "pessimist",
"out": "in",
"outer": "inner",
"over": "under",
"past": "present",
"patient": "impatient",
"peace": "war",
"permanent": "temporary",
"plentiful": "scarce",
"plural": "singular",
"poetry": "prose",
"polite": "rude, impolite",
"possible": "impossible",
"poverty": "wealth, riches",
"powerful": "weak",
"pretty": "ugly",
"private": "public",
"prudent": "imprudent",
"pure": "impure, contaminated",
"push": "pull",
"qualified": "unqualified",
"question": "answer",
"quiet": "loud, noisy",
"raise": "lower",
"rapid": "slow",
"rare": "common",
"regular": "irregular",
"real": "fake",
"rich": "poor",
"right": "left, wrong",
"right-side-up": "upside-down",
"rough": "smooth",
"rude": "courteous",
"safe": "unsafe",
"same": "opposite",
"satisfactory": "unsatisfactory",
"secure": "insecure",
"scatter": "collect",
"separate": "join, together",
"serious": "trivial",
"second-hand": "new",
"shallow": "deep",
"shrink": "grow",
"sick": "healthy, ill",
"simple": "complex, hard",
"singular": "plural",
"sink": "float",
"slim": "fat, thick",
"slow": "fast",
"sober": "drunk",
"soft": "hard",
"some": "none",
"sorrow": "joy",
"sour": "sweet",
"sow": "reap",
"straight": "crooked",
"start": "finish",
"stop": "go",
"strict": "lenient",
"strong": "weak",
"success": "failure",
"sunny": "cloudy",
"synonym": "antonym",
"sweet": "sour",
"take": "give",
"tall": "short",
"tame": "wild",
"them": "us",
"there": "here",
"thick": "thin",
"tight": "loose, slack",
"tiny": "big, huge",
"together": "apart",
"top": "bottom",
"tough": "easy, tender",
"transparent": "opaque",
"true": "false",
"truth": "flasehood, lie, untruth",
"under": "over",
"unfold": "fold",
"unknown": "known",
"unqualified": "qualified",
"unsafe": "safe",
"up": "down",
"upside-down": "right-side-up",
"upstairs": "downstairs",
"us": "them",
"useful": "useless",
"vacant": "occupied",
"vanish": "appear",
"vast": "tiny",
"victory": "defeat",
"virtue": "vice",
"visible": "invisible",
"voluntary": "compulsory",
"war": "peace",
"wax": "wane",
"weak": "strong",
"wet": "dry",
"white": "black",
"wide": "narrow",
"win": "lose",
"wisdom": "folly, stupidity",
"within": "outside",
"wrong": "right",
"yes": "no",
"yin": "yang",
"young": "old",
"zip": "unzip",
"zenith": "nadir",
}
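# Split each comma-separated antonym string into a list, then make the
# mapping symmetric so every antonym also maps back to its key(s).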
opposites = {k: v.split(', ') for k, v in opposites.items()}
for k, v in dict(opposites).items():
for w in v:
if w in opposites:
opposites[w].append(k)
else:
opposites[w] = [k]
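# After symmetrization, words that only appeared as values become keys too,
# e.g. opposites['present'] now contains both 'absent' and 'past'.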
|
|
import unittest
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*(testing.product({
'shape': [None, (2, 3), (2, 3, 2), (2, 3, 2, 2)],
'cache_score': [True, False],
'normalize': [True, False],
'ignore_index': [None, (slice(None),), (0,), (0, 1), (0, 1, 0)],
'dtype': [numpy.float32],
'weight_apply': [False, True],
'enable_double_backprop': [False, True],
'label_dtype': [numpy.int32],
}) + testing.product({
'shape': [None, (2, 3), (2, 3, 2), (2, 3, 2, 2)],
'cache_score': [False],
'normalize': [True],
'ignore_index': [(0, 1)],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'weight_apply': [False, True],
'enable_double_backprop': [False, True],
'label_dtype': [numpy.int8, numpy.int16, numpy.int32, numpy.int64],
}) + testing.product({
'shape': [(0, 3), (0, 3, 2), (0, 3, 2, 2)],
'cache_score': [True, False],
'normalize': [True, False],
'ignore_index': [None],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'weight_apply': [False, True],
'enable_double_backprop': [False],
'label_dtype': [numpy.int32],
})))
class TestSoftmaxCrossEntropy(unittest.TestCase):
def setUp(self):
if self.shape is None:
if self.dtype == numpy.float16:
self.x = numpy.array([[-5, 1]], dtype=self.dtype)
else:
self.x = numpy.array([[-1000, 1]], dtype=self.dtype)
self.t = numpy.array([0], dtype=self.label_dtype)
else:
self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
out_shape = (self.shape[0],) + self.shape[2:]
self.t = numpy.random.randint(
0, self.shape[1], out_shape).astype(self.label_dtype)
if (self.ignore_index is not None and
len(self.ignore_index) <= self.t.ndim):
self.t[self.ignore_index] = -1
self.gy = numpy.random.uniform(-1, 1, ()).astype(self.x.dtype)
self.ggx = numpy.random.uniform(
-1, 1, self.x.shape).astype(self.x.dtype)
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-2, 'rtol': 1e-2}
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 1e-2, 'rtol': 1e-2}
else:
self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-3}
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 5e-4, 'rtol': 5e-3}
if self.weight_apply:
self.class_weight = numpy.random.uniform(
0, 10, (self.x.shape[1],)).astype(self.dtype)
else:
self.class_weight = None
def check_forward(self, x_data, t_data, class_weight, use_cudnn='always'):
x = chainer.Variable(x_data)
t = chainer.Variable(t_data)
with chainer.using_config('use_cudnn', use_cudnn):
loss = functions.softmax_cross_entropy(
x, t, normalize=self.normalize,
cache_score=self.cache_score, class_weight=class_weight,
enable_double_backprop=self.enable_double_backprop)
self.assertEqual(loss.data.shape, ())
self.assertEqual(loss.data.dtype, self.dtype)
if not self.enable_double_backprop:
assert (loss.creator.y is not None) == self.cache_score
loss_value = float(cuda.to_cpu(loss.data))
# Compute expected value
loss_expect = 0.0
count = 0
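        # Reference loss: -log softmax(x)[t] per sample, with the partition
        # term log_z computed via logaddexp for numerical stability; labels
        # equal to -1 are skipped as ignored.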
x = numpy.rollaxis(self.x, 1, self.x.ndim).reshape(
(self.t.size, self.x.shape[1]))
t = self.t.ravel()
for xi, ti in six.moves.zip(x, t):
if ti == -1:
continue
log_z = numpy.ufunc.reduce(numpy.logaddexp, xi)
if class_weight is None:
loss_expect -= (xi - log_z)[ti]
else:
loss_expect -= (xi - log_z)[ti] * class_weight[ti]
count += 1
if self.normalize:
if count == 0:
loss_expect = 0.0
else:
loss_expect /= count
else:
if len(t_data) == 0:
loss_expect = 0.0
else:
loss_expect /= len(t_data)
testing.assert_allclose(
loss_expect, loss_value, **self.check_forward_options)
def test_forward_cpu(self):
self.check_forward(self.x, self.t, self.class_weight)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(
cuda.to_gpu(self.x), cuda.to_gpu(self.t),
None if not self.weight_apply else cuda.to_gpu(self.class_weight))
@attr.gpu
def test_forward_gpu_no_cudnn(self):
self.check_forward(
cuda.to_gpu(self.x), cuda.to_gpu(self.t),
None if not self.weight_apply else cuda.to_gpu(self.class_weight),
'never')
def check_backward(self, x_data, t_data, class_weight, use_cudnn='always'):
with chainer.using_config('use_cudnn', use_cudnn):
func = functions.SoftmaxCrossEntropy(
cache_score=self.cache_score, class_weight=class_weight)
gradient_check.check_backward(
func, (x_data, t_data), None,
**self.check_backward_options)
def test_backward_cpu(self):
self.check_backward(self.x, self.t, self.class_weight)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.t),
None if not self.weight_apply else cuda.to_gpu(self.class_weight))
@attr.gpu
def test_backward_gpu_no_cudnn(self):
self.check_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.t),
None if not self.weight_apply else cuda.to_gpu(self.class_weight),
'never')
def check_double_backward(self, x_data, t_data, gy_data, ggx_data,
class_weight, use_cudnn='always'):
def f(x):
return functions.softmax_cross_entropy(
x, t_data, self.normalize, self.cache_score, class_weight,
enable_double_backprop=True)
if not self.enable_double_backprop:
return
with chainer.using_config('use_cudnn', use_cudnn):
gradient_check.check_double_backward(
f, x_data, gy_data, ggx_data,
**self.check_backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(
self.x, self.t, self.gy, self.ggx, self.class_weight)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.t),
cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx),
None if not self.weight_apply else cuda.to_gpu(self.class_weight))
@attr.gpu
def test_double_backward_gpu_no_cudnn(self):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.t),
cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx),
None if not self.weight_apply else cuda.to_gpu(self.class_weight),
'never')
@testing.parameterize(*testing.product_dict(
[
{'t_value': -2, 'valid': False},
{'t_value': 3, 'valid': False},
{'t_value': -1, 'valid': True} # -1 is ignore_label
],
[
{'enable_double_backprop': True},
{'enable_double_backprop': False}
]
))
class TestSoftmaxCrossEntropyValueCheck(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 2)).astype(numpy.float32)
# `0` is required to avoid NaN
self.t = numpy.array([self.t_value, 0], dtype=numpy.int32)
self.original_debug = chainer.is_debug()
chainer.set_debug(True)
def tearDown(self):
chainer.set_debug(self.original_debug)
def check_value_check(self, x_data, t_data, use_cudnn):
x = chainer.Variable(x_data)
t = chainer.Variable(t_data)
with chainer.using_config('use_cudnn', use_cudnn):
if self.valid:
# Check if it throws nothing
functions.softmax_cross_entropy(
x, t, enable_double_backprop=self.enable_double_backprop)
else:
with self.assertRaises(ValueError):
functions.softmax_cross_entropy(
x, t,
enable_double_backprop=self.enable_double_backprop)
def test_value_check_cpu(self):
self.check_value_check(self.x, self.t, 'never')
@attr.gpu
def test_value_check_gpu(self):
self.check_value_check(self.x, self.t, 'never')
@attr.gpu
def test_value_check_gpu_cudnn(self):
self.check_value_check(cuda.to_gpu(self.x), cuda.to_gpu(self.t),
'always')
@testing.parameterize(*testing.product({
'use_cudnn': ['always', 'auto', 'never'],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestSoftmaxCrossEntropyCudnnCall(unittest.TestCase):
def setUp(self):
self.x = cuda.cupy.random.uniform(-1, 1, (4, 3)).astype(self.dtype)
self.t = cuda.cupy.random.randint(0, 3, (4,)).astype(numpy.int32)
def forward(self):
x = chainer.Variable(self.x)
t = chainer.Variable(self.t)
return functions.softmax_cross_entropy(
x, t, enable_double_backprop=False)
def test_call_cudnn_forward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
with testing.patch('cupy.cuda.cudnn.softmaxForward') as func:
self.forward()
self.assertEqual(func.called,
chainer.should_use_cudnn('>=auto'))
# Note that SoftmaxCrossEntropy does not use cudnn on backward
@testing.parameterize(
{'enable_double_backprop': True},
{'enable_double_backprop': False},
)
class TestClassWeightAssertion(unittest.TestCase):
def setUp(self):
self.x = numpy.array([[0, 1], [2, 3]])
self.t = numpy.array([0, 1])
def test_ndim_assertion(self):
wrong_ndim_class_weight = numpy.array([[0, 0]], dtype='f')
with self.assertRaises(ValueError):
functions.softmax_cross_entropy(
self.x, self.t, class_weight=wrong_ndim_class_weight,
enable_double_backprop=self.enable_double_backprop)
def test_dtype_assertion(self):
wrong_dtype_class_weight = numpy.array([0, 0], dtype=numpy.int32)
with self.assertRaises(ValueError):
functions.softmax_cross_entropy(
self.x, self.t, class_weight=wrong_dtype_class_weight,
enable_double_backprop=self.enable_double_backprop)
def test_variable_assertion(self):
wrong_inst_class_weight = chainer.Variable(
numpy.array([0, 0], dtype='f'))
with self.assertRaises(ValueError):
functions.softmax_cross_entropy(
self.x, self.t, class_weight=wrong_inst_class_weight,
enable_double_backprop=self.enable_double_backprop)
@testing.parameterize(*(testing.product({
'shape': [None, (2, 3), (2, 3, 2), (2, 3, 2, 2)],
'cache_score': [True, False],
'normalize': [True, False],
'ignore_index': [None, (slice(None),), (0,), (0, 1), (0, 1, 0)],
'dtype': [numpy.float32],
'weight_apply': [False, True],
'use_cudnn': ['always', 'auto', 'never'],
'enable_double_backprop': [False, True],
}) + testing.product({
'shape': [None, (2, 3), (2, 3, 2), (2, 3, 2, 2)],
'cache_score': [False],
'normalize': [True, False],
'ignore_index': [(0, 1)],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'weight_apply': [False, True],
'use_cudnn': ['always', 'auto', 'never'],
'enable_double_backprop': [False, True],
})))
class TestElementwiseSoftmaxCrossEntropy(unittest.TestCase):
def setUp(self):
if self.shape is None:
if self.dtype == numpy.float16:
self.x = numpy.array([[-5, 1]], dtype=self.dtype)
else:
self.x = numpy.array([[-1000, 1]], dtype=self.dtype)
self.t = numpy.array([0], dtype=numpy.int32)
else:
self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
out_shape = (self.shape[0],) + self.shape[2:]
self.t = numpy.random.randint(
0, self.shape[1], out_shape).astype(numpy.int32)
if (self.ignore_index is not None and
len(self.ignore_index) <= self.t.ndim):
self.t[self.ignore_index] = -1
self.g = numpy.random.uniform(-1, 1, self.t.shape).astype(self.dtype)
self.ggx = numpy.random.uniform(-1, 1, self.x.shape).astype(self.dtype)
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-2, 'rtol': 1e-2}
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 1e-2, 'rtol': 1e-2}
else:
self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-3}
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 5e-4, 'rtol': 5e-3}
if self.weight_apply:
self.class_weight = numpy.random.uniform(
0, 10, (self.x.shape[1],)).astype(self.dtype)
else:
self.class_weight = None
def check_forward(self, x_data, t_data, class_weight):
x = chainer.Variable(x_data)
t = chainer.Variable(t_data)
loss = functions.softmax_cross_entropy(
x, t, cache_score=self.cache_score, normalize=self.normalize,
class_weight=class_weight, reduce='no',
enable_double_backprop=self.enable_double_backprop)
self.assertEqual(loss.shape, t_data.shape)
self.assertEqual(loss.data.dtype, self.dtype)
if not self.enable_double_backprop:
assert (loss.creator.y is not None) == self.cache_score
loss_value = cuda.to_cpu(loss.data)
x = numpy.rollaxis(self.x, 1, self.x.ndim).reshape(
(self.t.size, self.x.shape[1]))
t = self.t.ravel()
l = loss_value.ravel()
for xi, ti, li in six.moves.zip(x, t, l):
if ti == -1:
continue
log_z = numpy.ufunc.reduce(numpy.logaddexp, xi)
if class_weight is None:
loss_expect = -(xi - log_z)[ti]
else:
loss_expect = -(xi - log_z)[ti] * class_weight[ti]
testing.assert_allclose(
loss_expect, li, **self.check_forward_options)
def test_forward_cpu(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
self.check_forward(self.x, self.t, self.class_weight)
@attr.gpu
def test_forward_gpu(self):
if not self.weight_apply:
weight = None
else:
weight = cuda.to_gpu(self.class_weight)
with chainer.using_config('use_cudnn', self.use_cudnn):
self.check_forward(
cuda.to_gpu(self.x), cuda.to_gpu(self.t), weight)
def check_backward(
self, x_data, t_data, g_data, class_weight):
func = functions.SoftmaxCrossEntropy(
cache_score=self.cache_score,
class_weight=class_weight, reduce='no')
gradient_check.check_backward(
func, (x_data, t_data), g_data,
**self.check_backward_options)
def test_backward_cpu(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
self.check_backward(self.x, self.t, self.g, self.class_weight)
@attr.gpu
def test_backward_gpu(self):
if not self.weight_apply:
weight = None
else:
weight = cuda.to_gpu(self.class_weight)
with chainer.using_config('use_cudnn', self.use_cudnn):
self.check_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.t), cuda.to_gpu(self.g),
weight)
def check_double_backward(
self, x_data, t_data, g_data, ggx_data, class_weight):
def f(x):
return functions.softmax_cross_entropy(
x, t_data, self.normalize, self.cache_score, class_weight,
reduce='no', enable_double_backprop=True)
if not self.enable_double_backprop:
return
gradient_check.check_double_backward(
f, x_data, g_data, ggx_data,
**self.check_backward_options)
def test_double_backward_cpu(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
self.check_double_backward(
self.x, self.t, self.g, self.ggx, self.class_weight)
@attr.gpu
def test_double_backward_gpu(self):
if not self.weight_apply:
weight = None
else:
weight = cuda.to_gpu(self.class_weight)
with chainer.using_config('use_cudnn', self.use_cudnn):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.t), cuda.to_gpu(self.g),
cuda.to_gpu(self.ggx), weight)
@testing.parameterize(*testing.product({
'enable_double_backprop': [True, False],
}))
class TestSoftmaxCrossEntropyInvalidReduce(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3)).astype('f')
self.t = numpy.zeros((2,), 'i')
def check_invalid_reduce(self, x, t):
with self.assertRaises(ValueError):
functions.softmax_cross_entropy(
x, t,
reduce='unknown_reduce_type',
enable_double_backprop=self.enable_double_backprop)
def test_invalid_reduce_cpu(self):
self.check_invalid_reduce(self.x, self.t)
@attr.gpu
def test_invalid_reduce_gpu(self):
self.check_invalid_reduce(cuda.to_gpu(self.x), cuda.to_gpu(self.t))
@testing.parameterize(*testing.product({
'reduce': ['mean', 'no'],
'enable_double_backprop': [False, True],
'class_weight': [None, numpy.ones((3,), dtype=numpy.float32)]})
)
class TestNonDefaultIgnoreLabel(unittest.TestCase):
def setUp(self):
self.ignore_label = -2
self.x = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
self.t = numpy.full((2,), self.ignore_label, dtype=numpy.int32)
if self.reduce == 'mean':
gy_shape = ()
else:
gy_shape = (2,)
self.gy = numpy.random.uniform(-1, 1, gy_shape).astype(numpy.float32)
self.ggx = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
def check_forward(self, xp):
x = xp.asarray(self.x)
t = xp.asarray(self.t)
if self.class_weight is not None:
class_weight = xp.asarray(self.class_weight)
else:
class_weight = None
loss = functions.softmax_cross_entropy(
x, t, reduce=self.reduce,
class_weight=class_weight,
ignore_label=self.ignore_label,
enable_double_backprop=self.enable_double_backprop)
if self.reduce == 'mean':
expect = 0.
else:
expect = numpy.zeros((2,), dtype=numpy.float32)
testing.assert_allclose(loss.data, expect)
def test_forward_cpu(self):
self.check_forward(numpy)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.cupy)
def check_backward(self, xp):
x = xp.asarray(self.x)
t = xp.asarray(self.t)
gy = xp.asarray(self.gy)
if self.class_weight is not None:
class_weight = xp.asarray(self.class_weight)
else:
class_weight = None
def f(x_, t_):
return functions.softmax_cross_entropy(
x_, t_, class_weight=class_weight, reduce=self.reduce,
ignore_label=self.ignore_label,
enable_double_backprop=self.enable_double_backprop)
gradient_check.check_backward(f, (x, t), gy)
def test_backward_cpu(self):
self.check_backward(numpy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.cupy)
def check_double_backward(self, xp):
x = xp.asarray(self.x)
t = xp.asarray(self.t)
gy = xp.asarray(self.gy)
ggx = xp.asarray(self.ggx)
if self.class_weight is not None:
class_weight = xp.asarray(self.class_weight)
else:
class_weight = None
def f(x_):
return functions.softmax_cross_entropy(
x_, t, class_weight=class_weight, reduce=self.reduce,
ignore_label=self.ignore_label,
enable_double_backprop=True)
gradient_check.check_double_backward(f, x, gy, ggx)
def test_double_backward_cpu(self):
self.check_double_backward(numpy)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(cuda.cupy)
@testing.parameterize(*(testing.product({
'shape_ignore': [(None, None),
((2, 3), (slice(None),)),
((2, 3, 2), (0,)),
((2, 3, 2, 2), (0, 1, 0))],
'normalize': [True, False],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'weight_apply': [False, True],
})))
class TestForwardConsistency(unittest.TestCase):
    # This test case checks that forward propagation of the
    # double-backpropable implementation and the non-double-backpropable
    # implementation agree.
def setUp(self):
self.shape, self.ignore_index = self.shape_ignore
if self.shape is None:
if self.dtype == numpy.float16:
self.x = numpy.array([[-5, 1]], dtype=self.dtype)
else:
self.x = numpy.array([[-1000, 1]], dtype=self.dtype)
self.t = numpy.array([0], dtype=numpy.int32)
else:
self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
out_shape = (self.shape[0],) + self.shape[2:]
self.t = numpy.random.randint(
0, self.shape[1], out_shape).astype(numpy.int32)
if (self.ignore_index is not None and
len(self.ignore_index) <= self.t.ndim):
self.t[self.ignore_index] = -1
if self.weight_apply:
self.class_weight = numpy.random.uniform(
0, 10, (self.x.shape[1],)).astype(self.dtype)
else:
self.class_weight = None
def check_consistency(self, xp):
if self.class_weight is None:
class_weight = None
else:
class_weight = xp.asarray(self.class_weight)
x = xp.asarray(self.x)
t = xp.asarray(self.t)
def f(enable_double_backprop):
kwargs = {
'normalize': self.normalize,
'class_weight': class_weight,
'enable_double_backprop': enable_double_backprop
}
return functions.softmax_cross_entropy(x, t, **kwargs).data
loss_single = f(False)
loss_double = f(True)
check_forward_options = {}
if self.dtype == numpy.float16:
check_forward_options = {'atol': 5e-4, 'rtol': 5e-3}
testing.assert_allclose(
loss_single, loss_double, **check_forward_options)
def test_consistency_cpu(self):
self.check_consistency(numpy)
@attr.gpu
def test_consistency_gpu_always(self):
with chainer.using_config('use_cudnn', 'always'):
self.check_consistency(cuda.cupy)
@attr.gpu
def test_consistency_gpu_auto(self):
with chainer.using_config('use_cudnn', 'auto'):
self.check_consistency(cuda.cupy)
@attr.gpu
def test_consistency_gpu_never(self):
with chainer.using_config('use_cudnn', 'never'):
self.check_consistency(cuda.cupy)
testing.run_module(__name__, __file__)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from time import sleep
from typing import Optional
from sqlalchemy import Column, Index, Integer, String
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import backref, foreign, relationship
from sqlalchemy.orm.session import make_transient
from airflow.compat.functools import cached_property
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.executors.executor_loader import ExecutorLoader
from airflow.models.base import ID_LEN, Base
from airflow.models.dagrun import DagRun
from airflow.models.taskinstance import TaskInstance
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.helpers import convert_camel_to_snake
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.net import get_hostname
from airflow.utils.platform import getuser
from airflow.utils.session import create_session, provide_session
from airflow.utils.sqlalchemy import UtcDateTime
from airflow.utils.state import State
class BaseJob(Base, LoggingMixin):
"""
Abstract class to be derived for jobs. Jobs are processing items with state
    and duration that aren't task instances. For instance, a BackfillJob is
    a collection of task instance runs, but should have its own state, start
and end time.
"""
__tablename__ = "job"
id = Column(Integer, primary_key=True)
dag_id = Column(
String(ID_LEN),
)
state = Column(String(20))
job_type = Column(String(30))
start_date = Column(UtcDateTime())
end_date = Column(UtcDateTime())
latest_heartbeat = Column(UtcDateTime())
executor_class = Column(String(500))
hostname = Column(String(500))
unixname = Column(String(1000))
__mapper_args__ = {'polymorphic_on': job_type, 'polymorphic_identity': 'BaseJob'}
__table_args__ = (
Index('job_type_heart', job_type, latest_heartbeat),
Index('idx_job_state_heartbeat', state, latest_heartbeat),
Index('idx_job_dag_id', dag_id),
)
    task_instances_enqueued = relationship(
        TaskInstance,
        primaryjoin=id == foreign(TaskInstance.queued_by_job_id),  # type: ignore
        backref=backref('queued_by_job', uselist=False),
    )
    """
    TaskInstances which have been enqueued by this Job.
    Only makes sense for SchedulerJob and BackfillJob instances.
    """
    dag_runs = relationship(
        DagRun,
        primaryjoin=id == foreign(DagRun.creating_job_id),
        backref=backref('creating_job'),
    )
heartrate = conf.getfloat('scheduler', 'JOB_HEARTBEAT_SEC')
def __init__(self, executor=None, heartrate=None, *args, **kwargs):
self.hostname = get_hostname()
if executor:
self.executor = executor
self.executor_class = executor.__class__.__name__
else:
self.executor_class = conf.get('core', 'EXECUTOR')
self.start_date = timezone.utcnow()
self.latest_heartbeat = timezone.utcnow()
if heartrate is not None:
self.heartrate = heartrate
self.unixname = getuser()
self.max_tis_per_query: int = conf.getint('scheduler', 'max_tis_per_query')
super().__init__(*args, **kwargs)
@cached_property
def executor(self):
return ExecutorLoader.get_default_executor()
@classmethod
@provide_session
def most_recent_job(cls, session=None) -> Optional['BaseJob']:
"""
Return the most recent job of this type, if any, based on last
heartbeat received.
This method should be called on a subclass (i.e. on SchedulerJob) to
return jobs of that type.
:param session: Database session
:rtype: BaseJob or None
"""
return session.query(cls).order_by(cls.latest_heartbeat.desc()).limit(1).first()
def is_alive(self, grace_multiplier=2.1):
"""
Is this job currently alive.
        We define alive as being in the RUNNING state and having sent a
        heartbeat within a multiple of the heartrate (default of 2.1).
:param grace_multiplier: multiplier of heartrate to require heart beat
within
:type grace_multiplier: number
:rtype: boolean
"""
return (
self.state == State.RUNNING
and (timezone.utcnow() - self.latest_heartbeat).total_seconds()
< self.heartrate * grace_multiplier
)
@provide_session
def kill(self, session=None):
"""Handles on_kill callback and updates state in database."""
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
job.end_date = timezone.utcnow()
try:
self.on_kill()
except Exception as e:
self.log.error('on_kill() method failed: %s', str(e))
session.merge(job)
session.commit()
raise AirflowException("Job shut down externally.")
def on_kill(self):
"""Will be called when an external kill command is received"""
def heartbeat_callback(self, session=None):
"""Callback that is called during heartbeat. This method should be overwritten."""
def heartbeat(self, only_if_necessary: bool = False):
"""
        Heartbeats update the job's entry in the database with a timestamp
        for the latest_heartbeat and allow the job to be killed externally.
        This makes it possible to monitor, at the system level, what is
        actually active.
For instance, an old heartbeat for SchedulerJob would mean something
is wrong.
This also allows for any job to be killed externally, regardless
of who is running it or on which machine it is running.
Note that if your heart rate is set to 60 seconds and you call this
method after 10 seconds of processing since the last heartbeat, it
will sleep 50 seconds to complete the 60 seconds and keep a steady
heart rate. If you go over 60 seconds before calling it, it won't
sleep at all.
:param only_if_necessary: If the heartbeat is not yet due then do
nothing (don't update column, don't call ``heartbeat_callback``)
:type only_if_necessary: boolean
"""
seconds_remaining = 0
if self.latest_heartbeat:
seconds_remaining = self.heartrate - (timezone.utcnow() - self.latest_heartbeat).total_seconds()
if seconds_remaining > 0 and only_if_necessary:
return
previous_heartbeat = self.latest_heartbeat
try:
with create_session() as session:
# This will cause it to load from the db
session.merge(self)
previous_heartbeat = self.latest_heartbeat
if self.state in State.terminating_states:
self.kill()
# Figure out how long to sleep for
sleep_for = 0
if self.latest_heartbeat:
seconds_remaining = (
self.heartrate - (timezone.utcnow() - self.latest_heartbeat).total_seconds()
)
sleep_for = max(0, seconds_remaining)
sleep(sleep_for)
# Update last heartbeat time
with create_session() as session:
# Make the session aware of this object
session.merge(self)
self.latest_heartbeat = timezone.utcnow()
session.commit()
# At this point, the DB has updated.
previous_heartbeat = self.latest_heartbeat
self.heartbeat_callback(session=session)
self.log.debug('[heartbeat]')
except OperationalError:
Stats.incr(convert_camel_to_snake(self.__class__.__name__) + '_heartbeat_failure', 1, 1)
self.log.exception("%s heartbeat got an exception", self.__class__.__name__)
# We didn't manage to heartbeat, so make sure that the timestamp isn't updated
self.latest_heartbeat = previous_heartbeat
def run(self):
"""Starts the job."""
Stats.incr(self.__class__.__name__.lower() + '_start', 1, 1)
# Adding an entry in the DB
with create_session() as session:
self.state = State.RUNNING
session.add(self)
session.commit()
make_transient(self)
try:
self._execute()
# In case of max runs or max duration
self.state = State.SUCCESS
except SystemExit:
# In case of ^C or SIGTERM
self.state = State.SUCCESS
except Exception:
self.state = State.FAILED
raise
finally:
self.end_date = timezone.utcnow()
session.merge(self)
session.commit()
Stats.incr(self.__class__.__name__.lower() + '_end', 1, 1)
def _execute(self):
raise NotImplementedError("This method needs to be overridden")
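# Minimal subclass sketch (hypothetical; `HelloJob` is not part of Airflow):
# a concrete job only needs to implement _execute(); run() takes care of
# state transitions, timing and stats.
#
# class HelloJob(BaseJob):
#     __mapper_args__ = {'polymorphic_identity': 'HelloJob'}
#
#     def _execute(self):
#         self.log.info("Hello from a custom job")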
|
|
# References:
# http://developer.download.nvidia.com/books/HTML/gpugems/gpugems_ch38.html
# https://github.com/PavelDoGreat/WebGL-Fluid-Simulation
# https://www.bilibili.com/video/BV1ZK411H7Hc?p=4
# https://github.com/ShaneFX/GAMES201/tree/master/HW01
import argparse
import numpy as np
import taichi as ti
# How to run:
# `python stable_fluid.py`: use the jacobi iteration to solve the linear system.
# `python stable_fluid.py -S`: use a sparse matrix to do so.
parser = argparse.ArgumentParser()
parser.add_argument('-S',
'--use-sp-mat',
action='store_true',
help='Solve Poisson\'s equation by using a sparse matrix')
args, unknowns = parser.parse_known_args()
res = 512
dt = 0.03
p_jacobi_iters = 500 # 40 for a quicker but less accurate result
f_strength = 10000.0
curl_strength = 0
time_c = 2
maxfps = 60
dye_decay = 1 - 1 / (maxfps * time_c)
force_radius = res / 2.0
gravity = True
debug = False
paused = False
use_sparse_matrix = args.use_sp_mat
if use_sparse_matrix:
ti.init(arch=ti.x64)
print('Using sparse matrix')
else:
ti.init(arch=ti.gpu)
print('Using jacobi iteration')
_velocities = ti.Vector.field(2, float, shape=(res, res))
_new_velocities = ti.Vector.field(2, float, shape=(res, res))
velocity_divs = ti.field(float, shape=(res, res))
velocity_curls = ti.field(float, shape=(res, res))
_pressures = ti.field(float, shape=(res, res))
_new_pressures = ti.field(float, shape=(res, res))
_dye_buffer = ti.Vector.field(3, float, shape=(res, res))
_new_dye_buffer = ti.Vector.field(3, float, shape=(res, res))
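# Double-buffered field pair: kernels read from `cur` and write into `nxt`,
# then the two are swapped, avoiding in-place read/write hazards.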
class TexPair:
def __init__(self, cur, nxt):
self.cur = cur
self.nxt = nxt
def swap(self):
self.cur, self.nxt = self.nxt, self.cur
velocities_pair = TexPair(_velocities, _new_velocities)
pressures_pair = TexPair(_pressures, _new_pressures)
dyes_pair = TexPair(_dye_buffer, _new_dye_buffer)
if use_sparse_matrix:
# use a sparse matrix to solve Poisson's pressure equation.
@ti.kernel
def fill_laplacian_matrix(A: ti.types.sparse_matrix_builder()):
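        # Assemble the standard 5-point Laplacian in flattened form
        # (row = i * res + j); boundary cells simply get fewer off-diagonal
        # entries, which corresponds to the Neumann (solid wall) boundary.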
for i, j in ti.ndrange(res, res):
row = i * res + j
center = 0.0
if j != 0:
A[row, row - 1] += -1.0
center += 1.0
if j != res - 1:
A[row, row + 1] += -1.0
center += 1.0
if i != 0:
A[row, row - res] += -1.0
center += 1.0
if i != res - 1:
A[row, row + res] += -1.0
center += 1.0
A[row, row] += center
N = res * res
K = ti.linalg.SparseMatrixBuilder(N, N, max_num_triplets=N * 6)
b = ti.field(ti.f32, shape=N)
fill_laplacian_matrix(K)
L = K.build()
solver = ti.linalg.SparseSolver(solver_type="LLT")
solver.analyze_pattern(L)
solver.factorize(L)
@ti.func
def sample(qf, u, v):
I = ti.Vector([int(u), int(v)])
I = max(0, min(res - 1, I))
return qf[I]
@ti.func
def lerp(vl, vr, frac):
# frac: [0.0, 1.0]
return vl + frac * (vr - vl)
@ti.func
def bilerp(vf, p):
u, v = p
s, t = u - 0.5, v - 0.5
# floor
iu, iv = ti.floor(s), ti.floor(t)
# fract
fu, fv = s - iu, t - iv
a = sample(vf, iu, iv)
b = sample(vf, iu + 1, iv)
c = sample(vf, iu, iv + 1)
d = sample(vf, iu + 1, iv + 1)
return lerp(lerp(a, b, fu), lerp(c, d, fu), fv)
# 3rd order Runge-Kutta
@ti.func
def backtrace(vf: ti.template(), p, dt: ti.template()):
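    # Ralston's third-order Runge-Kutta, traced backwards in time: sample
    # the velocity at p, at p - dt/2 * v1 and at p - 3*dt/4 * v2, then
    # combine the samples with weights 2/9, 1/3 and 4/9.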
v1 = bilerp(vf, p)
p1 = p - 0.5 * dt * v1
v2 = bilerp(vf, p1)
p2 = p - 0.75 * dt * v2
v3 = bilerp(vf, p2)
p -= dt * ((2 / 9) * v1 + (1 / 3) * v2 + (4 / 9) * v3)
return p
@ti.kernel
def advect(vf: ti.template(), qf: ti.template(), new_qf: ti.template()):
for i, j in vf:
p = ti.Vector([i, j]) + 0.5
p = backtrace(vf, p, dt)
new_qf[i, j] = bilerp(qf, p) * dye_decay
@ti.kernel
def apply_impulse(vf: ti.template(), dyef: ti.template(),
imp_data: ti.ext_arr()):
g_dir = -ti.Vector([0, 9.8]) * 300
for i, j in vf:
omx, omy = imp_data[2], imp_data[3]
mdir = ti.Vector([imp_data[0], imp_data[1]])
dx, dy = (i + 0.5 - omx), (j + 0.5 - omy)
d2 = dx * dx + dy * dy
# dv = F * dt
factor = ti.exp(-d2 / force_radius)
dc = dyef[i, j]
a = dc.norm()
momentum = (mdir * f_strength * factor + g_dir * a / (1 + a)) * dt
v = vf[i, j]
vf[i, j] = v + momentum
# add dye
if mdir.norm() > 0.5:
dc += ti.exp(-d2 * (4 / (res / 15)**2)) * ti.Vector(
[imp_data[4], imp_data[5], imp_data[6]])
dyef[i, j] = dc
@ti.kernel
def divergence(vf: ti.template()):
for i, j in vf:
vl = sample(vf, i - 1, j)
vr = sample(vf, i + 1, j)
vb = sample(vf, i, j - 1)
vt = sample(vf, i, j + 1)
vc = sample(vf, i, j)
if i == 0:
vl.x = -vc.x
if i == res - 1:
vr.x = -vc.x
if j == 0:
vb.y = -vc.y
if j == res - 1:
vt.y = -vc.y
velocity_divs[i, j] = (vr.x - vl.x + vt.y - vb.y) * 0.5
@ti.kernel
def vorticity(vf: ti.template()):
for i, j in vf:
vl = sample(vf, i - 1, j)
vr = sample(vf, i + 1, j)
vb = sample(vf, i, j - 1)
vt = sample(vf, i, j + 1)
velocity_curls[i, j] = (vr.y - vl.y - vt.x + vb.x) * 0.5
@ti.kernel
def pressure_jacobi(pf: ti.template(), new_pf: ti.template()):
for i, j in pf:
pl = sample(pf, i - 1, j)
pr = sample(pf, i + 1, j)
pb = sample(pf, i, j - 1)
pt = sample(pf, i, j + 1)
div = velocity_divs[i, j]
new_pf[i, j] = (pl + pr + pb + pt - div) * 0.25
@ti.kernel
def subtract_gradient(vf: ti.template(), pf: ti.template()):
for i, j in vf:
pl = sample(pf, i - 1, j)
pr = sample(pf, i + 1, j)
pb = sample(pf, i, j - 1)
pt = sample(pf, i, j + 1)
vf[i, j] -= 0.5 * ti.Vector([pr - pl, pt - pb])
@ti.kernel
def enhance_vorticity(vf: ti.template(), cf: ti.template()):
# anti-physics visual enhancement...
for i, j in vf:
cl = sample(cf, i - 1, j)
cr = sample(cf, i + 1, j)
cb = sample(cf, i, j - 1)
ct = sample(cf, i, j + 1)
cc = sample(cf, i, j)
force = ti.Vector([abs(ct) - abs(cb),
abs(cl) - abs(cr)]).normalized(1e-3)
force *= curl_strength * cc
vf[i, j] = min(max(vf[i, j] + force * dt, -1e3), 1e3)
@ti.kernel
def copy_divergence(div_in: ti.template(), div_out: ti.template()):
for I in ti.grouped(div_in):
div_out[I[0] * res + I[1]] = -div_in[I]
@ti.kernel
def apply_pressure(p_in: ti.ext_arr(), p_out: ti.template()):
for I in ti.grouped(p_out):
p_out[I] = p_in[I[0] * res + I[1]]
def solve_pressure_sp_mat():
copy_divergence(velocity_divs, b)
x = solver.solve(b)
apply_pressure(x, pressures_pair.cur)
def solve_pressure_jacobi():
for _ in range(p_jacobi_iters):
pressure_jacobi(pressures_pair.cur, pressures_pair.nxt)
pressures_pair.swap()
def step(mouse_data):
advect(velocities_pair.cur, velocities_pair.cur, velocities_pair.nxt)
advect(velocities_pair.cur, dyes_pair.cur, dyes_pair.nxt)
velocities_pair.swap()
dyes_pair.swap()
apply_impulse(velocities_pair.cur, dyes_pair.cur, mouse_data)
divergence(velocities_pair.cur)
if curl_strength:
vorticity(velocities_pair.cur)
enhance_vorticity(velocities_pair.cur, velocity_curls)
if use_sparse_matrix:
solve_pressure_sp_mat()
else:
solve_pressure_jacobi()
subtract_gradient(velocities_pair.cur, pressures_pair.cur)
if debug:
divergence(velocities_pair.cur)
div_s = np.sum(velocity_divs.to_numpy())
print(f'divergence={div_s}')
class MouseDataGen(object):
def __init__(self):
self.prev_mouse = None
self.prev_color = None
def __call__(self, gui):
# [0:2]: normalized delta direction
# [2:4]: current mouse xy
# [4:7]: color
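        # [7]: unused; the array is simply padded to length 8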
mouse_data = np.zeros(8, dtype=np.float32)
if gui.is_pressed(ti.GUI.LMB):
mxy = np.array(gui.get_cursor_pos(), dtype=np.float32) * res
if self.prev_mouse is None:
self.prev_mouse = mxy
# Set lower bound to 0.3 to prevent too dark colors
self.prev_color = (np.random.rand(3) * 0.7) + 0.3
else:
mdir = mxy - self.prev_mouse
mdir = mdir / (np.linalg.norm(mdir) + 1e-5)
mouse_data[0], mouse_data[1] = mdir[0], mdir[1]
mouse_data[2], mouse_data[3] = mxy[0], mxy[1]
mouse_data[4:7] = self.prev_color
self.prev_mouse = mxy
else:
self.prev_mouse = None
self.prev_color = None
return mouse_data
def reset():
velocities_pair.cur.fill(0)
pressures_pair.cur.fill(0)
dyes_pair.cur.fill(0)
visualize_d = True  # visualize dye (default)
visualize_v = False  # visualize velocity
visualize_c = False  # visualize curl
gui = ti.GUI('Stable Fluid', (res, res))
md_gen = MouseDataGen()
while gui.running:
if gui.get_event(ti.GUI.PRESS):
e = gui.event
if e.key == ti.GUI.ESCAPE:
break
elif e.key == 'r':
paused = False
reset()
elif e.key == 's':
if curl_strength:
curl_strength = 0
else:
curl_strength = 7
elif e.key == 'g':
gravity = not gravity
elif e.key == 'v':
visualize_v = True
visualize_c = False
visualize_d = False
elif e.key == 'd':
visualize_d = True
visualize_v = False
visualize_c = False
elif e.key == 'c':
visualize_c = True
visualize_d = False
visualize_v = False
elif e.key == 'p':
paused = not paused
        elif e.key == 'b':
            # 'd' is already taken by the dye view above, so debug is
            # toggled with 'b' instead
            debug = not debug
# Debug divergence:
# print(max((abs(velocity_divs.to_numpy().reshape(-1)))))
if not paused:
mouse_data = md_gen(gui)
step(mouse_data)
if visualize_c:
vorticity(velocities_pair.cur)
gui.set_image(velocity_curls.to_numpy() * 0.03 + 0.5)
elif visualize_d:
gui.set_image(dyes_pair.cur)
elif visualize_v:
gui.set_image(velocities_pair.cur.to_numpy() * 0.01 + 0.5)
gui.show()
|
|
"""Perform regional de-novo assembly calling with cortex_var.
Using a pre-mapped set of reads and a BED file of regions, performs de-novo
assembly and variant calling against the reference sequence in each region.
This avoids whole-genome costs while gaining the advantage of de-novo
prediction.
http://cortexassembler.sourceforge.net/index_cortex_var.html
"""
from __future__ import print_function
import os
import glob
import subprocess
import itertools
import shutil
import pysam
from Bio import Seq
from Bio.SeqIO.QualityIO import FastqGeneralIterator
from bcbio import bam
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils
from bcbio.pipeline.shared import subset_variant_regions
from bcbio.utils import file_exists, safe_makedir
from bcbio.variation import vcfutils
def run_cortex(align_bams, items, ref_file, assoc_files, region=None,
out_file=None):
"""Top level entry to regional de-novo based variant calling with cortex_var.
"""
raise NotImplementedError("Cortex currently out of date and needs reworking.")
if len(align_bams) == 1:
align_bam = align_bams[0]
config = items[0]["config"]
else:
raise NotImplementedError("Need to add multisample calling for cortex_var")
if out_file is None:
out_file = "%s-cortex.vcf" % os.path.splitext(align_bam)[0]
if region is not None:
work_dir = safe_makedir(os.path.join(os.path.dirname(out_file),
region.replace(".", "_")))
else:
work_dir = os.path.dirname(out_file)
if not file_exists(out_file):
bam.index(align_bam, config)
variant_regions = config["algorithm"].get("variant_regions", None)
if not variant_regions:
raise ValueError("Only support regional variant calling with cortex_var: set variant_regions")
target_regions = subset_variant_regions(variant_regions, region, out_file)
if os.path.isfile(target_regions):
with open(target_regions) as in_handle:
regional_vcfs = [_run_cortex_on_region(x.strip().split("\t")[:3], align_bam,
ref_file, work_dir, out_file, config)
for x in in_handle]
combine_file = "{0}-raw{1}".format(*os.path.splitext(out_file))
_combine_variants(regional_vcfs, combine_file, ref_file, config)
_select_final_variants(combine_file, out_file, config)
else:
vcfutils.write_empty_vcf(out_file)
return out_file
def _passes_cortex_depth(line, min_depth):
"""Do any genotypes in the cortex_var VCF line passes the minimum depth requirement?
"""
parts = line.split("\t")
cov_index = parts[8].split(":").index("COV")
passes_depth = False
for gt in parts[9:]:
cur_cov = gt.split(":")[cov_index]
cur_depth = sum(int(x) for x in cur_cov.split(","))
if cur_depth >= min_depth:
passes_depth = True
return passes_depth
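def _example_passes_cortex_depth():
    """Illustrative sketch, not part of the original module: a minimal
    single-sample VCF data line whose COV values sum to 5, which passes a
    minimum depth of 4.
    """
    line = "1\t100\t.\tA\tT\t.\tPASS\t.\tGT:COV\t0/1:3,2"
    return _passes_cortex_depth(line, 4)  # True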
def _select_final_variants(base_vcf, out_vcf, config):
"""Filter input file, removing items with low depth of support.
cortex_var calls are tricky to filter by depth. Count information is in
the COV FORMAT field grouped by alleles, so we need to sum up values and
compare.
"""
min_depth = int(config["algorithm"].get("min_depth", 4))
with file_transaction(out_vcf) as tx_out_file:
with open(base_vcf) as in_handle:
with open(tx_out_file, "w") as out_handle:
for line in in_handle:
if line.startswith("#"):
passes = True
else:
passes = _passes_cortex_depth(line, min_depth)
if passes:
out_handle.write(line)
return out_vcf
def _combine_variants(in_vcfs, out_file, ref_file, config):
"""Combine variant files, writing the header from the first non-empty input.
in_vcfs is a list with each item starting with the chromosome regions,
and ending with the input file.
We sort by these regions to ensure the output file is in the expected order.
"""
in_vcfs.sort()
wrote_header = False
with open(out_file, "w") as out_handle:
for in_vcf in (x[-1] for x in in_vcfs):
with open(in_vcf) as in_handle:
header = list(itertools.takewhile(lambda x: x.startswith("#"),
in_handle))
if not header[0].startswith("##fileformat=VCFv4"):
raise ValueError("Unexpected VCF file: %s" % in_vcf)
for line in in_handle:
if not wrote_header:
wrote_header = True
out_handle.write("".join(header))
out_handle.write(line)
if not wrote_header:
out_handle.write("".join(header))
return out_file
def _run_cortex_on_region(region, align_bam, ref_file, work_dir, out_file_base, config):
"""Run cortex on a specified chromosome start/end region.
"""
kmers = [31, 51, 71]
min_reads = 1750
cortex_dir = config_utils.get_program("cortex", config, "dir")
stampy_dir = config_utils.get_program("stampy", config, "dir")
vcftools_dir = config_utils.get_program("vcftools", config, "dir")
if cortex_dir is None or stampy_dir is None:
raise ValueError("cortex_var requires path to pre-built cortex and stampy")
region_str = "{0}-{1}-{2}".format(*region)
base_dir = safe_makedir(os.path.join(work_dir, region_str))
try:
out_vcf_base = os.path.join(base_dir, "{0}-{1}".format(
os.path.splitext(os.path.basename(out_file_base))[0], region_str))
out_file = os.path.join(work_dir, os.path.basename("{0}.vcf".format(out_vcf_base)))
if not file_exists(out_file):
fastq = _get_fastq_in_region(region, align_bam, out_vcf_base)
if _count_fastq_reads(fastq, min_reads) < min_reads:
vcfutils.write_empty_vcf(out_file)
else:
local_ref, genome_size = _get_local_ref(region, ref_file, out_vcf_base)
indexes = _index_local_ref(local_ref, cortex_dir, stampy_dir, kmers)
cortex_out = _run_cortex(fastq, indexes, {"kmers": kmers, "genome_size": genome_size,
"sample": get_sample_name(align_bam)},
out_vcf_base, {"cortex": cortex_dir, "stampy": stampy_dir,
"vcftools": vcftools_dir},
config)
if cortex_out:
_remap_cortex_out(cortex_out, region, out_file)
else:
vcfutils.write_empty_vcf(out_file)
finally:
if os.path.exists(base_dir):
shutil.rmtree(base_dir)
return [region[0], int(region[1]), int(region[2]), out_file]
def _remap_cortex_out(cortex_out, region, out_file):
"""Remap coordinates in local cortex variant calls to the original global region.
"""
def _remap_vcf_line(line, contig, start):
parts = line.split("\t")
if parts[0] == "" or parts[1] == "":
return None
parts[0] = contig
try:
parts[1] = str(int(parts[1]) + start)
except ValueError:
raise ValueError("Problem in {0} with \n{1}".format(
cortex_out, parts))
return "\t".join(parts)
def _not_filtered(line):
parts = line.split("\t")
return parts[6] == "PASS"
contig, start, _ = region
start = int(start)
with open(cortex_out) as in_handle:
with open(out_file, "w") as out_handle:
for line in in_handle:
if line.startswith("##fileDate"):
pass
elif line.startswith("#"):
out_handle.write(line)
elif _not_filtered(line):
update_line = _remap_vcf_line(line, contig, start)
if update_line:
out_handle.write(update_line)
def _run_cortex(fastq, indexes, params, out_base, dirs, config):
"""Run cortex_var run_calls.pl, producing a VCF variant file.
"""
print(out_base)
fastaq_index = "{0}.fastaq_index".format(out_base)
se_fastq_index = "{0}.se_fastq".format(out_base)
pe_fastq_index = "{0}.pe_fastq".format(out_base)
reffasta_index = "{0}.list_ref_fasta".format(out_base)
with open(se_fastq_index, "w") as out_handle:
out_handle.write(fastq + "\n")
with open(pe_fastq_index, "w") as out_handle:
out_handle.write("")
with open(fastaq_index, "w") as out_handle:
out_handle.write("{0}\t{1}\t{2}\t{2}\n".format(params["sample"], se_fastq_index,
pe_fastq_index))
with open(reffasta_index, "w") as out_handle:
for x in indexes["fasta"]:
out_handle.write(x + "\n")
os.environ["PERL5LIB"] = "{0}:{1}:{2}".format(
os.path.join(dirs["cortex"], "scripts/calling"),
os.path.join(dirs["cortex"], "scripts/analyse_variants/bioinf-perl/lib"),
os.environ.get("PERL5LIB", ""))
kmers = sorted(params["kmers"])
kmer_info = ["--first_kmer", str(kmers[0])]
if len(kmers) > 1:
kmer_info += ["--last_kmer", str(kmers[-1]),
"--kmer_step", str(kmers[1] - kmers[0])]
subprocess.check_call(["perl", os.path.join(dirs["cortex"], "scripts", "calling", "run_calls.pl"),
"--fastaq_index", fastaq_index,
"--auto_cleaning", "yes", "--bc", "yes", "--pd", "yes",
"--outdir", os.path.dirname(out_base), "--outvcf", os.path.basename(out_base),
"--ploidy", str(config["algorithm"].get("ploidy", 2)),
"--stampy_hash", indexes["stampy"],
"--stampy_bin", os.path.join(dirs["stampy"], "stampy.py"),
"--refbindir", os.path.dirname(indexes["cortex"][0]),
"--list_ref_fasta", reffasta_index,
"--genome_size", str(params["genome_size"]),
"--max_read_len", "30000",
#"--max_var_len", "4000",
"--format", "FASTQ", "--qthresh", "5", "--do_union", "yes",
"--mem_height", "17", "--mem_width", "100",
"--ref", "CoordinatesAndInCalling", "--workflow", "independent",
"--vcftools_dir", dirs["vcftools"],
"--logfile", "{0}.logfile,f".format(out_base)]
+ kmer_info)
final = glob.glob(os.path.join(os.path.dirname(out_base), "vcfs",
"{0}*FINALcombined_BC*decomp.vcf".format(os.path.basename(out_base))))
# No calls, need to setup an empty file
if len(final) != 1:
print("Did not find output VCF file for {0}".format(out_base))
return None
else:
return final[0]
def _get_cortex_binary(kmer, cortex_dir):
cortex_bin = None
for check_bin in sorted(glob.glob(os.path.join(cortex_dir, "bin", "cortex_var_*"))):
kmer_check = int(os.path.basename(check_bin).split("_")[2])
if kmer_check >= kmer:
cortex_bin = check_bin
break
assert cortex_bin is not None, \
"Could not find cortex_var executable in %s for kmer %s" % (cortex_dir, kmer)
return cortex_bin
def _index_local_ref(fasta_file, cortex_dir, stampy_dir, kmers):
"""Pre-index a generated local reference sequence with cortex_var and stampy.
"""
base_out = os.path.splitext(fasta_file)[0]
cindexes = []
for kmer in kmers:
out_file = "{0}.k{1}.ctx".format(base_out, kmer)
if not file_exists(out_file):
file_list = "{0}.se_list".format(base_out)
with open(file_list, "w") as out_handle:
out_handle.write(fasta_file + "\n")
subprocess.check_call([_get_cortex_binary(kmer, cortex_dir),
"--kmer_size", str(kmer), "--mem_height", "17",
"--se_list", file_list, "--format", "FASTA",
"--max_read_len", "30000",
"--sample_id", base_out,
"--dump_binary", out_file])
cindexes.append(out_file)
if not file_exists("{0}.stidx".format(base_out)):
subprocess.check_call([os.path.join(stampy_dir, "stampy.py"), "-G",
base_out, fasta_file])
subprocess.check_call([os.path.join(stampy_dir, "stampy.py"), "-g",
base_out, "-H", base_out])
return {"stampy": base_out,
"cortex": cindexes,
"fasta": [fasta_file]}
def _get_local_ref(region, ref_file, out_vcf_base):
"""Retrieve a local FASTA file corresponding to the specified region.
"""
out_file = "{0}.fa".format(out_vcf_base)
if not file_exists(out_file):
with pysam.Fastafile(ref_file) as in_pysam:
contig, start, end = region
seq = in_pysam.fetch(contig, int(start), int(end))
with open(out_file, "w") as out_handle:
out_handle.write(">{0}-{1}-{2}\n{3}".format(contig, start, end,
str(seq)))
with open(out_file) as in_handle:
in_handle.readline()
size = len(in_handle.readline().strip())
return out_file, size
def _get_fastq_in_region(region, align_bam, out_base):
"""Retrieve fastq files in region as single end.
    Paired end is more complicated since pairs can map off the region, so we
    focus on local-only assembly, having previously used the paired
    information for mapping.
"""
out_file = "{0}.fastq".format(out_base)
if not file_exists(out_file):
with pysam.Samfile(align_bam, "rb") as in_pysam:
with file_transaction(out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
contig, start, end = region
for read in in_pysam.fetch(contig, int(start), int(end)):
seq = Seq.Seq(read.seq)
qual = list(read.qual)
if read.is_reverse:
seq = seq.reverse_complement()
qual.reverse()
out_handle.write("@{name}\n{seq}\n+\n{qual}\n".format(
name=read.qname, seq=str(seq), qual="".join(qual)))
return out_file
## Utility functions
def _count_fastq_reads(in_fastq, min_reads):
"""Count the number of fastq reads in a file, stopping after reaching min_reads.
"""
with open(in_fastq) as in_handle:
        items = list(itertools.takewhile(lambda i: i <= min_reads,
(i for i, _ in enumerate(FastqGeneralIterator(in_handle)))))
return len(items)
def get_sample_name(align_bam):
with pysam.Samfile(align_bam, "rb") as in_pysam:
if "RG" in in_pysam.header:
return in_pysam.header["RG"][0]["SM"]
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Redfish Inspect Interface
"""
from oslo_log import log
from oslo_utils import importutils
from oslo_utils import units
from ironic.common import boot_modes
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import states
from ironic.common import utils
from ironic.drivers import base
from ironic.drivers.modules import inspect_utils
from ironic.drivers.modules.redfish import utils as redfish_utils
from ironic.drivers import utils as drivers_utils
from ironic import objects
LOG = log.getLogger(__name__)
sushy = importutils.try_import('sushy')
if sushy:
CPU_ARCH_MAP = {
sushy.PROCESSOR_ARCH_x86: 'x86_64',
sushy.PROCESSOR_ARCH_IA_64: 'ia64',
sushy.PROCESSOR_ARCH_ARM: 'arm',
sushy.PROCESSOR_ARCH_MIPS: 'mips',
sushy.PROCESSOR_ARCH_OEM: 'oem'
}
BOOT_MODE_MAP = {
sushy.BOOT_SOURCE_MODE_UEFI: boot_modes.UEFI,
sushy.BOOT_SOURCE_MODE_BIOS: boot_modes.LEGACY_BIOS
}
class RedfishInspect(base.InspectInterface):
def __init__(self):
"""Initialize the Redfish inspection interface.
:raises: DriverLoadError if the driver can't be loaded due to
missing dependencies
"""
super(RedfishInspect, self).__init__()
if not sushy:
raise exception.DriverLoadError(
driver='redfish',
reason=_('Unable to import the sushy library'))
def get_properties(self):
"""Return the properties of the interface.
:returns: dictionary of <property name>:<property description> entries.
"""
return redfish_utils.COMMON_PROPERTIES.copy()
def validate(self, task):
"""Validate the driver-specific Node deployment info.
This method validates whether the 'driver_info' properties of
the task's node contains the required information for this
interface to function.
This method is often executed synchronously in API requests, so it
should not conduct long-running checks.
:param task: A TaskManager instance containing the node to act on.
:raises: InvalidParameterValue on malformed parameter(s)
:raises: MissingParameterValue on missing parameter(s)
"""
redfish_utils.parse_driver_info(task.node)
def inspect_hardware(self, task):
"""Inspect hardware to get the hardware properties.
Inspects hardware to get the essential properties.
It fails if any of the essential properties
are not received from the node.
:param task: a TaskManager instance.
:raises: HardwareInspectionFailure if essential properties
could not be retrieved successfully.
:returns: The resulting state of inspection.
"""
system = redfish_utils.get_system(task.node)
# get the essential properties and update the node properties
# with it.
inspected_properties = task.node.properties
if system.memory_summary and system.memory_summary.size_gib:
inspected_properties['memory_mb'] = str(
system.memory_summary.size_gib * units.Ki)
if system.processors and system.processors.summary:
cpus, arch = system.processors.summary
if cpus:
inspected_properties['cpus'] = cpus
if arch:
try:
inspected_properties['cpu_arch'] = CPU_ARCH_MAP[arch]
except KeyError:
LOG.warning("Unknown CPU arch %(arch)s discovered "
"for node %(node)s", {'node': task.node.uuid,
'arch': arch})
# TODO(etingof): should we respect root device hints here?
local_gb = self._detect_local_gb(task, system)
if local_gb:
inspected_properties['local_gb'] = str(local_gb)
else:
LOG.warning("Could not provide a valid storage size configured "
"for node %(node)s. Assuming this is a disk-less node",
{'node': task.node.uuid})
inspected_properties['local_gb'] = '0'
if system.boot.mode:
if not drivers_utils.get_node_capability(task.node, 'boot_mode'):
capabilities = utils.get_updated_capabilities(
inspected_properties.get('capabilities', ''),
{'boot_mode': BOOT_MODE_MAP[system.boot.mode]})
inspected_properties['capabilities'] = capabilities
valid_keys = self.ESSENTIAL_PROPERTIES
missing_keys = valid_keys - set(inspected_properties)
if missing_keys:
            error = (_('Failed to discover the following properties: '
                       '%(missing_keys)s on node %(node)s')
                     % {'missing_keys': ', '.join(missing_keys),
                        'node': task.node.uuid})
raise exception.HardwareInspectionFailure(error=error)
task.node.properties = inspected_properties
task.node.save()
LOG.debug("Node properties for %(node)s are updated as "
"%(properties)s", {'properties': inspected_properties,
'node': task.node.uuid})
self._create_ports(task, system)
pxe_port_macs = self._get_pxe_port_macs(task)
if pxe_port_macs is None:
LOG.warning("No PXE enabled NIC was found for node "
"%(node_uuid)s.", {'node_uuid': task.node.uuid})
else:
pxe_port_macs = [macs.lower() for macs in pxe_port_macs]
ports = objects.Port.list_by_node_id(task.context, task.node.id)
if ports:
for port in ports:
is_baremetal_pxe_port = (port.address.lower()
in pxe_port_macs)
if port.pxe_enabled != is_baremetal_pxe_port:
port.pxe_enabled = is_baremetal_pxe_port
port.save()
LOG.info('Port %(port)s having %(mac_address)s '
'updated with pxe_enabled %(pxe)s for '
'node %(node_uuid)s during inspection',
{'port': port.uuid,
'mac_address': port.address,
'pxe': port.pxe_enabled,
'node_uuid': task.node.uuid})
else:
LOG.warning("No port information discovered "
"for node %(node)s", {'node': task.node.uuid})
return states.MANAGEABLE
def _create_ports(self, task, system):
enabled_macs = redfish_utils.get_enabled_macs(task, system)
if enabled_macs:
inspect_utils.create_ports_if_not_exist(task, list(enabled_macs))
else:
LOG.warning("Not attempting to create any port as no NICs "
"were discovered in 'enabled' state for node "
"%(node)s: %(mac_data)s",
{'mac_data': enabled_macs, 'node': task.node.uuid})
def _detect_local_gb(self, task, system):
simple_storage_size = 0
try:
LOG.debug("Attempting to discover system simple storage size for "
"node %(node)s", {'node': task.node.uuid})
if (system.simple_storage
and system.simple_storage.disks_sizes_bytes):
simple_storage_size = [
size for size in system.simple_storage.disks_sizes_bytes
if size >= 4 * units.Gi
] or [0]
simple_storage_size = simple_storage_size[0]
except sushy.exceptions.SushyError as ex:
LOG.debug("No simple storage information discovered "
"for node %(node)s: %(err)s", {'node': task.node.uuid,
'err': ex})
storage_size = 0
try:
LOG.debug("Attempting to discover system storage volume size for "
"node %(node)s", {'node': task.node.uuid})
if system.storage and system.storage.volumes_sizes_bytes:
storage_size = [
size for size in system.storage.volumes_sizes_bytes
if size >= 4 * units.Gi
] or [0]
storage_size = storage_size[0]
except sushy.exceptions.SushyError as ex:
LOG.debug("No storage volume information discovered "
"for node %(node)s: %(err)s", {'node': task.node.uuid,
'err': ex})
try:
if not storage_size:
LOG.debug("Attempting to discover system storage drive size "
"for node %(node)s", {'node': task.node.uuid})
if system.storage and system.storage.drives_sizes_bytes:
storage_size = [
size for size in system.storage.drives_sizes_bytes
if size >= 4 * units.Gi
] or [0]
storage_size = storage_size[0]
except sushy.exceptions.SushyError as ex:
LOG.debug("No storage drive information discovered "
"for node %(node)s: %(err)s", {'node': task.node.uuid,
'err': ex})
# NOTE(etingof): pick the smallest disk larger than 4G among available
if simple_storage_size and storage_size:
local_gb = min(simple_storage_size, storage_size)
else:
local_gb = max(simple_storage_size, storage_size)
        # Note(deray): Convert the received size to GiB and reduce the
        # value by 1 GiB, as consumers like Ironic require ``local_gb``
        # to be reported as 1 less than the actual size.
return max(0, int(local_gb / units.Gi - 1))
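    # Illustrative sketch, not part of the original driver: the disk-size
    # arithmetic above in isolation. With a single non-zero source max()
    # selects it; with both, min() selects the smaller; 1 GiB is then
    # subtracted for consumers of ``local_gb``.
    @staticmethod
    def _example_local_gb(simple_storage_size, storage_size):
        if simple_storage_size and storage_size:
            local_gb = min(simple_storage_size, storage_size)
        else:
            local_gb = max(simple_storage_size, storage_size)
        return max(0, int(local_gb / units.Gi - 1))
    # e.g. _example_local_gb(0, 100 * units.Gi) == 99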
def _get_pxe_port_macs(self, task):
"""Get a list of PXE port MAC addresses.
:param task: a TaskManager instance.
:returns: Returns list of PXE port MAC addresses.
If cannot be determined, returns None.
"""
return None
|
|
from __future__ import division
import numpy as np
from six.moves import range
def normalize(theta, start=0):
"""
Normalize an angle to be in the range :math:`[0, 2\pi]`
Parameters
-----------
theta : float
input angle to normalize
start: float
input start angle (optional, default: 0.0)
Returns
--------
res : float
normalized angle or :math:`\infty`
"""
if theta < np.inf:
while theta >= start + 2 * np.pi:
theta -= 2 * np.pi
while theta < start:
theta += 2 * np.pi
return theta
else:
return np.inf
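def _example_normalize():
    """Illustrative sketch, not part of the original module: -pi/2 wraps to
    3*pi/2 inside the default [0, 2*pi) range.
    """
    assert abs(normalize(-np.pi / 2) - 3 * np.pi / 2) < 1e-9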
def addangles(alpha, beta):
"""
Add two angles
Parameters
----------
alpha : float
Augend (in radians)
beta : float
Addend (in radians)
Returns
-------
sum : float
Sum (in radians, normalized to [0, 2pi])
"""
return normalize(alpha + beta, start=0)
def subangles(alpha, beta):
"""
    Subtract one angle from another
Parameters
----------
alpha : float
Minuend (in radians)
beta : float
        Subtrahend (in radians)
Returns
-------
delta : float
Difference (in radians, normalized to [0, 2pi])
"""
delta = 0
if alpha < np.inf and beta < np.inf:
alpha = normalize(alpha, start=0)
beta = normalize(beta, start=0)
delta = alpha - beta
if alpha > beta:
while delta > np.pi:
delta -= 2 * np.pi
elif beta > alpha:
while delta < -np.pi:
delta += 2 * np.pi
else:
delta = np.inf
return delta
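def _example_subangles():
    """Illustrative sketch, not part of the original module: the result is
    the shortest signed arc, so 0.1 - (2*pi - 0.1) wraps around to +0.2.
    """
    assert abs(subangles(0.1, 2 * np.pi - 0.1) - 0.2) < 1e-9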
def edist(v1, v2):
""" Euclidean distance between the two poses
Parameters
-----------
v1, v2 : array-like
vector of poses
Returns
-----------
dist : float
distance between v1 and v2
"""
return np.hypot((v1[0] - v2[0]), (v1[1] - v2[1]))
def adist(focal_agent, other_agent, ak=2.48, bk=1.0, lambda_=0.4, rij=0.9):
""" Anisotropic distance between two oriented poses
Anisotropic distance based on the Social Force Model (SFM) [TODO - cite]
model of pedestrian dynamics.
.. math::
a \cdot b \exp{\left(\\frac{r_{ij} - d_{ij}}{b}\\right)}
\mathbf{n}_{ij} \left(\lambda + (1 - \lambda) \\frac{1 +
\cos(\\varphi_{ij})}{2}\\right)
Parameters
-----------
focal_agent, other_agent : array-like
Vector of poses (including orientation information as vx, vy)
ak, bk, lambda_, rij : float
Parameters of the anisotropic model
Returns
----------
dist : float
Distance between the two poses
"""
ei = np.array([-focal_agent[2], -focal_agent[3]])
length_ei = np.linalg.norm(ei)
if length_ei > 1e-24:
ei = ei / length_ei
phi = np.arctan2(other_agent[1] - focal_agent[1],
other_agent[0] - focal_agent[0])
dij = edist(focal_agent, other_agent)
nij = np.array([np.cos(phi), np.sin(phi)])
ns = 2
alpha = ak * np.exp((rij - dij) / bk) * nij
beta_ = np.tile(np.ones(shape=(1, ns)) * lambda_ + ((1 - lambda_) *
(np.ones(shape=(1, ns)) - (np.dot(nij.T, ei)).T) / 2.),
[1, 1])
curve = np.multiply(alpha, beta_).T
dc = np.hypot(curve[0], curve[1])
return dc
def distance_to_segment(point, line_segment):
""" Distance from a point to a line segment
Compute the distance from a point to a line segment all in 2D.
Additionally return a flag indicating if the points lies within the
boundary of the two perpendicular lines at the line segment ends
Parameters
-----------
point : array-like
Point in 2D, (x, y)
    line_segment : tuple of array-like
        (line_start, line_end), coordinates of the start and end points of
        the line segment in 2D
Returns
--------
dist : float or None
Float value if the point is 'inside' the line segment, else None
inside : bool
Flag indicating if the point is 'inside' the line segment
"""
    line_start, line_end = line_segment
    xa, ya = line_start[0], line_start[1]
xb, yb = line_end[0], line_end[1]
xp, yp = point[0], point[1]
# x-coordinates
A = xb-xa
B = yb-ya
C = yp*B+xp*A
a = 2*((B*B)+(A*A))
b = -4*A*C+(2*yp+ya+yb)*A*B-(2*xp+xa+xb)*(B*B)
c = 2*(C*C)-(2*yp+ya+yb)*C*B+(yp*(ya+yb)+xp*(xa+xb))*(B*B)
if b*b < 4*a*c:
return None, False
x1 = (-b + np.sqrt((b*b)-4*a*c))/(2*a)
x2 = (-b - np.sqrt((b*b)-4*a*c))/(2*a)
# y-coordinates
A = yb-ya
B = xb-xa
C = xp*B+yp*A
a = 2*((B*B)+(A*A))
b = -4*A*C+(2*xp+xa+xb)*A*B-(2*yp+ya+yb)*(B*B)
c = 2*(C*C)-(2*xp+xa+xb)*C*B+(xp*(xa+xb)+yp*(ya+yb))*(B*B)
if b*b < 4*a*c:
return None, False
y1 = (-b + np.sqrt((b*b)-4*a*c))/(2*a)
y2 = (-b - np.sqrt((b*b)-4*a*c))/(2*a)
# Put point candidates together
    # all four pairings of the x and y roots
    candidates = ((x1, y1), (x2, y2), (x1, y2), (x2, y1))
distances = (edist(candidates[0], point), edist(candidates[1], point),
edist(candidates[2], point), edist(candidates[3], point))
max_index = np.argmax(distances)
cand = candidates[max_index]
dist = distances[max_index]
start_cand = (line_start[0]-cand[0], line_start[1]-cand[1])
end_cand = (line_end[0]-cand[0], line_end[1]-cand[1])
dotp = (start_cand[0] * end_cand[0]) + (start_cand[1] * end_cand[1])
inside = False
if dotp <= 0.0:
inside = True
return dist, inside
def extract_relations(persons, groups):
"""" Extract relation links from grouping information
Given poses of persons and grouping information in form of person ids per
group, this method extracts line segments representing the relation
links between the persons.
Parameters
----------
persons : dict
Dictionary of person poses indexed by id
groups : array-like
        2D array with each row containing the ids of a pairwise grouping. For
        groups with more than 2 persons, multiple rows are used, one for each
        possible pairing
Returns
--------
elines : array-like
        An array of line segments, each represented by a tuple of start and
end points
"""
min_id = np.amin(groups)
elines = []
for [i, j] in groups:
line = ((persons[i-min_id][0], persons[i-min_id][1]),
(persons[j-min_id][0], persons[j-min_id][1]))
elines.append(line)
return elines
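def _example_extract_relations():
    """Illustrative sketch, not part of the original module: two persons
    grouped under ids 7 and 8 (hence min_id == 7) yield one relation line.
    """
    persons = {0: (0.0, 0.0), 1: (1.0, 1.0)}
    groups = np.array([[7, 8]])
    return extract_relations(persons, groups)  # [((0.0, 0.0), (1.0, 1.0))]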
def dtw(x, y, dist=lambda x, y: np.linalg.norm(x - y, ord=1)):
""" Computes the dtw between two signals.
Adapted from: https://github.com/pierre-rouanet/dtw/blob/master/dtw.py
"""
x = np.array(x)
if len(x.shape) == 1:
x = x.reshape(-1, 1)
y = np.array(y)
if len(y.shape) == 1:
y = y.reshape(-1, 1)
r, c = len(x), len(y)
D = np.zeros((r + 1, c + 1))
D[0, 1:] = np.inf
D[1:, 0] = np.inf
for i in range(r):
for j in range(c):
D[i+1, j+1] = dist(x[i], y[j])
for i in range(r):
for j in range(c):
D[i+1, j+1] += min(D[i, j], D[i, j+1], D[i+1, j])
D = D[1:, 1:]
dist = D[-1, -1] / sum(D.shape)
return dist, D, _track_back(D)
def _track_back(D):
i, j = np.array(D.shape) - 1
p, q = [i], [j]
while i > 0 and j > 0:
tb = np.argmin((D[i-1, j-1], D[i-1, j], D[i, j-1]))
if tb == 0:
i -= 1
j -= 1
elif tb == 1:
i -= 1
elif tb == 2:
j -= 1
p.insert(0, i)
q.insert(0, j)
p.insert(0, 0)
q.insert(0, 0)
return (np.array(p), np.array(q))
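def _example_dtw():
    """Illustrative sketch, not part of the original module: two 1-D signals
    differing only by a repeated sample align with zero DTW cost.
    """
    cost, acc, path = dtw([0, 1, 2], [0, 1, 1, 2])
    assert cost == 0.0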
|
|
# Copyright 2018 NOKIA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import time
import netaddr
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log
from oslo_utils import excutils
from neutron._i18n import _
from neutron.api import extensions as neutron_extensions
from neutron.db import agents_db
from neutron.db import db_base_plugin_v2
from neutron.db import provisioning_blocks
from neutron.extensions import securitygroup as ext_sg
from neutron_lib.api.definitions import external_net
from neutron_lib.api.definitions import portbindings
from neutron_lib.api import validators as lib_validators
from neutron_lib.callbacks import resources
from neutron_lib import constants as os_constants
from neutron_lib import context as n_context
from neutron_lib.exceptions import PortInUse
from neutron_lib.exceptions import SubnetNotFound
from neutron_lib.plugins.ml2 import api
from nuage_neutron.plugins.common.addresspair import NuageAddressPair
from nuage_neutron.plugins.common import base_plugin
from nuage_neutron.plugins.common import constants
from nuage_neutron.plugins.common.exceptions import NuageBadRequest
from nuage_neutron.plugins.common.exceptions import NuagePortBound
from nuage_neutron.plugins.common import extensions
from nuage_neutron.plugins.common.extensions import nuagepolicygroup
from nuage_neutron.plugins.common import nuagedb
from nuage_neutron.plugins.common import port_security
from nuage_neutron.plugins.common import qos_driver
from nuage_neutron.plugins.common import utils
from nuage_neutron.plugins.common.utils import handle_nuage_api_errorcode
from nuage_neutron.plugins.common.utils import ignore_no_update
from nuage_neutron.plugins.common.utils import ignore_not_found
from nuage_neutron.plugins.nuage_ml2.securitygroup import NuageSecurityGroup
from nuage_neutron.plugins.nuage_ml2 import trunk_driver
from nuage_neutron.vsdclient.common import constants as vsd_constants
from nuage_neutron.vsdclient.common.helper import get_l2_and_l3_sub_id
from nuage_neutron.vsdclient import restproxy
LB_DEVICE_OWNER_V2 = os_constants.DEVICE_OWNER_LOADBALANCERV2
PORT_UNPLUGGED_TYPES = (portbindings.VIF_TYPE_BINDING_FAILED,
portbindings.VIF_TYPE_UNBOUND,
portbindings.VIF_TYPE_OVS)
DEVICE_OWNER_DHCP = os_constants.DEVICE_OWNER_DHCP
LOG = log.getLogger(__name__)
class NuageMechanismDriver(base_plugin.RootNuagePlugin,
api.MechanismDriver,
db_base_plugin_v2.NeutronDbPluginV2,
agents_db.AgentDbMixin):
def __init__(self):
self._core_plugin = None
self.trunk_driver = None
self.qos_driver = None
self.psec_handler = None
self.supported_network_types = [os_constants.TYPE_VXLAN,
constants.NUAGE_HYBRID_MPLS_NET_TYPE]
super(NuageMechanismDriver, self).__init__()
def initialize(self):
LOG.debug('Initializing driver')
neutron_extensions.append_api_extensions_path(extensions.__path__)
self._validate_mech_nuage_configuration()
self.init_vsd_client()
self._wrap_vsdclient()
NuageAddressPair().register()
self.register_callbacks()
self.trunk_driver = trunk_driver.NuageTrunkDriver.create(self)
self.qos_driver = qos_driver.NuageQosDriver.create(self,
self.vsdclient)
# Nuage Security Group works with callbacks but is initialized through
# mech nuage.
NuageSecurityGroup()
self.psec_handler = port_security.NuagePortSecurityHandler(
self.vsdclient, self)
LOG.debug('Initializing complete')
def _validate_mech_nuage_configuration(self):
service_plugins = constants.MIN_MECH_NUAGE_SERVICE_PLUGINS_IN_CONFIG
extensions = constants.MIN_MECH_NUAGE_EXTENSIONS_IN_CONFIG
self._validate_config_for_nuage_driver(constants.NUAGE_ML2_DRIVER_NAME,
service_plugins,
extensions)
def _wrap_vsdclient(self):
"""Wraps nuageclient methods with try-except to ignore certain errors.
When updating an entity on the VSD and there is nothing to actually
update because the values don't change, VSD will throw an error. This
is not needed for neutron so all these exceptions are ignored.
When VSD responds with a 404, this is sometimes good (for example when
trying to update an entity). Yet sometimes this is not required to be
        an actual exception. Deleting an entity that no longer exists is fine
        for neutron, and when retrieving something from VSD, having None
        returned is easier to work with than RESTProxy exceptions.
"""
methods = inspect.getmembers(self.vsdclient,
lambda x: inspect.ismethod(x))
for m in methods:
wrapped = ignore_no_update(m[1])
if m[0].startswith('get_') or m[0].startswith('delete_'):
wrapped = ignore_not_found(wrapped)
setattr(self.vsdclient, m[0], wrapped)
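    # Illustrative sketch, not part of the original driver: the generic
    # rebinding pattern used above, with a hypothetical ``decorator``
    # argument standing in for ignore_no_update / ignore_not_found.
    @staticmethod
    def _example_wrap_methods(client, decorator):
        for name, method in inspect.getmembers(client, inspect.ismethod):
            setattr(client, name, decorator(method))
        return client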
@utils.context_log
def create_network_precommit(self, context):
network = context.current
db_context = context._plugin_context
# A network attached to an L2bridge is not allowed to be external or
# shared
self._validate_network_physnet(db_context, network)
def _validate_network_physnet(self, context, network):
l2bridge_id = nuagedb.get_nuage_l2bridge_id_for_network(
context.session, network['id'])
if l2bridge_id:
is_external = network.get(external_net.EXTERNAL)
if is_external:
msg = _("It is not allowed to create a network as external in "
"a physical_network attached to a nuage_l2bridge")
raise NuageBadRequest(msg=msg)
is_shared = network.get('shared')
if is_shared:
msg = _("It is not allowed to create a shared network in "
"a physical_network attached to a nuage_l2bridge")
raise NuageBadRequest(msg=msg)
physnets = self._get_l2bridge_physnets(context, network)
l2bridges = {p['l2bridge_id'] for p in physnets}
if len(l2bridges) > 1:
msg = _("It is not allowed to attach a network to multiple"
"nuage_l2bridges.")
raise NuageBadRequest(msg=msg)
# Block vxlan and nuage_hybrid_segments in a single network
self.check_vxlan_mpls_segments_in_network(network.get('segments', []))
@handle_nuage_api_errorcode
@utils.context_log
def update_network_precommit(self, context):
updated_network = context.current
original_network = context.original
db_context = context._plugin_context
(external_change,
shared_change,
physnets_change,
_) = self._network_no_action(original_network,
updated_network)
if any([external_change, shared_change, physnets_change]):
self._validate_update_network(db_context, external_change,
shared_change, physnets_change,
original_network,
updated_network)
# Block vxlan and nuage_hybrid_segments in a single network
# This cannot be included in the above structure since after the
# create segment operation, neutron calls update_network_precommit
# with the same value for the original and updated network
self.check_vxlan_mpls_segments_in_network(
updated_network.get('segments', []))
@handle_nuage_api_errorcode
@utils.context_log
def update_network_postcommit(self, context):
updated_network = context.current
original_network = context.original
db_context = context._plugin_context
(external_change,
shared_change,
physnets_change,
name_change) = self._network_no_action(original_network,
updated_network)
self.qos_driver.update_network(db_context, original_network,
updated_network)
if not any([external_change, shared_change, physnets_change,
name_change]):
# No update required
return
subnets = self.core_plugin.get_subnets_by_network(
db_context, updated_network['id'])
if external_change:
for subn in subnets:
subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(
db_context.session, subn['id'])
LOG.debug("Found subnet %(subn_id)s to l2 domain mapping"
" %(nuage_subn_id)s",
{'subn_id': subn['id'],
'nuage_subn_id':
subnet_l2dom['nuage_subnet_id']})
self.vsdclient.delete_subnet(
l2dom_id=subnet_l2dom['nuage_subnet_id'])
nuagedb.delete_subnetl2dom_mapping(db_context.session,
subnet_l2dom)
                # Delete the neutron port that was reserved with the IP of
                # the DHCP server; this port is no longer required.
self.delete_dhcp_nuage_port(db_context, subn)
self._add_nuage_sharedresource(db_context, subn,
constants.SR_TYPE_FLOATING,
subnets)
if shared_change and not updated_network.get(external_net.EXTERNAL):
for subnet in subnets:
nuage_subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(
db_context.session, subnet['id'])
if self._is_l2(nuage_subnet_l2dom):
                    # a permission change is only required in the l2dom case
self.vsdclient.change_perm_of_subns(
nuage_subnet_l2dom['net_partition_id'],
nuage_subnet_l2dom['nuage_subnet_id'],
updated_network['shared'],
subnet['tenant_id'], remove_everybody=True)
if name_change:
ipv4s = len([s for s in subnets if self._is_ipv4(s)])
ipv6s = len([s for s in subnets if self._is_ipv6(s)])
if ipv4s == 1 and ipv6s == 1:
# only dualstack subnets use network name as description
subnet = subnets[0]
subnet_mapping = nuagedb.get_subnet_l2dom_by_id(
db_context.session, subnet['id'])
params = {
'dualstack': True,
'network_name': updated_network['name']
}
if self._is_l2(subnet_mapping):
self.vsdclient.update_l2domain_template(
subnet_mapping['nuage_l2dom_tmplt_id'], **params)
self.vsdclient.update_l2domain(
subnet_mapping['nuage_subnet_id'], **params)
else:
params.update({
"subnet_nuage_underlay":
subnet.get(constants.NUAGE_UNDERLAY)
})
self.vsdclient.update_domain_subnet(
subnet_mapping['nuage_subnet_id'], params)
def check_dhcp_agent_alive(self, context):
get_dhcp_agent = self.get_agents(
context, filters={"alive": [True],
"binary": ['neutron-dhcp-agent']})
if get_dhcp_agent:
return True
return False
@utils.context_log
@handle_nuage_api_errorcode
def create_subnet_precommit(self, context):
subnet = context.current
network = context.network.current
db_context = context._plugin_context
prefixlen = netaddr.IPNetwork(subnet['cidr']).prefixlen
nuagenet_set = lib_validators.is_attr_set(subnet.get('nuagenet'))
net_part_set = lib_validators.is_attr_set(subnet.get('net_partition'))
if not self.is_network_type_supported(network):
if nuagenet_set or net_part_set:
# Nuage attributes set on unsupported network types
msg = _("Network should have 'provider:network_type' "
"vxlan or nuage_hybrid_mpls, or have such a segment")
raise NuageBadRequest(msg=msg)
else:
return # Not for us
with db_context.session.begin(subtransactions=True):
self.create_nuage_subnet_precommit(db_context,
network,
prefixlen, subnet,
nuagenet_set)
def _validate_create_subnet(self, db_context, network, prefixlen,
subnet, vsd_managed, l2bridge):
for attribute in ('ipv6_ra_mode', 'ipv6_address_mode'):
if not lib_validators.is_attr_set(subnet.get(attribute)):
continue
if subnet[attribute] != os_constants.DHCPV6_STATEFUL:
msg = _("Attribute %(attribute)s must be '%(allowed)s' or "
"not set.")
raise NuageBadRequest(
resource='subnet',
msg=msg % {'attribute': attribute,
'allowed': os_constants.DHCPV6_STATEFUL})
network_subnets = self.core_plugin.get_subnets(
db_context,
filters={'network_id': [subnet['network_id']]})
if vsd_managed:
self._validate_create_vsd_managed_subnet(network, subnet)
else:
self._validate_create_openstack_managed_subnet(
db_context, subnet, network_subnets)
subnet_ids = [s['id'] for s in network_subnets]
subnet_mappings = nuagedb.get_subnet_l2doms_by_subnet_ids(
db_context.session,
subnet_ids)
if len(set([vsd_managed] + [m['nuage_managed_subnet']
for m in subnet_mappings])) > 1:
msg = _("Can't mix openstack and vsd managed subnets under 1 "
"network.")
raise NuageBadRequest(resource='subnet', msg=msg)
ipv4s = len([s for s in network_subnets if self._is_ipv4(s)])
ipv6s = len([s for s in network_subnets if self._is_ipv6(s)])
if ((ipv4s > 1 or ipv6s > 1) and
self.check_dhcp_agent_alive(db_context) and
not self.is_external(db_context, network['id'])):
msg = _("A network with multiple ipv4 or ipv6 subnets is not "
"allowed when neutron-dhcp-agent is enabled")
raise NuageBadRequest(msg=msg)
# nuage_l2bridge tests
if l2bridge:
if self.check_dhcp_agent_alive(db_context):
msg = _("A network cannot be attached to an l2bridge "
"when neutron-dhcp-agent is enabled")
raise NuageBadRequest(msg=msg)
if ipv4s > 1 or ipv6s > 1:
msg = _("A network attached to a nuage_l2bridge cannot have"
" more than one ipv4 or more than one ipv6 subnet.")
raise NuageBadRequest(msg=msg)
# For l2bridges, certain parameters need to be equal for all
# bridged subnets, as they are reflected on VSD.
bridged_subnets = nuagedb.get_subnets_for_nuage_l2bridge(
db_context.session,
l2bridge['id'])
# Make subnet dict to include extensions
ipv_bridged = [
self.core_plugin._make_subnet_dict(s)
for s in bridged_subnets if
s['id'] != subnet['id'] and
s['ip_version'] == subnet['ip_version']]
if not ipv_bridged:
return
for param in constants.L2BRIDGE_SUBNET_EQUAL_ATTRIBUTES:
self._validate_l2bridge_added_subnet_parameter(
ipv_bridged[0], subnet, param, l2bridge)
@handle_nuage_api_errorcode
@utils.context_log
def create_subnet_postcommit(self, context):
# Set QOS
self.qos_driver.create_subnet(context)
@handle_nuage_api_errorcode
@utils.context_log
def update_subnet_precommit(self, context):
self.update_subnet(context)
@utils.context_log
@handle_nuage_api_errorcode
def delete_subnet_precommit(self, context):
"""Get subnet_l2dom_mapping for later.
In postcommit this nuage_subnet_l2dom_mapping is no longer available
because it is set to CASCADE with the subnet. So this row will be
deleted prior to delete_subnet_postcommit
"""
subnet = context.current
db_context = context._plugin_context
context.nuage_mapping = nuagedb.get_subnet_l2dom_by_id(
db_context.session, subnet['id'])
context.dual_stack_subnet = self.get_dual_stack_subnet(db_context,
subnet)
if not context.nuage_mapping:
return
if self._is_l3(context.nuage_mapping) and context.dual_stack_subnet:
self._validate_vips_in_use(db_context, subnet)
def _validate_vips_in_use(self, db_context, subnet):
other_version = 4 if self._is_ipv6(subnet) else 6
nuage_subnets = (
nuagedb.get_subnet_mapping_by_network_id_and_ip_version(
db_context.session, subnet['network_id'],
ip_version=other_version))
for nuage_mapping in nuage_subnets:
vip_filters = {
'fixed_ips': {'subnet_id': [nuage_mapping['subnet_id']]}
}
ports = self.core_plugin.get_ports(db_context,
filters=vip_filters,
fields='allowed_address_pairs')
ports_with_aap = [p for p in ports if p['allowed_address_pairs']]
for port in ports_with_aap:
for aap in port['allowed_address_pairs']:
if (netaddr.IPNetwork(aap['ip_address']).size == 1 and
netaddr.IPAddress(aap['ip_address']) in
netaddr.IPNetwork(subnet['cidr'])):
msg = _('IP %s is in use for nuage VIP,'
' hence cannot delete the'
' subnet.') % aap['ip_address']
raise NuageBadRequest(msg=msg)
@handle_nuage_api_errorcode
def delete_subnet_postcommit(self, context):
db_context = context._plugin_context
subnet = context.current
network = context.network.current
mapping = context.nuage_mapping
dual_stack_subnet = context.dual_stack_subnet
if not mapping:
return
if self._is_os_mgd(mapping):
if network.get('nuage_l2bridge'):
with db_context.session.begin(subtransactions=True):
l2bridge = nuagedb.get_nuage_l2bridge_blocking(
db_context.session, network['nuage_l2bridge'])
attempt = 0
while True:
try:
bridged_subnets = (
nuagedb.get_subnets_for_nuage_l2bridge(
db_context.session, l2bridge['id']))
break
except db_exc.DBDeadlock:
if attempt < 25:
LOG.debug("Retrying to get bridged subnets"
" due to Deadlock.")
attempt += 1
time.sleep(0.2)
continue
msg = ("Chance of a hanging L2Domain on VSD for"
"resource nuage-l2bridge: %s",
l2bridge['id'])
raise Exception(msg)
ipv4s = [s['id'] for s in bridged_subnets
if self._is_ipv4(s) and s['id'] != subnet['id']]
ipv6s = [s['id'] for s in bridged_subnets
if self._is_ipv6(s) and s['id'] != subnet['id']]
if ((self._is_ipv4(subnet) and ipv4s) or
(self._is_ipv6(subnet) and ipv6s)):
return
elif not ipv4s and not ipv6s:
l2bridge['nuage_subnet_id'] = None
else:
# Delete subnet from dualstack on vsd
dual_stack_subnet = self.core_plugin.get_subnet(
db_context, ipv4s[0] if ipv4s else ipv6s[0])
if dual_stack_subnet:
if self._is_ipv4(subnet):
self.vsdclient.delete_subnet(mapping=mapping,
ipv4_subnet=None,
ipv6_subnet=dual_stack_subnet)
return
else:
self.vsdclient.delete_subnet(mapping=mapping,
ipv4_subnet=dual_stack_subnet,
ipv6_subnet=None)
return
else:
l2_id, l3_sub_id = get_l2_and_l3_sub_id(mapping)
try:
self.vsdclient.delete_subnet(l3_vsd_subnet_id=l3_sub_id,
l2dom_id=l2_id,
mapping=mapping)
except restproxy.RESTProxyError as e:
vm_exist = (e.code == restproxy.RES_CONFLICT and
e.vsd_code in
[vsd_constants.VSD_VM_EXIST,
vsd_constants.VSD_VM_EXISTS_ON_VPORT,
vsd_constants.VSD_PG_IN_USE])
if vm_exist:
if l3_sub_id:
vms = self.vsdclient.vms_on_subnet(l3_sub_id)
else:
vms = self.vsdclient.vms_on_l2domain(l2_id)
np = nuagedb.get_net_partition_by_id(
db_context.session,
id=mapping['net_partition_id'])
for vm in vms:
LOG.debug('deleting VSD vm %s', vm['ID'])
params = {
'id': vm['ID'],
'tenant': subnet['tenant_id'],
'netpart_name': np['name']
}
self.vsdclient.delete_vm_by_id(params)
self.vsdclient.delete_subnet(
l3_vsd_subnet_id=l3_sub_id, l2dom_id=l2_id,
mapping=mapping)
else:
raise
else:
# VSD managed could be ipv6 + ipv4. If only one of the 2 is
# deleted, the use permission should not be removed yet.
# Also, there can be multiple subnets mapped to same VSD subnet.
clean_groups = True
other_mappings = nuagedb.get_subnet_l2doms_by_nuage_id(
db_context.session,
mapping['nuage_subnet_id'])
if other_mappings:
for other_mapping in other_mappings:
other_subnet = context._plugin.get_subnet(
db_context,
other_mapping['subnet_id'])
if subnet['tenant_id'] == other_subnet['tenant_id']:
clean_groups = False
break
if clean_groups:
self._cleanup_group(db_context,
mapping['net_partition_id'],
mapping['nuage_subnet_id'], subnet)
def _is_port_provisioning_required(self, network, port, host):
vnic_type = port.get(portbindings.VNIC_TYPE, portbindings.VNIC_NORMAL)
if vnic_type not in self._supported_vnic_types():
LOG.debug('No provisioning block for port %(port_id)s due to '
'unsupported vnic_type: %(vnic_type)s',
{'port_id': port['id'], 'vnic_type': vnic_type})
return False
if port['status'] == os_constants.PORT_STATUS_ACTIVE:
LOG.debug('No provisioning block for port %s since it is active',
port['id'])
return False
if not host:
LOG.debug('No provisioning block for port %s since it does not '
'have a host', port['id'])
return False
if not self._is_port_supported(port, network):
LOG.debug('No provisioning block for port %s since it will not '
'be handled by driver', port['id'])
return False
return True
def _insert_port_provisioning_block(self, context, port_id):
# Insert a provisioning block to prevent the port from
# transitioning to active until Nuage driver reports back
# that the port is up.
provisioning_blocks.add_provisioning_component(
context, port_id, resources.PORT,
provisioning_blocks.L2_AGENT_ENTITY
)
def _notify_port_provisioning_complete(self, port_id):
"""Notifies Neutron that the provisioning is complete for port."""
if provisioning_blocks.is_object_blocked(
n_context.get_admin_context(), port_id, resources.PORT):
provisioning_blocks.provisioning_complete(
n_context.get_admin_context(), port_id, resources.PORT,
provisioning_blocks.L2_AGENT_ENTITY)
@handle_nuage_api_errorcode
@utils.context_log
def create_port_precommit(self, context):
if self._is_port_provisioning_required(context.network.current,
context.current, context.host):
self._insert_port_provisioning_block(context._plugin_context,
context.current['id'])
@handle_nuage_api_errorcode
@utils.context_log
def create_port_postcommit(self, context):
self._create_port(context._plugin_context,
context.current,
context.network.current)
self._notify_port_provisioning_complete(context.current['id'])
def _create_port(self, db_context, port, network):
is_network_external = network.get('router:external')
# Validate port
subnet_ids = [ip['subnet_id'] for ip in port['fixed_ips']]
subnet_mappings = nuagedb.get_subnet_l2doms_by_subnet_ids(
db_context.session, subnet_ids)
if not subnet_mappings:
            LOG.warning('No VSD subnet found for port.')
return
if not self._should_act_on_port(port, is_network_external):
            LOG.warning('Port not applicable for Nuage.')
return
self._validate_port(db_context, port,
is_network_external, subnet_mappings, network)
self.nuage_callbacks.notify(resources.PORT, constants.BEFORE_CREATE,
self, context=db_context,
request_port=port)
subnet_mapping = subnet_mappings[0]
nuage_vport = nuage_vm = np_name = None
np_id = subnet_mapping['net_partition_id']
nuage_subnet = self._find_vsd_subnet(db_context, subnet_mapping)
try:
if port.get('binding:host_id') and self._port_should_have_vm(port):
self._validate_vmports_same_netpartition(db_context,
port, np_id)
desc = ("device_owner:" + constants.NOVA_PORT_OWNER_PREF +
"(please do not edit)")
nuage_vport = self._create_nuage_vport(port, nuage_subnet,
desc)
np_name = nuagedb.get_net_partition_by_id(
db_context.session, np_id)['name']
nuage_vm = self._create_nuage_vm(
db_context, port, np_name, subnet_mapping,
nuage_vport, nuage_subnet, network)
else:
nuage_vport = self._create_nuage_vport(port, nuage_subnet)
self.calculate_vips_for_port_ips(db_context,
port)
domain_type, domain_id = self._get_domain_type_id_from_vsd_subnet(
self.vsdclient, nuage_subnet)
self.qos_driver.process_create_update_port(
db_context, port, nuage_vport, domain_type, domain_id)
self.psec_handler.process_port_create(db_context, port,
nuage_vport,
domain_type, domain_id,
subnet_mapping,
pg_type=constants.SOFTWARE)
except (restproxy.RESTProxyError, NuageBadRequest) as ex:
            # TODO(gridinv): it looks like in some cases we convert 404 to
            # 400, so we have to catch both. Question: don't we hide a
            # valid error with this?
if nuage_vm:
if (port.get('device_owner') in
[LB_DEVICE_OWNER_V2, DEVICE_OWNER_DHCP]):
params = {
'externalID': port['id'],
'tenant': port['tenant_id'],
'netpart_name': np_name
}
self.vsdclient.delete_vm_by_external_id(params)
else:
self._delete_nuage_vm(db_context, port, np_name,
subnet_mapping, port['device_id'],
network)
if nuage_vport:
self.vsdclient.delete_nuage_vport(nuage_vport.get('ID'))
if self._get_port_from_neutron(db_context, port):
raise
else:
LOG.info(_("Port was deleted concurrently: %s"), ex)
return
except Exception:
if nuage_vm:
self._delete_nuage_vm(db_context, port, np_name,
subnet_mapping, port['device_id'],
network)
if nuage_vport:
self.vsdclient.delete_nuage_vport(nuage_vport.get('ID'))
raise
rollbacks = []
try:
self.nuage_callbacks.notify(resources.PORT, constants.AFTER_CREATE,
self, context=db_context, port=port,
vport=nuage_vport, rollbacks=rollbacks,
subnet_mapping=subnet_mapping,
vsd_subnet=nuage_subnet)
except Exception:
with excutils.save_and_reraise_exception():
for rollback in reversed(rollbacks):
rollback[0](*rollback[1], **rollback[2])
@handle_nuage_api_errorcode
@utils.context_log
def update_port_precommit(self, context):
db_context = context._plugin_context
port = context.current
original = context.original
network = context.network.current
if self._is_port_provisioning_required(network,
port, context.host):
self._insert_port_provisioning_block(db_context,
port['id'])
is_network_external = network.get('router:external')
self._check_fip_on_port_with_multiple_ips(db_context, port)
currently_actionable = self._should_act_on_port(port,
is_network_external)
previously_actionable = self._should_act_on_port(original,
is_network_external)
subnet_ids = [ip['subnet_id'] for ip in port['fixed_ips']]
subnet_mappings = nuagedb.get_subnet_l2doms_by_subnet_ids(
db_context.session, subnet_ids)
if not currently_actionable and previously_actionable:
# Port no longer needed
vsd_errors = [(vsd_constants.CONFLICT_ERR_CODE,
vsd_constants.VSD_VM_EXISTS_ON_VPORT)]
utils.retry_on_vsdclient_error(
self._delete_port, vsd_error_codes=vsd_errors)(db_context,
original,
network)
return
elif currently_actionable and not previously_actionable:
# Port creation needed
self._create_port(db_context, port, network)
return
elif not currently_actionable or not subnet_mappings:
return
self._validate_port(db_context, port, is_network_external,
subnet_mappings)
# We only need the VSD properties of the subnet mapping, this is equal
# for all subnet_mappings.
subnet_mapping = subnet_mappings[0]
self._check_subport_in_use(original, port)
vm_if_update_required = self._check_vm_if_update(
db_context, original, port)
host_added = host_removed = False
if (not original['binding:host_id'] and port['binding:host_id'] or
not original['device_id'] and port['device_id']):
host_added = True
elif (original['binding:host_id'] and not port['binding:host_id'] or
original['device_id'] and not port['device_id']):
host_removed = True
elif (original['device_owner'] and not port['device_owner'] and
original['device_owner'] == LB_DEVICE_OWNER_V2):
host_removed = True
nuage_vport = self._find_vport(db_context, port, subnet_mapping)
if not nuage_vport:
return
if vm_if_update_required:
data = {
'mac': port['mac_address'],
'ipv4': port['new_ipv4'],
'ipv6': port['new_ipv6'],
'nuage_vport_id': nuage_vport['ID'],
}
if self._is_trunk_subport(port):
# (gridinv) : subport can be updated only if port
# is not in use - so no need for vm resync
self.vsdclient.update_subport(port, nuage_vport, data)
else:
nuage_vip_dict = dict()
try:
self.delete_vips_for_interface_update(data,
port['new_ipv4'],
port['new_ipv6'],
nuage_vip_dict,
nuage_vport,
port['orig_ips'],
subnet_mapping,
original)
self.vsdclient.update_nuage_vm_if(data)
except restproxy.RESTProxyError as e:
if e.vsd_code != vsd_constants.VSD_VM_ALREADY_RESYNC:
self.rollback_deleted_vips(data, port['new_ipv4'],
nuage_vip_dict, nuage_vport,
port, subnet_mapping)
raise
nuage_subnet = self._find_vsd_subnet(
db_context, subnet_mapping)
self._port_device_change(context, db_context, nuage_vport,
original, port,
subnet_mapping, nuage_subnet,
host_added, host_removed)
rollbacks = []
try:
self.nuage_callbacks.notify(resources.PORT, constants.AFTER_UPDATE,
self.core_plugin, context=db_context,
port=port,
original_port=original,
vport=nuage_vport, rollbacks=rollbacks,
subnet_mapping=subnet_mapping,
vsd_subnet=nuage_subnet)
domain_type, domain_id = self._get_domain_type_id_from_vsd_subnet(
self.vsdclient, nuage_subnet)
self.qos_driver.process_create_update_port(db_context, port,
nuage_vport,
domain_type, domain_id,
original)
rollbacks.append((self.qos_driver.process_create_update_port,
[db_context, original, nuage_vport,
domain_type, domain_id, port], {}))
self.psec_handler.process_port_update(db_context, port, original,
nuage_vport,
domain_type, domain_id,
subnet_mapping)
rollbacks.append((self.psec_handler.process_port_update,
[db_context, original, port, nuage_vport,
domain_type, domain_id, subnet_mapping], {}))
except Exception as e:
LOG.error('update_port_precommit(): got exception: %s', e)
with excutils.save_and_reraise_exception():
for rollback in reversed(rollbacks):
rollback[0](*rollback[1], **rollback[2])
@handle_nuage_api_errorcode
@utils.context_log
def update_port_postcommit(self, context):
self._notify_port_provisioning_complete(context.current['id'])
def rollback_deleted_vips(self, data, new_ipv4_ip, nuage_vip_dict,
nuage_vport, port, subnet_mapping):
for vip in nuage_vip_dict.keys():
params = {
'vport_id': nuage_vport['ID'],
'externalID': port['id'],
'vip': vip,
'subnet_id': subnet_mapping['nuage_subnet_id'],
'mac': data['mac']
}
if vip == new_ipv4_ip:
params['IPType'] = 'IPV4'
else:
params['IPType'] = 'IPV6'
LOG.debug("Rolling back due to update interface failure by"
" creating deleted vip ")
self.vsdclient.create_vip_on_vport(params)
def delete_vips_for_interface_update(self, data, new_ipv4_ip, new_ipv6_ip,
nuage_vip_dict, nuage_vport, old_ips,
subnet_mapping, original_port):
if new_ipv4_ip in old_ips[4][:-1] and self._is_l3(subnet_mapping):
# New fixed ip is in use as vip, delete ipv4 vip
nuage_vip_dict[new_ipv4_ip] = data['mac']
if new_ipv6_ip in old_ips[6][:-1] and self._is_l3(subnet_mapping):
# New fixed ip is in use as vip, delete ipv6 vip
nuage_vip_dict[new_ipv6_ip] = data['mac']
for addrpair in original_port['allowed_address_pairs']:
if (addrpair['ip_address'] == new_ipv4_ip or
addrpair['ip_address'] == new_ipv6_ip):
# New fixed ip is in use as vip, delete vip
nuage_vip_dict[addrpair['ip_address']] = (
addrpair['mac_address'])
self.vsdclient.delete_vips(nuage_vport['ID'],
nuage_vip_dict,
nuage_vip_dict)
def _find_vport(self, db_context, port, subnet_mapping):
try:
nuage_vport = self._get_nuage_vport(port,
subnet_mapping,
required=True)
return nuage_vport
except (restproxy.ResourceNotFoundException, NuageBadRequest):
# Get port from db to see if it is deleted concurrently
port_db = self._get_port_from_neutron(db_context,
port)
if not port_db:
LOG.info("Port %s has been deleted concurrently",
port['id'])
return None
else:
# Port was not deleted, it moved l2->l3 or l3->l2
# Update subnet_mapping with new VSD subnet ID
for fixed_ip in port_db['fixed_ips']:
subnet_db = self._get_subnet_from_neutron(
db_context, fixed_ip['subnet_id'])
if not subnet_db:
LOG.info("Subnet %s has been deleted concurrently",
fixed_ip['subnet_id'])
continue
subnet_mapping['subnet_id'] = subnet_db['id']
LOG.debug("Retrying to get new subnet mapping from vsd")
subnet_mapping = self._get_updated_subnet_mapping_from_vsd(
db_context, subnet_mapping)
return self._get_nuage_vport(port, subnet_mapping,
required=True)
def _get_updated_subnet_mapping_from_vsd(self, context, subnet_mapping):
# The subnet has likely changed from l3 to l2 or vice versa
vsd_subnet = self._find_vsd_subnet(context, subnet_mapping)
if vsd_subnet['type'] == constants.SUBNET:
subnet_mapping['nuage_subnet_id'] = vsd_subnet['ID']
subnet_mapping['nuage_l2dom_tmplt_id'] = None
else:
subnet_mapping['nuage_subnet_id'] = vsd_subnet['ID']
subnet_mapping['nuage_l2dom_tmplt_id'] = vsd_subnet['templateID']
return subnet_mapping
def _port_device_change(self, context, db_context, nuage_vport, original,
port, subnet_mapping, nuage_subnet,
host_added=False, host_removed=False):
if not host_added and not host_removed:
return
np_name = nuagedb.get_net_partition_by_id(
db_context.session, subnet_mapping['net_partition_id'])['name']
if host_removed:
if (self._port_should_have_vm(original) or
not original['device_owner']):
# When device_owner is missing it is unknown whether a VM
# exists in VSD
self._delete_nuage_vm(
db_context, original,
np_name,
subnet_mapping, original['device_id'],
context.network.current,
is_port_device_owner_removed=not port['device_owner'])
elif host_added:
self._validate_security_groups(context)
if self._port_should_have_vm(port):
self._create_nuage_vm(db_context, port,
np_name, subnet_mapping, nuage_vport,
nuage_subnet, context.network.current)
@utils.context_log
def delete_port_postcommit(self, context):
db_context = context._plugin_context
network = context.network.current
port = context.current
vsd_errors = [(vsd_constants.CONFLICT_ERR_CODE,
vsd_constants.VSD_VM_EXISTS_ON_VPORT)]
utils.retry_on_vsdclient_error(
self._delete_port, vsd_error_codes=vsd_errors)(db_context,
port,
network)
def _delete_port(self, db_context, port, network):
subnet_mapping = self.get_subnet_mapping_by_port(db_context, port)
if not subnet_mapping:
return
is_network_external = network.get('router:external')
if not self._should_act_on_port(port, is_network_external):
# GW host vport cleanup
self.delete_gw_host_vport(db_context, port, subnet_mapping)
return
        # This check is needed because the neutron plugin calls delete_port
        # after a nuage exception is raised when virtio ports are created
        # in nuage_hybrid_mpls networks
if self.is_nuage_hybrid_mpls_network(network):
return
nuage_vport = self._get_nuage_vport(port, subnet_mapping,
required=False)
if nuage_vport and nuage_vport.get('hasAttachedInterfaces'):
# Delete VMInterface
np_name = nuagedb.get_net_partition_by_id(
db_context.session, subnet_mapping['net_partition_id'])['name']
device_id = port['device_id']
if not device_id:
# Due to concurrent Create/Update/Delete we do not know the
# device_id of the port. We get it from VSD vminterface instead
vm_if = self.vsdclient.get_nuage_vm_if_by_vport_id(
nuage_vport['ID'])
device_id = vm_if['VMUUID']
self._delete_nuage_vm(
db_context, port, np_name, subnet_mapping,
device_id, network,
is_port_device_owner_removed=not port['device_owner'])
if nuage_vport and nuage_vport.get('type') == constants.VM_VPORT:
try:
self.vsdclient.delete_nuage_vport(
nuage_vport['ID'])
except Exception as e:
LOG.error("Failed to delete vport from vsd {vport id: %s}",
nuage_vport['ID'])
                raise
rollbacks = []
try:
self.nuage_callbacks.notify(
resources.PORT, constants.AFTER_DELETE,
self.core_plugin, context=db_context,
updated_port=port,
port=port,
subnet_mapping=subnet_mapping)
except Exception:
with excutils.save_and_reraise_exception():
for rollback in reversed(rollbacks):
rollback[0](*rollback[1], **rollback[2])
else:
self.delete_gw_host_vport(db_context, port, subnet_mapping)
return
@utils.context_log
def bind_port(self, context):
vnic_type = context.current.get(portbindings.VNIC_TYPE,
portbindings.VNIC_NORMAL)
if vnic_type not in self._supported_vnic_types():
LOG.debug("Refusing to bind due to unsupported vnic_type: %s",
vnic_type)
return
if not self.is_port_vnic_type_supported(context.current):
LOG.debug("Refusing to bind due to unsupported vnic_type: %s with "
"no switchdev capability", portbindings.VNIC_DIRECT)
return
for segment in context.network.network_segments:
if self._check_segment(segment):
context.set_binding(segment[api.ID],
portbindings.VIF_TYPE_OVS,
{portbindings.CAP_PORT_FILTER: False})
break
@staticmethod
def _network_no_action(original, update):
external_change = original.get(
external_net.EXTERNAL) != update.get(
external_net.EXTERNAL)
shared_change = original.get(
'shared') != update.get('shared')
physnets_change = (
(original.get('provider:physical_network') !=
update.get('provider:physical_network')) or
(original.get('provider:segmentation_id') !=
update.get('provider:segmentation_id')) or
(original.get('provider:network_type') !=
update.get('provider:network_type')) or
original.get('segments') != update.get('segments'))
name_change = original.get('name') != update.get('name')
return external_change, shared_change, physnets_change, name_change
def _validate_update_network(self, context, external_change,
shared_change, physnets_change,
original, updated):
subnets = self.core_plugin.get_subnets(
context, filters={'network_id': [updated['id']]})
for subn in subnets:
subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(
context.session, subn['id'])
if subnet_l2dom and subnet_l2dom.get('nuage_managed_subnet'):
msg = _('Network %s has a VSD-Managed subnet associated'
' with it') % updated['id']
raise NuageBadRequest(msg=msg)
if (external_change and subnets and not
updated.get(external_net.EXTERNAL)):
            msg = _('External network with subnets cannot be '
                    'changed to non-external network')
raise NuageBadRequest(msg=msg)
if external_change:
self._validate_nuage_sharedresource(updated['id'], subnets, None)
ports = self.core_plugin.get_ports(context, filters={
'network_id': [updated['id']]})
if external_change and updated.get(external_net.EXTERNAL):
for p in ports:
if p['device_owner'] not in [constants.DEVICE_OWNER_DHCP_NUAGE,
os_constants.DEVICE_OWNER_DHCP]:
# Check if there are ports except nuage and neutron dhcp
# ports attached to this network. If there are, then
# updating the network router:external is not possible.
msg = (_("Network %s cannot be updated. "
"There are one or more ports still in"
" use on the network.") % updated['id'])
raise NuageBadRequest(msg=msg)
if shared_change:
for p in ports:
if p['device_owner'].endswith(resources.ROUTER_INTERFACE):
msg = (_("Cannot update the shared attribute value"
" since subnet with id %s is attached to a"
" router.") % p['fixed_ips']['subnet_id'])
raise NuageBadRequest(msg=msg)
# nuage_l2bridge checks
if subnets and physnets_change:
updated_physnets = self._get_l2bridge_physnets(context,
updated)
l2bridges = {p['l2bridge_id'] for p in updated_physnets}
if len(l2bridges) > 1:
msg = _("It is not allowed to attach a network to multiple"
"nuage_l2bridges.")
raise NuageBadRequest(msg=msg)
current_physnets = self._get_l2bridge_physnets(context,
original)
# Adding or removing the network from a l2bridge
if len(current_physnets) != len(updated_physnets):
msg = _("It is not allowed to change the nuage_l2bridge "
"this network is attached to.")
raise NuageBadRequest(msg=msg)
if (current_physnets and
current_physnets[0]['l2bridge_id'] !=
updated_physnets[0]['l2bridge_id']):
msg = _("It is not allowed to change the nuage_l2bridge "
"this network is attached to.")
raise NuageBadRequest(msg=msg)
@staticmethod
def _get_l2bridge_physnets(context, network):
if network.get('provider:physical_network'):
segments = [{
'provider:physical_network':
network['provider:physical_network'],
'provider:segmentation_id':
network['provider:segmentation_id'],
'provider:network_type': network['provider:network_type']
}]
else:
segments = network.get('segments', [])
physnet_list = []
for segment in segments:
physnets = nuagedb.get_nuage_l2bridge_physnet_mappings(
context.session, physnet=segment['provider:physical_network'],
segmentation_id=segment['provider:segmentation_id'],
segmentation_type=segment['provider:network_type'])
physnet_list.extend(physnets)
return physnet_list
@staticmethod
def _validate_security_groups(context):
port = context.current
db_context = context._plugin_context
sg_ids = port[ext_sg.SECURITYGROUPS]
if not sg_ids:
return
baremetal_ports = nuagedb.get_port_bindings_for_sg(
db_context.session,
sg_ids,
[portbindings.VNIC_BAREMETAL],
bound_only=True)
if len(baremetal_ports) > 0:
msg = ("Security Groups for baremetal and normal ports "
"are mutualy exclusive")
raise NuageBadRequest(msg=msg)
def _check_vm_if_update(self, db_context, orig_port, port):
new_ips = self.calculate_vips_for_port_ips(
db_context, port)
orig_ips = self.calculate_vips_for_port_ips(
db_context, orig_port)
orig_ipv4 = orig_ips[4][-1] if orig_ips[4] else None
orig_ipv6 = orig_ips[6][-1] if orig_ips[6] else None
new_ipv4 = new_ips[4][-1] if new_ips[4] else None
new_ipv6 = new_ips[6][-1] if new_ips[6] else None
ips_change = (orig_ipv4 != new_ipv4 or
orig_ipv6 != new_ipv6)
port['new_ipv4'] = new_ipv4
port['new_ipv6'] = new_ipv6
port['orig_ips'] = orig_ips
if (ips_change and
port['device_owner'] == os_constants.DEVICE_OWNER_DHCP):
return True
mac_change = orig_port['mac_address'] != port['mac_address']
vm_if_update = ips_change or mac_change
vif_type = orig_port.get(portbindings.VIF_TYPE)
if vm_if_update and vif_type not in PORT_UNPLUGGED_TYPES:
raise NuagePortBound(port_id=orig_port['id'],
vif_type=vif_type,
old_ips=orig_port['fixed_ips'],
new_ips=port['fixed_ips'])
if ips_change:
# Only 1 corresponding VSD subnet allowed
orig_vsd_subnets = self._get_vsd_subnet_ids_by_port(db_context,
orig_port)
new_vsd_subnets = self._get_vsd_subnet_ids_by_port(db_context,
port)
if orig_vsd_subnets != new_vsd_subnets:
msg = _("Updating fixed ip of port {} "
"to a different subnet is "
"not allowed.").format(port["id"])
raise NuageBadRequest(msg=msg)
if len(new_vsd_subnets) != 1:
msg = _("One neutron port cannot correspond to multiple "
"VSD subnets").format(port["id"])
raise NuageBadRequest(msg=msg)
subnet_ids = set([x['subnet_id'] for x in port['fixed_ips']])
subnet_mappings = nuagedb.get_subnet_l2doms_by_subnet_ids(
db_context.session, subnet_ids)
l2dom = next((subnet for subnet in subnet_mappings
if self._is_l2(subnet)), None)
if l2dom and l2dom['nuage_managed_subnet'] and not self.get_subnet(
db_context, l2dom['subnet_id'])['enable_dhcp']:
nuage_subnet, shared_subnet = self._get_nuage_subnet(
l2dom['nuage_subnet_id'], subnet_type=constants.L2DOMAIN)
vsd_l2dom = shared_subnet or nuage_subnet
return vsd_l2dom['DHCPManaged']
return vm_if_update
@staticmethod
def _get_vsd_subnet_ids_by_port(db_context, port):
subnet_ids = set([x['subnet_id'] for x in port['fixed_ips']])
subnet_mappings = nuagedb.get_subnet_l2doms_by_subnet_ids(
db_context.session,
subnet_ids)
return set([x['nuage_subnet_id'] for x in subnet_mappings])
@staticmethod
def _check_subport_in_use(orig_port, port):
if NuageMechanismDriver._is_trunk_subport(orig_port):
vif_orig = orig_port.get(portbindings.VIF_TYPE)
if vif_orig not in PORT_UNPLUGGED_TYPES and port.get('device_id'):
raise PortInUse(port_id=port['id'],
net_id=port['network_id'],
device_id='trunk:subport')
def _check_fip_on_port_with_multiple_ips(self, context, port):
# Block a port with fip getting multiple ips
fips = nuagedb.get_floatingips_per_port_id(context.session, port['id'])
ipv4s, ipv6s = self.count_fixed_ips_per_version(port['fixed_ips'])
if fips and (ipv4s > 1 or ipv6s > 1):
msg = _("It is not possible to add multiple ipv4 or multiple ipv6"
" addresses on port {} since it has fip {} associated"
"to it.").format(port['id'], fips[0]['id'])
raise NuageBadRequest(msg=msg)
def _should_act_on_port(self, port, is_network_external=False):
# Should Nuage create vport for this port
if not port.get('fixed_ips'):
return False
device_owner = port.get('device_owner')
is_dhcp_port = device_owner == os_constants.DEVICE_OWNER_DHCP
is_nuage_dhcp_port = device_owner == constants.DEVICE_OWNER_DHCP_NUAGE
is_router_gw = device_owner == os_constants.DEVICE_OWNER_ROUTER_GW
is_router_int = device_owner == os_constants.DEVICE_OWNER_ROUTER_INTF
if is_router_gw or is_router_int:
# Router can be attached to multiple subnets.
return False
if not self.needs_vport_creation(device_owner):
return False
if is_dhcp_port and is_network_external:
return False
if not self.is_port_vnic_type_supported(port):
return False
if is_nuage_dhcp_port:
return False
return True
def _validate_port(self, db_context, port, is_network_external,
subnet_mappings, network=None):
"""_validate_port : validating neutron port
"""
fixed_ips = port.get('fixed_ips', [])
subnet_list = {4: [], 6: []}
for fixed_ip in fixed_ips:
subnet_list[netaddr.IPAddress(
fixed_ip['ip_address']).version].append(
fixed_ip['subnet_id'])
if len(set(subnet_list[4])) > 1:
msg = "Port can't have multiple IPv4 IPs of different subnets"
raise NuageBadRequest(msg=msg)
if len(set(subnet_list[6])) > 1:
msg = "Port can't have multiple IPv6 IPs of different subnets"
raise NuageBadRequest(msg=msg)
if is_network_external:
msg = "Cannot create port in a FIP pool Subnet"
raise NuageBadRequest(resource='port', msg=msg)
self._validate_nuage_l2bridges(db_context, port)
self._check_security_groups_per_port_limit(port['security_groups'])
nuage_managed = []
vsd_subnet_ids = set()
for mapping in subnet_mappings:
nuage_managed.append(mapping['nuage_managed_subnet'])
vsd_subnet_ids.add(mapping['nuage_subnet_id'])
if len(vsd_subnet_ids) > 1 and all(nuage_managed):
msg = _("Port has fixed ips for multiple vsd subnets.")
raise NuageBadRequest(msg=msg)
if (not self._is_vsd_mgd(subnet_mappings[0]) and
port.get(nuagepolicygroup.NUAGE_POLICY_GROUPS)):
msg = ("Cannot use VSP policy groups on OS managed subnets,"
" use neutron security groups instead.")
raise NuageBadRequest(resource='port', msg=msg)
if network and self.is_nuage_hybrid_mpls_network(network):
            msg = 'Virtio port is not allowed in nuage_hybrid_mpls networks'
raise NuageBadRequest(msg=msg)
@staticmethod
def get_subnet_mapping_by_port(db_context, port):
return nuagedb.get_subnet_l2dom_by_port(db_context.session, port)
@staticmethod
def _port_should_have_vm(port):
device_owner = port.get('device_owner')
return (constants.NOVA_PORT_OWNER_PREF in device_owner or
device_owner == LB_DEVICE_OWNER_V2 or
device_owner == DEVICE_OWNER_DHCP or
constants.DEVICE_OWNER_OCTAVIA_HEALTHMGR in device_owner)
def _create_nuage_vm(self, db_context, port, np_name, subnet_mapping,
nuage_port, nuage_subnet, network):
if (port.get('device_owner') in
[LB_DEVICE_OWNER_V2, DEVICE_OWNER_DHCP,
constants.DEVICE_OWNER_OCTAVIA_HEALTHMGR]):
no_of_ports = 1
vm_id = port['id']
else:
vm_id = port['device_id']
no_of_ports = self.get_num_ports_of_device(
db_context, vm_id, network)
fixed_ips = port['fixed_ips']
subnets = {4: {}, 6: {}}
ips = {4: [], 6: []}
for fixed_ip in fixed_ips:
try:
subnet = self.core_plugin.get_subnet(db_context,
fixed_ip['subnet_id'])
except SubnetNotFound:
LOG.info("Subnet %s has been deleted concurrently",
fixed_ip['subnet_id'])
return
subnets[subnet['ip_version']] = subnet
ips[subnet['ip_version']].append(fixed_ip['ip_address'])
for key in ips:
ips[key] = self.sort_ips(ips[key])
        # Only when the tenant creating the port differs from both the
        # ipv4 and ipv6 subnet tenants do we have to add extra permissions
        # on the subnet. If one of the two subnet tenants matches,
        # permissions already exist from subnet-create.
subnet = subnets[4] or subnets[6]
if port['tenant_id'] not in (subnets[4].get('tenant_id'),
subnets[6].get('tenant_id')):
subnet_tenant_id = subnet.get('tenant_id')
else:
subnet_tenant_id = port['tenant_id']
shared = subnet.get('shared') or False
params = {
'port_id': port['id'],
'id': vm_id,
'mac': port['mac_address'],
'netpart_name': np_name,
'ipv4': ips[4][-1] if ips[4] else None,
'ipv6': ips[6][-1] if ips[6] else None,
'no_of_ports': no_of_ports,
'tenant': port['tenant_id'],
'netpart_id': subnet_mapping['net_partition_id'],
'neutron_id': port['fixed_ips'][0]['subnet_id'],
'vport_id': nuage_port.get('ID'),
'subn_tenant': subnet_tenant_id,
'portOnSharedSubn': shared,
'enable_dhcpv4': subnets[4].get('enable_dhcp'),
'enable_dhcpv6': subnets[6].get('enable_dhcp'),
'vsd_subnet': nuage_subnet
}
network_details = self.core_plugin.get_network(db_context,
port['network_id'])
if network_details['shared']:
self.vsdclient.create_usergroup(
port['tenant_id'],
subnet_mapping['net_partition_id'])
try:
return self.vsdclient.create_vms(params)
except restproxy.ResourceNotFoundException as rnf:
try:
subnet = self.core_plugin.get_subnet(db_context,
subnet.get('id'))
except SubnetNotFound:
subnet = None
if not subnet:
LOG.info("Subnet %s has been deleted concurrently",
subnets[4].get('id'))
else:
raise rnf
def get_num_ports_of_device(self, db_context, device_id, network):
filters = {'device_id': [device_id]}
ports = self.core_plugin.get_ports(db_context, filters)
ports = [p for p in ports
if self._is_port_supported(p, network) and
p['binding:host_id']]
return len(ports)
def _is_port_supported(self, port, network):
if not self.is_port_vnic_type_supported(port):
return False
return self.is_network_type_supported(network)
def delete_gw_host_vport(self, context, port, subnet_mapping):
port_params = {
'neutron_port_id': port['id']
}
# Check if l2domain/subnet exist. In case of router_interface_delete,
# subnet is deleted and then call comes to delete_port. In that
# case, we just return
vsd_subnet = self.vsdclient.get_nuage_subnet_by_mapping(subnet_mapping)
if not vsd_subnet:
return
port_params['l2dom_id'], port_params['l3dom_id'] = \
get_l2_and_l3_sub_id(subnet_mapping)
nuage_vport = self.vsdclient.get_nuage_vport_by_neutron_id(
port_params, required=False)
if nuage_vport and (nuage_vport['type'] == constants.HOST_VPORT):
def_netpart = cfg.CONF.RESTPROXY.default_net_partition_name
netpart = nuagedb.get_default_net_partition(context, def_netpart)
self.vsdclient.delete_nuage_gateway_vport(
context,
nuage_vport.get('ID'),
netpart['id'])
def _delete_nuage_vm(self, db_context, port, np_name, subnet_mapping,
device_id, network,
is_port_device_owner_removed=False):
if port.get('device_owner') in [LB_DEVICE_OWNER_V2, DEVICE_OWNER_DHCP]:
no_of_ports = 1
vm_id = port['id']
else:
vm_id = device_id
no_of_ports = self.get_num_ports_of_device(db_context, vm_id,
network)
        # In case the device was removed, this number should equal the
        # amount of vminterfaces on VSD. If it's >1, vsdclient knows there
        # are still other vminterfaces using the VM and will not delete
        # the vm; if it's 1 or less, vsdclient will automatically delete
        # the vm. Because the port count is determined by a database count
        # of ports with device_id X, AND because the update already
        # happened in the ml2 plugin, AND because we're in the same database
        # transaction, the count here returns 1 less (the updated port is
        # not counted because its device_id is already cleared).
if is_port_device_owner_removed:
no_of_ports += 1
fixed_ips = port['fixed_ips']
subnets = {4: {}, 6: {}}
for fixed_ip in fixed_ips:
subnet = self.core_plugin.get_subnet(
db_context, fixed_ip['subnet_id'])
subnets[subnet['ip_version']] = subnet
if port['tenant_id'] not in (subnets[4].get('tenant_id'),
subnets[6].get('tenant_id')):
subnet_tenant_id = subnets[4].get('tenant_id')
else:
subnet_tenant_id = port['tenant_id']
shared = subnets[4].get('shared') or subnets[6].get('shared', False)
vm_interface = self.vsdclient.get_nuage_vm_interface_by_neutron_id(
port['id'])
if not vm_interface:
return
params = {
'no_of_ports': no_of_ports,
'netpart_name': np_name,
'tenant': port['tenant_id'],
'nuage_vif_id': vm_interface['ID'],
'id': vm_id,
'subn_tenant': subnet_tenant_id,
'portOnSharedSubn': shared
}
if not vm_interface.get('domainID'):
params['l2dom_id'] = subnet_mapping['nuage_subnet_id']
else:
            params['l3dom_id'] = subnet_mapping['nuage_subnet_id']
try:
self.vsdclient.delete_vms(params)
except Exception:
LOG.error("Failed to delete vm from vsd {vm id: %s}",
vm_id)
raise
def _get_nuage_vport(self, port, subnet_mapping, required=True):
port_params = {'neutron_port_id': port['id']}
l2dom_id, l3dom_id = get_l2_and_l3_sub_id(subnet_mapping)
port_params['l2dom_id'] = l2dom_id
port_params['l3dom_id'] = l3dom_id
return self.vsdclient.get_nuage_vport_by_neutron_id(
port_params, required=required)
@staticmethod
def _check_segment(segment):
network_type = segment[api.NETWORK_TYPE]
return network_type == os_constants.TYPE_VXLAN
@staticmethod
def _supported_vnic_types():
return [portbindings.VNIC_NORMAL,
portbindings.VNIC_DIRECT,
portbindings.VNIC_VIRTIO_FORWARDER]
@staticmethod
def _direct_vnic_supported(port):
profile = port.get(portbindings.PROFILE)
capabilities = []
if profile:
capabilities = profile.get('capabilities', [])
return (port.get(portbindings.VNIC_TYPE) ==
portbindings.VNIC_DIRECT and
'switchdev' in capabilities)
@staticmethod
def is_port_vnic_type_supported(port):
if port.get(portbindings.VNIC_TYPE) == portbindings.VNIC_DIRECT:
return NuageMechanismDriver._direct_vnic_supported(port)
else:
return (port.get(portbindings.VNIC_TYPE) in
NuageMechanismDriver._supported_vnic_types())
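    # Illustrative only: a direct-vnic port passes the check above when its
    # binding profile advertises switchdev, e.g.
    #   {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT,
    #    portbindings.PROFILE: {'capabilities': ['switchdev']}}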
def check_vlan_transparency(self, context):
"""Nuage driver vlan transparency support."""
return True
def check_vxlan_mpls_segments_in_network(self, segments):
if segments:
segment_types = {segment['provider:network_type'] for segment
in segments if
segment['provider:network_type'] in
self.supported_network_types}
if len(segment_types) == 2:
msg = _('It is not allowed to have both vxlan and '
'nuage_hybrid_mpls segments in a single network')
raise NuageBadRequest(msg=msg)
|
|
from __future__ import absolute_import
from collections import namedtuple
import functools
import zipper
from . import fn
from .fn import curry
def union(inclusions, tree):
targets = functools.reduce(
# for each tree func run it, convert to set
lambda p, f: p | set(f(tree)),
inclusions,
set()
)
return make_tree(targets)
# [(tree -> [ImageNames])] -> [Containers]
def eval(specifiers, targets):
"""
Given a list of partially applied functions that
take a tree and return a list of image names.
First apply all non-exclude functinons with the tree built from targets
creating a union of the results.
Then returns the results of applying each exclusion functinon
in order.
"""
inclusions = []
exclusions = []
for spec in specifiers:
if spec.func_name == 'exclude':
exclusions.append(spec)
else:
inclusions.append(spec)
tree = make_tree(targets)
if inclusions:
tree = union(inclusions, tree)
return fn.compose(*exclusions)(tree)
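# A minimal sketch (hypothetical, not shipwright API) of how eval() treats
# its specifiers: any partially applied function whose func_name is
# 'exclude' is an exclusion, applied after the union of all the other
# (inclusion) specifiers, e.g.:
#
#   specifiers = [upto('shipwright_test/2'), exclude('shipwright_test/3')]
#   containers = eval(specifiers, targets)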
# Loc Container -> ([Target], [Target])
def needs_building(tree, force_build=False):
gen = breadth_first_iter(tree)
next(gen) # skip root
loc = next(gen)
skip = []
needs = []
while True:
try:
target = loc.node()
if target.current_rel > target.last_built_rel or force_build:
                # target has changes; it and all its descendants need
                # to be rebuilt
for modified_loc in breadth_first_iter(loc):
target = modified_loc.node()
                    # only yield targets committed to git
if target.current_rel is not None:
needs.append(target)
                loc = gen.send(True)  # don't check this location's children
else:
if target.last_built_rel:
skip.append(target)
loc = next(gen)
except StopIteration:
break
return skip, needs
Root = namedtuple('Root', 'name, children')
# [Container] -> Loc Container
def make_tree(containers):
"""
Converts a list of containers into a tree represented by a zipper.
see http://en.wikipedia.org/wiki/Zipper_(data_structure)
>>> from .dependencies import targets
>>> root = make_tree(targets)
>>> root.node().name is None # doctest: +ELLIPSIS
True
>>> _names(root) # doctest: +NORMALIZE_WHITESPACE
['shipwright_test/1', 'shipwright_test/independent', 'shipwright_test/2',
'shipwright_test/3']
>>> root.down().node() # doctest: +ELLIPSIS
Target(container=Container(name='shipwright_test/1', ...)
>>> _names(root.down()) # doctest: +ELLIPSIS
['shipwright_test/2', 'shipwright_test/3']
>>> root.down().down().node() # doctest: +ELLIPSIS
Target(container=Container(name='shipwright_test/2', ...)
>>> _names(root.down().down()) # doctest: +ELLIPSIS
['shipwright_test/3']
>>> root.down().right().node().name # doctest: +ELLIPSIS
'shipwright_test/independent'
"""
root = Root(None, ())
tree = zipper.zipper(root, is_branch, children, make_node)
for c in containers:
branch_children, root_children = split(is_child(c), tree.children())
t = c._replace(children=tuple(branch_children))
if branch_children:
tree = tree.edit(replace, tuple(root_children))
loc = tree.find(fmap(is_target(t.parent)))
if loc:
tree = loc.insert(t).top()
else:
tree = tree.insert(t)
return tree
def replace(node, children):
return node._replace(children=children)
def children(item):
return item.children
def is_branch(item):
return True
def make_node(node, children):
# keep children sorted to make testing easier
ch = tuple(sorted(children, key=fn.getattr('name')))
return node._replace(children=ch)
def breadth_first_iter(loc):
"""
    Given a location node (from a zipper) walk its children in breadth-first
    order.
>>> from .dependencies import targets
>>> tree = make_tree(targets)
>>> result = [loc.node().name for loc in breadth_first_iter(tree)]
>>> result # doctest: +NORMALIZE_WHITESPACE
[None, 'shipwright_test/1', 'shipwright_test/independent',
'shipwright_test/2', 'shipwright_test/3']
"""
tocheck = [loc]
while tocheck:
        current = tocheck.pop(0)
        skip = yield current
        if skip:
            continue
        child = current.down()
while child:
tocheck.append(child)
child = child.right()
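def _breadth_first_skip_demo(tree):
    # Hypothetical helper (not part of shipwright) illustrating the send()
    # protocol of breadth_first_iter above: sending True back into the
    # generator prunes the subtree below the location that was just
    # yielded, which is how needs_building() avoids re-checking descendants
    # it has already queued. The node name is assumed from the doctests in
    # this module.
    names = []
    gen = breadth_first_iter(tree)
    loc = next(gen)  # root
    try:
        while True:
            names.append(loc.node().name)
            if loc.node().name == 'shipwright_test/2':
                loc = gen.send(True)  # skip this subtree
            else:
                loc = next(gen)
    except StopIteration:
        pass
    return names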
@curry
def is_target(name, target):
"""
>>> from . import Target
>>> from .container import Container
>>> target = Target(
... Container('test', None, None, None), None, None, None, None,
... )
>>> is_target('test', target)
True
"""
return target.name == name
@curry
def is_child(parent, target):
    if isinstance(target, Root):
        return False
    return target.parent == parent.name
# (a -> b) -> Loc a -> b
@curry
def fmap(func, loc):
return func(loc.node())
# Loc -> [Target]
def lineage(loc):
results = []
while loc.path:
node = loc.node()
results.append(node)
loc = loc.up()
return results
# (a -> Bool) -> [a] ->[a], [a]
@curry
def split(f, children):
"""
Given a function that returns true or false and a list. Return
a two lists all items f(child) == True is in list 1 and
all items not in the list are in list 2.
"""
l1 = []
l2 = []
for child in children:
if f(child):
l1.append(child)
else:
l2.append(child)
return l1, l2
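# For illustration: split partitions a list by a predicate and, thanks to
# @curry, can also be partially applied:
#   split(lambda x: x % 2 == 0, [1, 2, 3, 4])  ->  ([2, 4], [1, 3])
#   evens_first = split(lambda x: x % 2 == 0)  # curried form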
# Loc -> [Target]
def brood(loc):
return [loc.node() for loc in breadth_first_iter(loc)][1:]
# Target -> Tree -> [Target]
@curry
def upto(target, tree):
"""
    Returns the target and everything it depends on.
>>> from .dependencies import targets
>>> targets = upto('shipwright_test/2', make_tree(targets))
>>> _names_list(targets)
['shipwright_test/1', 'shipwright_test/2']
"""
loc = tree.find(fmap(is_target(target)))
return lineage(loc) # make_tree(lineage(loc))
# Target -> Tree -> [Target]
@curry
def dependents(target, tree):
"""
    Returns a target, its dependencies, and
    everything that depends on it.
>>> from .dependencies import targets
>>> targets = dependents('shipwright_test/2', make_tree(targets))
>>> _names_list(targets)
['shipwright_test/1', 'shipwright_test/2', 'shipwright_test/3']
"""
loc = tree.find(fmap(is_target(target)))
return lineage(loc) + brood(loc)
# Target -> Tree -> [Target]
@curry
def exact(target, tree):
"""
Returns only the target.
>>> from .dependencies import targets
>>> targets = exact('shipwright_test/2', make_tree(targets))
>>> _names_list(targets)
['shipwright_test/2']
"""
loc = tree.find(fmap(is_target(target)))
return [loc.node()]
# Target -> Tree -> Tree
@curry
def exclude(target, tree):
"""
    Returns everything but the target and its dependents. If the target
is not found the whole tree is returned.
>>> from .dependencies import targets
>>> tree = exclude('shipwright_test/2', make_tree(targets))
>>> _names(tree) # doctest: +ELLIPSIS
['shipwright_test/1', 'shipwright_test/independent']
"""
loc = tree.find(fmap(is_target(target)))
if loc:
return loc.remove().top()
else:
return tree
# Test methods ###
def _names(tree):
return [n.name for n in brood(tree)]
def _names_list(targets):
return sorted([n.name for n in targets])
def setup_module(module):
from .container import Container
from . import Target
module.targets = [
Target(
Container(
'shipwright_test/2', 'path2/', 'path2/Dockerfile',
'shipwright_test/1',
),
'abc',
3,
3,
None
),
Target(
Container(
'shipwright_test/1', 'path1/', 'path1/Dockerfile',
'ubuntu',
),
'abc',
3,
3,
None
),
Target(
Container(
'shipwright_test/3', 'path3/', 'path3/Dockerfile',
'shipwright_test/2',
),
'abc',
3,
3,
None
),
Target(
Container(
'shipwright_test/independent', 'independent',
'path1/Dockerfile', 'ubuntu',
),
'abc',
3,
3,
None
)
]
|
|
# ==================================================================================================
# Copyright 2015 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from datetime import datetime
from zktraffic.base.network import BadPacket
from zktraffic.base.util import read_long, read_number
from zktraffic.base.zookeeper import ZK_REQUEST_TYPES
class PacketType(object):
REQUEST = 1
PROPOSAL = 2
ACK = 3
COMMIT = 4
PING = 5
REVALIDATE = 6
SYNC = 7
INFORM = 8
COMMITANDACTIVATE = 9
NEWLEADER = 10
FOLLOWERINFO = 11
UPTODATE = 12
DIFF = 13
TRUNC = 14
SNAP = 15
OBSERVERINFO = 16
LEADERINFO = 17
ACKEPOCH = 18
INFORMANDACTIVATE = 19
VALID = range(REQUEST, INFORMANDACTIVATE + 1)
NAMES = [
"zero",
"request",
"proposal",
"ack",
"commit",
"ping",
"revalidate",
"sync",
"inform",
"commitandactivate",
"newleader",
"followerinfo",
"uptodate",
"diff",
"trunc",
"snap",
"observerinfo",
"leaderinfo",
"ackepoch",
"informandactivate",
]
@classmethod
def invalid(cls, ptype):
return ptype not in cls.VALID
@classmethod
def to_str(cls, ptype):
return "" if cls.invalid(ptype) else cls.NAMES[ptype]
class QuorumPacketBase(type):
TYPES = {}
PTYPE = None
def __new__(cls, clsname, bases, dct):
obj = super(QuorumPacketBase, cls).__new__(cls, clsname, bases, dct)
if obj.PTYPE in cls.TYPES:
raise ValueError("Duplicate ptype name: %s" % obj.PTYPE)
if obj.PTYPE is not None:
cls.TYPES[obj.PTYPE] = obj
return obj
@classmethod
def get(cls, key, default=None):
return cls.TYPES.get(key, default)
class QuorumPacket(QuorumPacketBase("QuorumPacketBase", (object,), {})):
__slots__ = ("timestamp", "src", "dst", "type", "zxid", "length")
MIN_SIZE = 12
def __init__(self, timestamp, src, dst, ptype, zxid, length):
self.timestamp = timestamp
self.src = src
self.dst = dst
self.type = ptype
self.zxid = zxid
self.length = length
@property
def timestr(self):
return datetime.fromtimestamp(self.timestamp).strftime("%H:%M:%S:%f")
@property
def type_literal(self):
return PacketType.to_str(self.type)
@property
def zxid_literal(self):
return self.zxid if self.zxid == -1 else "0x%x" % self.zxid
@classmethod
def with_params(cls, timestamp, src, dst, ptype, zxid, data, offset):
return cls(timestamp, src, dst, ptype, zxid, len(data))
@classmethod
def from_payload(cls, data, src, dst, timestamp):
if len(data) < cls.MIN_SIZE:
raise BadPacket("Too small")
ptype, offset = read_number(data, 0)
if PacketType.invalid(ptype):
raise BadPacket("Invalid type")
zxid, offset = read_long(data, offset)
handler = QuorumPacketBase.get(ptype, cls)
return handler.with_params(timestamp, src, dst, ptype, zxid, data, offset)
def __str__(self):
def attributes():
def valid(key, value):
if not isinstance(value, int) and not isinstance(value, basestring):
return False
if key.isupper() or key.startswith("_") or "_literal" in key or key == "type":
return False
return True
for key in dir(self):
value = getattr(self, key)
if valid(key, value):
alt_key = "%s_literal" % key
if hasattr(self, alt_key):
value = getattr(self, alt_key)
yield key, value
parts = ["%s(" % self.__class__.__name__]
for name, value in attributes():
parts.append(" %s=%s," % (name, value))
parts.append(")")
return "\n".join(parts) + "\n"
class Request(QuorumPacket):
PTYPE = PacketType.REQUEST
__slots__ = ("session_id", "cxid", "req_type")
def __init__(self, timestamp, src, dst, ptype, zxid, length, session_id, cxid, req_type):
super(Request, self).__init__(timestamp, src, dst, ptype, zxid, length)
self.session_id = session_id
self.cxid = cxid
self.req_type = req_type
@property
def req_type_literal(self):
return ZK_REQUEST_TYPES[self.req_type] if self.req_type in ZK_REQUEST_TYPES else str(self.req_type)
@property
def session_id_literal(self):
return "0x%x" % self.session_id
@classmethod
def with_params(cls, timestamp, src, dst, ptype, zxid, data, offset):
data_len, offset = read_number(data, offset)
session_id, offset = read_long(data, offset)
cxid, offset = read_number(data, offset)
req_type, offset = read_number(data, offset)
# TODO: dissect the remaining data, see server_message.py and client_message.py
# Note: zxid=-1 because requests don't have a zxid
return cls(timestamp, src, dst, ptype, -1, len(data), session_id, cxid, req_type)
class Proposal(QuorumPacket):
PTYPE = PacketType.PROPOSAL
__slots__ = ("session_id", "cxid", "txn_zxid", "txn_time", "txn_type")
def __init__(self, timestamp, src, dst, ptype, zxid, length,
session_id, cxid, txn_zxid, txn_time, txn_type):
super(Proposal, self).__init__(timestamp, src, dst, ptype, zxid, length)
self.session_id = session_id
self.cxid = cxid
self.txn_zxid = txn_zxid
self.txn_time = txn_time
self.txn_type = txn_type
@property
def session_id_literal(self):
return "0x%x" % self.session_id
@property
def txn_type_literal(self):
return ZK_REQUEST_TYPES[self.txn_type] if self.txn_type in ZK_REQUEST_TYPES else str(self.txn_type)
@classmethod
def with_params(cls, timestamp, src, dst, ptype, zxid, data, offset):
data_len, offset = read_number(data, offset)
session_id, offset = read_long(data, offset)
cxid, offset = read_number(data, offset)
txn_zxid, offset = read_long(data, offset)
txn_time, offset = read_long(data, offset)
txn_type, offset = read_number(data, offset)
# TODO: dissect the remaining data
# see org.apache.zookeeper.server.util.SerializeUtils.deserializeTxn()
return cls(timestamp, src, dst, ptype, zxid, len(data),
session_id, cxid, txn_zxid, txn_time, txn_type)
class Ack(QuorumPacket):
PTYPE = PacketType.ACK
class Commit(QuorumPacket):
PTYPE = PacketType.COMMIT
class Ping(QuorumPacket):
PTYPE = PacketType.PING
# TODO: dissect the data (in almost all cases, data is null)
class Revalidate(QuorumPacket):
PTYPE = PacketType.REVALIDATE
__slots__ = ("session_id", "timeout")
def __init__(self, timestamp, src, dst, ptype, zxid, length, session_id, timeout):
super(Revalidate, self).__init__(timestamp, src, dst, ptype, zxid, length)
self.session_id = session_id
self.timeout = timeout
@property
def session_id_literal(self):
return "0x%x" % self.session_id
@classmethod
def with_params(cls, timestamp, src, dst, ptype, zxid, data, offset):
data_len, offset = read_number(data, offset)
session_id, offset = read_long(data, offset)
timeout, offset = read_number(data, offset)
return cls(timestamp, src, dst, ptype, zxid, len(data), session_id, timeout)
class Sync(QuorumPacket):
PTYPE = PacketType.SYNC
class Inform(Proposal):
PTYPE = PacketType.INFORM
class CommitAndActivate(QuorumPacket):
PTYPE = PacketType.COMMITANDACTIVATE
__slots__ = ("suggested_leader_id")
def __init__(self, timestamp, src, dst, ptype, zxid, length, suggested_leader_id):
super(CommitAndActivate, self).__init__(timestamp, src, dst, ptype, zxid, length)
self.suggested_leader_id = suggested_leader_id
@classmethod
def with_params(cls, timestamp, src, dst, ptype, zxid, data, offset):
data_len, offset = read_number(data, offset)
suggested_leader_id, offset = read_long(data, offset)
return cls(timestamp, src, dst, ptype, zxid, len(data), suggested_leader_id)
class NewLeader(QuorumPacket):
PTYPE = PacketType.NEWLEADER
# TODO: dissect the data (in almost all cases, data is null)
class FollowerInfo(QuorumPacket):
PTYPE = PacketType.FOLLOWERINFO
__slots__ = ("sid", "protocol_version", "config_version")
def __init__(self, timestamp, src, dst, ptype, zxid, length,
sid, protocol_version, config_version):
super(FollowerInfo, self).__init__(timestamp, src, dst, ptype, zxid, length)
self.sid = sid
self.protocol_version = protocol_version
self.config_version = config_version
@classmethod
def with_params(cls, timestamp, src, dst, ptype, zxid, data, offset):
data_len, offset = read_number(data, offset)
sid, offset = read_long(data, offset)
protocol_version, offset = read_number(data, offset)
config_version, offset = read_long(data, offset)
return cls(timestamp, src, dst, ptype, zxid, len(data),
sid, protocol_version, config_version)
class UpToDate(QuorumPacket):
PTYPE = PacketType.UPTODATE
class Diff(QuorumPacket):
PTYPE = PacketType.DIFF
class Trunc(QuorumPacket):
PTYPE = PacketType.TRUNC
class Snap(QuorumPacket):
PTYPE = PacketType.SNAP
class ObserverInfo(FollowerInfo):
PTYPE = PacketType.OBSERVERINFO
class LeaderInfo(QuorumPacket):
PTYPE = PacketType.LEADERINFO
__slots__ = ("protocol_version")
def __init__(self, timestamp, src, dst, ptype, zxid, length, protocol_version):
super(LeaderInfo, self).__init__(timestamp, src, dst, ptype, zxid, length)
self.protocol_version = protocol_version
@classmethod
def with_params(cls, timestamp, src, dst, ptype, zxid, data, offset):
data_len, offset = read_number(data, offset)
protocol_version, offset = read_number(data, offset)
return cls(timestamp, src, dst, ptype, zxid, len(data), protocol_version)
class AckEpoch(QuorumPacket):
PTYPE = PacketType.ACKEPOCH
__slots__ = ("epoch")
def __init__(self, timestamp, src, dst, ptype, zxid, length, epoch):
super(AckEpoch, self).__init__(timestamp, src, dst, ptype, zxid, length)
self.epoch = epoch
@classmethod
def with_params(cls, timestamp, src, dst, ptype, zxid, data, offset):
data_len, offset = read_number(data, offset)
epoch, offset = read_number(data, offset)
return cls(timestamp, src, dst, ptype, zxid, len(data), epoch)
class InformAndActivate(Proposal):
__slots__ = ("session_id", "cxid", "txn_zxid", "txn_time", "txn_type", "suggested_leader_id")
PTYPE = PacketType.INFORMANDACTIVATE
def __init__(self, timestamp, src, dst, ptype, zxid, length,
suggested_leader_id,
session_id, cxid, txn_zxid, txn_time, txn_type):
super(Proposal, self).__init__(timestamp, src, dst, ptype, zxid, length)
self.suggested_leader_id = suggested_leader_id
self.session_id = session_id
self.cxid = cxid
self.txn_zxid = txn_zxid
self.txn_time = txn_time
self.txn_type = txn_type
@property
def session_id_literal(self):
return "0x%x" % self.session_id
@classmethod
def with_params(cls, timestamp, src, dst, ptype, zxid, data, offset):
data_len, offset = read_number(data, offset)
suggested_leader_id, offset = read_long(data, offset)
session_id, offset = read_long(data, offset)
cxid, offset = read_number(data, offset)
txn_zxid, offset = read_long(data, offset)
txn_time, offset = read_long(data, offset)
txn_type, offset = read_number(data, offset)
return cls(timestamp, src, dst, ptype, zxid, len(data),
suggested_leader_id,
session_id, cxid, txn_zxid, txn_time, txn_type)
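def _from_payload_demo():
    # Hypothetical sketch (not part of zktraffic): build a minimal PING
    # payload and parse it back. This assumes read_number/read_long consume
    # a big-endian 4-byte int and an 8-byte long respectively, matching the
    # Java DataOutput framing ZooKeeper uses on the wire.
    import struct
    payload = struct.pack("!iq", PacketType.PING, 0x100000001)
    pkt = QuorumPacket.from_payload(
        payload, "10.0.0.1:61000", "10.0.0.2:2888", 0.0)
    assert isinstance(pkt, Ping)  # dispatched through QuorumPacketBase.TYPES
    assert pkt.zxid == 0x100000001
    assert pkt.type_literal == "ping"
    return pkt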
|
|
"""
This tutorial introduces logistic regression using Theano and stochastic
gradient descent.
Logistic regression is a probabilistic, linear classifier. It is parametrized
by a weight matrix :math:`W` and a bias vector :math:`b`. Classification is
done by projecting data points onto a set of hyperplanes, the distance to
which is used to determine a class membership probability.
Mathematically, this can be written as:
.. math::
P(Y=i|x, W,b) &= softmax_i(W x + b) \\
&= \frac {e^{W_i x + b_i}} {\sum_j e^{W_j x + b_j}}
The output of the model or prediction is then done by taking the argmax of
the vector whose i'th element is P(Y=i|x).
.. math::
y_{pred} = argmax_i P(Y=i|x,W,b)
This tutorial presents a stochastic gradient descent optimization method
suitable for large datasets.
References:
- textbooks: "Pattern Recognition and Machine Learning" -
Christopher M. Bishop, section 4.3.2
"""
__docformat__ = 'restructuredtext en'
import cPickle
import gzip
import os
import sys
import time
import numpy
import theano
import theano.tensor as T
class LogisticRegression(object):
"""Multi-class Logistic Regression Class
The logistic regression is fully described by a weight matrix :math:`W`
and bias vector :math:`b`. Classification is done by projecting data
points onto a set of hyperplanes, the distance to which is used to
determine a class membership probability.
"""
def __init__(self, input, n_in, n_out):
""" Initialize the parameters of the logistic regression
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
"""
# start-snippet-1
# initialize with 0 the weights W as a matrix of shape (n_in, n_out)
self.W = theano.shared(
value=numpy.zeros(
(n_in, n_out),
dtype=theano.config.floatX
),
name='W',
borrow=True
)
        # initialize the biases b as a vector of n_out 0s
self.b = theano.shared(
value=numpy.zeros(
(n_out,),
dtype=theano.config.floatX
),
name='b',
borrow=True
)
# symbolic expression for computing the matrix of class-membership
# probabilities
        # Where:
        # W is a matrix where column-k represents the separating hyperplane
        # for class-k
        # x is a matrix where row-j represents input training sample-j
        # b is a vector where element-k represents the free parameter of
        # hyperplane-k
self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
# symbolic description of how to compute prediction as class whose
# probability is maximal
self.y_pred = T.argmax(self.p_y_given_x, axis=1)
# end-snippet-1
# parameters of the model
self.params = [self.W, self.b]
def negative_log_likelihood(self, y):
"""Return the mean of the negative log-likelihood of the prediction
of this model under a given target distribution.
.. math::
            \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
                \frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|}
                \log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
            \ell (\theta=\{W,b\}, \mathcal{D}) =
                - \mathcal{L} (\theta=\{W,b\}, \mathcal{D})
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
Note: we use the mean instead of the sum so that
the learning rate is less dependent on the batch size
"""
# start-snippet-2
# y.shape[0] is (symbolically) the number of rows in y, i.e.,
# number of examples (call it n) in the minibatch
# T.arange(y.shape[0]) is a symbolic vector which will contain
# [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
# Log-Probabilities (call it LP) with one row per example and
# one column per class LP[T.arange(y.shape[0]),y] is a vector
# v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
# LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
# the mean (across minibatch examples) of the elements in v,
# i.e., the mean log-likelihood across the minibatch.
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
# end-snippet-2
def errors(self, y):
"""Return a float representing the number of errors in the minibatch
over the total number of examples of the minibatch ; zero one
loss over the size of the minibatch
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
if y.ndim != self.y_pred.ndim:
raise TypeError(
'y should have the same shape as self.y_pred',
('y', y.type, 'y_pred', self.y_pred.type)
)
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.y_pred, y))
else:
raise NotImplementedError()
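def _nll_indexing_demo():
    # A small numpy-only sketch (not part of the original tutorial) of the
    # fancy indexing used in negative_log_likelihood above:
    # LP[arange(n), y] picks, for each example i, the log-probability
    # assigned to its correct class y[i].
    LP = numpy.log(numpy.array([[0.7, 0.2, 0.1],
                                [0.1, 0.8, 0.1]]))
    y = numpy.array([0, 1])
    picked = LP[numpy.arange(y.shape[0]), y]  # [log(0.7), log(0.8)]
    return -picked.mean()  # mean negative log-likelihood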
def load_data(dataset):
''' Loads the dataset
:type dataset: string
:param dataset: the path to the dataset (here MNIST)
'''
#############
# LOAD DATA #
#############
# Download the MNIST dataset if it is not present
data_dir, data_file = os.path.split(dataset)
if data_dir == "" and not os.path.isfile(dataset):
# Check if dataset is in the data directory.
new_path = os.path.join(
os.path.split(__file__)[0],
"..",
"data",
dataset
)
if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz':
dataset = new_path
if (not os.path.isfile(dataset)) and data_file == 'mnist.pkl.gz':
import urllib
origin = (
'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
)
print 'Downloading data from %s' % origin
urllib.urlretrieve(origin, dataset)
print '... loading data'
# Load the dataset
f = gzip.open(dataset, 'rb')
train_set, valid_set, test_set = cPickle.load(f)
f.close()
    # train_set, valid_set, test_set format: tuple(input, target)
    # input is a numpy.ndarray of 2 dimensions (a matrix) in which each
    # row corresponds to an example. target is a numpy.ndarray of
    # 1 dimension (a vector) that has the same length as the number of
    # rows in the input. It gives the target to the example with the
    # same index in the input.
def shared_dataset(data_xy, borrow=True):
""" Function that loads the dataset into shared variables
The reason we store our dataset in shared variables is to allow
Theano to copy it into the GPU memory (when code is run on GPU).
        Since copying data into the GPU is slow, copying a minibatch every
        time it is needed (the default behaviour if the data is not in a
        shared
variable) would lead to a large decrease in performance.
"""
data_x, data_y = data_xy
shared_x = theano.shared(numpy.asarray(data_x,
dtype=theano.config.floatX),
borrow=borrow)
shared_y = theano.shared(numpy.asarray(data_y,
dtype=theano.config.floatX),
borrow=borrow)
# When storing data on the GPU it has to be stored as floats
# therefore we will store the labels as ``floatX`` as well
# (``shared_y`` does exactly that). But during our computations
# we need them as ints (we use labels as index, and if they are
# floats it doesn't make sense) therefore instead of returning
# ``shared_y`` we will have to cast it to int. This little hack
        # lets us get around this issue
return shared_x, T.cast(shared_y, 'int32')
test_set_x, test_set_y = shared_dataset(test_set)
valid_set_x, valid_set_y = shared_dataset(valid_set)
train_set_x, train_set_y = shared_dataset(train_set)
rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
(test_set_x, test_set_y)]
return rval
def sgd_optimization_mnist(learning_rate=0.13, n_epochs=1000,
dataset='mnist.pkl.gz',
batch_size=600):
"""
Demonstrate stochastic gradient descent optimization of a log-linear
model
This is demonstrated on MNIST.
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic
gradient)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
:type dataset: string
:param dataset: the path of the MNIST dataset file from
http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz
"""
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
######################
# BUILD ACTUAL MODEL #
######################
print '... building the model'
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
# generate symbolic variables for input (x and y represent a
# minibatch)
x = T.matrix('x') # data, presented as rasterized images
y = T.ivector('y') # labels, presented as 1D vector of [int] labels
# construct the logistic regression class
# Each MNIST image has size 28*28
classifier = LogisticRegression(input=x, n_in=28 * 28, n_out=10)
# the cost we minimize during training is the negative log likelihood of
# the model in symbolic format
cost = classifier.negative_log_likelihood(y)
# compiling a Theano function that computes the mistakes that are made by
# the model on a minibatch
test_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size],
y: test_set_y[index * batch_size: (index + 1) * batch_size]
}
)
validate_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size: (index + 1) * batch_size],
y: valid_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# compute the gradient of cost with respect to theta = (W,b)
g_W = T.grad(cost=cost, wrt=classifier.W)
g_b = T.grad(cost=cost, wrt=classifier.b)
# start-snippet-3
# specify how to update the parameters of the model as a list of
# (variable, update expression) pairs.
updates = [(classifier.W, classifier.W - learning_rate * g_W),
(classifier.b, classifier.b - learning_rate * g_b)]
# compiling a Theano function `train_model` that returns the cost, but in
# the same time updates the parameter of the model based on the rules
# defined in `updates`
train_model = theano.function(
inputs=[index],
outputs=cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# end-snippet-3
###############
# TRAIN MODEL #
###############
print '... training the model'
# early-stopping parameters
    patience = 5000  # look at this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience / 2)
                                  # go through this many
                                  # minibatches before checking the network
# on the validation set; in this case we
# check every epoch
best_validation_loss = numpy.inf
test_score = 0.
start_time = time.clock()
done_looping = False
epoch = 0
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
minibatch_avg_cost = train_model(minibatch_index)
# iteration number
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i)
for i in xrange(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
print(
'epoch %i, minibatch %i/%i, validation error %f %%' %
(
epoch,
minibatch_index + 1,
n_train_batches,
this_validation_loss * 100.
)
)
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * \
improvement_threshold:
patience = max(patience, iter * patience_increase)
best_validation_loss = this_validation_loss
# test it on the test set
test_losses = [test_model(i)
for i in xrange(n_test_batches)]
test_score = numpy.mean(test_losses)
print(
(
' epoch %i, minibatch %i/%i, test error of'
' best model %f %%'
) %
(
epoch,
minibatch_index + 1,
n_train_batches,
test_score * 100.
)
)
if patience <= iter:
done_looping = True
break
end_time = time.clock()
print(
(
'Optimization complete with best validation score of %f %%,'
            ' with test performance %f %%'
)
% (best_validation_loss * 100., test_score * 100.)
)
    print 'The code ran for %d epochs, with %f epochs/sec' % (
epoch, 1. * epoch / (end_time - start_time))
print >> sys.stderr, ('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.1fs' % ((end_time - start_time)))
if __name__ == '__main__':
sgd_optimization_mnist()
|
|
from __future__ import absolute_import
import unittest
from chong import chong
from six.moves import range
board = chong.Board()
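# Note on the fixtures below (an inferred reading; chong's API is not
# documented here): board.positions maps a (row, col) coordinate to a
# single-bit bitboard value, a move is a (row, col, is_placement) triple,
# and a state looks like (p1_pawn, p2_pawn, p1_stones, p2_stones,
# player_to_move, ...) with the stone sets as summed bitboards.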
class IsLegalPlacementTestCase(unittest.TestCase):
def test_simple_placement(self):
p1 = board.positions[(0, 3)]
p2 = board.positions[(7, 4)]
# p1 to move
player = 1
state = (p1, p2, 0, 0, player, 1)
self.assertTrue(board.is_legal(state, (3, 3, True)))
# p2 to move
player = 2
state = (p1, p2, 0, 0, player, 1)
self.assertTrue(board.is_legal(state, (4, 4, True)))
def test_p1_home_row(self):
p1 = board.positions[(0, 3)]
p2 = board.positions[(1, 4)]
# p1 to move
player = 1
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (0, 4, True)))
# p2 to move
player = 2
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (0, 4, True)))
def test_p2_home_row(self):
p1 = board.positions[(6, 3)]
p2 = board.positions[(7, 4)]
# p1 to move
player = 1
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (7, 3, True)))
# p2 to move
player = 2
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (7, 3, True)))
def test_occupied_by_enemy_pawn(self):
p1 = board.positions[(3, 3)]
p2 = board.positions[(4, 4)]
# p1 to move
player = 1
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (4, 4, True)))
# p2 to move
player = 2
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (3, 3, True)))
def test_occupied_by_friendly_pawn(self):
p1 = board.positions[(3, 3)]
p2 = board.positions[(4, 4)]
# p1 to move
player = 1
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (3, 3, True)))
# p2 to move
player = 2
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (4, 4, True)))
def test_occupied_by_enemy_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
stone = board.positions[(4, 4)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (4, 4, True)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
stone = board.positions[(3, 3)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (3, 3, True)))
def test_occupied_by_friendly_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
stone = board.positions[(4, 4)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (4, 4, True)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
stone = board.positions[(3, 3)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (3, 3, True)))
def test_stones_exhausted(self):
p1 = board.positions[(0, 3)]
p2 = board.positions[(7, 4)]
# p1 to move
player = 1
stones = sum(board.positions[(1, x)] for x in range(6))
state = (p1, p2, stones, 0, player, 1)
self.assertFalse(board.is_legal(state, (4, 4, True)))
# p2 to move
player = 2
stones = sum(board.positions[(6, x)] for x in range(7))
state = (p1, p2, 0, stones, player, 1)
self.assertFalse(board.is_legal(state, (3, 3, True)))
class IsLegalMoveTestCase(unittest.TestCase):
def test_north_simple(self):
p1 = board.positions[(3, 3)]
p2 = board.positions[(4, 4)]
# p1 to move
player = 1
state = (p1, p2, 0, 0, player, 1)
self.assertTrue(board.is_legal(state, (2, 3, False)))
# p2 to move
player = 2
state = (p1, p2, 0, 0, player, 1)
self.assertTrue(board.is_legal(state, (3, 4, False)))
def test_north_enemy_pawn_block(self):
# p1 to move
player = 1
p1 = board.positions[(4, 3)]
p2 = board.positions[(3, 3)]
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (3, 3, False)))
# p2 to move
player = 2
p1 = board.positions[(4, 4)]
p2 = board.positions[(5, 4)]
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (4, 4, False)))
def test_north_enemy_stone_block(self):
p1 = board.positions[(3, 3)]
p2 = board.positions[(4, 4)]
# p1 to move
player = 1
stone = board.positions[(2, 3)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (2, 3, False)))
# p2 to move
player = 2
stone = board.positions[(3, 4)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (3, 4, False)))
def test_north_friendly_stone_block(self):
p1 = board.positions[(3, 3)]
p2 = board.positions[(4, 4)]
# p1 to move
player = 1
stone = board.positions[(2, 3)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (2, 3, False)))
# p2 to move
player = 2
stone = board.positions[(3, 4)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (3, 4, False)))
def test_south_simple(self):
p1 = board.positions[(3, 3)]
p2 = board.positions[(4, 4)]
# p1 to move
player = 1
state = (p1, p2, 0, 0, player, 1)
self.assertTrue(board.is_legal(state, (4, 3, False)))
# p2 to move
player = 2
state = (p1, p2, 0, 0, player, 1)
self.assertTrue(board.is_legal(state, (5, 4, False)))
def test_south_enemy_pawn_block(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(4, 3)]
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (4, 3, False)))
# p2 to move
player = 2
p1 = board.positions[(5, 4)]
p2 = board.positions[(4, 4)]
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (5, 4, False)))
def test_south_enemy_stone_block(self):
p1 = board.positions[(3, 3)]
p2 = board.positions[(4, 4)]
# p1 to move
player = 1
stone = board.positions[(4, 3)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (4, 3, False)))
# p2 to move
player = 2
stone = board.positions[(5, 4)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (5, 4, False)))
def test_south_friendly_stone_block(self):
p1 = board.positions[(3, 3)]
p2 = board.positions[(4, 4)]
# p1 to move
player = 1
stone = board.positions[(4, 3)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (4, 3, False)))
# p2 to move
player = 2
stone = board.positions[(5, 4)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (5, 4, False)))
def test_east_simple(self):
p1 = board.positions[(3, 3)]
p2 = board.positions[(4, 4)]
# p1 to move
player = 1
state = (p1, p2, 0, 0, player, 1)
self.assertTrue(board.is_legal(state, (3, 2, False)))
# p2 to move
player = 2
state = (p1, p2, 0, 0, player, 1)
self.assertTrue(board.is_legal(state, (4, 3, False)))
def test_east_enemy_pawn_block(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(3, 2)]
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (3, 2, False)))
# p2 to move
player = 2
p1 = board.positions[(4, 3)]
p2 = board.positions[(4, 4)]
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (4, 3, False)))
def test_east_enemy_stone_block(self):
p1 = board.positions[(3, 3)]
p2 = board.positions[(4, 4)]
# p1 to move
player = 1
stone = board.positions[(3, 2)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (3, 2, False)))
# p2 to move
player = 2
stone = board.positions[(4, 3)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (4, 3, False)))
def test_east_friendly_stone_block(self):
p1 = board.positions[(3, 3)]
p2 = board.positions[(4, 4)]
# p1 to move
player = 1
stone = board.positions[(3, 2)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (3, 2, False)))
# p2 to move
player = 2
stone = board.positions[(4, 3)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (4, 3, False)))
def test_west_simple(self):
p1 = board.positions[(3, 3)]
p2 = board.positions[(4, 4)]
# p1 to move
player = 1
state = (p1, p2, 0, 0, player, 1)
self.assertTrue(board.is_legal(state, (3, 4, False)))
# p2 to move
player = 2
state = (p1, p2, 0, 0, player, 1)
self.assertTrue(board.is_legal(state, (4, 5, False)))
def test_west_enemy_pawn_block(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(3, 4)]
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (3, 4, False)))
# p2 to move
player = 2
p1 = board.positions[(4, 5)]
p2 = board.positions[(4, 4)]
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (4, 5, False)))
def test_west_enemy_stone_block(self):
p1 = board.positions[(3, 3)]
p2 = board.positions[(4, 4)]
# p1 to move
player = 1
stone = board.positions[(3, 4)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (3, 4, False)))
# p2 to move
player = 2
stone = board.positions[(4, 5)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (4, 5, False)))
def test_west_friendly_stone_block(self):
p1 = board.positions[(3, 3)]
p2 = board.positions[(4, 4)]
# p1 to move
player = 1
stone = board.positions[(3, 4)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (3, 4, False)))
# p2 to move
player = 2
stone = board.positions[(4, 5)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (4, 5, False)))
class IsLegalJumpTestCase(unittest.TestCase):
def test_north_simple(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
stone = board.positions[(2, 3)]
state = (p1, p2, stone, 0, player, 1)
self.assertTrue(board.is_legal(state, (1, 3, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
stone = board.positions[(3, 4)]
state = (p1, p2, 0, stone, player, 1)
self.assertTrue(board.is_legal(state, (2, 4, False)))
def test_north_no_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (1, 3, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (2, 4, False)))
def test_north_enemy_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
stone = board.positions[(2, 3)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (1, 3, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
stone = board.positions[(3, 4)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (2, 4, False)))
def test_north_blocking_pawn(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(1, 3)]
stone = board.positions[(2, 3)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (1, 3, False)))
# p2 to move
player = 2
p1 = board.positions[(2, 4)]
p2 = board.positions[(4, 4)]
stone = board.positions[(3, 4)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (2, 4, False)))
def test_north_blocking_enemy_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
p1_stone = board.positions[(2, 3)]
p2_stone = board.positions[(1, 3)]
state = (p1, p2, p1_stone, p2_stone, player, 1)
self.assertFalse(board.is_legal(state, (1, 3, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
p1_stone = board.positions[(2, 4)]
p2_stone = board.positions[(3, 4)]
state = (p1, p2, p1_stone, p2_stone, player, 1)
self.assertFalse(board.is_legal(state, (2, 4, False)))
def test_north_blocking_friendly_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
stone = board.positions[(2, 3)] + board.positions[(1, 3)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (1, 3, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
stone = board.positions[(3, 4)] + board.positions[(2, 4)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (2, 4, False)))
def test_nw_simple(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
stone = board.positions[(2, 4)]
state = (p1, p2, stone, 0, player, 1)
self.assertTrue(board.is_legal(state, (1, 5, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
stone = board.positions[(3, 5)]
state = (p1, p2, 0, stone, player, 1)
self.assertTrue(board.is_legal(state, (2, 6, False)))
def test_nw_no_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (1, 5, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (2, 6, False)))
def test_nw_enemy_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
stone = board.positions[(2, 4)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (1, 5, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
stone = board.positions[(3, 5)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (2, 6, False)))
def test_nw_blocking_pawn(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(1, 5)]
stone = board.positions[(2, 4)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (1, 5, False)))
# p2 to move
player = 2
p1 = board.positions[(2, 6)]
p2 = board.positions[(4, 4)]
stone = board.positions[(3, 5)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (2, 6, False)))
def test_nw_blocking_enemy_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
p1_stone = board.positions[(2, 4)]
p2_stone = board.positions[(1, 5)]
state = (p1, p2, p1_stone, p2_stone, player, 1)
self.assertFalse(board.is_legal(state, (1, 5, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
p1_stone = board.positions[(2, 6)]
p2_stone = board.positions[(3, 5)]
state = (p1, p2, p1_stone, p2_stone, player, 1)
self.assertFalse(board.is_legal(state, (2, 6, False)))
def test_nw_blocking_friendly_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
stone = board.positions[(2, 4)] + board.positions[(1, 5)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (1, 5, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
stone = board.positions[(3, 5)] + board.positions[(2, 6)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (2, 6, False)))
def test_west_simple(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
stone = board.positions[(3, 4)]
state = (p1, p2, stone, 0, player, 1)
self.assertTrue(board.is_legal(state, (3, 5, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
stone = board.positions[(4, 5)]
state = (p1, p2, 0, stone, player, 1)
self.assertTrue(board.is_legal(state, (4, 6, False)))
def test_west_no_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (3, 5, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (4, 6, False)))
def test_west_enemy_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
stone = board.positions[(3, 4)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (3, 5, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
stone = board.positions[(4, 5)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (4, 6, False)))
def test_west_blocking_pawn(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(3, 5)]
stone = board.positions[(3, 4)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (3, 5, False)))
# p2 to move
player = 2
p1 = board.positions[(4, 6)]
p2 = board.positions[(4, 4)]
stone = board.positions[(4, 5)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (4, 6, False)))
def test_west_blocking_enemy_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
p1_stone = board.positions[(3, 4)]
p2_stone = board.positions[(3, 5)]
state = (p1, p2, p1_stone, p2_stone, player, 1)
self.assertFalse(board.is_legal(state, (3, 5, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
p1_stone = board.positions[(4, 6)]
p2_stone = board.positions[(4, 5)]
state = (p1, p2, p1_stone, p2_stone, player, 1)
self.assertFalse(board.is_legal(state, (4, 6, False)))
def test_west_blocking_friendly_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
stone = board.positions[(3, 4)] + board.positions[(3, 5)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (3, 5, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
stone = board.positions[(4, 5)] + board.positions[(4, 6)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (4, 6, False)))
def test_sw_simple(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
stone = board.positions[(4, 4)]
state = (p1, p2, stone, 0, player, 1)
self.assertTrue(board.is_legal(state, (5, 5, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
stone = board.positions[(5, 5)]
state = (p1, p2, 0, stone, player, 1)
self.assertTrue(board.is_legal(state, (6, 6, False)))
def test_sw_no_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (5, 5, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (6, 6, False)))
def test_sw_enemy_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
stone = board.positions[(4, 4)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (5, 5, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
stone = board.positions[(5, 5)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (6, 6, False)))
def test_sw_blocking_pawn(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(5, 5)]
stone = board.positions[(4, 4)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (5, 5, False)))
# p2 to move
player = 2
p1 = board.positions[(6, 6)]
p2 = board.positions[(4, 4)]
stone = board.positions[(5, 5)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (6, 6, False)))
def test_sw_blocking_enemy_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
p1_stone = board.positions[(4, 4)]
p2_stone = board.positions[(5, 5)]
state = (p1, p2, p1_stone, p2_stone, player, 1)
self.assertFalse(board.is_legal(state, (5, 5, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
p1_stone = board.positions[(6, 6)]
p2_stone = board.positions[(5, 5)]
state = (p1, p2, p1_stone, p2_stone, player, 1)
self.assertFalse(board.is_legal(state, (6, 6, False)))
def test_sw_blocking_friendly_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
stone = board.positions[(4, 4)] + board.positions[(5, 5)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (5, 5, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
stone = board.positions[(5, 5)] + board.positions[(6, 6)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (6, 6, False)))
def test_south_simple(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
stone = board.positions[(4, 3)]
state = (p1, p2, stone, 0, player, 1)
self.assertTrue(board.is_legal(state, (5, 3, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
stone = board.positions[(5, 4)]
state = (p1, p2, 0, stone, player, 1)
self.assertTrue(board.is_legal(state, (6, 4, False)))
def test_south_no_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (5, 3, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (6, 4, False)))
def test_south_enemy_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
stone = board.positions[(4, 3)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (5, 3, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
stone = board.positions[(5, 4)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (6, 4, False)))
def test_south_blocking_pawn(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(5, 3)]
stone = board.positions[(4, 3)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (5, 3, False)))
# p2 to move
player = 2
p1 = board.positions[(6, 4)]
p2 = board.positions[(4, 4)]
stone = board.positions[(5, 4)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (6, 4, False)))
def test_south_blocking_enemy_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
p1_stone = board.positions[(4, 3)]
p2_stone = board.positions[(5, 3)]
state = (p1, p2, p1_stone, p2_stone, player, 1)
self.assertFalse(board.is_legal(state, (5, 3, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
p1_stone = board.positions[(6, 4)]
p2_stone = board.positions[(5, 4)]
state = (p1, p2, p1_stone, p2_stone, player, 1)
self.assertFalse(board.is_legal(state, (6, 4, False)))
def test_south_blocking_friendly_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
stone = board.positions[(4, 3)] + board.positions[(5, 3)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (5, 3, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
stone = board.positions[(5, 4)] + board.positions[(6, 4)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (6, 4, False)))
def test_se_simple(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
stone = board.positions[(4, 2)]
state = (p1, p2, stone, 0, player, 1)
self.assertTrue(board.is_legal(state, (5, 1, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
stone = board.positions[(5, 3)]
state = (p1, p2, 0, stone, player, 1)
self.assertTrue(board.is_legal(state, (6, 2, False)))
def test_se_no_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (5, 1, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (6, 2, False)))
def test_se_enemy_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
stone = board.positions[(4, 2)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (5, 1, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
stone = board.positions[(5, 3)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (6, 2, False)))
def test_se_blocking_pawn(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(5, 1)]
stone = board.positions[(4, 2)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (5, 1, False)))
# p2 to move
player = 2
p1 = board.positions[(6, 2)]
p2 = board.positions[(4, 4)]
stone = board.positions[(5, 3)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (6, 2, False)))
def test_se_blocking_enemy_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
p1_stone = board.positions[(4, 2)]
p2_stone = board.positions[(5, 1)]
state = (p1, p2, p1_stone, p2_stone, player, 1)
self.assertFalse(board.is_legal(state, (5, 1, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
p1_stone = board.positions[(6, 2)]
p2_stone = board.positions[(5, 3)]
state = (p1, p2, p1_stone, p2_stone, player, 1)
self.assertFalse(board.is_legal(state, (6, 2, False)))
def test_se_blocking_friendly_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
stone = board.positions[(4, 2)] + board.positions[(5, 1)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (5, 1, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
stone = board.positions[(5, 3)] + board.positions[(6, 2)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (6, 2, False)))
def test_east_simple(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
stone = board.positions[(3, 2)]
state = (p1, p2, stone, 0, player, 1)
self.assertTrue(board.is_legal(state, (3, 1, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
stone = board.positions[(4, 3)]
state = (p1, p2, 0, stone, player, 1)
self.assertTrue(board.is_legal(state, (4, 2, False)))
def test_east_no_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (3, 1, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (4, 2, False)))
def test_east_enemy_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
stone = board.positions[(3, 2)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (3, 1, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
stone = board.positions[(4, 3)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (4, 2, False)))
def test_east_blocking_pawn(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(3, 1)]
stone = board.positions[(3, 2)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (3, 1, False)))
# p2 to move
player = 2
p1 = board.positions[(4, 2)]
p2 = board.positions[(4, 4)]
stone = board.positions[(4, 3)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (4, 2, False)))
def test_east_blocking_enemy_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
p1_stone = board.positions[(3, 2)]
p2_stone = board.positions[(3, 1)]
state = (p1, p2, p1_stone, p2_stone, player, 1)
self.assertFalse(board.is_legal(state, (3, 1, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
p1_stone = board.positions[(4, 2)]
p2_stone = board.positions[(4, 3)]
state = (p1, p2, p1_stone, p2_stone, player, 1)
self.assertFalse(board.is_legal(state, (4, 2, False)))
def test_east_blocking_friendly_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
stone = board.positions[(3, 2)] + board.positions[(3, 1)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (3, 1, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
stone = board.positions[(4, 3)] + board.positions[(4, 2)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (4, 2, False)))
def test_ne_simple(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
stone = board.positions[(2, 2)]
state = (p1, p2, stone, 0, player, 1)
self.assertTrue(board.is_legal(state, (1, 1, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
stone = board.positions[(3, 3)]
state = (p1, p2, 0, stone, player, 1)
self.assertTrue(board.is_legal(state, (2, 2, False)))
def test_ne_no_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (1, 1, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
state = (p1, p2, 0, 0, player, 1)
self.assertFalse(board.is_legal(state, (2, 2, False)))
def test_ne_enemy_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
stone = board.positions[(2, 2)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (1, 1, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
stone = board.positions[(3, 3)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (2, 2, False)))
def test_ne_blocking_pawn(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(1, 1)]
stone = board.positions[(2, 2)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (1, 1, False)))
# p2 to move
player = 2
p1 = board.positions[(2, 2)]
p2 = board.positions[(4, 4)]
stone = board.positions[(3, 3)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (2, 2, False)))
def test_ne_blocking_enemy_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
p1_stone = board.positions[(2, 2)]
p2_stone = board.positions[(1, 1)]
state = (p1, p2, p1_stone, p2_stone, player, 1)
self.assertFalse(board.is_legal(state, (1, 1, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
p1_stone = board.positions[(2, 2)]
p2_stone = board.positions[(3, 3)]
state = (p1, p2, p1_stone, p2_stone, player, 1)
self.assertFalse(board.is_legal(state, (2, 2, False)))
def test_ne_blocking_friendly_stone(self):
# p1 to move
player = 1
p1 = board.positions[(3, 3)]
p2 = board.positions[(7, 4)]
stone = board.positions[(2, 2)] + board.positions[(1, 1)]
state = (p1, p2, stone, 0, player, 1)
self.assertFalse(board.is_legal(state, (1, 1, False)))
# p2 to move
player = 2
p1 = board.positions[(0, 3)]
p2 = board.positions[(4, 4)]
stone = board.positions[(3, 3)] + board.positions[(2, 2)]
state = (p1, p2, 0, stone, player, 1)
self.assertFalse(board.is_legal(state, (2, 2, False)))
|
|
# coding: utf-8
# In[1]:
import datetime
import glob
import hashlib
import multiprocessing as mp
import os
import queue
import random
import threading
from functools import partial
import keras.backend.tensorflow_backend as KTF
#import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.applications.resnet50 import ResNet50, preprocess_input
from keras.callbacks import (EarlyStopping, ModelCheckpoint, ReduceLROnPlateau,
TensorBoard)
from keras.layers import Dense, GlobalAveragePooling2D, Input, Lambda, merge
from keras.layers.normalization import BatchNormalization
from keras.models import Model, load_model, model_from_json
from keras.optimizers import RMSprop
from keras.preprocessing import image
from keras.utils.np_utils import to_categorical
import pelops.utils as utils
from pelops.analysis import analysis
from pelops.analysis.camerautil import get_match_id, make_good_bad
from pelops.datasets.featuredataset import FeatureDataset
from pelops.datasets.veri import VeriDataset
from pelops.experiment_api.experiment import ExperimentGenerator
from pelops.utils import train_test_key_filter
# In[2]:
# In[3]:
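# Serializing the architecture (JSON) and weights (HDF5) separately is a
# common workaround for model.save() failing on models containing Lambda or
# merge layers in older Keras versions (assumption: that is why it is done here).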
def save_model_workaround(model, model_output_file, weights_output_file):
print('saving model to {}'.format(model_output_file))
print('saving weights to {}'.format(weights_output_file))
# serialize model to JSON
model_json = model.to_json()
with open(model_output_file, 'w') as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights(weights_output_file)
def load_model_workaround(model_output_file, weights_output_file):
# load json and create model
json_file = open(model_output_file, 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights(weights_output_file)
return loaded_model
# In[4]:
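# makework builds (left_path, right_path, [same_vehicle, same_type, same_color])
# tuples; each chip pair is appended in both orders so the siamese network
# sees symmetric examples.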
def makework(workitems, chips, cam_id=None):
left = chips[0]
right = chips[1]
same_vehicle = left.car_id == right.car_id
same_type = left.misc['vehicle_type'] == right.misc['vehicle_type']
same_color = left.misc['color'] == right.misc['color']
#same_angle = cam_id(left.cam_id) == cam_id(right.cam_id)
features = [same_vehicle, same_type, same_color]
workitems.append((left.filepath, right.filepath, features))
workitems.append((right.filepath, left.filepath, features))
def make_examples(gen, examples):
workitems = []
for _ in range(examples):
cameras = gen.generate()
match_id = get_match_id(cameras)
goods, bads = make_good_bad(cameras, match_id)
makework(workitems, goods)
makework(workitems, bads)
print('made', len(workitems))
return workitems
# In[5]:
# get a GPU session and reserve memory
def get_session(gpu_fraction=0.3):
'''Assume that you have 6GB of GPU memory and want to allocate ~2GB'''
num_threads = os.environ.get('OMP_NUM_THREADS')
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
if num_threads:
return tf.Session(config=tf.ConfigProto(
gpu_options=gpu_options, intra_op_parallelism_threads=num_threads))
else:
return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
def rgb2bgr(x):
"""
given an array representation of an RGB image, change the image
into a BGR representation of the image
"""
return bgr2rgb(x)
def bgr2rgb(x):
"""
given an array representation of an BGR image, change the image
into an RGB representtaion of the image
"""
y = np.zeros(x.shape)
B = x[:, :, 0]
G = x[:, :, 1]
R = x[:, :, 2]
y[:, :, 0] = R
y[:, :, 1] = G
y[:, :, 2] = B
return y
# load an image from disk
# NOTE: input assumed to be RGB
# NOTE: output is to be BGR for resnet use.
def load_image(img_path,
e_dims=False,
image_flip=0.5,
image_shift=0.20,
image_rotate_degrees=15,
image_zoom=0.15,
output_BGR=True):
"""
WARNING this funciton should only manipulation images meant for resnet50 consumption.
To make it applicable for other environments remove preprocess_input.
Do some image manipulation
image input assumed to be in RGB format
output format default is GBR unless output_BGR is set to False
e_dims = e_dims false will output (x,y,3) sized images
e_domes true will output (1,x,y,3) sized images
image_flip = probability that image will be flipped rt to left
image_shift = percent of image to randomly shift up/down & right/left
image_rotate_degrees = rotate image randomly
between [-image_rotate_degrees image_rotate_degrees]
image_zoom = randomly zoom image [1-image_zoom 1+image_zoom]
output_BGR = True -> image output will be in BGR formate RGB otherwise
"""
img = image.load_img(img_path, target_size=(224, 224))
my_img = image.img_to_array(img)
if image_flip is not None:
if image_flip > 1 or image_flip < -1:
raise ValueError('|image_flip:{0}| > 1'.format(image_flip))
image_flip = abs(image_flip)
if random.random() < image_flip:  # flip with probability image_flip, per the docstring
my_img = image.flip_axis(my_img, axis=1)
if image_rotate_degrees is not None:
image_rotate_degrees = int(image_rotate_degrees)
if image_rotate_degrees > 360:
image_rotate_degrees = image_rotate_degrees % 360
my_img = image.random_rotation(my_img,
image_rotate_degrees,
row_index=0,
col_index=1,
channel_index=2)
if image_shift is not None:
if image_shift > 1 or image_shift < -1:
raise ValueError('|image_shift:{0}| > 1'.format(image_shift))
image_shift = abs(image_shift)
my_img = image.random_shift(my_img,
image_shift,
image_shift,
row_index=0,
col_index=1,
channel_index=2)
if image_zoom is not None:
if image_zoom > 1 or image_zoom < -1:
raise ValueError('|image_zoom:{0}| > 1'.format(image_zoom))
image_zoom = abs(image_zoom)
low = 1 - image_zoom
high = 1 + image_zoom
rng = [low, high]
my_img = image.random_zoom(my_img,
rng,
row_index=0,
col_index=1,
channel_index=2)
if not output_BGR:
my_img = bgr2rgb(my_img)
my_img = np.expand_dims(my_img, axis=0)
my_img = preprocess_input(my_img)
if not e_dims:
my_img = my_img.squeeze()
return my_img
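# Example usage (hypothetical path): augmented vs. un-augmented loading.
#   augmented = load_image('chips/vehicle_001.jpg')  # random flip/shift/rotate/zoom, (224, 224, 3)
#   plain = load_image('chips/vehicle_001.jpg', e_dims=True,
#                      image_flip=None, image_shift=None,
#                      image_rotate_degrees=None, image_zoom=None)  # (1, 224, 224, 3)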
# In[6]:
def plot_run_no(history, name1, name2, rnd=None):
"""
Take the output of a model.
"""
v = np.array(history[name1])
vc = np.array(history[name2])
if rnd is not None:
vr = np.zeros(vc.shape)
vr.fill(rnd)
b = np.array([v, vc, vr])
else:
b = np.array([v, vc])
c = b.transpose()
ax = plt.subplot(111)
ax.grid(True)
ax.plot(c)
if rnd is not None:
ax.legend((name1, name2, 'random'),
bbox_to_anchor=(1, -0.05),
fancybox=True, shadow=True, ncol=5)
else:
ax.legend((name1, name2),
bbox_to_anchor=(1, -0.05),
fancybox=True, shadow=True, ncol=5)
plt.show()
# In[7]:
def image_class_generator(tasking, batch_size=32, augment=False):
"""
Offload the augmentation of images, create images in batch_size chunks
augment=False -> return image augment=True -> return augmented image
"""
while True:
lefts = []
rights = []
ys = []
for task in random.sample(tasking, batch_size):
left_file = task[0]
right_file = task[1]
classes = task[2]
y = np.zeros(len(classes))
for index, c in enumerate(classes):
y[index] = 1 if c else 0
l_img = None
r_img = None
if augment:
l_img = load_image(left_file)
r_img = load_image(right_file)
else:
l_img = load_image(left_file, False, None, None, None, None)
r_img = load_image(right_file, False, None, None, None, None)
lefts.append(l_img)
rights.append(r_img)
ys.append(y)
yield ([np.array(lefts), np.array(rights)], np.array(ys))
def buffered_gen_mp(source_gen, buffer_size=2, num_processes=4):
"""
Generator that runs a slow source generator in a separate process.
buffer_size: the maximal number of items to pre-generate (length of the buffer)
"""
if buffer_size < 2:
raise RuntimeError("Minimal buffer size is 2!")
buffer = mp.Queue(maxsize=buffer_size - 1)
# the effective buffer size is one less, because the generation process
# will generate one extra element and block until there is room in the
# buffer.
def _buffered_generation_process(source_gen, buffer):
for data in source_gen:
buffer.put(data, block=True)
buffer.put(None) # sentinel: signal the end of the iterator
buffer.close() # unfortunately this does not suffice as a signal: if buffer.get()
# was called and subsequently the buffer is closed, it will block
# forever.
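# NOTE (caveats, assuming the default fork start method): generators are not
# picklable, so each worker inherits its own copy of source_gen -- including
# any RNG state, meaning workers may produce identical batches unless the
# source reseeds per process. Also, iter(buffer.get, None) below stops at the
# first None sentinel even if other workers are still running.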
for _ in range(num_processes):
process = mp.Process(
target=_buffered_generation_process, args=(source_gen, buffer))
process.start()
for data in iter(buffer.get, None):
yield data
# In[8]:
def freeze(model):
"""
Make model untrainable
"""
for layer in model.layers:
layer.trainable = False
model.trainable = False
# In[9]:
def free_model_layers(model):
"""
Make the model trainable
"""
for layer in model.layers:
try:
if layer.name == 'resnet50':
print('found resnet')
for rn_layer in layer.layers:
try:
if not rn_layer.trainable:
rn_layer.trainable = True
except:
if 'merge' not in rn_layer.name:
print('rn layer not trainable', rn_layer.name)
if not layer.trainable:
layer.trainable = True
except:
if 'merge' not in layer.name.lower():
print('layer not trainable:', layer.name)
# In[10]:
def make_siamese_model_concat(num_training_classes=3):
"""
Siamese network created via concatenating resnet50 outputs
@TODO see if less layers can now be used because of not using
binary_crossentropy..
"""
base_model = ResNet50(weights='imagenet', include_top=False)
freeze(base_model)
input_left = Input(shape=(224, 224, 3))
input_right = Input(shape=(224, 224, 3))
processed_left = base_model(input_left)
processed_right = base_model(input_right)
# join by slapping vectors together
siamese_join = merge([processed_left, processed_right], mode='concat')
my_layer = GlobalAveragePooling2D()(siamese_join)
my_layer = Dense(4096, activation='relu')(my_layer)
my_layer = BatchNormalization()(my_layer)
my_layer = Dense(2048, activation='relu')(my_layer)
my_layer = BatchNormalization()(my_layer)
my_layer = Dense(2048, activation='relu')(my_layer)
predictions = Dense(num_training_classes, activation='sigmoid')(my_layer)
model = Model([input_left, input_right], output=predictions)
return model
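# NOTE: merge([...], mode='concat') and Model(..., output=...) are Keras 1.x
# APIs; under Keras 2 these would be keras.layers.Concatenate()([...]) and
# Model(..., outputs=...).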
# In[11]:
def s_distance(vects):
"""
return the abs difference between vectors
"""
x, y = vects
s = K.abs(x - y)
#s = K.sqrt(K.square(x - y))
return (s)
# return K.squeeze(x,1) - K.squeeze(y,1)
def s_shape(shapes):
"""
return the shape of the vector being used (the elementwise difference
has the same shape as either input)
"""
shape = list(shapes)
outshape = shape[0]
return tuple(outshape)
def make_siamese_model_subtract(num_training_classes=2):
"""
Siamese network created via subtracting resnet50 outputs
"""
base_model = ResNet50(weights='imagenet', include_top=False)
for layer in base_model.layers:
layer.trainable = False
base_model.trainable = False
input_left = Input(shape=(224, 224, 3))
input_right = Input(shape=(224, 224, 3))
processed_left = base_model(input_left)
processed_right = base_model(input_right)
# use a distance measure for making the join
siamese_join = Lambda(s_distance,
output_shape=s_shape)([processed_left, processed_right])
my_layer = GlobalAveragePooling2D()(siamese_join)
my_layer = Dense(1024, activation='relu')(my_layer)
my_layer = BatchNormalization()(my_layer)
predictions = Dense(num_training_classes, activation='sigmoid')(my_layer)
model = Model([input_left, input_right], output=predictions)
return model
# In[12]:
def make_callbacks(model_checkpoint_format_string, tensor_board_log_dir):
"""
programatically make the callbacks to be used for training
"""
callbacks = []
if model_checkpoint_format_string is not None:
callbacks.append(ModelCheckpoint(model_checkpoint_format_string,
monitor='loss',
verbose=1,
save_best_only=True,
save_weights_only=False,
mode='min',
period=1))
if tensor_board_log_dir is not None:
callbacks.append(TensorBoard(log_dir=tensor_board_log_dir,
histogram_freq=0,
write_graph=True,
write_images=False))
callbacks.append(ReduceLROnPlateau(monitor='val_loss',
factor=0.1,
patience=4,
verbose=1,
mode='min',
epsilon=0.001,
cooldown=2,
min_lr=0))
callbacks.append(EarlyStopping(monitor='val_acc',
min_delta=0.003,
patience=6,
verbose=1,
mode='max'))
return callbacks
# In[13]:
def checkLabels(x):
"""
Make a warm fuzzy about the classes being balanced
"""
s_id = 0.0
s_type = 0.0
s_color = 0.0
total = len(x)
for v in x:
if v[2][0]:
s_id += 1
if v[2][1]:
s_type += 1
if v[2][2]:
s_color += 1
print('P(s_id==1):{0} P(s_type==1):{1} P(s_color==1):{2}'.format(
s_id / total, s_type / total, s_color / total))
return s_id / total, s_type / total, s_color / total
# In[14]:
#---------------------------------------
# In[15]:
# set some constants
ITEMSPERCAMERA = 2
YRANDOM = 13024
CAMERAS = 2
DROPPED = 0
EXPERIMENTS = int(40000 / 4)
batch_size = 16
tbld = '/local_data/dgrossman/tensorboard_logs'
mcfs = '/local_data/dgrossman/tempdir/veri-siamese.{epoch:02d}-{val_loss:.2f}-{val_acc:.2f}.hdf5'
# In[16]:
veri_validate = VeriDataset(
'/local_data/dgrossman/VeRi', set_type=utils.SetType.TEST.value)
veri_train = VeriDataset('/local_data/dgrossman/VeRi',
set_type=utils.SetType.TRAIN.value)
expGen_validate = ExperimentGenerator(veri_train,
CAMERAS,
ITEMSPERCAMERA,
DROPPED,
YRANDOM,
key_filter=partial(train_test_key_filter, split="test"))
expGen_train = ExperimentGenerator(veri_train,
CAMERAS,
ITEMSPERCAMERA,
DROPPED,
YRANDOM,
key_filter=partial(train_test_key_filter, split="train"))
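# NOTE: both generators draw from veri_train and are split into train/test
# via train_test_key_filter; the veri_validate dataset loaded above is not
# used below.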
# In[17]:
training_examples = make_examples(expGen_train, EXPERIMENTS)
validation_examples = make_examples(expGen_validate, EXPERIMENTS) # GROSSMAN
# In[18]:
checkLabels(training_examples)
# In[19]:
checkLabels(validation_examples)
# In[19]:
# GROSSMAN change augment to True when running for real.
train_buffered_generator_mp = buffered_gen_mp(image_class_generator(training_examples,
batch_size,
augment=True),
buffer_size=20,
num_processes=5)
val_buffered_generator_mp = buffered_gen_mp(image_class_generator(validation_examples,
batch_size,
augment=False),
buffer_size=20,
num_processes=5)
# In[20]:
callbacks = make_callbacks(mcfs, tbld)
# In[21]:
KTF.set_session(get_session(.90))
# In[25]:
#model = make_siamese_model_concat(3)
model = make_siamese_model_subtract(3)
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
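# The three outputs (same vehicle / same type / same color) are independent
# binary attributes, hence per-output sigmoid activations with
# binary_crossentropy rather than a softmax over mutually exclusive classes.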
# In[26]:
fixed_history = model.fit_generator(train_buffered_generator_mp,
samples_per_epoch=10240,
nb_epoch=20,
callbacks=None,
nb_val_samples=10240,
validation_data=val_buffered_generator_mp,
verbose=2)
fixed_history.history
free_model_layers(model)
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
free_history = model.fit_generator(train_buffered_generator_mp,
samples_per_epoch=10240,
nb_epoch=50,
callbacks=callbacks,
nb_val_samples=10240,
validation_data=val_buffered_generator_mp,
verbose=2)
save_model_workaround(model,
'/local_data/dgrossman/model_save_dir/VeRi-siamese-weekend-6.model.json',
'/local_data/dgrossman/model_save_dir/VeRi-siamese-weekend-6.weights.hdf5')
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.clip_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import tensorflow as tf
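# NOTE: this file appears to target a pre-1.0 TensorFlow API
# (import tensorflow.python.platform, self.test_session(),
# tf.clip_by_average_norm); later TF versions rename or remove some of these.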
class ClipTest(tf.test.TestCase):
# ClipByValue test
def testClipByValue(self):
with self.test_session():
x = tf.constant([-5.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3])
np_ans = [[-4.4, 2.0, 3.0],
[4.0, 4.4, 4.4]]
clip_value = 4.4
ans = tf.clip_by_value(x, -clip_value, clip_value)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
def testClipByValueNonFinite(self):
with self.test_session():
x = tf.constant([float('NaN'), float('Inf'), -float('Inf')])
np_ans = [float('NaN'), 4.0, -4.0]
clip_value = 4.0
ans = tf.clip_by_value(x, -clip_value, clip_value)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
# ClipByNorm tests
def testClipByNormClipped(self):
# Norm clipping when clip_norm < 5
with self.test_session():
x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Norm of x = sqrt(3^2 + 4^2) = 5
np_ans = [[-2.4, 0.0, 0.0],
[3.2, 0.0, 0.0]]
clip_norm = 4.0
ans = tf.clip_by_norm(x, clip_norm)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
def testClipByNormNotClipped(self):
# No norm clipping when clip_norm >= 5
with self.test_session():
x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Norm of x = sqrt(3^2 + 4^2) = 5
np_ans = [[-3.0, 0.0, 0.0],
[4.0, 0.0, 0.0]]
clip_norm = 6.0
ans = tf.clip_by_norm(x, clip_norm)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
def testClipByNormZero(self):
# No norm clipping when norm = 0
with self.test_session():
x = tf.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
# Norm = 0, no changes
np_ans = [[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]]
clip_norm = 6.0
ans = tf.clip_by_norm(x, clip_norm)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
# ClipByGlobalNorm tests
def testClipByGlobalNormClipped(self):
# Norm clipping when clip_norm < 5
with self.test_session():
x0 = tf.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = tf.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
clip_norm = 4.0
# Answers are the original tensors scaled by 4.0/5.0
np_ans_0 = [[-1.6, 0.0, 0.0],
[3.2, 0.0, 0.0]]
np_ans_1 = [0.8, -1.6]
ans, norm = tf.clip_by_global_norm((x0, x1), clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].eval()
tf_norm = norm.eval()
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
def testClipByGlobalNormSupportsNone(self):
# Norm clipping when clip_norm < 5
with self.test_session():
x0 = tf.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = tf.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
clip_norm = 4.0
# Answers are the original tensors scaled by 4.0/5.0
np_ans_0 = [[-1.6, 0.0, 0.0],
[3.2, 0.0, 0.0]]
np_ans_1 = [0.8, -1.6]
ans, norm = tf.clip_by_global_norm((x0, None, x1, None), clip_norm)
self.assertTrue(ans[1] is None)
self.assertTrue(ans[3] is None)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[2].eval()
tf_norm = norm.eval()
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
def testClipByGlobalNormWithIndexedSlicesClipped(self):
# Norm clipping when clip_norm < 5
with self.test_session():
x0 = tf.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = tf.IndexedSlices(tf.constant([1.0, -2.0]),
tf.constant([3, 4]))
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
clip_norm = 4.0
# Answers are the original tensors scaled by 4.0/5.0
np_ans_0 = [[-1.6, 0.0, 0.0],
[3.2, 0.0, 0.0]]
np_ans_1 = [0.8, -1.6]
ans, norm = tf.clip_by_global_norm([x0, x1], clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].values.eval()
tf_norm = norm.eval()
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
def testClipByGlobalNormNotClipped(self):
# No norm clipping when clip_norm >= 5
with self.test_session():
x0 = tf.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = tf.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
np_ans_0 = [[-2.0, 0.0, 0.0],
[4.0, 0.0, 0.0]]
np_ans_1 = [1.0, -2.0]
clip_norm = 6.0
ans, norm = tf.clip_by_global_norm([x0, x1], clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].eval()
tf_norm = norm.eval()
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
def testClipByGlobalNormZero(self):
# No norm clipping when norm = 0
with self.test_session():
x0 = tf.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
x1 = tf.constant([0.0, 0.0])
# Norm = 0, no changes
np_ans_0 = [[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]]
np_ans_1 = [0.0, 0.0]
clip_norm = 6.0
ans, norm = tf.clip_by_global_norm([x0, x1], clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].eval()
tf_norm = norm.eval()
self.assertAllClose(tf_norm, 0.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
def testClipByAverageNormClipped(self):
# Norm clipping when average clip_norm < 0.83333333
with self.test_session():
x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
np_ans = [[-2.88, 0.0, 0.0],
[3.84, 0.0, 0.0]]
clip_norm = 0.8
ans = tf.clip_by_average_norm(x, clip_norm)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
def testClipByAverageNormNotClipped(self):
# No norm clipping when average clip_norm >= 0.83333333
with self.test_session():
x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
np_ans = [[-3.0, 0.0, 0.0],
[4.0, 0.0, 0.0]]
clip_norm = 0.9
ans = tf.clip_by_average_norm(x, clip_norm)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
def testClipByAverageNormZero(self):
# No norm clipping when average clip_norm = 0
with self.test_session():
x = tf.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
# Average norm = 0, no changes
np_ans = [[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]]
clip_norm = 0.9
ans = tf.clip_by_average_norm(x, clip_norm)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
if __name__ == "__main__":
tf.test.main()
|
|
import os
import xlrd
from xlutils.copy import copy
from datetime import datetime as dt
from PIL import Image
from shutil import rmtree
from pcwg_tool import pcwg_inner_ranges
import numpy as np
template_name = 'Share_1_template.xls'
sheet_map = {'Submission': 0,
'Meta Data': 1,
'Baseline': 2,
'REWS': 3,
'TI Renorm': 4,
'REWS and TI Renorm': 5,
'PDM': 6}
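# The style helpers below poke at xlwt's private Row/Cell internals (xf_idx)
# because xlwt exposes no public API for reading a cell's style out of a
# workbook copied with xlutils; this is fragile across xlwt versions.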
def wrt_cell_keep_style(value, sheet, row, col):
style = _get_cell_style(sheet, row, col)
sheet.write(row, col, value)
_apply_cell_style(style, sheet, row, col)
def _get_cell_style(sheet, row, col):
return sheet._Worksheet__rows.get(row)._Row__cells.get(col).xf_idx
def _apply_cell_style(style, sheet, row, col):
sheet._Worksheet__rows.get(row)._Row__cells.get(col).xf_idx = style
class pcwg_share1_rpt(object):
def __init__(self, analysis, version='Unknown', template=template_name, output_fname=(os.getcwd() + os.sep + 'Data Sharing Initiative 1 Report.xls')):
rb = xlrd.open_workbook(template, formatting_info=True)
self.workbook = copy(rb)
self.analysis = analysis
self.no_of_datasets = len(analysis.datasetConfigs)
self.output_fname = output_fname
self.version = version
def report(self):
self.write_submission_data(sheet_map['Submission'])
self.write_meta_data()
self.write_metrics()
self.insert_images()
self.export()
def write_meta_data(self):
sh = self.workbook.get_sheet(sheet_map['Meta Data'])
col = 2
used_inner_range = [self.analysis.innerRangeLowerShear, self.analysis.innerRangeUpperShear, self.analysis.innerRangeLowerTurbulence, self.analysis.innerRangeUpperTurbulence]
range_A = [pcwg_inner_ranges['A']['LSh'], pcwg_inner_ranges['A']['USh'], pcwg_inner_ranges['A']['LTI'], pcwg_inner_ranges['A']['UTI']]
range_B = [pcwg_inner_ranges['B']['LSh'], pcwg_inner_ranges['B']['USh'], pcwg_inner_ranges['B']['LTI'], pcwg_inner_ranges['B']['UTI']]
range_C = [pcwg_inner_ranges['C']['LSh'], pcwg_inner_ranges['C']['USh'], pcwg_inner_ranges['C']['LTI'], pcwg_inner_ranges['C']['UTI']]
if used_inner_range == range_A:
range_id = 'A'
elif used_inner_range == range_B:
range_id = 'B'
elif used_inner_range == range_C:
range_id = 'C'
else:
raise Exception('The inner range %s is not valid for use in the PCWG Sharing Initiative.' % used_inner_range)
manual_required_style = _get_cell_style(sh, 7, 2)
manual_optional_style = _get_cell_style(sh, 13, 2)
calculated_style = _get_cell_style(sh, 8, 2)
dset_header_style = _get_cell_style(sh, 6, 2)
man_req_rows = [7, 11, 12, 18, 19, 21, 26, 29]
man_opt_rows = [13, 14, 15, 16, 17, 20, 22, 28]
for conf in self.analysis.datasetConfigs:
sh.write(6, col, conf.invariant_rand_id)
_apply_cell_style(dset_header_style, sh, 6, col)
wsl = len(conf.windSpeedLevels) if self.analysis.rewsActive else None
if self.analysis.rewsActive:
rews_has_veer = (conf.windDirectionLevels[conf.windDirectionLevels.keys()[0]] is not None and len(conf.windDirectionLevels[conf.windDirectionLevels.keys()[0]]) > 0)
else:
rews_has_veer = None
sh.write(8, col, wsl)
sh.write(9, col, rews_has_veer)
_apply_cell_style(calculated_style, sh, 8, col)
_apply_cell_style(calculated_style, sh, 9, col)
sh.write(10, col, range_id)
_apply_cell_style(calculated_style, sh, 10, col)
sh.write(23, col, self.analysis.config.diameter)
_apply_cell_style(calculated_style, sh, 23, col)
sh.write(24, col, self.analysis.config.hubHeight)
_apply_cell_style(calculated_style, sh, 24, col)
specific_power = self.analysis.config.ratedPower / (np.pi * (self.analysis.config.diameter / 2.) ** 2.)
sh.write(25, col, specific_power)
_apply_cell_style(calculated_style, sh, 25, col)
sh.write(27, col, int(min(self.analysis.dataFrame.loc[self.analysis.dataFrame[self.analysis.nameColumn] == conf.name, self.analysis.timeStamp].dt.year)))
_apply_cell_style(calculated_style, sh, 27, col)
sh.write(30, col, self.analysis.config.interpolationMode)
_apply_cell_style(calculated_style, sh, 30, col)
for row in man_req_rows:
sh.write(row, col, None)
_apply_cell_style(manual_required_style, sh, row, col)
for row in man_opt_rows:
sh.write(row, col, None)
_apply_cell_style(manual_optional_style, sh, row, col)
col += 1
def write_submission_data(self, sheet_no):
sh = self.workbook.get_sheet(sheet_no)
wrt_cell_keep_style(self.analysis.uniqueAnalysisId, sh, 5, 2)
wrt_cell_keep_style(str(dt.now()), sh, 6, 2)
wrt_cell_keep_style(str(self.version), sh, 7, 2)
conf_inv_row, conf_row, ts_row, col = 11, 12, 13, 2
style_fntsz1 = _get_cell_style(sh, conf_row, col)
style = _get_cell_style(sh, conf_inv_row, col)
for conf in self.analysis.datasetConfigs:
sh.write(conf_inv_row, col, conf.invariant_rand_id)
_apply_cell_style(style, sh, conf_inv_row, col)
sh.write(conf_row, col, self.analysis.datasetUniqueIds[conf.name]['Configuration'])
_apply_cell_style(style_fntsz1, sh, conf_row, col)
sh.write(ts_row, col, self.analysis.datasetUniqueIds[conf.name]['Time Series'])
_apply_cell_style(style_fntsz1, sh, ts_row, col)
col += 1
styles_dict = {True: _get_cell_style(sh, 17, 2),
False: _get_cell_style(sh, 17, 3),
'N/A': _get_cell_style(sh, 18, 2)}
sh.write(17, 2, self.analysis.densityCorrectionActive)
_apply_cell_style(styles_dict[self.analysis.densityCorrectionActive], sh, 17, 2)
for col in [3,4,5]:
sh.write(17, col, False)
_apply_cell_style(styles_dict[False], sh, 17, col)
if self.analysis.rewsActive:
sh.write(18, 2, self.analysis.densityCorrectionActive)
_apply_cell_style(styles_dict[self.analysis.densityCorrectionActive], sh, 18, 2)
for col in [4,5]:
sh.write(18, col, False)
_apply_cell_style(styles_dict[False], sh, 18, col)
sh.write(18, 3, True)
_apply_cell_style(styles_dict[True], sh, 18, 3)
else:
for col in [2,3,4,5]:
sh.write(18, col, 'N/A')
_apply_cell_style(styles_dict['N/A'], sh, 18, col)
if self.analysis.turbRenormActive:
sh.write(19, 2, self.analysis.densityCorrectionActive)
_apply_cell_style(styles_dict[self.analysis.densityCorrectionActive], sh, 19, 2)
for col in [3,5]:
sh.write(19, col, False)
_apply_cell_style(styles_dict[False], sh, 19, col)
sh.write(19, 4, True)
_apply_cell_style(styles_dict[True], sh, 19, 4)
else:
for col in [2,3,4,5]:
sh.write(19, col, 'N/A')
_apply_cell_style(styles_dict['N/A'], sh, 19, col)
if (self.analysis.turbRenormActive and self.analysis.rewsActive):
sh.write(20, 2, self.analysis.densityCorrectionActive)
_apply_cell_style(styles_dict[self.analysis.densityCorrectionActive], sh, 20, 2)
sh.write(20, 5, False)
_apply_cell_style(styles_dict[False], sh, 20, 5)
for col in [3,4]:
sh.write(20, col, True)
_apply_cell_style(styles_dict[True], sh, 20, col)
else:
for col in [2,3,4,5]:
sh.write(20, col, 'N/A')
_apply_cell_style(styles_dict['N/A'], sh, 20, col)
if self.analysis.powerDeviationMatrixActive:
sh.write(21, 2, self.analysis.densityCorrectionActive)
_apply_cell_style(styles_dict[self.analysis.densityCorrectionActive], sh, 21, 2)
for col in [3,4]:
sh.write(21, col, False)
_apply_cell_style(styles_dict[False], sh, 21, col)
sh.write(21, 5, True)
_apply_cell_style(styles_dict[True], sh, 21, 5)
else:
for col in [2,3,4,5]:
sh.write(21, col, 'N/A')
_apply_cell_style(styles_dict['N/A'], sh, 21, col)
def write_metrics(self):
self._write_metrics_sheet('Baseline', self.analysis.pcwgErrorBaseline)
if self.analysis.turbRenormActive:
self._write_metrics_sheet('TI Renorm', self.analysis.pcwgErrorTurbRenor)
if self.analysis.rewsActive:
self._write_metrics_sheet('REWS', self.analysis.pcwgErrorRews)
if (self.analysis.turbRenormActive and self.analysis.rewsActive):
self._write_metrics_sheet('REWS and TI Renorm', self.analysis.pcwgErrorTiRewsCombined)
if self.analysis.powerDeviationMatrixActive:
self._write_metrics_sheet('PDM', self.analysis.pcwgErrorPdm)
def _write_metrics_sheet(self, sh_name, error_col):
self.__write_overall_metric_sheet(sh_name)
self.__write_by_ws_metric_sheet(sh_name, error_col)
self.__write_by_ws_metric_inner_sheet(sh_name, error_col)
self.__write_by_ws_metric_outer_sheet(sh_name, error_col)
if self.analysis.hasDirection:
self.__write_by_dir_metric_sheet(sh_name, error_col)
self.__write_by_time_metric_sheet(sh_name, error_col)
self.__write_by_range_metric_sheet(sh_name, error_col)
self.__write_by_four_cell_matrix_metric_sheet(sh_name, error_col)
self.__write_by_month_metric_sheet(sh_name, error_col)
def __write_overall_metric_sheet(self, sh_name):
sh = self.workbook.get_sheet(sheet_map[sh_name])
wrt_cell_keep_style(self.analysis.overall_pcwg_err_metrics[self.analysis.dataCount], sh, 3, 3)
wrt_cell_keep_style(self.analysis.overall_pcwg_err_metrics[sh_name + ' NME'], sh, 4, 3)
wrt_cell_keep_style(self.analysis.overall_pcwg_err_metrics[sh_name + ' NMAE'], sh, 5, 3)
def __write_by_ws_metric_sheet(self, sh_name, err_col):
df = self.analysis.binned_pcwg_err_metrics[self.analysis.normalisedWSBin][err_col]
sh = self.workbook.get_sheet(sheet_map[sh_name])
col = 3
for i in self.analysis.normalisedWindSpeedBins.centers:
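            # bin centers with no data are absent from the metrics DataFrame
            # index and raise a KeyError; skip them but still advance the
            # column so values stay aligned (the same pattern is used below)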
try:
if df.loc[i, 'Data Count'] > 0:
wrt_cell_keep_style(int(df.loc[i, 'Data Count']), sh, 7, col)
wrt_cell_keep_style(df.loc[i, 'NME'], sh, 8, col)
wrt_cell_keep_style(df.loc[i, 'NMAE'], sh, 9, col)
col += 1
except:
col += 1
def __write_by_ws_metric_inner_sheet(self, sh_name, err_col):
df = self.analysis.binned_pcwg_err_metrics[self.analysis.normalisedWSBin + ' ' + 'Inner' + ' Range'][err_col]
sh = self.workbook.get_sheet(sheet_map[sh_name])
col = 3
for i in self.analysis.normalisedWindSpeedBins.centers:
try:
if df.loc[i, 'Data Count'] > 0:
wrt_cell_keep_style(int(df.loc[i, 'Data Count']), sh, 11, col)
wrt_cell_keep_style(df.loc[i, 'NME'], sh, 12, col)
wrt_cell_keep_style(df.loc[i, 'NMAE'], sh, 13, col)
col += 1
except:
col += 1
def __write_by_ws_metric_outer_sheet(self, sh_name, err_col):
df = self.analysis.binned_pcwg_err_metrics[self.analysis.normalisedWSBin + ' ' + 'Outer' + ' Range'][err_col]
sh = self.workbook.get_sheet(sheet_map[sh_name])
col = 3
for i in self.analysis.normalisedWindSpeedBins.centers:
try:
if df.loc[i, 'Data Count'] > 0:
wrt_cell_keep_style(int(df.loc[i, 'Data Count']), sh, 15, col)
wrt_cell_keep_style(df.loc[i, 'NME'], sh, 16, col)
wrt_cell_keep_style(df.loc[i, 'NMAE'], sh, 17, col)
col += 1
except:
col += 1
def __write_by_dir_metric_sheet(self, sh_name, err_col):
df = self.analysis.binned_pcwg_err_metrics[self.analysis.pcwgDirectionBin][err_col]
sh = self.workbook.get_sheet(sheet_map[sh_name])
col = 3
for i in self.analysis.pcwgWindDirBins.centers:
try:
if df.loc[i, 'Data Count'] > 0:
wrt_cell_keep_style(int(df.loc[i, 'Data Count']), sh, 27, col)
wrt_cell_keep_style(df.loc[i, 'NME'], sh, 28, col)
wrt_cell_keep_style(df.loc[i, 'NMAE'], sh, 29, col)
col += 1
except:
col += 1
def __write_by_time_metric_sheet(self, sh_name, err_col):
df = self.analysis.binned_pcwg_err_metrics[self.analysis.hourOfDay][err_col]
sh = self.workbook.get_sheet(sheet_map[sh_name])
col = 3
for i in range(0,24):
try:
if df.loc[i, 'Data Count'] > 0:
wrt_cell_keep_style(int(df.loc[i, 'Data Count']), sh, 19, col)
wrt_cell_keep_style(df.loc[i, 'NME'], sh, 20, col)
wrt_cell_keep_style(df.loc[i, 'NMAE'], sh, 21, col)
col += 1
except:
col += 1
def __write_by_range_metric_sheet(self, sh_name, err_col):
df = self.analysis.binned_pcwg_err_metrics[self.analysis.pcwgRange][err_col]
sh = self.workbook.get_sheet(sheet_map[sh_name])
col = 3
for i in ['Inner','Outer']:
try:
if df.loc[i, 'Data Count'] > 0:
wrt_cell_keep_style(int(df.loc[i, 'Data Count']), sh, 31, col)
wrt_cell_keep_style(df.loc[i, 'NME'], sh, 32, col)
wrt_cell_keep_style(df.loc[i, 'NMAE'], sh, 33, col)
col += 1
except:
col += 1
def __write_by_four_cell_matrix_metric_sheet(self, sh_name, err_col):
df = self.analysis.binned_pcwg_err_metrics[self.analysis.pcwgFourCellMatrixGroup][err_col]
sh = self.workbook.get_sheet(sheet_map[sh_name])
col = 3
for i in ['LWS-LTI','LWS-HTI','HWS-LTI','HWS-HTI']:
try:
if df.loc[i, 'Data Count'] > 0:
wrt_cell_keep_style(int(df.loc[i, 'Data Count']), sh, 35, col)
wrt_cell_keep_style(df.loc[i, 'NME'], sh, 36, col)
wrt_cell_keep_style(df.loc[i, 'NMAE'], sh, 37, col)
col += 1
except:
col += 1
def __write_by_month_metric_sheet(self, sh_name, err_col):
df = self.analysis.binned_pcwg_err_metrics[self.analysis.calendarMonth][err_col]
sh = self.workbook.get_sheet(sheet_map[sh_name])
col = 3
for i in range(1,13):
try:
if df.loc[i, 'Data Count'] > 0:
wrt_cell_keep_style(int(df.loc[i, 'Data Count']), sh, 23, col)
wrt_cell_keep_style(df.loc[i, 'NME'], sh, 24, col)
wrt_cell_keep_style(df.loc[i, 'NMAE'], sh, 25, col)
col += 1
except:
col += 1
def insert_images(self):
from plots import MatplotlibPlotter
plt_path = 'Temp'
plotter = MatplotlibPlotter(plt_path, self.analysis)
for conf in self.analysis.datasetConfigs:
sh = self.workbook.add_sheet(conf.invariant_rand_id)
row_filt = (self.analysis.dataFrame[self.analysis.nameColumn] == conf.name)
fname = (conf.invariant_rand_id) + ' Anonymous Power Curve Plot'
plotter.plotPowerCurve(self.analysis.inputHubWindSpeed, self.analysis.actualPower, self.analysis.innerMeasuredPowerCurve, anon = True, row_filt = row_filt, fname = fname + '.png', show_analysis_pc = False, mean_title = 'Inner Range Power Curve', mean_pc_color = '#FF0000')
im = Image.open(plt_path + os.sep + fname + '.png').convert('RGB')
im.save(plt_path + os.sep + fname + '.bmp')
sh.write(0, 0, 'Power curve scatter plot for dataset with invariant random ID ' + (conf.invariant_rand_id) + '. The Inner Range Power Curve shown is derived using all datasets.')
            sh.insert_bitmap(plt_path + os.sep + fname + '.bmp', 2, 1)
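        # best-effort cleanup of the temporary plot folder; a failure (e.g. a
        # file still being held open) is reported but not raised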
try:
rmtree(plt_path)
except:
print 'Could not delete folder %s' % (os.getcwd() + os.sep + plt_path)
def export(self):
self._write_confirmation_of_export()
print "Exporting the PCWG Share 1 report to:\n\t%s" % (self.output_fname)
self.workbook.save(self.output_fname)
def _write_confirmation_of_export(self):
sh = self.workbook.get_sheet(sheet_map['Submission'])
wrt_cell_keep_style(True, sh, 8, 2)
|
|
from nose.tools import eq_
from allmychanges.changelog_updater import update_changelog_from_raw_data3
from django.test import Client
from allmychanges.models import (
Changelog,
User,
Issue)
from allmychanges.env import Environment
from allmychanges.utils import first, reverse
from allmychanges.issues import calculate_issue_importance
from allmychanges.tests.utils import create_user
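# v() builds one raw-data version entry (an Environment) as consumed by
# update_changelog_from_raw_data3, e.g. v(version='0.2.0')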
def v(**kwargs):
kwargs.setdefault('content', '')
kwargs.setdefault('processed_content', '')
env = Environment()
env._data.update(kwargs)
return env
def test_dont_add_issue_if_we_found_only_one_new_version():
changelog = Changelog.objects.create(
namespace='python', name='pip', source='test')
# we already know about one version
changelog.versions.create(number='0.2.0')
# we discovered a new 0.3.0 version
# and this is OK.
data = [v(version='0.2.0'),
v(version='0.3.0')]
update_changelog_from_raw_data3(changelog, data)
eq_([],
[i.type for i in changelog.issues.all()])
def test_dont_add_issue_if_we_found_more_than_one_new_version_in_good_order():
changelog = Changelog.objects.create(
namespace='python', name='pip', source='test')
# we already know about one version
changelog.versions.create(number='0.2.0')
# everything is ok here, 0.4.0 follows 0.3.0
# and 0.3.0 follows 0.2.0
data = [v(version='0.2.0'),
v(version='0.3.0'),
v(version='0.4.0')]
update_changelog_from_raw_data3(changelog, data)
eq_(0, changelog.issues.count())
def test_add_issue_if_we_found_more_than_one_new_version_and_they_have_bad_order():
changelog = Changelog.objects.create(
namespace='python', name='pip', source='test')
# we already know about one version
changelog.versions.create(number='0.2.0')
    # it is strange that version 1.2.3 follows 0.3.0
    # when there is no 1.2.2 in the database
data = [v(version='0.2.0'),
v(version='0.3.0'),
v(version='1.2.3')]
update_changelog_from_raw_data3(changelog, data)
eq_(['some-versions-out-of-order'],
[i.type for i in changelog.issues.all()])
i = first(changelog.issues)
eq_(['0.3.0', '1.2.3'],
i.get_related_versions())
def test_add_issue_only_if_there_are_already_some_versions():
changelog = Changelog.objects.create(
namespace='python', name='pip', source='test')
    # there aren't any versions in the changelog yet,
    # but we found three new versions;
    # possibly this package was added just a few seconds ago
data = [v(version='0.2.0'),
v(version='0.3.0'),
v(version='0.4.0')]
update_changelog_from_raw_data3(changelog, data)
    # there weren't any versions in the changelog before,
    # so we shouldn't create an issue
eq_([],
[i.type for i in changelog.issues.all()])
def test_add_issue_if_subsequent_discovery_found_less_versions():
changelog = Changelog.objects.create(
namespace='python', name='pip', source='test')
# first discovery found 3 versions
data = [v(version='0.2.0'),
v(version='0.3.0'),
v(version='0.4.0')]
update_changelog_from_raw_data3(changelog, data)
eq_([],
[i.type for i in changelog.issues.all()])
    # second discovery found only one version
data = [v(version='0.2.0')]
update_changelog_from_raw_data3(changelog, data)
eq_(['lesser-version-count'],
[i.type for i in changelog.issues.all()])
eq_('This time we didn\'t discover 0.3.0, 0.4.0 versions',
changelog.issues.latest('id').comment)
# now we check that subsequent discoveries don't
# create new issues until we resolve this one
data = [v(version='0.2.0')]
update_changelog_from_raw_data3(changelog, data)
eq_(['lesser-version-count'],
[i.type for i in changelog.issues.all()])
def test_two_or_more_lesser_versions_issue_could_be_added_for_different_versions_sets():
changelog = Changelog.objects.create(
namespace='python', name='pip', source='test')
# first discovery found 3 versions
data = [v(version='0.2.0'),
v(version='0.3.0'),
v(version='0.4.0')]
update_changelog_from_raw_data3(changelog, data)
eq_([],
[i.type for i in changelog.issues.all()])
# second discovery found only two versions
data = [v(version='0.3.0'),
v(version='0.4.0')]
update_changelog_from_raw_data3(changelog, data)
    # and the next discovery found only 0.4.0
data = [v(version='0.4.0')]
update_changelog_from_raw_data3(changelog, data)
# this should create two issues
eq_([('lesser-version-count', '0.2.0'),
('lesser-version-count', '0.3.0')],
[(i.type, i.related_versions)
for i in changelog.issues.all()])
def test_lesser_versions_autoresolve():
changelog = Changelog.objects.create(
namespace='python', name='pip', source='test')
# first discovery found 3 versions
data = [v(version='0.2.0'),
v(version='0.3.0'),
v(version='0.4.0')]
update_changelog_from_raw_data3(changelog, data)
eq_([],
[i.type for i in changelog.issues.all()])
# second discovery found only two versions
data = [v(version='0.3.0'),
v(version='0.4.0')]
update_changelog_from_raw_data3(changelog, data)
    # and now we discovered all three versions again
data = [v(version='0.2.0'),
v(version='0.3.0'),
v(version='0.4.0')]
update_changelog_from_raw_data3(changelog, data)
    # this should create only one issue
eq_([('lesser-version-count', '0.2.0')],
[(i.type, i.related_versions)
for i in changelog.issues.all()])
issue = changelog.issues.all()[0]
# and it should be resolved automatically
assert issue.resolved_at is not None
eq_('Autoresolved', issue.comments.all()[0].message)
def test_issue_importance():
# it should be called on issue creation
c = lambda **kwargs: Issue.objects.create(**kwargs)
eq_(1, c(changelog=None, user=None).importance)
user = create_user('art')
eq_(10, c(changelog=None, user=user).importance)
    # and now we'll test the logic of the function itself
# when no trackers and issue is automatic
eq_(1, calculate_issue_importance(
num_trackers=0, user=None, light_user=None))
# each tracker adds 1 to the final value
eq_(6, calculate_issue_importance(
num_trackers=5, user=None, light_user=None))
    # if the issue is reported by a registered user, then
    # the value is multiplied by 10
    eq_(60, calculate_issue_importance(
        num_trackers=5, user=user, light_user=None))
def test_redirect_to_project_issues():
name = 'pip'
namespace = 'python'
cl = Client()
url = reverse('project-issues', name=name, namespace=namespace)
response = cl.get(url)
expected_url = ('http://testserver/issues/?'
'namespace={0}&name={1}&'
'order=-id&resolved=any').format(
namespace, name)
eq_(302, response.status_code)
eq_(expected_url, response['Location'])
|
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import mock
import unittest
import zlib
from textwrap import dedent
import os
import six
from six import StringIO
from six.moves import range
from six.moves.urllib.parse import quote
from test.unit import FakeLogger
from eventlet.green import urllib2
from swift.common import internal_client
from swift.common import swob
from swift.common.storage_policy import StoragePolicy
from test.unit import with_tempdir, write_fake_ring, patch_policies
from test.unit.common.middleware.helpers import FakeSwift
def not_sleep(seconds):
pass
def unicode_string(start, length):
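    # e.g. unicode_string(1000, 4) == u'\u03e8\u03e9\u03ea\u03eb'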
return u''.join([six.unichr(x) for x in range(start, start + length)])
def path_parts():
account = unicode_string(1000, 4) + ' ' + unicode_string(1100, 4)
container = unicode_string(2000, 4) + ' ' + unicode_string(2100, 4)
obj = unicode_string(3000, 4) + ' ' + unicode_string(3100, 4)
return account, container, obj
def make_path(account, container=None, obj=None):
path = '/v1/%s' % quote(account.encode('utf-8'))
if container:
path += '/%s' % quote(container.encode('utf-8'))
if obj:
path += '/%s' % quote(obj.encode('utf-8'))
return path
def make_path_info(account, container=None, obj=None):
# FakeSwift keys on PATH_INFO - which is *encoded* but unquoted
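    # e.g. for account u'caf\xe9': make_path gives '/v1/caf%C3%A9' (quoted),
    # make_path_info gives '/v1/caf\xc3\xa9' (utf-8 encoded, unquoted)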
path = '/v1/%s' % '/'.join(
p for p in (account, container, obj) if p)
return path.encode('utf-8')
def get_client_app():
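    # loadapp is only patched around __init__, which is when InternalClient
    # builds its WSGI pipeline, so the returned client keeps FakeSwift as
    # client.app after the patch is reverted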
app = FakeSwift()
with mock.patch('swift.common.internal_client.loadapp',
new=lambda *args, **kwargs: app):
client = internal_client.InternalClient({}, 'test', 1)
return client, app
class InternalClient(internal_client.InternalClient):
def __init__(self):
pass
class GetMetadataInternalClient(internal_client.InternalClient):
def __init__(self, test, path, metadata_prefix, acceptable_statuses):
self.test = test
self.path = path
self.metadata_prefix = metadata_prefix
self.acceptable_statuses = acceptable_statuses
self.get_metadata_called = 0
self.metadata = 'some_metadata'
def _get_metadata(self, path, metadata_prefix, acceptable_statuses=None,
headers=None):
self.get_metadata_called += 1
self.test.assertEquals(self.path, path)
self.test.assertEquals(self.metadata_prefix, metadata_prefix)
self.test.assertEquals(self.acceptable_statuses, acceptable_statuses)
return self.metadata
class SetMetadataInternalClient(internal_client.InternalClient):
def __init__(
self, test, path, metadata, metadata_prefix, acceptable_statuses):
self.test = test
self.path = path
self.metadata = metadata
self.metadata_prefix = metadata_prefix
self.acceptable_statuses = acceptable_statuses
self.set_metadata_called = 0
self.metadata = 'some_metadata'
def _set_metadata(
self, path, metadata, metadata_prefix='',
acceptable_statuses=None):
self.set_metadata_called += 1
self.test.assertEquals(self.path, path)
self.test.assertEquals(self.metadata_prefix, metadata_prefix)
self.test.assertEquals(self.metadata, metadata)
self.test.assertEquals(self.acceptable_statuses, acceptable_statuses)
class IterInternalClient(internal_client.InternalClient):
def __init__(
self, test, path, marker, end_marker, acceptable_statuses, items):
self.test = test
self.path = path
self.marker = marker
self.end_marker = end_marker
self.acceptable_statuses = acceptable_statuses
self.items = items
def _iter_items(
self, path, marker='', end_marker='', acceptable_statuses=None):
self.test.assertEquals(self.path, path)
self.test.assertEquals(self.marker, marker)
self.test.assertEquals(self.end_marker, end_marker)
self.test.assertEquals(self.acceptable_statuses, acceptable_statuses)
for item in self.items:
yield item
class TestCompressingfileReader(unittest.TestCase):
def test_init(self):
class CompressObj(object):
def __init__(self, test, *args):
self.test = test
self.args = args
def method(self, *args):
self.test.assertEquals(self.args, args)
return self
try:
compressobj = CompressObj(
self, 9, zlib.DEFLATED, -zlib.MAX_WBITS, zlib.DEF_MEM_LEVEL, 0)
old_compressobj = internal_client.compressobj
internal_client.compressobj = compressobj.method
f = StringIO('')
fobj = internal_client.CompressingFileReader(f)
self.assertEquals(f, fobj._f)
self.assertEquals(compressobj, fobj._compressor)
self.assertEquals(False, fobj.done)
self.assertEquals(True, fobj.first)
self.assertEquals(0, fobj.crc32)
self.assertEquals(0, fobj.total_size)
finally:
internal_client.compressobj = old_compressobj
def test_read(self):
exp_data = 'abcdefghijklmnopqrstuvwxyz'
fobj = internal_client.CompressingFileReader(
StringIO(exp_data), chunk_size=5)
data = ''
d = zlib.decompressobj(16 + zlib.MAX_WBITS)
for chunk in fobj.read():
data += d.decompress(chunk)
self.assertEquals(exp_data, data)
def test_seek(self):
exp_data = 'abcdefghijklmnopqrstuvwxyz'
fobj = internal_client.CompressingFileReader(
StringIO(exp_data), chunk_size=5)
# read a couple of chunks only
for _ in range(2):
fobj.read()
# read whole thing after seek and check data
fobj.seek(0)
data = ''
d = zlib.decompressobj(16 + zlib.MAX_WBITS)
for chunk in fobj.read():
data += d.decompress(chunk)
self.assertEquals(exp_data, data)
def test_seek_not_implemented_exception(self):
fobj = internal_client.CompressingFileReader(
StringIO(''), chunk_size=5)
self.assertRaises(NotImplementedError, fobj.seek, 10)
self.assertRaises(NotImplementedError, fobj.seek, 0, 10)
class TestInternalClient(unittest.TestCase):
@mock.patch('swift.common.utils.HASH_PATH_SUFFIX', new='endcap')
@with_tempdir
def test_load_from_config(self, tempdir):
        conf_path = os.path.join(tempdir, 'internal_client.conf')
conf_body = """
[DEFAULT]
swift_dir = %s
[pipeline:main]
pipeline = catch_errors cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
auto_create_account_prefix = -
[filter:cache]
use = egg:swift#memcache
[filter:catch_errors]
use = egg:swift#catch_errors
""" % tempdir
with open(conf_path, 'w') as f:
f.write(dedent(conf_body))
account_ring_path = os.path.join(tempdir, 'account.ring.gz')
write_fake_ring(account_ring_path)
container_ring_path = os.path.join(tempdir, 'container.ring.gz')
write_fake_ring(container_ring_path)
object_ring_path = os.path.join(tempdir, 'object.ring.gz')
write_fake_ring(object_ring_path)
with patch_policies([StoragePolicy(0, 'legacy', True)]):
client = internal_client.InternalClient(conf_path, 'test', 1)
self.assertEqual(client.account_ring,
client.app.app.app.account_ring)
self.assertEqual(client.account_ring.serialized_path,
account_ring_path)
self.assertEqual(client.container_ring,
client.app.app.app.container_ring)
self.assertEqual(client.container_ring.serialized_path,
container_ring_path)
object_ring = client.app.app.app.get_object_ring(0)
self.assertEqual(client.get_object_ring(0),
object_ring)
self.assertEqual(object_ring.serialized_path,
object_ring_path)
self.assertEquals(client.auto_create_account_prefix, '-')
def test_init(self):
class App(object):
def __init__(self, test, conf_path):
self.test = test
self.conf_path = conf_path
self.load_called = 0
def load(self, uri, allow_modify_pipeline=True):
self.load_called += 1
self.test.assertEquals(conf_path, uri)
self.test.assertFalse(allow_modify_pipeline)
return self
conf_path = 'some_path'
app = App(self, conf_path)
old_loadapp = internal_client.loadapp
internal_client.loadapp = app.load
user_agent = 'some_user_agent'
request_tries = 'some_request_tries'
try:
client = internal_client.InternalClient(
conf_path, user_agent, request_tries)
finally:
internal_client.loadapp = old_loadapp
self.assertEquals(1, app.load_called)
self.assertEquals(app, client.app)
self.assertEquals(user_agent, client.user_agent)
self.assertEquals(request_tries, client.request_tries)
def test_make_request_sets_user_agent(self):
class InternalClient(internal_client.InternalClient):
def __init__(self, test):
self.test = test
self.app = self.fake_app
self.user_agent = 'some_agent'
self.request_tries = 1
def fake_app(self, env, start_response):
self.test.assertEquals(self.user_agent, env['HTTP_USER_AGENT'])
start_response('200 Ok', [('Content-Length', '0')])
return []
client = InternalClient(self)
client.make_request('GET', '/', {}, (200,))
def test_make_request_retries(self):
class InternalClient(internal_client.InternalClient):
def __init__(self, test):
self.test = test
self.app = self.fake_app
self.user_agent = 'some_agent'
self.request_tries = 4
self.tries = 0
self.sleep_called = 0
def fake_app(self, env, start_response):
self.tries += 1
if self.tries < self.request_tries:
start_response(
'500 Internal Server Error', [('Content-Length', '0')])
else:
start_response('200 Ok', [('Content-Length', '0')])
return []
def sleep(self, seconds):
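                # make_request backs off exponentially between tries,
                # so with request_tries=4 we expect sleeps of 2, 4 and 8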
self.sleep_called += 1
self.test.assertEquals(2 ** (self.sleep_called), seconds)
client = InternalClient(self)
old_sleep = internal_client.sleep
internal_client.sleep = client.sleep
try:
client.make_request('GET', '/', {}, (200,))
finally:
internal_client.sleep = old_sleep
self.assertEquals(3, client.sleep_called)
self.assertEquals(4, client.tries)
def test_base_request_timeout(self):
# verify that base_request passes timeout arg on to urlopen
body = {"some": "content"}
class FakeConn(object):
def read(self):
return json.dumps(body)
for timeout in (0.0, 42.0, None):
mocked_func = 'swift.common.internal_client.urllib2.urlopen'
with mock.patch(mocked_func) as mock_urlopen:
mock_urlopen.side_effect = [FakeConn()]
sc = internal_client.SimpleClient('http://0.0.0.0/')
_, resp_body = sc.base_request('GET', timeout=timeout)
mock_urlopen.assert_called_once_with(mock.ANY, timeout=timeout)
# sanity check
self.assertEquals(body, resp_body)
def test_make_request_method_path_headers(self):
class InternalClient(internal_client.InternalClient):
def __init__(self):
self.app = self.fake_app
self.user_agent = 'some_agent'
self.request_tries = 3
self.env = None
def fake_app(self, env, start_response):
self.env = env
start_response('200 Ok', [('Content-Length', '0')])
return []
client = InternalClient()
for method in 'GET PUT HEAD'.split():
client.make_request(method, '/', {}, (200,))
self.assertEquals(client.env['REQUEST_METHOD'], method)
for path in '/one /two/three'.split():
client.make_request('GET', path, {'X-Test': path}, (200,))
self.assertEquals(client.env['PATH_INFO'], path)
self.assertEquals(client.env['HTTP_X_TEST'], path)
def test_make_request_codes(self):
class InternalClient(internal_client.InternalClient):
def __init__(self):
self.app = self.fake_app
self.user_agent = 'some_agent'
self.request_tries = 3
def fake_app(self, env, start_response):
start_response('200 Ok', [('Content-Length', '0')])
return []
client = InternalClient()
try:
old_sleep = internal_client.sleep
internal_client.sleep = not_sleep
client.make_request('GET', '/', {}, (200,))
client.make_request('GET', '/', {}, (2,))
client.make_request('GET', '/', {}, (400, 200))
client.make_request('GET', '/', {}, (400, 2))
            try:
                client.make_request('GET', '/', {}, (400,))
            except Exception as err:
                self.assertEquals(200, err.resp.status_int)
            else:
                self.fail("Expected the UnexpectedResponse")
            try:
                client.make_request('GET', '/', {}, (201,))
            except Exception as err:
                self.assertEquals(200, err.resp.status_int)
            else:
                self.fail("Expected the UnexpectedResponse")
try:
client.make_request('GET', '/', {}, (111,))
except Exception as err:
self.assertTrue(str(err).startswith('Unexpected response'))
else:
self.fail("Expected the UnexpectedResponse")
finally:
internal_client.sleep = old_sleep
def test_make_request_calls_fobj_seek_each_try(self):
class FileObject(object):
def __init__(self, test):
self.test = test
self.seek_called = 0
def seek(self, offset, whence=0):
self.seek_called += 1
self.test.assertEquals(0, offset)
self.test.assertEquals(0, whence)
class InternalClient(internal_client.InternalClient):
def __init__(self):
self.app = self.fake_app
self.user_agent = 'some_agent'
self.request_tries = 3
def fake_app(self, env, start_response):
start_response('404 Not Found', [('Content-Length', '0')])
return []
fobj = FileObject(self)
client = InternalClient()
try:
old_sleep = internal_client.sleep
internal_client.sleep = not_sleep
            try:
                client.make_request('PUT', '/', {}, (2,), fobj)
            except Exception as err:
                self.assertEquals(404, err.resp.status_int)
            else:
                self.fail("Expected the UnexpectedResponse")
finally:
internal_client.sleep = old_sleep
self.assertEquals(client.request_tries, fobj.seek_called)
def test_make_request_request_exception(self):
class InternalClient(internal_client.InternalClient):
def __init__(self):
self.app = self.fake_app
self.user_agent = 'some_agent'
self.request_tries = 3
def fake_app(self, env, start_response):
raise Exception()
client = InternalClient()
try:
old_sleep = internal_client.sleep
internal_client.sleep = not_sleep
self.assertRaises(
Exception, client.make_request, 'GET', '/', {}, (2,))
finally:
internal_client.sleep = old_sleep
def test_get_metadata(self):
class Response(object):
def __init__(self, headers):
self.headers = headers
self.status_int = 200
class InternalClient(internal_client.InternalClient):
def __init__(self, test, path, resp_headers):
self.test = test
self.path = path
self.resp_headers = resp_headers
self.make_request_called = 0
def make_request(
self, method, path, headers, acceptable_statuses,
body_file=None):
self.make_request_called += 1
self.test.assertEquals('HEAD', method)
self.test.assertEquals(self.path, path)
self.test.assertEquals((2,), acceptable_statuses)
self.test.assertEquals(None, body_file)
return Response(self.resp_headers)
path = 'some_path'
metadata_prefix = 'some_key-'
resp_headers = {
'%sone' % (metadata_prefix): '1',
'%sTwo' % (metadata_prefix): '2',
'%sThree' % (metadata_prefix): '3',
'some_header-four': '4',
'Some_header-five': '5',
}
exp_metadata = {
'one': '1',
'two': '2',
'three': '3',
}
client = InternalClient(self, path, resp_headers)
metadata = client._get_metadata(path, metadata_prefix)
self.assertEquals(exp_metadata, metadata)
self.assertEquals(1, client.make_request_called)
def test_get_metadata_invalid_status(self):
class FakeApp(object):
def __call__(self, environ, start_response):
start_response('404 Not Found', [('x-foo', 'bar')])
return ['nope']
class InternalClient(internal_client.InternalClient):
def __init__(self):
self.user_agent = 'test'
self.request_tries = 1
self.app = FakeApp()
client = InternalClient()
self.assertRaises(internal_client.UnexpectedResponse,
client._get_metadata, 'path')
metadata = client._get_metadata('path', metadata_prefix='x-',
acceptable_statuses=(4,))
self.assertEqual(metadata, {'foo': 'bar'})
def test_make_path(self):
account, container, obj = path_parts()
path = make_path(account, container, obj)
c = InternalClient()
self.assertEquals(path, c.make_path(account, container, obj))
def test_make_path_exception(self):
c = InternalClient()
self.assertRaises(ValueError, c.make_path, 'account', None, 'obj')
def test_iter_items(self):
class Response(object):
def __init__(self, status_int, body):
self.status_int = status_int
self.body = body
class InternalClient(internal_client.InternalClient):
def __init__(self, test, responses):
self.test = test
self.responses = responses
self.make_request_called = 0
def make_request(
self, method, path, headers, acceptable_statuses,
body_file=None):
self.make_request_called += 1
return self.responses.pop(0)
exp_items = []
responses = [Response(200, json.dumps([])), ]
items = []
client = InternalClient(self, responses)
for item in client._iter_items('/'):
items.append(item)
self.assertEquals(exp_items, items)
exp_items = []
responses = []
for i in range(3):
data = [
{'name': 'item%02d' % (2 * i)},
{'name': 'item%02d' % (2 * i + 1)}]
responses.append(Response(200, json.dumps(data)))
exp_items.extend(data)
responses.append(Response(204, ''))
items = []
client = InternalClient(self, responses)
for item in client._iter_items('/'):
items.append(item)
self.assertEquals(exp_items, items)
def test_iter_items_with_markers(self):
class Response(object):
def __init__(self, status_int, body):
self.status_int = status_int
self.body = body
class InternalClient(internal_client.InternalClient):
def __init__(self, test, paths, responses):
self.test = test
self.paths = paths
self.responses = responses
def make_request(
self, method, path, headers, acceptable_statuses,
body_file=None):
exp_path = self.paths.pop(0)
self.test.assertEquals(exp_path, path)
return self.responses.pop(0)
paths = [
'/?format=json&marker=start&end_marker=end',
'/?format=json&marker=one%C3%A9&end_marker=end',
'/?format=json&marker=two&end_marker=end',
]
responses = [
Response(200, json.dumps([{'name': 'one\xc3\xa9'}, ])),
Response(200, json.dumps([{'name': 'two'}, ])),
Response(204, ''),
]
items = []
client = InternalClient(self, paths, responses)
for item in client._iter_items('/', marker='start', end_marker='end'):
items.append(item['name'].encode('utf8'))
self.assertEquals('one\xc3\xa9 two'.split(), items)
def test_set_metadata(self):
class InternalClient(internal_client.InternalClient):
def __init__(self, test, path, exp_headers):
self.test = test
self.path = path
self.exp_headers = exp_headers
self.make_request_called = 0
def make_request(
self, method, path, headers, acceptable_statuses,
body_file=None):
self.make_request_called += 1
self.test.assertEquals('POST', method)
self.test.assertEquals(self.path, path)
self.test.assertEquals(self.exp_headers, headers)
self.test.assertEquals((2,), acceptable_statuses)
self.test.assertEquals(None, body_file)
path = 'some_path'
metadata_prefix = 'some_key-'
metadata = {
'%sone' % (metadata_prefix): '1',
'%stwo' % (metadata_prefix): '2',
'three': '3',
}
exp_headers = {
'%sone' % (metadata_prefix): '1',
'%stwo' % (metadata_prefix): '2',
'%sthree' % (metadata_prefix): '3',
}
client = InternalClient(self, path, exp_headers)
client._set_metadata(path, metadata, metadata_prefix)
self.assertEquals(1, client.make_request_called)
def test_iter_containers(self):
account, container, obj = path_parts()
path = make_path(account)
items = '0 1 2'.split()
marker = 'some_marker'
end_marker = 'some_end_marker'
acceptable_statuses = 'some_status_list'
client = IterInternalClient(
self, path, marker, end_marker, acceptable_statuses, items)
ret_items = []
for container in client.iter_containers(
account, marker, end_marker,
acceptable_statuses=acceptable_statuses):
ret_items.append(container)
self.assertEquals(items, ret_items)
def test_get_account_info(self):
class Response(object):
def __init__(self, containers, objects):
self.headers = {
'x-account-container-count': containers,
'x-account-object-count': objects,
}
self.status_int = 200
class InternalClient(internal_client.InternalClient):
def __init__(self, test, path, resp):
self.test = test
self.path = path
self.resp = resp
def make_request(
self, method, path, headers, acceptable_statuses,
body_file=None):
self.test.assertEquals('HEAD', method)
self.test.assertEquals(self.path, path)
self.test.assertEquals({}, headers)
self.test.assertEquals((2, 404), acceptable_statuses)
self.test.assertEquals(None, body_file)
return self.resp
account, container, obj = path_parts()
path = make_path(account)
containers, objects = 10, 100
client = InternalClient(self, path, Response(containers, objects))
info = client.get_account_info(account)
self.assertEquals((containers, objects), info)
def test_get_account_info_404(self):
class Response(object):
def __init__(self):
self.headers = {
'x-account-container-count': 10,
'x-account-object-count': 100,
}
self.status_int = 404
class InternalClient(internal_client.InternalClient):
def __init__(self):
pass
def make_path(self, *a, **kw):
return 'some_path'
def make_request(self, *a, **kw):
return Response()
client = InternalClient()
info = client.get_account_info('some_account')
self.assertEquals((0, 0), info)
def test_get_account_metadata(self):
account, container, obj = path_parts()
path = make_path(account)
acceptable_statuses = 'some_status_list'
metadata_prefix = 'some_metadata_prefix'
client = GetMetadataInternalClient(
self, path, metadata_prefix, acceptable_statuses)
metadata = client.get_account_metadata(
account, metadata_prefix, acceptable_statuses)
self.assertEquals(client.metadata, metadata)
self.assertEquals(1, client.get_metadata_called)
    def test_get_metadata_with_acceptable_status(self):
account, container, obj = path_parts()
path = make_path_info(account)
client, app = get_client_app()
resp_headers = {'some-important-header': 'some value'}
app.register('GET', path, swob.HTTPOk, resp_headers)
metadata = client.get_account_metadata(
account, acceptable_statuses=(2, 4))
self.assertEqual(metadata['some-important-header'],
'some value')
app.register('GET', path, swob.HTTPNotFound, resp_headers)
metadata = client.get_account_metadata(
account, acceptable_statuses=(2, 4))
self.assertEqual(metadata['some-important-header'],
'some value')
app.register('GET', path, swob.HTTPServerError, resp_headers)
self.assertRaises(internal_client.UnexpectedResponse,
client.get_account_metadata, account,
acceptable_statuses=(2, 4))
def test_set_account_metadata(self):
account, container, obj = path_parts()
path = make_path(account)
metadata = 'some_metadata'
metadata_prefix = 'some_metadata_prefix'
acceptable_statuses = 'some_status_list'
client = SetMetadataInternalClient(
self, path, metadata, metadata_prefix, acceptable_statuses)
client.set_account_metadata(
account, metadata, metadata_prefix, acceptable_statuses)
self.assertEquals(1, client.set_metadata_called)
def test_container_exists(self):
class Response(object):
def __init__(self, status_int):
self.status_int = status_int
class InternalClient(internal_client.InternalClient):
def __init__(self, test, path, resp):
self.test = test
self.path = path
self.make_request_called = 0
self.resp = resp
def make_request(
self, method, path, headers, acceptable_statuses,
body_file=None):
self.make_request_called += 1
self.test.assertEquals('HEAD', method)
self.test.assertEquals(self.path, path)
self.test.assertEquals({}, headers)
self.test.assertEquals((2, 404), acceptable_statuses)
self.test.assertEquals(None, body_file)
return self.resp
account, container, obj = path_parts()
path = make_path(account, container)
client = InternalClient(self, path, Response(200))
self.assertEquals(True, client.container_exists(account, container))
self.assertEquals(1, client.make_request_called)
client = InternalClient(self, path, Response(404))
self.assertEquals(False, client.container_exists(account, container))
self.assertEquals(1, client.make_request_called)
def test_create_container(self):
class InternalClient(internal_client.InternalClient):
def __init__(self, test, path, headers):
self.test = test
self.path = path
self.headers = headers
self.make_request_called = 0
def make_request(
self, method, path, headers, acceptable_statuses,
body_file=None):
self.make_request_called += 1
self.test.assertEquals('PUT', method)
self.test.assertEquals(self.path, path)
self.test.assertEquals(self.headers, headers)
self.test.assertEquals((2,), acceptable_statuses)
self.test.assertEquals(None, body_file)
account, container, obj = path_parts()
path = make_path(account, container)
headers = 'some_headers'
client = InternalClient(self, path, headers)
client.create_container(account, container, headers)
self.assertEquals(1, client.make_request_called)
def test_delete_container(self):
class InternalClient(internal_client.InternalClient):
def __init__(self, test, path):
self.test = test
self.path = path
self.make_request_called = 0
def make_request(
self, method, path, headers, acceptable_statuses,
body_file=None):
self.make_request_called += 1
self.test.assertEquals('DELETE', method)
self.test.assertEquals(self.path, path)
self.test.assertEquals({}, headers)
self.test.assertEquals((2, 404), acceptable_statuses)
self.test.assertEquals(None, body_file)
account, container, obj = path_parts()
path = make_path(account, container)
client = InternalClient(self, path)
client.delete_container(account, container)
self.assertEquals(1, client.make_request_called)
def test_get_container_metadata(self):
account, container, obj = path_parts()
path = make_path(account, container)
metadata_prefix = 'some_metadata_prefix'
acceptable_statuses = 'some_status_list'
client = GetMetadataInternalClient(
self, path, metadata_prefix, acceptable_statuses)
metadata = client.get_container_metadata(
account, container, metadata_prefix, acceptable_statuses)
self.assertEquals(client.metadata, metadata)
self.assertEquals(1, client.get_metadata_called)
def test_iter_objects(self):
account, container, obj = path_parts()
path = make_path(account, container)
        marker = 'some_marker'
end_marker = 'some_end_marker'
acceptable_statuses = 'some_status_list'
items = '0 1 2'.split()
client = IterInternalClient(
self, path, marker, end_marker, acceptable_statuses, items)
ret_items = []
for obj in client.iter_objects(
account, container, marker, end_marker, acceptable_statuses):
ret_items.append(obj)
self.assertEquals(items, ret_items)
def test_set_container_metadata(self):
account, container, obj = path_parts()
path = make_path(account, container)
metadata = 'some_metadata'
metadata_prefix = 'some_metadata_prefix'
acceptable_statuses = 'some_status_list'
client = SetMetadataInternalClient(
self, path, metadata, metadata_prefix, acceptable_statuses)
client.set_container_metadata(
account, container, metadata, metadata_prefix, acceptable_statuses)
self.assertEquals(1, client.set_metadata_called)
def test_delete_object(self):
class InternalClient(internal_client.InternalClient):
def __init__(self, test, path):
self.test = test
self.path = path
self.make_request_called = 0
def make_request(
self, method, path, headers, acceptable_statuses,
body_file=None):
self.make_request_called += 1
self.test.assertEquals('DELETE', method)
self.test.assertEquals(self.path, path)
self.test.assertEquals({}, headers)
self.test.assertEquals((2, 404), acceptable_statuses)
self.test.assertEquals(None, body_file)
account, container, obj = path_parts()
path = make_path(account, container, obj)
client = InternalClient(self, path)
client.delete_object(account, container, obj)
self.assertEquals(1, client.make_request_called)
def test_get_object_metadata(self):
account, container, obj = path_parts()
path = make_path(account, container, obj)
metadata_prefix = 'some_metadata_prefix'
acceptable_statuses = 'some_status_list'
client = GetMetadataInternalClient(
self, path, metadata_prefix, acceptable_statuses)
metadata = client.get_object_metadata(
account, container, obj, metadata_prefix,
acceptable_statuses)
self.assertEquals(client.metadata, metadata)
self.assertEquals(1, client.get_metadata_called)
def test_get_metadata_extra_headers(self):
class InternalClient(internal_client.InternalClient):
def __init__(self):
self.app = self.fake_app
self.user_agent = 'some_agent'
self.request_tries = 3
def fake_app(self, env, start_response):
self.req_env = env
start_response('200 Ok', [('Content-Length', '0')])
return []
client = InternalClient()
headers = {'X-Foo': 'bar'}
client.get_object_metadata('account', 'container', 'obj',
headers=headers)
self.assertEqual(client.req_env['HTTP_X_FOO'], 'bar')
def test_get_object(self):
account, container, obj = path_parts()
path_info = make_path_info(account, container, obj)
client, app = get_client_app()
headers = {'foo': 'bar'}
body = 'some_object_body'
app.register('GET', path_info, swob.HTTPOk, headers, body)
req_headers = {'x-important-header': 'some_important_value'}
status_int, resp_headers, obj_iter = client.get_object(
account, container, obj, req_headers)
self.assertEqual(status_int // 100, 2)
for k, v in headers.items():
self.assertEqual(v, resp_headers[k])
self.assertEqual(''.join(obj_iter), body)
self.assertEqual(resp_headers['content-length'], str(len(body)))
self.assertEqual(app.call_count, 1)
req_headers.update({
'host': 'localhost:80', # from swob.Request.blank
'user-agent': 'test', # from InternalClient.make_request
})
self.assertEqual(app.calls_with_headers, [(
'GET', path_info, swob.HeaderKeyDict(req_headers))])
def test_iter_object_lines(self):
class InternalClient(internal_client.InternalClient):
def __init__(self, lines):
self.lines = lines
self.app = self.fake_app
self.user_agent = 'some_agent'
self.request_tries = 3
def fake_app(self, env, start_response):
start_response('200 Ok', [('Content-Length', '0')])
return ['%s\n' % x for x in self.lines]
lines = 'line1 line2 line3'.split()
client = InternalClient(lines)
ret_lines = []
for line in client.iter_object_lines('account', 'container', 'object'):
ret_lines.append(line)
self.assertEquals(lines, ret_lines)
def test_iter_object_lines_compressed_object(self):
class InternalClient(internal_client.InternalClient):
def __init__(self, lines):
self.lines = lines
self.app = self.fake_app
self.user_agent = 'some_agent'
self.request_tries = 3
def fake_app(self, env, start_response):
start_response('200 Ok', [('Content-Length', '0')])
return internal_client.CompressingFileReader(
StringIO('\n'.join(self.lines)))
lines = 'line1 line2 line3'.split()
client = InternalClient(lines)
ret_lines = []
for line in client.iter_object_lines(
'account', 'container', 'object.gz'):
ret_lines.append(line)
self.assertEquals(lines, ret_lines)
def test_iter_object_lines_404(self):
class InternalClient(internal_client.InternalClient):
def __init__(self):
self.app = self.fake_app
self.user_agent = 'some_agent'
self.request_tries = 3
def fake_app(self, env, start_response):
start_response('404 Not Found', [])
return ['one\ntwo\nthree']
client = InternalClient()
lines = []
for line in client.iter_object_lines(
'some_account', 'some_container', 'some_object',
acceptable_statuses=(2, 404)):
lines.append(line)
self.assertEquals([], lines)
def test_set_object_metadata(self):
account, container, obj = path_parts()
path = make_path(account, container, obj)
metadata = 'some_metadata'
metadata_prefix = 'some_metadata_prefix'
acceptable_statuses = 'some_status_list'
client = SetMetadataInternalClient(
self, path, metadata, metadata_prefix, acceptable_statuses)
client.set_object_metadata(
account, container, obj, metadata, metadata_prefix,
acceptable_statuses)
self.assertEquals(1, client.set_metadata_called)
def test_upload_object(self):
class InternalClient(internal_client.InternalClient):
def __init__(self, test, path, headers, fobj):
self.test = test
self.path = path
self.headers = headers
self.fobj = fobj
self.make_request_called = 0
def make_request(
self, method, path, headers, acceptable_statuses,
body_file=None):
self.make_request_called += 1
self.test.assertEquals(self.path, path)
exp_headers = dict(self.headers)
exp_headers['Transfer-Encoding'] = 'chunked'
self.test.assertEquals(exp_headers, headers)
                self.test.assertEquals(self.fobj, body_file)
fobj = 'some_fobj'
account, container, obj = path_parts()
path = make_path(account, container, obj)
headers = {'key': 'value'}
client = InternalClient(self, path, headers, fobj)
client.upload_object(fobj, account, container, obj, headers)
self.assertEquals(1, client.make_request_called)
def test_upload_object_not_chunked(self):
class InternalClient(internal_client.InternalClient):
def __init__(self, test, path, headers, fobj):
self.test = test
self.path = path
self.headers = headers
self.fobj = fobj
self.make_request_called = 0
def make_request(
self, method, path, headers, acceptable_statuses,
body_file=None):
self.make_request_called += 1
self.test.assertEquals(self.path, path)
exp_headers = dict(self.headers)
self.test.assertEquals(exp_headers, headers)
                self.test.assertEquals(self.fobj, body_file)
fobj = 'some_fobj'
account, container, obj = path_parts()
path = make_path(account, container, obj)
headers = {'key': 'value', 'Content-Length': len(fobj)}
client = InternalClient(self, path, headers, fobj)
client.upload_object(fobj, account, container, obj, headers)
self.assertEquals(1, client.make_request_called)
class TestGetAuth(unittest.TestCase):
@mock.patch('eventlet.green.urllib2.urlopen')
@mock.patch('eventlet.green.urllib2.Request')
def test_ok(self, request, urlopen):
def getheader(name):
d = {'X-Storage-Url': 'url', 'X-Auth-Token': 'token'}
return d.get(name)
urlopen.return_value.info.return_value.getheader = getheader
url, token = internal_client.get_auth(
'http://127.0.0.1', 'user', 'key')
self.assertEqual(url, "url")
self.assertEqual(token, "token")
request.assert_called_with('http://127.0.0.1')
request.return_value.add_header.assert_any_call('X-Auth-User', 'user')
request.return_value.add_header.assert_any_call('X-Auth-Key', 'key')
def test_invalid_version(self):
self.assertRaises(SystemExit, internal_client.get_auth,
'http://127.0.0.1', 'user', 'key', auth_version=2.0)
mock_time_value = 1401224049.98
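# each call advances the fake clock by exactly one second, giving the retry
# logger deterministic start/end stamps (1401224050.98 and 1401224051.98 below)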
def mock_time():
global mock_time_value
mock_time_value += 1
return mock_time_value
class TestSimpleClient(unittest.TestCase):
@mock.patch('eventlet.green.urllib2.urlopen')
@mock.patch('eventlet.green.urllib2.Request')
@mock.patch('swift.common.internal_client.time', mock_time)
def test_get(self, request, urlopen):
# basic GET request, only url as kwarg
request.return_value.get_type.return_value = "http"
urlopen.return_value.read.return_value = ''
urlopen.return_value.getcode.return_value = 200
urlopen.return_value.info.return_value = {'content-length': '345'}
sc = internal_client.SimpleClient(url='http://127.0.0.1')
logger = FakeLogger()
retval = sc.retry_request(
'GET', headers={'content-length': '123'}, logger=logger)
self.assertEqual(urlopen.call_count, 1)
request.assert_called_with('http://127.0.0.1?format=json',
headers={'content-length': '123'},
data=None)
self.assertEqual([None, None], retval)
self.assertEqual('GET', request.return_value.get_method())
self.assertEqual(logger.log_dict['debug'], [(
('-> 2014-05-27T20:54:11 GET http://127.0.0.1%3Fformat%3Djson 200 '
'123 345 1401224050.98 1401224051.98 1.0 -',), {})])
# Check if JSON is decoded
urlopen.return_value.read.return_value = '{}'
retval = sc.retry_request('GET')
self.assertEqual([None, {}], retval)
# same as above, now with token
sc = internal_client.SimpleClient(url='http://127.0.0.1',
token='token')
retval = sc.retry_request('GET')
request.assert_called_with('http://127.0.0.1?format=json',
headers={'X-Auth-Token': 'token'},
data=None)
self.assertEqual([None, {}], retval)
# same as above, now with prefix
sc = internal_client.SimpleClient(url='http://127.0.0.1',
token='token')
retval = sc.retry_request('GET', prefix="pre_")
request.assert_called_with('http://127.0.0.1?format=json&prefix=pre_',
headers={'X-Auth-Token': 'token'},
data=None)
self.assertEqual([None, {}], retval)
# same as above, now with container name
retval = sc.retry_request('GET', container='cont')
request.assert_called_with('http://127.0.0.1/cont?format=json',
headers={'X-Auth-Token': 'token'},
data=None)
self.assertEqual([None, {}], retval)
# same as above, now with object name
retval = sc.retry_request('GET', container='cont', name='obj')
request.assert_called_with('http://127.0.0.1/cont/obj',
headers={'X-Auth-Token': 'token'},
data=None)
self.assertEqual([None, {}], retval)
@mock.patch('eventlet.green.urllib2.urlopen')
@mock.patch('eventlet.green.urllib2.Request')
def test_get_with_retries_all_failed(self, request, urlopen):
# Simulate a failing request, ensure retries done
request.return_value.get_type.return_value = "http"
urlopen.side_effect = urllib2.URLError('')
sc = internal_client.SimpleClient(url='http://127.0.0.1', retries=1)
with mock.patch('swift.common.internal_client.sleep') as mock_sleep:
self.assertRaises(urllib2.URLError, sc.retry_request, 'GET')
self.assertEqual(mock_sleep.call_count, 1)
self.assertEqual(request.call_count, 2)
self.assertEqual(urlopen.call_count, 2)
@mock.patch('eventlet.green.urllib2.urlopen')
@mock.patch('eventlet.green.urllib2.Request')
def test_get_with_retries(self, request, urlopen):
# First request fails, retry successful
request.return_value.get_type.return_value = "http"
mock_resp = mock.MagicMock()
mock_resp.read.return_value = ''
urlopen.side_effect = [urllib2.URLError(''), mock_resp]
sc = internal_client.SimpleClient(url='http://127.0.0.1', retries=1,
token='token')
with mock.patch('swift.common.internal_client.sleep') as mock_sleep:
retval = sc.retry_request('GET')
self.assertEqual(mock_sleep.call_count, 1)
self.assertEqual(request.call_count, 2)
self.assertEqual(urlopen.call_count, 2)
request.assert_called_with('http://127.0.0.1?format=json', data=None,
headers={'X-Auth-Token': 'token'})
self.assertEqual([None, None], retval)
self.assertEqual(sc.attempts, 2)
@mock.patch('eventlet.green.urllib2.urlopen')
def test_get_with_retries_param(self, mock_urlopen):
mock_response = mock.MagicMock()
mock_response.read.return_value = ''
mock_urlopen.side_effect = internal_client.httplib.BadStatusLine('')
c = internal_client.SimpleClient(url='http://127.0.0.1', token='token')
self.assertEqual(c.retries, 5)
# first without retries param
with mock.patch('swift.common.internal_client.sleep') as mock_sleep:
self.assertRaises(internal_client.httplib.BadStatusLine,
c.retry_request, 'GET')
self.assertEqual(mock_sleep.call_count, 5)
self.assertEqual(mock_urlopen.call_count, 6)
# then with retries param
mock_urlopen.reset_mock()
with mock.patch('swift.common.internal_client.sleep') as mock_sleep:
self.assertRaises(internal_client.httplib.BadStatusLine,
c.retry_request, 'GET', retries=2)
self.assertEqual(mock_sleep.call_count, 2)
self.assertEqual(mock_urlopen.call_count, 3)
# and this time with a real response
mock_urlopen.reset_mock()
mock_urlopen.side_effect = [internal_client.httplib.BadStatusLine(''),
mock_response]
with mock.patch('swift.common.internal_client.sleep') as mock_sleep:
retval = c.retry_request('GET', retries=1)
self.assertEqual(mock_sleep.call_count, 1)
self.assertEqual(mock_urlopen.call_count, 2)
self.assertEqual([None, None], retval)
@mock.patch('eventlet.green.urllib2.urlopen')
def test_request_with_retries_with_HTTPError(self, mock_urlopen):
mock_response = mock.MagicMock()
mock_response.read.return_value = ''
c = internal_client.SimpleClient(url='http://127.0.0.1', token='token')
self.assertEqual(c.retries, 5)
for request_method in 'GET PUT POST DELETE HEAD COPY'.split():
mock_urlopen.reset_mock()
mock_urlopen.side_effect = urllib2.HTTPError(*[None] * 5)
with mock.patch('swift.common.internal_client.sleep') \
as mock_sleep:
self.assertRaises(urllib2.HTTPError,
c.retry_request, request_method, retries=1)
self.assertEqual(mock_sleep.call_count, 1)
self.assertEqual(mock_urlopen.call_count, 2)
@mock.patch('eventlet.green.urllib2.urlopen')
def test_request_container_with_retries_with_HTTPError(self,
mock_urlopen):
mock_response = mock.MagicMock()
mock_response.read.return_value = ''
c = internal_client.SimpleClient(url='http://127.0.0.1', token='token')
self.assertEqual(c.retries, 5)
for request_method in 'GET PUT POST DELETE HEAD COPY'.split():
mock_urlopen.reset_mock()
mock_urlopen.side_effect = urllib2.HTTPError(*[None] * 5)
with mock.patch('swift.common.internal_client.sleep') \
as mock_sleep:
self.assertRaises(urllib2.HTTPError,
c.retry_request, request_method,
container='con', retries=1)
self.assertEqual(mock_sleep.call_count, 1)
self.assertEqual(mock_urlopen.call_count, 2)
@mock.patch('eventlet.green.urllib2.urlopen')
def test_request_object_with_retries_with_HTTPError(self,
mock_urlopen):
mock_response = mock.MagicMock()
mock_response.read.return_value = ''
c = internal_client.SimpleClient(url='http://127.0.0.1', token='token')
self.assertEqual(c.retries, 5)
for request_method in 'GET PUT POST DELETE HEAD COPY'.split():
mock_urlopen.reset_mock()
mock_urlopen.side_effect = urllib2.HTTPError(*[None] * 5)
with mock.patch('swift.common.internal_client.sleep') \
as mock_sleep:
self.assertRaises(urllib2.HTTPError,
c.retry_request, request_method,
container='con', name='obj', retries=1)
self.assertEqual(mock_sleep.call_count, 1)
self.assertEqual(mock_urlopen.call_count, 2)
def test_proxy(self):
# check that proxy arg is passed through to the urllib Request
scheme = 'http'
proxy_host = '127.0.0.1:80'
proxy = '%s://%s' % (scheme, proxy_host)
url = 'https://127.0.0.1:1/a'
class FakeConn(object):
def read(self):
return 'irrelevant'
mocked = 'swift.common.internal_client.urllib2.urlopen'
# module level methods
for func in (internal_client.put_object,
internal_client.delete_object):
with mock.patch(mocked) as mock_urlopen:
mock_urlopen.return_value = FakeConn()
func(url, container='c', name='o1', contents='', proxy=proxy,
timeout=0.1, retries=0)
self.assertEqual(1, mock_urlopen.call_count)
args, kwargs = mock_urlopen.call_args
self.assertEqual(1, len(args))
self.assertEqual(1, len(kwargs))
self.assertEqual(0.1, kwargs['timeout'])
self.assertTrue(isinstance(args[0], urllib2.Request))
self.assertEqual(proxy_host, args[0].host)
self.assertEqual(scheme, args[0].type)
# class methods
content = mock.MagicMock()
cl = internal_client.SimpleClient(url)
scenarios = ((cl.get_account, []),
(cl.get_container, ['c']),
(cl.put_container, ['c']),
(cl.put_object, ['c', 'o', content]))
for scenario in scenarios:
with mock.patch(mocked) as mock_urlopen:
mock_urlopen.return_value = FakeConn()
scenario[0](*scenario[1], proxy=proxy, timeout=0.1)
self.assertEqual(1, mock_urlopen.call_count)
args, kwargs = mock_urlopen.call_args
self.assertEqual(1, len(args))
self.assertEqual(1, len(kwargs))
self.assertEqual(0.1, kwargs['timeout'])
self.assertTrue(isinstance(args[0], urllib2.Request))
self.assertEqual(proxy_host, args[0].host)
self.assertEqual(scheme, args[0].type)
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2015 Brocade Communications Systems, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from networking_brocade.mlx.services.l3_router.brocade import (
l3_router_plugin as brocadel3routerplugin)
from neutron import context
from neutron.db import l3_db
from neutron.i18n import _LE
from neutron.services.l3_router import l3_router_plugin as router
from neutron.tests import base
from oslo_utils import importutils
MECHANISM_NAME = ('networking_brocade.mlx.services.l3_router.brocade.'
'l3_router_plugin.BrocadeRouterPlugin')
VE = 've '
config_map = {}
vlan_map = {}
interface_map = {}
device_map = {
'mlx': {
'address': '2.2.2.2',
'username': 'admin',
'password': 'pass',
'physical_networks': 'physnet1',
'ports': '2/1,2/2',
'os_type': 'NI'
}
}
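# expected device config after add_router_interface: VLAN 402 carrying a
# virtual interface 've 402' with the gateway CIDR; after removal the VLAN
# entry is left empty (see delete_config_map)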
add_config_map = {
'2.2.2.2': {
'402': {'ve 402': '12.0.0.1/24'
}
}
}
delete_config_map = {
'2.2.2.2': {
'402': {}
}
}
interface_info = {
'subnet_id': 'subnet1',
'port_id': 'port1'}
vlan_id = '402'
gateway_ip_cidr = '12.0.0.1/24'
ROUTER = 'router1'
SUBNET = 'subnet1'
PORT = 'port1'
INVALID_INPUT = "Invalid input"
RANGE_ERROR = "outside of allowed max"
class TestBrocadeL3RouterPlugin(base.BaseTestCase, router.L3RouterPlugin):
"""
Test Brocade L3 Router FI/NI plugin.
"""
def setUp(self):
_mechanism_name = MECHANISM_NAME
def mocked_initialize(self):
self._devices = device_map
with mock.patch.object(brocadel3routerplugin
.BrocadeRouterPlugin,
'brocade_init', new=mocked_initialize):
super(TestBrocadeL3RouterPlugin, self).setUp()
self.driver = importutils.import_object(_mechanism_name)
@mock.patch.object(brocadel3routerplugin.BrocadeRouterPlugin,
'_get_driver')
@mock.patch.object(brocadel3routerplugin.BrocadeRouterPlugin,
'_get_network_info')
@mock.patch.object(l3_db.L3_NAT_dbonly_mixin, '_core_plugin')
@mock.patch.object(router.L3RouterPlugin, 'add_router_interface')
def test_add_router_interface(
self, mock_super, mock_core_plugin, mock_network_info,
mock_driver):
mock_driver.side_effect = self.side_effect
mech_ctx = self._get_network_context('physnet1', 'vlan')
ctx = mech_ctx._plugin_context
ctx.session.begin = mock.MagicMock()
mock_super.return_value = interface_info
mock_core_plugin.side_effect = mock.MagicMock()
mock_network_info.side_effect = self.side_effect_network_info
self.driver.add_router_interface(ctx, ROUTER,
interface_info)
self.assertDictSupersetOf(config_map, add_config_map)
@mock.patch.object(router.L3RouterPlugin, 'remove_router_interface')
@mock.patch.object(brocadel3routerplugin.BrocadeRouterPlugin,
'_get_driver')
@mock.patch.object(brocadel3routerplugin.BrocadeRouterPlugin,
'_get_network_info')
@mock.patch.object(l3_db.L3_NAT_dbonly_mixin, '_core_plugin')
@mock.patch.object(router.L3RouterPlugin, 'add_router_interface')
def test_add_router_interface_exception(
self, mock_super, mock_core_plugin, mock_network_info,
mock_driver, mock_super_remove):
mock_driver.side_effect = self.side_effect_error
mech_ctx = self._get_network_context('physnet1', 'vlan')
ctx = mech_ctx._plugin_context
ctx.session.begin = mock.MagicMock()
mock_super.return_value = interface_info
mock_core_plugin.side_effect = mock.MagicMock()
mock_network_info.side_effect = self.side_effect_network_info
mock_super_remove.return_value = interface_info
self.assertRaisesRegexp(
Exception, (_LE("BrocadeRouterPlugin")),
self.driver.add_router_interface,
ctx, ROUTER, interface_info)
@mock.patch.object(brocadel3routerplugin.BrocadeRouterPlugin,
'_get_driver')
@mock.patch.object(brocadel3routerplugin.BrocadeRouterPlugin,
'_get_network_info')
@mock.patch.object(l3_db.L3_NAT_dbonly_mixin, '_core_plugin')
@mock.patch.object(router.L3RouterPlugin, 'remove_router_interface')
def test_remove_router_interface(
self, mock_super, mock_core_plugin, mock_network_info,
mock_driver):
mock_driver.side_effect = self.side_effect
mech_ctx = self._get_network_context('physnet1', 'vlan')
ctx = mech_ctx._plugin_context
ctx.session.begin = mock.MagicMock()
mock_super.return_value = interface_info
mock_core_plugin.side_effect = mock.MagicMock()
mock_network_info.side_effect = self.side_effect_network_info
ve_map = {'ve 402': '12.0.0.1/24'}
vlan_map.update({'402': ve_map})
config_map.update({'2.2.2.2': vlan_map})
self.driver.remove_router_interface(ctx, ROUTER,
interface_info)
self.assertDictSupersetOf(config_map, delete_config_map)
def side_effect(self, dev_name):
"""
Mock _get_driver method and return FakeDriver
"""
device = device_map.get(dev_name)
return FakeDriver(device)
def side_effect_error(self, dev_name):
"""
Mock _get_driver method and return a FakeDriver that raises errors
"""
device = device_map.get(dev_name)
return FakeDriver(device, error=True)
def _get_network_context(self, physnet, network_type):
"""
Create mock network context
"""
network = {
'id': 1,
'name': 'private',
'tenant_id': 1,
'vlan': 200,
'network_type': network_type,
'provider:segmentation_id': 200
}
network_segments = [{
'id': 1,
'segmentation_id': 200,
'network_type': network_type,
'physical_network': physnet
}]
_plugin_context = context.get_admin_context()
return FakeNetworkContext(network, network_segments, _plugin_context)
def side_effect_network_info(self, vlan, ip):
"""
Mock _get_network_info method and return the vlan id and gateway ip cidr
"""
return vlan_id, gateway_ip_cidr
class FakeNetworkContext(object):
"""To generate network context for testing purposes only."""
def __init__(self, network, segments=None, original_network=None):
self._network = network
self._original_network = original_network
self._segments = segments
@property
def current(self):
return self._network
@property
def _plugin_context(self):
return self._original_network
@property
def network_segments(self):
return self._segments
class FakeDriver(object):
"""
Fake driver which will implement create and delete
network. Create network will update the global dictionary with
the address of the device along with vlan and ports to be tagged.
Example : {'10.10.23.1':{'200':['1/1/1', '1/1/2']}}
Delete network will delete the corresponding entry from the dictionary.
"""
def __init__(self, device, error=None):
self.error = error
self.device = device
self.address = device.get('address')
def add_router_interface(self, vlan_id, gateway_ip_cidr):
if self.error == INVALID_INPUT:
raise Exception("Ethernet Driver : Create "
"network failed: error= Invalid Input")
elif self.error == RANGE_ERROR:
raise Exception("Configuring router interface failed: "
"ve out of range error")
elif self.error:
raise Exception("Add Router Interface failed")
interface_map.update({VE + vlan_id: gateway_ip_cidr})
vlan_map.update({vlan_id: interface_map})
config_map.update({self.address: vlan_map})
def remove_router_interface(self, vlan_id):
if vlan_id in config_map[self.address]:
config_map[self.address].update({vlan_id: {}})
else:
raise Exception("vlan is not there")
|
|
# Copyright (c) 2021, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from ...models.neural_network import NeuralNetworkBuilder as _NeuralNetworkBuilder
from ...proto import FeatureTypes_pb2 as _FeatureTypes_pb2
from ...models import datatypes, _METADATA_VERSION, _METADATA_SOURCE
from ...models import MLModel as _MLModel
from ...models import (
_MLMODEL_FULL_PRECISION,
_MLMODEL_HALF_PRECISION,
_VALID_MLMODEL_PRECISION_TYPES,
)
from ...models._deprecation import deprecated as _deprecated
from ...models.utils import _convert_neural_network_spec_weights_to_fp16
from ..._deps import _HAS_KERAS_TF
from ..._deps import _HAS_KERAS2_TF
from coremltools import __version__ as ct_version
if _HAS_KERAS_TF:
import keras as _keras
from . import _layers
from . import _topology
_KERAS_LAYER_REGISTRY = {
_keras.layers.core.Dense: _layers.convert_dense,
_keras.layers.core.Activation: _layers.convert_activation,
_keras.layers.advanced_activations.LeakyReLU: _layers.convert_activation,
_keras.layers.advanced_activations.PReLU: _layers.convert_activation,
_keras.layers.advanced_activations.ELU: _layers.convert_activation,
_keras.layers.advanced_activations.ParametricSoftplus: _layers.convert_activation,
_keras.layers.advanced_activations.ThresholdedReLU: _layers.convert_activation,
_keras.activations.softmax: _layers.convert_activation,
_keras.layers.convolutional.Convolution2D: _layers.convert_convolution,
_keras.layers.convolutional.Deconvolution2D: _layers.convert_convolution,
_keras.layers.convolutional.AtrousConvolution2D: _layers.convert_convolution,
_keras.layers.convolutional.AveragePooling2D: _layers.convert_pooling,
_keras.layers.convolutional.MaxPooling2D: _layers.convert_pooling,
_keras.layers.pooling.GlobalAveragePooling2D: _layers.convert_pooling,
_keras.layers.pooling.GlobalMaxPooling2D: _layers.convert_pooling,
_keras.layers.convolutional.ZeroPadding2D: _layers.convert_padding,
_keras.layers.convolutional.Cropping2D: _layers.convert_cropping,
_keras.layers.convolutional.UpSampling2D: _layers.convert_upsample,
_keras.layers.convolutional.Convolution1D: _layers.convert_convolution1d,
_keras.layers.convolutional.AtrousConvolution1D: _layers.convert_convolution1d,
_keras.layers.convolutional.AveragePooling1D: _layers.convert_pooling,
_keras.layers.convolutional.MaxPooling1D: _layers.convert_pooling,
_keras.layers.pooling.GlobalAveragePooling1D: _layers.convert_pooling,
_keras.layers.pooling.GlobalMaxPooling1D: _layers.convert_pooling,
_keras.layers.convolutional.ZeroPadding1D: _layers.convert_padding,
_keras.layers.convolutional.Cropping1D: _layers.convert_cropping,
_keras.layers.convolutional.UpSampling1D: _layers.convert_upsample,
_keras.layers.recurrent.LSTM: _layers.convert_lstm,
_keras.layers.recurrent.SimpleRNN: _layers.convert_simple_rnn,
_keras.layers.recurrent.GRU: _layers.convert_gru,
_keras.layers.wrappers.Bidirectional: _layers.convert_bidirectional,
_keras.layers.normalization.BatchNormalization: _layers.convert_batchnorm,
_keras.engine.topology.Merge: _layers.convert_merge,
_keras.layers.core.Flatten: _layers.convert_flatten,
_keras.layers.core.Permute: _layers.convert_permute,
_keras.layers.core.Reshape: _layers.convert_reshape,
_keras.layers.embeddings.Embedding: _layers.convert_embedding,
_keras.layers.core.RepeatVector: _layers.convert_repeat_vector,
## All the layers that can be skipped (merged with conv)
_keras.engine.topology.InputLayer: _layers.default_skip,
_keras.layers.core.Dropout: _layers.default_skip,
_keras.layers.wrappers.TimeDistributed: _layers.default_skip,
}
_KERAS_SKIP_LAYERS = [
_keras.layers.core.Dropout,
]
def _check_unsupported_layers(model):
for i, layer in enumerate(model.layers):
if isinstance(layer, _keras.models.Sequential) or isinstance(
layer, _keras.models.Model
):
_check_unsupported_layers(layer)
else:
if type(layer) not in _KERAS_LAYER_REGISTRY:
raise ValueError("Keras layer '%s' not supported. " % str(type(layer)))
if isinstance(layer, _keras.engine.topology.Merge):
if layer.layers is None:
continue
for merge_layer in layer.layers:
if isinstance(merge_layer, _keras.models.Sequential) or isinstance(
merge_layer, _keras.models.Model
):
_check_unsupported_layers(merge_layer)
if isinstance(layer, _keras.layers.wrappers.TimeDistributed):
if type(layer.layer) not in _KERAS_LAYER_REGISTRY:
raise ValueError(
"Keras layer '%s' not supported. " % str(type(layer.layer))
)
if isinstance(layer, _keras.layers.wrappers.Bidirectional):
if not isinstance(layer.layer, _keras.layers.recurrent.LSTM):
raise ValueError(
"Keras bi-directional wrapper conversion supports only "
"LSTM layer at this time. "
)
def _get_layer_converter_fn(layer):
"""Get the right converter function for Keras
"""
layer_type = type(layer)
if layer_type in _KERAS_LAYER_REGISTRY:
return _KERAS_LAYER_REGISTRY[layer_type]
else:
raise TypeError("Keras layer of type %s is not supported." % type(layer))
def _load_keras_model(model_network_path, model_weight_path, custom_objects=None):
"""Load a keras model from disk
Parameters
----------
model_network_path: str
Path where the model network path is (json file)
model_weight_path: str
Path where the model network weights are (hd5 file)
custom_objects:
A dictionary of layers or other custom classes
or functions used by the model
Returns
-------
model: A keras model
"""
from keras.models import model_from_json
# Load the model network (model_from_json expects the raw JSON string)
with open(model_network_path, "r") as json_file:
json_string = json_file.read()
if not custom_objects:
custom_objects = {}
# Load the model weights
loaded_model = model_from_json(json_string, custom_objects=custom_objects)
loaded_model.load_weights(model_weight_path)
return loaded_model
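# Illustrative usage sketch (not part of the original converter); the file
# names below are placeholders for a Keras 1.x architecture/weights pair:
#
#     model = _load_keras_model('model.json', 'model_weights.h5')
#     spec = _convert(model, input_names='data', output_names='probs')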
def _convert(
model,
input_names=None,
output_names=None,
image_input_names=None,
is_bgr=False,
red_bias=0.0,
green_bias=0.0,
blue_bias=0.0,
gray_bias=0.0,
image_scale=1.0,
class_labels=None,
predicted_feature_name=None,
predicted_probabilities_output="",
custom_objects=None,
respect_trainable=False,
):
if not _HAS_KERAS_TF:
raise RuntimeError(
"Keras not found or unsupported version or backend "
"found. Keras conversion API is disabled."
)
if isinstance(model, str):
model = _keras.models.load_model(model, custom_objects=custom_objects)
elif isinstance(model, tuple):
model = _load_keras_model(model[0], model[1], custom_objects=custom_objects)
# Check valid versions
_check_unsupported_layers(model)
# Build network graph to represent Keras model
graph = _topology.NetGraph(model)
graph.build()
graph.remove_skip_layers(_KERAS_SKIP_LAYERS)
graph.insert_1d_permute_layers()
graph.insert_permute_for_spatial_bn()
graph.defuse_activation()
graph.remove_internal_input_layers()
graph.make_output_layers()
# The graph should be finalized before executing this
graph.generate_blob_names()
graph.add_recurrent_optionals()
inputs = graph.get_input_layers()
outputs = graph.get_output_layers()
# check input / output names validity
if input_names is not None:
if isinstance(input_names, str):
input_names = [input_names]
else:
input_names = ["input" + str(i + 1) for i in range(len(inputs))]
if output_names is not None:
if isinstance(output_names, str):
output_names = [output_names]
else:
output_names = ["output" + str(i + 1) for i in range(len(outputs))]
if image_input_names is not None and isinstance(image_input_names, str):
image_input_names = [image_input_names]
graph.reset_model_input_names(input_names)
graph.reset_model_output_names(output_names)
# Keras -> Core ML input dimension dictionary
# (None, None) -> [1, 1, 1, 1, 1]
# (None, D) -> [D] or [D, 1, 1, 1, 1]
# (None, Seq, D) -> [Seq, 1, D, 1, 1]
# (None, H, W, C) -> [C, H, W]
# (D) -> [D]
# (Seq, D) -> [Seq, 1, 1, D, 1]
# (Batch, Sequence, D) -> [D]
# Retrieve input shapes from model
if type(model.input_shape) is list:
input_dims = [list(filter(None, x)) for x in model.input_shape]
unfiltered_shapes = model.input_shape
else:
input_dims = [list(filter(None, model.input_shape))]
unfiltered_shapes = [model.input_shape]
for idx, dim in enumerate(input_dims):
unfiltered_shape = unfiltered_shapes[idx]
if len(dim) == 0:
# Used to be [None, None] before filtering; indicating unknown
# sequence length
input_dims[idx] = tuple([1])
elif len(dim) == 1:
s = graph.get_successors(inputs[idx])[0]
if isinstance(graph.get_keras_layer(s), _keras.layers.embeddings.Embedding):
# Embedding layer's special input (None, D) where D is actually
# sequence length
input_dims[idx] = (1,)
else:
input_dims[idx] = dim # dim is just a number
elif len(dim) == 2: # [Seq, D]
input_dims[idx] = (dim[1],)
elif len(dim) == 3: # H,W,C
if len(unfiltered_shape) > 3:
# keras uses the reverse notation from us
input_dims[idx] = (dim[2], dim[0], dim[1])
else: # keras provided fixed batch and sequence length, so the input
# was (batch, sequence, channel)
input_dims[idx] = (dim[2],)
else:
raise ValueError(
"Input" + input_names[idx] + "has input shape of length" + str(len(dim))
)
# Retrieve output shapes from model
if type(model.output_shape) is list:
output_dims = [list(filter(None, x)) for x in model.output_shape]
else:
output_dims = [list(filter(None, model.output_shape[1:]))]
for idx, dim in enumerate(output_dims):
if len(dim) == 1:
output_dims[idx] = dim
elif len(dim) == 2: # [Seq, D]
output_dims[idx] = (dim[1],)
elif len(dim) == 3:
output_dims[idx] = (dim[2], dim[1], dim[0])
input_types = [datatypes.Array(*dim) for dim in input_dims]
output_types = [datatypes.Array(*dim) for dim in output_dims]
# Some of the feature handling is sensitive about string vs. unicode
input_names = map(str, input_names)
output_names = map(str, output_names)
is_classifier = class_labels is not None
if is_classifier:
mode = "classifier"
else:
mode = None
# assuming these match
input_features = list(zip(input_names, input_types))
output_features = list(zip(output_names, output_types))
builder = _NeuralNetworkBuilder(input_features, output_features, mode=mode)
for i, layer in enumerate(graph.layer_list):
keras_layer = graph.keras_layer_map[layer]
print("%d : %s, %s" % (i, layer, keras_layer))
if isinstance(keras_layer, _keras.layers.wrappers.TimeDistributed):
keras_layer = keras_layer.layer
converter_func = _get_layer_converter_fn(keras_layer)
input_names, output_names = graph.get_layer_blobs(layer)
converter_func(builder, layer, input_names, output_names, keras_layer)
# Set the right inputs and outputs on the model description (interface)
builder.set_input(input_names, input_dims)
builder.set_output(output_names, output_dims)
# Since we aren't mangling anything the user gave us, we only need to update
# the model interface here
builder.add_optionals(graph.optional_inputs, graph.optional_outputs)
# Add classifier classes (if applicable)
if is_classifier:
classes_in = class_labels
if isinstance(classes_in, str):
import os
if not os.path.isfile(classes_in):
raise ValueError(
"Path to class labels (%s) does not exist." % classes_in
)
with open(classes_in, "r") as f:
classes = f.read()
classes = classes.splitlines()
elif type(classes_in) is list: # list[int or str]
classes = classes_in
else:
raise ValueError(
"Class labels must be a list of integers / strings, or a file path"
)
if predicted_feature_name is not None:
builder.set_class_labels(
classes,
predicted_feature_name=predicted_feature_name,
prediction_blob=predicted_probabilities_output,
)
else:
builder.set_class_labels(classes)
# Set pre-processing parameters
builder.set_pre_processing_parameters(
image_input_names=image_input_names,
is_bgr=is_bgr,
red_bias=red_bias,
green_bias=green_bias,
blue_bias=blue_bias,
gray_bias=gray_bias,
image_scale=image_scale,
)
# Return the protobuf spec
spec = builder.spec
return spec
def _convert_to_spec(
model,
input_names=None,
output_names=None,
image_input_names=None,
input_name_shape_dict={},
is_bgr=False,
red_bias=0.0,
green_bias=0.0,
blue_bias=0.0,
gray_bias=0.0,
image_scale=1.0,
class_labels=None,
predicted_feature_name=None,
model_precision=_MLMODEL_FULL_PRECISION,
predicted_probabilities_output="",
add_custom_layers=False,
custom_conversion_functions=None,
custom_objects=None,
input_shapes=None,
output_shapes=None,
respect_trainable=False,
use_float_arraytype=False,
):
"""
Convert a Keras model to Core ML protobuf specification (.mlmodel).
Parameters
----------
model: Keras model object | str | (str, str)
A trained Keras neural network model which can be one of the following:
- a Keras model object
- a string with the path to a Keras model file (h5)
- a tuple of strings, where the first is the path to a Keras model
architecture (.json file), the second is the path to its weights
stored in h5 file.
input_names: [str] | str
Optional name(s) that can be given to the inputs of the Keras model.
These names will be used in the interface of the Core ML models to refer
to the inputs of the Keras model. If not provided, the Keras inputs
are named to [input1, input2, ..., inputN] in the Core ML model. When
multiple inputs are present, the input feature names are in the same
order as the Keras inputs.
output_names: [str] | str
Optional name(s) that can be given to the outputs of the Keras model.
These names will be used in the interface of the Core ML models to refer
to the outputs of the Keras model. If not provided, the Keras outputs
are named to [output1, output2, ..., outputN] in the Core ML model.
When multiple outputs are present, output feature names are in the same
order as the Keras outputs.
image_input_names: [str] | str
Input names to the Keras model (a subset of the input_names
parameter) that can be treated as images by Core ML. All other inputs
are treated as MultiArrays (N-D Arrays).
input_name_shape_dict: {str: [int]}
Optional Dictionary of input tensor names and their corresponding shapes expressed
as a list of ints
is_bgr: bool | dict()
Flag indicating the channel order the model internally uses to represent
color images. Set to True if the internal channel order is BGR,
otherwise it will be assumed RGB. This flag is applicable only if
image_input_names is specified. To specify a different value for each
image input, provide a dictionary with input names as keys.
Note that this flag is about the model's internal channel order.
An input image can be passed to the model in any color pixel layout
containing red, green and blue values (e.g. 32BGRA or 32ARGB). This flag
determines how those pixel values get mapped to the internal multiarray
representation.
red_bias: float | dict()
Bias value to be added to the red channel of the input image.
Defaults to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
blue_bias: float | dict()
Bias value to be added to the blue channel of the input image.
Defaults to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
green_bias: float | dict()
Bias value to be added to the green channel of the input image.
Defaults to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
gray_bias: float | dict()
Bias value to be added to the input image (in grayscale). Defaults
to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
image_scale: float | dict()
Value by which input images will be scaled before bias is added and
Core ML model makes a prediction. Defaults to 1.0.
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
class_labels: list[int or str] | str
Class labels (applies to classifiers only) that map the index of the
output of a neural network to labels in a classifier.
If the provided class_labels is a string, it is assumed to be a
filepath where classes are parsed as a list of newline separated
strings.
predicted_feature_name: str
Name of the output feature for the class labels exposed in the Core ML
model (applies to classifiers only). Defaults to 'classLabel'
model_precision: str
Precision at which model will be saved. Currently full precision (float) and half precision
(float16) models are supported. Defaults to '_MLMODEL_FULL_PRECISION' (full precision).
predicted_probabilities_output: str
Name of the neural network output to be interpreted as the predicted
probabilities of the resulting classes. Typically the output of a
softmax function. Defaults to the first output blob.
add_custom_layers: bool
If True, then unknown Keras layer types will be added to the model as
'custom' layers, which must then be filled in as postprocessing.
custom_conversion_functions: {'str': (Layer -> CustomLayerParams)}
A dictionary with keys corresponding to names of custom layers and values
as functions taking a Keras custom layer and returning a parameter dictionary
and list of weights.
custom_objects: {'str': (function)}
Dictionary that includes a key, value pair of {'<function name>': <function>}
for custom objects such as custom loss in the Keras model.
Provide a string of the name of the custom function as a key.
Provide a function as a value.
respect_trainable: bool
If True, then Keras layers that are marked 'trainable' will
automatically be marked updatable in the Core ML model.
use_float_arraytype: bool
If true, the datatype of input/output multiarrays is set to Float32 instead
of double.
Returns
-------
model: MLModel
Model in Core ML format.
Examples
--------
.. sourcecode:: python
# Make a Keras model
>>> model = Sequential()
>>> model.add(Dense(num_channels, input_dim = input_dim))
# Convert it with default input and output names
>>> import coremltools
>>> coreml_model = coremltools.converters.keras.convert(model)
# Saving the Core ML model to a file.
>>> coreml_model.save('my_model.mlmodel')
Converting a model with a single image input.
.. sourcecode:: python
>>> coreml_model = coremltools.converters.keras.convert(model, input_names =
... 'image', image_input_names = 'image')
Core ML also lets you add class labels to models to expose them as
classifiers.
.. sourcecode:: python
>>> coreml_model = coremltools.converters.keras.convert(model, input_names = 'image',
... image_input_names = 'image', class_labels = ['cat', 'dog', 'rat'])
Class labels for classifiers can also come from a file on disk.
.. sourcecode:: python
>>> coreml_model = coremltools.converters.keras.convert(model, input_names =
... 'image', image_input_names = 'image', class_labels = 'labels.txt')
Provide customized input and output names to the Keras inputs and outputs
while exposing them to Core ML.
.. sourcecode:: python
>>> coreml_model = coremltools.converters.keras.convert(model, input_names =
... ['my_input_1', 'my_input_2'], output_names = ['my_output'])
"""
if model_precision not in _VALID_MLMODEL_PRECISION_TYPES:
raise RuntimeError("Model precision {} is not valid".format(model_precision))
if _HAS_KERAS_TF:
spec = _convert(
model=model,
input_names=input_names,
output_names=output_names,
image_input_names=image_input_names,
is_bgr=is_bgr,
red_bias=red_bias,
green_bias=green_bias,
blue_bias=blue_bias,
gray_bias=gray_bias,
image_scale=image_scale,
class_labels=class_labels,
predicted_feature_name=predicted_feature_name,
predicted_probabilities_output=predicted_probabilities_output,
custom_objects=custom_objects,
respect_trainable=respect_trainable,
)
elif _HAS_KERAS2_TF:
from . import _keras2_converter
spec = _keras2_converter._convert(
model=model,
input_names=input_names,
output_names=output_names,
image_input_names=image_input_names,
input_name_shape_dict=input_name_shape_dict,
is_bgr=is_bgr,
red_bias=red_bias,
green_bias=green_bias,
blue_bias=blue_bias,
gray_bias=gray_bias,
image_scale=image_scale,
class_labels=class_labels,
predicted_feature_name=predicted_feature_name,
predicted_probabilities_output=predicted_probabilities_output,
add_custom_layers=add_custom_layers,
custom_conversion_functions=custom_conversion_functions,
custom_objects=custom_objects,
input_shapes=input_shapes,
output_shapes=output_shapes,
respect_trainable=respect_trainable,
use_float_arraytype=use_float_arraytype,
)
else:
raise RuntimeError(
"Keras not found or unsupported version or backend found. keras conversion API is disabled."
)
if model_precision == _MLMODEL_HALF_PRECISION and model is not None:
spec = _convert_neural_network_spec_weights_to_fp16(spec)
return spec
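# Illustrative sketch (not part of the original module): producing and
# saving a half-precision model through the spec-level API; the file name
# is a placeholder.
#
#     spec = _convert_to_spec(model, model_precision=_MLMODEL_HALF_PRECISION)
#     _MLModel(spec).save('my_model_fp16.mlmodel')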
@_deprecated()
def convert(
model,
input_names=None,
output_names=None,
image_input_names=None,
input_name_shape_dict={},
is_bgr=False,
red_bias=0.0,
green_bias=0.0,
blue_bias=0.0,
gray_bias=0.0,
image_scale=1.0,
class_labels=None,
predicted_feature_name=None,
model_precision=_MLMODEL_FULL_PRECISION,
predicted_probabilities_output="",
add_custom_layers=False,
custom_conversion_functions=None,
input_shapes=None,
output_shapes=None,
respect_trainable=False,
use_float_arraytype=False,
):
"""
WARNING: This function is deprecated. It will be removed in the 6.0 release.
Convert a Keras model to Core ML protobuf specification (.mlmodel).
Parameters
----------
model: Keras model object | str | (str, str)
A trained Keras neural network model which can be one of the following:
- a Keras model object
- a string with the path to a Keras model file (h5)
- a tuple of strings, where the first is the path to a Keras model
architecture (.json file), the second is the path to its weights stored in h5 file.
input_names: [str] | str
Optional name(s) that can be given to the inputs of the Keras model.
These names will be used in the interface of the Core ML models to refer
to the inputs of the Keras model. If not provided, the Keras inputs
are named to [input1, input2, ..., inputN] in the Core ML model. When
multiple inputs are present, the input feature names are in the same
order as the Keras inputs.
output_names: [str] | str
Optional name(s) that can be given to the outputs of the Keras model.
These names will be used in the interface of the Core ML models to refer
to the outputs of the Keras model. If not provided, the Keras outputs
are named to [output1, output2, ..., outputN] in the Core ML model.
When multiple outputs are present, output feature names are in the same
order as the Keras outputs.
image_input_names: [str] | str
Input names to the Keras model (a subset of the input_names
parameter) that can be treated as images by Core ML. All other inputs
are treated as MultiArrays (N-D Arrays).
is_bgr: bool | dict()
Flag indicating the channel order the model internally uses to represent
color images. Set to True if the internal channel order is BGR,
otherwise it will be assumed RGB. This flag is applicable only if
image_input_names is specified. To specify a different value for each
image input, provide a dictionary with input names as keys.
Note that this flag is about the model's internal channel order.
An input image can be passed to the model in any color pixel layout
containing red, green and blue values (e.g. 32BGRA or 32ARGB). This flag
determines how those pixel values get mapped to the internal multiarray
representation.
red_bias: float | dict()
Bias value to be added to the red channel of the input image.
Defaults to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
blue_bias: float | dict()
Bias value to be added to the blue channel of the input image.
Defaults to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
green_bias: float | dict()
Bias value to be added to the green channel of the input image.
Defaults to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
gray_bias: float | dict()
Bias value to be added to the input image (in grayscale). Defaults
to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
image_scale: float | dict()
Value by which input images will be scaled before bias is added and
Core ML model makes a prediction. Defaults to 1.0.
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
class_labels: list[int or str] | str
Class labels (applies to classifiers only) that map the index of the
output of a neural network to labels in a classifier.
If the provided class_labels is a string, it is assumed to be a
filepath where classes are parsed as a list of newline separated
strings.
predicted_feature_name: str
Name of the output feature for the class labels exposed in the Core ML
model (applies to classifiers only). Defaults to 'classLabel'
model_precision: str
Precision at which model will be saved. Currently full precision (float) and half precision
(float16) models are supported. Defaults to '_MLMODEL_FULL_PRECISION' (full precision).
predicted_probabilities_output: str
Name of the neural network output to be interpreted as the predicted
probabilities of the resulting classes. Typically the output of a
softmax function. Defaults to the first output blob.
add_custom_layers: bool
If True, then unknown Keras layer types will be added to the model as
'custom' layers, which must then be filled in as postprocessing.
custom_conversion_functions: {str: (Layer -> (dict, [weights]))}
A dictionary with keys corresponding to names of custom layers and values
as functions taking a Keras custom layer and returning a parameter dictionary
and list of weights.
respect_trainable: bool
If True, then Keras layers marked 'trainable' will automatically be
marked updatable in the Core ML model.
use_float_arraytype: bool
If true, the datatype of input/output multiarrays is set to Float32 instead
of double.
Returns
-------
model: MLModel
Model in Core ML format.
Examples
--------
.. sourcecode:: python
# Make a Keras model
>>> model = Sequential()
>>> model.add(Dense(num_channels, input_dim = input_dim))
# Convert it with default input and output names
>>> import coremltools
>>> coreml_model = coremltools.converters.keras.convert(model)
# Saving the Core ML model to a file.
>>> coreml_model.save('my_model.mlmodel')
Converting a model with a single image input.
.. sourcecode:: python
>>> coreml_model = coremltools.converters.keras.convert(model, input_names =
... 'image', image_input_names = 'image')
Core ML also lets you add class labels to models to expose them as
classifiers.
.. sourcecode:: python
>>> coreml_model = coremltools.converters.keras.convert(model, input_names = 'image',
... image_input_names = 'image', class_labels = ['cat', 'dog', 'rat'])
Class labels for classifiers can also come from a file on disk.
.. sourcecode:: python
>>> coreml_model = coremltools.converters.keras.convert(model, input_names =
... 'image', image_input_names = 'image', class_labels = 'labels.txt')
Provide customized input and output names to the Keras inputs and outputs
while exposing them to Core ML.
.. sourcecode:: python
>>> coreml_model = coremltools.converters.keras.convert(model, input_names =
... ['my_input_1', 'my_input_2'], output_names = ['my_output'])
"""
spec = _convert_to_spec(
model,
input_names=input_names,
output_names=output_names,
image_input_names=image_input_names,
input_name_shape_dict=input_name_shape_dict,
is_bgr=is_bgr,
red_bias=red_bias,
green_bias=green_bias,
blue_bias=blue_bias,
gray_bias=gray_bias,
image_scale=image_scale,
class_labels=class_labels,
predicted_feature_name=predicted_feature_name,
model_precision=model_precision,
predicted_probabilities_output=predicted_probabilities_output,
add_custom_layers=add_custom_layers,
custom_conversion_functions=custom_conversion_functions,
input_shapes=input_shapes,
output_shapes=output_shapes,
respect_trainable=respect_trainable,
use_float_arraytype=use_float_arraytype,
)
model = _MLModel(spec)
from keras import __version__ as keras_version
model.user_defined_metadata[_METADATA_VERSION] = ct_version
model.user_defined_metadata[_METADATA_SOURCE] = "keras=={0}".format(keras_version)
return model
|
|
"""Author: Arthur Mensch
Benchmarks of sklearn SAGA vs lightning SAGA vs Liblinear. Shows the gain
in using multinomial logistic regression in terms of learning time.
"""
import json
import time
from os.path import expanduser
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_rcv1, load_iris, load_digits, \
fetch_20newsgroups_vectorized
from sklearn.externals.joblib import delayed, Parallel, Memory
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer, LabelEncoder
from sklearn.utils.extmath import safe_sparse_dot, softmax
def fit_single(solver, X, y, penalty='l2', single_target=True, C=1,
max_iter=10, skip_slow=False):
if skip_slow and solver == 'lightning' and penalty == 'l1':
print('Skipping l1 logistic regression with solver lightning.')
return
print('Solving %s logistic regression with penalty %s, solver %s.'
% ('binary' if single_target else 'multinomial',
penalty, solver))
if solver == 'lightning':
from lightning.classification import SAGAClassifier
if single_target or solver not in ['sag', 'saga']:
multi_class = 'ovr'
else:
multi_class = 'multinomial'
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42,
stratify=y)
n_samples = X_train.shape[0]
n_classes = np.unique(y_train).shape[0]
test_scores = [1]
train_scores = [1]
accuracies = [1 / n_classes]
times = [0]
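# Map sklearn's inverse regularization strength C onto lightning's SAGA
# penalties: alpha scales the L2 term and beta the L1 term, both per
# sample, so the two solvers optimize the same objective.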
if penalty == 'l2':
alpha = 1. / (C * n_samples)
beta = 0
lightning_penalty = None
else:
alpha = 0.
beta = 1. / (C * n_samples)
lightning_penalty = 'l1'
for this_max_iter in range(1, max_iter + 1, 2):
print('[%s, %s, %s] Max iter: %s' %
('binary' if single_target else 'multinomial',
penalty, solver, this_max_iter))
if solver == 'lightning':
lr = SAGAClassifier(loss='log', alpha=alpha, beta=beta,
penalty=lightning_penalty,
tol=-1, max_iter=this_max_iter)
else:
lr = LogisticRegression(solver=solver,
multi_class=multi_class,
C=C,
penalty=penalty,
fit_intercept=False, tol=1e-24,
max_iter=this_max_iter,
random_state=42,
)
t0 = time.time()
lr.fit(X_train, y_train)
train_time = time.time() - t0
scores = []
for (X, y) in [(X_train, y_train), (X_test, y_test)]:
try:
y_pred = lr.predict_proba(X)
except NotImplementedError:
# Lightning predict_proba is not implemented for n_classes > 2
y_pred = _predict_proba(lr, X)
score = log_loss(y, y_pred, normalize=False) / n_samples
score += (0.5 * alpha * np.sum(lr.coef_ ** 2) +
beta * np.sum(np.abs(lr.coef_)))
scores.append(score)
train_score, test_score = tuple(scores)
y_pred = lr.predict(X_test)
accuracy = np.sum(y_pred == y_test) / y_test.shape[0]
test_scores.append(test_score)
train_scores.append(train_score)
accuracies.append(accuracy)
times.append(train_time)
return lr, times, train_scores, test_scores, accuracies
def _predict_proba(lr, X):
pred = safe_sparse_dot(X, lr.coef_.T)
if hasattr(lr, "intercept_"):
pred += lr.intercept_
return softmax(pred)
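# Illustrative note (not part of the benchmark): _predict_proba reproduces
# the multinomial predict_proba sklearn would provide, e.g.
#
#     scores = safe_sparse_dot(X, lr.coef_.T)   # (n_samples, n_classes)
#     probs = softmax(scores)                   # rows sum to 1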
def exp(solvers, penalties, single_target, n_samples=30000, max_iter=20,
dataset='rcv1', n_jobs=1, skip_slow=False):
mem = Memory(cachedir=expanduser('~/cache'), verbose=0)
if dataset == 'rcv1':
rcv1 = fetch_rcv1()
lbin = LabelBinarizer()
lbin.fit(rcv1.target_names)
X = rcv1.data
y = rcv1.target
y = lbin.inverse_transform(y)
le = LabelEncoder()
y = le.fit_transform(y)
if single_target:
y_n = y.copy()
y_n[y > 16] = 1
y_n[y <= 16] = 0
y = y_n
elif dataset == 'digits':
digits = load_digits()
X, y = digits.data, digits.target
if single_target:
y_n = y.copy()
y_n[y < 5] = 1
y_n[y >= 5] = 0
y = y_n
elif dataset == 'iris':
iris = load_iris()
X, y = iris.data, iris.target
elif dataset == '20newspaper':
ng = fetch_20newsgroups_vectorized()
X = ng.data
y = ng.target
if single_target:
y_n = y.copy()
y_n[y > 4] = 1
y_n[y <= 4] = 0
y = y_n
X = X[:n_samples]
y = y[:n_samples]
cached_fit = mem.cache(fit_single)
out = Parallel(n_jobs=n_jobs, mmap_mode=None)(
delayed(cached_fit)(solver, X, y,
penalty=penalty, single_target=single_target,
C=1, max_iter=max_iter, skip_slow=skip_slow)
for solver in solvers
for penalty in penalties)
res = []
idx = 0
for solver in solvers:
for penalty in penalties:
if not (skip_slow and solver == 'lightning' and penalty == 'l1'):
lr, times, train_scores, test_scores, accuracies = out[idx]
this_res = dict(solver=solver, penalty=penalty,
single_target=single_target,
times=times, train_scores=train_scores,
test_scores=test_scores,
accuracies=accuracies)
res.append(this_res)
idx += 1
with open('bench_saga.json', 'w+') as f:
json.dump(res, f)
def plot():
import pandas as pd
with open('bench_saga.json', 'r') as f:
res = json.load(f)
res = pd.DataFrame(res)
res.set_index(['single_target', 'penalty'], inplace=True)
grouped = res.groupby(level=['single_target', 'penalty'])
colors = {'saga': 'blue', 'liblinear': 'orange', 'lightning': 'green'}
for idx, group in grouped:
single_target, penalty = idx
fig = plt.figure(figsize=(12, 4))
ax = fig.add_subplot(131)
train_scores = group['train_scores'].values
ref = np.min(np.concatenate(train_scores)) * 0.999
for scores, times, solver in zip(group['train_scores'], group['times'],
group['solver']):
scores = scores / ref - 1
ax.plot(times, scores, label=solver, color=colors[solver])
ax.set_xlabel('Time (s)')
ax.set_ylabel('Training objective (relative to min)')
ax.set_yscale('log')
ax = fig.add_subplot(132)
test_scores = group['test_scores'].values
ref = np.min(np.concatenate(test_scores)) * 0.999
for scores, times, solver in zip(group['test_scores'], group['times'],
group['solver']):
scores = scores / ref - 1
ax.plot(times, scores, label=solver, color=colors[solver])
ax.set_xlabel('Time (s)')
ax.set_ylabel('Test objective (relative to min)')
ax.set_yscale('log')
ax = fig.add_subplot(133)
for accuracy, times, solver in zip(group['accuracies'], group['times'],
group['solver']):
ax.plot(times, accuracy, label=solver, color=colors[solver])
ax.set_xlabel('Time (s)')
ax.set_ylabel('Test accuracy')
ax.legend()
name = 'single_target' if single_target else 'multi_target'
name += '_%s' % penalty
plt.suptitle(name)
name += '.png'
fig.tight_layout()
fig.subplots_adjust(top=0.9)
plt.savefig(name)
plt.close(fig)
if __name__ == '__main__':
solvers = ['saga', 'liblinear', 'lightning']
penalties = ['l1', 'l2']
single_target = True
exp(solvers, penalties, single_target, n_samples=None, n_jobs=1,
dataset='20newspaper', max_iter=20)
plot()
|
|
# Copyright 2014 Objectif Libre
# Copyright 2015 Dot Hill Systems Corp.
# Copyright 2016 Seagate Technology or one of its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import hashlib
import math
import time
from lxml import etree
from oslo_log import log as logging
from oslo_utils import units
import requests
import six
from cinder import exception
from cinder.i18n import _, _LE, _LW, _LI
from cinder import utils
LOG = logging.getLogger(__name__)
class DotHillClient(object):
def __init__(self, host, login, password, protocol, ssl_verify):
self._mgmt_ip_addrs = list(map(str.strip, host.split(',')))
self._login = login
self._password = password
self._protocol = protocol
self._session_key = None
self.ssl_verify = ssl_verify
self._set_host(self._mgmt_ip_addrs[0])
self._fw = ''
self._luns_in_use_by_host = {}
def _set_host(self, ip_addr):
self._curr_ip_addr = ip_addr
self._base_url = "%s://%s/api" % (self._protocol, ip_addr)
def _get_auth_token(self, xml):
"""Parse an XML authentication reply to extract the session key."""
self._session_key = None
try:
tree = etree.XML(xml)
if (tree.findtext(".//PROPERTY[@name='response-type']") ==
"success"):
self._session_key = (
tree.findtext(".//PROPERTY[@name='response']"))
except Exception as e:
msg = _("Cannot parse session key: %s") % e.msg
raise exception.DotHillConnectionError(message=msg)
def login(self):
if self._session_key is None:
return self.session_login()
def session_login(self):
"""Authenticates the service on the device.
Tries all the IP addrs listed in the san_ip parameter
until a working one is found or the list is exhausted.
"""
try:
self._get_session_key()
self.get_firmware_version()
LOG.debug("Logged in to array at %s (session %s)",
self._base_url, self._session_key)
return
except exception.DotHillConnectionError:
not_responding = self._curr_ip_addr
LOG.exception(_LE('session_login failed to connect to %s'),
self._curr_ip_addr)
# Loop through the remaining management addresses
# to find one that's up.
for host in self._mgmt_ip_addrs:
if host == not_responding:
continue
self._set_host(host)
try:
self._get_session_key()
return
except exception.DotHillConnectionError:
LOG.error(_LE('Failed to connect to %s'),
self._curr_ip_addr)
continue
raise exception.DotHillConnectionError(
message=_("Failed to log in to management controller"))
@utils.synchronized(__name__, external=True)
def _get_session_key(self):
"""Retrieve a session key from the array."""
self._session_key = None
hash_ = "%s_%s" % (self._login, self._password)
if six.PY3:
hash_ = hash_.encode('utf-8')
hash_ = hashlib.md5(hash_)
digest = hash_.hexdigest()
url = self._base_url + "/login/" + digest
try:
xml = requests.get(url, verify=self.ssl_verify, timeout=30)
except requests.exceptions.RequestException:
msg = _("Failed to obtain MC session key")
LOG.exception(msg)
raise exception.DotHillConnectionError(message=msg)
self._get_auth_token(xml.text.encode('utf8'))
LOG.debug("session key = %s", self._session_key)
if self._session_key is None:
raise exception.DotHillAuthenticationError
def _assert_response_ok(self, tree):
"""Parses the XML returned by the device to check the return code.
Raises a DotHillRequestError error if the return code is not 0
or if the return code is None.
"""
# Get the return code for the operation, raising an exception
# if it is not present.
return_code = tree.findtext(".//PROPERTY[@name='return-code']")
if not return_code:
raise exception.DotHillRequestError(message="No status found")
# If no error occurred, just return.
if return_code == '0':
return
# Format a message for the status code.
msg = "%s (%s)" % (tree.findtext(".//PROPERTY[@name='response']"),
return_code)
raise exception.DotHillRequestError(message=msg)
def _build_request_url(self, path, *args, **kargs):
url = self._base_url + path
if kargs:
url += '/' + '/'.join(["%s/%s" % (k.replace('_', '-'), v)
for (k, v) in kargs.items()])
if args:
url += '/' + '/'.join(args)
return url
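# Illustrative note (not part of the original driver): keyword arguments
# become 'key/value' path segments with underscores mapped to dashes, and
# positional args are appended last. With base URL 'https://1.2.3.4/api':
#
#     _build_request_url('/create/volume', 'vol1', size='10GiB')
#     -> 'https://1.2.3.4/api/create/volume/size/10GiB/vol1'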
def _request(self, path, *args, **kargs):
"""Performs an API request on the array, with retry.
Propagates a DotHillConnectionError if no valid response is
received from the array, e.g. if the network is down.
Propagates a DotHillRequestError if the device returned a response
but the status is not 0. The device error message will be used
in the exception message.
If the status is OK, returns the XML data for further processing.
"""
tries_left = 2
while tries_left > 0:
try:
return self._api_request(path, *args, **kargs)
except exception.DotHillConnectionError as e:
if tries_left <= 1:
LOG.error(_LE("Array Connection error: "
"%s (no more retries)"), e.msg)
raise
# Retry on any network connection errors, SSL errors, etc
LOG.error(_LE("Array Connection error: %s (retrying)"), e.msg)
except exception.DotHillRequestError as e:
if tries_left <= 1:
LOG.error(_LE("Array Request error: %s (no more retries)"),
e.msg)
raise
# Retry specific errors which may succeed if we log in again
# -10027 => The user is not recognized on this system.
if '(-10027)' in e.msg:
LOG.error(_LE("Array Request error: %s (retrying)"), e.msg)
else:
raise
tries_left -= 1
self.session_login()
@utils.synchronized(__name__, external=True)
def _api_request(self, path, *args, **kargs):
"""Performs an HTTP request on the device, with locking.
Raises a DotHillRequestError if the device returned but the status is
not 0. The device error message will be used in the exception message.
If the status is OK, returns the XML data for further processing.
"""
url = self._build_request_url(path, *args, **kargs)
LOG.debug("Array Request URL: %s (session %s)",
url, self._session_key)
headers = {'dataType': 'api', 'sessionKey': self._session_key}
try:
xml = requests.get(url, headers=headers,
verify=self.ssl_verify, timeout=60)
tree = etree.XML(xml.text.encode('utf8'))
except Exception as e:
message = _("Exception handling URL %(url)s: %(msg)s") % {
'url': url, 'msg': e}
raise exception.DotHillConnectionError(message=message)
if path == "/show/volumecopy-status":
return tree
self._assert_response_ok(tree)
return tree
def logout(self):
pass
def session_logout(self):
url = self._base_url + '/exit'
try:
requests.get(url, verify=self.ssl_verify, timeout=30)
return True
except Exception:
return False
def is_titanium(self):
"""True if array is an older generation."""
return len(self._fw) > 0 and self._fw[0] == 'T'
def create_volume(self, name, size, backend_name, backend_type):
# NOTE: size is in this format: [0-9]+GiB
path_dict = {'size': size}
if backend_type == "linear":
path_dict['vdisk'] = backend_name
else:
path_dict['pool'] = backend_name
try:
self._request("/create/volume", name, **path_dict)
except exception.DotHillRequestError as e:
# -10186 => The specified name is already in use.
# This can occur during controller failover.
if '(-10186)' in e.msg:
LOG.warning(_LW("Ignoring error in create volume: %s"), e.msg)
return None
raise
return None
def delete_volume(self, name):
try:
self._request("/delete/volumes", name)
except exception.DotHillRequestError as e:
# -10075 => The specified volume was not found.
# This can occur during controller failover.
if '(-10075)' in e.msg:
LOG.warning(_LW("Ignorning error while deleting %(volume)s:"
" %(reason)s"),
{'volume': name, 'reason': e.msg})
return
raise
def extend_volume(self, name, added_size):
self._request("/expand/volume", name, size=added_size)
def create_snapshot(self, volume_name, snap_name):
try:
self._request("/create/snapshots", snap_name, volumes=volume_name)
except exception.DotHillRequestError as e:
# -10186 => The specified name is already in use.
# This can occur during controller failover.
if '(-10186)' in e.msg:
LOG.warning(_LW("Ignoring error attempting to create snapshot:"
" %s"), e.msg)
return None
def delete_snapshot(self, snap_name):
try:
self._request("/delete/snapshot", "cleanup", snap_name)
except exception.DotHillRequestError as e:
# -10050 => The volume was not found on this system.
# This can occur during controller failover.
if '(-10050)' in e.msg:
LOG.warning(_LW("Ignoring unmap error -10050: %s"), e.msg)
return None
raise
def backend_exists(self, backend_name, backend_type):
try:
if backend_type == "linear":
path = "/show/vdisks"
else:
path = "/show/pools"
self._request(path, backend_name)
return True
except exception.DotHillRequestError:
return False
def _get_size(self, size):
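# 'size' is a count of 512-byte blocks as reported by the array; convert
# it to whole GiB, rounding up.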
return int(math.ceil(float(size) * 512 / units.G))
def backend_stats(self, backend_name, backend_type):
stats = {'free_capacity_gb': 0,
'total_capacity_gb': 0}
prop_list = []
if backend_type == "linear":
path = "/show/vdisks"
prop_list = ["size-numeric", "freespace-numeric"]
else:
path = "/show/pools"
prop_list = ["total-size-numeric", "total-avail-numeric"]
tree = self._request(path, backend_name)
size = tree.findtext(".//PROPERTY[@name='%s']" % prop_list[0])
if size:
stats['total_capacity_gb'] = self._get_size(size)
size = tree.findtext(".//PROPERTY[@name='%s']" % prop_list[1])
if size:
stats['free_capacity_gb'] = self._get_size(size)
return stats
def list_luns_for_host(self, host):
tree = self._request("/show/host-maps", host)
return [int(prop.text) for prop in tree.xpath(
"//PROPERTY[@name='lun']")]
def _get_first_available_lun_for_host(self, host):
"""Find next available LUN number.
Returns a lun number greater than 0 which is not known to be in
use between the array and the specified host.
"""
luns = self.list_luns_for_host(host)
self._luns_in_use_by_host[host] = luns
lun = 1
while True:
if lun not in luns:
return lun
lun += 1
def _get_next_available_lun_for_host(self, host, after=0):
# host can be a comma-separated list of WWPNs; we only use the first.
firsthost = host.split(',')[0]
LOG.debug('get_next_available_lun: host=%s, firsthost=%s, after=%d',
host, firsthost, after)
if after == 0:
return self._get_first_available_lun_for_host(firsthost)
luns = self._luns_in_use_by_host[firsthost]
lun = after + 1
while lun < 1024:
LOG.debug('get_next_available_lun: host=%s, trying lun %d',
firsthost, lun)
if lun not in luns:
LOG.debug('get_next_available_lun: host=%s, RETURNING lun %d',
firsthost, lun)
return lun
lun += 1
raise exception.DotHillRequestError(
message=_("No LUNs available for mapping to host %s.") % host)
@utils.synchronized(__name__ + '.map_volume', external=True)
def map_volume(self, volume_name, connector, connector_element):
if connector_element == 'wwpns':
lun = self._get_first_available_lun_for_host(connector['wwpns'][0])
host = ",".join(connector['wwpns'])
else:
host = connector['initiator']
host_status = self._check_host(host)
if host_status != 0:
hostname = self._safe_hostname(connector['host'])
try:
self._request("/create/host", hostname, id=host)
except exception.DotHillRequestError as e:
# -10058: The host identifier or nickname is already in use
if '(-10058)' in e.msg:
LOG.error(_LE("While trying to create host nickname"
" %(nickname)s: %(error_msg)s"),
{'nickname': hostname,
'error_msg': e.msg})
else:
raise
lun = self._get_first_available_lun_for_host(host)
while lun < 255:
try:
self._request("/map/volume",
volume_name,
lun=str(lun),
host=host,
access="rw")
return lun
except exception.DotHillRequestError as e:
# -3177 => "The specified LUN overlaps a previously defined LUN
if '(-3177)' in e.msg:
LOG.info(_LI("Unable to map volume"
" %(volume_name)s to lun %(lun)d:"
" %(reason)s"),
{'volume_name': volume_name,
'lun': lun, 'reason': e.msg})
lun = self._get_next_available_lun_for_host(host,
after=lun)
continue
raise
except Exception as e:
LOG.error(_LE("Error while mapping volume"
" %(volume_name)s to lun %(lun)d:"),
{'volume_name': volume_name, 'lun': lun},
e)
raise
raise exception.DotHillRequestError(
message=_("Failed to find a free LUN for host %s") % host)
def unmap_volume(self, volume_name, connector, connector_element):
if connector_element == 'wwpns':
host = ",".join(connector['wwpns'])
else:
host = connector['initiator']
try:
self._request("/unmap/volume", volume_name, host=host)
except exception.DotHillRequestError as e:
# -10050 => The volume was not found on this system.
# This can occur during controller failover.
if '(-10050)' in e.msg:
LOG.warning(_LW("Ignoring unmap error -10050: %s"), e.msg)
return None
raise
def get_active_target_ports(self):
ports = []
tree = self._request("/show/ports")
for obj in tree.xpath("//OBJECT[@basetype='port']"):
port = {prop.get('name'): prop.text
for prop in obj.iter("PROPERTY")
if prop.get('name') in
["port-type", "target-id", "status"]}
if port['status'] == 'Up':
ports.append(port)
return ports
def get_active_fc_target_ports(self):
return [port['target-id'] for port in self.get_active_target_ports()
if port['port-type'] == "FC"]
def get_active_iscsi_target_iqns(self):
return [port['target-id'] for port in self.get_active_target_ports()
if port['port-type'] == "iSCSI"]
def linear_copy_volume(self, src_name, dest_name, dest_bknd_name):
"""Copy a linear volume."""
self._request("/volumecopy",
dest_name,
dest_vdisk=dest_bknd_name,
source_volume=src_name,
prompt='yes')
# The copy has started; now monitor until the operation completes.
count = 0
while True:
tree = self._request("/show/volumecopy-status")
return_code = tree.findtext(".//PROPERTY[@name='return-code']")
if return_code == '0':
status = tree.findtext(".//PROPERTY[@name='progress']")
progress = False
if status:
progress = True
LOG.debug("Volume copy is in progress: %s", status)
if not progress:
LOG.debug("Volume copy completed: %s", status)
break
else:
if count >= 5:
LOG.error(_LE('Error in copying volume: %s'), src_name)
raise exception.DotHillRequestError
time.sleep(1)
count += 1
time.sleep(5)
def copy_volume(self, src_name, dest_name, dest_bknd_name,
backend_type='virtual'):
"""Copy a linear or virtual volume."""
if backend_type == 'linear':
return self.linear_copy_volume(src_name, dest_name, dest_bknd_name)
# Copy a virtual volume to another in the same pool.
self._request("/copy/volume", src_name, name=dest_name)
LOG.debug("Volume copy of source_volume: %(src_name)s to "
"destination_volume: %(dest_name)s started.",
{'src_name': src_name, 'dest_name': dest_name, })
# Loop until this volume copy is no longer in progress.
while self.volume_copy_in_progress(src_name):
time.sleep(5)
# Once the copy operation is finished, check to ensure that
# the volume was not deleted because of a subsequent error. An
# exception will be raised if the named volume is not present.
self._request("/show/volumes", dest_name)
LOG.debug("Volume copy of source_volume: %(src_name)s to "
"destination_volume: %(dest_name)s completed.",
{'src_name': src_name, 'dest_name': dest_name, })
def volume_copy_in_progress(self, src_name):
"""Check if a volume copy is in progress for the named volume."""
# 'show volume-copies' always succeeds, even if none in progress.
tree = self._request("/show/volume-copies")
# Find 0 or 1 job(s) with source volume we're interested in
q = "OBJECT[PROPERTY[@name='source-volume']/text()='%s']" % src_name
joblist = tree.xpath(q)
if len(joblist) == 0:
return False
LOG.debug("Volume copy of volume: %(src_name)s is "
"%(pc)s percent completed.",
{'src_name': src_name,
'pc': joblist[0].findtext("PROPERTY[@name='progress']"), })
return True
def _check_host(self, host):
host_status = -1
tree = self._request("/show/hosts")
for prop in tree.xpath("//PROPERTY[@name='host-id' and text()='%s']"
% host):
host_status = 0
return host_status
def _safe_hostname(self, hostname):
"""Modify an initiator name to match firmware requirements.
Initiator name cannot include certain characters and cannot exceed
15 bytes in 'T' firmware (31 bytes in 'G' firmware).
"""
for ch in [',', '"', '\\', '<', '>']:
if ch in hostname:
hostname = hostname.replace(ch, '')
hostname = hostname.replace('.', '_')
name_limit = 15 if self.is_titanium() else 31
index = len(hostname)
if index > name_limit:
index = name_limit
return hostname[:index]
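# Example (illustrative): _safe_hostname('node-1.example,com') drops the
# comma and maps '.' to '_', then truncates to the firmware limit, giving
# 'node-1_examplecom' on 'G' firmware (31-byte limit).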
def get_active_iscsi_target_portals(self):
# This function returns {'ip': status,}
portals = {}
prop = 'ip-address'
tree = self._request("/show/ports")
for el in tree.xpath("//PROPERTY[@name='primary-ip-address']"):
prop = 'primary-ip-address'
break
iscsi_ips = [ip.text for ip in tree.xpath(
"//PROPERTY[@name='%s']" % prop)]
if not iscsi_ips:
return portals
for index, port_type in enumerate(tree.xpath(
"//PROPERTY[@name='port-type' and text()='iSCSI']")):
status = port_type.getparent().findtext("PROPERTY[@name='status']")
if status == 'Up':
portals[iscsi_ips[index]] = status
return portals
def get_chap_record(self, initiator_name):
tree = self._request("/show/chap-records")
for prop in tree.xpath("//PROPERTY[@name='initiator-name' and "
"text()='%s']" % initiator_name):
chap_secret = prop.getparent().findtext("PROPERTY[@name='initiator"
"-secret']")
return chap_secret
def create_chap_record(self, initiator_name, chap_secret):
self._request("/create/chap-record",
name=initiator_name,
secret=chap_secret)
def get_serial_number(self):
tree = self._request("/show/system")
return tree.findtext(".//PROPERTY[@name='midplane-serial-number']")
def get_owner_info(self, backend_name, backend_type):
if backend_type == 'linear':
tree = self._request("/show/vdisks", backend_name)
else:
tree = self._request("/show/pools", backend_name)
return tree.findtext(".//PROPERTY[@name='owner']")
def modify_volume_name(self, old_name, new_name):
self._request("/set/volume", old_name, name=new_name)
def get_volume_size(self, volume_name):
tree = self._request("/show/volumes", volume_name)
size = tree.findtext(".//PROPERTY[@name='size-numeric']")
return self._get_size(size)
def get_firmware_version(self):
tree = self._request("/show/controllers")
self._fw = tree.xpath("//PROPERTY[@name='sc-fw']")[0].text
        LOG.debug("Array firmware is %s", self._fw)
return self._fw
|
|
'''
To run a Bokeh application on a Bokeh server from a single Python script,
pass the script name to ``bokeh serve`` on the command line:
.. code-block:: sh
bokeh serve app_script.py
By default, the Bokeh application will be served by the Bokeh server on a
default port ({DEFAULT_PORT}) at localhost, under the path ``/app_script``,
i.e.,
.. code-block:: none
http://localhost:{DEFAULT_PORT}/app_script
It is also possible to run the same command with Jupyter notebooks:
.. code-block:: sh
bokeh serve app_notebook.ipynb
This produces the same results as with a Python script, and the
application will be served on a default port ({DEFAULT_PORT})
at localhost, under the path ``/app_notebook``.
Applications can also be created from directories. The directory should
contain a ``main.py`` (and any other helper modules that are required) as
well as any additional assets (e.g., theme files). Pass the directory name
to ``bokeh serve`` to run the application:
.. code-block:: sh
bokeh serve app_dir
It is possible to run multiple applications at once:
.. code-block:: sh
bokeh serve app_script.py app_dir
If you would like to automatically open a browser to display the HTML
page(s), you can pass the ``--show`` option on the command line:
.. code-block:: sh
bokeh serve app_script.py app_dir --show
This will open two pages, for ``/app_script`` and ``/app_dir``,
respectively.
If you would like to pass command line arguments to Bokeh applications,
you can pass the ``--args`` option as the LAST option on the command
line:
.. code-block:: sh
bokeh serve app_script.py myapp.py --args foo bar --baz
Everything that follows ``--args`` will be included in ``sys.argv`` when
the application runs. In this case, when ``myapp.py`` executes, the
contents of ``sys.argv`` will be ``['myapp.py', 'foo', 'bar', '--baz']``,
consistent with standard Python expectations for ``sys.argv``.
Note that if multiple scripts or directories are provided, they
all receive the same set of command line arguments (if any) given by
``--args``.
Network Configuration
~~~~~~~~~~~~~~~~~~~~~
To control the port that the Bokeh server listens on, use the ``--port``
argument:
.. code-block:: sh
bokeh serve app_script.py --port=8080
Similarly, a specific network address can be specified with the
``--address`` argument. For example:
.. code-block:: sh
bokeh serve app_script.py --address=0.0.0.0
will have the Bokeh server listen on all available network addresses.
Additionally, it is possible to configure a whitelist of hosts that must be
matched by the ``Host`` header in new requests. You can specify multiple
acceptable host values with the ``--host`` option:
.. code-block:: sh
bokeh serve app_script.py --host foo.com:8081 --host bar.com
If no port is specified in a host value, then port 80 will be used. In
the example above, the Bokeh server will accept requests from
``foo.com:8081`` and ``bar.com:80``.
If no host values are specified, then by default the Bokeh server will
accept requests from ``localhost:<port>`` where ``<port>`` is the port
that the server is configured to listen on (by default: {DEFAULT_PORT}).
If an asterisk ``*`` is used in the host value, then it will be treated as a
wildcard:
.. code-block:: sh
bokeh serve app_script.py --address=0.0.0.0 --host='*'
Using the wildcard can be helpful when testing applications that are deployed
with cloud orchestration tools and when the public endpoint is not known ahead
of time: for instance if the public IP is dynamically allocated during the
deployment process and no public DNS has been configured for the testing
environment.
As a warning, using permissive host values like ``*`` may be insecure and open
your application to HTTP host header attacks. Production deployments should
always set the ``--host`` flag to use the DNS name of the public endpoint such
as a TLS-enabled load balancer or reverse proxy that serves the application to
the end users.
Also note that the host whitelist applies to all request handlers,
including any extra ones added to extend the Bokeh server.
By default, cross site connections to the Bokeh server websocket are not
allowed. You can enable websocket connections originating from additional
hosts by specifying them with the ``--allow-websocket-origin`` option:
.. code-block:: sh
bokeh serve app_script.py --allow-websocket-origin foo.com:8081
It is possible to specify multiple allowed websocket origins by adding
the ``--allow-websocket-origin`` option multiple times.
The Bokeh server can also add an optional prefix to all URL paths.
This can often be useful in conjunction with "reverse proxy" setups.
.. code-block:: sh
bokeh serve app_script.py --prefix=foobar
Then the application will be served under the following URL:
.. code-block:: none
http://localhost:{DEFAULT_PORT}/foobar/app_script
If needed, Bokeh server can send keep-alive pings at a fixed interval.
To configure this feature, set the ``--keep-alive`` option:
.. code-block:: sh
bokeh serve app_script.py --keep-alive 10000
The value is specified in milliseconds. The default keep-alive interval
is 37 seconds. Give a value of 0 to disable keep-alive pings.
To control how often statistics logs are written, set the
``--stats-log-frequency`` option:
.. code-block:: sh
bokeh serve app_script.py --stats-log-frequency 30000
The value is specified in milliseconds. The default interval for
logging stats is 15 seconds. Only positive integer values are accepted.
To have the Bokeh server override the remote IP and URI scheme/protocol for
all requests with ``X-Real-Ip``, ``X-Forwarded-For``, ``X-Scheme``,
``X-Forwarded-Proto`` headers (if they are provided), set the
``--use-xheaders`` option:
.. code-block:: sh
bokeh serve app_script.py --use-xheaders
This is typically needed when running a Bokeh server behind a reverse proxy
that is SSL-terminated.
.. warning::
It is not advised to set this option on a Bokeh server directly facing
the Internet.
Session ID Options
~~~~~~~~~~~~~~~~~~
Typically, each browser tab connected to a Bokeh server will have
its own session ID. When the server generates an ID, it will make
it cryptographically unguessable. This keeps users from accessing
one another's sessions.
To control who can use a Bokeh application, the server can sign
sessions with a secret key and reject "made up" session
names. There are three modes, controlled by the ``--session-ids``
argument:
.. code-block:: sh
bokeh serve app_script.py --session-ids=signed
The available modes are: {SESSION_ID_MODES}
In ``unsigned`` mode, the server will accept any session ID
provided to it in the URL. For example,
``http://localhost/app_script?bokeh-session-id=foo`` will create a
session ``foo``. In ``unsigned`` mode, if the session ID isn't
provided with ``?bokeh-session-id=`` in the URL, the server will
still generate a cryptographically-unguessable ID. However, the
server allows clients to create guessable or deliberately-shared
sessions if they want to.
``unsigned`` mode is most useful when the server is running
locally for development; for example, you can have multiple
processes access a fixed session name such as
``default``. ``unsigned`` mode is also convenient because there's
no need to generate or configure a secret key.
In ``signed`` mode, the session ID must be in a special format and
signed with a secret key. Attempts to use the application with an
invalid session ID will fail, but if no ``?bokeh-session-id=``
parameter is provided, the server will generate a fresh, signed
session ID. The result of ``signed`` mode is that only secure
session IDs are allowed but anyone can connect to the server.
In ``external-signed`` mode, the session ID must be signed but the
server itself won't generate a session ID; the
``?bokeh-session-id=`` parameter will be required. To use this
mode, you would need some sort of external process (such as
another web app) which would use the
``bokeh.util.session_id.generate_session_id()`` function to create
valid session IDs. The external process and the Bokeh server must
share the same ``BOKEH_SECRET_KEY`` environment variable.
``external-signed`` mode is useful if you want another process to
authenticate access to the Bokeh server; if someone is permitted
to use the Bokeh application, you would generate a session ID for
them, then redirect them to the Bokeh server with that valid
session ID. If you don't generate a session ID for someone, then
they can't load the app from the Bokeh server.
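For example, an external process might mint a session ID and build a
redirect URL along these lines (a minimal sketch; the application path
follows the examples above):
.. code-block:: python
    from bokeh.util.session_id import generate_session_id
    session_id = generate_session_id()  # signed when the secret-key settings are configured
    url = "http://localhost:{DEFAULT_PORT}/app_script?bokeh-session-id=" + session_id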
In both ``signed`` and ``external-signed`` mode, the secret key
must be kept secret; anyone with the key can generate a valid
session ID.
The secret key should be set in a ``BOKEH_SECRET_KEY`` environment
variable and should be a cryptographically random string with at
least 256 bits (32 bytes) of entropy. You can generate a new
secret key with the ``bokeh secret`` command.
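For example, on a Unix-like shell (an illustrative sketch):
.. code-block:: sh
    export BOKEH_SECRET_KEY=`bokeh secret`
    bokeh serve app_script.py --session-ids=signed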
Session Expiration Options
~~~~~~~~~~~~~~~~~~~~~~~~~~
To configure how often to check for unused sessions, set the
``--check-unused-sessions`` option:
.. code-block:: sh
bokeh serve app_script.py --check-unused-sessions 10000
The value is specified in milliseconds. The default interval for
checking for unused sessions is 17 seconds. Only positive integer
values are accepted.
To configure how long unused sessions last, set the
``--unused-session-lifetime`` option:
.. code-block:: sh
bokeh serve app_script.py --unused-session-lifetime 60000
The value is specified in milliseconds. The default lifetime interval
for unused sessions is 15 seconds. Only positive integer values are
accepted.
Logging Options
~~~~~~~~~~~~~~~
The logging level can be controlled by the ``--log-level`` argument:
.. code-block:: sh
bokeh serve app_script.py --log-level=debug
The available log levels are: {LOGLEVELS}
The log format can be controlled by the ``--log-format`` argument:
.. code-block:: sh
bokeh serve app_script.py --log-format="%(levelname)s: %(message)s"
The default log format is ``"{DEFAULT_LOG_FORMAT}"``.
'''
from __future__ import absolute_import
import logging
log = logging.getLogger(__name__)
import argparse
from bokeh.application import Application
from bokeh.resources import DEFAULT_SERVER_PORT
from bokeh.server.server import Server
from bokeh.util.string import nice_join
from bokeh.settings import settings
from os import getpid
from ..subcommand import Subcommand
from ..util import build_single_handler_applications, die
LOGLEVELS = ('debug', 'info', 'warning', 'error', 'critical')
SESSION_ID_MODES = ('unsigned', 'signed', 'external-signed')
DEFAULT_LOG_FORMAT = "%(asctime)s %(message)s"
__doc__ = __doc__.format(
DEFAULT_PORT=DEFAULT_SERVER_PORT,
LOGLEVELS=nice_join(LOGLEVELS),
SESSION_ID_MODES=nice_join(SESSION_ID_MODES),
DEFAULT_LOG_FORMAT=DEFAULT_LOG_FORMAT
)
base_serve_args = (
('--port', dict(
metavar = 'PORT',
type = int,
help = "Port to listen on",
default = None
)),
('--address', dict(
metavar = 'ADDRESS',
type = str,
help = "Address to listen on",
default = None,
)),
('--log-level', dict(
metavar = 'LOG-LEVEL',
action = 'store',
default = 'info',
choices = LOGLEVELS,
help = "One of: %s" % nice_join(LOGLEVELS),
)),
('--log-format', dict(
metavar ='LOG-FORMAT',
action = 'store',
default = DEFAULT_LOG_FORMAT,
help = "A standard Python logging format string (default: %r)" % DEFAULT_LOG_FORMAT.replace("%", "%%"),
)),
)
class Serve(Subcommand):
''' Subcommand to launch the Bokeh server.
'''
name = "serve"
help = "Run a Bokeh server hosting one or more applications"
args = base_serve_args + (
('files', dict(
metavar='DIRECTORY-OR-SCRIPT',
nargs='*',
help="The app directories or scripts to serve (serve empty document if not specified)",
default=None,
)),
('--args', dict(
metavar='COMMAND-LINE-ARGS',
nargs=argparse.REMAINDER,
help="Any command line arguments remaining are passed on to the application handler",
)),
('--develop', dict(
action='store_true',
help="Enable develop-time features that should not be used in production",
)),
('--show', dict(
action='store_true',
help="Open server app(s) in a browser",
)),
('--allow-websocket-origin', dict(
metavar='HOST[:PORT]',
action='append',
type=str,
help="Public hostnames which may connect to the Bokeh websocket",
)),
('--host', dict(
metavar='HOST[:PORT]',
action='append',
type=str,
help="Public hostnames to allow in requests",
)),
('--prefix', dict(
metavar='PREFIX',
type=str,
help="URL prefix for Bokeh server URLs",
default=None,
)),
('--keep-alive', dict(
metavar='MILLISECONDS',
type=int,
help="How often to send a keep-alive ping to clients, 0 to disable.",
default=None,
)),
('--check-unused-sessions', dict(
metavar='MILLISECONDS',
type=int,
help="How often to check for unused sessions",
default=None,
)),
('--unused-session-lifetime', dict(
metavar='MILLISECONDS',
type=int,
help="How long unused sessions last",
default=None,
)),
('--stats-log-frequency', dict(
metavar='MILLISECONDS',
type=int,
help="How often to log stats",
default=None,
)),
('--use-xheaders', dict(
action='store_true',
help="Prefer X-headers for IP/protocol information",
)),
('--session-ids', dict(
metavar='MODE',
action = 'store',
default = None,
choices = SESSION_ID_MODES,
help = "One of: %s" % nice_join(SESSION_ID_MODES),
)),
('--disable-index', dict(
action = 'store_true',
help = 'Do not use the default index on the root path',
)),
)
def invoke(self, args):
argvs = { f : args.args for f in args.files}
applications = build_single_handler_applications(args.files, argvs)
log_level = getattr(logging, args.log_level.upper())
logging.basicConfig(level=log_level, format=args.log_format)
if len(applications) == 0:
# create an empty application by default, typically used with output_server
applications['/'] = Application()
if args.keep_alive is not None:
if args.keep_alive == 0:
log.info("Keep-alive ping disabled")
else:
log.info("Keep-alive ping configured every %d milliseconds", args.keep_alive)
# rename to be compatible with Server
args.keep_alive_milliseconds = args.keep_alive
if args.check_unused_sessions is not None:
log.info("Check for unused sessions every %d milliseconds", args.check_unused_sessions)
# rename to be compatible with Server
args.check_unused_sessions_milliseconds = args.check_unused_sessions
if args.unused_session_lifetime is not None:
log.info("Unused sessions last for %d milliseconds", args.unused_session_lifetime)
# rename to be compatible with Server
args.unused_session_lifetime_milliseconds = args.unused_session_lifetime
if args.stats_log_frequency is not None:
log.info("Log statistics every %d milliseconds", args.stats_log_frequency)
# rename to be compatible with Server
args.stats_log_frequency_milliseconds = args.stats_log_frequency
server_kwargs = { key: getattr(args, key) for key in ['port',
'address',
'allow_websocket_origin',
'host',
'prefix',
'develop',
'keep_alive_milliseconds',
'check_unused_sessions_milliseconds',
'unused_session_lifetime_milliseconds',
'stats_log_frequency_milliseconds',
'use_xheaders',
]
if getattr(args, key, None) is not None }
server_kwargs['sign_sessions'] = settings.sign_sessions()
server_kwargs['secret_key'] = settings.secret_key_bytes()
server_kwargs['generate_session_ids'] = True
if args.session_ids is None:
# no --session-ids means use the env vars
pass
elif args.session_ids == 'unsigned':
server_kwargs['sign_sessions'] = False
elif args.session_ids == 'signed':
server_kwargs['sign_sessions'] = True
elif args.session_ids == 'external-signed':
server_kwargs['sign_sessions'] = True
server_kwargs['generate_session_ids'] = False
else:
raise RuntimeError("argparse should have filtered out --session-ids mode " +
args.session_ids)
if server_kwargs['sign_sessions'] and not server_kwargs['secret_key']:
die("To sign sessions, the BOKEH_SECRET_KEY environment variable must be set; " +
"the `bokeh secret` command can be used to generate a new key.")
server_kwargs['use_index'] = not args.disable_index
server = Server(applications, **server_kwargs)
if args.show:
# we have to defer opening in browser until we start up the server
def show_callback():
for route in applications.keys():
server.show(route)
server.io_loop.add_callback(show_callback)
if args.develop:
log.info("Using develop mode (do not enable --develop in production)")
address_string = ''
if server.address is not None and server.address != '':
address_string = ' address ' + server.address
log.info("Starting Bokeh server on port %d%s with applications at paths %r",
server.port,
address_string,
sorted(applications.keys()))
    log.info("Starting Bokeh server with process id: %d", getpid())
server.start()
|
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from django.conf import settings
from django.core.urlresolvers import reverse
from django import http
from django.test.utils import override_settings
from mox3.mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.admin.images import tables
IMAGE_METADATA_URL = reverse('horizon:admin:images:update_metadata',
kwargs={
"id": "007e7d55-fe1e-4c5c-bf08-44b4a4964822"})
class ImageCreateViewTest(test.BaseAdminViewTests):
@test.create_stubs({api.glance: ('image_list_detailed',)})
def test_admin_image_create_view_uses_admin_template(self):
filters = {'disk_format': 'aki'}
api.glance.image_list_detailed(
IsA(http.HttpRequest), filters=filters).AndReturn(
[self.images.list(), False, False])
filters = {'disk_format': 'ari'}
api.glance.image_list_detailed(
IsA(http.HttpRequest), filters=filters).AndReturn(
[self.images.list(), False, False])
self.mox.ReplayAll()
res = self.client.get(
reverse('horizon:admin:images:create'))
self.assertTemplateUsed(res, 'admin/images/create.html')
class ImagesViewTest(test.BaseAdminViewTests):
@test.create_stubs({api.glance: ('image_list_detailed',),
api.keystone: ('tenant_list',)})
def test_images_list(self):
filters = {'is_public': None}
api.glance.image_list_detailed(IsA(http.HttpRequest),
marker=None,
paginate=True,
filters=filters,
sort_dir='desc') \
.AndReturn([self.images.list(),
False, False])
# Test tenant list
api.keystone.tenant_list(IsA(http.HttpRequest)).\
AndReturn([self.tenants.list(), False])
self.mox.ReplayAll()
res = self.client.get(
reverse('horizon:admin:images:index'))
self.assertContains(res, 'test_tenant', 8, 200)
self.assertTemplateUsed(res, 'admin/images/index.html')
self.assertEqual(len(res.context['images_table'].data),
len(self.images.list()))
@override_settings(API_RESULT_PAGE_SIZE=2)
@test.create_stubs({api.glance: ('image_list_detailed',),
api.keystone: ('tenant_list',)})
def test_images_list_get_pagination(self):
images = self.images.list()[:5]
filters = {'is_public': None}
api.glance.image_list_detailed(IsA(http.HttpRequest),
marker=None,
paginate=True,
filters=filters,
sort_dir='desc') \
.AndReturn([images, True, True])
api.glance.image_list_detailed(IsA(http.HttpRequest),
marker=None,
paginate=True,
filters=filters,
sort_dir='desc') \
.AndReturn([images[:2], True, True])
api.glance.image_list_detailed(IsA(http.HttpRequest),
marker=images[2].id,
paginate=True,
filters=filters,
sort_dir='desc') \
.AndReturn([images[2:4], True, True])
api.glance.image_list_detailed(IsA(http.HttpRequest),
marker=images[4].id,
paginate=True,
filters=filters,
sort_dir='desc') \
.AndReturn([images[4:], True, True])
# Test tenant list
api.keystone.tenant_list(IsA(http.HttpRequest)).MultipleTimes().\
AndReturn([self.tenants.list(), False])
self.mox.ReplayAll()
url = reverse('horizon:admin:images:index')
res = self.client.get(url)
# get all
self.assertEqual(len(res.context['images_table'].data),
len(images))
self.assertTemplateUsed(res, 'admin/images/index.html')
self.assertContains(res, 'test_tenant', 6, 200)
res = self.client.get(url)
# get first page with 2 items
self.assertEqual(len(res.context['images_table'].data),
settings.API_RESULT_PAGE_SIZE)
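        # Build the "<pagination_param>=<marker id>" query string by hand to
        # simulate following the table's next-page link.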
params = "=".join([tables.AdminImagesTable._meta.pagination_param,
images[2].id])
url = "?".join([reverse('horizon:admin:images:index'), params])
res = self.client.get(url)
# get second page (items 2-4)
self.assertEqual(len(res.context['images_table'].data),
settings.API_RESULT_PAGE_SIZE)
self.assertContains(res, 'test_tenant', 3, 200)
params = "=".join([tables.AdminImagesTable._meta.pagination_param,
images[4].id])
url = "?".join([reverse('horizon:admin:images:index'), params])
res = self.client.get(url)
# get third page (item 5)
self.assertEqual(len(res.context['images_table'].data),
1)
self.assertContains(res, 'test_tenant', 2, 200)
@test.create_stubs({api.glance: ('image_get',
'metadefs_namespace_list',
'metadefs_namespace_get')})
def test_images_metadata_get(self):
image = self.images.first()
api.glance.image_get(
IsA(http.HttpRequest),
image.id
).AndReturn(image)
namespaces = self.metadata_defs.list()
api.glance.metadefs_namespace_list(IsA(http.HttpRequest), filters={
'resource_types': ['OS::Glance::Image']}).AndReturn(
(namespaces, False, False))
for namespace in namespaces:
api.glance.metadefs_namespace_get(
IsA(http.HttpRequest),
namespace.namespace,
'OS::Glance::Image'
).AndReturn(namespace)
self.mox.ReplayAll()
res = self.client.get(IMAGE_METADATA_URL)
self.assertTemplateUsed(res, 'admin/images/update_metadata.html')
self.assertContains(res, 'namespace_1')
self.assertContains(res, 'namespace_2')
self.assertContains(res, 'namespace_3')
self.assertContains(res, 'namespace_4')
@test.create_stubs({api.glance: ('image_get', 'image_update_properties')})
def test_images_metadata_update(self):
image = self.images.first()
api.glance.image_get(
IsA(http.HttpRequest),
image.id
).AndReturn(image)
api.glance.image_update_properties(
IsA(http.HttpRequest), image.id, ['image_type'],
hw_machine_type='mock_value').AndReturn(None)
self.mox.ReplayAll()
metadata = [{"value": "mock_value", "key": "hw_machine_type"}]
formData = {"metadata": json.dumps(metadata)}
res = self.client.post(IMAGE_METADATA_URL, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(success=1)
self.assertRedirectsNoFollow(
res, reverse('horizon:admin:images:index')
)
@override_settings(API_RESULT_PAGE_SIZE=2)
@test.create_stubs({api.glance: ('image_list_detailed',),
api.keystone: ('tenant_list',)})
def test_images_list_get_prev_pagination(self):
images = self.images.list()[:3]
filters = {'is_public': None}
api.glance.image_list_detailed(IsA(http.HttpRequest),
marker=None,
paginate=True,
filters=filters,
sort_dir='desc') \
.AndReturn([images, True, False])
api.glance.image_list_detailed(IsA(http.HttpRequest),
marker=None,
paginate=True,
filters=filters,
sort_dir='desc') \
.AndReturn([images[:2], True, True])
api.glance.image_list_detailed(IsA(http.HttpRequest),
marker=images[2].id,
paginate=True,
filters=filters,
sort_dir='desc') \
.AndReturn([images[2:], True, True])
api.glance.image_list_detailed(IsA(http.HttpRequest),
marker=images[2].id,
paginate=True,
filters=filters,
sort_dir='asc') \
.AndReturn([images[:2], True, True])
# Test tenant list
api.keystone.tenant_list(IsA(http.HttpRequest)).MultipleTimes().\
AndReturn([self.tenants.list(), False])
self.mox.ReplayAll()
url = reverse('horizon:admin:images:index')
res = self.client.get(url)
# get all
self.assertEqual(len(res.context['images_table'].data),
len(images))
self.assertTemplateUsed(res, 'admin/images/index.html')
self.assertContains(res, 'test_tenant', 4, 200)
res = self.client.get(url)
# get first page with 2 items
self.assertEqual(len(res.context['images_table'].data),
settings.API_RESULT_PAGE_SIZE)
self.assertContains(res, 'test_tenant', 3, 200)
params = "=".join([tables.AdminImagesTable._meta.pagination_param,
images[2].id])
url = "?".join([reverse('horizon:admin:images:index'), params])
res = self.client.get(url)
# get second page (item 3)
self.assertEqual(len(res.context['images_table'].data), 1)
self.assertContains(res, 'test_tenant', 2, 200)
params = "=".join([tables.AdminImagesTable._meta.prev_pagination_param,
images[2].id])
url = "?".join([reverse('horizon:admin:images:index'), params])
res = self.client.get(url)
# prev back to get first page with 2 items
self.assertEqual(len(res.context['images_table'].data),
settings.API_RESULT_PAGE_SIZE)
self.assertContains(res, 'test_tenant', 3, 200)
|
|
import numpy as np
from scipy.constants import pi
from numpy.fft import fftshift
from scipy.fftpack import fft, ifft
from six.moves import builtins
from cython_files.cython_integrand import *
from time import time
import sys
from numpy.testing import assert_allclose
import pickle as pl
#from scipy.fftpack import fftshift
import numba
from functools import lru_cache as cache
vectorize = numba.vectorize
jit = numba.jit
autojit = numba.autojit
# Pass through the @profile decorator if line profiler (kernprof) is not in use
# Thanks Paul!
try:
builtins.profile
except AttributeError:
def profile(func):
return func
trgt = 'cpu'
class Integrand(object):
"""
Serves as the intrgrand of the nonlinear term of the GNLSE. Care has been
taken to pick out the combination of multiplications needed for computation.
Additionally there is use of cython by default
"""
def __init__(self, gama, tsh, w_tiled, s, ram, cython_tick=True, timer=False):
self.data_minimum = ((0, 1), (0, 3), (0, 4), (0, 5), (0, 6),
(1, 1), (1, 3), (1, 4), (1, 5), (1, 6),
(2, 0), (2, 1), (2, 2), (2,
3), (2, 4), (2, 5), (2, 6),
(3, 3), (3, 4), (3, 5), (3, 6),
(4, 4), (4, 5), (4, 6), (5, 5))
self.fwm_map = (((2, 5), (3, 11), (4, 12, 6), (5, 13, 7), (6, 14, 8, 17)),
((1, 10), (2, 1), (3, 12, 2), (4, 13, 3),
(5, 14, 4, 17), (6, 15, 18)),
((0, 5), (1, 1), (2, 6, 2), (3, 7, 3),
(4, 8, 4, 17), (5, 9, 18), (6, 19, 21)),
((0, 11), (1, 12, 2), (2, 7, 3), (3, 14, 8, 4),
(4, 15, 9), (5, 16, 21), (6, 22)),
((0, 12, 6), (1, 13, 3), (2, 8, 4, 17),
(3, 15, 9), (4, 16, 19), (5, 20), (6, 24)),
((0, 13, 7), (1, 14, 4, 17), (2, 9, 18),
(3, 16, 21), (4, 20), (5, 23)),
((0, 14, 8, 17), (1, 15, 18), (2, 19, 21), (3, 22), (4, 24)))
self.gama = gama
self.tsh = tsh
self.w_tiled = w_tiled
self.fwm_map = np.asanyarray(self.fwm_map)
self.factors_xpm = xpm_coeff(ram.H, ram.fr)
self.factors_fwm = fwm_coeff(ram.H, ram.fr)
self.shape1, self.shape2 = self.w_tiled.shape
if s == 1 and cython_tick:
self.dAdzmm = self.cython_s1
elif s == 0 and cython_tick:
self.dAdzmm = self.cython_s0
elif s == 1 and not(cython_tick):
self.dAdzmm = self.python_s1
elif s == 0 and not(cython_tick):
self.dAdzmm = self.python_s0
if timer:
self.dAdzmm = self.timer
#@profile
def cython_s1(self, u0, dz):
return fwm_s1(u0, self.factors_xpm,
self.factors_fwm,dz * self.gama, self.tsh, self.w_tiled, dz,
self.shape1,self.shape2)
def cython_s0(self, u0, dz):
fwm_s0 = dAdzmm
return fwm_s0(u0, self.factors_xpm,
self.factors_fwm, dz * self.gama, self.tsh, self.w_tiled, dz,
self.shape1,self.shape2)
@profile
def SPM_XPM_FWM_python(self, u0):
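        # u0 is expected with shape (7, nt): one row per frequency band.
        # The (7, 7) factors_xpm matmul mixes the band intensities, while
        # the FWM maps below pick out the band products listed in
        # data_minimum.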
# SPM-XPM
u0_abs2 = np.abs(u0)**2
N = np.matmul(self.factors_xpm, u0_abs2) * u0
# FWM
        u0_multi = np.empty(
            [len(self.data_minimum), u0.shape[1]], dtype=np.complex128)
for i in range(len(self.data_minimum)):
u0_multi[i, :] = u0[self.data_minimum[i][0], :] \
* u0[self.data_minimum[i][1], :]
for i in range(u0.shape[0]):
for f in self.fwm_map[i]:
N[i, :] += (self.factors_fwm[f[0], f[1:], np.newaxis] *
u0_multi[f[1:], :]).sum(axis=0) * \
(u0[f[0], :].conjugate())
return N
def self_step_s1(self, N):
temp = ifft(self.w_tiled * fft(N))
return N + self.tsh[:, np.newaxis]*temp
@profile
def python_s1(self, u0, dz):
N = self.SPM_XPM_FWM_python(u0)
N = dz * self.gama[:, np.newaxis] * self.self_step_s1(N)
return N
@profile
def python_s0(self, u0, dz):
N = self.SPM_XPM_FWM_python(u0)
N = dz * self.gama[:, np.newaxis] * N
return N
def timer(self, u0, dz):
"""
Times the functions of python, cython etc.
"""
dt1, dt2, dt3, dt4 = [], [], [], []
NN = 1000
for i in range(NN):
t = time()
N1 = self.cython_s1(u0, dz)
dt1.append(time() - t)
t = time()
N2 = self.python_s1(u0, dz)
dt2.append(time() - t)
assert_allclose(N1, N2)
t = time()
N1 = self.cython_s0(u0, dz)
dt3.append(time() - t)
t = time()
N2 = self.python_s0(u0, dz)
dt4.append(time() - t)
assert_allclose(N1, N2)
print('cython_s1: {} +/- {}'.format(np.average(dt1), np.std(dt1)))
print('cython_s0: {} +/- {}'.format(np.average(dt3), np.std(dt3)))
print('python_s1: {} +/- {}'.format(np.average(dt2), np.std(dt2)))
print('python_s0: {} +/- {}'.format(np.average(dt4), np.std(dt4)))
print('s1 Cython is {} times faster than Python.'.format(np.average(dt2)/np.average(dt1)))
print('s0 Cython is {} times faster than Python.'.format(np.average(dt4)/np.average(dt3)))
        sys.exit()
def xpm_coeff(H, fr):
"""
XPM coefficients for the 7 band version that include Raman.
"""
c = Factors(H, fr)
coeff = np.zeros([7, 7], dtype=np.complex128)
for i in range(7):
for j in range(7):
coeff[i, j] = c.xpm(i-j)
return coeff
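# Minimal usage sketch for xpm_coeff (illustrative values only; in the solver
# H comes from the Raman response and fr is the Raman fraction):
#
#     H = np.ones(13, dtype=np.complex128)
#     coeff = xpm_coeff(H, fr=0.18)   # -> (7, 7) complex array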
class Factors(object):
    def __init__(self, H, fr):
        self.fr = fr
        # Reorder H so it can be indexed with both positive and negative
        # band offsets (negative indices pick from the tail).
        self.H = np.concatenate((H[6:], H[0:6]))
def xpm(self, i):
if i == 0:
return 1
else:
return self.H[i]*self.fr - self.fr + 2
def f(self, i_vec):
res = sum([self.H[i]*self.fr - self.fr + 1 for i in i_vec])
return res
def fwm_coeff(H, fr):
c = Factors(H, fr)
coeff = np.zeros([7, 25], dtype=np.complex128)
# 0
coeff[0, 5] = c.f([1])
coeff[0, 6] = c.f([1, 3])
coeff[0, 7] = c.f([1, 4])
coeff[0, 8] = c.f([1, 5])
coeff[0, 11] = c.f([1, 2])
coeff[0, 12] = c.f([2])
coeff[0, 13] = c.f([2, 3])
coeff[0, 14] = c.f([2, 4])
coeff[0, 17] = c.f([3])
# 1
coeff[1, 1] = c.f([2, -1])
coeff[1, 2] = c.f([3, -1])
coeff[1, 3] = c.f([4, -1])
coeff[1, 4] = c.f([5, -1])
coeff[1, 10] = c.f([1, -1])
coeff[1, 12] = c.f([1])
coeff[1, 13] = c.f([1, 2])
coeff[1, 14] = c.f([1, 3])
coeff[1, 15] = c.f([1, 4])
coeff[1, 17] = c.f([2])
coeff[1, 18] = c.f([2, 3])
# 2
coeff[2, 1] = c.f([1, -2])
coeff[2, 2] = c.f([2, -2])
coeff[2, 3] = c.f([3, -2])
coeff[2, 4] = c.f([4, -2])
coeff[2, 5] = c.f([-1])
coeff[2, 6] = c.f([1, -1])
coeff[2, 7] = c.f([2, -1])
coeff[2, 8] = c.f([3, -1])
coeff[2, 9] = c.f([4, -1])
coeff[2, 17] = c.f([1])
coeff[2, 18] = c.f([1, 2])
coeff[2, 19] = c.f([1, 3])
coeff[2, 21] = c.f([2])
# 3
coeff[3, 2] = c.f([1, -3])
coeff[3, 3] = c.f([2, -3])
coeff[3, 4] = c.f([3, -3])
coeff[3, 7] = c.f([1, -2])
coeff[3, 8] = c.f([2, -2])
coeff[3, 9] = c.f([3, -2])
coeff[3, 11] = c.f([-1, -2])
coeff[3, 12] = c.f([-1])
coeff[3, 14] = c.f([1, -1])
coeff[3, 15] = c.f([2, -1])
coeff[3, 16] = c.f([3, -1])
coeff[3, 21] = c.f([1])
coeff[3, 22] = c.f([1, 2])
# 4
coeff[4, 3] = c.f([1, -4])
coeff[4, 4] = c.f([2, -4])
coeff[4, 6] = c.f([-1, -3])
coeff[4, 8] = c.f([1, -3])
coeff[4, 9] = c.f([2, -3])
coeff[4, 12] = c.f([-2])
coeff[4, 13] = c.f([-1, -2])
coeff[4, 15] = c.f([1, -2])
coeff[4, 16] = c.f([2, -2])
coeff[4, 17] = c.f([-1])
coeff[4, 19] = c.f([1, -1])
coeff[4, 20] = c.f([2, -1])
coeff[4, 24] = c.f([1])
# 5
coeff[5, 4] = c.f([1, -5])
coeff[5, 7] = c.f([-1, -4])
coeff[5, 9] = c.f([1, -4])
coeff[5, 13] = c.f([-2, -3])
coeff[5, 14] = c.f([-1, -3])
coeff[5, 16] = c.f([1, -3])
coeff[5, 17] = c.f([-2])
coeff[5, 18] = c.f([-1, -2])
coeff[5, 20] = c.f([1, -2])
coeff[5, 21] = c.f([-1])
coeff[5, 23] = c.f([1, -1])
# 6
coeff[6, 8] = c.f([-1, -5])
coeff[6, 14] = c.f([-2, -4])
coeff[6, 15] = c.f([-1, -4])
coeff[6, 17] = c.f([-3])
coeff[6, 18] = c.f([-2, -3])
coeff[6, 19] = c.f([-1, -3])
coeff[6, 21] = c.f([-2])
coeff[6, 22] = c.f([-1, -2])
coeff[6, 24] = c.f([-1])
return coeff
@vectorize(['complex128(complex128,complex128)'], target=trgt)
def multi(x, y):
return x*y
@vectorize(['complex128(complex128,complex128)'], target=trgt)
def add(x, y):
return x + y
|
|
"""The tests for the openalpr cloud platform."""
import asyncio
from homeassistant.components import camera, image_processing as ip
from homeassistant.components.openalpr_cloud.image_processing import OPENALPR_API_URL
from homeassistant.core import callback
from homeassistant.setup import setup_component
from tests.async_mock import PropertyMock, patch
from tests.common import assert_setup_component, get_test_home_assistant, load_fixture
from tests.components.image_processing import common
class TestOpenAlprCloudSetup:
"""Test class for image processing."""
def setup_method(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
def test_setup_platform(self):
"""Set up platform with one entity."""
config = {
ip.DOMAIN: {
"platform": "openalpr_cloud",
"source": {"entity_id": "camera.demo_camera"},
"region": "eu",
"api_key": "sk_abcxyz123456",
},
"camera": {"platform": "demo"},
}
with assert_setup_component(1, ip.DOMAIN):
setup_component(self.hass, ip.DOMAIN, config)
self.hass.block_till_done()
assert self.hass.states.get("image_processing.openalpr_demo_camera")
def test_setup_platform_name(self):
"""Set up platform with one entity and set name."""
config = {
ip.DOMAIN: {
"platform": "openalpr_cloud",
"source": {"entity_id": "camera.demo_camera", "name": "test local"},
"region": "eu",
"api_key": "sk_abcxyz123456",
},
"camera": {"platform": "demo"},
}
with assert_setup_component(1, ip.DOMAIN):
setup_component(self.hass, ip.DOMAIN, config)
self.hass.block_till_done()
assert self.hass.states.get("image_processing.test_local")
def test_setup_platform_without_api_key(self):
"""Set up platform with one entity without api_key."""
config = {
ip.DOMAIN: {
"platform": "openalpr_cloud",
"source": {"entity_id": "camera.demo_camera"},
"region": "eu",
},
"camera": {"platform": "demo"},
}
with assert_setup_component(0, ip.DOMAIN):
setup_component(self.hass, ip.DOMAIN, config)
def test_setup_platform_without_region(self):
"""Set up platform with one entity without region."""
config = {
ip.DOMAIN: {
"platform": "openalpr_cloud",
"source": {"entity_id": "camera.demo_camera"},
"api_key": "sk_abcxyz123456",
},
"camera": {"platform": "demo"},
}
with assert_setup_component(0, ip.DOMAIN):
setup_component(self.hass, ip.DOMAIN, config)
class TestOpenAlprCloud:
"""Test class for image processing."""
def setup_method(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
config = {
ip.DOMAIN: {
"platform": "openalpr_cloud",
"source": {"entity_id": "camera.demo_camera", "name": "test local"},
"region": "eu",
"api_key": "sk_abcxyz123456",
},
"camera": {"platform": "demo"},
}
with patch(
"homeassistant.components.openalpr_cloud.image_processing."
"OpenAlprCloudEntity.should_poll",
new_callable=PropertyMock(return_value=False),
):
setup_component(self.hass, ip.DOMAIN, config)
self.hass.block_till_done()
self.alpr_events = []
@callback
def mock_alpr_event(event):
"""Mock event."""
self.alpr_events.append(event)
self.hass.bus.listen("image_processing.found_plate", mock_alpr_event)
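        # Query parameters the integration is expected to send to the
        # OpenALPR cloud API; the mocked POSTs below only match on these.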
self.params = {
"secret_key": "sk_abcxyz123456",
"tasks": "plate",
"return_image": 0,
"country": "eu",
}
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
def test_openalpr_process_image(self, aioclient_mock):
"""Set up and scan a picture and test plates from event."""
aioclient_mock.post(
OPENALPR_API_URL,
params=self.params,
text=load_fixture("alpr_cloud.json"),
status=200,
)
with patch(
"homeassistant.components.camera.async_get_image",
return_value=camera.Image("image/jpeg", b"image"),
):
common.scan(self.hass, entity_id="image_processing.test_local")
self.hass.block_till_done()
state = self.hass.states.get("image_processing.test_local")
assert len(aioclient_mock.mock_calls) == 1
assert len(self.alpr_events) == 5
assert state.attributes.get("vehicles") == 1
assert state.state == "H786P0J"
event_data = [
event.data
for event in self.alpr_events
if event.data.get("plate") == "H786P0J"
]
assert len(event_data) == 1
assert event_data[0]["plate"] == "H786P0J"
assert event_data[0]["confidence"] == float(90.436699)
assert event_data[0]["entity_id"] == "image_processing.test_local"
def test_openalpr_process_image_api_error(self, aioclient_mock):
"""Set up and scan a picture and test api error."""
aioclient_mock.post(
OPENALPR_API_URL,
params=self.params,
text="{'error': 'error message'}",
status=400,
)
with patch(
"homeassistant.components.camera.async_get_image",
return_value=camera.Image("image/jpeg", b"image"),
):
common.scan(self.hass, entity_id="image_processing.test_local")
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
assert len(self.alpr_events) == 0
def test_openalpr_process_image_api_timeout(self, aioclient_mock):
"""Set up and scan a picture and test api error."""
aioclient_mock.post(
OPENALPR_API_URL, params=self.params, exc=asyncio.TimeoutError()
)
with patch(
"homeassistant.components.camera.async_get_image",
return_value=camera.Image("image/jpeg", b"image"),
):
common.scan(self.hass, entity_id="image_processing.test_local")
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
assert len(self.alpr_events) == 0
|
|
# 3rd party imports
from bidi.algorithm import get_display as apply_bidi
# Django imports
from django.conf import settings
# Project imports
from .base import TestGeneratePdfBase
from .factories import create_voters
from .utils_for_tests import extract_pdf_page, extract_textlines, clean_textlines, unwrap_lines
from ..arabic_reshaper import reshape
from ..generate_pdf import generate_pdf
from ..utils import truncate_center_name, format_name
from libya_elections.constants import ARABIC_COMMA, MALE, FEMALE, UNISEX
class TestGeneratePdfNoRegistrants(TestGeneratePdfBase):
"""Compare the word-by-word content of the PDF with expected content when there are no voters"""
def setUp(self):
super(TestGeneratePdfNoRegistrants, self).setUp()
self.voter_roll = []
def test_blank_page_content_male(self):
"""tests that the "blank" page explains why it is blank (no voters)"""
generate_pdf(self.filename, self.center, self.voter_roll, MALE)
# Build a list of the lines I expect to see.
expected_lines = []
expected_lines.append(self.STRINGS['center_header_prefix'])
expected_lines.append(self.STRINGS['center_list_header'])
expected_lines.append(self.STRINGS['no_male_registrants'])
expected_lines.append("1 / 1")
# Now see what was actually in the PDF and compare to expected.
xml = extract_pdf_page(self.filename, 1)
textlines = extract_textlines(xml)
actual_lines = clean_textlines(textlines)
self.assertEqual(expected_lines, actual_lines)
for textline in textlines:
self.assertCorrectFontsInUse(textline)
def test_blank_page_content_female(self):
"""tests that the "blank" page explains why it is blank (no voters)"""
# Build a list of the lines I expect to see.
generate_pdf(self.filename, self.center, self.voter_roll, FEMALE)
expected_lines = []
expected_lines.append(self.STRINGS['center_header_prefix'])
expected_lines.append(self.STRINGS['center_list_header'])
expected_lines.append(self.STRINGS['no_female_registrants'])
expected_lines.append("1 / 1")
# Now see what was actually in the PDF and compare to expected.
xml = extract_pdf_page(self.filename, 1)
textlines = extract_textlines(xml)
actual_lines = clean_textlines(textlines)
self.assertEqual(expected_lines, actual_lines)
for textline in textlines:
self.assertCorrectFontsInUse(textline)
class TestGeneratePdfGenderParam(TestGeneratePdfBase):
"""Ensure that passing UNISEX to generate_pdf() raises an error.
This is a small test that didn't seem to fit elsewhere.
"""
def test_gender_param(self):
self.voter_roll = create_voters(1, self.gender)
with self.assertRaises(ValueError):
generate_pdf(self.filename, self.center, self.voter_roll, UNISEX)
class GeneratePdfContentTestMixin(object):
"""Mixin the provides the main methods tested in several test cases in this file (below).
These methods compare the actual word-by-word content of the PDF with expected content.
There's no unisex tests needed here because that concept only matters when dealing with
polling stations.
"""
def test_cover_content(self):
"""tests that the cover page contains the expected text"""
# Build a list of the lines I expect to see.
expected_lines = []
key = 'center_book_cover' if self.center_book else 'center_list_cover'
expected_lines += self.STRINGS[key]
# These are constructed "backwards" relative to how the actual code does it. It's
# necessary to do so because the text is laid out RtoL in the PDF.
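        # e.g. '{} :{}'.format('x', 'label') yields 'x :label', which an RtoL
        # reader perceives as 'label: x'.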
expected_lines.append('{} :{}'.format(self.STRINGS['female'], self.STRINGS['gender']))
expected_lines.append('{} :{}'.format(self.center.center_id, self.STRINGS['center_number']))
center_name = apply_bidi(reshape(self.center.name))
expected_lines.append('{} :{}'.format(center_name, self.STRINGS['center_name']))
copied_by = self.center.copied_by.all()
if self.center.copy_of:
expected_lines.append('{} :{}'.format(self.center.copy_of.center_id,
self.STRINGS['copy_of']))
elif copied_by:
copied_by = [center.center_id for center in copied_by]
copied_by = (' ' + ARABIC_COMMA).join(map(str, reversed(copied_by)))
expected_lines.append('{} :{}'.format(copied_by, self.STRINGS['copied_by_plural']))
subconstituency_id = self.center.subconstituency.id
subconstituency_name = reshape(self.center.subconstituency.name_arabic)
subconstituency_name = apply_bidi(subconstituency_name)
subconstituency = '{} / {} :{}'.format(subconstituency_name, subconstituency_id,
self.STRINGS['subconstituency_name'])
expected_lines.append(subconstituency)
# Now see what was actually in the PDF and compare to expected.
xml = extract_pdf_page(self.filename, 0)
textlines = extract_textlines(xml)
actual_lines = clean_textlines(textlines)
# Did center name wrap? If so, unwrap.
if expected_lines[4].startswith(actual_lines[5]):
actual_lines = unwrap_lines(actual_lines, 4)
has_copy_info = (self.center.copy_of or self.center.copied_by)
if has_copy_info:
# Did center name wrap? If so, unwrap.
if expected_lines[5].startswith(actual_lines[6]):
actual_lines = unwrap_lines(actual_lines, 5)
# Did subcon name wrap? If so, unwrap.
offset = 1 if has_copy_info else 0
if len(actual_lines) >= 7 + offset:
if expected_lines[5 + offset].startswith(actual_lines[6 + offset]):
actual_lines = unwrap_lines(actual_lines, 5 + offset)
self.assertEqual(expected_lines, actual_lines)
for textline in textlines:
self.assertCorrectFontsInUse(textline)
def test_inner_page_content(self):
"""tests that the non-cover pages of a multipage PDF contain the expected text"""
        # 3 pages = cover + 2 pages of names
self.assertEqual(self.n_pages, 3)
# Build a list of the lines I expect to see. I don't care about the cover page, just
# the inner pages.
expected_lines = []
page_header = []
page_header.append(self.STRINGS['center_header_prefix'])
key = 'center_book_header' if self.center_book else 'center_list_header'
page_header.append(self.STRINGS[key])
mf_string = self.STRINGS['female']
page_header.append('{} :{}'.format(mf_string, self.STRINGS['gender']))
page_header.append('{} :{}'.format(self.center.center_id, self.STRINGS['center_number']))
center_name = apply_bidi(truncate_center_name(reshape(self.center.name)))
page_header.append('{} :{}'.format(center_name, self.STRINGS['center_name']))
expected_lines += page_header
expected_lines.append(self.STRINGS['the_names'])
for voter in self.voter_roll[:self.n_voters - 1]:
expected_lines.append(apply_bidi(reshape(format_name(voter))))
expected_lines.append(mf_string)
# '2 / 1' is the 'page N/n_pages' from the footer
expected_lines.append('2 / 1')
# Now see what was actually in the PDF and compare to expected.
xml = extract_pdf_page(self.filename, 1)
textlines = extract_textlines(xml)
actual_lines = clean_textlines(textlines)
self.assertEqual(expected_lines, actual_lines)
for textline in textlines:
self.assertCorrectFontsInUse(textline)
# OK now test the second (final) inner page. It only has one voter on it.
expected_lines = page_header
expected_lines.append(self.STRINGS['the_names'])
for voter in self.voter_roll[-1:]:
expected_lines.append(apply_bidi(reshape(format_name(voter))))
expected_lines.append(mf_string)
# '2 / 2' is the 'page N/n_pages' from the footer
expected_lines.append('2 / 2')
# Now see what was actually in the PDF and compare to expected.
xml = extract_pdf_page(self.filename, 2)
textlines = extract_textlines(xml)
actual_lines = clean_textlines(textlines)
self.assertEqual(expected_lines, actual_lines)
for textline in textlines:
self.assertCorrectFontsInUse(textline)
class TestGeneratePdfContentCenterList(TestGeneratePdfBase, GeneratePdfContentTestMixin):
"""Invoke GeneratePdfContentTestMixin for center lists.
Center lists are only used during the in-person phase.
"""
def setUp(self):
super(TestGeneratePdfContentCenterList, self).setUp()
self.center_book = False
# Create a PDF that will spill to multiple pages.
self.n_voters = settings.ROLLGEN_REGISTRATIONS_PER_PAGE_REGISTRATION + 1
self.voter_roll = create_voters(self.n_voters, FEMALE)
self.n_pages = generate_pdf(self.filename, self.center, self.voter_roll, FEMALE)
class TestGeneratePdfContentCenterBook(TestGeneratePdfBase):
"""Invoke GeneratePdfContentTestMixin for center books.
Center books are only used during the exhibitions phase.
"""
def setUp(self):
super(TestGeneratePdfContentCenterBook, self).setUp()
self.center_book = True
self.n_pages = generate_pdf(self.filename, self.center, self.voter_roll, FEMALE)
class TestCopyOfCenter(TestGeneratePdfBase, GeneratePdfContentTestMixin):
"""Invoke GeneratePdfContentTestMixin for a copy center.
This class uses a center that is a copy in order to exercise the copy_of code branch.
"""
def setUp(self):
super(TestCopyOfCenter, self).setUp()
self.center_book = False
self.n_voters = 5
self.voter_roll = create_voters(self.n_voters, FEMALE)
# Any of the copy centers will do.
self.center = self.copy_centers[2]
self.n_pages = generate_pdf(self.filename, self.center, self.voter_roll, FEMALE)
def test_inner_page_content(self):
# This doesn't need to be re-tested for copy centers; they only affect the cover page.
self.assertTrue(True)
class TestCopiedByCenter(TestGeneratePdfBase, GeneratePdfContentTestMixin):
"""Invoke GeneratePdfContentTestMixin for a copied center.
This class uses a center that is copied by other centers in order to exercise the copied_by
code branch.
"""
def setUp(self):
super(TestCopiedByCenter, self).setUp()
self.center_book = False
self.n_voters = 5
self.voter_roll = create_voters(self.n_voters, FEMALE)
self.center = self.original_center
self.n_pages = generate_pdf(self.filename, self.center, self.voter_roll, FEMALE)
def test_inner_page_content(self):
# This doesn't need to be re-tested for copy centers; they only affect the cover page.
self.assertTrue(True)
|
|
#!/usr/bin/env python3
from ircbot import IRCBot
from threading import Thread
from concurrent import futures
from time import sleep
import uuid
import re
"""
IRC Bot able to sync with other peers and to know them. Then it can be extended
to support stuff like coordinated searches and so on
"""
class Botsync:
started = False
peers = set()
unique_id = None
advertised = False
recved_jobs_pending = []
recved_jobs_hired = []
recved_jobs_working = []
emitted_jobs_sentout = []
emitted_jobs_accepted = []
jobbers = set()
known_results = dict()
"""TODO: fetch the channel from the irc connector"""
def __init__(self, irc, channel, capacity = 3):
self.unique_id = str(uuid.uuid4())
self._irc = irc
self._channel = channel
self._capacity = capacity
"""Add a work executor"""
def add_work_executor(self, cutor):
self.jobbers.add(cutor)
"""Dispatch job"""
def dispatch_job(self):
with futures.ThreadPoolExecutor(max_workers=3) as executor:
promises = {}
            # Iterate over a copy: recved_jobs_hired is mutated inside the loop.
            for t in list(self.recved_jobs_hired):
                for jobber in self.jobbers:
                    if jobber.would_accept(t[1]):
                        self.recved_jobs_hired.remove(t)
                        self.recved_jobs_working.append((t, jobber))
                        promises[executor.submit(jobber.perform, t[1])] = t
            for future in futures.as_completed(promises):
                job = promises[future]
                r = future.result()
                self._irc.send_chat(self._channel, "JOBRESULT:%s:%s" % (job[0], r))
                self.known_results[job[0]] = r
"""schedule a start at the end of MOTD (hackish Freenodeism)"""
def schedule_start(self):
        self._irc.add_regex_callback(re.compile(r".*MOTD.*"), self._start)
"""announce oneself to other bots, many times if needed"""
def announce(self, dest=None):
if dest is None:
dest = self._channel
self._irc.send_chat(dest, "BOT:"+self.unique_id)
"""say hello first time"""
def advertise(self):
if not self.advertised:
self._irc.send_chat(self._channel, "ADV:"+self.unique_id)
self.advertised = True
"""handle other peer announcements"""
def parse_announce(self, user, host, dest, mesg):
self.peers.add((user, mesg[4:]))
print(self._irc._nick, "New peer!", "parse_announce", self.peers)
"""handle other peer announcements and announce oneself"""
def parse_advertised(self, user, host, dest, mesg):
self.peers.add((user, mesg[4:]))
print(self._irc._nick, "New peer!", "parse_advertised", self.peers)
self.announce(user)
"""as worker. handle a job proposal: send a CLAIM request to the original
author to claim the job. If he agrees, then we will perform the job otherwise
we will just discard it"""
def handle_job_proposal(self, user, host, dest, mesg):
        _, jobid, parameters = mesg.split(sep=':', maxsplit=2)
print(self._irc._nick,"handle_job_proposal",(jobid,parameters))
if len(self.recved_jobs_pending) < self._capacity:
self.recved_jobs_pending.append((jobid, parameters))
self._irc.send_chat(user, "CLAIM:"+jobid)
"""handle job result"""
def handle_job_result(self, user, host, dest, mesg):
        _, jobid, result = mesg.split(sep=':', maxsplit=2)
self.known_results[jobid] = result
"""as job offerer. handle a peer's claim for a job"""
def handle_job_claim(self, user, host, dest, mesg):
        _, jobid = mesg.split(sep=':', maxsplit=1)
if len([ x for x in self.emitted_jobs_accepted if x[0] == jobid])>0:
self._irc.send_chat(user, 'NOTHANKS:'+jobid)
print(self._irc._nick,"handle_job_claim",jobid,"NOTHANKS")
else:
self.emitted_jobs_accepted.append((jobid,[x for x in self.peers if x[0]==user][0]))
self._irc.send_chat(user, 'THANKS:'+jobid)
print(self._irc._nick,"handle_job_claim",jobid,"THANKS")
def handle_hire_accepted(self, user, host, dest, mesg):
        _, jobid = mesg.split(sep=':', maxsplit=1)
print(self._irc._nick,"hire_accepted",jobid)
self.recved_jobs_hired.append([x for x in self.recved_jobs_pending if x[0] == jobid][0])
try:
self.recved_jobs_pending.remove([x for x in self.recved_jobs_pending if x[0] == jobid][0])
except ValueError:
pass
Thread(target=self.dispatch_job).start()
# no need to join that thread, it's running in another thread that's joined
def handle_hire_refused(self, user, host, dest, mesg):
        _, jobid = mesg.split(sep=':', maxsplit=1)
print(self._irc._nick,"hire_refused",jobid)
try:
self.recved_jobs_pending.remove([x for x in self.recved_jobs_pending if x[0] == jobid][0])
except ValueError:
pass
"""tell who I know as peers"""
def tell_who_you_know(self, user, host, dest, mesg):
self._irc.send_chat(dest, "I know " + ", ".join([ x[0] for x in self.peers ]))
"""Report jobs"""
def report_my_jobs(self, user, host, dest, mesg):
self._irc.send_chat(dest, "Emitted jobs sentout: " + ", ".join([ x[0] for x in self.emitted_jobs_sentout ]))
self._irc.send_chat(dest, "Emitted jobs accepted: " + ", ".join([ x[0] for x in self.emitted_jobs_accepted ]))
self._irc.send_chat(dest, "Received jobs pending: " + ", ".join([ x[0] for x in self.recved_jobs_pending ]))
self._irc.send_chat(dest, "Received jobs hired: " + ", ".join([ x[0] for x in self.recved_jobs_hired ]))
"""Send a job offer to the peers"""
def emit_a_job(self, user, host, dest, mesg):
        _, jobid, parameters = mesg.split(sep=':', maxsplit=2)
job = (jobid, parameters)
if len([ x for x in self.emitted_jobs_sentout if x[0]==jobid])==0:
self.emitted_jobs_sentout.append(job)
self._irc.send_chat(self._channel,"JOB:%s:%s" % job)
"""Send a result (trigger parameter is a job id)"""
def get_known_result(self, user, host, dest, mesg):
        _, wantedkey = mesg.split(sep=':', maxsplit=1)
if wantedkey in self.known_results:
self._irc.send_chat(user, "RESULT:%s:%s" % (wantedkey, self.known_results[wantedkey]))
self._irc.send_chat(dest, "RESULT:%s:%s" % (wantedkey, self.known_results[wantedkey]))
else:
self._irc.send_chat(user, "UNKNOWN_RESULT:%s" % (wantedkey))
self._irc.send_chat(dest, "UNKNOWN_RESULT:%s" % (wantedkey))
"""handle messages"""
def _start(self):
# peer recognition
        self._irc.add_privmsg_callback(re.compile(r"^ADV:\S+"), self.parse_advertised)
        self._irc.add_privmsg_callback(re.compile(r"^BOT:\S+"), self.parse_announce)
# handle a job proposal
self._irc.add_privmsg_callback(re.compile("^JOB:([^:]+):(.*)+"), self.handle_job_proposal)
# handle a job claim
self._irc.add_privmsg_callback(re.compile("^CLAIM:([^:]+)"), self.handle_job_claim)
# handle (non-)hiring
self._irc.add_privmsg_callback(re.compile("^THANKS:([^:]+)"), self.handle_hire_accepted)
self._irc.add_privmsg_callback(re.compile("^NOTHANKS:([^:]+)"), self.handle_hire_refused)
# handle a job result announcement
self._irc.add_privmsg_callback(re.compile("^JOBRESULT:([^:]+):(.*)"), self.handle_job_result)
# some triggers
self._irc.add_privmsg_callback(re.compile("!whoyouknow"), self.tell_who_you_know)
self._irc.add_privmsg_callback(re.compile("!jobs"), self.report_my_jobs)
self._irc.add_privmsg_callback(re.compile("!emitjob:([^:]+):(.*)"), self.emit_a_job)
self._irc.add_privmsg_callback(re.compile("!getresult:(.*)"), self.get_known_result)
# start the real thing
self._irc.join_channel_then_cb(self._channel, self.advertise)
"""this is a stupid job planner that computes badly basic stuff such as powers,
after sleeping for a while"""
class StupidMathsWorker:
def would_accept(self, parameters):
split = parameters.split(',')
if split[0]=="pow" or split[0]=="fibonacci":
return True
return False
def perform(self, parameters):
split = parameters.split(',')
sleep(5)
if split[0]=="pow":
f = float(split[1])
e = float(split[2])
return f**e
if split[0]=="fibonacci":
a = [0, 1]
tgt = int(split[1])
v = 0
while tgt >= 2:
v = a[0] + a[1]
a[0] = a[1]
a[1] = v
tgt -= 1
return v
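# Example triggers, typed into the IRC channel (the job id is illustrative):
#   !emitjob:job1:pow,2,10   -> a peer computes 2.0**10.0 = 1024.0
#   !getresult:job1          -> ask a peer for the stored result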
if __name__ == '__main__':
import sys
c = IRCBot(sys.argv[1], verbosity=True)
def exampleexitcallback(user, host, dest, mesg):
if (user == 'ChloeD'):
print("Oh, hello ChloeD")
c.send_chat("#bottest", "I'm leaving now")
c.disconnect()
sys.exit(0)
c.add_privmsg_callback(re.compile("!exit"), exampleexitcallback)
bot = Botsync(c, "#bottest")
bot.add_work_executor(StupidMathsWorker())
t1 = Thread(target=lambda:c.connect("irc.freenode.net", 6667, "#bottest"))
t2 = Thread(target=bot.schedule_start)
t1.start()
t2.start()
    for x in (t1, t2):
        x.join()
|
|
import pytest
from mopidy import models
from mopidy_spotify import images
@pytest.fixture
def img_provider(provider):
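    # Reset the module-level image cache so results from earlier tests
    # cannot leak into later ones (see test_results_are_cached below).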
images._cache = {}
return provider
def test_get_artist_images(web_client_mock, img_provider):
uris = [
"spotify:artist:4FCGgZrVQtcbDFEap3OAb2",
"http://open.spotify.com/artist/0Nsz79ZcE8E4i3XZhCzZ1l",
]
web_client_mock.get.return_value = {
"artists": [
{
"id": "4FCGgZrVQtcbDFEap3OAb2",
"images": [
{"height": 640, "url": "img://1/a", "width": 640},
{"height": 300, "url": "img://1/b", "width": 300},
],
},
{
"id": "0Nsz79ZcE8E4i3XZhCzZ1l",
"images": [{"height": 64, "url": "img://2/a", "width": 64}],
},
]
}
result = img_provider.get_images(uris)
web_client_mock.get.assert_called_once_with(
"artists",
params={"ids": "4FCGgZrVQtcbDFEap3OAb2,0Nsz79ZcE8E4i3XZhCzZ1l"},
)
assert len(result) == 2
assert sorted(result.keys()) == sorted(uris)
assert len(result[uris[0]]) == 2
assert len(result[uris[1]]) == 1
image1a = result[uris[0]][0]
assert isinstance(image1a, models.Image)
assert image1a.uri == "img://1/a"
assert image1a.height == 640
assert image1a.width == 640
image1b = result[uris[0]][1]
assert isinstance(image1b, models.Image)
assert image1b.uri == "img://1/b"
assert image1b.height == 300
assert image1b.width == 300
image2a = result[uris[1]][0]
assert isinstance(image2a, models.Image)
assert image2a.uri == "img://2/a"
assert image2a.height == 64
assert image2a.width == 64
def test_get_album_images(web_client_mock, img_provider):
uris = ["http://play.spotify.com/album/1utFPuvgBHXzLJdqhCDOkg"]
web_client_mock.get.return_value = {
"albums": [
{
"id": "1utFPuvgBHXzLJdqhCDOkg",
"images": [{"height": 640, "url": "img://1/a", "width": 640}],
}
]
}
result = img_provider.get_images(uris)
web_client_mock.get.assert_called_once_with(
"albums", params={"ids": "1utFPuvgBHXzLJdqhCDOkg"}
)
assert len(result) == 1
assert sorted(result.keys()) == sorted(uris)
assert len(result[uris[0]]) == 1
image = result[uris[0]][0]
assert isinstance(image, models.Image)
assert image.uri == "img://1/a"
assert image.height == 640
assert image.width == 640
def test_get_track_images(web_client_mock, img_provider):
uris = ["spotify:track:41shEpOKyyadtG6lDclooa"]
web_client_mock.get.return_value = {
"tracks": [
{
"id": "41shEpOKyyadtG6lDclooa",
"album": {
"uri": "spotify:album:1utFPuvgBHXzLJdqhCDOkg",
"images": [
{"height": 640, "url": "img://1/a", "width": 640}
],
},
}
]
}
result = img_provider.get_images(uris)
web_client_mock.get.assert_called_once_with(
"tracks", params={"ids": "41shEpOKyyadtG6lDclooa"}
)
assert len(result) == 1
assert sorted(result.keys()) == sorted(uris)
assert len(result[uris[0]]) == 1
image = result[uris[0]][0]
assert isinstance(image, models.Image)
assert image.uri == "img://1/a"
assert image.height == 640
assert image.width == 640
def test_get_relinked_track_images(web_client_mock, img_provider):
uris = ["spotify:track:4nqN0p0FjfH39G3hxeuKad"]
web_client_mock.get.return_value = {
"tracks": [
{
"id": "39S0DVDKeneEjsq4pV45PT",
"linked_from": {
"id": "4nqN0p0FjfH39G3hxeuKad",
"type": "track",
"uri": "spotify:track:4nqN0p0FjfH39G3hxeuKad",
},
"album": {
"uri": "spotify:album:1utFPuvgBHXzLJdqhCDOkg",
"images": [
{"height": 640, "url": "img://1/a", "width": 640}
],
},
}
]
}
result = img_provider.get_images(uris)
web_client_mock.get.assert_called_once_with(
"tracks", params={"ids": "4nqN0p0FjfH39G3hxeuKad"}
)
assert len(result) == 1
assert sorted(result.keys()) == sorted(uris)
assert len(result[uris[0]]) == 1
image = result[uris[0]][0]
assert isinstance(image, models.Image)
assert image.uri == "img://1/a"
assert image.height == 640
assert image.width == 640
def test_get_playlist_image(web_client_mock, img_provider):
uris = ["spotify:playlist:41shEpOKyyadtG6lDclooa"]
web_client_mock.get.return_value = {
"id": "41shEpOKyyadtG6lDclooa",
"images": [{"height": 640, "url": "img://1/a", "width": 640}],
}
result = img_provider.get_images(uris)
web_client_mock.get.assert_called_once_with(
"playlists/41shEpOKyyadtG6lDclooa"
)
assert len(result) == 1
assert sorted(result.keys()) == sorted(uris)
assert len(result[uris[0]]) == 1
image = result[uris[0]][0]
assert isinstance(image, models.Image)
assert image.uri == "img://1/a"
assert image.height == 640
assert image.width == 640
def test_results_are_cached(web_client_mock, img_provider):
uris = ["spotify:track:41shEpOKyyadtG6lDclooa"]
web_client_mock.get.return_value = {
"tracks": [
{
"id": "41shEpOKyyadtG6lDclooa",
"album": {
"uri": "spotify:album:1utFPuvgBHXzLJdqhCDOkg",
"images": [
{"height": 640, "url": "img://1/a", "width": 640}
],
},
}
]
}
result1 = img_provider.get_images(uris)
result2 = img_provider.get_images(uris)
assert web_client_mock.get.call_count == 1
assert result1 == result2
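# The Spotify Web API caps these lookup endpoints at 50 IDs per request, so
# the provider is expected to split larger batches into multiple calls.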
def test_max_50_ids_per_request(web_client_mock, img_provider):
uris = [f"spotify:track:{i}" for i in range(51)]
web_client_mock.get.return_value = {}
img_provider.get_images(uris)
assert web_client_mock.get.call_count == 2
request_ids_1 = web_client_mock.get.call_args_list[0][1]["params"]["ids"]
assert request_ids_1 == ",".join(str(i) for i in range(50))
request_ids_2 = web_client_mock.get.call_args_list[1][1]["params"]["ids"]
assert request_ids_2 == "50"
def test_invalid_uri_fails(img_provider):
with pytest.raises(ValueError) as exc:
img_provider.get_images(["foo:bar"])
assert str(exc.value) == "Could not parse 'foo:bar' as a Spotify URI"
def test_no_uris_gives_no_results(img_provider):
result = img_provider.get_images([])
assert result == {}
def test_service_returns_empty_result(web_client_mock, img_provider):
web_client_mock.get.return_value = {"tracks": [{}]}
result = img_provider.get_images(["spotify:track:41shEpOKyyadtG6lDclooa"])
assert result == {}
|
|
from __future__ import print_function
import sys, hashlib, time, threading, socket, signal, os, re
import random, sqlite3, requests, pickle, feedparser
try:
import queue
except ImportError:
import Queue as queue
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
from . import param, normalize, util, transform, filters, dbop, autodiscovery
import imp
#socket.setdefaulttimeout(10)
feedparser.USER_AGENT = param.user_agent
class ParseError(Exception):
pass
class AutoDiscoveryError(Exception):
pass
class FeedAlreadyExists(Exception):
pass
class UnknownError(Exception):
pass
ratings = [
('all', 'all', 'All articles', 'item_rating is not null'),
('unread', 'unread', 'Unread only', 'item_rating = 0'),
('down', 'uninteresting', 'Uninteresting only','item_rating = -1'),
('up', 'interesting', 'Interesting only', 'item_rating > 0'),
('filtered', 'filtered', 'Filtered only', 'item_rating = -2')
]
ratings_dict = {r[0]: i for i, r in enumerate(ratings)}
sorts = [
('created', 'Article date', 'Article date', 'item_created DESC'),
('seen', 'Cached date', 'Cached date', 'item_uid DESC'),
('rated', 'Rated on', 'Rated on', 'item_rated DESC'),
('snr', 'Feed SNR', 'Feed SNR', 'snr DESC'),
('oldest', 'Oldest seen', 'Oldest seen', 'item_uid ASC'),
('random', 'Random order', 'Random order', 'random() ASC'),
]
sorts_dict = {s[0]: i for i, s in enumerate(sorts)}
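# For example, ratings_dict['unread'] == 1, and
# sorts[sorts_dict['snr']][3] == 'snr DESC' maps a UI sort key back to its
# SQL ORDER BY clause.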
def add_feed(feed_xml):
"""Try to add a feed. Returns a tuple (feed_uid, num_added, num_filtered)"""
with dbop.db() as db:
c = db.cursor()
feed_xml = feed_xml.replace('feed://', 'http://')
# verify the feed
r = requests.get(feed_xml, timeout=param.http_timeout)
f = feedparser.parse(r.content)
normalize.basic(f, feed_xml)
if not f.feed or ('link' not in f.feed or 'title' not in f.feed):
original = feed_xml
feed_xml = autodiscovery.find(original)
if not feed_xml:
raise AutoDiscoveryError
print('add_feed:autodiscovery of', original, 'found', feed_xml,
file=param.log)
r = requests.get(feed_xml, timeout=param.http_timeout)
f = feedparser.parse(r.text)
normalize.basic(f, feed_xml)
if not f.feed or 'url' not in f:
print('add_feed:autodiscovery failed %r %r' % (r.text, f.__dict__),
file=param.log)
raise ParseError
# we have a valid feed, normalize it
normalize.normalize_feed(f)
feed = {
'xmlUrl': f['url'],
'htmlUrl': str(f.feed['link']),
'etag': r.headers.get('Etag'),
'title': f.feed['title'],
'desc': f.feed['description']
}
filters.load_rules(c)
try:
c.execute("""insert into fm_feeds
(feed_xml, feed_etag, feed_html, feed_title, feed_desc) values
(:xmlUrl, :etag, :htmlUrl, :title, :desc)""", feed)
feed_uid = c.lastrowid
num_added, num_filtered = process_parsed_feed(db, c, f, feed_uid)
db.commit()
return feed_uid, feed['title'], num_added, num_filtered
except sqlite3.IntegrityError as e:
if 'feed_xml' in str(e):
db.rollback()
raise FeedAlreadyExists
else:
db.rollback()
raise UnknownError(str(e))
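# Example (sketch): subscribing to a feed from application code; the URL is
# hypothetical and FeedAlreadyExists signals a duplicate subscription.
#   try:
#     uid, title, added, filtered = add_feed('https://example.com/atom.xml')
#   except FeedAlreadyExists:
#     pass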
def update_feed_xml(feed_uid, feed_xml):
"""Update a feed URL and fetch the feed. Returns the number of new items"""
feed_uid = int(feed_uid)
r = requests.get(feed_xml, timeout=param.http_timeout)
f = feedparser.parse(r.content)
if not f.feed:
raise ParseError
normalize.normalize_feed(f)
with dbop.db() as db:
c = db.cursor()
clear_errors(db, c, feed_uid, f)
try:
c.execute("""update fm_feeds set feed_xml=?, feed_html=?
where feed_uid=?""",
[feed_xml, str(f.feed['link']), feed_uid])
except sqlite3.IntegrityError as e:
if 'feed_xml' in str(e):
db.rollback()
raise FeedAlreadyExists
else:
db.rollback()
raise UnknownError(str(e))
filters.load_rules(c)
num_added = process_parsed_feed(db, c, f, feed_uid)
db.commit()
return num_added
def update_feed_pubxml(feed_uid, feed_pubxml):
"""Update a feed HTML link"""
feed_uid = int(feed_uid)
with dbop.db() as db:
db.execute("update fm_feeds set feed_pubxml=? where feed_uid=?",
[feed_pubxml, feed_uid])
db.commit()
def update_feed_title(feed_uid, feed_title):
"""Update a feed title"""
feed_uid = int(feed_uid)
with dbop.db() as db:
db.execute("update fm_feeds set feed_title=? where feed_uid=?",
[feed_title, feed_uid])
db.commit()
def update_feed_html(feed_uid, feed_html):
"""Update a feed HTML link"""
feed_uid = int(feed_uid)
with dbop.db() as db:
db.execute("update fm_feeds set feed_html=? where feed_uid=?",
[feed_html, feed_uid])
db.commit()
def update_feed_desc(feed_uid, feed_desc):
"""Update a feed desc"""
feed_uid = int(feed_uid)
with dbop.db() as db:
db.execute("update fm_feeds set feed_desc=? where feed_uid=?",
[feed_desc, feed_uid])
db.commit()
def update_feed_filter(feed_uid, feed_filter):
"""Update a feed desc"""
feed_uid = int(feed_uid)
feed_filter = feed_filter.strip()
if feed_filter:
# check syntax
compile(filters.normalize_rule(feed_filter), 'web form', 'eval')
val = feed_filter
else:
val = None
with dbop.db() as db:
db.execute("update fm_feeds set feed_filter=? where feed_uid=?",
[val, feed_uid])
db.commit()
filters.invalidate()
def update_feed_private(feed_uid, private):
feed_uid = int(feed_uid)
private = int(bool(private))
with dbop.db() as db:
db.execute("update fm_feeds set feed_private=? where feed_uid=?",
[private, feed_uid])
db.commit()
def update_feed_exempt(feed_uid, exempt):
feed_uid = int(feed_uid)
exempt = int(bool(exempt))
with dbop.db() as db:
c = db.cursor()
db.execute("update fm_feeds set feed_exempt=? where feed_uid=?",
[exempt, feed_uid])
if exempt:
filters.exempt_feed_retroactive(db, c, feed_uid)
db.commit()
def update_feed_dupcheck(feed_uid, dupcheck):
feed_uid = int(feed_uid)
dupcheck = int(bool(dupcheck))
# XXX run a dupcheck pass retroactively here if dupcheck == 1
with dbop.db() as db:
db.execute("update fm_feeds set feed_dupcheck=? where feed_uid=?",
[dupcheck, feed_uid])
db.commit()
def update_item(item_uid, link, title, content):
item_uid = int(item_uid)
with dbop.db() as db:
db.execute("""update fm_items set item_link=?, item_title=?, item_content=?
where item_uid=?""", [link, title, content, item_uid])
db.commit()
def title_url(feed_uid):
feed_uid = int(feed_uid)
with dbop.db() as db:
c = db.execute("""select feed_title, feed_html from fm_feeds
where feed_uid=?""",
[feed_uid])
return c.fetchone()
ratings_q = queue.Queue()
def set_rating(*args):
ratings_q.put(args)
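# set_rating is fire-and-forget: callers enqueue (item_uid, rating) pairs,
# e.g. set_rating(item_uid, 1), and the RatingsWorker thread below applies
# them to the database asynchronously.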
class RatingsWorker(threading.Thread):
def __init__(self, in_q):
threading.Thread.__init__(self)
self.in_q = in_q
# we need to do this so temboz --refresh honors Ctrl-C
    self.daemon = True
def run(self):
while True:
item_uid = None
try:
item_uid, rating = self.in_q.get()
with dbop.db() as db:
c = db.cursor()
try:
c.execute("""update fm_items
set item_rating=?, item_rated=julianday('now')
where item_uid=?""", [rating, item_uid])
fb_token = param.settings.get('fb_token', None)
if rating == 1 and fb_token:
c.execute("""select feed_uid, item_link, item_title, feed_private
from fm_items, fm_feeds
where item_uid=? and feed_uid=item_feed_uid""",
[item_uid])
feed_uid, url, title, private = c.fetchone()
db.commit()
except:
util.print_stack()
      except:
        util.print_stack()
        # re-queue the rating for a later retry only when processing failed;
        # re-queueing unconditionally would reprocess every rating forever
        if item_uid is not None:
          self.in_q.put((item_uid, rating))
def catch_up(feed_uid):
feed_uid = int(feed_uid)
with dbop.db() as db:
db.execute("""update fm_items set item_rating=-1
where item_feed_uid=? and item_rating=0""", [feed_uid])
db.commit()
def purge_reload(feed_uid):
imp.reload(transform)
feed_uid = int(feed_uid)
if feed_uid in feed_guid_cache:
del feed_guid_cache[feed_uid]
with dbop.db() as db:
c = db.cursor()
# refresh filtering rules
filters.load_rules(c)
c.execute("delete from fm_items where item_feed_uid=? and item_rating=0",
[feed_uid])
c.execute("""delete from fm_tags
where exists (
select item_uid from fm_items
where item_uid=tag_item_uid and item_feed_uid=? and item_rating=0
)""", [feed_uid])
c.execute("""update fm_feeds set feed_modified=NULL, feed_etag=NULL
where feed_uid=?""", [feed_uid])
c.execute("""select feed_xml from fm_feeds
where feed_uid=?""", [feed_uid])
feed_xml = c.fetchone()[0]
db.commit()
r = requests.get(feed_xml, timeout=param.http_timeout)
f = feedparser.parse(r.content)
if not f.feed:
raise ParseError
normalize.normalize_feed(f)
clear_errors(db, c, feed_uid, f)
filters.load_rules(c)
num_added = process_parsed_feed(db, c, f, feed_uid)
db.commit()
def hard_purge(feed_uid):
feed_uid = int(feed_uid)
with dbop.db() as db:
db.execute("delete from fm_items where item_feed_uid=?", [feed_uid])
db.execute("delete from fm_rules where rule_feed_uid=?", [feed_uid])
db.execute("delete from fm_feeds where feed_uid=?", [feed_uid])
db.commit()
filters.invalidate()
def set_status(feed_uid, status):
feed_uid = int(feed_uid)
status = int(status)
with dbop.db() as db:
db.execute("update fm_feeds set feed_status=? where feed_uid=?",
[status, feed_uid])
db.commit()
class FeedWorker(threading.Thread):
def __init__(self, id, in_q, out_q):
threading.Thread.__init__(self)
self.id = id
self.in_q = in_q
self.out_q = out_q
# we need to do this so temboz --refresh honors Ctrl-C
    self.daemon = True
def run(self):
try:
while True:
feed = self.in_q.get()
if not feed: return
self.out_q.put((self.fetch_feed(*feed),) + feed)
finally:
self.out_q.put(None)
def fetch_feed(self, feed_uid, feed_xml, feed_etag, feed_modified,
feed_dupcheck):
print(self.id, feed_xml, file=param.activity)
return fetch_feed(feed_uid, feed_xml, feed_etag, feed_modified)
def fetch_feed(feed_uid, feed_xml, feed_etag, feed_modified):
if not feed_etag:
feed_etag = None
if not feed_modified:
feed_modified = None
try:
r = requests.get(feed_xml, headers={
'If-None-Match': feed_etag
}, timeout=param.http_timeout)
    if not r.content:
return {'channel': {}, 'items': [], 'why': 'no change since Etag'}
f = feedparser.parse(r.content, etag=r.headers.get('Etag'),
modified=feed_modified)
except (socket.timeout, requests.exceptions.RequestException) as e:
if param.debug:
print('EEEEE error fetching feed', feed_xml, e, file=param.log)
f = {'channel': {}, 'items': [], 'why': repr(e)}
except:
if param.debug:
util.print_stack()
    f = {'channel': {}, 'items': [], 'why': repr(sys.exc_info()[1])}
normalize.normalize_feed(f)
return f
def increment_errors(db, c, feed_uid):
"""Increment the error counter, and suspend the feed if the threshold is
reached
"""
c.execute("update fm_feeds set feed_errors=feed_errors+1 where feed_uid=?",
[feed_uid])
c.execute("select feed_errors, feed_title from fm_feeds where feed_uid=?",
[feed_uid])
errors, feed_title = c.fetchone()
max_errors = getattr(param, 'max_errors', 100)
if max_errors != -1 and errors > max_errors:
notification(db, c, feed_uid, 'Service notification',
'This feed was suspended because Temboz encountered '
+ str(errors) + ' consecutive errors')
print('EEEEE too many errors, suspending feed', feed_title, file=param.log)
c.execute("update fm_feeds set feed_status = 1 where feed_uid=?",
[feed_uid])
def clear_errors(db, c, feed_uid, f):
'On successful feed parse, reset etag and/or modified date and error count'
stmt = 'update fm_feeds set feed_errors=0'
params = []
if 'etag' in f and f['etag']:
stmt += ", feed_etag=?"
params.append(f['etag'])
else:
stmt += ", feed_etag=NULL"
if 'modified' in f and f['modified']:
stmt += ", feed_modified=julianday(?, 'unixepoch')"
params.append(time.mktime(f['modified']))
else:
stmt += ", feed_modified=NULL"
stmt += " where feed_uid=?"
params.append(feed_uid)
c.execute(stmt, params)
def update_feed(db, c, f, feed_uid, feed_xml, feed_etag, feed_modified,
feed_dupcheck=None):
print(feed_xml, file=param.activity)
if 'why' in f and f['why'] == 'no change since Etag':
return
# check for errors - HTTP code 304 means no change
if not hasattr(f, 'feed') \
or 'title' not in f.feed and 'link' not in f.feed:
if not hasattr(f, 'feed'):
print("""FFFFF not hasattr(f, 'feed')""", end=' ', file=param.log)
else:
print("""FFFFF title=%r link=%r""" % (
'title' not in f.feed,
'link' not in f.feed
), end=' ', file=param.log)
if 'why' in f:
print(feed_xml, f['why'], file=param.log)
else:
print(feed_xml, file=param.log)
# error or timeout - increment error count
increment_errors(db, c, feed_uid)
else:
# no error - reset etag and/or modified date and error count
clear_errors(db, c, feed_uid, f)
try:
process_parsed_feed(db, c, f, feed_uid, feed_dupcheck)
except:
util.print_stack(['c', 'f'])
feed_guid_cache = {}
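# Maps feed_uid -> {item GUID: last-seen UNIX timestamp}. It lets
# process_parsed_feed skip a database query for GUIDs this process has
# already seen; prune_feed_guid_cache evicts entries older than a day.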
def prune_feed_guid_cache():
yesterday = time.time() - 86400
for feed_uid in feed_guid_cache:
    for guid in list(feed_guid_cache[feed_uid].keys()):
if feed_guid_cache[feed_uid][guid] < yesterday:
del feed_guid_cache[feed_uid][guid]
def process_parsed_feed(db, c, f, feed_uid, feed_dupcheck=None, exempt=None):
"""Insert the entries from a feedparser parsed feed f in the database using
the cursor c for feed feed_uid.
Returns a tuple (number of items added unread, number of filtered items)"""
num_added = 0
num_filtered = 0
filters.load_rules(c)
# check if duplicate title checking is in effect
if feed_dupcheck is None:
c.execute("select feed_dupcheck from fm_feeds where feed_uid=?",
[feed_uid])
feed_dupcheck = bool(c.fetchone()[0])
# check if the feed is exempt from filtering
if exempt is None:
c.execute("select feed_exempt from fm_feeds where feed_uid=?", [feed_uid])
exempt = bool(c.fetchone()[0])
# the Radio convention is reverse chronological order
f['items'].reverse()
for item in f['items']:
try:
normalize.normalize(item, f)
except:
util.print_stack()
continue
# evaluate the FilteringRules
skip, rule = filters.evaluate_rules(item, f, feed_uid, exempt)
filtered_by = None
if skip:
skip = -2
if type(rule.uid) == int:
filtered_by = rule.uid
else:
# XXX clunky convention for feed_rule, but that should disappear
# XXX eventually
filtered_by = 0
title = item['title']
link = item['link']
guid = item['id']
author = item['author']
created = item['created']
modified = item['modified']
if not modified:
modified = None
content = item['content']
# check if the item already exists, using the GUID as key
# but cache all seen GUIDs in a dictionary first, since most articles are
# existing ones and we can save a database query this way
if feed_uid in feed_guid_cache and guid in feed_guid_cache[feed_uid]:
# existing entry and we've seen it before in this process instance
# update the time stamp to prevent premature garbage-collection
# in prune_feed_guid_cache
feed_guid_cache.setdefault(feed_uid, dict())[guid] = time.time()
continue
else:
feed_guid_cache.setdefault(feed_uid, dict())[guid] = time.time()
# not seen yet, it may or may not be a duplicate, we have to find out the
# hard way
c.execute("""select item_uid, item_link,
item_loaded, item_created, item_modified,
item_md5hex, item_title, item_content, item_creator
from fm_items where item_feed_uid=? and item_guid=?""",
[feed_uid, guid])
l = c.fetchall()
# unknown GUID, but title/link duplicate checking may be in effect
if not l:
if feed_dupcheck:
c.execute("""select count(*) from fm_items
where item_feed_uid=? and (item_title=? or item_link=?)""",
[feed_uid, title, link])
l = bool(c.fetchone()[0])
if l:
print('DUPLICATE TITLE', title, file=param.activity)
# XXX Runt items (see normalize.py) are almost always spurious, we just
# XXX skip them, although we may revisit this decision in the future
if not l and item.get('RUNT', False):
print('RUNT ITEM', item, file=param.activity)
l = True
# GUID already exists, this is a change
else:
assert len(l) == 1
(item_uid, item_link, item_loaded, item_created, item_modified,
item_md5hex, item_title, item_content, item_creator) = l[0]
# if this is a feed without timestamps, use our timestamp to determine
# the oldest item in the feed XML file
if 'oldest' in f and f['oldest'] == '1970-01-01 00:00:00':
if 'oldest_ts' not in f:
f['oldest_ts'] = item_created
else:
f['oldest_ts'] = min(f['oldest_ts'], item_created)
# XXX update item here
# XXX update tags if required
# GUID doesn't exist yet, insert it
if not l:
# finally, dereference the URL to get rid of annoying tracking servers
# like feedburner, but only do this once to avoid wasting bandwidth
link = normalize.dereference(link)
try:
c.execute("""insert into fm_items (item_feed_uid, item_guid,
item_created, item_modified, item_link, item_md5hex,
item_title, item_content, item_creator, item_rating, item_rule_uid)
values
(?, ?, julianday(?), julianday(?), ?, ?, ?, ?, ?, ?, ?)""",
[feed_uid, guid, created, modified, link,
hashlib.md5(content.encode('UTF-8')).hexdigest(),
title, content, author, skip, filtered_by])
# if we have tags, insert them
# note: feedparser.py handles 'category' as a special case, so we
# need to work around that to get to the data
if item['item_tags']:
c.execute("""select item_uid
from fm_items where item_feed_uid=? and item_guid=?""",
[feed_uid, guid])
item_uid = c.fetchone()[0]
for tag in item['item_tags']:
c.execute("""insert or ignore into fm_tags (tag_name, tag_item_uid)
values (?, ?)""", [tag, item_uid])
if skip:
num_filtered += 1
print('SKIP', title, rule, file=param.activity)
else:
num_added += 1
print(' ' * 4, title, file=param.activity)
except:
util.print_stack(['c', 'f'])
continue
# update timestamp of the oldest item still in the feed file
if 'oldest' in f and f['oldest'] != '9999-99-99 99:99:99':
if f['oldest'] == '1970-01-01 00:00:00' and 'oldest_ts' in f:
c.execute("update fm_feeds set feed_oldest=? where feed_uid=?",
[f['oldest_ts'], feed_uid])
else:
c.execute("""update fm_feeds set feed_oldest=julianday(?)
where feed_uid=?""", [f['oldest'], feed_uid])
return (num_added, num_filtered)
def notification(db, c, feed_uid, title, content, link=None):
"""Insert a service notification, e.g. to notify before a feed is disabled
due to too many errors"""
hash = hashlib.md5(content.encode('UTF-8')).hexdigest()
guid = 'temboz://%s/%s' % (feed_uid, hash)
# do nothing if the link is clicked
if link is None:
link = '/feed/%d' % feed_uid
c.execute("""insert into fm_items (item_feed_uid, item_guid,
item_created, item_modified, item_link, item_md5hex,
item_title, item_content, item_creator, item_rating, item_rule_uid)
values
(?, ?, julianday('now'), julianday('now'), ?, ?,
?, ?, ?, 0, NULL)""",
[feed_uid, guid, link, hash,
title, content, 'Temboz notifications'])
db.commit()
def cleanup(db=None, c=None):
"""garbage collection - see param.py
this is done only once a day between 3 and 4 AM as this is quite intensive
and could interfere with user activity
It can also be invoked by running temboz --clean
"""
if not db:
with dbop.db() as db:
c = db.cursor()
return cleanup(db, c)
# XXX need to use PATH instead
sqlite_cli = '/usr/local/bin/sqlite3'
print('Starting cleanup', file=param.log)
print('garbage_contents: ', getattr(param, 'garbage_contents', False),
file=param.log)
print('garbage_items: ', getattr(param, 'garbage_items', False),
file=param.log)
if getattr(param, 'garbage_contents', False):
print('starting garbage_contents ', file=param.log)
c.execute("""update fm_items set item_content=''
where item_rating < 0 and item_created < julianday('now')-?""",
[param.garbage_contents])
db.commit()
if getattr(param, 'garbage_items', False):
print('starting garbage_items ', file=param.log)
c.execute("""delete from fm_items where item_uid in (
select item_uid from fm_items, fm_feeds
where item_created < min(julianday('now')-?, feed_oldest-7)
and item_rating<0 and feed_uid=item_feed_uid)""", [param.garbage_items])
db.commit()
print('recreating SNR materialized view', file=param.log)
dbop.snr_mv(db, c)
print('deleting unused tags', file=param.log)
c.execute("""delete from fm_tags
where not exists(
select item_uid from fm_items where item_uid=tag_item_uid
)""")
db.commit()
if dbop.fts_enabled:
print('rebuilding full-text search index', file=param.log)
c.execute("""insert into search(search) values ('rebuild')""")
db.commit()
print('vacuuming', file=param.log)
c.execute('vacuum')
# we still hold the PseudoCursor lock, this is a good opportunity to backup
print('creating backups dir', file=param.log)
try:
os.mkdir('backups')
print('backups dir created', file=param.log)
except OSError:
print('backups dir already exists', file=param.log)
print('pruning feed GUID cache', file=param.log)
prune_feed_guid_cache()
print('backing up SQLite', file=param.log)
os.system((sqlite_cli + ' rss.db .dump | %s > backups/daily_' \
+ time.strftime('%Y-%m-%d') + '%s') % param.backup_compressor)
# delete old backups
print('deleting old backups', file=param.log)
backup_re = re.compile(
'daily_[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]\\.')
log_re = re.compile(
'log_[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]')
for fn in os.listdir('backups'):
if backup_re.match(fn) or log_re.match(fn):
elapsed = time.time() - os.stat('backups/' + fn).st_ctime
if elapsed > 86400 * param.daily_backups:
try:
print('deleting', fn, file=param.log)
os.remove('backups/' + fn)
except OSError:
pass
print('Ended cleanup', file=param.log)
def update(where_clause=''):
with dbop.db() as db:
c = db.cursor()
# refresh filtering rules
filters.load_rules(c)
# at 3AM by default, perform house-cleaning
if time.localtime()[3] == param.backup_hour:
cleanup(db, c)
# create worker threads and the queues used to communicate with them
work_q = queue.Queue()
process_q = queue.Queue()
workers = []
for i in range(param.feed_concurrency):
workers.append(FeedWorker(i + 1, work_q, process_q))
workers[-1].start()
# assign work
c.execute("""select feed_uid, feed_xml, feed_etag, feed_dupcheck,
strftime('%s', feed_modified) from fm_feeds where feed_status=0 """
+ where_clause)
for feed_uid, feed_xml, feed_etag, feed_dupcheck, feed_modified in c:
if feed_modified:
feed_modified = float(feed_modified)
feed_modified = time.localtime(feed_modified)
else:
feed_modified = None
work_q.put((feed_uid, feed_xml, feed_etag, feed_modified, feed_dupcheck))
# None is an indication for workers to stop
for i in range(param.feed_concurrency):
work_q.put(None)
workers_left = param.feed_concurrency
while workers_left > 0:
feed_info = process_q.get()
# exited worker
if not feed_info:
workers_left -= 1
else:
try:
update_feed(db, c, *feed_info)
except:
util.print_stack()
db.commit()
# give reader threads an opportunity to get their work done
time.sleep(1)
class PeriodicUpdater(threading.Thread):
def __init__(self):
self.event = threading.Event()
threading.Thread.__init__(self)
    self.daemon = True
def run(self):
while True:
# XXX should wrap this in a try/except clause
self.event.wait(param.refresh_interval)
print(time.ctime(), '- refreshing feeds', file=param.activity)
try:
update()
except:
util.print_stack()
self.event.clear()
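# Example (sketch): wiring up periodic refreshes, assuming
# param.refresh_interval is configured. PeriodicUpdater is a daemon thread,
# so the main program must stay alive (e.g. serving the web UI).
#   updater = PeriodicUpdater()
#   updater.start()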
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import unittest.mock
import numpy as np
from tests.unit import utils
class Test_newton_refine(utils.NumPyTestCase):
@staticmethod
def _call_function_under_test(s, nodes1, t, nodes2):
from bezier.hazmat import intersection_helpers
return intersection_helpers.newton_refine(s, nodes1, t, nodes2)
def test_linear(self):
import bezier
nodes1 = np.asfortranarray([[0.0, 1.0], [0.0, 1.0]])
nodes2 = np.asfortranarray([[1.0, 0.0], [0.0, 3.0]])
curve1 = bezier.Curve(nodes1, degree=1)
curve2 = bezier.Curve(nodes2, degree=1)
known_s = 0.75
known_t = 0.25
self.assertEqual(curve1.evaluate(known_s), curve2.evaluate(known_t))
wrong_s = known_s - 0.125
wrong_t = known_t + 0.125
# NOTE: By construction, the Jacobian matrix will be
# [1, 1], [1, -3]
# which has determinant -4.0, hence there will
# be no round-off when solving.
new_s, new_t = self._call_function_under_test(
wrong_s, nodes1, wrong_t, nodes2
)
# Newton's method is exact on linear problems so will
# always converge after one step.
self.assertEqual(new_s, known_s)
self.assertEqual(new_t, known_t)
@staticmethod
def _get_quadratics():
import bezier
nodes1 = np.asfortranarray([[0.0, 0.5, 1.0], [0.0, 1.0, 0.0]])
nodes2 = np.asfortranarray([[1.0, 0.5, 0.0], [0.75, -0.25, 0.75]])
curve1 = bezier.Curve(nodes1, degree=2)
curve2 = bezier.Curve(nodes2, degree=2)
return curve1, curve2
def test_mixed_degree(self):
import bezier
curve1, _ = self._get_quadratics()
nodes2 = np.asfortranarray([[1.0, 0.0], [0.0, 1.0]])
curve2 = bezier.Curve(nodes2, degree=1)
known_s = 0.5
known_t = 0.5
self.assertEqual(curve1.evaluate(known_s), curve2.evaluate(known_t))
wrong_s = 0.25
wrong_t = 0.25
# NOTE: By construction, the Jacobian matrix will be
# [1, 1], [1, -1]
# which has determinant -2.0, hence there will
# be no round-off when solving.
new_s, new_t = self._call_function_under_test(
wrong_s, curve1._nodes, wrong_t, nodes2
)
self.assertEqual(new_s, 0.4375)
self.assertEqual(new_t, 0.5625)
# Make sure we have gotten closer to correct.
self.assertLess(abs(known_s - new_s), abs(known_s - wrong_s))
self.assertLess(abs(known_t - new_t), abs(known_t - wrong_t))
def test_early_exit(self):
curve1, curve2 = self._get_quadratics()
known_s = 0.25
known_t = 0.75
self.assertEqual(curve1.evaluate(known_s), curve2.evaluate(known_t))
new_s, new_t = self._call_function_under_test(
known_s, curve1._nodes, known_t, curve2._nodes
)
self.assertEqual(new_s, known_s)
self.assertEqual(new_t, known_t)
def test_quadratic(self):
curve1, curve2 = self._get_quadratics()
known_s = 0.25
known_t = 0.75
self.assertEqual(curve1.evaluate(known_s), curve2.evaluate(known_t))
wrong_s = known_s + 0.0625 # 1/16
wrong_t = known_t + 0.0625 # 1/16
# NOTE: By construction, the Jacobian matrix will be
# [1, 3/4], [1, -5/4]
# which has determinant -2.0, hence there will
# be no round-off when solving.
new_s, new_t = self._call_function_under_test(
wrong_s, curve1._nodes, wrong_t, curve2._nodes
)
self.assertEqual(new_s, 0.2421875)
self.assertEqual(new_t, 0.7578125)
# Make sure we have gotten closer to correct.
self.assertLess(abs(known_s - new_s), abs(known_s - wrong_s))
self.assertLess(abs(known_t - new_t), abs(known_t - wrong_t))
def test_convergence(self):
import bezier
nodes1 = np.asfortranarray(
[[0.0, 0.25, 0.5, 0.75, 1.0], [0.0, 1.0, -0.75, 1.0, 0.0]]
)
curve1 = bezier.Curve(nodes1, degree=4)
# Vertical line forces a unique solution.
nodes2 = np.asfortranarray([[0.5, 0.5], [0.0, 1.0]])
curve2 = bezier.Curve(nodes2, degree=1)
num_guess = 4
parameters = np.zeros((2, num_guess), order="F")
# NOTE: This means our "first" guess is (s, t) = (0, 0).
for guess in range(1, num_guess):
prev_s, prev_t = parameters[:, guess - 1]
parameters[:, guess] = self._call_function_under_test(
prev_s, nodes1, prev_t, nodes2
)
expected = np.asfortranarray(
[[0.0, 0.5, 0.5, 0.5], [0.0, 2.0, 0.21875, 0.21875]]
)
self.assertEqual(parameters, expected)
# Make sure that we've actually converged.
exact_s, exact_t = parameters[:, -1]
self.assertEqual(curve1.evaluate(exact_s), curve2.evaluate(exact_t))
def test_singular_jacobian(self):
nodes1 = np.asfortranarray([[0.5, 1.0, 1.5], [0.0, 1.0, 0.0]])
nodes2 = np.asfortranarray([[0.0, 1.0], [0.5, 0.5]])
with self.assertRaises(ValueError) as exc_info:
self._call_function_under_test(0.5, nodes1, 0.5, nodes2)
exc_args = exc_info.exception.args
self.assertEqual(exc_args, ("Jacobian is singular.",))
class TestNewtonSimpleRoot(utils.NumPyTestCase):
@staticmethod
def _get_target_class():
from bezier.hazmat import intersection_helpers
return intersection_helpers.NewtonSimpleRoot
def _make_one(self, *args, **kwargs):
klass = self._get_target_class()
return klass(*args, **kwargs)
def test_constructor(self):
nodes1 = unittest.mock.sentinel.nodes1
first_deriv1 = unittest.mock.sentinel.first_deriv1
nodes2 = unittest.mock.sentinel.nodes2
first_deriv2 = unittest.mock.sentinel.first_deriv2
evaluate_fn = self._make_one(
nodes1, first_deriv1, nodes2, first_deriv2
)
self.assertIs(evaluate_fn.nodes1, nodes1)
self.assertIs(evaluate_fn.first_deriv1, first_deriv1)
self.assertIs(evaluate_fn.nodes2, nodes2)
self.assertIs(evaluate_fn.first_deriv2, first_deriv2)
def test___call__(self):
# B1(s) = [s(s + 2) ]
# [4s(1 - s)]
# B2(t) = [1 + 3t]
# [4 - 4t]
# DF = [2 + 2s, -3]
# [4 - 8s, 4]
nodes1 = np.asfortranarray([[0.0, 1.0, 3.0], [0.0, 2.0, 0.0]])
first_deriv1 = np.asfortranarray([[2.0, 4.0], [4.0, -4.0]])
nodes2 = np.asfortranarray([[1.0, 4.0], [4.0, 0.0]])
first_deriv2 = np.asfortranarray([[3.0], [-4.0]])
evaluate_fn = self._make_one(
nodes1, first_deriv1, nodes2, first_deriv2
)
jacobian, func_val = evaluate_fn(0.5, 0.25)
expected_jacobian = np.asfortranarray([[3.0, -3.0], [0.0, 4.0]])
self.assertEqual(jacobian, expected_jacobian)
expected_func_val = np.asfortranarray([[-0.5], [-2.0]])
self.assertEqual(func_val, expected_func_val)
def test___call__exact_zero(self):
# B1(s) = [2s(1 + s)]
# [6s(1 - s)]
# B2(t) = [21t]
# [ 9t]
# DF = [2 + 4s, -21]
# [6 - 12s, -9]
nodes1 = np.asfortranarray([[0.0, 1.0, 4.0], [0.0, 3.0, 0.0]])
first_deriv1 = np.asfortranarray([[2.0, 6.0], [6.0, -6.0]])
nodes2 = np.asfortranarray([[0.0, 21.0], [0.0, 9.0]])
first_deriv2 = np.asfortranarray([[21.0], [9.0]])
evaluate_fn = self._make_one(
nodes1, first_deriv1, nodes2, first_deriv2
)
jacobian, func_val = evaluate_fn(0.75, 0.125)
self.assertIsNone(jacobian)
expected_func_val = np.asfortranarray([[0.0], [0.0]])
self.assertEqual(func_val, expected_func_val)
class TestNewtonDoubleRoot(utils.NumPyTestCase):
@staticmethod
def _get_target_class():
from bezier.hazmat import intersection_helpers
return intersection_helpers.NewtonDoubleRoot
def _make_one(self, *args, **kwargs):
klass = self._get_target_class()
return klass(*args, **kwargs)
def test_constructor(self):
nodes1 = unittest.mock.sentinel.nodes1
first_deriv1 = unittest.mock.sentinel.first_deriv1
second_deriv1 = unittest.mock.sentinel.second_deriv1
nodes2 = unittest.mock.sentinel.nodes2
first_deriv2 = unittest.mock.sentinel.first_deriv2
second_deriv2 = unittest.mock.sentinel.second_deriv2
evaluate_fn = self._make_one(
nodes1,
first_deriv1,
second_deriv1,
nodes2,
first_deriv2,
second_deriv2,
)
self.assertIs(evaluate_fn.nodes1, nodes1)
self.assertIs(evaluate_fn.first_deriv1, first_deriv1)
self.assertIs(evaluate_fn.second_deriv1, second_deriv1)
self.assertIs(evaluate_fn.nodes2, nodes2)
self.assertIs(evaluate_fn.first_deriv2, first_deriv2)
self.assertIs(evaluate_fn.second_deriv2, second_deriv2)
def _make_default(self):
# B1(s) = [4s(1 - s)]
# [ 2s]
# B2(t) = [2(2t^2 - 2t + 1)]
# [ 2t]
# B1'(s) x B2'(s) = -16(s + t - 1)
# DG = [4 - 8s, 4 - 8t]
# [ 2, -2]
# [ -16, -16]
# DG^T DG = [ 4(16s^2 - 16s + 69), 4(16st - 8s - 8t + 67)]
# [4(16st - 8s - 8t + 67), 4(16t^2 - 16t + 69)]
# DG^T G = [4(8s^3 - 12s^2 + 8st^2 - 8st + 73s - 4t^2 + 67t - 66)]
# [4(8s^2t - 4s^2 - 8st + 67s + 8t^3 - 12t^2 + 73t - 66)]
nodes1 = np.asfortranarray([[0.0, 2.0, 0.0], [0.0, 1.0, 2.0]])
first_deriv1 = np.asfortranarray([[4.0, -4.0], [2.0, 2.0]])
second_deriv1 = np.asfortranarray([[-8.0], [0.0]])
nodes2 = np.asfortranarray([[2.0, 0.0, 2.0], [0.0, 1.0, 2.0]])
first_deriv2 = np.asfortranarray([[-4.0, 4.0], [2.0, 2.0]])
second_deriv2 = np.asfortranarray([[8.0], [0.0]])
return self._make_one(
nodes1,
first_deriv1,
second_deriv1,
nodes2,
first_deriv2,
second_deriv2,
)
def test___call__(self):
evaluate_fn = self._make_default()
jacobian, func_val = evaluate_fn(0.75, 0.25)
expected_jacobian = np.asfortranarray([[264.0, 248.0], [248.0, 264.0]])
self.assertEqual(jacobian, expected_jacobian)
expected_func_val = np.asfortranarray([[3.0], [-3.0]])
self.assertEqual(func_val, expected_func_val)
def test___call__exact_zero(self):
evaluate_fn = self._make_default()
jacobian, func_val = evaluate_fn(0.5, 0.5)
        self.assertIsNone(jacobian)
expected_func_val = np.asfortranarray([[0.0], [0.0]])
self.assertEqual(func_val, expected_func_val)
def test___call__linear_curves(self):
nodes1 = np.asfortranarray([[0.0, 1.0], [0.0, 1.0]])
first_deriv1 = np.asfortranarray([[1.0], [1.0]])
second_deriv1 = np.empty((2, 0))
nodes2 = np.asfortranarray([[0.0, 1.0], [1.0, 0.0]])
first_deriv2 = np.asfortranarray([[1.0], [-1.0]])
second_deriv2 = np.empty((2, 0))
evaluate_fn = self._make_one(
nodes1,
first_deriv1,
second_deriv1,
nodes2,
first_deriv2,
second_deriv2,
)
jacobian, func_val = evaluate_fn(0.25, 0.25)
expected_jacobian = np.asfortranarray([[2.0, 0.0], [0.0, 2.0]])
self.assertEqual(jacobian, expected_jacobian)
expected_func_val = np.asfortranarray([[-0.5], [-0.5]])
self.assertEqual(func_val, expected_func_val)
class Test_newton_iterate(unittest.TestCase):
HALF_EPS = 0.5**26
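    # NOTE: 0.5**26 == 2**-26, the square root of double-precision machine
    # epsilon (2**-52); _check_closer uses it as a relative-error scale.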
@staticmethod
def _call_function_under_test(evaluate_fn, s, t):
from bezier.hazmat import intersection_helpers
return intersection_helpers.newton_iterate(evaluate_fn, s, t)
@staticmethod
def _simple_evaluate(quadratic1, quadratic2):
from bezier.hazmat import intersection_helpers
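        # The hodograph (first derivative) of a degree-n Bézier curve has
        # control points n * (P[i+1] - P[i]); here n == 2 for the quadratics.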
first_deriv1 = 2.0 * (quadratic1[:, 1:] - quadratic1[:, :-1])
first_deriv2 = 2.0 * (quadratic2[:, 1:] - quadratic2[:, :-1])
return intersection_helpers.NewtonSimpleRoot(
quadratic1, first_deriv1, quadratic2, first_deriv2
)
@staticmethod
def _double_evaluate(quadratic1, quadratic2):
from bezier.hazmat import intersection_helpers
first_deriv1 = 2.0 * (quadratic1[:, 1:] - quadratic1[:, :-1])
second_deriv1 = first_deriv1[:, 1:] - first_deriv1[:, :-1]
first_deriv2 = 2.0 * (quadratic2[:, 1:] - quadratic2[:, :-1])
second_deriv2 = first_deriv2[:, 1:] - first_deriv2[:, :-1]
return intersection_helpers.NewtonDoubleRoot(
quadratic1,
first_deriv1,
second_deriv1,
quadratic2,
first_deriv2,
second_deriv2,
)
def test_rhs_exactly_zero(self):
# B1([10922/32768, 10923/32768]) and B2([16383/16384, 1]) are
# linearized and when the segments intersect they produce
# t = 109217/109216 > 1.
nodes1 = np.asfortranarray([[0.0, 4.5, 9.0], [0.0, 9.0, 0.0]])
s = 671023103.0 / 2013069312.0
nodes2 = np.asfortranarray([[11.0, 7.0, 3.0], [8.0, 10.0, 4.0]])
t = 1789394945.0 / 1789394944.0
evaluate_fn = self._simple_evaluate(nodes1, nodes2)
converged, current_s, current_t = self._call_function_under_test(
evaluate_fn, s, t
)
self.assertTrue(converged)
self.assertEqual(3.0 * current_s, 1.0)
self.assertEqual(current_t, 1.0)
def test_singular_jacobian(self):
# B1([5461/8192, 5462/8192]) and B2([2730/8192, 2731/8192]) are
# linearized and the segments are parallel. The curves intersect
# at the point B1(2/3) = [1/2, 1/2] = B2(1/3) and they have parallel
# tangent vectors B1'(2/3) = [3/4, 0] = B2'(1/3).
nodes1 = np.asfortranarray([[0.0, 0.375, 0.75], [0.0, 0.75, 0.375]])
s = 10923.0 / 16384.0
nodes2 = np.asfortranarray([[0.25, 0.625, 1.0], [0.625, 0.25, 1.0]])
t = 5461.0 / 16384.0
evaluate_fn = self._simple_evaluate(nodes1, nodes2)
converged, current_s, current_t = self._call_function_under_test(
evaluate_fn, s, t
)
self.assertFalse(converged)
self.assertEqual(3.0 * current_s, 2.0 + 0.5**14)
self.assertEqual(3.0 * current_t, 1 - 0.5**14)
def _check_closer(
self, s, current_s, expected_s, t, current_t, expected_t
):
# Make sure we are closer ...
err_s = abs(expected_s - current_s)
err_t = abs(expected_t - current_t)
self.assertLess(err_s, abs(expected_s - s))
self.assertLess(err_t, abs(expected_t - t))
# ... but still not very close.
self.assertGreater(err_s, self.HALF_EPS * expected_s)
self.assertGreater(err_t, self.HALF_EPS * expected_t)
def test_singular_jacobian_dg(self):
# The curves are tangent and have the same curvature (i.e.
# triple root).
nodes1 = np.asfortranarray([[12.0, -4.0, -4.0], [4.0, -4.0, 4.0]])
s = float.fromhex("0x1.fffff4dad8308p-2")
nodes2 = np.asfortranarray([[6.0, -2.0, -2.0], [1.0, -1.0, 1.0]])
t = float.fromhex("0x1.ffffe9b5a0f3ep-2")
evaluate_fn = self._double_evaluate(nodes1, nodes2)
converged, current_s, current_t = self._call_function_under_test(
evaluate_fn, s, t
)
self.assertFalse(converged)
self._check_closer(s, current_s, 0.5, t, current_t, 0.5)
def test_convergence_linear(self):
# B1([2730/8192, 2731/8192]) and B2([1/2, 2049/4096]) are
# linearized and when the segments intersect they produce
# t = -1/6 < 0.
nodes1 = np.asfortranarray([[0.5, 1.25, 2.0], [0.125, -0.25, 0.5]])
s = 12287.0 / 36864.0
nodes2 = np.asfortranarray([[0.5, 1.0, 1.5], [-0.125, 0.125, -0.125]])
t = 12287.0 / 24576.0
# Due to linear convergence, this "bails out" after 5 iterations.
evaluate_fn = self._simple_evaluate(nodes1, nodes2)
converged, current_s, current_t = self._call_function_under_test(
evaluate_fn, s, t
)
self.assertFalse(converged)
self._check_closer(s, current_s, 1.0 / 3.0, t, current_t, 0.5)
def test_convergence_linear_dg(self):
# B1([16387/32768, 16388/32768]) and B2([8195/16384, 8196/16384]) are
# linearized and when the segments intersect they produce
# s = t = -9/7 < 0.
nodes1 = np.asfortranarray([[12.0, -4.0, -4.0], [4.0, -4.0, 4.0]])
nodes2 = np.asfortranarray([[6.0, -2.0, -2.0], [1.0, -1.0, 1.0]])
# NOTE: These ``s-t`` values come after the simple root case exits
# due to linear convergence, having started from
# s = 28675 / 57344 and t = 14339 / 28672.
s = float.fromhex("0x1.00006f0b2bb91p-1")
t = float.fromhex("0x1.0000de165968ap-1")
evaluate_fn = self._double_evaluate(nodes1, nodes2)
converged, current_s, current_t = self._call_function_under_test(
evaluate_fn, s, t
)
self.assertFalse(converged)
self._check_closer(s, current_s, 0.5, t, current_t, 0.5)
def test_below_error_ratio(self):
# B1([12287/16384, 3/4]) and B2([2457/8192, 2458/8192]) are linearized
# and when the segments intersect they produce
# s = 33555797/33551701 > 1.
nodes1 = np.asfortranarray([[1.0, -1.0, 1.0], [0.0, 0.25, 0.5]])
s = 25163776.0 / 33551701.0
nodes2 = np.asfortranarray(
[[-0.125, 0.5, 1.125], [-0.28125, 1.28125, -0.28125]]
)
t = 41228331827.0 / 137427767296.0
evaluate_fn = self._simple_evaluate(nodes1, nodes2)
converged, current_s, current_t = self._call_function_under_test(
evaluate_fn, s, t
)
self.assertTrue(converged)
self.assertEqual(0.75, current_s)
utils.almost(self, 3.0 / 10.0, current_t, 1)
def test_below_error_ratio_dg(self):
# B1([2730/8192, 2731/8192]) and B2([2047/4096, 1/2]) are
# linearized and when the segments intersect they produce
# t = 11/10 > 1.
nodes1 = np.asfortranarray([[0.5, 1.25, 2.0], [0.125, -0.25, 0.5]])
nodes2 = np.asfortranarray([[0.5, 1.0, 1.5], [-0.125, 0.125, -0.125]])
# NOTE: These ``s-t`` values come after the simple root case exits
# due to linear convergence, having started from
# s = 6827 / 20480 and t = 20481 / 40960 and updating 4 times.
s = 109227.0 / 327680.0
t = 327681.0 / 655360.0
evaluate_fn = self._double_evaluate(nodes1, nodes2)
converged, current_s, current_t = self._call_function_under_test(
evaluate_fn, s, t
)
self.assertTrue(converged)
utils.almost(self, 1.0 / 3.0, current_s, 1)
utils.almost(self, 0.5, current_t, 1)
def test_all_iterations(self):
# We start very far away from the root s = t = 0.5 by using
# s, t >> 1. (There is also a root at s = t = 0.0.)
nodes1 = np.asfortranarray([[0.0, 1.0, 2.0], [0.0, 2.0, 0.0]])
s = 64.0
nodes2 = np.asfortranarray([[0.0, 2.0, 0.0], [0.0, 1.0, 2.0]])
t = 64.0
evaluate_fn = self._simple_evaluate(nodes1, nodes2)
# We "fake" MAX_NEWTON_ITERATIONS=3 because when we are sufficiently
# far from a root, convergence **appears** linear. This is because
# ``pn`` is moving significantly, so the change in ``||pn||`` tracks
# the change in ``||p{n+1} - pn||``.
patch = unittest.mock.patch(
"bezier.hazmat.intersection_helpers.MAX_NEWTON_ITERATIONS", new=3
)
with patch:
converged, current_s, current_t = self._call_function_under_test(
evaluate_fn, s, t
)
self.assertFalse(converged)
self.assertEqual(8.221323371115176, current_s)
self.assertEqual(8.221323371115176, current_t)
def test_premature_exit(self):
from bezier.hazmat import intersection_helpers
nodes1 = np.asfortranarray([[0.0, 0.5, 1.0], [0.0, 1.0, 0.0]])
first_deriv1 = 2.0 * (nodes1[:, 1:] - nodes1[:, :-1])
nodes2 = np.asfortranarray([[0.0, 1.0], [0.5, 0.5]])
first_deriv2 = nodes2[:, 1:] - nodes2[:, :-1]
evaluate_fn = intersection_helpers.NewtonSimpleRoot(
nodes1, first_deriv1, nodes2, first_deriv2
)
# First, show that sn = tn = 0.5 - k results in
# s{n + 1} = t{n + 1} = 0.5 - 0.5 k. So starting from k = 1 and
# attempting four linearly converging iterations, we end up
# with s4 = t4 = 0.5 - 0.5**4 (1).
s = -0.5
t = -0.5
converged, current_s, current_t = self._call_function_under_test(
evaluate_fn, s, t
)
self.assertFalse(converged)
self.assertEqual(current_s, 0.5 - 0.5**4)
self.assertEqual(current_t, 0.5 - 0.5**4)
# Do the same as above, but start a bit closer to a root.
s = 0.5 - 0.5**20
t = 0.5 - 0.5**20
converged, current_s, current_t = self._call_function_under_test(
evaluate_fn, s, t
)
self.assertFalse(converged)
self.assertEqual(current_s, 0.5 - 0.5**24)
self.assertEqual(current_t, 0.5 - 0.5**24)
# Finally, start "too close" so that ``F(s, t)`` evaluates to zero
# earlier than it should.
s = 0.5 - 0.5**26
t = 0.5 - 0.5**26
converged, current_s, current_t = self._call_function_under_test(
evaluate_fn, s, t
)
self.assertTrue(converged)
self.assertEqual(current_s, 0.5 - 0.5**28)
self.assertEqual(current_t, 0.5 - 0.5**28)
class Test_full_newton_nonzero(unittest.TestCase):
@staticmethod
def _call_function_under_test(s, nodes1, t, nodes2):
from bezier.hazmat import intersection_helpers
return intersection_helpers.full_newton_nonzero(s, nodes1, t, nodes2)
def test_simple_root(self):
# B1([4095/8192, 1/2]) and B2([1365/8192, 1366/8192]) are linearized
# and when the segments intersect they produce s = 24580/24579 > 1.
nodes1 = np.asfortranarray([[0.0, 0.375, 0.75], [0.0, 0.75, 0.375]])
s = 100675585.0 / 201351168.0
nodes2 = np.asfortranarray(
[[0.25, 0.625, 1.0], [0.5625, 0.1875, 0.9375]]
)
t = 33558529.0 / 201351168.0
computed_s, computed_t = self._call_function_under_test(
s, nodes1, t, nodes2
)
utils.almost(self, 0.5, computed_s, 1)
utils.almost(self, 1.0 / 6.0, computed_t, 4)
def test_double_root(self):
# B1([5461/8192, 5462/8192]) and B2([2730/8192, 2731/8192]) are
# linearized and the segments are parallel. The curves intersect
# at the point B1(2/3) = [1/2, 1/2] = B2(1/3) and they have parallel
# tangent vectors B1'(2/3) = [3/4, 0] = B2'(1/3).
nodes1 = np.asfortranarray([[0.0, 0.375, 0.75], [0.0, 0.75, 0.375]])
s = 10923.0 / 16384.0
nodes2 = np.asfortranarray([[0.25, 0.625, 1.0], [0.625, 0.25, 1.0]])
t = 5461.0 / 16384.0
computed_s, computed_t = self._call_function_under_test(
s, nodes1, t, nodes2
)
utils.almost(self, 2.0 / 3.0, computed_s, 1)
utils.almost(self, 1.0 / 3.0, computed_t, 1)
def test_triple_root(self):
from bezier.hazmat import intersection_helpers
# B1([16382/32768, 16383/32768]) and B2([8190/16384, 8191/16384]) are
# linearized and when the segments intersect they produce
# s = t = 4/3 > 1.
nodes1 = np.asfortranarray([[12.0, -4.0, -4.0], [4.0, -4.0, 4.0]])
s = 24575.0 / 49152.0
nodes2 = np.asfortranarray([[6.0, -2.0, -2.0], [1.0, -1.0, 1.0]])
t = 12287.0 / 24576.0
with self.assertRaises(NotImplementedError) as exc_info:
self._call_function_under_test(s, nodes1, t, nodes2)
expected = (intersection_helpers.NEWTON_NO_CONVERGE,)
self.assertEqual(exc_info.exception.args, expected)
def test_line_and_curve(self):
# B1([5461/16384, 5462/16384]) and B2([0, 1]) are linearized
# and when the segments intersect they produce s = -1/3 < 0.
nodes1 = np.asfortranarray([[0.0, 1.5, 3.0], [2.25, -2.25, 2.25]])
s = 8191.0 / 24576.0
nodes2 = np.asfortranarray([[-0.5, 4.0], [1.75, -2.75]])
t = 12287.0 / 36864.0
computed_s, computed_t = self._call_function_under_test(
s, nodes1, t, nodes2
)
utils.almost(self, 1.0 / 3.0, computed_s, 1)
utils.almost(self, 1.0 / 3.0, computed_t, 1)
def test_nearby_solutions(self):
# B1([158/512, 159/512]) and B2([304/1024, 305/1024]) are linearized
# and when the segments intersect they produce perfectly valid
# s = float.fromhex("0x1.f19b11c66f80cp-7") ~= 0.0152 and
# t = float.fromhex("0x1.edecc2b71e352p-1") ~= 0.9647. This causes
# a convergence failure even though the curves are not tangent at
# the nearby point of intersection.
nodes1 = np.asfortranarray(
[
[
float.fromhex("0x1.002f11833164ap-1"),
float.fromhex("0x1.c516e980c0ce0p-2"),
float.fromhex("0x1.89092ee6e6df4p-2"),
],
[
float.fromhex("-0x1.32718972d77a1p-1"),
float.fromhex("-0x1.5e002a95d165ep-1"),
float.fromhex("-0x1.8640e302433dfp-1"),
],
]
)
s = float.fromhex("0x1.3c07c66c4719cp-2")
nodes2 = np.asfortranarray(
[
[
float.fromhex("0x1.fbb8cfd966f05p-2"),
float.fromhex("0x1.c6cd0e74ae3ecp-2"),
float.fromhex("0x1.8c4a283f3c04dp-2"),
],
[
float.fromhex("-0x1.325bc2f4e012cp-1"),
float.fromhex("-0x1.614b060a21ebap-1"),
float.fromhex("-0x1.8192083f28f11p-1"),
],
]
)
t = float.fromhex("0x1.30f6f6615b8f1p-2")
computed_s, computed_t = self._call_function_under_test(
s, nodes1, t, nodes2
)
known_s = float.fromhex("0x1.3c07c30226a3cp-2")
utils.almost(self, known_s, computed_s, 447)
known_t = float.fromhex("0x1.30f6f2bdde113p-2")
utils.almost(self, known_t, computed_t, 474)
class Test_full_newton(unittest.TestCase):
@staticmethod
def _call_function_under_test(s, nodes1, t, nodes2):
from bezier.hazmat import intersection_helpers
return intersection_helpers.full_newton(s, nodes1, t, nodes2)
def test_both_near_zero(self):
# B1([0, 1/8192]) and B2([0, 1/8192]) are linearized and the
# segments are parallel, and the root is a double root.
nodes1 = np.asfortranarray([[1.0, 1.0, 0.0], [0.0, 1.0, 1.0]])
s = 1.0 / 16384.0
nodes2 = np.asfortranarray([[1.0, 1.0, -0.5], [0.0, 1.5, 1.5]])
t = 1.0 / 16384.0
computed_s, computed_t = self._call_function_under_test(
s, nodes1, t, nodes2
)
self.assertEqual(0.0, computed_s)
self.assertEqual(0.0, computed_t)
def _one_parameter_near_zero(self, swap=False):
# B1([0, 1/8192]) and B2([1/4, 2049/8192]) are linearized and the
# segments are parallel, and the root is a double root.
nodes1 = np.asfortranarray([[1.0, 1.0, -0.5], [0.0, 1.5, 1.5]])
s = 1.0 / 16384.0
nodes2 = np.asfortranarray(
[[0.9375, 1.1875, 0.4375], [-0.5625, 0.6875, 0.9375]]
)
t = 4097.0 / 16384.0
if swap:
nodes1, nodes2 = nodes2, nodes1
s, t = t, s
computed_s, computed_t = self._call_function_under_test(
s, nodes1, t, nodes2
)
if swap:
self.assertEqual(0.25, computed_s)
self.assertEqual(0.0, computed_t)
else:
self.assertEqual(0.0, computed_s)
self.assertEqual(0.25, computed_t)
def test_s_near_zero(self):
self._one_parameter_near_zero()
def test_t_near_zero(self):
self._one_parameter_near_zero(swap=True)
def test_both_nonzero(self):
# B1([6826/8192, 6827/8192]) and B2([1/2, 4097/8192]) are linearized
# and when the segments intersect they produce t = -1/24579 < 0.
# The root is a simple root.
nodes1 = np.asfortranarray([[0.0, 0.375, 0.75], [0.0, 0.75, 0.375]])
s = 167792639.0 / 201351168.0
nodes2 = np.asfortranarray(
[[0.25, 0.625, 1.0], [0.5625, 0.1875, 0.9375]]
)
t = 100675583.0 / 201351168.0
computed_s, computed_t = self._call_function_under_test(
s, nodes1, t, nodes2
)
utils.almost(self, 5.0 / 6.0, computed_s, 2)
utils.almost(self, 0.5, computed_t, 1)
class TestIntersection(unittest.TestCase):
@staticmethod
def _get_target_class():
from bezier.hazmat import intersection_helpers
return intersection_helpers.Intersection
def _make_one(self, *args, **kwargs):
klass = self._get_target_class()
return klass(*args, **kwargs)
def _constructor_helper(self, **kwargs):
index_first = 2
s_val = 0.25
index_second = 1
t_val = 0.75
intersection = self._make_one(
index_first, s_val, index_second, t_val, **kwargs
)
self.assertEqual(intersection.index_first, index_first)
self.assertEqual(intersection.s, s_val)
self.assertEqual(intersection.index_second, index_second)
self.assertEqual(intersection.t, t_val)
return intersection
def test_constructor(self):
intersection = self._constructor_helper()
self.assertIsNone(intersection.interior_curve)
def test_constructor_with_interior_curve(self):
intersection = self._constructor_helper(
interior_curve=unittest.mock.sentinel.interior_curve
)
self.assertIs(
intersection.interior_curve, unittest.mock.sentinel.interior_curve
)
def test___dict___property(self):
intersection = self._constructor_helper(
interior_curve=unittest.mock.sentinel.interior_curve
)
props_dict = intersection.__dict__
expected = {
"index_first": 2,
"s": 0.25,
"index_second": 1,
"t": 0.75,
"interior_curve": unittest.mock.sentinel.interior_curve,
}
self.assertEqual(props_dict, expected)
# Check that modifying ``props_dict`` won't modify ``curve``.
props_dict["s"] = 0.5
self.assertNotEqual(intersection.s, props_dict["s"])
|
|
# coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Currency(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'currency_code': 'str',
'default_fraction_digits': 'int',
'numeric_code': 'int',
'symbol': 'str',
'display_name': 'str'
}
attribute_map = {
'currency_code': 'currencyCode',
'default_fraction_digits': 'defaultFractionDigits',
'numeric_code': 'numericCode',
'symbol': 'symbol',
'display_name': 'displayName'
}
def __init__(self, currency_code=None, default_fraction_digits=None, numeric_code=None, symbol=None, display_name=None): # noqa: E501
"""Currency - a model defined in Swagger""" # noqa: E501
self._currency_code = None
self._default_fraction_digits = None
self._numeric_code = None
self._symbol = None
self._display_name = None
self.discriminator = None
if currency_code is not None:
self.currency_code = currency_code
if default_fraction_digits is not None:
self.default_fraction_digits = default_fraction_digits
if numeric_code is not None:
self.numeric_code = numeric_code
if symbol is not None:
self.symbol = symbol
if display_name is not None:
self.display_name = display_name
@property
def currency_code(self):
"""Gets the currency_code of this Currency. # noqa: E501
:return: The currency_code of this Currency. # noqa: E501
:rtype: str
"""
return self._currency_code
@currency_code.setter
def currency_code(self, currency_code):
"""Sets the currency_code of this Currency.
:param currency_code: The currency_code of this Currency. # noqa: E501
:type: str
"""
self._currency_code = currency_code
@property
def default_fraction_digits(self):
"""Gets the default_fraction_digits of this Currency. # noqa: E501
:return: The default_fraction_digits of this Currency. # noqa: E501
:rtype: int
"""
return self._default_fraction_digits
@default_fraction_digits.setter
def default_fraction_digits(self, default_fraction_digits):
"""Sets the default_fraction_digits of this Currency.
:param default_fraction_digits: The default_fraction_digits of this Currency. # noqa: E501
:type: int
"""
self._default_fraction_digits = default_fraction_digits
@property
def numeric_code(self):
"""Gets the numeric_code of this Currency. # noqa: E501
:return: The numeric_code of this Currency. # noqa: E501
:rtype: int
"""
return self._numeric_code
@numeric_code.setter
def numeric_code(self, numeric_code):
"""Sets the numeric_code of this Currency.
:param numeric_code: The numeric_code of this Currency. # noqa: E501
:type: int
"""
self._numeric_code = numeric_code
@property
def symbol(self):
"""Gets the symbol of this Currency. # noqa: E501
:return: The symbol of this Currency. # noqa: E501
:rtype: str
"""
return self._symbol
@symbol.setter
def symbol(self, symbol):
"""Sets the symbol of this Currency.
:param symbol: The symbol of this Currency. # noqa: E501
:type: str
"""
self._symbol = symbol
@property
def display_name(self):
"""Gets the display_name of this Currency. # noqa: E501
:return: The display_name of this Currency. # noqa: E501
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""Sets the display_name of this Currency.
:param display_name: The display_name of this Currency. # noqa: E501
:type: str
"""
self._display_name = display_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Currency, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Currency):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
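# Hedged usage sketch (not part of the generated model; assumes the generated
# file's standard header imports, e.g. `six` and `pprint`, are present above):
if __name__ == "__main__":
    usd = Currency(currency_code="USD", default_fraction_digits=2,
                   numeric_code=840, symbol="$", display_name="US Dollar")
    # to_dict() walks swagger_types and returns a plain dict of attributes.
    print(usd.to_dict())
    # e.g. {'currency_code': 'USD', 'default_fraction_digits': 2, ...}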
|
|
# Copyright (c) 2016 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Set Huawei private configuration into the Configuration object.
To conveniently access the private configuration, we parse the Huawei config
file and set every property on the Configuration object as an attribute.
"""
import base64
import six
from xml.etree import ElementTree as ET
from oslo_log import log as logging
from cinder import exception
from cinder.i18n import _
from cinder import utils
from cinder.volume.drivers.huawei import constants
LOG = logging.getLogger(__name__)
class HuaweiConf(object):
def __init__(self, conf):
self.conf = conf
def _encode_authentication(self):
need_encode = False
tree = ET.parse(self.conf.cinder_huawei_conf_file)
xml_root = tree.getroot()
name_node = xml_root.find('Storage/UserName')
pwd_node = xml_root.find('Storage/UserPassword')
if (name_node is not None
and not name_node.text.startswith('!$$$')):
name_node.text = '!$$$' + base64.b64encode(name_node.text)
need_encode = True
if (pwd_node is not None
and not pwd_node.text.startswith('!$$$')):
pwd_node.text = '!$$$' + base64.b64encode(pwd_node.text)
need_encode = True
if need_encode:
utils.execute('chmod',
'600',
self.conf.cinder_huawei_conf_file,
run_as_root=True)
tree.write(self.conf.cinder_huawei_conf_file, 'UTF-8')
def update_config_value(self):
self._encode_authentication()
set_attr_funcs = (self._san_address,
self._san_user,
self._san_password,
self._san_product,
self._san_protocol,
self._lun_type,
self._lun_ready_wait_interval,
self._lun_copy_wait_interval,
self._lun_timeout,
self._lun_write_type,
self._lun_mirror_switch,
self._lun_prefetch,
self._lun_policy,
self._lun_read_cache_policy,
self._lun_write_cache_policy,
self._storage_pools,
self._iscsi_default_target_ip,
self._iscsi_info,)
tree = ET.parse(self.conf.cinder_huawei_conf_file)
xml_root = tree.getroot()
for f in set_attr_funcs:
f(xml_root)
def _san_address(self, xml_root):
text = xml_root.findtext('Storage/RestURL')
if not text:
msg = _("RestURL is not configured.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
addrs = text.split(';')
addrs = list(set([x.strip() for x in addrs if x.strip()]))
setattr(self.conf, 'san_address', addrs)
def _san_user(self, xml_root):
text = xml_root.findtext('Storage/UserName')
if not text:
msg = _("UserName is not configured.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
user = base64.b64decode(text[4:])
setattr(self.conf, 'san_user', user)
def _san_password(self, xml_root):
text = xml_root.findtext('Storage/UserPassword')
if not text:
msg = _("UserPassword is not configured.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
pwd = base64.b64decode(text[4:])
setattr(self.conf, 'san_password', pwd)
def _san_product(self, xml_root):
text = xml_root.findtext('Storage/Product')
if not text:
msg = _("SAN product is not configured.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
product = text.strip()
setattr(self.conf, 'san_product', product)
def _san_protocol(self, xml_root):
text = xml_root.findtext('Storage/Protocol')
if not text:
msg = _("SAN protocol is not configured.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
protocol = text.strip()
setattr(self.conf, 'san_protocol', protocol)
def _lun_type(self, xml_root):
lun_type = constants.THICK_LUNTYPE
text = xml_root.findtext('LUN/LUNType')
if text:
lun_type = text.strip()
if lun_type == 'Thick':
lun_type = constants.THICK_LUNTYPE
elif lun_type == 'Thin':
lun_type = constants.THIN_LUNTYPE
else:
msg = (_("Invalid lun type %s is configured.") % lun_type)
LOG.exception(msg)
raise exception.InvalidInput(reason=msg)
setattr(self.conf, 'lun_type', lun_type)
def _lun_ready_wait_interval(self, xml_root):
text = xml_root.findtext('LUN/LUNReadyWaitInterval')
interval = text.strip() if text else constants.DEFAULT_WAIT_INTERVAL
setattr(self.conf, 'lun_ready_wait_interval', int(interval))
def _lun_copy_wait_interval(self, xml_root):
text = xml_root.findtext('LUN/LUNcopyWaitInterval')
interval = text.strip() if text else constants.DEFAULT_WAIT_INTERVAL
setattr(self.conf, 'lun_copy_wait_interval', int(interval))
def _lun_timeout(self, xml_root):
text = xml_root.findtext('LUN/Timeout')
interval = text.strip() if text else constants.DEFAULT_WAIT_TIMEOUT
setattr(self.conf, 'lun_timeout', int(interval))
def _lun_write_type(self, xml_root):
text = xml_root.findtext('LUN/WriteType')
write_type = text.strip() if text else '1'
setattr(self.conf, 'lun_write_type', write_type)
def _lun_mirror_switch(self, xml_root):
text = xml_root.findtext('LUN/MirrorSwitch')
mirror_switch = text.strip() if text else '1'
setattr(self.conf, 'lun_mirror_switch', mirror_switch)
def _lun_prefetch(self, xml_root):
prefetch_type = '3'
prefetch_value = '0'
node = xml_root.find('LUN/Prefetch')
if (node is not None
and node.attrib['Type']
and node.attrib['Value']):
prefetch_type = node.attrib['Type'].strip()
if prefetch_type not in ['0', '1', '2', '3']:
msg = (_(
"Invalid prefetch type '%s' is configured. "
"PrefetchType must be in 0,1,2,3.") % prefetch_type)
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
prefetch_value = node.attrib['Value'].strip()
factor = {'1': 2}
factor = int(factor.get(prefetch_type, '1'))
prefetch_value = int(prefetch_value) * factor
prefetch_value = six.text_type(prefetch_value)
setattr(self.conf, 'lun_prefetch_type', prefetch_type)
setattr(self.conf, 'lun_prefetch_value', prefetch_value)
def _lun_policy(self, xml_root):
setattr(self.conf, 'lun_policy', '0')
def _lun_read_cache_policy(self, xml_root):
setattr(self.conf, 'lun_read_cache_policy', '2')
def _lun_write_cache_policy(self, xml_root):
setattr(self.conf, 'lun_write_cache_policy', '5')
def _storage_pools(self, xml_root):
nodes = xml_root.findall('LUN/StoragePool')
if not nodes:
msg = _('Storage pool is not configured.')
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
texts = [x.text for x in nodes]
merged_text = ';'.join(texts)
pools = set(x.strip() for x in merged_text.split(';') if x.strip())
if not pools:
msg = _('Invalid storage pool is configured.')
LOG.error(msg)
            raise exception.InvalidInput(reason=msg)
setattr(self.conf, 'storage_pools', list(pools))
def _iscsi_default_target_ip(self, xml_root):
text = xml_root.findtext('iSCSI/DefaultTargetIP')
target_ip = text.split() if text else []
setattr(self.conf, 'iscsi_default_target_ip', target_ip)
def _iscsi_info(self, xml_root):
nodes = xml_root.findall('iSCSI/Initiator')
if nodes is None:
setattr(self.conf, 'iscsi_info', [])
return
iscsi_info = []
for node in nodes:
props = {}
for item in node.items():
props[item[0].strip()] = item[1].strip()
iscsi_info.append(props)
setattr(self.conf, 'iscsi_info', iscsi_info)
def _parse_rmt_iscsi_info(self, iscsi_info):
if not (iscsi_info and iscsi_info.strip()):
return []
# Consider iscsi_info value:
# ' {Name:xxx ;;TargetPortGroup: xxx};\n'
# '{Name:\t\rxxx;CHAPinfo: mm-usr#mm-pwd} '
# Step 1, ignore whitespace characters, convert to:
# '{Name:xxx;;TargetPortGroup:xxx};{Name:xxx;CHAPinfo:mm-usr#mm-pwd}'
iscsi_info = ''.join(iscsi_info.split())
# Step 2, make initiators configure list, convert to:
# ['Name:xxx;;TargetPortGroup:xxx', 'Name:xxx;CHAPinfo:mm-usr#mm-pwd']
initiator_infos = iscsi_info[1:-1].split('};{')
# Step 3, get initiator configure pairs, convert to:
# [['Name:xxx', '', 'TargetPortGroup:xxx'],
# ['Name:xxx', 'CHAPinfo:mm-usr#mm-pwd']]
initiator_infos = map(lambda x: x.split(';'), initiator_infos)
# Step 4, remove invalid configure pairs, convert to:
# [['Name:xxx', 'TargetPortGroup:xxx'],
# ['Name:xxx', 'CHAPinfo:mm-usr#mm-pwd']]
initiator_infos = map(lambda x: filter(lambda y: y, x),
initiator_infos)
# Step 5, make initiators configure dict, convert to:
# [{'TargetPortGroup': 'xxx', 'Name': 'xxx'},
# {'Name': 'xxx', 'CHAPinfo': 'mm-usr#mm-pwd'}]
get_opts = lambda x: x.split(':', 1)
initiator_infos = map(lambda x: dict(map(get_opts, x)),
initiator_infos)
# Convert generator to list for py3 compatibility.
initiator_infos = list(initiator_infos)
# Step 6, replace CHAPinfo 'user#pwd' to 'user;pwd'
key = 'CHAPinfo'
for info in initiator_infos:
if key in info:
info[key] = info[key].replace('#', ';', 1)
return initiator_infos
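    # End-to-end example (illustrative) of _parse_rmt_iscsi_info: the input
    #   '{Name:iqn.a; TargetPortGroup:tpg0};{Name:iqn.b;CHAPinfo:usr#pwd}'
    # parses, via the steps above, to
    #   [{'Name': 'iqn.a', 'TargetPortGroup': 'tpg0'},
    #    {'Name': 'iqn.b', 'CHAPinfo': 'usr;pwd'}]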
def get_replication_devices(self):
devs = self.conf.safe_get('replication_device')
if not devs:
return []
devs_config = []
for dev in devs:
dev_config = {}
dev_config['backend_id'] = dev['backend_id']
dev_config['san_address'] = dev['san_address'].split(';')
dev_config['san_user'] = dev['san_user']
dev_config['san_password'] = dev['san_password']
dev_config['storage_pool'] = dev['storage_pool'].split(';')
dev_config['iscsi_info'] = self._parse_rmt_iscsi_info(
dev.get('iscsi_info'))
dev_config['iscsi_default_target_ip'] = (
dev['iscsi_default_target_ip'].split(';')
if 'iscsi_default_target_ip' in dev
else [])
devs_config.append(dev_config)
return devs_config
def get_local_device(self):
dev_config = {
'backend_id': "default",
'san_address': self.conf.san_address,
'san_user': self.conf.san_user,
'san_password': self.conf.san_password,
'storage_pool': self.conf.storage_pools,
'iscsi_info': self.conf.iscsi_info,
'iscsi_default_target_ip': self.conf.iscsi_default_target_ip,
}
return dev_config
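# Illustrative (assumed) layout of the XML file parsed above; the element
# paths match the findtext()/findall() calls in HuaweiConf, and all values
# are placeholders, not documented defaults:
#
# <config>
#     <Storage>
#         <RestURL>https://x.x.x.x:8088/deviceManager/rest/</RestURL>
#         <UserName>admin</UserName>
#         <UserPassword>password</UserPassword>
#         <Product>V3</Product>
#         <Protocol>iSCSI</Protocol>
#     </Storage>
#     <LUN>
#         <LUNType>Thick</LUNType>
#         <StoragePool>Pool001;Pool002</StoragePool>
#     </LUN>
#     <iSCSI>
#         <DefaultTargetIP>x.x.x.x</DefaultTargetIP>
#         <Initiator Name="iqn.example" TargetIP="x.x.x.x"/>
#     </iSCSI>
# </config>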
|
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line version of the wiki neighbors demo."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import random
import urllib.parse
from absl import app
from absl import flags
from absl import logging
from bert import tokenization
from language.labs.drkit import search_utils
import numpy as np
import scipy.sparse as sp
from sklearn.preprocessing import normalize
import tensorflow.compat.v1 as tf
from tqdm import tqdm
FLAGS = flags.FLAGS
flags.DEFINE_string("hotpotqa_file", None, "Path to HotpotQA dataset file.")
flags.DEFINE_string("entity_dir", None,
"Path to Entity co-occurrence directory.")
flags.DEFINE_string("vocab_file", None, "Path to vocab for tokenizer.")
flags.DEFINE_string("output_file", None, "Path to Output file.")
def tfidf_linking(questions, base_dir, tokenizer, top_k, batch_size=100):
"""Match questions to entities via Tf-IDF."""
# Load entity ids and masks.
tf.reset_default_graph()
id_ckpt = os.path.join(base_dir, "entity_ids")
entity_ids = search_utils.load_database(
"entity_ids", None, id_ckpt, dtype=tf.int32)
mask_ckpt = os.path.join(base_dir, "entity_mask")
entity_mask = search_utils.load_database("entity_mask", None, mask_ckpt)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
tf.logging.info("Loading entity ids and masks...")
np_ent_ids, np_ent_mask = sess.run([entity_ids, entity_mask])
tf.logging.info("Building entity count matrix...")
entity_count_matrix = search_utils.build_count_matrix(np_ent_ids, np_ent_mask)
# Tokenize questions and build count matrix.
tf.logging.info("Tokenizing questions...")
ques_toks, ques_masks = [], []
for question in questions:
toks = tokenizer.tokenize(question["question"])
tok_ids = tokenizer.convert_tokens_to_ids(toks)
ques_toks.append(tok_ids)
ques_masks.append([1 for _ in tok_ids])
tf.logging.info("Building question count matrix...")
question_count_matrix = search_utils.build_count_matrix(ques_toks, ques_masks)
# Tf-IDF.
tf.logging.info("Computing IDFs...")
idfs = search_utils.counts_to_idfs(entity_count_matrix, cutoff=1e-5)
tf.logging.info("Computing entity Tf-IDFs...")
ent_tfidfs = search_utils.counts_to_tfidf(entity_count_matrix, idfs)
ent_tfidfs = normalize(ent_tfidfs, norm="l2", axis=0)
tf.logging.info("Computing question TF-IDFs...")
qry_tfidfs = search_utils.counts_to_tfidf(question_count_matrix, idfs)
qry_tfidfs = normalize(qry_tfidfs, norm="l2", axis=0)
tf.logging.info("Searching...")
top_doc_indices = np.empty((len(questions), top_k), dtype=np.int32)
top_doc_distances = np.empty((len(questions), top_k), dtype=np.float32)
# distances = qry_tfidfs.transpose().dot(ent_tfidfs)
num_batches = len(questions) // batch_size
tf.logging.info("Computing distances in %d batches of size %d",
num_batches + 1, batch_size)
for nb in tqdm(range(num_batches + 1)):
min_ = nb * batch_size
max_ = (nb + 1) * batch_size
if min_ >= len(questions):
break
if max_ > len(questions):
max_ = len(questions)
distances = qry_tfidfs[:, min_:max_].transpose().dot(ent_tfidfs).tocsr()
for ii in range(min_, max_):
my_distances = distances[ii - min_, :].tocsr()
if len(my_distances.data) <= top_k:
o_sort = np.argsort(-my_distances.data)
top_doc_indices[ii, :len(o_sort)] = my_distances.indices[o_sort]
top_doc_distances[ii, :len(o_sort)] = my_distances.data[o_sort]
top_doc_indices[ii, len(o_sort):] = 0
top_doc_distances[ii, len(o_sort):] = 0
else:
o_sort = np.argpartition(-my_distances.data, top_k)[:top_k]
top_doc_indices[ii, :] = my_distances.indices[o_sort]
top_doc_distances[ii, :] = my_distances.data[o_sort]
  # Load entity metadata and convert to kb_id.
metadata_file = os.path.join(base_dir, "entities.json")
entity2id, entity2name = json.load(tf.gfile.Open(metadata_file))
id2entity = {i: e for e, i in entity2id.items()}
id2name = {i: entity2name[e] for e, i in entity2id.items()}
mentions = []
for ii in range(len(questions)):
my_mentions = []
for m in range(top_k):
my_mentions.append({
"kb_id": id2entity[top_doc_indices[ii, m]],
"score": str(top_doc_distances[ii, m]),
"name": id2name[top_doc_indices[ii, m]],
})
mentions.append(my_mentions)
return mentions
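# Shape of the value returned by tfidf_linking (from the code above): one list
# per question, each holding top_k dicts of the form
#   {"kb_id": <entity id>, "score": <stringified Tf-IDF score>, "name": ...}
# Note the argpartition branch selects the top_k matches without fully
# sorting them.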
def load_entity_matrices(base_dir):
"""Load entity co-occurrence and co-reference matrices."""
cooccur_ckpt = os.path.join(base_dir, "ent2ment.npz")
coref_ckpt = os.path.join(base_dir, "coref.npz")
tf.reset_default_graph()
co_data, co_indices, co_rowsplits = search_utils.load_ragged_matrix(
"ent2ment", cooccur_ckpt)
coref_map = search_utils.load_database(
"coref", None, coref_ckpt, dtype=tf.int32)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
tf.logging.info("Loading ragged matrix...")
np_data, np_indices, np_indptr = sess.run(
[co_data, co_indices, co_rowsplits])
tf.logging.info("Loading coref map...")
np_coref = sess.run(coref_map)
num_entities = np_indptr.shape[0] - 1
num_mentions = np_coref.shape[0]
tf.logging.info("Creating sparse matrix %d x %d...", num_entities,
num_mentions)
sp_cooccur = sp.csr_matrix((np_data, np_indices, np_indptr),
shape=(num_entities, num_mentions))
tf.logging.info("Creating sparse matrix %d x %d...", num_mentions,
num_entities)
sp_coref = sp.csr_matrix((np.ones_like(np_coref, dtype=np.int32),
(np.arange(np_coref.shape[0]), np_coref)),
shape=(num_mentions, num_entities))
metadata_file = os.path.join(base_dir, "entities.json")
entity2id, _ = json.load(tf.gfile.Open(metadata_file))
return sp_cooccur, sp_coref, entity2id
def evaluate_entity_linking(questions, base_dir, num_hops):
"""Evaluate how often answers can be reached from linked entities."""
def _check_answers(sp_vec, answers):
    found_ans = np.zeros((len(answers),), dtype=bool)
for ii, ans in enumerate(answers):
if sp_vec[0, ans] > 0.:
found_ans[ii] = True
return found_ans
sp_cooccur, sp_coref, entity2id = load_entity_matrices(base_dir)
num_found_ans = {i: 0. for i in range(num_hops + 1)}
for ii, question in enumerate(questions):
if (ii + 1) % 1000 == 0:
tf.logging.info("Evaluated %d questions...", ii)
subjects = [
entity2id.get(ee["kb_id"].lower(), 0) for ee in question["entities"]
]
answers = [
entity2id.get(ee["kb_id"].lower(), 0)
for ee in question["supporting_facts"]
]
# Create the initial sparse vector.
vals = np.ones((len(subjects),), dtype=np.float32)
vals = vals / vals.sum()
rows = np.zeros((len(subjects),), dtype=np.int32)
cols = np.asarray(subjects, dtype=np.int32)
v_st = sp.csr_matrix((vals, (rows, cols)), shape=(1, len(entity2id)))
found_ans = _check_answers(v_st, answers)
if found_ans.all():
num_found_ans[0] += 1
continue
for i in range(num_hops):
v_m = v_st * sp_cooccur
v_st = v_m * sp_coref
# print("then", v_st.getnnz(), v_st[0].nonzero()[:10])
found_ans = np.logical_or(found_ans, _check_answers(v_st, answers))
if found_ans.all():
num_found_ans[i + 1] += 1
break
for i in range(num_hops + 1):
num_found_ans[i] /= len(questions)
return num_found_ans
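# Hop semantics in evaluate_entity_linking: sp_cooccur is (entities x
# mentions) and sp_coref is (mentions x entities), so v_st * sp_cooccur
# expands an entity distribution onto co-occurring mentions, and the
# following multiply by sp_coref resolves those mentions back to entities,
# i.e. one hop through the corpus per loop iteration.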
def _process_url(url):
"""Get textual identifier from URL and quote it."""
return urllib.parse.quote(" ".join(url.rsplit("/", 1)[-1].split("_")))
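# Example: _process_url("https://en.wikipedia.org/wiki/New_York_City")
# returns "New%20York%20City": the last path segment has its underscores
# replaced by spaces and is then percent-quoted.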
def main(_):
logging.set_verbosity(logging.INFO)
logging.info("Reading HotpotQA data...")
with tf.gfile.Open(FLAGS.hotpotqa_file) as f:
data = json.load(f)
logging.info("Done.")
logging.info("Entity linking %d questions...", len(data))
num_empty_questions = 0
recall, recall_at1 = 0., 0.
all_questions = []
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=True)
linked_entities = tfidf_linking(data, FLAGS.entity_dir, tokenizer, 20)
for ii, item in enumerate(data):
sup_facts = list(set([title for title, _ in item["supporting_facts"]]))
# pylint: disable=g-complex-comprehension
all_questions.append({
"question":
item["question"],
"entities":
linked_entities[ii],
"answer":
item["answer"],
"_id":
item["_id"],
"level":
item["level"],
"type":
item["type"],
"supporting_facts": [{
"kb_id": urllib.parse.quote(title),
"name": title
} for title in sup_facts],
})
logging.info("Writing questions to output file...")
f_out = tf.gfile.Open(FLAGS.output_file, "w")
f_out.write("\n".join(json.dumps(q) for q in all_questions))
f_out.close()
questions_to_eval = random.sample(all_questions, 1000)
num_found_ans = evaluate_entity_linking(questions_to_eval, FLAGS.entity_dir,
3)
logging.info("===============================================")
logging.info("===============================================")
logging.info("%d questions without entities (out of %d)", num_empty_questions,
len(data))
logging.info("recall of at least 1 supporting facts %.3f", recall / len(data))
logging.info("recall @1 of supporting facts %.3f", recall_at1 / len(data))
total_ans_reachable = 0.
for i in num_found_ans:
logging.info("answers reachable in %d hops %.3f", i, num_found_ans[i])
total_ans_reachable += num_found_ans[i]
logging.info("answers reachable %.3f", total_ans_reachable)
logging.info("===============================================")
logging.info("===============================================")
if __name__ == "__main__":
app.run(main)
|
|
# -*- coding: utf-8 -*-
"""
Python advanced pretty printer. This pretty printer is intended to
replace the old `pprint` python module which does not allow developers
to provide their own pretty print callbacks.
This module is based on ruby's `prettyprint.rb` library by `Tanaka Akira`.
Example Usage
-------------
To directly print the representation of an object use `pprint`::
from pretty import pprint
pprint(complex_object)
To get a string of the output use `pretty`::
from pretty import pretty
string = pretty(complex_object)
Extending
---------
The pretty library allows developers to add pretty printing rules for their
own objects. This process is straightforward: all you have to do is add
a `_repr_pretty_` method to your object and call the methods on the
pretty printer passed::
class MyObject(object):
def _repr_pretty_(self, p, cycle):
...
Depending on the python version you want to support you have two
possibilities. The following list shows the python 2.5 version and the
compatibility one.
Here the example implementation of a `_repr_pretty_` method for a list
subclass for python 2.5 and higher (python 2.5 requires the with statement
__future__ import)::
class MyList(list):
def _repr_pretty_(self, p, cycle):
if cycle:
p.text('MyList(...)')
else:
with p.group(8, 'MyList([', '])'):
for idx, item in enumerate(self):
if idx:
p.text(',')
p.breakable()
p.pretty(item)
The `cycle` parameter is `True` if pretty detected a cycle. You *have* to
react to that or the result is an infinite loop. `p.text()` just adds
non-breaking text to the output, `p.breakable()` either adds a space
or breaks here. If you pass it an argument it's used instead of the
default space. `p.pretty` prettyprints another object using the pretty print
method.
The first parameter to the `group` function specifies the extra indentation
of the next line. In this example the next item will either not be
broken onto a new line (if the items are short enough) or aligned with the
right edge of the opening bracket of `MyList`.
If you want to support python 2.4 and lower you can use this code::
class MyList(list):
def _repr_pretty_(self, p, cycle):
if cycle:
p.text('MyList(...)')
else:
p.begin_group(8, 'MyList([')
for idx, item in enumerate(self):
if idx:
p.text(',')
p.breakable()
p.pretty(item)
p.end_group(8, '])')
If you just want to indent something you can use the group function
without open / close parameters. Under python 2.5 you can also use this
code::
with p.indent(2):
...
Or under python2.4 you might want to modify ``p.indentation`` by hand but
this is rather ugly.
Inheritance diagram:
.. inheritance-diagram:: IPython.lib.pretty
:parts: 3
:copyright: 2007 by Armin Ronacher.
Portions (c) 2009 by Robert Kern.
:license: BSD License.
"""
from __future__ import print_function
from contextlib import contextmanager
import sys
import types
import re
import datetime
from collections import deque
from IPython.utils.py3compat import PY3, cast_unicode
from IPython.utils.encoding import get_stream_enc
from io import StringIO
__all__ = ['pretty', 'pprint', 'PrettyPrinter', 'RepresentationPrinter',
'for_type', 'for_type_by_name']
MAX_SEQ_LENGTH = 1000
_re_pattern_type = type(re.compile(''))
def _safe_getattr(obj, attr, default=None):
"""Safe version of getattr.
Same as getattr, but will return ``default`` on any Exception,
rather than raising.
"""
try:
return getattr(obj, attr, default)
except Exception:
return default
if PY3:
CUnicodeIO = StringIO
else:
class CUnicodeIO(StringIO):
"""StringIO that casts str to unicode on Python 2"""
def write(self, text):
return super(CUnicodeIO, self).write(
cast_unicode(text, encoding=get_stream_enc(sys.stdout)))
def pretty(obj, verbose=False, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH):
"""
Pretty print the object's representation.
"""
stream = CUnicodeIO()
printer = RepresentationPrinter(stream, verbose, max_width, newline, max_seq_length)
printer.pretty(obj)
printer.flush()
return stream.getvalue()
def pprint(obj, verbose=False, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH):
"""
Like `pretty` but print to stdout.
"""
printer = RepresentationPrinter(sys.stdout, verbose, max_width, newline, max_seq_length)
printer.pretty(obj)
printer.flush()
sys.stdout.write(newline)
sys.stdout.flush()
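# Quick example of the two entry points (illustrative):
#   pretty({'b': 2, 'a': 1})   # -> "{'a': 1, 'b': 2}" (keys sorted, returned)
#   pprint({'b': 2, 'a': 1})   # same text, written to sys.stdout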
class _PrettyPrinterBase(object):
@contextmanager
def indent(self, indent):
"""with statement support for indenting/dedenting."""
self.indentation += indent
try:
yield
finally:
self.indentation -= indent
@contextmanager
def group(self, indent=0, open='', close=''):
"""like begin_group / end_group but for the with statement."""
self.begin_group(indent, open)
try:
yield
finally:
self.end_group(indent, close)
class PrettyPrinter(_PrettyPrinterBase):
"""
Baseclass for the `RepresentationPrinter` prettyprinter that is used to
    generate pretty reprs of objects. Unlike the `RepresentationPrinter`,
this printer knows nothing about the default pprinters or the `_repr_pretty_`
callback method.
"""
def __init__(self, output, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH):
self.output = output
self.max_width = max_width
self.newline = newline
self.max_seq_length = max_seq_length
self.output_width = 0
self.buffer_width = 0
self.buffer = deque()
root_group = Group(0)
self.group_stack = [root_group]
self.group_queue = GroupQueue(root_group)
self.indentation = 0
def _break_outer_groups(self):
while self.max_width < self.output_width + self.buffer_width:
group = self.group_queue.deq()
if not group:
return
while group.breakables:
x = self.buffer.popleft()
self.output_width = x.output(self.output, self.output_width)
self.buffer_width -= x.width
while self.buffer and isinstance(self.buffer[0], Text):
x = self.buffer.popleft()
self.output_width = x.output(self.output, self.output_width)
self.buffer_width -= x.width
def text(self, obj):
"""Add literal text to the output."""
width = len(obj)
if self.buffer:
text = self.buffer[-1]
if not isinstance(text, Text):
text = Text()
self.buffer.append(text)
text.add(obj, width)
self.buffer_width += width
self._break_outer_groups()
else:
self.output.write(obj)
self.output_width += width
def breakable(self, sep=' '):
"""
        Add a breakable separator to the output. This does not mean that it
        will automatically break here. If no break takes place at this
        position, the `sep` is inserted, which defaults to one space.
"""
width = len(sep)
group = self.group_stack[-1]
if group.want_break:
self.flush()
self.output.write(self.newline)
self.output.write(' ' * self.indentation)
self.output_width = self.indentation
self.buffer_width = 0
else:
self.buffer.append(Breakable(sep, width, self))
self.buffer_width += width
self._break_outer_groups()
def break_(self):
"""
Explicitly insert a newline into the output, maintaining correct indentation.
"""
self.flush()
self.output.write(self.newline)
self.output.write(' ' * self.indentation)
self.output_width = self.indentation
self.buffer_width = 0
def begin_group(self, indent=0, open=''):
"""
        Begin a group. If you want support for python < 2.5, which doesn't have
        the with statement, this is the preferred way:
p.begin_group(1, '{')
...
p.end_group(1, '}')
The python 2.5 expression would be this:
with p.group(1, '{', '}'):
...
The first parameter specifies the indentation for the next line (usually
the width of the opening text), the second the opening text. All
parameters are optional.
"""
if open:
self.text(open)
group = Group(self.group_stack[-1].depth + 1)
self.group_stack.append(group)
self.group_queue.enq(group)
self.indentation += indent
def _enumerate(self, seq):
"""like enumerate, but with an upper limit on the number of items"""
for idx, x in enumerate(seq):
if self.max_seq_length and idx >= self.max_seq_length:
self.text(',')
self.breakable()
self.text('...')
                return  # end the generator; raising StopIteration here breaks under PEP 479
yield idx, x
def end_group(self, dedent=0, close=''):
"""End a group. See `begin_group` for more details."""
self.indentation -= dedent
group = self.group_stack.pop()
if not group.breakables:
self.group_queue.remove(group)
if close:
self.text(close)
def flush(self):
"""Flush data that is left in the buffer."""
for data in self.buffer:
self.output_width += data.output(self.output, self.output_width)
self.buffer.clear()
self.buffer_width = 0
def _get_mro(obj_class):
""" Get a reasonable method resolution order of a class and its superclasses
for both old-style and new-style classes.
"""
if not hasattr(obj_class, '__mro__'):
# Old-style class. Mix in object to make a fake new-style class.
try:
obj_class = type(obj_class.__name__, (obj_class, object), {})
except TypeError:
# Old-style extension type that does not descend from object.
# FIXME: try to construct a more thorough MRO.
mro = [obj_class]
else:
mro = obj_class.__mro__[1:-1]
else:
mro = obj_class.__mro__
return mro
class RepresentationPrinter(PrettyPrinter):
"""
Special pretty printer that has a `pretty` method that calls the pretty
printer for a python object.
This class stores processing data on `self` so you must *never* use
    this class in a threaded environment. Always lock it or re-instantiate
it.
Instances also have a verbose flag callbacks can access to control their
output. For example the default instance repr prints all attributes and
methods that are not prefixed by an underscore if the printer is in
verbose mode.
"""
def __init__(self, output, verbose=False, max_width=79, newline='\n',
singleton_pprinters=None, type_pprinters=None, deferred_pprinters=None,
max_seq_length=MAX_SEQ_LENGTH):
PrettyPrinter.__init__(self, output, max_width, newline, max_seq_length=max_seq_length)
self.verbose = verbose
self.stack = []
if singleton_pprinters is None:
singleton_pprinters = _singleton_pprinters.copy()
self.singleton_pprinters = singleton_pprinters
if type_pprinters is None:
type_pprinters = _type_pprinters.copy()
self.type_pprinters = type_pprinters
if deferred_pprinters is None:
deferred_pprinters = _deferred_type_pprinters.copy()
self.deferred_pprinters = deferred_pprinters
def pretty(self, obj):
"""Pretty print the given object."""
obj_id = id(obj)
cycle = obj_id in self.stack
self.stack.append(obj_id)
self.begin_group()
try:
obj_class = _safe_getattr(obj, '__class__', None) or type(obj)
# First try to find registered singleton printers for the type.
try:
printer = self.singleton_pprinters[obj_id]
except (TypeError, KeyError):
pass
else:
return printer(obj, self, cycle)
# Next walk the mro and check for either:
# 1) a registered printer
# 2) a _repr_pretty_ method
for cls in _get_mro(obj_class):
if cls in self.type_pprinters:
# printer registered in self.type_pprinters
return self.type_pprinters[cls](obj, self, cycle)
else:
# deferred printer
printer = self._in_deferred_types(cls)
if printer is not None:
return printer(obj, self, cycle)
else:
# Finally look for special method names.
# Some objects automatically create any requested
# attribute. Try to ignore most of them by checking for
# callability.
if '_repr_pretty_' in cls.__dict__:
meth = cls._repr_pretty_
if callable(meth):
return meth(obj, self, cycle)
return _default_pprint(obj, self, cycle)
finally:
self.end_group()
self.stack.pop()
def _in_deferred_types(self, cls):
"""
Check if the given class is specified in the deferred type registry.
Returns the printer from the registry if it exists, and None if the
class is not in the registry. Successful matches will be moved to the
regular type registry for future use.
"""
mod = _safe_getattr(cls, '__module__', None)
name = _safe_getattr(cls, '__name__', None)
key = (mod, name)
printer = None
if key in self.deferred_pprinters:
# Move the printer over to the regular registry.
printer = self.deferred_pprinters.pop(key)
self.type_pprinters[cls] = printer
return printer
class Printable(object):
def output(self, stream, output_width):
return output_width
class Text(Printable):
def __init__(self):
self.objs = []
self.width = 0
def output(self, stream, output_width):
for obj in self.objs:
stream.write(obj)
return output_width + self.width
def add(self, obj, width):
self.objs.append(obj)
self.width += width
class Breakable(Printable):
def __init__(self, seq, width, pretty):
self.obj = seq
self.width = width
self.pretty = pretty
self.indentation = pretty.indentation
self.group = pretty.group_stack[-1]
self.group.breakables.append(self)
def output(self, stream, output_width):
self.group.breakables.popleft()
if self.group.want_break:
stream.write(self.pretty.newline)
stream.write(' ' * self.indentation)
return self.indentation
if not self.group.breakables:
self.pretty.group_queue.remove(self.group)
stream.write(self.obj)
return output_width + self.width
class Group(Printable):
def __init__(self, depth):
self.depth = depth
self.breakables = deque()
self.want_break = False
class GroupQueue(object):
def __init__(self, *groups):
self.queue = []
for group in groups:
self.enq(group)
def enq(self, group):
depth = group.depth
while depth > len(self.queue) - 1:
self.queue.append([])
self.queue[depth].append(group)
def deq(self):
for stack in self.queue:
for idx, group in enumerate(reversed(stack)):
if group.breakables:
del stack[idx]
group.want_break = True
return group
for group in stack:
group.want_break = True
del stack[:]
def remove(self, group):
try:
self.queue[group.depth].remove(group)
except ValueError:
pass
try:
_baseclass_reprs = (object.__repr__, types.InstanceType.__repr__)
except AttributeError: # Python 3
_baseclass_reprs = (object.__repr__,)
def _default_pprint(obj, p, cycle):
"""
The default print function. Used if an object does not provide one and
it's none of the builtin objects.
"""
klass = _safe_getattr(obj, '__class__', None) or type(obj)
if _safe_getattr(klass, '__repr__', None) not in _baseclass_reprs:
# A user-provided repr. Find newlines and replace them with p.break_()
_repr_pprint(obj, p, cycle)
return
p.begin_group(1, '<')
p.pretty(klass)
p.text(' at 0x%x' % id(obj))
if cycle:
p.text(' ...')
elif p.verbose:
first = True
for key in dir(obj):
if not key.startswith('_'):
try:
value = getattr(obj, key)
except AttributeError:
continue
if isinstance(value, types.MethodType):
continue
if not first:
p.text(',')
p.breakable()
p.text(key)
p.text('=')
step = len(key) + 1
p.indentation += step
p.pretty(value)
p.indentation -= step
first = False
p.end_group(1, '>')
def _seq_pprinter_factory(start, end, basetype):
"""
Factory that returns a pprint function useful for sequences. Used by
the default pprint for tuples, dicts, and lists.
"""
def inner(obj, p, cycle):
typ = type(obj)
if basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__:
# If the subclass provides its own repr, use it instead.
return p.text(typ.__repr__(obj))
if cycle:
return p.text(start + '...' + end)
step = len(start)
p.begin_group(step, start)
for idx, x in p._enumerate(obj):
if idx:
p.text(',')
p.breakable()
p.pretty(x)
if len(obj) == 1 and type(obj) is tuple:
# Special case for 1-item tuples.
p.text(',')
p.end_group(step, end)
return inner
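# For instance, the factory above is what makes `pretty((1,))` render as
# "(1,)" (trailing comma for 1-tuples), and what lets long sequences wrap at
# `max_width` with elements aligned with the opening bracket.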
def _set_pprinter_factory(start, end, basetype):
"""
Factory that returns a pprint function useful for sets and frozensets.
"""
def inner(obj, p, cycle):
typ = type(obj)
if basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__:
# If the subclass provides its own repr, use it instead.
return p.text(typ.__repr__(obj))
if cycle:
return p.text(start + '...' + end)
if len(obj) == 0:
# Special case.
p.text(basetype.__name__ + '()')
else:
step = len(start)
p.begin_group(step, start)
# Like dictionary keys, we will try to sort the items if there aren't too many
items = obj
if not (p.max_seq_length and len(obj) >= p.max_seq_length):
try:
items = sorted(obj)
except Exception:
# Sometimes the items don't sort.
pass
for idx, x in p._enumerate(items):
if idx:
p.text(',')
p.breakable()
p.pretty(x)
p.end_group(step, end)
return inner
def _dict_pprinter_factory(start, end, basetype=None):
"""
Factory that returns a pprint function used by the default pprint of
dicts and dict proxies.
"""
def inner(obj, p, cycle):
typ = type(obj)
if basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__:
# If the subclass provides its own repr, use it instead.
return p.text(typ.__repr__(obj))
if cycle:
return p.text('{...}')
p.begin_group(1, start)
keys = obj.keys()
# if dict isn't large enough to be truncated, sort keys before displaying
if not (p.max_seq_length and len(obj) >= p.max_seq_length):
try:
keys = sorted(keys)
except Exception:
# Sometimes the keys don't sort.
pass
for idx, key in p._enumerate(keys):
if idx:
p.text(',')
p.breakable()
p.pretty(key)
p.text(': ')
p.pretty(obj[key])
p.end_group(1, end)
return inner
def _super_pprint(obj, p, cycle):
"""The pprint for the super type."""
p.begin_group(8, '<super: ')
p.pretty(obj.__thisclass__)
p.text(',')
p.breakable()
p.pretty(obj.__self__)
p.end_group(8, '>')
def _re_pattern_pprint(obj, p, cycle):
"""The pprint function for regular expression patterns."""
p.text('re.compile(')
pattern = repr(obj.pattern)
if pattern[:1] in 'uU':
pattern = pattern[1:]
prefix = 'ur'
else:
prefix = 'r'
pattern = prefix + pattern.replace('\\\\', '\\')
p.text(pattern)
if obj.flags:
p.text(',')
p.breakable()
done_one = False
for flag in ('TEMPLATE', 'IGNORECASE', 'LOCALE', 'MULTILINE', 'DOTALL',
'UNICODE', 'VERBOSE', 'DEBUG'):
if obj.flags & getattr(re, flag):
if done_one:
p.text('|')
p.text('re.' + flag)
done_one = True
p.text(')')
def _type_pprint(obj, p, cycle):
"""The pprint for classes and types."""
# Heap allocated types might not have the module attribute,
# and others may set it to None.
# Checks for a __repr__ override in the metaclass
if type(obj).__repr__ is not type.__repr__:
_repr_pprint(obj, p, cycle)
return
mod = _safe_getattr(obj, '__module__', None)
name = _safe_getattr(obj, '__qualname__', obj.__name__)
if mod in (None, '__builtin__', 'builtins', 'exceptions'):
p.text(name)
else:
p.text(mod + '.' + name)
def _repr_pprint(obj, p, cycle):
"""A pprint that just redirects to the normal repr function."""
# Find newlines and replace them with p.break_()
output = repr(obj)
for idx,output_line in enumerate(output.splitlines()):
if idx:
p.break_()
p.text(output_line)
def _function_pprint(obj, p, cycle):
"""Base pprint for all functions and builtin functions."""
name = _safe_getattr(obj, '__qualname__', obj.__name__)
mod = obj.__module__
if mod and mod not in ('__builtin__', 'builtins', 'exceptions'):
name = mod + '.' + name
p.text('<function %s>' % name)
def _exception_pprint(obj, p, cycle):
"""Base pprint for all exceptions."""
name = getattr(obj.__class__, '__qualname__', obj.__class__.__name__)
if obj.__class__.__module__ not in ('exceptions', 'builtins'):
name = '%s.%s' % (obj.__class__.__module__, name)
step = len(name) + 1
p.begin_group(step, name + '(')
for idx, arg in enumerate(getattr(obj, 'args', ())):
if idx:
p.text(',')
p.breakable()
p.pretty(arg)
p.end_group(step, ')')
#: the exception base
try:
_exception_base = BaseException
except NameError:
_exception_base = Exception
#: printers for builtin types
_type_pprinters = {
int: _repr_pprint,
float: _repr_pprint,
str: _repr_pprint,
tuple: _seq_pprinter_factory('(', ')', tuple),
list: _seq_pprinter_factory('[', ']', list),
dict: _dict_pprinter_factory('{', '}', dict),
set: _set_pprinter_factory('{', '}', set),
frozenset: _set_pprinter_factory('frozenset({', '})', frozenset),
super: _super_pprint,
_re_pattern_type: _re_pattern_pprint,
type: _type_pprint,
types.FunctionType: _function_pprint,
types.BuiltinFunctionType: _function_pprint,
types.MethodType: _repr_pprint,
datetime.datetime: _repr_pprint,
datetime.timedelta: _repr_pprint,
_exception_base: _exception_pprint
}
try:
_type_pprinters[types.DictProxyType] = _dict_pprinter_factory('<dictproxy {', '}>')
_type_pprinters[types.ClassType] = _type_pprint
_type_pprinters[types.SliceType] = _repr_pprint
except AttributeError: # Python 3
_type_pprinters[slice] = _repr_pprint
try:
_type_pprinters[xrange] = _repr_pprint
_type_pprinters[long] = _repr_pprint
_type_pprinters[unicode] = _repr_pprint
except NameError:
_type_pprinters[range] = _repr_pprint
_type_pprinters[bytes] = _repr_pprint
#: printers for types specified by name
_deferred_type_pprinters = {
}
def for_type(typ, func):
"""
Add a pretty printer for a given type.
"""
oldfunc = _type_pprinters.get(typ, None)
if func is not None:
# To support easy restoration of old pprinters, we need to ignore Nones.
_type_pprinters[typ] = func
return oldfunc
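# Hedged usage sketch for for_type (illustrative; `MyType` is hypothetical):
#
#   def _mytype_pprint(obj, p, cycle):
#       p.text('MyType(...)' if cycle else 'MyType(%r)' % obj.value)
#   old = for_type(MyType, _mytype_pprint)   # register, keep the old printer
#   ...
#   for_type(MyType, old)                    # restore it later (Nones ignored)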
def for_type_by_name(type_module, type_name, func):
"""
Add a pretty printer for a type specified by the module and name of a type
rather than the type object itself.
"""
key = (type_module, type_name)
oldfunc = _deferred_type_pprinters.get(key, None)
if func is not None:
# To support easy restoration of old pprinters, we need to ignore Nones.
_deferred_type_pprinters[key] = func
return oldfunc
#: printers for the default singletons
_singleton_pprinters = dict.fromkeys(map(id, [None, True, False, Ellipsis,
NotImplemented]), _repr_pprint)
if __name__ == '__main__':
from random import randrange
class Foo(object):
def __init__(self):
self.foo = 1
self.bar = re.compile(r'\s+')
self.blub = dict.fromkeys(range(30), randrange(1, 40))
self.hehe = 23424.234234
self.list = ["blub", "blah", self]
def get_foo(self):
print("foo")
pprint(Foo(), verbose=True)
|
|
from TASSELpy.java.lang.Object import Object
import TASSELpy.net.maizegenetics.matrixalgebra.decomposition.EigenvalueDecomposition
import TASSELpy.net.maizegenetics.matrixalgebra.decomposition.SingularValueDecomposition
from TASSELpy.utils.helper import make_sig
from TASSELpy.utils.Overloading import javaOverload,javaConstructorOverload
from TASSELpy.java.lang.Integer import metaInteger
from TASSELpy.java.lang.Double import metaDouble
from TASSELpy.java.lang.Boolean import metaBoolean
from TASSELpy.java.lang.Number import metaNumber
from TASSELpy.utils.primativeArray import javaPrimativeArray, meta_int_array
from abc import ABCMeta
import numpy as np
java_imports = {'DoubleMatrix':'net/maizegenetics/matrixalgebra/Matrix/DoubleMatrix',
'EigenvalueDecomposition':'net/maizegenetics/matrixalgebra/decomposition/EigenvalueDecomposition',
'SingularValueDecomposition': 'net/maizegenetics/matrixalgebra/decomposition/SingularValueDecomposition'}
class metaDoubleMatrix:
__metaclass__ = ABCMeta
@classmethod
def __subclasshook__(cls, C):
if issubclass(C, DoubleMatrix):
return True
else:
return False
class DoubleMatrix(Object):
"""
Wrapper for the DoubleMatrix interface, which is implemented by several classes
that call native methods
"""
_java_name = java_imports['DoubleMatrix']
@javaConstructorOverload(java_imports['DoubleMatrix'])
def __init__(self, *args, **kwargs):
pass
############
# Python magic methods
############
def __getitem__(self, key):
if type(key) != tuple:
raise KeyError("Must specify particular cell")
return self.getChecked(*key)
def __setitem__(self, key, value):
if type(key) != tuple:
raise KeyError("Must specify particular cell")
self.setChecked(key[0],key[1],value)
def __len__(self):
return self.numberOfRows()
def __repr__(self):
return "%d x %d Matrix" % (self.numberOfRows(), self.numberOfColumns())
def __add__(self, other):
if isinstance(other, metaNumber):
return self.scalarAdd(np.float64(other))
elif isinstance(other, DoubleMatrix):
return self.plus(other)
else:
raise TypeError("Unsupported operand type(s)")
def __radd__(self, other):
return self.__add__(other)
def __iadd__(self, other):
if isinstance(other, metaNumber):
return self.__add__(other)
elif isinstance(other, DoubleMatrix):
self.plusEquals(other)
return self
else:
raise TypeError("Unsupported operand type(s)")
def __sub__(self, other):
if isinstance(other, metaNumber):
return self.__add__(np.float64(-other))
elif isinstance(other, DoubleMatrix):
return self.minus(other)
else:
raise TypeError("Unsupported operand type(s)")
def __rsub__(self, other):
if isinstance(other, metaNumber):
            return self.__mul__(-1).__add__(other)
elif isinstance(other, DoubleMatrix):
return -self + other
else:
raise TypeError("Unsupported operand type(s)")
def __isub__(self, other):
if isinstance(other, metaNumber):
return self.__sub__(other)
elif isinstance(other, DoubleMatrix):
self.minusEquals(other)
return self
else:
raise TypeError("Unsupported operand type(s)")
def __mul__(self, other):
if isinstance(other, metaNumber):
return self.scalarMult(np.float64(other))
elif isinstance(other, DoubleMatrix):
return self.mult(other)
else:
raise TypeError("Unsupported operand type(s)")
def __rmul__(self, other):
return self.__mul__(other)
def __imul__(self, other):
if isinstance(other, metaNumber):
self.scalarMultEquals(np.float64(other))
return self
elif isinstance(other, DoubleMatrix):
return self.__mul__(other)
else:
raise TypeError("Unsupported operand type(s)")
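    # Hedged usage sketch for the operators above (assumes instances come
    # from a running TASSEL/JVM bridge; `a` and `b` are illustrative
    # DoubleMatrix objects of compatible shapes):
    #   c = a + b        # delegates to plus(); a + 2.0 uses scalarAdd()
    #   a -= b           # delegates to minusEquals(), modifying a in place
    #   d = a * b        # matrix product via mult(); a * 2.0 uses scalarMult()
    #   a[0, 0] = 1.0    # __setitem__ -> setChecked(0, 0, 1.0)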
    ## Gets a value without checking that the indices fall inside the matrix
# @param row The zero-based row index
# @param col The zero-based column index
# @return The value at row, col
@javaOverload("get",
(make_sig(['int','int'],'double'),(metaInteger,metaInteger),
np.float64))
def get(self, *args):
"""
        Gets a value without checking that the indices fall inside the matrix
Signatures:
double get(int row, int col)
Arguments:
row -- The zero-based row index
col -- The zero-based column index
Returns:
The value at row, col
"""
pass
    ## Gets a value, checking that the indices fall inside the matrix
# @param row The zero-based row index
# @param col The zero-based column index
# @return The value at row, col
@javaOverload("getChecked",
(make_sig(['int','int'],'double'),(metaInteger,metaInteger),
np.float64))
def getChecked(self, *args):
"""
        Gets a value, checking that the indices fall inside the matrix
Signatures:
double getChecked(int row, int col)
Arguments:
row -- The zero-based row index
col -- The zero-based column index
Returns:
The value at row, col
"""
pass
## Sets the matrix value at row, col. The coordinates are not checked to make
# sure they fall in the matrix
# @param row The zero-based row index
# @param col The zero-based column index
# @param value The value to be set at row, col
@javaOverload("set",
(make_sig(['int','int','double'],'void'),(metaInteger,metaInteger,metaDouble),
None))
def set(self, *args):
"""
Sets the matrix value at row, col. The coordinates are not checked to make
sure they fall in the matrix
Signatures:
void set(int row, int col, double value)
Arguments:
row -- The zero-based row index
col -- The zero-based column index
value -- The value to be set at row, col
"""
pass
## Sets the matrix value at row, col. The coordinates are checked to make
# sure they fall in the matrix
# @param row The zero-based row index
# @param col The zero-based column index
# @param value The value to be set at row, col
@javaOverload("setChecked",
(make_sig(['int','int','double'],'void'),(metaInteger,metaInteger,metaDouble),
None))
def setChecked(self, *args):
"""
Sets the matrix value at row, col. The coordinates are checked to make
sure they fall in the matrix
Signatures:
void setChecked(int row, int col, double value)
Arguments:
row -- The zero-based row index
col -- The zero-based column index
value -- The value to be set at row, col
"""
pass
    ## Gets the transpose of this matrix
# @return The transpose of this matrix
@javaOverload("transpose",
(make_sig([],java_imports['DoubleMatrix']),(),
lambda x: DoubleMatrix(obj=x)))
def transpose(self, *args):
"""
        Gets the transpose of this matrix
Signatures:
DoubleMatrix transpose()
Returns:
The transpose of this matrix
"""
pass
## Multiply this matrix times another
# @param dm A double matrix
# @param transpose If true, this matrix will be transposed before multiplying
    # @param transposedm If true, dm will be transposed before multiplying
# @return The multiplied matrix
@javaOverload("mult",
(make_sig([java_imports['DoubleMatrix'],'boolean','boolean'],
java_imports['DoubleMatrix']),(metaDoubleMatrix,metaBoolean,
metaBoolean),lambda x: DoubleMatrix(obj=x)),
(make_sig([java_imports['DoubleMatrix']],java_imports['DoubleMatrix']),
(metaDoubleMatrix,),lambda x: DoubleMatrix(obj=x)))
def mult(self, *args):
"""
Multiply this matrix times another
Signatures:
DoubleMatrix mult(DoubleMatrix dm, boolean transpose, boolean transposedm)
DoubleMatrix mult(DoubleMatrix dm)
Arguments:
DoubleMatrix mult(DoubleMatrix dm, boolean transpose, boolean transposedm)
dm -- A double matrix
transpose -- If true, this matrix will be transposed before multiplying
transposedm -- If true, dm will be transposed before multiplying
DoubleMatrix mult(DoubleMatrix dm)
dm -- A double matrix
Returns:
The multiplied matrix
"""
pass
## Using this function for combining multiplication and addition allows the implementing
# library to optimize the operations
    # @param A The matrix to be multiplied
    # @param B The matrix to be added, can be null
    # @param alpha scalar multiplier for A
    # @param beta scalar multiplier for B
    # @param transpose If true, X (this matrix) is transposed
    # @param transposeA If true, A is transposed
    # @return alpha*XA+beta*B where X is this matrix
@javaOverload("multadd",
(make_sig([java_imports['DoubleMatrix'],java_imports['DoubleMatrix'],
'double','double','boolean','boolean'],
java_imports['DoubleMatrix']),
(metaDoubleMatrix,metaDoubleMatrix,metaDouble,metaDouble,
metaBoolean,metaBoolean),
lambda x: DoubleMatrix(obj=x)))
def multadd(self, *args):
"""
Using this function for combining multiplication and addition allows the
implementing library to optimize the operations
Signatures:
DoubleMatrix multadd(DoubleMatrix A, DoubleMatrix B, double alpha, double beta,
boolean transpose, boolean transposeA)
Arguments:
A -- The matrix to be multiplied
B -- The matrix to be added, can be null
alpha -- scalar multiplier for A
beta -- scalar multiplier for B
transpose -- If true, X is transposed
transposeA -- If true, A is transposed
Returns:
alpha*XA+beta*B where X is this matrix
"""
pass
    ## Gets X'X or X'dm, where X is this matrix
# @param dm The second matrix
# @return The cross product
@javaOverload("crossproduct",
(make_sig([],java_imports['DoubleMatrix']),(),
lambda x: DoubleMatrix(obj=x)),
(make_sig([java_imports['DoubleMatrix']],java_imports['DoubleMatrix']),
(metaDoubleMatrix,),lambda x: DoubleMatrix(obj=x)))
def crossproduct(self, *args):
"""
Get X'X or X'dm, where X is this matrix
Signatures:
DoubleMatrix crossproduct()
DoubleMatrix crossproduct(DoubleMatrix dm)
Arguments:
DoubleMatrix crossproduct(DoubleMatrix dm)
dm -- The second matrix
Returns:
The cross product
"""
pass
## Gets XX' or Xdm', where X is this matrix
# @param dm The second matrix
# @return The tcrossproduct
@javaOverload("tcrossproduct",
(make_sig([],java_imports['DoubleMatrix']),(),
lambda x: DoubleMatrix(obj=x)),
(make_sig([java_imports['DoubleMatrix']],java_imports['DoubleMatrix']),
(metaDoubleMatrix,),lambda x: DoubleMatrix(obj=x)))
def tcrossproduct(self, *args):
"""
Gets XX' or Xdm', where X is this matrix
Signatures:
DoubleMatrix tcrossproduct()
DoubleMatrix tcrossproduct(DoubleMatrix dm)
Arguments:
DoubleMatrix tcrossproduct(DoubleMatrix dm)
dm -- The second matrix
Returns:
The tcrossproduct
"""
pass
## Puts two matrices together
# @param dm A DoubleMatrix
# @param rows true if rows are to be concatenated, false if columns are to be concatenated
# @return X with dm appended, where X is this matrix
@javaOverload("concatenate",
(make_sig([java_imports['DoubleMatrix'],'boolean'],
java_imports['DoubleMatrix']),
(metaDoubleMatrix,metaBoolean),
lambda x: DoubleMatrix(obj=x)))
def concatenate(self, *args):
"""
Puts two matrices together
Signatures:
DoubleMatrix concatenate(DoubleMatrix dm, boolean rows)
Arguments:
dm -- a DoubleMatrix
rows -- true if rows are to be concatenated, false if columns are to be concatenated
Returns:
Returns X with dm appended, where X is this matrix
"""
pass
## Returns the inverse of a square matrix, without modifying the original matrix
# @return The inverse of a square matrix if it is non-singular, null otherwise
@javaOverload("inverse",
(make_sig([],java_imports['DoubleMatrix']),(),
lambda x: DoubleMatrix(obj=x)))
def inverse(self, *args):
"""
Returns the inverse of a square matrix, without modifying the original matrix.
Signatures:
DoubleMatrix inverse()
Returns:
The inverse of a square matrix if it is non-singular, null otherwise
"""
pass
## Inverts a square matrix, replacing the original with the inverse
@javaOverload("invert",
(make_sig([],'void'),(),None))
def invert(self, *args):
"""
This inverts a square matrix, replacing the original with the inverse
Signatures:
void invert()
"""
pass
@javaOverload("generalizedInverse",
(make_sig([],java_imports['DoubleMatrix']),(),
lambda x: DoubleMatrix(obj=x)))
def generalizedInverse(self, *args):
"""
Gets the generalized inverse of a square matrix
Signatures:
DoubleMatrix generalizedInverse()
Returns:
The generalized inverse of a square matrix
"""
pass
## Inverts the matrix and returns the rank as the first element in rank[]. The
# original matrix is not modified
# @param rank Array to hold element where rank will be stored
# @return A generalized inverse of this matrix
@javaOverload("generalizedInverseWithRank",
(make_sig(['int[]'],java_imports['DoubleMatrix']),
(meta_int_array,),
lambda x: DoubleMatrix(obj=x)))
def generalizedInverseWithRank(self, *args):
"""
Inverts the matrix and returns the rank as the first element in rank[]. The
original matrix is not modified
Signatures:
DoubleMatrix generalizedInverseWithRank(int[] rank)
Arguments:
rank -- Array to hold element where rank will be stored
Returns:
A generalized inverse of this matrix
"""
pass
    ## Solves XB = Y for B by least squares, where X is this matrix
# @param Y a DoubleMatrix
# @return The least squares solutions for B, where XB=Y and X is this matrix
@javaOverload("solve",
(make_sig([java_imports['DoubleMatrix']],java_imports['DoubleMatrix']),
(metaDoubleMatrix,),lambda x: DoubleMatrix(obj=x)))
def solve(self, *args):
"""
        Solves XB = Y for B by least squares, where X is this matrix
Signatures:
DoubleMatrix solve(DoubleMatrix Y)
Arguments:
Y -- a DoubleMatrix
Returns:
The least squares solutions for B, where XB = Y and X is this matrix
"""
pass
## Gets the number of rows in this matrix
# @return Number of rows in this matrix
@javaOverload("numberOfRows",
(make_sig([],'int'),(),None))
def numberOfRows(self, *args):
"""
Gets the number of rows in this matrix
Signatures:
int numberOfRows()
Returns:
Number of rows in this matrix
"""
pass
## Gets the number of columns in this matrix
# @return Number of columns in this matrix
@javaOverload("numberOfColumns",
(make_sig([],'int'),(),None))
def numberOfColumns(self, *args):
"""
Gets the number of columns in this matrix
Signatures:
int numberOfColumns()
Returns:
Number of columns in this matrix
"""
pass
## Gets the row of the matrix as a column vector
# @param i a row index
# @return The ith row of this matrix as a column vector
@javaOverload("row",
(make_sig(['int'],java_imports['DoubleMatrix']),(metaInteger,),
lambda x: DoubleMatrix(obj=x)))
def row(self, *args):
"""
        Gets the row of the matrix as a column vector
Signatures:
DoubleMatrix row(int i)
Arguments:
i -- a row index
Returns:
The ith row of this matrix as a column vector
"""
pass
## Gets the column of the matrix as a column vector
# @param j A column index
    # @return The jth column of this matrix as a column vector
@javaOverload("column",
(make_sig(['int'],java_imports['DoubleMatrix']),(metaInteger,),
lambda x: DoubleMatrix(obj=x)))
def column(self, *args):
"""
Gets the column of the matrix as a column vector
Signatures:
DoubleMatrix column(int j)
Arguments:
j -- A column index
Returns:
            The jth column of this matrix as a column vector
"""
pass
## Gets an array of three DoubleMatrices. Where X is this matrix, the first is
# X'X, the second is the inverse of X'X, and the third is I-XGX'
# @return Array of three DoubleMatrices. Where X is this matrix, the first is X'X,
    # the second is the inverse of X'X, and the third is I-XGX'
@javaOverload("getXtXGM",
(make_sig([],java_imports['DoubleMatrix']+'[]'),(),
lambda x: DoubleMatrix.wrap_existing_array(x)))
def getXtXGM(self, *args):
"""
Gets an array of three DoubleMatrices. Where X is this matrix, the first is
X'X, the second is the inverse of X'X, and the third is I-XGX'
Signatures:
DoubleMatrix[] getXtXGM()
Returns:
An array of three DoubleMatrices. Where X is this matrix, the first is X'X,
the second is the inverse of X'X, and the third is I-XGX'
"""
pass
## Gets a copy of the matrix
# @return A copy of the matrix
@javaOverload("copy",
(make_sig([],java_imports['DoubleMatrix']),(),
lambda x: DoubleMatrix(obj=x)))
def copy(self, *args):
"""
Gets a copy of the matrix
Signatures:
DoubleMatrix copy()
Returns:
A copy of the matrix
"""
pass
## Gets the eigenvalue decomposition of this matrix
# @return An eigenvalue decomposition of this matrix
@javaOverload("getEigenvalueDecomposition",
(make_sig([],java_imports['EigenvalueDecomposition']),
(),lambda x: TASSELpy.net.maizegenetics.matrixalgebra.\
decomposition.EigenvalueDecomposition.\
EigenvalueDecomposition(obj=x)))
def getEigenvalueDecomposition(self, *args):
"""
Gets the Eigenvalue decomposition of this matrix
Signatures:
EigenvalueDecomposition getEigenvalueDecomposition()
Returns:
An Eigenvalue Decomposition of this matrix
"""
pass
## Gets the singular value decomposition of this matrix
    # @return A singular value decomposition of this matrix
@javaOverload("getSingularValueDecomposition",
(make_sig([],java_imports['SingularValueDecomposition']),
(),lambda x: TASSELpy.net.maizegenetics.matrixalgebra.\
decomposition.SingularValueDecomposition.\
SingularValueDecomposition(obj=x)))
def getSingularValueDecomposition(self, *args):
"""
Gets the Singular Value decomposition of this matrix
Signatures:
SingularValueDecomposition getSingularValueDecomposition()
Returns:
        A Singular Value Decomposition of this matrix
"""
pass
## Subtracts a matrix from this matrix, X, without modifying X
# @param dm A DoubleMatrix to subtract
# @return A new DoubleMatrix created by subtracting dm from this matrix
@javaOverload("minus",
(make_sig([java_imports['DoubleMatrix']],java_imports['DoubleMatrix']),
(metaDoubleMatrix,),lambda x: DoubleMatrix(obj=x)))
def minus(self, *args):
"""
Subtracts a matrix from this matrix, X, without modifying X
Signatures:
DoubleMatrix minus(DoubleMatrix dm)
Arguments:
dm -- A DoubleMatrix to subtract
Returns:
A new DoubleMatrix created by subtracting dm from this matrix
"""
pass
## This function subtracts dm, modifying the original matrix
# @param dm A DoubleMatrix to subtract
@javaOverload("minusEquals",
(make_sig([java_imports['DoubleMatrix']],'void'),
(metaDoubleMatrix,),None))
def minusEquals(self, *args):
"""
This function subtracts dm, modifying the original matrix
Signatures:
void minusEquals(DoubleMatrix dm)
Arguments:
dm -- A DoubleMatrix to subtract
"""
pass
    ## Adds a matrix to this matrix, X, without modifying X
    # @param dm A DoubleMatrix to add
    # @return A new DoubleMatrix created by adding dm to this matrix
@javaOverload("plus",
(make_sig([java_imports['DoubleMatrix']],java_imports['DoubleMatrix']),
(metaDoubleMatrix,),lambda x: DoubleMatrix(obj=x)))
def plus(self, *args):
"""
        Adds a matrix to this matrix, X, without modifying X
Signatures:
DoubleMatrix plus(DoubleMatrix dm)
Arguments:
dm -- A DoubleMatrix to add
Returns:
        A new DoubleMatrix created by adding dm to this matrix
"""
pass
## This function adds dm, modifying the original matrix
# @param dm A DoubleMatrix to add
@javaOverload("plusEquals",
(make_sig([java_imports['DoubleMatrix']],'void'),
(metaDoubleMatrix,),None))
def plusEquals(self, *args):
"""
This function adds dm, modifying the original matrix
Signatures:
void plusEquals(DoubleMatrix dm)
Arguments:
dm -- A DoubleMatrix to add
"""
pass
## Adds a scalar to this matrix and returns a new matrix. The original is
# not modified
# @param s A scalar
# @return The sum of this matrix and a scalar s
@javaOverload("scalarAdd",
(make_sig(['double'],java_imports['DoubleMatrix']),
(metaDouble,),lambda x: DoubleMatrix(obj=x)))
def scalarAdd(self, *args):
"""
Adds a scalar to this matrix and returns a new matrix. The original is
not modified
Signatures:
DoubleMatrix scalarAdd(double s)
Arguments:
s -- a scalar
Returns:
The sum of this matrix and a scalar s
"""
pass
## Adds a scalar s to this matrix, replacing the original matrix with the result
# @param s A scalar
@javaOverload("scalarAddEquals",
(make_sig(['double'],'void'),(metaDouble,),None))
def scalarAddEquals(self, *args):
"""
Adds a scalar s to this matrix, replacing the original matrix with the result
Signatures:
void scalarAddEquals(double s)
Arguments:
s -- a scalar
"""
pass
## Multiplies a scalar with this matrix and returns a new matrix. The original is
# not modified
# @param s A scalar
    # @return The product of this matrix and the scalar s
@javaOverload("scalarMult",
(make_sig(['double'],java_imports['DoubleMatrix']),
(metaDouble,),lambda x: DoubleMatrix(obj=x)))
def scalarMult(self, *args):
"""
        Multiplies this matrix by a scalar and returns a new matrix. The original is
not modified
Signatures:
DoubleMatrix scalarMult(double s)
Arguments:
s -- a scalar
Returns:
        The product of this matrix and the scalar s
"""
pass
## Multiplies a scalar s with this matrix, replacing the original matrix with the result
# @param s A scalar
@javaOverload("scalarMultEquals",
(make_sig(['double'],'void'),(metaDouble,),None))
def scalarMultEquals(self, *args):
"""
        Multiplies this matrix by a scalar s, replacing the original matrix with the result
Signatures:
void scalarMultEquals(double s)
Arguments:
s -- a scalar
"""
pass
## Creates a new matrix consisting of the rows and columns of this matrix in the
# order specified. If rows or columns is null, then all rows or columns, respectively,
# will be included
# @param rows The rows to be included in the new matrix
# @param columns The columns to be included in the new matrix
# @return A new matrix consisting of the specified rows and columns
@javaOverload("getSelection",
(make_sig(['int[]','int[]'],java_imports['DoubleMatrix']),
(meta_int_array, meta_int_array),
lambda x: DoubleMatrix(obj=x)))
def getSelection(self, *args):
"""
Creates a new matrix consisting of the rows and columns of this matrix in the
order specified. If rows or columns is null, then all rows or columns, respectively,
will be included
Signatures:
DoubleMatrix getSelection(int[] rows, int[] columns)
Arguments:
rows -- The rows to be included in the new matrix
columns -- The columns to be included in the new matrix
Returns:
A new matrix consisting of the specified rows and columns
"""
pass
## Sums a row
# @param row The index of the row to sum
# @return The sum of the elements in this row
@javaOverload("rowSum",
(make_sig(['int'],'double'),(metaInteger,),np.float64))
def rowSum(self, *args):
"""
Sums a row
Signatures:
double rowSum(int row)
Arguments:
row -- The row index to sum
Returns:
The sum of the elements in this row
"""
pass
## Sums a column
# @param column The index of the column to sum
# @return The sum of the elements in this column
@javaOverload("columnSum",
(make_sig(['int'],'double'),(metaInteger,),np.float64))
def columnSum(self, *args):
"""
Sums a column
Signatures:
double columnSum(int column)
Arguments:
column -- The column index to sum
Returns:
The sum of the elements in this column
"""
pass
## Gets the column rank of this matrix
# @return The column rank of this matrix
@javaOverload("columnRank",
(make_sig([],'int'),(),None))
def columnRank(self, *args):
"""
Gets the column rank of this matrix
Signatures:
int columnRank()
Returns:
The column rank of this matrix
"""
pass
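## Usage sketch (illustrative only; construction of DoubleMatrix instances
## is outside this excerpt). Given matrices X and Y obtained from TASSEL,
## the wrappers above forward to the underlying Java methods:
#
#   B = X.solve(Y)                        # least squares solution of XB = Y
#   n, p = X.numberOfRows(), X.numberOfColumns()
#   S = X.getSelection([0, 1], [0, 2])    # rows 0-1, columns 0 and 2
#   total = X.rowSum(0) + X.columnSum(0)  # numpy.float64 values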
|
|
"""
Copyright (c) 2015-2019 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from atomic_reactor.core import DockerTasker, retry, RetryGeneratorException, ContainerTasker
from atomic_reactor.util import clone_git_repo, CommandResult
from osbs.utils import ImageName
from tests.constants import LOCALHOST_REGISTRY, INPUT_IMAGE, DOCKERFILE_GIT, MOCK, COMMAND
from urllib3.exceptions import ProtocolError, ReadTimeoutError
from tests.util import requires_internet
from base64 import b64encode
import json
import os
import docker
import docker.errors
import requests
import sys
import time
import atomic_reactor
from docker.errors import APIError
from flexmock import flexmock
import pytest
if MOCK:
from tests.docker_mock import mock_docker
input_image_name = ImageName.parse(INPUT_IMAGE)
# TEST-SUITE SETUP
def setup_module(module):
if MOCK:
return
d = docker.Client() # TODO: current version of python-docker does not have Client anymore
try:
d.inspect_image(INPUT_IMAGE)
setattr(module, 'HAS_IMAGE', True)
except docker.errors.APIError:
d.pull(INPUT_IMAGE)
setattr(module, 'HAS_IMAGE', False)
def teardown_module(module):
if MOCK:
return
if not getattr(module, 'HAS_IMAGE', False):
d = docker.Client()
d.remove_image(INPUT_IMAGE)
# TESTS
def test_container_tasker():
ct = ContainerTasker()
with pytest.raises(AttributeError) as exc:
ct.tasker.get_version()
assert "Container task type not yet decided" in str(exc.value)
ct.build_method = "not_valid_build_method"
with pytest.raises(AttributeError) as exc:
ct.tasker.get_version()
err_msg = 'build method "%s" is not valid to determine Container tasker' \
' type' % ct.build_method
assert err_msg in str(exc.value)
@pytest.mark.parametrize('autoversion', [True, False])
@pytest.mark.parametrize('base_url_arg', [True, False])
def test_docker_tasker(autoversion, base_url_arg):
mock_docker()
base_url = 'unix://var/run/docker.sock'
kwargs = {}
if base_url_arg:
kwargs['base_url'] = base_url
else:
os.environ['DOCKER_CONNECTION'] = base_url
expected_kwargs = {'base_url': base_url, 'timeout': 120}
if autoversion:
setattr(docker, 'AutoVersionClient', 'auto')
expected_kwargs['version'] = 'auto'
(flexmock(docker.APIClient)
.should_receive('__init__')
.with_args(**expected_kwargs)
.once())
DockerTasker(**kwargs)
os.environ.pop('DOCKER_CONNECTION', None)
if autoversion:
delattr(docker, 'AutoVersionClient')
def test_run(docker_tasker):
if MOCK:
mock_docker()
container_id = docker_tasker.run(input_image_name, command="id")
try:
docker_tasker.wait(container_id)
finally:
docker_tasker.remove_container(container_id)
def test_run_invalid_command(docker_tasker):
if MOCK:
mock_docker(should_raise_error={'start': []})
try:
with pytest.raises(docker.errors.APIError):
docker_tasker.run(input_image_name, command=COMMAND)
finally:
# remove the container
containers = docker_tasker.tasker.d.containers(all=True)
container_id = [c for c in containers if c["Command"] == COMMAND][0]['Id']
docker_tasker.remove_container(container_id)
def test_image_exists(docker_tasker):
if MOCK:
mock_docker()
assert docker_tasker.image_exists(input_image_name) is True
def test_image_doesnt_exist(docker_tasker):
image = "lerknglekrnglekrnglekrnglekrng"
if MOCK:
mock_docker(should_raise_error={'inspect_image': [image]})
assert docker_tasker.image_exists(image) is False
def test_logs(docker_tasker):
if MOCK:
mock_docker()
container_id = docker_tasker.run(input_image_name, command="id")
try:
docker_tasker.wait(container_id)
output = docker_tasker.logs(container_id, stderr=True, stream=False)
assert "\n".join(output).startswith("uid=")
finally:
docker_tasker.remove_container(container_id)
def test_remove_container(docker_tasker):
if MOCK:
mock_docker()
container_id = docker_tasker.run(input_image_name, command="id")
try:
docker_tasker.wait(container_id)
finally:
docker_tasker.remove_container(container_id)
def test_remove_image(temp_image_name, docker_tasker): # noqa
if MOCK:
mock_docker(inspect_should_fail=True)
container_id = docker_tasker.run(input_image_name, command="id")
docker_tasker.wait(container_id)
image_id = docker_tasker.commit_container(container_id, image=temp_image_name)
try:
docker_tasker.remove_container(container_id)
finally:
docker_tasker.remove_image(image_id)
assert not docker_tasker.image_exists(temp_image_name)
def test_commit_container(temp_image_name, docker_tasker): # noqa
if MOCK:
mock_docker()
container_id = docker_tasker.run(INPUT_IMAGE, command="id")
docker_tasker.wait(container_id)
image_id = docker_tasker.commit_container(container_id, message="test message",
image=temp_image_name)
try:
assert docker_tasker.image_exists(image_id)
finally:
docker_tasker.remove_container(container_id)
docker_tasker.remove_image(image_id)
def test_inspect_image(docker_tasker):
if MOCK:
mock_docker()
inspect_data = docker_tasker.inspect_image(input_image_name)
assert isinstance(inspect_data, dict)
def test_tag_image(temp_image_name, docker_tasker): # noqa
if MOCK:
mock_docker()
temp_image_name.registry = "somewhere.example.com"
temp_image_name.tag = "1"
img = docker_tasker.tag_image(INPUT_IMAGE, temp_image_name)
try:
assert docker_tasker.image_exists(temp_image_name)
assert img == temp_image_name.to_str()
finally:
docker_tasker.remove_image(temp_image_name)
def test_tag_image_same_name(temp_image_name, docker_tasker): # noqa
if MOCK:
mock_docker()
temp_image_name.registry = "somewhere.example.com"
temp_image_name.tag = "1"
flexmock(docker.APIClient).should_receive('tag').never()
docker_tasker.tag_image(temp_image_name, temp_image_name.copy())
@pytest.mark.parametrize(('should_fail',), [ # noqa
(True, ),
(False, ),
])
def test_push_image(temp_image_name, docker_tasker, should_fail):
if MOCK:
mock_docker(push_should_fail=should_fail)
temp_image_name.registry = LOCALHOST_REGISTRY
temp_image_name.tag = "1"
docker_tasker.tag_image(INPUT_IMAGE, temp_image_name)
if should_fail:
with pytest.raises(RetryGeneratorException) as exc:
output = docker_tasker.push_image(temp_image_name, insecure=True)
assert "Failed to mock_method image" in str(exc.value)
assert "connection refused" in str(exc.value)
else:
output = docker_tasker.push_image(temp_image_name, insecure=True)
assert output is not None
docker_tasker.remove_image(temp_image_name)
def test_tag_and_push(temp_image_name, docker_tasker): # noqa
if MOCK:
mock_docker()
temp_image_name.registry = LOCALHOST_REGISTRY
temp_image_name.tag = "1"
output = docker_tasker.tag_and_push_image(INPUT_IMAGE, temp_image_name, insecure=True)
assert output is not None
assert docker_tasker.image_exists(temp_image_name)
docker_tasker.remove_image(temp_image_name)
@pytest.mark.parametrize(('insecure', 'dockercfg'), [
(True, None),
(False, None),
(False, {LOCALHOST_REGISTRY: {"auth": b64encode(b'user:mypassword').decode('utf-8')}}),
])
def test_pull_image(tmpdir, docker_tasker, insecure, dockercfg):
if MOCK:
mock_docker()
dockercfg_path = None
if dockercfg:
dockercfg_path = str(tmpdir.realpath())
file_name = '.dockercfg'
dockercfg_secret_path = os.path.join(dockercfg_path, file_name)
with open(dockercfg_secret_path, "w+") as dockerconfig:
dockerconfig.write(json.dumps(dockercfg))
dockerconfig.flush()
local_img = input_image_name
remote_img = local_img.copy()
remote_img.registry = LOCALHOST_REGISTRY
docker_tasker.tag_and_push_image(local_img, remote_img, insecure=insecure,
dockercfg=dockercfg_path)
got_image = docker_tasker.pull_image(remote_img, insecure=insecure,
dockercfg_path=dockercfg_path)
assert remote_img.to_str() == got_image
assert len(docker_tasker.tasker.last_logs) > 0
docker_tasker.remove_image(remote_img)
def test_get_image_info_by_id_nonexistent(docker_tasker):
if MOCK:
mock_docker()
response = docker_tasker.get_image_info_by_image_id("asd")
assert response is None
def test_get_image_info_by_id(docker_tasker):
if MOCK:
mock_docker(provided_image_repotags=input_image_name.to_str())
image_id = docker_tasker.get_image_info_by_image_name(input_image_name)[0]['Id']
response = docker_tasker.get_image_info_by_image_id(image_id)
assert isinstance(response, dict)
def test_get_image_history(docker_tasker):
if MOCK:
mock_docker()
response = docker_tasker.get_image_history(input_image_name)
assert response is not None
def test_get_image(docker_tasker):
if MOCK:
mock_docker()
response = docker_tasker.get_image(input_image_name)
assert response is not None
def test_get_image_info_by_name_tag_in_name(docker_tasker):
if MOCK:
mock_docker()
response = docker_tasker.get_image_info_by_image_name(input_image_name)
assert len(response) == 1
def test_get_image_info_by_name_tag_in_name_nonexistent(temp_image_name, docker_tasker): # noqa
if MOCK:
mock_docker()
response = docker_tasker.get_image_info_by_image_name(temp_image_name)
assert len(response) == 0
@requires_internet # noqa
def test_build_image_from_path(tmpdir, temp_image_name, docker_tasker):
if MOCK:
mock_docker()
buildargs = {'testarg1': 'testvalue1', 'testarg2': 'testvalue2'}
tmpdir_path = str(tmpdir.realpath())
clone_git_repo(DOCKERFILE_GIT, tmpdir_path)
df = tmpdir.join("Dockerfile")
assert df.check()
response = docker_tasker.build_image_from_path(tmpdir_path, temp_image_name,
use_cache=True, buildargs=buildargs)
list(response)
assert response is not None
assert docker_tasker.image_exists(temp_image_name)
docker_tasker.remove_image(temp_image_name)
@requires_internet # noqa
def test_build_image_from_git(temp_image_name, docker_tasker):
if MOCK:
mock_docker()
buildargs = {'testarg1': 'testvalue1', 'testarg2': 'testvalue2'}
response = docker_tasker.build_image_from_git(DOCKERFILE_GIT, temp_image_name,
use_cache=True, buildargs=buildargs)
list(response)
assert response is not None
assert docker_tasker.image_exists(temp_image_name)
docker_tasker.remove_image(temp_image_name)
def test_get_info(docker_tasker):
if MOCK:
mock_docker()
response = docker_tasker.get_info()
assert isinstance(response, dict)
@pytest.mark.parametrize('no_container', (False, True))
def test_export(docker_tasker, no_container):
if MOCK:
mock_docker()
container_dict = docker_tasker.create_container(INPUT_IMAGE, command=["/bin/bash"])
container_id = container_dict['Id']
try:
if no_container:
with pytest.raises(docker.errors.APIError):
docker_tasker.export_container('NOT_THERE')
else:
export_generator = docker_tasker.export_container(container_id)
for _ in export_generator:
pass
finally:
docker_tasker.remove_container(container_id)
def test_get_version(docker_tasker):
if MOCK:
mock_docker()
    response = docker_tasker.get_version()
assert isinstance(response, dict)
@pytest.mark.parametrize(('timeout', 'expected_timeout'), [
(None, 120),
(60, 60),
])
def test_timeout(timeout, expected_timeout):
if not hasattr(docker, 'APIClient'):
setattr(docker, 'APIClient', docker.Client)
expected_kwargs = {
'timeout': expected_timeout
}
if hasattr(docker, 'AutoVersionClient'):
expected_kwargs['version'] = 'auto'
(flexmock(docker.APIClient)
.should_receive('__init__')
.with_args(**expected_kwargs)
.once())
kwargs = {}
if timeout is not None:
kwargs['timeout'] = timeout
DockerTasker(**kwargs)
def test_docker2():
class MockClient(object):
def __init__(self, **kwargs):
pass
def version(self):
return {}
for client in ['APIClient', 'Client']:
if not hasattr(docker, client):
setattr(docker, client, MockClient)
(flexmock(docker)
.should_receive('APIClient')
.once()
.and_raise(AttributeError))
(flexmock(docker)
.should_receive('Client')
.once())
DockerTasker()
def my_func(*args, **kwargs):
my_args = ('some', 'new')
my_kwargs = {'one': 'first', 'two': 'second'}
assert args == my_args
assert kwargs == my_kwargs
response = requests.Response()
response.status_code = 408
raise APIError("test fail", response)
@pytest.mark.parametrize('retry_times', [-1, 0, 1, 2, 3])
def test_retry_method(retry_times):
my_args = ('some', 'new')
my_kwargs = {'one': 'first', 'two': 'second'}
(flexmock(sys.modules[__name__])
.should_call('my_func')
.with_args(*my_args, **my_kwargs)
.times(retry_times + 1))
(flexmock(time)
.should_receive('sleep')
.and_return(None))
if retry_times >= 0:
with pytest.raises(docker.errors.APIError):
retry(my_func, *my_args, retry=retry_times, **my_kwargs)
else:
retry(my_func, *my_args, retry=retry_times, **my_kwargs)
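# A sketch (not atomic_reactor's implementation) of the retry semantics the
# test above pins down: retry + 1 calls in total, the final APIError
# re-raised, and a negative retry count meaning the callable never runs.
def _retry_sketch(func, *args, **kwargs):
    retry_times = kwargs.pop("retry", 3)
    for attempt in range(retry_times + 1):
        try:
            return func(*args, **kwargs)
        except APIError:
            if attempt == retry_times:
                raise
            time.sleep(1)  # the real implementation waits between attempts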
@pytest.mark.parametrize('exc', [ProtocolError, APIError, ReadTimeoutError, False])
@pytest.mark.parametrize('in_init', [True, False])
@pytest.mark.parametrize('retry_times', [-1, 0, 1, 2, 3])
def test_retry_generator(exc, in_init, retry_times):
def simplegen():
yield "log line"
my_args = ('some', 'new')
if not in_init:
my_kwargs = {'one': 'first', 'two': 'second', 'retry_times': retry_times}
else:
my_kwargs = {'one': 'first', 'two': 'second'}
if in_init:
t = DockerTasker(retry_times=retry_times)
else:
t = DockerTasker()
(flexmock(time)
.should_receive('sleep')
.and_return(None))
if not exc:
cr = CommandResult()
cr._error = "cmd_error"
cr._error_detail = {"message": "error_detail"}
if exc == APIError:
error_message = 'api_error'
response = flexmock(content=error_message, status_code=408)
(flexmock(atomic_reactor.util)
.should_receive('wait_for_command')
.times(retry_times + 1)
.and_raise(APIError, error_message, response))
elif exc == ProtocolError:
error_message = 'protocol_error'
(flexmock(atomic_reactor.util)
.should_receive('wait_for_command')
.times(retry_times + 1)
.and_raise(ProtocolError, error_message))
elif exc == ReadTimeoutError:
pool = 'pool'
message = 'read_timeout_error'
error_message = '{}: {}'.format(pool, message)
(flexmock(atomic_reactor.util)
.should_receive('wait_for_command')
.times(retry_times + 1)
.and_raise(ReadTimeoutError, pool, 'url', message))
else:
(flexmock(atomic_reactor.util)
.should_receive('wait_for_command')
.times(retry_times + 1)
.and_return(cr))
error_message = 'cmd_error'
if retry_times >= 0:
with pytest.raises(RetryGeneratorException) as ex:
t.retry_generator(lambda *args, **kwargs: simplegen(),
*my_args, **my_kwargs)
assert repr(error_message) in repr(ex.value)
else:
t.retry_generator(lambda *args, **kwargs: simplegen(),
*my_args, **my_kwargs)
@pytest.mark.parametrize(("dockerconfig_contents", "should_raise"), [
({LOCALHOST_REGISTRY: {"foo": "bar"}}, True),
({LOCALHOST_REGISTRY: {"auth": b64encode(b'user').decode('utf-8')}}, True),
({LOCALHOST_REGISTRY: {"auth": b64encode(b'user:mypassword').decode('utf-8')}}, False),
({LOCALHOST_REGISTRY: {"username": "user", "password": "mypassword"}}, False)])
def test_login(tmpdir, docker_tasker, dockerconfig_contents, should_raise):
if MOCK:
mock_docker()
fake_api = flexmock(docker.APIClient, login=lambda username, registry,
dockercfg_path: {'Status': 'Login Succeeded'})
tmpdir_path = str(tmpdir.realpath())
file_name = '.dockercfg'
dockercfg_path = os.path.join(tmpdir_path, file_name)
with open(dockercfg_path, "w+") as dockerconfig:
dockerconfig.write(json.dumps(dockerconfig_contents))
dockerconfig.flush()
if should_raise:
if 'auth' in dockerconfig_contents[LOCALHOST_REGISTRY]:
with pytest.raises(ValueError) as exc:
docker_tasker.login(LOCALHOST_REGISTRY, tmpdir_path)
assert "Failed to parse 'auth'" in str(exc.value)
else:
with pytest.raises(RuntimeError) as exc:
docker_tasker.login(LOCALHOST_REGISTRY, tmpdir_path)
assert "Failed to extract a username" in str(exc.value)
else:
if MOCK:
(fake_api
.should_receive('login')
.with_args(username='user', registry=LOCALHOST_REGISTRY, dockercfg_path=dockercfg_path)
.once().and_return({'Status': 'Login Succeeded'}))
docker_tasker.login(LOCALHOST_REGISTRY, tmpdir_path)
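# For reference (illustrative; the registry host is a placeholder), the two
# credential layouts accepted above look like this on disk in .dockercfg:
#
#     {"registry.example.com": {"auth": "dXNlcjpteXBhc3N3b3Jk"}}
#     {"registry.example.com": {"username": "user", "password": "mypassword"}}
#
# where "auth" is base64("user:mypassword"); an "auth" value that does not
# decode to user:password is rejected, as the parametrization shows.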
|
|
import multiprocessing
from multiprocessing import Process
import logging
import time
import datetime
import subprocess
import psutil
import os
import sys
from cloudscheduler.lib.db_config import Config
class ProcessMonitor:
config = None
processes = {}
process_ids = {}
static_process_ids = {}
dynamic_process_ids = {}
logging = None
log_file = None
log_level = None
def __init__(self, config_params, pool_size, process_ids=None, config_file='/etc/cloudscheduler/cloudscheduler.yaml', log_file=None, log_level=None, log_key=None):
self.config = Config(config_file, config_params, pool_size=pool_size)
if log_file is None:
if log_key is not None:
self.log_file = self.config.__dict__[log_key]["log_file"]
else:
self.log_file = self.config.categories[os.path.basename(sys.argv[0])]["log_file"]
else:
self.log_file = log_file
if log_level is None:
if log_key is not None:
self.log_level = self.config.__dict__[log_key]["log_level"]
else:
self.log_level = self.config.categories[os.path.basename(sys.argv[0])]["log_level"]
else:
self.log_level = log_level
logging.basicConfig(
filename=self.log_file,
level=self.log_level,
format='%(asctime)s - %(processName)-12s - %(process)d - %(levelname)s - %(message)s')
self.logging = logging.getLogger()
self.process_ids = process_ids
for proc in process_ids:
if isinstance(process_ids[proc], list):
# add dynamic process
function = process_ids[proc][0]
select = process_ids[proc][1]
self.config.db_open()
                rows = []
rc, msg = self.config.db_execute(select)
for row in self.config.db_cursor:
rows.append(row)
if rc == 0:
#process rows
for row in rows:
logging.debug("Parsing csv2_cloud row: %s" % row)
target_group = row["group_name"]
target_cloud = row["cloud_name"]
dyna_proc = {
"function": function,
"args": [target_group, target_cloud],
"process": None
}
self.dynamic_process_ids[proc + "-" + target_group + "-" + target_cloud] = dyna_proc
else:
#something wrong with the select
self.logging.error("Failed to retrieve child targets from select statement:%s \n Error: %s" % (select, msg))
self.config.db_close()
else:
                # it's a static process
logging.debug("Adding static process: %s" % process_ids[proc])
self.static_process_ids[proc] = process_ids[proc]
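    # Illustrative process_ids layout accepted by __init__ (names and the
    # select statement are hypothetical): a bare callable registers a static
    # child, while a [function, select] pair spawns one dynamic child per
    # (group_name, cloud_name) row the select returns:
    #
    #     process_ids = {
    #         "cleanup": cleanup_main,
    #         "vm_poller": [vm_poller_main,
    #                       "select group_name, cloud_name from csv2_clouds"],
    #     }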
def get_process_ids(self):
return self.process_ids
def add_process_id(self, process_id, function):
self.process_ids[process_id] = function
_init_cpu_sleep_time(process_id)
return
def del_process(self, process_id, dynamic=False):
proc = self.processes.get(process_id)
if proc:
logging.info("Deleting process: %s" % process_id)
#if self.is_alive(process_id):
#proc.join()
del self.processes[process_id]
if dynamic:
self.dynamic_process_ids.pop(process_id)
else:
self.process_ids.pop(process_id)
self.static_process_ids.pop(process_id)
return
def get_logging(self):
return self.logging
def get_config(self):
return self.config
def start_all(self):
# start static_ids
for process in self.static_process_ids:
if process not in self.processes or not self.processes[process].is_alive():
if process in self.processes:
logging.error("Restarting %s...", process)
else:
logging.info("Starting %s...", process)
self.processes[process] = Process(target=self.process_ids[process])
self.processes[process].start()
# start dynamic_ids
for process in self.dynamic_process_ids:
if process not in self.processes or not self.processes[process].is_alive():
if process in self.processes:
logging.error("Restarting %s...", process)
else:
logging.info("Starting %s...", process)
# key here should be function-group-cloud
                self.processes[process] = Process(target=self.dynamic_process_ids[process]["function"], args=(self.dynamic_process_ids[process]["args"],))
self.processes[process].start()
def restart_process(self, process, dynamic=False):
# Capture tail of log when process has to restart
try:
proc = subprocess.Popen(['tail', '-n', '50', self.config.categories[os.path.basename(sys.argv[0])]["log_file"]], stdout=subprocess.PIPE)
lines = proc.stdout.readlines()
timestamp = str(datetime.date.today())
with open(''.join([self.log_file, '-crash-', timestamp]), 'wb') as f:
for line in lines:
f.write(line)
except Exception as ex:
self.logging.exception(ex)
if dynamic:
            self.processes[process] = Process(target=self.dynamic_process_ids[process]["function"], args=(self.dynamic_process_ids[process]["args"],))
self.processes[process].start()
else:
self.processes[process] = Process(target=self.process_ids[process])
self.processes[process].start()
def is_alive(self, process):
return self.processes[process].is_alive()
def kill_join_all(self):
for proc in self.processes:
pro = self.processes[proc]
try:
pro.terminate()
pro.join()
self._cleanup_event_pids(proc)
            except Exception:
logging.error("failed to join process %s", pro.name)
def join_all(self):
for proc in self.processes:
pro = self.processes[proc]
try:
pro.join()
            except Exception:
logging.error("failed to join process %s", pro.name)
def check_processes(self, stop=False):
if stop and len(self.process_ids) == 0:
logging.info("Stop set and all children shut down, exiting...")
            sys.exit(0)
if stop:
for proc in self.process_ids:
if isinstance(self.process_ids[proc], list):
function = self.process_ids[proc][0]
select = self.process_ids[proc][1]
self.config.db_open()
                    rows = []
rc, msg = self.config.db_execute(select)
for row in self.config.db_cursor:
rows.append(row)
if rc == 0:
for row in rows:
target_group = row["group_name"]
target_cloud = row["cloud_name"]
proc_key = proc + "-" + target_group + "-" + target_cloud
if proc_key in self.processes and self.is_alive(proc_key):
logging.info("Stop dynamic set, terminating child: %s" % proc)
self.processes[proc].terminate()
else:
self.logging.error("Failed to retrieve child targets from select statement: %s" % msg)
self.config.db_close()
elif self.is_alive(proc):
logging.info("Stop static set, terminating child: %s" % proc)
self.processes[proc].terminate()
procs_to_remove = []
# handle static processes
for process in self.static_process_ids:
if process not in self.processes or not self.is_alive(process):
if stop:
# child proc is dead, and stop flag set, don't restart and remove proc id
procs_to_remove.append(process)
if process in self.processes:
del self.processes[process]
continue
if process in self.processes:
logging.error("%s process died, restarting...", process)
logging.debug("exit code: %s" , self.processes[process].exitcode)
# self.config.update_service_catalog(error="%s process died, exit code: %s" % (process, self.processes[process].exitcode))
self.config.update_service_catalog(host_id=self.config.local_host_id, error="%s process died, exit code: %s" % (process, self.processes[process].exitcode))
del self.processes[process]
else:
self.logging.info("Restarting %s process", process)
#self._cleanup_event_pids(process)
self.restart_process(process)
time.sleep(self.config.categories["ProcessMonitor"]["sleep_interval_main_short"])
p = psutil.Process(self.processes[process].pid)
# handle dynamic processes
dynamic_procs = self.dynamic_process_ids.keys()
dynamic_procs_set = set(dynamic_procs)
for proc in self.process_ids:
#check if its a list
if isinstance(self.process_ids[proc], list):
#TODO ADD STOP LOGIC
# add dynamic process
function = self.process_ids[proc][0]
select = self.process_ids[proc][1]
self.config.db_open()
                rows = []
rc, msg = self.config.db_execute(select)
for row in self.config.db_cursor:
rows.append(row)
if rc == 0:
#process rows
for row in rows:
target_group = row["group_name"]
target_cloud = row["cloud_name"]
# check if process already in our list, if it is check if it's alive
proc_key = proc + "-" + target_group + "-" + target_cloud
if proc_key in dynamic_procs_set:
dynamic_procs_set.remove(proc_key)
if proc_key in self.processes:
#check if it's alive
if not self.is_alive(proc_key) and not stop:
#restart it
logging.error("%s process died, restarting...", proc_key)
self.config.update_service_catalog(host_id=self.config.local_host_id, error="%s process died, exit code: %s" % (proc_key, self.processes[proc_key].exitcode))
self.restart_process(proc_key, dynamic=True)
else:
#else create a new thread
dyna_proc = {
"function": function,
"args": [target_group, target_cloud],
"process": None
}
self.dynamic_process_ids[proc + "-" + target_group + "-" + target_cloud] = dyna_proc
else:
#something wrong with the select
self.logging.error("Failed to retrieve child targets from select statement: %s" % msg)
#check for any dynamic processes that are no longer needed
# anything left in dynamic_procs_set is no longer in the database
for proc in dynamic_procs_set:
#join it
self.del_process(proc, dynamic=True)
for proc in procs_to_remove:
if proc in self.process_ids:
self.process_ids.pop(proc)
def _cleanup_event_pids(self, pid):
path = self.config.categories["ProcessMonitor"]["signal_registry"]
event_dirs = os.walk(path)
for epath in event_dirs:
pid_path = epath[0] + "/" + pid
if os.path.isfile(pid_path):
os.unlink(pid_path)
def terminate(signal_num, frame):
try:
logging.info("Recieved signal %s, removing pid file." % signal_num)
pid_file = frame.f_globals["PID_FILE"]
os.unlink(pid_file)
except Exception as exc:
logging.debug("Failed to unlink pid file:")
logging.debug(exc)
# Returns False if the pid file exists, True if it is gone
def check_pid(pid_file):
    if os.path.exists(pid_file):
        # pid file still exists, return False
        return False
    else:
        return True
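# Typical wiring (a sketch; the pid file path is an assumption): terminate()
# reads PID_FILE from the interrupted frame's globals, so the daemon module
# that defines it can install the handler and poll check_pid() to shut down:
#
#     import signal
#     PID_FILE = "/var/run/cloudscheduler/poller.pid"
#     signal.signal(signal.SIGINT, terminate)
#     signal.signal(signal.SIGTERM, terminate)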
|
|
#!/usr/bin/env python
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
import logging
logging.basicConfig(level=logging.DEBUG)
import unittest
from lxml import etree, html
from spyne.application import Application
from spyne.decorator import srpc
from spyne.model.primitive import Integer, Unicode
from spyne.model.primitive import String
from spyne.model.primitive import AnyUri
from spyne.model.complex import Array
from spyne.model.complex import ComplexModel
from spyne.protocol.http import HttpRpc
from spyne.protocol.html.table import HtmlColumnTable, HtmlRowTable
from spyne.service import ServiceBase
from spyne.server.wsgi import WsgiApplication
from spyne.util.test import show, call_wsgi_app_kwargs, call_wsgi_app
class CM(ComplexModel):
i = Integer
s = String
class CCM(ComplexModel):
c = CM
i = Integer
s = String
class TestHtmlColumnTable(unittest.TestCase):
def test_complex_array(self):
class SomeService(ServiceBase):
@srpc(CCM, _returns=Array(CCM))
def some_call(ccm):
return [ccm] * 5
app = Application([SomeService], 'tns', in_protocol=HttpRpc(),
out_protocol=HtmlColumnTable(field_name_attr='class'))
server = WsgiApplication(app)
out_string = call_wsgi_app_kwargs(server,
ccm_i='456',
ccm_s='def',
ccm_c_i='123',
ccm_c_s='abc',
)
elt = etree.fromstring(out_string)
show(elt, 'TestHtmlColumnTable.test_complex_array')
elt = html.fromstring(out_string)
row, = elt[0] # thead
cell = row.findall('th[@class="i"]')
assert len(cell) == 1
assert cell[0].text == 'i'
cell = row.findall('th[@class="s"]')
assert len(cell) == 1
assert cell[0].text == 's'
for row in elt[1]: # tbody
cell = row.xpath('td[@class="i"]')
assert len(cell) == 1
assert cell[0].text == '456'
cell = row.xpath('td[@class="c"]//td[@class="i"]')
assert len(cell) == 1
assert cell[0].text == '123'
cell = row.xpath('td[@class="c"]//td[@class="s"]')
assert len(cell) == 1
assert cell[0].text == 'abc'
cell = row.xpath('td[@class="s"]')
assert len(cell) == 1
assert cell[0].text == 'def'
def test_string_array(self):
class SomeService(ServiceBase):
@srpc(String(max_occurs='unbounded'), _returns=Array(String))
def some_call(s):
return s
app = Application([SomeService], 'tns', in_protocol=HttpRpc(),
out_protocol=HtmlColumnTable())
server = WsgiApplication(app)
out_string = call_wsgi_app(server, body_pairs=(('s', '1'), ('s', '2')))
elt = etree.fromstring(out_string)
show(elt, "TestHtmlColumnTable.test_string_array")
assert out_string == \
'<table class="string">' \
'<thead><tr><th class="some_callResponse">some_callResponse</th></tr></thead>' \
'<tbody><tr><td>1</td></tr><tr><td>2</td></tr></tbody>' \
'</table>'
def test_anyuri_string(self):
_link = "http://arskom.com.tr/"
class C(ComplexModel):
c = AnyUri
class SomeService(ServiceBase):
@srpc(_returns=Array(C))
def some_call():
return [C(c=_link)]
app = Application([SomeService], 'tns', in_protocol=HttpRpc(),
out_protocol=HtmlColumnTable(field_name_attr='class'))
server = WsgiApplication(app)
out_string = call_wsgi_app_kwargs(server)
elt = html.fromstring(out_string)
show(elt, "TestHtmlColumnTable.test_anyuri_string")
assert elt.xpath('//td[@class="c"]')[0][0].tag == 'a'
assert elt.xpath('//td[@class="c"]')[0][0].attrib['href'] == _link
def test_anyuri_uri_value(self):
_link = "http://arskom.com.tr/"
_text = "Arskom"
class C(ComplexModel):
c = AnyUri
class SomeService(ServiceBase):
@srpc(_returns=Array(C))
def some_call():
return [C(c=AnyUri.Value(_link, text=_text))]
app = Application([SomeService], 'tns', in_protocol=HttpRpc(),
out_protocol=HtmlColumnTable(field_name_attr='class'))
server = WsgiApplication(app)
out_string = call_wsgi_app_kwargs(server)
elt = html.fromstring(out_string)
print(html.tostring(elt, pretty_print=True))
assert elt.xpath('//td[@class="c"]')[0][0].tag == 'a'
assert elt.xpath('//td[@class="c"]')[0][0].text == _text
assert elt.xpath('//td[@class="c"]')[0][0].attrib['href'] == _link
def test_column_href_string(self):
_link = "http://arskom.com.tr/?spyne_test"
class C(ComplexModel):
c = Unicode(pa={HtmlColumnTable: dict(href=_link)})
class SomeService(ServiceBase):
@srpc(_returns=C)
def some_call():
return C(c="hello")
app = Application([SomeService], 'tns', in_protocol=HttpRpc(),
out_protocol=HtmlColumnTable(field_name_attr='class'))
server = WsgiApplication(app)
out_string = call_wsgi_app_kwargs(server)
elt = html.fromstring(out_string)
print(html.tostring(elt, pretty_print=True))
assert elt.xpath('//td[@class="c"]')[0][0].tag == 'a'
assert elt.xpath('//td[@class="c"]')[0][0].attrib['href'] == _link
def test_column_href_string_with_substitution(self):
_link = "http://arskom.com.tr/?spyne_test=%s"
class C(ComplexModel):
c = Unicode(pa={HtmlColumnTable: dict(href=_link)})
class SomeService(ServiceBase):
@srpc(_returns=C)
def some_call():
return C(c="hello")
app = Application([SomeService], 'tns', in_protocol=HttpRpc(),
out_protocol=HtmlColumnTable(field_name_attr='class'))
server = WsgiApplication(app)
out_string = call_wsgi_app_kwargs(server)
elt = html.fromstring(out_string)
print(html.tostring(elt, pretty_print=True))
assert elt.xpath('//td[@class="c"]')[0][0].tag == 'a'
assert elt.xpath('//td[@class="c"]')[0][0].attrib['href'] == _link % "hello"
class TestHtmlRowTable(unittest.TestCase):
def test_anyuri_string(self):
_link = "http://arskom.com.tr/"
class C(ComplexModel):
c = AnyUri
class SomeService(ServiceBase):
@srpc(_returns=C)
def some_call():
return C(c=_link)
app = Application([SomeService], 'tns', in_protocol=HttpRpc(),
out_protocol=HtmlRowTable(field_name_attr='class'))
server = WsgiApplication(app)
out_string = call_wsgi_app_kwargs(server)
elt = html.fromstring(out_string)
print(html.tostring(elt, pretty_print=True))
assert elt.xpath('//td[@class="c"]')[0][0].tag == 'a'
assert elt.xpath('//td[@class="c"]')[0][0].attrib['href'] == _link
def test_anyuri_uri_value(self):
_link = "http://arskom.com.tr/"
_text = "Arskom"
class C(ComplexModel):
c = AnyUri
class SomeService(ServiceBase):
@srpc(_returns=C)
def some_call():
return C(c=AnyUri.Value(_link, text=_text))
app = Application([SomeService], 'tns', in_protocol=HttpRpc(),
out_protocol=HtmlRowTable(field_name_attr='class'))
server = WsgiApplication(app)
out_string = call_wsgi_app_kwargs(server)
elt = html.fromstring(out_string)
print(html.tostring(elt, pretty_print=True))
assert elt.xpath('//td[@class="c"]')[0][0].tag == 'a'
assert elt.xpath('//td[@class="c"]')[0][0].text == _text
assert elt.xpath('//td[@class="c"]')[0][0].attrib['href'] == _link
def test_complex(self):
class SomeService(ServiceBase):
@srpc(CCM, _returns=CCM)
def some_call(ccm):
return ccm
app = Application([SomeService], 'tns',
in_protocol=HttpRpc(hier_delim="_"),
out_protocol=HtmlRowTable(field_name_attr='class'))
server = WsgiApplication(app)
out_string = call_wsgi_app_kwargs(server, 'some_call',
ccm_c_s='abc', ccm_c_i='123', ccm_i='456', ccm_s='def')
elt = html.fromstring(out_string)
show(elt, "TestHtmlRowTable.test_complex")
# Here's what this is supposed to return
"""
<table class="CCM">
<tbody>
<tr>
<th class="i">i</th>
<td class="i">456</td>
</tr>
<tr class="c">
<th class="c">c</th>
<td class="c">
<table class="c">
<tbody>
<tr>
<th class="i">i</th>
<td class="i">123</td>
</tr>
<tr>
<th class="s">s</th>
<td class="s">abc</td>
</tr>
</tbody>
</table>
</td>
</tr>
<tr>
<th class="s">s</th>
<td class="s">def</td>
</tr>
</tbody>
</table>
"""
print(html.tostring(elt, pretty_print=True))
resp = elt.find_class('CCM')
assert len(resp) == 1
assert elt.xpath('tbody/tr/th[@class="i"]/text()')[0] == 'i'
assert elt.xpath('tbody/tr/td[@class="i"]/text()')[0] == '456'
assert elt.xpath('tbody/tr/td[@class="c"]//th[@class="i"]/text()')[0] == 'i'
assert elt.xpath('tbody/tr/td[@class="c"]//td[@class="i"]/text()')[0] == '123'
assert elt.xpath('tbody/tr/td[@class="c"]//th[@class="s"]/text()')[0] == 's'
assert elt.xpath('tbody/tr/td[@class="c"]//td[@class="s"]/text()')[0] == 'abc'
assert elt.xpath('tbody/tr/th[@class="s"]/text()')[0] == 's'
assert elt.xpath('tbody/tr/td[@class="s"]/text()')[0] == 'def'
def test_string_array(self):
class SomeService(ServiceBase):
@srpc(String(max_occurs='unbounded'), _returns=Array(String))
def some_call(s):
return s
app = Application([SomeService], 'tns', in_protocol=HttpRpc(),
out_protocol=HtmlRowTable())
server = WsgiApplication(app)
out_string = call_wsgi_app(server, body_pairs=(('s', '1'), ('s', '2')) )
show(html.fromstring(out_string), 'TestHtmlRowTable.test_string_array')
assert out_string == \
'<div>' \
'<table class="some_callResponse">' \
'<tr>' \
'<th>string</th>' \
'<td>' \
'<table>' \
'<tr>' \
'<td>1</td>' \
'</tr>' \
'<tr>' \
'<td>2</td>' \
'</tr>' \
'</table>' \
'</td>' \
'</tr>' \
'</table>' \
'</div>'
def test_string_array_no_header(self):
class SomeService(ServiceBase):
@srpc(String(max_occurs='unbounded'), _returns=Array(String))
def some_call(s):
return s
app = Application([SomeService], 'tns', in_protocol=HttpRpc(),
out_protocol=HtmlRowTable(header=False))
server = WsgiApplication(app)
out_string = call_wsgi_app(server, body_pairs=(('s', '1'), ('s', '2')) )
#FIXME: Needs a proper test with xpaths and all.
show(html.fromstring(out_string), 'TestHtmlRowTable.test_string_array_no_header')
assert out_string == \
'<div>' \
'<table class="some_callResponse">' \
'<tr>' \
'<td>' \
'<table>' \
'<tr>' \
'<td>1</td>' \
'</tr>' \
'<tr>' \
'<td>2</td>' \
'</tr>' \
'</table>' \
'</td>' \
'</tr>' \
'</table>' \
'</div>'
def test_complex_array(self):
v = [
CM(i=1, s='a'),
CM(i=2, s='b'),
CM(i=3, s='c'),
CM(i=4, s='d'),
]
class SomeService(ServiceBase):
@srpc(_returns=Array(CM))
def some_call():
return v
app = Application([SomeService], 'tns', in_protocol=HttpRpc(),
out_protocol=HtmlRowTable())
server = WsgiApplication(app)
out_string = call_wsgi_app_kwargs(server)
show(html.fromstring(out_string), 'TestHtmlRowTable.test_complex_array')
#FIXME: Needs a proper test with xpaths and all.
assert out_string == \
'<div>' \
'<table class="CM">' \
'<tbody>' \
'<tr>' \
'<th class="i">i</th>' \
'<td class="i">1</td>' \
'</tr>' \
'<tr>' \
'<th class="s">s</th>' \
'<td class="s">a</td>' \
'</tr>' \
'</tbody>' \
'</table>' \
'<table class="CM">' \
'<tbody>' \
'<tr>' \
'<th class="i">i</th>' \
'<td class="i">2</td>' \
'</tr>' \
'<tr>' \
'<th class="s">s</th>' \
'<td class="s">b</td>' \
'</tr>' \
'</tbody>' \
'</table>' \
'<table class="CM">' \
'<tbody>' \
'<tr>' \
'<th class="i">i</th>' \
'<td class="i">3</td>' \
'</tr>' \
'<tr>' \
'<th class="s">s</th>' \
'<td class="s">c</td>' \
'</tr>' \
'</tbody>' \
'</table>' \
'<table class="CM">' \
'<tbody>' \
'<tr>' \
'<th class="i">i</th>' \
'<td class="i">4</td>' \
'</tr>' \
'<tr>' \
'<th class="s">s</th>' \
'<td class="s">d</td>' \
'</tr>' \
'</tbody>' \
'</table>' \
'</div>'
if __name__ == '__main__':
unittest.main()
|
|
import random
import numpy as np
import tensorflow as tf
import data_util
emb_init = tf.truncated_normal_initializer(mean=0.0, stddev=0.01)
fc_layer = tf.contrib.layers.fully_connected
class BiGRUModel(object):
def __init__(self,
source_vocab_size,
target_vocab_size,
buckets,
state_size,
num_layers,
embedding_size,
max_gradient,
batch_size,
learning_rate,
forward_only=False,
dtype=tf.float32):
self.source_vocab_size = source_vocab_size
self.target_vocab_size = target_vocab_size
self.buckets = buckets
self.batch_size = batch_size
self.learning_rate = learning_rate
self.global_step = tf.Variable(0, trainable=False, name="global_step")
self.state_size = state_size
self.encoder_inputs = tf.placeholder(
tf.int32, shape=[self.batch_size, None])
self.decoder_inputs = tf.placeholder(
tf.int32, shape=[self.batch_size, None])
self.decoder_targets = tf.placeholder(
tf.int32, shape=[self.batch_size, None])
self.encoder_len = tf.placeholder(tf.int32, shape=[self.batch_size])
self.decoder_len = tf.placeholder(tf.int32, shape=[self.batch_size])
self.beam_tok = tf.placeholder(tf.int32, shape=[self.batch_size])
self.prev_att = tf.placeholder(
tf.float32, shape=[self.batch_size, state_size * 2])
encoder_fw_cell = tf.contrib.rnn.GRUCell(state_size)
encoder_bw_cell = tf.contrib.rnn.GRUCell(state_size)
decoder_cell = tf.contrib.rnn.GRUCell(state_size)
if not forward_only:
encoder_fw_cell = tf.contrib.rnn.DropoutWrapper(
encoder_fw_cell, output_keep_prob=0.50)
encoder_bw_cell = tf.contrib.rnn.DropoutWrapper(
encoder_bw_cell, output_keep_prob=0.50)
decoder_cell = tf.contrib.rnn.DropoutWrapper(
decoder_cell, output_keep_prob=0.50)
with tf.variable_scope("seq2seq", dtype=dtype):
with tf.variable_scope("encoder"):
encoder_emb = tf.get_variable(
"embedding", [source_vocab_size, embedding_size],
initializer=emb_init)
encoder_inputs_emb = tf.nn.embedding_lookup(
encoder_emb, self.encoder_inputs)
encoder_outputs, encoder_states = \
tf.nn.bidirectional_dynamic_rnn(
encoder_fw_cell, encoder_bw_cell, encoder_inputs_emb,
sequence_length=self.encoder_len, dtype=dtype)
with tf.variable_scope("init_state"):
init_state = fc_layer(
tf.concat(encoder_states, 1), state_size)
                # bidirectional_dynamic_rnn leaves the batch dimension as
                # None in its inferred shapes, so pin the static shape explicitly
self.init_state = init_state
self.init_state.set_shape([self.batch_size, state_size])
self.att_states = tf.concat(encoder_outputs, 2)
self.att_states.set_shape([self.batch_size, None, state_size*2])
with tf.variable_scope("attention"):
attention = tf.contrib.seq2seq.BahdanauAttention(
state_size, self.att_states, self.encoder_len)
decoder_cell = tf.contrib.seq2seq.DynamicAttentionWrapper(
decoder_cell, attention, state_size * 2)
wrapper_state = tf.contrib.seq2seq.DynamicAttentionWrapperState(
self.init_state, self.prev_att)
with tf.variable_scope("decoder") as scope:
decoder_emb = tf.get_variable(
"embedding", [target_vocab_size, embedding_size],
initializer=emb_init)
decoder_cell = tf.contrib.rnn.OutputProjectionWrapper(
decoder_cell, target_vocab_size)
if not forward_only:
decoder_inputs_emb = tf.nn.embedding_lookup(
decoder_emb, self.decoder_inputs)
helper = tf.contrib.seq2seq.TrainingHelper(
decoder_inputs_emb, self.decoder_len)
decoder = tf.contrib.seq2seq.BasicDecoder(
decoder_cell, helper, wrapper_state)
outputs, final_state = \
tf.contrib.seq2seq.dynamic_decode(decoder)
outputs_logits = outputs[0]
self.outputs = outputs_logits
weights = tf.sequence_mask(
self.decoder_len, dtype=tf.float32)
loss_t = tf.contrib.seq2seq.sequence_loss(
outputs_logits, self.decoder_targets, weights,
average_across_timesteps=False,
average_across_batch=False)
self.loss = tf.reduce_sum(loss_t) / self.batch_size
params = tf.trainable_variables()
opt = tf.train.AdadeltaOptimizer(
self.learning_rate, epsilon=1e-6)
gradients = tf.gradients(self.loss, params)
clipped_gradients, norm = \
tf.clip_by_global_norm(gradients, max_gradient)
self.updates = opt.apply_gradients(
zip(clipped_gradients, params),
global_step=self.global_step)
tf.summary.scalar('loss', self.loss)
else:
self.loss = tf.constant(0)
with tf.variable_scope("proj") as scope:
output_fn = lambda x: fc_layer(
x, target_vocab_size, scope=scope)
st_toks = tf.convert_to_tensor(
[data_util.ID_GO]*batch_size, dtype=tf.int32)
helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
decoder_emb, st_toks, data_util.ID_EOS)
decoder = tf.contrib.seq2seq.BasicDecoder(
decoder_cell, helper, wrapper_state)
outputs, final_state = \
tf.contrib.seq2seq.dynamic_decode(decoder)
self.outputs = outputs[0]
# single step decode for beam search
with tf.variable_scope("decoder", reuse=True):
beam_emb = tf.nn.embedding_lookup(
decoder_emb, self.beam_tok)
self.beam_outputs, self.beam_nxt_state, _, _ = \
decoder.step(0, beam_emb, wrapper_state)
self.beam_logsoftmax = \
tf.nn.log_softmax(self.beam_outputs[0])
self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=0)
self.summary_merge = tf.summary.merge_all()
def step(self,
session,
encoder_inputs,
decoder_inputs,
encoder_len,
decoder_len,
forward_only,
summary_writer=None):
# dim fit is important for sequence_mask
# TODO better way to use sequence_mask
if encoder_inputs.shape[1] != max(encoder_len):
raise ValueError("encoder_inputs and encoder_len does not fit")
if not forward_only and \
decoder_inputs.shape[1] != max(decoder_len) + 1:
raise ValueError("decoder_inputs and decoder_len does not fit")
input_feed = {}
input_feed[self.encoder_inputs] = encoder_inputs
input_feed[self.decoder_inputs] = decoder_inputs[:, :-1]
input_feed[self.decoder_targets] = decoder_inputs[:, 1:]
input_feed[self.encoder_len] = encoder_len
input_feed[self.decoder_len] = decoder_len
input_feed[self.prev_att] = np.zeros(
[self.batch_size, 2 * self.state_size])
if forward_only:
output_feed = [self.loss, self.outputs]
else:
output_feed = [self.loss, self.updates]
if summary_writer:
output_feed += [self.summary_merge, self.global_step]
outputs = session.run(output_feed, input_feed)
if summary_writer:
summary_writer.add_summary(outputs[2], outputs[3])
return outputs[:2]
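    # Feed-shape note (illustrative): with batch size B, encoder_inputs is
    # (B, max(encoder_len)); decoder_inputs carries both <GO> and <EOS>, so
    # its width is max(decoder_len) + 1, and the feed above drops the last
    # column for decoder inputs and the first column for decoder targets.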
def step_beam(self,
session,
encoder_inputs,
encoder_len,
max_len=12,
geneos=True):
beam_size = self.batch_size
if encoder_inputs.shape[0] == 1:
encoder_inputs = np.repeat(encoder_inputs, beam_size, axis=0)
encoder_len = np.repeat(encoder_len, beam_size, axis=0)
if encoder_inputs.shape[1] != max(encoder_len):
raise ValueError("encoder_inputs and encoder_len does not fit")
#generate attention_states
input_feed = {}
input_feed[self.encoder_inputs] = encoder_inputs
input_feed[self.encoder_len] = encoder_len
output_feed = [self.att_states, self.init_state]
outputs = session.run(output_feed, input_feed)
att_states = outputs[0]
prev_state = outputs[1]
prev_tok = np.ones([beam_size], dtype="int32") * data_util.ID_GO
prev_att = np.zeros([self.batch_size, 2 * self.state_size])
input_feed = {}
input_feed[self.att_states] = att_states
input_feed[self.encoder_len] = encoder_len
        ret = [[] for _ in range(beam_size)]
neos = np.ones([beam_size], dtype="bool")
score = np.ones([beam_size], dtype="float32") * (-1e8)
score[0] = 0
beam_att = np.zeros(
[self.batch_size, self.state_size*2], dtype="float32")
for i in range(max_len):
input_feed[self.init_state] = prev_state
input_feed[self.beam_tok] = prev_tok
input_feed[self.prev_att] = beam_att
output_feed = [self.beam_nxt_state[1],
self.beam_logsoftmax,
self.beam_nxt_state[0]]
outputs = session.run(output_feed, input_feed)
beam_att = outputs[0]
tok_logsoftmax = np.asarray(outputs[1])
tok_logsoftmax = tok_logsoftmax.reshape(
[beam_size, self.target_vocab_size])
if not geneos:
tok_logsoftmax[:, data_util.ID_EOS] = -1e8
tok_argsort = np.argsort(tok_logsoftmax, axis=1)[:, -beam_size:]
tmp_arg0 = np.arange(beam_size).reshape([beam_size, 1])
tok_argsort_score = tok_logsoftmax[tmp_arg0, tok_argsort]
tok_argsort_score *= neos.reshape([beam_size, 1])
tok_argsort_score += score.reshape([beam_size, 1])
all_arg = np.argsort(tok_argsort_score.flatten())[-beam_size:]
arg0 = all_arg // beam_size #previous id in batch
arg1 = all_arg % beam_size
prev_tok = tok_argsort[arg0, arg1] #current word
prev_state = outputs[2][arg0]
score = tok_argsort_score[arg0, arg1]
neos = neos[arg0] & (prev_tok != data_util.ID_EOS)
ret_t = []
for j in range(beam_size):
ret_t.append(ret[arg0[j]] + [prev_tok[j]])
ret = ret_t
return ret[-1]
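    # Worked example of the beam bookkeeping above (beam_size = 2): if
    #     tok_argsort_score = [[-0.5, -0.1],
    #                          [-0.3, -0.2]]
    # then np.argsort(tok_argsort_score.flatten())[-2:] == [3, 1], so
    #     arg0 = all_arg // beam_size = [1, 0]  (source beam of each survivor)
    #     arg1 = all_arg %  beam_size = [1, 1]  (which top-k token it takes)
    # and the two best global scores, -0.2 and -0.1, survive.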
def add_pad(self, data, fixlen):
data = map(lambda x: x + [data_util.ID_PAD] * (fixlen - len(x)), data)
data = list(data)
return np.asarray(data)
def get_batch(self, data, bucket_id):
encoder_inputs, decoder_inputs = [], []
encoder_len, decoder_len = [], []
# Get a random batch of encoder and decoder inputs from data,
# and add GO to decoder.
for _ in range(self.batch_size):
encoder_input, decoder_input = random.choice(data[bucket_id])
encoder_inputs.append(encoder_input)
encoder_len.append(len(encoder_input))
decoder_inputs.append(decoder_input)
decoder_len.append(len(decoder_input))
batch_enc_len = max(encoder_len)
batch_dec_len = max(decoder_len)
encoder_inputs = self.add_pad(encoder_inputs, batch_enc_len)
decoder_inputs = self.add_pad(decoder_inputs, batch_dec_len)
encoder_len = np.asarray(encoder_len)
# decoder_input has both <GO> and <EOS>
# len(decoder_input)-1 is number of steps in the decoder.
decoder_len = np.asarray(decoder_len) - 1
return encoder_inputs, decoder_inputs, encoder_len, decoder_len
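# Batch-shape sketch (illustrative): if the sampled bucket's longest pair is
# a length-4 encoder sequence and a decoder sequence [ID_GO, 5, 9, ID_EOS],
# get_batch pads every sequence in the batch to that width with ID_PAD and
# returns decoder_len entries of len(decoder_input) - 1 = 3, the number of
# decode steps once <GO> is consumed.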
|
|
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import BaseHandler
from synapse.streams.config import PaginationConfig
from synapse.api.constants import Membership, EventTypes
from twisted.internet import defer
import collections
import logging
logger = logging.getLogger(__name__)
SyncConfig = collections.namedtuple("SyncConfig", [
"user",
"filter",
])
class TimelineBatch(collections.namedtuple("TimelineBatch", [
"prev_batch",
"events",
"limited",
])):
__slots__ = []
def __nonzero__(self):
"""Make the result appear empty if there are no updates. This is used
        to tell if the room needs to be part of the sync result.
"""
return bool(self.events)
class JoinedSyncResult(collections.namedtuple("JoinedSyncResult", [
"room_id", # str
"timeline", # TimelineBatch
"state", # dict[(str, str), FrozenEvent]
"ephemeral",
"account_data",
])):
__slots__ = []
def __nonzero__(self):
"""Make the result appear empty if there are no updates. This is used
        to tell if the room needs to be part of the sync result.
"""
return bool(
self.timeline
or self.state
or self.ephemeral
or self.account_data
)
class ArchivedSyncResult(collections.namedtuple("ArchivedSyncResult", [
"room_id", # str
"timeline", # TimelineBatch
"state", # dict[(str, str), FrozenEvent]
"account_data",
])):
__slots__ = []
def __nonzero__(self):
"""Make the result appear empty if there are no updates. This is used
        to tell if the room needs to be part of the sync result.
"""
return bool(
self.timeline
or self.state
or self.account_data
)
class InvitedSyncResult(collections.namedtuple("InvitedSyncResult", [
"room_id", # str
"invite", # FrozenEvent: the invite event
])):
__slots__ = []
def __nonzero__(self):
"""Invited rooms should always be reported to the client"""
return True
class SyncResult(collections.namedtuple("SyncResult", [
"next_batch", # Token for the next sync
"presence", # List of presence events for the user.
"joined", # JoinedSyncResult for each joined room.
"invited", # InvitedSyncResult for each invited room.
"archived", # ArchivedSyncResult for each archived room.
])):
__slots__ = []
def __nonzero__(self):
"""Make the result appear empty if there are no updates. This is used
to tell if the notifier needs to wait for more events when polling for
events.
"""
return bool(
self.presence or self.joined or self.invited
)
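# Under Python 2, these __nonzero__ hooks drive truth testing: the notifier's
# polling loop can evaluate bool(result) and keep waiting while an all-empty
# SyncResult stays falsy, returning as soon as any component has updates.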
class SyncHandler(BaseHandler):
def __init__(self, hs):
super(SyncHandler, self).__init__(hs)
self.event_sources = hs.get_event_sources()
self.clock = hs.get_clock()
@defer.inlineCallbacks
def wait_for_sync_for_user(self, sync_config, since_token=None, timeout=0,
full_state=False):
"""Get the sync for a client if we have new data for it now. Otherwise
wait for new data to arrive on the server. If the timeout expires, then
return an empty sync result.
Returns:
A Deferred SyncResult.
"""
if timeout == 0 or since_token is None or full_state:
# we are going to return immediately, so don't bother calling
# notifier.wait_for_events.
result = yield self.current_sync_for_user(sync_config, since_token,
full_state=full_state)
defer.returnValue(result)
else:
def current_sync_callback(before_token, after_token):
return self.current_sync_for_user(sync_config, since_token)
result = yield self.notifier.wait_for_events(
sync_config.user, timeout, current_sync_callback,
from_token=since_token
)
defer.returnValue(result)
def current_sync_for_user(self, sync_config, since_token=None,
full_state=False):
"""Get the sync for client needed to match what the server has now.
Returns:
A Deferred SyncResult.
"""
if since_token is None or full_state:
return self.full_state_sync(sync_config, since_token)
else:
return self.incremental_sync_with_gap(sync_config, since_token)
@defer.inlineCallbacks
def full_state_sync(self, sync_config, timeline_since_token):
"""Get a sync for a client which is starting without any state.
        If a 'timeline_since_token' is given, only timeline events which have
happened since that token will be returned.
Returns:
A Deferred SyncResult.
"""
now_token = yield self.event_sources.get_current_token()
now_token, ephemeral_by_room = yield self.ephemeral_by_room(
sync_config, now_token
)
presence_stream = self.event_sources.sources["presence"]
# TODO (mjark): This looks wrong, shouldn't we be getting the presence
# UP to the present rather than after the present?
pagination_config = PaginationConfig(from_token=now_token)
presence, _ = yield presence_stream.get_pagination_rows(
user=sync_config.user,
pagination_config=pagination_config.get_source_config("presence"),
key=None
)
room_list = yield self.store.get_rooms_for_user_where_membership_is(
user_id=sync_config.user.to_string(),
membership_list=(
Membership.INVITE,
Membership.JOIN,
Membership.LEAVE,
Membership.BAN
)
)
tags_by_room = yield self.store.get_tags_for_user(
sync_config.user.to_string()
)
joined = []
invited = []
archived = []
for event in room_list:
if event.membership == Membership.JOIN:
room_sync = yield self.full_state_sync_for_joined_room(
room_id=event.room_id,
sync_config=sync_config,
now_token=now_token,
timeline_since_token=timeline_since_token,
ephemeral_by_room=ephemeral_by_room,
tags_by_room=tags_by_room,
)
joined.append(room_sync)
elif event.membership == Membership.INVITE:
invite = yield self.store.get_event(event.event_id)
invited.append(InvitedSyncResult(
room_id=event.room_id,
invite=invite,
))
elif event.membership in (Membership.LEAVE, Membership.BAN):
leave_token = now_token.copy_and_replace(
"room_key", "s%d" % (event.stream_ordering,)
)
room_sync = yield self.full_state_sync_for_archived_room(
sync_config=sync_config,
room_id=event.room_id,
leave_event_id=event.event_id,
leave_token=leave_token,
timeline_since_token=timeline_since_token,
tags_by_room=tags_by_room,
)
archived.append(room_sync)
defer.returnValue(SyncResult(
presence=presence,
joined=joined,
invited=invited,
archived=archived,
next_batch=now_token,
))
@defer.inlineCallbacks
def full_state_sync_for_joined_room(self, room_id, sync_config,
now_token, timeline_since_token,
ephemeral_by_room, tags_by_room):
"""Sync a room for a client which is starting without any state
Returns:
A Deferred JoinedSyncResult.
"""
batch = yield self.load_filtered_recents(
room_id, sync_config, now_token, since_token=timeline_since_token
)
current_state = yield self.get_state_at(room_id, now_token)
defer.returnValue(JoinedSyncResult(
room_id=room_id,
timeline=batch,
state=current_state,
ephemeral=ephemeral_by_room.get(room_id, []),
account_data=self.account_data_for_room(
room_id, tags_by_room
),
))
def account_data_for_room(self, room_id, tags_by_room):
account_data = []
tags = tags_by_room.get(room_id)
if tags is not None:
account_data.append({
"type": "m.tag",
"content": {"tags": tags},
})
return account_data
@defer.inlineCallbacks
def ephemeral_by_room(self, sync_config, now_token, since_token=None):
"""Get the ephemeral events for each room the user is in
Args:
sync_config (SyncConfig): The flags, filters and user for the sync.
now_token (StreamToken): Where the server is currently up to.
since_token (StreamToken): Where the server was when the client
last synced.
Returns:
A tuple of the now StreamToken, updated to reflect which typing
events are included, and a dict mapping from room_id to a list of
typing events for that room.
"""
typing_key = since_token.typing_key if since_token else "0"
rooms = yield self.store.get_rooms_for_user(sync_config.user.to_string())
room_ids = [room.room_id for room in rooms]
typing_source = self.event_sources.sources["typing"]
typing, typing_key = yield typing_source.get_new_events(
user=sync_config.user,
from_key=typing_key,
limit=sync_config.filter.ephemeral_limit(),
room_ids=room_ids,
is_guest=False,
)
now_token = now_token.copy_and_replace("typing_key", typing_key)
ephemeral_by_room = {}
for event in typing:
# we want to exclude the room_id from the event, but modifying the
# result returned by the event source is poor form (it might cache
# the object)
room_id = event["room_id"]
event_copy = {k: v for (k, v) in event.iteritems()
if k != "room_id"}
ephemeral_by_room.setdefault(room_id, []).append(event_copy)
receipt_key = since_token.receipt_key if since_token else "0"
receipt_source = self.event_sources.sources["receipt"]
receipts, receipt_key = yield receipt_source.get_new_events(
user=sync_config.user,
from_key=receipt_key,
limit=sync_config.filter.ephemeral_limit(),
room_ids=room_ids,
# /sync doesn't support guest access; guests can't reach this code path
is_guest=False,
)
now_token = now_token.copy_and_replace("receipt_key", receipt_key)
for event in receipts:
room_id = event["room_id"]
# exclude room id, as above
event_copy = {k: v for (k, v) in event.iteritems()
if k != "room_id"}
ephemeral_by_room.setdefault(room_id, []).append(event_copy)
defer.returnValue((now_token, ephemeral_by_room))
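# A sketch of the value returned above (room ids and event payloads are
# hypothetical, for illustration only):
#
#   (now_token, {
#       "!room1:server": [
#           {"type": "m.typing", "content": {"user_ids": ["@alice:server"]}},
#       ],
#   })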
@defer.inlineCallbacks
def full_state_sync_for_archived_room(self, room_id, sync_config,
leave_event_id, leave_token,
timeline_since_token, tags_by_room):
"""Sync a room for a client which is starting without any state
Returns:
A Deferred JoinedSyncResult.
"""
batch = yield self.load_filtered_recents(
room_id, sync_config, leave_token, since_token=timeline_since_token
)
leave_state = yield self.store.get_state_for_event(leave_event_id)
defer.returnValue(ArchivedSyncResult(
room_id=room_id,
timeline=batch,
state=leave_state,
account_data=self.account_data_for_room(
room_id, tags_by_room
),
))
@defer.inlineCallbacks
def incremental_sync_with_gap(self, sync_config, since_token):
""" Get the incremental delta needed to bring the client up to
date with the server.
Returns:
A Deferred SyncResult.
"""
now_token = yield self.event_sources.get_current_token()
rooms = yield self.store.get_rooms_for_user(sync_config.user.to_string())
room_ids = [room.room_id for room in rooms]
presence_source = self.event_sources.sources["presence"]
presence, presence_key = yield presence_source.get_new_events(
user=sync_config.user,
from_key=since_token.presence_key,
limit=sync_config.filter.presence_limit(),
room_ids=room_ids,
# /sync doesn't support guest access; guests can't reach this code path
is_guest=False,
)
now_token = now_token.copy_and_replace("presence_key", presence_key)
now_token, ephemeral_by_room = yield self.ephemeral_by_room(
sync_config, now_token, since_token
)
rm_handler = self.hs.get_handlers().room_member_handler
app_service = yield self.store.get_app_service_by_user_id(
sync_config.user.to_string()
)
if app_service:
rooms = yield self.store.get_app_service_rooms(app_service)
joined_room_ids = set(r.room_id for r in rooms)
else:
joined_room_ids = yield rm_handler.get_joined_rooms_for_user(
sync_config.user
)
timeline_limit = sync_config.filter.timeline_limit()
room_events, _ = yield self.store.get_room_events_stream(
sync_config.user.to_string(),
from_key=since_token.room_key,
to_key=now_token.room_key,
limit=timeline_limit + 1,
)
tags_by_room = yield self.store.get_updated_tags(
sync_config.user.to_string(),
since_token.account_data_key,
)
joined = []
archived = []
if len(room_events) <= timeline_limit:
# There is no gap in any of the rooms. Therefore we can just
# partition the new events by room and return them.
logger.debug("Got %i events for incremental sync - not limited",
len(room_events))
invite_events = []
leave_events = []
events_by_room_id = {}
for event in room_events:
events_by_room_id.setdefault(event.room_id, []).append(event)
if event.room_id not in joined_room_ids:
if (event.type == EventTypes.Member
and event.state_key == sync_config.user.to_string()):
if event.membership == Membership.INVITE:
invite_events.append(event)
elif event.membership in (Membership.LEAVE, Membership.BAN):
leave_events.append(event)
for room_id in joined_room_ids:
recents = events_by_room_id.get(room_id, [])
logger.debug("Events for room %s: %r", room_id, recents)
state = {
(event.type, event.state_key): event
for event in recents if event.is_state()}
limited = False
if recents:
prev_batch = now_token.copy_and_replace(
"room_key", recents[0].internal_metadata.before
)
else:
prev_batch = now_token
just_joined = yield self.check_joined_room(sync_config, state)
if just_joined:
logger.debug("User has just joined %s: needs full state",
room_id)
state = yield self.get_state_at(room_id, now_token)
# the timeline is inherently limited if we've just joined
limited = True
room_sync = JoinedSyncResult(
room_id=room_id,
timeline=TimelineBatch(
events=recents,
prev_batch=prev_batch,
limited=limited,
),
state=state,
ephemeral=ephemeral_by_room.get(room_id, []),
account_data=self.account_data_for_room(
room_id, tags_by_room
),
)
logger.debug("Result for room %s: %r", room_id, room_sync)
if room_sync:
joined.append(room_sync)
else:
logger.debug("Got %i events for incremental sync - hit limit",
len(room_events))
invite_events = yield self.store.get_invites_for_user(
sync_config.user.to_string()
)
leave_events = yield self.store.get_leave_and_ban_events_for_user(
sync_config.user.to_string()
)
for room_id in joined_room_ids:
room_sync = yield self.incremental_sync_with_gap_for_room(
room_id, sync_config, since_token, now_token,
ephemeral_by_room, tags_by_room
)
if room_sync:
joined.append(room_sync)
for leave_event in leave_events:
room_sync = yield self.incremental_sync_for_archived_room(
sync_config, leave_event, since_token, tags_by_room
)
archived.append(room_sync)
invited = [
InvitedSyncResult(room_id=event.room_id, invite=event)
for event in invite_events
]
defer.returnValue(SyncResult(
presence=presence,
joined=joined,
invited=invited,
archived=archived,
next_batch=now_token,
))
@defer.inlineCallbacks
def load_filtered_recents(self, room_id, sync_config, now_token,
since_token=None):
"""
:returns: a Deferred TimelineBatch
"""
limited = True
recents = []
filtering_factor = 2
timeline_limit = sync_config.filter.timeline_limit()
load_limit = max(timeline_limit * filtering_factor, 100)
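# over-fetch by a filtering factor: some of the loaded events will be
# dropped by the filters below, so load extra and retry a bounded number
# of times until the timeline limit is met or the room has no more events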
max_repeat = 3 # Only try a few times per room, otherwise give up
room_key = now_token.room_key
end_key = room_key
while limited and len(recents) < timeline_limit and max_repeat:
events, keys = yield self.store.get_recent_events_for_room(
room_id,
limit=load_limit + 1,
from_token=since_token.room_key if since_token else None,
end_token=end_key,
)
(room_key, _) = keys
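# the returned key may be a topological token ("t<topological>-<stream>");
# keep only the trailing stream ordering and rebuild a stream token
# ("s<stream>") so the next iteration pages further back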
end_key = "s" + room_key.split('-')[-1]
loaded_recents = sync_config.filter.filter_room_timeline(events)
loaded_recents = yield self._filter_events_for_client(
sync_config.user.to_string(), loaded_recents,
)
loaded_recents.extend(recents)
recents = loaded_recents
if len(events) <= load_limit:
limited = False
max_repeat -= 1
if len(recents) > timeline_limit:
limited = True
recents = recents[-timeline_limit:]
room_key = recents[0].internal_metadata.before
prev_batch_token = now_token.copy_and_replace(
"room_key", room_key
)
defer.returnValue(TimelineBatch(
events=recents, prev_batch=prev_batch_token, limited=limited
))
@defer.inlineCallbacks
def incremental_sync_with_gap_for_room(self, room_id, sync_config,
since_token, now_token,
ephemeral_by_room, tags_by_room):
""" Get the incremental delta needed to bring the client up to date for
the room. Gives the client the most recent events and the changes to
state.
Returns:
A Deferred JoinedSyncResult
"""
logger.debug("Doing incremental sync for room %s between %s and %s",
room_id, since_token, now_token)
# TODO(mjark): Check for redactions we might have missed.
batch = yield self.load_filtered_recents(
room_id, sync_config, now_token, since_token,
)
logging.debug("Recents %r", batch)
current_state = yield self.get_state_at(room_id, now_token)
state_at_previous_sync = yield self.get_state_at(
room_id, stream_position=since_token
)
state = yield self.compute_state_delta(
since_token=since_token,
previous_state=state_at_previous_sync,
current_state=current_state,
)
just_joined = yield self.check_joined_room(sync_config, state)
if just_joined:
state = yield self.get_state_at(room_id, now_token)
room_sync = JoinedSyncResult(
room_id=room_id,
timeline=batch,
state=state,
ephemeral=ephemeral_by_room.get(room_id, []),
account_data=self.account_data_for_room(
room_id, tags_by_room
),
)
logging.debug("Room sync: %r", room_sync)
defer.returnValue(room_sync)
@defer.inlineCallbacks
def incremental_sync_for_archived_room(self, sync_config, leave_event,
since_token, tags_by_room):
""" Get the incremental delta needed to bring the client up to date for
the archived room.
Returns:
A Deferred ArchivedSyncResult
"""
stream_token = yield self.store.get_stream_token_for_event(
leave_event.event_id
)
leave_token = since_token.copy_and_replace("room_key", stream_token)
batch = yield self.load_filtered_recents(
leave_event.room_id, sync_config, leave_token, since_token,
)
logging.debug("Recents %r", batch)
state_events_at_leave = yield self.store.get_state_for_event(
leave_event.event_id
)
state_at_previous_sync = yield self.get_state_at(
leave_event.room_id, stream_position=since_token
)
state_events_delta = yield self.compute_state_delta(
since_token=since_token,
previous_state=state_at_previous_sync,
current_state=state_events_at_leave,
)
room_sync = ArchivedSyncResult(
room_id=leave_event.room_id,
timeline=batch,
state=state_events_delta,
account_data=self.account_data_for_room(
leave_event.room_id, tags_by_room
),
)
logging.debug("Room sync: %r", room_sync)
defer.returnValue(room_sync)
@defer.inlineCallbacks
def get_state_after_event(self, event):
"""
Get the room state after the given event
:param synapse.events.EventBase event: event of interest
:return: A Deferred map from ((type, state_key)->Event)
"""
state = yield self.store.get_state_for_event(event.event_id)
if event.is_state():
state = state.copy()
state[(event.type, event.state_key)] = event
defer.returnValue(state)
@defer.inlineCallbacks
def get_state_at(self, room_id, stream_position):
""" Get the room state at a particular stream position
:param str room_id: room for which to get state
:param StreamToken stream_position: point at which to get state
:returns: A Deferred map from ((type, state_key)->Event)
"""
last_events, token = yield self.store.get_recent_events_for_room(
room_id, end_token=stream_position.room_key, limit=1,
)
if last_events:
last_event = last_events[-1]
state = yield self.get_state_after_event(last_event)
else:
# no events in this room - so presumably no state
state = {}
defer.returnValue(state)
def compute_state_delta(self, since_token, previous_state, current_state):
""" Works out the differnce in state between the current state and the
state the client got when it last performed a sync.
:param str since_token: the point we are comparing against
:param dict[(str,str), synapse.events.FrozenEvent] previous_state: the
state to compare to
:param dict[(str,str), synapse.events.FrozenEvent] current_state: the
new state
:returns: A new event dictionary
"""
# TODO(mjark) Check if the state events were received by the server
# after the previous sync, since we need to include those state
# updates even if they occurred logically before the previous event.
# TODO(mjark) Check for new redactions in the state events.
state_delta = {}
for key, event in current_state.iteritems():
if (key not in previous_state or
previous_state[key].event_id != event.event_id):
state_delta[key] = event
return state_delta
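# A minimal sketch of the delta computation above (keys and events are
# hypothetical):
#
#   previous = {("m.room.name", ""): ev1}
#   current = {("m.room.name", ""): ev2, ("m.room.topic", ""): ev3}
#   compute_state_delta(token, previous, current)
#   => {("m.room.name", ""): ev2, ("m.room.topic", ""): ev3}
#
# i.e. only entries that are new, or whose event_id changed since the
# previous sync, are included in the delta.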
def check_joined_room(self, sync_config, state_delta):
"""
Check if the user has just joined the given room (so should
be given the full state)
:param sync_config:
:param dict[(str,str), synapse.events.FrozenEvent] state_delta: the
difference in state since the last sync
:returns: True if the user has just joined the room, False otherwise
"""
join_event = state_delta.get((
EventTypes.Member, sync_config.user.to_string()), None)
if join_event is not None:
if join_event.content["membership"] == Membership.JOIN:
return True
return False
"""
MPII Keypoints processing functions.
"""
from __future__ import print_function, division
import os
import numpy as np
from dbcollection.datasets import BaseTask, BaseField
from dbcollection.utils.decorators import display_message_processing, display_message_load_annotations
from dbcollection.utils.string_ascii import convert_str_to_ascii as str2ascii
from dbcollection.utils.pad import pad_list
from dbcollection.utils.file_load import load_matlab
class Keypoints(BaseTask):
"""MPII Keypoints preprocessing functions."""
filename_h5 = 'keypoint'
is_full = True
def load_data(self):
"""
Load data of the dataset (create a generator).
"""
loader = DatasetAnnotationLoader(
is_full=self.is_full,
data_path=self.data_path,
cache_path=self.cache_path,
verbose=self.verbose
)
yield {"train": loader.load_trainval_data()}
yield {"train01": loader.load_train_data()}
yield {"val01": loader.load_val_data()}
yield {"test": loader.load_test_data()}
def process_set_metadata(self, data, set_name):
"""
Saves the metadata of a set.
"""
args = {
"is_full": self.is_full,
"data": data,
"set_name": set_name,
"hdf5_manager": self.hdf5_manager,
"verbose": self.verbose
}
# Fields
if self.verbose:
print('\n==> Setting up the data fields:')
image_ids = ImageFilenamesField(**args).process()
ScalesField(**args).process()
ObjposField(**args).process()
video_ids = VideoIdsField(**args).process()
VideoNamesField(**args).process(video_ids)
FrameSecField(**args).process()
KeypointLabelsField(**args).process()
CategoryNamesField(**args).process()
ActivityNamesField(**args).process()
ActivityIdsField(**args).process()
SinglePersonField(**args).process()
if set_name != 'test':
HeadBoundingBoxField(**args).process()
KeypointsField(**args).process()
ObjectFieldNamesField(**args).process()
ObjectIdsField(**args).process(image_ids, video_ids)
# Lists
if self.verbose:
print('\n==> Setting up ordered lists:')
SinglePersonPerImageList(**args).process()
if set_name != 'test':
KeypointsPerImageList(**args).process()
# -----------------------------------------------------------
# Data load / set up
# -----------------------------------------------------------
class DatasetAnnotationLoader:
"""Annotation's data loader for the cifar10 dataset (train/test)."""
def __init__(self, is_full, data_path, cache_path, verbose):
self.is_full = is_full
self.data_path = data_path
self.cache_path = cache_path
self.verbose = verbose
def load_trainval_data(self):
"""Loads the train set annotation data from disk
and returns it as a dictionary."""
return self.load_annotations_set(is_test=False)
def load_train_data(self):
"""Loads the train+val set annotation data from disk
and returns it as a dictionary.
This validation set is a split of the training set
of the MPII Human Pose dataset. It is a custom
split not available in the original dataset but
it is crafted for use in validation tasks.
"""
from .train_image_ids import train_images_ids
annotations = self.load_annotations_set(is_test=False)
return self.filter_annotations_by_ids(annotations, train_images_ids)
def load_val_data(self):
"""Loads the val set annotation data from disk
and returns it as a dictionary.
This validation set is a split of the training set
of the MPII Human Pose dataset. It is a custom
split not available in the original dataset but
it is crafted for use in validation tasks.
"""
from .val_image_ids import val_images_ids
annotations = self.load_annotations_set(is_test=False)
return self.filter_annotations_by_ids(annotations, val_images_ids)
def load_test_data(self):
"""Loads the test set annotation data from disk
and returns it as a dictionary."""
return self.load_annotations_set(is_test=True)
@display_message_load_annotations
def load_annotations_set(self, is_test):
"""Loads the annotation's data for the train + test splits."""
annotations = self.load_annotation_data_from_disk()
nfiles = self.get_num_files(annotations)
return {
"image_ids": self.get_image_ids(annotations, nfiles, is_test),
"image_filenames": self.get_image_filenames(annotations, nfiles, is_test),
"frame_sec": self.get_frame_sec(annotations, nfiles, is_test),
"video_idx": self.get_video_indexes(annotations, nfiles, is_test),
"pose_annotations": self.get_pose_annotations(annotations, nfiles, is_test),
"activity": self.get_activities(annotations, nfiles, is_test),
"single_person": self.get_single_persons(annotations, nfiles, is_test),
"video_names": self.get_video_names(annotations)
}
def load_annotation_data_from_disk(self):
"""Loads the annotation's data from the data file."""
annotation_filename = os.path.join(self.data_path,
'mpii_human_pose_v1_u12_2',
'mpii_human_pose_v1_u12_1.mat')
annotations = self.load_file(annotation_filename)
return annotations
def load_file(self, filename):
"""Loads the data of the annotation file."""
return load_matlab(filename)
def get_num_files(self, annotations):
"""Returns the total number of files available in the dataset."""
return len(annotations["RELEASE"][0][0][3])
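# Index map for the scipy-loaded RELEASE struct, inferred from the
# accesses in this loader (field order follows the MPII .mat layout):
#
#   annotations['RELEASE'][0][0][0] -> annolist (images, annorects, vididx)
#   annotations['RELEASE'][0][0][1] -> img_train flags (0 == test image)
#   annotations['RELEASE'][0][0][3] -> single_person ids per image
#   annotations['RELEASE'][0][0][4] -> act (category/activity names + ids)
#   annotations['RELEASE'][0][0][5] -> video_list (video names)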
def get_image_ids(self, annotations, num_files, is_test):
"""Returns the image indexes from the annotation's data for a
set split."""
image_ids = []
for ifile in range(num_files):
if is_test == self.is_test_annotation(annotations, ifile):
image_ids.append(ifile)
return image_ids
def get_image_filenames(self, annotations, num_files, is_test):
"""Returns the image filenames from the annotation's data for a
set split."""
image_filenames = []
for ifile in range(num_files):
if is_test == self.is_test_annotation(annotations, ifile):
filename = self.get_filename_from_annotation_id(annotations, ifile)
image_filenames.append(os.path.join('images', filename))
return image_filenames
def is_test_annotation(self, annotations, ifile):
"""Returns True if the annotation belongs to the test set.
Otherwise, returns False."""
return annotations['RELEASE'][0][0][1][0][ifile] == 0
def get_filename_from_annotation_id(self, annotations, ifile):
"""Return the image file name for an id."""
return str(annotations['RELEASE'][0][0][0][0][ifile][0][0][0][0][0])
def get_frame_sec(self, annotations, num_files, is_test):
"""Returns the image's frame position (seconds) from the
annotation's data for a set split."""
frame_sec = []
for ifile in range(num_files):
if is_test == self.is_test_annotation(annotations, ifile):
frame_sec_ = self.get_frame_sec_from_annotation_id(annotations, ifile)
frame_sec.append(frame_sec_)
return frame_sec
def get_frame_sec_from_annotation_id(self, annotations, ifile):
if any(self.get_annotations_list_by_image_id(annotations, ifile)):
return int(annotations['RELEASE'][0][0][0][0][ifile][2][0][0])
else:
return -1
def get_annotations_list_by_image_id(self, annotations, ifile):
""".Returns a list of annotations for a given image id."""
return annotations['RELEASE'][0][0][0][0][ifile][3][0]
def get_video_indexes(self, annotations, num_files, is_test):
"""Returns the image's video identifier from the annotation's
data for a set split."""
video_indexes = []
for ifile in range(num_files):
if is_test == self.is_test_annotation(annotations, ifile):
video_idx = self.get_video_idx_from_annotation_id(annotations, ifile)
video_indexes.append(video_idx)
return video_indexes
def get_video_idx_from_annotation_id(self, annotations, ifile):
annotations_image = self.get_annotations_list_by_image_id(annotations, ifile)
if any(annotations_image):
return int(annotations_image[0]) - 1
else:
return -1
def get_pose_annotations(self, annotations, num_files, is_test):
"""Returns the poses annotations of individual persons from the
annotation's data for a set split."""
poses_annotations = []
for ifile in range(num_files):
if is_test == self.is_test_annotation(annotations, ifile):
poses = self.get_poses_from_annotation_id(annotations, ifile, is_test)
poses_annotations.append(poses)
return poses_annotations
def get_poses_from_annotation_id(self, annotations, ifile, is_test):
"""Returns the pose(s) annotations for an image file."""
poses = []
pnames = self.get_pose_annotation_names(annotations, ifile)
if any(pnames):
if any(self.get_annotations_list_by_image_id(annotations, ifile)):
poses = self.get_full_pose_annotations(annotations, ifile, pnames)
else:
if is_test or self.is_full:
poses = self.get_partial_poses_annotations(annotations, ifile, pnames)
return poses
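# images with a non-empty annotation list get the full pose annotations
# (keypoints, head bbox, scale, objpos); otherwise, for test images or
# when building the full task, only the partial scale/objpos annotations
# are kept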
def get_pose_annotation_names(self, annotations, ifile):
"""Returns the annotation variable (table) names that categorize the annotated data."""
try:
annot_ptr = self.get_annotation_by_file_id(annotations, ifile)
names = annot_ptr.dtype.names
if names is not None:
return names
else:
return []
except IndexError:
return []
def get_annotation_by_file_id(self, annotations, ifile):
return annotations['RELEASE'][0][0][0][0][ifile][1][0]
def get_full_pose_annotations(self, annotations, ifile, pnames):
"""Returns the full pose's annotations (head bbox, body joints keypoints,
center coordinates and scale) for a single file of all person detections."""
poses_annotations = []
annotations_file = self.get_annotation_by_file_id(annotations, ifile)
for i in range(len(annotations_file)):
keypoints = self.get_keypoints(annotations_file, i)
head_bbox = self.get_head_coordinates(annotations_file, i, pnames)
scale = self.get_person_scale(annotations_file, i, pnames)
objpos = self.get_person_center_coordinates(annotations_file, i, pnames)
poses_annotations.append({
"keypoints": keypoints,
"head_bbox": head_bbox,
"scale": scale,
"objpos": objpos
})
return poses_annotations
def get_keypoints(self, annotations_file, ipose):
"""Returns the keypoints annotations for a single person detection."""
keypoints = [[0, 0, 0]] * 16 # [x, y, is_visible]
keypoints_annotations = self.get_keypoint_annotations(annotations_file, ipose)
if any(keypoints_annotations):
vnames = keypoints_annotations.dtype.names
for i in range(len(keypoints_annotations)):
x = float(keypoints_annotations[i][vnames.index('x')][0][0])
y = float(keypoints_annotations[i][vnames.index('y')][0][0])
idx = int(keypoints_annotations[i][vnames.index('id')][0][0])
try:
is_visible = int(keypoints_annotations[i][vnames.index('is_visible')][0])
except (ValueError, IndexError):
is_visible = -1
keypoints[idx] = [x, y, is_visible]
return keypoints
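# keypoints is a 16-element list of [x, y, is_visible] triplets, ordered
# as in KeypointLabelsField below; joints without annotations stay
# [0, 0, 0], and is_visible is -1 when the flag is absent or unparseable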
def get_keypoint_annotations(self, annotations_file, ipose):
"""Returns the keypoint's annotations (x,y,id and is_visible)
for a single pose detection from the annotations data."""
try:
keypoint_annotations = annotations_file[ipose][4][0][0][0][0]
if isinstance(keypoint_annotations, str):
return []
return keypoint_annotations
except (AttributeError, IndexError):
return []
def get_head_coordinates(self, annotations_file, ipose, pnames):
"""Returns the head bounding box coordinates of a person detection."""
try:
x1 = annotations_file[ipose][pnames.index('x1')][0][0]
y1 = annotations_file[ipose][pnames.index('y1')][0][0]
x2 = annotations_file[ipose][pnames.index('x2')][0][0]
y2 = annotations_file[ipose][pnames.index('y2')][0][0]
except ValueError:
x1, y1, x2, y2 = -1, -1, -1, -1
return float(x1), float(y1), float(x2), float(y2)
def get_person_scale(self, annotations_file, ipose, pnames):
"""Returns the scale of a person detection."""
try:
scale = annotations_file[ipose][pnames.index('scale')][0][0]
except (ValueError, IndexError):
scale = -1
return float(scale)
def get_person_center_coordinates(self, annotations_file, ipose, pnames):
"""Returns the center coordinates of a person dection."""
try:
objnames = annotations_file[ipose][pnames.index('objpos')][0].dtype.names
center_x = annotations_file[ipose][pnames.index('objpos')][0][0][objnames.index('x')][0][0]
center_y = annotations_file[ipose][pnames.index('objpos')][0][0][objnames.index('y')][0][0]
except (ValueError, IndexError):
center_x, center_y = -1, -1
return {"x": float(center_x), "y": float(center_y)}
def get_partial_poses_annotations(self, annotations, ifile, pnames):
"""Returns partial poses' annotations (center coordinates and scale)
for a single file of all person detections."""
poses_annotations = []
annotations_file = self.get_annotation_by_file_id(annotations, ifile)
for i in range(len(annotations_file)):
scale = self.get_person_scale(annotations_file, i, pnames)
objpos = self.get_person_center_coordinates(annotations_file, i, pnames)
poses_annotations.append({
"scale": scale,
"objpos": objpos
})
return poses_annotations
def get_activities(self, annotations, annotation_size, is_test):
"""Returns the video's activities from the annotation's data
for a set split."""
activities = []
for ifile in range(annotation_size):
if is_test == self.is_test_annotation(annotations, ifile):
category_name, activity_name, activity_id = '', '', -1
if any(self.get_activity_annotation_of_file(annotations, ifile)):
category_name = self.get_category_name(annotations, ifile)
activity_name = self.get_activity_name(annotations, ifile)
activity_id = self.get_activity_id(annotations, ifile)
activities.append({
"category_name": str(category_name),
"activity_name": str(activity_name),
"activity_id": int(activity_id)
})
return activities
def get_activity_annotation_of_file(self, annotations, ifile):
"""Returns the activity annotations of an image file."""
return annotations['RELEASE'][0][0][4][ifile][0][0]
def get_category_name(self, annotations, ifile):
"""Returns the category name of the activity of an image file."""
return annotations['RELEASE'][0][0][4][ifile][0][0][0]
def get_activity_name(self, annotations, ifile):
"""Returns the activity name of the activity of an image file."""
return annotations['RELEASE'][0][0][4][ifile][0][1][0]
def get_activity_id(self, annotations, ifile):
"""Returns the activity id of the activity of an image file."""
return annotations['RELEASE'][0][0][4][ifile][0][2][0][0]
def get_single_persons(self, annotations, annotation_size, is_test):
"""Returns a list of 0 and 1s indicating the presence of a
single person from the annotation's data for a set split."""
single_person = []
for ifile in range(annotation_size):
if is_test == self.is_test_annotation(annotations, ifile):
single_person_ = self.get_single_persons_by_file(annotations, ifile)
single_person.append(single_person_)
return single_person
def get_single_persons_by_file(self, annotations, ifile):
"""Returns a list of single persons (0s and 1s) of an image file."""
annotation_single_person = self.get_single_person_annotations_for_file(annotations, ifile)
if any(annotation_single_person):
single_person = []
for i in range(len(annotation_single_person)):
is_single = int(annotation_single_person[i][0])
single_person.append(is_single)
else:
single_person = [-1]
return single_person
def get_single_person_annotations_for_file(self, annotations, ifile):
"""Returns the single person annotations of an image file."""
return annotations['RELEASE'][0][0][3][ifile][0]
def get_video_names(self, annotations):
"""Returns the video names of the dataset."""
video_names = []
annotations_videos = self.get_video_annotations(annotations)
for ivideo in range(len(annotations_videos)):
video_name = str(annotations_videos[ivideo][0])
video_names.append(video_name)
return video_names
def get_video_annotations(self, annotations):
"""Returns the video names annotations."""
return annotations['RELEASE'][0][0][5][0]
def filter_annotations_by_ids(self, annotations, set_image_ids):
"""Returns a subset of the annotations w.r.t. a list of image indices."""
filtered_ids = self.get_filtered_ids(annotations['image_ids'], set_image_ids)
return {
"image_filenames": self.select_items_from_list(annotations['image_filenames'], filtered_ids),
"frame_sec": self.select_items_from_list(annotations['frame_sec'], filtered_ids),
"video_idx": self.select_items_from_list(annotations['video_idx'], filtered_ids),
"pose_annotations": self.select_items_from_list(annotations['pose_annotations'], filtered_ids),
"activity": self.select_items_from_list(annotations['activity'], filtered_ids),
"single_person": self.select_items_from_list(annotations['single_person'], filtered_ids),
"video_names": annotations['video_names']
}
def get_filtered_ids(self, image_ids, set_image_ids):
filtered_ids = []
for idx in set_image_ids:
try:
filtered_ids.append(image_ids.index(idx))
except ValueError:
pass
return filtered_ids
def select_items_from_list(self, annotations, filtered_ids):
annotations_filtered = []
for idx in filtered_ids:
annotations_filtered.append(annotations[idx])
return annotations_filtered
# -----------------------------------------------------------
# Metadata fields
# -----------------------------------------------------------
class CustomBaseField(BaseField):
"""Custom BaseField with common methods for some fields."""
def get_image_filenames_annotations(self):
return self.data['image_filenames']
def get_pose_annotations(self):
return self.data['pose_annotations']
def get_single_person_annotations(self):
return self.data['single_person']
def get_video_idx_annotations(self):
return self.data['video_idx']
def get_video_names_annotations(self):
return self.data['video_names']
def get_frame_sec_annotations(self):
return self.data['frame_sec']
def get_activity_annotations(self):
return self.data['activity']
class ImageFilenamesField(CustomBaseField):
"""Image filenames' field metadata process/save class."""
@display_message_processing('image_filenames')
def process(self):
"""Processes and saves the image filenames metadata to hdf5."""
image_filenames, image_filename_ids = self.get_image_filenames()
self.save_field_to_hdf5(
set_name=self.set_name,
field='image_filenames',
data=str2ascii(image_filenames),
dtype=np.uint8,
fillvalue=0
)
return image_filename_ids
def get_image_filenames(self):
"""Returns a list of image filenames and ids."""
image_filenames = []
image_filenames_ids = []
image_fnames = self.get_image_filenames_annotations()
pose_annotations = self.get_pose_annotations()
for i, image_filename in enumerate(image_fnames):
image_pose_annotations = pose_annotations[i]
for j, _ in enumerate(image_pose_annotations):
image_filenames.append(image_filename)
image_filenames_ids.append(i)
return image_filenames, image_filenames_ids
class ScalesField(CustomBaseField):
"""Person's scale field metadata process/save class."""
@display_message_processing('scale')
def process(self):
"""Processes and saves the person's scale metadata to hdf5."""
scales = self.get_scales()
self.save_field_to_hdf5(
set_name=self.set_name,
field='scale',
data=np.array(scales, dtype=np.float),
dtype=np.float,
fillvalue=0
)
def get_scales(self):
"""Returns a list of person's scale."""
scales = []
image_fnames = self.get_image_filenames_annotations()
pose_annotations = self.get_pose_annotations()
for i, _ in enumerate(image_fnames):
image_pose_annotations = pose_annotations[i]
for _, pose in enumerate(image_pose_annotations):
scales.append(pose['scale'])
return scales
class ObjposField(CustomBaseField):
"""Person's position field metadata process/save class."""
@display_message_processing('objpos')
def process(self):
"""Processes and saves the person's position metadata to hdf5."""
objpos = self.get_objpos()
self.save_field_to_hdf5(
set_name=self.set_name,
field='objpos',
data=np.array(objpos, dtype=np.float),
dtype=np.float,
fillvalue=0
)
def get_objpos(self):
"""Returns a list of person's position."""
objpos = []
image_fnames = self.get_image_filenames_annotations()
pose_annotations = self.get_pose_annotations()
for i, _ in enumerate(image_fnames):
image_pose_annotations = pose_annotations[i]
for _, pose in enumerate(image_pose_annotations):
objpos.append([pose['objpos']['x'], pose['objpos']['y']])
return objpos
class VideoIdsField(CustomBaseField):
"""Video ids field metadata process/save class."""
@display_message_processing('video_ids')
def process(self):
"""Processes and saves the video ids metadata to hdf5."""
video_ids = self.get_video_ids()
self.save_field_to_hdf5(
set_name=self.set_name,
field='video_id',
data=np.array(video_ids, dtype=np.int32),
dtype=np.int32,
fillvalue=-1
)
return video_ids
def get_video_ids(self):
"""Returns a list of video ids."""
video_ids = []
image_fnames = self.get_image_filenames_annotations()
pose_annotations = self.get_pose_annotations()
video_idx = self.get_video_idx_annotations()
for i, _ in enumerate(image_fnames):
image_pose_annotations = pose_annotations[i]
for _, pose in enumerate(image_pose_annotations):
video_ids.append(video_idx[i])
return video_ids
class VideoNamesField(CustomBaseField):
"""Video names field metadata process/save class."""
@display_message_processing('video_names')
def process(self, video_ids):
"""Processes and saves the video names metadata to hdf5."""
video_names = self.get_video_names(video_ids)
self.save_field_to_hdf5(
set_name=self.set_name,
field='video_name',
data=str2ascii(video_names),
dtype=np.uint8,
fillvalue=0
)
def get_video_names(self, video_ids):
"""Returns a list of video names."""
video_names = []
video_names_annotations = self.get_video_names_annotations()
for video_idx in video_ids:
if video_idx >= 0:
video_names.append(video_names_annotations[video_idx])
else:
video_names.append('NA')
return video_names
class FrameSecField(CustomBaseField):
"""Frame sec field metadata process/save class."""
@display_message_processing('frame_sec')
def process(self):
"""Processes and saves the frame sec metadata to hdf5."""
frame_sec = self.get_frame_sec()
self.save_field_to_hdf5(
set_name=self.set_name,
field='frame_sec',
data=np.array(frame_sec, dtype=np.int32),
dtype=np.int32,
fillvalue=-1
)
def get_frame_sec(self):
"""Returns a list of frame sec."""
frame_sec = []
image_fnames = self.get_image_filenames_annotations()
pose_annotations = self.get_pose_annotations()
frame_sec_annotations = self.get_frame_sec_annotations()
for i, _ in enumerate(image_fnames):
image_pose_annotations = pose_annotations[i]
for _, pose in enumerate(image_pose_annotations):
frame_sec.append(frame_sec_annotations[i])
return frame_sec
class KeypointLabelsField(CustomBaseField):
"""Keypoint names field metadata process/save class."""
@display_message_processing('keypoint_labels')
def process(self):
"""Processes and saves the keypoint labels metadata to hdf5."""
keypoint_labels = self.get_keypoint_labels()
self.save_field_to_hdf5(
set_name=self.set_name,
field='keypoint_labels',
data=str2ascii(keypoint_labels),
dtype=np.uint8,
fillvalue=0
)
def get_keypoint_labels(self):
"""Returns a list of keypoint names."""
keypoints_labels = [
'right ankle', # -- 1
'right knee', # -- 2
'right hip', # -- 3
'left hip', # -- 4
'left knee', # -- 5
'left ankle', # -- 6
'pelvis', # -- 7
'thorax', # -- 8
'upper neck', # -- 9
'head top', # -- 10
'right wrist', # -- 11
'right elbow', # -- 12
'right shoulder', # -- 13
'left shoulder', # -- 14
'left elbow', # -- 15
'left wrist' # -- 16
]
return keypoints_labels
class CategoryNamesField(CustomBaseField):
"""Category names field metadata process/save class."""
@display_message_processing('category_name')
def process(self):
"""Processes and saves the category names metadata to hdf5."""
category_name = self.get_category_name()
self.save_field_to_hdf5(
set_name=self.set_name,
field='category_name',
data=str2ascii(category_name),
dtype=np.uint8,
fillvalue=0
)
def get_category_name(self):
"""Returns a list of category names."""
category_names = []
image_fnames = self.get_image_filenames_annotations()
pose_annotations = self.get_pose_annotations()
activity_annotations = self.get_activity_annotations()
for i, _ in enumerate(image_fnames):
image_pose_annotations = pose_annotations[i]
for _, pose in enumerate(image_pose_annotations):
category_names.append(activity_annotations[i]['category_name'])
return category_names
class ActivityNamesField(CustomBaseField):
"""Activity names field metadata process/save class."""
@display_message_processing('activity_name')
def process(self):
"""Processes and saves the activity names metadata to hdf5."""
activity_name = self.get_activity_name()
self.save_field_to_hdf5(
set_name=self.set_name,
field='activity_name',
data=str2ascii(activity_name),
dtype=np.uint8,
fillvalue=0
)
def get_activity_name(self):
"""Returns a list of activity names."""
activity_names = []
image_fnames = self.get_image_filenames_annotations()
pose_annotations = self.get_pose_annotations()
activity_annotations = self.get_activity_annotations()
for i, _ in enumerate(image_fnames):
image_pose_annotations = pose_annotations[i]
for _, pose in enumerate(image_pose_annotations):
activity_names.append(activity_annotations[i]['activity_name'])
return activity_names
class ActivityIdsField(CustomBaseField):
"""Activity ids field metadata process/save class."""
@display_message_processing('activity_id')
def process(self):
"""Processes and saves the activity ids metadata to hdf5."""
activity_id = self.get_activity_ids()
self.save_field_to_hdf5(
set_name=self.set_name,
field='activity_id',
data=np.array(activity_id, dtype=np.int32),
dtype=np.int32,
fillvalue=-1
)
def get_activity_ids(self):
"""Returns a list of activity ids."""
activity_ids = []
image_fnames = self.get_image_filenames_annotations()
pose_annotations = self.get_pose_annotations()
activity_annotations = self.get_activity_annotations()
for i, _ in enumerate(image_fnames):
image_pose_annotations = pose_annotations[i]
for _, pose in enumerate(image_pose_annotations):
activity_ids.append(activity_annotations[i]['activity_id'])
return activity_ids
class SinglePersonField(CustomBaseField):
"""Single person field metadata process/save class."""
@display_message_processing('single_person')
def process(self):
"""Processes and saves the single person metadata to hdf5."""
single_person = self.get_single_person()
self.save_field_to_hdf5(
set_name=self.set_name,
field='single_person',
data=np.array(single_person, dtype=np.uint8),
dtype=np.uint8,
fillvalue=-1
)
def get_single_person(self):
"""Returns a list of booleans ([0, 1]) indicating single person detections."""
single_persons = []
single_person_annotations = self.get_single_person_annotations()
image_fnames = self.get_image_filenames_annotations()
pose_annotations = self.get_pose_annotations()
activity_annotations = self.get_activity_annotations()
for i, _ in enumerate(image_fnames):
image_pose_annotations = pose_annotations[i]
for j, pose in enumerate(image_pose_annotations):
try:
val = single_person_annotations[i][j]
except IndexError:
val = -1
if val == -1:
single_persons.append(0)
else:
single_persons.append(1)
return single_persons
class ObjectFieldNamesField(CustomBaseField):
"""Object field names field metadata process/save class."""
@display_message_processing('object_fields')
def process(self):
"""Processes and saves the object fields metadata to hdf5."""
object_fields = self.get_object_fields()
self.save_field_to_hdf5(
set_name=self.set_name,
field='object_fields',
data=str2ascii(object_fields),
dtype=np.uint8,
fillvalue=0
)
def get_object_fields(self):
"""Returns a list of object field names."""
object_fields = [
"image_filenames",
"scale",
"objpos",
"video_ids",
"video_names",
"frame_sec",
"category_name",
"activity_name",
"activity_id",
"single_person",
"keypoint_labels"
]
if self.set_name != 'test':
object_fields += ["head_bbox", "keypoints"]
return object_fields
class ObjectIdsField(CustomBaseField):
"""Object ids field metadata process/save class."""
@display_message_processing('object_ids')
def process(self, image_ids, video_ids):
"""Processes and saves the object ids metadata to hdf5."""
object_ids = self.get_object_ids(image_ids, video_ids)
self.save_field_to_hdf5(
set_name=self.set_name,
field='object_ids',
data=np.array(object_ids, dtype=np.int32),
dtype=np.int32,
fillvalue=-1
)
def get_object_ids(self, image_ids, video_ids):
"""Returns a list of object ids."""
object_ids = []
image_fnames = self.get_image_filenames_annotations()
pose_annotations = self.get_pose_annotations()
counter = 0
for i, _ in enumerate(image_fnames):
image_pose_annotations = pose_annotations[i]
for _, pose in enumerate(image_pose_annotations):
obj_ids = [
image_ids[counter], # image_filenames
counter, # scale
counter, # objpos
video_ids[counter], # video_ids
video_ids[counter], # video_name
counter, # frame_sec
counter, # category_name
counter, # activity_name
counter, # activity_id
counter, # single_person
counter, # keypoint_labels
]
if self.set_name != 'test':
obj_ids += [counter, counter] # [head_bbox, keypoints]
object_ids.append(obj_ids)
counter += 1
return object_ids
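# Each row built above aligns positionally with the object_fields list,
# e.g. (values are illustrative, for a third person detection overall,
# belonging to image 1 and video 7):
#
#   object_fields: ["image_filenames", "scale", "objpos", "video_ids", ...]
#   object_ids:    [1,                 2,       2,        7,           ...]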
class HeadBoundingBoxField(CustomBaseField):
"""Head bounding box field metadata process/save class."""
@display_message_processing('head_bbox')
def process(self):
"""Processes and saves the head bbox metadata to hdf5."""
head_bboxes = self.get_head_bboxes()
self.save_field_to_hdf5(
set_name=self.set_name,
field='head_bbox',
data=np.array(head_bboxes, dtype=np.float),
dtype=np.float,
fillvalue=-1
)
def get_head_bboxes(self):
"""Returns a list of head bboxes."""
head_bboxes = []
image_fnames = self.get_image_filenames_annotations()
pose_annotations = self.get_pose_annotations()
for i, _ in enumerate(image_fnames):
image_pose_annotations = pose_annotations[i]
for _, pose in enumerate(image_pose_annotations):
head_bboxes.append(pose['head_bbox'])
return head_bboxes
class KeypointsField(CustomBaseField):
"""Keypoints field metadata process/save class."""
@display_message_processing('keypoints')
def process(self):
"""Processes and saves the keypoints metadata to hdf5."""
keypoints = self.get_keypoints()
self.save_field_to_hdf5(
set_name=self.set_name,
field='keypoints',
data=np.array(keypoints, dtype=np.float),
dtype=np.float,
fillvalue=-1
)
def get_keypoints(self):
"""Returns a list of keypoints."""
keypoints = []
image_fnames = self.get_image_filenames_annotations()
pose_annotations = self.get_pose_annotations()
for i, _ in enumerate(image_fnames):
image_pose_annotations = pose_annotations[i]
for _, pose in enumerate(image_pose_annotations):
keypoints.append(pose['keypoints'])
return keypoints
# -----------------------------------------------------------
# Metadata lists
# -----------------------------------------------------------
class SinglePersonPerImageList(CustomBaseField):
"""Single persons per image list field metadata process/save class."""
@display_message_processing('list_single_person_per_image')
def process(self):
"""Processes and saves the single persons per image metadata to hdf5."""
single_person_per_image = self.get_list_single_person_per_image()
self.save_field_to_hdf5(
set_name=self.set_name,
field='list_single_person_per_image',
data=np.array(pad_list(single_person_per_image, val=-1), dtype=np.int32),
dtype=np.int32,
fillvalue=-1
)
def get_list_single_person_per_image(self):
"""Returns a list of single persons ids per image."""
single_person_per_image = []
counter = 0
single_person_annotations = self.get_single_person_annotations()
for single_person in single_person_annotations:
single_persons = []
for val in single_person:
if val == 1:
single_persons.append(counter)
counter += 1
single_person_per_image.append(single_persons)
return single_person_per_image
class KeypointsPerImageList(CustomBaseField):
"""Keypoints per image list field metadata process/save class."""
@display_message_processing('list_keypoints_per_image')
def process(self):
"""Processes and saves the keypoints per image metadata to hdf5."""
keypoints_per_image = self.get_list_keypoints_per_image()
self.save_field_to_hdf5(
set_name=self.set_name,
field='list_keypoints_per_image',
data=np.array(pad_list(keypoints_per_image, val=-1), dtype=np.int32),
dtype=np.int32,
fillvalue=-1
)
def get_list_keypoints_per_image(self):
"""Returns a list of keypoints ids per image."""
keypoints_per_image = []
keypoints_empty = [[0, 0, 0]] * 16
counter = 0
image_fnames = self.get_image_filenames_annotations()
pose_annotations = self.get_pose_annotations()
for i, _ in enumerate(image_fnames):
keypoints_image = []
image_pose_annotations = pose_annotations[i]
for _, pose in enumerate(image_pose_annotations):
keypoints = pose['keypoints']
if keypoints != keypoints_empty:
keypoints_image.append(counter)
counter += 1
keypoints_per_image.append(keypoints_image)
return keypoints_per_image
# -----------------------------------------------------------
# Additional tasks
# -----------------------------------------------------------
class KeypointsClean(Keypoints):
"""MPII Keypoints (clean annotations) task class."""
# metadata filename
filename_h5 = 'keypoint_clean'
is_full = False
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import math
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
stats = try_import("scipy.stats")
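# scipy is an optional test dependency: when the import above fails, the
# tests below still run but skip their comparisons against scipy.stats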
class NormalTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(123)
def assertAllFinite(self, tensor):
is_finite = np.isfinite(self.evaluate(tensor))
all_true = np.ones_like(is_finite, dtype=np.bool)
self.assertAllEqual(all_true, is_finite)
def _testParamShapes(self, sample_shape, expected):
param_shapes = normal_lib.Normal.param_shapes(sample_shape)
mu_shape, sigma_shape = param_shapes["loc"], param_shapes["scale"]
self.assertAllEqual(expected, self.evaluate(mu_shape))
self.assertAllEqual(expected, self.evaluate(sigma_shape))
mu = array_ops.zeros(mu_shape)
sigma = array_ops.ones(sigma_shape)
self.assertAllEqual(
expected,
self.evaluate(array_ops.shape(normal_lib.Normal(mu, sigma).sample())))
def _testParamStaticShapes(self, sample_shape, expected):
param_shapes = normal_lib.Normal.param_static_shapes(sample_shape)
mu_shape, sigma_shape = param_shapes["loc"], param_shapes["scale"]
self.assertEqual(expected, mu_shape)
self.assertEqual(expected, sigma_shape)
@test_util.run_in_graph_and_eager_modes
def testSampleLikeArgsGetDistDType(self):
dist = normal_lib.Normal(0., 1.)
self.assertEqual(dtypes.float32, dist.dtype)
for method in ("log_prob", "prob", "log_cdf", "cdf",
"log_survival_function", "survival_function", "quantile"):
self.assertEqual(dtypes.float32, getattr(dist, method)(1).dtype)
@test_util.run_in_graph_and_eager_modes
def testParamShapes(self):
sample_shape = [10, 3, 4]
self._testParamShapes(sample_shape, sample_shape)
self._testParamShapes(constant_op.constant(sample_shape), sample_shape)
@test_util.run_in_graph_and_eager_modes
def testParamStaticShapes(self):
sample_shape = [10, 3, 4]
self._testParamStaticShapes(sample_shape, sample_shape)
self._testParamStaticShapes(
tensor_shape.TensorShape(sample_shape), sample_shape)
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testNormalWithSoftplusScale(self):
mu = array_ops.zeros((10, 3))
rho = array_ops.ones((10, 3)) * -2.
normal = normal_lib.NormalWithSoftplusScale(loc=mu, scale=rho)
self.assertAllEqual(self.evaluate(mu), self.evaluate(normal.loc))
self.assertAllEqual(
self.evaluate(nn_ops.softplus(rho)), self.evaluate(normal.scale))
@test_util.run_in_graph_and_eager_modes
def testNormalLogPDF(self):
batch_size = 6
mu = constant_op.constant([3.0] * batch_size)
sigma = constant_op.constant([math.sqrt(10.0)] * batch_size)
x = np.array([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0], dtype=np.float32)
normal = normal_lib.Normal(loc=mu, scale=sigma)
log_pdf = normal.log_prob(x)
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), log_pdf.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()),
self.evaluate(log_pdf).shape)
self.assertAllEqual(normal.batch_shape, log_pdf.get_shape())
self.assertAllEqual(normal.batch_shape, self.evaluate(log_pdf).shape)
pdf = normal.prob(x)
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), pdf.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()),
self.evaluate(pdf).shape)
self.assertAllEqual(normal.batch_shape, pdf.get_shape())
self.assertAllEqual(normal.batch_shape, self.evaluate(pdf).shape)
if not stats:
return
expected_log_pdf = stats.norm(self.evaluate(mu),
self.evaluate(sigma)).logpdf(x)
self.assertAllClose(expected_log_pdf, self.evaluate(log_pdf))
self.assertAllClose(np.exp(expected_log_pdf), self.evaluate(pdf))
@test_util.run_in_graph_and_eager_modes
def testNormalLogPDFMultidimensional(self):
batch_size = 6
mu = constant_op.constant([[3.0, -3.0]] * batch_size)
sigma = constant_op.constant(
[[math.sqrt(10.0), math.sqrt(15.0)]] * batch_size)
x = np.array([[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]], dtype=np.float32).T
normal = normal_lib.Normal(loc=mu, scale=sigma)
log_pdf = normal.log_prob(x)
log_pdf_values = self.evaluate(log_pdf)
self.assertEqual(log_pdf.get_shape(), (6, 2))
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), log_pdf.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()),
self.evaluate(log_pdf).shape)
self.assertAllEqual(normal.batch_shape, log_pdf.get_shape())
self.assertAllEqual(normal.batch_shape, self.evaluate(log_pdf).shape)
pdf = normal.prob(x)
pdf_values = self.evaluate(pdf)
self.assertEqual(pdf.get_shape(), (6, 2))
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), pdf.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), pdf_values.shape)
self.assertAllEqual(normal.batch_shape, pdf.get_shape())
self.assertAllEqual(normal.batch_shape, pdf_values.shape)
if not stats:
return
expected_log_pdf = stats.norm(self.evaluate(mu),
self.evaluate(sigma)).logpdf(x)
self.assertAllClose(expected_log_pdf, log_pdf_values)
self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
@test_util.run_in_graph_and_eager_modes
def testNormalCDF(self):
batch_size = 50
mu = self._rng.randn(batch_size)
sigma = self._rng.rand(batch_size) + 1.0
x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
normal = normal_lib.Normal(loc=mu, scale=sigma)
cdf = normal.cdf(x)
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), cdf.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()),
self.evaluate(cdf).shape)
self.assertAllEqual(normal.batch_shape, cdf.get_shape())
self.assertAllEqual(normal.batch_shape, self.evaluate(cdf).shape)
if not stats:
return
expected_cdf = stats.norm(mu, sigma).cdf(x)
self.assertAllClose(expected_cdf, self.evaluate(cdf), atol=0)
@test_util.run_in_graph_and_eager_modes
def testNormalSurvivalFunction(self):
batch_size = 50
mu = self._rng.randn(batch_size)
sigma = self._rng.rand(batch_size) + 1.0
x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
normal = normal_lib.Normal(loc=mu, scale=sigma)
sf = normal.survival_function(x)
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), sf.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()),
self.evaluate(sf).shape)
self.assertAllEqual(normal.batch_shape, sf.get_shape())
self.assertAllEqual(normal.batch_shape, self.evaluate(sf).shape)
if not stats:
return
expected_sf = stats.norm(mu, sigma).sf(x)
self.assertAllClose(expected_sf, self.evaluate(sf), atol=0)
@test_util.run_in_graph_and_eager_modes
def testNormalLogCDF(self):
batch_size = 50
mu = self._rng.randn(batch_size)
sigma = self._rng.rand(batch_size) + 1.0
x = np.linspace(-100.0, 10.0, batch_size).astype(np.float64)
normal = normal_lib.Normal(loc=mu, scale=sigma)
cdf = normal.log_cdf(x)
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), cdf.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()),
self.evaluate(cdf).shape)
self.assertAllEqual(normal.batch_shape, cdf.get_shape())
self.assertAllEqual(normal.batch_shape, self.evaluate(cdf).shape)
if not stats:
return
expected_cdf = stats.norm(mu, sigma).logcdf(x)
self.assertAllClose(expected_cdf, self.evaluate(cdf), atol=0, rtol=1e-3)
def testFiniteGradientAtDifficultPoints(self):
for dtype in [np.float32, np.float64]:
g = ops.Graph()
with g.as_default():
mu = variables.Variable(dtype(0.0))
sigma = variables.Variable(dtype(1.0))
dist = normal_lib.Normal(loc=mu, scale=sigma)
x = np.array([-100., -20., -5., 0., 5., 20., 100.]).astype(dtype)
for func in [
dist.cdf, dist.log_cdf, dist.survival_function,
dist.log_survival_function, dist.log_prob, dist.prob
]:
value = func(x)
grads = gradients_impl.gradients(value, [mu, sigma])
with self.session(graph=g):
variables.global_variables_initializer().run()
self.assertAllFinite(value)
self.assertAllFinite(grads[0])
self.assertAllFinite(grads[1])
@test_util.run_in_graph_and_eager_modes
def testNormalLogSurvivalFunction(self):
batch_size = 50
mu = self._rng.randn(batch_size)
sigma = self._rng.rand(batch_size) + 1.0
x = np.linspace(-10.0, 100.0, batch_size).astype(np.float64)
normal = normal_lib.Normal(loc=mu, scale=sigma)
sf = normal.log_survival_function(x)
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), sf.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()),
self.evaluate(sf).shape)
self.assertAllEqual(normal.batch_shape, sf.get_shape())
self.assertAllEqual(normal.batch_shape, self.evaluate(sf).shape)
if not stats:
return
expected_sf = stats.norm(mu, sigma).logsf(x)
self.assertAllClose(expected_sf, self.evaluate(sf), atol=0, rtol=1e-5)
@test_util.run_in_graph_and_eager_modes
def testNormalEntropyWithScalarInputs(self):
    # scipy.stats.norm cannot deal with the shapes in the other test.
mu_v = 2.34
sigma_v = 4.56
normal = normal_lib.Normal(loc=mu_v, scale=sigma_v)
entropy = normal.entropy()
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), entropy.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()),
self.evaluate(entropy).shape)
self.assertAllEqual(normal.batch_shape, entropy.get_shape())
self.assertAllEqual(normal.batch_shape, self.evaluate(entropy).shape)
    # Skip the comparison against scipy.stats.norm if scipy is unavailable.
if not stats:
return
expected_entropy = stats.norm(mu_v, sigma_v).entropy()
self.assertAllClose(expected_entropy, self.evaluate(entropy))
@test_util.run_in_graph_and_eager_modes
def testNormalEntropy(self):
mu_v = np.array([1.0, 1.0, 1.0])
sigma_v = np.array([[1.0, 2.0, 3.0]]).T
normal = normal_lib.Normal(loc=mu_v, scale=sigma_v)
# scipy.stats.norm cannot deal with these shapes.
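    # mu_v is a vector of ones, so multiplying it by sigma_v simply broadcasts
    # sigma to the full (3, 3) batch shape.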
sigma_broadcast = mu_v * sigma_v
expected_entropy = 0.5 * np.log(2 * np.pi * np.exp(1) * sigma_broadcast**2)
entropy = normal.entropy()
np.testing.assert_allclose(expected_entropy, self.evaluate(entropy))
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), entropy.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()),
self.evaluate(entropy).shape)
self.assertAllEqual(normal.batch_shape, entropy.get_shape())
self.assertAllEqual(normal.batch_shape, self.evaluate(entropy).shape)
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testNormalMeanAndMode(self):
# Mu will be broadcast to [7, 7, 7].
mu = [7.]
sigma = [11., 12., 13.]
normal = normal_lib.Normal(loc=mu, scale=sigma)
self.assertAllEqual((3,), normal.mean().get_shape())
self.assertAllEqual([7., 7, 7], self.evaluate(normal.mean()))
self.assertAllEqual((3,), normal.mode().get_shape())
self.assertAllEqual([7., 7, 7], self.evaluate(normal.mode()))
@test_util.run_in_graph_and_eager_modes
def testNormalQuantile(self):
batch_size = 52
mu = self._rng.randn(batch_size)
sigma = self._rng.rand(batch_size) + 1.0
p = np.linspace(0., 1.0, batch_size - 2).astype(np.float64)
    # Quantile is implemented via piecewise rational approximation, so we add
    # some special input values to make sure we hit all of the pieces.
p = np.hstack((p, np.exp(-33), 1. - np.exp(-33)))
normal = normal_lib.Normal(loc=mu, scale=sigma)
x = normal.quantile(p)
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), x.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()),
self.evaluate(x).shape)
self.assertAllEqual(normal.batch_shape, x.get_shape())
self.assertAllEqual(normal.batch_shape, self.evaluate(x).shape)
if not stats:
return
expected_x = stats.norm(mu, sigma).ppf(p)
self.assertAllClose(expected_x, self.evaluate(x), atol=0.)
def _baseQuantileFiniteGradientAtDifficultPoints(self, dtype):
g = ops.Graph()
with g.as_default():
mu = variables.Variable(dtype(0.0))
sigma = variables.Variable(dtype(1.0))
dist = normal_lib.Normal(loc=mu, scale=sigma)
p = variables.Variable(
np.array([0.,
np.exp(-32.), np.exp(-2.),
1. - np.exp(-2.), 1. - np.exp(-32.),
1.]).astype(dtype))
value = dist.quantile(p)
grads = gradients_impl.gradients(value, [mu, p])
with self.cached_session(graph=g):
variables.global_variables_initializer().run()
self.assertAllFinite(grads[0])
self.assertAllFinite(grads[1])
def testQuantileFiniteGradientAtDifficultPointsFloat32(self):
self._baseQuantileFiniteGradientAtDifficultPoints(np.float32)
def testQuantileFiniteGradientAtDifficultPointsFloat64(self):
self._baseQuantileFiniteGradientAtDifficultPoints(np.float64)
@test_util.run_in_graph_and_eager_modes
def testNormalVariance(self):
# sigma will be broadcast to [7, 7, 7]
mu = [1., 2., 3.]
sigma = [7.]
normal = normal_lib.Normal(loc=mu, scale=sigma)
self.assertAllEqual((3,), normal.variance().get_shape())
self.assertAllEqual([49., 49, 49], self.evaluate(normal.variance()))
@test_util.run_in_graph_and_eager_modes
def testNormalStandardDeviation(self):
# sigma will be broadcast to [7, 7, 7]
mu = [1., 2., 3.]
sigma = [7.]
normal = normal_lib.Normal(loc=mu, scale=sigma)
self.assertAllEqual((3,), normal.stddev().get_shape())
self.assertAllEqual([7., 7, 7], self.evaluate(normal.stddev()))
@test_util.run_in_graph_and_eager_modes
def testNormalSample(self):
mu = constant_op.constant(3.0)
sigma = constant_op.constant(math.sqrt(3.0))
mu_v = 3.0
sigma_v = np.sqrt(3.0)
n = constant_op.constant(100000)
normal = normal_lib.Normal(loc=mu, scale=sigma)
samples = normal.sample(n)
sample_values = self.evaluate(samples)
# Note that the standard error for the sample mean is ~ sigma / sqrt(n).
# The sample variance similarly is dependent on sigma and n.
# Thus, the tolerances below are very sensitive to number of samples
# as well as the variances chosen.
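    # Concretely: with n = 100000 and sigma = sqrt(3) ~= 1.73, the standard
    # error of the sample mean is ~= 1.73 / sqrt(100000) ~= 0.0055, well
    # within the atol=1e-1 used below.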
self.assertEqual(sample_values.shape, (100000,))
self.assertAllClose(sample_values.mean(), mu_v, atol=1e-1)
self.assertAllClose(sample_values.std(), sigma_v, atol=1e-1)
expected_samples_shape = tensor_shape.TensorShape(
[self.evaluate(n)]).concatenate(
tensor_shape.TensorShape(
self.evaluate(normal.batch_shape_tensor())))
self.assertAllEqual(expected_samples_shape, samples.get_shape())
self.assertAllEqual(expected_samples_shape, sample_values.shape)
expected_samples_shape = (
tensor_shape.TensorShape([self.evaluate(n)]).concatenate(
normal.batch_shape))
self.assertAllEqual(expected_samples_shape, samples.get_shape())
self.assertAllEqual(expected_samples_shape, sample_values.shape)
def testNormalFullyReparameterized(self):
mu = constant_op.constant(4.0)
sigma = constant_op.constant(3.0)
with backprop.GradientTape() as tape:
tape.watch(mu)
tape.watch(sigma)
normal = normal_lib.Normal(loc=mu, scale=sigma)
samples = normal.sample(100)
grad_mu, grad_sigma = tape.gradient(samples, [mu, sigma])
self.assertIsNotNone(grad_mu)
self.assertIsNotNone(grad_sigma)
@test_util.run_in_graph_and_eager_modes
def testNormalSampleMultiDimensional(self):
batch_size = 2
mu = constant_op.constant([[3.0, -3.0]] * batch_size)
sigma = constant_op.constant(
[[math.sqrt(2.0), math.sqrt(3.0)]] * batch_size)
mu_v = [3.0, -3.0]
sigma_v = [np.sqrt(2.0), np.sqrt(3.0)]
n = constant_op.constant(100000)
normal = normal_lib.Normal(loc=mu, scale=sigma)
samples = normal.sample(n)
sample_values = self.evaluate(samples)
# Note that the standard error for the sample mean is ~ sigma / sqrt(n).
# The sample variance similarly is dependent on sigma and n.
# Thus, the tolerances below are very sensitive to number of samples
# as well as the variances chosen.
self.assertEqual(samples.get_shape(), (100000, batch_size, 2))
self.assertAllClose(sample_values[:, 0, 0].mean(), mu_v[0], atol=1e-1)
self.assertAllClose(sample_values[:, 0, 0].std(), sigma_v[0], atol=1e-1)
self.assertAllClose(sample_values[:, 0, 1].mean(), mu_v[1], atol=1e-1)
self.assertAllClose(sample_values[:, 0, 1].std(), sigma_v[1], atol=1e-1)
expected_samples_shape = tensor_shape.TensorShape(
[self.evaluate(n)]).concatenate(
tensor_shape.TensorShape(
self.evaluate(normal.batch_shape_tensor())))
self.assertAllEqual(expected_samples_shape, samples.get_shape())
self.assertAllEqual(expected_samples_shape, sample_values.shape)
expected_samples_shape = (
tensor_shape.TensorShape([self.evaluate(n)]).concatenate(
normal.batch_shape))
self.assertAllEqual(expected_samples_shape, samples.get_shape())
self.assertAllEqual(expected_samples_shape, sample_values.shape)
@test_util.run_in_graph_and_eager_modes
def testNegativeSigmaFails(self):
with self.assertRaisesOpError("Condition x > 0 did not hold"):
normal = normal_lib.Normal(
loc=[1.], scale=[-5.], validate_args=True, name="G")
self.evaluate(normal.mean())
@test_util.run_in_graph_and_eager_modes
def testNormalShape(self):
mu = constant_op.constant([-3.0] * 5)
sigma = constant_op.constant(11.0)
normal = normal_lib.Normal(loc=mu, scale=sigma)
self.assertEqual(self.evaluate(normal.batch_shape_tensor()), [5])
self.assertEqual(normal.batch_shape, tensor_shape.TensorShape([5]))
self.assertAllEqual(self.evaluate(normal.event_shape_tensor()), [])
self.assertEqual(normal.event_shape, tensor_shape.TensorShape([]))
def testNormalShapeWithPlaceholders(self):
mu = array_ops.placeholder(dtype=dtypes.float32)
sigma = array_ops.placeholder(dtype=dtypes.float32)
normal = normal_lib.Normal(loc=mu, scale=sigma)
with self.cached_session() as sess:
      # batch_shape should be unknown (TensorShape(None)) at graph
      # construction time, since mu and sigma are fed placeholders.
self.assertEqual(normal.batch_shape, tensor_shape.TensorShape(None))
self.assertEqual(normal.event_shape, ())
self.assertAllEqual(self.evaluate(normal.event_shape_tensor()), [])
self.assertAllEqual(
sess.run(normal.batch_shape_tensor(),
feed_dict={mu: 5.0,
sigma: [1.0, 2.0]}), [2])
@test_util.run_in_graph_and_eager_modes
def testNormalNormalKL(self):
batch_size = 6
mu_a = np.array([3.0] * batch_size)
sigma_a = np.array([1.0, 2.0, 3.0, 1.5, 2.5, 3.5])
mu_b = np.array([-3.0] * batch_size)
sigma_b = np.array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0])
n_a = normal_lib.Normal(loc=mu_a, scale=sigma_a)
n_b = normal_lib.Normal(loc=mu_b, scale=sigma_b)
kl = kullback_leibler.kl_divergence(n_a, n_b)
kl_val = self.evaluate(kl)
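    # Closed-form KL divergence between two normals:
    #   KL(N(mu_a, sigma_a^2) || N(mu_b, sigma_b^2))
    #     = log(sigma_b / sigma_a)
    #       + (sigma_a^2 + (mu_a - mu_b)^2) / (2 sigma_b^2) - 1/2.
    # The expression below is the same formula with the log term expanded.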
kl_expected = ((mu_a - mu_b)**2 / (2 * sigma_b**2) + 0.5 * (
(sigma_a**2 / sigma_b**2) - 1 - 2 * np.log(sigma_a / sigma_b)))
self.assertEqual(kl.get_shape(), (batch_size,))
self.assertAllClose(kl_val, kl_expected)
if __name__ == "__main__":
test.main()
|
from functools import partial
from menpo.feature import no_op
from menpofit.math import (IIRLRegression, IRLRegression, PCRRegression,
OptimalLinearRegression, OPPRegression)
from menpofit.modelinstance import OrthoPDM
from menpofit.error import euclidean_bb_normalised_error
from menpofit.result import MultiScaleParametricIterativeResult
from .base import (BaseSupervisedDescentAlgorithm,
compute_parametric_delta_x, features_per_image,
features_per_patch, update_parametric_estimates,
print_parametric_info, fit_parametric_shape)
class ParametricShapeSDAlgorithm(BaseSupervisedDescentAlgorithm):
r"""
Abstract class for training a cascaded-regression Supervised Descent
algorithm that employs a parametric shape model.
Parameters
----------
shape_model_cls : `subclass` of :map:`PDM`, optional
The class to be used for building the shape model. The most common
choice is :map:`OrthoPDM`.
"""
def __init__(self, shape_model_cls=OrthoPDM):
super(ParametricShapeSDAlgorithm, self).__init__()
self.regressors = []
self.shape_model_cls = shape_model_cls
self.shape_model = None
@property
def _multi_scale_fitter_result(self):
# The result class to be used by a multi-scale fitter
return MultiScaleParametricIterativeResult
def _compute_delta_x(self, gt_shapes, current_shapes):
# This is called first - so train shape model here
if self.shape_model is None:
self.shape_model = self.shape_model_cls(gt_shapes)
return compute_parametric_delta_x(gt_shapes, current_shapes,
self.shape_model)
def _update_estimates(self, estimated_delta_x, delta_x, gt_x,
current_shapes):
update_parametric_estimates(estimated_delta_x, delta_x, gt_x,
current_shapes, self.shape_model)
def _compute_training_features(self, images, gt_shapes, current_shapes,
prefix='', verbose=False):
        # Extract features from the patches around each current shape
return features_per_image(images, current_shapes, self.patch_shape,
self.patch_features, prefix=prefix,
verbose=verbose)
def _compute_test_features(self, image, current_shape):
return features_per_patch(image, current_shape,
self.patch_shape, self.patch_features)
def run(self, image, initial_shape, gt_shape=None, return_costs=False,
**kwargs):
r"""
        Run the algorithm on an image, starting from an initial shape.
Parameters
----------
image : `menpo.image.Image` or subclass
The image to be fitted.
initial_shape : `menpo.shape.PointCloud`
The initial shape from which the fitting procedure will start.
gt_shape : `menpo.shape.PointCloud` or ``None``, optional
The ground truth shape associated to the image.
return_costs : `bool`, optional
If ``True``, then the cost function values will be computed
during the fitting procedure. Then these cost values will be
assigned to the returned `fitting_result`. *Note that this
argument currently has no effect and will raise a warning if set
to ``True``. This is because it is not possible to evaluate the
cost function of this algorithm.*
Returns
-------
fitting_result: :map:`ParametricIterativeResult`
The result of the fitting procedure.
"""
return fit_parametric_shape(image, initial_shape, self,
gt_shape=gt_shape,
return_costs=return_costs)
def _print_regression_info(self, _, gt_shapes, n_perturbations,
delta_x, estimated_delta_x, level_index,
prefix=''):
print_parametric_info(self.shape_model, gt_shapes, n_perturbations,
delta_x, estimated_delta_x, level_index,
self._compute_error, prefix=prefix)
class ParametricShapeNewton(ParametricShapeSDAlgorithm):
r"""
Class for training a cascaded-regression algorithm that employs a
parametric shape model using Incremental Regularized Linear Regression
(:map:`IRLRegression`).
Parameters
----------
patch_features : `callable`, optional
The features to be extracted from the patches of an image.
patch_shape : `(int, int)`, optional
The shape of the extracted patches.
n_iterations : `int`, optional
The number of iterations (cascades).
shape_model_cls : `subclass` of :map:`PDM`, optional
The class to be used for building the shape model. The most common
choice is :map:`OrthoPDM`.
compute_error : `callable`, optional
The function to be used for computing the fitting error when training
each cascade.
alpha : `float`, optional
The regularization parameter.
bias : `bool`, optional
Flag that controls whether to use a bias term.
"""
def __init__(self, patch_features=no_op, patch_shape=(17, 17),
n_iterations=3, shape_model_cls=OrthoPDM,
compute_error=euclidean_bb_normalised_error,
alpha=0, bias=True):
super(ParametricShapeNewton, self).__init__(
shape_model_cls=shape_model_cls)
self._regressor_cls = partial(IRLRegression, alpha=alpha, bias=bias)
self.patch_shape = patch_shape
self.patch_features = patch_features
self.n_iterations = n_iterations
self._compute_error = compute_error
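# A minimal usage sketch for these algorithm classes. This is hedged: it
# assumes menpofit's SupervisedDescentFitter accepts an algorithm class
# through its `sd_algorithm_cls` argument (as in menpofit's SDM interface),
# and the names `training_images`, `test_image` and `bbox` are illustrative,
# not part of this module:
#
#     from menpofit.sdm import SupervisedDescentFitter
#
#     fitter = SupervisedDescentFitter(
#         training_images, sd_algorithm_cls=ParametricShapeNewton,
#         verbose=True)
#     result = fitter.fit_from_bb(test_image, bbox)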
class ParametricShapeGaussNewton(ParametricShapeSDAlgorithm):
r"""
Class for training a cascaded-regression algorithm that employs a
parametric shape model using Indirect Incremental Regularized Linear
Regression (:map:`IIRLRegression`).
Parameters
----------
patch_features : `callable`, optional
The features to be extracted from the patches of an image.
patch_shape : `(int, int)`, optional
The shape of the extracted patches.
n_iterations : `int`, optional
The number of iterations (cascades).
shape_model_cls : `subclass` of :map:`PDM`, optional
The class to be used for building the shape model. The most common
choice is :map:`OrthoPDM`.
compute_error : `callable`, optional
The function to be used for computing the fitting error when training
each cascade.
alpha : `float`, optional
The regularization parameter.
bias : `bool`, optional
Flag that controls whether to use a bias term.
alpha2 : `float`, optional
The regularization parameter of the Hessian matrix.
"""
def __init__(self, patch_features=no_op, patch_shape=(17, 17),
n_iterations=3, shape_model_cls=OrthoPDM,
compute_error=euclidean_bb_normalised_error,
alpha=0, bias=True, alpha2=0):
super(ParametricShapeGaussNewton, self).__init__(
shape_model_cls=shape_model_cls)
self._regressor_cls = partial(IIRLRegression, alpha=alpha, bias=bias,
alpha2=alpha2)
self.patch_shape = patch_shape
self.patch_features = patch_features
self.n_iterations = n_iterations
self._compute_error = compute_error
class ParametricShapeOptimalRegression(ParametricShapeSDAlgorithm):
r"""
Class for training a cascaded-regression algorithm that employs a parametric
shape model using Multivariate Linear Regression with optimal
reconstructions (:map:`OptimalLinearRegression`).
Parameters
----------
patch_features : `callable`, optional
The features to be extracted from the patches of an image.
patch_shape : `(int, int)`, optional
The shape of the extracted patches.
n_iterations : `int`, optional
The number of iterations (cascades).
shape_model_cls : `subclass` of :map:`PDM`, optional
The class to be used for building the shape model. The most common
choice is :map:`OrthoPDM`.
compute_error : `callable`, optional
The function to be used for computing the fitting error when training
each cascade.
variance : `float` or ``None``, optional
The SVD variance.
bias : `bool`, optional
Flag that controls whether to use a bias term.
"""
def __init__(self, patch_features=no_op, patch_shape=(17, 17),
n_iterations=3, shape_model_cls=OrthoPDM,
compute_error=euclidean_bb_normalised_error,
variance=None, bias=True):
super(ParametricShapeOptimalRegression, self).__init__(
shape_model_cls=shape_model_cls)
self._regressor_cls = partial(OptimalLinearRegression,
variance=variance, bias=bias)
self.patch_shape = patch_shape
self.patch_features = patch_features
self.n_iterations = n_iterations
self._compute_error = compute_error
class ParametricShapePCRRegression(ParametricShapeSDAlgorithm):
r"""
Class for training a cascaded-regression algorithm that employs a parametric
shape model using Principal Component Regression (:map:`PCRRegression`).
Parameters
----------
patch_features : `callable`, optional
The features to be extracted from the patches of an image.
patch_shape : `(int, int)`, optional
The shape of the extracted patches.
n_iterations : `int`, optional
The number of iterations (cascades).
shape_model_cls : `subclass` of :map:`PDM`, optional
The class to be used for building the shape model. The most common
choice is :map:`OrthoPDM`.
compute_error : `callable`, optional
The function to be used for computing the fitting error when training
each cascade.
variance : `float` or ``None``, optional
The SVD variance.
bias : `bool`, optional
Flag that controls whether to use a bias term.
Raises
------
ValueError
variance must be set to a number between 0 and 1
"""
def __init__(self, patch_features=no_op, patch_shape=(17, 17),
n_iterations=3, shape_model_cls=OrthoPDM,
compute_error=euclidean_bb_normalised_error,
variance=None, bias=True):
super(ParametricShapePCRRegression, self).__init__(
shape_model_cls=shape_model_cls)
self._regressor_cls = partial(PCRRegression,
variance=variance, bias=bias)
self.patch_shape = patch_shape
self.patch_features = patch_features
self.n_iterations = n_iterations
self._compute_error = compute_error
class ParametricShapeOPPRegression(ParametricShapeSDAlgorithm):
r"""
Class for training a cascaded-regression algorithm that employs a parametric
shape model using Multivariate Linear Regression with Orthogonal Procrustes
Problem reconstructions (:map:`OPPRegression`).
Parameters
----------
patch_features : `callable`, optional
The features to be extracted from the patches of an image.
patch_shape : `(int, int)`, optional
The shape of the extracted patches.
n_iterations : `int`, optional
The number of iterations (cascades).
shape_model_cls : `subclass` of :map:`PDM`, optional
The class to be used for building the shape model. The most common
choice is :map:`OrthoPDM`.
compute_error : `callable`, optional
The function to be used for computing the fitting error when training
each cascade.
whiten : `bool`, optional
Whether to use a whitened PCA model.
bias : `bool`, optional
Flag that controls whether to use a bias term.
"""
def __init__(self, patch_features=no_op, patch_shape=(17, 17),
n_iterations=3, shape_model_cls=OrthoPDM,
compute_error=euclidean_bb_normalised_error,
whiten=False, bias=True):
super(ParametricShapeOPPRegression, self).__init__(
shape_model_cls=shape_model_cls)
self._regressor_cls = partial(OPPRegression,
whiten=whiten, bias=bias)
self.patch_shape = patch_shape
self.patch_features = patch_features
self.n_iterations = n_iterations
self._compute_error = compute_error
|