code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
from enum import Enum
import numpy as np
import scipy as sp
from scipy import sparse
from . import rulsif
def sq_puc_tr_rulsif(xp_tr, xu_tr, xu_te, prior, lambda_list=np.logspace(-3, 0, num=11),
gamma_list=None, sigma_list=None, n_fold=5, n_basis=200, kertype='gauss'):
if gamma_list is None:
gamma_list = [0.01, .05, .25, .5, .75, .95, .99]
if isinstance(kertype, Enum):
kertype = kertype.value
np_tr, d = xp_tr.shape
nu_tr = xu_tr.shape[0]
nu_te = xu_te.shape[0]
is_sparse = sparse.issparse(xp_tr)
if kertype == 'gauss':
b = np.minimum(n_basis, nu_te)
center_index = np.random.permutation(nu_te)
xc = xu_te[center_index[:b], :]
dp = squared_dist(xp_tr, xc)
du = squared_dist(xu_tr, xc)
if sigma_list is None:
med = np.median(du.ravel())
sigma_list = np.sqrt(med)*np.logspace(-1, 1, num=11)
else:
sigma_list = [0]
b = d + 1
if is_sparse:
dp = sparse.hstack((xp_tr, sparse.csr_matrix(np.ones((np_tr, 1)))), format='csr')
du = sparse.hstack((xu_tr, sparse.csr_matrix(np.ones((nu_tr, 1)))), format='csr')
else:
dp = np.c_[xp_tr, np.ones(np_tr)]
du = np.c_[xu_tr, np.ones(nu_tr)]
n_gamma, n_sigma, n_lambda = len(gamma_list), len(sigma_list), len(lambda_list)
mix_rate_list = gamma_list
if 0 not in mix_rate_list:
mix_rate_list = np.append(mix_rate_list, 0)
else:
raise Exception('exception for now')
wm = rulsif.rulsif_cv(xu_tr, xu_te, mix_rate_list=mix_rate_list)
wph_list = {}
wuh_list = {}
ite_gam = 0
for ite_mix in range(len(mix_rate_list)):
if mix_rate_list[ite_mix] == 0:
wph0 = np.array(rulsif.est_w(xp_tr, wm[ite_mix])).squeeze()
wuh0 = np.array(rulsif.est_w(xu_tr, wm[ite_mix])).squeeze()
else:
wph_list[ite_gam] = np.array(rulsif.est_w(xp_tr, wm[ite_mix])).squeeze()
wuh_list[ite_gam] = np.array(rulsif.est_w(xu_tr, wm[ite_mix])).squeeze()
ite_gam += 1
cv_index_p_tr = (np.arange(np_tr, dtype=np.int_)*n_fold)//np_tr
cv_index_p_tr = cv_index_p_tr[np.random.permutation(np_tr)]
cv_index_u_tr = (np.arange(nu_tr, dtype=np.int_)*n_fold)//nu_tr
cv_index_u_tr = cv_index_u_tr[np.random.permutation(nu_tr)]
score_cv_fold = np.zeros((n_gamma, n_sigma, n_lambda, n_fold))
for ite_fold in range(n_fold):
dp_tr_cvtr = dp[cv_index_p_tr != ite_fold, :]
dp_tr_cvte = dp[cv_index_p_tr == ite_fold, :]
du_tr_cvtr = du[cv_index_u_tr != ite_fold, :]
du_tr_cvte = du[cv_index_u_tr == ite_fold, :]
for ite_sigma, sigma in enumerate(sigma_list):
if kertype == 'gauss':
Kp_tr_cvtr = np.exp(-dp_tr_cvtr/(2*sigma**2))
Kp_tr_cvte = np.exp(-dp_tr_cvte/(2*sigma**2))
Ku_tr_cvtr = np.exp(-du_tr_cvtr/(2*sigma**2))
Ku_tr_cvte = np.exp(-du_tr_cvte/(2*sigma**2))
else:
Kp_tr_cvtr = dp_tr_cvtr
Kp_tr_cvte = dp_tr_cvte
Ku_tr_cvtr = du_tr_cvtr
Ku_tr_cvte = du_tr_cvte
for ite_gamma in range(n_gamma):
gamma = gamma_list[ite_gamma]
wph_tr = (wph_list[ite_gamma])[cv_index_p_tr != ite_fold]
wph_te = (wph0)[cv_index_p_tr == ite_fold]
wuh_tr = (wuh_list[ite_gamma])[cv_index_u_tr != ite_fold]
wuh_te = (wuh0)[cv_index_u_tr == ite_fold]
Hu = Ku_tr_cvtr.T.dot(np.diag(wuh_tr)).dot(Ku_tr_cvtr)/Ku_tr_cvtr.shape[0]
hp = prior*wph_tr.dot(Kp_tr_cvtr).T/Kp_tr_cvtr.shape[0]
hu = wuh_tr.dot(Ku_tr_cvtr).T/Ku_tr_cvtr.shape[0]
for ite_lambda, lam in enumerate(lambda_list):
Reg = lam*np.eye(b)
if kertype != 'gauss':
Reg[b-1, b-1] = 0
alpha_cv = sp.linalg.solve(Hu + Reg, 2*hp - hu)
score_cv_fold[ite_gamma, ite_sigma, ite_lambda, ite_fold] \
= risk_puc_tr(Kp_tr_cvte, Ku_tr_cvte, alpha_cv, prior, wph_te, wuh_te)
score_cv = np.mean(score_cv_fold, axis=3)
tmp = np.argmin(score_cv.ravel())
tmp = np.unravel_index(tmp, score_cv.shape)
gamma_index, sigma_index, lambda_index = tmp[0], tmp[1], tmp[2]
gamma = gamma_list[gamma_index]
sigma = sigma_list[sigma_index]
lam = lambda_list[lambda_index]
print("(gamma, sigma, lambda) = ({:.2f}, {:2f}, {:6f})".format(gamma, sigma, lam))
if kertype == 'gauss':
Kp_tr = np.exp(-dp/(2*sigma**2))
Ku_tr = np.exp(-du/(2*sigma**2))
else:
Kp_tr = dp
Ku_tr = du
wph = wph_list[gamma_index]
wuh = wuh_list[gamma_index]
Reg = lam*np.eye(b)
if kertype != 'gauss':
Reg[b-1, b-1] = 0
Hu = Ku_tr.T.dot(np.diag(wuh)).dot(Ku_tr)/Ku_tr.shape[0]
hp = prior*wph.dot(Kp_tr).T/Kp_tr.shape[0]
hu = wuh.dot(Ku_tr).T/Ku_tr.shape[0]
alpha = sp.linalg.solve(Hu + Reg, 2*hp - hu)
model = dict()
model['kertype'] = kertype
model['gamma'] = gamma
model['sigma'] = sigma
model['lambda'] = lam
model['alpha'] = alpha
for index, gam in enumerate(mix_rate_list):
if gam == gamma:
model['wm'] = wm[index]
break
if kertype == 'gauss':
model['center'] = xc
else:
model['bias'] = True
return model
fit = sq_puc_tr_rulsif
def decision_function(model, x_te):
if model['kertype'] == 'gauss':
K = gaussian_kernel(squared_dist(x_te, model['center']), model['sigma'])
else:
if model['bias']:
if sparse.issparse(x_te):
K = sparse.hstack((x_te, np.ones((x_te.shape[0], 1))), format='csr')
else:
K = np.c_[x_te, np.ones(x_te.shape[0])]
else:
K = x_te
return K.dot(model['alpha'])
def risk_puc_tr(Kp, Ku, alpha, prior, wp, wu):
rp_p = np.mean(wp*(Kp.dot(alpha) <= 0))
rp_n = np.mean(wp*(Kp.dot(alpha) >= 0))
ru_n = np.mean(wu*(Ku.dot(alpha) >= 0))
risk = prior*rp_p + np.maximum(0, ru_n - prior*rp_n)
return risk
def logilos(m):
return sp.misc.logsumexp(np.c_[np.zeros(len(m)), -m], axis=1)
def squared_dist(x, c):
n1 = x.shape[0]
n2 = c.shape[0]
if sparse.issparse(x):
dist2 = x.power(2).sum(axis=1).reshape((n1, 1)) \
+ c.power(2).sum(axis=1).reshape((n2, 1)).T - 2*x.dot(c.T)
else:
dist2 = np.sum(x**2, axis=1).reshape((n1, 1)) \
+ np.sum(c**2, axis=1).reshape((n2, 1)).T - 2*x.dot(c.T)
return dist2
def gaussian_kernel(dist2, sigma):
return np.exp(-dist2/(2*sigma**2)) | src/puc/pu.py | from enum import Enum
import numpy as np
import scipy as sp
from scipy import sparse
from . import rulsif
def sq_puc_tr_rulsif(xp_tr, xu_tr, xu_te, prior, lambda_list=np.logspace(-3, 0, num=11),
gamma_list=None, sigma_list=None, n_fold=5, n_basis=200, kertype='gauss'):
if gamma_list is None:
gamma_list = [0.01, .05, .25, .5, .75, .95, .99]
if isinstance(kertype, Enum):
kertype = kertype.value
np_tr, d = xp_tr.shape
nu_tr = xu_tr.shape[0]
nu_te = xu_te.shape[0]
is_sparse = sparse.issparse(xp_tr)
if kertype == 'gauss':
b = np.minimum(n_basis, nu_te)
center_index = np.random.permutation(nu_te)
xc = xu_te[center_index[:b], :]
dp = squared_dist(xp_tr, xc)
du = squared_dist(xu_tr, xc)
if sigma_list is None:
med = np.median(du.ravel())
sigma_list = np.sqrt(med)*np.logspace(-1, 1, num=11)
else:
sigma_list = [0]
b = d + 1
if is_sparse:
dp = sparse.hstack((xp_tr, sparse.csr_matrix(np.ones((np_tr, 1)))), format='csr')
du = sparse.hstack((xu_tr, sparse.csr_matrix(np.ones((nu_tr, 1)))), format='csr')
else:
dp = np.c_[xp_tr, np.ones(np_tr)]
du = np.c_[xu_tr, np.ones(nu_tr)]
n_gamma, n_sigma, n_lambda = len(gamma_list), len(sigma_list), len(lambda_list)
mix_rate_list = gamma_list
if 0 not in mix_rate_list:
mix_rate_list = np.append(mix_rate_list, 0)
else:
raise Exception('exception for now')
wm = rulsif.rulsif_cv(xu_tr, xu_te, mix_rate_list=mix_rate_list)
wph_list = {}
wuh_list = {}
ite_gam = 0
for ite_mix in range(len(mix_rate_list)):
if mix_rate_list[ite_mix] == 0:
wph0 = np.array(rulsif.est_w(xp_tr, wm[ite_mix])).squeeze()
wuh0 = np.array(rulsif.est_w(xu_tr, wm[ite_mix])).squeeze()
else:
wph_list[ite_gam] = np.array(rulsif.est_w(xp_tr, wm[ite_mix])).squeeze()
wuh_list[ite_gam] = np.array(rulsif.est_w(xu_tr, wm[ite_mix])).squeeze()
ite_gam += 1
cv_index_p_tr = (np.arange(np_tr, dtype=np.int_)*n_fold)//np_tr
cv_index_p_tr = cv_index_p_tr[np.random.permutation(np_tr)]
cv_index_u_tr = (np.arange(nu_tr, dtype=np.int_)*n_fold)//nu_tr
cv_index_u_tr = cv_index_u_tr[np.random.permutation(nu_tr)]
score_cv_fold = np.zeros((n_gamma, n_sigma, n_lambda, n_fold))
for ite_fold in range(n_fold):
dp_tr_cvtr = dp[cv_index_p_tr != ite_fold, :]
dp_tr_cvte = dp[cv_index_p_tr == ite_fold, :]
du_tr_cvtr = du[cv_index_u_tr != ite_fold, :]
du_tr_cvte = du[cv_index_u_tr == ite_fold, :]
for ite_sigma, sigma in enumerate(sigma_list):
if kertype == 'gauss':
Kp_tr_cvtr = np.exp(-dp_tr_cvtr/(2*sigma**2))
Kp_tr_cvte = np.exp(-dp_tr_cvte/(2*sigma**2))
Ku_tr_cvtr = np.exp(-du_tr_cvtr/(2*sigma**2))
Ku_tr_cvte = np.exp(-du_tr_cvte/(2*sigma**2))
else:
Kp_tr_cvtr = dp_tr_cvtr
Kp_tr_cvte = dp_tr_cvte
Ku_tr_cvtr = du_tr_cvtr
Ku_tr_cvte = du_tr_cvte
for ite_gamma in range(n_gamma):
gamma = gamma_list[ite_gamma]
wph_tr = (wph_list[ite_gamma])[cv_index_p_tr != ite_fold]
wph_te = (wph0)[cv_index_p_tr == ite_fold]
wuh_tr = (wuh_list[ite_gamma])[cv_index_u_tr != ite_fold]
wuh_te = (wuh0)[cv_index_u_tr == ite_fold]
Hu = Ku_tr_cvtr.T.dot(np.diag(wuh_tr)).dot(Ku_tr_cvtr)/Ku_tr_cvtr.shape[0]
hp = prior*wph_tr.dot(Kp_tr_cvtr).T/Kp_tr_cvtr.shape[0]
hu = wuh_tr.dot(Ku_tr_cvtr).T/Ku_tr_cvtr.shape[0]
for ite_lambda, lam in enumerate(lambda_list):
Reg = lam*np.eye(b)
if kertype != 'gauss':
Reg[b-1, b-1] = 0
alpha_cv = sp.linalg.solve(Hu + Reg, 2*hp - hu)
score_cv_fold[ite_gamma, ite_sigma, ite_lambda, ite_fold] \
= risk_puc_tr(Kp_tr_cvte, Ku_tr_cvte, alpha_cv, prior, wph_te, wuh_te)
score_cv = np.mean(score_cv_fold, axis=3)
tmp = np.argmin(score_cv.ravel())
tmp = np.unravel_index(tmp, score_cv.shape)
gamma_index, sigma_index, lambda_index = tmp[0], tmp[1], tmp[2]
gamma = gamma_list[gamma_index]
sigma = sigma_list[sigma_index]
lam = lambda_list[lambda_index]
print("(gamma, sigma, lambda) = ({:.2f}, {:2f}, {:6f})".format(gamma, sigma, lam))
if kertype == 'gauss':
Kp_tr = np.exp(-dp/(2*sigma**2))
Ku_tr = np.exp(-du/(2*sigma**2))
else:
Kp_tr = dp
Ku_tr = du
wph = wph_list[gamma_index]
wuh = wuh_list[gamma_index]
Reg = lam*np.eye(b)
if kertype != 'gauss':
Reg[b-1, b-1] = 0
Hu = Ku_tr.T.dot(np.diag(wuh)).dot(Ku_tr)/Ku_tr.shape[0]
hp = prior*wph.dot(Kp_tr).T/Kp_tr.shape[0]
hu = wuh.dot(Ku_tr).T/Ku_tr.shape[0]
alpha = sp.linalg.solve(Hu + Reg, 2*hp - hu)
model = dict()
model['kertype'] = kertype
model['gamma'] = gamma
model['sigma'] = sigma
model['lambda'] = lam
model['alpha'] = alpha
for index, gam in enumerate(mix_rate_list):
if gam == gamma:
model['wm'] = wm[index]
break
if kertype == 'gauss':
model['center'] = xc
else:
model['bias'] = True
return model
fit = sq_puc_tr_rulsif
def decision_function(model, x_te):
if model['kertype'] == 'gauss':
K = gaussian_kernel(squared_dist(x_te, model['center']), model['sigma'])
else:
if model['bias']:
if sparse.issparse(x_te):
K = sparse.hstack((x_te, np.ones((x_te.shape[0], 1))), format='csr')
else:
K = np.c_[x_te, np.ones(x_te.shape[0])]
else:
K = x_te
return K.dot(model['alpha'])
def risk_puc_tr(Kp, Ku, alpha, prior, wp, wu):
rp_p = np.mean(wp*(Kp.dot(alpha) <= 0))
rp_n = np.mean(wp*(Kp.dot(alpha) >= 0))
ru_n = np.mean(wu*(Ku.dot(alpha) >= 0))
risk = prior*rp_p + np.maximum(0, ru_n - prior*rp_n)
return risk
def logilos(m):
return sp.misc.logsumexp(np.c_[np.zeros(len(m)), -m], axis=1)
def squared_dist(x, c):
n1 = x.shape[0]
n2 = c.shape[0]
if sparse.issparse(x):
dist2 = x.power(2).sum(axis=1).reshape((n1, 1)) \
+ c.power(2).sum(axis=1).reshape((n2, 1)).T - 2*x.dot(c.T)
else:
dist2 = np.sum(x**2, axis=1).reshape((n1, 1)) \
+ np.sum(c**2, axis=1).reshape((n2, 1)).T - 2*x.dot(c.T)
return dist2
def gaussian_kernel(dist2, sigma):
return np.exp(-dist2/(2*sigma**2)) | 0.354433 | 0.323821 |
# flake8: noqa
from builtins import _test_sink, _test_source
from typing import Awaitable, Callable, TypeVar
from pyre_extensions import ParameterSpecification
from pyre_extensions.type_variable_operators import Concatenate
P = ParameterSpecification("P")
def with_logging(f: Callable[[int], None]) -> Callable[[int], None]:
def inner(x: int) -> None:
_test_sink(x)
f(x)
return inner
@with_logging
def foo(x: int) -> None:
print(x)
def with_logging_no_sink(f: Callable[[int], None]) -> Callable[[int], None]:
def inner(x: int) -> None:
f(x)
return inner
@with_logging_no_sink
def foo_with_sink(x: int) -> None:
_test_sink(x)
print(x)
def with_logging_async(
f: Callable[[str], Awaitable[None]]
) -> Callable[[str], Awaitable[None]]:
async def inner(y: str) -> None:
try:
result = await f(y)
except Exception:
_test_sink(y)
return inner
@with_logging_async
async def foo_async(x: str) -> None:
print(x)
def with_logging_args_kwargs(f: Callable) -> Callable:
def inner(*args, **kwargs) -> None:
_test_sink(kwargs)
f(*args, **kwargs)
return inner
@with_logging_args_kwargs
def foo_args_kwargs(x: str) -> None:
print(x)
def with_logging_args_kwargs_no_sink(f: Callable) -> Callable:
def inner(*args, **kwargs) -> None:
f(*args, **kwargs)
return inner
@with_logging_args_kwargs_no_sink
def foo_args_kwargs_with_sink(x: str, y: int) -> None:
_test_sink(y)
def with_logging_sink(callable: Callable[[str], None]) -> Callable[[str], None]:
def inner(y: str) -> None:
_test_sink(y)
callable(y)
return inner
def with_logging_source(callable: Callable[[str], None]) -> Callable[[str], None]:
def inner(y: str) -> None:
callable(y + _test_source())
return inner
def fails_to_apply(f):
return f
@fails_to_apply
@with_logging_source
@fails_to_apply
@with_logging_sink
@fails_to_apply
def foo_with_shady_decorators(z: str) -> None:
print(z)
def with_named_logger(logger_name: str) -> Callable[[Callable], Callable]:
def _inner_decorator(f: Callable) -> Callable:
def inner(*args: object, **kwargs: object) -> None:
print("Logging to:", logger_name)
_test_sink(args)
f(*args, **kwargs)
return inner
return _inner_decorator
@with_named_logger("foo_logger")
def foo_using_decorator_factory(x: str) -> None:
print(x)
def with_logging_first_parameter(
f: Callable[Concatenate[int, P], None]
) -> Callable[Concatenate[int, P], None]:
def inner(first_parameter: int, *args: P.args, **kwargs: P.kwargs) -> None:
if first_parameter != 42:
_test_sink(first_parameter)
return
f(first_parameter, *args, **kwargs)
return inner
@with_logging_first_parameter
def foo_log_first_parameter(x: int, y: str) -> None:
print(x, y)
def with_logging_helper_functions(
f: Callable[P, Awaitable[None]]
) -> Callable[P, Awaitable[None]]:
async def inner(*args: P.args, **kwargs: P.kwargs) -> None:
try:
before(*args, **kwargs)
await f(*args, **kwargs)
after(*args, **kwargs)
except Exception as exception:
print(exception)
def before(*args: object, **kwargs: object) -> None:
print("before", args)
def after(*args: object, **kwargs: object) -> None:
print("after", kwargs)
_test_sink(args)
return inner
@with_logging_helper_functions
async def foo_with_helper_function(x: int, y: str) -> None:
print(x, y)
T = TypeVar("T", bound="Foo")
class Foo:
def sink_method(self, x: str) -> None:
print(x)
_test_sink(x)
@with_logging_args_kwargs_no_sink
def foo(self, x: str) -> None:
self.sink_method(x)
@with_logging_args_kwargs_no_sink
@with_logging_args_kwargs
@with_logging_args_kwargs_no_sink
def bar(self, x: str) -> None:
print(x)
@with_logging_args_kwargs_no_sink
def self_has_generic_type(self: T, other: T, x: str) -> None:
other.bar(x)
@classmethod
@with_logging_args_kwargs_no_sink
def some_class_method(cls, x: str) -> None:
cls().sink_method(x)
def main() -> None:
foo(_test_source())
foo_with_sink(_test_source())
await foo_async(_test_source())
foo_args_kwargs(_test_source())
# No issue because the taint is on the second parameter.
foo_args_kwargs_with_sink(_test_source(), 0)
# Issue.
foo_args_kwargs_with_sink("hello", _test_source())
foo_with_shady_decorators("hello")
foo_using_decorator_factory(_test_source())
foo_log_first_parameter(_test_source(), "hello")
foo_with_helper_function(_test_source(), "hello")
Foo().foo(_test_source())
Foo().bar(_test_source())
Foo().self_has_generic_type(Foo(), _test_source())
Foo.some_class_method(_test_source()) | source/interprocedural_analyses/taint/test/integration/decorator.py |
# flake8: noqa
from builtins import _test_sink, _test_source
from typing import Awaitable, Callable, TypeVar
from pyre_extensions import ParameterSpecification
from pyre_extensions.type_variable_operators import Concatenate
P = ParameterSpecification("P")
def with_logging(f: Callable[[int], None]) -> Callable[[int], None]:
def inner(x: int) -> None:
_test_sink(x)
f(x)
return inner
@with_logging
def foo(x: int) -> None:
print(x)
def with_logging_no_sink(f: Callable[[int], None]) -> Callable[[int], None]:
def inner(x: int) -> None:
f(x)
return inner
@with_logging_no_sink
def foo_with_sink(x: int) -> None:
_test_sink(x)
print(x)
def with_logging_async(
f: Callable[[str], Awaitable[None]]
) -> Callable[[str], Awaitable[None]]:
async def inner(y: str) -> None:
try:
result = await f(y)
except Exception:
_test_sink(y)
return inner
@with_logging_async
async def foo_async(x: str) -> None:
print(x)
def with_logging_args_kwargs(f: Callable) -> Callable:
def inner(*args, **kwargs) -> None:
_test_sink(kwargs)
f(*args, **kwargs)
return inner
@with_logging_args_kwargs
def foo_args_kwargs(x: str) -> None:
print(x)
def with_logging_args_kwargs_no_sink(f: Callable) -> Callable:
def inner(*args, **kwargs) -> None:
f(*args, **kwargs)
return inner
@with_logging_args_kwargs_no_sink
def foo_args_kwargs_with_sink(x: str, y: int) -> None:
_test_sink(y)
def with_logging_sink(callable: Callable[[str], None]) -> Callable[[str], None]:
def inner(y: str) -> None:
_test_sink(y)
callable(y)
return inner
def with_logging_source(callable: Callable[[str], None]) -> Callable[[str], None]:
def inner(y: str) -> None:
callable(y + _test_source())
return inner
def fails_to_apply(f):
return f
@fails_to_apply
@with_logging_source
@fails_to_apply
@with_logging_sink
@fails_to_apply
def foo_with_shady_decorators(z: str) -> None:
print(z)
def with_named_logger(logger_name: str) -> Callable[[Callable], Callable]:
def _inner_decorator(f: Callable) -> Callable:
def inner(*args: object, **kwargs: object) -> None:
print("Logging to:", logger_name)
_test_sink(args)
f(*args, **kwargs)
return inner
return _inner_decorator
@with_named_logger("foo_logger")
def foo_using_decorator_factory(x: str) -> None:
print(x)
def with_logging_first_parameter(
f: Callable[Concatenate[int, P], None]
) -> Callable[Concatenate[int, P], None]:
def inner(first_parameter: int, *args: P.args, **kwargs: P.kwargs) -> None:
if first_parameter != 42:
_test_sink(first_parameter)
return
f(first_parameter, *args, **kwargs)
return inner
@with_logging_first_parameter
def foo_log_first_parameter(x: int, y: str) -> None:
print(x, y)
def with_logging_helper_functions(
f: Callable[P, Awaitable[None]]
) -> Callable[P, Awaitable[None]]:
async def inner(*args: P.args, **kwargs: P.kwargs) -> None:
try:
before(*args, **kwargs)
await f(*args, **kwargs)
after(*args, **kwargs)
except Exception as exception:
print(exception)
def before(*args: object, **kwargs: object) -> None:
print("before", args)
def after(*args: object, **kwargs: object) -> None:
print("after", kwargs)
_test_sink(args)
return inner
@with_logging_helper_functions
async def foo_with_helper_function(x: int, y: str) -> None:
print(x, y)
T = TypeVar("T", bound="Foo")
class Foo:
def sink_method(self, x: str) -> None:
print(x)
_test_sink(x)
@with_logging_args_kwargs_no_sink
def foo(self, x: str) -> None:
self.sink_method(x)
@with_logging_args_kwargs_no_sink
@with_logging_args_kwargs
@with_logging_args_kwargs_no_sink
def bar(self, x: str) -> None:
print(x)
@with_logging_args_kwargs_no_sink
def self_has_generic_type(self: T, other: T, x: str) -> None:
other.bar(x)
@classmethod
@with_logging_args_kwargs_no_sink
def some_class_method(cls, x: str) -> None:
cls().sink_method(x)
def main() -> None:
foo(_test_source())
foo_with_sink(_test_source())
await foo_async(_test_source())
foo_args_kwargs(_test_source())
# No issue because the taint is on the second parameter.
foo_args_kwargs_with_sink(_test_source(), 0)
# Issue.
foo_args_kwargs_with_sink("hello", _test_source())
foo_with_shady_decorators("hello")
foo_using_decorator_factory(_test_source())
foo_log_first_parameter(_test_source(), "hello")
foo_with_helper_function(_test_source(), "hello")
Foo().foo(_test_source())
Foo().bar(_test_source())
Foo().self_has_generic_type(Foo(), _test_source())
Foo.some_class_method(_test_source()) | 0.763307 | 0.419232 |
import os
from time import sleep
import subprocess
import shlex
from pathlib import Path
from urllib.error import URLError
from urllib.request import urlopen
import common
import upload_file_manage
import upload_process
# ログの設定
logger = common.logger_setup(__name__, True)
# インターネットに接続出来るか確認する
def is_internet_access():
try:
# google.comにアクセス出来るか
# Proxy環境とかは考慮してない
urlopen('https://www.google.com', timeout=1)
# 例外としてエラーが返ってくる場合、アクセスできないとみなす
except URLError as e:
logger.error(e)
return False
return True
# .h264形式の動画を.mp4へ変換する
def convert_h264_to_mp4():
is_convert = False
camera_path = common.get_camera_path()
for file in Path(camera_path).glob("*.h264"):
file_name = str(file)
if not common.is_uploadable_time_stamp(file_name):
continue
logger.info('Convert h264 to mp4 start :{}'.format(file_name))
# コマンド生成
cmd = 'MP4Box -fps 30 -add ' + file_name + \
' -new ' + file_name.rstrip('h264') + 'mp4'
logger.debug(cmd)
args = shlex.split(cmd)
logger.debug(str(args))
subprocess.run(args)
logger.info('Convert h264 to mp4 end')
# 変換前のファイルは削除する
os.remove(file_name)
is_convert = True
return is_convert
# アップロード対象のファイルがなくなるまでアップロードを繰り返す
def upload():
while True:
upload_file_list = []
album_name = upload_file_manage.get_upload_files(
common.get_camera_path(), upload_file_list)
logger.info(
'album_name:{}, upload file={}'.format(
album_name,
len(upload_file_list)))
if len(upload_file_list):
upload_process.file_upload(album_name, upload_file_list)
else:
break
# 実行中のプロセス内にtimer_camera.pyがあるか確認する
def is_run_camera():
try:
cmd = 'ps aux | grep timer_camera.py | grep -v grep | wc -l'
if subprocess.check_output(cmd, shell=True).decode(
'utf-8').strip() == '0':
return False
except Exception as e:
logger.error(e)
return True
def main():
logger.info('--- UPLOADER START ---')
# アップロード処理が重複しないようにカメラが動作していないことを確認する
if not is_run_camera():
# インターネット接続出来ているか確認する
if is_internet_access():
logger.info('Internet access OK')
if convert_h264_to_mp4():
# 変換した動画もアップロード対象にしたいので70秒待つ
sleep(70)
upload()
else:
logger.info('Internet access NG')
else:
logger.info("The camera is running, so it won't upload.")
logger.info('--- UPLOADER END ---')
if __name__ == '__main__':
main() | src/uploader.py |
import os
from time import sleep
import subprocess
import shlex
from pathlib import Path
from urllib.error import URLError
from urllib.request import urlopen
import common
import upload_file_manage
import upload_process
# ログの設定
logger = common.logger_setup(__name__, True)
# インターネットに接続出来るか確認する
def is_internet_access():
try:
# google.comにアクセス出来るか
# Proxy環境とかは考慮してない
urlopen('https://www.google.com', timeout=1)
# 例外としてエラーが返ってくる場合、アクセスできないとみなす
except URLError as e:
logger.error(e)
return False
return True
# .h264形式の動画を.mp4へ変換する
def convert_h264_to_mp4():
is_convert = False
camera_path = common.get_camera_path()
for file in Path(camera_path).glob("*.h264"):
file_name = str(file)
if not common.is_uploadable_time_stamp(file_name):
continue
logger.info('Convert h264 to mp4 start :{}'.format(file_name))
# コマンド生成
cmd = 'MP4Box -fps 30 -add ' + file_name + \
' -new ' + file_name.rstrip('h264') + 'mp4'
logger.debug(cmd)
args = shlex.split(cmd)
logger.debug(str(args))
subprocess.run(args)
logger.info('Convert h264 to mp4 end')
# 変換前のファイルは削除する
os.remove(file_name)
is_convert = True
return is_convert
# アップロード対象のファイルがなくなるまでアップロードを繰り返す
def upload():
while True:
upload_file_list = []
album_name = upload_file_manage.get_upload_files(
common.get_camera_path(), upload_file_list)
logger.info(
'album_name:{}, upload file={}'.format(
album_name,
len(upload_file_list)))
if len(upload_file_list):
upload_process.file_upload(album_name, upload_file_list)
else:
break
# 実行中のプロセス内にtimer_camera.pyがあるか確認する
def is_run_camera():
try:
cmd = 'ps aux | grep timer_camera.py | grep -v grep | wc -l'
if subprocess.check_output(cmd, shell=True).decode(
'utf-8').strip() == '0':
return False
except Exception as e:
logger.error(e)
return True
def main():
logger.info('--- UPLOADER START ---')
# アップロード処理が重複しないようにカメラが動作していないことを確認する
if not is_run_camera():
# インターネット接続出来ているか確認する
if is_internet_access():
logger.info('Internet access OK')
if convert_h264_to_mp4():
# 変換した動画もアップロード対象にしたいので70秒待つ
sleep(70)
upload()
else:
logger.info('Internet access NG')
else:
logger.info("The camera is running, so it won't upload.")
logger.info('--- UPLOADER END ---')
if __name__ == '__main__':
main() | 0.268462 | 0.100348 |
import http.client
import json
import logging
import os
import fnmatch
from collections import Counter
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import parse_qs, urlparse
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Config
SLACK_WEBHOOK_URL = os.environ.get("SLACK_WEBHOOK_URL")
HEROKU_API_KEY = os.environ.get("HEROKU_API_KEY")
BASE_HEROKU_API_URL = "https://api.heroku.com"
ALLOWLIST_APP_PATTERNS = os.environ.get("ALLOWLIST_APP_PATTERNS", "").split(",")
SECRET_KEY = os.environ.get(
"SECRET_KEY", ""
) # Key used to authorise requests to this endpoint
EVENT_THRESHOLD = 2 # Only restart if there are at least this many events for a dyno
HEROKU_HEADERS = {
"Content-Type": "application/json",
"Accept": "application/vnd.heroku+json; version=3",
"Authorization": f"Bearer {HEROKU_API_KEY}",
}
class RequestError(Exception):
def __init__(self, *args, **kwargs):
self.response = kwargs.pop("response")
self.request_url = kwargs.pop("request_url")
super().__init__(*args, **kwargs)
@dataclass(eq=True, frozen=True)
class Dyno:
app: str
dyno: str
def __str__(self):
return f"{self.app} {self.dyno}"
def should_restart(self):
status = self.status()
if status["state"] == "starting":
logger.warning(
f"Dyno {self} should not restart as it is in a 'starting' state"
)
return False
if datetime.strptime(
status["created_at"], "%Y-%m-%dT%H:%M:%S%z"
) >= datetime.now(timezone.utc) - timedelta(minutes=2):
logger.warning(
f"Dyno {self} should not restart as it was created less than 2 minutes ago"
)
return False
heroku_status = json.loads(
do_request("GET", "https://status.heroku.com/api/v4/current-status").read()
)
for system in heroku_status["status"]:
if system["system"] == "Apps" and system["status"] == "red":
logger.warning(
f"Dyno {self} should not restart as there is an ongoing Heroku outage"
)
return False
return True
def restart(self):
res = do_request(
"DELETE",
f"{BASE_HEROKU_API_URL}/apps/{self.app}/dynos/{self.dyno}",
headers=HEROKU_HEADERS,
)
logger.info(f"Dyno {self.dyno} successfully restarted")
def status(self):
res = do_request(
"GET",
f"{BASE_HEROKU_API_URL}/apps/{self.app}/dynos/{self.dyno}",
headers=HEROKU_HEADERS,
)
return json.loads(res.read())
class WebhookRequestHandler(BaseHTTPRequestHandler):
def send_html_response(self, status, body):
self.send_response(status)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(body)
def do_POST(self):
url_parts = urlparse(self.path)
querystring = parse_qs(url_parts.query)
if querystring.get("key", [])[0] != SECRET_KEY:
self.send_html_response(403, b"Incorrect key")
return
content_length = int(self.headers["Content-Length"])
post_data = self.rfile.read(content_length)
payload = parse_qs(post_data)[b"payload"][0]
parsed_payload = json.loads(payload)
handle_webhook(parsed_payload)
self.send_html_response(200, b"Success")
def app_is_in_allowlist(app):
""" Check whether the given app name matches a pattern
in the allowlist """
for pattern in ALLOWLIST_APP_PATTERNS:
if fnmatch.fnmatch(app, pattern):
return True
return False
def handle_webhook(body):
""" Given the body of a webhook from Papertrail, determine
which dynos are affected and trigger restarts if applicable """
saved_search_name = body["saved_search"]["name"]
logger.info(
f"Received webhook from Papertrail for saved search {saved_search_name}"
)
events = body["events"]
problem_dynos = Counter(parse_dyno_from_event(event) for event in events)
for dyno, event_count in problem_dynos.items():
if not app_is_in_allowlist(dyno.app):
logger.info(
f"Dyno {dyno} is timing out but does not match an allowlisted pattern restarting"
)
elif event_count < EVENT_THRESHOLD:
logger.info(
f"Dyno {dyno} is timing out but has not met the restart threshold"
)
else:
try:
if dyno.should_restart():
logger.info(f"Restarting {dyno}")
dyno.restart()
send_slack_message(f"Heroku Restarter has restarted {dyno}")
except RequestError as e:
logger.error(
f"While restarting {dyno}, request to {e.request_url} returned status {e.response.status}: {e}"
)
def parse_dyno_from_event(event):
""" Return a Dyno by parsing an individual Papertrail event """
app = event.get("hostname")
attribute_pairs = event.get("message").split(" ")
attributes = dict((attr.split("=") + [""])[:2] for attr in attribute_pairs)
dyno = attributes.get("dyno")
return Dyno(app=app, dyno=dyno)
def send_slack_message(message):
do_request(
"POST",
SLACK_WEBHOOK_URL,
body=json.dumps({"text": message}),
headers={"Content-type": "application/json"},
)
def do_request(method, url, **kwargs):
url_parts = urlparse(url)
conn = http.client.HTTPSConnection(url_parts.netloc)
conn.request(method, url_parts.path, **kwargs)
res = conn.getresponse()
if res.status > 299:
raise RequestError(res.read().decode("utf-8"), response=res, request_url=url)
return res
def run():
logger.info("Server running")
server_address = ("", int(os.environ.get("PORT", "8000")))
httpd = HTTPServer(server_address, WebhookRequestHandler)
httpd.serve_forever()
if __name__ == "__main__":
run() | main.py | import http.client
import json
import logging
import os
import fnmatch
from collections import Counter
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import parse_qs, urlparse
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Config
SLACK_WEBHOOK_URL = os.environ.get("SLACK_WEBHOOK_URL")
HEROKU_API_KEY = os.environ.get("HEROKU_API_KEY")
BASE_HEROKU_API_URL = "https://api.heroku.com"
ALLOWLIST_APP_PATTERNS = os.environ.get("ALLOWLIST_APP_PATTERNS", "").split(",")
SECRET_KEY = os.environ.get(
"SECRET_KEY", ""
) # Key used to authorise requests to this endpoint
EVENT_THRESHOLD = 2 # Only restart if there are at least this many events for a dyno
HEROKU_HEADERS = {
"Content-Type": "application/json",
"Accept": "application/vnd.heroku+json; version=3",
"Authorization": f"Bearer {HEROKU_API_KEY}",
}
class RequestError(Exception):
def __init__(self, *args, **kwargs):
self.response = kwargs.pop("response")
self.request_url = kwargs.pop("request_url")
super().__init__(*args, **kwargs)
@dataclass(eq=True, frozen=True)
class Dyno:
app: str
dyno: str
def __str__(self):
return f"{self.app} {self.dyno}"
def should_restart(self):
status = self.status()
if status["state"] == "starting":
logger.warning(
f"Dyno {self} should not restart as it is in a 'starting' state"
)
return False
if datetime.strptime(
status["created_at"], "%Y-%m-%dT%H:%M:%S%z"
) >= datetime.now(timezone.utc) - timedelta(minutes=2):
logger.warning(
f"Dyno {self} should not restart as it was created less than 2 minutes ago"
)
return False
heroku_status = json.loads(
do_request("GET", "https://status.heroku.com/api/v4/current-status").read()
)
for system in heroku_status["status"]:
if system["system"] == "Apps" and system["status"] == "red":
logger.warning(
f"Dyno {self} should not restart as there is an ongoing Heroku outage"
)
return False
return True
def restart(self):
res = do_request(
"DELETE",
f"{BASE_HEROKU_API_URL}/apps/{self.app}/dynos/{self.dyno}",
headers=HEROKU_HEADERS,
)
logger.info(f"Dyno {self.dyno} successfully restarted")
def status(self):
res = do_request(
"GET",
f"{BASE_HEROKU_API_URL}/apps/{self.app}/dynos/{self.dyno}",
headers=HEROKU_HEADERS,
)
return json.loads(res.read())
class WebhookRequestHandler(BaseHTTPRequestHandler):
    """Receives Papertrail webhook POSTs and dispatches them to handle_webhook."""

    def send_html_response(self, status, body):
        """Write an HTML response with the given status code and bytes body."""
        self.send_response(status)
        self.send_header("Content-type", "text/html")
        self.end_headers()
        self.wfile.write(body)

    def do_POST(self):
        """Authorise via the `key` query parameter, then process the payload."""
        url_parts = urlparse(self.path)
        querystring = parse_qs(url_parts.query)
        # Fix: the original `querystring.get("key", [])[0]` raised IndexError
        # (an HTTP 500) whenever the `key` parameter was absent. Default to
        # [None] so a missing key is rejected cleanly with 403 instead.
        if querystring.get("key", [None])[0] != SECRET_KEY:
            self.send_html_response(403, b"Incorrect key")
            return
        content_length = int(self.headers["Content-Length"])
        post_data = self.rfile.read(content_length)
        # Papertrail posts form-encoded data with JSON under the `payload` field.
        payload = parse_qs(post_data)[b"payload"][0]
        parsed_payload = json.loads(payload)
        handle_webhook(parsed_payload)
        self.send_html_response(200, b"Success")
def app_is_in_allowlist(app):
    """Return True if *app* matches any glob pattern in ALLOWLIST_APP_PATTERNS."""
    return any(fnmatch.fnmatch(app, pattern)
               for pattern in ALLOWLIST_APP_PATTERNS)
def handle_webhook(body):
    """Given the body of a webhook from Papertrail, determine
    which dynos are affected and trigger restarts if applicable """
    saved_search_name = body["saved_search"]["name"]
    logger.info(
        f"Received webhook from Papertrail for saved search {saved_search_name}"
    )
    events = body["events"]
    # Count events per dyno so EVENT_THRESHOLD can be applied below.
    problem_dynos = Counter(parse_dyno_from_event(event) for event in events)
    for dyno, event_count in problem_dynos.items():
        if not app_is_in_allowlist(dyno.app):
            # Fix: the original message read "...allowlisted pattern restarting",
            # which contradicted this no-restart branch; make it unambiguous.
            logger.info(
                f"Dyno {dyno} is timing out but does not match an allowlisted pattern; not restarting"
            )
        elif event_count < EVENT_THRESHOLD:
            logger.info(
                f"Dyno {dyno} is timing out but has not met the restart threshold"
            )
        else:
            try:
                if dyno.should_restart():
                    logger.info(f"Restarting {dyno}")
                    dyno.restart()
                    send_slack_message(f"Heroku Restarter has restarted {dyno}")
            except RequestError as e:
                # A failed Heroku API call should not abort the other dynos.
                logger.error(
                    f"While restarting {dyno}, request to {e.request_url} returned status {e.response.status}: {e}"
                )
def parse_dyno_from_event(event):
    """ Return a Dyno by parsing an individual Papertrail event """
    app = event.get("hostname")
    # Messages look like space-separated "k=v" attribute pairs.
    attribute_pairs = event.get("message").split(" ")
    # Fix: split on the first '=' only, so values containing '=' are kept
    # intact (the original split("=") truncated such values at the second '=').
    # The `+ [""]` keeps bare tokens without '=' mapping to an empty value.
    attributes = dict((attr.split("=", 1) + [""])[:2] for attr in attribute_pairs)
    dyno = attributes.get("dyno")
    return Dyno(app=app, dyno=dyno)
def send_slack_message(message):
    """POST *message* to the configured Slack incoming-webhook URL."""
    payload = json.dumps({"text": message})
    do_request(
        "POST",
        SLACK_WEBHOOK_URL,
        body=payload,
        headers={"Content-type": "application/json"},
    )
def do_request(method, url, **kwargs):
    """Perform an HTTPS request and return the response object.

    Raises RequestError (carrying the response and URL) for any status
    above 299.
    """
    url_parts = urlparse(url)
    conn = http.client.HTTPSConnection(url_parts.netloc)
    # Fix: include the query string in the request target; the original sent
    # only url_parts.path, silently dropping any "?query" component.
    target = url_parts.path
    if url_parts.query:
        target += "?" + url_parts.query
    conn.request(method, target, **kwargs)
    res = conn.getresponse()
    if res.status > 299:
        raise RequestError(res.read().decode("utf-8"), response=res, request_url=url)
    return res
def run():
    """Start the webhook HTTP server on $PORT (default 8000) and serve forever."""
    logger.info("Server running")
    port = int(os.environ.get("PORT", "8000"))
    httpd = HTTPServer(("", port), WebhookRequestHandler)
    httpd.serve_forever()
if __name__ == "__main__":
    # Script entry point. (Fix: removed an extraction artifact that had been
    # fused onto this line.)
    run()
from .xdcrnewbasetests import XDCRNewBaseTest
import time
class XDCRFilterTests(XDCRNewBaseTest):
    """XDCR filtered-replication tests combined with topology operations
    (rebalance in/out/swap, failover, pause/resume, node reboot).

    Fix: the final line had an extraction artifact (repo path and a stray
    import fragment) fused onto it; restored the plain statement.
    """

    def setUp(self):
        XDCRNewBaseTest.setUp(self)

    def tearDown(self):
        XDCRNewBaseTest.tearDown(self)

    def get_cluster_objects_for_input(self, input):
        """returns a list of cluster objects for input. 'input' is a string
        containing names of clusters separated by ':'
        eg. failover=C1:C2
        """
        # NOTE(review): parameter name shadows the builtin `input`; kept as-is
        # so the signature stays backward compatible with existing callers.
        clusters = []
        input_clusters = input.split(':')
        for cluster_name in input_clusters:
            clusters.append(self.get_cb_cluster_by_name(cluster_name))
        return clusters

    def test_xdcr_with_filter(self):
        """Set up filtered XDCR, apply the configured topology changes,
        then mutate data and verify replication results."""
        tasks = []
        # All knobs come from the framework's test-input parameters.
        rebalance_in = self._input.param("rebalance_in", None)
        rebalance_out = self._input.param("rebalance_out", None)
        swap_rebalance = self._input.param("swap_rebalance", None)
        failover = self._input.param("failover", None)
        graceful = self._input.param("graceful", None)
        pause = self._input.param("pause", None)
        reboot = self._input.param("reboot", None)
        initial_xdcr = self._input.param("initial_xdcr", False)
        if initial_xdcr:
            self.load_and_setup_xdcr()
        else:
            self.setup_xdcr_and_load()
        if pause:
            for cluster in self.get_cluster_objects_for_input(pause):
                for remote_cluster_refs in cluster.get_remote_clusters():
                    remote_cluster_refs.pause_all_replications()
        if rebalance_in:
            for cluster in self.get_cluster_objects_for_input(rebalance_in):
                tasks.append(cluster.async_rebalance_in())
            for task in tasks:
                task.result()
        if failover:
            for cluster in self.get_cluster_objects_for_input(failover):
                cluster.failover_and_rebalance_nodes(graceful=graceful,
                                                     rebalance=True)
        if rebalance_out:
            tasks = []
            for cluster in self.get_cluster_objects_for_input(rebalance_out):
                tasks.append(cluster.async_rebalance_out())
            for task in tasks:
                task.result()
        if swap_rebalance:
            tasks = []
            for cluster in self.get_cluster_objects_for_input(swap_rebalance):
                tasks.append(cluster.async_swap_rebalance())
            for task in tasks:
                task.result()
        if pause:
            for cluster in self.get_cluster_objects_for_input(pause):
                for remote_cluster_refs in cluster.get_remote_clusters():
                    remote_cluster_refs.resume_all_replications()
        if reboot:
            for cluster in self.get_cluster_objects_for_input(reboot):
                cluster.warmup_node()
            # Give warmed-up nodes time to come back before verification.
            time.sleep(60)
        self.perform_update_delete()
        self.verify_results()
import time
class XDCRFilterTests(XDCRNewBaseTest):
    """XDCR filtered-replication tests combined with topology operations.

    NOTE(review): duplicate of the identically-named class defined earlier
    in this chunk; this later definition shadows the earlier one.
    Fix: the final line had an extraction artifact (stray numeric columns)
    fused onto it; restored the plain statement.
    """

    def setUp(self):
        XDCRNewBaseTest.setUp(self)

    def tearDown(self):
        XDCRNewBaseTest.tearDown(self)

    def get_cluster_objects_for_input(self, input):
        """returns a list of cluster objects for input. 'input' is a string
        containing names of clusters separated by ':'
        eg. failover=C1:C2
        """
        # NOTE(review): `input` shadows the builtin; kept for compatibility.
        clusters = []
        input_clusters = input.split(':')
        for cluster_name in input_clusters:
            clusters.append(self.get_cb_cluster_by_name(cluster_name))
        return clusters

    def test_xdcr_with_filter(self):
        """Set up filtered XDCR, apply the configured topology changes,
        then mutate data and verify replication results."""
        tasks = []
        rebalance_in = self._input.param("rebalance_in", None)
        rebalance_out = self._input.param("rebalance_out", None)
        swap_rebalance = self._input.param("swap_rebalance", None)
        failover = self._input.param("failover", None)
        graceful = self._input.param("graceful", None)
        pause = self._input.param("pause", None)
        reboot = self._input.param("reboot", None)
        initial_xdcr = self._input.param("initial_xdcr", False)
        if initial_xdcr:
            self.load_and_setup_xdcr()
        else:
            self.setup_xdcr_and_load()
        if pause:
            for cluster in self.get_cluster_objects_for_input(pause):
                for remote_cluster_refs in cluster.get_remote_clusters():
                    remote_cluster_refs.pause_all_replications()
        if rebalance_in:
            for cluster in self.get_cluster_objects_for_input(rebalance_in):
                tasks.append(cluster.async_rebalance_in())
            for task in tasks:
                task.result()
        if failover:
            for cluster in self.get_cluster_objects_for_input(failover):
                cluster.failover_and_rebalance_nodes(graceful=graceful,
                                                     rebalance=True)
        if rebalance_out:
            tasks = []
            for cluster in self.get_cluster_objects_for_input(rebalance_out):
                tasks.append(cluster.async_rebalance_out())
            for task in tasks:
                task.result()
        if swap_rebalance:
            tasks = []
            for cluster in self.get_cluster_objects_for_input(swap_rebalance):
                tasks.append(cluster.async_swap_rebalance())
            for task in tasks:
                task.result()
        if pause:
            for cluster in self.get_cluster_objects_for_input(pause):
                for remote_cluster_refs in cluster.get_remote_clusters():
                    remote_cluster_refs.resume_all_replications()
        if reboot:
            for cluster in self.get_cluster_objects_for_input(reboot):
                cluster.warmup_node()
            time.sleep(60)
        self.perform_update_delete()
        self.verify_results()
from pliers import config
from pliers.filters import FrameSamplingFilter
from pliers.extractors import (GoogleVisionAPIFaceExtractor,
GoogleVisionAPILabelExtractor,
GoogleVisionAPIPropertyExtractor,
GoogleVisionAPISafeSearchExtractor,
GoogleVisionAPIWebEntitiesExtractor,
GoogleVideoIntelligenceAPIExtractor,
GoogleVideoAPILabelDetectionExtractor,
GoogleVideoAPIShotDetectionExtractor,
GoogleVideoAPIExplicitDetectionExtractor,
GoogleLanguageAPIExtractor,
GoogleLanguageAPIEntityExtractor,
GoogleLanguageAPISentimentExtractor,
GoogleLanguageAPISyntaxExtractor,
GoogleLanguageAPITextCategoryExtractor,
GoogleLanguageAPIEntitySentimentExtractor,
ExtractorResult,
merge_results)
from pliers.extractors.api.google import GoogleVisionAPIExtractor
from pliers.stimuli import ImageStim, VideoStim, TextStim
from pliers.utils import attempt_to_import, verify_dependencies
import pytest
import json
from os.path import join
from ...utils import get_test_data_path
import numpy as np
# Optional dependency: googleapiclient may be absent; tests call
# verify_dependencies before using it.
googleapiclient = attempt_to_import('googleapiclient', fromlist=['discovery'])
# Shared test-fixture directories for image, video and text stimuli.
IMAGE_DIR = join(get_test_data_path(), 'image')
VIDEO_DIR = join(get_test_data_path(), 'video')
TEXT_DIR = join(get_test_data_path(), 'text')
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_api_extractor_inits():
    """Smoke-test constructor defaults on the base Vision API extractor."""
    extractor = GoogleVisionAPIExtractor(num_retries=5)
    # num_retries is passed through; max_results defaults to 100.
    assert extractor.num_retries == 5
    assert extractor.max_results == 100
    # A service handle should have been built from the credentials.
    assert extractor.service is not None
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_api_face_extractor_inits():
    """Check face-extractor constructor defaults and offline payload parsing.

    The second half parses a canned API response from disk, exercising
    ExtractorResult.to_df() without hitting the network.
    """
    ext = GoogleVisionAPIFaceExtractor(num_retries=5)
    assert ext.num_retries == 5
    assert ext.max_results == 100
    assert ext.service is not None
    # Test parsing of individual response
    filename = join(
        get_test_data_path(), 'payloads', 'google_vision_api_face_payload.json')
    # Fix: close the payload file deterministically instead of leaking the
    # handle returned by the bare open() call.
    with open(filename, 'r') as f:
        response = json.load(f)
    stim = ImageStim(join(get_test_data_path(), 'image', 'obama.jpg'))
    res = ExtractorResult(response['faceAnnotations'], stim, ext)
    df = res.to_df()
    assert df['angerLikelihood'][0] == 'VERY_UNLIKELY'
    assert df['landmark_LEFT_EYE_BOTTOM_BOUNDARY_y'][0] == 257.023
    assert np.isnan(df['boundingPoly_vertex2_y'][0])
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_api_face_extractor():
    """Run the face extractor end-to-end on a single portrait image."""
    extractor = GoogleVisionAPIFaceExtractor(num_retries=5)
    assert extractor.validate_keys()
    image = ImageStim(join(get_test_data_path(), 'image', 'obama.jpg'))
    df = extractor.transform(image).to_df()
    assert 'joyLikelihood' in df.columns
    assert df['joyLikelihood'][0] == 'VERY_LIKELY'
    assert float(df['face_detectionConfidence'][0]) > 0.7
    # A bogus discovery file must make key validation fail.
    extractor = GoogleVisionAPIFaceExtractor(discovery_file='nogood')
    assert not extractor.validate_keys()
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_multiple_face_extraction():
    """to_df(handle_annotations='first') keeps one face row; default keeps all."""
    filename = join(get_test_data_path(), 'image', 'thai_people.jpg')
    stim = ImageStim(filename)
    # Only first record
    ext = GoogleVisionAPIFaceExtractor()
    result1 = ext.transform(stim).to_df(handle_annotations='first')
    assert 'joyLikelihood' in result1.columns
    # All records
    ext = GoogleVisionAPIFaceExtractor()
    result2 = ext.transform(stim).to_df()
    assert 'joyLikelihood' in result2.columns
    # Multi-face image: the full annotation set must contain more rows.
    assert result2.shape[0] > result1.shape[0]
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_face_batch():
    """Batched face extraction over multiple images and over video frames."""
    stims = ['apple', 'obama', 'thai_people']
    stim_files = [join(get_test_data_path(), 'image', '%s.jpg' % s)
                  for s in stims]
    stims = [ImageStim(s) for s in stim_files]
    ext = GoogleVisionAPIFaceExtractor(batch_size=5)
    result = ext.transform(stims)
    result = merge_results(result, format='wide', extractor_names=False,
                           handle_annotations='first')
    # 3 stims but 2 rows — presumably the apple image yields no face
    # annotations; TODO confirm against the merge_results semantics.
    assert result.shape == (2, 139)
    assert 'joyLikelihood' in result.columns
    assert result['joyLikelihood'][0] == 'VERY_LIKELY'
    assert result['joyLikelihood'][1] == 'VERY_LIKELY'
    # Same extractor applied to sampled video frames containing a face.
    video = VideoStim(join(VIDEO_DIR, 'obama_speech.mp4'))
    conv = FrameSamplingFilter(every=10)
    video = conv.transform(video)
    result = ext.transform(video)
    result = merge_results(result, format='wide', extractor_names=False)
    assert 'joyLikelihood' in result.columns
    assert result.shape == (22, 139)
    # A face-free video must produce an empty merged result.
    video = VideoStim(join(VIDEO_DIR, 'small.mp4'))
    video = conv.transform(video)
    result = ext.transform(video)
    result = merge_results(result, format='wide', extractor_names=False)
    assert 'joyLikelihood' not in result.columns
    assert len(result) == 0
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_api_label_extractor():
    """Label detection on a local image file and on a URL-backed image."""
    extractor = GoogleVisionAPILabelExtractor(num_retries=5)
    assert extractor.validate_keys()
    apple = ImageStim(join(get_test_data_path(), 'image', 'apple.jpg'))
    df = extractor.transform(apple).to_df()
    assert 'apple' in df.columns
    assert df['apple'][0] > 0.75
    # URL-backed stimuli go through the same pipeline.
    url = 'https://tuition.utexas.edu/sites/all/themes/tuition/logo.png'
    df = extractor.transform(ImageStim(url=url)).to_df()
    assert df['orange'][0] > 0.7
    # A bogus discovery file must make key validation fail.
    extractor = GoogleVisionAPILabelExtractor(discovery_file='nogood')
    assert not extractor.validate_keys()
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_api_properties_extractor():
    """Image-property (dominant colour) extraction on the apple fixture."""
    extractor = GoogleVisionAPIPropertyExtractor(num_retries=5)
    image = ImageStim(join(get_test_data_path(), 'image', 'apple.jpg'))
    df = extractor.transform(image).to_df()
    # Columns are named by RGB triple; this one is the apple's red.
    assert '158, 13, 29' in df.columns
    assert np.isfinite(df['158, 13, 29'][0])
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_api_safe_search():
    """SafeSearch extraction: a portrait must rate violence VERY_UNLIKELY."""
    extractor = GoogleVisionAPISafeSearchExtractor(num_retries=5)
    image = ImageStim(join(get_test_data_path(), 'image', 'obama.jpg'))
    df = extractor.transform(image).to_df()
    assert 'adult' in df.columns
    assert df['violence'][0] == 'VERY_UNLIKELY'
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_api_web_entities():
    """Web-entity detection should recognise the subject of the portrait."""
    extractor = GoogleVisionAPIWebEntitiesExtractor(num_retries=5)
    image = ImageStim(join(get_test_data_path(), 'image', 'obama.jpg'))
    df = extractor.transform(image).to_df()
    assert 'Barack Obama' in df.columns
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_api_extractor_large():
    """allow_large_jobs=False must reject batches larger than large_job."""
    # Save current config values so they can be restored at the end.
    default = config.get_option('allow_large_jobs')
    default_large = config.get_option('large_job')
    default_cache = config.get_option('cache_transformers')
    config.set_option('allow_large_jobs', False)
    config.set_option('large_job', 1)
    config.set_option('cache_transformers', False)
    ext = GoogleVisionAPILabelExtractor()
    images = [ImageStim(join(IMAGE_DIR, 'apple.jpg')),
              ImageStim(join(IMAGE_DIR, 'obama.jpg'))]
    # Two stims exceed large_job=1, so the transform should be refused.
    with pytest.raises(ValueError):
        merge_results(ext.transform(images))
    config.set_option('allow_large_jobs', True)
    results = merge_results(ext.transform(images))
    assert 'GoogleVisionAPILabelExtractor#apple' in results.columns
    assert results.shape == (2, 32)
    # Restore global config for subsequent tests.
    config.set_option('allow_large_jobs', default)
    config.set_option('large_job', default_large)
    config.set_option('cache_transformers', default_cache)
@pytest.mark.long_test
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_video_api_extractor(caplog):
    """Video Intelligence: a 1s timeout must log a warning; a long timeout
    should yield label + shot results unless the API is still running."""
    ext = GoogleVideoIntelligenceAPIExtractor(timeout=1)
    stim = VideoStim(join(VIDEO_DIR, 'park.mp4'))
    result = ext.transform(stim)
    # With a 1-second timeout the extraction cannot finish, so the last
    # log record must be the timeout warning.
    log_message = caplog.records[-1].message
    assert log_message == ("The extraction reached the timeout limit of %fs, "
                           "which means the API may not have finished analyzing the "
                           "video and the results may be empty or incomplete." % 1.0)
    ext = GoogleVideoIntelligenceAPIExtractor(timeout=500,
                                              features=['LABEL_DETECTION',
                                                        'SHOT_CHANGE_DETECTION'])
    result = ext.transform(stim).to_df()
    log_message = caplog.records[-1].message
    # The API may legitimately still be running; assert only on completion.
    incomplete = (log_message == ("The extraction reached the timeout limit of"
                  " %fs, which means the API may not have finished analyzing the"
                  " video and the results may be empty or incomplete." % 500))
    if not incomplete:
        assert result.shape == (1, 31)
        assert result['onset'][0] == 0.0
        assert result['duration'][0] > 0.5 and result['duration'][0] < 0.6
        assert result['category_plant'][0] > 0.5
        assert result['park'][0] > 0.5
        assert result['shot_id'][0] == 0
@pytest.mark.long_test
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_video_api_extractor2(caplog):
    """Explicit-content detection restricted to two custom video segments."""
    segments = [{'startTimeOffset': '0.1s', 'endTimeOffset': '0.3s'},
                {'startTimeOffset': '0.3s', 'endTimeOffset': '0.45s'}]
    ext = GoogleVideoIntelligenceAPIExtractor(timeout=500, segments=segments,
                                              features=['EXPLICIT_CONTENT_DETECTION'])
    stim = VideoStim(join(VIDEO_DIR, 'park.mp4'))
    result = ext.transform(stim).to_df()
    log_message = caplog.records[-1].message
    # The API may still be running at timeout; assert only on completion.
    incomplete = (log_message == ("The extraction reached the timeout limit of"
                  " %fs, which means the API may not have finished analyzing the"
                  " video and the results may be empty or incomplete." % 500))
    if not incomplete:
        # One row per requested segment, onsets inside each segment's window.
        assert result.shape == (2, 5)
        assert result['onset'][0] > 0.1 and result['onset'][0] < 0.3
        assert result['onset'][1] > 0.3 and result['onset'][1] < 0.45
        assert 'UNLIKELY' in result['pornographyLikelihood'][0]
@pytest.mark.long_test
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_video_api_label_extractor(caplog):
    """Video label detection in FRAME_MODE and SHOT_MODE."""
    ext = GoogleVideoAPILabelDetectionExtractor(mode='FRAME_MODE',
                                                stationary_camera=True)
    stim = VideoStim(join(VIDEO_DIR, 'small.mp4'))
    ex_result = ext.transform(stim)
    log_message = caplog.records[-1].message
    # The API may still be running at timeout; assert only on completion.
    incomplete = (log_message == ("The extraction reached the timeout limit of"
                  " %fs, which means the API may not have finished analyzing the"
                  " video and the results may be empty or incomplete." % 90))
    if not incomplete:
        result = ex_result.to_df()
        assert result.shape == (7, 25)
        assert 'category_toy' in result.columns
        assert result['toy'][0] > 0.5
        assert np.isclose(result['duration'][0], stim.duration, 0.1)
        result = ex_result.to_df(format='long')
        # Fix: `x in series` tests the *index*, not the values, so the
        # original two assertions were vacuous. Check the values instead.
        assert 'pornographyLikelihood' not in result['feature'].values
        assert not result['value'].isnull().any()
    ext = GoogleVideoAPILabelDetectionExtractor(mode='SHOT_MODE')
    stim = VideoStim(join(VIDEO_DIR, 'shot_change.mp4'))
    ex_result = ext.transform(stim)
    log_message = caplog.records[-1].message
    incomplete = (log_message == ("The extraction reached the timeout limit of"
                  " %fs, which means the API may not have finished analyzing the"
                  " video and the results may be empty or incomplete." % 90))
    if not incomplete:
        raw = ex_result.raw['response']['annotationResults'][0]
        assert 'shotLabelAnnotations' in raw
        result = ex_result.to_df()
        assert result.shape == (3, 17)
        assert result['onset'][1] == 0.0
        assert np.isclose(result['onset'][2], 3.2, 0.1)
        # Labels are shot-specific: cat only after the cut, clock only before.
        assert np.isnan(result['cat'][1])
        assert result['cat'][2] > 0.5
        assert np.isnan(result['clock'][2])
        assert result['clock'][1] > 0.5 or result['clock'][0] > 0.5
@pytest.mark.long_test
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_video_api_shot_extractor(caplog):
    """Shot detection: one shot for a continuous clip, two across a cut."""
    ext = GoogleVideoAPIShotDetectionExtractor(request_rate=3)
    stim = VideoStim(join(VIDEO_DIR, 'small.mp4'))
    result = ext.transform(stim).to_df()
    log_message = caplog.records[-1].message
    # The API may still be running at timeout; assert only on completion.
    incomplete = (log_message == ("The extraction reached the timeout limit of"
                  " %fs, which means the API may not have finished analyzing the"
                  " video and the results may be empty or incomplete." % 90))
    if not incomplete:
        assert result.shape == (1, 5)
        assert result['onset'][0] == 0.0
        assert np.isclose(result['duration'][0], stim.duration, 0.1)
        assert 'shot_id' in result.columns
        assert result['shot_id'][0] == 0
    # A clip with a hard cut should yield two shots.
    ext = GoogleVideoAPIShotDetectionExtractor()
    stim = VideoStim(join(VIDEO_DIR, 'shot_change.mp4'))
    result = ext.transform(stim).to_df()
    log_message = caplog.records[-1].message
    incomplete = (log_message == ("The extraction reached the timeout limit of"
                  " %fs, which means the API may not have finished analyzing the"
                  " video and the results may be empty or incomplete." % 90))
    if not incomplete:
        assert result.shape == (2, 5)
        assert np.isclose(result['onset'][1], 3.2, 0.1)
        assert 'shot_id' in result.columns
        assert result['shot_id'][1] == 1
@pytest.mark.long_test
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_video_api_explicit_extractor(caplog):
    """Explicit-content detection; stim onset must offset result onsets."""
    ext = GoogleVideoAPIExplicitDetectionExtractor(request_rate=3)
    stim = VideoStim(join(VIDEO_DIR, 'small.mp4'), onset=4.2)
    result = ext.transform(stim).to_df()
    log_message = caplog.records[-1].message
    # The API may still be running at timeout; assert only on completion.
    incomplete = (log_message == ("The extraction reached the timeout limit of"
                  " %fs, which means the API may not have finished analyzing the"
                  " video and the results may be empty or incomplete." % 90))
    if not incomplete:
        assert result.shape[1] == 5
        # Onsets are shifted by the stim's 4.2s onset.
        assert result['onset'][0] >= 4.2
        assert 'pornographyLikelihood' in result.columns
        assert 'UNLIKELY' in result['pornographyLikelihood'][0]
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_language_api_extractor():
    """Combined classifyText + extractEntities on a long text fixture."""
    verify_dependencies(['googleapiclient'])
    ext = GoogleLanguageAPIExtractor(features=['classifyText',
                                               'extractEntities'])
    stim = TextStim(text='hello world')
    with pytest.raises(googleapiclient.errors.HttpError):
        # Should fail because too few tokens
        ext.transform(stim)
    stim = TextStim(join(TEXT_DIR, 'scandal.txt'))
    result = ext.transform(stim).to_df(timing=False, object_id='auto')
    assert result.shape == (43, 10)
    assert 'category_/Books & Literature' in result.columns
    assert result['category_/Books & Literature'][0] > 0.5
    # NOTE(review): '<NAME>' looks like an anonymisation placeholder for a
    # proper name from scandal.txt — confirm against the fixture contents.
    irene = result[result['text'] == '<NAME>']
    assert (irene['type'] == 'PERSON').all()
    assert not irene['metadata_wikipedia_url'].isna().any()
    # Document row shouldn't have entity features, and vice versa
    assert np.isnan(result.iloc[0]['text'])
    assert np.isnan(result.iloc[1]['category_/Books & Literature']).all()
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_language_api_entity_extractor():
    """Entity extraction: types, salience and character offsets."""
    verify_dependencies(['googleapiclient'])
    ext = GoogleLanguageAPIEntityExtractor()
    stim = TextStim(join(TEXT_DIR, 'sample_text_with_entities.txt'))
    result = ext.transform(stim).to_df(timing=False, object_id='auto')
    assert result.shape == (10, 9)
    assert result['text'][0] == 'Google'
    assert result['type'][0] == 'ORGANIZATION'
    assert result['salience'][0] > 0.0 and result['salience'][0] < 0.5
    # Character offsets locate the entity mention within the source text.
    assert result['begin_char_index'][4] == 165.0
    assert result['end_char_index'][4] == 172.0
    assert result['text'][4] == 'Android'
    assert result['type'][4] == 'CONSUMER_GOOD'
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_language_api_sentiment_extractor():
    """Sentence-level and document-level sentiment on the scandal fixture."""
    verify_dependencies(['googleapiclient'])
    ext = GoogleLanguageAPISentimentExtractor()
    stim = TextStim(join(TEXT_DIR, 'scandal.txt'))
    result = ext.transform(stim).to_df(timing=False, object_id='auto')
    assert result.shape == (12, 7)
    assert 'sentiment_magnitude' in result.columns
    assert 'text' in result.columns
    # Last row is the whole-document score, expected near neutral.
    doc_sentiment = result['sentiment_score'][11]
    assert doc_sentiment < 0.3 and doc_sentiment > -0.3
    assert result['begin_char_index'][7] == 565.0
    assert result['end_char_index'][7] == 672.0
    assert result['sentiment_magnitude'][7] > 0.6
    assert result['sentiment_score'][7] > 0.6
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_language_api_syntax_extractor():
    """Syntax analysis: morphology, lemma and dependency features per token."""
    verify_dependencies(['googleapiclient'])
    ext = GoogleLanguageAPISyntaxExtractor()
    stim = TextStim(join(TEXT_DIR, 'sample_text_with_entities.txt'))
    result = ext.transform(stim).to_df(timing=False, object_id='auto')
    assert result.shape == (32, 20)
    # Pronouns carry person/gender/number/case morphology.
    his = result[result['text'] == 'his']
    assert (his['person'] == 'THIRD').all()
    assert (his['gender'] == 'MASCULINE').all()
    assert (his['case'] == 'GENITIVE').all()
    their = result[result['text'] == 'their']
    assert (their['person'] == 'THIRD').all()
    assert (their['number'] == 'PLURAL').all()
    # Verbs carry tag/mood/tense and lemma.
    love = result[result['text'] == 'love']
    assert (love['tag'] == 'VERB').all()
    assert (love['mood'] == 'INDICATIVE').all()
    headquartered = result[result['text'] == 'headquartered']
    assert (headquartered['tense'] == 'PAST').all()
    assert (headquartered['lemma'] == 'headquarter').all()
    # Proper nouns also expose dependency-parse features.
    google = result[result['text'] == 'Google']
    assert (google['proper'] == 'PROPER').all()
    assert (google['tag'] == 'NOUN').all()
    assert (google['dependency_label'] == 'NSUBJ').all()
    assert (google['dependency_headTokenIndex'] == 7).all()
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_language_api_category_extractor():
    """Document classification via the Language API on a tech-news text."""
    verify_dependencies(['googleapiclient'])
    extractor = GoogleLanguageAPITextCategoryExtractor()
    text = TextStim(join(TEXT_DIR, 'sample_text_with_entities.txt'))
    df = extractor.transform(text).to_df(timing=False, object_id='auto')
    assert df.shape == (1, 4)
    # Both plausible categories should come back with some confidence.
    assert 'category_/Computers & Electronics' in df.columns
    assert df['category_/Computers & Electronics'][0] > 0.3
    assert 'category_/News' in df.columns
    assert df['category_/News'][0] > 0.3
    assert df['language'][0] == 'en'
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_language_api_entity_sentiment_extractor():
    """Entity-level sentiment: entity columns plus per-entity sentiment."""
    verify_dependencies(['googleapiclient'])
    ext = GoogleLanguageAPIEntitySentimentExtractor()
    stim = TextStim(join(TEXT_DIR, 'sample_text_with_entities.txt'))
    result = ext.transform(stim).to_df(timing=False, object_id='auto')
    # Produces same result as entity extractor with sentiment columns
    assert result.shape == (10, 11)
    assert result['text'][8] == 'phones'
    assert result['type'][8] == 'CONSUMER_GOOD'
    assert 'sentiment_score' in result.columns
    # Fix: the last line was fused with an extraction artifact (repo path and
    # a stray duplicate `from pliers import config`, which is already imported
    # at the top of the file); restored the plain assertion.
    assert result['sentiment_score'][8] > 0.6  # 'love their ... phones'
from pliers.filters import FrameSamplingFilter
from pliers.extractors import (GoogleVisionAPIFaceExtractor,
GoogleVisionAPILabelExtractor,
GoogleVisionAPIPropertyExtractor,
GoogleVisionAPISafeSearchExtractor,
GoogleVisionAPIWebEntitiesExtractor,
GoogleVideoIntelligenceAPIExtractor,
GoogleVideoAPILabelDetectionExtractor,
GoogleVideoAPIShotDetectionExtractor,
GoogleVideoAPIExplicitDetectionExtractor,
GoogleLanguageAPIExtractor,
GoogleLanguageAPIEntityExtractor,
GoogleLanguageAPISentimentExtractor,
GoogleLanguageAPISyntaxExtractor,
GoogleLanguageAPITextCategoryExtractor,
GoogleLanguageAPIEntitySentimentExtractor,
ExtractorResult,
merge_results)
from pliers.extractors.api.google import GoogleVisionAPIExtractor
from pliers.stimuli import ImageStim, VideoStim, TextStim
from pliers.utils import attempt_to_import, verify_dependencies
import pytest
import json
from os.path import join
from ...utils import get_test_data_path
import numpy as np
# NOTE(review): duplicate of the identical constants defined earlier in this
# chunk (parsed copy of the same file).
googleapiclient = attempt_to_import('googleapiclient', fromlist=['discovery'])
IMAGE_DIR = join(get_test_data_path(), 'image')
VIDEO_DIR = join(get_test_data_path(), 'video')
TEXT_DIR = join(get_test_data_path(), 'text')
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_api_extractor_inits():
    """Smoke-test constructor defaults (duplicate of the earlier definition;
    this later one shadows the earlier at import time)."""
    ext = GoogleVisionAPIExtractor(num_retries=5)
    assert ext.num_retries == 5
    assert ext.max_results == 100
    assert ext.service is not None
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_api_face_extractor_inits():
    """Check face-extractor defaults and offline payload parsing (duplicate
    of the earlier definition; this later one shadows it)."""
    ext = GoogleVisionAPIFaceExtractor(num_retries=5)
    assert ext.num_retries == 5
    assert ext.max_results == 100
    assert ext.service is not None
    # Test parsing of individual response
    filename = join(
        get_test_data_path(), 'payloads', 'google_vision_api_face_payload.json')
    # Fix: close the payload file deterministically instead of leaking the
    # handle returned by the bare open() call.
    with open(filename, 'r') as f:
        response = json.load(f)
    stim = ImageStim(join(get_test_data_path(), 'image', 'obama.jpg'))
    res = ExtractorResult(response['faceAnnotations'], stim, ext)
    df = res.to_df()
    assert df['angerLikelihood'][0] == 'VERY_UNLIKELY'
    assert df['landmark_LEFT_EYE_BOTTOM_BOUNDARY_y'][0] == 257.023
    assert np.isnan(df['boundingPoly_vertex2_y'][0])
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_api_face_extractor():
    """End-to-end face extraction (duplicate of the earlier definition)."""
    ext = GoogleVisionAPIFaceExtractor(num_retries=5)
    assert ext.validate_keys()
    filename = join(get_test_data_path(), 'image', 'obama.jpg')
    stim = ImageStim(filename)
    result = ext.transform(stim).to_df()
    assert 'joyLikelihood' in result.columns
    assert result['joyLikelihood'][0] == 'VERY_LIKELY'
    assert float(result['face_detectionConfidence'][0]) > 0.7
    # A bogus discovery file must make key validation fail.
    ext = GoogleVisionAPIFaceExtractor(discovery_file='nogood')
    assert not ext.validate_keys()
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_multiple_face_extraction():
    """First-annotation vs all-annotation rows (duplicate definition)."""
    filename = join(get_test_data_path(), 'image', 'thai_people.jpg')
    stim = ImageStim(filename)
    # Only first record
    ext = GoogleVisionAPIFaceExtractor()
    result1 = ext.transform(stim).to_df(handle_annotations='first')
    assert 'joyLikelihood' in result1.columns
    # All records
    ext = GoogleVisionAPIFaceExtractor()
    result2 = ext.transform(stim).to_df()
    assert 'joyLikelihood' in result2.columns
    assert result2.shape[0] > result1.shape[0]
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_face_batch():
    """Batched face extraction on images and video (duplicate definition)."""
    stims = ['apple', 'obama', 'thai_people']
    stim_files = [join(get_test_data_path(), 'image', '%s.jpg' % s)
                  for s in stims]
    stims = [ImageStim(s) for s in stim_files]
    ext = GoogleVisionAPIFaceExtractor(batch_size=5)
    result = ext.transform(stims)
    result = merge_results(result, format='wide', extractor_names=False,
                           handle_annotations='first')
    assert result.shape == (2, 139)
    assert 'joyLikelihood' in result.columns
    assert result['joyLikelihood'][0] == 'VERY_LIKELY'
    assert result['joyLikelihood'][1] == 'VERY_LIKELY'
    # Same extractor applied to sampled video frames containing a face.
    video = VideoStim(join(VIDEO_DIR, 'obama_speech.mp4'))
    conv = FrameSamplingFilter(every=10)
    video = conv.transform(video)
    result = ext.transform(video)
    result = merge_results(result, format='wide', extractor_names=False)
    assert 'joyLikelihood' in result.columns
    assert result.shape == (22, 139)
    # A face-free video must produce an empty merged result.
    video = VideoStim(join(VIDEO_DIR, 'small.mp4'))
    video = conv.transform(video)
    result = ext.transform(video)
    result = merge_results(result, format='wide', extractor_names=False)
    assert 'joyLikelihood' not in result.columns
    assert len(result) == 0
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_api_label_extractor():
    """Label detection on file and URL images (duplicate definition)."""
    ext = GoogleVisionAPILabelExtractor(num_retries=5)
    assert ext.validate_keys()
    filename = join(get_test_data_path(), 'image', 'apple.jpg')
    stim = ImageStim(filename)
    result = ext.transform(stim).to_df()
    assert 'apple' in result.columns
    assert result['apple'][0] > 0.75
    # URL-backed stimuli go through the same pipeline.
    url = 'https://tuition.utexas.edu/sites/all/themes/tuition/logo.png'
    stim = ImageStim(url=url)
    result = ext.transform(stim).to_df()
    assert result['orange'][0] > 0.7
    ext = GoogleVisionAPILabelExtractor(discovery_file='nogood')
    assert not ext.validate_keys()
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_api_properties_extractor():
    """Dominant-colour property extraction (duplicate definition)."""
    ext = GoogleVisionAPIPropertyExtractor(num_retries=5)
    filename = join(get_test_data_path(), 'image', 'apple.jpg')
    stim = ImageStim(filename)
    result = ext.transform(stim).to_df()
    # Columns are named by RGB triple; this one is the apple's red.
    assert '158, 13, 29' in result.columns
    assert np.isfinite(result['158, 13, 29'][0])
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_api_safe_search():
    """SafeSearch on a portrait image (duplicate definition)."""
    ext = GoogleVisionAPISafeSearchExtractor(num_retries=5)
    filename = join(get_test_data_path(), 'image', 'obama.jpg')
    stim = ImageStim(filename)
    result = ext.transform(stim).to_df()
    assert 'adult' in result.columns
    assert result['violence'][0] == 'VERY_UNLIKELY'
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_api_web_entities():
    """Web-entity detection on a portrait image (duplicate definition)."""
    ext = GoogleVisionAPIWebEntitiesExtractor(num_retries=5)
    filename = join(get_test_data_path(), 'image', 'obama.jpg')
    stim = ImageStim(filename)
    result = ext.transform(stim).to_df()
    assert 'Barack Obama' in result.columns
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_api_extractor_large():
    """Transforming more stims than the 'large_job' threshold should raise
    ValueError while 'allow_large_jobs' is disabled, then succeed once
    large jobs are re-enabled."""
    # Save the current config so it can be restored at the end of the test.
    default = config.get_option('allow_large_jobs')
    default_large = config.get_option('large_job')
    default_cache = config.get_option('cache_transformers')
    config.set_option('allow_large_jobs', False)
    config.set_option('large_job', 1)  # any batch of >1 stims is "large"
    config.set_option('cache_transformers', False)
    ext = GoogleVisionAPILabelExtractor()
    images = [ImageStim(join(IMAGE_DIR, 'apple.jpg')),
              ImageStim(join(IMAGE_DIR, 'obama.jpg'))]
    # Two images exceed the large_job limit of 1, so this must be rejected.
    with pytest.raises(ValueError):
        merge_results(ext.transform(images))
    config.set_option('allow_large_jobs', True)
    results = merge_results(ext.transform(images))
    assert 'GoogleVisionAPILabelExtractor#apple' in results.columns
    assert results.shape == (2, 32)
    # Restore the original configuration for subsequent tests.
    config.set_option('allow_large_jobs', default)
    config.set_option('large_job', default_large)
    config.set_option('cache_transformers', default_cache)
@pytest.mark.long_test
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_video_api_extractor(caplog):
    """Video Intelligence extraction: a 1s timeout should log the timeout
    warning; with a generous timeout, label/shot results for park.mp4 are
    validated (only if the API actually finished)."""
    ext = GoogleVideoIntelligenceAPIExtractor(timeout=1)
    stim = VideoStim(join(VIDEO_DIR, 'park.mp4'))
    result = ext.transform(stim)
    # A 1-second timeout cannot possibly be enough, so the extractor's
    # timeout warning must be the most recent log record.
    log_message = caplog.records[-1].message
    assert log_message == ("The extraction reached the timeout limit of %fs, "
                           "which means the API may not have finished analyzing the "
                           "video and the results may be empty or incomplete." % 1.0)
    ext = GoogleVideoIntelligenceAPIExtractor(timeout=500,
                                              features=['LABEL_DETECTION',
                                                        'SHOT_CHANGE_DETECTION'])
    result = ext.transform(stim).to_df()
    # Even 500s may not suffice; only check contents if extraction finished.
    log_message = caplog.records[-1].message
    incomplete = (log_message == ("The extraction reached the timeout limit of"
                                  " %fs, which means the API may not have finished analyzing the"
                                  " video and the results may be empty or incomplete." % 500))
    if not incomplete:
        assert result.shape == (1, 31)
        assert result['onset'][0] == 0.0
        assert result['duration'][0] > 0.5 and result['duration'][0] < 0.6
        assert result['category_plant'][0] > 0.5
        assert result['park'][0] > 0.5
        assert result['shot_id'][0] == 0
@pytest.mark.long_test
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_video_api_extractor2(caplog):
    """Explicit-content detection restricted to two sub-segments should
    yield one row per segment, with onsets inside each segment's range."""
    segments = [{'startTimeOffset': '0.1s', 'endTimeOffset': '0.3s'},
                {'startTimeOffset': '0.3s', 'endTimeOffset': '0.45s'}]
    ext = GoogleVideoIntelligenceAPIExtractor(timeout=500, segments=segments,
                                              features=['EXPLICIT_CONTENT_DETECTION'])
    stim = VideoStim(join(VIDEO_DIR, 'park.mp4'))
    result = ext.transform(stim).to_df()
    # Skip content checks if the API timed out (extractor logs a warning).
    log_message = caplog.records[-1].message
    incomplete = (log_message == ("The extraction reached the timeout limit of"
                                  " %fs, which means the API may not have finished analyzing the"
                                  " video and the results may be empty or incomplete." % 500))
    if not incomplete:
        assert result.shape == (2, 5)
        assert result['onset'][0] > 0.1 and result['onset'][0] < 0.3
        assert result['onset'][1] > 0.3 and result['onset'][1] < 0.45
        # Substring check: 'UNLIKELY' also matches 'VERY_UNLIKELY'.
        assert 'UNLIKELY' in result['pornographyLikelihood'][0]
@pytest.mark.long_test
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_video_api_label_extractor(caplog):
    """Label detection in FRAME_MODE and SHOT_MODE.

    Results are only validated when the extraction did not hit the
    90s timeout (detected via the extractor's most recent log record).
    """
    ext = GoogleVideoAPILabelDetectionExtractor(mode='FRAME_MODE',
                                                stationary_camera=True)
    stim = VideoStim(join(VIDEO_DIR, 'small.mp4'))
    ex_result = ext.transform(stim)
    log_message = caplog.records[-1].message
    incomplete = (log_message == ("The extraction reached the timeout limit of"
                                  " %fs, which means the API may not have finished analyzing the"
                                  " video and the results may be empty or incomplete." % 90))
    if not incomplete:
        result = ex_result.to_df()
        assert result.shape == (7, 25)
        assert 'category_toy' in result.columns
        assert result['toy'][0] > 0.5
        assert np.isclose(result['duration'][0], stim.duration, 0.1)
        # Long format should not include features that were never requested.
        result = ex_result.to_df(format='long')
        assert 'pornographyLikelihood' not in result['feature']
        assert np.nan not in result['value']
    # SHOT_MODE should attach labels to detected shots instead of frames.
    ext = GoogleVideoAPILabelDetectionExtractor(mode='SHOT_MODE')
    stim = VideoStim(join(VIDEO_DIR, 'shot_change.mp4'))
    ex_result = ext.transform(stim)
    log_message = caplog.records[-1].message
    incomplete = (log_message == ("The extraction reached the timeout limit of"
                                  " %fs, which means the API may not have finished analyzing the"
                                  " video and the results may be empty or incomplete." % 90))
    if not incomplete:
        raw = ex_result.raw['response']['annotationResults'][0]
        assert 'shotLabelAnnotations' in raw
        result = ex_result.to_df()
        assert result.shape == (3, 17)
        assert result['onset'][1] == 0.0
        assert np.isclose(result['onset'][2], 3.2, 0.1)
        # The cat appears only after the shot change; the clock before it.
        assert np.isnan(result['cat'][1])
        assert result['cat'][2] > 0.5
        assert np.isnan(result['clock'][2])
        assert result['clock'][1] > 0.5 or result['clock'][0] > 0.5
@pytest.mark.long_test
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_video_api_shot_extractor(caplog):
    """Shot-change detection: one shot for a single-scene clip, two shots
    for a clip with one cut (checks skipped if the API timed out)."""
    ext = GoogleVideoAPIShotDetectionExtractor(request_rate=3)
    stim = VideoStim(join(VIDEO_DIR, 'small.mp4'))
    result = ext.transform(stim).to_df()
    log_message = caplog.records[-1].message
    incomplete = (log_message == ("The extraction reached the timeout limit of"
                                  " %fs, which means the API may not have finished analyzing the"
                                  " video and the results may be empty or incomplete." % 90))
    if not incomplete:
        # Single continuous scene: exactly one shot spanning the video.
        assert result.shape == (1, 5)
        assert result['onset'][0] == 0.0
        assert np.isclose(result['duration'][0], stim.duration, 0.1)
        assert 'shot_id' in result.columns
        assert result['shot_id'][0] == 0
    ext = GoogleVideoAPIShotDetectionExtractor()
    stim = VideoStim(join(VIDEO_DIR, 'shot_change.mp4'))
    result = ext.transform(stim).to_df()
    log_message = caplog.records[-1].message
    incomplete = (log_message == ("The extraction reached the timeout limit of"
                                  " %fs, which means the API may not have finished analyzing the"
                                  " video and the results may be empty or incomplete." % 90))
    if not incomplete:
        # One cut around 3.2s: two shots expected.
        assert result.shape == (2, 5)
        assert np.isclose(result['onset'][1], 3.2, 0.1)
        assert 'shot_id' in result.columns
        assert result['shot_id'][1] == 1
@pytest.mark.long_test
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_video_api_explicit_extractor(caplog):
    """Explicit-content detection should honor the stim's onset (4.2s is
    added to all result onsets) and expose pornographyLikelihood."""
    ext = GoogleVideoAPIExplicitDetectionExtractor(request_rate=3)
    stim = VideoStim(join(VIDEO_DIR, 'small.mp4'), onset=4.2)
    result = ext.transform(stim).to_df()
    # Skip content checks if the API timed out (extractor logs a warning).
    log_message = caplog.records[-1].message
    incomplete = (log_message == ("The extraction reached the timeout limit of"
                                  " %fs, which means the API may not have finished analyzing the"
                                  " video and the results may be empty or incomplete." % 90))
    if not incomplete:
        assert result.shape[1] == 5
        assert result['onset'][0] >= 4.2
        assert 'pornographyLikelihood' in result.columns
        # Substring check: 'UNLIKELY' also matches 'VERY_UNLIKELY'.
        assert 'UNLIKELY' in result['pornographyLikelihood'][0]
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_language_api_extractor():
    """Combined text classification + entity extraction on scandal.txt.

    NOTE(review): '<NAME>' below looks like a placeholder left by data
    scrubbing (presumably a character name from scandal.txt) -- confirm
    against the canonical version of this test suite.
    """
    verify_dependencies(['googleapiclient'])
    ext = GoogleLanguageAPIExtractor(features=['classifyText',
                                               'extractEntities'])
    stim = TextStim(text='hello world')
    with pytest.raises(googleapiclient.errors.HttpError):
        # Should fail because too few tokens
        ext.transform(stim)
    stim = TextStim(join(TEXT_DIR, 'scandal.txt'))
    result = ext.transform(stim).to_df(timing=False, object_id='auto')
    assert result.shape == (43, 10)
    assert 'category_/Books & Literature' in result.columns
    assert result['category_/Books & Literature'][0] > 0.5
    # Rows for a known PERSON entity should carry a Wikipedia URL.
    irene = result[result['text'] == '<NAME>']
    assert (irene['type'] == 'PERSON').all()
    assert not irene['metadata_wikipedia_url'].isna().any()
    # Document row shouldn't have entity features, and vice versa
    assert np.isnan(result.iloc[0]['text'])
    assert np.isnan(result.iloc[1]['category_/Books & Literature']).all()
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_language_api_entity_extractor():
    """Entity extraction should identify organizations and consumer goods
    with character offsets and bounded salience scores."""
    verify_dependencies(['googleapiclient'])
    extractor = GoogleLanguageAPIEntityExtractor()
    text = TextStim(join(TEXT_DIR, 'sample_text_with_entities.txt'))
    entities = extractor.transform(text).to_df(timing=False, object_id='auto')
    assert entities.shape == (10, 9)
    # First entity: the organization mentioned at the start of the text.
    first = entities.iloc[0]
    assert first['text'] == 'Google'
    assert first['type'] == 'ORGANIZATION'
    assert 0.0 < first['salience'] < 0.5
    # Fifth entity: a consumer good with known character offsets.
    fifth = entities.iloc[4]
    assert fifth['begin_char_index'] == 165.0
    assert fifth['end_char_index'] == 172.0
    assert fifth['text'] == 'Android'
    assert fifth['type'] == 'CONSUMER_GOOD'
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_language_api_sentiment_extractor():
    """Sentence- and document-level sentiment for scandal.txt."""
    verify_dependencies(['googleapiclient'])
    extractor = GoogleLanguageAPISentimentExtractor()
    text = TextStim(join(TEXT_DIR, 'scandal.txt'))
    df = extractor.transform(text).to_df(timing=False, object_id='auto')
    assert df.shape == (12, 7)
    for col in ('sentiment_magnitude', 'text'):
        assert col in df.columns
    # The document-level row comes last and should be roughly neutral.
    assert -0.3 < df['sentiment_score'][11] < 0.3
    # Row 7 is a strongly positive sentence with known character offsets.
    assert df['begin_char_index'][7] == 565.0
    assert df['end_char_index'][7] == 672.0
    assert df['sentiment_magnitude'][7] > 0.6
    assert df['sentiment_score'][7] > 0.6
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_language_api_syntax_extractor():
    """Syntax analysis should expose POS tags, morphology, lemmas, and
    dependency information for individual tokens."""
    verify_dependencies(['googleapiclient'])
    extractor = GoogleLanguageAPISyntaxExtractor()
    stim = TextStim(join(TEXT_DIR, 'sample_text_with_entities.txt'))
    tokens = extractor.transform(stim).to_df(timing=False, object_id='auto')
    assert tokens.shape == (32, 20)

    def rows_for(word):
        # All token rows whose surface text matches `word`.
        return tokens[tokens['text'] == word]

    his = rows_for('his')
    assert (his['person'] == 'THIRD').all()
    assert (his['gender'] == 'MASCULINE').all()
    assert (his['case'] == 'GENITIVE').all()
    their = rows_for('their')
    assert (their['person'] == 'THIRD').all()
    assert (their['number'] == 'PLURAL').all()
    love = rows_for('love')
    assert (love['tag'] == 'VERB').all()
    assert (love['mood'] == 'INDICATIVE').all()
    headquartered = rows_for('headquartered')
    assert (headquartered['tense'] == 'PAST').all()
    assert (headquartered['lemma'] == 'headquarter').all()
    google = rows_for('Google')
    assert (google['proper'] == 'PROPER').all()
    assert (google['tag'] == 'NOUN').all()
    assert (google['dependency_label'] == 'NSUBJ').all()
    assert (google['dependency_headTokenIndex'] == 7).all()
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_language_api_category_extractor():
    """Text classification should assign tech and news categories to the
    sample document and detect English."""
    verify_dependencies(['googleapiclient'])
    extractor = GoogleLanguageAPITextCategoryExtractor()
    doc = TextStim(join(TEXT_DIR, 'sample_text_with_entities.txt'))
    categories = extractor.transform(doc).to_df(timing=False, object_id='auto')
    assert categories.shape == (1, 4)
    for label in ('category_/Computers & Electronics', 'category_/News'):
        assert label in categories.columns
        assert categories[label][0] > 0.3
    assert categories['language'][0] == 'en'
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_language_api_entity_sentiment_extractor():
    """Entity-sentiment extraction mirrors the entity extractor's output
    but adds per-entity sentiment columns."""
    verify_dependencies(['googleapiclient'])
    extractor = GoogleLanguageAPIEntitySentimentExtractor()
    doc = TextStim(join(TEXT_DIR, 'sample_text_with_entities.txt'))
    df = extractor.transform(doc).to_df(timing=False, object_id='auto')
    # Produces same result as entity extractor with sentiment columns
    assert df.shape == (10, 11)
    ninth = df.iloc[8]
    assert ninth['text'] == 'phones'
    assert ninth['type'] == 'CONSUMER_GOOD'
    assert 'sentiment_score' in df.columns
    assert ninth['sentiment_score'] > 0.6  # 'love their ... phones'
from __future__ import absolute_import
import os
import sys
import pytest
from mock import MagicMock, patch, mock_open
FILE_DIR = os.path.dirname(os.path.realpath(__file__))
#FIXTURES_DIR = os.path.join(FILE_DIR, "fixtures")
REPO_DIR = os.path.join(FILE_DIR, "..", "..")
# Add environ.py into path for testing
sys.path.append(os.path.join(REPO_DIR, "inventory"))
import environ
@pytest.mark.parametrize(("regex", "result"),
                         [
                             (r"(FOOBAR)", {"foobar": "123"}),
                             (r"^FOO(.*)", {"bar": "123"}),
                         ]
                         )
def test_getVars(regex, result):
    '''
    This method makes the assumption that there will always be a group(1),
    So if doing an exact string match, for now group the entire string
    '''
    fake_env = {"FOOBAR": "123", "BARFOO": "456"}
    with patch("os.environ", new=fake_env):
        matched = environ.getVars(regex)
    assert matched == result
@pytest.mark.skip(reason="TODO")
def test_getSplunkInventory():
    # Placeholder: environ.getSplunkInventory() is not covered yet.
    pass
@patch('environ.loadDefaults', return_value={"splunk": {"http_port": 8000, "build_location": None}})
@patch('environ.overrideEnvironmentVars')
@patch('environ.getSecrets')
@patch('environ.getHEC')
def test_getDefaultVars(mock_getHEC, mock_getSecrets, mock_overrideEnvironmentVars, mock_loadDefaults):
    '''
    Unit test for getting our default variables.

    Stacked @patch decorators are applied bottom-up, so mock arguments
    must be declared innermost-first: getHEC, getSecrets,
    overrideEnvironmentVars, loadDefaults. The previous parameter names
    were listed in the opposite order, mislabeling every mock; that was
    harmless only because the mocks were unused, but it would silently
    misdirect any future per-mock assertion.
    '''
    retval = environ.getDefaultVars()
    # loadDefaults is mocked to seed a "splunk" key, which must survive
    # the remaining (mocked-out) processing steps.
    assert "splunk" in retval
@pytest.mark.parametrize(("default_yml", "os_env", "output"),
    [
        # Check null parameters
        ({}, {}, {"opt": None, "home": None, "exec": None, "pid": None}),
        # Check default.yml parameters
        ({"opt": "/opt"}, {}, {"opt": "/opt", "home": None, "exec": None, "pid": None}),
        ({"home": "/tmp/splunk"}, {}, {"opt": None, "home": "/tmp/splunk", "exec": None, "pid": None}),
        ({"exec": "/opt/splunk/bin/splunk"}, {}, {"opt": None, "home": None, "exec": "/opt/splunk/bin/splunk", "pid": None}),
        ({"pid": "/splunk.pid"}, {}, {"opt": None, "home": None, "exec": None, "pid": "/splunk.pid"}),
        # Check environment variable parameters
        ({}, {"SPLUNK_OPT": "/home/"}, {"opt": "/home/", "home": None, "exec": None, "pid": None}),
        ({}, {"SPLUNK_HOME": "/home/"}, {"opt": None, "home": "/home/", "exec": None, "pid": None}),
        ({}, {"SPLUNK_EXEC": "/home/splunk.exe"}, {"opt": None, "home": None, "exec": "/home/splunk.exe", "pid": None}),
        ({}, {"SPLUNK_PID": "/home/splunk.pid"}, {"opt": None, "home": None, "exec": None, "pid": "/home/splunk.pid"}),
        # Check the union combination of default.yml + environment variables and order of precedence when overwriting
        ({"opt": "/home"}, {"SPLUNK_OPT": "/opt"}, {"opt": "/opt", "home": None, "exec": None, "pid": None}),
        ({"home": "/tmp/splunk"}, {"SPLUNK_HOME": "/opt/splunk"}, {"opt": None, "home": "/opt/splunk", "exec": None, "pid": None}),
        ({"exec": "/bin/splunk"}, {"SPLUNK_EXEC": "/opt/splunk/bin/splunk"}, {"opt": None, "home": None, "exec": "/opt/splunk/bin/splunk", "pid": None}),
        ({"pid": "/splunk.pid"}, {"SPLUNK_PID": "/opt/splunk/splunk.pid"}, {"opt": None, "home": None, "exec": None, "pid": "/opt/splunk/splunk.pid"}),
    ]
)
def test_getSplunkPaths(default_yml, os_env, output):
    """getSplunkPaths() should merge default.yml values with SPLUNK_*
    environment variables; the environment takes precedence. The result
    is written in-place into vars_scope["splunk"]."""
    vars_scope = {"splunk": default_yml}
    with patch("os.environ", new=os_env):
        environ.getSplunkPaths(vars_scope)
    assert type(vars_scope["splunk"]) == dict
    assert vars_scope["splunk"] == output
@pytest.mark.parametrize(("default_yml", "os_env", "output"),
    [
        # Check null parameters
        ({}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1, "search_factor": 1}),
        # Check default.yml parameters
        ({"idxc": {}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1, "search_factor": 1}),
        ({"idxc": {"label": None}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1, "search_factor": 1}),
        ({"idxc": {"label": "1234"}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": "1234", "secret": None, "replication_factor": 1, "search_factor": 1}),
        ({"idxc": {"secret": None}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1, "search_factor": 1}),
        ({"idxc": {"secret": "1234"}}, {}, {"pass4SymmKey": "1234", "discoveryPass4SymmKey": "1234", "label": None, "secret": "1234", "replication_factor": 1, "search_factor": 1}),
        ({"idxc": {"pass4SymmKey": None}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1, "search_factor": 1}),
        ({"idxc": {"pass4SymmKey": "1234"}}, {}, {"pass4SymmKey": "1234", "discoveryPass4SymmKey": "1234", "label": None, "secret": "1234", "replication_factor": 1, "search_factor": 1}),
        ({"idxc": {"discoveryPass4SymmKey": None}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1, "search_factor": 1}),
        ({"idxc": {"discoveryPass4SymmKey": "1234"}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": "1234", "label": None, "secret": None, "replication_factor": 1, "search_factor": 1}),
        # Search factor should never exceed replication factor
        ({"idxc": {"replication_factor": 0, "search_factor": 2}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 0, "search_factor": 0}),
        ({"idxc": {"replication_factor": 1, "search_factor": 3}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1, "search_factor": 1}),
        ({"idxc": {"replication_factor": "2", "search_factor": 3}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 2, "search_factor": 2}),
        # This should return replication_factor=2 because there are only 2 hosts in the "splunk_indexer" group
        ({"idxc": {"replication_factor": 3, "search_factor": 1}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 2, "search_factor": 1}),
        # Check environment variable parameters
        ({}, {"SPLUNK_IDXC_LABEL": ""}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": "", "secret": None, "replication_factor": 1, "search_factor": 1}),
        ({}, {"SPLUNK_IDXC_LABEL": "abcd"}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": "abcd", "secret": None, "replication_factor": 1, "search_factor": 1}),
        ({}, {"SPLUNK_IDXC_SECRET": ""}, {"pass4SymmKey": "", "discoveryPass4SymmKey": "", "label": None, "secret": "", "replication_factor": 1, "search_factor": 1}),
        ({}, {"SPLUNK_IDXC_SECRET": "abcd"}, {"pass4SymmKey": "abcd", "discoveryPass4SymmKey": "abcd", "label": None, "secret": "abcd", "replication_factor": 1, "search_factor": 1}),
        ({}, {"SPLUNK_IDXC_PASS4SYMMKEY": "abcd"}, {"pass4SymmKey": "abcd", "discoveryPass4SymmKey": "abcd", "label": None, "secret": "abcd", "replication_factor": 1, "search_factor": 1}),
        ({}, {"SPLUNK_IDXC_REPLICATION_FACTOR": "1"}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1, "search_factor": 1}),
        ({}, {"SPLUNK_IDXC_REPLICATION_FACTOR": 2, "SPLUNK_IDXC_SEARCH_FACTOR": "1"}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 2, "search_factor": 1}),
        ({}, {"SPLUNK_IDXC_DISCOVERYPASS4SYMMKEY": "qwerty"}, {"pass4SymmKey": None, "discoveryPass4SymmKey": "qwerty", "label": None, "secret": None, "replication_factor": 1, "search_factor": 1}),
        # Check the union combination of default.yml + environment variables and order of precedence when overwriting
        ({"idxc": {"label": "1234"}}, {"SPLUNK_IDXC_LABEL": "abcd"}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": "abcd", "secret": None, "replication_factor": 1, "search_factor": 1}),
        ({"idxc": {"secret": "abcd"}}, {"SPLUNK_IDXC_SECRET": "1234"}, {"pass4SymmKey": "1234", "discoveryPass4SymmKey": "1234", "label": None, "secret": "1234", "replication_factor": 1, "search_factor": 1}),
        ({"idxc": {"pass4SymmKey": "1234"}}, {"SPLUNK_IDXC_PASS4SYMMKEY": "abcd"}, {"pass4SymmKey": "abcd", "discoveryPass4SymmKey": "abcd", "label": None, "secret": "abcd", "replication_factor": 1, "search_factor": 1}),
        ({"idxc": {"pass4SymmKey": "1234", "discoveryPass4SymmKey": "7890"}}, {"SPLUNK_IDXC_PASS4SYMMKEY": "abcd"}, {"pass4SymmKey": "abcd", "discoveryPass4SymmKey": "7890", "label": None, "secret": "abcd", "replication_factor": 1, "search_factor": 1}),
        ({"idxc": {"pass4SymmKey": "1234", "discoveryPass4SymmKey": "7890"}}, {"SPLUNK_IDXC_DISCOVERYPASS4SYMMKEY": "zxcv", "SPLUNK_IDXC_PASS4SYMMKEY": "abcd"}, {"pass4SymmKey": "abcd", "discoveryPass4SymmKey": "zxcv", "label": None, "secret": "abcd", "replication_factor": 1, "search_factor": 1}),
        ({"idxc": {"secret": "abcd"}}, {"SPLUNK_IDXC_SECRET": "1234"}, {"pass4SymmKey": "1234", "discoveryPass4SymmKey": "1234", "label": None, "secret": "1234", "replication_factor": 1, "search_factor": 1}),
        ({"idxc": {"replication_factor": 3, "search_factor": 3}}, {"SPLUNK_IDXC_REPLICATION_FACTOR": 2}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 2, "search_factor": 2}),
        ({"idxc": {"replication_factor": 2, "search_factor": 2}}, {"SPLUNK_IDXC_SEARCH_FACTOR": 1}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 2, "search_factor": 1}),
    ]
)
def test_getIndexerClustering(default_yml, os_env, output):
    """getIndexerClustering() should merge idxc settings from default.yml
    and SPLUNK_IDXC_* environment variables (env wins), cap both factors
    at the size of the mocked 2-host "splunk_indexer" group, and never
    let search_factor exceed replication_factor."""
    vars_scope = {"splunk": default_yml}
    # Mock a 2-host indexer group so factor capping is exercised.
    with patch("environ.inventory", {"splunk_indexer": {"hosts": ["a", "b"]}}) as mock_inven:
        with patch("os.environ", new=os_env):
            environ.getIndexerClustering(vars_scope)
            assert type(vars_scope["splunk"]["idxc"]) == dict
            assert vars_scope["splunk"]["idxc"] == output
@pytest.mark.parametrize(("default_yml", "os_env", "output"),
    [
        # Check null parameters
        ({}, {}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1}),
        # Check default.yml parameters
        ({"shc": {}}, {}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1}),
        ({"shc": {"label": None}}, {}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1}),
        ({"shc": {"label": "1234"}}, {}, {"pass4SymmKey": None, "label": "1234", "secret": None, "replication_factor": 1}),
        ({"shc": {"secret": None}}, {}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1}),
        ({"shc": {"secret": "1234"}}, {}, {"pass4SymmKey": "1234", "label": None, "secret": "1234", "replication_factor": 1}),
        ({"shc": {"pass4SymmKey": None}}, {}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1}),
        ({"shc": {"pass4SymmKey": "1234"}}, {}, {"pass4SymmKey": "1234", "label": None, "secret": "1234", "replication_factor": 1}),
        ({"shc": {"replication_factor": 0}}, {}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 0}),
        ({"shc": {"replication_factor": 1}}, {}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1}),
        ({"shc": {"replication_factor": "2"}}, {}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 2}),
        # This should return replication_factor=2 because there are only 2 hosts in the "splunk_search_head" group
        ({"shc": {"replication_factor": 3}}, {}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 2}),
        # Check environment variable parameters
        ({}, {"SPLUNK_SHC_LABEL": ""}, {"pass4SymmKey": None, "label": "", "secret": None, "replication_factor": 1}),
        ({}, {"SPLUNK_SHC_LABEL": "abcd"}, {"pass4SymmKey": None,"label": "abcd", "secret": None, "replication_factor": 1}),
        ({}, {"SPLUNK_SHC_SECRET": ""}, {"pass4SymmKey": "", "label": None, "secret": "", "replication_factor": 1}),
        ({}, {"SPLUNK_SHC_SECRET": "abcd"}, {"pass4SymmKey": "abcd", "label": None, "secret": "abcd", "replication_factor": 1}),
        ({}, {"SPLUNK_SHC_PASS4SYMMKEY": "abcd"}, {"pass4SymmKey": "abcd", "label": None, "secret": "abcd", "replication_factor": 1}),
        ({}, {"SPLUNK_SHC_REPLICATION_FACTOR": "2"}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 2}),
        # Check the union combination of default.yml + environment variables and order of precedence when overwriting
        ({"shc": {"label": "1234"}}, {"SPLUNK_SHC_LABEL": "abcd"}, {"pass4SymmKey": None, "label": "abcd", "secret": None, "replication_factor": 1}),
        ({"shc": {"secret": "abcd"}}, {"SPLUNK_SHC_SECRET": "1234"}, {"pass4SymmKey": "1234", "label": None, "secret": "1234", "replication_factor": 1}),
        ({"shc": {"pass4SymmKey": "1234"}}, {"SPLUNK_SHC_PASS4SYMMKEY": "abcd"}, {"pass4SymmKey": "abcd", "label": None, "secret": "abcd", "replication_factor": 1}),
        ({"shc": {"replication_factor": 2}}, {"SPLUNK_SHC_REPLICATION_FACTOR": "1"}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1}),
    ]
)
def test_getSearchHeadClustering(default_yml, os_env, output):
    """getSearchHeadClustering() should merge shc settings from default.yml
    and SPLUNK_SHC_* environment variables (env wins), capping the
    replication factor at the size of the mocked 2-host
    "splunk_search_head" group."""
    vars_scope = {"splunk": default_yml}
    # Mock a 2-host search-head group so factor capping is exercised.
    with patch("environ.inventory", {"splunk_search_head": {"hosts": ["a", "b"]}}) as mock_inven:
        with patch("os.environ", new=os_env):
            environ.getSearchHeadClustering(vars_scope)
            assert type(vars_scope["splunk"]["shc"]) == dict
            assert vars_scope["splunk"]["shc"] == output
@pytest.mark.skip(reason="TODO")
def test_getMultisite():
    # Placeholder: environ.getMultisite() is not covered yet.
    pass
@pytest.mark.skip(reason="TODO")
def test_getSplunkWebSSL():
    # Placeholder: environ.getSplunkWebSSL() is not covered yet.
    pass
@pytest.mark.parametrize(("default_yml", "os_env", "output"),
    [
        # Check null parameters
        ({}, {}, {"ca": None, "cert": None, "password": None, "enable": True}),
        ({"does-not-exist": True}, {}, {"ca": None, "cert": None, "password": None, "enable": True}),
        # Check default.yml parameters
        ({"ssl": {"enable": False}}, {}, {"ca": None, "cert": None, "password": None, "enable": False}),
        ({"ssl": {"ca": "hi"}}, {}, {"ca": "hi", "cert": None, "password": None, "enable": True}),
        ({"ssl": {"cert": "hi"}}, {}, {"ca": None, "cert": "hi", "password": None, "enable": True}),
        ({"ssl": {"password": "hi"}}, {}, {"ca": None, "cert": None, "password": "hi", "enable": True}),
        ({"ssl": {"ca": "aaa", "cert": "bbb", "password": "<PASSWORD>", "enable": False}}, {}, {"ca": "aaa", "cert": "bbb", "password": "<PASSWORD>", "enable": False}),
        # Check environment variable parameters
        ({}, {"SPLUNKD_SSL_CA": "hi"}, {"ca": "hi", "cert": None, "password": None, "enable": True}),
        ({}, {"SPLUNKD_SSL_CERT": "hi"}, {"ca": None, "cert": "hi", "password": None, "enable": True}),
        ({}, {"SPLUNKD_SSL_PASSWORD": "hi"}, {"ca": None, "cert": None, "password": "hi", "enable": True}),
        ({}, {"SPLUNKD_SSL_ENABLE": "true"}, {"ca": None, "cert": None, "password": None, "enable": True}),
        ({}, {"SPLUNKD_SSL_ENABLE": "false"}, {"ca": None, "cert": None, "password": None, "enable": False}),
        ({}, {"SPLUNKD_SSL_ENABLE": "False"}, {"ca": None, "cert": None, "password": None, "enable": False}),
        # Check the union combination of default.yml + environment variables and order of precedence when overwriting
        ({"ssl": {"ca": "value1"}}, {"SPLUNKD_SSL_CA": "value2"}, {"ca": "value2", "cert": None, "password": None, "enable": True}),
        ({"ssl": {"cert": "value1"}}, {"SPLUNKD_SSL_CERT": "value2"}, {"ca": None, "cert": "value2", "password": None, "enable": True}),
        ({"ssl": {"password": "<PASSWORD>"}}, {"SPLUNKD_SSL_PASSWORD": "<PASSWORD>"}, {"ca": None, "cert": None, "password": "<PASSWORD>", "enable": True}),
        ({}, {"SPLUNKD_SSL_ENABLE": "true"}, {"ca": None, "cert": None, "password": None, "enable": True}),
        ({}, {"SPLUNKD_SSL_ENABLE": "false"}, {"ca": None, "cert": None, "password": None, "enable": False}),
        ({"ssl": {"enable": True}}, {"SPLUNKD_SSL_ENABLE": "FALSE"}, {"ca": None, "cert": None, "password": None, "enable": False}),
        ({"ssl": {"enable": True}}, {"SPLUNKD_SSL_ENABLE": "FaLsE"}, {"ca": None, "cert": None, "password": None, "enable": False}),
        ({"ssl": {"enable": False}}, {"SPLUNKD_SSL_ENABLE": ""}, {"ca": None, "cert": None, "password": None, "enable": False}),
    ]
)
def test_getSplunkdSSL(default_yml, os_env, output):
    """getSplunkdSSL() should merge ssl settings from default.yml and
    SPLUNKD_SSL_* environment variables (env wins); the 'enable' flag is
    parsed case-insensitively from the env string.

    NOTE(review): '<PASSWORD>' entries look like redaction placeholders
    left by data scrubbing -- confirm against the canonical test suite.
    """
    vars_scope = {"splunk": default_yml}
    with patch("os.environ", new=os_env):
        environ.getSplunkdSSL(vars_scope)
    assert type(vars_scope["splunk"]) == dict
    assert type(vars_scope["splunk"]["ssl"]) == dict
    assert vars_scope["splunk"]["ssl"] == output
@pytest.mark.parametrize(("default_yml", "os_env", "output"),
    [
        # Check null parameters - Splunk password is required
        ({"password": "<PASSWORD>"}, {}, {"password": "<PASSWORD>", "declarative_admin_password": False, "pass4SymmKey": None, "secret": None}),
        # Check default.yml parameters
        ({"password": "<PASSWORD>", "pass4SymmKey": "you-will-never-guess", "secret": None}, {}, {"password": "<PASSWORD>", "declarative_admin_password": False, "pass4SymmKey": "you-will-never-guess", "secret": None}),
        ({"password": "<PASSWORD>", "pass4SymmKey": "you-will-never-guess", "secret": "1234"}, {}, {"password": "<PASSWORD>", "declarative_admin_password": False, "pass4SymmKey": "you-will-never-guess", "secret": "1234"}),
        ({"password": "<PASSWORD>", "secret": "1234"}, {}, {"password": "<PASSWORD>", "declarative_admin_password": False, "pass4SymmKey": None, "secret": "1234"}),
        ({"password": "<PASSWORD>", "declarative_admin_password": True, "pass4SymmKey": "you-will-never-guess", "secret": None}, {}, {"password": "<PASSWORD>", "declarative_admin_password": True, "pass4SymmKey": "you-will-never-guess", "secret": None}),
        ({"password": "<PASSWORD>", "declarative_admin_password": True, "pass4SymmKey": "you-will-never-guess", "secret": "1234"}, {}, {"password": "<PASSWORD>", "declarative_admin_password": True, "pass4SymmKey": "you-will-never-guess", "secret": "1234"}),
        ({"password": "<PASSWORD>", "declarative_admin_password": True, "secret": "1234"}, {}, {"password": "<PASSWORD>", "declarative_admin_password": True, "pass4SymmKey": None, "secret": "1234"}),
        # Check environment variable parameters
        ({"password": None}, {"SPLUNK_PASSWORD": "<PASSWORD>", "SPLUNK_PASS4SYMMKEY": "you-will-never-guess"}, {"password": "<PASSWORD>", "declarative_admin_password": False, "pass4SymmKey": "you-will-never-guess", "secret": None}),
        ({"password": None}, {"SPLUNK_PASSWORD": "<PASSWORD>", "SPLUNK_PASS4SYMMKEY": "you-will-never-guess", "SPLUNK_SECRET": "1234"}, {"password": "<PASSWORD>", "declarative_admin_password": False, "pass4SymmKey": "you-will-never-guess", "secret": "1234"}),
        ({"password": None}, {"SPLUNK_PASSWORD": "<PASSWORD>", "SPLUNK_SECRET": "1234"}, {"password": "<PASSWORD>", "declarative_admin_password": False, "pass4SymmKey": None, "secret": "1234"}),
        ({"password": None}, {"SPLUNK_PASSWORD": "<PASSWORD>", "SPLUNK_DECLARATIVE_ADMIN_PASSWORD": "true", "SPLUNK_PASS4SYMMKEY": "you-will-never-guess"}, {"password": "<PASSWORD>", "declarative_admin_password": True, "pass4SymmKey": "you-will-never-guess", "secret": None}),
        ({"password": None}, {"SPLUNK_PASSWORD": "<PASSWORD>", "SPLUNK_DECLARATIVE_ADMIN_PASSWORD": "TRUE", "SPLUNK_PASS4SYMMKEY": "you-will-never-guess", "SPLUNK_SECRET": "1234"}, {"password": "<PASSWORD>", "declarative_admin_password": True, "pass4SymmKey": "you-will-never-guess", "secret": "1234"}),
        # We currently don't support 'yes' as a valid boolean
        ({"password": None}, {"SPLUNK_PASSWORD": "<PASSWORD>", "SPLUNK_DECLARATIVE_ADMIN_PASSWORD": "yes", "SPLUNK_SECRET": "1234"}, {"password": "<PASSWORD>", "declarative_admin_password": False, "pass4SymmKey": None, "secret": "1234"})
    ]
)
def test_getSecrets(default_yml, os_env, output):
    """getSecrets() should resolve password/pass4SymmKey/secret from
    default.yml and SPLUNK_* environment variables, treating only
    'true'/'TRUE' (not 'yes') as a truthy declarative_admin_password.

    NOTE(review): '<PASSWORD>' entries look like redaction placeholders
    left by data scrubbing -- confirm against the canonical test suite.
    """
    vars_scope = {"splunk": default_yml}
    with patch("environ.inventory") as mock_inven:
        with patch("os.environ", new=os_env):
            with patch("environ.os.path") as mock_os_path:
                # isfile() returns False so the password is treated as a
                # literal value rather than a path to a password file.
                mock_os_path.isfile = MagicMock()
                mock_os_path.isfile.return_value = False
                environ.getSecrets(vars_scope)
                assert vars_scope["splunk"] == output
@pytest.mark.parametrize(("default_yml", "os_env", "output"),
    [
        # Check when Splunk password is a file
        ({"password": "/<PASSWORD>"}, {}, {"password": "<PASSWORD>", "pass4SymmKey": None, "secret": None}),
        ({"password": "<PASSWORD>"}, {"SPLUNK_PASSWORD": "/<PASSWORD>"}, {"password": "<PASSWORD>", "pass4SymmKey": None, "secret": None}),
    ]
)
def test_getSecrets_passwordFromFile(default_yml, os_env, output):
    """When the configured password is a path to an existing file,
    getSecrets() should read the password from that file instead of
    using the path string itself.

    NOTE(review): '<PASSWORD>' values look like redaction placeholders
    left by data scrubbing (the expected value presumably matched the
    mocked file contents) -- confirm against the canonical test suite.
    """
    file_contents = """
worldneversayshiback
"""
    # Mock open() so the "password file" read returns our fixed contents.
    m = mock_open(read_data=file_contents)
    vars_scope = {"splunk": default_yml}
    with patch("environ.open", m, create=True) as mopen:
        with patch("environ.inventory") as mock_inven:
            with patch("os.environ", new=os_env):
                with patch("os.path") as mock_os_path:
                    # Make sure that the isfile() check returns True
                    mock_os_path.isfile = MagicMock()
                    mock_os_path.isfile.return_value = True
                    environ.getSecrets(vars_scope)
                    mopen.assert_called_once()
                    assert vars_scope["splunk"]["password"] == "<PASSWORD>"
@pytest.mark.parametrize(("default_yml"),
    [
        # Check null parameters
        ({}),
        ({"password": None}),
        ({"password": ""})
    ]
)
def test_noSplunkPassword(default_yml):
    """Verify getSecrets() raises when no admin password is supplied anywhere."""
    vars_scope = {"splunk": default_yml}
    with patch("environ.inventory"), patch("os.environ", new={}):
        with pytest.raises(Exception) as exc:
            environ.getSecrets(vars_scope)
    assert "Splunk password must be supplied!" in str(exc.value)
@pytest.mark.parametrize(("default_yml", "os_env", "output"),
    [
        # Check null parameters
        ({}, {}, {"launch": {}}),
        # Check default.yml parameters
        ({"launch": {}}, {}, {"launch": {}}),
        ({"launch": {"A": "B"}}, {}, {"launch": {"A": "B"}}),
        ({"launch": {"A": "B", "C": "D"}}, {}, {"launch": {"A": "B", "C": "D"}}),
        # Check environment variable parameters
        ({}, {"SPLUNK_LAUNCH_CONF": None}, {"launch": {}}),
        ({}, {"SPLUNK_LAUNCH_CONF": ""}, {"launch": {}}),
        ({}, {"SPLUNK_LAUNCH_CONF": "AAA=BBB"}, {"launch": {"AAA": "BBB"}}),
        ({}, {"SPLUNK_LAUNCH_CONF": "AAA=BBB,CCC=DDD"}, {"launch": {"AAA": "BBB", "CCC": "DDD"}}),
        ({}, {"SPLUNK_LAUNCH_CONF": "AAA=BBB=CCC,DDD=EEE=FFF"}, {"launch": {"AAA": "BBB=CCC", "DDD": "EEE=FFF"}}),
        # Check both
        ({"launch": {"A": "B", "C": "D"}}, {"SPLUNK_LAUNCH_CONF": "A=E,C=D"}, {"launch": {"A": "E", "C": "D"}}),
    ]
)
def test_getLaunchConf(default_yml, os_env, output):
    """Verify getLaunchConf() merges splunk-launch.conf settings from default.yml and env vars."""
    vars_scope = {"splunk": default_yml}
    with patch("environ.inventory"), patch("os.environ", new=os_env):
        environ.getLaunchConf(vars_scope)
    assert vars_scope["splunk"] == output
@pytest.mark.parametrize(("value", "separator", "output"),
    [
        # Check null value
        (None, ",", []),
        # Check empty value
        ("", ",", []),
        # Check string value
        ("a", ",", ["a"]),
        # Check comma separated string value
        ("a,b,c", ",", ["a", "b", "c"]),
        # Check list value
        (["a"], ",", ["a"]),
        (["a", "b", "c"], ",", ["a", "b", "c"])
    ]
)
def test_ensureListValue(value, separator, output):
    """Verify ensureListValue() normalizes None, str, and list inputs into a list."""
    assert environ.ensureListValue(value, separator) == output
@pytest.mark.parametrize(("value", "separator", "output"),
    [
        # Check null value
        (None, ",", []),
        # Check empty value
        ("", ",", []),
        # Check string value
        ("a", ",", ["a"]),
        # Check comma separated string value
        ("a,b,c", ",", ["a", "b", "c"]),
        # Check comma separated string value with whitespaces
        (" a, b,c ", ",", ["a", "b", "c"]),
    ]
)
def test_splitAndStrip(value, separator, output):
    """Verify splitAndStrip() splits on the separator and trims surrounding whitespace."""
    assert environ.splitAndStrip(value, separator) == output
@pytest.mark.parametrize(("default_yml", "os_env", "output"),
    [
        # Check null parameters
        ({}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {}}),
        # Check ansible_pre_tasks using defaults or env vars
        ({"ansible_pre_tasks": ""}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {}}),
        ({"ansible_pre_tasks": None}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {}}),
        ({"ansible_pre_tasks": "a"}, {}, {"ansible_pre_tasks": ["a"], "ansible_post_tasks": [], "ansible_environment": {}}),
        ({"ansible_pre_tasks": ["a"]}, {}, {"ansible_pre_tasks": ["a"], "ansible_post_tasks": [], "ansible_environment": {}}),
        ({"ansible_pre_tasks": "a,b,c"}, {}, {"ansible_pre_tasks": ["a","b","c"], "ansible_post_tasks": [], "ansible_environment": {}}),
        ({"ansible_pre_tasks": ["a","b","c"]}, {}, {"ansible_pre_tasks": ["a","b","c"], "ansible_post_tasks": [], "ansible_environment": {}}),
        ({}, {"SPLUNK_ANSIBLE_PRE_TASKS": "d"}, {"ansible_pre_tasks": ["d"], "ansible_post_tasks": [], "ansible_environment": {}}),
        ({}, {"SPLUNK_ANSIBLE_PRE_TASKS": "e,f,g"}, {"ansible_pre_tasks": ["e","f","g"], "ansible_post_tasks": [], "ansible_environment": {}}),
        ({"ansible_pre_tasks": "a,b,c"}, {"SPLUNK_ANSIBLE_PRE_TASKS": "e,f,g"}, {"ansible_pre_tasks": ["e","f","g"], "ansible_post_tasks": [], "ansible_environment": {}}),
        ({"ansible_pre_tasks": ["a","b","c"]}, {"SPLUNK_ANSIBLE_PRE_TASKS": "e,f,g"}, {"ansible_pre_tasks": ["e","f","g"], "ansible_post_tasks": [], "ansible_environment": {}}),
        # Check ansible_post_tasks using defaults or env vars
        ({"ansible_post_tasks": ""}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {}}),
        ({"ansible_post_tasks": None}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {}}),
        ({"ansible_post_tasks": "a"}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": ["a"], "ansible_environment": {}}),
        ({"ansible_post_tasks": ["a"]}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": ["a"], "ansible_environment": {}}),
        ({"ansible_post_tasks": "a,b,c"}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": ["a","b","c"], "ansible_environment": {}}),
        ({"ansible_post_tasks": ["a","b","c"]}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": ["a","b","c"], "ansible_environment": {}}),
        ({}, {"SPLUNK_ANSIBLE_POST_TASKS": "d"}, {"ansible_pre_tasks": [], "ansible_post_tasks": ["d"], "ansible_environment": {}}),
        ({}, {"SPLUNK_ANSIBLE_POST_TASKS": "e,f,g"}, {"ansible_pre_tasks": [], "ansible_post_tasks": ["e","f","g"], "ansible_environment": {}}),
        ({"ansible_post_tasks": "a,b,c"}, {"SPLUNK_ANSIBLE_POST_TASKS": "e,f,g"}, {"ansible_pre_tasks": [], "ansible_post_tasks": ["e","f","g"], "ansible_environment": {}}),
        ({"ansible_post_tasks": ["a","b","c"]}, {"SPLUNK_ANSIBLE_POST_TASKS": "e,f,g"}, {"ansible_pre_tasks": [], "ansible_post_tasks": ["e","f","g"], "ansible_environment": {}}),
        # Check ansible_environment using defaults or env vars
        ({"ansible_environment": None}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {}}),
        ({"ansible_environment": {"a": "b"}}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {"a": "b"}}),
        ({"ansible_environment": {"a": "b", "d": "e"}}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {"a": "b", "d": "e"}}),
        ({}, {"SPLUNK_ANSIBLE_ENV": "a=b"}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {"a": "b"}}),
        ({}, {"SPLUNK_ANSIBLE_ENV": "a=b,x=y"}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {"a": "b", "x": "y"}}),
        ({"ansible_environment": {"a": "c", "d": "e"}}, {"SPLUNK_ANSIBLE_ENV": "a=b,x=y"}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {"a": "b", "d": "e", "x": "y"}}),
    ]
)
def test_getAnsibleContext(default_yml, os_env, output):
    """Verify getAnsibleContext() resolves pre/post tasks and ansible_environment."""
    vars_scope = default_yml
    with patch("environ.inventory"), patch("os.environ", new=os_env):
        environ.getAnsibleContext(vars_scope)
    assert vars_scope == output
@pytest.mark.parametrize(("default_yml", "os_env", "splunk_asan"),
    [
        # Check null parameters
        ({}, {}, False),
        # Check default.yml parameters
        ({"asan": False}, {}, False),
        ({"asan": True}, {}, True),
        # Check env var parameters
        ({}, {"SPLUNK_ENABLE_ASAN": ""}, False),
        ({}, {"SPLUNK_ENABLE_ASAN": "anything"}, True),
        # Check both
        ({"asan": False}, {"SPLUNK_ENABLE_ASAN": ""}, False),
        ({"asan": True}, {"SPLUNK_ENABLE_ASAN": ""}, False),
        ({"asan": True}, {"SPLUNK_ENABLE_ASAN": "true"}, True),
        ({"asan": False}, {"SPLUNK_ENABLE_ASAN": "yes"}, True),
    ]
)
def test_getASan(default_yml, os_env, splunk_asan):
    """Verify getASan() resolves the asan flag and sets ASAN_OPTIONS when enabled.

    Fix: compare against None with ``is None`` instead of ``== None``.
    """
    vars_scope = {"ansible_environment": {}, "splunk": default_yml}
    with patch("environ.inventory"), patch("os.environ", new=os_env):
        environ.getASan(vars_scope)
    assert vars_scope["splunk"]["asan"] == splunk_asan
    if vars_scope["splunk"]["asan"]:
        # When ASan is on, leak detection is turned off via ASAN_OPTIONS
        assert vars_scope["ansible_environment"].get("ASAN_OPTIONS") == "detect_leaks=0"
    else:
        assert vars_scope["ansible_environment"].get("ASAN_OPTIONS") is None
@pytest.mark.parametrize(("default_yml", "os_env", "result"),
    [
        # Check null parameters
        ({}, {}, {"enable": True, "port": 8088, "token": None, "ssl": True}),
        # Check default.yml parameters
        ({"enable": False}, {}, {"enable": False, "port": 8088, "token": None, "ssl": True}),
        ({"port": 8099}, {}, {"enable": True, "port": 8099, "token": None, "ssl": True}),
        ({"token": "abcd"}, {}, {"enable": True, "port": 8088, "token": "abcd", "ssl": True}),
        ({"ssl": False}, {}, {"enable": True, "port": 8088, "token": None, "ssl": False}),
        # Check env var parameters
        ({}, {"SPLUNK_HEC_TOKEN": "<PASSWORD>"}, {"enable": True, "port": 8088, "token": "qw<PASSWORD>", "ssl": True}),
        ({}, {"SPLUNK_HEC_PORT": "9999"}, {"enable": True, "port": 9999, "token": None, "ssl": True}),
        ({}, {"SPLUNK_HEC_SSL": "true"}, {"enable": True, "port": 8088, "token": None, "ssl": True}),
        ({}, {"SPLUNK_HEC_SSL": "false"}, {"enable": True, "port": 8088, "token": None, "ssl": False}),
        ({}, {"SPLUNK_HEC_SSL": "FALSE"}, {"enable": True, "port": 8088, "token": None, "ssl": False}),
        # Check both
        ({"port": 8099}, {"SPLUNK_HEC_PORT": "19999"}, {"enable": True, "port": 19999, "token": None, "ssl": True}),
        ({"token": "abcd"}, {"SPLUNK_HEC_TOKEN": "<PASSWORD>"}, {"enable": True, "port": 8088, "token": "fdsa", "ssl": True}),
        ({"ssl": True}, {"SPLUNK_HEC_SSL": "fAlSe"}, {"enable": True, "port": 8088, "token": None, "ssl": False}),
    ]
)
def test_getHEC(default_yml, os_env, result):
    """Verify getHEC() resolves HEC enable/port/token/ssl settings."""
    hec_settings = {"enable": True, "port": 8088, "token": None, "ssl": True}
    hec_settings.update(default_yml)
    vars_scope = {"splunk": {"hec": hec_settings}}
    with patch("environ.inventory"), patch("os.environ", new=os_env):
        environ.getHEC(vars_scope)
    assert vars_scope["splunk"]["hec"] == result
@pytest.mark.parametrize(("default_yml", "os_env", "result"),
    [
        # Check null parameters
        ({}, {}, False),
        # # Check default.yml parameters
        ({"disable_popups": False}, {}, False),
        ({"disable_popups": True}, {}, True),
        # # Check env var parameters
        ({}, {"SPLUNK_DISABLE_POPUPS": "TRUE"}, True),
        ({}, {"SPLUNK_DISABLE_POPUPS": "true"}, True),
        ({}, {"SPLUNK_DISABLE_POPUPS": "True"}, True),
        ({}, {"SPLUNK_DISABLE_POPUPS": "false"}, False),
        ({}, {"SPLUNK_DISABLE_POPUPS": "False"}, False),
        ({}, {"SPLUNK_DISABLE_POPUPS": "FALSE"}, False),
        # # Check both
        ({"disable_popups": False}, {"SPLUNK_DISABLE_POPUPS": "TRUE"}, True),
        ({"disable_popups": False}, {"SPLUNK_DISABLE_POPUPS": "True"}, True),
        ({"disable_popups": True}, {"SPLUNK_DISABLE_POPUPS": "False"}, False),
        ({"disable_popups": True}, {"SPLUNK_DISABLE_POPUPS": "FALSE"}, False),
    ]
)
def test_getDisablePopups(default_yml, os_env, result):
    """Verify getDisablePopups() resolves the disable_popups boolean from yml/env."""
    vars_scope = {"splunk": default_yml}
    with patch("environ.inventory"), patch("os.environ", new=os_env):
        environ.getDisablePopups(vars_scope)
    assert vars_scope["splunk"]["disable_popups"] == result
@pytest.mark.parametrize(("default_yml", "os_env", "result"),
    [
        # Check null parameters
        ({}, {}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        # Check default.yml parameters
        ({"enable": True}, {}, {"enable": True, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({"server": "fwd.dsp.com:8888"}, {}, {"enable": False, "server": "fwd.dsp.com:8888", "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({"cert": "path/to/cert.pem"}, {}, {"enable": False, "server": None, "cert": "path/to/cert.pem", "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({"verify": True}, {}, {"enable": False, "server": None, "cert": None, "verify": True, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({"pipeline_name": "abcd"}, {}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": "abcd", "pipeline_desc": None, "pipeline_spec": None}),
        ({"pipeline_desc": "abcd"}, {}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": "abcd", "pipeline_spec": None}),
        ({"pipeline_spec": "abcd"}, {}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": "abcd"}),
        # Check env var parameters
        ({}, {"SPLUNK_DSP_SERVER": "fwd.dsp.com:9999"}, {"enable": False, "server": "fwd.dsp.com:9999", "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({}, {"SPLUNK_DSP_CERT": "crt.pem"}, {"enable": False, "server": None, "cert": "crt.pem", "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({}, {"SPLUNK_DSP_VERIFY": "yes"}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({}, {"SPLUNK_DSP_VERIFY": "true"}, {"enable": False, "server": None, "cert": None, "verify": True, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({}, {"SPLUNK_DSP_VERIFY": "TRUE"}, {"enable": False, "server": None, "cert": None, "verify": True, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({}, {"SPLUNK_DSP_ENABLE": "yes"}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({}, {"SPLUNK_DSP_ENABLE": "true"}, {"enable": True, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({}, {"SPLUNK_DSP_ENABLE": "TRUE"}, {"enable": True, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({}, {"SPLUNK_DSP_PIPELINE_NAME": "do"}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": "do", "pipeline_desc": None, "pipeline_spec": None}),
        ({}, {"SPLUNK_DSP_PIPELINE_DESC": "re"}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": "re", "pipeline_spec": None}),
        ({}, {"SPLUNK_DSP_PIPELINE_SPEC": "mi"}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": "mi"}),
        # Check both
        ({"enable": True}, {"SPLUNK_DSP_ENABLE": "false"}, {"enable": True, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({"enable": False}, {"SPLUNK_DSP_ENABLE": "true"}, {"enable": True, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({"server": "fwd.dsp.com:8888"}, {"SPLUNK_DSP_SERVER": "fwd.dsp.com:9999"}, {"enable": False, "server": "fwd.dsp.com:9999", "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({"cert": "path1/crt.pem"}, {"SPLUNK_DSP_CERT": "path2/cert.pem"}, {"enable": False, "server": None, "cert": "path2/cert.pem", "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({"verify": True}, {"SPLUNK_DSP_VERIFY": "false"}, {"enable": False, "server": None, "cert": None, "verify": True, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({"verify": False}, {"SPLUNK_DSP_VERIFY": "TRUE"}, {"enable": False, "server": None, "cert": None, "verify": True, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({"pipeline_name": "abcd"}, {"SPLUNK_DSP_PIPELINE_NAME": "xyz"}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": "xyz", "pipeline_desc": None, "pipeline_spec": None}),
        ({"pipeline_desc": "abcd"}, {"SPLUNK_DSP_PIPELINE_DESC": "xyz"}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": "xyz", "pipeline_spec": None}),
        ({"pipeline_spec": "abcd"}, {"SPLUNK_DSP_PIPELINE_SPEC": "xyz"}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": "xyz"}),
    ]
)
def test_getDSP(default_yml, os_env, result):
    """Verify getDSP() resolves DSP forwarding settings from default.yml and env vars."""
    dsp_settings = {
        "enable": False,
        "server": None,
        "cert": None,
        "verify": False,
        "pipeline_name": None,
        "pipeline_desc": None,
        "pipeline_spec": None,
    }
    dsp_settings.update(default_yml)
    vars_scope = {"splunk": {"dsp": dsp_settings}}
    with patch("environ.inventory"), patch("os.environ", new=os_env):
        environ.getDSP(vars_scope)
    assert vars_scope["splunk"]["dsp"] == result
@pytest.mark.parametrize(("es_enablement", "os_env", "result"),
    [
        (None, {}, ""),
        (None, {"SPLUNK_ES_SSL_ENABLEMENT":"strict"}, "--ssl_enablement strict"),
        ({"ssl_enablement":"auto"}, {}, "--ssl_enablement auto"),
        ({"ssl_enablement":"strict"}, {}, "--ssl_enablement strict"),
        ({"ssl_enablement":"ignore"}, {}, "--ssl_enablement ignore"),
        ({"ssl_enablement":"ignore"}, {"SPLUNK_ES_SSL_ENABLEMENT":"strict"}, "--ssl_enablement strict"),
        ({"ssl_enablement":"invalid"}, {}, "Exception")
    ]
)
def test_getESSplunkVariables(es_enablement, os_env, result):
    """Verify getESSplunkVariables() builds the --ssl_enablement argument.

    Fix: the original wrapped the success-path assertion inside try/except,
    so a failing assertion raised AssertionError, which the broad
    ``except Exception`` caught and misreported as an expected exception.
    The expected-exception and success paths are now separated explicitly.
    """
    vars_scope = {"splunk": {}}
    if es_enablement is not None:
        vars_scope["splunk"]["es"] = es_enablement
    with patch("environ.inventory"), patch("os.environ", new=os_env):
        if result == "Exception":
            # Invalid ssl_enablement values must raise
            with pytest.raises(Exception):
                environ.getESSplunkVariables(vars_scope)
        else:
            environ.getESSplunkVariables(vars_scope)
            assert vars_scope["es_ssl_enablement"] == result
@pytest.mark.parametrize(("os_env", "license_master_url", "deployer_url", "cluster_master_url", "search_head_captain_url"),
    [
        ({}, "", "", "", ""),
        # Check individual environment variables
        ({"SPLUNK_LICENSE_MASTER_URL": "something"}, "https://something:8089", "", "", ""),
        ({"SPLUNK_DEPLOYER_URL": "something"}, "", "something", "", ""),
        ({"SPLUNK_CLUSTER_MASTER_URL": "something"}, "", "", "something", ""),
        ({"SPLUNK_SEARCH_HEAD_CAPTAIN_URL": "something"}, "", "", "", "something"),
    ]
)
def test_getDistributedTopology(os_env, license_master_url, deployer_url, cluster_master_url, search_head_captain_url):
    """Verify getDistributedTopology() always populates topology URLs as strings.

    Fix: ``type(x) == str`` comparisons replaced with ``isinstance()``.
    """
    vars_scope = {"splunk": {}}
    with patch("os.environ", new=os_env):
        environ.getDistributedTopology(vars_scope)
    assert isinstance(vars_scope["splunk"]["license_master_url"], str)
    assert vars_scope["splunk"]["license_master_url"] == license_master_url
    assert isinstance(vars_scope["splunk"]["deployer_url"], str)
    assert vars_scope["splunk"]["deployer_url"] == deployer_url
    assert isinstance(vars_scope["splunk"]["cluster_master_url"], str)
    assert vars_scope["splunk"]["cluster_master_url"] == cluster_master_url
    assert isinstance(vars_scope["splunk"]["search_head_captain_url"], str)
    assert vars_scope["splunk"]["search_head_captain_url"] == search_head_captain_url
@pytest.mark.parametrize(("default_yml", "os_env", "license_uri", "wildcard_license", "ignore_license", "license_download_dest"),
    [
        ({}, {}, "splunk.lic", False, False, "/tmp/splunk.lic"),
        # Check individual environment variables
        ({}, {"SPLUNK_LICENSE_URI": "http://web/license.lic"}, "http://web/license.lic", False, False, "/tmp/splunk.lic"),
        ({}, {"SPLUNK_LICENSE_URI": "/mnt/*.lic"}, "/mnt/*.lic", True, False, "/tmp/splunk.lic"),
        ({}, {"SPLUNK_NFR_LICENSE": "/mnt/nfr.lic"}, "splunk.lic", False, False, "/tmp/splunk.lic"),
        ({}, {"SPLUNK_IGNORE_LICENSE": ""}, "splunk.lic", False, False, "/tmp/splunk.lic"),
        ({}, {"SPLUNK_IGNORE_LICENSE": "true"}, "splunk.lic", False, True, "/tmp/splunk.lic"),
        ({}, {"SPLUNK_IGNORE_LICENSE": "TRUE"}, "splunk.lic", False, True, "/tmp/splunk.lic"),
        ({}, {"SPLUNK_IGNORE_LICENSE": "false"}, "splunk.lic", False, False, "/tmp/splunk.lic"),
        ({}, {"SPLUNK_LICENSE_INSTALL_PATH": "/Downloads/"}, "splunk.lic", False, False, "/Downloads/"),
        # Check default.yml
        ({"license_uri": None}, {}, "splunk.lic", False, False, "/tmp/splunk.lic"),
        ({"license_uri": ""}, {}, "splunk.lic", False, False, "/tmp/splunk.lic"),
        ({"license_uri": "http://web/license.lic"}, {}, "http://web/license.lic", False, False, "/tmp/splunk.lic"),
        ({"license_uri": "/mnt/*.lic"}, {}, "/mnt/*.lic", True, False, "/tmp/splunk.lic"),
        ({"license_uri": "/mnt/nfr.lic"}, {}, "/mnt/nfr.lic", False, False, "/tmp/splunk.lic"),
        ({"license_uri": "/mnt/1.lic"}, {"SPLUNK_LICENSE_URI": "/mnt/2.lic"}, "/mnt/2.lic", False, False, "/tmp/splunk.lic"),
        ({"license_download_dest": None}, {}, "splunk.lic", False, False, "/tmp/splunk.lic"),
        ({"license_download_dest": ""}, {}, "splunk.lic", False, False, "/tmp/splunk.lic"),
        ({"license_download_dest": "/Downloads/splunk.lic"}, {}, "splunk.lic", False, False, "/Downloads/splunk.lic"),
        ({"license_download_dest": "/Downloads/splunk.lic"}, {"SPLUNK_LICENSE_INSTALL_PATH": "/mnt/license.file"}, "splunk.lic", False, False, "/mnt/license.file"),
    ]
)
def test_getLicenses(default_yml, os_env, license_uri, wildcard_license, ignore_license, license_download_dest):
    """Verify getLicenses() resolves license URI, wildcard/ignore flags, and download dest.

    Fix: ``type(x) == bool`` comparisons replaced with ``isinstance()``.
    """
    vars_scope = {"splunk": default_yml}
    with patch("os.environ", new=os_env):
        environ.getLicenses(vars_scope)
    assert vars_scope["splunk"]["license_uri"] == license_uri
    assert isinstance(vars_scope["splunk"]["wildcard_license"], bool)
    assert vars_scope["splunk"]["wildcard_license"] == wildcard_license
    assert isinstance(vars_scope["splunk"]["ignore_license"], bool)
    assert vars_scope["splunk"]["ignore_license"] == ignore_license
    assert vars_scope["splunk"]["license_download_dest"] == license_download_dest
@pytest.mark.parametrize(("default_yml", "os_env", "java_version", "java_download_url", "java_update_version"),
    [
        ({}, {}, None, None, None),
        # Check environment variable parameters
        ({}, {"JAVA": "oracle:8"}, None, None, None),
        ({}, {"JAVA_VERSION": "openjdk:8"}, "openjdk:8", None, None),
        ({}, {"JAVA_VERSION": "openjdk:9"}, "openjdk:9", None, None),
        ({}, {"JAVA_VERSION": "oracle:8"}, "oracle:8", "https://download.oracle.com/otn-pub/java/jdk/8u141-b15/336fa29ff2bb4ef291e347e091f7f4a7/jdk-8u141-linux-x64.tar.gz", "141"),
        ({}, {"JAVA_VERSION": "ORACLE:8"}, "oracle:8", "https://download.oracle.com/otn-pub/java/jdk/8u141-b15/336fa29ff2bb4ef291e347e091f7f4a7/jdk-8u141-linux-x64.tar.gz", "141"),
        ({}, {"JAVA_VERSION": "openjdk:11"}, "openjdk:11", "https://download.java.net/java/GA/jdk11/9/GPL/openjdk-11.0.2_linux-x64_bin.tar.gz", "11.0.2"),
        ({}, {"JAVA_VERSION": "oPenJdK:11"}, "openjdk:11", "https://download.java.net/java/GA/jdk11/9/GPL/openjdk-11.0.2_linux-x64_bin.tar.gz", "11.0.2"),
        ({}, {"JAVA_VERSION": "oracle:8", "JAVA_DOWNLOAD_URL": "https://java/jdk-8u9000-linux-x64.tar.gz"}, "oracle:8", "https://java/jdk-8u9000-linux-x64.tar.gz", "9000"),
        ({}, {"JAVA_VERSION": "openjdk:11", "JAVA_DOWNLOAD_URL": "https://java/openjdk-11.11.11_linux-x64_bin.tar.gz"}, "openjdk:11", "https://java/openjdk-11.11.11_linux-x64_bin.tar.gz", "11.11.11"),
        # Check default.yml
        ({"java_version": "openjdk:11"}, {}, "openjdk:11", None, None),
        ({"java_download_url": "http://web/java.tgz"}, {}, None, "http://web/java.tgz", None),
        ({"java_update_version": "jdk11u141"}, {}, None, None, "jdk11u141"),
        # Check order of precedence
        ({"java_version": "openjdk:9", "java_download_url": "http://web/java.tgz", "java_update_version": "jdk11u141"}, {"JAVA_VERSION": "oPenJdK:11"}, "openjdk:11", "https://download.java.net/java/GA/jdk11/9/GPL/openjdk-11.0.2_linux-x64_bin.tar.gz", "11.0.2"),
    ]
)
def test_getJava(default_yml, os_env, java_version, java_download_url, java_update_version):
    """Verify getJava() resolves Java version, download URL, and update version."""
    vars_scope = default_yml
    with patch("os.environ", new=os_env):
        environ.getJava(vars_scope)
    for field, expected in (
        ("java_version", java_version),
        ("java_download_url", java_download_url),
        ("java_update_version", java_update_version),
    ):
        assert vars_scope[field] == expected
@pytest.mark.parametrize(("os_env", "java_version", "java_download_url", "err_msg"),
    [
        ({"JAVA_VERSION": "oracle:3"}, None, None, "Invalid Java version supplied"),
        ({"JAVA_VERSION": "openjdk:20"}, None, None, "Invalid Java version supplied"),
        ({"JAVA_VERSION": "oracle:8", "JAVA_DOWNLOAD_URL": "https://java/jdk-8u9000.tar.gz"}, "oracle:8", "https://java/jdk-8u9000.tar.gz", "Invalid Java download URL format"),
        ({"JAVA_VERSION": "openjdk:11", "JAVA_DOWNLOAD_URL": "https://java/openjdk-11.tar.gz"}, "openjdk:11", "https://java/openjdk-11.tar.gz", "Invalid Java download URL format"),
    ]
)
def test_getJava_exception(os_env, java_version, java_download_url, err_msg):
    """Verify getJava() raises on invalid Java versions or malformed download URLs.

    Fixes: replaces the try/``assert False``/except/``assert True`` pattern
    with ``pytest.raises`` (which fails naturally if no exception is raised),
    and ``== None`` with ``is None``.
    """
    vars_scope = {"splunk": {}}
    with patch("os.environ", new=os_env):
        with pytest.raises(Exception) as exc:
            environ.getJava(vars_scope)
        assert err_msg in str(exc.value)
        # getJava() should still have recorded whatever it parsed before raising
        assert vars_scope["java_version"] == java_version
        assert vars_scope["java_download_url"] == java_download_url
        assert vars_scope["java_update_version"] is None
@pytest.mark.parametrize(("default_yml", "os_env", "build", "build_url_bearer_token"),
    [
        ({}, {}, None, None),
        # Check default.yml parameters
        ({"buildlocation": "http://server/file.tgz"}, {}, None, None),
        ({"build_location": None}, {}, None, None),
        ({"build_location": ""}, {}, "", None),
        ({"build_location": "/path/to/file.tgz"}, {}, "/path/to/file.tgz", None),
        ({"build_location": "http://server/file.tgz"}, {}, "http://server/file.tgz", None),
        ({"build_location": "https://server/file.tgz"}, {}, "https://server/file.tgz", None),
        # Check environment variable parameters
        ({}, {"SPLUNK_BUILD": "http://server/file.tgz"}, None, None),
        ({}, {"SPLUNK_BUILD_URL": None}, None, None),
        ({}, {"SPLUNK_BUILD_URL": ""}, "", None),
        ({}, {"SPLUNK_BUILD_URL": "/path/to/file.tgz", "SPLUNK_BUILD_URL_BEARER_TOKEN": "testToken"}, "/path/to/file.tgz", "testToken"),
        ({}, {"SPLUNK_BUILD_URL": "http://server/file.tgz", "SPLUNK_BUILD_URL_BEARER_TOKEN": "testToken"}, "http://server/file.tgz", "testToken"),
        ({}, {"SPLUNK_BUILD_URL": "https://server/file.tgz", "SPLUNK_BUILD_URL_BEARER_TOKEN": "testToken"}, "https://server/file.tgz", "testToken"),
        # Check order of precedence
        ({"build_location": "http://server/file1.tgz"}, {"SPLUNK_BUILD_URL": "https://server/file2.tgz"}, "https://server/file2.tgz", None),
        ({"build_location": "http://server/file1.tgz"}, {"SPLUNK_BUILD_URL": "/path/to/file.tgz"}, "/path/to/file.tgz", None),
    ]
)
def test_getSplunkBuild(default_yml, os_env, build, build_url_bearer_token):
    """Verify getSplunkBuild() resolves build_location and the optional bearer token."""
    vars_scope = {"splunk": default_yml}
    with patch("os.environ", new=os_env):
        environ.getSplunkBuild(vars_scope)
    assert vars_scope["splunk"]["build_location"] == build
    assert vars_scope["splunk"]["build_url_bearer_token"] == build_url_bearer_token
@pytest.mark.parametrize(("default_yml", "response_content", "trigger_splunkbase"),
    [
        ({}, "<id>123abc</id>", False),
        ({"splunkbase_username": "ocho"}, "<id>123abc</id>", False),
        ({"splunkbase_password": "<PASSWORD>"}, "<id>123abc</id>", False),
        ({"splunkbase_username": "ocho", "splunkbase_password": "<PASSWORD>"}, "<id>123abc</id>", True),
        ({"splunkbase_username": "", "splunkbase_password": ""}, "<id>123abc</id>", False),
        ({}, "<id>123abc</id>", False),
        ({"splunkbase_username": "ocho"}, b"<id>123abc</id>", False),
        ({"splunkbase_password": "<PASSWORD>"}, b"<id>123abc</id>", False),
        ({"splunkbase_username": "ocho", "splunkbase_password": "<PASSWORD>"}, b"<id>123abc</id>", True),
        ({"splunkbase_username": "", "splunkbase_password": ""}, b"<id>123abc</id>", False),
    ]
)
def test_getSplunkbaseToken(default_yml, response_content, trigger_splunkbase):
    """Verify getSplunkbaseToken() only logs in when both credentials are present."""
    vars_scope = default_yml
    with patch("environ.requests.post") as mock_post, patch("os.environ", new=dict()):
        mock_post.return_value = MagicMock(status_code=200, content=response_content)
        environ.getSplunkbaseToken(vars_scope)
    # The three splunkbase_* keys must always exist after the call
    for field in ("splunkbase_token", "splunkbase_username", "splunkbase_password"):
        assert field in vars_scope
    if trigger_splunkbase:
        # Login happens only when both username and password are non-empty
        mock_post.assert_called_with("https://splunkbase.splunk.com/api/account:login/", data={"username": "ocho", "password": "<PASSWORD>"})
        assert vars_scope.get("splunkbase_token") == "<PASSWORD>"
    else:
        mock_post.assert_not_called()
        assert not vars_scope.get("splunkbase_token")
def test_getSplunkbaseToken_exception():
    """Verify getSplunkbaseToken() raises on a failed Splunkbase login (non-200).

    Fix: replaces the try/``assert False``/except/``assert True`` pattern with
    ``pytest.raises``, which fails naturally when no exception is raised.
    """
    with patch("environ.requests.post") as mock_post:
        mock_post.return_value = MagicMock(status_code=400, content="error")
        with pytest.raises(Exception) as exc:
            environ.getSplunkbaseToken({"splunkbase_username": "ocho", "splunkbase_password": "<PASSWORD>"})
        assert "Invalid Splunkbase credentials" in str(exc.value)
@pytest.mark.parametrize(("default_yml", "os_env", "apps_count"),
    [
        # Check null parameters
        ({}, {}, 0),
        # Check default.yml parameters
        ({"app_location": []}, {}, 0),
        ({"app_location": ["a"]}, {}, 0),
        ({"app_location": ["a", "b", "c"]}, {}, 0),
        ({"apps_location": []}, {}, 0),
        ({"apps_location": ["a"]}, {}, 1),
        ({"apps_location": ["a", "b", "c"]}, {}, 3),
        ({"apps_location": "a"}, {}, 1),
        ({"apps_location": "a,b,c,d"}, {}, 4),
        # Check environment variable parameters
        ({}, {"SPLUNK_APPS": None}, 0),
        ({}, {"SPLUNK_APPS": "hi"}, 0),
        ({}, {"SPLUNK_APPS_URL": "hi"}, 1),
        ({}, {"SPLUNK_APPS_URL": "a,b,ccccc,dd"}, 4),
        # Check the union combination of default.yml + environment variables
        ### Invalid 'app_location' variable name in default.yml
        ({"app_location": []}, {"SPLUNK_APPS_URL": None}, 0),
        ({"app_location": ["a"]}, {"SPLUNK_APPS_URL": "a"}, 1),
        ({"app_location": ["a", "b", "c"]}, {"SPLUNK_APPS_URL": "a,bb"}, 2),
        ### Invalid 'SPLUNK_APP_URL' variable name in env vars
        ({"apps_location": ["x"]}, {"SPLUNK_APP_URL": "a"}, 1),
        ({"apps_location": ["x", "y"]}, {"SPLUNK_APP_URL": "a,bb"}, 2),
        ({"apps_location": "x,y,z"}, {"SPLUNK_APP_URL": "a,bb"}, 3),
        ### Correct variable names
        ({"apps_location": ["x"]}, {"SPLUNK_APPS_URL": "a"}, 2),
        ({"apps_location": ["x", "y"]}, {"SPLUNK_APPS_URL": "a,bb"}, 4),
        ({"apps_location": "x,y,z"}, {"SPLUNK_APPS_URL": "a,bb"}, 5),
        ### Only return unique set of apps
        ({"apps_location": ["x"]}, {"SPLUNK_APPS_URL": "x"}, 1),
        ({"apps_location": ["x", "y"]}, {"SPLUNK_APPS_URL": "a,bb,y"}, 4),
        ({"apps_location": "x,y,z"}, {"SPLUNK_APPS_URL": "x,yy,a,z"}, 5),
    ]
)
def test_getSplunkApps(default_yml, os_env, apps_count):
    """Verify getSplunkApps() unions default.yml and env var app lists (unique entries).

    Fix: ``type(x) == list`` comparison replaced with ``isinstance()``.
    """
    vars_scope = {"splunk": default_yml}
    with patch("os.environ", new=os_env):
        environ.getSplunkApps(vars_scope)
    assert isinstance(vars_scope["splunk"]["apps_location"], list)
    assert len(vars_scope["splunk"]["apps_location"]) == apps_count
@pytest.mark.parametrize(("default_yml", "os_env", "key", "value"),
[
# Check cert_prefix
({}, {}, "cert_prefix", "https"),
({"cert_prefix": "http"}, {}, "cert_prefix", "http"),
({}, {"SPLUNK_CERT_PREFIX": "fakehttps"}, "cert_prefix", "fakehttps"),
# Check splunk.user
({"splunk": {"user": "root"}}, {}, "splunk.user", "root"),
({}, {"SPLUNK_USER": "root"}, "splunk.user", "root"),
# Check splunk.group
({"splunk": {"group": "root"}}, {}, "splunk.group", "root"),
({}, {"SPLUNK_GROUP": "root"}, "splunk.group", "root"),
# Check splunk.root_endpoint
({"splunk": {"root_endpoint": "/splunk"}}, {}, "splunk.root_endpoint", "/splunk"),
({}, {"SPLUNK_ROOT_ENDPOINT": "/splk"}, "splunk.root_endpoint", "/splk"),
# Check splunk.svc_port
({"splunk": {"svc_port": "9089"}}, {}, "splunk.svc_port", "9089"),
({}, {"SPLUNK_SVC_PORT": "8189"}, "splunk.svc_port", "8189"),
# Check splunk.s2s.port
({"splunk": {"s2s": {"port": "9999"}}}, {}, "splunk.s2s.port", 9999),
({}, {"SPLUNK_S2S_PORT": "9991"}, "splunk.s2s.port", 9991),
# Check splunk.enable_service
({"splunk": {"enable_service": "yes"}}, {}, "splunk.enable_service", "yes"),
({}, {"SPLUNK_ENABLE_SERVICE": "no"}, "splunk.enable_service", "no"),
# Check splunk.service_name
({"splunk": {"service_name": "SpLuNkD"}}, {}, "splunk.service_name", "SpLuNkD"),
({}, {"SPLUNK_SERVICE_NAME": "sPlUnKd"}, "splunk.service_name", "sPlUnKd"),
# Check splunk.allow_upgrade
({"splunk": {"allow_upgrade": "yes"}}, {}, "splunk.allow_upgrade", "yes"),
({}, {"SPLUNK_ALLOW_UPGRADE": "no"}, "splunk.allow_upgrade", "no"),
# Check splunk.set_search_peers
({"splunk": {"set_search_peers": False}}, {}, "splunk.set_search_peers", False),
({}, {"SPLUNK_SET_SEARCH_PEERS": "False"}, "splunk.set_search_peers", False),
({"splunk": {"set_search_peers": True}}, {"SPLUNK_SET_SEARCH_PEERS": "False"}, "splunk.set_search_peers", False),
# Check splunk.appserver.port
({"splunk": {"appserver": {"port": "9291"}}}, {}, "splunk.appserver.port", "9291"),
({}, {"SPLUNK_APPSERVER_PORT": "9391"}, "splunk.appserver.port", "9391"),
# Check splunk.kvstore.port
({"splunk": {"kvstore" :{"port": "9165"}}}, {}, "splunk.kvstore.port", "9165"),
({}, {"SPLUNK_KVSTORE_PORT": "9265"}, "splunk.kvstore.port", "9265"),
# Check splunk.connection_timeout
({"splunk": {"connection_timeout": 60}}, {}, "splunk.connection_timeout", 60),
({}, {"SPLUNK_CONNECTION_TIMEOUT": 200}, "splunk.connection_timeout", 200),
]
)
def test_overrideEnvironmentVars(default_yml, os_env, key, value):
    """Verify overrideEnvironmentVars() lets env vars win over default.yml values.

    ``key`` is a dotted path into vars_scope; nested sections (s2s,
    appserver, kvstore) are resolved one level deeper.
    """
    vars_scope = {
        "ansible_pre_tasks": None,
        "ansible_post_tasks": None,
        "cert_prefix": "https",
        "splunk": {
            "user": "splunk",
            "group": "splunk",
            "root_endpoint": None,
            "svc_port": 8089,
            "s2s": {"port": 9997},
            "appserver": {"port": 8065},
            "kvstore": {"port": 8191},
            "hec_token": "<KEY>",
            "enable_service": False,
            "service_name": "Splunkd",
            "allow_upgrade": True,
            "asan": None,
            "set_search_peers": True,
            "connection_timeout": 0,
        }
    }
    # TODO: Possibly remove the dependency on merge_dict() in this test
    environ.merge_dict(vars_scope, default_yml)
    with patch("os.environ", new=os_env):
        environ.overrideEnvironmentVars(vars_scope)
    parts = key.split(".")
    if "splunk" not in key:
        assert vars_scope[key] == value
    elif any(section in key for section in ("s2s", "appserver", "kvstore")):
        section, leaf = parts[-2:]
        assert vars_scope["splunk"][section][leaf] == value
    else:
        assert vars_scope["splunk"][parts[-1]] == value
@pytest.mark.parametrize(("default_yml", "os_env", "output"),
[
# Check null parameters
({}, {}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
# Check default.yml parameters
({"dfs": {"enable": True}}, {}, {"enable": True, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
({"dfs": {"dfw_num_slots": 20}}, {}, {"enable": False, "dfw_num_slots": 20, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
({"dfs": {"dfw_num_slots": "15"}}, {}, {"enable": False, "dfw_num_slots": 15, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
({"dfs": {"dfc_num_slots": 20}}, {}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 20, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
({"dfs": {"dfc_num_slots": "15"}}, {}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 15, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
({"dfs": {"dfw_num_slots_enabled": True}}, {}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": True, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
({"dfs": {"spark_master_host": "10.0.0.1"}}, {}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "10.0.0.1", "spark_master_webui_port": 8080}),
({"dfs": {"spark_master_webui_port": 8081}}, {}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8081}),
({"dfs": {"spark_master_webui_port": "8082"}}, {}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8082}),
# Check environment variable parameters
({}, {"SPLUNK_ENABLE_DFS": ""}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
({}, {"SPLUNK_ENABLE_DFS": "true"}, {"enable": True, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
({}, {"SPLUNK_ENABLE_DFS": "TRUE"}, {"enable": True, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
({}, {"SPLUNK_DFW_NUM_SLOTS": "11"}, {"enable": False, "dfw_num_slots": 11, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
({}, {"SPLUNK_DFC_NUM_SLOTS": "1"}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 1, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
({}, {"SPLUNK_DFW_NUM_SLOTS_ENABLED": ""}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
({}, {"SPLUNK_DFW_NUM_SLOTS_ENABLED": "true"}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": True, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
({}, {"SPLUNK_DFW_NUM_SLOTS_ENABLED": "TRUE"}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": True, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
({}, {"SPARK_MASTER_HOST": "8.8.8.8"}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "8.8.8.8", "spark_master_webui_port": 8080}),
({}, {"SPARK_MASTER_WEBUI_PORT": "8888"}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8888}),
# Check the union combination of default.yml + environment variables and order of precedence when overwriting
({"dfs": {"enable": False}}, {"SPLUNK_ENABLE_DFS": "true"}, {"enable": True, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
({"dfs": {"dfw_num_slots": 100}}, {"SPLUNK_DFW_NUM_SLOTS": "101"}, {"enable": False, "dfw_num_slots": 101, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
({"dfs": {"dfc_num_slots": 100}}, {"SPLUNK_DFC_NUM_SLOTS": "101"}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 101, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
({"dfs": {"dfw_num_slots_enabled": False}}, {"SPLUNK_DFW_NUM_SLOTS_ENABLED": "True"}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": True, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
({"dfs": {"spark_master_host": "10.0.0.1"}}, {"SPARK_MASTER_HOST": "8.8.8.8"}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "8.8.8.8", "spark_master_webui_port": 8080}),
({"dfs": {"spark_master_webui_port": 8082}}, {"SPARK_MASTER_WEBUI_PORT": "8888"}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8888}),
]
)
def test_getDFS(default_yml, os_env, output):
    """Check getDFS() populates splunk.dfs with correctly-typed values."""
    vars_scope = {"splunk": default_yml}
    with patch("os.environ", new=os_env):
        environ.getDFS(vars_scope)
    dfs = vars_scope["splunk"]["dfs"]
    # Check typing — numeric fields must be coerced to int, flags to bool
    expected_types = (
        ("enable", bool),
        ("dfw_num_slots", int),
        ("dfc_num_slots", int),
        ("dfw_num_slots_enabled", bool),
        ("spark_master_webui_port", int),
    )
    for field, expected in expected_types:
        assert type(dfs[field]) == expected
    assert dfs == output
@pytest.mark.parametrize(("os_env", "deployment_server", "add", "before_start_cmd", "cmd"),
[
({}, None, None, None, None),
# Check environment variable parameters
({"SPLUNK_DEPLOYMENT_SERVER": ""}, None, None, None, None),
({"SPLUNK_DEPLOYMENT_SERVER": "something"}, "something", None, None, None),
({"SPLUNK_ADD": ""}, None, None, None, None),
({"SPLUNK_ADD": "echo 1"}, None, ["echo 1"], None, None),
({"SPLUNK_ADD": "echo 1,echo 2"}, None, ["echo 1", "echo 2"], None, None),
({"SPLUNK_BEFORE_START_CMD": ""}, None, None, None, None),
({"SPLUNK_BEFORE_START_CMD": "echo 1"}, None, None, ["echo 1"], None),
({"SPLUNK_BEFORE_START_CMD": "echo 1,echo 2"}, None, None, ["echo 1", "echo 2"], None),
({"SPLUNK_CMD": ""}, None, None, None, None),
({"SPLUNK_CMD": "echo 1"}, None, None, None, ["echo 1"]),
({"SPLUNK_CMD": "echo 1,echo 2"}, None, None, None, ["echo 1", "echo 2"]),
]
)
def test_getUFSplunkVariables(os_env, deployment_server, add, before_start_cmd, cmd):
    """Validate env-var driven universal-forwarder settings (absent vars stay unset)."""
    vars_scope = {"splunk": {}}
    with patch("os.environ", new=os_env):
        environ.getUFSplunkVariables(vars_scope)
    splunk = vars_scope["splunk"]
    expected = {
        "deployment_server": deployment_server,
        "add": add,
        "before_start_cmd": before_start_cmd,
        "cmd": cmd,
    }
    for name, want in expected.items():
        assert splunk.get(name) == want
def test_getRandomString():
    """getRandomString() should always yield a 6-character string."""
    assert len(environ.getRandomString()) == 6
@pytest.mark.parametrize(("url", "vars_scope", "output"),
[
("licmaster", {"splunk": {}}, "https://licmaster:8089"),
("http://licmaster", {"splunk": {}}, "http://licmaster:8089"),
("licmaster:8081", {"splunk": {}}, "https://licmaster:8081"),
("http://licmaster:80", {"splunk": {}}, "http://licmaster:80"),
("ftp://licmaster.corp.net:3333", {"splunk": {}}, "ftp://licmaster.corp.net:3333"),
("username:<EMAIL>", {"splunk": {}}, "https://lm.internal.net:8089"),
("http://username:password@lm.internal.net:3333", {"splunk": {}}, "http://lm.internal.net:3333"),
# Check null input
("", {"splunk": {}}, ""),
(None, {"splunk": {}}, ""),
# Check vars_scope overrides
("licmaster", {"cert_prefix": "http", "splunk": {"svc_port": 18089}}, "http://licmaster:18089"),
("https://licmaster", {"cert_prefix": "http", "splunk": {"svc_port": 18089}}, "https://licmaster:18089"),
("licmaster:28089", {"cert_prefix": "http", "splunk": {"svc_port": 18089}}, "http://licmaster:28089"),
("https://licmaster:38089", {"cert_prefix": "http", "splunk": {"svc_port": 18089}}, "https://licmaster:38089"),
]
)
def test_parseUrl(url, vars_scope, output):
    """parseUrl() should normalize URLs, filling in scheme/port defaults and stripping credentials."""
    assert environ.parseUrl(url, vars_scope) == output
@pytest.mark.parametrize(("dict1", "dict2", "result"),
[
# Check dicts
({}, {"a": 2}, {"a": 2}),
({"b": 2}, {"a": 2}, {"a": 2, "b": 2}),
({"a": 1, "b": 2}, {"a": 2}, {"a": 2, "b": 2}),
({"a": 0}, {"a": 1}, {"a": 1}),
({"a": 1}, {"b": 2, "c": 3}, {"a": 1, "b": 2, "c": 3}),
# Check arrays
({}, {"a": []}, {"a": []}),
({}, {"a": [1, 2]}, {"a": [1, 2]}),
({"b": [0]}, {"a": [1]}, {"a": [1], "b": [0]}),
({"a": [0]}, {"a": [1]}, {"a": [0, 1]}),
# Check nested dict output
({"nested": {}}, {"nested": {"a": 1}}, {"nested": {"a": 1}}),
({"nested": {"a": 1}}, {"nested": {"b": 2}}, {"nested": {"a": 1, "b": 2}}),
({"nested": {"a": 1, "c": 3}}, {"nested": {"b": 2}}, {"nested": {"a": 1, "b": 2, "c": 3}}),
({"nested": {"a": 1, "b": 3}}, {"nested": {"b": 2}}, {"nested": {"a": 1, "b": 2}}),
# Check nested with diff value types
({"nested": {"x": 1}}, {"nested": {"x": {"a": 1}}}, {"nested": {"x": {"a": 1}}}),
({"nested": {"x": {"a": 1}}}, {"nested": {"x": 1}}, {"nested": {"x": 1}}),
# Check nested arrays
({"nested": {"array": []}}, {"nested": {"array": [1]}}, {"nested": {"array": [1]}}),
({"nested": {"array": [1, 2, 3]}}, {"nested": {"array": []}}, {"nested": {"array": [1, 2, 3]}}),
({"nested": {"array": [1, 2]}}, {"nested": {"array": [3, 4, 5]}}, {"nested": {"array": [1, 2, 3, 4, 5]}}),
({"nested": {"x": 10, "array": [1, 2]}}, {"nested": {"y": 20, "array": [3, 4, 5]}}, {"nested": {"x": 10, "y": 20, "array": [1, 2, 3, 4, 5]}}),
# Targeted github bug
({"splunk": {"conf": [{"key": "fileA", "content": {"a": "b", "c": "d"}}]}}, {"splunk": {"conf": [{"key": "fileB", "content": {"e": "f", "g": "h"}}]}}, {"splunk": {"conf": [{"key": "fileA", "content": {"a": "b", "c": "d"}}, {"key": "fileB", "content": {"e": "f", "g": "h"}}]}}),
]
)
def test_merge_dict(dict1, dict2, result):
    """merge_dict() should deep-merge dict2 into dict1 (dict2 wins; arrays concatenate)."""
    assert environ.merge_dict(dict1, dict2) == result
@pytest.mark.parametrize(("source", "merge_url_called", "merge_file_called"),
[
(None, False, False),
("", False, False),
(" ", False, False),
("http://web/default.yml", True, False),
("https://web/default.yml", True, False),
("file:///path/to/default.yml", False, True),
("/path/to/default.yml", False, True),
("rel/path/to/default.yml", False, True),
]
)
def test_mergeDefaults(source, merge_url_called, merge_file_called):
    """Dispatch check: http(s) sources route to mergeDefaultsFromURL, paths to mergeDefaultsFromFile."""
    with patch("environ.mergeDefaultsFromFile") as mock_merge_file, \
            patch("environ.mergeDefaultsFromURL") as mock_merge_url:
        environ.mergeDefaults({"hello": "world"}, "foobar", source)
        # Exactly the expected handler fires (never both; possibly neither)
        if merge_url_called:
            mock_merge_url.assert_called_once()
        else:
            mock_merge_url.assert_not_called()
        if merge_file_called:
            mock_merge_file.assert_called_once()
        else:
            mock_merge_file.assert_not_called()
@pytest.mark.parametrize(("key"),
[
("FOO"),
("BAR"),
("BAZ"),
]
)
def test_mergeDefaults_url_with_req_params(key):
    """URL merges should forward per-key headers/verify from config, defaulting to (None, False)."""
    config = {
        "config": {
            "FOO": {
                "headers": {"HI": "MOM"},
                "verify": True
            },
            "BAR": {
                "headers": {"GOODBYE": "MOM"},
                "verify": False
            }
        }
    }
    url = "http://website/default.yml"
    # Expected (headers, verify) forwarded for each key; unknown keys fall back
    expected_headers, expected_verify = {
        "FOO": ({"HI": "MOM"}, True),
        "BAR": ({"GOODBYE": "MOM"}, False),
    }.get(key, (None, False))
    with patch("environ.mergeDefaultsFromFile") as mock_merge_file, \
            patch("environ.mergeDefaultsFromURL") as mock_merge_url:
        environ.mergeDefaults(config, key, url)
        mock_merge_file.assert_not_called()
        mock_merge_url.assert_called_once()
        mock_merge_url.assert_called_with(config, url, expected_headers, expected_verify)
@pytest.mark.skip(reason="TODO")
def test_mergeDefaultsFromURL():
    # Placeholder: skipped until mergeDefaultsFromURL() coverage is written.
    pass
@pytest.mark.parametrize(("file", "file_exists", "merge_called"),
[
(None, False, False),
("", False, False),
(" ", False, False),
("/path/to/file", False, False),
("/path/to/file", True, True),
]
)
def test_mergeDefaultsFromFile(file, file_exists, merge_called):
    """Only existing, non-blank paths get opened and merged; the input dict is returned unchanged."""
    mo = mock_open()
    with patch("environ.open", mo, create=True), \
            patch("environ.os") as mock_os, \
            patch("environ.merge_dict") as mock_merge:
        mock_os.path.exists = MagicMock(return_value=file_exists)
        result = environ.mergeDefaultsFromFile({"hello": "world"}, file)
        if merge_called:
            mo.assert_called_once()
            mock_merge.assert_called_once()
        else:
            mo.assert_not_called()
            mock_merge.assert_not_called()
        assert result == {"hello": "world"}
@pytest.mark.parametrize(("mock_base", "mock_baked", "mock_env", "mock_host", "merge_call_count"),
[
# Null cases
({}, [], [], [], 0),
({"config": None}, [], [], [], 0),
({"config": {}}, [], [], [], 0),
# Check baked
({"config": {"foo": "bar"}}, [{"key": "baked", "src": "file1"}], [], [], 1),
({"config": {"foo": "bar"}}, [{"key": "baked", "src": "f1"}, {"key": "baked", "src": "f2"}, {"key": "baked", "src": "f3"}], [], [], 3),
# Check env
({"config": {"foo": "bar"}}, [], [{"key": "env", "src": "file1"}], [], 1),
({"config": {"foo": "bar"}}, [], [{"key": "env", "src": "f1"}, {"key": "env", "src": "f2"}, {"key": "env", "src": "f3"}], [], 3),
# Check host
({"config": {"foo": "bar"}}, [], [], [{"key": "host", "src": "file1"}], 1),
({"config": {"foo": "bar"}}, [], [], [{"key": "host", "src": "f1"}, {"key": "host", "src": "f2"}, {"key": "host", "src": "f3"}], 3),
# Check mixed
({"config": {"foo": "bar"}}, [{"key": "baked", "src": "file1"}], [{"key": "env", "src": "f1"}, {"key": "env", "src": "f2"}], [{"key": "host", "src": "f1"}, {"key": "host", "src": "f2"}], 5),
({"config": None}, [{"key": "baked", "src": "file1"}], [{"key": "env", "src": "f1"}, {"key": "env", "src": "f2"}], [{"key": "host", "src": "f1"}, {"key": "host", "src": "f2"}], 0),
({"config": {}}, [{"key": "baked", "src": "file1"}], [{"key": "env", "src": "f1"}, {"key": "env", "src": "f2"}], [{"key": "host", "src": "f1"}, {"key": "host", "src": "f2"}], 0),
]
)
def test_loadDefaults(mock_base, mock_baked, mock_env, mock_host, merge_call_count):
    """Verify loadDefaults() merges one source per baked/env/host entry.

    Each loader is stubbed to return canned values and we count how many
    times mergeDefaults() fires. No merges should happen when the base
    config is missing or empty.
    """
    with patch("environ.loadBaseDefaults", MagicMock(return_value=mock_base)), \
            patch("environ.loadBakedDefaults", MagicMock(return_value=mock_baked)), \
            patch("environ.loadEnvDefaults", MagicMock(return_value=mock_env)), \
            patch("environ.loadHostDefaults", MagicMock(return_value=mock_host)), \
            patch("environ.mergeDefaults") as mock_merge:
        # Return value intentionally ignored; only the merge call count matters.
        environ.loadDefaults()
        assert mock_merge.call_count == merge_call_count
@pytest.mark.parametrize(("os_env", "filename"),
[
({}, "splunk_defaults"),
({"SPLUNK_ROLE": "splunk_standalone"}, "splunk_defaults"),
({"SPLUNK_ROLE": "splunk_universal_forwarder"}, "splunkforwarder_defaults"),
]
)
def test_loadBaseDefaults(os_env, filename):
    """The defaults file chosen should depend on SPLUNK_ROLE (forwarder vs everything else)."""
    # NOTE(review): YAML literal indentation reconstructed; assumes PyYAML accepts
    # the uniformly-indented root mapping — confirm against the original file.
    sample_yml = """
    this: file
    is:
      a: yaml
    """
    mo = mock_open(read_data=sample_yml)
    with patch("environ.open", mo, create=True):
        with patch("os.environ", new=os_env):
            output = environ.loadBaseDefaults()
    # Exactly one file open, read-only, and its path contains the role-specific name
    mo.assert_called_once()
    args, _ = mo.call_args
    assert filename in args[0]
    assert args[1] == "r"
    assert type(output) == dict
    assert output["this"] == "file"
@pytest.mark.parametrize(("config", "output"),
[
(None, []),
({}, []),
({"baked": None}, []),
({"baked": ""}, []),
({"baked": "file1"}, [{"key": "baked", "src": "file1"}]),
({"baked": "file1,file2,file3"}, [{"key": "baked", "src": "file1"}, {"key": "baked", "src": "file2"}, {"key": "baked", "src": "file3"}]),
]
)
def test_loadBakedDefaults(config, output):
    """Comma-separated 'baked' entries should expand into one {key, src} record each."""
    assert environ.loadBakedDefaults(config) == output
@pytest.mark.parametrize(("config", "output"),
[
(None, []),
({}, []),
({"env": None}, []),
({"env": {}}, []),
({"env": {"var": None}}, []),
({"env": {"var": ""}}, []),
# Adding test for a key that does not exist
({"env": {"var": "FAKE"}}, []),
# Adding tests for keys that exist
({"env": {"var": "KEY1"}}, [{"key": "env", "src": "file1"}]),
({"env": {"var": "KEY2"}}, [{"key": "env", "src": "file1"}, {"key": "env", "src": "file2"}, {"key": "env", "src": "file3"}]),
]
)
def test_loadEnvDefaults(config, output):
    """Env-var names in config['env']['var'] resolve through os.environ into {key, src} records."""
    fake_env = {"KEY1": "file1", "KEY2": "file1,file2,file3"}
    with patch("os.environ", new=fake_env):
        assert environ.loadEnvDefaults(config) == output
@pytest.mark.parametrize(("config", "output"),
[
(None, []),
({}, []),
({"host": None}, []),
({"host": {}}, []),
({"host": {"url": None}}, []),
({"host": {"url": ""}}, []),
({"host": {"url": "file1"}}, [{"key": "host", "src": "file1"}]),
({"host": {"url": "file1,file2,file3"}}, [{"key": "host", "src": "file1"}, {"key": "host", "src": "file2"}, {"key": "host", "src": "file3"}]),
]
)
def test_loadHostDefaults(config, output):
    """Comma-separated 'host' URLs should expand into one {key, src} record each."""
    assert environ.loadHostDefaults(config) == output
@pytest.mark.parametrize(("inputInventory", "outputInventory"),
[
# Verify null inputs
({}, {}),
({"all": {}}, {"all": {}}),
({"all": {"vars": {}}}, {"all": {"vars": {}}}),
({"all": {"vars": {"splunk": {}}}}, {"all": {"vars": {"splunk": {}}}}),
# Verify individual keys to obfuscate
({"all": {"vars": {"splunk": {"password": "<PASSWORD>"}}}}, {"all": {"vars": {"splunk": {"password": "**************"}}}}),
({"all": {"vars": {"splunk": {"shc": {"secret": "helloworld"}}}}}, {"all": {"vars": {"splunk": {"shc": {"secret": "**************"}}}}}),
({"all": {"vars": {"splunk": {"smartstore": {"index": []}}}}}, {"all": {"vars": {"splunk": {"smartstore": {"index": []}}}}}),
({"all": {"vars": {"splunk": {"smartstore": {"index": [{"s3": {"access_key": "1234", "secret_key": "abcd"}}]}}}}}, {"all": {"vars": {"splunk": {"smartstore": {"index": [{"s3": {"access_key": "**************", "secret_key": "**************"}}]}}}}}),
]
)
def test_obfuscate_vars(inputInventory, outputInventory):
    """Sensitive inventory fields (passwords, secrets, S3 keys) should be masked."""
    assert environ.obfuscate_vars(inputInventory) == outputInventory
@pytest.mark.skip(reason="TODO")
def test_create_parser():
    # Placeholder: skipped until create_parser() coverage is written.
    pass
@pytest.mark.skip(reason="TODO")
def test_prep_for_yaml_out():
    # Placeholder: skipped until prep_for_yaml_out() coverage is written.
    pass
@pytest.mark.skip(reason="TODO")
def test_main():
    # Placeholder: skipped until main() coverage is written.
    # (Removed extraction residue that had been fused onto this line,
    # which made it syntactically invalid Python.)
    pass
import os
import sys
import pytest
from mock import MagicMock, patch, mock_open
FILE_DIR = os.path.dirname(os.path.realpath(__file__))
#FIXTURES_DIR = os.path.join(FILE_DIR, "fixtures")
REPO_DIR = os.path.join(FILE_DIR, "..", "..")
# Add environ.py into path for testing
sys.path.append(os.path.join(REPO_DIR, "inventory"))
import environ
@pytest.mark.parametrize(("regex", "result"),
[
(r"(FOOBAR)", {"foobar": "123"}),
(r"^FOO(.*)", {"bar": "123"}),
]
)
def test_getVars(regex, result):
    '''
    getVars() collects os.environ entries whose names match the regex,
    keyed by lowercased group(1). This method makes the assumption that
    there will always be a group(1), so if doing an exact string match,
    for now group the entire string.
    '''
    fake_env = {"FOOBAR": "123", "BARFOO": "456"}
    with patch("os.environ", new=fake_env):
        assert environ.getVars(regex) == result
@pytest.mark.skip(reason="TODO")
def test_getSplunkInventory():
    # Placeholder: skipped until getSplunkInventory() coverage is written.
    pass
@patch('environ.loadDefaults', return_value={"splunk": {"http_port": 8000, "build_location": None}})
@patch('environ.overrideEnvironmentVars')
@patch('environ.getSecrets')
@patch('environ.getHEC')
def test_getDefaultVars(mock_getHEC, mock_getSecrets, mock_overrideEnvironmentVars, mock_loadDefaults):
    '''
    Unit test for getting our default variables.

    Stacked @patch decorators inject mocks bottom-up, so the parameter
    order is the reverse of the decorator order (getHEC first). The old
    parameter names mislabeled every mock; fixed here. The mocks are
    only used to neutralize side effects, not inspected.
    '''
    retval = environ.getDefaultVars()
    assert "splunk" in retval
@pytest.mark.parametrize(("default_yml", "os_env", "output"),
[
# Check null parameters
({}, {}, {"opt": None, "home": None, "exec": None, "pid": None}),
# Check default.yml parameters
({"opt": "/opt"}, {}, {"opt": "/opt", "home": None, "exec": None, "pid": None}),
({"home": "/tmp/splunk"}, {}, {"opt": None, "home": "/tmp/splunk", "exec": None, "pid": None}),
({"exec": "/opt/splunk/bin/splunk"}, {}, {"opt": None, "home": None, "exec": "/opt/splunk/bin/splunk", "pid": None}),
({"pid": "/splunk.pid"}, {}, {"opt": None, "home": None, "exec": None, "pid": "/splunk.pid"}),
# Check environment variable parameters
({}, {"SPLUNK_OPT": "/home/"}, {"opt": "/home/", "home": None, "exec": None, "pid": None}),
({}, {"SPLUNK_HOME": "/home/"}, {"opt": None, "home": "/home/", "exec": None, "pid": None}),
({}, {"SPLUNK_EXEC": "/home/splunk.exe"}, {"opt": None, "home": None, "exec": "/home/splunk.exe", "pid": None}),
({}, {"SPLUNK_PID": "/home/splunk.pid"}, {"opt": None, "home": None, "exec": None, "pid": "/home/splunk.pid"}),
# Check the union combination of default.yml + environment variables and order of precedence when overwriting
({"opt": "/home"}, {"SPLUNK_OPT": "/opt"}, {"opt": "/opt", "home": None, "exec": None, "pid": None}),
({"home": "/tmp/splunk"}, {"SPLUNK_HOME": "/opt/splunk"}, {"opt": None, "home": "/opt/splunk", "exec": None, "pid": None}),
({"exec": "/bin/splunk"}, {"SPLUNK_EXEC": "/opt/splunk/bin/splunk"}, {"opt": None, "home": None, "exec": "/opt/splunk/bin/splunk", "pid": None}),
({"pid": "/splunk.pid"}, {"SPLUNK_PID": "/opt/splunk/splunk.pid"}, {"opt": None, "home": None, "exec": None, "pid": "/opt/splunk/splunk.pid"}),
]
)
def test_getSplunkPaths(default_yml, os_env, output):
    """getSplunkPaths() fills opt/home/exec/pid, with env vars winning over default.yml."""
    vars_scope = {"splunk": default_yml}
    with patch("os.environ", new=os_env):
        environ.getSplunkPaths(vars_scope)
    paths = vars_scope["splunk"]
    assert type(paths) == dict
    assert paths == output
@pytest.mark.parametrize(("default_yml", "os_env", "output"),
[
# Check null parameters
({}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1, "search_factor": 1}),
# Check default.yml parameters
({"idxc": {}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1, "search_factor": 1}),
({"idxc": {"label": None}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1, "search_factor": 1}),
({"idxc": {"label": "1234"}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": "1234", "secret": None, "replication_factor": 1, "search_factor": 1}),
({"idxc": {"secret": None}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1, "search_factor": 1}),
({"idxc": {"secret": "1234"}}, {}, {"pass4SymmKey": "1234", "discoveryPass4SymmKey": "1234", "label": None, "secret": "1234", "replication_factor": 1, "search_factor": 1}),
({"idxc": {"pass4SymmKey": None}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1, "search_factor": 1}),
({"idxc": {"pass4SymmKey": "1234"}}, {}, {"pass4SymmKey": "1234", "discoveryPass4SymmKey": "1234", "label": None, "secret": "1234", "replication_factor": 1, "search_factor": 1}),
({"idxc": {"discoveryPass4SymmKey": None}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1, "search_factor": 1}),
({"idxc": {"discoveryPass4SymmKey": "1234"}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": "1234", "label": None, "secret": None, "replication_factor": 1, "search_factor": 1}),
# Search factor should never exceed replication factor
({"idxc": {"replication_factor": 0, "search_factor": 2}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 0, "search_factor": 0}),
({"idxc": {"replication_factor": 1, "search_factor": 3}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1, "search_factor": 1}),
({"idxc": {"replication_factor": "2", "search_factor": 3}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 2, "search_factor": 2}),
# This should return replication_factor=2 because there are only 2 hosts in the "splunk_indexer" group
({"idxc": {"replication_factor": 3, "search_factor": 1}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 2, "search_factor": 1}),
# Check environment variable parameters
({}, {"SPLUNK_IDXC_LABEL": ""}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": "", "secret": None, "replication_factor": 1, "search_factor": 1}),
({}, {"SPLUNK_IDXC_LABEL": "abcd"}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": "abcd", "secret": None, "replication_factor": 1, "search_factor": 1}),
({}, {"SPLUNK_IDXC_SECRET": ""}, {"pass4SymmKey": "", "discoveryPass4SymmKey": "", "label": None, "secret": "", "replication_factor": 1, "search_factor": 1}),
({}, {"SPLUNK_IDXC_SECRET": "abcd"}, {"pass4SymmKey": "abcd", "discoveryPass4SymmKey": "abcd", "label": None, "secret": "abcd", "replication_factor": 1, "search_factor": 1}),
({}, {"SPLUNK_IDXC_PASS4SYMMKEY": "abcd"}, {"pass4SymmKey": "abcd", "discoveryPass4SymmKey": "abcd", "label": None, "secret": "abcd", "replication_factor": 1, "search_factor": 1}),
({}, {"SPLUNK_IDXC_REPLICATION_FACTOR": "1"}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1, "search_factor": 1}),
({}, {"SPLUNK_IDXC_REPLICATION_FACTOR": 2, "SPLUNK_IDXC_SEARCH_FACTOR": "1"}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 2, "search_factor": 1}),
({}, {"SPLUNK_IDXC_DISCOVERYPASS4SYMMKEY": "qwerty"}, {"pass4SymmKey": None, "discoveryPass4SymmKey": "qwerty", "label": None, "secret": None, "replication_factor": 1, "search_factor": 1}),
# Check the union combination of default.yml + environment variables and order of precedence when overwriting
({"idxc": {"label": "1234"}}, {"SPLUNK_IDXC_LABEL": "abcd"}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": "abcd", "secret": None, "replication_factor": 1, "search_factor": 1}),
({"idxc": {"secret": "abcd"}}, {"SPLUNK_IDXC_SECRET": "1234"}, {"pass4SymmKey": "1234", "discoveryPass4SymmKey": "1234", "label": None, "secret": "1234", "replication_factor": 1, "search_factor": 1}),
({"idxc": {"pass4SymmKey": "1234"}}, {"SPLUNK_IDXC_PASS4SYMMKEY": "abcd"}, {"pass4SymmKey": "abcd", "discoveryPass4SymmKey": "abcd", "label": None, "secret": "abcd", "replication_factor": 1, "search_factor": 1}),
({"idxc": {"pass4SymmKey": "1234", "discoveryPass4SymmKey": "7890"}}, {"SPLUNK_IDXC_PASS4SYMMKEY": "abcd"}, {"pass4SymmKey": "abcd", "discoveryPass4SymmKey": "7890", "label": None, "secret": "abcd", "replication_factor": 1, "search_factor": 1}),
({"idxc": {"pass4SymmKey": "1234", "discoveryPass4SymmKey": "7890"}}, {"SPLUNK_IDXC_DISCOVERYPASS4SYMMKEY": "zxcv", "SPLUNK_IDXC_PASS4SYMMKEY": "abcd"}, {"pass4SymmKey": "abcd", "discoveryPass4SymmKey": "zxcv", "label": None, "secret": "abcd", "replication_factor": 1, "search_factor": 1}),
({"idxc": {"secret": "abcd"}}, {"SPLUNK_IDXC_SECRET": "1234"}, {"pass4SymmKey": "1234", "discoveryPass4SymmKey": "1234", "label": None, "secret": "1234", "replication_factor": 1, "search_factor": 1}),
({"idxc": {"replication_factor": 3, "search_factor": 3}}, {"SPLUNK_IDXC_REPLICATION_FACTOR": 2}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 2, "search_factor": 2}),
({"idxc": {"replication_factor": 2, "search_factor": 2}}, {"SPLUNK_IDXC_SEARCH_FACTOR": 1}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 2, "search_factor": 1}),
]
)
def test_getIndexerClustering(default_yml, os_env, output):
    """Check idxc settings; replication_factor is capped by the indexer host count,
    and search_factor is capped by replication_factor."""
    vars_scope = {"splunk": default_yml}
    fake_inventory = {"splunk_indexer": {"hosts": ["a", "b"]}}
    with patch("environ.inventory", fake_inventory):
        with patch("os.environ", new=os_env):
            environ.getIndexerClustering(vars_scope)
    idxc = vars_scope["splunk"]["idxc"]
    assert type(idxc) == dict
    assert idxc == output
@pytest.mark.parametrize(("default_yml", "os_env", "output"),
[
# Check null parameters
({}, {}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1}),
# Check default.yml parameters
({"shc": {}}, {}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1}),
({"shc": {"label": None}}, {}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1}),
({"shc": {"label": "1234"}}, {}, {"pass4SymmKey": None, "label": "1234", "secret": None, "replication_factor": 1}),
({"shc": {"secret": None}}, {}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1}),
({"shc": {"secret": "1234"}}, {}, {"pass4SymmKey": "1234", "label": None, "secret": "1234", "replication_factor": 1}),
({"shc": {"pass4SymmKey": None}}, {}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1}),
({"shc": {"pass4SymmKey": "1234"}}, {}, {"pass4SymmKey": "1234", "label": None, "secret": "1234", "replication_factor": 1}),
({"shc": {"replication_factor": 0}}, {}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 0}),
({"shc": {"replication_factor": 1}}, {}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1}),
({"shc": {"replication_factor": "2"}}, {}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 2}),
# This should return replication_factor=2 because there are only 2 hosts in the "splunk_search_head" group
({"shc": {"replication_factor": 3}}, {}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 2}),
# Check environment variable parameters
({}, {"SPLUNK_SHC_LABEL": ""}, {"pass4SymmKey": None, "label": "", "secret": None, "replication_factor": 1}),
({}, {"SPLUNK_SHC_LABEL": "abcd"}, {"pass4SymmKey": None,"label": "abcd", "secret": None, "replication_factor": 1}),
({}, {"SPLUNK_SHC_SECRET": ""}, {"pass4SymmKey": "", "label": None, "secret": "", "replication_factor": 1}),
({}, {"SPLUNK_SHC_SECRET": "abcd"}, {"pass4SymmKey": "abcd", "label": None, "secret": "abcd", "replication_factor": 1}),
({}, {"SPLUNK_SHC_PASS4SYMMKEY": "abcd"}, {"pass4SymmKey": "abcd", "label": None, "secret": "abcd", "replication_factor": 1}),
({}, {"SPLUNK_SHC_REPLICATION_FACTOR": "2"}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 2}),
# Check the union combination of default.yml + environment variables and order of precedence when overwriting
({"shc": {"label": "1234"}}, {"SPLUNK_SHC_LABEL": "abcd"}, {"pass4SymmKey": None, "label": "abcd", "secret": None, "replication_factor": 1}),
({"shc": {"secret": "abcd"}}, {"SPLUNK_SHC_SECRET": "1234"}, {"pass4SymmKey": "1234", "label": None, "secret": "1234", "replication_factor": 1}),
({"shc": {"pass4SymmKey": "1234"}}, {"SPLUNK_SHC_PASS4SYMMKEY": "abcd"}, {"pass4SymmKey": "abcd", "label": None, "secret": "abcd", "replication_factor": 1}),
({"shc": {"replication_factor": 2}}, {"SPLUNK_SHC_REPLICATION_FACTOR": "1"}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1}),
]
)
def test_getSearchHeadClustering(default_yml, os_env, output):
vars_scope = {"splunk": default_yml}
with patch("environ.inventory", {"splunk_search_head": {"hosts": ["a", "b"]}}) as mock_inven:
with patch("os.environ", new=os_env):
environ.getSearchHeadClustering(vars_scope)
assert type(vars_scope["splunk"]["shc"]) == dict
assert vars_scope["splunk"]["shc"] == output
@pytest.mark.skip(reason="TODO")
def test_getMultisite():
    """Placeholder for environ.getMultisite() coverage; intentionally skipped until written."""
    pass
@pytest.mark.skip(reason="TODO")
def test_getSplunkWebSSL():
    """Placeholder for environ.getSplunkWebSSL() coverage; intentionally skipped until written."""
    pass
@pytest.mark.parametrize(("default_yml", "os_env", "output"),
[
# Check null parameters
({}, {}, {"ca": None, "cert": None, "password": None, "enable": True}),
({"does-not-exist": True}, {}, {"ca": None, "cert": None, "password": None, "enable": True}),
# Check default.yml parameters
({"ssl": {"enable": False}}, {}, {"ca": None, "cert": None, "password": None, "enable": False}),
({"ssl": {"ca": "hi"}}, {}, {"ca": "hi", "cert": None, "password": None, "enable": True}),
({"ssl": {"cert": "hi"}}, {}, {"ca": None, "cert": "hi", "password": None, "enable": True}),
({"ssl": {"password": "hi"}}, {}, {"ca": None, "cert": None, "password": "hi", "enable": True}),
({"ssl": {"ca": "aaa", "cert": "bbb", "password": "<PASSWORD>", "enable": False}}, {}, {"ca": "aaa", "cert": "bbb", "password": "<PASSWORD>", "enable": False}),
# Check environment variable parameters
({}, {"SPLUNKD_SSL_CA": "hi"}, {"ca": "hi", "cert": None, "password": None, "enable": True}),
({}, {"SPLUNKD_SSL_CERT": "hi"}, {"ca": None, "cert": "hi", "password": None, "enable": True}),
({}, {"SPLUNKD_SSL_PASSWORD": "hi"}, {"ca": None, "cert": None, "password": "hi", "enable": True}),
({}, {"SPLUNKD_SSL_ENABLE": "true"}, {"ca": None, "cert": None, "password": None, "enable": True}),
({}, {"SPLUNKD_SSL_ENABLE": "false"}, {"ca": None, "cert": None, "password": None, "enable": False}),
({}, {"SPLUNKD_SSL_ENABLE": "False"}, {"ca": None, "cert": None, "password": None, "enable": False}),
# Check the union combination of default.yml + environment variables and order of precedence when overwriting
({"ssl": {"ca": "value1"}}, {"SPLUNKD_SSL_CA": "value2"}, {"ca": "value2", "cert": None, "password": None, "enable": True}),
({"ssl": {"cert": "value1"}}, {"SPLUNKD_SSL_CERT": "value2"}, {"ca": None, "cert": "value2", "password": None, "enable": True}),
({"ssl": {"password": "<PASSWORD>"}}, {"SPLUNKD_SSL_PASSWORD": "<PASSWORD>"}, {"ca": None, "cert": None, "password": "<PASSWORD>", "enable": True}),
({}, {"SPLUNKD_SSL_ENABLE": "true"}, {"ca": None, "cert": None, "password": None, "enable": True}),
({}, {"SPLUNKD_SSL_ENABLE": "false"}, {"ca": None, "cert": None, "password": None, "enable": False}),
({"ssl": {"enable": True}}, {"SPLUNKD_SSL_ENABLE": "FALSE"}, {"ca": None, "cert": None, "password": None, "enable": False}),
({"ssl": {"enable": True}}, {"SPLUNKD_SSL_ENABLE": "FaLsE"}, {"ca": None, "cert": None, "password": None, "enable": False}),
({"ssl": {"enable": False}}, {"SPLUNKD_SSL_ENABLE": ""}, {"ca": None, "cert": None, "password": None, "enable": False}),
]
)
def test_getSplunkdSSL(default_yml, os_env, output):
    """getSplunkdSSL merges default.yml `ssl` settings with SPLUNKD_SSL_* env vars.

    Per the parametrize table, environment variables win over default.yml values.
    """
    scope = {"splunk": default_yml}
    with patch("os.environ", new=os_env):
        environ.getSplunkdSSL(scope)
    assert type(scope["splunk"]) is dict
    assert type(scope["splunk"]["ssl"]) is dict
    assert scope["splunk"]["ssl"] == output
@pytest.mark.parametrize(("default_yml", "os_env", "output"),
[
# Check null parameters - Splunk password is required
({"password": "<PASSWORD>"}, {}, {"password": "<PASSWORD>", "declarative_admin_password": False, "pass4SymmKey": None, "secret": None}),
# Check default.yml parameters
({"password": "<PASSWORD>", "pass4SymmKey": "you-will-never-guess", "secret": None}, {}, {"password": "<PASSWORD>", "declarative_admin_password": False, "pass4SymmKey": "you-will-never-guess", "secret": None}),
({"password": "<PASSWORD>", "pass4SymmKey": "you-will-never-guess", "secret": "1234"}, {}, {"password": "<PASSWORD>", "declarative_admin_password": False, "pass4SymmKey": "you-will-never-guess", "secret": "1234"}),
({"password": "<PASSWORD>", "secret": "1234"}, {}, {"password": "<PASSWORD>", "declarative_admin_password": False, "pass4SymmKey": None, "secret": "1234"}),
({"password": "<PASSWORD>", "declarative_admin_password": True, "pass4SymmKey": "you-will-never-guess", "secret": None}, {}, {"password": "<PASSWORD>", "declarative_admin_password": True, "pass4SymmKey": "you-will-never-guess", "secret": None}),
({"password": "<PASSWORD>", "declarative_admin_password": True, "pass4SymmKey": "you-will-never-guess", "secret": "1234"}, {}, {"password": "<PASSWORD>", "declarative_admin_password": True, "pass4SymmKey": "you-will-never-guess", "secret": "1234"}),
({"password": "<PASSWORD>", "declarative_admin_password": True, "secret": "1234"}, {}, {"password": "<PASSWORD>", "declarative_admin_password": True, "pass4SymmKey": None, "secret": "1234"}),
# Check environment variable parameters
({"password": None}, {"SPLUNK_PASSWORD": "<PASSWORD>", "SPLUNK_PASS4SYMMKEY": "you-will-never-guess"}, {"password": "<PASSWORD>", "declarative_admin_password": False, "pass4SymmKey": "you-will-never-guess", "secret": None}),
({"password": None}, {"SPLUNK_PASSWORD": "<PASSWORD>", "SPLUNK_PASS4SYMMKEY": "you-will-never-guess", "SPLUNK_SECRET": "1234"}, {"password": "<PASSWORD>", "declarative_admin_password": False, "pass4SymmKey": "you-will-never-guess", "secret": "1234"}),
({"password": None}, {"SPLUNK_PASSWORD": "<PASSWORD>", "SPLUNK_SECRET": "1234"}, {"password": "<PASSWORD>", "declarative_admin_password": False, "pass4SymmKey": None, "secret": "1234"}),
({"password": None}, {"SPLUNK_PASSWORD": "<PASSWORD>", "SPLUNK_DECLARATIVE_ADMIN_PASSWORD": "true", "SPLUNK_PASS4SYMMKEY": "you-will-never-guess"}, {"password": "<PASSWORD>", "declarative_admin_password": True, "pass4SymmKey": "you-will-never-guess", "secret": None}),
({"password": None}, {"SPLUNK_PASSWORD": "<PASSWORD>", "SPLUNK_DECLARATIVE_ADMIN_PASSWORD": "TRUE", "SPLUNK_PASS4SYMMKEY": "you-will-never-guess", "SPLUNK_SECRET": "1234"}, {"password": "<PASSWORD>", "declarative_admin_password": True, "pass4SymmKey": "you-will-never-guess", "secret": "1234"}),
# We currently don't support 'yes' as a valid boolean
({"password": None}, {"SPLUNK_PASSWORD": "<PASSWORD>", "SPLUNK_DECLARATIVE_ADMIN_PASSWORD": "yes", "SPLUNK_SECRET": "1234"}, {"password": "<PASSWORD>", "declarative_admin_password": False, "pass4SymmKey": None, "secret": "1234"})
]
)
def test_getSecrets(default_yml, os_env, output):
    """Password/pass4SymmKey/secret resolution when the password is NOT a file path."""
    scope = {"splunk": default_yml}
    with patch("environ.inventory"), \
         patch("os.environ", new=os_env), \
         patch("environ.os.path") as mock_os_path:
        # Force the "password is not an existing file" branch.
        mock_os_path.isfile = MagicMock(return_value=False)
        environ.getSecrets(scope)
    assert scope["splunk"] == output
@pytest.mark.parametrize(("default_yml", "os_env", "output"),
[
# Check when Splunk password is a file
({"password": "/<PASSWORD>"}, {}, {"password": "<PASSWORD>", "pass4SymmKey": None, "secret": None}),
({"password": "<PASSWORD>"}, {"SPLUNK_PASSWORD": "/<PASSWORD>"}, {"password": "<PASSWORD>", "pass4SymmKey": None, "secret": None}),
]
)
def test_getSecrets_passwordFromFile(default_yml, os_env, output):
    """When the configured password looks like an existing file, its contents are read in."""
    file_contents = """
    worldneversayshiback
    """
    mocked_open = mock_open(read_data=file_contents)
    scope = {"splunk": default_yml}
    with patch("environ.open", mocked_open, create=True) as mopen, \
         patch("environ.inventory"), \
         patch("os.environ", new=os_env), \
         patch("os.path") as mock_os_path:
        # Pretend the supplied path exists so the read-from-file branch is taken.
        mock_os_path.isfile = MagicMock(return_value=True)
        environ.getSecrets(scope)
    mopen.assert_called_once()
    assert scope["splunk"]["password"] == "<PASSWORD>"
@pytest.mark.parametrize(("default_yml"),
[
# Check null parameters
({}),
({"password": None}),
({"password": ""})
]
)
def test_noSplunkPassword(default_yml):
    """getSecrets must raise when no Splunk password is supplied anywhere."""
    scope = {"splunk": default_yml}
    with patch("environ.inventory"), patch("os.environ", new={}):
        with pytest.raises(Exception) as exc:
            environ.getSecrets(scope)
    assert "Splunk password must be supplied!" in str(exc.value)
@pytest.mark.parametrize(("default_yml", "os_env", "output"),
[
# Check null parameters
({}, {}, {"launch": {}}),
# Check default.yml parameters
({"launch": {}}, {}, {"launch": {}}),
({"launch": {"A": "B"}}, {}, {"launch": {"A": "B"}}),
({"launch": {"A": "B", "C": "D"}}, {}, {"launch": {"A": "B", "C": "D"}}),
# Check environment variable parameters
({}, {"SPLUNK_LAUNCH_CONF": None}, {"launch": {}}),
({}, {"SPLUNK_LAUNCH_CONF": ""}, {"launch": {}}),
({}, {"SPLUNK_LAUNCH_CONF": "AAA=BBB"}, {"launch": {"AAA": "BBB"}}),
({}, {"SPLUNK_LAUNCH_CONF": "AAA=BBB,CCC=DDD"}, {"launch": {"AAA": "BBB", "CCC": "DDD"}}),
({}, {"SPLUNK_LAUNCH_CONF": "AAA=BBB=CCC,DDD=EEE=FFF"}, {"launch": {"AAA": "BBB=CCC", "DDD": "EEE=FFF"}}),
# Check both
({"launch": {"A": "B", "C": "D"}}, {"SPLUNK_LAUNCH_CONF": "A=E,C=D"}, {"launch": {"A": "E", "C": "D"}}),
]
)
def test_getLaunchConf(default_yml, os_env, output):
    """SPLUNK_LAUNCH_CONF key=value pairs overlay the default.yml `launch` dict."""
    scope = {"splunk": default_yml}
    with patch("environ.inventory"), patch("os.environ", new=os_env):
        environ.getLaunchConf(scope)
    assert scope["splunk"] == output
@pytest.mark.parametrize(("value", "separator", "output"),
[
# Check null value
(None, ",", []),
# Check empty value
("", ",", []),
# Check string value
("a", ",", ["a"]),
# Check comma separated string value
("a,b,c", ",", ["a", "b", "c"]),
# Check list value
(["a"], ",", ["a"]),
(["a", "b", "c"], ",", ["a", "b", "c"])
]
)
def test_ensureListValue(value, separator, output):
    """ensureListValue normalizes None / str / separated-str / list inputs into a list."""
    assert environ.ensureListValue(value, separator) == output
@pytest.mark.parametrize(("value", "separator", "output"),
[
# Check null value
(None, ",", []),
# Check empty value
("", ",", []),
# Check string value
("a", ",", ["a"]),
# Check comma separated string value
("a,b,c", ",", ["a", "b", "c"]),
# Check comma separated string value with whitespaces
(" a, b,c ", ",", ["a", "b", "c"]),
]
)
def test_splitAndStrip(value, separator, output):
    """splitAndStrip splits on the separator and trims whitespace from each element."""
    assert environ.splitAndStrip(value, separator) == output
@pytest.mark.parametrize(("default_yml", "os_env", "output"),
[
# Check null parameters
({}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {}}),
# Check ansible_pre_tasks using defaults or env vars
({"ansible_pre_tasks": ""}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {}}),
({"ansible_pre_tasks": None}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {}}),
({"ansible_pre_tasks": "a"}, {}, {"ansible_pre_tasks": ["a"], "ansible_post_tasks": [], "ansible_environment": {}}),
({"ansible_pre_tasks": ["a"]}, {}, {"ansible_pre_tasks": ["a"], "ansible_post_tasks": [], "ansible_environment": {}}),
({"ansible_pre_tasks": "a,b,c"}, {}, {"ansible_pre_tasks": ["a","b","c"], "ansible_post_tasks": [], "ansible_environment": {}}),
({"ansible_pre_tasks": ["a","b","c"]}, {}, {"ansible_pre_tasks": ["a","b","c"], "ansible_post_tasks": [], "ansible_environment": {}}),
({}, {"SPLUNK_ANSIBLE_PRE_TASKS": "d"}, {"ansible_pre_tasks": ["d"], "ansible_post_tasks": [], "ansible_environment": {}}),
({}, {"SPLUNK_ANSIBLE_PRE_TASKS": "e,f,g"}, {"ansible_pre_tasks": ["e","f","g"], "ansible_post_tasks": [], "ansible_environment": {}}),
({"ansible_pre_tasks": "a,b,c"}, {"SPLUNK_ANSIBLE_PRE_TASKS": "e,f,g"}, {"ansible_pre_tasks": ["e","f","g"], "ansible_post_tasks": [], "ansible_environment": {}}),
({"ansible_pre_tasks": ["a","b","c"]}, {"SPLUNK_ANSIBLE_PRE_TASKS": "e,f,g"}, {"ansible_pre_tasks": ["e","f","g"], "ansible_post_tasks": [], "ansible_environment": {}}),
# Check ansible_post_tasks using defaults or env vars
({"ansible_post_tasks": ""}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {}}),
({"ansible_post_tasks": None}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {}}),
({"ansible_post_tasks": "a"}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": ["a"], "ansible_environment": {}}),
({"ansible_post_tasks": ["a"]}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": ["a"], "ansible_environment": {}}),
({"ansible_post_tasks": "a,b,c"}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": ["a","b","c"], "ansible_environment": {}}),
({"ansible_post_tasks": ["a","b","c"]}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": ["a","b","c"], "ansible_environment": {}}),
({}, {"SPLUNK_ANSIBLE_POST_TASKS": "d"}, {"ansible_pre_tasks": [], "ansible_post_tasks": ["d"], "ansible_environment": {}}),
({}, {"SPLUNK_ANSIBLE_POST_TASKS": "e,f,g"}, {"ansible_pre_tasks": [], "ansible_post_tasks": ["e","f","g"], "ansible_environment": {}}),
({"ansible_post_tasks": "a,b,c"}, {"SPLUNK_ANSIBLE_POST_TASKS": "e,f,g"}, {"ansible_pre_tasks": [], "ansible_post_tasks": ["e","f","g"], "ansible_environment": {}}),
({"ansible_post_tasks": ["a","b","c"]}, {"SPLUNK_ANSIBLE_POST_TASKS": "e,f,g"}, {"ansible_pre_tasks": [], "ansible_post_tasks": ["e","f","g"], "ansible_environment": {}}),
# Check ansible_environment using defaults or env vars
({"ansible_environment": None}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {}}),
({"ansible_environment": {"a": "b"}}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {"a": "b"}}),
({"ansible_environment": {"a": "b", "d": "e"}}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {"a": "b", "d": "e"}}),
({}, {"SPLUNK_ANSIBLE_ENV": "a=b"}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {"a": "b"}}),
({}, {"SPLUNK_ANSIBLE_ENV": "a=b,x=y"}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {"a": "b", "x": "y"}}),
({"ansible_environment": {"a": "c", "d": "e"}}, {"SPLUNK_ANSIBLE_ENV": "a=b,x=y"}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {"a": "b", "d": "e", "x": "y"}}),
]
)
def test_getAnsibleContext(default_yml, os_env, output):
    """Pre/post task lists and ansible_environment merge, env vars taking precedence.

    Note: this function mutates the top-level vars scope directly (no "splunk" key).
    """
    scope = default_yml
    with patch("environ.inventory"), patch("os.environ", new=os_env):
        environ.getAnsibleContext(scope)
    assert scope == output
@pytest.mark.parametrize(("default_yml", "os_env", "splunk_asan"),
[
# Check null parameters
({}, {}, False),
# Check default.yml parameters
({"asan": False}, {}, False),
({"asan": True}, {}, True),
# Check env var parameters
({}, {"SPLUNK_ENABLE_ASAN": ""}, False),
({}, {"SPLUNK_ENABLE_ASAN": "anything"}, True),
# Check both
({"asan": False}, {"SPLUNK_ENABLE_ASAN": ""}, False),
({"asan": True}, {"SPLUNK_ENABLE_ASAN": ""}, False),
({"asan": True}, {"SPLUNK_ENABLE_ASAN": "true"}, True),
({"asan": False}, {"SPLUNK_ENABLE_ASAN": "yes"}, True),
]
)
def test_getASan(default_yml, os_env, splunk_asan):
    """getASan resolves splunk.asan from default.yml / SPLUNK_ENABLE_ASAN and,
    when enabled, injects ASAN_OPTIONS=detect_leaks=0 into ansible_environment.

    Fix: None-comparison used `==`; PEP 8 requires identity comparison (`is None`).
    """
    vars_scope = {"ansible_environment": {}, "splunk": default_yml}
    with patch("environ.inventory"), patch("os.environ", new=os_env):
        environ.getASan(vars_scope)
    assert vars_scope["splunk"]["asan"] == splunk_asan
    if vars_scope["splunk"]["asan"]:
        assert vars_scope["ansible_environment"].get("ASAN_OPTIONS") == "detect_leaks=0"
    else:
        assert vars_scope["ansible_environment"].get("ASAN_OPTIONS") is None
@pytest.mark.parametrize(("default_yml", "os_env", "result"),
[
# Check null parameters
({}, {}, {"enable": True, "port": 8088, "token": None, "ssl": True}),
# Check default.yml parameters
({"enable": False}, {}, {"enable": False, "port": 8088, "token": None, "ssl": True}),
({"port": 8099}, {}, {"enable": True, "port": 8099, "token": None, "ssl": True}),
({"token": "abcd"}, {}, {"enable": True, "port": 8088, "token": "abcd", "ssl": True}),
({"ssl": False}, {}, {"enable": True, "port": 8088, "token": None, "ssl": False}),
# Check env var parameters
({}, {"SPLUNK_HEC_TOKEN": "<PASSWORD>"}, {"enable": True, "port": 8088, "token": "qw<PASSWORD>", "ssl": True}),
({}, {"SPLUNK_HEC_PORT": "9999"}, {"enable": True, "port": 9999, "token": None, "ssl": True}),
({}, {"SPLUNK_HEC_SSL": "true"}, {"enable": True, "port": 8088, "token": None, "ssl": True}),
({}, {"SPLUNK_HEC_SSL": "false"}, {"enable": True, "port": 8088, "token": None, "ssl": False}),
({}, {"SPLUNK_HEC_SSL": "FALSE"}, {"enable": True, "port": 8088, "token": None, "ssl": False}),
# Check both
({"port": 8099}, {"SPLUNK_HEC_PORT": "19999"}, {"enable": True, "port": 19999, "token": None, "ssl": True}),
({"token": "abcd"}, {"SPLUNK_HEC_TOKEN": "<PASSWORD>"}, {"enable": True, "port": 8088, "token": "fdsa", "ssl": True}),
({"ssl": True}, {"SPLUNK_HEC_SSL": "fAlSe"}, {"enable": True, "port": 8088, "token": None, "ssl": False}),
]
)
def test_getHEC(default_yml, os_env, result):
    """HEC settings start from fixed defaults, are overlaid by default.yml, then env vars."""
    hec_defaults = {"enable": True, "port": 8088, "token": None, "ssl": True}
    hec_defaults.update(default_yml)
    scope = {"splunk": {"hec": hec_defaults}}
    with patch("environ.inventory"), patch("os.environ", new=os_env):
        environ.getHEC(scope)
    assert scope["splunk"]["hec"] == result
@pytest.mark.parametrize(("default_yml", "os_env", "result"),
[
# Check null parameters
({}, {}, False),
# # Check default.yml parameters
({"disable_popups": False}, {}, False),
({"disable_popups": True}, {}, True),
# # Check env var parameters
({}, {"SPLUNK_DISABLE_POPUPS": "TRUE"}, True),
({}, {"SPLUNK_DISABLE_POPUPS": "true"}, True),
({}, {"SPLUNK_DISABLE_POPUPS": "True"}, True),
({}, {"SPLUNK_DISABLE_POPUPS": "false"}, False),
({}, {"SPLUNK_DISABLE_POPUPS": "False"}, False),
({}, {"SPLUNK_DISABLE_POPUPS": "FALSE"}, False),
# # Check both
({"disable_popups": False}, {"SPLUNK_DISABLE_POPUPS": "TRUE"}, True),
({"disable_popups": False}, {"SPLUNK_DISABLE_POPUPS": "True"}, True),
({"disable_popups": True}, {"SPLUNK_DISABLE_POPUPS": "False"}, False),
({"disable_popups": True}, {"SPLUNK_DISABLE_POPUPS": "FALSE"}, False),
]
)
def test_getDisablePopups(default_yml, os_env, result):
    """disable_popups comes from default.yml, overridden by SPLUNK_DISABLE_POPUPS."""
    scope = {"splunk": default_yml}
    with patch("environ.inventory"), patch("os.environ", new=os_env):
        environ.getDisablePopups(scope)
    assert scope["splunk"]["disable_popups"] == result
@pytest.mark.parametrize(("default_yml", "os_env", "result"),
[
# Check null parameters
({}, {}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
# Check default.yml parameters
({"enable": True}, {}, {"enable": True, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
({"server": "fwd.dsp.com:8888"}, {}, {"enable": False, "server": "fwd.dsp.com:8888", "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
({"cert": "path/to/cert.pem"}, {}, {"enable": False, "server": None, "cert": "path/to/cert.pem", "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
({"verify": True}, {}, {"enable": False, "server": None, "cert": None, "verify": True, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
({"pipeline_name": "abcd"}, {}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": "abcd", "pipeline_desc": None, "pipeline_spec": None}),
({"pipeline_desc": "abcd"}, {}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": "abcd", "pipeline_spec": None}),
({"pipeline_spec": "abcd"}, {}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": "abcd"}),
# Check env var parameters
({}, {"SPLUNK_DSP_SERVER": "fwd.dsp.com:9999"}, {"enable": False, "server": "fwd.dsp.com:9999", "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
({}, {"SPLUNK_DSP_CERT": "crt.pem"}, {"enable": False, "server": None, "cert": "crt.pem", "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
({}, {"SPLUNK_DSP_VERIFY": "yes"}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
({}, {"SPLUNK_DSP_VERIFY": "true"}, {"enable": False, "server": None, "cert": None, "verify": True, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
({}, {"SPLUNK_DSP_VERIFY": "TRUE"}, {"enable": False, "server": None, "cert": None, "verify": True, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
({}, {"SPLUNK_DSP_ENABLE": "yes"}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
({}, {"SPLUNK_DSP_ENABLE": "true"}, {"enable": True, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
({}, {"SPLUNK_DSP_ENABLE": "TRUE"}, {"enable": True, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
({}, {"SPLUNK_DSP_PIPELINE_NAME": "do"}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": "do", "pipeline_desc": None, "pipeline_spec": None}),
({}, {"SPLUNK_DSP_PIPELINE_DESC": "re"}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": "re", "pipeline_spec": None}),
({}, {"SPLUNK_DSP_PIPELINE_SPEC": "mi"}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": "mi"}),
# Check both
({"enable": True}, {"SPLUNK_DSP_ENABLE": "false"}, {"enable": True, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
({"enable": False}, {"SPLUNK_DSP_ENABLE": "true"}, {"enable": True, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
({"server": "fwd.dsp.com:8888"}, {"SPLUNK_DSP_SERVER": "fwd.dsp.com:9999"}, {"enable": False, "server": "fwd.dsp.com:9999", "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
({"cert": "path1/crt.pem"}, {"SPLUNK_DSP_CERT": "path2/cert.pem"}, {"enable": False, "server": None, "cert": "path2/cert.pem", "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
({"verify": True}, {"SPLUNK_DSP_VERIFY": "false"}, {"enable": False, "server": None, "cert": None, "verify": True, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
({"verify": False}, {"SPLUNK_DSP_VERIFY": "TRUE"}, {"enable": False, "server": None, "cert": None, "verify": True, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
({"pipeline_name": "abcd"}, {"SPLUNK_DSP_PIPELINE_NAME": "xyz"}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": "xyz", "pipeline_desc": None, "pipeline_spec": None}),
({"pipeline_desc": "abcd"}, {"SPLUNK_DSP_PIPELINE_DESC": "xyz"}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": "xyz", "pipeline_spec": None}),
({"pipeline_spec": "abcd"}, {"SPLUNK_DSP_PIPELINE_SPEC": "xyz"}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": "xyz"}),
]
)
def test_getDSP(default_yml, os_env, result):
    """DSP forwarding settings: fixed defaults, overlaid by default.yml, then env vars.

    Per the parametrize table, only "true"/"TRUE"-style values flip booleans;
    default.yml `enable: True` is not overridden back to False by the env var.
    """
    dsp_defaults = {
        "enable": False,
        "server": None,
        "cert": None,
        "verify": False,
        "pipeline_name": None,
        "pipeline_desc": None,
        "pipeline_spec": None,
    }
    dsp_defaults.update(default_yml)
    scope = {"splunk": {"dsp": dsp_defaults}}
    with patch("environ.inventory"), patch("os.environ", new=os_env):
        environ.getDSP(scope)
    assert scope["splunk"]["dsp"] == result
@pytest.mark.parametrize(("es_enablement", "os_env", "result"),
[
(None, {}, ""),
(None, {"SPLUNK_ES_SSL_ENABLEMENT":"strict"}, "--ssl_enablement strict"),
({"ssl_enablement":"auto"}, {}, "--ssl_enablement auto"),
({"ssl_enablement":"strict"}, {}, "--ssl_enablement strict"),
({"ssl_enablement":"ignore"}, {}, "--ssl_enablement ignore"),
({"ssl_enablement":"ignore"}, {"SPLUNK_ES_SSL_ENABLEMENT":"strict"}, "--ssl_enablement strict"),
({"ssl_enablement":"invalid"}, {}, "Exception")
]
)
def test_getESSplunkVariables(es_enablement, os_env, result):
    """es_ssl_enablement is built from splunk.es.ssl_enablement / SPLUNK_ES_SSL_ENABLEMENT;
    invalid values must raise.

    Fix: the old try/except wrapped the success-path assert too, so a failing
    `assert vars_scope["es_ssl_enablement"] == result` raised AssertionError,
    which the broad `except Exception` caught — masking real failures behind the
    `result == "Exception"` check. The raise-expected and success paths are now
    separated, with pytest.raises asserting the failure case explicitly.
    """
    vars_scope = {"splunk": {}}
    if es_enablement is not None:
        vars_scope["splunk"]["es"] = es_enablement
    with patch("environ.inventory"), patch("os.environ", new=os_env):
        if result == "Exception":
            with pytest.raises(Exception):
                environ.getESSplunkVariables(vars_scope)
        else:
            environ.getESSplunkVariables(vars_scope)
            assert vars_scope["es_ssl_enablement"] == result
@pytest.mark.parametrize(("os_env", "license_master_url", "deployer_url", "cluster_master_url", "search_head_captain_url"),
[
({}, "", "", "", ""),
# Check individual environment variables
({"SPLUNK_LICENSE_MASTER_URL": "something"}, "https://something:8089", "", "", ""),
({"SPLUNK_DEPLOYER_URL": "something"}, "", "something", "", ""),
({"SPLUNK_CLUSTER_MASTER_URL": "something"}, "", "", "something", ""),
({"SPLUNK_SEARCH_HEAD_CAPTAIN_URL": "something"}, "", "", "", "something"),
]
)
def test_getDistributedTopology(os_env, license_master_url, deployer_url, cluster_master_url, search_head_captain_url):
    """Each SPLUNK_*_URL env var populates its matching splunk.* key (empty string when unset)."""
    scope = {"splunk": {}}
    with patch("os.environ", new=os_env):
        environ.getDistributedTopology(scope)
    expected = {
        "license_master_url": license_master_url,
        "deployer_url": deployer_url,
        "cluster_master_url": cluster_master_url,
        "search_head_captain_url": search_head_captain_url,
    }
    for key, value in expected.items():
        assert type(scope["splunk"][key]) is str
        assert scope["splunk"][key] == value
@pytest.mark.parametrize(("default_yml", "os_env", "license_uri", "wildcard_license", "ignore_license", "license_download_dest"),
[
({}, {}, "splunk.lic", False, False, "/tmp/splunk.lic"),
# Check individual environment variables
({}, {"SPLUNK_LICENSE_URI": "http://web/license.lic"}, "http://web/license.lic", False, False, "/tmp/splunk.lic"),
({}, {"SPLUNK_LICENSE_URI": "/mnt/*.lic"}, "/mnt/*.lic", True, False, "/tmp/splunk.lic"),
({}, {"SPLUNK_NFR_LICENSE": "/mnt/nfr.lic"}, "splunk.lic", False, False, "/tmp/splunk.lic"),
({}, {"SPLUNK_IGNORE_LICENSE": ""}, "splunk.lic", False, False, "/tmp/splunk.lic"),
({}, {"SPLUNK_IGNORE_LICENSE": "true"}, "splunk.lic", False, True, "/tmp/splunk.lic"),
({}, {"SPLUNK_IGNORE_LICENSE": "TRUE"}, "splunk.lic", False, True, "/tmp/splunk.lic"),
({}, {"SPLUNK_IGNORE_LICENSE": "false"}, "splunk.lic", False, False, "/tmp/splunk.lic"),
({}, {"SPLUNK_LICENSE_INSTALL_PATH": "/Downloads/"}, "splunk.lic", False, False, "/Downloads/"),
# Check default.yml
({"license_uri": None}, {}, "splunk.lic", False, False, "/tmp/splunk.lic"),
({"license_uri": ""}, {}, "splunk.lic", False, False, "/tmp/splunk.lic"),
({"license_uri": "http://web/license.lic"}, {}, "http://web/license.lic", False, False, "/tmp/splunk.lic"),
({"license_uri": "/mnt/*.lic"}, {}, "/mnt/*.lic", True, False, "/tmp/splunk.lic"),
({"license_uri": "/mnt/nfr.lic"}, {}, "/mnt/nfr.lic", False, False, "/tmp/splunk.lic"),
({"license_uri": "/mnt/1.lic"}, {"SPLUNK_LICENSE_URI": "/mnt/2.lic"}, "/mnt/2.lic", False, False, "/tmp/splunk.lic"),
({"license_download_dest": None}, {}, "splunk.lic", False, False, "/tmp/splunk.lic"),
({"license_download_dest": ""}, {}, "splunk.lic", False, False, "/tmp/splunk.lic"),
({"license_download_dest": "/Downloads/splunk.lic"}, {}, "splunk.lic", False, False, "/Downloads/splunk.lic"),
({"license_download_dest": "/Downloads/splunk.lic"}, {"SPLUNK_LICENSE_INSTALL_PATH": "/mnt/license.file"}, "splunk.lic", False, False, "/mnt/license.file"),
]
)
def test_getLicenses(default_yml, os_env, license_uri, wildcard_license, ignore_license, license_download_dest):
    """License URI/wildcard/ignore/download-dest resolution from default.yml and env vars."""
    scope = {"splunk": default_yml}
    with patch("os.environ", new=os_env):
        environ.getLicenses(scope)
    splunk = scope["splunk"]
    assert splunk["license_uri"] == license_uri
    assert type(splunk["wildcard_license"]) is bool
    assert splunk["wildcard_license"] == wildcard_license
    assert type(splunk["ignore_license"]) is bool
    assert splunk["ignore_license"] == ignore_license
    assert splunk["license_download_dest"] == license_download_dest
@pytest.mark.parametrize(("default_yml", "os_env", "java_version", "java_download_url", "java_update_version"),
[
({}, {}, None, None, None),
# Check environment variable parameters
({}, {"JAVA": "oracle:8"}, None, None, None),
({}, {"JAVA_VERSION": "openjdk:8"}, "openjdk:8", None, None),
({}, {"JAVA_VERSION": "openjdk:9"}, "openjdk:9", None, None),
({}, {"JAVA_VERSION": "oracle:8"}, "oracle:8", "https://download.oracle.com/otn-pub/java/jdk/8u141-b15/336fa29ff2bb4ef291e347e091f7f4a7/jdk-8u141-linux-x64.tar.gz", "141"),
({}, {"JAVA_VERSION": "ORACLE:8"}, "oracle:8", "https://download.oracle.com/otn-pub/java/jdk/8u141-b15/336fa29ff2bb4ef291e347e091f7f4a7/jdk-8u141-linux-x64.tar.gz", "141"),
({}, {"JAVA_VERSION": "openjdk:11"}, "openjdk:11", "https://download.java.net/java/GA/jdk11/9/GPL/openjdk-11.0.2_linux-x64_bin.tar.gz", "11.0.2"),
({}, {"JAVA_VERSION": "oPenJdK:11"}, "openjdk:11", "https://download.java.net/java/GA/jdk11/9/GPL/openjdk-11.0.2_linux-x64_bin.tar.gz", "11.0.2"),
({}, {"JAVA_VERSION": "oracle:8", "JAVA_DOWNLOAD_URL": "https://java/jdk-8u9000-linux-x64.tar.gz"}, "oracle:8", "https://java/jdk-8u9000-linux-x64.tar.gz", "9000"),
({}, {"JAVA_VERSION": "openjdk:11", "JAVA_DOWNLOAD_URL": "https://java/openjdk-11.11.11_linux-x64_bin.tar.gz"}, "openjdk:11", "https://java/openjdk-11.11.11_linux-x64_bin.tar.gz", "11.11.11"),
# Check default.yml
({"java_version": "openjdk:11"}, {}, "openjdk:11", None, None),
({"java_download_url": "http://web/java.tgz"}, {}, None, "http://web/java.tgz", None),
({"java_update_version": "jdk11u141"}, {}, None, None, "jdk11u141"),
# Check order of precedence
({"java_version": "openjdk:9", "java_download_url": "http://web/java.tgz", "java_update_version": "jdk11u141"}, {"JAVA_VERSION": "oPenJdK:11"}, "openjdk:11", "https://download.java.net/java/GA/jdk11/9/GPL/openjdk-11.0.2_linux-x64_bin.tar.gz", "11.0.2"),
]
)
def test_getJava(default_yml, os_env, java_version, java_download_url, java_update_version):
    """java_version / java_download_url / java_update_version resolution, env vars winning."""
    scope = default_yml
    with patch("os.environ", new=os_env):
        environ.getJava(scope)
    for key, expected in (
        ("java_version", java_version),
        ("java_download_url", java_download_url),
        ("java_update_version", java_update_version),
    ):
        assert scope[key] == expected
@pytest.mark.parametrize(("os_env", "java_version", "java_download_url", "err_msg"),
[
({"JAVA_VERSION": "oracle:3"}, None, None, "Invalid Java version supplied"),
({"JAVA_VERSION": "openjdk:20"}, None, None, "Invalid Java version supplied"),
({"JAVA_VERSION": "oracle:8", "JAVA_DOWNLOAD_URL": "https://java/jdk-8u9000.tar.gz"}, "oracle:8", "https://java/jdk-8u9000.tar.gz", "Invalid Java download URL format"),
({"JAVA_VERSION": "openjdk:11", "JAVA_DOWNLOAD_URL": "https://java/openjdk-11.tar.gz"}, "openjdk:11", "https://java/openjdk-11.tar.gz", "Invalid Java download URL format"),
]
)
def test_getJava_exception(os_env, java_version, java_download_url, err_msg):
    """getJava must raise for unsupported versions or malformed download URLs.

    Fix: the old `try: getJava(...); assert False; except Exception as e:` pattern
    caught its own AssertionError when getJava did NOT raise, and then checked
    `err_msg in str(e)` against the wrong exception (str(AssertionError()) is ""),
    producing a misleading failure. pytest.raises captures exactly the exception
    from getJava. Also: `== None` replaced with the idiomatic `is None`.
    """
    vars_scope = {"splunk": {}}
    with patch("os.environ", new=os_env):
        with pytest.raises(Exception) as exc:
            environ.getJava(vars_scope)
        assert err_msg in str(exc.value)
    assert vars_scope["java_version"] == java_version
    assert vars_scope["java_download_url"] == java_download_url
    assert vars_scope["java_update_version"] is None
@pytest.mark.parametrize(("default_yml", "os_env", "build", "build_url_bearer_token"),
    [
        ({}, {}, None, None),
        # Check default.yml parameters
        ({"buildlocation": "http://server/file.tgz"}, {}, None, None),
        ({"build_location": None}, {}, None, None),
        ({"build_location": ""}, {}, "", None),
        ({"build_location": "/path/to/file.tgz"}, {}, "/path/to/file.tgz", None),
        ({"build_location": "http://server/file.tgz"}, {}, "http://server/file.tgz", None),
        ({"build_location": "https://server/file.tgz"}, {}, "https://server/file.tgz", None),
        # Check environment variable parameters
        ({}, {"SPLUNK_BUILD": "http://server/file.tgz"}, None, None),
        ({}, {"SPLUNK_BUILD_URL": None}, None, None),
        ({}, {"SPLUNK_BUILD_URL": ""}, "", None),
        ({}, {"SPLUNK_BUILD_URL": "/path/to/file.tgz", "SPLUNK_BUILD_URL_BEARER_TOKEN": "testToken"}, "/path/to/file.tgz", "testToken"),
        ({}, {"SPLUNK_BUILD_URL": "http://server/file.tgz", "SPLUNK_BUILD_URL_BEARER_TOKEN": "testToken"}, "http://server/file.tgz", "testToken"),
        ({}, {"SPLUNK_BUILD_URL": "https://server/file.tgz", "SPLUNK_BUILD_URL_BEARER_TOKEN": "testToken"}, "https://server/file.tgz", "testToken"),
        # Check order of precedence
        ({"build_location": "http://server/file1.tgz"}, {"SPLUNK_BUILD_URL": "https://server/file2.tgz"}, "https://server/file2.tgz", None),
        ({"build_location": "http://server/file1.tgz"}, {"SPLUNK_BUILD_URL": "/path/to/file.tgz"}, "/path/to/file.tgz", None),
    ]
)
def test_getSplunkBuild(default_yml, os_env, build, build_url_bearer_token):
    """getSplunkBuild() resolves the Splunk build location (and optional URL
    bearer token) from default.yml and/or env vars, env vars winning.

    Note: the "buildlocation"/"SPLUNK_BUILD" cases use deliberately wrong
    key names to confirm they are ignored.
    """
    vars_scope = dict()
    vars_scope["splunk"] = default_yml
    with patch("os.environ", new=os_env):
        environ.getSplunkBuild(vars_scope)
    assert vars_scope["splunk"]["build_location"] == build
    assert vars_scope["splunk"]["build_url_bearer_token"] == build_url_bearer_token
@pytest.mark.parametrize(("default_yml", "response_content", "trigger_splunkbase"),
    [
        ({}, "<id>123abc</id>", False),
        ({"splunkbase_username": "ocho"}, "<id>123abc</id>", False),
        ({"splunkbase_password": "<PASSWORD>"}, "<id>123abc</id>", False),
        ({"splunkbase_username": "ocho", "splunkbase_password": "<PASSWORD>"}, "<id>123abc</id>", True),
        ({"splunkbase_username": "", "splunkbase_password": ""}, "<id>123abc</id>", False),
        ({}, "<id>123abc</id>", False),
        ({"splunkbase_username": "ocho"}, b"<id>123abc</id>", False),
        ({"splunkbase_password": "<PASSWORD>"}, b"<id>123abc</id>", False),
        ({"splunkbase_username": "ocho", "splunkbase_password": "<PASSWORD>"}, b"<id>123abc</id>", True),
        ({"splunkbase_username": "", "splunkbase_password": ""}, b"<id>123abc</id>", False),
    ]
)
def test_getSplunkbaseToken(default_yml, response_content, trigger_splunkbase):
    """getSplunkbaseToken() only calls the Splunkbase login API when BOTH
    username and password are non-empty; str and bytes response bodies are
    both exercised.
    """
    vars_scope = default_yml
    with patch("environ.requests.post") as mock_post:
        mock_post.return_value = MagicMock(status_code=200, content=response_content)
        with patch("os.environ", new=dict()):
            environ.getSplunkbaseToken(vars_scope)
    # Make sure Splunkbase token is populated when appropriate
    assert "splunkbase_token" in vars_scope
    assert "splunkbase_username" in vars_scope
    assert "splunkbase_password" in vars_scope
    if trigger_splunkbase:
        mock_post.assert_called_with("https://splunkbase.splunk.com/api/account:login/", data={"username": "ocho", "password": "<PASSWORD>"})
        assert vars_scope.get("splunkbase_token") == "<PASSWORD>"
    else:
        mock_post.assert_not_called()
        assert not vars_scope.get("splunkbase_token")
def test_getSplunkbaseToken_exception():
    """getSplunkbaseToken() must raise when Splunkbase rejects the
    credentials (non-200 response)."""
    with patch("environ.requests.post") as mock_post:
        mock_post.return_value = MagicMock(status_code=400, content="error")
        # pytest.raises replaces the old try/assert False/except pattern,
        # which caught its own AssertionError and could mask a missing raise.
        with pytest.raises(Exception, match="Invalid Splunkbase credentials"):
            environ.getSplunkbaseToken({"splunkbase_username": "ocho", "splunkbase_password": "<PASSWORD>"})
@pytest.mark.parametrize(("default_yml", "os_env", "apps_count"),
    [
        # Check null parameters
        ({}, {}, 0),
        # Check default.yml parameters
        ({"app_location": []}, {}, 0),
        ({"app_location": ["a"]}, {}, 0),
        ({"app_location": ["a", "b", "c"]}, {}, 0),
        ({"apps_location": []}, {}, 0),
        ({"apps_location": ["a"]}, {}, 1),
        ({"apps_location": ["a", "b", "c"]}, {}, 3),
        ({"apps_location": "a"}, {}, 1),
        ({"apps_location": "a,b,c,d"}, {}, 4),
        # Check environment variable parameters
        ({}, {"SPLUNK_APPS": None}, 0),
        ({}, {"SPLUNK_APPS": "hi"}, 0),
        ({}, {"SPLUNK_APPS_URL": "hi"}, 1),
        ({}, {"SPLUNK_APPS_URL": "a,b,ccccc,dd"}, 4),
        # Check the union combination of default.yml + environment variables
        ### Invalid 'app_location' variable name in default.yml
        ({"app_location": []}, {"SPLUNK_APPS_URL": None}, 0),
        ({"app_location": ["a"]}, {"SPLUNK_APPS_URL": "a"}, 1),
        ({"app_location": ["a", "b", "c"]}, {"SPLUNK_APPS_URL": "a,bb"}, 2),
        ### Invalid 'SPLUNK_APP_URL' variable name in env vars
        ({"apps_location": ["x"]}, {"SPLUNK_APP_URL": "a"}, 1),
        ({"apps_location": ["x", "y"]}, {"SPLUNK_APP_URL": "a,bb"}, 2),
        ({"apps_location": "x,y,z"}, {"SPLUNK_APP_URL": "a,bb"}, 3),
        ### Correct variable names
        ({"apps_location": ["x"]}, {"SPLUNK_APPS_URL": "a"}, 2),
        ({"apps_location": ["x", "y"]}, {"SPLUNK_APPS_URL": "a,bb"}, 4),
        ({"apps_location": "x,y,z"}, {"SPLUNK_APPS_URL": "a,bb"}, 5),
        ### Only return unique set of apps
        ({"apps_location": ["x"]}, {"SPLUNK_APPS_URL": "x"}, 1),
        ({"apps_location": ["x", "y"]}, {"SPLUNK_APPS_URL": "a,bb,y"}, 4),
        ({"apps_location": "x,y,z"}, {"SPLUNK_APPS_URL": "x,yy,a,z"}, 5),
    ]
)
def test_getSplunkApps(default_yml, os_env, apps_count):
    """getSplunkApps() always normalizes apps_location into a list, merging
    (and de-duplicating) entries from default.yml and SPLUNK_APPS_URL while
    ignoring misspelled keys (see the "Invalid" table sections above).
    """
    vars_scope = dict()
    vars_scope["splunk"] = default_yml
    with patch("os.environ", new=os_env):
        environ.getSplunkApps(vars_scope)
    assert type(vars_scope["splunk"]["apps_location"]) == list
    assert len(vars_scope["splunk"]["apps_location"]) == apps_count
@pytest.mark.parametrize(("default_yml", "os_env", "key", "value"),
    [
        # Check cert_prefix
        ({}, {}, "cert_prefix", "https"),
        ({"cert_prefix": "http"}, {}, "cert_prefix", "http"),
        ({}, {"SPLUNK_CERT_PREFIX": "fakehttps"}, "cert_prefix", "fakehttps"),
        # Check splunk.user
        ({"splunk": {"user": "root"}}, {}, "splunk.user", "root"),
        ({}, {"SPLUNK_USER": "root"}, "splunk.user", "root"),
        # Check splunk.group
        ({"splunk": {"group": "root"}}, {}, "splunk.group", "root"),
        ({}, {"SPLUNK_GROUP": "root"}, "splunk.group", "root"),
        # Check splunk.root_endpoint
        ({"splunk": {"root_endpoint": "/splunk"}}, {}, "splunk.root_endpoint", "/splunk"),
        ({}, {"SPLUNK_ROOT_ENDPOINT": "/splk"}, "splunk.root_endpoint", "/splk"),
        # Check splunk.svc_port
        ({"splunk": {"svc_port": "9089"}}, {}, "splunk.svc_port", "9089"),
        ({}, {"SPLUNK_SVC_PORT": "8189"}, "splunk.svc_port", "8189"),
        # Check splunk.s2s.port
        ({"splunk": {"s2s": {"port": "9999"}}}, {}, "splunk.s2s.port", 9999),
        ({}, {"SPLUNK_S2S_PORT": "9991"}, "splunk.s2s.port", 9991),
        # Check splunk.enable_service
        ({"splunk": {"enable_service": "yes"}}, {}, "splunk.enable_service", "yes"),
        ({}, {"SPLUNK_ENABLE_SERVICE": "no"}, "splunk.enable_service", "no"),
        # Check splunk.service_name
        ({"splunk": {"service_name": "SpLuNkD"}}, {}, "splunk.service_name", "SpLuNkD"),
        ({}, {"SPLUNK_SERVICE_NAME": "sPlUnKd"}, "splunk.service_name", "sPlUnKd"),
        # Check splunk.allow_upgrade
        ({"splunk": {"allow_upgrade": "yes"}}, {}, "splunk.allow_upgrade", "yes"),
        ({}, {"SPLUNK_ALLOW_UPGRADE": "no"}, "splunk.allow_upgrade", "no"),
        # Check splunk.set_search_peers
        ({"splunk": {"set_search_peers": False}}, {}, "splunk.set_search_peers", False),
        ({}, {"SPLUNK_SET_SEARCH_PEERS": "False"}, "splunk.set_search_peers", False),
        ({"splunk": {"set_search_peers": True}}, {"SPLUNK_SET_SEARCH_PEERS": "False"}, "splunk.set_search_peers", False),
        # Check splunk.appserver.port
        ({"splunk": {"appserver": {"port": "9291"}}}, {}, "splunk.appserver.port", "9291"),
        ({}, {"SPLUNK_APPSERVER_PORT": "9391"}, "splunk.appserver.port", "9391"),
        # Check splunk.kvstore.port
        ({"splunk": {"kvstore" :{"port": "9165"}}}, {}, "splunk.kvstore.port", "9165"),
        ({}, {"SPLUNK_KVSTORE_PORT": "9265"}, "splunk.kvstore.port", "9265"),
        # Check splunk.connection_timeout
        ({"splunk": {"connection_timeout": 60}}, {}, "splunk.connection_timeout", 60),
        ({}, {"SPLUNK_CONNECTION_TIMEOUT": 200}, "splunk.connection_timeout", 200),
    ]
)
def test_overrideEnvironmentVars(default_yml, os_env, key, value):
    """overrideEnvironmentVars() layers env vars on top of default.yml on top
    of the baseline vars_scope below; `key` is a dotted path into the nested
    splunk dict.
    """
    # Baseline config that each parametrized case overrides.
    vars_scope = {
        "ansible_pre_tasks": None,
        "ansible_post_tasks": None,
        "cert_prefix": "https",
        "splunk": {
            "user": "splunk",
            "group": "splunk",
            "root_endpoint": None,
            "svc_port": 8089,
            "s2s": {"port": 9997},
            "appserver": {"port": 8065},
            "kvstore": {"port": 8191},
            "hec_token": "<KEY>",
            "enable_service": False,
            "service_name": "Splunkd",
            "allow_upgrade": True,
            "asan": None,
            "set_search_peers": True,
            "connection_timeout": 0,
        }
    }
    # TODO: Possibly remove the dependency on merge_dict() in this test
    environ.merge_dict(vars_scope, default_yml)
    with patch("os.environ", new=os_env):
        environ.overrideEnvironmentVars(vars_scope)
    # Resolve the dotted key path manually: two-level sections (s2s,
    # appserver, kvstore) vs. direct splunk.* keys vs. top-level keys.
    if "splunk" in key:
        if "s2s" in key or "appserver" in key or "kvstore" in key:
            section, key = key.split(".")[-2:]
            assert vars_scope["splunk"][section][key] == value
        else:
            key = key.split(".")[-1]
            assert vars_scope["splunk"][key] == value
    else:
        assert vars_scope[key] == value
@pytest.mark.parametrize(("default_yml", "os_env", "output"),
    [
        # Check null parameters
        ({}, {}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        # Check default.yml parameters
        ({"dfs": {"enable": True}}, {}, {"enable": True, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({"dfs": {"dfw_num_slots": 20}}, {}, {"enable": False, "dfw_num_slots": 20, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({"dfs": {"dfw_num_slots": "15"}}, {}, {"enable": False, "dfw_num_slots": 15, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({"dfs": {"dfc_num_slots": 20}}, {}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 20, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({"dfs": {"dfc_num_slots": "15"}}, {}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 15, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({"dfs": {"dfw_num_slots_enabled": True}}, {}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": True, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({"dfs": {"spark_master_host": "10.0.0.1"}}, {}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "10.0.0.1", "spark_master_webui_port": 8080}),
        ({"dfs": {"spark_master_webui_port": 8081}}, {}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8081}),
        ({"dfs": {"spark_master_webui_port": "8082"}}, {}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8082}),
        # Check environment variable parameters
        ({}, {"SPLUNK_ENABLE_DFS": ""}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({}, {"SPLUNK_ENABLE_DFS": "true"}, {"enable": True, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({}, {"SPLUNK_ENABLE_DFS": "TRUE"}, {"enable": True, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({}, {"SPLUNK_DFW_NUM_SLOTS": "11"}, {"enable": False, "dfw_num_slots": 11, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({}, {"SPLUNK_DFC_NUM_SLOTS": "1"}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 1, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({}, {"SPLUNK_DFW_NUM_SLOTS_ENABLED": ""}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({}, {"SPLUNK_DFW_NUM_SLOTS_ENABLED": "true"}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": True, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({}, {"SPLUNK_DFW_NUM_SLOTS_ENABLED": "TRUE"}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": True, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({}, {"SPARK_MASTER_HOST": "8.8.8.8"}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "8.8.8.8", "spark_master_webui_port": 8080}),
        ({}, {"SPARK_MASTER_WEBUI_PORT": "8888"}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8888}),
        # Check the union combination of default.yml + environment variables and order of precedence when overwriting
        ({"dfs": {"enable": False}}, {"SPLUNK_ENABLE_DFS": "true"}, {"enable": True, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({"dfs": {"dfw_num_slots": 100}}, {"SPLUNK_DFW_NUM_SLOTS": "101"}, {"enable": False, "dfw_num_slots": 101, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({"dfs": {"dfc_num_slots": 100}}, {"SPLUNK_DFC_NUM_SLOTS": "101"}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 101, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({"dfs": {"dfw_num_slots_enabled": False}}, {"SPLUNK_DFW_NUM_SLOTS_ENABLED": "True"}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": True, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({"dfs": {"spark_master_host": "10.0.0.1"}}, {"SPARK_MASTER_HOST": "8.8.8.8"}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "8.8.8.8", "spark_master_webui_port": 8080}),
        ({"dfs": {"spark_master_webui_port": 8082}}, {"SPARK_MASTER_WEBUI_PORT": "8888"}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8888}),
    ]
)
def test_getDFS(default_yml, os_env, output):
    """getDFS() builds the splunk.dfs settings dict from defaults, default.yml
    and env vars (env wins), coercing slot counts/ports to int and the
    enable flags to bool.
    """
    vars_scope = dict()
    vars_scope["splunk"] = default_yml
    with patch("os.environ", new=os_env):
        environ.getDFS(vars_scope)
    # Check typing
    assert type(vars_scope["splunk"]["dfs"]["enable"]) == bool
    assert type(vars_scope["splunk"]["dfs"]["dfw_num_slots"]) == int
    assert type(vars_scope["splunk"]["dfs"]["dfc_num_slots"]) == int
    assert type(vars_scope["splunk"]["dfs"]["dfw_num_slots_enabled"]) == bool
    assert type(vars_scope["splunk"]["dfs"]["spark_master_webui_port"]) == int
    assert vars_scope["splunk"]["dfs"] == output
@pytest.mark.parametrize(("os_env", "deployment_server", "add", "before_start_cmd", "cmd"),
    [
        ({}, None, None, None, None),
        # Check environment variable parameters
        ({"SPLUNK_DEPLOYMENT_SERVER": ""}, None, None, None, None),
        ({"SPLUNK_DEPLOYMENT_SERVER": "something"}, "something", None, None, None),
        ({"SPLUNK_ADD": ""}, None, None, None, None),
        ({"SPLUNK_ADD": "echo 1"}, None, ["echo 1"], None, None),
        ({"SPLUNK_ADD": "echo 1,echo 2"}, None, ["echo 1", "echo 2"], None, None),
        ({"SPLUNK_BEFORE_START_CMD": ""}, None, None, None, None),
        ({"SPLUNK_BEFORE_START_CMD": "echo 1"}, None, None, ["echo 1"], None),
        ({"SPLUNK_BEFORE_START_CMD": "echo 1,echo 2"}, None, None, ["echo 1", "echo 2"], None),
        ({"SPLUNK_CMD": ""}, None, None, None, None),
        ({"SPLUNK_CMD": "echo 1"}, None, None, None, ["echo 1"]),
        ({"SPLUNK_CMD": "echo 1,echo 2"}, None, None, None, ["echo 1", "echo 2"]),
    ]
)
def test_getUFSplunkVariables(os_env, deployment_server, add, before_start_cmd, cmd):
    """getUFSplunkVariables() maps UF env vars into vars_scope["splunk"],
    splitting comma-separated command lists; empty/unset vars leave the keys
    unset (get() returns None).
    """
    vars_scope = {"splunk": {}}
    with patch("os.environ", new=os_env):
        environ.getUFSplunkVariables(vars_scope)
    assert vars_scope["splunk"].get("deployment_server") == deployment_server
    assert vars_scope["splunk"].get("add") == add
    assert vars_scope["splunk"].get("before_start_cmd") == before_start_cmd
    assert vars_scope["splunk"].get("cmd") == cmd
def test_getRandomString():
    """Every string produced by getRandomString() is six characters long."""
    assert len(environ.getRandomString()) == 6
@pytest.mark.parametrize(("url", "vars_scope", "output"),
    [
        ("licmaster", {"splunk": {}}, "https://licmaster:8089"),
        ("http://licmaster", {"splunk": {}}, "http://licmaster:8089"),
        ("licmaster:8081", {"splunk": {}}, "https://licmaster:8081"),
        ("http://licmaster:80", {"splunk": {}}, "http://licmaster:80"),
        ("ftp://licmaster.corp.net:3333", {"splunk": {}}, "ftp://licmaster.corp.net:3333"),
        ("username:<EMAIL>", {"splunk": {}}, "https://lm.internal.net:8089"),
        ("http://username:password@lm.internal.net:3333", {"splunk": {}}, "http://lm.internal.net:3333"),
        # Check null input
        ("", {"splunk": {}}, ""),
        (None, {"splunk": {}}, ""),
        # Check vars_scope overrides
        ("licmaster", {"cert_prefix": "http", "splunk": {"svc_port": 18089}}, "http://licmaster:18089"),
        ("https://licmaster", {"cert_prefix": "http", "splunk": {"svc_port": 18089}}, "https://licmaster:18089"),
        ("licmaster:28089", {"cert_prefix": "http", "splunk": {"svc_port": 18089}}, "licmaster:28089" == "licmaster:28089" and "http://licmaster:28089"),
        ("https://licmaster:38089", {"cert_prefix": "http", "splunk": {"svc_port": 18089}}, "https://licmaster:38089"),
    ]
)
def test_parseUrl(url, vars_scope, output):
    """parseUrl() normalizes a URL: fills in a missing scheme from
    cert_prefix (default https), a missing port from splunk.svc_port
    (default 8089), and strips userinfo credentials.
    """
    result = environ.parseUrl(url, vars_scope)
    assert result == output
@pytest.mark.parametrize(("dict1", "dict2", "result"),
    [
        # Check dicts
        ({}, {"a": 2}, {"a": 2}),
        ({"b": 2}, {"a": 2}, {"a": 2, "b": 2}),
        ({"a": 1, "b": 2}, {"a": 2}, {"a": 2, "b": 2}),
        ({"a": 0}, {"a": 1}, {"a": 1}),
        ({"a": 1}, {"b": 2, "c": 3}, {"a": 1, "b": 2, "c": 3}),
        # Check arrays
        ({}, {"a": []}, {"a": []}),
        ({}, {"a": [1, 2]}, {"a": [1, 2]}),
        ({"b": [0]}, {"a": [1]}, {"a": [1], "b": [0]}),
        ({"a": [0]}, {"a": [1]}, {"a": [0, 1]}),
        # Check nested dict output
        ({"nested": {}}, {"nested": {"a": 1}}, {"nested": {"a": 1}}),
        ({"nested": {"a": 1}}, {"nested": {"b": 2}}, {"nested": {"a": 1, "b": 2}}),
        ({"nested": {"a": 1, "c": 3}}, {"nested": {"b": 2}}, {"nested": {"a": 1, "b": 2, "c": 3}}),
        ({"nested": {"a": 1, "b": 3}}, {"nested": {"b": 2}}, {"nested": {"a": 1, "b": 2}}),
        # Check nested with diff value types
        ({"nested": {"x": 1}}, {"nested": {"x": {"a": 1}}}, {"nested": {"x": {"a": 1}}}),
        ({"nested": {"x": {"a": 1}}}, {"nested": {"x": 1}}, {"nested": {"x": 1}}),
        # Check nested arrays
        ({"nested": {"array": []}}, {"nested": {"array": [1]}}, {"nested": {"array": [1]}}),
        ({"nested": {"array": [1, 2, 3]}}, {"nested": {"array": []}}, {"nested": {"array": [1, 2, 3]}}),
        ({"nested": {"array": [1, 2]}}, {"nested": {"array": [3, 4, 5]}}, {"nested": {"array": [1, 2, 3, 4, 5]}}),
        ({"nested": {"x": 10, "array": [1, 2]}}, {"nested": {"y": 20, "array": [3, 4, 5]}}, {"nested": {"x": 10, "y": 20, "array": [1, 2, 3, 4, 5]}}),
        # Targeted github bug
        ({"splunk": {"conf": [{"key": "fileA", "content": {"a": "b", "c": "d"}}]}}, {"splunk": {"conf": [{"key": "fileB", "content": {"e": "f", "g": "h"}}]}}, {"splunk": {"conf": [{"key": "fileA", "content": {"a": "b", "c": "d"}}, {"key": "fileB", "content": {"e": "f", "g": "h"}}]}}),
    ]
)
def test_merge_dict(dict1, dict2, result):
    """merge_dict() deep-merges dict2 into dict1: dict2 scalars win, nested
    dicts merge recursively, and lists are concatenated (dict1 first).
    """
    output = environ.merge_dict(dict1, dict2)
    assert output == result
@pytest.mark.parametrize(("source", "merge_url_called", "merge_file_called"),
    [
        (None, False, False),
        ("", False, False),
        (" ", False, False),
        ("http://web/default.yml", True, False),
        ("https://web/default.yml", True, False),
        ("file:///path/to/default.yml", False, True),
        ("/path/to/default.yml", False, True),
        ("rel/path/to/default.yml", False, True),
    ]
)
def test_mergeDefaults(source, merge_url_called, merge_file_called):
    """mergeDefaults() dispatches on the source string: http(s) URLs go to
    mergeDefaultsFromURL, file paths/URIs to mergeDefaultsFromFile, and
    blank/None sources call neither.
    """
    with patch("environ.mergeDefaultsFromFile") as mock_merge_file:
        with patch("environ.mergeDefaultsFromURL") as mock_merge_url:
            result = environ.mergeDefaults({"hello": "world"}, "foobar", source)
    # The two helpers are mutually exclusive for any given source.
    if merge_url_called:
        mock_merge_url.assert_called_once()
        mock_merge_file.assert_not_called()
    else:
        mock_merge_url.assert_not_called()
    if merge_file_called:
        mock_merge_file.assert_called_once()
        mock_merge_url.assert_not_called()
    else:
        mock_merge_file.assert_not_called()
@pytest.mark.parametrize(("key"),
    [
        ("FOO"),
        ("BAR"),
        ("BAZ"),
    ]
)
def test_mergeDefaults_url_with_req_params(key):
    """When fetching URL defaults, mergeDefaults() passes per-key request
    options (headers, TLS verify) from config; unknown keys ("BAZ") fall
    back to no headers and verify=False.
    """
    config = {
        "config": {
            "FOO": {
                "headers": {"HI": "MOM"},
                "verify": True
            },
            "BAR": {
                "headers": {"GOODBYE": "MOM"},
                "verify": False
            }
        }
    }
    with patch("environ.mergeDefaultsFromFile") as mock_merge_file:
        with patch("environ.mergeDefaultsFromURL") as mock_merge_url:
            result = environ.mergeDefaults(config, key, "http://website/default.yml")
    mock_merge_file.assert_not_called()
    mock_merge_url.assert_called_once()
    if key == "FOO":
        mock_merge_url.assert_called_with(config, "http://website/default.yml", {"HI": "MOM"}, True)
    elif key == "BAR":
        mock_merge_url.assert_called_with(config, "http://website/default.yml", {"GOODBYE": "MOM"}, False)
    else:
        mock_merge_url.assert_called_with(config, "http://website/default.yml", None, False)
@pytest.mark.skip(reason="TODO")
def test_mergeDefaultsFromURL():
    # Placeholder: mergeDefaultsFromURL() has no coverage yet.
    pass
@pytest.mark.parametrize(("file", "file_exists", "merge_called"),
    [
        (None, False, False),
        ("", False, False),
        (" ", False, False),
        ("/path/to/file", False, False),
        ("/path/to/file", True, True),
    ]
)
def test_mergeDefaultsFromFile(file, file_exists, merge_called):
    """mergeDefaultsFromFile() only opens and merges the file when the path
    is non-blank AND os.path.exists() says it is there; otherwise the input
    config is returned untouched.
    """
    mo = mock_open()
    with patch("environ.open", mo, create=True):
        with patch("environ.os") as mock_os:
            with patch("environ.merge_dict") as mock_merge:
                mock_os.path.exists = MagicMock(return_value=file_exists)
                result = environ.mergeDefaultsFromFile({"hello": "world"}, file)
    if merge_called:
        mo.assert_called_once()
        mock_merge.assert_called_once()
    else:
        mo.assert_not_called()
        mock_merge.assert_not_called()
    assert result == {"hello": "world"}
@pytest.mark.parametrize(("mock_base", "mock_baked", "mock_env", "mock_host", "merge_call_count"),
    [
        # Null cases
        ({}, [], [], [], 0),
        ({"config": None}, [], [], [], 0),
        ({"config": {}}, [], [], [], 0),
        # Check baked
        ({"config": {"foo": "bar"}}, [{"key": "baked", "src": "file1"}], [], [], 1),
        ({"config": {"foo": "bar"}}, [{"key": "baked", "src": "f1"}, {"key": "baked", "src": "f2"}, {"key": "baked", "src": "f3"}], [], [], 3),
        # Check env
        ({"config": {"foo": "bar"}}, [], [{"key": "env", "src": "file1"}], [], 1),
        ({"config": {"foo": "bar"}}, [], [{"key": "env", "src": "f1"}, {"key": "env", "src": "f2"}, {"key": "env", "src": "f3"}], [], 3),
        # Check host
        ({"config": {"foo": "bar"}}, [], [], [{"key": "host", "src": "file1"}], 1),
        ({"config": {"foo": "bar"}}, [], [], [{"key": "host", "src": "f1"}, {"key": "host", "src": "f2"}, {"key": "host", "src": "f3"}], 3),
        # Check mixed
        ({"config": {"foo": "bar"}}, [{"key": "baked", "src": "file1"}], [{"key": "env", "src": "f1"}, {"key": "env", "src": "f2"}], [{"key": "host", "src": "f1"}, {"key": "host", "src": "f2"}], 5),
        ({"config": None}, [{"key": "baked", "src": "file1"}], [{"key": "env", "src": "f1"}, {"key": "env", "src": "f2"}], [{"key": "host", "src": "f1"}, {"key": "host", "src": "f2"}], 0),
        ({"config": {}}, [{"key": "baked", "src": "file1"}], [{"key": "env", "src": "f1"}, {"key": "env", "src": "f2"}], [{"key": "host", "src": "f1"}, {"key": "host", "src": "f2"}], 0),
    ]
)
def test_loadDefaults(mock_base, mock_baked, mock_env, mock_host, merge_call_count):
    """loadDefaults() calls mergeDefaults() once per baked/env/host source,
    but only when the base defaults carry a non-empty "config" section.
    """
    mbase = MagicMock(return_value=mock_base)
    mbaked = MagicMock(return_value=mock_baked)
    menv = MagicMock(return_value=mock_env)
    mhost = MagicMock(return_value=mock_host)
    with patch("environ.loadBaseDefaults", mbase):
        with patch("environ.loadBakedDefaults", mbaked):
            with patch("environ.loadEnvDefaults", menv):
                with patch("environ.loadHostDefaults", mhost):
                    with patch("environ.mergeDefaults") as mock_merge:
                        output = environ.loadDefaults()
    assert mock_merge.call_count == merge_call_count
@pytest.mark.parametrize(("os_env", "filename"),
    [
        ({}, "splunk_defaults"),
        ({"SPLUNK_ROLE": "splunk_standalone"}, "splunk_defaults"),
        ({"SPLUNK_ROLE": "splunk_universal_forwarder"}, "splunkforwarder_defaults"),
    ]
)
def test_loadBaseDefaults(os_env, filename):
    """loadBaseDefaults() parses the role-specific defaults YAML: the UF role
    reads splunkforwarder_defaults, everything else reads splunk_defaults.
    """
    sample_yml = """
    this: file
    is:
      a: yaml
    """
    mo = mock_open(read_data=sample_yml)
    with patch("environ.open", mo, create=True):
        with patch("os.environ", new=os_env):
            output = environ.loadBaseDefaults()
    # Exactly one file is opened read-only, and its name matches the role.
    mo.assert_called_once()
    args, _ = mo.call_args
    assert filename in args[0]
    assert args[1] == "r"
    assert type(output) == dict
    assert output["this"] == "file"
@pytest.mark.parametrize(("config", "output"),
    [
        (None, []),
        ({}, []),
        ({"baked": None}, []),
        ({"baked": ""}, []),
        ({"baked": "file1"}, [{"key": "baked", "src": "file1"}]),
        ({"baked": "file1,file2,file3"}, [{"key": "baked", "src": "file1"}, {"key": "baked", "src": "file2"}, {"key": "baked", "src": "file3"}]),
    ]
)
def test_loadBakedDefaults(config, output):
    """loadBakedDefaults() splits the comma-separated "baked" entry into
    {key, src} descriptors; missing/empty input yields an empty list.
    """
    result = environ.loadBakedDefaults(config)
    assert result == output
@pytest.mark.parametrize(("config", "output"),
    [
        (None, []),
        ({}, []),
        ({"env": None}, []),
        ({"env": {}}, []),
        ({"env": {"var": None}}, []),
        ({"env": {"var": ""}}, []),
        # Adding test for a key that does not exist
        ({"env": {"var": "FAKE"}}, []),
        # Adding tests for keys that exist
        ({"env": {"var": "KEY1"}}, [{"key": "env", "src": "file1"}]),
        ({"env": {"var": "KEY2"}}, [{"key": "env", "src": "file1"}, {"key": "env", "src": "file2"}, {"key": "env", "src": "file3"}]),
    ]
)
def test_loadEnvDefaults(config, output):
    """loadEnvDefaults() resolves config["env"]["var"] against os.environ and
    splits its comma-separated value into {key, src} descriptors.
    """
    with patch("os.environ", new={"KEY1": "file1", "KEY2": "file1,file2,file3"}):
        result = environ.loadEnvDefaults(config)
    assert result == output
@pytest.mark.parametrize(("config", "output"),
    [
        (None, []),
        ({}, []),
        ({"host": None}, []),
        ({"host": {}}, []),
        ({"host": {"url": None}}, []),
        ({"host": {"url": ""}}, []),
        ({"host": {"url": "file1"}}, [{"key": "host", "src": "file1"}]),
        ({"host": {"url": "file1,file2,file3"}}, [{"key": "host", "src": "file1"}, {"key": "host", "src": "file2"}, {"key": "host", "src": "file3"}]),
    ]
)
def test_loadHostDefaults(config, output):
    """loadHostDefaults() splits the comma-separated config["host"]["url"]
    value into {key, src} descriptors; missing/empty input yields [].
    """
    result = environ.loadHostDefaults(config)
    assert result == output
@pytest.mark.parametrize(("inputInventory", "outputInventory"),
    [
        # Verify null inputs
        ({}, {}),
        ({"all": {}}, {"all": {}}),
        ({"all": {"vars": {}}}, {"all": {"vars": {}}}),
        ({"all": {"vars": {"splunk": {}}}}, {"all": {"vars": {"splunk": {}}}}),
        # Verify individual keys to obfuscate
        ({"all": {"vars": {"splunk": {"password": "<PASSWORD>"}}}}, {"all": {"vars": {"splunk": {"password": "**************"}}}}),
        ({"all": {"vars": {"splunk": {"shc": {"secret": "helloworld"}}}}}, {"all": {"vars": {"splunk": {"shc": {"secret": "**************"}}}}}),
        ({"all": {"vars": {"splunk": {"smartstore": {"index": []}}}}}, {"all": {"vars": {"splunk": {"smartstore": {"index": []}}}}}),
        ({"all": {"vars": {"splunk": {"smartstore": {"index": [{"s3": {"access_key": "1234", "secret_key": "abcd"}}]}}}}}, {"all": {"vars": {"splunk": {"smartstore": {"index": [{"s3": {"access_key": "**************", "secret_key": "**************"}}]}}}}}),
    ]
)
def test_obfuscate_vars(inputInventory, outputInventory):
    """obfuscate_vars() masks sensitive inventory values (passwords, SHC
    secrets, smartstore S3 keys) with asterisks, leaving structure intact.
    """
    result = environ.obfuscate_vars(inputInventory)
    assert result == outputInventory
@pytest.mark.skip(reason="TODO")
def test_create_parser():
    # Placeholder: create_parser() has no coverage yet.
    pass
@pytest.mark.skip(reason="TODO")
def test_prep_for_yaml_out():
    # Placeholder: prep_for_yaml_out() has no coverage yet.
    pass
@pytest.mark.skip(reason="TODO")
def test_main():
    # Placeholder: main() has no coverage yet.
    pass
import logging
import gevent
from volttron.platform.vip.agent import Agent
from volttrontesting.utils.platformwrapper import start_wrapper_platform
from volttron.platform.agent import json
import pytest
import random
import requests
import os
import tempfile
from volttrontesting.fixtures.volttron_platform_fixtures import *
logging.basicConfig(level=logging.DEBUG)
from volttrontesting.utils.build_agent import build_agent, build_agent_with_key
@pytest.fixture(scope="module")
def web_instance(request, get_volttron_instances):
    """Module-scoped VOLTTRON platform started with HTTP support and one
    generated web agent installed; the platform is shut down after all
    tests in the module have run.
    """
    instance = get_volttron_instances(1, should_start=False)
    start_wrapper_platform(instance, with_http=True)
    # Create a web enabled agent to test with. Cleanup will happen in the
    # shutdown_platform method of the instance.
    web_agent = _build_web_agent(instance.volttron_home)
    gevent.sleep(1)
    instance.install_agent(agent_dir=web_agent)
    yield instance
    instance.shutdown_platform()
def _build_web_agent(vhome):
    """Build an installable web-enabled agent package on disk.

    The generated agent registers a static webroot under /web, a raw text
    endpoint at /web/text and a JSON-RPC echo endpoint at /web/jsonrpc.

    :param vhome: Directory (the VOLTTRON home) under which the agent
        package directory is created.
    :return: The directory of the agent to be installed.
    """
    # Randomized dir name so repeated builds in one vhome don't collide.
    agent_dir = os.path.join(vhome, "Agent{}".format(random.randint(1,100)))
    package = "webagent"
    os.makedirs(agent_dir)
    package_dir = os.path.join(agent_dir, package)
    os.makedirs(package_dir)
    web_dir = os.path.join(package_dir, 'webroot', 'web')
    os.makedirs(web_dir)
    # Create index.html inside the webroot directory.
    with open(os.path.join(web_dir, 'index.html'), 'w') as f:
        f.write("""
    <html>
        <head>
            <title>Test Page</title>
        </head>
        <body>
            <h1>The body is good</h1>
        </body>
    </html>
    """)
    # Create the setup.py file
    with open(os.path.join(agent_dir, 'setup.py'), 'w') as file:
        file.write('''
from setuptools import setup, find_packages
packages = find_packages('.')
setup(
    include_package_data=True,
    name = '{package}',
    version = '0.1',
    packages = packages,
    zip_safe = False,
    entry_points={{
        'setuptools.installation': [
            'eggsecutable = {package}.agent:main',
        ]
    }}
)
    '''.format(package=package))
    # Create a manifest file to allow inclusion of other files
    with open(os.path.join(agent_dir, 'MANIFEST.in'), 'w') as file:
        file.write("recursive-include {package}/webroot *".format(
            package=package))
    # Make python package (empty __init__.py marks the dir as a package)
    with open(os.path.join(package_dir, '__init__.py'), 'w') as f:
        pass
    # Create the agent.py file in the package directory.
    with open(os.path.join(package_dir, 'agent.py'), 'w') as fout:
        fout.write('''
from __future__ import absolute_import, print_function
import base64
import logging
import os
import sys
from volttron.platform.vip.agent import Core, Agent
from volttron.platform.agent import utils
from volttron.platform import jsonrpc
utils.setup_logging()
_log = logging.getLogger(__name__)
MY_PATH = os.path.dirname(__file__)
WEBROOT = os.path.join(MY_PATH, "webroot")
class WebAgent(Agent):
    def __init__(self, config_path, **kwargs):
        super(WebAgent, self).__init__(enable_web=True, **kwargs)
    @Core.receiver("onstart")
    def starting(self, sender, **kwargs):
        self.vip.web.register_endpoint("/web/text", self.text, "raw")
        self.vip.web.register_endpoint("/web/jsonrpc", self.echoendpoint)
        self.vip.web.register_path("/web", WEBROOT)
    def text(self, env, data):
        ret = "200 OK", base64.b64encode("This is some text"), [
            ('Content-Type', 'text/plain')]
        _log.debug('returning: {}'.format(ret))
        return ret
    def echoendpoint(self, env, data):
        return jsonrpc.json_result("id", data)
def main():
    utils.vip_main(WebAgent)
if __name__ == '__main__':
    # Entry point for script
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        pass
    ''')
    return agent_dir
def _build_web_dir(vhome):
    """Create a servable web directory containing a single index.html.

    :param vhome: Path under which the "webdir" directory is created.
    :return: Tuple of (path to the web directory, index.html content).
    """
    webdir = os.path.join(vhome, "webdir")
    os.makedirs(webdir)
    html = """
    <html>
        <head>
            <title>Test Page</title>
        </head>
        <body>
            <h1>The body is good</h1>
        </body>
    </html>
    """
    with open(os.path.join(webdir, 'index.html'), 'w') as f:
        f.write(html)
    return webdir, html
@pytest.mark.web
def test_can_discover_info(web_instance):
    """Verify the /discovery/ endpoint exposes the server key, the VIP
    address and the instance name."""
    instance = web_instance
    # Give the web server a moment to finish starting up.
    gevent.sleep(1)
    response = requests.get("{}/discovery/".format(instance.bind_web_address))
    assert response.ok
    discovery = response.json()
    assert discovery['serverkey'] == instance.serverkey
    assert discovery['vip-address']
    assert discovery['instance-name']
@pytest.mark.web
def test_test_web_agent(web_instance):
vi = web_instance
assert vi.is_running()
agent_list = vi.list_agents()
assert len(agent_list) == 1
base_address = vi.bind_web_address
index = base_address + "/web/index.html"
text = base_address + "/web/text"
rpc = base_address + "/web/jsonrpc"
resp = requests.get(index)
assert "<h1>The body is good</h1>" in resp.text
assert "<html>" in resp.text
assert "</html>" in resp.text
assert resp.headers['Content-type'] == 'text/html'
resp = requests.get(text)
assert resp.ok
print("*" * 50)
print(resp.headers)
assert "This is some text" == resp.text
assert resp.headers['Content-type'] == 'text/plain'
# now test for json rpc
payload = {"data": "value", "one": 5, "three": {"two": 1.0}}
resp = requests.post(rpc, json=payload)
assert resp.ok
assert resp.headers['Content-type'] == 'application/json'
jsonresp = json.loads(resp.json()['result'])
print(jsonresp)
for k, v in payload.items():
assert v == jsonresp[k]
@pytest.mark.web
def test_register_path_route(web_instance):
vi = web_instance
assert vi.is_running()
gevent.sleep(1)
webdir, index_html = _build_web_dir(vi.volttron_home)
agent = vi.build_agent(use_ipc=True)
agent.vip.rpc.call('master.web',
'register_path_route', '', webdir).get(timeout=5)
response = requests.get(vi.bind_web_address+"/index.html")
assert index_html == response.text
@pytest.mark.web
@pytest.mark.skipif(True, reason="This works but not in this test.")
def test_register_agent_route(web_instance):
vi = web_instance
assert vi.is_running()
request_data = None
request_env = None
class TestWebEnabledAgent(Agent):
def agent_route_callback(self, env, data):
print("RETURNING DATA CALLBACK!")
request_data = data
request_env = env
return data
agent = vi.build_agent(enable_web=True, identity='web.agent',
agent_class=TestWebEnabledAgent)
gevent.sleep(2)
agent.vip.web.register_endpoint("/foo", agent.agent_route_callback)
gevent.sleep(2)
payload = {"data": "value", "one": 5, "three": {"two": 1.0}}
response = requests.post(vi.bind_web_address+"/foo", json=payload)
assert response.ok | volttrontesting/platform/test_platform_web.py | import logging
import gevent
from volttron.platform.vip.agent import Agent
from volttrontesting.utils.platformwrapper import start_wrapper_platform
from volttron.platform.agent import json
import pytest
import random
import requests
import os
import tempfile
from volttrontesting.fixtures.volttron_platform_fixtures import *
logging.basicConfig(level=logging.DEBUG)
from volttrontesting.utils.build_agent import build_agent, build_agent_with_key
@pytest.fixture(scope="module")
def web_instance(request, get_volttron_instances):
instance = get_volttron_instances(1, should_start=False)
start_wrapper_platform(instance, with_http=True)
# Create a web enabled agent to test with. Cleanup will happen in the
# shutdown_platform method of the instance.
web_agent = _build_web_agent(instance.volttron_home)
gevent.sleep(1)
instance.install_agent(agent_dir=web_agent)
yield instance
instance.shutdown_platform()
def _build_web_agent(vhome):
"""
Builds a full web enabled agent with a webroot, jsonrpc endpoint..etc.
:param vhome:
:return: The directory of the agent to be installed.
"""
agent_dir = os.path.join(vhome, "Agent{}".format(random.randint(1,100)))
package = "webagent"
os.makedirs(agent_dir)
package_dir = os.path.join(agent_dir, package)
os.makedirs(package_dir)
web_dir = os.path.join(package_dir, 'webroot', 'web')
os.makedirs(web_dir)
# Create index.html inside the webroot directory.
with open(os.path.join(web_dir, 'index.html'), 'w') as f:
f.write("""
<html>
<head>
<title>Test Page</title>
</head>
<body>
<h1>The body is good</h1>
</body>
</html>
""")
# Create the setup.py file
with open(os.path.join(agent_dir, 'setup.py'), 'w') as file:
file.write('''
from setuptools import setup, find_packages
packages = find_packages('.')
setup(
include_package_data=True,
name = '{package}',
version = '0.1',
packages = packages,
zip_safe = False,
entry_points={{
'setuptools.installation': [
'eggsecutable = {package}.agent:main',
]
}}
)
'''.format(package=package))
# Crate a manifest file to allow inclusion of other files
with open(os.path.join(agent_dir, 'MANIFEST.in'), 'w') as file:
file.write("recursive-include {package}/webroot *".format(
package=package))
# Make python package
with open(os.path.join(package_dir, '__init__.py'), 'w') as f:
pass
# Create the agent.py file in the package directory.
with open(os.path.join(package_dir, 'agent.py'), 'w') as fout:
fout.write('''
from __future__ import absolute_import, print_function
import base64
import logging
import os
import sys
from volttron.platform.vip.agent import Core, Agent
from volttron.platform.agent import utils
from volttron.platform import jsonrpc
utils.setup_logging()
_log = logging.getLogger(__name__)
MY_PATH = os.path.dirname(__file__)
WEBROOT = os.path.join(MY_PATH, "webroot")
class WebAgent(Agent):
def __init__(self, config_path, **kwargs):
super(WebAgent, self).__init__(enable_web=True, **kwargs)
@Core.receiver("onstart")
def starting(self, sender, **kwargs):
self.vip.web.register_endpoint("/web/text", self.text, "raw")
self.vip.web.register_endpoint("/web/jsonrpc", self.echoendpoint)
self.vip.web.register_path("/web", WEBROOT)
def text(self, env, data):
ret = "200 OK", base64.b64encode("This is some text"), [
('Content-Type', 'text/plain')]
_log.debug('returning: {}'.format(ret))
return ret
def echoendpoint(self, env, data):
return jsonrpc.json_result("id", data)
def main():
utils.vip_main(WebAgent)
if __name__ == '__main__':
# Entry point for script
try:
sys.exit(main())
except KeyboardInterrupt:
pass
''')
return agent_dir
def _build_web_dir(vhome):
""" Creates a web directory that can be served.
The web directory will contain an index.html file that should be
able to be retrieved.
@param:str:
The path to vhome or where it should be
@return:tuple:
The path to the web directory and the content of index.html.
"""
webdir = os.path.join(vhome, "webdir")
os.makedirs(webdir)
html = """
<html>
<head>
<title>Test Page</title>
</head>
<body>
<h1>The body is good</h1>
</body>
</html>
"""
with open(os.path.join(webdir, 'index.html'), 'w') as f:
f.write(html)
return webdir, html
@pytest.mark.web
def test_can_discover_info(web_instance):
"""
Tests whether the web instance returns the key, instance name and
instance tcp address.
"""
vi = web_instance
# must sleep because the web server takes a bit to get going.
gevent.sleep(1)
url = "{}/discovery/".format(vi.bind_web_address)
res = requests.get(url)
assert res.ok
d = res.json()
assert vi.serverkey == d['serverkey']
assert d['vip-address']
assert d['instance-name']
@pytest.mark.web
def test_test_web_agent(web_instance):
vi = web_instance
assert vi.is_running()
agent_list = vi.list_agents()
assert len(agent_list) == 1
base_address = vi.bind_web_address
index = base_address + "/web/index.html"
text = base_address + "/web/text"
rpc = base_address + "/web/jsonrpc"
resp = requests.get(index)
assert "<h1>The body is good</h1>" in resp.text
assert "<html>" in resp.text
assert "</html>" in resp.text
assert resp.headers['Content-type'] == 'text/html'
resp = requests.get(text)
assert resp.ok
print("*" * 50)
print(resp.headers)
assert "This is some text" == resp.text
assert resp.headers['Content-type'] == 'text/plain'
# now test for json rpc
payload = {"data": "value", "one": 5, "three": {"two": 1.0}}
resp = requests.post(rpc, json=payload)
assert resp.ok
assert resp.headers['Content-type'] == 'application/json'
jsonresp = json.loads(resp.json()['result'])
print(jsonresp)
for k, v in payload.items():
assert v == jsonresp[k]
@pytest.mark.web
def test_register_path_route(web_instance):
vi = web_instance
assert vi.is_running()
gevent.sleep(1)
webdir, index_html = _build_web_dir(vi.volttron_home)
agent = vi.build_agent(use_ipc=True)
agent.vip.rpc.call('master.web',
'register_path_route', '', webdir).get(timeout=5)
response = requests.get(vi.bind_web_address+"/index.html")
assert index_html == response.text
@pytest.mark.web
@pytest.mark.skipif(True, reason="This works but not in this test.")
def test_register_agent_route(web_instance):
vi = web_instance
assert vi.is_running()
request_data = None
request_env = None
class TestWebEnabledAgent(Agent):
def agent_route_callback(self, env, data):
print("RETURNING DATA CALLBACK!")
request_data = data
request_env = env
return data
agent = vi.build_agent(enable_web=True, identity='web.agent',
agent_class=TestWebEnabledAgent)
gevent.sleep(2)
agent.vip.web.register_endpoint("/foo", agent.agent_route_callback)
gevent.sleep(2)
payload = {"data": "value", "one": 5, "three": {"two": 1.0}}
response = requests.post(vi.bind_web_address+"/foo", json=payload)
assert response.ok | 0.462716 | 0.115112 |
from collections import OrderedDict
import pandas as pd
import numpy as np
from models.detectors.base import BaseDetector
from scipy.stats import ttest_ind
class LODA(BaseDetector):
def __init__(self):
super().__init__()
self.projections_ = None
self.histograms_ = None
self.limits_ = None
self.n_bins = None
self.random_cuts = None
self.X = None
self.isfitted = False
self.explanation = None
def train(self, X_train, params):
n_components = X_train.shape[1]
n_nonzero_components = np.sqrt(n_components)
n_zero_components = n_components - np.int(n_nonzero_components)
self.X = X_train
self.random_cuts = params['n_random_cuts']
self.n_bins = params['n_bins']
self.projections_ = np.random.randn(self.random_cuts, n_components)
self.histograms_ = np.zeros((self.random_cuts, self.n_bins))
self.limits_ = np.zeros((self.random_cuts, self.n_bins + 1))
for i in range(self.random_cuts):
rands = np.random.permutation(n_components)[:n_zero_components]
self.projections_[i, rands] = 0.
projected_data = self.projections_[i, :].dot(X_train.T)
self.histograms_[i, :], self.limits_[i, :] = np.histogram(
projected_data, bins=self.n_bins, density=False)
self.histograms_[i, :] += 1e-12
self.histograms_[i, :] /= np.sum(self.histograms_[i, :])
self.isfitted = True
def score_samples(self):
assert self.isfitted
return self.__predict(self.X)
def predict_scores(self, new_samples):
assert self.isfitted
return self.__predict(new_samples)
def __predict(self, X):
pred_scores = np.zeros([X.shape[0], 1])
for i in range(self.random_cuts):
projected_data = self.projections_[i, :].dot(X.T)
inds = np.searchsorted(self.limits_[i, :self.n_bins - 1], projected_data, side='left')
pred_scores[:, 0] += -np.log(self.histograms_[i, inds])
pred_scores = np.concatenate(pred_scores).ravel()
return pred_scores / self.random_cuts
def calculate_explanation(self, outlier_ids):
assert self.isfitted
features_importance = OrderedDict()
for o_id in outlier_ids:
features_importance.setdefault(o_id, [])
for f_id in range(self.X.shape[1]):
left_part, right_part = self.__feature_partitions(f_id)
if len(left_part) < 2 or len(right_part) < 2:
continue
outlier = self.X.iloc[o_id, :]
lp_scores = self.__partition_scores(left_part, outlier)
rp_scores = self.__partition_scores(right_part, outlier)
_, pval = ttest_ind(lp_scores, rp_scores)
features_importance[o_id].append(1. - pval)
self.explanation = features_importance
return features_importance
def __partition_scores(self, partition, outlier):
assert len(partition) > 0
partition_scores = []
for p_id in partition:
projected_data = self.projections_[p_id, :].dot(outlier.T)
inds = np.searchsorted(self.limits_[p_id, :self.n_bins - 1], projected_data, side='left')
partition_scores.append(-np.log(self.histograms_[p_id, inds]))
return partition_scores
def __feature_partitions(self, f_id):
left_partition = []
right_partition = []
for i in range(self.projections_.shape[0]):
if self.projections_[i, f_id] != 0:
left_partition.append(i)
else:
right_partition.append(i)
return left_partition, right_partition
def get_explanation(self):
return self.explanation
def convert_to_global_explanation(self):
global_expl = pd.DataFrame(np.array(list(self.explanation.values())), index=list(self.explanation.keys()))
return global_expl.mean(axis=0).values
def is_explainable(self):
return True | PredictiveOutlierExplanationBenchmark/src/models/detectors/Loda.py | from collections import OrderedDict
import pandas as pd
import numpy as np
from models.detectors.base import BaseDetector
from scipy.stats import ttest_ind
class LODA(BaseDetector):
def __init__(self):
super().__init__()
self.projections_ = None
self.histograms_ = None
self.limits_ = None
self.n_bins = None
self.random_cuts = None
self.X = None
self.isfitted = False
self.explanation = None
def train(self, X_train, params):
n_components = X_train.shape[1]
n_nonzero_components = np.sqrt(n_components)
n_zero_components = n_components - np.int(n_nonzero_components)
self.X = X_train
self.random_cuts = params['n_random_cuts']
self.n_bins = params['n_bins']
self.projections_ = np.random.randn(self.random_cuts, n_components)
self.histograms_ = np.zeros((self.random_cuts, self.n_bins))
self.limits_ = np.zeros((self.random_cuts, self.n_bins + 1))
for i in range(self.random_cuts):
rands = np.random.permutation(n_components)[:n_zero_components]
self.projections_[i, rands] = 0.
projected_data = self.projections_[i, :].dot(X_train.T)
self.histograms_[i, :], self.limits_[i, :] = np.histogram(
projected_data, bins=self.n_bins, density=False)
self.histograms_[i, :] += 1e-12
self.histograms_[i, :] /= np.sum(self.histograms_[i, :])
self.isfitted = True
def score_samples(self):
assert self.isfitted
return self.__predict(self.X)
def predict_scores(self, new_samples):
assert self.isfitted
return self.__predict(new_samples)
def __predict(self, X):
pred_scores = np.zeros([X.shape[0], 1])
for i in range(self.random_cuts):
projected_data = self.projections_[i, :].dot(X.T)
inds = np.searchsorted(self.limits_[i, :self.n_bins - 1], projected_data, side='left')
pred_scores[:, 0] += -np.log(self.histograms_[i, inds])
pred_scores = np.concatenate(pred_scores).ravel()
return pred_scores / self.random_cuts
def calculate_explanation(self, outlier_ids):
assert self.isfitted
features_importance = OrderedDict()
for o_id in outlier_ids:
features_importance.setdefault(o_id, [])
for f_id in range(self.X.shape[1]):
left_part, right_part = self.__feature_partitions(f_id)
if len(left_part) < 2 or len(right_part) < 2:
continue
outlier = self.X.iloc[o_id, :]
lp_scores = self.__partition_scores(left_part, outlier)
rp_scores = self.__partition_scores(right_part, outlier)
_, pval = ttest_ind(lp_scores, rp_scores)
features_importance[o_id].append(1. - pval)
self.explanation = features_importance
return features_importance
def __partition_scores(self, partition, outlier):
assert len(partition) > 0
partition_scores = []
for p_id in partition:
projected_data = self.projections_[p_id, :].dot(outlier.T)
inds = np.searchsorted(self.limits_[p_id, :self.n_bins - 1], projected_data, side='left')
partition_scores.append(-np.log(self.histograms_[p_id, inds]))
return partition_scores
def __feature_partitions(self, f_id):
left_partition = []
right_partition = []
for i in range(self.projections_.shape[0]):
if self.projections_[i, f_id] != 0:
left_partition.append(i)
else:
right_partition.append(i)
return left_partition, right_partition
def get_explanation(self):
return self.explanation
def convert_to_global_explanation(self):
global_expl = pd.DataFrame(np.array(list(self.explanation.values())), index=list(self.explanation.keys()))
return global_expl.mean(axis=0).values
def is_explainable(self):
return True | 0.856677 | 0.319068 |
import time
import sys
import json
from create_merge_topo import *
from client import *
from util import *
from threading import Thread, Lock, Condition
cv = Condition()
lock = Lock()
count = 0
nt = None
def run(i, nh, hosts, lock, cv):
global count
global nt
if len(hosts) == 0:
hosts = get_hosts(int(nh))
alias = create_alias()
lock.acquire()
compute_distances(net, hosts)
count += 1
lock.release()
cv.acquire() # Barriera: aggiungi router non rispondenti solo dopo che distanze vere sono state calcolate
if count < int(nt):
cv.wait()
else:
cv.notify_all()
cv.release()
lock.acquire() # Puoi farlo fare a un solo thread portandolo dentro l'else, e evitando uso del lock
print 'thread ' + str(i) + ' 1'
make_anonymous_and_blocking_routers(net)
lock.release()
lock.acquire()
print 'thread ' + str(i) + ' 2'
create_traces(net, hosts)
lock.release()
(vtopo, traces) = create_virtual_topo_and_traces(alias, net, hosts)
(M,C) = create_merge_options(vtopo, traces)
(M, mtopo) = create_merge_topology(M, vtopo, C)
print_topo(mtopo)
out = write_topo_to_file(i, mtopo, hosts)
c = configure_client("file_config_prova/client1_config.json") #TODO va specificato da linea di comando
register_client(c)
tfile = get_topo_filename("file_config_prova/client1_config.json") #TODO idem
topo = get_topo_from_json(out)
trans = get_transactions_from_topo(topo)
c.send_transactions(trans)
def parse_cmd_line():
nt = sys.argv[1]
nh = 0
hosts = []
if sys.argv[2].startswith('h'):
hosts = sys.argv[2:]
else:
nh = sys.argv[2]
return (nt, nh, hosts)
if __name__ == '__main__':
if len(sys.argv) < 3:
print """\nUsage: python start.py <nt> < nh | hosts >\n
<nt> = number of threads to be used to collect traces\n
<nh> = number of random hosts that each thread will use\n
[hosts] = optional sequence of hosts, separated by whitespace, that
each thread will use deterministically\n"""
sys.exit()
# Delete previously generated files..
os.system('./clean.sh')
(nt, nh, hosts) = parse_cmd_line()
net = start_net()
threads = []
for i in range(int(nt)):
thread = Thread(target = run, args = (i, nh, hosts, lock, cv))
threads.append(thread)
thread.start()
for t in threads:
t.join()
print 'Threads finished' | Miscellaneous/TOPOLOGIE_FUNZIONANTI/esperimenti/e4/start.py |
import time
import sys
import json
from create_merge_topo import *
from client import *
from util import *
from threading import Thread, Lock, Condition
cv = Condition()
lock = Lock()
count = 0
nt = None
def run(i, nh, hosts, lock, cv):
global count
global nt
if len(hosts) == 0:
hosts = get_hosts(int(nh))
alias = create_alias()
lock.acquire()
compute_distances(net, hosts)
count += 1
lock.release()
cv.acquire() # Barriera: aggiungi router non rispondenti solo dopo che distanze vere sono state calcolate
if count < int(nt):
cv.wait()
else:
cv.notify_all()
cv.release()
lock.acquire() # Puoi farlo fare a un solo thread portandolo dentro l'else, e evitando uso del lock
print 'thread ' + str(i) + ' 1'
make_anonymous_and_blocking_routers(net)
lock.release()
lock.acquire()
print 'thread ' + str(i) + ' 2'
create_traces(net, hosts)
lock.release()
(vtopo, traces) = create_virtual_topo_and_traces(alias, net, hosts)
(M,C) = create_merge_options(vtopo, traces)
(M, mtopo) = create_merge_topology(M, vtopo, C)
print_topo(mtopo)
out = write_topo_to_file(i, mtopo, hosts)
c = configure_client("file_config_prova/client1_config.json") #TODO va specificato da linea di comando
register_client(c)
tfile = get_topo_filename("file_config_prova/client1_config.json") #TODO idem
topo = get_topo_from_json(out)
trans = get_transactions_from_topo(topo)
c.send_transactions(trans)
def parse_cmd_line():
nt = sys.argv[1]
nh = 0
hosts = []
if sys.argv[2].startswith('h'):
hosts = sys.argv[2:]
else:
nh = sys.argv[2]
return (nt, nh, hosts)
if __name__ == '__main__':
if len(sys.argv) < 3:
print """\nUsage: python start.py <nt> < nh | hosts >\n
<nt> = number of threads to be used to collect traces\n
<nh> = number of random hosts that each thread will use\n
[hosts] = optional sequence of hosts, separated by whitespace, that
each thread will use deterministically\n"""
sys.exit()
# Delete previously generated files..
os.system('./clean.sh')
(nt, nh, hosts) = parse_cmd_line()
net = start_net()
threads = []
for i in range(int(nt)):
thread = Thread(target = run, args = (i, nh, hosts, lock, cv))
threads.append(thread)
thread.start()
for t in threads:
t.join()
print 'Threads finished' | 0.165054 | 0.054626 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from future.builtins import range # pylint: disable=redefined-builtin
from pysc2 import maps
from pysc2 import run_configs
from pysc2.lib import actions
from pysc2.lib import features
from pysc2.lib import point
from pysc2.lib import renderer_ascii
from pysc2.lib import units
from pysc2.tests import utils
from s2clientprotocol import common_pb2 as sc_common
from s2clientprotocol import sc2api_pb2 as sc_pb
# Value in the unit_type feature-screen layer marking a pixel with no unit
# on it (used to verify a build target position is clear).
_EMPTY = 0
def identity_function(name, args):
  """Returns a callable that ignores its observation and emits a fixed action.

  Args:
    name: Name of the entry in `actions.FUNCTIONS` to invoke.
    args: Positional arguments applied to that function call.

  Returns:
    A one-argument callable; the observation argument is ignored.
  """
  def _act(unused_obs):
    return actions.FUNCTIONS[name](*args)
  return _act
def any_point(unit_type, obs):
  """Returns screen coords [x, y] of one unit of `unit_type`, else (None, None).

  Picks the last matching pixel (row-major order) in the unit_type
  feature-screen layer.

  Args:
    unit_type: The unit-type id to look for.
    obs: A transformed observation with a `feature_screen.unit_type` layer.

  Returns:
    `[x, y]` of a matching pixel, or the tuple `(None, None)` if no pixel
    matches.
  """
  unit_layer = obs.feature_screen.unit_type
  y, x = (unit_layer == unit_type).nonzero()
  # Use `size`, not `any()`: `y.any()` is False when all matches lie in
  # screen row 0 (all y indices are zero), which would wrongly report the
  # unit as absent.
  if y.size == 0:
    return None, None
  return [x[-1], y[-1]]
def avg_point(unit_type, obs):
  """Returns the mean screen coords [x, y] of `unit_type`, else (None, None).

  Averages all matching pixels in the unit_type feature-screen layer,
  truncating to ints — useful to target the center of a large structure.

  Args:
    unit_type: The unit-type id to look for.
    obs: A transformed observation with a `feature_screen.unit_type` layer.

  Returns:
    `[x, y]` of the mean matching pixel, or the tuple `(None, None)` if no
    pixel matches.
  """
  unit_layer = obs.feature_screen.unit_type
  y, x = (unit_layer == unit_type).nonzero()
  # Use `size`, not `any()`: `y.any()` is False when all matches lie in
  # screen row 0 (all y indices are zero), which would wrongly report the
  # unit as absent.
  if y.size == 0:
    return None, None
  return [int(x.mean()), int(y.mean())]
def select(func, unit_type):
  """Returns a callable mapping an observation to a select_point action.

  Args:
    func: Locator such as `any_point` or `avg_point`; called as
      `func(unit_type, obs)` to find the unit's screen coordinates.
    unit_type: The unit-type id to select.

  Returns:
    A one-argument callable producing a `select_point('select', [x, y])`
    action for the located unit.
  """
  def _select(obs):
    return actions.FUNCTIONS.select_point('select', func(unit_type, obs))
  return _select
class Config(object):
  """Holds the configuration options."""
  def __init__(self):
    # Environment.
    self.map_name = 'Flat64'
    # Feature-layer resolutions (in feature-layer pixels).
    screen_resolution = point.Point(32, 32)
    minimap_resolution = point.Point(32, 32)
    # Width of the feature-layer camera (passed to SpatialCameraSetup).
    self.camera_width = 24
    # Seed passed to RequestCreateGame so the hard-coded action frames
    # below line up with the same game state on every run.
    self.random_seed = 42
    self.interface = sc_pb.InterfaceOptions(
        raw=True, score=True,
        feature_layer=sc_pb.SpatialCameraSetup(width=self.camera_width))
    screen_resolution.assign_to(self.interface.feature_layer.resolution)
    minimap_resolution.assign_to(
        self.interface.feature_layer.minimap_resolution)
    # Hard code an action sequence.
    # TODO(petkoig): Consider whether the Barracks and Supply Depot positions
    # need to be dynamically determined.
    # Maps game_loop -> callable(obs) producing the action to send at that
    # frame (consumed by ReplayObsTest._get_replay_data).
    self.actions = {
        507: select(any_point, units.Terran.SCV),
        963: identity_function('Build_SupplyDepot_screen', ['now', [25, 15]]),
        1152: select(avg_point, units.Terran.CommandCenter),
        1320: identity_function('Train_SCV_quick', ['now']),
        1350: identity_function('Train_SCV_quick', ['now']),
        1393: identity_function('Train_SCV_quick', ['now']),
        1437: identity_function('Train_SCV_quick', ['now']),
        1522: select(any_point, units.Terran.SCV),
        1548: identity_function('Build_Barracks_screen', ['now', [25, 25]]),
        1752: select(avg_point, units.Terran.CommandCenter),
        1937: identity_function('Train_SCV_quick', ['now']),
        2400: select(avg_point, units.Terran.Barracks),
        2700: identity_function('Train_Marine_quick', ['now']),
        3300: identity_function('select_army', ['select']),
    }
    # Observe past the last scripted action; actions show up in a later
    # frame's observation (see the "one frame later" note in
    # ReplayObsTest._process_replay).
    self.num_observations = max(self.actions.keys()) + 2
    # Player observed during replay (observed_player_id in start_replay).
    self.player_id = 1
class GameController(object):
  """Wrapper class for interacting with the game in play/replay mode."""
  def __init__(self, config):
    """Constructs the game controller object.
    Args:
      config: Interface configuration options.
    """
    self._config = config
    self._sc2_proc = None
    self._controller = None
    self._initialize()
  def _initialize(self):
    """Initialize play/replay connection.

    Resolves the configured map, launches the SC2 process and keeps its
    controller for later requests.
    """
    run_config = run_configs.get()
    self._map_inst = maps.get(self._config.map_name)
    self._map_data = self._map_inst.data(run_config)
    self._sc2_proc = run_config.start(
        want_rgb=self._config.interface.HasField('render'))
    self._controller = self._sc2_proc.controller
  def start_replay(self, replay_data):
    """Starts replaying `replay_data` as the configured observed player."""
    start_replay = sc_pb.RequestStartReplay(
        replay_data=replay_data,
        map_data=self._map_data,
        options=self._config.interface,
        disable_fog=False,
        observed_player_id=self._config.player_id)
    self._controller.start_replay(start_replay)
  def create_game(self):
    """Creates and joins a game on the configured map vs an easy computer."""
    create = sc_pb.RequestCreateGame(
        random_seed=self._config.random_seed,
        local_map=sc_pb.LocalMap(
            map_path=self._map_inst.path, map_data=self._map_data))
    create.player_setup.add(type=sc_pb.Participant)
    create.player_setup.add(
        type=sc_pb.Computer,
        race=sc_common.Terran,
        difficulty=sc_pb.VeryEasy)
    join = sc_pb.RequestJoinGame(
        race=sc_common.Terran,
        options=self._config.interface)
    self._controller.create_game(create)
    self._controller.join_game(join)
  @property
  def controller(self):
    """The remote controller for sending requests to the game."""
    return self._controller
  def close(self):
    """Close the controller connection."""
    # Idempotent: both handles are cleared so a second close is a no-op.
    if self._controller:
      self._controller.quit()
      self._controller = None
    if self._sc2_proc:
      self._sc2_proc.close()
      self._sc2_proc = None
  def __enter__(self):
    return self
  def __exit__(self, exception_type, exception_value, traceback):
    self.close()
class ReplayObsTest(utils.TestCase):
  """Verifies live observations/actions match what a replay reports.

  A scripted game is played (Config.actions), its replay saved, and then
  the replay is stepped through comparing unit_type layers and actions
  frame by frame.
  """
  def _get_replay_data(self, controller, config):
    """Runs a replay to get the replay data.

    Plays the scripted game, recording the unit_type layer per frame.

    Args:
      controller: Remote controller of a freshly created game.
      config: Config holding the game_loop -> action-factory mapping.

    Returns:
      A tuple of (saved replay bytes, dict of game_loop -> unit_type layer).
    """
    f = features.features_from_game_info(game_info=controller.game_info())
    observations = {}
    last_actions = []
    for _ in range(config.num_observations):
      raw_obs = controller.observe()
      o = raw_obs.observation
      obs = f.transform_obs(raw_obs)
      if raw_obs.action_errors:
        print('action errors:', raw_obs.action_errors)
      if o.game_loop == 2:
        # Center camera is initiated automatically by the game and reported
        # at frame 2.
        last_actions = [actions.FUNCTIONS.move_camera.id]
      # The observation must echo exactly the action(s) sent last frame.
      self.assertEqual(last_actions, list(obs.last_actions))
      unit_type = obs.feature_screen.unit_type
      observations[o.game_loop] = unit_type
      if o.game_loop in config.actions:
        func = config.actions[o.game_loop](obs)
        print(renderer_ascii.screen(obs))
        scv_y, scv_x = (units.Terran.SCV == unit_type).nonzero()
        print('scv locations: ', sorted(list(zip(scv_x, scv_y))))
        print('available actions: ', list(sorted(obs.available_actions)))
        print('Making action: %s' % (func,))
        # Ensure action is available.
        # If a build action is available, we have managed to target an SCV.
        self.assertIn(func.function, obs.available_actions)
        if (func.function in
            (actions.FUNCTIONS.Build_SupplyDepot_screen.id,
             actions.FUNCTIONS.Build_Barracks_screen.id)):
          # Ensure we can build on that position.
          x, y = func.arguments[1]
          self.assertEqual(_EMPTY, unit_type[y, x])
        action = f.transform_action(o, func)
        last_actions = [func.function]
        controller.act(action)
      else:
        last_actions = []
      controller.step()
    replay_data = controller.save_replay()
    return replay_data, observations
  def _process_replay(self, controller, observations, config):
    """Steps through the replay, checking frames against `observations`."""
    f = features.features_from_game_info(game_info=controller.game_info())
    while True:
      o = controller.observe()
      obs = f.transform_obs(o)
      if o.player_result:  # end of game
        break
      unit_type = obs.feature_screen.unit_type
      # The replayed unit_type layer must match the live one for this frame.
      self.assertEqual(
          tuple(observations[o.observation.game_loop].flatten()),
          tuple(unit_type.flatten()))
      self.assertIn(len(o.actions), (0, 1), 'Expected 0 or 1 action')
      if o.actions:
        func = f.reverse_action(o.actions[0])
        # Action is reported one frame later.
        executed = config.actions.get(o.observation.game_loop - 1, None)
        executed_func = executed(obs) if executed else None
        print('%4d Sent: %s' % (o.observation.game_loop, executed_func))
        print('%4d Returned: %s' % (o.observation.game_loop, func))
        if o.observation.game_loop == 2:
          # Center camera is initiated automatically by the game and reported
          # at frame 2.
          self.assertEqual(actions.FUNCTIONS.move_camera.id, func.function)
          continue
        self.assertEqual(func.function, executed_func.function)
        if func.function != actions.FUNCTIONS.select_point.id:
          # select_point likes to return Toggle instead of Select.
          self.assertEqual(func.arguments, executed_func.arguments)
        self.assertEqual(func.function, obs.last_actions[0])
      controller.step()
    return observations
  def test_replay_observations_match(self):
    """End-to-end check: live observations equal replayed observations."""
    config = Config()
    with GameController(config) as game_controller:
      game_controller.create_game()
      replay_data, observations = self._get_replay_data(
          game_controller.controller, config)
      game_controller.start_replay(replay_data)
      self._process_replay(game_controller.controller, observations, config)
# Fix: this line was corrupted by joined file metadata
# ("| pysc2/tests/replay_obs_test.py | ..."); restore the plain entry point.
if __name__ == '__main__':
  absltest.main()
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from future.builtins import range # pylint: disable=redefined-builtin
from pysc2 import maps
from pysc2 import run_configs
from pysc2.lib import actions
from pysc2.lib import features
from pysc2.lib import point
from pysc2.lib import renderer_ascii
from pysc2.lib import units
from pysc2.tests import utils
from s2clientprotocol import common_pb2 as sc_common
from s2clientprotocol import sc2api_pb2 as sc_pb
# Value in the unit_type feature-screen layer marking a pixel with no unit
# on it (used to verify a build target position is clear).
_EMPTY = 0
def identity_function(name, args):
  """Returns a callable that ignores its observation and emits a fixed action.

  Args:
    name: Name of the entry in `actions.FUNCTIONS` to invoke.
    args: Positional arguments applied to that function call.

  Returns:
    A one-argument callable; the observation argument is ignored.
  """
  def _act(unused_obs):
    return actions.FUNCTIONS[name](*args)
  return _act
def any_point(unit_type, obs):
  """Returns screen coords [x, y] of one unit of `unit_type`, else (None, None).

  Picks the last matching pixel (row-major order) in the unit_type
  feature-screen layer.

  Args:
    unit_type: The unit-type id to look for.
    obs: A transformed observation with a `feature_screen.unit_type` layer.

  Returns:
    `[x, y]` of a matching pixel, or the tuple `(None, None)` if no pixel
    matches.
  """
  unit_layer = obs.feature_screen.unit_type
  y, x = (unit_layer == unit_type).nonzero()
  # Use `size`, not `any()`: `y.any()` is False when all matches lie in
  # screen row 0 (all y indices are zero), which would wrongly report the
  # unit as absent.
  if y.size == 0:
    return None, None
  return [x[-1], y[-1]]
def avg_point(unit_type, obs):
  """Returns the mean screen coords [x, y] of `unit_type`, else (None, None).

  Averages all matching pixels in the unit_type feature-screen layer,
  truncating to ints — useful to target the center of a large structure.

  Args:
    unit_type: The unit-type id to look for.
    obs: A transformed observation with a `feature_screen.unit_type` layer.

  Returns:
    `[x, y]` of the mean matching pixel, or the tuple `(None, None)` if no
    pixel matches.
  """
  unit_layer = obs.feature_screen.unit_type
  y, x = (unit_layer == unit_type).nonzero()
  # Use `size`, not `any()`: `y.any()` is False when all matches lie in
  # screen row 0 (all y indices are zero), which would wrongly report the
  # unit as absent.
  if y.size == 0:
    return None, None
  return [int(x.mean()), int(y.mean())]
def select(func, unit_type):
  """Returns a callable mapping an observation to a select_point action.

  Args:
    func: Locator such as `any_point` or `avg_point`; called as
      `func(unit_type, obs)` to find the unit's screen coordinates.
    unit_type: The unit-type id to select.

  Returns:
    A one-argument callable producing a `select_point('select', [x, y])`
    action for the located unit.
  """
  def _select(obs):
    return actions.FUNCTIONS.select_point('select', func(unit_type, obs))
  return _select
class Config(object):
  """Holds the configuration options."""
  def __init__(self):
    # Environment.
    self.map_name = 'Flat64'
    # Feature-layer resolutions (in feature-layer pixels).
    screen_resolution = point.Point(32, 32)
    minimap_resolution = point.Point(32, 32)
    # Width of the feature-layer camera (passed to SpatialCameraSetup).
    self.camera_width = 24
    # Seed passed to RequestCreateGame so the hard-coded action frames
    # below line up with the same game state on every run.
    self.random_seed = 42
    self.interface = sc_pb.InterfaceOptions(
        raw=True, score=True,
        feature_layer=sc_pb.SpatialCameraSetup(width=self.camera_width))
    screen_resolution.assign_to(self.interface.feature_layer.resolution)
    minimap_resolution.assign_to(
        self.interface.feature_layer.minimap_resolution)
    # Hard code an action sequence.
    # TODO(petkoig): Consider whether the Barracks and Supply Depot positions
    # need to be dynamically determined.
    # Maps game_loop -> callable(obs) producing the action to send at that
    # frame (consumed by ReplayObsTest._get_replay_data).
    self.actions = {
        507: select(any_point, units.Terran.SCV),
        963: identity_function('Build_SupplyDepot_screen', ['now', [25, 15]]),
        1152: select(avg_point, units.Terran.CommandCenter),
        1320: identity_function('Train_SCV_quick', ['now']),
        1350: identity_function('Train_SCV_quick', ['now']),
        1393: identity_function('Train_SCV_quick', ['now']),
        1437: identity_function('Train_SCV_quick', ['now']),
        1522: select(any_point, units.Terran.SCV),
        1548: identity_function('Build_Barracks_screen', ['now', [25, 25]]),
        1752: select(avg_point, units.Terran.CommandCenter),
        1937: identity_function('Train_SCV_quick', ['now']),
        2400: select(avg_point, units.Terran.Barracks),
        2700: identity_function('Train_Marine_quick', ['now']),
        3300: identity_function('select_army', ['select']),
    }
    # Observe past the last scripted action; actions show up in a later
    # frame's observation (see the "one frame later" note in
    # ReplayObsTest._process_replay).
    self.num_observations = max(self.actions.keys()) + 2
    # Player observed during replay (observed_player_id in start_replay).
    self.player_id = 1
class GameController(object):
  """Wrapper class for interacting with the game in play/replay mode."""
  def __init__(self, config):
    """Constructs the game controller object.
    Args:
      config: Interface configuration options.
    """
    self._config = config
    self._sc2_proc = None
    self._controller = None
    self._initialize()
  def _initialize(self):
    """Initialize play/replay connection.

    Resolves the configured map, launches the SC2 process and keeps its
    controller for later requests.
    """
    run_config = run_configs.get()
    self._map_inst = maps.get(self._config.map_name)
    self._map_data = self._map_inst.data(run_config)
    self._sc2_proc = run_config.start(
        want_rgb=self._config.interface.HasField('render'))
    self._controller = self._sc2_proc.controller
  def start_replay(self, replay_data):
    """Starts replaying `replay_data` as the configured observed player."""
    start_replay = sc_pb.RequestStartReplay(
        replay_data=replay_data,
        map_data=self._map_data,
        options=self._config.interface,
        disable_fog=False,
        observed_player_id=self._config.player_id)
    self._controller.start_replay(start_replay)
  def create_game(self):
    """Creates and joins a game on the configured map vs an easy computer."""
    create = sc_pb.RequestCreateGame(
        random_seed=self._config.random_seed,
        local_map=sc_pb.LocalMap(
            map_path=self._map_inst.path, map_data=self._map_data))
    create.player_setup.add(type=sc_pb.Participant)
    create.player_setup.add(
        type=sc_pb.Computer,
        race=sc_common.Terran,
        difficulty=sc_pb.VeryEasy)
    join = sc_pb.RequestJoinGame(
        race=sc_common.Terran,
        options=self._config.interface)
    self._controller.create_game(create)
    self._controller.join_game(join)
  @property
  def controller(self):
    """The remote controller for sending requests to the game."""
    return self._controller
  def close(self):
    """Close the controller connection."""
    # Idempotent: both handles are cleared so a second close is a no-op.
    if self._controller:
      self._controller.quit()
      self._controller = None
    if self._sc2_proc:
      self._sc2_proc.close()
      self._sc2_proc = None
  def __enter__(self):
    return self
  def __exit__(self, exception_type, exception_value, traceback):
    self.close()
class ReplayObsTest(utils.TestCase):
  """Verifies that observations/actions from a replay match the live game."""
  def _get_replay_data(self, controller, config):
    """Runs a replay to get the replay data.

    Plays config.num_observations frames, issuing the scripted actions from
    config.actions at their scheduled game loops, and records the unit_type
    screen layer per frame. Returns (replay_data bytes, {game_loop: layer}).
    """
    f = features.features_from_game_info(game_info=controller.game_info())
    observations = {}
    last_actions = []
    for _ in range(config.num_observations):
      raw_obs = controller.observe()
      o = raw_obs.observation
      obs = f.transform_obs(raw_obs)
      if raw_obs.action_errors:
        print('action errors:', raw_obs.action_errors)
      if o.game_loop == 2:
        # Center camera is initiated automatically by the game and reported
        # at frame 2.
        last_actions = [actions.FUNCTIONS.move_camera.id]
      self.assertEqual(last_actions, list(obs.last_actions))
      unit_type = obs.feature_screen.unit_type
      observations[o.game_loop] = unit_type
      if o.game_loop in config.actions:
        # Scripted action for this frame; the callable picks its target from obs.
        func = config.actions[o.game_loop](obs)
        print(renderer_ascii.screen(obs))
        scv_y, scv_x = (units.Terran.SCV == unit_type).nonzero()
        print('scv locations: ', sorted(list(zip(scv_x, scv_y))))
        print('available actions: ', list(sorted(obs.available_actions)))
        print('Making action: %s' % (func,))
        # Ensure action is available.
        # If a build action is available, we have managed to target an SCV.
        self.assertIn(func.function, obs.available_actions)
        if (func.function in
            (actions.FUNCTIONS.Build_SupplyDepot_screen.id,
             actions.FUNCTIONS.Build_Barracks_screen.id)):
          # Ensure we can build on that position.
          x, y = func.arguments[1]
          self.assertEqual(_EMPTY, unit_type[y, x])
        action = f.transform_action(o, func)
        last_actions = [func.function]
        controller.act(action)
      else:
        last_actions = []
      controller.step()
    replay_data = controller.save_replay()
    return replay_data, observations
  def _process_replay(self, controller, observations, config):
    """Steps through a replay, checking frames/actions against `observations`."""
    f = features.features_from_game_info(game_info=controller.game_info())
    while True:
      o = controller.observe()
      obs = f.transform_obs(o)
      if o.player_result: # end of game
        break
      # The replay's unit_type layer must match the live game frame-for-frame.
      unit_type = obs.feature_screen.unit_type
      self.assertEqual(
          tuple(observations[o.observation.game_loop].flatten()),
          tuple(unit_type.flatten()))
      self.assertIn(len(o.actions), (0, 1), 'Expected 0 or 1 action')
      if o.actions:
        func = f.reverse_action(o.actions[0])
        # Action is reported one frame later.
        executed = config.actions.get(o.observation.game_loop - 1, None)
        executed_func = executed(obs) if executed else None
        print('%4d Sent: %s' % (o.observation.game_loop, executed_func))
        print('%4d Returned: %s' % (o.observation.game_loop, func))
        if o.observation.game_loop == 2:
          # Center camera is initiated automatically by the game and reported
          # at frame 2.
          self.assertEqual(actions.FUNCTIONS.move_camera.id, func.function)
          continue
        self.assertEqual(func.function, executed_func.function)
        if func.function != actions.FUNCTIONS.select_point.id:
          # select_point likes to return Toggle instead of Select.
          self.assertEqual(func.arguments, executed_func.arguments)
        self.assertEqual(func.function, obs.last_actions[0])
      controller.step()
    return observations
  def test_replay_observations_match(self):
    """End-to-end: play a scripted game, then verify its replay matches."""
    config = Config()
    with GameController(config) as game_controller:
      game_controller.create_game()
      replay_data, observations = self._get_replay_data(
          game_controller.controller, config)
      game_controller.start_replay(replay_data)
      self._process_replay(game_controller.controller, observations, config)
# Run the replay-vs-live observation tests via absl's test runner.
if __name__ == '__main__':
  absltest.main()
import flask_sijax
from flask import render_template, g, session, request, redirect
import model
from apic_manager import cobra_apic_l2_tool
from app import app
from sijax_handlers.group_handler import group_handler
from sijax_handlers.network_handler import network_handler
from sijax_handlers.fabric_handler import fabric_handler
from sijax_handlers.vpc_handler import vpc_handler
from sijax_handlers.vpc_access_handler import vpc_access_handler
from sijax_handlers.single_access_handler import single_access_handler
from sijax_handlers.access_switch_handler import access_switch_handler
from sijax_handlers.netmon_handler import netmon_handler
__author__ = '<NAME> (<EMAIL>)'
""" Prerequisites """
@app.before_request
def before_request():
    """Lazily create the database tables before the first request is served."""
    if model.network.table_exists():
        return
    model.create_tables()
""" Error management """
@app.errorhandler(404)
def page_not_found(e):
    """Send any unknown URL back to the landing page instead of a 404 view."""
    return redirect('/')
""" Account Log in and Log out """
@flask_sijax.route(app, '/login')
def login():
    """Render the login form and create an APIC session on successful POST.

    Validation/login failures are rendered back into the login template with
    the error text sanitized and truncated to 200 characters.
    """
    if not session.get('login_apic_url'):
        if request.method == 'POST':
            values = request.form
            try:
                # BUGFIX: the original built `ex = Exception()` and assigned
                # `ex.message`, but the handler below renders `str(e)` -- which is
                # empty for an argument-less Exception -- so validation errors
                # displayed no text. Pass the message to the constructor instead.
                if len(values['login_username']) == 0:
                    raise Exception('Username is required')
                elif len(values['login_password']) == 0:
                    raise Exception('Password is required')
                elif len(values['login_apic_url']) == 0:
                    raise Exception('Apic URL is required')
                elif not values['login_apic_url'].startswith('http'):
                    raise Exception('Please specify protocol (http/https)')
                else:
                    apic_object = cobra_apic_l2_tool.cobra_apic_l2_tool()
                    apic_object.login(values['login_apic_url'], values['login_username'], values['login_password'])
                    session['login_apic_url'] = values['login_apic_url']
                    session['username'] = values['login_username']
                    # NOTE(review): storing the plaintext password in the session is
                    # a security risk -- consider keeping a token/handle instead.
                    session['password'] = values['login_password']
                    return redirect('/')
            except Exception as e:
                return render_template('login.html',
                                       error=str(e).replace("'", "").replace('"', '').replace("\n", "")[0:200],
                                       login_apic_url=values['login_apic_url'],
                                       login_username=values['login_username'],
                                       cobra_version=cobra_apic_l2_tool.cobra_apic_base.get_cobra_version())
        return render_template('login.html',
                               cobra_version=cobra_apic_l2_tool.cobra_apic_base.get_cobra_version())
    else:
        return redirect('/')
@flask_sijax.route(app, '/logout')
def logout():
    """Drop all APIC credentials from the session and go back to the login page."""
    session['login_apic_url'] = None
    session['username'] = None
    # BUGFIX: this line was a broken `<PASSWORD>` placeholder (a syntax error);
    # clear the stored password the same way as the other session keys.
    session['password'] = None
    return redirect('/login')
@flask_sijax.route(app, '/')
def main():
    """Landing page; requires an authenticated APIC session."""
    logged_in = session.get('login_apic_url')
    if not logged_in:
        return redirect('/login')
    return render_template('index.html')
@flask_sijax.route(app, '/groups')
def groups():
    """Groups page; Sijax callbacks go to the group and fabric handlers."""
    if not session.get('login_apic_url'):
        return redirect('/login')
    if not g.sijax.is_sijax_request:
        return render_template('groups.html')
    for handler in (group_handler(), fabric_handler()):
        g.sijax.register_object(handler)
    return g.sijax.process_request()
@flask_sijax.route(app, '/networks')
def networks():
    """Networks page; Sijax callbacks go to network/group/fabric handlers."""
    if not session.get('login_apic_url'):
        return redirect('/login')
    if not g.sijax.is_sijax_request:
        return render_template('network/networks.html')
    for handler in (network_handler(), group_handler(), fabric_handler()):
        g.sijax.register_object(handler)
    return g.sijax.process_request()
@flask_sijax.route(app, '/vpcs')
def vpcs():
    """VPCs page; Sijax callbacks go to fabric/vpc/group handlers."""
    if not session.get('login_apic_url'):
        return redirect('/login')
    if not g.sijax.is_sijax_request:
        return render_template('vpcs.html')
    for handler in (fabric_handler(), vpc_handler(), group_handler()):
        g.sijax.register_object(handler)
    return g.sijax.process_request()
@flask_sijax.route(app, '/vpc_access')
def vpc_access():
    """VPC access page; registers every handler the page's callbacks need."""
    if not session.get('login_apic_url'):
        return redirect('/login')
    if not g.sijax.is_sijax_request:
        return render_template('vpc_access.html')
    for handler in (network_handler(), fabric_handler(), vpc_handler(),
                    group_handler(), vpc_access_handler()):
        g.sijax.register_object(handler)
    return g.sijax.process_request()
@flask_sijax.route(app, '/single_access')
def single_access():
    """Single-access page; registers every handler the page's callbacks need."""
    if not session.get('login_apic_url'):
        return redirect('/login')
    if not g.sijax.is_sijax_request:
        return render_template('single_access.html')
    for handler in (network_handler(), fabric_handler(), vpc_handler(),
                    group_handler(), single_access_handler()):
        g.sijax.register_object(handler)
    return g.sijax.process_request()
@flask_sijax.route(app, '/access_switches')
def access_switches():
    """Access-switches page; Sijax callbacks go to the access-switch handler."""
    if not session.get('login_apic_url'):
        return redirect('/login')
    if not g.sijax.is_sijax_request:
        return render_template('access_switches.html')
    g.sijax.register_object(access_switch_handler())
    return g.sijax.process_request()
@flask_sijax.route(app, '/netmon')
def netmon():
    """Network-monitor landing page; callbacks go to netmon/fabric handlers."""
    if not session.get('login_apic_url'):
        return redirect('/login')
    if not g.sijax.is_sijax_request:
        return render_template('netmon/netmon.html')
    for handler in (netmon_handler(), fabric_handler()):
        g.sijax.register_object(handler)
    return g.sijax.process_request()
@flask_sijax.route(app, '/netmon/<tenant_name>/<ap_name>/<network_name>')
def network_dashboard(tenant_name, ap_name, network_name):
    """Per-network monitoring dashboard; callbacks go to netmon/fabric handlers."""
    if not session.get('login_apic_url'):
        return redirect('/login')
    if not g.sijax.is_sijax_request:
        return render_template('netmon/network_dashboard.html',
                               tenant=tenant_name, ap=ap_name, network=network_name)
    for handler in (netmon_handler(), fabric_handler()):
        g.sijax.register_object(handler)
    return g.sijax.process_request()
@flask_sijax.route(app, '/netmon/<tenant_name>/<ap_name>/<network_name>/<endpoint_mac>')
def endpoint_track(tenant_name, ap_name, network_name, endpoint_mac):
    # Endpoint-tracking view of the network monitor; requires a live APIC session.
    if not session.get('login_apic_url'):
        return redirect('/login')
    if g.sijax.is_sijax_request:
        # Ajax callbacks are served by the netmon and fabric Sijax handlers.
        g.sijax.register_object(netmon_handler())
        g.sijax.register_object(fabric_handler())
        return g.sijax.process_request()
    return render_template('netmon/endpoint_track.html', tenant=tenant_name, network=network_name, ap=ap_name,
endpoint_mac=endpoint_mac) | app/views.py | import flask_sijax
from flask import render_template, g, session, request, redirect
import model
from apic_manager import cobra_apic_l2_tool
from app import app
from sijax_handlers.group_handler import group_handler
from sijax_handlers.network_handler import network_handler
from sijax_handlers.fabric_handler import fabric_handler
from sijax_handlers.vpc_handler import vpc_handler
from sijax_handlers.vpc_access_handler import vpc_access_handler
from sijax_handlers.single_access_handler import single_access_handler
from sijax_handlers.access_switch_handler import access_switch_handler
from sijax_handlers.netmon_handler import netmon_handler
__author__ = '<NAME> (<EMAIL>)'
""" Prerequisites """
@app.before_request
def before_request():
if not model.network.table_exists():
model.create_tables()
""" Error management """
@app.errorhandler(404)
def page_not_found(e):
return redirect('/')
""" Account Log in and Log out """
@flask_sijax.route(app, '/login')
def login():
if not session.get('login_apic_url'):
if request.method == 'POST':
values = request.form
try:
if len(values['login_username']) == 0:
ex = Exception()
ex.message = 'Username is required'
raise ex
elif len(values['login_password']) == 0:
ex = Exception()
ex.message = 'Password is required'
raise ex
elif len(values['login_apic_url']) == 0:
ex = Exception()
ex.message = 'Apic URL is required'
raise ex
elif not values['login_apic_url'].startswith('http'):
ex = Exception()
ex.message = 'Please specify protocol (http/https)'
raise ex
else:
apic_object = cobra_apic_l2_tool.cobra_apic_l2_tool()
apic_object.login(values['login_apic_url'], values['login_username'], values['login_password'])
session['login_apic_url'] = values['login_apic_url']
session['username'] = values['login_username']
session['password'] = values['login_password']
return redirect('/')
except Exception as e:
return render_template('login.html',
error=str(e).replace("'", "").replace('"', '').replace("\n", "")[0:200],
login_apic_url=values['login_apic_url'],
login_username=values['login_username'],
cobra_version=cobra_apic_l2_tool.cobra_apic_base.get_cobra_version())
return render_template('login.html',
cobra_version=cobra_apic_l2_tool.cobra_apic_base.get_cobra_version())
else:
return redirect('/')
@flask_sijax.route(app, '/logout')
def logout():
session['login_apic_url'] = None
session['username'] = None
session['password'] = <PASSWORD>
return redirect('/login')
@flask_sijax.route(app, '/')
def main():
if not session.get('login_apic_url'):
return redirect('/login')
return render_template('index.html')
@flask_sijax.route(app, '/groups')
def groups():
if not session.get('login_apic_url'):
return redirect('/login')
if g.sijax.is_sijax_request:
g.sijax.register_object(group_handler())
g.sijax.register_object(fabric_handler())
return g.sijax.process_request()
return render_template('groups.html')
@flask_sijax.route(app, '/networks')
def networks():
if not session.get('login_apic_url'):
return redirect('/login')
if g.sijax.is_sijax_request:
g.sijax.register_object(network_handler())
g.sijax.register_object(group_handler())
g.sijax.register_object(fabric_handler())
return g.sijax.process_request()
return render_template('network/networks.html')
@flask_sijax.route(app, '/vpcs')
def vpcs():
if not session.get('login_apic_url'):
return redirect('/login')
if g.sijax.is_sijax_request:
g.sijax.register_object(fabric_handler())
g.sijax.register_object(vpc_handler())
g.sijax.register_object(group_handler())
return g.sijax.process_request()
return render_template('vpcs.html')
@flask_sijax.route(app, '/vpc_access')
def vpc_access():
if not session.get('login_apic_url'):
return redirect('/login')
if g.sijax.is_sijax_request:
g.sijax.register_object(network_handler())
g.sijax.register_object(fabric_handler())
g.sijax.register_object(vpc_handler())
g.sijax.register_object(group_handler())
g.sijax.register_object(vpc_access_handler())
return g.sijax.process_request()
return render_template('vpc_access.html')
@flask_sijax.route(app, '/single_access')
def single_access():
if not session.get('login_apic_url'):
return redirect('/login')
if g.sijax.is_sijax_request:
g.sijax.register_object(network_handler())
g.sijax.register_object(fabric_handler())
g.sijax.register_object(vpc_handler())
g.sijax.register_object(group_handler())
g.sijax.register_object(single_access_handler())
return g.sijax.process_request()
return render_template('single_access.html')
@flask_sijax.route(app, '/access_switches')
def access_switches():
if not session.get('login_apic_url'):
return redirect('/login')
if g.sijax.is_sijax_request:
g.sijax.register_object(access_switch_handler())
return g.sijax.process_request()
return render_template('access_switches.html')
@flask_sijax.route(app, '/netmon')
def netmon():
if not session.get('login_apic_url'):
return redirect('/login')
if g.sijax.is_sijax_request:
g.sijax.register_object(netmon_handler())
g.sijax.register_object(fabric_handler())
return g.sijax.process_request()
return render_template('netmon/netmon.html')
@flask_sijax.route(app, '/netmon/<tenant_name>/<ap_name>/<network_name>')
def network_dashboard(tenant_name, ap_name, network_name):
if not session.get('login_apic_url'):
return redirect('/login')
if g.sijax.is_sijax_request:
g.sijax.register_object(netmon_handler())
g.sijax.register_object(fabric_handler())
return g.sijax.process_request()
return render_template('netmon/network_dashboard.html', tenant=tenant_name, ap=ap_name, network=network_name)
@flask_sijax.route(app, '/netmon/<tenant_name>/<ap_name>/<network_name>/<endpoint_mac>')
def endpoint_track(tenant_name, ap_name, network_name, endpoint_mac):
if not session.get('login_apic_url'):
return redirect('/login')
if g.sijax.is_sijax_request:
g.sijax.register_object(netmon_handler())
g.sijax.register_object(fabric_handler())
return g.sijax.process_request()
return render_template('netmon/endpoint_track.html', tenant=tenant_name, network=network_name, ap=ap_name,
endpoint_mac=endpoint_mac) | 0.269902 | 0.056522 |
import os
import time
import glob
import sched
import multiprocessing
from superbench.common.utils import logger, run_command
from superbench.common.utils import device_manager as dm
from superbench.monitor.record import MonitorRecord
class Monitor(multiprocessing.Process):
    """Background process that periodically samples system metrics.

    Collects host CPU usage, network bandwidth and GPU telemetry every
    ``sample_freq`` seconds and appends one record per sample to
    ``output_file`` in jsonline format.
    """
    def __init__(self, container_name, sample_duration, sample_freq, output_file):
        """Constructor.
        Args:
            container_name (str): container name that need to monitor, None means the current env.
            sample_duration (int): calculate the average metric during sample_duration seconds.
            sample_freq (int): do sampling every sample_freq seconds.
            output_file (str): output file in jsonline format.
        """
        multiprocessing.Process.__init__(self)
        self.__container_name = container_name
        self.__sample_duration = sample_duration
        self.__sample_freq = sample_freq
        self.__output_file = output_file
        self.__scheduler = sched.scheduler(time.time, time.sleep)
        # Shared flag so that stop() called from the parent process is visible here.
        self.__running = multiprocessing.Value('i', 0)
        self.__online_cpus = os.sysconf(os.sysconf_names['SC_NPROCESSORS_ONLN'])
        self.__unit_MiByte = 1024 * 1024 * 1.0
        self.__output_handler = open(self.__output_file, 'a')
    def __preprocess(self):
        """Preprocess/preparation operations before the monitoring.

        Resolves the cgroup/proc files to sample, either for the requested
        container or for the current environment.
        Return:
            True if __preprocess() succeed.
        """
        if self.__container_name is not None:
            output = run_command('docker ps -qf name={}'.format(self.__container_name))
            if output.returncode != 0:
                logger.error(
                    'Failed to get the container id - container name: {}, error message: {}'.format(
                        self.__container_name, output.stderr
                    )
                )
                return False
            container_id = output.stdout
            # BUGFIX: '{{.State.Pid}}' inside a str.format() template collapses to
            # '{.State.Pid}', which docker's Go template engine treats as literal
            # text, so the pid was never resolved. Quadruple the braces so the
            # formatted command contains the literal '{{.State.Pid}}'.
            output = run_command('docker inspect -f {{{{.State.Pid}}}} {}'.format(container_id))
            if output.returncode != 0:
                logger.error(
                    'Failed to get the container pid - container id: {}, error message: {}'.format(
                        container_id, output.stderr
                    )
                )
                return False
            container_pid = output.stdout
            try:
                self._cpu_file = glob.glob('/sys/fs/cgroup/cpuacct/docker/{}*/cpuacct.stat'.format(container_id))[0]
                self._mem_file = glob.glob(
                    '/sys/fs/cgroup/memory/docker/{}*/memory.usage_in_bytes'.format(container_id)
                )[0]
                self._net_file = '/proc/{}/net/dev'.format(container_pid)
            except BaseException as e:
                logger.error(
                    'Failed to get the cpu/mem/net file - container: {}, error message: {}'.format(
                        self.__container_name, str(e)
                    )
                )
                return False
        else:
            # No container: sample the host-wide cgroup/proc files directly.
            self._cpu_file = '/sys/fs/cgroup/cpuacct/cpuacct.stat'
            self._mem_file = '/sys/fs/cgroup/memory/memory.usage_in_bytes'
            self._net_file = '/proc/net/dev'
        return True
    def run(self):
        """Method representing the process's activity.
        Return:
            True if launching the process succeed.
        """
        if self.__running.value == 0:
            if not self.__preprocess():
                return False
            try:
                logger.info('Start monitoring.')
                self.__running.value = 1
                # Take the first sample immediately; __sample() reschedules itself.
                self.__sample()
                self.__scheduler.run()
            except BaseException as e:
                logger.error('Failed to launch the monitor process - error message: {}'.format(str(e)))
                self.stop()
                return False
        else:
            logger.error('Monitor is still running')
        return True
    def stop(self):
        """Method stopping the process's activity."""
        self.__running.value = 0
        # Cancel every pending scheduler event so run() can terminate.
        list(map(self.__scheduler.cancel, self.__scheduler.queue))
        self.join()
        self.__output_handler.close()
    def __sample(self):
        """Method sampling system metrics and writing one jsonline record."""
        if self.__running.value == 1:
            # Re-arm the scheduler for the next sampling period.
            self.__scheduler.enter(self.__sample_freq, 1, self.__sample, ())
            # Sampling
            record = MonitorRecord()
            self.__sample_host_metrics(record)
            self.__sample_gpu_metrics(record)
            self.__output_handler.write('{}\n'.format(record.to_string()))
            self.__output_handler.flush()
    def __sample_host_metrics(self, record):
        """Method sampling the host metrics.
        Args:
            record (MonitorRecord): record instance to save the metrics.
        """
        # First round of capturing.
        system_ticks_s = self.__get_total_cpu_ticks()
        container_ticks_s = self.__get_process_cpu_ticks()
        start_time = time.time()
        net_bytes_s = self.__get_network_bytes()
        time.sleep(self.__sample_duration)
        # Second round of capturing.
        system_ticks_e = self.__get_total_cpu_ticks()
        container_ticks_e = self.__get_process_cpu_ticks()
        end_time = time.time()
        net_bytes_e = self.__get_network_bytes()
        # Calculate CPU usage: fraction of system ticks used, scaled to all cores.
        cpu_usage = (container_ticks_e -
                     container_ticks_s) * 1.0 / (system_ticks_e - system_ticks_s) * self.__online_cpus * 100
        record.cpu_usage = cpu_usage
        # Calculate per-device network bandwidth in MiB/s.
        net_receive = dict()
        net_transmit = dict()
        for device in net_bytes_s:
            net_receive[
                '{}_receive_bw'.format(device)
            ] = ((net_bytes_e[device][0] - net_bytes_s[device][0]) / (end_time - start_time) / self.__unit_MiByte)
            net_transmit[
                '{}_transmit_bw'.format(device)
            ] = ((net_bytes_e[device][1] - net_bytes_s[device][1]) / (end_time - start_time) / self.__unit_MiByte)
        record.net_receive = net_receive
        record.net_transmit = net_transmit
    def __sample_gpu_metrics(self, record):
        """Method sampling the gpu metrics.
        Args:
            record (MonitorRecord): record instance to save the metrics.
        """
        device_count = dm.device_manager.get_device_count()
        for i in range(device_count):
            record.gpu_usage.append(dm.device_manager.get_device_utilization(i))
            record.gpu_temperature.append(dm.device_manager.get_device_temperature(i))
            record.gpu_power_limit.append(dm.device_manager.get_device_power_limit(i))
            mem_used, mem_total = dm.device_manager.get_device_memory(i)
            record.gpu_mem_used.append(mem_used)
            record.gpu_mem_total.append(mem_total)
            corrected_ecc, uncorrected_ecc = dm.device_manager.get_device_ecc_error(i)
            record.gpu_corrected_ecc.append(corrected_ecc)
            record.gpu_uncorrected_ecc.append(uncorrected_ecc)
            record.gpu_remap_info.append(dm.device_manager.get_device_row_remapped_info(i))
    def __get_total_cpu_ticks(self):
        """Method to get the total cpu ticks.
        Return:
            The total cpu ticks, None means fail to get the data.
        """
        try:
            with open('/proc/stat', 'r') as f:
                for line in f.readlines():
                    if line.startswith('cpu '):
                        items = line.split()
                        # Sum the first 7 tick fields (user..softirq) of the
                        # aggregate 'cpu ' line.
                        total_clock_ticks = 0
                        for item in items[1:8]:
                            total_clock_ticks += int(item)
                        return total_clock_ticks
        except BaseException as e:
            logger.error('Failed to read total cpu ticks information - error message: {}'.format(str(e)))
        return None
    def __get_process_cpu_ticks(self):
        """Method to get the process cpu ticks.
        Return:
            The process cpu ticks, None means fail to get the data.
        """
        user_time = 0
        system_time = 0
        try:
            with open(self._cpu_file, 'r') as f:
                # cpuacct.stat has two lines: 'user <ticks>' and 'system <ticks>'.
                for line in f:
                    items = line.split()
                    if items[0] == 'user':
                        user_time = int(items[1])
                    # BUGFIX: was `items[1] == 'system'`, which compared the tick
                    # VALUE instead of the field name, so system time was always 0.
                    elif items[0] == 'system':
                        system_time = int(items[1])
                return user_time + system_time
        except BaseException as e:
            logger.error('Failed to read process cpu ticks information - error message: {}'.format(str(e)))
            return None
    def __get_network_bytes(self):
        """Method to get the network traffic information, unit: bytes.
        Return:
            The bytes transferred on the network, None means fail to get the data.
        """
        net_info = dict()
        try:
            with open(self._net_file, 'r') as f:
                for line in f:
                    items = line.split()
                    # /proc/net/dev data rows have 17 columns; skip headers.
                    if len(items) != 17:
                        continue
                    else:
                        receive_bytes = int(items[1])
                        transmit_bytes = int(items[9])
                        # items[0] is 'ifname:'; strip the trailing colon.
                        net_info[items[0].strip()[:-1]] = [receive_bytes, transmit_bytes]
            return net_info
        except BaseException as e:
            logger.error('Failed to read network traffic information - error message: {}'.format(str(e)))
            return None
import time
import glob
import sched
import multiprocessing
from superbench.common.utils import logger, run_command
from superbench.common.utils import device_manager as dm
from superbench.monitor.record import MonitorRecord
class Monitor(multiprocessing.Process):
"""The monitor class to collect system metrics periodically."""
def __init__(self, container_name, sample_duration, sample_freq, output_file):
"""Constructor.
Args:
container_name (str): container name that need to monitor, None means the current env.
sample_duration (int): calculate the average metirc during sample_duration seconds.
sample_freq (int): do sampling every sample_freq seconds.
output_file (str): output file in jsonline format.
"""
multiprocessing.Process.__init__(self)
self.__container_name = container_name
self.__sample_duration = sample_duration
self.__sample_freq = sample_freq
self.__output_file = output_file
self.__scheduler = sched.scheduler(time.time, time.sleep)
self.__running = multiprocessing.Value('i', 0)
self.__online_cpus = os.sysconf(os.sysconf_names['SC_NPROCESSORS_ONLN'])
self.__unit_MiByte = 1024 * 1024 * 1.0
self.__output_handler = open(self.__output_file, 'a')
def __preprocess(self):
"""Preprocess/preparation operations before the monitoring.
Return:
True if __preprocess() succeed.
"""
if self.__container_name is not None:
output = run_command('docker ps -qf name={}'.format(self.__container_name))
if output.returncode != 0:
logger.error(
'Failed to get the container id - container name: {}, error message: {}'.format(
self.__container_name, output.stderr
)
)
return False
container_id = output.stdout
output = run_command('docker inspect -f {{.State.Pid}} {}'.format(container_id))
if output.returncode != 0:
logger.error(
'Failed to get the container pid - container id: {}, error message: {}'.format(
container_id, output.stderr
)
)
return False
container_pid = output.stdout
try:
self._cpu_file = glob.glob('/sys/fs/cgroup/cpuacct/docker/{}*/cpuacct.stat'.format(container_id))[0]
self._mem_file = glob.glob(
'/sys/fs/cgroup/memory/docker/{}*/memory.usage_in_bytes'.format(container_id)
)[0]
self._net_file = '/proc/{}/net/dev'.format(container_pid)
except BaseException as e:
logger.error(
'Faild to get the cpu/mem/net file - container: {}, error message: {}'.format(
self.__container_name, str(e)
)
)
return False
else:
self._cpu_file = '/sys/fs/cgroup/cpuacct/cpuacct.stat'
self._mem_file = '/sys/fs/cgroup/memory/memory.usage_in_bytes'
self._net_file = '/proc/net/dev'
return True
def run(self):
"""Method representing the process’s activity.
Return:
True if launching the process succeed.
"""
if self.__running.value == 0:
if not self.__preprocess():
return False
try:
logger.info('Start monitoring.')
self.__running.value = 1
self.__sample()
self.__scheduler.run()
except BaseException as e:
logger.error('Failed to launch the monitor process - error message: {}'.format(str(e)))
self.stop()
return False
else:
logger.error('Monitor is still running')
return True
def stop(self):
"""Method stopping the process’s activity."""
self.__running.value = 0
list(map(self.__scheduler.cancel, self.__scheduler.queue))
self.join()
self.__output_handler.close()
def __sample(self):
"""Method sampling system metrics."""
if self.__running.value == 1:
self.__scheduler.enter(self.__sample_freq, 1, self.__sample, ())
# Sampling
record = MonitorRecord()
self.__sample_host_metrics(record)
self.__sample_gpu_metrics(record)
self.__output_handler.write('{}\n'.format(record.to_string()))
self.__output_handler.flush()
def __sample_host_metrics(self, record):
"""Method sampling the host metrics.
Args:
record (MonitorRecord): record instance to save the metrics.
"""
# First round of capturing.
system_ticks_s = self.__get_total_cpu_ticks()
container_ticks_s = self.__get_process_cpu_ticks()
start_time = time.time()
net_bytes_s = self.__get_network_bytes()
time.sleep(self.__sample_duration)
# Second round of capturing.
system_ticks_e = self.__get_total_cpu_ticks()
container_ticks_e = self.__get_process_cpu_ticks()
end_time = time.time()
net_bytes_e = self.__get_network_bytes()
# Calculate CPU usage.
cpu_usage = (container_ticks_e -
container_ticks_s) * 1.0 / (system_ticks_e - system_ticks_s) * self.__online_cpus * 100
record.cpu_usage = cpu_usage
# Calculate network bandwidth.
net_receive = dict()
net_transmit = dict()
for device in net_bytes_s:
net_receive[
'{}_receive_bw'.format(device)
] = ((net_bytes_e[device][0] - net_bytes_s[device][0]) / (end_time - start_time) / self.__unit_MiByte)
net_transmit[
'{}_transmit_bw'.format(device)
] = ((net_bytes_e[device][1] - net_bytes_s[device][1]) / (end_time - start_time) / self.__unit_MiByte)
record.net_receive = net_receive
record.net_transmit = net_transmit
def __sample_gpu_metrics(self, record):
"""Method sampling the gpu metrics.
Args:
record (MonitorRecord): record instance to save the metrics.
"""
device_count = dm.device_manager.get_device_count()
for i in range(device_count):
record.gpu_usage.append(dm.device_manager.get_device_utilization(i))
record.gpu_temperature.append(dm.device_manager.get_device_temperature(i))
record.gpu_power_limit.append(dm.device_manager.get_device_power_limit(i))
mem_used, mem_total = dm.device_manager.get_device_memory(i)
record.gpu_mem_used.append(mem_used)
record.gpu_mem_total.append(mem_total)
corrected_ecc, uncorrected_ecc = dm.device_manager.get_device_ecc_error(i)
record.gpu_corrected_ecc.append(corrected_ecc)
record.gpu_uncorrected_ecc.append(uncorrected_ecc)
record.gpu_remap_info.append(dm.device_manager.get_device_row_remapped_info(i))
def __get_total_cpu_ticks(self):
"""Method to get the total cpu ticks.
Return:
The total cpu ticks, None means fail to get the data.
"""
try:
with open('/proc/stat', 'r') as f:
for line in f.readlines():
if line.startswith('cpu '):
items = line.split()
total_clock_ticks = 0
for item in items[1:8]:
total_clock_ticks += int(item)
return total_clock_ticks
except BaseException as e:
logger.error('Failed to read total cpu ticks information - error message: {}'.format(str(e)))
return None
def __get_process_cpu_ticks(self):
"""Method to get the process cpu ticks.
Return:
The process cpu ticks, None means fail to get the data.
"""
user_time = 0
system_time = 0
try:
with open(self._cpu_file, 'r') as f:
for line in f:
items = line.split()
if items[0] == 'user':
user_time = int(items[1])
elif items[1] == 'system':
system_time = int(items[1])
return user_time + system_time
except BaseException as e:
logger.error('Failed to read process cpu ticks information - error message: {}'.format(str(e)))
return None
def __get_network_bytes(self):
"""Method to get the network traffic information, unit: bytes.
Return:
The bytes transferred on the network, None means fail to get the data.
"""
net_info = dict()
try:
with open(self._net_file, 'r') as f:
for line in f:
items = line.split()
if len(items) != 17:
continue
else:
receive_bytes = int(items[1])
transmit_bytes = int(items[9])
net_info[items[0].strip()[:-1]] = [receive_bytes, transmit_bytes]
return net_info
except BaseException as e:
logger.error('Failed to read network traffic information - error message: {}'.format(str(e)))
return None | 0.61231 | 0.08617 |
# daal4py DBSCAN example for shared memory systems
import daal4py as d4p
import numpy as np
import os
from daal4py.oneapi import sycl_buffer
# let's try to use pandas' fast csv reader
try:
    import pandas

    def read_csv(f, c, t=np.float64):
        """Read columns *c* of headerless CSV *f* as dtype *t* (pandas fast path)."""
        return pandas.read_csv(f, usecols=c, delimiter=',', header=None, dtype=t)
# BUGFIX: narrowed from a bare `except:` (which also swallowed SystemExit /
# KeyboardInterrupt) to the import failure this fallback is actually for.
except ImportError:
    # fall back to numpy loadtxt (dtype arg accepted for API parity, unused)
    def read_csv(f, c, t=np.float64):
        """Read columns *c* of headerless CSV *f* (numpy fallback)."""
        return np.loadtxt(f, usecols=c, delimiter=',', ndmin=2)
# Probe for a usable GPU SYCL context (dppl first, then daal4py.oneapi).
# BUGFIX: bare `except:` clauses also trapped SystemExit/KeyboardInterrupt;
# narrowed to `except Exception:` while keeping the best-effort fallback.
try:
    from dppl import device_context, device_type
    with device_context(device_type.gpu, 0):
        gpu_available = True
except Exception:
    try:
        from daal4py.oneapi import sycl_context
        with sycl_context('gpu'):
            gpu_available = True
    except Exception:
        gpu_available = False
# At this moment with sycl we are working only with numpy arrays
def to_numpy(data):
    """Convert a pandas DataFrame or scipy CSR matrix to a numpy array.

    Anything else (including numpy arrays) is returned unchanged.
    BUGFIX: the bare `except:` clauses were narrowed to `except ImportError:` --
    they exist only to tolerate missing optional dependencies, and the broad
    form would have hidden real conversion errors.
    """
    try:
        from pandas import DataFrame
        if isinstance(data, DataFrame):
            return np.ascontiguousarray(data.values)
    except ImportError:
        pass
    try:
        from scipy.sparse import csr_matrix
        if isinstance(data, csr_matrix):
            return data.toarray()
    except ImportError:
        pass
    return data
# Common code for both CPU and GPU computations
def compute(data, minObservations, epsilon):
    """Run DBSCAN over *data*, also requesting core indices and observations."""
    dbscan_algo = d4p.dbscan(
        minObservations=minObservations,
        epsilon=epsilon,
        resultsToCompute='computeCoreIndices|computeCoreObservations',
        memorySavingMode=True,
    )
    # Execute the configured algorithm and hand back its result object.
    return dbscan_algo.compute(data)
def main(readcsv=read_csv, method='defaultDense'):
    """Run DBSCAN on the bundled dense dataset, then re-run under SYCL contexts.

    Computes a classic (host) result first, then compares GPU (if available)
    and CPU SYCL results against it. Returns the classic result object.
    NOTE: `method` is accepted for API parity with other examples but unused.
    """
    infile = os.path.join('..', 'data', 'batch', 'dbscan_dense.csv')
    epsilon = 0.04
    minObservations = 45
    # Load the data
    data = readcsv(infile, range(2))
    result_classic = compute(data, minObservations, epsilon)
    data = to_numpy(data)
    # Pick whichever SYCL context API is installed (dppl, else daal4py.oneapi).
    # NOTE(review): bare `except:` here is overly broad -- presumably meant as
    # `except ImportError:`; left unchanged in this comment-only pass.
    try:
        from dppl import device_context, device_type
        gpu_context = lambda: device_context(device_type.gpu, 0)
        cpu_context = lambda: device_context(device_type.cpu, 0)
    except:
        from daal4py.oneapi import sycl_context
        gpu_context = lambda: sycl_context('gpu')
        cpu_context = lambda: sycl_context('cpu')
    # It is possible to specify to make the computations on GPU
    print('gpu', gpu_available)
    if gpu_available:
        with gpu_context():
            sycl_data = sycl_buffer(data)
            result_gpu = compute(sycl_data, minObservations, epsilon)
        # GPU result must match the classic host computation.
        assert np.allclose(result_classic.nClusters, result_gpu.nClusters)
        assert np.allclose(result_classic.assignments, result_gpu.assignments)
        assert np.allclose(result_classic.coreIndices, result_gpu.coreIndices)
        assert np.allclose(result_classic.coreObservations, result_gpu.coreObservations)
    with cpu_context():
        sycl_data = sycl_buffer(data)
        result_cpu = compute(sycl_data, minObservations, epsilon)
    # CPU SYCL result must match the classic host computation too.
    assert np.allclose(result_classic.nClusters, result_cpu.nClusters)
    assert np.allclose(result_classic.assignments, result_cpu.assignments)
    assert np.allclose(result_classic.coreIndices, result_cpu.coreIndices)
    assert np.allclose(result_classic.coreObservations, result_cpu.coreObservations)
    return result_classic
if __name__ == "__main__":
    result = main()
    # Print a small sample of each result table for a quick sanity check.
    print("\nFirst 10 cluster assignments:\n", result.assignments[0:10])
    print("\nFirst 10 cluster core indices:\n", result.coreIndices[0:10])
    print("\nFirst 10 cluster core observations:\n", result.coreObservations[0:10])
    print("\nNumber of clusters:\n", result.nClusters)
print('All looks good!') | examples/sycl/dbscan_batch.py |
# daal4py DBSCAN example for shared memory systems
import daal4py as d4p
import numpy as np
import os
from daal4py.oneapi import sycl_buffer
# let's try to use pandas' fast csv reader
try:
import pandas
read_csv = lambda f, c, t=np.float64: pandas.read_csv(f, usecols=c, delimiter=',', header=None, dtype=t)
except:
# fall back to numpy loadtxt
read_csv = lambda f, c, t=np.float64: np.loadtxt(f, usecols=c, delimiter=',', ndmin=2)
try:
    # Probe for a usable GPU: opening a dppl GPU device context succeeds
    # only when a GPU is actually present.
    from dppl import device_context, device_type
    with device_context(device_type.gpu, 0):
        gpu_available=True
except:
    try:
        # Fall back to daal4py's own sycl bindings.
        from daal4py.oneapi import sycl_context
        with sycl_context('gpu'):
            gpu_available=True
    except:
        # Neither binding could open a GPU context.
        gpu_available=False
# At this moment with sycl we are working only with numpy arrays
def to_numpy(data):
    """Coerce *data* into a plain numpy-compatible object.

    pandas DataFrames become contiguous ndarrays and scipy CSR matrices are
    densified; any other value is returned unchanged.  Each conversion is
    attempted defensively so the function also works when pandas or scipy
    is unavailable.
    """
    try:
        import pandas
        if isinstance(data, pandas.DataFrame):
            return np.ascontiguousarray(data.values)
    except:
        pass
    try:
        import scipy.sparse
        if isinstance(data, scipy.sparse.csr_matrix):
            return data.toarray()
    except:
        pass
    return data
# Common code for both CPU and GPU computations
def compute(data, minObservations, epsilon):
    """Run daal4py DBSCAN on *data* and return the raw result object.

    :param data: the observations (numpy array, DataFrame or sycl_buffer)
    :param minObservations: minimum neighbourhood size for a core point
    :param epsilon: neighbourhood radius
    """
    # configure dbscan main object: we also request the indices and observations of cluster cores
    algo = d4p.dbscan(minObservations=minObservations,
                      epsilon=epsilon,
                      resultsToCompute='computeCoreIndices|computeCoreObservations',
                      memorySavingMode=True)
    # and compute
    return algo.compute(data)
def main(readcsv=read_csv, method='defaultDense'):
    """Run DBSCAN on the bundled dense dataset on host, GPU and CPU contexts.

    :param readcsv: callable(path, columns) that loads the input table
    :param method: unused here; kept for signature symmetry with other examples
    :returns: the result computed without any sycl context (the reference run)
    """
    infile = os.path.join('..', 'data', 'batch', 'dbscan_dense.csv')
    epsilon = 0.04
    minObservations = 45
    # Load the data
    data = readcsv(infile, range(2))
    # Reference run on the host; the sycl runs below are checked against it.
    result_classic = compute(data, minObservations, epsilon)
    data = to_numpy(data)
    try:
        from dppl import device_context, device_type
        gpu_context = lambda: device_context(device_type.gpu, 0)
        cpu_context = lambda: device_context(device_type.cpu, 0)
    except:
        from daal4py.oneapi import sycl_context
        gpu_context = lambda: sycl_context('gpu')
        cpu_context = lambda: sycl_context('cpu')
    # It is possible to specify to make the computations on GPU
    print('gpu', gpu_available)
    if gpu_available:
        with gpu_context():
            sycl_data = sycl_buffer(data)
            result_gpu = compute(sycl_data, minObservations, epsilon)
            # GPU results must agree with the host reference run.
            assert np.allclose(result_classic.nClusters, result_gpu.nClusters)
            assert np.allclose(result_classic.assignments, result_gpu.assignments)
            assert np.allclose(result_classic.coreIndices, result_gpu.coreIndices)
            assert np.allclose(result_classic.coreObservations, result_gpu.coreObservations)
    with cpu_context():
        sycl_data = sycl_buffer(data)
        result_cpu = compute(sycl_data, minObservations, epsilon)
        # CPU sycl results must agree with the host reference run as well.
        assert np.allclose(result_classic.nClusters, result_cpu.nClusters)
        assert np.allclose(result_classic.assignments, result_cpu.assignments)
        assert np.allclose(result_classic.coreIndices, result_cpu.coreIndices)
        assert np.allclose(result_classic.coreObservations, result_cpu.coreObservations)
    return result_classic
if __name__ == "__main__":
    result = main()
    # Print a small sample of each result table for a quick sanity check.
    print("\nFirst 10 cluster assignments:\n", result.assignments[0:10])
    print("\nFirst 10 cluster core indices:\n", result.coreIndices[0:10])
    print("\nFirst 10 cluster core observations:\n", result.coreObservations[0:10])
    print("\nNumber of clusters:\n", result.nClusters)
print('All looks good!') | 0.806586 | 0.365542 |
from __future__ import unicode_literals
from flask import jsonify, request
from indico.core.db import db
from indico.core.db.sqlalchemy.util.queries import preprocess_ts_string
from indico.modules.events.logs.models.entries import EventLogEntry, EventLogRealm
from indico.modules.events.logs.util import serialize_log_entry
from indico.modules.events.logs.views import WPEventLogs
from indico.modules.events.management.controllers import RHManageEventBase
LOG_PAGE_SIZE = 15
def _contains(field, text):
    """Build a full-text match of *text* against *field*.

    Both sides are run through indico_unaccent so matching is
    accent-insensitive; the 'simple' regconfig disables language-specific
    stemming.
    """
    return (db.func.to_tsvector('simple', db.func.indico.indico_unaccent(field))
            .match(db.func.indico.indico_unaccent(preprocess_ts_string(text)), postgresql_regconfig='simple'))
class RHEventLogs(RHManageEventBase):
    """Shows the modification/action log for the event"""
    def _process(self):
        # Map realm names to their human-readable titles for the filter UI.
        realms = {realm.name: realm.title for realm in EventLogRealm}
        return WPEventLogs.render_template('logs.html', self.event, realms=realms)
class RHEventLogsJSON(RHManageEventBase):
    """Serves paginated, filtered event log entries as JSON."""
    def _process(self):
        page = int(request.args.get('page', 1))
        filters = request.args.getlist('filters')
        text = request.args.get('q')
        if not filters:
            # Without any realm filter there is nothing to display.
            return jsonify(current_page=1, pages=[], entries=[], total_page_count=0)
        query = self.event.log_entries.order_by(EventLogEntry.logged_dt.desc())
        # Keep only filter values that name a valid log realm.
        realms = {EventLogRealm.get(f) for f in filters if EventLogRealm.get(f)}
        if realms:
            query = query.filter(EventLogEntry.realm.in_(realms))
        if text:
            # Free-text search over entry metadata, the acting user's name and
            # common e-mail fields stored in the entry's JSON `data` payload.
            query = query.filter(
                db.or_(_contains(EventLogEntry.module, text),
                       _contains(EventLogEntry.type, text),
                       _contains(EventLogEntry.summary, text),
                       _contains(db.m.User.first_name + " " + db.m.User.last_name, text),
                       _contains(EventLogEntry.data['body'].astext, text),
                       _contains(EventLogEntry.data['subject'].astext, text),
                       _contains(EventLogEntry.data['from'].astext, text),
                       _contains(EventLogEntry.data['to'].astext, text),
                       _contains(EventLogEntry.data['cc'].astext, text))
            ).outerjoin(db.m.User)
        query = query.paginate(page, LOG_PAGE_SIZE)
        entries = [dict(serialize_log_entry(entry), index=index, html=entry.render())
                   for index, entry in enumerate(query.items)]
return jsonify(current_page=page, pages=list(query.iter_pages()), total_page_count=query.pages, entries=entries) | indico/modules/events/logs/controllers.py |
from __future__ import unicode_literals
from flask import jsonify, request
from indico.core.db import db
from indico.core.db.sqlalchemy.util.queries import preprocess_ts_string
from indico.modules.events.logs.models.entries import EventLogEntry, EventLogRealm
from indico.modules.events.logs.util import serialize_log_entry
from indico.modules.events.logs.views import WPEventLogs
from indico.modules.events.management.controllers import RHManageEventBase
LOG_PAGE_SIZE = 15
def _contains(field, text):
    """Build a full-text match of *text* against *field*.

    Both sides are run through indico_unaccent so matching is
    accent-insensitive; the 'simple' regconfig disables language-specific
    stemming.
    """
    return (db.func.to_tsvector('simple', db.func.indico.indico_unaccent(field))
            .match(db.func.indico.indico_unaccent(preprocess_ts_string(text)), postgresql_regconfig='simple'))
class RHEventLogs(RHManageEventBase):
    """Shows the modification/action log for the event"""
    def _process(self):
        # Map realm names to their human-readable titles for the filter UI.
        realms = {realm.name: realm.title for realm in EventLogRealm}
        return WPEventLogs.render_template('logs.html', self.event, realms=realms)
class RHEventLogsJSON(RHManageEventBase):
    """Serves paginated, filtered event log entries as JSON."""
    def _process(self):
        page = int(request.args.get('page', 1))
        filters = request.args.getlist('filters')
        text = request.args.get('q')
        if not filters:
            # Without any realm filter there is nothing to display.
            return jsonify(current_page=1, pages=[], entries=[], total_page_count=0)
        query = self.event.log_entries.order_by(EventLogEntry.logged_dt.desc())
        # Keep only filter values that name a valid log realm.
        realms = {EventLogRealm.get(f) for f in filters if EventLogRealm.get(f)}
        if realms:
            query = query.filter(EventLogEntry.realm.in_(realms))
        if text:
            # Free-text search over entry metadata, the acting user's name and
            # common e-mail fields stored in the entry's JSON `data` payload.
            query = query.filter(
                db.or_(_contains(EventLogEntry.module, text),
                       _contains(EventLogEntry.type, text),
                       _contains(EventLogEntry.summary, text),
                       _contains(db.m.User.first_name + " " + db.m.User.last_name, text),
                       _contains(EventLogEntry.data['body'].astext, text),
                       _contains(EventLogEntry.data['subject'].astext, text),
                       _contains(EventLogEntry.data['from'].astext, text),
                       _contains(EventLogEntry.data['to'].astext, text),
                       _contains(EventLogEntry.data['cc'].astext, text))
            ).outerjoin(db.m.User)
        query = query.paginate(page, LOG_PAGE_SIZE)
        entries = [dict(serialize_log_entry(entry), index=index, html=entry.render())
                   for index, entry in enumerate(query.items)]
return jsonify(current_page=page, pages=list(query.iter_pages()), total_page_count=query.pages, entries=entries) | 0.729712 | 0.084568 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
from typing import Iterator, List, Optional
from asconnect.httpclient import HttpClient
from asconnect.models import (
AppStoreVersion,
Platform,
AppStoreVersionLocalization,
AppStoreReviewDetails,
IdfaDeclaration,
)
from asconnect.utilities import next_or_none, update_query_parameters
class VersionClient:
"""Wrapper class around the ASC API."""
log: logging.Logger
http_client: HttpClient
def __init__(self, *, http_client: HttpClient, log: logging.Logger,) -> None:
"""Construct a new client object.
:param http_client: The API HTTP client
:param log: Any base logger to be used (one will be created if not supplied)
"""
self.http_client = http_client
self.log = log.getChild("version")
def get(self, *, version_id: str,) -> Optional[AppStoreVersion]:
"""Get the version with the given ID
:param version_id: The version ID to get
:returns: An AppStoreVersion if found, None otherwise
"""
url = self.http_client.generate_url(f"appStoreVersions/{version_id}")
return next_or_none(self.http_client.get(url=url, data_type=AppStoreVersion))
def get_all(
self,
*,
app_id: str,
version_string: Optional[str] = None,
platform: Optional[Platform] = None,
) -> Iterator[AppStoreVersion]:
"""Get the versions for an app.
:param app_id: The app ID to get the versions for
:param version_string: The version to filter on (if any)
:param platform: The platform to filter on (if any)
:returns: An iterator to AppStoreVersion
"""
url = self.http_client.generate_url(f"apps/{app_id}/appStoreVersions")
query_parameters = {}
if version_string:
query_parameters["filter[versionString]"] = version_string
if platform:
query_parameters["filter[platform]"] = platform.value
url = update_query_parameters(url, query_parameters)
yield from self.http_client.get(url=url, data_type=List[AppStoreVersion])
def get_version(self, *, app_id: str, version_string: str) -> Optional[AppStoreVersion]:
"""Get the versions for an app.
:param app_id: The app ID to get the version for
:param version_string: The version string to get the version for
:returns: An AppStoreVersion
"""
return next_or_none(self.get_all(app_id=app_id, version_string=version_string))
    def get_localizations(self, *, version_id: str,) -> Iterator[AppStoreVersionLocalization]:
        """Get the version localizations for an app version.

        :param version_id: The version ID to get the localizations for

        :returns: An iterator of AppStoreVersionLocalization
        """
        url = self.http_client.generate_url(
            f"appStoreVersions/{version_id}/appStoreVersionLocalizations"
        )
        yield from self.http_client.get(url=url, data_type=List[AppStoreVersionLocalization])
def set_build(self, *, version_id: str, build_id: str) -> None:
"""Set the build for a version
:param version_id: The ID of the version to set the build on
:param build_id: The ID of the build to set
"""
self.http_client.patch(
endpoint=f"appStoreVersions/{version_id}/relationships/build",
data={"data": {"type": "builds", "id": build_id,}},
data_type=None,
)
def get_app_review_details(self, *, version_id: str) -> Optional[AppStoreReviewDetails]:
"""Get the app review details for the version.
:param version_id: The version ID to get the app review details for
:returns: The app review details if set, None otherwise
"""
return next_or_none(
self.http_client.get(
endpoint=f"appStoreVersions/{version_id}/appStoreReviewDetail",
data_type=AppStoreReviewDetails,
)
)
def set_app_review_details(
self,
*,
version_id: str,
contact_email: str,
contact_first_name: str,
contact_last_name: str,
contact_phone: str,
demo_account_name: str,
demo_account_password: str,
demo_account_required: bool,
notes: str,
) -> AppStoreReviewDetails:
"""Set the app store review details
:param version_id: The ID of the version to set the build on
:param contact_email: The email for the app review contact
:param contact_first_name: The first name for the app review contact
:param contact_last_name: The last name for the app review contact
:param contact_phone: The phone number for the app review contact
:param demo_account_name: The username for the demo account
:param demo_account_password: The <PASSWORD> the demo account
:param demo_account_required: Set to True to mark the demo account as required
:param notes: Any notes for the reviewer
:returns: The review details
"""
existing_details = self.get_app_review_details(version_id=version_id)
attributes = {
"contactFirstName": contact_first_name,
"contactLastName": contact_last_name,
"contactPhone": contact_phone,
"contactEmail": contact_email,
"demoAccountName": demo_account_name,
"demoAccountPassword": <PASSWORD>,
"demoAccountRequired": demo_account_required,
"notes": notes,
}
if existing_details:
return self.http_client.patch(
endpoint=f"appStoreReviewDetails/{existing_details.identifier}",
data={
"data": {
"type": "appStoreReviewDetails",
"id": existing_details.identifier,
"attributes": attributes,
}
},
data_type=AppStoreReviewDetails,
)
return self.http_client.post(
endpoint="appStoreReviewDetails",
data={
"data": {
"type": "appStoreReviewDetails",
"attributes": attributes,
"relationships": {
"appStoreVersion": {"data": {"type": "appStoreVersions", "id": version_id}}
},
}
},
data_type=AppStoreReviewDetails,
)
def get_idfa(self, *, version_id: str) -> Optional[IdfaDeclaration]:
"""Get the advertising ID declaration.
:param version_id: The version to get the declaration for
:returns: The declaration if set, None otherwise
"""
return next_or_none(
self.http_client.get(
endpoint=f"appStoreVersions/{version_id}/idfaDeclaration",
data_type=IdfaDeclaration,
)
)
def set_idfa(
self,
*,
version_id: str,
attributes_action_with_previous_ad: bool,
attributes_app_installation_to_previous_ad: bool,
honors_limited_ad_tracking: bool,
serves_ads: bool,
) -> IdfaDeclaration:
"""Set the IDFA declaration
:param version_id: The ID of the version to set the build on
:param attributes_action_with_previous_ad: Set to True if the ID is used to attribute actions with a previous ad
:param attributes_app_installation_to_previous_ad: Set to True if the ID is used to attribute an installation with a previous ad
:param honors_limited_ad_tracking: Set to True to confirm that your app honors a users ad tracking preferences
:param serves_ads: Set to True if the advertising ID will be used to serve ads within your app
:returns: The review details
"""
self.log.debug("Getting existing IDFA...")
existing_details = self.get_idfa(version_id=version_id)
attributes = {
"attributesActionWithPreviousAd": attributes_action_with_previous_ad,
"attributesAppInstallationToPreviousAd": attributes_app_installation_to_previous_ad,
"honorsLimitedAdTracking": honors_limited_ad_tracking,
"servesAds": serves_ads,
}
if existing_details:
self.log.debug("Patching existing IDFA")
return self.http_client.patch(
endpoint=f"idfaDeclarations/{existing_details.identifier}",
data={
"data": {
"type": "idfaDeclarations",
"id": existing_details.identifier,
"attributes": attributes,
}
},
data_type=IdfaDeclaration,
)
self.log.debug("Setting new IDFA")
return self.http_client.post(
endpoint="idfaDeclarations",
data={
"data": {
"type": "idfaDeclarations",
"attributes": attributes,
"relationships": {
"appStoreVersion": {"data": {"type": "appStoreVersions", "id": version_id}}
},
}
},
data_type=IdfaDeclaration,
)
def submit_for_review(self, *, version_id: str,) -> None:
"""Submit the version for review
:param version_id: The ID of the version to submit for review
"""
self.http_client.post(
endpoint="appStoreVersionSubmissions",
data={
"data": {
"type": "appStoreVersionSubmissions",
"relationships": {
"appStoreVersion": {"data": {"type": "appStoreVersions", "id": version_id}}
},
}
},
data_type=None,
) | asconnect/version_client.py |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
from typing import Iterator, List, Optional
from asconnect.httpclient import HttpClient
from asconnect.models import (
AppStoreVersion,
Platform,
AppStoreVersionLocalization,
AppStoreReviewDetails,
IdfaDeclaration,
)
from asconnect.utilities import next_or_none, update_query_parameters
class VersionClient:
"""Wrapper class around the ASC API."""
log: logging.Logger
http_client: HttpClient
def __init__(self, *, http_client: HttpClient, log: logging.Logger,) -> None:
"""Construct a new client object.
:param http_client: The API HTTP client
:param log: Any base logger to be used (one will be created if not supplied)
"""
self.http_client = http_client
self.log = log.getChild("version")
def get(self, *, version_id: str,) -> Optional[AppStoreVersion]:
"""Get the version with the given ID
:param version_id: The version ID to get
:returns: An AppStoreVersion if found, None otherwise
"""
url = self.http_client.generate_url(f"appStoreVersions/{version_id}")
return next_or_none(self.http_client.get(url=url, data_type=AppStoreVersion))
def get_all(
self,
*,
app_id: str,
version_string: Optional[str] = None,
platform: Optional[Platform] = None,
) -> Iterator[AppStoreVersion]:
"""Get the versions for an app.
:param app_id: The app ID to get the versions for
:param version_string: The version to filter on (if any)
:param platform: The platform to filter on (if any)
:returns: An iterator to AppStoreVersion
"""
url = self.http_client.generate_url(f"apps/{app_id}/appStoreVersions")
query_parameters = {}
if version_string:
query_parameters["filter[versionString]"] = version_string
if platform:
query_parameters["filter[platform]"] = platform.value
url = update_query_parameters(url, query_parameters)
yield from self.http_client.get(url=url, data_type=List[AppStoreVersion])
def get_version(self, *, app_id: str, version_string: str) -> Optional[AppStoreVersion]:
"""Get the versions for an app.
:param app_id: The app ID to get the version for
:param version_string: The version string to get the version for
:returns: An AppStoreVersion
"""
return next_or_none(self.get_all(app_id=app_id, version_string=version_string))
    def get_localizations(self, *, version_id: str,) -> Iterator[AppStoreVersionLocalization]:
        """Get the version localizations for an app version.

        :param version_id: The version ID to get the localizations for

        :returns: An iterator of AppStoreVersionLocalization
        """
        url = self.http_client.generate_url(
            f"appStoreVersions/{version_id}/appStoreVersionLocalizations"
        )
        yield from self.http_client.get(url=url, data_type=List[AppStoreVersionLocalization])
def set_build(self, *, version_id: str, build_id: str) -> None:
"""Set the build for a version
:param version_id: The ID of the version to set the build on
:param build_id: The ID of the build to set
"""
self.http_client.patch(
endpoint=f"appStoreVersions/{version_id}/relationships/build",
data={"data": {"type": "builds", "id": build_id,}},
data_type=None,
)
def get_app_review_details(self, *, version_id: str) -> Optional[AppStoreReviewDetails]:
"""Get the app review details for the version.
:param version_id: The version ID to get the app review details for
:returns: The app review details if set, None otherwise
"""
return next_or_none(
self.http_client.get(
endpoint=f"appStoreVersions/{version_id}/appStoreReviewDetail",
data_type=AppStoreReviewDetails,
)
)
def set_app_review_details(
self,
*,
version_id: str,
contact_email: str,
contact_first_name: str,
contact_last_name: str,
contact_phone: str,
demo_account_name: str,
demo_account_password: str,
demo_account_required: bool,
notes: str,
) -> AppStoreReviewDetails:
"""Set the app store review details
:param version_id: The ID of the version to set the build on
:param contact_email: The email for the app review contact
:param contact_first_name: The first name for the app review contact
:param contact_last_name: The last name for the app review contact
:param contact_phone: The phone number for the app review contact
:param demo_account_name: The username for the demo account
:param demo_account_password: The <PASSWORD> the demo account
:param demo_account_required: Set to True to mark the demo account as required
:param notes: Any notes for the reviewer
:returns: The review details
"""
existing_details = self.get_app_review_details(version_id=version_id)
attributes = {
"contactFirstName": contact_first_name,
"contactLastName": contact_last_name,
"contactPhone": contact_phone,
"contactEmail": contact_email,
"demoAccountName": demo_account_name,
"demoAccountPassword": <PASSWORD>,
"demoAccountRequired": demo_account_required,
"notes": notes,
}
if existing_details:
return self.http_client.patch(
endpoint=f"appStoreReviewDetails/{existing_details.identifier}",
data={
"data": {
"type": "appStoreReviewDetails",
"id": existing_details.identifier,
"attributes": attributes,
}
},
data_type=AppStoreReviewDetails,
)
return self.http_client.post(
endpoint="appStoreReviewDetails",
data={
"data": {
"type": "appStoreReviewDetails",
"attributes": attributes,
"relationships": {
"appStoreVersion": {"data": {"type": "appStoreVersions", "id": version_id}}
},
}
},
data_type=AppStoreReviewDetails,
)
def get_idfa(self, *, version_id: str) -> Optional[IdfaDeclaration]:
"""Get the advertising ID declaration.
:param version_id: The version to get the declaration for
:returns: The declaration if set, None otherwise
"""
return next_or_none(
self.http_client.get(
endpoint=f"appStoreVersions/{version_id}/idfaDeclaration",
data_type=IdfaDeclaration,
)
)
def set_idfa(
self,
*,
version_id: str,
attributes_action_with_previous_ad: bool,
attributes_app_installation_to_previous_ad: bool,
honors_limited_ad_tracking: bool,
serves_ads: bool,
) -> IdfaDeclaration:
"""Set the IDFA declaration
:param version_id: The ID of the version to set the build on
:param attributes_action_with_previous_ad: Set to True if the ID is used to attribute actions with a previous ad
:param attributes_app_installation_to_previous_ad: Set to True if the ID is used to attribute an installation with a previous ad
:param honors_limited_ad_tracking: Set to True to confirm that your app honors a users ad tracking preferences
:param serves_ads: Set to True if the advertising ID will be used to serve ads within your app
:returns: The review details
"""
self.log.debug("Getting existing IDFA...")
existing_details = self.get_idfa(version_id=version_id)
attributes = {
"attributesActionWithPreviousAd": attributes_action_with_previous_ad,
"attributesAppInstallationToPreviousAd": attributes_app_installation_to_previous_ad,
"honorsLimitedAdTracking": honors_limited_ad_tracking,
"servesAds": serves_ads,
}
if existing_details:
self.log.debug("Patching existing IDFA")
return self.http_client.patch(
endpoint=f"idfaDeclarations/{existing_details.identifier}",
data={
"data": {
"type": "idfaDeclarations",
"id": existing_details.identifier,
"attributes": attributes,
}
},
data_type=IdfaDeclaration,
)
self.log.debug("Setting new IDFA")
return self.http_client.post(
endpoint="idfaDeclarations",
data={
"data": {
"type": "idfaDeclarations",
"attributes": attributes,
"relationships": {
"appStoreVersion": {"data": {"type": "appStoreVersions", "id": version_id}}
},
}
},
data_type=IdfaDeclaration,
)
def submit_for_review(self, *, version_id: str,) -> None:
"""Submit the version for review
:param version_id: The ID of the version to submit for review
"""
self.http_client.post(
endpoint="appStoreVersionSubmissions",
data={
"data": {
"type": "appStoreVersionSubmissions",
"relationships": {
"appStoreVersion": {"data": {"type": "appStoreVersions", "id": version_id}}
},
}
},
data_type=None,
) | 0.931299 | 0.101145 |
r'''
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import logging
from nogotofail.mitm.connection.handlers.data import handlers
from nogotofail.mitm.connection.handlers.data import DataHandler
from nogotofail.mitm.connection.handlers.store import handler
from nogotofail.mitm.event import connection
from nogotofail.mitm import util
import re
@handler(handlers)
class ImapStartTlsStripHandler(DataHandler):
    """Strips STARTTLS from IMAP traffic so the session stays in cleartext.

    STARTTLS is removed from server capability replies and client STARTTLS
    attempts are answered with a fake rejection; if the client keeps talking
    in cleartext afterwards the connection is reported as vulnerable.
    """
    name = "imapstarttlsstrip"
    description = "Suppress STARTTLS in IMAP"
    # Per-connection state; the class attributes act as initial defaults.
    first_server_chunk_received = False
    first_client_chunk_received = False
    imap_detected = False
    # Raw strings: "\*" in a plain string literal is a deprecated escape
    # sequence (SyntaxWarning on modern Python).
    server_greeting_pattern = re.compile(r"\* OK[ \r\n]", re.I)
    server_capability_pattern = re.compile(r"\* CAPABILITY ", re.I)
    client_starttls_pattern = re.compile(r"[^ ]+ STARTTLS", re.I)
    server_starttls_stripped = False
    client_starttls_rejected = False
    vuln_notified = False

    @staticmethod
    def _without_starttls(text):
        """Return (text, found) with the first " starttls" token removed.

        The search is case-insensitive; offsets are valid on the original
        text because str.lower() preserves character positions.
        """
        index = text.lower().find(" starttls")
        if index == -1:
            return text, False
        return text[:index] + text[index + len(" starttls"):], True

    def on_response(self, response):
        if not self.first_server_chunk_received:
            self.first_server_chunk_received = True
            if (not self.first_client_chunk_received and
                    self.server_greeting_pattern.match(response)):
                self.imap_detected = True
                # Some servers advertise STARTTLS capability in their initial
                # response -- strip STARTTLS just in case.
                response, _ = self._without_starttls(response)
                return response
        if not self.imap_detected:
            return response
        if self.server_capability_pattern.match(response):
            # CAPABILITY reply from server -- strip STARTTLS from the list
            stripped, found = self._without_starttls(response)
            if found:
                self.server_starttls_stripped = True
                self.log(logging.DEBUG, "Stripped STARTTLS from server reply")
                return stripped
        return response

    def on_request(self, request):
        self.first_client_chunk_received = True
        if not self.imap_detected:
            return request
        if self.client_starttls_rejected:
            # Cleartext traffic after our fake rejection means the client
            # fell back insecurely -- report it once.
            if not self.vuln_notified:
                self.log(
                    logging.CRITICAL,
                    "Cleartext traffic after stripped STARTTLS")
                self.log_event(
                    logging.ERROR,
                    connection.AttackEvent(
                        self.connection, self.name, True,
                        None))
                self.connection.vuln_notify(
                    util.vuln.VULN_IMAP_STARTTLS_STRIP)
                self.vuln_notified = True
                # Stop analyzing/attacking this connection
                self.imap_detected = False
        elif self.client_starttls_pattern.match(request):
            # Client is attempting STARTTLS -- fake a rejection reply from
            # server and do not forward STARTTLS to server.
            self.client_starttls_rejected = True
            self.log(logging.DEBUG, "Suppressed STARTTLS from client")
            tag = request[:request.find(" ")]
            self.connection.client_socket.sendall(
                tag + " BAD STARTTLS unavailable\r\n")
            return ""
        return request
@handler.passive(handlers)
class ImapAuthHandler(DataHandler):
    """Passively flags credentials sent over cleartext IMAP."""
    name = "imapauthdetection"
    description = "Detect authentication credentials in IMAP traffic"
    # Per-connection state; the class attributes act as initial defaults.
    first_server_chunk_received = False
    first_client_chunk_received = False
    imap_detected = False
    server_greeting_pattern = re.compile("\* OK[ \r\n]", re.I)
    client_auth_pattern = re.compile("[^ ]+ LOGIN|[^ ]+ AUTHENTICATE", re.I)
    def on_response(self, response):
        # Detect IMAP from the first server chunk's "* OK" greeting.
        if not self.first_server_chunk_received:
            self.first_server_chunk_received = True
            if (not self.first_client_chunk_received and
                self.server_greeting_pattern.match(response)):
                self.imap_detected = True
        return response
    def on_request(self, request):
        self.first_client_chunk_received = True
        if not self.imap_detected:
            return request
        if self.client_auth_pattern.match(request):
            # LOGIN/AUTHENTICATE over a cleartext channel leaks credentials.
            self.log(
                logging.CRITICAL,
                "Authentication credentials in cleartext IMAP traffic")
            self.log_event(
                logging.ERROR,
                connection.AttackEvent(
                    self.connection, self.name, True,
                    None))
            self.connection.vuln_notify(util.vuln.VULN_CLEARTEXT_AUTH)
        return request | nogotofail/mitm/connection/handlers/data/imap.py | r'''
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import logging
from nogotofail.mitm.connection.handlers.data import handlers
from nogotofail.mitm.connection.handlers.data import DataHandler
from nogotofail.mitm.connection.handlers.store import handler
from nogotofail.mitm.event import connection
from nogotofail.mitm import util
import re
@handler(handlers)
class ImapStartTlsStripHandler(DataHandler):
    """Strips STARTTLS from IMAP traffic so the session stays in cleartext.

    STARTTLS is removed from server capability replies and client STARTTLS
    attempts are answered with a fake rejection; if the client keeps talking
    in cleartext afterwards the connection is reported as vulnerable.
    """
    name = "imapstarttlsstrip"
    description = "Suppress STARTTLS in IMAP"
    # Per-connection state; the class attributes act as initial defaults.
    first_server_chunk_received = False
    first_client_chunk_received = False
    imap_detected = False
    # NOTE(review): "\*" in a plain string is a deprecated escape sequence;
    # these patterns should use raw strings (r"\* OK...").
    server_greeting_pattern = re.compile("\* OK[ \r\n]", re.I)
    server_capability_pattern = re.compile("\* CAPABILITY ", re.I)
    client_starttls_pattern = re.compile("[^ ]+ STARTTLS", re.I)
    server_starttls_stripped = False
    client_starttls_rejected = False
    vuln_notified = False
    def on_response(self, response):
        # First server chunk: detect IMAP by its "* OK" greeting.
        if not self.first_server_chunk_received:
            self.first_server_chunk_received = True
            if (not self.first_client_chunk_received and
                self.server_greeting_pattern.match(response)):
                self.imap_detected = True
                # Some servers advertise STARTTLS capability in their initial
                # response -- strip STARTTLS just in case.
                starttls_index = response.lower().find(" starttls")
                if starttls_index != -1:
                    response = response[:starttls_index] + \
                        response[starttls_index + len(" starttls"):]
                return response
        if not self.imap_detected:
            return response
        if self.server_capability_pattern.match(response):
            # CAPABILITY reply from server -- strip STARTTLS from the list
            starttls_index = response.lower().find(" starttls")
            if starttls_index != -1:
                response = response[:starttls_index] + \
                    response[starttls_index + len(" starttls"):]
                self.server_starttls_stripped = True
                self.log(logging.DEBUG, "Stripped STARTTLS from server reply")
                return response
        return response
    def on_request(self, request):
        self.first_client_chunk_received = True
        if not self.imap_detected:
            return request
        if self.client_starttls_rejected:
            # Cleartext traffic after our fake rejection means the client
            # fell back insecurely -- report it once.
            if not self.vuln_notified:
                self.log(
                    logging.CRITICAL,
                    "Cleartext traffic after stripped STARTTLS")
                self.log_event(
                    logging.ERROR,
                    connection.AttackEvent(
                        self.connection, self.name, True,
                        None))
                self.connection.vuln_notify(
                    util.vuln.VULN_IMAP_STARTTLS_STRIP)
                self.vuln_notified = True
                # Stop analyzing/attacking this connection
                self.imap_detected = False
        elif self.client_starttls_pattern.match(request):
            # Client is attempting STARTTLS -- fake a rejection reply from
            # server and do not forward STARTTLS to server.
            self.client_starttls_rejected = True
            self.log(logging.DEBUG, "Suppressed STARTTLS from client")
            tag = request[:request.find(" ")]
            self.connection.client_socket.sendall(
                tag + " BAD STARTTLS unavailable\r\n")
            return ""
        return request
@handler.passive(handlers)
class ImapAuthHandler(DataHandler):
    """Passively flags cleartext IMAP LOGIN/AUTHENTICATE commands."""

    name = "imapauthdetection"
    description = "Detect authentication credentials in IMAP traffic"
    # Connection-state flags; start False for every new connection.
    first_server_chunk_received = False
    first_client_chunk_received = False
    imap_detected = False
    # Untagged server greeting, e.g. "* OK IMAP4rev1 ready".  Raw strings
    # avoid the invalid "\*" escape of the original literals.
    server_greeting_pattern = re.compile(r"\* OK[ \r\n]", re.I)
    # Tagged LOGIN or AUTHENTICATE command from the client.
    client_auth_pattern = re.compile(r"[^ ]+ LOGIN|[^ ]+ AUTHENTICATE", re.I)

    def on_response(self, response):
        """Detect IMAP by matching the server greeting on the first chunk."""
        if not self.first_server_chunk_received:
            self.first_server_chunk_received = True
            if (not self.first_client_chunk_received and
                    self.server_greeting_pattern.match(response)):
                self.imap_detected = True
        return response

    def on_request(self, request):
        """Report credentials when a cleartext auth command is observed."""
        self.first_client_chunk_received = True
        if not self.imap_detected:
            return request
        if self.client_auth_pattern.match(request):
            self.log(
                logging.CRITICAL,
                "Authentication credentials in cleartext IMAP traffic")
            self.log_event(
                logging.ERROR,
                connection.AttackEvent(
                    self.connection, self.name, True,
                    None))
            self.connection.vuln_notify(util.vuln.VULN_CLEARTEXT_AUTH)
        return request
"""Command line interface for extending feed effective dates."""
import argparse
import csv
from datetime import datetime, timedelta
import logging
import os
import shutil
import sys
import zipfile
# Default sub-directory (relative to the cwd) where downloaded feeds live.
DOWNLOAD_DIRECTORY = 'gtfs'
# extend feed effective date range this far into the past and future
EFFECTIVE_DAYS = 365
# GTFS calendar.txt stores dates as plain YYYYMMDD strings.
GTFS_DATE_FMT = '%Y%m%d'
# Root logger at INFO by default; main() raises verbosity with -v.
logging.basicConfig()
LOG = logging.getLogger()
LOG.setLevel(logging.INFO)
def extend_feed(feed_path, effective_days):
    """Extend a single GTFS feed's calendar.txt effective date range.

    Writes a sibling ``<name>_extended.zip`` next to the input feed when an
    extension was necessary; otherwise leaves the input untouched.

    :param feed_path: Full path to the GTFS zip to extend
    :param effective_days: Number of days from today the feed should extend
                           into future and past
    """
    file_name = os.path.basename(feed_path)
    tmpdir = os.path.join(os.path.dirname(feed_path), 'tmp')
    # Start from a clean scratch directory for the extracted feed.
    if os.path.isdir(tmpdir):
        shutil.rmtree(tmpdir)
    os.mkdir(tmpdir)
    try:
        with zipfile.ZipFile(feed_path, 'r') as feedzip:
            if 'calendar.txt' not in feedzip.namelist():
                LOG.warning('Feed %s has no calendar.txt; cannot extend effective date range.',
                            file_name)
                # Clean up the scratch directory (was leaked on this return).
                shutil.rmtree(tmpdir)
                return
            LOG.debug('calendar.txt found for %s. Extracting zip.', file_name)
            feedzip.extractall(tmpdir)
    except zipfile.BadZipfile:
        LOG.error('Could not process zip file %s.', file_name)
        # Bail out: the original fell through here and then crashed trying
        # to open the never-extracted calendar.txt.
        shutil.rmtree(tmpdir)
        return
    with open(os.path.join(tmpdir, 'calendar.txt'), 'rb') as cal_file:
        csvdict = csv.DictReader(cal_file, skipinitialspace=True)
        fldnames = csvdict.fieldnames
        cal = [x for x in csvdict]
    # extended_calendar() returns the updated rows, or False when the feed
    # already covers the requested range.
    cal = extended_calendar(cal, effective_days)
    if cal:
        with open(os.path.join(tmpdir, 'calendar.txt'), 'wb') as cal_file:
            csvdict = csv.DictWriter(cal_file, fieldnames=fldnames)
            csvdict.writeheader()
            csvdict.writerows(cal)
        LOG.info('Done writing new calendar file for %s.', file_name)
        # now re-zip and move the zip back to the download directory
        lastdir = os.getcwd()
        os.chdir(tmpdir)
        with zipfile.ZipFile(os.path.join(os.path.dirname(feed_path),
                                          file_name[:-4] + '_extended.zip'),
                             'w',
                             zipfile.ZIP_DEFLATED) as feedzip:
            for _, _, files in os.walk(tmpdir):
                for filename in files:
                    if filename.endswith('.txt'):
                        feedzip.write(filename)
        os.chdir(lastdir)
    else:
        LOG.info('Feed %s does not need extension.', file_name)
    # delete tmp directory when done
    shutil.rmtree(tmpdir)
def extend_feeds(feed_directory, effective_days):
    """Extend effective dates for all feeds found in the given directory.

    :param feed_directory: Full path to the directory containing the GTFS
                           zips to extend
    :param effective_days: Number of days from today into future and past to
                           extend the feeds
    """
    LOG.debug('Extending effective dates for feeds in %s...', feed_directory)
    for pdir, _, feed_files in os.walk(feed_directory):
        for feed_file in feed_files:
            if feed_file.endswith('.zip'):
                feed_path = os.path.join(pdir, feed_file)
                if zipfile.is_zipfile(feed_path):
                    extend_feed(feed_path, effective_days)
                else:
                    # warning() replaces the deprecated warn() alias
                    LOG.warning('File %s does not look like a valid zip file.', feed_file)
    LOG.info('All done!')
def extended_calendar(cal, effective_days):
"""Extends the effective date range for the given calendar.
:param cal Dictionary of calendar.txt values
:param effective_days Number of days from today the effective dates should extend
:returns Extended calendar, or False if calendar does not require extension.
"""
past_start = datetime.today() - timedelta(days=effective_days)
future_end = datetime.today() + timedelta(days=effective_days)
LOG.debug('Extending feed to be effective from %s to %s.', past_start, future_end)
modified = False
for entry in cal:
start_date_str = entry['start_date']
end_date_str = entry['end_date']
start_date = datetime.strptime(start_date_str, GTFS_DATE_FMT)
end_date = datetime.strptime(end_date_str, GTFS_DATE_FMT)
if start_date <= past_start:
LOG.debug('Start date %s already includes %s in period.', start_date, past_start)
else:
modified = True
entry['start_date'] = past_start.strftime(GTFS_DATE_FMT)
if end_date >= future_end:
LOG.debug('End date %s already includes %s in period.', end_date, future_end)
else:
modified = True
entry['end_date'] = future_end.strftime(GTFS_DATE_FMT)
return cal if modified else modified
def main():
    """Command line entry point: parse arguments and extend the feeds.

    Exits 1 when the GTFS directory is missing, 2 when --extend-days is
    not positive.
    """
    parser = argparse.ArgumentParser(description='Extend GTFS effective date range.')
    parser.add_argument('--download-directory', '-d',
                        default=os.path.join(os.getcwd(), DOWNLOAD_DIRECTORY),
                        help='Full path to GTFS directory (default: ./%s/)' %
                        DOWNLOAD_DIRECTORY)
    parser.add_argument('--extend-days', '-e', type=int,
                        default=EFFECTIVE_DAYS,
                        help='Extend GTFS this many days into past and future (default: %s)' %
                        EFFECTIVE_DAYS)
    parser.add_argument('--feeds', '-f',
                        default=None,
                        help='Comma-separated list of feeds to get (optional; default: all)')
    parser.add_argument('--verbose', '-v',
                        action='count',
                        help='Increase log level verbosity (default log level: info)')
    args = parser.parse_args()
    if args.verbose:
        LOG.setLevel(logging.DEBUG)
    if not os.path.isdir(args.download_directory):
        LOG.error('GTFS directory %s not found. Exiting.', args.download_directory)
        sys.exit(1)
    if args.extend_days < 1:
        LOG.error('--extend-days must be a positive integer. Exiting.')
        sys.exit(2)
    if not args.feeds:
        # No explicit list: walk the whole download directory.
        extend_feeds(args.download_directory, args.extend_days)
        return
    for feed_name in args.feeds.split(','):
        LOG.debug('Going to extend feed %s...', feed_name)
        extend_feed(os.path.join(args.download_directory, feed_name),
                    args.extend_days)
    LOG.info('All done!')


if __name__ == '__main__':
    main()
import argparse
import csv
from datetime import datetime, timedelta
import logging
import os
import shutil
import sys
import zipfile
DOWNLOAD_DIRECTORY = 'gtfs'
# extend feed effective date range this far into the past and future
EFFECTIVE_DAYS = 365
GTFS_DATE_FMT = '%Y%m%d'
logging.basicConfig()
LOG = logging.getLogger()
LOG.setLevel(logging.INFO)
def extend_feed(feed_path, effective_days):
    """Extend a single GTFS feed's calendar.txt effective date range.

    Writes a sibling ``<name>_extended.zip`` next to the input feed when an
    extension was necessary; otherwise leaves the input untouched.

    :param feed_path: Full path to the GTFS zip to extend
    :param effective_days: Number of days from today the feed should extend
                           into future and past
    """
    file_name = os.path.basename(feed_path)
    tmpdir = os.path.join(os.path.dirname(feed_path), 'tmp')
    # Start from a clean scratch directory for the extracted feed.
    if os.path.isdir(tmpdir):
        shutil.rmtree(tmpdir)
    os.mkdir(tmpdir)
    try:
        with zipfile.ZipFile(feed_path, 'r') as feedzip:
            if 'calendar.txt' not in feedzip.namelist():
                LOG.warning('Feed %s has no calendar.txt; cannot extend effective date range.',
                            file_name)
                # Clean up the scratch directory (was leaked on this return).
                shutil.rmtree(tmpdir)
                return
            LOG.debug('calendar.txt found for %s. Extracting zip.', file_name)
            feedzip.extractall(tmpdir)
    except zipfile.BadZipfile:
        LOG.error('Could not process zip file %s.', file_name)
        # Bail out: the original fell through here and then crashed trying
        # to open the never-extracted calendar.txt.
        shutil.rmtree(tmpdir)
        return
    with open(os.path.join(tmpdir, 'calendar.txt'), 'rb') as cal_file:
        csvdict = csv.DictReader(cal_file, skipinitialspace=True)
        fldnames = csvdict.fieldnames
        cal = [x for x in csvdict]
    # extended_calendar() returns the updated rows, or False when the feed
    # already covers the requested range.
    cal = extended_calendar(cal, effective_days)
    if cal:
        with open(os.path.join(tmpdir, 'calendar.txt'), 'wb') as cal_file:
            csvdict = csv.DictWriter(cal_file, fieldnames=fldnames)
            csvdict.writeheader()
            csvdict.writerows(cal)
        LOG.info('Done writing new calendar file for %s.', file_name)
        # now re-zip and move the zip back to the download directory
        lastdir = os.getcwd()
        os.chdir(tmpdir)
        with zipfile.ZipFile(os.path.join(os.path.dirname(feed_path),
                                          file_name[:-4] + '_extended.zip'),
                             'w',
                             zipfile.ZIP_DEFLATED) as feedzip:
            for _, _, files in os.walk(tmpdir):
                for filename in files:
                    if filename.endswith('.txt'):
                        feedzip.write(filename)
        os.chdir(lastdir)
    else:
        LOG.info('Feed %s does not need extension.', file_name)
    # delete tmp directory when done
    shutil.rmtree(tmpdir)
def extend_feeds(feed_directory, effective_days):
"""Extend effective dates for all fees found in given directory.
:param feed_directory: Full path to the directory containing the GTFS to extend
:param effective_days: Number of days from today into future and past to extend the feeds
"""
LOG.debug('Extending effective dates for feeds in %s...', feed_directory)
for pdir, _, feed_files in os.walk(feed_directory):
for feed_file in feed_files:
if feed_file.endswith('.zip'):
feed_path = os.path.join(pdir, feed_file)
if zipfile.is_zipfile(feed_path):
extend_feed(feed_path, effective_days)
else:
LOG.warn('File %s does not look like a valid zip file.', feed_file)
LOG.info('All done!')
def extended_calendar(cal, effective_days):
"""Extends the effective date range for the given calendar.
:param cal Dictionary of calendar.txt values
:param effective_days Number of days from today the effective dates should extend
:returns Extended calendar, or False if calendar does not require extension.
"""
past_start = datetime.today() - timedelta(days=effective_days)
future_end = datetime.today() + timedelta(days=effective_days)
LOG.debug('Extending feed to be effective from %s to %s.', past_start, future_end)
modified = False
for entry in cal:
start_date_str = entry['start_date']
end_date_str = entry['end_date']
start_date = datetime.strptime(start_date_str, GTFS_DATE_FMT)
end_date = datetime.strptime(end_date_str, GTFS_DATE_FMT)
if start_date <= past_start:
LOG.debug('Start date %s already includes %s in period.', start_date, past_start)
else:
modified = True
entry['start_date'] = past_start.strftime(GTFS_DATE_FMT)
if end_date >= future_end:
LOG.debug('End date %s already includes %s in period.', end_date, future_end)
else:
modified = True
entry['end_date'] = future_end.strftime(GTFS_DATE_FMT)
return cal if modified else modified
def main():
"""Main entry point for command line interface."""
parser = argparse.ArgumentParser(description='Extend GTFS effective date range.')
parser.add_argument('--download-directory', '-d',
default=os.path.join(os.getcwd(), DOWNLOAD_DIRECTORY),
help='Full path to GTFS directory (default: ./%s/)' %
DOWNLOAD_DIRECTORY)
parser.add_argument('--extend-days', '-e', type=int,
default=EFFECTIVE_DAYS,
help='Extend GTFS this many days into past and future (default: %s)' %
EFFECTIVE_DAYS)
parser.add_argument('--feeds', '-f',
default=None,
help='Comma-separated list of feeds to get (optional; default: all)')
parser.add_argument('--verbose', '-v',
action='count',
help='Increase log level verbosity (default log level: info)')
args = parser.parse_args()
if args.verbose:
LOG.setLevel(logging.DEBUG)
if not os.path.isdir(args.download_directory):
LOG.error('GTFS directory %s not found. Exiting.', args.download_directory)
sys.exit(1)
if args.extend_days < 1:
LOG.error('--extend-days must be a positive integer. Exiting.')
sys.exit(2)
if args.feeds:
feeds = args.feeds.split(',')
for feed in feeds:
LOG.debug('Going to extend feed %s...', feed)
extend_feed(os.path.join(args.download_directory, feed), args.extend_days)
LOG.info('All done!')
else:
extend_feeds(args.download_directory, args.extend_days)
if __name__ == '__main__':
main() | 0.46952 | 0.147893 |
import os
import sys
import time
import ConfigParser
import pandas as pd
import numpy as np
import theano
import theano.tensor as T
import cPickle
theano.config.floatX = 'float32'
base_path = os.path.dirname(__file__)
sys.path.insert(1,os.path.join(base_path, '../external'))
sys.path.insert(2,os.path.join(base_path, '../common'))
sys.path
from logistic_sgd import LogisticRegression
from mlp import HiddenLayer
from mlp_model import MLP_Model
from lenet import LeNetConvPoolLayer
from activation_functions import rectified_linear
class CNN_Model(object):
    """Convolutional network: stacked conv/pool layers plus an MLP head.

    Mirrors the LeNet layout: each LeNetConvPoolLayer halves the feature-map
    side via 2x2 pooling after a valid convolution; the final maps are
    flattened into an MLP with a 2-way softmax output.
    """

    def __init__(self,
                 input,
                 batch_size,
                 patchSize,
                 rng,
                 nkerns,
                 kernelSizes,
                 hiddenSizes,
                 fileName=None,
                 activation=rectified_linear):
        """Build the network graph; optionally restore weights from fileName.

        :param input: symbolic batch of flattened grayscale patches
        :param batch_size: number of patches per minibatch
        :param patchSize: side length of the square input patch
        :param rng: RandomState used for weight initialization
        :param nkerns: number of kernels per conv layer
        :param kernelSizes: square kernel side length per conv layer
        :param hiddenSizes: hidden layer sizes for the MLP head
        :param fileName: optional pickle written by save() to restore from
        :param activation: nonlinearity for the MLP hidden layers
        """
        self.convLayers = []
        self.trainingCost = []
        self.validationError = []
        self.nkerns = nkerns
        self.kernelSizes = kernelSizes
        self.hiddenSizes = hiddenSizes
        self.patchSize = patchSize
        self.batch_size = batch_size
        # (batch, channels, height, width) layout expected by the conv layers
        input = input.reshape((self.batch_size, 1, self.patchSize, self.patchSize))
        self.layer0_input = input
        self.params = []
        input_next = input
        numberOfFeatureMaps = 1
        featureMapSize = patchSize
        for i in range(len(nkerns)):
            layer = LeNetConvPoolLayer(
                rng,
                input=input_next,
                image_shape=(batch_size, numberOfFeatureMaps, featureMapSize, featureMapSize),
                filter_shape=(nkerns[i], numberOfFeatureMaps, kernelSizes[i], kernelSizes[i]),
                poolsize=(2, 2)
            )
            input_next = layer.output
            numberOfFeatureMaps = nkerns[i]
            # valid convolution shrinks by k-1, then 2x2 pooling halves
            featureMapSize = np.int16(np.floor((featureMapSize - kernelSizes[i]+1) / 2))
            self.params += layer.params
            self.convLayers.append(layer)
        # the 2 is there to preserve the batchSize
        mlp_input = self.convLayers[-1].output.flatten(2)
        self.mlp = MLP_Model(
            rng=rng,
            input=mlp_input,
            n_in=nkerns[-1] * (featureMapSize ** 2),
            n_hidden=hiddenSizes,
            n_out=2,
            # was hard-coded to rectified_linear, silently ignoring the
            # caller-supplied activation parameter
            activation=activation
        )
        self.params += self.mlp.params
        self.cost = self.mlp.negative_log_likelihood
        self.errors = self.mlp.errors
        self.p_y_given_x = self.mlp.p_y_given_x
        self.y_pred = self.mlp.y_pred
        self.debug_x = self.p_y_given_x
        if fileName is not None:
            # Pickles are binary: open 'rb' (the original 'r' corrupts on
            # Windows and fails for binary pickle protocols).
            with open(fileName, 'rb') as file:
                # The multi-name unpack target must be parenthesized; the
                # original unparenthesized one-name-per-line form did not
                # parse as a single assignment.
                (saved_convLayers,
                 saved_hiddenLayers,
                 saved_logRegressionLayer,
                 self.trainingCost,
                 self.validationError,
                 saved_nkerns,
                 saved_kernelSizes,
                 saved_batch_size,
                 saved_patchSize,
                 saved_hiddenSizes) = cPickle.load(file)
            for s_cl, cl in zip(saved_convLayers, self.convLayers):
                cl.W.set_value(s_cl.W.get_value())
                cl.b.set_value(s_cl.b.get_value())
            for s_hl, hl in zip(saved_hiddenLayers, self.mlp.hiddenLayers):
                hl.W.set_value(np.float32(s_hl.W.eval()))
                hl.b.set_value(s_hl.b.get_value())
            self.mlp.logRegressionLayer.W.set_value(np.float32(saved_logRegressionLayer.W.eval()))
            self.mlp.logRegressionLayer.b.set_value(saved_logRegressionLayer.b.get_value())

    def save(self, filename):
        """Pickle layers and training history so __init__ can restore them."""
        with open(filename, 'wb') as file:
            cPickle.dump((self.convLayers,
                          self.mlp.hiddenLayers,
                          self.mlp.logRegressionLayer,
                          self.trainingCost,
                          self.validationError,
                          self.nkerns,
                          self.kernelSizes,
                          self.batch_size,
                          self.patchSize,
                          self.hiddenSizes), file)
import os
import sys
import time
import ConfigParser
import pandas as pd
import numpy as np
import theano
import theano.tensor as T
import cPickle
theano.config.floatX = 'float32'
base_path = os.path.dirname(__file__)
sys.path.insert(1,os.path.join(base_path, '../external'))
sys.path.insert(2,os.path.join(base_path, '../common'))
sys.path
from logistic_sgd import LogisticRegression
from mlp import HiddenLayer
from mlp_model import MLP_Model
from lenet import LeNetConvPoolLayer
from activation_functions import rectified_linear
class CNN_Model(object):
def __init__(self,
input,
batch_size,
patchSize,
rng,
nkerns,
kernelSizes,
hiddenSizes,
fileName=None,
activation=rectified_linear):
self.convLayers = []
self.trainingCost = []
self.validationError = []
self.nkerns = nkerns
self.kernelSizes = kernelSizes
self.hiddenSizes = hiddenSizes
self.patchSize = patchSize
self.batch_size = batch_size
input = input.reshape((self.batch_size, 1, self.patchSize, self.patchSize))
self.layer0_input = input
self.params = []
input_next = input
numberOfFeatureMaps = 1
featureMapSize = patchSize
for i in range(len(nkerns)):
layer = LeNetConvPoolLayer(
rng,
input=input_next,
image_shape=(batch_size, numberOfFeatureMaps, featureMapSize, featureMapSize),
filter_shape=(nkerns[i], numberOfFeatureMaps, kernelSizes[i], kernelSizes[i]),
poolsize=(2, 2)
)
input_next = layer.output
numberOfFeatureMaps = nkerns[i]
featureMapSize = np.int16(np.floor((featureMapSize - kernelSizes[i]+1) / 2))
self.params += layer.params
self.convLayers.append(layer)
# the 2 is there to preserve the batchSize
mlp_input = self.convLayers[-1].output.flatten(2)
self.mlp = MLP_Model(
rng=rng,
input=mlp_input,
n_in=nkerns[-1] * (featureMapSize ** 2),
n_hidden=hiddenSizes,
n_out=2,
activation=rectified_linear
)
self.params += self.mlp.params
self.cost = self.mlp.negative_log_likelihood
self.errors = self.mlp.errors
self.p_y_given_x = self.mlp.p_y_given_x
self.y_pred = self.mlp.y_pred
self.debug_x = self.p_y_given_x
if not fileName is None:
with open(fileName, 'r') as file:
saved_convLayers,
saved_hiddenLayers,
saved_logRegressionLayer,
self.trainingCost,
self.validationError,
saved_nkerns,
saved_kernelSizes,
saved_batch_size,
saved_patchSize,
saved_hiddenSizes = cPickle.load(file)
for s_cl, cl in zip(saved_convLayers, self.convLayers):
cl.W.set_value(s_cl.W.get_value())
cl.b.set_value(s_cl.b.get_value())
for s_hl, hl in zip(saved_hiddenLayers, self.mlp.hiddenLayers):
hl.W.set_value(np.float32(s_hl.W.eval()))
hl.b.set_value(s_hl.b.get_value())
self.mlp.logRegressionLayer.W.set_value(np.float32(saved_logRegressionLayer.W.eval()))
self.mlp.logRegressionLayer.b.set_value(saved_logRegressionLayer.b.get_value())
def save(self, filename):
with open(filename, 'wb') as file:
cPickle.dump((self.convLayers,
self.mlp.hiddenLayers,
self.mlp.logRegressionLayer,
self.trainingCost,
self.validationError,
self.nkerns,
self.kernelSizes,
self.batch_size,
self.patchSize,
self.hiddenSizes), file) | 0.330039 | 0.117471 |
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = "2"
import wandb
import sys
import multiprocessing
import collections
import random
import warnings
import numpy as np
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import SimpleRNN, Dense, Bidirectional, LSTM, Dropout
from tensorflow.keras.metrics import Recall, Precision
from tensorflow.compat.v1 import ConfigProto, InteractiveSession
from wandb.keras import WandbCallback
from data_repository import DataRepository
from sklearn.model_selection import StratifiedKFold
# (queue, process) handle for one spawned cross-validation fold worker.
Worker = collections.namedtuple("Worker", ("queue", "process"))
# Everything a fold worker needs to run: fold indices, data, sweep config.
WorkerInitData = collections.namedtuple(
    "WorkerInitData", ("num", "sweep_id", "sweep_run_name", "sweep_name",
                       "config", "train", "test", "x", "y", "num_classes",
                       "token_labels")
)
# Result message sent back by a worker.  Note the trailing comma: the
# original ("val_accuracy") was a plain string, which namedtuple happens to
# accept as field names; a one-element tuple states the intent.
WorkerDoneData = collections.namedtuple("WorkerDoneData", ("val_accuracy",))
def reset_wandb_env():
    """Remove per-run WANDB_* environment variables.

    Lets each fold worker start a fresh wandb run while keeping the
    project/entity/key so it still logs to the same project.
    """
    exclude = {
        "WANDB_PROJECT",
        "WANDB_ENTITY",
        "WANDB_API_KEY",
    }
    # Snapshot the keys first: deleting from os.environ while iterating
    # os.environ.items() raises RuntimeError ("dictionary changed size
    # during iteration") on Python 3.
    for k in list(os.environ):
        if k.startswith("WANDB_") and k not in exclude:
            del os.environ[k]
def training(sweep_q, worker_q):
    # One cross-validation worker process: trains a single fold and reports
    # its validation accuracy back to the sweep process via sweep_q.
    # GPU-initialization
    gpu_config = ConfigProto()
    # Cap per-process GPU memory so the five fold workers can share a device.
    gpu_config.gpu_options.per_process_gpu_memory_fraction = 0.3
    gpu_config.gpu_options.allow_growth = True
    session = InteractiveSession(config=gpu_config)
    reset_wandb_env()
    # Block until the sweep process sends this fold's data and config.
    worker_data = worker_q.get()
    run_name = "{}-{}".format(worker_data.sweep_run_name, worker_data.num)
    config = worker_data.config
    train = worker_data.train
    test = worker_data.test
    num_classes = worker_data.num_classes
    x = worker_data.x
    y = worker_data.y
    run = wandb.init(
        group=worker_data.sweep_name,
        job_type=worker_data.sweep_run_name,
        name=run_name,
        config=config,
    )
    wandb.config.update({'hostname': os.uname()[1]})
    # Model
    dropout = run.config.dropout
    # Candidate sizes for the optional middle LSTM layers.
    nodesizes = [run.config.node_size2, run.config.node_size3, run.config.node_size4]
    model = Sequential()
    model.add(LSTM(run.config.node_size1, return_sequences=True, input_shape=(x.shape[1], x.shape[2])))
    model.add(Dropout(rate=dropout))
    # number of middle layers (expected 1 to 3) is chosen by the sweep config
    for i in range(0, run.config.num_layers):
        model.add(LSTM(nodesizes[i], return_sequences=True))
        model.add(Dropout(rate=dropout))
    model.add(LSTM(run.config.node_size5))
    model.add(Dropout(rate=dropout))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=run.config.optimizer,
                  metrics=['accuracy', Precision(), Recall()])
    model.summary()
    model.fit(x[train], y[train],
              epochs=run.config.epochs,
              batch_size=run.config.batch_size,
              validation_data=(x[test], y[test]),
              shuffle=False, verbose=2,
              callbacks=[WandbCallback()])
    #Test accuracy
    # WandbCallback saved the best checkpoint as model-best.h5 under run.dir.
    model_best_path = os.path.join(run.dir, "model-best.h5")
    best_model = load_model(filepath=model_best_path)
    y_eval = best_model.evaluate(x[test], y[test], verbose=0)
    #Confusion Matrix
    # De-one-hot predictions and labels, then map indices to class names.
    y_pred = best_model.predict(x[test])
    y_pred_integer = np.argmax(y_pred, axis=1)
    y_test_integer = np.argmax(y[test], axis=1)
    y_pred_name = ([worker_data.token_labels[p] for p in y_pred_integer])
    y_test_name = ([worker_data.token_labels[p] for p in y_test_integer])
    wandb.sklearn.plot_confusion_matrix(y_test_name, y_pred_name)
    #Convert to TFLite
    tflite_converter = tf.lite.TFLiteConverter.from_keras_model(best_model)
    tflite_converter.experimental_new_converter = True
    tflite_model = tflite_converter.convert()
    open(os.path.join(wandb.run.dir, "model-best.tflite"), "wb").write(tflite_model)
    #Finish Run
    # y_eval follows compile() order: [loss, accuracy, precision, recall].
    run.log(dict(val_accuracy=y_eval[1]))
    wandb.join()
    sweep_q.put(WorkerDoneData(val_accuracy=y_eval[1]))
def main():
    # Sweep coordinator: spawns one worker process per CV fold, feeds each
    # fold's data through a queue, and logs the mean validation accuracy.
    num_folds = 5
    # Spin up workers before calling wandb.init()
    # Workers will be blocked on a queue waiting to start
    sweep_q = multiprocessing.Queue()
    workers = []
    for num in range(num_folds):
        q = multiprocessing.Queue()
        p = multiprocessing.Process(
            target=training, kwargs=dict(sweep_q=sweep_q, worker_q=q)
        )
        p.start()
        workers.append(Worker(queue=q, process=p))
    sweep_run = wandb.init()
    sweep_id = sweep_run.sweep_id or "unknown"
    sweep_name = sweep_run.config.sweep_name
    # Link the sweep run to its group page for easy navigation.
    project_url = sweep_run.get_project_url()
    sweep_group_url = "{}/groups/{}".format(project_url, sweep_name)
    sweep_run.notes = sweep_group_url
    sweep_run.save()
    sweep_run_name = sweep_run.name or sweep_run.id or "unknown"
    # The dataset is distributed as a wandb artifact; fetch it locally.
    artifact = sweep_run.use_artifact(sweep_run.config.artifact, type='dataset')
    artifact_dir = artifact.download()
    # Normalize Windows-style separators to forward slashes.
    dirname = artifact_dir + '\\'
    dirname = dirname.replace('\\', '/')
    warnings.simplefilter(action='ignore', category=FutureWarning)
    np.set_printoptions(threshold=sys.maxsize)
    skfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=7)
    # Load data and print summary, if desired
    repo = DataRepository(dirname)
    x, y = repo.getDataAndLabels()
    #load tokens
    # Class names come from the dataset's sub-directory names, sorted
    # case-insensitively to match the label encoding.
    tokens = os.listdir(dirname)
    tokens = sorted(tokens, key=str.casefold)
    token_labels = {i: tokens[i] for i in range(0, len(tokens))}
    # StratifiedKFold needs 1-D labels, so de-one-hot y first.
    y_integer = np.argmax(y, axis=1)
    y_name = ([token_labels[p] for p in y_integer])
    num_classes = repo.numClasses
    metrics = []
    num = 0
    for train, test in skfold.split(x, y_name):
        worker = workers[num]
        # start worker
        worker.queue.put(
            WorkerInitData(
                sweep_id=sweep_id,
                num=num,
                sweep_run_name=sweep_run_name,
                sweep_name=sweep_name,
                config=dict(sweep_run.config),
                train=train,
                test=test,
                x=x,
                y=y,
                num_classes=num_classes,
                token_labels=token_labels
            )
        )
        # get metric from worker
        result = sweep_q.get()
        # wait for worker to finish
        worker.process.join()
        # log metric to sweep_run
        metrics.append(result.val_accuracy)
        num = num + 1
    wandb.config.update({'hostname': os.uname()[1]})
    # Sweep objective: mean validation accuracy across the folds.
    sweep_run.log(dict(val_accuracy=sum(metrics) / len(metrics)))
    wandb.join()
if __name__ == "__main__":
    main()
os.environ['TF_CPP_MIN_LOG_LEVEL'] = "2"
import wandb
import sys
import multiprocessing
import collections
import random
import warnings
import numpy as np
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import SimpleRNN, Dense, Bidirectional, LSTM, Dropout
from tensorflow.keras.metrics import Recall, Precision
from tensorflow.compat.v1 import ConfigProto, InteractiveSession
from wandb.keras import WandbCallback
from data_repository import DataRepository
from sklearn.model_selection import StratifiedKFold
Worker = collections.namedtuple("Worker", ("queue", "process"))
WorkerInitData = collections.namedtuple(
"WorkerInitData", ("num", "sweep_id", "sweep_run_name", "sweep_name","config","train","test","x","y","num_classes","token_labels")
)
WorkerDoneData = collections.namedtuple("WorkerDoneData", ("val_accuracy"))
def reset_wandb_env():
exclude = {
"WANDB_PROJECT",
"WANDB_ENTITY",
"WANDB_API_KEY",
}
for k, v in os.environ.items():
if k.startswith("WANDB_") and k not in exclude:
del os.environ[k]
def training(sweep_q, worker_q):
# GPU-initialization
gpu_config = ConfigProto()
gpu_config.gpu_options.per_process_gpu_memory_fraction = 0.3
gpu_config.gpu_options.allow_growth = True
session = InteractiveSession(config=gpu_config)
reset_wandb_env()
worker_data = worker_q.get()
run_name = "{}-{}".format(worker_data.sweep_run_name, worker_data.num)
config = worker_data.config
train=worker_data.train
test=worker_data.test
num_classes=worker_data.num_classes
x=worker_data.x
y=worker_data.y
run = wandb.init(
group=worker_data.sweep_name,
job_type=worker_data.sweep_run_name,
name=run_name,
config=config,
)
wandb.config.update({'hostname':os.uname()[1]})
# Model
dropout = run.config.dropout
nodesizes = [run.config.node_size2, run.config.node_size3, run.config.node_size4]
model = Sequential()
model.add(LSTM(run.config.node_size1, return_sequences=True, input_shape=(x.shape[1], x.shape[2])))
model.add(Dropout(rate=dropout))
for i in range(0,run.config.num_layers): #number of layers ramdom between 1 an 3
model.add(LSTM(nodesizes[i],return_sequences=True))
model.add(Dropout(rate=dropout))
model.add(LSTM(run.config.node_size5))
model.add(Dropout(rate=dropout))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=run.config.optimizer,
metrics=['accuracy',Precision(),Recall()])
model.summary()
model.fit(x[train],y[train],
epochs=run.config.epochs,
batch_size=run.config.batch_size,
validation_data=(x[test],y[test]),
shuffle=False,verbose=2,
callbacks=[WandbCallback()])
#Test accuracy
model_best_path = os.path.join(run.dir, "model-best.h5")
best_model= load_model(filepath=model_best_path)
y_eval = best_model.evaluate(x[test],y[test], verbose=0)
#Confusion Matrix
y_pred = best_model.predict(x[test])
y_pred_integer = np.argmax(y_pred, axis=1)
y_test_integer = np.argmax(y[test], axis=1)
y_pred_name = ([worker_data.token_labels[p] for p in y_pred_integer])
y_test_name = ([worker_data.token_labels[p] for p in y_test_integer])
wandb.sklearn.plot_confusion_matrix(y_test_name, y_pred_name)
#Convert to TFLite
tflite_converter = tf.lite.TFLiteConverter.from_keras_model(best_model)
tflite_converter.experimental_new_converter = True
tflite_model = tflite_converter.convert()
open(os.path.join(wandb.run.dir, "model-best.tflite"), "wb").write(tflite_model)
#Finish Run
run.log(dict(val_accuracy=y_eval[1]))
wandb.join()
sweep_q.put(WorkerDoneData(val_accuracy=y_eval[1]))
def main():
num_folds = 5
# Spin up workers before calling wandb.init()
# Workers will be blocked on a queue waiting to start
sweep_q = multiprocessing.Queue()
workers = []
for num in range(num_folds):
q = multiprocessing.Queue()
p = multiprocessing.Process(
target=training, kwargs=dict(sweep_q=sweep_q, worker_q=q)
)
p.start()
workers.append(Worker(queue=q, process=p))
sweep_run = wandb.init()
sweep_id = sweep_run.sweep_id or "unknown"
sweep_name = sweep_run.config.sweep_name
project_url = sweep_run.get_project_url()
sweep_group_url = "{}/groups/{}".format(project_url, sweep_name)
sweep_run.notes = sweep_group_url
sweep_run.save()
sweep_run_name = sweep_run.name or sweep_run.id or "unknown"
artifact = sweep_run.use_artifact(sweep_run.config.artifact, type='dataset')
artifact_dir = artifact.download()
dirname= artifact_dir + '\\'
dirname= dirname.replace('\\','/')
warnings.simplefilter(action='ignore', category=FutureWarning)
np.set_printoptions(threshold=sys.maxsize)
skfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=7)
# Load data and print summary, if desired
repo = DataRepository(dirname)
x, y = repo.getDataAndLabels()
#load tokens
tokens = os.listdir(dirname)
tokens = sorted(tokens, key=str.casefold)
token_labels = {i:tokens[i] for i in range(0, len(tokens))}
y_integer = np.argmax(y, axis=1)
y_name = ([token_labels[p] for p in y_integer])
num_classes = repo.numClasses
metrics = []
num=0
for train, test in skfold.split(x, y_name):
worker = workers[num]
# start worker
worker.queue.put(
WorkerInitData(
sweep_id=sweep_id,
num=num,
sweep_run_name=sweep_run_name,
sweep_name=sweep_name,
config=dict(sweep_run.config),
train=train,
test=test,
x=x,
y=y,
num_classes=num_classes,
token_labels=token_labels
)
)
# get metric from worker
result = sweep_q.get()
# wait for worker to finish
worker.process.join()
# log metric to sweep_run
metrics.append(result.val_accuracy)
num=num+1
wandb.config.update({'hostname':os.uname()[1]})
sweep_run.log(dict(val_accuracy=sum(metrics) / len(metrics)))
wandb.join()
if __name__ == "__main__":
main() | 0.539954 | 0.194846 |
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import models
class SiamModel(tf.keras.layers.Layer):
    """Shared embedding tower used by both branches of the Siamese network.

    Two Dense layers with batch normalization in between; the sigmoid
    output of fc2 is the embedding compared by the distance computation.
    """

    def __init__(self, input_shape=16, dense1_shape=32, dense2_shape=64, name='IntenalSiamModel', **kwargs):
        super(SiamModel, self).__init__(name=name, **kwargs)
        # NOTE(review): this Input placeholder is never used by call(); it
        # looks like leftover functional-API code -- confirm before removing.
        self.inputs = layers.Input(shape=input_shape)
        self.fc1 = layers.Dense(dense1_shape, activation='relu')
        self.batchnorm = layers.BatchNormalization()
        self.fc2 = layers.Dense(dense2_shape, activation='sigmoid')

    def call(self, inputs):
        """Map a batch of input vectors to their embedding vectors."""
        x = self.fc1(inputs)
        x = self.batchnorm(x)
        output = self.fc2(x)
        return output
class SiameseNetwork(tf.keras.Model):
    """Siamese network: weight-tied encoder, L1 distance, sigmoid match head."""

    def __init__(self, input_shape=16, dense1_shape=32, dense2_shape=64, name='SiameseNetwork', **kwargs):
        super(SiameseNetwork, self).__init__(name=name, **kwargs)
        # Shared (weight-tied) embedding tower applied to both inputs.
        self.internal_model = SiamModel(input_shape=input_shape, dense1_shape=dense1_shape, dense2_shape=dense2_shape)
        # NOTE(review): this Lambda is never used in call(), which computes
        # the distance directly; kept so the attribute stays available.
        self.distance_layer = layers.Lambda(lambda features: tf.math.abs(features[0] - features[1]))
        self.classifier = layers.Dense(1, activation='sigmoid')

    def call(self, inputs):
        """Return (match probability, |f1 - f2| distance vector) for a pair."""
        sample1_vector, sample2_vector = inputs
        sample1_features = self.internal_model(sample1_vector)
        sample2_features = self.internal_model(sample2_vector)
        # distance layer: element-wise L1 distance between the embeddings
        distance_vector = tf.math.abs(sample1_features - sample2_features)
        # classification head - only for training
        output = self.classifier(distance_vector)
        return output, distance_vector

    def train_step(self, data):
        """Custom training step; the loss uses only the classifier output."""
        x, y = data
        with tf.GradientTape() as tape:
            y_pred, distance = self(x, training=True)
            # Compute the loss function (the loss function is configured in `compile()`)
            loss = self.compiled_loss(y, y_pred, regularization_losses=self.losses)
        # compute the gradients
        trainable_vars = self.trainable_variables
        gradients = tape.gradient(loss, trainable_vars)
        # update the model's weights
        self.optimizer.apply_gradients(zip(gradients, trainable_vars))
        # update metrics
        self.compiled_metrics.update_state(y, y_pred)
        # Return a dict mapping metrics names to current value
        return {m.name: m.result() for m in self.metrics}
from tensorflow.keras import layers
from tensorflow.keras import models
class SiamModel(tf.keras.layers.Layer):
def __init__(self, input_shape=16, dense1_shape=32, dense2_shape=64, name='IntenalSiamModel', **kwargs):
super(SiamModel, self).__init__(name=name, **kwargs)
self.inputs = layers.Input(shape=input_shape)
self.fc1 = layers.Dense(dense1_shape, activation='relu')
self.batchnorm = layers.BatchNormalization()
self.fc2 = layers.Dense(dense2_shape, activation='sigmoid')
def call(self, inputs):
x = self.fc1(inputs)
x = self.batchnorm(x)
output = self.fc2(x)
return output
class SiameseNetwork(tf.keras.Model):
    """Siamese model: shared encoder, |a - b| distance layer, sigmoid head.

    `call` returns both the similarity probability and the raw distance
    vector so the distance embedding can be reused at inference time.
    """

    def __init__(self, input_shape=16, dense1_shape=32, dense2_shape=64, name='SiameseNetwork', **kwargs):
        super(SiameseNetwork, self).__init__(name=name, **kwargs)
        # One encoder shared by both branches (weight tying).
        self.internal_model = SiamModel(input_shape=input_shape,
                                        dense1_shape=dense1_shape,
                                        dense2_shape=dense2_shape)
        self.distance_layer = layers.Lambda(
            lambda features: tf.math.abs(features[0] - features[1]))
        self.classifier = layers.Dense(1, activation='sigmoid')

    def call(self, inputs):
        sample1_vector, sample2_vector = inputs
        sample1_features = self.internal_model(sample1_vector)
        sample2_features = self.internal_model(sample2_vector)
        # Fix: use the Lambda distance layer declared in __init__ instead of
        # duplicating the |a - b| computation inline (same result either way;
        # previously `distance_layer` was dead code).
        distance_vector = self.distance_layer([sample1_features, sample2_features])
        # classification head - only for training
        output = self.classifier(distance_vector)
        return output, distance_vector

    def train_step(self, data):
        """One optimization step: forward pass, loss, gradient update, metrics."""
        x, y = data
        with tf.GradientTape() as tape:
            y_pred, distance = self(x, training=True)
            # Compute the loss function (the loss function is configured in `compile()`)
            loss = self.compiled_loss(y, y_pred, regularization_losses=self.losses)
        # compute the gradients
        trainable_vars = self.trainable_variables
        gradients = tape.gradient(loss, trainable_vars)
        # update the model's weights
        self.optimizer.apply_gradients(zip(gradients, trainable_vars))
        # update metrics
        self.compiled_metrics.update_state(y, y_pred)
        # Return a dict mapping metrics names to current value
        return {m.name: m.result() for m in self.metrics}
from __future__ import print_function
import argparse
import os
import resource
import sys
USAGE_PROGRAM = ('%s -m oslo_concurrency.prlimit'
% os.path.basename(sys.executable))
RESOURCES = (
# argparse argument => resource
('as', resource.RLIMIT_AS),
('core', resource.RLIMIT_CORE),
('cpu', resource.RLIMIT_CPU),
('data', resource.RLIMIT_DATA),
('fsize', resource.RLIMIT_FSIZE),
('memlock', resource.RLIMIT_MEMLOCK),
('nofile', resource.RLIMIT_NOFILE),
('nproc', resource.RLIMIT_NPROC),
('rss', resource.RLIMIT_RSS),
('stack', resource.RLIMIT_STACK),
)
def parse_args():
    """Build and parse the prlimit command line.

    Each ``--<name>`` option corresponds to the identically named entry in
    ``RESOURCES``; the positional arguments are the program to execute and
    its parameters (``nargs='...'`` is argparse REMAINDER, so everything
    after the program name is passed through untouched).
    """
    parser = argparse.ArgumentParser(description='prlimit', prog=USAGE_PROGRAM)
    parser.add_argument('--as', type=int,
                        help='Address space limit in bytes')
    parser.add_argument('--core', type=int,
                        help='Core file size limit in bytes')
    parser.add_argument('--cpu', type=int,
                        help='CPU time limit in seconds')
    parser.add_argument('--data', type=int,
                        help='Data size limit in bytes')
    parser.add_argument('--fsize', type=int,
                        help='File size limit in bytes')
    parser.add_argument('--memlock', type=int,
                        help='Locked memory limit in bytes')
    parser.add_argument('--nofile', type=int,
                        help='Maximum number of open files')
    parser.add_argument('--nproc', type=int,
                        help='Maximum number of processes')
    parser.add_argument('--rss', type=int,
                        help='Maximum Resident Set Size (RSS) in bytes')
    parser.add_argument('--stack', type=int,
                        help='Stack size limit in bytes')
    parser.add_argument('program',
                        help='Program (absolute path)')
    parser.add_argument('program_args', metavar="arg", nargs='...',
                        help='Program parameters')
    args = parser.parse_args()
    return args
def main():
    """Apply the requested resource limits, then exec the target program.

    On success this function does not return: the current process image is
    replaced via ``os.execv()``.  On any failure an error is printed to
    stderr and the process exits with status 1.
    """
    args = parse_args()
    program = args.program
    if not os.path.isabs(program):
        # program uses a relative path: try to find the absolute path
        # to the executable
        if sys.version_info >= (3, 3):
            import shutil
            program_abs = shutil.which(program)
        else:
            # Python 2 fallback (shutil.which is 3.3+).
            import distutils.spawn
            program_abs = distutils.spawn.find_executable(program)
        if program_abs:
            program = program_abs
    for arg_name, rlimit in RESOURCES:
        # `--as` etc. are stored under their option name; getattr is needed
        # because `as` is a Python keyword.
        value = getattr(args, arg_name)
        if value is None:
            continue
        try:
            # Soft and hard limit are both set to the requested value.
            resource.setrlimit(rlimit, (value, value))
        except ValueError as exc:
            print("%s: failed to set the %s resource limit: %s"
                  % (USAGE_PROGRAM, arg_name.upper(), exc),
                  file=sys.stderr)
            sys.exit(1)
    try:
        os.execv(program, [program] + args.program_args)
    except Exception as exc:
        # execv only returns on failure (e.g. ENOENT, EACCES).
        print("%s: failed to execute %s: %s"
              % (USAGE_PROGRAM, program, exc),
              file=sys.stderr)
        sys.exit(1)
# Script entry point.
# Fix: the `main()` call line was fused with extraction residue
# ("| oslo_concurrency/prlimit.py |"); the plain call is restored.
if __name__ == "__main__":
    main()
from __future__ import print_function
import argparse
import os
import resource
import sys
USAGE_PROGRAM = ('%s -m oslo_concurrency.prlimit'
% os.path.basename(sys.executable))
RESOURCES = (
# argparse argument => resource
('as', resource.RLIMIT_AS),
('core', resource.RLIMIT_CORE),
('cpu', resource.RLIMIT_CPU),
('data', resource.RLIMIT_DATA),
('fsize', resource.RLIMIT_FSIZE),
('memlock', resource.RLIMIT_MEMLOCK),
('nofile', resource.RLIMIT_NOFILE),
('nproc', resource.RLIMIT_NPROC),
('rss', resource.RLIMIT_RSS),
('stack', resource.RLIMIT_STACK),
)
def parse_args():
    """Parse the prlimit command line: limit options, program and its args."""
    parser = argparse.ArgumentParser(description='prlimit', prog=USAGE_PROGRAM)
    # (option name, help text) - one --<name> int option per resource limit,
    # in the same order the original listed them.
    limit_options = (
        ('as', 'Address space limit in bytes'),
        ('core', 'Core file size limit in bytes'),
        ('cpu', 'CPU time limit in seconds'),
        ('data', 'Data size limit in bytes'),
        ('fsize', 'File size limit in bytes'),
        ('memlock', 'Locked memory limit in bytes'),
        ('nofile', 'Maximum number of open files'),
        ('nproc', 'Maximum number of processes'),
        ('rss', 'Maximum Resident Set Size (RSS) in bytes'),
        ('stack', 'Stack size limit in bytes'),
    )
    for option_name, help_text in limit_options:
        parser.add_argument('--' + option_name, type=int, help=help_text)
    parser.add_argument('program',
                        help='Program (absolute path)')
    parser.add_argument('program_args', metavar="arg", nargs='...',
                        help='Program parameters')
    return parser.parse_args()
def main():
    """Apply the requested resource limits, then exec the target program.

    On success this function does not return: the current process image is
    replaced via ``os.execv()``.  On any failure an error is printed to
    stderr and the process exits with status 1.
    """
    args = parse_args()
    program = args.program
    if not os.path.isabs(program):
        # program uses a relative path: try to find the absolute path
        # to the executable
        if sys.version_info >= (3, 3):
            import shutil
            program_abs = shutil.which(program)
        else:
            # Python 2 fallback (shutil.which is 3.3+).
            import distutils.spawn
            program_abs = distutils.spawn.find_executable(program)
        if program_abs:
            program = program_abs
    for arg_name, rlimit in RESOURCES:
        value = getattr(args, arg_name)
        if value is None:
            continue
        try:
            # Soft and hard limit are both set to the requested value.
            resource.setrlimit(rlimit, (value, value))
        except (ValueError, OSError) as exc:
            # Fix: setrlimit() raises ValueError for invalid/over-hard values,
            # but also OSError (e.g. EPERM when an unprivileged process tries
            # to raise a hard limit); the original only caught ValueError and
            # crashed with a traceback in that case.
            print("%s: failed to set the %s resource limit: %s"
                  % (USAGE_PROGRAM, arg_name.upper(), exc),
                  file=sys.stderr)
            sys.exit(1)
    try:
        os.execv(program, [program] + args.program_args)
    except Exception as exc:
        # execv only returns on failure (e.g. ENOENT, EACCES).
        print("%s: failed to execute %s: %s"
              % (USAGE_PROGRAM, program, exc),
              file=sys.stderr)
        sys.exit(1)
# Script entry point: python -m oslo_concurrency.prlimit <limits> <program> ...
if __name__ == "__main__":
    main()
import shutil
import os
import json
import re
import time
import hashlib
import uuid
from typing import List, Optional, Union, Tuple
from aim.__version__ import __version__ as aim_version
from aim.engine.configs import *
from aim.engine.utils import (
ls_dir,
deep_compare,
import_module,
clean_repo_path,
get_dict_item_by_path,
)
from aim.engine.profile import AimProfile
from aim.engine.repo.run import Run
from aim.engine.repo.dql.select import SelectResult
from aim.engine.repo.utils import (
cat_to_dir,
get_experiment_path,
get_experiment_run_path,
get_run_objects_dir_path,
get_run_objects_meta_file_path,
)
from aim.ql.grammar import Expression
from aim.ql.tree import BinaryExpressionTree
from aim.ql.utils import build_bet
class AimRepo:
# TODO: Refactor repo to have minimal side effects
WRITING_MODE = 'w'
READING_MODE = 'r'
@staticmethod
def get_working_repo(*args,
initialized_only=False,
**kwargs):
"""
Searches for .aim repository in working directory
and returns AimRepo object if exists
"""
# Get working directory path
working_dir = os.getcwd()
# Try to find closest .aim repository
repo_found = False
while True:
if len(working_dir) <= 1:
break
repo_path = os.path.join(working_dir, AIM_REPO_NAME)
config_file_path = os.path.join(repo_path, AIM_CONFIG_FILE_NAME)
if (not initialized_only and os.path.exists(repo_path)) \
or (initialized_only and os.path.isfile(config_file_path)):
repo_found = True
break
else:
working_dir = os.path.split(working_dir)[0]
if not repo_found:
return None
return AimRepo(working_dir, *args, **kwargs)
@staticmethod
def generate_commit_hash():
return str(uuid.uuid1())
@staticmethod
def get_artifact_cat(cat: tuple):
if isinstance(cat, tuple):
if len(cat) > 1:
return cat
elif len(cat) == 1:
return cat[0]
return None
@classmethod
def get_active_branch_if_exists(cls):
repo = cls.get_working_repo(initialized_only=True)
if repo is not None:
return repo.branch
return None
def __init__(self, path=None, repo_branch=None,
repo_commit=None,
repo_full_path=None,
mode=WRITING_MODE):
self._config = {}
path = clean_repo_path(path)
self.path = repo_full_path or os.path.join(path, AIM_REPO_NAME)
self.config_path = os.path.join(self.path, AIM_CONFIG_FILE_NAME)
self.hash = hashlib.md5(self.path.encode('utf-8')).hexdigest()
self.active_commit = repo_commit or AIM_COMMIT_INDEX_DIR_NAME
if re.match(r'^[A-Za-z0-9_\-]{2,}$', self.active_commit) is None:
raise ValueError('run name must be at least 2 characters ' +
'and contain only latin letters, numbers, ' +
'dash and underscore')
self.root_path = repo_full_path or path
self.name = self.root_path.split(os.sep)[-1]
self.branch_path = None
self.index_path = None
self.objects_dir_path = None
self.media_dir_path = None
self.records_storage = None
self.mode = mode
active_exp = self.config.get('active_branch')
if repo_branch is not None:
experiment = repo_branch
elif active_exp is not None:
experiment = active_exp
else:
experiment = None
if experiment is not None:
run_full_path = get_experiment_run_path(self.path,
experiment,
self.active_commit)
else:
run_full_path = None
if self.active_commit != AIM_COMMIT_INDEX_DIR_NAME and run_full_path \
and os.path.exists(run_full_path):
raise ValueError(('run `{}` already exists' +
'').format(self.active_commit))
if experiment is not None:
self.branch = experiment
def __str__(self):
return self.path
@property
def config(self):
"""
Config property getter, loads config file if not already loaded and
returns json object
"""
if len(self._config) == 0:
if os.path.isfile(self.config_path):
with open(self.config_path, 'r') as f:
config = json.load(f)
self._config = config
return self._config
@config.setter
def config(self, config):
self._config = config
@property
def branch(self):
return self._branch
@branch.setter
def branch(self, branch):
self._branch = branch
if self._branch not in self.list_branches():
self.create_branch(self._branch)
self.branch_path = get_experiment_path(self.path, self._branch)
self.index_path = get_experiment_run_path(self.path, self._branch,
self.active_commit)
self.objects_dir_path = get_run_objects_dir_path(self.path,
self._branch,
self.active_commit)
self.media_dir_path = os.path.join(self.objects_dir_path,
AIM_MEDIA_DIR_NAME)
self.meta_file_content = None
self.meta_file_path = get_run_objects_meta_file_path(self.path,
self._branch,
self.active_commit)
if not os.path.isdir(self.index_path):
os.makedirs(self.index_path)
if self.records_storage:
self.records_storage.close()
if os.path.exists(self.branch_path):
self.records_storage = self.get_records_storage(
self.objects_dir_path,
self.mode)
def get_records_storage(self, path, mode):
from aimrecords import Storage
return Storage(path, mode)
def close_records_storage(self):
"""
Finalizes and closes records storage
"""
if self.records_storage:
self.records_storage.close()
def save_config(self):
"""
Saves object config to config file
"""
with open(self.config_path, 'w') as f:
f.write(json.dumps(self._config))
def get_project_name(self):
"""
Returns project name from config file
"""
config = self.config
return config['project_name']
def get_remote_url(self, remote_name):
"""
Returns remote url specified by remote name
"""
for i in self.config['remotes']:
if i['name'] == remote_name:
return i['url']
return None
def init(self):
"""
Initializes empty Aim repository
"""
# Return if repo exists and is initialized
if self.is_initialized():
return True
try:
# Create `.aim` repo
os.makedirs(self.path, exist_ok=True)
except:
return False
# Create config file
with open(self.config_path, 'w') as config_file:
config_file.write(json.dumps({
'remotes': [],
'branches': [],
'active_branch': '',
}))
# self.create_logs()
self.create_branch(AIM_DEFAULT_BRANCH_NAME)
self.checkout_branch(AIM_DEFAULT_BRANCH_NAME)
return True
def rm(self):
"""
Removes Aim repository
"""
shutil.rmtree(self.path)
def exists(self):
"""
Checks whether Aim repository is created
"""
return os.path.exists(self.path)
def is_initialized(self):
"""
Checks whether Aim repository is initialized
"""
return os.path.exists(self.path) and os.path.isfile(self.config_path)
def ls_files(self):
"""
Returns list of repository files
"""
return ls_dir([self.path])
def reconstruct_meta_file(self):
"""
Reconstruct meta file(`Metric` and `NestedMap` artifacts)
from tracked artifacts data.
NOTE: Only can be needed in very specific cases.
"""
meta_file_content = {}
# Check if `NestedMap` were saved
map_path = os.path.join(self.objects_dir_path, 'map', 'dictionary.log')
if os.path.isfile(map_path):
meta_file_content['dictionary.log'] = {
'name': 'dictionary',
'type': ['map', 'nested_map'],
'data': None,
'data_path': 'map',
}
# Collect metrics meta info
metrics_info = self.records_storage.get_artifacts_names()
for metric_name, context_items in metrics_info.items():
meta_file_content[metric_name] = {
'name': metric_name,
'type': 'metrics',
'data': None,
'data_path': '__AIMRECORDS__',
'format': {
'artifact_format': 'aimrecords',
'record_format': 'protobuf',
},
'context': [list(c.items()) for c in context_items],
}
return meta_file_content
def load_meta_file(self, create_if_not_exist=True):
if self.meta_file_content is None:
if os.path.isfile(self.meta_file_path):
with open(self.meta_file_path, 'r+') as meta_file:
self.meta_file_content = json.loads(meta_file.read())
else:
if not create_if_not_exist:
self.meta_file_content = {}
return
os.makedirs(os.path.dirname(self.meta_file_path), exist_ok=True)
self.meta_file_content = {}
with open(self.meta_file_path, 'w+') as meta_file:
meta_file.write(json.dumps(self.meta_file_content))
    def update_meta_file(self, item_key, item_content, flush=1):
        """
        Inserts or updates a single entry of the in-memory meta file content,
        optionally flushing it to disk.

        :param item_key: item key to insert or update
        :param item_content: item value
        :param flush: 0 not flush, 1 always flush, 2 flush on data update
        """
        # Ensure meta file content is loaded (creates the file if missing).
        self.load_meta_file()
        if flush == 0:
            self.meta_file_content[item_key] = item_content
        elif flush == 1:
            self.meta_file_content[item_key] = item_content
            self.flush_meta_file()
        elif flush == 2:
            updated = True
            if item_key not in self.meta_file_content.keys():
                # Item is not added to meta file yet
                self.meta_file_content[item_key] = item_content
            elif deep_compare(self.meta_file_content[item_key], item_content):
                # Item is outdated
                # NOTE(review): this branch fires when deep_compare() returns
                # True. Whether that means "contents differ" or "contents are
                # equal" depends on deep_compare's contract - confirm, since
                # flush==2 is meant to write only on an actual change and the
                # inverse reading would mean changed items are never flushed.
                self.meta_file_content[item_key] = item_content
            else:
                updated = False
            if updated:
                self.flush_meta_file()
def flush_meta_file(self, content=None):
with open(self.meta_file_path, 'w+') as meta_file:
meta_file.write(json.dumps(content or self.meta_file_content))
def store_dir(self, name, cat, data={}):
"""
Creates a new directory inside repo and returns it's relative path
"""
# Create directory if not exists
dir_rel_path = os.path.join(AIM_CORR_DIRS_NAME, name)
dir_path = os.path.join(self.objects_dir_path,
dir_rel_path)
if not os.path.isdir(dir_path):
os.makedirs(dir_path, exist_ok=True)
self.update_meta_file(name, {
'name': name,
'type': 'dir',
'cat': cat,
'data': data,
'data_path': dir_rel_path,
})
return dir_path, dir_rel_path
def store_file(self, file_name, name, cat, data={}, rel_dir_path=None):
"""
Appends new data to the specified file or rewrites it
and updates repo meta file
"""
if not rel_dir_path:
cat_path = cat_to_dir(cat)
else:
cat_path = rel_dir_path
dir_path = os.path.join(self.objects_dir_path, cat_path)
data_file_path = os.path.join(dir_path, file_name)
# Create directory if not exists
if not os.path.isdir(dir_path):
os.makedirs(dir_path, exist_ok=True)
# Update meta file
if rel_dir_path is not None:
file_name_for_meta = '{}/{}'.format(rel_dir_path, file_name)
else:
file_name_for_meta = file_name
self.update_meta_file(file_name_for_meta, {
'name': name,
'type': self.get_artifact_cat(cat),
'data': data,
'data_path': cat_path,
}, 2)
return {
'path': os.path.join(cat_path, file_name),
'abs_path': data_file_path,
}
def store_artifact(self, name, cat, data, artifact_format=None,
binary_format=None, context=None):
"""
Adds artifact info to the repo meta file
"""
self.load_meta_file()
flush = 0
if name in self.meta_file_content.keys():
artifact_value = self.meta_file_content[name]
else:
flush = 1
artifact_value = {
'name': name,
'type': self.get_artifact_cat(cat),
'data': data,
'data_path': '__AIMRECORDS__',
'format': {
'artifact_format': artifact_format,
'record_format': binary_format,
},
'context': [],
}
if context is not None:
context_item = tuple(sorted(context.items()))
if context_item not in artifact_value['context']:
artifact_value['context'].append(context_item)
flush = 1
self.update_meta_file(name, artifact_value, flush)
return {
'name': name,
}
def store_image(self, name, cat, save_to_meta=False):
"""
Returns saved object full path
and updates repo meta file
"""
images_dir_path = os.path.join(self.media_dir_path,
AIM_IMAGES_DIR_NAME)
img_rel_path = os.path.join(AIM_MEDIA_DIR_NAME,
AIM_IMAGES_DIR_NAME)
img_abs_path = os.path.join(images_dir_path, name)
# Create image directory if not exists
dir_path = os.path.dirname(img_abs_path)
if not os.path.isdir(dir_path):
os.makedirs(dir_path, exist_ok=True)
# Update meta file
if save_to_meta:
self.update_meta_file(name, {
'name': name,
'type': self.get_artifact_cat(cat),
'data': {},
'data_path': img_rel_path,
})
return {
'path': os.path.join(img_rel_path, name),
'abs_path': img_abs_path,
}
def store_model_file(self, checkpoint_name, cat):
"""
Saves a model file into repo
"""
root_path = os.path.join(self.objects_dir_path,
cat_to_dir(cat))
dir_name = checkpoint_name
dir_path = os.path.join(root_path, dir_name)
model_file_name = 'model'
model_file_path = os.path.join(dir_path,
model_file_name)
# Create directory
os.makedirs(dir_path, exist_ok=True)
return model_file_path
def store_model(self, checkpoint_name, name, epoch,
meta_info, model_info, cat):
"""
Saves a model into repo
"""
root_path = os.path.join(self.objects_dir_path,
cat_to_dir(cat))
dir_name = checkpoint_name
dir_path = os.path.join(root_path, dir_name)
model_file_name = 'model'
model_file_path = os.path.join(dir_path,
model_file_name)
meta_file_path = os.path.join(dir_path, 'model.json')
# Create directory
os.makedirs(dir_path, exist_ok=True)
# Create meta file
with open(meta_file_path, 'w+') as meta_file:
meta_file.write(json.dumps({
'name': name,
'epoch': epoch,
'model': model_info,
}))
zip_name = '{}.aim'.format(dir_name)
zip_path = os.path.join(root_path, zip_name)
# Update repo meta file
self.update_meta_file(checkpoint_name, {
'name': checkpoint_name,
'type': self.get_artifact_cat(cat),
'data': {
'name': name,
'epoch': epoch,
'meta': meta_info,
'model': model_info,
},
'data_path': dir_name,
})
return {
'model_path': model_file_path,
'dir_path': dir_path,
'zip_path': zip_path,
}
def create_branch(self, branch):
"""
Creates a new branch - a sub-directory in repo
"""
dir_path = os.path.join(self.path, branch)
if not re.match(r'^[A-Za-z0-9_\-]{2,}$', branch):
raise AttributeError('experiment name must be at least ' +
'2 characters and contain only latin ' +
'letters, numbers, dash and underscore')
# Save branch in repo config file
branches = self.config.get('branches') or []
for b in branches:
if b.get('name') == branch:
raise AttributeError('branch {} already exists'.format(branch))
# Create branch directory
objects_dir_path = os.path.join(dir_path,
AIM_COMMIT_INDEX_DIR_NAME)
os.makedirs(objects_dir_path)
branches.append({
'name': branch,
})
self.config['branches'] = branches
self.save_config()
def checkout_branch(self, branch):
"""
Checkouts to specified branch
"""
branches = self.config.get('branches') or []
for b in branches:
if branch == b.get('name'):
self.config['active_branch'] = branch
self.branch = branch
self.save_config()
return
raise AttributeError('Experiment {} does not exist'.format(branch))
def remove_branch(self, branch):
"""
Removes specified branch
"""
if branch == AIM_DEFAULT_BRANCH_NAME:
msg = '{} branch can not be deleted'.format(AIM_DEFAULT_BRANCH_NAME)
raise AttributeError(msg)
branches = self.config.get('branches')
branch_exists = False
for b in branches:
if b.get('name') == branch:
branch_exists = True
break
if not branch_exists:
raise AttributeError('Experiment {} does not exist'.format(branch))
# Remove branch
self.config['branches'] = list(filter(lambda i: i.get('name') != branch,
self.config['branches']))
self.save_config()
# Remove branch sub-directory
dir_path = os.path.join(self.path, branch)
shutil.rmtree(dir_path)
# Set active branch to default if selected branch was active
if self.branch == branch:
self.checkout_branch(AIM_DEFAULT_BRANCH_NAME)
def list_branches(self):
"""
Returns list of existing branches
"""
if self.config.get('branches') is None:
return []
return list(filter(lambda b: b != '',
map(lambda b: b.get('name') if b else '',
self.config.get('branches'))))
def list_branch_commits(self, branch):
"""
Returns list of specified branch commits
"""
branch_path = os.path.join(self.path, branch.strip())
commits = []
for i in os.listdir(branch_path):
if os.path.isdir(os.path.join(branch_path, i)) \
and i != AIM_COMMIT_INDEX_DIR_NAME:
commits.append(i)
return commits
def is_index_empty(self):
"""
Returns `True` if index directory is empty and
`False` otherwise
"""
return not len(ls_dir([self.index_path]))
def get_latest_vc_branch(self):
"""
Returns latest created branch name and hash
"""
# Get commits
commits = {}
for c in os.listdir(self.branch_path):
commit_path = os.path.join(self.branch_path, c)
if os.path.isdir(commit_path) and c != AIM_COMMIT_INDEX_DIR_NAME:
config_file_path = os.path.join(commit_path,
AIM_COMMIT_CONFIG_FILE_NAME)
with open(config_file_path, 'r') as config_file:
commits[c] = json.loads(config_file.read())
# Find latest commit
latest_commit = None
for _, c in commits.items():
if latest_commit is None or c['date'] > latest_commit['date']:
latest_commit = c
return latest_commit.get('vc') if latest_commit else None
def run_exists(self, experiment_name: str, run_hash: str) -> bool:
"""Return true if run exists"""
return os.path.isdir(os.path.join(self.path, experiment_name, run_hash))
def commit(self, commit_hash, commit_msg, vc_branch=None, vc_hash=None):
"""
Moves current uncommitted artefacts temporary storage(aka `index`)
to commit directory and re-initializes `index`
"""
index_dir = self.index_path
# Commit dir name is same as commit hash
commit_dir = os.path.join(self.branch_path,
commit_hash)
# Move index to commit dir
shutil.move(index_dir, commit_dir)
# Init new index
os.makedirs(index_dir)
# Create commit config file
config_file_path = os.path.join(commit_dir,
AIM_COMMIT_CONFIG_FILE_NAME)
with open(config_file_path, 'w+') as config_file:
configs = {
'hash': commit_hash,
'date': int(time.time()),
'message': commit_msg,
'aim': {
'version': aim_version,
},
}
profile = AimProfile()
username = profile.get_username()
if username:
configs['user'] = {
'username': username,
}
if vc_branch and vc_hash:
configs['vc'] = {
'system': 'git',
'branch': vc_branch,
'hash': vc_hash,
}
config_file.write(json.dumps(configs))
return {
'branch': self.config.get('active_branch'),
'commit': commit_hash,
}
def commit_init(self):
index_dir = self.index_path
if not os.path.isdir(index_dir):
os.makedirs(index_dir, exist_ok=True)
# Create commit config file
config_file_path = os.path.join(index_dir,
AIM_COMMIT_CONFIG_FILE_NAME)
curr_timestamp = int(time.time())
with open(config_file_path, 'w+') as config_file:
configs = {
'hash': self.active_commit,
'date': curr_timestamp,
'message': curr_timestamp,
'archived': False,
'process': {
'start': True,
'finish': False,
'start_date': curr_timestamp,
'finish_date': 0,
'uuid': os.getenv(AIM_PROCESS_ENV_VAR),
},
'aim': {
'version': aim_version,
},
}
config_file.write(json.dumps(configs))
return True
def get_run_config(self):
config_file_path = os.path.join(self.index_path,
AIM_COMMIT_CONFIG_FILE_NAME)
if not os.path.isfile(config_file_path):
return None
with open(config_file_path, 'r+') as config_file:
try:
configs = json.loads(config_file.read())
except:
configs = None
return configs
def is_run_finished(self) -> Optional[bool]:
run_config = self.get_run_config()
process = run_config.get('process') or {}
return process.get('finish')
def commit_finish(self):
index_dir = self.index_path
config_file_path = os.path.join(index_dir,
AIM_COMMIT_CONFIG_FILE_NAME)
configs = self.get_run_config() or {}
curr_timestamp = int(time.time())
configs['date'] = curr_timestamp
configs['message'] = curr_timestamp
configs['process']['finish'] = True
configs['process']['finish_date'] = curr_timestamp
with open(config_file_path, 'w+') as config_file:
config_file.write(json.dumps(configs))
return True
def reset_index(self):
"""
Removes all files inside repo's index dir
"""
index_dir = self.index_path
# List all files inside index
for filename in os.listdir(index_dir):
file_path = os.path.join(index_dir, filename)
# Delete files, links and dirs
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
def get_index_meta(self):
"""
Returns parsed meta file of index or `False` if file does not exist
"""
meta_file_path = os.path.join(self.objects_dir_path,
AIM_COMMIT_META_FILE_NAME)
if not os.path.isfile(meta_file_path):
return False
with open(meta_file_path, 'r') as meta_file:
meta_file_content = json.load(meta_file)
return meta_file_content
def is_archived(self, experiment_name: str,
run_hash: str) -> Optional[bool]:
run_dir_path = get_experiment_run_path(self.path, experiment_name,
run_hash)
config_file_path = os.path.join(run_dir_path,
AIM_COMMIT_CONFIG_FILE_NAME)
if not os.path.exists(config_file_path):
return None
with open(config_file_path, 'r') as config_file:
try:
config = json.loads(config_file.read())
except:
return None
return config.get('archived')
def archive(self, experiment_name: str, run_hash: str) -> bool:
return self._toggle_archive_flag(experiment_name, run_hash, True)
def unarchive(self, experiment_name: str, run_hash: str) -> bool:
return self._toggle_archive_flag(experiment_name, run_hash, False)
def _toggle_archive_flag(self, experiment_name: str,
run_hash: str, flag: bool) -> bool:
run_dir_path = get_experiment_run_path(self.path, experiment_name,
run_hash)
config_file_path = os.path.join(run_dir_path,
AIM_COMMIT_CONFIG_FILE_NAME)
with open(config_file_path, 'r') as config_file:
try:
config = json.loads(config_file.read())
except:
return False
config['archived'] = flag
with open(config_file_path, 'w') as config_file:
try:
config_file.write(json.dumps(config))
except:
return False
return True
def save_diff(self, diff):
"""
Saves diff to the repo
"""
diff_dir_path = os.path.join(self.objects_dir_path,
AIM_DIFF_DIR_NAME)
diff_file_path = os.path.join(diff_dir_path,
AIM_DIFF_FILE_NAME)
# Create `diff` directory
os.makedirs(diff_dir_path, exist_ok=True)
# Write diff content to the `diff` file
with open(diff_file_path, 'w+') as diff_file:
diff_file.write(diff)
def ls_branch_files(self, branch):
"""
Returns list of files of the specified branch
"""
branch_path = os.path.join(self.path, branch)
return ls_dir([branch_path])
def ls_commit_files(self, branch, commit):
"""
Returns list of files of the specified commit
"""
commit_path = os.path.join(self.path, branch, commit)
return ls_dir([commit_path])
def select(self,
select_fields: List[str] = [],
expression: Optional[
Union[str,
Expression,
BinaryExpressionTree]] = None,
default_expression: Optional[
Union[str,
Expression,
BinaryExpressionTree]] = None,
):
select_result = SelectResult(select_fields)
runs = {
exp_name: [
Run(self, exp_name, run_hash)
for run_hash in self.list_branch_commits(exp_name)
]
for exp_name in self.list_branches()
}
# Build expression tree
if expression:
expression = build_bet(expression)
expression.strict = True
if default_expression:
default_expression = build_bet(default_expression)
default_expression.strict = True
if expression:
expression.concat(default_expression)
else:
expression = default_expression
for experiment_runs in runs.values():
for run in experiment_runs:
# Dictionary representing all search fields
fields = {
'experiment': run.experiment_name,
'run': run.config, # Run configs (date, name, archived etc)
'params': run.params, # Run parameters (`NestedMap`)
}
# Default parameters - those passed without namespace
default_params = {
'params': (run.params.get(AIM_NESTED_MAP_DEFAULT) or {}),
}
# Search metrics
for metric_name, metric in run.get_all_metrics().items():
fields['metric'] = metric_name
for trace in metric.get_all_traces():
fields['context'] = trace.context
# Pass fields in descending order by priority
if expression is None:
res = True
else:
res = expression.match(fields,
run.params,
default_params)
if res is not True:
continue
# Append trace data if metric is selected
for select_field in select_fields:
if select_field == metric_name:
metric.append(trace)
run.add(metric)
break
# Append run if either metric or param is selected
for select_field in select_fields:
if select_field == metric_name:
select_result.append_run(run)
break
field_val = get_dict_item_by_path(run.params,
select_field)
if field_val is not None:
select_result.append_run(run)
break
return select_result
def select_runs(self,
expression: Optional[
Union[str,
Expression,
BinaryExpressionTree]] = None,
default_expression: Optional[
Union[str,
Expression,
BinaryExpressionTree]] = None,
) -> List[Run]:
runs = {
exp_name: [
Run(self, exp_name, run_hash)
for run_hash in self.list_branch_commits(exp_name)
]
for exp_name in self.list_branches()
}
matched_runs = [] # type: List[Run]
# Build expression tree
if expression:
expression = build_bet(expression)
expression.strict = True
if default_expression:
default_expression = build_bet(default_expression)
default_expression.strict = True
if expression:
expression.concat(default_expression)
else:
expression = default_expression
for experiment_runs in runs.values():
for run in experiment_runs:
# Add metrics path modifier
expression.dump_path_modifiers()
if AIM_MAP_METRICS_KEYWORD in run.params.keys():
expression.add_path_modifier(
lambda path_token: self.metrics_path_checker(
path_token,
run.config.keys()),
lambda path_token: self.metrics_path_modifier(
path_token,
run.params[AIM_MAP_METRICS_KEYWORD])
)
# Dictionary representing all search fields
fields = {
'experiment': run.experiment_name,
'run': run.config, # Run configs (date, name, archived etc)
'params': run.params, # Run parameters (`NestedMap`)
}
# Default parameters - ones passed without namespace
default_params = run.params.get(AIM_NESTED_MAP_DEFAULT) or {}
if not expression:
res = True
else:
res = expression.match(fields,
run.params,
default_params)
if res is True:
matched_runs.append(run)
return matched_runs
def select_metrics(self, select_metrics: Union[str, List[str], Tuple[str]],
expression: Optional[
Union[str,
Expression,
BinaryExpressionTree]] = None,
default_expression: Optional[
Union[str,
Expression,
BinaryExpressionTree]] = None,
) -> List[Run]:
"""
Searches repo and returns matching metrics
"""
if isinstance(select_metrics, str):
select_metrics = [select_metrics]
runs = {
exp_name: [
Run(self, exp_name, run_hash)
for run_hash in self.list_branch_commits(exp_name)
]
for exp_name in self.list_branches()
}
matched_runs = [] # type: List[Run]
expression = build_bet(expression)
expression.strict = True
if default_expression:
default_expression = build_bet(default_expression)
expression.concat(default_expression)
for experiment_runs in runs.values():
for run in experiment_runs:
# Add metrics path modifier
expression.dump_path_modifiers()
if AIM_MAP_METRICS_KEYWORD in run.params.keys():
expression.add_path_modifier(
lambda path_token: self.metrics_path_checker(
path_token,
run.config.keys()),
lambda path_token: self.metrics_path_modifier(
path_token,
run.params[AIM_MAP_METRICS_KEYWORD])
)
# Dictionary representing all search fields
fields = {
'experiment': run.experiment_name,
'run': run.config, # Run configs (date, name, archived etc)
'params': run.params, # Run parameters (`NestedMap`)
}
# Default parameters - ones passed without namespace
default_params = run.params.get(AIM_NESTED_MAP_DEFAULT) or {}
# Search metrics
for metric_name, metric in run.get_all_metrics().items():
if metric_name not in select_metrics:
continue
fields['metric'] = metric_name
for trace in metric.get_all_traces():
fields['context'] = trace.context
# Pass fields in descending order by priority
if expression is None:
res = True
else:
res = expression.match(fields,
run.params,
default_params)
if res is True:
metric.append(trace)
run.add(metric)
if run not in matched_runs:
matched_runs.append(run)
return matched_runs
@staticmethod
def metrics_path_checker(path, run_fields: list) -> bool:
path = str(path)
if not path.startswith('run.'):
return False
identifiers = path.split('.')[1:]
if len(identifiers) == 0 or identifiers[0] in run_fields:
return False
return True
@staticmethod
def metrics_path_modifier(path, metrics) -> Optional[bool]:
path = str(path)
if '.' not in path:
return None
identifiers = path.split('.')
if len(identifiers) < 2:
return None
metric_name = identifiers[1]
if len(identifiers) > 2 and identifiers[-1] in ('min', 'max', 'last'):
value_field = identifiers[-1]
identifiers = identifiers[:-1]
else:
value_field = 'last'
context_identifiers = identifiers[2:]
if metric_name not in metrics:
return None
metric_data = metrics[metric_name]
for trace in metric_data:
context_values = list(map(lambda c: c[1], trace['context']))
if all(c in context_values for c in context_identifiers):
return trace['values'][value_field]
return None
def select_run_metrics(self, experiment_name: str, run_hash: str,
select_metrics: Optional[
Union[str, List[str], Tuple[str]]
] = None
) -> Optional[Run]:
if not self.run_exists(experiment_name, run_hash):
return None
if select_metrics is not None and isinstance(select_metrics, str):
select_metrics = [select_metrics]
run = Run(self, experiment_name, run_hash)
for metric_name, metric in run.get_all_metrics().items():
if select_metrics is None or metric_name in select_metrics:
for trace in metric.get_all_traces():
metric.append(trace)
run.add(metric)
return run
    def create_logs(self):
        """
        Creates the logs dir in .aim to store error and activity logs
        for cli and sdk respectively
        """
        logs_path = os.path.join(self.path, AIM_LOGGING_DIR_NAME)
        # NOTE(review): os.mkdir raises FileExistsError when the dir already
        # exists — confirm callers invoke this at most once per repo
        os.mkdir(logs_path)
def get_logs_dir(self):
return os.path.join(self.path, AIM_LOGGING_DIR_NAME) | aim/engine/repo/repo.py | import shutil
import os
import json
import re
import time
import hashlib
import uuid
from typing import Any, List, Optional, Tuple, Union
from aim.__version__ import __version__ as aim_version
from aim.engine.configs import *
from aim.engine.utils import (
ls_dir,
deep_compare,
import_module,
clean_repo_path,
get_dict_item_by_path,
)
from aim.engine.profile import AimProfile
from aim.engine.repo.run import Run
from aim.engine.repo.dql.select import SelectResult
from aim.engine.repo.utils import (
cat_to_dir,
get_experiment_path,
get_experiment_run_path,
get_run_objects_dir_path,
get_run_objects_meta_file_path,
)
from aim.ql.grammar import Expression
from aim.ql.tree import BinaryExpressionTree
from aim.ql.utils import build_bet
class AimRepo:
# TODO: Refactor repo to have minimal side effects
WRITING_MODE = 'w'
READING_MODE = 'r'
@staticmethod
def get_working_repo(*args,
initialized_only=False,
**kwargs):
"""
Searches for .aim repository in working directory
and returns AimRepo object if exists
"""
# Get working directory path
working_dir = os.getcwd()
# Try to find closest .aim repository
repo_found = False
while True:
if len(working_dir) <= 1:
break
repo_path = os.path.join(working_dir, AIM_REPO_NAME)
config_file_path = os.path.join(repo_path, AIM_CONFIG_FILE_NAME)
if (not initialized_only and os.path.exists(repo_path)) \
or (initialized_only and os.path.isfile(config_file_path)):
repo_found = True
break
else:
working_dir = os.path.split(working_dir)[0]
if not repo_found:
return None
return AimRepo(working_dir, *args, **kwargs)
    @staticmethod
    def generate_commit_hash():
        """Generate a run hash (UUID1: host/time based, not random)."""
        return str(uuid.uuid1())
@staticmethod
def get_artifact_cat(cat: tuple):
if isinstance(cat, tuple):
if len(cat) > 1:
return cat
elif len(cat) == 1:
return cat[0]
return None
@classmethod
def get_active_branch_if_exists(cls):
repo = cls.get_working_repo(initialized_only=True)
if repo is not None:
return repo.branch
return None
    def __init__(self, path=None, repo_branch=None,
                 repo_commit=None,
                 repo_full_path=None,
                 mode=WRITING_MODE):
        """
        :param path: project root; the repo lives in `<path>/.aim`
            unless `repo_full_path` is given
        :param repo_branch: experiment to activate; falls back to the
            config's `active_branch` when omitted
        :param repo_commit: run name; defaults to the index dir name
        :param repo_full_path: explicit repo directory, overrides `path`
        :param mode: WRITING_MODE or READING_MODE for the records storage
        :raises ValueError: on an invalid run name, or when the named run
            already exists on disk
        """
        self._config = {}
        path = clean_repo_path(path)
        self.path = repo_full_path or os.path.join(path, AIM_REPO_NAME)
        self.config_path = os.path.join(self.path, AIM_CONFIG_FILE_NAME)
        # Repo identity is derived from its absolute path
        self.hash = hashlib.md5(self.path.encode('utf-8')).hexdigest()
        self.active_commit = repo_commit or AIM_COMMIT_INDEX_DIR_NAME
        if re.match(r'^[A-Za-z0-9_\-]{2,}$', self.active_commit) is None:
            raise ValueError('run name must be at least 2 characters ' +
                             'and contain only latin letters, numbers, ' +
                             'dash and underscore')
        self.root_path = repo_full_path or path
        self.name = self.root_path.split(os.sep)[-1]
        # Branch-dependent paths; populated by the `branch` setter below
        self.branch_path = None
        self.index_path = None
        self.objects_dir_path = None
        self.media_dir_path = None
        self.records_storage = None
        self.mode = mode
        # Resolve the experiment: explicit argument wins over the config's
        # active branch (which may be absent for an uninitialized repo)
        active_exp = self.config.get('active_branch')
        if repo_branch is not None:
            experiment = repo_branch
        elif active_exp is not None:
            experiment = active_exp
        else:
            experiment = None
        if experiment is not None:
            run_full_path = get_experiment_run_path(self.path,
                                                    experiment,
                                                    self.active_commit)
        else:
            run_full_path = None
        # Refuse to reuse an existing run name (the index dir is exempt)
        if self.active_commit != AIM_COMMIT_INDEX_DIR_NAME and run_full_path \
                and os.path.exists(run_full_path):
            raise ValueError(('run `{}` already exists' +
                              '').format(self.active_commit))
        # Assigning `branch` creates dirs and opens the records storage
        if experiment is not None:
            self.branch = experiment
def __str__(self):
return self.path
@property
def config(self):
"""
Config property getter, loads config file if not already loaded and
returns json object
"""
if len(self._config) == 0:
if os.path.isfile(self.config_path):
with open(self.config_path, 'r') as f:
config = json.load(f)
self._config = config
return self._config
@config.setter
def config(self, config):
self._config = config
@property
def branch(self):
return self._branch
    @branch.setter
    def branch(self, branch):
        """
        Activate `branch`: create it if missing, recompute all
        branch-dependent paths and (re)open the records storage.
        """
        self._branch = branch
        if self._branch not in self.list_branches():
            self.create_branch(self._branch)
        self.branch_path = get_experiment_path(self.path, self._branch)
        self.index_path = get_experiment_run_path(self.path, self._branch,
                                                  self.active_commit)
        self.objects_dir_path = get_run_objects_dir_path(self.path,
                                                         self._branch,
                                                         self.active_commit)
        self.media_dir_path = os.path.join(self.objects_dir_path,
                                           AIM_MEDIA_DIR_NAME)
        # Meta file content is loaded lazily by `load_meta_file`
        self.meta_file_content = None
        self.meta_file_path = get_run_objects_meta_file_path(self.path,
                                                             self._branch,
                                                             self.active_commit)
        if not os.path.isdir(self.index_path):
            os.makedirs(self.index_path)
        # Close any storage opened for a previous branch before re-opening
        if self.records_storage:
            self.records_storage.close()
        if os.path.exists(self.branch_path):
            self.records_storage = self.get_records_storage(
                self.objects_dir_path,
                self.mode)
def get_records_storage(self, path, mode):
from aimrecords import Storage
return Storage(path, mode)
def close_records_storage(self):
"""
Finalizes and closes records storage
"""
if self.records_storage:
self.records_storage.close()
def save_config(self):
"""
Saves object config to config file
"""
with open(self.config_path, 'w') as f:
f.write(json.dumps(self._config))
def get_project_name(self):
"""
Returns project name from config file
"""
config = self.config
return config['project_name']
def get_remote_url(self, remote_name):
"""
Returns remote url specified by remote name
"""
for i in self.config['remotes']:
if i['name'] == remote_name:
return i['url']
return None
def init(self):
"""
Initializes empty Aim repository
"""
# Return if repo exists and is initialized
if self.is_initialized():
return True
try:
# Create `.aim` repo
os.makedirs(self.path, exist_ok=True)
except:
return False
# Create config file
with open(self.config_path, 'w') as config_file:
config_file.write(json.dumps({
'remotes': [],
'branches': [],
'active_branch': '',
}))
# self.create_logs()
self.create_branch(AIM_DEFAULT_BRANCH_NAME)
self.checkout_branch(AIM_DEFAULT_BRANCH_NAME)
return True
def rm(self):
"""
Removes Aim repository
"""
shutil.rmtree(self.path)
def exists(self):
"""
Checks whether Aim repository is created
"""
return os.path.exists(self.path)
def is_initialized(self):
"""
Checks whether Aim repository is initialized
"""
return os.path.exists(self.path) and os.path.isfile(self.config_path)
def ls_files(self):
"""
Returns list of repository files
"""
return ls_dir([self.path])
def reconstruct_meta_file(self):
"""
Reconstruct meta file(`Metric` and `NestedMap` artifacts)
from tracked artifacts data.
NOTE: Only can be needed in very specific cases.
"""
meta_file_content = {}
# Check if `NestedMap` were saved
map_path = os.path.join(self.objects_dir_path, 'map', 'dictionary.log')
if os.path.isfile(map_path):
meta_file_content['dictionary.log'] = {
'name': 'dictionary',
'type': ['map', 'nested_map'],
'data': None,
'data_path': 'map',
}
# Collect metrics meta info
metrics_info = self.records_storage.get_artifacts_names()
for metric_name, context_items in metrics_info.items():
meta_file_content[metric_name] = {
'name': metric_name,
'type': 'metrics',
'data': None,
'data_path': '__AIMRECORDS__',
'format': {
'artifact_format': 'aimrecords',
'record_format': 'protobuf',
},
'context': [list(c.items()) for c in context_items],
}
return meta_file_content
def load_meta_file(self, create_if_not_exist=True):
if self.meta_file_content is None:
if os.path.isfile(self.meta_file_path):
with open(self.meta_file_path, 'r+') as meta_file:
self.meta_file_content = json.loads(meta_file.read())
else:
if not create_if_not_exist:
self.meta_file_content = {}
return
os.makedirs(os.path.dirname(self.meta_file_path), exist_ok=True)
self.meta_file_content = {}
with open(self.meta_file_path, 'w+') as meta_file:
meta_file.write(json.dumps(self.meta_file_content))
    def update_meta_file(self, item_key, item_content, flush=1):
        """
        Insert or update a record in the run meta file.

        :param item_key: item key to insert or update
        :param item_content: item value
        :param flush: 0 not flush, 1 always flush, 2 flush on data update
        """
        self.load_meta_file()
        if flush == 0:
            # In-memory update only; caller flushes later
            self.meta_file_content[item_key] = item_content
        elif flush == 1:
            self.meta_file_content[item_key] = item_content
            self.flush_meta_file()
        elif flush == 2:
            updated = True
            if item_key not in self.meta_file_content.keys():
                # Item is not added to meta file yet
                self.meta_file_content[item_key] = item_content
            elif deep_compare(self.meta_file_content[item_key], item_content):
                # Item is outdated
                # NOTE(review): assumes `deep_compare` is truthy when the
                # stored and new contents DIFFER — confirm against
                # aim.engine.utils; if it returns True on equality this
                # branch is inverted and unchanged items are re-flushed
                self.meta_file_content[item_key] = item_content
            else:
                updated = False
            if updated:
                self.flush_meta_file()
def flush_meta_file(self, content=None):
with open(self.meta_file_path, 'w+') as meta_file:
meta_file.write(json.dumps(content or self.meta_file_content))
def store_dir(self, name, cat, data={}):
"""
Creates a new directory inside repo and returns it's relative path
"""
# Create directory if not exists
dir_rel_path = os.path.join(AIM_CORR_DIRS_NAME, name)
dir_path = os.path.join(self.objects_dir_path,
dir_rel_path)
if not os.path.isdir(dir_path):
os.makedirs(dir_path, exist_ok=True)
self.update_meta_file(name, {
'name': name,
'type': 'dir',
'cat': cat,
'data': data,
'data_path': dir_rel_path,
})
return dir_path, dir_rel_path
def store_file(self, file_name, name, cat, data={}, rel_dir_path=None):
"""
Appends new data to the specified file or rewrites it
and updates repo meta file
"""
if not rel_dir_path:
cat_path = cat_to_dir(cat)
else:
cat_path = rel_dir_path
dir_path = os.path.join(self.objects_dir_path, cat_path)
data_file_path = os.path.join(dir_path, file_name)
# Create directory if not exists
if not os.path.isdir(dir_path):
os.makedirs(dir_path, exist_ok=True)
# Update meta file
if rel_dir_path is not None:
file_name_for_meta = '{}/{}'.format(rel_dir_path, file_name)
else:
file_name_for_meta = file_name
self.update_meta_file(file_name_for_meta, {
'name': name,
'type': self.get_artifact_cat(cat),
'data': data,
'data_path': cat_path,
}, 2)
return {
'path': os.path.join(cat_path, file_name),
'abs_path': data_file_path,
}
def store_artifact(self, name, cat, data, artifact_format=None,
binary_format=None, context=None):
"""
Adds artifact info to the repo meta file
"""
self.load_meta_file()
flush = 0
if name in self.meta_file_content.keys():
artifact_value = self.meta_file_content[name]
else:
flush = 1
artifact_value = {
'name': name,
'type': self.get_artifact_cat(cat),
'data': data,
'data_path': '__AIMRECORDS__',
'format': {
'artifact_format': artifact_format,
'record_format': binary_format,
},
'context': [],
}
if context is not None:
context_item = tuple(sorted(context.items()))
if context_item not in artifact_value['context']:
artifact_value['context'].append(context_item)
flush = 1
self.update_meta_file(name, artifact_value, flush)
return {
'name': name,
}
def store_image(self, name, cat, save_to_meta=False):
"""
Returns saved object full path
and updates repo meta file
"""
images_dir_path = os.path.join(self.media_dir_path,
AIM_IMAGES_DIR_NAME)
img_rel_path = os.path.join(AIM_MEDIA_DIR_NAME,
AIM_IMAGES_DIR_NAME)
img_abs_path = os.path.join(images_dir_path, name)
# Create image directory if not exists
dir_path = os.path.dirname(img_abs_path)
if not os.path.isdir(dir_path):
os.makedirs(dir_path, exist_ok=True)
# Update meta file
if save_to_meta:
self.update_meta_file(name, {
'name': name,
'type': self.get_artifact_cat(cat),
'data': {},
'data_path': img_rel_path,
})
return {
'path': os.path.join(img_rel_path, name),
'abs_path': img_abs_path,
}
def store_model_file(self, checkpoint_name, cat):
"""
Saves a model file into repo
"""
root_path = os.path.join(self.objects_dir_path,
cat_to_dir(cat))
dir_name = checkpoint_name
dir_path = os.path.join(root_path, dir_name)
model_file_name = 'model'
model_file_path = os.path.join(dir_path,
model_file_name)
# Create directory
os.makedirs(dir_path, exist_ok=True)
return model_file_path
def store_model(self, checkpoint_name, name, epoch,
meta_info, model_info, cat):
"""
Saves a model into repo
"""
root_path = os.path.join(self.objects_dir_path,
cat_to_dir(cat))
dir_name = checkpoint_name
dir_path = os.path.join(root_path, dir_name)
model_file_name = 'model'
model_file_path = os.path.join(dir_path,
model_file_name)
meta_file_path = os.path.join(dir_path, 'model.json')
# Create directory
os.makedirs(dir_path, exist_ok=True)
# Create meta file
with open(meta_file_path, 'w+') as meta_file:
meta_file.write(json.dumps({
'name': name,
'epoch': epoch,
'model': model_info,
}))
zip_name = '{}.aim'.format(dir_name)
zip_path = os.path.join(root_path, zip_name)
# Update repo meta file
self.update_meta_file(checkpoint_name, {
'name': checkpoint_name,
'type': self.get_artifact_cat(cat),
'data': {
'name': name,
'epoch': epoch,
'meta': meta_info,
'model': model_info,
},
'data_path': dir_name,
})
return {
'model_path': model_file_path,
'dir_path': dir_path,
'zip_path': zip_path,
}
def create_branch(self, branch):
"""
Creates a new branch - a sub-directory in repo
"""
dir_path = os.path.join(self.path, branch)
if not re.match(r'^[A-Za-z0-9_\-]{2,}$', branch):
raise AttributeError('experiment name must be at least ' +
'2 characters and contain only latin ' +
'letters, numbers, dash and underscore')
# Save branch in repo config file
branches = self.config.get('branches') or []
for b in branches:
if b.get('name') == branch:
raise AttributeError('branch {} already exists'.format(branch))
# Create branch directory
objects_dir_path = os.path.join(dir_path,
AIM_COMMIT_INDEX_DIR_NAME)
os.makedirs(objects_dir_path)
branches.append({
'name': branch,
})
self.config['branches'] = branches
self.save_config()
def checkout_branch(self, branch):
"""
Checkouts to specified branch
"""
branches = self.config.get('branches') or []
for b in branches:
if branch == b.get('name'):
self.config['active_branch'] = branch
self.branch = branch
self.save_config()
return
raise AttributeError('Experiment {} does not exist'.format(branch))
def remove_branch(self, branch):
"""
Removes specified branch
"""
if branch == AIM_DEFAULT_BRANCH_NAME:
msg = '{} branch can not be deleted'.format(AIM_DEFAULT_BRANCH_NAME)
raise AttributeError(msg)
branches = self.config.get('branches')
branch_exists = False
for b in branches:
if b.get('name') == branch:
branch_exists = True
break
if not branch_exists:
raise AttributeError('Experiment {} does not exist'.format(branch))
# Remove branch
self.config['branches'] = list(filter(lambda i: i.get('name') != branch,
self.config['branches']))
self.save_config()
# Remove branch sub-directory
dir_path = os.path.join(self.path, branch)
shutil.rmtree(dir_path)
# Set active branch to default if selected branch was active
if self.branch == branch:
self.checkout_branch(AIM_DEFAULT_BRANCH_NAME)
def list_branches(self):
"""
Returns list of existing branches
"""
if self.config.get('branches') is None:
return []
return list(filter(lambda b: b != '',
map(lambda b: b.get('name') if b else '',
self.config.get('branches'))))
def list_branch_commits(self, branch):
"""
Returns list of specified branch commits
"""
branch_path = os.path.join(self.path, branch.strip())
commits = []
for i in os.listdir(branch_path):
if os.path.isdir(os.path.join(branch_path, i)) \
and i != AIM_COMMIT_INDEX_DIR_NAME:
commits.append(i)
return commits
def is_index_empty(self):
"""
Returns `True` if index directory is empty and
`False` otherwise
"""
return not len(ls_dir([self.index_path]))
def get_latest_vc_branch(self):
"""
Returns latest created branch name and hash
"""
# Get commits
commits = {}
for c in os.listdir(self.branch_path):
commit_path = os.path.join(self.branch_path, c)
if os.path.isdir(commit_path) and c != AIM_COMMIT_INDEX_DIR_NAME:
config_file_path = os.path.join(commit_path,
AIM_COMMIT_CONFIG_FILE_NAME)
with open(config_file_path, 'r') as config_file:
commits[c] = json.loads(config_file.read())
# Find latest commit
latest_commit = None
for _, c in commits.items():
if latest_commit is None or c['date'] > latest_commit['date']:
latest_commit = c
return latest_commit.get('vc') if latest_commit else None
def run_exists(self, experiment_name: str, run_hash: str) -> bool:
"""Return true if run exists"""
return os.path.isdir(os.path.join(self.path, experiment_name, run_hash))
def commit(self, commit_hash, commit_msg, vc_branch=None, vc_hash=None):
"""
Moves current uncommitted artefacts temporary storage(aka `index`)
to commit directory and re-initializes `index`
"""
index_dir = self.index_path
# Commit dir name is same as commit hash
commit_dir = os.path.join(self.branch_path,
commit_hash)
# Move index to commit dir
shutil.move(index_dir, commit_dir)
# Init new index
os.makedirs(index_dir)
# Create commit config file
config_file_path = os.path.join(commit_dir,
AIM_COMMIT_CONFIG_FILE_NAME)
with open(config_file_path, 'w+') as config_file:
configs = {
'hash': commit_hash,
'date': int(time.time()),
'message': commit_msg,
'aim': {
'version': aim_version,
},
}
profile = AimProfile()
username = profile.get_username()
if username:
configs['user'] = {
'username': username,
}
if vc_branch and vc_hash:
configs['vc'] = {
'system': 'git',
'branch': vc_branch,
'hash': vc_hash,
}
config_file.write(json.dumps(configs))
return {
'branch': self.config.get('active_branch'),
'commit': commit_hash,
}
def commit_init(self):
index_dir = self.index_path
if not os.path.isdir(index_dir):
os.makedirs(index_dir, exist_ok=True)
# Create commit config file
config_file_path = os.path.join(index_dir,
AIM_COMMIT_CONFIG_FILE_NAME)
curr_timestamp = int(time.time())
with open(config_file_path, 'w+') as config_file:
configs = {
'hash': self.active_commit,
'date': curr_timestamp,
'message': curr_timestamp,
'archived': False,
'process': {
'start': True,
'finish': False,
'start_date': curr_timestamp,
'finish_date': 0,
'uuid': os.getenv(AIM_PROCESS_ENV_VAR),
},
'aim': {
'version': aim_version,
},
}
config_file.write(json.dumps(configs))
return True
def get_run_config(self):
config_file_path = os.path.join(self.index_path,
AIM_COMMIT_CONFIG_FILE_NAME)
if not os.path.isfile(config_file_path):
return None
with open(config_file_path, 'r+') as config_file:
try:
configs = json.loads(config_file.read())
except:
configs = None
return configs
def is_run_finished(self) -> Optional[bool]:
run_config = self.get_run_config()
process = run_config.get('process') or {}
return process.get('finish')
def commit_finish(self):
index_dir = self.index_path
config_file_path = os.path.join(index_dir,
AIM_COMMIT_CONFIG_FILE_NAME)
configs = self.get_run_config() or {}
curr_timestamp = int(time.time())
configs['date'] = curr_timestamp
configs['message'] = curr_timestamp
configs['process']['finish'] = True
configs['process']['finish_date'] = curr_timestamp
with open(config_file_path, 'w+') as config_file:
config_file.write(json.dumps(configs))
return True
def reset_index(self):
"""
Removes all files inside repo's index dir
"""
index_dir = self.index_path
# List all files inside index
for filename in os.listdir(index_dir):
file_path = os.path.join(index_dir, filename)
# Delete files, links and dirs
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
def get_index_meta(self):
"""
Returns parsed meta file of index or `False` if file does not exist
"""
meta_file_path = os.path.join(self.objects_dir_path,
AIM_COMMIT_META_FILE_NAME)
if not os.path.isfile(meta_file_path):
return False
with open(meta_file_path, 'r') as meta_file:
meta_file_content = json.load(meta_file)
return meta_file_content
def is_archived(self, experiment_name: str,
run_hash: str) -> Optional[bool]:
run_dir_path = get_experiment_run_path(self.path, experiment_name,
run_hash)
config_file_path = os.path.join(run_dir_path,
AIM_COMMIT_CONFIG_FILE_NAME)
if not os.path.exists(config_file_path):
return None
with open(config_file_path, 'r') as config_file:
try:
config = json.loads(config_file.read())
except:
return None
return config.get('archived')
def archive(self, experiment_name: str, run_hash: str) -> bool:
return self._toggle_archive_flag(experiment_name, run_hash, True)
def unarchive(self, experiment_name: str, run_hash: str) -> bool:
return self._toggle_archive_flag(experiment_name, run_hash, False)
def _toggle_archive_flag(self, experiment_name: str,
run_hash: str, flag: bool) -> bool:
run_dir_path = get_experiment_run_path(self.path, experiment_name,
run_hash)
config_file_path = os.path.join(run_dir_path,
AIM_COMMIT_CONFIG_FILE_NAME)
with open(config_file_path, 'r') as config_file:
try:
config = json.loads(config_file.read())
except:
return False
config['archived'] = flag
with open(config_file_path, 'w') as config_file:
try:
config_file.write(json.dumps(config))
except:
return False
return True
def save_diff(self, diff):
"""
Saves diff to the repo
"""
diff_dir_path = os.path.join(self.objects_dir_path,
AIM_DIFF_DIR_NAME)
diff_file_path = os.path.join(diff_dir_path,
AIM_DIFF_FILE_NAME)
# Create `diff` directory
os.makedirs(diff_dir_path, exist_ok=True)
# Write diff content to the `diff` file
with open(diff_file_path, 'w+') as diff_file:
diff_file.write(diff)
def ls_branch_files(self, branch):
"""
Returns list of files of the specified branch
"""
branch_path = os.path.join(self.path, branch)
return ls_dir([branch_path])
def ls_commit_files(self, branch, commit):
"""
Returns list of files of the specified commit
"""
commit_path = os.path.join(self.path, branch, commit)
return ls_dir([commit_path])
def select(self,
select_fields: List[str] = [],
expression: Optional[
Union[str,
Expression,
BinaryExpressionTree]] = None,
default_expression: Optional[
Union[str,
Expression,
BinaryExpressionTree]] = None,
):
select_result = SelectResult(select_fields)
runs = {
exp_name: [
Run(self, exp_name, run_hash)
for run_hash in self.list_branch_commits(exp_name)
]
for exp_name in self.list_branches()
}
# Build expression tree
if expression:
expression = build_bet(expression)
expression.strict = True
if default_expression:
default_expression = build_bet(default_expression)
default_expression.strict = True
if expression:
expression.concat(default_expression)
else:
expression = default_expression
for experiment_runs in runs.values():
for run in experiment_runs:
# Dictionary representing all search fields
fields = {
'experiment': run.experiment_name,
'run': run.config, # Run configs (date, name, archived etc)
'params': run.params, # Run parameters (`NestedMap`)
}
# Default parameters - those passed without namespace
default_params = {
'params': (run.params.get(AIM_NESTED_MAP_DEFAULT) or {}),
}
# Search metrics
for metric_name, metric in run.get_all_metrics().items():
fields['metric'] = metric_name
for trace in metric.get_all_traces():
fields['context'] = trace.context
# Pass fields in descending order by priority
if expression is None:
res = True
else:
res = expression.match(fields,
run.params,
default_params)
if res is not True:
continue
# Append trace data if metric is selected
for select_field in select_fields:
if select_field == metric_name:
metric.append(trace)
run.add(metric)
break
# Append run if either metric or param is selected
for select_field in select_fields:
if select_field == metric_name:
select_result.append_run(run)
break
field_val = get_dict_item_by_path(run.params,
select_field)
if field_val is not None:
select_result.append_run(run)
break
return select_result
def select_runs(self,
expression: Optional[
Union[str,
Expression,
BinaryExpressionTree]] = None,
default_expression: Optional[
Union[str,
Expression,
BinaryExpressionTree]] = None,
) -> List[Run]:
runs = {
exp_name: [
Run(self, exp_name, run_hash)
for run_hash in self.list_branch_commits(exp_name)
]
for exp_name in self.list_branches()
}
matched_runs = [] # type: List[Run]
# Build expression tree
if expression:
expression = build_bet(expression)
expression.strict = True
if default_expression:
default_expression = build_bet(default_expression)
default_expression.strict = True
if expression:
expression.concat(default_expression)
else:
expression = default_expression
for experiment_runs in runs.values():
for run in experiment_runs:
# Add metrics path modifier
expression.dump_path_modifiers()
if AIM_MAP_METRICS_KEYWORD in run.params.keys():
expression.add_path_modifier(
lambda path_token: self.metrics_path_checker(
path_token,
run.config.keys()),
lambda path_token: self.metrics_path_modifier(
path_token,
run.params[AIM_MAP_METRICS_KEYWORD])
)
# Dictionary representing all search fields
fields = {
'experiment': run.experiment_name,
'run': run.config, # Run configs (date, name, archived etc)
'params': run.params, # Run parameters (`NestedMap`)
}
# Default parameters - ones passed without namespace
default_params = run.params.get(AIM_NESTED_MAP_DEFAULT) or {}
if not expression:
res = True
else:
res = expression.match(fields,
run.params,
default_params)
if res is True:
matched_runs.append(run)
return matched_runs
def select_metrics(self, select_metrics: Union[str, List[str], Tuple[str]],
expression: Optional[
Union[str,
Expression,
BinaryExpressionTree]] = None,
default_expression: Optional[
Union[str,
Expression,
BinaryExpressionTree]] = None,
) -> List[Run]:
"""
Searches repo and returns matching metrics
"""
if isinstance(select_metrics, str):
select_metrics = [select_metrics]
runs = {
exp_name: [
Run(self, exp_name, run_hash)
for run_hash in self.list_branch_commits(exp_name)
]
for exp_name in self.list_branches()
}
matched_runs = [] # type: List[Run]
expression = build_bet(expression)
expression.strict = True
if default_expression:
default_expression = build_bet(default_expression)
expression.concat(default_expression)
for experiment_runs in runs.values():
for run in experiment_runs:
# Add metrics path modifier
expression.dump_path_modifiers()
if AIM_MAP_METRICS_KEYWORD in run.params.keys():
expression.add_path_modifier(
lambda path_token: self.metrics_path_checker(
path_token,
run.config.keys()),
lambda path_token: self.metrics_path_modifier(
path_token,
run.params[AIM_MAP_METRICS_KEYWORD])
)
# Dictionary representing all search fields
fields = {
'experiment': run.experiment_name,
'run': run.config, # Run configs (date, name, archived etc)
'params': run.params, # Run parameters (`NestedMap`)
}
# Default parameters - ones passed without namespace
default_params = run.params.get(AIM_NESTED_MAP_DEFAULT) or {}
# Search metrics
for metric_name, metric in run.get_all_metrics().items():
if metric_name not in select_metrics:
continue
fields['metric'] = metric_name
for trace in metric.get_all_traces():
fields['context'] = trace.context
# Pass fields in descending order by priority
if expression is None:
res = True
else:
res = expression.match(fields,
run.params,
default_params)
if res is True:
metric.append(trace)
run.add(metric)
if run not in matched_runs:
matched_runs.append(run)
return matched_runs
@staticmethod
def metrics_path_checker(path, run_fields: list) -> bool:
path = str(path)
if not path.startswith('run.'):
return False
identifiers = path.split('.')[1:]
if len(identifiers) == 0 or identifiers[0] in run_fields:
return False
return True
@staticmethod
def metrics_path_modifier(path, metrics) -> Optional[bool]:
path = str(path)
if '.' not in path:
return None
identifiers = path.split('.')
if len(identifiers) < 2:
return None
metric_name = identifiers[1]
if len(identifiers) > 2 and identifiers[-1] in ('min', 'max', 'last'):
value_field = identifiers[-1]
identifiers = identifiers[:-1]
else:
value_field = 'last'
context_identifiers = identifiers[2:]
if metric_name not in metrics:
return None
metric_data = metrics[metric_name]
for trace in metric_data:
context_values = list(map(lambda c: c[1], trace['context']))
if all(c in context_values for c in context_identifiers):
return trace['values'][value_field]
return None
def select_run_metrics(self, experiment_name: str, run_hash: str,
select_metrics: Optional[
Union[str, List[str], Tuple[str]]
] = None
) -> Optional[Run]:
if not self.run_exists(experiment_name, run_hash):
return None
if select_metrics is not None and isinstance(select_metrics, str):
select_metrics = [select_metrics]
run = Run(self, experiment_name, run_hash)
for metric_name, metric in run.get_all_metrics().items():
if select_metrics is None or metric_name in select_metrics:
for trace in metric.get_all_traces():
metric.append(trace)
run.add(metric)
return run
    def create_logs(self):
        """
        Creates the logs dir in .aim to store error and activity logs
        for cli and sdk respectively
        """
        logs_path = os.path.join(self.path, AIM_LOGGING_DIR_NAME)
        # NOTE(review): os.mkdir raises FileExistsError when the dir already
        # exists — confirm callers invoke this at most once per repo
        os.mkdir(logs_path)
def get_logs_dir(self):
return os.path.join(self.path, AIM_LOGGING_DIR_NAME) | 0.408631 | 0.078395 |
# use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
import time;
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("cmhal","1");
obj1 = tdklib.TDKScriptingLibrary("tdkbtr181","1");
#IP and Port of box, No need to change,
#This will be replaced with correspoing Box Ip and port while executing script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_CMHAL_ClearDocsisEventLog');
obj1.configureTestCase(ip,port,'TS_CMHAL_ClearDocsisEventLog');
#Get the result of connection with test component and DUT
loadmodulestatus =obj.getLoadModuleResult();
loadmodulestatus1 =obj1.getLoadModuleResult();
print "[LIB LOAD STATUS] : %s" %loadmodulestatus ;
print "[LIB LOAD STATUS] : %s" %loadmodulestatus1 ;
if "SUCCESS" in loadmodulestatus.upper() and "SUCCESS" in loadmodulestatus1.upper():
obj.setLoadModuleStatus("SUCCESS");
tdkTestObj = obj1.createTestStep('TDKB_TR181Stub_Get');
tdkTestObj.addParameter("ParamName","Device.X_CISCO_COM_CableModem.DocsisLogNumberOfEntries");
expectedresult="SUCCESS";
#Execute the test case in DUT
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
numofDocsisLog = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 1: Get the number of docsis logs";
print "EXPECTED RESULT 1: Should get the number of docsis logs";
print "ACTUAL RESULT 1: number of docsis logs is %s" %numofDocsisLog;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
if int(numofDocsisLog) > 0:
#Script to load the configuration file of the component
tdkTestObj = obj.createTestStep("CMHAL_ClearDocsisEventLog");
expectedresult="SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 2: Clear the Docsis logs";
print "EXPECTED RESULT 2: Should clear the Docsis successfully";
print "ACTUAL RESULT 2: %s" %details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
time.sleep(30);
#Validate the clear function using get
tdkTestObj = obj1.createTestStep('TDKB_TR181Stub_Get');
tdkTestObj.addParameter("ParamName","Device.X_CISCO_COM_CableModem.DocsisLogNumberOfEntries");
expectedresult="SUCCESS";
#Execute the test case in DUT
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
numofDocsisLog1 = tdkTestObj.getResultDetails();
if expectedresult in actualresult and int(numofDocsisLog1)== 0:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 3: Get the number of docsis logs";
print "EXPECTED RESULT 3: Should get the number of docsis logs";
print "ACTUAL RESULT 3: number of docsis logs is %s" %numofDocsisLog1;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 3: Get the number of docsis logs";
print "EXPECTED RESULT 3: Should get the number of docsis logs";
print "ACTUAL RESULT 3: number of docsis logs is %s" %numofDocsisLog1;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 2: Clear teh Docsis logs";
print "EXPECTED RESULT 2: Should clear the Docsis successfully";
print "ACTUAL RESULT 2: %s" %details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
print "Number of Docsis Log entries is already zero";
else:
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 1: Get the number of docsis logs";
print "EXPECTED RESULT 1: Should get the number of docsis logs";
print "ACTUAL RESULT 1: number of docsis logs is %s" %numofDocsisLog;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
obj.unloadModule("cmhal");
obj1.unloadModule("tdkbtr181");
else:
print "Failed to load the module";
obj.setLoadModuleStatus("FAILURE");
print "Module loading failed"; | testscripts/RDKB/component/CMHAL/TS_CMHAL_ClearDocsisEventLog.py | # use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
import time;
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("cmhal","1");
obj1 = tdklib.TDKScriptingLibrary("tdkbtr181","1");
#IP and Port of box, No need to change,
#This will be replaced with correspoing Box Ip and port while executing script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_CMHAL_ClearDocsisEventLog');
obj1.configureTestCase(ip,port,'TS_CMHAL_ClearDocsisEventLog');
#Get the result of connection with test component and DUT
loadmodulestatus =obj.getLoadModuleResult();
loadmodulestatus1 =obj1.getLoadModuleResult();
print "[LIB LOAD STATUS] : %s" %loadmodulestatus ;
print "[LIB LOAD STATUS] : %s" %loadmodulestatus1 ;
if "SUCCESS" in loadmodulestatus.upper() and "SUCCESS" in loadmodulestatus1.upper():
obj.setLoadModuleStatus("SUCCESS");
tdkTestObj = obj1.createTestStep('TDKB_TR181Stub_Get');
tdkTestObj.addParameter("ParamName","Device.X_CISCO_COM_CableModem.DocsisLogNumberOfEntries");
expectedresult="SUCCESS";
#Execute the test case in DUT
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
numofDocsisLog = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 1: Get the number of docsis logs";
print "EXPECTED RESULT 1: Should get the number of docsis logs";
print "ACTUAL RESULT 1: number of docsis logs is %s" %numofDocsisLog;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
if int(numofDocsisLog) > 0:
#Script to load the configuration file of the component
tdkTestObj = obj.createTestStep("CMHAL_ClearDocsisEventLog");
expectedresult="SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 2: Clear the Docsis logs";
print "EXPECTED RESULT 2: Should clear the Docsis successfully";
print "ACTUAL RESULT 2: %s" %details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
time.sleep(30);
#Validate the clear function using get
tdkTestObj = obj1.createTestStep('TDKB_TR181Stub_Get');
tdkTestObj.addParameter("ParamName","Device.X_CISCO_COM_CableModem.DocsisLogNumberOfEntries");
expectedresult="SUCCESS";
#Execute the test case in DUT
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
numofDocsisLog1 = tdkTestObj.getResultDetails();
if expectedresult in actualresult and int(numofDocsisLog1)== 0:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 3: Get the number of docsis logs";
print "EXPECTED RESULT 3: Should get the number of docsis logs";
print "ACTUAL RESULT 3: number of docsis logs is %s" %numofDocsisLog1;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 3: Get the number of docsis logs";
print "EXPECTED RESULT 3: Should get the number of docsis logs";
print "ACTUAL RESULT 3: number of docsis logs is %s" %numofDocsisLog1;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 2: Clear teh Docsis logs";
print "EXPECTED RESULT 2: Should clear the Docsis successfully";
print "ACTUAL RESULT 2: %s" %details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
print "Number of Docsis Log entries is already zero";
else:
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 1: Get the number of docsis logs";
print "EXPECTED RESULT 1: Should get the number of docsis logs";
print "ACTUAL RESULT 1: number of docsis logs is %s" %numofDocsisLog;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
obj.unloadModule("cmhal");
obj1.unloadModule("tdkbtr181");
else:
print "Failed to load the module";
obj.setLoadModuleStatus("FAILURE");
print "Module loading failed"; | 0.286169 | 0.3031 |
import json
import unittest
import boto3
from botocore.exceptions import (
ClientError,
)
from moto import (
mock_s3,
mock_sts,
)
from azul import (
cached_property,
config,
)
from azul.logging import (
configure_test_logging,
)
from azul.plugins import (
RepositoryPlugin,
)
from azul.portal_service import (
PortalService,
)
from azul.types import (
JSONs,
)
from azul.version_service import (
NoSuchObjectVersion,
)
from version_table_test_case import (
VersionTableTestCase,
)
# noinspection PyPep8Naming
def setUpModule():
    """Configure azul's test logging before any test in this module runs."""
    configure_test_logging()
@mock_s3
@mock_sts
class TestPortalService(VersionTableTestCase):
    """CRUD tests for PortalService against a moto-mocked, versioned S3 bucket."""

    # Minimal stand-in portal DB used to check that writes round-trip intact.
    dummy_db = [
        {
            "spam": "eggs"
        }
    ]

    @cached_property
    def plugin_db(self) -> JSONs:
        # Must be lazy so the mock catalog's repository plugin is used
        catalog = config.default_catalog
        plugin = RepositoryPlugin.load(catalog).create(catalog)
        return plugin.portal_db()

    # Fixture with per-stage entity_ids maps; demultiplex() should reduce it
    # to plain entity_id lists for the current deployment stage.
    multiplex_db = [
        {
            "integrations": [
                # this should be flattened
                {
                    "entity_ids": {
                        config.dss_deployment_stage: ["good"],
                        "other": ["bad"],
                    }
                },
                # this should be removed (entity_ids defined but missing for current stage)
                {
                    "entity_ids": {
                        config.dss_deployment_stage: [],
                        "other": ["whatever"]
                    }
                },
                # this should be present but still empty (missing entity_ids field is ignored)
                {
                }
            ]
        }
    ]

    # Expected result of demultiplexing multiplex_db above.
    demultiplex_db = [
        {
            "integrations": [
                {"entity_ids": ["good"]},
                {}
            ]
        }
    ]

    def setUp(self):
        """Create the versioned portal-DB bucket in mock S3."""
        super().setUp()
        self.s3_client = boto3.client('s3')
        self.s3_client.create_bucket(Bucket=config.portal_db_bucket)
        self.s3_client.put_bucket_versioning(Bucket=config.portal_db_bucket,
                                             VersioningConfiguration={
                                                 'Status': 'Enabled',
                                                 'MFADelete': 'Disabled'
                                             })
        self.portal_service = PortalService()

    def tearDown(self):
        """Delete all object versions and the bucket itself."""
        super().tearDown()
        # To ensure that the bucket is cleared between tests, all versions
        # must be deleted. The most convenient way to do this is just to
        # disabling versioning and perform a single delete.
        # NOTE(review): real S3 only accepts 'Enabled'/'Suspended' for
        # Status; 'Disabled' looks like a moto-only value -- confirm.
        self.s3_client.put_bucket_versioning(Bucket=config.portal_db_bucket,
                                             VersioningConfiguration={
                                                 'Status': 'Disabled',
                                                 'MFADelete': 'Disabled'
                                             })
        self.s3_client.delete_object(Bucket=config.portal_db_bucket,
                                     Key=config.portal_db_object_key)
        self.s3_client.delete_bucket(Bucket=config.portal_db_bucket)

    def download_db(self) -> JSONs:
        """Fetch the latest version of the portal DB directly from mock S3."""
        response = self.s3_client.get_object(Bucket=config.portal_db_bucket,
                                             Key=config.portal_db_object_key)
        return json.loads(response['Body'].read().decode())

    def test_demultiplex(self):
        """demultiplex() must transform the fixture and not be a no-op."""
        result = self.portal_service.demultiplex(self.multiplex_db)
        self.assertNotEqual(result, self.multiplex_db)
        self.assertEqual(result, self.demultiplex_db)

    def test_internal_crud(self):
        """Exercise the private _create/_read/_write/_delete DB operations."""
        # Bucket starts empty, so a direct download must fail.
        self.assertRaises(ClientError, self.download_db)
        # These tests all ignore the issue of eventual consistency, which may be
        # a non-issue when mocking.
        with self.subTest('create'):
            create_db, version = self.portal_service._create_db()
            download_db = self.download_db()  # Grabs latest version
            self.assertEqual(create_db, download_db)
            self.assertEqual(create_db, self.portal_service.demultiplex(self.plugin_db))
        with self.subTest('read'):
            read_db = self.portal_service._read_db(version)
            self.assertEqual(read_db, download_db)
            self.assertRaises(NoSuchObjectVersion, self.portal_service._read_db, 'fake_version')
        with self.subTest('update'):
            version = self.portal_service._write_db(self.dummy_db, version)
            read_db = self.portal_service._read_db(version)
            download_db = self.download_db()
            self.assertEqual(read_db, download_db)
            self.assertEqual(read_db, self.dummy_db)
        with self.subTest('delete'):
            self.portal_service._delete_db(version)
            self.assertRaises(NoSuchObjectVersion, self.portal_service._read_db, version)

    def test_crud(self):
        """Exercise the public _crud entry point across create/update/read."""
        # DB not initially present in mock S3
        self.assertRaises(ClientError, self.download_db)

        def test(callback, expected):
            # Apply the callback, then verify the stored DB matches both the
            # expected value and what is actually in the bucket.
            self.portal_service._crud(callback)
            self.portal_service._crud(lambda db: self.assertEqual(db, expected))
            self.portal_service._crud(lambda db: self.assertEqual(db, self.download_db()))

        # It would be cool if we could force version conflicts but I'm not sure how
        test_cases = [
            ('create', (lambda db: None), self.portal_service.demultiplex(self.plugin_db)),
            ('update', (lambda db: self.dummy_db), self.dummy_db),
            ('read', (lambda db: None), self.dummy_db)
        ]
        # Note that bucket is not re-emptied between sub-tests
        for op, callback, expected in test_cases:
            with self.subTest(operation=op):
                test(callback, expected)
# Allow running this test module directly, outside the test runner.
if __name__ == '__main__':
    unittest.main()
import unittest
import boto3
from botocore.exceptions import (
ClientError,
)
from moto import (
mock_s3,
mock_sts,
)
from azul import (
cached_property,
config,
)
from azul.logging import (
configure_test_logging,
)
from azul.plugins import (
RepositoryPlugin,
)
from azul.portal_service import (
PortalService,
)
from azul.types import (
JSONs,
)
from azul.version_service import (
NoSuchObjectVersion,
)
from version_table_test_case import (
VersionTableTestCase,
)
# noinspection PyPep8Naming
def setUpModule():
configure_test_logging()
@mock_s3
@mock_sts
class TestPortalService(VersionTableTestCase):
dummy_db = [
{
"spam": "eggs"
}
]
@cached_property
def plugin_db(self) -> JSONs:
# Must be lazy so the mock catalog's repository plugin is used
catalog = config.default_catalog
plugin = RepositoryPlugin.load(catalog).create(catalog)
return plugin.portal_db()
multiplex_db = [
{
"integrations": [
# this should be flattened
{
"entity_ids": {
config.dss_deployment_stage: ["good"],
"other": ["bad"],
}
},
# this should be removed (entity_ids defined but missing for current stage)
{
"entity_ids": {
config.dss_deployment_stage: [],
"other": ["whatever"]
}
},
# this should be present but still empty (missing entity_ids field is ignored)
{
}
]
}
]
demultiplex_db = [
{
"integrations": [
{"entity_ids": ["good"]},
{}
]
}
]
def setUp(self):
super().setUp()
self.s3_client = boto3.client('s3')
self.s3_client.create_bucket(Bucket=config.portal_db_bucket)
self.s3_client.put_bucket_versioning(Bucket=config.portal_db_bucket,
VersioningConfiguration={
'Status': 'Enabled',
'MFADelete': 'Disabled'
})
self.portal_service = PortalService()
def tearDown(self):
super().tearDown()
# To ensure that the bucket is cleared between tests, all versions
# must be deleted. The most convenient way to do this is just to
# disabling versioning and perform a single delete.
self.s3_client.put_bucket_versioning(Bucket=config.portal_db_bucket,
VersioningConfiguration={
'Status': 'Disabled',
'MFADelete': 'Disabled'
})
self.s3_client.delete_object(Bucket=config.portal_db_bucket,
Key=config.portal_db_object_key)
self.s3_client.delete_bucket(Bucket=config.portal_db_bucket)
def download_db(self) -> JSONs:
response = self.s3_client.get_object(Bucket=config.portal_db_bucket,
Key=config.portal_db_object_key)
return json.loads(response['Body'].read().decode())
def test_demultiplex(self):
result = self.portal_service.demultiplex(self.multiplex_db)
self.assertNotEqual(result, self.multiplex_db)
self.assertEqual(result, self.demultiplex_db)
def test_internal_crud(self):
self.assertRaises(ClientError, self.download_db)
# These tests all ignore the issue of eventual consistency, which may be
# a non-issue when mocking.
with self.subTest('create'):
create_db, version = self.portal_service._create_db()
download_db = self.download_db() # Grabs latest version
self.assertEqual(create_db, download_db)
self.assertEqual(create_db, self.portal_service.demultiplex(self.plugin_db))
with self.subTest('read'):
read_db = self.portal_service._read_db(version)
self.assertEqual(read_db, download_db)
self.assertRaises(NoSuchObjectVersion, self.portal_service._read_db, 'fake_version')
with self.subTest('update'):
version = self.portal_service._write_db(self.dummy_db, version)
read_db = self.portal_service._read_db(version)
download_db = self.download_db()
self.assertEqual(read_db, download_db)
self.assertEqual(read_db, self.dummy_db)
with self.subTest('delete'):
self.portal_service._delete_db(version)
self.assertRaises(NoSuchObjectVersion, self.portal_service._read_db, version)
def test_crud(self):
# DB not initially present in mock S3
self.assertRaises(ClientError, self.download_db)
def test(callback, expected):
self.portal_service._crud(callback)
self.portal_service._crud(lambda db: self.assertEqual(db, expected))
self.portal_service._crud(lambda db: self.assertEqual(db, self.download_db()))
# It would be cool if we could force version conflicts but I'm not sure how
test_cases = [
('create', (lambda db: None), self.portal_service.demultiplex(self.plugin_db)),
('update', (lambda db: self.dummy_db), self.dummy_db),
('read', (lambda db: None), self.dummy_db)
]
# Note that bucket is not re-emptied between sub-tests
for op, callback, expected in test_cases:
with self.subTest(operation=op):
test(callback, expected)
if __name__ == '__main__':
unittest.main() | 0.52342 | 0.204461 |
import machine
from time import sleep_us
class pca9865(object):
    '''16 servo controller. Use index 0-15 for the servo #.

    I2C driver for a 16-channel PWM chip (the register map matches the NXP
    PCA9685; the class name's "9865" appears to be a transposition).
    '''
    _ADDRESS = 0x40        # Default I2C bus address of the chip.
    _MODE1 = 0             # MODE1 configuration register.
    _PRESCALE = 0xFE       # PWM frequency prescale register.
    _LED0_ON_L = 0x6  # We only use LED0 and offset 0-16 from it.
    # _LED0_ON_H = const(0x7)
    # _LED0_OFF_L = const(0x8)
    # _LED0_OFF_H = const(0x9)
    # _ALLLED_ON_L = const(0xFA)
    # _ALLLED_ON_H = const(0xFB)
    # _ALLLED_OFF_L = const(0xFC)
    # _ALLLED_OFF_H = const(0xFD)
    _DEFAULTFREQ = 60      # Hz; typical servo refresh rate.
    _MINPULSE = 120        # PWM counter value mapped to 0% by set().
    _MAXPULSE = 600        # PWM counter value mapped to 100% by set().

    def __init__(self, aSDA, aSCL):
        '''aSDA is I2C SDA pin #, aSCL is I2C SCL pin #.'''
        super(pca9865, self).__init__()
        self.i2c = machine.I2C(scl=machine.Pin(aSCL), sda=machine.Pin(aSDA))
        self._buffer = bytearray(4)  # Reusable 4-byte buffer for one channel's on/off pair.
        self._b1 = bytearray(1)      # Reusable 1-byte buffer for register reads/writes.
        sleep_us(50)
        self.reset()
        self.minmax(self._MINPULSE, self._MAXPULSE)

    def minmax(self, aMin, aMax):
        '''Set min/max and calculate range.'''
        self._min = aMin
        self._max = aMax
        self._range = aMax - aMin

    def read(self, aLoc):
        '''Read 8 bit value and return.'''
        self.i2c.readfrom_mem_into(self._ADDRESS, aLoc, self._b1)
        return self._b1[0]

    def writebuffer(self, aBuffer, aLoc):
        """Write buffer to given address."""
        self.i2c.writeto_mem(self._ADDRESS, aLoc, aBuffer)

    def write(self, aVal, aLoc):
        """Write 8 bit integer aVal to given address aLoc."""
        self._b1[0] = aVal
        self.writebuffer(self._b1, aLoc)

    def reset(self):
        '''Reset the controller and set default frequency.'''
        self.write(0, self._MODE1)
        self.setfreq(self._DEFAULTFREQ)

    def setfreq(self, aFreq):
        '''Set frequency for all servos. A good value is 60hz (default).'''
        aFreq *= 0.9  # Correct for overshoot in frequency setting.
        prescalefloat = (6103.51562 / aFreq) - 1  # 25000000 / 4096 / freq.
        prescale = int(prescalefloat + 0.5)  # Round to nearest integer.
        oldmode = self.read(self._MODE1)
        # 0x10 is the sleep bit; the prescale register can only be written
        # while the oscillator is asleep (per PCA9685 datasheet -- confirm).
        newmode = (oldmode & 0x7F) | 0x10
        self.write(newmode, self._MODE1)
        self.write(prescale, self._PRESCALE)
        self.write(oldmode, self._MODE1)  # Wake back up with the previous mode.
        sleep_us(50)
        self.write(oldmode | 0xA1, self._MODE1)  # This sets the MODE1 register to turn on auto increment.

    def setpwm(self, aServo, aOn, aOff):
        '''aServo = 0-15.
           aOn = 16 bit on value.
           aOff = 16 bit off value.
           Raises Exception if aServo is outside 0-15.
        '''
        if 0 <= aServo <= 15:
            # Data = on-low, on-high, off-low and off-high. That's 4 bytes each servo.
            loc = self._LED0_ON_L + (aServo * 4)
            # print(loc)
            self._buffer[0] = aOn
            self._buffer[1] = aOn >> 8
            self._buffer[2] = aOff
            self._buffer[3] = aOff >> 8
            self.writebuffer(self._buffer, loc)
        else:
            raise Exception('Servo index {} out of range.'.format(str(aServo)))

    def off(self, aServo):
        '''Turn off a servo.'''
        self.setpwm(aServo, 0, 0)

    def alloff(self):
        '''Turn all servos off.'''
        for x in range(0, 16):
            self.off(x)

    def set(self, aServo, aPerc):
        '''Set the 0-100%. If < 0 turns servo off.'''
        if aPerc < 0:
            self.off(aServo)
        else:
            # Map percentage linearly into the [min, max] counter range.
            val = self._min + ((self._range * aPerc) // 100)
            self.setpwm(aServo, 0, val)

    def setangle(self, aServo, aAngle):
        '''Set angle -90 to +90. < -90 is off.'''
        # ((a + 90.0) * 100.0) / 180.0
        perc = int((aAngle + 90.0) * 0.5556)  # Convert angle +/- 90 to 0-100%
        self.set(aServo, perc)
import machine
from time import sleep_us
class pca9865(object):
'''16 servo contoller. Use index 0-15 for the servo #.'''
_ADDRESS = 0x40
_MODE1 = 0
_PRESCALE = 0xFE
_LED0_ON_L = 0x6 # We only use LED0 and offset 0-16 from it.
# _LED0_ON_H = const(0x7)
# _LED0_OFF_L = const(0x8)
# _LED0_OFF_H = const(0x9)
# _ALLLED_ON_L = const(0xFA)
# _ALLLED_ON_H = const(0xFB)
# _ALLLED_OFF_L = const(0xFC)
# _ALLLED_OFF_H = const(0xFD)
_DEFAULTFREQ = 60
_MINPULSE = 120
_MAXPULSE = 600
def __init__(self, aSDA, aSCL):
'''aSDA is I2C SDA pin #, aSCL is I2C SCL pin #.'''
super(pca9865, self).__init__()
self.i2c = machine.I2C(scl=machine.Pin(aSCL), sda=machine.Pin(aSDA))
self._buffer = bytearray(4)
self._b1 = bytearray(1)
sleep_us(50)
self.reset()
self.minmax(self._MINPULSE, self._MAXPULSE)
def minmax(self, aMin, aMax):
'''Set min/max and calculate range.'''
self._min = aMin
self._max = aMax
self._range = aMax - aMin
def read(self, aLoc):
'''Read 8 bit value and return.'''
self.i2c.readfrom_mem_into(self._ADDRESS, aLoc, self._b1)
return self._b1[0]
def writebuffer(self, aBuffer, aLoc):
"""Write buffer to given address."""
self.i2c.writeto_mem(self._ADDRESS, aLoc, aBuffer)
def write(self, aVal, aLoc):
"""Write 8 bit integer aVal to given address aLoc."""
self._b1[0] = aVal
self.writebuffer(self._b1, aLoc)
def reset(self):
'''Reset the controller and set default frequency.'''
self.write(0, self._MODE1)
self.setfreq(self._DEFAULTFREQ)
def setfreq(self, aFreq):
'''Set frequency for all servos. A good value is 60hz (default).'''
aFreq *= 0.9 # Correct for overshoot in frequency setting.
prescalefloat = (6103.51562 / aFreq) - 1 # 25000000 / 4096 / freq.
prescale = int(prescalefloat + 0.5)
oldmode = self.read(self._MODE1)
newmode = (oldmode & 0x7F) | 0x10
self.write(newmode, self._MODE1)
self.write(prescale, self._PRESCALE)
self.write(oldmode, self._MODE1)
sleep_us(50)
self.write(oldmode | 0xA1, self._MODE1) # This sets the MODE1 register to turn on auto increment.
def setpwm(self, aServo, aOn, aOff):
'''aServo = 0-15.
aOn = 16 bit on value.
aOff = 16 bit off value.
'''
if 0 <= aServo <= 15:
# Data = on-low, on-high, off-low and off-high. That's 4 bytes each servo.
loc = self._LED0_ON_L + (aServo * 4)
# print(loc)
self._buffer[0] = aOn
self._buffer[1] = aOn >> 8
self._buffer[2] = aOff
self._buffer[3] = aOff >> 8
self.writebuffer(self._buffer, loc)
else:
raise Exception('Servo index {} out of range.'.format(str(aServo)))
def off(self, aServo):
'''Turn off a servo.'''
self.setpwm(aServo, 0, 0)
def alloff(self):
'''Turn all servos off.'''
for x in range(0, 16):
self.off(x)
def set(self, aServo, aPerc):
'''Set the 0-100%. If < 0 turns servo off.'''
if aPerc < 0:
self.off(aServo)
else:
val = self._min + ((self._range * aPerc) // 100)
self.setpwm(aServo, 0, val)
def setangle(self, aServo, aAngle):
'''Set angle -90 to +90. < -90 is off.'''
# ((a + 90.0) * 100.0) / 180.0
perc = int((aAngle + 90.0) * 0.5556) # Convert angle +/- 90 to 0-100%
self.set(aServo, perc) | 0.464659 | 0.222531 |
CSV-compare
-----------
Compare table data stored in CSV (comma seperated values) format.
"""
import re
import csv
import sys
import os
def _pr_list(l1, l2, replace_chars = '[\n ]'):
    """ Calculate precision and recall regarding elements of a list.
        When a 1:1 match cannot be achieved, the list pointers will be
        moved forward until a match occurs (first of list A, then of list B).
        The closest match will count, and matching will continue from those
        list positions onwards.
        The replace_chars parameter is used to remove characters from the
        strings before comparing. The default will remove newlines and spaces.

        Returns (precision, recall) as floats; (0, 0) if either list is empty.
    """
    def _fnext(l, item):
        # Index of the first element of l equal to item after normalisation,
        # or -1 when item does not occur in l at all.
        item = re.sub(replace_chars, '', item).strip()
        for i, txt in enumerate(l):
            txt = re.sub(replace_chars, '', txt).strip()
            if txt == item:
                return i
        return -1
    if len(l2)==0 or len(l1)==0:
        return 0, 0
    i = 0
    j = 0
    match = 0
    while len(l1)>i and len(l2)>j:
        t1 = re.sub(replace_chars, '', l1[i]).strip()
        t2 = re.sub(replace_chars, '', l2[j]).strip()
        if t1 == t2:
            match += 1
            i += 1
            j += 1
        else:
            # Mismatch: find how far ahead each list re-synchronises with the
            # other's current element, and jump to the closer re-sync point.
            ii = _fnext(l1[i:], l2[j])
            jj = _fnext(l2[j:], l1[i])
            if ii>=0 and (ii<jj or jj<0): i+=ii
            elif jj>=0: j+=jj
            else:
                # Neither current element occurs ahead in the other list:
                # advance both pointers past the unmatched pair.
                i+=1
                j+=1
    # match is scored against l2 for precision and l1 for recall.
    return float(match)/len(l2), float(match)/len(l1)
def clean_table(tab):
    """ Remove trailing empty cells resulting from the way some
        spreadsheet applications output csv for multi table documents.

        Strips the largest number of trailing columns that is empty in
        every row. Returns [] for an empty table.
    """
    if len(tab) == 0:
        return []
    # Count the trailing empty cells of each row. The original enumerate/break
    # version under-counted by one for a fully-empty row and referenced an
    # unbound loop variable for an empty row; counting explicitly fixes both.
    n_empty = []
    for row in tab:
        count = 0
        for val in reversed(row):
            if val != '':
                break
            count += 1
        n_empty.append(count)
    # Only columns empty in *all* rows may be stripped.
    strip_cols = min(n_empty)
    cleaned = []
    for row in tab:
        cleaned.append(row[0:len(row) - strip_cols])
    return cleaned
def compare_tables(tab1, tab2):
    """ Compare two tables (2dim lists).

        Returns a dict with structural measures (row/column/dimension
        matches, cell counts) and content measures (precision, recall, F1
        of the flattened cell lists, tab2 scored against ground truth tab1).
    """
    info = {'rows_a': len(tab1),
            'rows_b': len(tab2),
            'rows_match': 1 if len(tab1) == len(tab2) else 0,
            # Default so the key always exists, even for empty tables
            # (the original left it unset, breaking field lookups later).
            'cols_match': 0,
            }
    sizesA = [len(l) for l in tab1]
    sizesB = [len(l) for l in tab2]
    info['dim_match'] = 1 if sizesA == sizesB else 0
    info['size_a'] = sum(sizesA)
    # Bug fix: size_b was computed from sizesA in the original.
    info['size_b'] = sum(sizesB)
    if len(sizesA) > 0 and len(sizesB) > 0:
        # Both tables are rectangular and share the same column count.
        info['cols_match'] = 1 if min(sizesA) == max(sizesA) and \
            min(sizesB) == max(sizesB) and min(sizesA) == min(sizesB) else 0
    # 'flatten' tables into one cell list each for content comparison.
    cellsA = []
    cellsB = []
    for r in tab1: cellsA += [c for c in r]
    for r in tab2: cellsB += [c for c in r]
    info['p'], info['r'] = _pr_list(cellsA, cellsB)
    info['F1'] = F1(info['p'], info['r'])
    return info
def compare_files_pr(file1, file2):
    """ Calculate simple P/R .
        Compare lists of cells, left to right , top to bottom.

        NOTE: Python 2 only (uses the `file` builtin); assumes both inputs
        are comma-delimited, double-quoted csv -- no dialect sniffing here.
    """
    cells = [[], []]
    for i, fname in enumerate([file1, file2]):
        with file(fname) as csvfile:
            rd = csv.reader(csvfile, delimiter=',', quotechar='"')
            for r in rd:
                # Flatten every row into one long cell list per file.
                cells[i] += [c for c in r]
    return _pr_list(*cells)
def compare_files(file1, file2):
    """ Compare two csv files.

        file1 is treated as ground truth. Every ground-truth table is
        compared against the table at the same position in file2, or
        against an empty table when file2 has fewer tables.
    """
    groundtruth = read_tables_from_file(file1)
    try:
        compare = read_tables_from_file(file2)
    except:
        compare = []
    n_gt = len(groundtruth)
    n_cmp = len(compare)
    finfo = {
        'tabcount_a': n_gt,
        'tabcount_b': n_cmp,
        'tabcount_match': n_gt == n_cmp,
        'tables': [],
    }
    for n in range(n_gt):
        # Fall back to an empty table when file2 ran out of tables.
        other = compare[n] if n < n_cmp else [[]]
        comp_info = compare_tables(groundtruth[n], other)
        comp_info['n'] = n
        finfo['tables'].append(comp_info)
    return finfo
def output_compareinfo_csv(file, info, fields=['p', 'r', 'F1']):
    """ Pre-format rows holding similarity measures of each table
        against the ground truth.

        Each row: [file, tabcount-match flag, *selected measures as strings].
    """
    tabmatch = 1 if info['tabcount_match'] else 0
    return [
        [file, str(tabmatch)] + [str(table_info[key]) for key in fields]
        for table_info in info['tables']
    ]
def F1(p, r):
    """ Calculate F1 score from precision and recall.
        Returns zero if one of p, r is zero.
    """
    if p == 0 or r == 0:
        return 0
    return 2 * p * r / (p + r)
def read_tables_from_file(csvfile):
    """ Opens csvfile, returns all tables found.
        Guesses csv format (delimiter, etc.)
        Splits data into different tables at newline (or empty row).
        Returns list of tables.

        NOTE: Python 2 only -- relies on the `file` builtin and the file
        iterator's `.next()` method.
    """
    tables=[]
    table_id = 0
    with file(csvfile) as f:
        sniffer = csv.Sniffer()
        # NOTE(review): sniffing consumes the first line, so the file's
        # first row is never parsed as data -- confirm this is intended
        # (e.g. deliberate header skip).
        dialect = sniffer.sniff(f.next())
        rd = csv.reader(f, delimiter=dialect.delimiter,
                        quotechar=dialect.quotechar)
        for r in rd:
            # Lazily open a slot for the current table.
            if len(tables) <= table_id:
                tables.append([])
            # Begin next table if there is an empty line
            if r == [] or sum([len(v) for v in r]) == 0:
                if len(tables[table_id])>0:
                    table_id+=1
            else:
                tables[table_id].append(r)
    # Drop empty tables and strip their trailing empty columns.
    return [clean_table(t) for t in tables if t!=[]]
# CLI entry point (Python 2):
#   csv-compare.py <file-or-dir A> <file-or-dir B> [name-tool] [print-headers]
# A is the ground truth; B is the output under comparison.
if __name__ == '__main__':
    """ Script usage.
    """
    # Measures emitted per table; the commented-out keys exist in the info
    # dict but are not reported by default.
    fields = [
        #'rows_a', 'rows_b',
        #'size_a', 'size_b',
        'n',
        'rows_match', 'cols_match', 'dim_match',
        'p', 'r', 'F1',]
    limitchar = ' & '  # NOTE(review): appears unused.
    if len(sys.argv) < 3:
        print "Specify two (csv-)files or directories"
        quit(-1)
    # Params 1 + 2 are files or directories
    file1 = sys.argv[1]
    file2 = sys.argv[2]
    srcinfo = [os.path.basename(file1), os.path.basename(file2)]
    # 3rd parameter becomes 'tooldef' (text cols to name rows),
    # and 4th parameter tells whether to print headers
    tooldef = sys.argv[3].split('-') if len(sys.argv) > 3 else ['na', 'na']
    print_headers = len(sys.argv) > 4 and sys.argv[4] in ["1", "y", "yes"]
    if print_headers:
        print ','.join(['name', 'tool', 'src1', 'src2',
                        'filename', 'tabsmatch',] + fields)
    if os.path.isfile(file1) and os.path.isfile(file2):
        # Single file pair.
        inf = compare_files(file1, file2)
        lines = output_compareinfo_csv(file1, inf, fields)
        for l in lines:
            print ','.join(tooldef + srcinfo + l)
    elif os.path.isdir(file1) and os.path.isdir(file2):
        # Directory mode: compare every .csv in A against the same-named
        # file in B; report missing counterparts.
        for f in [path for path in os.listdir(file1) if path[-4:]=='.csv']:
            if os.path.isfile(file2 + '/' + f):
                inf = compare_files(file1 + '/' + f, file2 + '/' + f)
                lines = output_compareinfo_csv(f, inf, fields)
                for l in lines:
                    print ','.join(tooldef + srcinfo + l)
            else:
                print ','.join(['','',] + srcinfo + ['', "Missing {} for {} {}".format(f, *tooldef)])
CSV-compare
-----------
Compare table data stored in CSV (comma seperated values) format.
"""
import re
import csv
import sys
import os
def _pr_list(l1, l2, replace_chars = '[\n ]'):
""" Calculate precision and recall regarding elements of a list.
When a 1:1 match cannot be achieved, the list pointers will be
moved forward until a match occurs (first of list A, then of list B).
The closest match will count, and matching will continue from those
list positions onwards.
The replace_chars parameter is used to remove characters from the
strings before comparing. The default will remove newlines and spaces.
"""
def _fnext(l, item):
item = re.sub(replace_chars, '', item).strip()
for i, txt in enumerate(l):
txt = re.sub(replace_chars, '', txt).strip()
if txt == item:
return i
return -1
if len(l2)==0 or len(l1)==0:
return 0, 0
i = 0
j = 0
match = 0
while len(l1)>i and len(l2)>j:
t1 = re.sub(replace_chars, '', l1[i]).strip()
t2 = re.sub(replace_chars, '', l2[j]).strip()
if t1 == t2:
match += 1
i += 1
j += 1
else:
ii = _fnext(l1[i:], l2[j])
jj = _fnext(l2[j:], l1[i])
if ii>=0 and (ii<jj or jj<0): i+=ii
elif jj>=0: j+=jj
else:
i+=1
j+=1
return float(match)/len(l2), float(match)/len(l1)
def clean_table(tab):
""" Remove trailing empty cells resulting from the way some
spreadsheet application output csv for multi table documents.
"""
if len(tab) == 0:
return []
n_empty=[]
for row in tab:
for n, val in enumerate(reversed(row)):
if val!='':
break
n_empty.append(n)
strip_cols = min(n_empty)
cleaned = []
for row in tab:
cleaned.append(row[0:len(row)-strip_cols])
return cleaned
def compare_tables(tab1, tab2):
""" Compare two tables (2dim lists).
"""
info = {'rows_a':len(tab1),
'rows_b':len(tab2),
'rows_match': 1 if len(tab1) == len(tab2) else 0,
}
sizesA = [len(l) for l in tab1]
sizesB = [len(l) for l in tab2]
info['dim_match'] = 1 if sizesA == sizesB else 0
info['size_a'] = sum(sizesA)
info['size_b'] = sum(sizesA)
if len(sizesA)>0 and len(sizesB)>0:
info['cols_match'] = 1 if min(sizesA) == max(sizesA) and \
min(sizesB) == max(sizesB) and min(sizesA) == min(sizesB) else 0
# 'flatten' tables
cellsA = []
cellsB = []
for r in tab1: cellsA += [c for c in r]
for r in tab2: cellsB += [c for c in r]
info['p'], info['r'] = _pr_list(cellsA, cellsB)
info['F1'] = F1(info['p'], info['r'])
return info
def compare_files_pr(file1, file2):
""" Calculate simple P/R .
Compare lists of cells, left to right , top to bottom.
"""
cells = [[], []]
for i, fname in enumerate([file1, file2]):
with file(fname) as csvfile:
rd = csv.reader(csvfile, delimiter=',', quotechar='"')
for r in rd:
cells[i] += [c for c in r]
return _pr_list(*cells)
def compare_files(file1, file2):
""" Compare two csv files.
"""
groundtruth = read_tables_from_file(file1)
try:
compare = read_tables_from_file(file2)
except:
compare = []
tbs = [groundtruth, compare]
finfo = {'tabcount_a': len(tbs[0]),
'tabcount_b': len(tbs[1]),
'tabcount_match': len(tbs[0]) == len(tbs[1]),
}
finfo['tables']=[]
for n in range(0, len(tbs[0])):
if finfo['tabcount_match']:
comp_info = compare_tables(tbs[0][n], tbs[1][n])
else:
if n < len(tbs[1]):
comp_info = compare_tables(tbs[0][n], tbs[1][n])
else:
comp_info = compare_tables(tbs[0][n], [[]])
comp_info['n']=n
finfo['tables'].append(comp_info)
return finfo
def output_compareinfo_csv(file, info, fields=['p', 'r', 'F1']):
""" Pre-format a row that holds measures about similarity of a table
to the ground truth.
"""
lines = []
tabmatch = 1 if info['tabcount_match'] else 0
for tinfo in info['tables']:
lines.append([file, str(tabmatch)] + [str(tinfo[k]) for k in fields])
return lines
def F1(p, r):
""" Calculate F1 score from precision and recall.
Returns zero if one of p, r is zero.
"""
return (2*p*r/(p+r)) if p != 0 and r != 0 else 0
def read_tables_from_file(csvfile):
""" Opens csvfile, returns all tables found.
Guesses csv format (delimiter, etc.)
Splits data into different tables at newline (or empty row).
Returns list of tables.
"""
tables=[]
table_id = 0
with file(csvfile) as f:
sniffer = csv.Sniffer()
dialect = sniffer.sniff(f.next())
rd = csv.reader(f, delimiter=dialect.delimiter,
quotechar=dialect.quotechar)
for r in rd:
if len(tables) <= table_id:
tables.append([])
# Begin next table if there is an empty line
if r == [] or sum([len(v) for v in r]) == 0:
if len(tables[table_id])>0:
table_id+=1
else:
tables[table_id].append(r)
return [clean_table(t) for t in tables if t!=[]]
if __name__ == '__main__':
""" Script usage.
"""
fields = [
#'rows_a', 'rows_b',
#'size_a', 'size_b',
'n',
'rows_match', 'cols_match', 'dim_match',
'p', 'r', 'F1',]
limitchar = ' & '
if len(sys.argv) < 3:
print "Specify two (csv-)files or directories"
quit(-1)
# Params 1 + 2 are files or directories
file1 = sys.argv[1]
file2 = sys.argv[2]
srcinfo = [os.path.basename(file1), os.path.basename(file2)]
# 3rd parameter becomes 'tooldef' (text cols to name rows),
# and 4th parameter tells whether to print headers
tooldef = sys.argv[3].split('-') if len(sys.argv) > 3 else ['na', 'na']
print_headers = len(sys.argv) > 4 and sys.argv[4] in ["1", "y", "yes"]
if print_headers:
print ','.join(['name', 'tool', 'src1', 'src2',
'filename', 'tabsmatch',] + fields)
if os.path.isfile(file1) and os.path.isfile(file2):
inf = compare_files(file1, file2)
lines = output_compareinfo_csv(file1, inf, fields)
for l in lines:
print ','.join(tooldef + srcinfo + l)
elif os.path.isdir(file1) and os.path.isdir(file2):
for f in [path for path in os.listdir(file1) if path[-4:]=='.csv']:
if os.path.isfile(file2 + '/' + f):
inf = compare_files(file1 + '/' + f, file2 + '/' + f)
lines = output_compareinfo_csv(f, inf, fields)
for l in lines:
print ','.join(tooldef + srcinfo + l)
else:
print ','.join(['','',] + srcinfo + ['', "Missing {} for {} {}".format(f, *tooldef)]) | 0.276007 | 0.466663 |
import numpy as np
from numpy.testing import assert_equal, assert_raises
from numpy.testing import assert_array_almost_equal
from scipy import sparse
from sklearn.utils.testing import assert_less
from sklearn.linear_model import LinearRegression, RANSACRegressor
from sklearn.linear_model.ransac import _dynamic_max_trials
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data
outliers = np.array((10, 30, 200))
data[outliers[0], :] = (1000, 1000)
data[outliers[1], :] = (-1000, -1000)
data[outliers[2], :] = (-100, -50)
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_is_data_valid():
def is_data_valid(X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
X = np.random.rand(10, 2)
y = np.random.rand(10, 1)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_data_valid=is_data_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_is_model_valid():
def is_model_valid(estimator, X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_model_valid=is_model_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_max_trials():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=0,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=11,
random_state=0)
assert getattr(ransac_estimator, 'n_trials_', None) is None
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 2)
def test_ransac_stop_n_inliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_n_inliers=2,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_stop_score():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_score=0,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_score():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.score(X[2:], y[2:]), 1)
assert_less(ransac_estimator.score(X[:2], y[:2]), 1)
def test_ransac_predict():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.predict(X), np.zeros(100))
def test_ransac_sparse_coo():
X_sparse = sparse.coo_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csr():
X_sparse = sparse.csr_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csc():
X_sparse = sparse.csc_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_none_estimator():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0)
ransac_estimator.fit(X, y)
ransac_none_estimator.fit(X, y)
assert_array_almost_equal(ransac_estimator.predict(X),
ransac_none_estimator.predict(X))
def test_ransac_min_n_samples():
base_estimator = LinearRegression()
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator2 = RANSACRegressor(base_estimator,
min_samples=2. / X.shape[0],
residual_threshold=5, random_state=0)
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1,
residual_threshold=5, random_state=0)
ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2,
residual_threshold=5, random_state=0)
ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0,
residual_threshold=5, random_state=0)
ransac_estimator6 = RANSACRegressor(base_estimator,
residual_threshold=5, random_state=0)
ransac_estimator7 = RANSACRegressor(base_estimator,
min_samples=X.shape[0] + 1,
residual_threshold=5, random_state=0)
ransac_estimator1.fit(X, y)
ransac_estimator2.fit(X, y)
ransac_estimator5.fit(X, y)
ransac_estimator6.fit(X, y)
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator2.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator5.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator6.predict(X))
assert_raises(ValueError, ransac_estimator3.fit, X, y)
assert_raises(ValueError, ransac_estimator4.fit, X, y)
assert_raises(ValueError, ransac_estimator7.fit, X, y)
def test_ransac_multi_dimensional_targets():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# 3-D target values
yyy = np.column_stack([y, y, y])
# Estimate parameters of corrupted data
ransac_estimator.fit(X, yyy)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_residual_metric():
residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1)
residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
ransac_estimator1.fit(X, yyy)
ransac_estimator2.fit(X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
ransac_estimator2.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
def test_ransac_default_residual_threshold():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_dynamic_max_trials():
# Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
# <NAME>. and <NAME>., 2004,
# Multiple View Geometry in Computer Vision, Second Edition,
# Cambridge University Press, ISBN: 0521540518
# e = 0%, min_samples = X
assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
# e = 5%, min_samples = 2
assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
# e = 10%, min_samples = 2
assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
# e = 30%, min_samples = 2
assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
# e = 50%, min_samples = 2
assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
# e = 5%, min_samples = 8
assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
# e = 10%, min_samples = 8
assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
# e = 30%, min_samples = 8
assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
# e = 50%, min_samples = 8
assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
# e = 0%, min_samples = 10
assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)
assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf'))
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=-0.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=1.1)
assert_raises(ValueError, ransac_estimator.fit, X, y) | summary/sumy/sklearn/linear_model/tests/test_ransac.py | import numpy as np
from numpy.testing import assert_equal, assert_raises
from numpy.testing import assert_array_almost_equal
from scipy import sparse
from sklearn.utils.testing import assert_less
from sklearn.linear_model import LinearRegression, RANSACRegressor
from sklearn.linear_model.ransac import _dynamic_max_trials
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data
outliers = np.array((10, 30, 200))
data[outliers[0], :] = (1000, 1000)
data[outliers[1], :] = (-1000, -1000)
data[outliers[2], :] = (-100, -50)
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_is_data_valid():
def is_data_valid(X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
X = np.random.rand(10, 2)
y = np.random.rand(10, 1)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_data_valid=is_data_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_is_model_valid():
def is_model_valid(estimator, X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_model_valid=is_model_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_max_trials():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=0,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=11,
random_state=0)
assert getattr(ransac_estimator, 'n_trials_', None) is None
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 2)
def test_ransac_stop_n_inliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_n_inliers=2,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_stop_score():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_score=0,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_score():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.score(X[2:], y[2:]), 1)
assert_less(ransac_estimator.score(X[:2], y[:2]), 1)
def test_ransac_predict():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.predict(X), np.zeros(100))
def test_ransac_sparse_coo():
X_sparse = sparse.coo_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csr():
X_sparse = sparse.csr_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csc():
X_sparse = sparse.csc_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_none_estimator():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0)
ransac_estimator.fit(X, y)
ransac_none_estimator.fit(X, y)
assert_array_almost_equal(ransac_estimator.predict(X),
ransac_none_estimator.predict(X))
def test_ransac_min_n_samples():
base_estimator = LinearRegression()
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator2 = RANSACRegressor(base_estimator,
min_samples=2. / X.shape[0],
residual_threshold=5, random_state=0)
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1,
residual_threshold=5, random_state=0)
ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2,
residual_threshold=5, random_state=0)
ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0,
residual_threshold=5, random_state=0)
ransac_estimator6 = RANSACRegressor(base_estimator,
residual_threshold=5, random_state=0)
ransac_estimator7 = RANSACRegressor(base_estimator,
min_samples=X.shape[0] + 1,
residual_threshold=5, random_state=0)
ransac_estimator1.fit(X, y)
ransac_estimator2.fit(X, y)
ransac_estimator5.fit(X, y)
ransac_estimator6.fit(X, y)
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator2.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator5.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator6.predict(X))
assert_raises(ValueError, ransac_estimator3.fit, X, y)
assert_raises(ValueError, ransac_estimator4.fit, X, y)
assert_raises(ValueError, ransac_estimator7.fit, X, y)
def test_ransac_multi_dimensional_targets():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# 3-D target values
yyy = np.column_stack([y, y, y])
# Estimate parameters of corrupted data
ransac_estimator.fit(X, yyy)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_residual_metric():
residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1)
residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
ransac_estimator1.fit(X, yyy)
ransac_estimator2.fit(X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
ransac_estimator2.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
def test_ransac_default_residual_threshold():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_dynamic_max_trials():
# Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
# <NAME>. and <NAME>., 2004,
# Multiple View Geometry in Computer Vision, Second Edition,
# Cambridge University Press, ISBN: 0521540518
# e = 0%, min_samples = X
assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
# e = 5%, min_samples = 2
assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
# e = 10%, min_samples = 2
assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
# e = 30%, min_samples = 2
assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
# e = 50%, min_samples = 2
assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
# e = 5%, min_samples = 8
assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
# e = 10%, min_samples = 8
assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
# e = 30%, min_samples = 8
assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
# e = 50%, min_samples = 8
assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
# e = 0%, min_samples = 10
assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)
assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf'))
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=-0.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=1.1)
assert_raises(ValueError, ransac_estimator.fit, X, y) | 0.850686 | 0.684468 |
from __future__ import division
import sys
import os
import unittest
import numpy as np
from numpy.testing import assert_allclose
from quantecon.lqnash import nnash
from quantecon.lqcontrol import LQ
class TestLQNash(unittest.TestCase):
def test_noninteractive(self):
"Test case for when agents don't interact with each other"
# Copied these values from test_lqcontrol
a = np.array([[.95, 0.], [0, .95]])
b1 = np.array([.95, 0.])
b2 = np.array([0., .95])
r1 = np.array([[-.25, 0.], [0., 0.]])
r2 = np.array([[0., 0.], [0., -.25]])
q1 = np.array([[-.15]])
q2 = np.array([[-.15]])
f1, f2, p1, p2 = nnash(a, b1, b2, r1, r2, q1, q2, 0, 0, 0, 0, 0, 0,
tol=1e-8, max_iter=10000)
alq = a[:1, :1]
blq = b1[:1].reshape((1, 1))
rlq = r1[:1, :1]
qlq = q1
lq_obj = LQ(qlq, rlq, alq, blq, beta=1.)
p, f, d = lq_obj.stationary_values()
assert_allclose(f1, f2[:, ::-1])
assert_allclose(f1[0, 0], f[0])
assert_allclose(p1[0, 0], p2[1, 1])
assert_allclose(p1[0, 0], p[0, 0])
def test_nnash(self):
"Use judd test case for nnash. Follows judd.m"
# Define Parameters
delta = 0.02
d = np.array([[-1, 0.5], [0.5, -1]])
B = np.array([25, 25])
c1 = np.array([1, -2, 1])
c2 = np.array([1, -2, 1])
e1 = np.array([10, 10, 3])
e2 = np.array([10, 10, 3])
delta_1 = 1 - delta
## Define matrices
a = np.array([[delta_1, 0, -delta_1*B[0]],
[0, delta_1, -delta_1*B[1]],
[0, 0, 1]])
b1 = delta_1 * np.array([[1, -d[0, 0]],
[0, -d[1, 0]],
[0, 0]])
b2 = delta_1 * np.array([[0, -d[0, 1]],
[1, -d[1, 1]],
[0, 0]])
r1 = -np.array([[0.5*c1[2], 0, 0.5*c1[1]],
[0, 0, 0],
[0.5*c1[1], 0, c1[0]]])
r2 = -np.array([[0, 0, 0],
[0, 0.5*c2[2], 0.5*c2[1]],
[0, 0.5*c2[1], c2[0]]])
q1 = np.array([[-0.5*e1[2], 0], [0, d[0, 0]]])
q2 = np.array([[-0.5*e2[2], 0], [0, d[1, 1]]])
s1 = np.zeros((2, 2))
s2 = np.copy(s1)
w1 = np.array([[0, 0],
[0, 0],
[-0.5*e1[1], B[0]/2.]])
w2 = np.array([[0, 0],
[0, 0],
[-0.5*e2[1], B[1]/2.]])
m1 = np.array([[0, 0], [0, d[0, 1] / 2.]])
m2 = np.copy(m1)
# build model and solve it
f1, f2, p1, p2 = nnash(a, b1, b2, r1, r2, q1, q2, s1, s2, w1, w2, m1,
m2)
aaa = a - b1.dot(f1) - b2.dot(f2)
aa = aaa[:2, :2]
tf = np.eye(2)-aa
tfi = np.linalg.inv(tf)
xbar = tfi.dot(aaa[:2, 2])
# Define answers from matlab. TODO: this is ghetto
f1_ml = np.asarray(np.matrix("""\
0.243666582208565, 0.027236062661951, -6.827882928738190;
0.392370733875639, 0.139696450885998, -37.734107291009138"""))
f2_ml = np.asarray(np.matrix("""\
0.027236062661951, 0.243666582208565, -6.827882928738186;
0.139696450885998, 0.392370733875639, -37.734107291009131"""))
xbar_ml = np.array([1.246871007582702, 1.246871007582685])
assert_allclose(f1, f1_ml)
assert_allclose(f2, f2_ml)
assert_allclose(xbar, xbar_ml)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestLQNash)
unittest.TextTestRunner(verbosity=2, stream=sys.stderr).run(suite) | quantecon/tests/test_lqnash.py | from __future__ import division
import sys
import os
import unittest
import numpy as np
from numpy.testing import assert_allclose
from quantecon.lqnash import nnash
from quantecon.lqcontrol import LQ
class TestLQNash(unittest.TestCase):
def test_noninteractive(self):
"Test case for when agents don't interact with each other"
# Copied these values from test_lqcontrol
a = np.array([[.95, 0.], [0, .95]])
b1 = np.array([.95, 0.])
b2 = np.array([0., .95])
r1 = np.array([[-.25, 0.], [0., 0.]])
r2 = np.array([[0., 0.], [0., -.25]])
q1 = np.array([[-.15]])
q2 = np.array([[-.15]])
f1, f2, p1, p2 = nnash(a, b1, b2, r1, r2, q1, q2, 0, 0, 0, 0, 0, 0,
tol=1e-8, max_iter=10000)
alq = a[:1, :1]
blq = b1[:1].reshape((1, 1))
rlq = r1[:1, :1]
qlq = q1
lq_obj = LQ(qlq, rlq, alq, blq, beta=1.)
p, f, d = lq_obj.stationary_values()
assert_allclose(f1, f2[:, ::-1])
assert_allclose(f1[0, 0], f[0])
assert_allclose(p1[0, 0], p2[1, 1])
assert_allclose(p1[0, 0], p[0, 0])
def test_nnash(self):
"Use judd test case for nnash. Follows judd.m"
# Define Parameters
delta = 0.02
d = np.array([[-1, 0.5], [0.5, -1]])
B = np.array([25, 25])
c1 = np.array([1, -2, 1])
c2 = np.array([1, -2, 1])
e1 = np.array([10, 10, 3])
e2 = np.array([10, 10, 3])
delta_1 = 1 - delta
## Define matrices
a = np.array([[delta_1, 0, -delta_1*B[0]],
[0, delta_1, -delta_1*B[1]],
[0, 0, 1]])
b1 = delta_1 * np.array([[1, -d[0, 0]],
[0, -d[1, 0]],
[0, 0]])
b2 = delta_1 * np.array([[0, -d[0, 1]],
[1, -d[1, 1]],
[0, 0]])
r1 = -np.array([[0.5*c1[2], 0, 0.5*c1[1]],
[0, 0, 0],
[0.5*c1[1], 0, c1[0]]])
r2 = -np.array([[0, 0, 0],
[0, 0.5*c2[2], 0.5*c2[1]],
[0, 0.5*c2[1], c2[0]]])
q1 = np.array([[-0.5*e1[2], 0], [0, d[0, 0]]])
q2 = np.array([[-0.5*e2[2], 0], [0, d[1, 1]]])
s1 = np.zeros((2, 2))
s2 = np.copy(s1)
w1 = np.array([[0, 0],
[0, 0],
[-0.5*e1[1], B[0]/2.]])
w2 = np.array([[0, 0],
[0, 0],
[-0.5*e2[1], B[1]/2.]])
m1 = np.array([[0, 0], [0, d[0, 1] / 2.]])
m2 = np.copy(m1)
# build model and solve it
f1, f2, p1, p2 = nnash(a, b1, b2, r1, r2, q1, q2, s1, s2, w1, w2, m1,
m2)
aaa = a - b1.dot(f1) - b2.dot(f2)
aa = aaa[:2, :2]
tf = np.eye(2)-aa
tfi = np.linalg.inv(tf)
xbar = tfi.dot(aaa[:2, 2])
# Define answers from matlab. TODO: this is ghetto
f1_ml = np.asarray(np.matrix("""\
0.243666582208565, 0.027236062661951, -6.827882928738190;
0.392370733875639, 0.139696450885998, -37.734107291009138"""))
f2_ml = np.asarray(np.matrix("""\
0.027236062661951, 0.243666582208565, -6.827882928738186;
0.139696450885998, 0.392370733875639, -37.734107291009131"""))
xbar_ml = np.array([1.246871007582702, 1.246871007582685])
assert_allclose(f1, f1_ml)
assert_allclose(f2, f2_ml)
assert_allclose(xbar, xbar_ml)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestLQNash)
unittest.TextTestRunner(verbosity=2, stream=sys.stderr).run(suite) | 0.392453 | 0.592195 |
"""Current-flow closeness centrality measures."""
import networkx as nx
from networkx.utils import not_implemented_for, reverse_cuthill_mckee_ordering
from networkx.algorithms.centrality.flow_matrix import *
__all__ = ['current_flow_closeness_centrality', 'information_centrality']
@not_implemented_for('directed')
def current_flow_closeness_centrality(G, weight=None,
dtype=float, solver='lu'):
"""Compute current-flow closeness centrality for nodes.
Current-flow closeness centrality is variant of closeness
centrality based on effective resistance between nodes in
a network. This metric is also known as information centrality.
Parameters
----------
G : graph
A NetworkX graph.
weight : None or string, optional (default=None)
If None, all edge weights are considered equal.
Otherwise holds the name of the edge attribute used as weight.
dtype: data type (default=float)
Default data type for internal matrices.
Set to np.float32 for lower memory consumption.
solver: string (default='lu')
Type of linear solver to use for computing the flow matrix.
Options are "full" (uses most memory), "lu" (recommended), and
"cg" (uses least memory).
Returns
-------
nodes : dictionary
Dictionary of nodes with current flow closeness centrality as the value.
See Also
--------
closeness_centrality
Notes
-----
The algorithm is from Brandes [1]_.
See also [2]_ for the original definition of information centrality.
References
----------
.. [1] <NAME> and <NAME>,
Centrality Measures Based on Current Flow.
Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
http://algo.uni-konstanz.de/publications/bf-cmbcf-05.pdf
.. [2] <NAME> and <NAME>:
Rethinking centrality: Methods and examples.
Social Networks 11(1):1-37, 1989.
http://dx.doi.org/10.1016/0378-8733(89)90016-6
"""
import numpy as np
import scipy
if not nx.is_connected(G):
raise nx.NetworkXError("Graph not connected.")
solvername = {"full": FullInverseLaplacian,
"lu": SuperLUInverseLaplacian,
"cg": CGInverseLaplacian}
n = G.number_of_nodes()
ordering = list(reverse_cuthill_mckee_ordering(G))
# make a copy with integer labels according to rcm ordering
# this could be done without a copy if we really wanted to
H = nx.relabel_nodes(G, dict(zip(ordering, range(n))))
betweenness = dict.fromkeys(H, 0.0) # b[v]=0 for v in H
n = H.number_of_nodes()
L = laplacian_sparse_matrix(H, nodelist=range(n), weight=weight,
dtype=dtype, format='csc')
C2 = solvername[solver](L, width=1, dtype=dtype) # initialize solver
for v in H:
col = C2.get_row(v)
for w in H:
betweenness[v] += col[v] - 2 * col[w]
betweenness[w] += col[v]
for v in H:
betweenness[v] = 1.0 / (betweenness[v])
return dict((ordering[k], float(v)) for k, v in betweenness.items())
information_centrality = current_flow_closeness_centrality
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import numpy
except:
raise SkipTest("NumPy not available") | src/networkx/algorithms/centrality/current_flow_closeness.py | """Current-flow closeness centrality measures."""
import networkx as nx
from networkx.utils import not_implemented_for, reverse_cuthill_mckee_ordering
from networkx.algorithms.centrality.flow_matrix import *
__all__ = ['current_flow_closeness_centrality', 'information_centrality']
@not_implemented_for('directed')
def current_flow_closeness_centrality(G, weight=None,
dtype=float, solver='lu'):
"""Compute current-flow closeness centrality for nodes.
Current-flow closeness centrality is variant of closeness
centrality based on effective resistance between nodes in
a network. This metric is also known as information centrality.
Parameters
----------
G : graph
A NetworkX graph.
weight : None or string, optional (default=None)
If None, all edge weights are considered equal.
Otherwise holds the name of the edge attribute used as weight.
dtype: data type (default=float)
Default data type for internal matrices.
Set to np.float32 for lower memory consumption.
solver: string (default='lu')
Type of linear solver to use for computing the flow matrix.
Options are "full" (uses most memory), "lu" (recommended), and
"cg" (uses least memory).
Returns
-------
nodes : dictionary
Dictionary of nodes with current flow closeness centrality as the value.
See Also
--------
closeness_centrality
Notes
-----
The algorithm is from Brandes [1]_.
See also [2]_ for the original definition of information centrality.
References
----------
.. [1] <NAME> and <NAME>,
Centrality Measures Based on Current Flow.
Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
http://algo.uni-konstanz.de/publications/bf-cmbcf-05.pdf
.. [2] <NAME> and <NAME>:
Rethinking centrality: Methods and examples.
Social Networks 11(1):1-37, 1989.
http://dx.doi.org/10.1016/0378-8733(89)90016-6
"""
import numpy as np
import scipy
if not nx.is_connected(G):
raise nx.NetworkXError("Graph not connected.")
solvername = {"full": FullInverseLaplacian,
"lu": SuperLUInverseLaplacian,
"cg": CGInverseLaplacian}
n = G.number_of_nodes()
ordering = list(reverse_cuthill_mckee_ordering(G))
# make a copy with integer labels according to rcm ordering
# this could be done without a copy if we really wanted to
H = nx.relabel_nodes(G, dict(zip(ordering, range(n))))
betweenness = dict.fromkeys(H, 0.0) # b[v]=0 for v in H
n = H.number_of_nodes()
L = laplacian_sparse_matrix(H, nodelist=range(n), weight=weight,
dtype=dtype, format='csc')
C2 = solvername[solver](L, width=1, dtype=dtype) # initialize solver
for v in H:
col = C2.get_row(v)
for w in H:
betweenness[v] += col[v] - 2 * col[w]
betweenness[w] += col[v]
for v in H:
betweenness[v] = 1.0 / (betweenness[v])
return dict((ordering[k], float(v)) for k, v in betweenness.items())
information_centrality = current_flow_closeness_centrality
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import numpy
except:
raise SkipTest("NumPy not available") | 0.918233 | 0.696391 |
from security_monkey.datastore import Account, AccountType, Technology
from security_monkey.tests import SecurityMonkeyTestCase
from security_monkey import db
from security_monkey.watchers.github.org import GitHubOrgItem
from security_monkey.auditors.github.repo import GitHubRepoAuditor
# GitHub API-style repository fixture: a PUBLIC, non-forked repo with no
# protected branches, no deploy keys, no outside collaborators, and only
# a "push"-level team permission.  Used as the "clean" item in the tests.
CONFIG_ONE = {
    "id": 1296269,
    "owner": {
        "login": "octocat",
        "id": 1,
        "avatar_url": "https://github.com/images/error/octocat_happy.gif",
        "gravatar_id": "",
        "url": "https://api.github.com/users/octocat",
        "html_url": "https://github.com/octocat",
        "followers_url": "https://api.github.com/users/octocat/followers",
        "following_url": "https://api.github.com/users/octocat/following{/other_user}",
        "gists_url": "https://api.github.com/users/octocat/gists{/gist_id}",
        "starred_url": "https://api.github.com/users/octocat/starred{/owner}{/repo}",
        "subscriptions_url": "https://api.github.com/users/octocat/subscriptions",
        "organizations_url": "https://api.github.com/users/octocat/orgs",
        "repos_url": "https://api.github.com/users/octocat/repos",
        "events_url": "https://api.github.com/users/octocat/events{/privacy}",
        "received_events_url": "https://api.github.com/users/octocat/received_events",
        "type": "User",
        "site_admin": False
    },
    "name": "Hello-World",
    "full_name": "octocat/Hello-World",
    "description": "This your first repo!",
    # Public and not a fork -- the public-repo check should flag it, the
    # forked-repo check should not.
    "private": False,
    "fork": False,
    "url": "https://api.github.com/repos/octocat/Hello-World",
    "html_url": "https://github.com/octocat/Hello-World",
    "archive_url": "http://api.github.com/repos/octocat/Hello-World/{archive_format}{/ref}",
    "assignees_url": "http://api.github.com/repos/octocat/Hello-World/assignees{/user}",
    "blobs_url": "http://api.github.com/repos/octocat/Hello-World/git/blobs{/sha}",
    "branches_url": "http://api.github.com/repos/octocat/Hello-World/branches{/branch}",
    "clone_url": "https://github.com/octocat/Hello-World.git",
    "collaborators_url": "http://api.github.com/repos/octocat/Hello-World/collaborators{/collaborator}",
    "comments_url": "http://api.github.com/repos/octocat/Hello-World/comments{/number}",
    "commits_url": "http://api.github.com/repos/octocat/Hello-World/commits{/sha}",
    "compare_url": "http://api.github.com/repos/octocat/Hello-World/compare/{base}...{head}",
    "contents_url": "http://api.github.com/repos/octocat/Hello-World/contents/{+path}",
    "contributors_url": "http://api.github.com/repos/octocat/Hello-World/contributors",
    "deployments_url": "http://api.github.com/repos/octocat/Hello-World/deployments",
    "downloads_url": "http://api.github.com/repos/octocat/Hello-World/downloads",
    "events_url": "http://api.github.com/repos/octocat/Hello-World/events",
    "forks_url": "http://api.github.com/repos/octocat/Hello-World/forks",
    "git_commits_url": "http://api.github.com/repos/octocat/Hello-World/git/commits{/sha}",
    "git_refs_url": "http://api.github.com/repos/octocat/Hello-World/git/refs{/sha}",
    "git_tags_url": "http://api.github.com/repos/octocat/Hello-World/git/tags{/sha}",
    "git_url": "git:github.com/octocat/Hello-World.git",
    "hooks_url": "http://api.github.com/repos/octocat/Hello-World/hooks",
    "issue_comment_url": "http://api.github.com/repos/octocat/Hello-World/issues/comments{/number}",
    "issue_events_url": "http://api.github.com/repos/octocat/Hello-World/issues/events{/number}",
    "issues_url": "http://api.github.com/repos/octocat/Hello-World/issues{/number}",
    "keys_url": "http://api.github.com/repos/octocat/Hello-World/keys{/key_id}",
    "labels_url": "http://api.github.com/repos/octocat/Hello-World/labels{/name}",
    "languages_url": "http://api.github.com/repos/octocat/Hello-World/languages",
    "merges_url": "http://api.github.com/repos/octocat/Hello-World/merges",
    "milestones_url": "http://api.github.com/repos/octocat/Hello-World/milestones{/number}",
    "mirror_url": "git:git.example.com/octocat/Hello-World",
    "notifications_url": "http://api.github.com/repos/octocat/Hello-World/notifications{?since, all, participating}",
    "pulls_url": "http://api.github.com/repos/octocat/Hello-World/pulls{/number}",
    "releases_url": "http://api.github.com/repos/octocat/Hello-World/releases{/id}",
    "ssh_url": "git@github.com:octocat/Hello-World.git",
    "stargazers_url": "http://api.github.com/repos/octocat/Hello-World/stargazers",
    "statuses_url": "http://api.github.com/repos/octocat/Hello-World/statuses/{sha}",
    "subscribers_url": "http://api.github.com/repos/octocat/Hello-World/subscribers",
    "subscription_url": "http://api.github.com/repos/octocat/Hello-World/subscription",
    "svn_url": "https://svn.github.com/octocat/Hello-World",
    "tags_url": "http://api.github.com/repos/octocat/Hello-World/tags",
    "teams_url": "http://api.github.com/repos/octocat/Hello-World/teams",
    "trees_url": "http://api.github.com/repos/octocat/Hello-World/git/trees{/sha}",
    "homepage": "https://github.com",
    "language": None,
    "forks_count": 9,
    "stargazers_count": 80,
    "watchers_count": 80,
    "size": 108,
    "default_branch": "master",
    "open_issues_count": 0,
    "topics": [
        "octocat",
        "atom",
        "electron",
        "API"
    ],
    "has_issues": True,
    "has_wiki": True,
    "has_pages": False,
    "has_downloads": True,
    "pushed_at": "2011-01-26T19:06:43Z",
    "created_at": "2011-01-26T19:01:12Z",
    "updated_at": "2011-01-26T19:14:43Z",
    "permissions": {
        "admin": False,
        "push": False,
        "pull": True
    },
    "allow_rebase_merge": True,
    "allow_squash_merge": True,
    "allow_merge_commit": True,
    "subscribers_count": 42,
    "network_count": 0,
    # Empty security-relevant sections: only check_for_no_protected_branches
    # and check_for_public_repo raise issues for this fixture.
    "protected_branches": [],
    "deploy_keys": [],
    "outside_collaborators": [],
    "team_permissions": {
        "myteam": "push"
    }
}
# GitHub API-style repository fixture: a PRIVATE FORK with a protected
# branch, two deploy keys (one writable), two outside collaborators (one
# admin), and an "admin" team permission.  Used as the "risky" item.
CONFIG_TWO = {
    "id": 1296269,
    "owner": {
        "login": "octocat",
        "id": 1,
        "avatar_url": "https://github.com/images/error/octocat_happy.gif",
        "gravatar_id": "",
        "url": "https://api.github.com/users/octocat",
        "html_url": "https://github.com/octocat",
        "followers_url": "https://api.github.com/users/octocat/followers",
        "following_url": "https://api.github.com/users/octocat/following{/other_user}",
        "gists_url": "https://api.github.com/users/octocat/gists{/gist_id}",
        "starred_url": "https://api.github.com/users/octocat/starred{/owner}{/repo}",
        "subscriptions_url": "https://api.github.com/users/octocat/subscriptions",
        "organizations_url": "https://api.github.com/users/octocat/orgs",
        "repos_url": "https://api.github.com/users/octocat/repos",
        "events_url": "https://api.github.com/users/octocat/events{/privacy}",
        "received_events_url": "https://api.github.com/users/octocat/received_events",
        "type": "User",
        "site_admin": False
    },
    "name": "Repo-Private",
    "full_name": "octocat/Repo-Private",
    "description": "This your second repo!",
    # Private fork -- the forked-repo check should flag it, the public-repo
    # check should not.
    "private": True,
    "fork": True,
    "url": "https://api.github.com/repos/octocat/Hello-World",
    "html_url": "https://github.com/octocat/Hello-World",
    "archive_url": "http://api.github.com/repos/octocat/Hello-World/{archive_format}{/ref}",
    "assignees_url": "http://api.github.com/repos/octocat/Hello-World/assignees{/user}",
    "blobs_url": "http://api.github.com/repos/octocat/Hello-World/git/blobs{/sha}",
    "branches_url": "http://api.github.com/repos/octocat/Hello-World/branches{/branch}",
    "clone_url": "https://github.com/octocat/Hello-World.git",
    "collaborators_url": "http://api.github.com/repos/octocat/Hello-World/collaborators{/collaborator}",
    "comments_url": "http://api.github.com/repos/octocat/Hello-World/comments{/number}",
    "commits_url": "http://api.github.com/repos/octocat/Hello-World/commits{/sha}",
    "compare_url": "http://api.github.com/repos/octocat/Hello-World/compare/{base}...{head}",
    "contents_url": "http://api.github.com/repos/octocat/Hello-World/contents/{+path}",
    "contributors_url": "http://api.github.com/repos/octocat/Hello-World/contributors",
    "deployments_url": "http://api.github.com/repos/octocat/Hello-World/deployments",
    "downloads_url": "http://api.github.com/repos/octocat/Hello-World/downloads",
    "events_url": "http://api.github.com/repos/octocat/Hello-World/events",
    "forks_url": "http://api.github.com/repos/octocat/Hello-World/forks",
    "git_commits_url": "http://api.github.com/repos/octocat/Hello-World/git/commits{/sha}",
    "git_refs_url": "http://api.github.com/repos/octocat/Hello-World/git/refs{/sha}",
    "git_tags_url": "http://api.github.com/repos/octocat/Hello-World/git/tags{/sha}",
    "git_url": "git:github.com/octocat/Hello-World.git",
    "hooks_url": "http://api.github.com/repos/octocat/Hello-World/hooks",
    "issue_comment_url": "http://api.github.com/repos/octocat/Hello-World/issues/comments{/number}",
    "issue_events_url": "http://api.github.com/repos/octocat/Hello-World/issues/events{/number}",
    "issues_url": "http://api.github.com/repos/octocat/Hello-World/issues{/number}",
    "keys_url": "http://api.github.com/repos/octocat/Hello-World/keys{/key_id}",
    "labels_url": "http://api.github.com/repos/octocat/Hello-World/labels{/name}",
    "languages_url": "http://api.github.com/repos/octocat/Hello-World/languages",
    "merges_url": "http://api.github.com/repos/octocat/Hello-World/merges",
    "milestones_url": "http://api.github.com/repos/octocat/Hello-World/milestones{/number}",
    "mirror_url": "git:git.example.com/octocat/Hello-World",
    "notifications_url": "http://api.github.com/repos/octocat/Hello-World/notifications{?since, all, participating}",
    "pulls_url": "http://api.github.com/repos/octocat/Hello-World/pulls{/number}",
    "releases_url": "http://api.github.com/repos/octocat/Hello-World/releases{/id}",
    "ssh_url": "git@github.com:octocat/Hello-World.git",
    "stargazers_url": "http://api.github.com/repos/octocat/Hello-World/stargazers",
    "statuses_url": "http://api.github.com/repos/octocat/Hello-World/statuses/{sha}",
    "subscribers_url": "http://api.github.com/repos/octocat/Hello-World/subscribers",
    "subscription_url": "http://api.github.com/repos/octocat/Hello-World/subscription",
    "svn_url": "https://svn.github.com/octocat/Hello-World",
    "tags_url": "http://api.github.com/repos/octocat/Hello-World/tags",
    "teams_url": "http://api.github.com/repos/octocat/Hello-World/teams",
    "trees_url": "http://api.github.com/repos/octocat/Hello-World/git/trees{/sha}",
    "homepage": "https://github.com",
    "language": None,
    "forks_count": 9,
    "stargazers_count": 80,
    "watchers_count": 80,
    "size": 108,
    "default_branch": "master",
    "open_issues_count": 0,
    "topics": [
        "octocat",
        "atom",
        "electron",
        "API"
    ],
    "has_issues": True,
    "has_wiki": True,
    "has_pages": False,
    "has_downloads": True,
    "pushed_at": "2011-01-26T19:06:43Z",
    "created_at": "2011-01-26T19:01:12Z",
    "updated_at": "2011-01-26T19:14:43Z",
    "permissions": {
        "admin": False,
        "push": False,
        "pull": True
    },
    "allow_rebase_merge": True,
    "allow_squash_merge": True,
    "allow_merge_commit": True,
    "subscribers_count": 42,
    "network_count": 0,
    "protected_branches": [
        {
            "name": "master"
        }
    ],
    # Two deploy keys: one read-only, one writable (the writable one is
    # expected to score higher in test_deploy_key_check).
    "deploy_keys": [
        {
            "id": 1234567890,
            "key": "ssh-rsa A<KEY>==",
            "url": "https://api.github.com/repos/octocat/Repo-Private/keys/1234567890",
            "title": "Some Deploy Key That Doesn't Exist",
            "verified": True,
            "created_at": "2017-02-01T00:56:06Z",
            "read_only": True
        },
        {
            "id": 1234567891,
            "key": "ssh-rsa A<KEY>==",
            "url": "https://api.github.com/repos/octocat/Repo-Private/keys/1234567891",
            "title": "Some OTHER Deploy Key That Doesn't Exist",
            "verified": True,
            "created_at": "2017-02-01T00:56:06Z",
            "read_only": False
        }
    ],
    # Two outside collaborators: a push-only user and an admin user (the
    # admin is expected to score higher in test_outside_collaborators_check).
    "outside_collaborators": [
        {
            "login": "octocat",
            "id": 1,
            "avatar_url": "https://github.com/images/error/octocat_happy.gif",
            "gravatar_id": "",
            "url": "https://api.github.com/users/octocat",
            "html_url": "https://github.com/octocat",
            "followers_url": "https://api.github.com/users/octocat/followers",
            "following_url": "https://api.github.com/users/octocat/following{/other_user}",
            "gists_url": "https://api.github.com/users/octocat/gists{/gist_id}",
            "starred_url": "https://api.github.com/users/octocat/starred{/owner}{/repo}",
            "subscriptions_url": "https://api.github.com/users/octocat/subscriptions",
            "organizations_url": "https://api.github.com/users/octocat/orgs",
            "repos_url": "https://api.github.com/users/octocat/repos",
            "events_url": "https://api.github.com/users/octocat/events{/privacy}",
            "received_events_url": "https://api.github.com/users/octocat/received_events",
            "type": "User",
            "site_admin": False,
            "permissions": {
                "pull": True,
                "push": True,
                "admin": False
            }
        },
        {
            "login": "octocat-admin",
            "id": 2,
            "avatar_url": "https://github.com/images/error/octocat_happy.gif",
            "gravatar_id": "",
            "url": "https://api.github.com/users/octocat-admin",
            "html_url": "https://github.com/octocat-admin",
            "followers_url": "https://api.github.com/users/octocat-admin/followers",
            "following_url": "https://api.github.com/users/octocat-admin/following{/other_user}",
            "gists_url": "https://api.github.com/users/octocat-admin/gists{/gist_id}",
            "starred_url": "https://api.github.com/users/octocat-admin/starred{/owner}{/repo}",
            "subscriptions_url": "https://api.github.com/users/octocat-admin/subscriptions",
            "organizations_url": "https://api.github.com/users/octocat-admin/orgs",
            "repos_url": "https://api.github.com/users/octocat-admin/repos",
            "events_url": "https://api.github.com/users/octocat-admin/events{/privacy}",
            "received_events_url": "https://api.github.com/users/octocat-admin/received_events",
            "type": "User",
            "site_admin": False,
            "permissions": {
                "pull": True,
                "push": True,
                "admin": True
            }
        }
    ],
    "team_permissions": {
        "myteam": "admin"
    }
}
class GitHubRepoAuditorTestCase(SecurityMonkeyTestCase):
    """Unit tests for the GitHub repository auditor checks.

    Two fixtures are exercised: CONFIG_ONE (public, unforked, no protected
    branches, keys, or collaborators) and CONFIG_TWO (private fork with
    deploy keys, outside collaborators, and an admin team).
    """

    def pre_test_setup(self):
        """Seed the datastore and build the two repo items under test."""
        self.account_type = AccountType(name="GitHub")
        db.session.add(self.account_type)
        db.session.commit()
        db.session.add(Account(name="octocat", account_type_id=self.account_type.id,
                               identifier="octocat", active=True, third_party=False))
        self.technology = Technology(name="repository")
        db.session.add(self.technology)
        db.session.commit()
        # gh_items[0] is the "clean" public repo, gh_items[1] the risky fork.
        self.gh_items = [
            GitHubOrgItem(account="octocat", name="Hello-World", arn="octocat/Hello-World", config=CONFIG_ONE),
            GitHubOrgItem(account="octocat", name="Repo-Private", arn="octocat/Repo-Private", config=CONFIG_TWO),
        ]

    def test_public_repo_check(self):
        """Only the public repo (item 0) is flagged, with score 5."""
        repo_auditor = GitHubRepoAuditor(accounts=["octocat"])
        repo_auditor.check_for_public_repo(self.gh_items[0])
        repo_auditor.check_for_public_repo(self.gh_items[1])
        # Should raise issue:
        self.assertEqual(len(self.gh_items[0].audit_issues), 1)
        self.assertEqual(self.gh_items[0].audit_issues[0].score, 5)
        # Should not raise issues:
        self.assertEqual(len(self.gh_items[1].audit_issues), 0)

    def test_forked_repo_check(self):
        """Only the forked repo (item 1) is flagged, with score 3."""
        repo_auditor = GitHubRepoAuditor(accounts=["octocat"])
        repo_auditor.check_if_forked_repo(self.gh_items[0])
        repo_auditor.check_if_forked_repo(self.gh_items[1])
        # Should raise issue:
        self.assertEqual(len(self.gh_items[1].audit_issues), 1)
        self.assertEqual(self.gh_items[1].audit_issues[0].score, 3)
        # Should not raise issues:
        self.assertEqual(len(self.gh_items[0].audit_issues), 0)

    def test_no_protected_branches_check(self):
        """The repo without protected branches (item 0) gets a score-0 issue."""
        repo_auditor = GitHubRepoAuditor(accounts=["octocat"])
        repo_auditor.check_for_no_protected_branches(self.gh_items[0])
        repo_auditor.check_for_no_protected_branches(self.gh_items[1])
        # Should raise issue:
        self.assertEqual(len(self.gh_items[0].audit_issues), 1)
        self.assertEqual(self.gh_items[0].audit_issues[0].score, 0)
        # Should not raise issues:
        self.assertEqual(len(self.gh_items[1].audit_issues), 0)

    def test_deploy_key_check(self):
        """Item 1's two deploy keys yield two issues (read-only: 3, writable: 5)."""
        repo_auditor = GitHubRepoAuditor(accounts=["octocat"])
        repo_auditor.check_for_deploy_keys(self.gh_items[0])
        repo_auditor.check_for_deploy_keys(self.gh_items[1])
        # Should raise issue:
        self.assertEqual(len(self.gh_items[1].audit_issues), 2)
        self.assertEqual(self.gh_items[1].audit_issues[0].score, 3)
        self.assertEqual(self.gh_items[1].audit_issues[1].score, 5)
        # Should not raise issues:
        self.assertEqual(len(self.gh_items[0].audit_issues), 0)

    def test_outside_collaborators_check(self):
        """Item 1's collaborators yield two issues (push: 3, admin: 8)."""
        repo_auditor = GitHubRepoAuditor(accounts=["octocat"])
        repo_auditor.check_for_outside_collaborators(self.gh_items[0])
        repo_auditor.check_for_outside_collaborators(self.gh_items[1])
        # Should raise issue:
        self.assertEqual(len(self.gh_items[1].audit_issues), 2)
        self.assertEqual(self.gh_items[1].audit_issues[0].score, 3)
        self.assertEqual(self.gh_items[1].audit_issues[1].score, 8)
        # Should not raise issues:
        self.assertEqual(len(self.gh_items[0].audit_issues), 0)

    def test_admin_teams_check(self):
        """Item 1's admin team permission yields one score-3 issue."""
        repo_auditor = GitHubRepoAuditor(accounts=["octocat"])
        repo_auditor.check_for_admin_teams(self.gh_items[0])
        repo_auditor.check_for_admin_teams(self.gh_items[1])
        # Should raise issue:
        self.assertEqual(len(self.gh_items[1].audit_issues), 1)
        self.assertEqual(self.gh_items[1].audit_issues[0].score, 3)
        # Should not raise issues:
        # (The trailing "| security_monkey/... | from ..." text fused onto
        # the original final line was a data-concatenation artifact, removed.)
        self.assertEqual(len(self.gh_items[0].audit_issues), 0)
from security_monkey.tests import SecurityMonkeyTestCase
from security_monkey import db
from security_monkey.watchers.github.org import GitHubOrgItem
from security_monkey.auditors.github.repo import GitHubRepoAuditor
# NOTE(review): this re-binds CONFIG_ONE with the identical value defined
# earlier in the file -- the whole test module appears to have been
# accidentally duplicated; confirm before removing either copy.
# Fixture: a PUBLIC, non-forked repo with no protected branches, deploy
# keys, or outside collaborators, and only a "push"-level team permission.
CONFIG_ONE = {
    "id": 1296269,
    "owner": {
        "login": "octocat",
        "id": 1,
        "avatar_url": "https://github.com/images/error/octocat_happy.gif",
        "gravatar_id": "",
        "url": "https://api.github.com/users/octocat",
        "html_url": "https://github.com/octocat",
        "followers_url": "https://api.github.com/users/octocat/followers",
        "following_url": "https://api.github.com/users/octocat/following{/other_user}",
        "gists_url": "https://api.github.com/users/octocat/gists{/gist_id}",
        "starred_url": "https://api.github.com/users/octocat/starred{/owner}{/repo}",
        "subscriptions_url": "https://api.github.com/users/octocat/subscriptions",
        "organizations_url": "https://api.github.com/users/octocat/orgs",
        "repos_url": "https://api.github.com/users/octocat/repos",
        "events_url": "https://api.github.com/users/octocat/events{/privacy}",
        "received_events_url": "https://api.github.com/users/octocat/received_events",
        "type": "User",
        "site_admin": False
    },
    "name": "Hello-World",
    "full_name": "octocat/Hello-World",
    "description": "This your first repo!",
    "private": False,
    "fork": False,
    "url": "https://api.github.com/repos/octocat/Hello-World",
    "html_url": "https://github.com/octocat/Hello-World",
    "archive_url": "http://api.github.com/repos/octocat/Hello-World/{archive_format}{/ref}",
    "assignees_url": "http://api.github.com/repos/octocat/Hello-World/assignees{/user}",
    "blobs_url": "http://api.github.com/repos/octocat/Hello-World/git/blobs{/sha}",
    "branches_url": "http://api.github.com/repos/octocat/Hello-World/branches{/branch}",
    "clone_url": "https://github.com/octocat/Hello-World.git",
    "collaborators_url": "http://api.github.com/repos/octocat/Hello-World/collaborators{/collaborator}",
    "comments_url": "http://api.github.com/repos/octocat/Hello-World/comments{/number}",
    "commits_url": "http://api.github.com/repos/octocat/Hello-World/commits{/sha}",
    "compare_url": "http://api.github.com/repos/octocat/Hello-World/compare/{base}...{head}",
    "contents_url": "http://api.github.com/repos/octocat/Hello-World/contents/{+path}",
    "contributors_url": "http://api.github.com/repos/octocat/Hello-World/contributors",
    "deployments_url": "http://api.github.com/repos/octocat/Hello-World/deployments",
    "downloads_url": "http://api.github.com/repos/octocat/Hello-World/downloads",
    "events_url": "http://api.github.com/repos/octocat/Hello-World/events",
    "forks_url": "http://api.github.com/repos/octocat/Hello-World/forks",
    "git_commits_url": "http://api.github.com/repos/octocat/Hello-World/git/commits{/sha}",
    "git_refs_url": "http://api.github.com/repos/octocat/Hello-World/git/refs{/sha}",
    "git_tags_url": "http://api.github.com/repos/octocat/Hello-World/git/tags{/sha}",
    "git_url": "git:github.com/octocat/Hello-World.git",
    "hooks_url": "http://api.github.com/repos/octocat/Hello-World/hooks",
    "issue_comment_url": "http://api.github.com/repos/octocat/Hello-World/issues/comments{/number}",
    "issue_events_url": "http://api.github.com/repos/octocat/Hello-World/issues/events{/number}",
    "issues_url": "http://api.github.com/repos/octocat/Hello-World/issues{/number}",
    "keys_url": "http://api.github.com/repos/octocat/Hello-World/keys{/key_id}",
    "labels_url": "http://api.github.com/repos/octocat/Hello-World/labels{/name}",
    "languages_url": "http://api.github.com/repos/octocat/Hello-World/languages",
    "merges_url": "http://api.github.com/repos/octocat/Hello-World/merges",
    "milestones_url": "http://api.github.com/repos/octocat/Hello-World/milestones{/number}",
    "mirror_url": "git:git.example.com/octocat/Hello-World",
    "notifications_url": "http://api.github.com/repos/octocat/Hello-World/notifications{?since, all, participating}",
    "pulls_url": "http://api.github.com/repos/octocat/Hello-World/pulls{/number}",
    "releases_url": "http://api.github.com/repos/octocat/Hello-World/releases{/id}",
    "ssh_url": "git@github.com:octocat/Hello-World.git",
    "stargazers_url": "http://api.github.com/repos/octocat/Hello-World/stargazers",
    "statuses_url": "http://api.github.com/repos/octocat/Hello-World/statuses/{sha}",
    "subscribers_url": "http://api.github.com/repos/octocat/Hello-World/subscribers",
    "subscription_url": "http://api.github.com/repos/octocat/Hello-World/subscription",
    "svn_url": "https://svn.github.com/octocat/Hello-World",
    "tags_url": "http://api.github.com/repos/octocat/Hello-World/tags",
    "teams_url": "http://api.github.com/repos/octocat/Hello-World/teams",
    "trees_url": "http://api.github.com/repos/octocat/Hello-World/git/trees{/sha}",
    "homepage": "https://github.com",
    "language": None,
    "forks_count": 9,
    "stargazers_count": 80,
    "watchers_count": 80,
    "size": 108,
    "default_branch": "master",
    "open_issues_count": 0,
    "topics": [
        "octocat",
        "atom",
        "electron",
        "API"
    ],
    "has_issues": True,
    "has_wiki": True,
    "has_pages": False,
    "has_downloads": True,
    "pushed_at": "2011-01-26T19:06:43Z",
    "created_at": "2011-01-26T19:01:12Z",
    "updated_at": "2011-01-26T19:14:43Z",
    "permissions": {
        "admin": False,
        "push": False,
        "pull": True
    },
    "allow_rebase_merge": True,
    "allow_squash_merge": True,
    "allow_merge_commit": True,
    "subscribers_count": 42,
    "network_count": 0,
    "protected_branches": [],
    "deploy_keys": [],
    "outside_collaborators": [],
    "team_permissions": {
        "myteam": "push"
    }
}
# NOTE(review): this re-binds CONFIG_TWO with the identical value defined
# earlier in the file -- part of the same apparent accidental duplication
# of the module; confirm before removing either copy.
# Fixture: a PRIVATE FORK with a protected branch, two deploy keys (one
# writable), two outside collaborators (one admin), and an admin team.
CONFIG_TWO = {
    "id": 1296269,
    "owner": {
        "login": "octocat",
        "id": 1,
        "avatar_url": "https://github.com/images/error/octocat_happy.gif",
        "gravatar_id": "",
        "url": "https://api.github.com/users/octocat",
        "html_url": "https://github.com/octocat",
        "followers_url": "https://api.github.com/users/octocat/followers",
        "following_url": "https://api.github.com/users/octocat/following{/other_user}",
        "gists_url": "https://api.github.com/users/octocat/gists{/gist_id}",
        "starred_url": "https://api.github.com/users/octocat/starred{/owner}{/repo}",
        "subscriptions_url": "https://api.github.com/users/octocat/subscriptions",
        "organizations_url": "https://api.github.com/users/octocat/orgs",
        "repos_url": "https://api.github.com/users/octocat/repos",
        "events_url": "https://api.github.com/users/octocat/events{/privacy}",
        "received_events_url": "https://api.github.com/users/octocat/received_events",
        "type": "User",
        "site_admin": False
    },
    "name": "Repo-Private",
    "full_name": "octocat/Repo-Private",
    "description": "This your second repo!",
    "private": True,
    "fork": True,
    "url": "https://api.github.com/repos/octocat/Hello-World",
    "html_url": "https://github.com/octocat/Hello-World",
    "archive_url": "http://api.github.com/repos/octocat/Hello-World/{archive_format}{/ref}",
    "assignees_url": "http://api.github.com/repos/octocat/Hello-World/assignees{/user}",
    "blobs_url": "http://api.github.com/repos/octocat/Hello-World/git/blobs{/sha}",
    "branches_url": "http://api.github.com/repos/octocat/Hello-World/branches{/branch}",
    "clone_url": "https://github.com/octocat/Hello-World.git",
    "collaborators_url": "http://api.github.com/repos/octocat/Hello-World/collaborators{/collaborator}",
    "comments_url": "http://api.github.com/repos/octocat/Hello-World/comments{/number}",
    "commits_url": "http://api.github.com/repos/octocat/Hello-World/commits{/sha}",
    "compare_url": "http://api.github.com/repos/octocat/Hello-World/compare/{base}...{head}",
    "contents_url": "http://api.github.com/repos/octocat/Hello-World/contents/{+path}",
    "contributors_url": "http://api.github.com/repos/octocat/Hello-World/contributors",
    "deployments_url": "http://api.github.com/repos/octocat/Hello-World/deployments",
    "downloads_url": "http://api.github.com/repos/octocat/Hello-World/downloads",
    "events_url": "http://api.github.com/repos/octocat/Hello-World/events",
    "forks_url": "http://api.github.com/repos/octocat/Hello-World/forks",
    "git_commits_url": "http://api.github.com/repos/octocat/Hello-World/git/commits{/sha}",
    "git_refs_url": "http://api.github.com/repos/octocat/Hello-World/git/refs{/sha}",
    "git_tags_url": "http://api.github.com/repos/octocat/Hello-World/git/tags{/sha}",
    "git_url": "git:github.com/octocat/Hello-World.git",
    "hooks_url": "http://api.github.com/repos/octocat/Hello-World/hooks",
    "issue_comment_url": "http://api.github.com/repos/octocat/Hello-World/issues/comments{/number}",
    "issue_events_url": "http://api.github.com/repos/octocat/Hello-World/issues/events{/number}",
    "issues_url": "http://api.github.com/repos/octocat/Hello-World/issues{/number}",
    "keys_url": "http://api.github.com/repos/octocat/Hello-World/keys{/key_id}",
    "labels_url": "http://api.github.com/repos/octocat/Hello-World/labels{/name}",
    "languages_url": "http://api.github.com/repos/octocat/Hello-World/languages",
    "merges_url": "http://api.github.com/repos/octocat/Hello-World/merges",
    "milestones_url": "http://api.github.com/repos/octocat/Hello-World/milestones{/number}",
    "mirror_url": "git:git.example.com/octocat/Hello-World",
    "notifications_url": "http://api.github.com/repos/octocat/Hello-World/notifications{?since, all, participating}",
    "pulls_url": "http://api.github.com/repos/octocat/Hello-World/pulls{/number}",
    "releases_url": "http://api.github.com/repos/octocat/Hello-World/releases{/id}",
    "ssh_url": "git@github.com:octocat/Hello-World.git",
    "stargazers_url": "http://api.github.com/repos/octocat/Hello-World/stargazers",
    "statuses_url": "http://api.github.com/repos/octocat/Hello-World/statuses/{sha}",
    "subscribers_url": "http://api.github.com/repos/octocat/Hello-World/subscribers",
    "subscription_url": "http://api.github.com/repos/octocat/Hello-World/subscription",
    "svn_url": "https://svn.github.com/octocat/Hello-World",
    "tags_url": "http://api.github.com/repos/octocat/Hello-World/tags",
    "teams_url": "http://api.github.com/repos/octocat/Hello-World/teams",
    "trees_url": "http://api.github.com/repos/octocat/Hello-World/git/trees{/sha}",
    "homepage": "https://github.com",
    "language": None,
    "forks_count": 9,
    "stargazers_count": 80,
    "watchers_count": 80,
    "size": 108,
    "default_branch": "master",
    "open_issues_count": 0,
    "topics": [
        "octocat",
        "atom",
        "electron",
        "API"
    ],
    "has_issues": True,
    "has_wiki": True,
    "has_pages": False,
    "has_downloads": True,
    "pushed_at": "2011-01-26T19:06:43Z",
    "created_at": "2011-01-26T19:01:12Z",
    "updated_at": "2011-01-26T19:14:43Z",
    "permissions": {
        "admin": False,
        "push": False,
        "pull": True
    },
    "allow_rebase_merge": True,
    "allow_squash_merge": True,
    "allow_merge_commit": True,
    "subscribers_count": 42,
    "network_count": 0,
    "protected_branches": [
        {
            "name": "master"
        }
    ],
    "deploy_keys": [
        {
            "id": 1234567890,
            "key": "ssh-rsa A<KEY>==",
            "url": "https://api.github.com/repos/octocat/Repo-Private/keys/1234567890",
            "title": "Some Deploy Key That Doesn't Exist",
            "verified": True,
            "created_at": "2017-02-01T00:56:06Z",
            "read_only": True
        },
        {
            "id": 1234567891,
            "key": "ssh-rsa A<KEY>==",
            "url": "https://api.github.com/repos/octocat/Repo-Private/keys/1234567891",
            "title": "Some OTHER Deploy Key That Doesn't Exist",
            "verified": True,
            "created_at": "2017-02-01T00:56:06Z",
            "read_only": False
        }
    ],
    "outside_collaborators": [
        {
            "login": "octocat",
            "id": 1,
            "avatar_url": "https://github.com/images/error/octocat_happy.gif",
            "gravatar_id": "",
            "url": "https://api.github.com/users/octocat",
            "html_url": "https://github.com/octocat",
            "followers_url": "https://api.github.com/users/octocat/followers",
            "following_url": "https://api.github.com/users/octocat/following{/other_user}",
            "gists_url": "https://api.github.com/users/octocat/gists{/gist_id}",
            "starred_url": "https://api.github.com/users/octocat/starred{/owner}{/repo}",
            "subscriptions_url": "https://api.github.com/users/octocat/subscriptions",
            "organizations_url": "https://api.github.com/users/octocat/orgs",
            "repos_url": "https://api.github.com/users/octocat/repos",
            "events_url": "https://api.github.com/users/octocat/events{/privacy}",
            "received_events_url": "https://api.github.com/users/octocat/received_events",
            "type": "User",
            "site_admin": False,
            "permissions": {
                "pull": True,
                "push": True,
                "admin": False
            }
        },
        {
            "login": "octocat-admin",
            "id": 2,
            "avatar_url": "https://github.com/images/error/octocat_happy.gif",
            "gravatar_id": "",
            "url": "https://api.github.com/users/octocat-admin",
            "html_url": "https://github.com/octocat-admin",
            "followers_url": "https://api.github.com/users/octocat-admin/followers",
            "following_url": "https://api.github.com/users/octocat-admin/following{/other_user}",
            "gists_url": "https://api.github.com/users/octocat-admin/gists{/gist_id}",
            "starred_url": "https://api.github.com/users/octocat-admin/starred{/owner}{/repo}",
            "subscriptions_url": "https://api.github.com/users/octocat-admin/subscriptions",
            "organizations_url": "https://api.github.com/users/octocat-admin/orgs",
            "repos_url": "https://api.github.com/users/octocat-admin/repos",
            "events_url": "https://api.github.com/users/octocat-admin/events{/privacy}",
            "received_events_url": "https://api.github.com/users/octocat-admin/received_events",
            "type": "User",
            "site_admin": False,
            "permissions": {
                "pull": True,
                "push": True,
                "admin": True
            }
        }
    ],
    "team_permissions": {
        "myteam": "admin"
    }
}
# NOTE(review): this class re-defines GitHubRepoAuditorTestCase, shadowing
# the identical class defined earlier in the file -- part of the apparent
# accidental duplication of the module; confirm before removing either copy.
class GitHubRepoAuditorTestCase(SecurityMonkeyTestCase):
    """Unit tests for the GitHub repository auditor checks.

    Exercises CONFIG_ONE (public, unforked, no protected branches, keys,
    or collaborators) and CONFIG_TWO (private fork with deploy keys,
    outside collaborators, and an admin team).
    """

    def pre_test_setup(self):
        """Seed the datastore and build the two repo items under test."""
        self.account_type = AccountType(name="GitHub")
        db.session.add(self.account_type)
        db.session.commit()
        db.session.add(Account(name="octocat", account_type_id=self.account_type.id,
                               identifier="octocat", active=True, third_party=False))
        self.technology = Technology(name="repository")
        db.session.add(self.technology)
        db.session.commit()
        # gh_items[0] is the "clean" public repo, gh_items[1] the risky fork.
        self.gh_items = [
            GitHubOrgItem(account="octocat", name="Hello-World", arn="octocat/Hello-World", config=CONFIG_ONE),
            GitHubOrgItem(account="octocat", name="Repo-Private", arn="octocat/Repo-Private", config=CONFIG_TWO),
        ]

    def test_public_repo_check(self):
        # Only the public repo (item 0) should be flagged, with score 5.
        repo_auditor = GitHubRepoAuditor(accounts=["octocat"])
        repo_auditor.check_for_public_repo(self.gh_items[0])
        repo_auditor.check_for_public_repo(self.gh_items[1])
        # Should raise issue:
        self.assertEqual(len(self.gh_items[0].audit_issues), 1)
        self.assertEqual(self.gh_items[0].audit_issues[0].score, 5)
        # Should not raise issues:
        self.assertEqual(len(self.gh_items[1].audit_issues), 0)

    def test_forked_repo_check(self):
        # Only the forked repo (item 1) should be flagged, with score 3.
        repo_auditor = GitHubRepoAuditor(accounts=["octocat"])
        repo_auditor.check_if_forked_repo(self.gh_items[0])
        repo_auditor.check_if_forked_repo(self.gh_items[1])
        # Should raise issue:
        self.assertEqual(len(self.gh_items[1].audit_issues), 1)
        self.assertEqual(self.gh_items[1].audit_issues[0].score, 3)
        # Should not raise issues:
        self.assertEqual(len(self.gh_items[0].audit_issues), 0)

    def test_no_protected_branches_check(self):
        # The repo with no protected branches (item 0) gets a score-0 issue.
        repo_auditor = GitHubRepoAuditor(accounts=["octocat"])
        repo_auditor.check_for_no_protected_branches(self.gh_items[0])
        repo_auditor.check_for_no_protected_branches(self.gh_items[1])
        # Should raise issue:
        self.assertEqual(len(self.gh_items[0].audit_issues), 1)
        self.assertEqual(self.gh_items[0].audit_issues[0].score, 0)
        # Should not raise issues:
        self.assertEqual(len(self.gh_items[1].audit_issues), 0)

    def test_deploy_key_check(self):
        # Item 1's two deploy keys yield two issues (read-only: 3, writable: 5).
        repo_auditor = GitHubRepoAuditor(accounts=["octocat"])
        repo_auditor.check_for_deploy_keys(self.gh_items[0])
        repo_auditor.check_for_deploy_keys(self.gh_items[1])
        # Should raise issue:
        self.assertEqual(len(self.gh_items[1].audit_issues), 2)
        self.assertEqual(self.gh_items[1].audit_issues[0].score, 3)
        self.assertEqual(self.gh_items[1].audit_issues[1].score, 5)
        # Should not raise issues:
        self.assertEqual(len(self.gh_items[0].audit_issues), 0)

    def test_outside_collaborators_check(self):
        # Item 1's collaborators yield two issues (push: 3, admin: 8).
        repo_auditor = GitHubRepoAuditor(accounts=["octocat"])
        repo_auditor.check_for_outside_collaborators(self.gh_items[0])
        repo_auditor.check_for_outside_collaborators(self.gh_items[1])
        # Should raise issue:
        self.assertEqual(len(self.gh_items[1].audit_issues), 2)
        self.assertEqual(self.gh_items[1].audit_issues[0].score, 3)
        self.assertEqual(self.gh_items[1].audit_issues[1].score, 8)
        # Should not raise issues:
        self.assertEqual(len(self.gh_items[0].audit_issues), 0)

    def test_admin_teams_check(self):
        # Item 1's admin team permission yields one score-3 issue.
        repo_auditor = GitHubRepoAuditor(accounts=["octocat"])
        repo_auditor.check_for_admin_teams(self.gh_items[0])
        repo_auditor.check_for_admin_teams(self.gh_items[1])
        # Should raise issue:
        self.assertEqual(len(self.gh_items[1].audit_issues), 1)
        self.assertEqual(self.gh_items[1].audit_issues[0].score, 3)
        # Should not raise issues:
        self.assertEqual(len(self.gh_items[0].audit_issues), 0)
from django.utils.translation import ugettext_lazy as _
from django.contrib.sites.models import Site
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from .forms import LinkForm
from .models import Link
class LinkPlugin(CMSPluginBase):
    """django CMS plugin rendering a configurable link.

    Supports external URLs, internal CMS pages, mailto/phone links and
    anchors; the rendering template is selected per instance.
    """

    model = Link
    form = LinkForm
    name = _('Link')
    text_enabled = True
    allow_children = True

    fieldsets = [
        (None, {
            'fields': (
                'name',
                ('external_link', 'internal_link'),
            )
        }),
        (_('Link settings'), {
            'classes': ('collapse',),
            'fields': (
                ('mailto', 'phone'),
                ('anchor', 'target'),
            )
        }),
        (_('Advanced settings'), {
            'classes': ('collapse',),
            'fields': (
                'template',
                'attributes',
            )
        }),
    ]

    @classmethod
    def get_render_queryset(cls):
        # Fetch the linked page in the same query to avoid an extra lookup
        # per plugin instance at render time.
        queryset = super(LinkPlugin, cls).get_render_queryset()
        return queryset.select_related('internal_link')

    def get_render_template(self, context, instance, placeholder):
        # Template directory is chosen per instance, e.g. 'default' ->
        # djangocms_link/default/link.html
        return 'djangocms_link/{}/link.html'.format(instance.template)

    def render(self, context, instance, placeholder):
        context['link'] = instance.get_link()
        return super(LinkPlugin, self).render(context, instance, placeholder)

    def get_form(self, request, obj=None, **kwargs):
        """Return the plugin form bound to the site of the edited page.

        Falls back to the current site when the plugin is not attached to a
        page (or when resolving the page's site fails).
        """
        form_class = super(LinkPlugin, self).get_form(request, obj, **kwargs)
        # BUGFIX: previously the fallback lived in the `try` statement's
        # `else` clause, which runs on *every* successful pass and therefore
        # always overwrote the site resolved by the if/elif branches. The
        # fallback now only applies when neither branch matched. The bare
        # `except:` was also narrowed so SystemExit/KeyboardInterrupt are no
        # longer swallowed.
        try:
            if obj and obj.page and obj.page.site:
                site = obj.page.site
            elif self.page and self.page.site:
                site = self.page.site
            else:
                site = Site.objects.get_current()
        except Exception:
            site = Site.objects.get_current()

        class Form(form_class):
            def __init__(self, *args, **kwargs):
                super(Form, self).__init__(*args, **kwargs)
                # Restrict internal-link page choices to the resolved site.
                self.for_site(site)

        return Form


plugin_pool.register_plugin(LinkPlugin)
from django.contrib.sites.models import Site
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from .forms import LinkForm
from .models import Link
class LinkPlugin(CMSPluginBase):
model = Link
form = LinkForm
name = _('Link')
text_enabled = True
allow_children = True
fieldsets = [
(None, {
'fields': (
'name',
('external_link', 'internal_link'),
)
}),
(_('Link settings'), {
'classes': ('collapse',),
'fields': (
('mailto', 'phone'),
('anchor', 'target'),
)
}),
(_('Advanced settings'), {
'classes': ('collapse',),
'fields': (
'template',
'attributes',
)
}),
]
@classmethod
def get_render_queryset(cls):
queryset = super(LinkPlugin, cls).get_render_queryset()
return queryset.select_related('internal_link')
def get_render_template(self, context, instance, placeholder):
return 'djangocms_link/{}/link.html'.format(instance.template)
def render(self, context, instance, placeholder):
context['link'] = instance.get_link()
return super(LinkPlugin, self).render(context, instance, placeholder)
def get_form(self, request, obj=None, **kwargs):
form_class = super(LinkPlugin, self).get_form(request, obj, **kwargs)
try:
if obj and obj.page and obj.page.site:
site = obj.page.site
elif self.page and self.page.site:
site = self.page.site
except:
site = Site.objects.get_current()
else:
site = Site.objects.get_current()
class Form(form_class):
def __init__(self, *args, **kwargs):
super(Form, self).__init__(*args, **kwargs)
self.for_site(site)
return Form
plugin_pool.register_plugin(LinkPlugin) | 0.418578 | 0.105487 |
from unittest import TestCase, skip # use TestCase and skip
from pathlib import Path # use Path
from itertools import zip_longest
from veniq.utils.ast_builder import build_ast
from veniq.ast_framework import AST, ASTNodeType
from veniq.ast_framework.ast import MemberReferenceParams, MethodInvocationParams
import os # use os
import sys # use sys
class ASTTestSuite(TestCase):
    """Tests for building and traversing the veniq AST wrapper.

    Fixtures (*.java files) live next to this test module; expected node
    orderings and parameter tuples are kept as class-level constants below.
    """

    def test_parsing(self):
        """A parsed file yields nodes in the expected pre-order traversal."""
        ast = self._build_ast("SimpleClass.java")
        actual_node_types = [node.node_type for node in ast]
        self.assertEqual(actual_node_types,
                         ASTTestSuite._java_simple_class_preordered)

    def test_subtrees_selection(self):
        """Subtrees rooted at BASIC_TYPE nodes match the expected index lists."""
        ast = self._build_ast("SimpleClass.java")
        subtrees = ast.get_subtrees(ASTNodeType.BASIC_TYPE)
        for actual_subtree, expected_subtree in \
                zip_longest(subtrees, ASTTestSuite._java_simple_class_basic_type_subtrees):
            with self.subTest():
                self.assertEqual([node.node_index for node in actual_subtree],
                                 expected_subtree)

    def test_complex_fields(self):
        """A static initializer block and a method both appear in the class body."""
        ast = self._build_ast('StaticConstructor.java')
        class_declaration = next((declaration for declaration in ast.get_root().types if
                                  declaration.node_type == ASTNodeType.CLASS_DECLARATION), None)
        assert class_declaration is not None, "Cannot find class declaration"
        static_constructor, method_declaration = class_declaration.body
        self.assertEqual([node.node_type for node in static_constructor],
                         [ASTNodeType.STATEMENT_EXPRESSION, ASTNodeType.STATEMENT_EXPRESSION])
        self.assertEqual(method_declaration.node_type, ASTNodeType.METHOD_DECLARATION)

    @skip('Method "get_member_reference_params" is deprecated')
    def test_member_reference_params(self):
        ast = self._build_ast("MemberReferencesExample.java")
        for node, expected_params in zip_longest(ast.get_nodes(ASTNodeType.MEMBER_REFERENCE),
                                                 ASTTestSuite._expected_member_reference_params):
            self.assertEqual(ast.get_member_reference_params(node), expected_params)

    @skip('Method "get_method_invocation_params" is deprecated')
    def test_method_invocation_params(self):
        ast = self._build_ast("MethodInvokeExample.java")
        for node, expected_params in zip_longest(ast.get_nodes(ASTNodeType.METHOD_INVOCATION),
                                                 ASTTestSuite._expected_method_invocation_params):
            self.assertEqual(ast.get_method_invocation_params(node), expected_params)

    def _build_ast(self, filename: str):
        """Parse a fixture file located next to this module into an AST."""
        javalang_ast = build_ast(str(Path(__file__).parent.absolute() / filename))
        return AST.build_from_javalang(javalang_ast)

    # Expected pre-order node sequence for SimpleClass.java.
    _java_simple_class_preordered = [
        ASTNodeType.COMPILATION_UNIT,
        ASTNodeType.CLASS_DECLARATION,
        ASTNodeType.COLLECTION,
        ASTNodeType.STRING,
        ASTNodeType.FIELD_DECLARATION,
        ASTNodeType.COLLECTION,
        ASTNodeType.STRING,
        ASTNodeType.BASIC_TYPE,
        ASTNodeType.STRING,
        ASTNodeType.VARIABLE_DECLARATOR,
        ASTNodeType.STRING,
        ASTNodeType.LITERAL,
        ASTNodeType.STRING,
        ASTNodeType.METHOD_DECLARATION,
        ASTNodeType.COLLECTION,
        ASTNodeType.STRING,
        ASTNodeType.BASIC_TYPE,
        ASTNodeType.STRING,
        ASTNodeType.STRING,
        ASTNodeType.STATEMENT_EXPRESSION,
        ASTNodeType.ASSIGNMENT,
        ASTNodeType.MEMBER_REFERENCE,
        ASTNodeType.STRING,
        ASTNodeType.STRING,
        ASTNodeType.LITERAL,
        ASTNodeType.STRING,
        ASTNodeType.STRING,
        ASTNodeType.RETURN_STATEMENT,
        ASTNodeType.MEMBER_REFERENCE,
        ASTNodeType.STRING,
        ASTNodeType.STRING,
    ]

    # Node-index sets of the BASIC_TYPE-rooted subtrees in SimpleClass.java.
    _java_simple_class_basic_type_subtrees = [
        [8, 9],
        [17, 18],
    ]

    _expected_member_reference_params = [
        MemberReferenceParams('', 'block_variable', ''),
        MemberReferenceParams('', 'method_parameter', ''),
        MemberReferenceParams('', 'block_variable', '++'),
        MemberReferenceParams('', 'field', ''),
        MemberReferenceParams('', 'block_variable', ''),
        MemberReferenceParams('Something', 'outer_field', ''),
        MemberReferenceParams('', 'field', ''),
    ]

    _expected_method_invocation_params = [
        MethodInvocationParams(object_name='System.out', method_name='println'),
        MethodInvocationParams(object_name='', method_name='method1'),
    ]
from pathlib import Path # use Path
from itertools import zip_longest
from veniq.utils.ast_builder import build_ast
from veniq.ast_framework import AST, ASTNodeType
from veniq.ast_framework.ast import MemberReferenceParams, MethodInvocationParams
import os # use os
import sys # use sys
# create class ASTTestSuite(TestCase):
class ASTTestSuite(TestCase):
def test_parsing(self):
ast = self._build_ast("SimpleClass.java")
actual_node_types = [node.node_type for node in ast]
self.assertEqual(actual_node_types,
ASTTestSuite._java_simple_class_preordered)
# create def test_subtrees_selection(self):
def test_subtrees_selection(self):
ast = self._build_ast("SimpleClass.java")
subtrees = ast.get_subtrees(ASTNodeType.BASIC_TYPE)
for actual_subtree, expected_subtree in \
zip_longest(subtrees, ASTTestSuite._java_simple_class_basic_type_subtrees):
with self.subTest():
self.assertEqual([node.node_index for node in actual_subtree],
expected_subtree)
# create def test_complex_fields(self):
def test_complex_fields(self):
ast = self._build_ast('StaticConstructor.java')
class_declaration = next((declaration for declaration in ast.get_root().types if
declaration.node_type == ASTNodeType.CLASS_DECLARATION), None)
assert class_declaration is not None, "Cannot find class declaration"
static_constructor, method_declaration = class_declaration.body
self.assertEqual([node.node_type for node in static_constructor],
[ASTNodeType.STATEMENT_EXPRESSION, ASTNodeType.STATEMENT_EXPRESSION])
self.assertEqual(method_declaration.node_type, ASTNodeType.METHOD_DECLARATION)
@skip('Method "get_member_reference_params" is deprecated')
def test_member_reference_params(self):
ast = self._build_ast("MemberReferencesExample.java")
for node, expected_params in zip_longest(ast.get_nodes(ASTNodeType.MEMBER_REFERENCE),
ASTTestSuite._expected_member_reference_params):
self.assertEqual(ast.get_member_reference_params(node), expected_params)
@skip('Method "get_method_invocation_params" is deprecated')
def test_method_invocation_params(self):
ast = self._build_ast("MethodInvokeExample.java")
for node, expected_params in zip_longest(ast.get_nodes(ASTNodeType.METHOD_INVOCATION),
ASTTestSuite._expected_method_invocation_params):
self.assertEqual(ast.get_method_invocation_params(node), expected_params)
# create def _build_ast(self, filename: str):
def _build_ast(self, filename: str):
javalang_ast = build_ast(str(Path(__file__).parent.absolute() / filename))
return AST.build_from_javalang(javalang_ast)
_java_simple_class_preordered = [
ASTNodeType.COMPILATION_UNIT,
ASTNodeType.CLASS_DECLARATION,
ASTNodeType.COLLECTION,
ASTNodeType.STRING,
ASTNodeType.FIELD_DECLARATION,
ASTNodeType.COLLECTION,
ASTNodeType.STRING,
ASTNodeType.BASIC_TYPE,
ASTNodeType.STRING,
ASTNodeType.VARIABLE_DECLARATOR,
ASTNodeType.STRING,
ASTNodeType.LITERAL,
ASTNodeType.STRING,
ASTNodeType.METHOD_DECLARATION,
ASTNodeType.COLLECTION,
ASTNodeType.STRING,
ASTNodeType.BASIC_TYPE,
ASTNodeType.STRING,
ASTNodeType.STRING,
ASTNodeType.STATEMENT_EXPRESSION,
ASTNodeType.ASSIGNMENT,
ASTNodeType.MEMBER_REFERENCE,
ASTNodeType.STRING,
ASTNodeType.STRING,
ASTNodeType.LITERAL,
ASTNodeType.STRING,
ASTNodeType.STRING,
ASTNodeType.RETURN_STATEMENT,
ASTNodeType.MEMBER_REFERENCE,
ASTNodeType.STRING,
ASTNodeType.STRING,
]
_java_simple_class_basic_type_subtrees = [
[8, 9],
[17, 18],
]
_expected_member_reference_params = [
MemberReferenceParams('', 'block_variable', ''),
MemberReferenceParams('', 'method_parameter', ''),
MemberReferenceParams('', 'block_variable', '++'),
MemberReferenceParams('', 'field', ''),
MemberReferenceParams('', 'block_variable', ''),
MemberReferenceParams('Something', 'outer_field', ''),
MemberReferenceParams('', 'field', ''),
]
_expected_method_invocation_params = [
MethodInvocationParams(object_name='System.out', method_name='println'),
MethodInvocationParams(object_name='', method_name='method1'),
] | 0.492432 | 0.481149 |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import os
import random
import time
import copy
import multiprocessing
import psutil
import socket
import warnings
from collections import OrderedDict, defaultdict, deque
import numpy as np
import torch
import torch.distributed as distrib
import torch.nn as nn
import torch.nn.functional as F
import psutil
# import v4r_example
from gym import spaces
from gym.spaces import Dict as SpaceDict
from torch.optim.lr_scheduler import LambdaLR
from bps_nav.common.env_utils import construct_envs
from bps_nav.common.rollout_storage import DoubleBufferedRolloutStorage
from bps_nav.common.tensorboard_utils import TensorboardWriter
from bps_nav.common.utils import Timing, batch_obs, linear_decay
from bps_nav.rl.ddppo.algo.ddp_utils import (
EXIT,
REQUEUE,
add_signal_handlers,
init_distrib_slurm,
load_interrupted_state,
requeue_job,
save_interrupted_state,
)
from bps_nav.rl.ddppo.algo.ddppo import DDPPO
from bps_nav.common.tree_utils import (
tree_select,
tree_copy_in_place,
)
from bps_nav.rl.ppo.ppo_trainer import PPOTrainer
from bps_nav.rl.ddppo.policy.resnet import Dropblock
import socket
from bps_nav.common.logger import logger
from bps_nav.rl.ddppo.policy import ResNetPolicy
try:
import psutil
except ImportError:
psutil = None
# Silence the LR-scheduler save/load ordering warning.
# NOTE(review): SAVE_STATE_WARNING was removed in newer PyTorch releases;
# this line assumes an older torch version — TODO confirm against pinned deps.
warnings.filterwarnings("ignore", torch.optim.lr_scheduler.SAVE_STATE_WARNING)

# Enable cuDNN autotuning; determinism is deliberately traded for speed.
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = False

# Number of initial updates excluded from throughput accounting (warm-up).
BURN_IN_UPDATES = 50

# Benchmark mode is toggled via the BPS_BENCHMARK environment variable.
BPS_BENCHMARK = os.environ.get("BPS_BENCHMARK", "0") != "0"
if BPS_BENCHMARK:
    logger.warn("In benchmark mode")
def set_cpus(local_rank, world_size):
    """Pin this process to a disjoint subset of the host's CPU cores.

    Splits the cores currently available to the process across up to 8
    local ranks so co-located workers do not contend for the same cores,
    then applies the selection via ``psutil``'s CPU-affinity API.

    :param local_rank: rank of this process on the local host
    :param world_size: total number of distributed processes
    """
    local_size = min(world_size, 8)
    curr_process = psutil.Process()
    total_cpus = curr_process.cpu_affinity()
    total_cpu_count = len(total_cpus)

    # Assuming things where already set
    if total_cpu_count > multiprocessing.cpu_count() / world_size:
        # Interleave the first and second halves of the core list —
        # presumably pairing each physical core with its hyperthread
        # sibling so a rank gets both; TODO confirm on target topology.
        orig_cpus = total_cpus
        total_cpus = []
        for i in range(total_cpu_count // 2):
            total_cpus.append(orig_cpus[i])
            total_cpus.append(orig_cpus[i + total_cpu_count // 2])

    ptr = 0
    local_cpu_count = 0
    local_cpus = []

    CORE_GROUPING = min(
        local_size,
        4 if total_cpu_count / 2 >= 20 else (2 if total_cpu_count / 2 >= 10 else 1),
    )
    # NOTE(review): the grouping computed above is immediately discarded
    # here — grouping is effectively disabled (dead computation).
    CORE_GROUPING = 1

    core_dist_size = max(local_size // CORE_GROUPING, 1)
    core_dist_rank = local_rank // CORE_GROUPING

    # Walk rank-by-rank to find this group's starting offset (ptr) and its
    # share of cores; earlier groups absorb the remainder one core at a time.
    for r in range(core_dist_rank + 1):
        ptr += local_cpu_count
        local_cpu_count = total_cpu_count // core_dist_size + (
            1 if r < (total_cpu_count % core_dist_size) else 0
        )

    local_cpus += total_cpus[ptr : ptr + local_cpu_count]

    # With CORE_GROUPING == 1 this list is empty and nothing is popped.
    pop_inds = [
        ((local_rank + offset + 1) % CORE_GROUPING)
        for offset in range(CORE_GROUPING - 1)
    ]
    for ind in sorted(pop_inds, reverse=True):
        local_cpus.pop(ind)

    # Single-process benchmark runs get a fixed block of 12 cores.
    if BPS_BENCHMARK and world_size == 1:
        local_cpus = total_cpus[0:12]

    curr_process.cpu_affinity(local_cpus)

    logger.info(
        "Rank {} uses cpus {}".format(local_rank, sorted(curr_process.cpu_affinity()))
    )
class DDPPOTrainer(PPOTrainer):
    """Decentralized Distributed PPO trainer.

    Runs one worker per GPU/process; workers collect rollouts in parallel,
    synchronize gradients via ``torch.distributed``, and use a shared
    key-value store to implement the straggler cutoff described below.
    """

    # DD-PPO cuts rollouts short to mitigate the straggler effect
    # This, in theory, can cause some rollouts to be very short.
    # All rollouts contributed equally to the loss/model-update,
    # thus very short rollouts can be problematic. This threshold
    # limits the how short a short rollout can be as a fraction of the
    # max rollout length
    SHORT_ROLLOUT_THRESHOLD: float = 0.25

    def __init__(self, config=None, resume_from=None):
        # When resuming an interrupted run, reuse the saved config so
        # training continues under identical settings.
        self.resume_from = resume_from
        interrupted_state = load_interrupted_state(resume_from=self.resume_from)
        if interrupted_state is not None:
            config = interrupted_state["config"]

        super().__init__(config)

    def _setup_actor_critic_agent(self, ppo_cfg) -> None:
        r"""Sets up actor critic and agent for DD-PPO.

        Args:
            ppo_cfg: config node with relevant params

        Returns:
            None
        """
        logger.add_filehandler(self.config.LOG_FILE)

        # Older configs may not define use_avg_pool; default to False.
        if hasattr(self.config.RL.DDPPO, 'use_avg_pool'):
            use_avg_pool = self.config.RL.DDPPO.use_avg_pool
        else:
            use_avg_pool = False

        self.actor_critic = ResNetPolicy(
            observation_space=self.observation_space,
            action_space=self.action_space,
            hidden_size=ppo_cfg.hidden_size,
            rnn_type=self.config.RL.DDPPO.rnn_type,
            num_recurrent_layers=self.config.RL.DDPPO.num_recurrent_layers,
            backbone=self.config.RL.DDPPO.backbone,
            resnet_baseplanes=self.config.RL.DDPPO.resnet_baseplanes,
            use_avg_pool=use_avg_pool,
        )
        self.actor_critic.to(self.device)

        # Optionally initialize from pretrained weights: either the full
        # policy ("pretrained") or only the visual encoder.
        if self.config.RL.DDPPO.pretrained_encoder or self.config.RL.DDPPO.pretrained:
            pretrained_state = torch.load(
                self.config.RL.DDPPO.pretrained_weights, map_location="cpu"
            )

            if self.config.RL.DDPPO.pretrained:
                self.actor_critic.load_state_dict(
                    {
                        k[len("actor_critic.") :]: v
                        for k, v in pretrained_state["state_dict"].items()
                    }
                )
            elif self.config.RL.DDPPO.pretrained_encoder:
                prefix = "actor_critic.net.visual_encoder."
                self.actor_critic.ac.net.visual_encoder.load_state_dict(
                    {
                        k[len(prefix) :]: v
                        for k, v in pretrained_state["state_dict"].items()
                        if k.startswith(prefix)
                    }
                )

        # Optionally freeze the visual encoder entirely.
        if not self.config.RL.DDPPO.train_encoder:
            self._static_encoder = True
            for param in self.actor_critic.ac.net.visual_encoder.parameters():
                param.requires_grad_(False)

        if self.config.RL.DDPPO.reset_critic:
            self.actor_critic.ac.critic.layer_init()

        self.agent = DDPPO(actor_critic=self.actor_critic, ppo_cfg=ppo_cfg)
        self.agent.to(self.device)

    def _update_policy(self):
        # Intentionally a no-op — presumably overrides base-class behavior;
        # TODO confirm against PPOTrainer.
        pass

    def _n_buffered_sampling(
        self,
        rollouts,
        current_episode_reward,
        running_episode_stats,
        buffer_ranges,
        real_steps,
        num_rollouts_done_store,
    ):
        """Collect up to ``real_steps`` environment steps into the rollout
        buffers, overlapping simulation and inference when double-buffered.

        Returns the number of steps actually inserted (may be fewer than
        ``real_steps`` when enough other workers have already finished —
        the DD-PPO straggler cutoff).
        """
        count_steps_delta = 0

        sim_step_reses = [None for _ in range(len(rollouts))]
        actions = [None for _ in range(len(rollouts))]
        is_double_buffered = len(rollouts) > 1

        # Prime each buffer with an initial action; with double buffering
        # the first simulation is kicked off immediately.
        for idx in range(len(rollouts)):
            actions[idx] = self._inference(rollouts, idx)

            if is_double_buffered and idx == 0:
                self._start_simulation(actions[idx], idx)

        for step in range(real_steps):
            is_last_step = (step + 1) == real_steps
            # Straggler cutoff: once past the minimum rollout fraction, stop
            # early if enough workers have already finished their rollouts.
            if (
                (step + 1) >= max(real_steps * self.SHORT_ROLLOUT_THRESHOLD, 1)
            ) and int(num_rollouts_done_store.get("num_done")) >= (
                self.config.RL.DDPPO.sync_frac * self.world_size
            ):
                is_last_step = True

            for idx in range(len(rollouts)):
                if is_double_buffered:
                    sim_step_reses[idx] = self._wait_simulation(idx)

                    # Kick off the other buffer's simulation while we
                    # process this one's results.
                    if len(rollouts) > 1:
                        other_idx = (idx + 1) % len(rollouts)
                        if not is_last_step or other_idx > idx:
                            self._start_simulation(actions[other_idx], other_idx)

                    self._render(idx)
                elif True:
                    # NOTE(review): `elif True:` makes the `else` branch
                    # below (direct _step_simulation) unreachable dead code.
                    self._start_simulation(actions[idx], idx)
                    sim_step_reses[idx] = self._wait_simulation(idx)
                    self._render(idx)
                else:
                    sim_step_reses[idx] = self._step_simulation(actions[idx], idx)

                self._update_stats(
                    rollouts,
                    current_episode_reward,
                    running_episode_stats,
                    sim_step_reses[idx],
                    buffer_ranges[idx],
                    idx,
                )

                count_steps_delta += self._sync_renderer_and_insert(
                    rollouts, sim_step_reses[idx], idx
                )

                if not is_last_step:
                    actions[idx] = self._inference(rollouts, idx)

            if is_last_step:
                break

        return count_steps_delta

    def _warmup(self, rollouts):
        """Benchmark inference and learning throughput before training.

        Runs throwaway inference/update passes (logging per-call timings on
        rank 0), then restores model and optimizer state so the warm-up has
        no effect on training, and resets the timing accumulator.
        """
        # Snapshot state so the warm-up updates can be undone afterwards.
        model_state = {k: v.clone() for k, v in self.agent.state_dict().items()}
        optim_state = self.agent.optimizer.state.copy()

        self.agent.eval()
        for _ in range(20):
            self._inference(rollouts, 0)

        # Do a cache empty as sometimes cudnn searching
        # doesn't do that
        torch.cuda.empty_cache()

        t_inference_start = time.time()
        n_infers = 200
        for _ in range(n_infers):
            self._inference(rollouts, 0)

        if self.world_rank == 0:
            logger.info(
                "Inference time: {:.3f} ms".format(
                    (time.time() - t_inference_start) / n_infers * 1000
                )
            )
            logger.info(
                "PyTorch CUDA Memory Cache Size: {:.3f} GB".format(
                    torch.cuda.memory_reserved(self.device) / 1e9
                )
            )

        self.agent.train()
        for _ in range(10):
            self._update_agent(rollouts, warmup=True)

        # Do a cache empty as sometimes cudnn searching
        # doesn't do that
        torch.cuda.empty_cache()

        t_learning_start = time.time()
        n_learns = 15
        for _ in range(n_learns):
            self._update_agent(rollouts, warmup=True)

        if self.world_rank == 0:
            logger.info(
                "Learning time: {:.3f} ms".format(
                    (time.time() - t_learning_start) / n_learns * 1000
                )
            )
            logger.info(self.timing)
            logger.info(
                "PyTorch CUDA Memory Cache Size: {:.3f} GB".format(
                    torch.cuda.memory_reserved(self.device) / 1e9
                )
            )

        # Undo the warm-up updates and reset timing statistics.
        self.agent.load_state_dict(model_state)
        self.agent.optimizer.state = optim_state
        self.agent.ada_scale.zero_grad()

        self.timing = Timing()

    def train(self) -> None:
        r"""Main method for DD-PPO.

        Returns:
            None
        """
        import apex

        self.local_rank, tcp_store = init_distrib_slurm(
            self.config.RL.DDPPO.distrib_backend
        )
        # add_signal_handlers()

        self.timing = Timing()

        # Stores the number of workers that have finished their rollout
        num_rollouts_done_store = distrib.PrefixStore("rollout_tracker", tcp_store)
        num_rollouts_done_store.set("num_done", "0")

        self.world_rank = distrib.get_rank()
        self.world_size = distrib.get_world_size()

        set_cpus(self.local_rank, self.world_size)

        self.config.defrost()
        self.config.TORCH_GPU_ID = self.local_rank
        self.config.SIMULATOR_GPU_ID = self.local_rank
        # Multiply by the number of simulators to make sure they also get unique seeds
        self.config.TASK_CONFIG.SEED += self.world_rank * self.config.SIM_BATCH_SIZE
        self.config.freeze()

        random.seed(self.config.TASK_CONFIG.SEED)
        np.random.seed(self.config.TASK_CONFIG.SEED)
        torch.manual_seed(self.config.TASK_CONFIG.SEED)

        if torch.cuda.is_available():
            self.device = torch.device("cuda", self.local_rank)
            torch.cuda.set_device(self.device)
        else:
            self.device = torch.device("cpu")

        double_buffered = False
        self._num_worker_groups = self.config.NUM_PARALLEL_SCENES

        self._depth = self.config.DEPTH
        self._color = self.config.COLOR

        # Build the observation space from the task config: pointgoal sensor
        # for pointnav, plus optional rgb/depth image sensors below.
        if self.config.TASK.lower() == "pointnav":
            self.observation_space = SpaceDict(
                {
                    "pointgoal_with_gps_compass": spaces.Box(
                        low=0.0, high=1.0, shape=(2,), dtype=np.float32
                    )
                }
            )
        else:
            self.observation_space = SpaceDict({})

        self.action_space = spaces.Discrete(4)

        if self._color:
            self.observation_space = SpaceDict(
                {
                    "rgb": spaces.Box(
                        low=np.finfo(np.float32).min,
                        high=np.finfo(np.float32).max,
                        shape=(3, *self.config.RESOLUTION),
                        dtype=np.uint8,
                    ),
                    **self.observation_space.spaces,
                }
            )

        if self._depth:
            self.observation_space = SpaceDict(
                {
                    "depth": spaces.Box(
                        low=np.finfo(np.float32).min,
                        high=np.finfo(np.float32).max,
                        shape=(1, *self.config.RESOLUTION),
                        dtype=np.float32,
                    ),
                    **self.observation_space.spaces,
                }
            )

        ppo_cfg = self.config.RL.PPO
        if not os.path.isdir(self.config.CHECKPOINT_FOLDER) and self.world_rank == 0:
            os.makedirs(self.config.CHECKPOINT_FOLDER)

        self._setup_actor_critic_agent(ppo_cfg)

        self.count_steps = 0
        burn_steps = 0
        burn_time = 0
        count_checkpoints = 0
        prev_time = 0
        self.update = 0

        # Linear-scaling-style LR multiplier derived from the global batch
        # size; only active when scale_lr is set and AdaScale is not in use.
        LR_SCALE = (
            max(
                np.sqrt(
                    ppo_cfg.num_steps
                    * self.config.SIM_BATCH_SIZE
                    * ppo_cfg.num_accumulate_steps
                    / ppo_cfg.num_mini_batch
                    * self.world_size
                    / (128 * 2)
                ),
                1.0,
            )
            if (self.config.RL.DDPPO.scale_lr and not self.config.RL.PPO.ada_scale)
            else 1.0
        )

        def cosine_decay(x):
            # Half-cosine from 1 to 0 over x in [0, 1); 0 afterwards.
            if x < 1:
                return (np.cos(x * np.pi) + 1.0) / 2.0
            else:
                return 0.0

        def warmup_fn(x):
            # Linear ramp from 0.5*LR_SCALE to LR_SCALE.
            return LR_SCALE * (0.5 + 0.5 * x)

        def decay_fn(x):
            # Cosine decay from LR_SCALE down to DECAY_TARGET*LR_SCALE.
            return LR_SCALE * (DECAY_TARGET + (1 - DECAY_TARGET) * cosine_decay(x))

        # NOTE(review): the `or True` below forces the first alternative in
        # both expressions — the scale_lr-dependent values are dead code.
        DECAY_TARGET = (
            0.01 / LR_SCALE
            if self.config.RL.PPO.ada_scale or True
            else (0.25 / LR_SCALE if self.config.RL.DDPPO.scale_lr else 1.0)
        )
        DECAY_PERCENT = 1.0 if self.config.RL.PPO.ada_scale or True else 0.5
        WARMUP_PERCENT = (
            0.01
            if (self.config.RL.DDPPO.scale_lr and not self.config.RL.PPO.ada_scale)
            else 0.0
        )

        def lr_fn():
            # LR multiplier as a function of overall training progress.
            x = self.percent_done()
            if x < WARMUP_PERCENT:
                return warmup_fn(x / WARMUP_PERCENT)
            else:
                return decay_fn((x - WARMUP_PERCENT) / DECAY_PERCENT)

        lr_scheduler = LambdaLR(
            optimizer=self.agent.optimizer, lr_lambda=lambda x: lr_fn()
        )

        interrupted_state = load_interrupted_state(resume_from=self.resume_from)
        if interrupted_state is not None:
            self.agent.load_state_dict(interrupted_state["state_dict"])

        self.agent.init_amp(self.config.SIM_BATCH_SIZE)
        self.actor_critic.init_trt(self.config.SIM_BATCH_SIZE)
        self.actor_critic.script_net()
        self.agent.init_distributed(find_unused_params=False)

        if self.world_rank == 0:
            logger.info(
                "agent number of trainable parameters: {}".format(
                    sum(
                        param.numel()
                        for param in self.agent.parameters()
                        if param.requires_grad
                    )
                )
            )

        if self._static_encoder:
            self._encoder = self.actor_critic.net.visual_encoder
            self.observation_space = SpaceDict(
                {
                    "visual_features": spaces.Box(
                        low=np.finfo(np.float32).min,
                        high=np.finfo(np.float32).max,
                        shape=self._encoder.output_shape,
                        dtype=np.float32,
                    ),
                    **self.observation_space,
                }
            )
            with torch.no_grad():
                # NOTE(review): `batch` is not defined at this point in the
                # method — this branch would raise NameError if taken.
                batch["visual_features"] = self._encoder(batch)

        nenvs = self.config.SIM_BATCH_SIZE
        rollouts = DoubleBufferedRolloutStorage(
            ppo_cfg.num_steps,
            nenvs,
            self.observation_space,
            self.action_space,
            ppo_cfg.hidden_size,
            num_recurrent_layers=self.actor_critic.num_recurrent_layers,
            use_data_aug=ppo_cfg.use_data_aug,
            aug_type=ppo_cfg.aug_type,
            double_buffered=double_buffered,
            vtrace=ppo_cfg.vtrace,
        )
        rollouts.to(self.device)
        rollouts.to_fp16()

        self._warmup(rollouts)

        (
            self.envs,
            self._observations,
            self._rewards,
            self._masks,
            self._rollout_infos,
            self._syncs,
        ) = construct_envs(
            self.config,
            num_worker_groups=self.config.NUM_PARALLEL_SCENES,
            double_buffered=double_buffered,
        )

        def _setup_render_and_populate_initial_frame():
            # Reset each buffer's envs and copy the first observation into
            # slot 0 of the rollout storage.
            for idx in range(2 if double_buffered else 1):
                self.envs.reset(idx)

                batch = self._observations[idx]
                self._syncs[idx].wait()

                tree_copy_in_place(
                    tree_select(0, rollouts[idx].storage_buffers["observations"]),
                    batch,
                )

        _setup_render_and_populate_initial_frame()

        current_episode_reward = torch.zeros(nenvs, 1)
        running_episode_stats = dict(
            count=torch.zeros(nenvs, 1,), reward=torch.zeros(nenvs, 1,),
        )
        window_episode_stats = defaultdict(
            lambda: deque(maxlen=ppo_cfg.reward_window_size)
        )
        time_per_frame_window = deque(maxlen=ppo_cfg.reward_window_size)

        # Slices of the global env batch owned by each rollout buffer.
        buffer_ranges = []
        for i in range(2 if double_buffered else 1):
            start_ind = buffer_ranges[-1].stop if i > 0 else 0
            buffer_ranges.append(
                slice(
                    start_ind,
                    start_ind
                    + self.config.SIM_BATCH_SIZE // (2 if double_buffered else 1),
                )
            )

        # Restore bookkeeping and optimizer/scheduler state when resuming.
        if interrupted_state is not None:
            requeue_stats = interrupted_state["requeue_stats"]
            self.count_steps = requeue_stats["count_steps"]
            self.update = requeue_stats["start_update"]
            count_checkpoints = requeue_stats["count_checkpoints"]
            prev_time = requeue_stats["prev_time"]
            burn_steps = requeue_stats["burn_steps"]
            burn_time = requeue_stats["burn_time"]

            self.agent.ada_scale.load_state_dict(interrupted_state["ada_scale_state"])

            lr_scheduler.load_state_dict(interrupted_state["lr_sched_state"])

            if "amp_state" in interrupted_state:
                apex.amp.load_state_dict(interrupted_state["amp_state"])

            if "grad_scaler_state" in interrupted_state:
                self.agent.grad_scaler.load_state_dict(
                    interrupted_state["grad_scaler_state"]
                )

        # Only rank 0 writes tensorboard; other ranks get a no-op context.
        with (
            TensorboardWriter(
                self.config.TENSORBOARD_DIR,
                flush_secs=self.flush_secs,
                purge_step=int(self.count_steps),
            )
            if self.world_rank == 0
            else contextlib.suppress()
        ) as writer:
            distrib.barrier()
            t_start = time.time()
            while not self.is_done():
                t_rollout_start = time.time()
                # Record where the burn-in period ends so FPS statistics
                # exclude warm-up updates.
                if self.update == BURN_IN_UPDATES:
                    burn_time = t_rollout_start - t_start
                    burn_steps = self.count_steps

                if ppo_cfg.use_linear_clip_decay:
                    self.agent.clip_param = ppo_cfg.clip_param * linear_decay(
                        self.percent_done(), final_decay=ppo_cfg.decay_factor,
                    )

                # Periodically (or on a requeue signal) persist full state so
                # the job can be requeued and resumed.
                if (
                    not BPS_BENCHMARK
                    and (REQUEUE.is_set() or ((self.update + 1) % 100) == 0)
                    and self.world_rank == 0
                ):
                    requeue_stats = dict(
                        count_steps=self.count_steps,
                        count_checkpoints=count_checkpoints,
                        start_update=self.update,
                        prev_time=(time.time() - t_start) + prev_time,
                        burn_time=burn_time,
                        burn_steps=burn_steps,
                    )

                    def _cast(param):
                        # Upcast fp16 params for a portable saved state.
                        if "Half" in param.type():
                            param = param.to(dtype=torch.float32)

                        return param

                    save_interrupted_state(
                        dict(
                            state_dict={
                                k: _cast(v) for k, v in self.agent.state_dict().items()
                            },
                            ada_scale_state=self.agent.ada_scale.state_dict(),
                            lr_sched_state=lr_scheduler.state_dict(),
                            config=self.config,
                            requeue_stats=requeue_stats,
                            grad_scaler_state=self.agent.grad_scaler.state_dict(),
                        )
                    )

                # Graceful shutdown: tear down envs and requeue the job.
                if EXIT.is_set():
                    self._observations = None
                    self._rewards = None
                    self._masks = None
                    self._rollout_infos = None
                    self._syncs = None
                    del self.envs
                    self.envs = None

                    requeue_job()
                    return

                self.agent.eval()

                count_steps_delta = self._n_buffered_sampling(
                    rollouts,
                    current_episode_reward,
                    running_episode_stats,
                    buffer_ranges,
                    ppo_cfg.num_steps,
                    num_rollouts_done_store,
                )

                num_rollouts_done_store.add("num_done", 1)

                if not rollouts.vtrace:
                    self._compute_returns(ppo_cfg, rollouts)

                (value_loss, action_loss, dist_entropy) = self._update_agent(rollouts)

                if self.world_rank == 0:
                    num_rollouts_done_store.set("num_done", "0")

                lr_scheduler.step()

                with self.timing.add_time("Logging"):
                    # All-reduce episode stats across workers (sorted key
                    # order keeps the stacked tensor consistent on all ranks).
                    stats_ordering = list(sorted(running_episode_stats.keys()))
                    stats = torch.stack(
                        [running_episode_stats[k] for k in stats_ordering], 0,
                    ).to(device=self.device)
                    distrib.all_reduce(stats)
                    stats = stats.to(device="cpu")

                    for i, k in enumerate(stats_ordering):
                        window_episode_stats[k].append(stats[i])

                    stats = torch.tensor(
                        [
                            value_loss,
                            action_loss,
                            count_steps_delta,
                            *self.envs.swap_stats,
                        ],
                        device=self.device,
                    )
                    distrib.all_reduce(stats)
                    stats = stats.to(device="cpu")

                    count_steps_delta = int(stats[2].item())
                    self.count_steps += count_steps_delta

                    time_per_frame_window.append(
                        (time.time() - t_rollout_start) / count_steps_delta
                    )

                    if self.world_rank == 0:
                        losses = [
                            stats[0].item() / self.world_size,
                            stats[1].item() / self.world_size,
                        ]
                        deltas = {
                            k: (
                                (v[-1] - v[0]).sum().item()
                                if len(v) > 1
                                else v[0].sum().item()
                            )
                            for k, v in window_episode_stats.items()
                        }
                        deltas["count"] = max(deltas["count"], 1.0)

                        writer.add_scalar(
                            "reward",
                            deltas["reward"] / deltas["count"],
                            self.count_steps,
                        )

                        # Check to see if there are any metrics
                        # that haven't been logged yet
                        metrics = {
                            k: v / deltas["count"]
                            for k, v in deltas.items()
                            if k not in {"reward", "count"}
                        }
                        if len(metrics) > 0:
                            writer.add_scalars("metrics", metrics, self.count_steps)

                        writer.add_scalars(
                            "losses",
                            {k: l for l, k in zip(losses, ["value", "policy"])},
                            self.count_steps,
                        )

                        optim = self.agent.optimizer
                        writer.add_scalar(
                            "optimizer/base_lr",
                            optim.param_groups[-1]["lr"],
                            self.count_steps,
                        )
                        if "gain" in optim.param_groups[-1]:
                            for idx, group in enumerate(optim.param_groups):
                                writer.add_scalar(
                                    f"optimizer/lr_{idx}",
                                    group["lr"] * group["gain"],
                                    self.count_steps,
                                )
                                writer.add_scalar(
                                    f"optimizer/gain_{idx}",
                                    group["gain"],
                                    self.count_steps,
                                )

                        # log stats
                        if (
                            self.update > 0
                            and self.update % self.config.LOG_INTERVAL == 0
                        ):
                            logger.info(
                                "update: {}\twindow fps: {:.3f}\ttotal fps: {:.3f}\tframes: {}".format(
                                    self.update,
                                    1.0
                                    / (
                                        sum(time_per_frame_window)
                                        / len(time_per_frame_window)
                                    ),
                                    (self.count_steps - burn_steps)
                                    / ((time.time() - t_start) + prev_time - burn_time),
                                    self.count_steps,
                                )
                            )

                            logger.info(
                                "swap percent: {:.3f}\tscenes in use: {:.3f}\tenvs per scene: {:.3f}".format(
                                    stats[3].item() / self.world_size,
                                    stats[4].item() / self.world_size,
                                    stats[5].item() / self.world_size,
                                )
                            )

                            logger.info(
                                "Average window size: {} {}".format(
                                    len(window_episode_stats["count"]),
                                    " ".join(
                                        "{}: {:.3f}".format(k, v / deltas["count"])
                                        for k, v in deltas.items()
                                        if k != "count"
                                    ),
                                )
                            )

                            logger.info(self.timing)
                            # self.envs.print_renderer_stats()

                        # checkpoint model
                        if self.should_checkpoint():
                            self.save_checkpoint(
                                f"ckpt.{count_checkpoints}.pth",
                                dict(
                                    step=self.count_steps,
                                    wall_clock_time=(
                                        (time.time() - t_start) + prev_time
                                    ),
                                ),
                            )
                            count_checkpoints += 1

                self.update += 1

        # Final checkpoint after training completes.
        self.save_checkpoint(
            "ckpt.done.pth",
            dict(
                step=self.count_steps,
                wall_clock_time=((time.time() - t_start) + prev_time),
            ),
        )

        self._observations = None
        self._rewards = None
        self._masks = None
        self._rollout_infos = None
        self._syncs = None
        del self.envs
        self.envs = None
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import os
import random
import time
import copy
import multiprocessing
import psutil
import socket
import warnings
from collections import OrderedDict, defaultdict, deque
import numpy as np
import torch
import torch.distributed as distrib
import torch.nn as nn
import torch.nn.functional as F
import psutil
# import v4r_example
from gym import spaces
from gym.spaces import Dict as SpaceDict
from torch.optim.lr_scheduler import LambdaLR
from bps_nav.common.env_utils import construct_envs
from bps_nav.common.rollout_storage import DoubleBufferedRolloutStorage
from bps_nav.common.tensorboard_utils import TensorboardWriter
from bps_nav.common.utils import Timing, batch_obs, linear_decay
from bps_nav.rl.ddppo.algo.ddp_utils import (
EXIT,
REQUEUE,
add_signal_handlers,
init_distrib_slurm,
load_interrupted_state,
requeue_job,
save_interrupted_state,
)
from bps_nav.rl.ddppo.algo.ddppo import DDPPO
from bps_nav.common.tree_utils import (
tree_select,
tree_copy_in_place,
)
from bps_nav.rl.ppo.ppo_trainer import PPOTrainer
from bps_nav.rl.ddppo.policy.resnet import Dropblock
import socket
from bps_nav.common.logger import logger
from bps_nav.rl.ddppo.policy import ResNetPolicy
try:
import psutil
except ImportError:
psutil = None
# Module-level runtime configuration.
# NOTE(review): SAVE_STATE_WARNING was removed from torch.optim.lr_scheduler in
# newer PyTorch releases — confirm the pinned torch version still exports it.
warnings.filterwarnings("ignore", torch.optim.lr_scheduler.SAVE_STATE_WARNING)
# Favor throughput: let cudnn autotune kernels; determinism is explicitly off.
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = False
# Number of leading updates excluded from the steady-state FPS accounting.
BURN_IN_UPDATES = 50
# Benchmark mode is toggled purely through the environment.
BPS_BENCHMARK = os.environ.get("BPS_BENCHMARK", "0") != "0"
if BPS_BENCHMARK:
    logger.warn("In benchmark mode")
def set_cpus(local_rank, world_size):
    """Pin this process to a rank-specific subset of the host's CPU cores.

    Splits the currently allowed CPUs evenly across up to 8 local ranks and
    applies the slice for ``local_rank`` via ``psutil`` CPU affinity.
    """
    # At most 8 ranks share one host's cores.
    local_size = min(world_size, 8)
    curr_process = psutil.Process()
    total_cpus = curr_process.cpu_affinity()
    total_cpu_count = len(total_cpus)
    # Assuming things where already set
    if total_cpu_count > multiprocessing.cpu_count() / world_size:
        # Interleave the two halves of the affinity list (pairs physical
        # cores with their hyper-thread siblings under the usual Linux
        # enumeration — presumably; verify on the target machines).
        orig_cpus = total_cpus
        total_cpus = []
        for i in range(total_cpu_count // 2):
            total_cpus.append(orig_cpus[i])
            total_cpus.append(orig_cpus[i + total_cpu_count // 2])
    ptr = 0
    local_cpu_count = 0
    local_cpus = []
    CORE_GROUPING = min(
        local_size,
        4 if total_cpu_count / 2 >= 20 else (2 if total_cpu_count / 2 >= 10 else 1),
    )
    # NOTE(review): the computed grouping above is immediately overridden to 1,
    # which makes the pop_inds loop below a no-op. Dead code candidate.
    CORE_GROUPING = 1
    core_dist_size = max(local_size // CORE_GROUPING, 1)
    core_dist_rank = local_rank // CORE_GROUPING
    # Advance ptr past the slices owned by lower ranks; extra cores from an
    # uneven split go to the first (total_cpu_count % core_dist_size) ranks.
    for r in range(core_dist_rank + 1):
        ptr += local_cpu_count
        local_cpu_count = total_cpu_count // core_dist_size + (
            1 if r < (total_cpu_count % core_dist_size) else 0
        )
    # Take only this rank's slice (ptr/local_cpu_count now describe it).
    local_cpus += total_cpus[ptr : ptr + local_cpu_count]
    # With CORE_GROUPING == 1 this list is empty (range(0)).
    pop_inds = [
        ((local_rank + offset + 1) % CORE_GROUPING)
        for offset in range(CORE_GROUPING - 1)
    ]
    for ind in sorted(pop_inds, reverse=True):
        local_cpus.pop(ind)
    if BPS_BENCHMARK and world_size == 1:
        # Benchmark runs get a fixed 12-core slice regardless of the split.
        local_cpus = total_cpus[0:12]
    curr_process.cpu_affinity(local_cpus)
    logger.info(
        "Rank {} uses cpus {}".format(local_rank, sorted(curr_process.cpu_affinity()))
    )
class DDPPOTrainer(PPOTrainer):
# DD-PPO cuts rollouts short to mitigate the straggler effect
# This, in theory, can cause some rollouts to be very short.
# All rollouts contributed equally to the loss/model-update,
# thus very short rollouts can be problematic. This threshold
# limits the how short a short rollout can be as a fraction of the
# max rollout length
SHORT_ROLLOUT_THRESHOLD: float = 0.25
def __init__(self, config=None, resume_from=None):
    """Create the trainer, preferring the config saved by an interrupted run.

    When an interrupted-state snapshot exists (optionally located via
    ``resume_from``), its stored config replaces the one passed in, so a
    requeued job resumes with identical settings.
    """
    self.resume_from = resume_from
    saved = load_interrupted_state(resume_from=resume_from)
    if saved is not None:
        # Resuming: the snapshot's config wins over the caller's.
        config = saved["config"]
    super().__init__(config)
def _setup_actor_critic_agent(self, ppo_cfg) -> None:
    r"""Sets up actor critic and agent for DD-PPO.

    Builds the ResNet policy, optionally loads pretrained weights (full
    policy or encoder only), optionally freezes the visual encoder and
    resets the critic, then wraps everything in a DDPPO agent.

    Args:
        ppo_cfg: config node with relevant params

    Returns:
        None
    """
    logger.add_filehandler(self.config.LOG_FILE)
    # Older configs may not define use_avg_pool; default to False.
    if hasattr(self.config.RL.DDPPO, 'use_avg_pool'):
        use_avg_pool = self.config.RL.DDPPO.use_avg_pool
    else:
        use_avg_pool = False
    self.actor_critic = ResNetPolicy(
        observation_space=self.observation_space,
        action_space=self.action_space,
        hidden_size=ppo_cfg.hidden_size,
        rnn_type=self.config.RL.DDPPO.rnn_type,
        num_recurrent_layers=self.config.RL.DDPPO.num_recurrent_layers,
        backbone=self.config.RL.DDPPO.backbone,
        resnet_baseplanes=self.config.RL.DDPPO.resnet_baseplanes,
        use_avg_pool=use_avg_pool,
    )
    self.actor_critic.to(self.device)
    if self.config.RL.DDPPO.pretrained_encoder or self.config.RL.DDPPO.pretrained:
        # Checkpoints were saved with an "actor_critic." key prefix.
        pretrained_state = torch.load(
            self.config.RL.DDPPO.pretrained_weights, map_location="cpu"
        )
        if self.config.RL.DDPPO.pretrained:
            # Load the complete policy (prefix stripped from every key).
            self.actor_critic.load_state_dict(
                {
                    k[len("actor_critic.") :]: v
                    for k, v in pretrained_state["state_dict"].items()
                }
            )
        elif self.config.RL.DDPPO.pretrained_encoder:
            # Load only the visual-encoder subtree of the checkpoint.
            prefix = "actor_critic.net.visual_encoder."
            self.actor_critic.ac.net.visual_encoder.load_state_dict(
                {
                    k[len(prefix) :]: v
                    for k, v in pretrained_state["state_dict"].items()
                    if k.startswith(prefix)
                }
            )
    if not self.config.RL.DDPPO.train_encoder:
        # Freeze the encoder; train() later checks this flag.
        # NOTE(review): _static_encoder is only assigned on this branch —
        # confirm the base class defines a default, or train() may hit an
        # AttributeError when train_encoder is True.
        self._static_encoder = True
        for param in self.actor_critic.ac.net.visual_encoder.parameters():
            param.requires_grad_(False)
    if self.config.RL.DDPPO.reset_critic:
        self.actor_critic.ac.critic.layer_init()
    self.agent = DDPPO(actor_critic=self.actor_critic, ppo_cfg=ppo_cfg)
    self.agent.to(self.device)
def _update_policy(self):
    """Deliberate no-op override; this trainer drives its own updates."""
def _n_buffered_sampling(
    self,
    rollouts,
    current_episode_reward,
    running_episode_stats,
    buffer_ranges,
    real_steps,
    num_rollouts_done_store,
):
    """Collect up to ``real_steps`` experience steps per rollout buffer.

    With two buffers, inference for one buffer overlaps simulation of the
    other. The rollout ends early (straggler mitigation) once enough other
    workers have finished, per ``sync_frac``.

    Returns the number of environment steps actually inserted.
    """
    count_steps_delta = 0
    sim_step_reses = [None for _ in range(len(rollouts))]
    actions = [None for _ in range(len(rollouts))]
    is_double_buffered = len(rollouts) > 1
    # Prime the pipeline: compute first actions; kick off sim for buffer 0.
    for idx in range(len(rollouts)):
        actions[idx] = self._inference(rollouts, idx)
        if is_double_buffered and idx == 0:
            self._start_simulation(actions[idx], idx)
    for step in range(real_steps):
        is_last_step = (step + 1) == real_steps
        # Cut the rollout short once past the minimum length and enough
        # workers (sync_frac of the world) have already finished theirs.
        if (
            (step + 1) >= max(real_steps * self.SHORT_ROLLOUT_THRESHOLD, 1)
        ) and int(num_rollouts_done_store.get("num_done")) >= (
            self.config.RL.DDPPO.sync_frac * self.world_size
        ):
            is_last_step = True
        for idx in range(len(rollouts)):
            if is_double_buffered:
                # Finish this buffer's sim, start the other buffer's sim,
                # then render this one — the overlap is the whole point.
                sim_step_reses[idx] = self._wait_simulation(idx)
                if len(rollouts) > 1:
                    other_idx = (idx + 1) % len(rollouts)
                    if not is_last_step or other_idx > idx:
                        self._start_simulation(actions[other_idx], other_idx)
                self._render(idx)
            elif True:
                # Single-buffer path: fully sequential.
                self._start_simulation(actions[idx], idx)
                sim_step_reses[idx] = self._wait_simulation(idx)
                self._render(idx)
            else:
                # NOTE(review): unreachable — the `elif True` above always
                # wins. Dead-code candidate.
                sim_step_reses[idx] = self._step_simulation(actions[idx], idx)
            self._update_stats(
                rollouts,
                current_episode_reward,
                running_episode_stats,
                sim_step_reses[idx],
                buffer_ranges[idx],
                idx,
            )
            count_steps_delta += self._sync_renderer_and_insert(
                rollouts, sim_step_reses[idx], idx
            )
            if not is_last_step:
                # Next action for this buffer while the other one simulates.
                actions[idx] = self._inference(rollouts, idx)
        if is_last_step:
            break
    return count_steps_delta
def _warmup(self, rollouts):
    """Benchmark inference and learning, then restore the agent untouched.

    Runs throwaway inference/update iterations so cudnn autotuning and CUDA
    allocator caching settle, logs the per-iteration timings (rank 0 only),
    and finally restores the pre-warmup model/optimizer state so the
    warmup leaves no trace in training.
    """
    # Snapshot state so the warmup updates can be undone afterwards.
    model_state = {k: v.clone() for k, v in self.agent.state_dict().items()}
    optim_state = self.agent.optimizer.state.copy()
    self.agent.eval()
    for _ in range(20):
        self._inference(rollouts, 0)
    # Do a cache empty as sometimes cudnn searching
    # doesn't do that
    torch.cuda.empty_cache()
    t_inference_start = time.time()
    n_infers = 200
    for _ in range(n_infers):
        self._inference(rollouts, 0)
    if self.world_rank == 0:
        logger.info(
            "Inference time: {:.3f} ms".format(
                (time.time() - t_inference_start) / n_infers * 1000
            )
        )
        logger.info(
            "PyTorch CUDA Memory Cache Size: {:.3f} GB".format(
                torch.cuda.memory_reserved(self.device) / 1e9
            )
        )
    self.agent.train()
    for _ in range(10):
        self._update_agent(rollouts, warmup=True)
    # Do a cache empty as sometimes cudnn searching
    # doesn't do that
    torch.cuda.empty_cache()
    t_learning_start = time.time()
    n_learns = 15
    for _ in range(n_learns):
        self._update_agent(rollouts, warmup=True)
    if self.world_rank == 0:
        logger.info(
            "Learning time: {:.3f} ms".format(
                (time.time() - t_learning_start) / n_learns * 1000
            )
        )
        logger.info(self.timing)
        logger.info(
            "PyTorch CUDA Memory Cache Size: {:.3f} GB".format(
                torch.cuda.memory_reserved(self.device) / 1e9
            )
        )
    # Roll back everything the warmup touched.
    self.agent.load_state_dict(model_state)
    self.agent.optimizer.state = optim_state
    self.agent.ada_scale.zero_grad()
    self.timing = Timing()
def train(self) -> None:
    r"""Main method for DD-PPO.

    Initializes distributed training, builds spaces/policy/rollout storage
    and the renderer-backed envs, then loops: sample rollouts, update the
    agent, all-reduce and log stats, checkpoint, and handle requeue/exit
    signals. Supports resuming from an interrupted-state snapshot.

    Returns:
        None
    """
    import apex
    self.local_rank, tcp_store = init_distrib_slurm(
        self.config.RL.DDPPO.distrib_backend
    )
    # add_signal_handlers()
    self.timing = Timing()
    # Stores the number of workers that have finished their rollout
    num_rollouts_done_store = distrib.PrefixStore("rollout_tracker", tcp_store)
    num_rollouts_done_store.set("num_done", "0")
    self.world_rank = distrib.get_rank()
    self.world_size = distrib.get_world_size()
    set_cpus(self.local_rank, self.world_size)
    self.config.defrost()
    self.config.TORCH_GPU_ID = self.local_rank
    self.config.SIMULATOR_GPU_ID = self.local_rank
    # Multiply by the number of simulators to make sure they also get unique seeds
    self.config.TASK_CONFIG.SEED += self.world_rank * self.config.SIM_BATCH_SIZE
    self.config.freeze()
    random.seed(self.config.TASK_CONFIG.SEED)
    np.random.seed(self.config.TASK_CONFIG.SEED)
    torch.manual_seed(self.config.TASK_CONFIG.SEED)
    if torch.cuda.is_available():
        self.device = torch.device("cuda", self.local_rank)
        torch.cuda.set_device(self.device)
    else:
        self.device = torch.device("cpu")
    double_buffered = False
    self._num_worker_groups = self.config.NUM_PARALLEL_SCENES
    self._depth = self.config.DEPTH
    self._color = self.config.COLOR
    # Assemble the observation space from the task plus enabled sensors.
    if self.config.TASK.lower() == "pointnav":
        self.observation_space = SpaceDict(
            {
                "pointgoal_with_gps_compass": spaces.Box(
                    low=0.0, high=1.0, shape=(2,), dtype=np.float32
                )
            }
        )
    else:
        self.observation_space = SpaceDict({})
    self.action_space = spaces.Discrete(4)
    if self._color:
        self.observation_space = SpaceDict(
            {
                "rgb": spaces.Box(
                    low=np.finfo(np.float32).min,
                    high=np.finfo(np.float32).max,
                    shape=(3, *self.config.RESOLUTION),
                    dtype=np.uint8,
                ),
                **self.observation_space.spaces,
            }
        )
    if self._depth:
        self.observation_space = SpaceDict(
            {
                "depth": spaces.Box(
                    low=np.finfo(np.float32).min,
                    high=np.finfo(np.float32).max,
                    shape=(1, *self.config.RESOLUTION),
                    dtype=np.float32,
                ),
                **self.observation_space.spaces,
            }
        )
    ppo_cfg = self.config.RL.PPO
    if not os.path.isdir(self.config.CHECKPOINT_FOLDER) and self.world_rank == 0:
        os.makedirs(self.config.CHECKPOINT_FOLDER)
    self._setup_actor_critic_agent(ppo_cfg)
    self.count_steps = 0
    burn_steps = 0
    burn_time = 0
    count_checkpoints = 0
    prev_time = 0
    self.update = 0
    # Scale the LR with the effective batch size (sqrt rule) unless
    # AdaScale handles it.
    LR_SCALE = (
        max(
            np.sqrt(
                ppo_cfg.num_steps
                * self.config.SIM_BATCH_SIZE
                * ppo_cfg.num_accumulate_steps
                / ppo_cfg.num_mini_batch
                * self.world_size
                / (128 * 2)
            ),
            1.0,
        )
        if (self.config.RL.DDPPO.scale_lr and not self.config.RL.PPO.ada_scale)
        else 1.0
    )
    def cosine_decay(x):
        # Half-cosine from 1 at x=0 to 0 at x=1; clamped to 0 beyond.
        if x < 1:
            return (np.cos(x * np.pi) + 1.0) / 2.0
        else:
            return 0.0
    def warmup_fn(x):
        # Linear ramp from 0.5*LR_SCALE to LR_SCALE over the warmup window.
        return LR_SCALE * (0.5 + 0.5 * x)
    def decay_fn(x):
        # Cosine decay from LR_SCALE down to DECAY_TARGET*LR_SCALE.
        return LR_SCALE * (DECAY_TARGET + (1 - DECAY_TARGET) * cosine_decay(x))
    # NOTE(review): the `or True` below forces the first branch always —
    # the scale_lr alternative is dead. Confirm that is intentional.
    DECAY_TARGET = (
        0.01 / LR_SCALE
        if self.config.RL.PPO.ada_scale or True
        else (0.25 / LR_SCALE if self.config.RL.DDPPO.scale_lr else 1.0)
    )
    DECAY_PERCENT = 1.0 if self.config.RL.PPO.ada_scale or True else 0.5
    WARMUP_PERCENT = (
        0.01
        if (self.config.RL.DDPPO.scale_lr and not self.config.RL.PPO.ada_scale)
        else 0.0
    )
    def lr_fn():
        # LambdaLR multiplier driven by overall training progress.
        x = self.percent_done()
        if x < WARMUP_PERCENT:
            return warmup_fn(x / WARMUP_PERCENT)
        else:
            return decay_fn((x - WARMUP_PERCENT) / DECAY_PERCENT)
    lr_scheduler = LambdaLR(
        optimizer=self.agent.optimizer, lr_lambda=lambda x: lr_fn()
    )
    interrupted_state = load_interrupted_state(resume_from=self.resume_from)
    if interrupted_state is not None:
        self.agent.load_state_dict(interrupted_state["state_dict"])
    self.agent.init_amp(self.config.SIM_BATCH_SIZE)
    self.actor_critic.init_trt(self.config.SIM_BATCH_SIZE)
    self.actor_critic.script_net()
    self.agent.init_distributed(find_unused_params=False)
    if self.world_rank == 0:
        logger.info(
            "agent number of trainable parameters: {}".format(
                sum(
                    param.numel()
                    for param in self.agent.parameters()
                    if param.requires_grad
                )
            )
        )
    # NOTE(review): _static_encoder is only ever assigned when
    # train_encoder is False (see _setup_actor_critic_agent); if the base
    # class supplies no default this line can raise AttributeError.
    if self._static_encoder:
        self._encoder = self.actor_critic.net.visual_encoder
        self.observation_space = SpaceDict(
            {
                "visual_features": spaces.Box(
                    low=np.finfo(np.float32).min,
                    high=np.finfo(np.float32).max,
                    shape=self._encoder.output_shape,
                    dtype=np.float32,
                ),
                **self.observation_space,
            }
        )
        with torch.no_grad():
            # NOTE(review): `batch` is not defined at this point in this
            # method — this branch would raise NameError if taken.
            batch["visual_features"] = self._encoder(batch)
    nenvs = self.config.SIM_BATCH_SIZE
    rollouts = DoubleBufferedRolloutStorage(
        ppo_cfg.num_steps,
        nenvs,
        self.observation_space,
        self.action_space,
        ppo_cfg.hidden_size,
        num_recurrent_layers=self.actor_critic.num_recurrent_layers,
        use_data_aug=ppo_cfg.use_data_aug,
        aug_type=ppo_cfg.aug_type,
        double_buffered=double_buffered,
        vtrace=ppo_cfg.vtrace,
    )
    rollouts.to(self.device)
    rollouts.to_fp16()
    # Warmup benchmarks (and then restores) the agent before envs exist.
    self._warmup(rollouts)
    (
        self.envs,
        self._observations,
        self._rewards,
        self._masks,
        self._rollout_infos,
        self._syncs,
    ) = construct_envs(
        self.config,
        num_worker_groups=self.config.NUM_PARALLEL_SCENES,
        double_buffered=double_buffered,
    )
    def _setup_render_and_populate_initial_frame():
        # Reset each buffer's envs and copy the first observation into
        # slot 0 of its rollout storage.
        for idx in range(2 if double_buffered else 1):
            self.envs.reset(idx)
            batch = self._observations[idx]
            self._syncs[idx].wait()
            tree_copy_in_place(
                tree_select(0, rollouts[idx].storage_buffers["observations"]),
                batch,
            )
    _setup_render_and_populate_initial_frame()
    current_episode_reward = torch.zeros(nenvs, 1)
    running_episode_stats = dict(
        count=torch.zeros(nenvs, 1,), reward=torch.zeros(nenvs, 1,),
    )
    window_episode_stats = defaultdict(
        lambda: deque(maxlen=ppo_cfg.reward_window_size)
    )
    time_per_frame_window = deque(maxlen=ppo_cfg.reward_window_size)
    # Each buffer owns a contiguous slice of the env batch.
    buffer_ranges = []
    for i in range(2 if double_buffered else 1):
        start_ind = buffer_ranges[-1].stop if i > 0 else 0
        buffer_ranges.append(
            slice(
                start_ind,
                start_ind
                + self.config.SIM_BATCH_SIZE // (2 if double_buffered else 1),
            )
        )
    if interrupted_state is not None:
        # Restore counters, optimizer scaling, LR schedule, and AMP state.
        requeue_stats = interrupted_state["requeue_stats"]
        self.count_steps = requeue_stats["count_steps"]
        self.update = requeue_stats["start_update"]
        count_checkpoints = requeue_stats["count_checkpoints"]
        prev_time = requeue_stats["prev_time"]
        burn_steps = requeue_stats["burn_steps"]
        burn_time = requeue_stats["burn_time"]
        self.agent.ada_scale.load_state_dict(interrupted_state["ada_scale_state"])
        lr_scheduler.load_state_dict(interrupted_state["lr_sched_state"])
        if "amp_state" in interrupted_state:
            apex.amp.load_state_dict(interrupted_state["amp_state"])
        if "grad_scaler_state" in interrupted_state:
            self.agent.grad_scaler.load_state_dict(
                interrupted_state["grad_scaler_state"]
            )
    # Only rank 0 writes tensorboard; other ranks get a no-op context.
    with (
        TensorboardWriter(
            self.config.TENSORBOARD_DIR,
            flush_secs=self.flush_secs,
            purge_step=int(self.count_steps),
        )
        if self.world_rank == 0
        else contextlib.suppress()
    ) as writer:
        distrib.barrier()
        t_start = time.time()
        while not self.is_done():
            t_rollout_start = time.time()
            if self.update == BURN_IN_UPDATES:
                # Exclude startup cost from the steady-state FPS numbers.
                burn_time = t_rollout_start - t_start
                burn_steps = self.count_steps
            if ppo_cfg.use_linear_clip_decay:
                self.agent.clip_param = ppo_cfg.clip_param * linear_decay(
                    self.percent_done(), final_decay=ppo_cfg.decay_factor,
                )
            # Periodically (or on a requeue signal) snapshot full state so
            # a preempted job can resume exactly where it left off.
            if (
                not BPS_BENCHMARK
                and (REQUEUE.is_set() or ((self.update + 1) % 100) == 0)
                and self.world_rank == 0
            ):
                requeue_stats = dict(
                    count_steps=self.count_steps,
                    count_checkpoints=count_checkpoints,
                    start_update=self.update,
                    prev_time=(time.time() - t_start) + prev_time,
                    burn_time=burn_time,
                    burn_steps=burn_steps,
                )
                def _cast(param):
                    # fp16 params are stored as fp32 in the snapshot.
                    if "Half" in param.type():
                        param = param.to(dtype=torch.float32)
                    return param
                save_interrupted_state(
                    dict(
                        state_dict={
                            k: _cast(v) for k, v in self.agent.state_dict().items()
                        },
                        ada_scale_state=self.agent.ada_scale.state_dict(),
                        lr_sched_state=lr_scheduler.state_dict(),
                        config=self.config,
                        requeue_stats=requeue_stats,
                        grad_scaler_state=self.agent.grad_scaler.state_dict(),
                    )
                )
            if EXIT.is_set():
                # Tear down env/renderer handles before requeueing.
                self._observations = None
                self._rewards = None
                self._masks = None
                self._rollout_infos = None
                self._syncs = None
                del self.envs
                self.envs = None
                requeue_job()
                return
            self.agent.eval()
            count_steps_delta = self._n_buffered_sampling(
                rollouts,
                current_episode_reward,
                running_episode_stats,
                buffer_ranges,
                ppo_cfg.num_steps,
                num_rollouts_done_store,
            )
            # Tell the other workers this rollout is finished.
            num_rollouts_done_store.add("num_done", 1)
            if not rollouts.vtrace:
                self._compute_returns(ppo_cfg, rollouts)
            (value_loss, action_loss, dist_entropy) = self._update_agent(rollouts)
            if self.world_rank == 0:
                num_rollouts_done_store.set("num_done", "0")
            lr_scheduler.step()
            with self.timing.add_time("Logging"):
                # All-reduce the per-episode stats across workers.
                stats_ordering = list(sorted(running_episode_stats.keys()))
                stats = torch.stack(
                    [running_episode_stats[k] for k in stats_ordering], 0,
                ).to(device=self.device)
                distrib.all_reduce(stats)
                stats = stats.to(device="cpu")
                for i, k in enumerate(stats_ordering):
                    window_episode_stats[k].append(stats[i])
                # Second all-reduce: losses, step count, renderer swap stats.
                stats = torch.tensor(
                    [
                        value_loss,
                        action_loss,
                        count_steps_delta,
                        *self.envs.swap_stats,
                    ],
                    device=self.device,
                )
                distrib.all_reduce(stats)
                stats = stats.to(device="cpu")
                count_steps_delta = int(stats[2].item())
                self.count_steps += count_steps_delta
                time_per_frame_window.append(
                    (time.time() - t_rollout_start) / count_steps_delta
                )
                if self.world_rank == 0:
                    losses = [
                        stats[0].item() / self.world_size,
                        stats[1].item() / self.world_size,
                    ]
                    # Windowed deltas: change over the stats window (or the
                    # single entry when the window has just one sample).
                    deltas = {
                        k: (
                            (v[-1] - v[0]).sum().item()
                            if len(v) > 1
                            else v[0].sum().item()
                        )
                        for k, v in window_episode_stats.items()
                    }
                    deltas["count"] = max(deltas["count"], 1.0)
                    writer.add_scalar(
                        "reward",
                        deltas["reward"] / deltas["count"],
                        self.count_steps,
                    )
                    # Check to see if there are any metrics
                    # that haven't been logged yet
                    metrics = {
                        k: v / deltas["count"]
                        for k, v in deltas.items()
                        if k not in {"reward", "count"}
                    }
                    if len(metrics) > 0:
                        writer.add_scalars("metrics", metrics, self.count_steps)
                    writer.add_scalars(
                        "losses",
                        {k: l for l, k in zip(losses, ["value", "policy"])},
                        self.count_steps,
                    )
                    optim = self.agent.optimizer
                    writer.add_scalar(
                        "optimizer/base_lr",
                        optim.param_groups[-1]["lr"],
                        self.count_steps,
                    )
                    # AdaScale exposes a per-group "gain" multiplier.
                    if "gain" in optim.param_groups[-1]:
                        for idx, group in enumerate(optim.param_groups):
                            writer.add_scalar(
                                f"optimizer/lr_{idx}",
                                group["lr"] * group["gain"],
                                self.count_steps,
                            )
                            writer.add_scalar(
                                f"optimizer/gain_{idx}",
                                group["gain"],
                                self.count_steps,
                            )
                    # log stats
                    if (
                        self.update > 0
                        and self.update % self.config.LOG_INTERVAL == 0
                    ):
                        logger.info(
                            "update: {}\twindow fps: {:.3f}\ttotal fps: {:.3f}\tframes: {}".format(
                                self.update,
                                1.0
                                / (
                                    sum(time_per_frame_window)
                                    / len(time_per_frame_window)
                                ),
                                (self.count_steps - burn_steps)
                                / ((time.time() - t_start) + prev_time - burn_time),
                                self.count_steps,
                            )
                        )
                        logger.info(
                            "swap percent: {:.3f}\tscenes in use: {:.3f}\tenvs per scene: {:.3f}".format(
                                stats[3].item() / self.world_size,
                                stats[4].item() / self.world_size,
                                stats[5].item() / self.world_size,
                            )
                        )
                        logger.info(
                            "Average window size: {} {}".format(
                                len(window_episode_stats["count"]),
                                " ".join(
                                    "{}: {:.3f}".format(k, v / deltas["count"])
                                    for k, v in deltas.items()
                                    if k != "count"
                                ),
                            )
                        )
                        logger.info(self.timing)
                        # self.envs.print_renderer_stats()
                    # checkpoint model
                    if self.should_checkpoint():
                        self.save_checkpoint(
                            f"ckpt.{count_checkpoints}.pth",
                            dict(
                                step=self.count_steps,
                                wall_clock_time=(
                                    (time.time() - t_start) + prev_time
                                ),
                            ),
                        )
                        count_checkpoints += 1
            self.update += 1
        # Training finished: write the final checkpoint and tear down.
        self.save_checkpoint(
            "ckpt.done.pth",
            dict(
                step=self.count_steps,
                wall_clock_time=((time.time() - t_start) + prev_time),
            ),
        )
        self._observations = None
        self._rewards = None
        self._masks = None
        self._rollout_infos = None
        self._syncs = None
        del self.envs
        self.envs = None
import argparse
import os
import errno
import subprocess
import sys
import venv
from common_setup import running_on_ci, remote_cache_token, which
from torch_blade_build import TorchBladeBuild, get_fullpath_or_create
cwd = os.path.dirname(os.path.abspath(__file__))
def _make_executable(path):
mode = os.stat(path).st_mode
mode |= (mode & 0o444) >> 2 # copy R bits to X
os.chmod(path, mode)
def _symlink_force(target, link_name):
try:
os.symlink(target, link_name)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(link_name)
os.symlink(target, link_name)
else:
raise e
class BazelBuild(TorchBladeBuild):
def __init__(self, *args, **kwargs):
    """Assemble bazel targets, copts/action_env flags, and --config list.

    Pulls version/toolchain attributes (torch_version, cuda_version,
    GLIBCXX_USE_CXX11_ABI, ...) from the TorchBladeBuild base class and
    turns them into bazel command fragments used by run() and test().
    """
    super().__init__(*args, **kwargs)
    self.test_suite = "//src:torch_blade_gtests"
    self.targets = [
        "@org_tensorflow//tensorflow/compiler/mlir/disc:disc_compiler_main",
        "//src:_torch_blade.so",
        self.test_suite,
    ]
    # "1.12.0" -> ("1", "12"); patch level is not propagated.
    torch_major_version, torch_minor_version = self.torch_version.split(".")[:2]
    self.extra_opts = [
        "--copt=-DPYTORCH_VERSION_STRING={}".format(self.torch_version),
        "--copt=-DPYTORCH_MAJOR_VERSION={}".format(torch_major_version),
        "--copt=-DPYTORCH_MINOR_VERSION={}".format(torch_minor_version),
        "--copt=-DTORCH_BLADE_CUDA_VERSION={}".format(self.cuda_version),
        "--action_env PYTHON_BIN_PATH={}".format(sys.executable),
        "--action_env TORCH_BLADE_TORCH_INSTALL_PATH={}".format(self.torch_dir),
        # Workaround issue: https://github.com/bazelbuild/bazel/issues/10327
        "--action_env BAZEL_LINKLIBS=-lstdc++"
    ]
    remote_cache = remote_cache_token()
    if remote_cache:
        self.extra_opts += ["--remote_cache={}".format(remote_cache)]
    self.configs = ["--config=cxx11abi_{}".format(int(self.GLIBCXX_USE_CXX11_ABI))]
    if self.is_debug:
        self.configs.append("--config=dbg")
    # CPU vs CUDA build flavor.
    if self.cuda_available:
        self.configs.append("--config=torch_disc_cuda")
    else:
        self.configs += ["--config=torch_disc_cpu"]
    if self.cuda_available and self.build_tensorrt:
        self.configs.append("--config=torch_tensorrt")
        self.extra_opts += [
            "--action_env TENSORRT_INSTALL_PATH={}".format(self.tensorrt_dir)]
    if running_on_ci():
        self.configs += ["--config=ci_build"]
    # Fail-fast shell prelude prepended to every bazel invocation.
    self.shell_setting = "set -e; set -o pipefail; "
    # Workaround: this venv ensure that $(/usr/bin/env python) is evaluated to python3
    venv.create(".bazel_pyenv", clear=True)
    self.build_cmd = "source .bazel_pyenv/bin/activate; bazel build"
    self.test_cmd = "source .bazel_pyenv/bin/activate; bazel test"
def run(self, extdir=None, srcdir=None, build_temp=None):
    """Run the bazel build and symlink the produced artifacts into extdir.

    Also writes a ``debug_bazel.sh`` helper that reproduces the exact
    build command and environment for manual debugging.

    Args:
        extdir: where artifact symlinks are placed (default "build/temp").
        srcdir: workspace root (defaults to this file's directory).
        build_temp: unused; kept for interface compatibility.
    """
    srcdir = get_fullpath_or_create(
        srcdir or os.path.dirname(os.path.abspath(__file__))
    )
    extdir = get_fullpath_or_create(extdir or "build/temp")
    bazel_bin_dir = os.path.join(srcdir, "bazel-bin/")
    env = os.environ.copy()
    # Make sure bazel actions can find the torch shared libraries.
    ld_library_path = ":".join([self.torch_lib_dir, env.get("LD_LIBRARY_PATH", "")])
    env["LD_LIBRARY_PATH"] = ld_library_path
    env["GCC_HOST_COMPILER_PATH"] = env.get("GCC_HOST_COMPILER_PATH", which("gcc"))
    bazel_cmd = " ".join(
        [self.shell_setting, self.build_cmd]
        + self.extra_opts
        + self.configs
    )
    # Reproduce-by-hand script: same env exports, same command, user args
    # appended via "$@".
    with open("debug_bazel.sh", "w") as f:
        f.write("#!/bin/bash\n")
        f.write("export LD_LIBRARY_PATH={}\n".format(ld_library_path))
        f.write("export GCC_HOST_COMPILER_PATH={}\n".format(env.get("GCC_HOST_COMPILER_PATH", "")))
        f.write(bazel_cmd + " $@")
    _make_executable("debug_bazel.sh")
    bazel_cmd = " ".join([bazel_cmd] + self.targets)
    # bash is required for `source` and pipefail in shell_setting.
    subprocess.check_call(
        bazel_cmd, shell=True, env=env, executable="/bin/bash"
    )
    ext_so_fpath = "src/_torch_blade.so"
    ral_so_fpath = "external/org_tensorflow/tensorflow/compiler/mlir/xla/ral/libral_base_context.so"
    disc_bin_fpath = (
        "external/org_tensorflow/tensorflow/compiler/mlir/disc/disc_compiler_main"
    )
    # Symlink each artifact (resolved through bazel's own symlinks) into
    # the extension output directory.
    for fpath in [ext_so_fpath, ral_so_fpath, disc_bin_fpath]:
        fpath = os.path.realpath(os.path.join(bazel_bin_dir, fpath))
        fname = os.path.basename(fpath)
        _symlink_force(fpath, os.path.join(extdir, fname))
def test(self):
    """Run the bazel gtest suite with the same environment as the build."""
    env = dict(os.environ)
    env["LD_LIBRARY_PATH"] = ":".join(
        [self.torch_lib_dir, env.get("LD_LIBRARY_PATH", "")]
    )
    env["GCC_HOST_COMPILER_PATH"] = env.get(
        "GCC_HOST_COMPILER_PATH", which("gcc")
    )
    cmd_parts = [self.shell_setting, self.test_cmd]
    cmd_parts += self.extra_opts
    cmd_parts += self.configs
    cmd_parts.append(self.test_suite)
    # bash is required for `source` and pipefail in shell_setting.
    subprocess.check_call(
        " ".join(cmd_parts), shell=True, env=env, executable="/bin/bash"
    )
if __name__ == "__main__":
    # CLI entry point: parse toolchain info, then build into ./torch_blade.
    parser = argparse.ArgumentParser(description="Bazel build TorchBlade")
    parser.add_argument(
        "--torch_version", type=str, required=True, help="The version of torch"
    )
    parser.add_argument(
        "--torch_dir", type=str, required=True, help="The directory where torch located"
    )
    parser.add_argument(
        "--cuda_version", type=str, default=None, help="The version of cuda toolkit"
    )
    parser.add_argument("--cxx11", action="store_true", help="Use c++ cxx11 abi")
    args = parser.parse_args()
    build = BazelBuild(
        args.torch_dir, args.torch_version, args.cuda_version, cxx11_abi=args.cxx11
    )
    build.write_version_file(os.path.join(cwd, "version.txt"))
    srcdir = os.path.dirname(os.path.abspath(__file__))
    # Place artifact symlinks inside the python package directory.
    build.run(extdir=os.path.join(srcdir, "torch_blade"))
import argparse
import os
import errno
import subprocess
import sys
import venv
from common_setup import running_on_ci, remote_cache_token, which
from torch_blade_build import TorchBladeBuild, get_fullpath_or_create
cwd = os.path.dirname(os.path.abspath(__file__))
def _make_executable(path):
mode = os.stat(path).st_mode
mode |= (mode & 0o444) >> 2 # copy R bits to X
os.chmod(path, mode)
def _symlink_force(target, link_name):
try:
os.symlink(target, link_name)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(link_name)
os.symlink(target, link_name)
else:
raise e
class BazelBuild(TorchBladeBuild):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.test_suite = "//src:torch_blade_gtests"
self.targets = [
"@org_tensorflow//tensorflow/compiler/mlir/disc:disc_compiler_main",
"//src:_torch_blade.so",
self.test_suite,
]
torch_major_version, torch_minor_version = self.torch_version.split(".")[:2]
self.extra_opts = [
"--copt=-DPYTORCH_VERSION_STRING={}".format(self.torch_version),
"--copt=-DPYTORCH_MAJOR_VERSION={}".format(torch_major_version),
"--copt=-DPYTORCH_MINOR_VERSION={}".format(torch_minor_version),
"--copt=-DTORCH_BLADE_CUDA_VERSION={}".format(self.cuda_version),
"--action_env PYTHON_BIN_PATH={}".format(sys.executable),
"--action_env TORCH_BLADE_TORCH_INSTALL_PATH={}".format(self.torch_dir),
# Workaroud issue: https://github.com/bazelbuild/bazel/issues/10327
"--action_env BAZEL_LINKLIBS=-lstdc++"
]
remote_cache = remote_cache_token()
if remote_cache:
self.extra_opts += ["--remote_cache={}".format(remote_cache)]
self.configs = ["--config=cxx11abi_{}".format(int(self.GLIBCXX_USE_CXX11_ABI))]
if self.is_debug:
self.configs.append("--config=dbg")
if self.cuda_available:
self.configs.append("--config=torch_disc_cuda")
else:
self.configs += ["--config=torch_disc_cpu"]
if self.cuda_available and self.build_tensorrt:
self.configs.append("--config=torch_tensorrt")
self.extra_opts += [
"--action_env TENSORRT_INSTALL_PATH={}".format(self.tensorrt_dir)]
if running_on_ci():
self.configs += ["--config=ci_build"]
self.shell_setting = "set -e; set -o pipefail; "
# Workaround: this venv ensure that $(/usr/bin/env python) is evaluated to python3
venv.create(".bazel_pyenv", clear=True)
self.build_cmd = "source .bazel_pyenv/bin/activate; bazel build"
self.test_cmd = "source .bazel_pyenv/bin/activate; bazel test"
def run(self, extdir=None, srcdir=None, build_temp=None):
srcdir = get_fullpath_or_create(
srcdir or os.path.dirname(os.path.abspath(__file__))
)
extdir = get_fullpath_or_create(extdir or "build/temp")
bazel_bin_dir = os.path.join(srcdir, "bazel-bin/")
env = os.environ.copy()
ld_library_path = ":".join([self.torch_lib_dir, env.get("LD_LIBRARY_PATH", "")])
env["LD_LIBRARY_PATH"] = ld_library_path
env["GCC_HOST_COMPILER_PATH"] = env.get("GCC_HOST_COMPILER_PATH", which("gcc"))
bazel_cmd = " ".join(
[self.shell_setting, self.build_cmd]
+ self.extra_opts
+ self.configs
)
with open("debug_bazel.sh", "w") as f:
f.write("#!/bin/bash\n")
f.write("export LD_LIBRARY_PATH={}\n".format(ld_library_path))
f.write("export GCC_HOST_COMPILER_PATH={}\n".format(env.get("GCC_HOST_COMPILER_PATH", "")))
f.write(bazel_cmd + " $@")
_make_executable("debug_bazel.sh")
bazel_cmd = " ".join([bazel_cmd] + self.targets)
subprocess.check_call(
bazel_cmd, shell=True, env=env, executable="/bin/bash"
)
ext_so_fpath = "src/_torch_blade.so"
ral_so_fpath = "external/org_tensorflow/tensorflow/compiler/mlir/xla/ral/libral_base_context.so"
disc_bin_fpath = (
"external/org_tensorflow/tensorflow/compiler/mlir/disc/disc_compiler_main"
)
for fpath in [ext_so_fpath, ral_so_fpath, disc_bin_fpath]:
fpath = os.path.realpath(os.path.join(bazel_bin_dir, fpath))
fname = os.path.basename(fpath)
_symlink_force(fpath, os.path.join(extdir, fname))
def test(self):
env = os.environ.copy()
ld_library_path = ":".join([self.torch_lib_dir, env.get("LD_LIBRARY_PATH", "")])
env["LD_LIBRARY_PATH"] = ld_library_path
env["GCC_HOST_COMPILER_PATH"] = env.get("GCC_HOST_COMPILER_PATH", which("gcc"))
test_cmd = " ".join(
[self.shell_setting, self.test_cmd]
+ self.extra_opts
+ self.configs
+ [self.test_suite]
)
subprocess.check_call(test_cmd, shell=True, env=env, executable="/bin/bash")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Bazel build TorchBlade")
parser.add_argument(
"--torch_version", type=str, required=True, help="The version of torch"
)
parser.add_argument(
"--torch_dir", type=str, required=True, help="The directory where torch located"
)
parser.add_argument(
"--cuda_version", type=str, default=None, help="The version of cuda toolkit"
)
parser.add_argument("--cxx11", action="store_true", help="Use c++ cxx11 abi")
args = parser.parse_args()
build = BazelBuild(
args.torch_dir, args.torch_version, args.cuda_version, cxx11_abi=args.cxx11
)
build.write_version_file(os.path.join(cwd, "version.txt"))
srcdir = os.path.dirname(os.path.abspath(__file__))
build.run(extdir=os.path.join(srcdir, "torch_blade")) | 0.34798 | 0.072933 |
import os
from pathlib import Path
from rpi.inputs2 import *
# Module state and prompt limits for the interactive script below.
start_mtime=0  # NOTE(review): never read afterwards — removal candidate
first_mtime=0  # mtime of the first matching file; set in the scan loop
header="# timecode format v2"  # required first line of a v2 timecode file
pts_default="extracted"  # fallback output base name when cwd has no name
pts_name=""
times=[]  # output lines, header first
start_time=0
min_start=0  # unit s: lower bound for the start-time prompt
max_start=2592000 # unit s: 30 d
default_start=0
interval=0
min_interval=0.000001 # unit s: 1 µs
max_interval=2592000 # unit s: 30 d
default_int=1  # NOTE(review): unused — the interval prompt passes literal 1
def get_time(start_time, first_mtime, mtime):
    """Timestamp in milliseconds: start offset plus the mtime delta (3 dp)."""
    elapsed = mtime - first_mtime
    return round((start_time + elapsed) * 1000, 3)
def get_time_interval(start_time, interval, count):
    """Timestamp in milliseconds for the ``count``-th frame (1-based, 3 dp)."""
    offset = interval * (count - 1)
    return round((start_time + offset) * 1000, 3)
# Interactive entry point: scan the current directory for files with a
# user-given extension and write a "timecode format v2" .pts file whose
# entries come either from file mtimes or from a fixed interval.
print("ptsextract")
print("")
curdir = os.getcwd()
path = Path(curdir)
print("Current directory:")
print(curdir)
print("")
start_time = inputValue("start time", min_start, max_start, default_start, "s", "Value out of range!", False)
isMtime = inputYesNo("mtime mode", "Use file mtime instead of interval", True)
if not isMtime:
    interval = inputValue("interval", min_interval, max_interval, 1, "s", "Value out of range!", False)
# Prompt until a plausible extension is entered.
ext_len = 0
while ext_len == 0:
    ext = input("File extension: ")
    ext = ext.strip()
    if len(ext) == 0:
        print("Enter extension!")
        continue
    elif len(ext) == 1 and ext.isalnum() == False:
        print("Invalid extension!")
        continue
    ext_len = len(ext)
if ext[0] != ".":
    ext = "." + ext
# Output file is named after the directory (fallback: pts_default).
pts_name = os.path.basename(curdir)
if len(pts_name) == 0:
    pts_name = pts_default
pts_name += ".pts"
times.append(header)
n_files = 0
for p in sorted(path.iterdir()):
    suffix = p.suffix.lower()
    # FIX: compare the lowered suffix against the lowered extension —
    # `suffix` was previously computed but never used, so matching was
    # accidentally case-sensitive.
    if p.is_file() and suffix == ext.lower():
        n_files += 1
        fname = p.name
        mtime = os.path.getmtime(fname)
        if n_files == 1:
            # First match anchors the relative-mtime timeline.
            first_mtime = mtime
        if isMtime:
            t = get_time(start_time, first_mtime, mtime)
        else:
            t = get_time_interval(start_time, interval, n_files)
        print(fname, '{:.3f}'.format(t))
        times.append('{:.3f}'.format(t))
if n_files > 0:
    try:
        with open(pts_name, "w") as f:
            for row in times:
                f.write(row + "\n")
        print("")
        print(pts_name + " created with " + str(n_files) + " time stamps")
    except OSError:
        # Narrowed from a bare except: only file-system errors are expected.
        print("Unable to create: " + pts_name)
else:
    print("Files not found!\n")
from pathlib import Path
from rpi.inputs2 import *
# --- Module state and input limits ------------------------------------------
start_mtime=0
first_mtime=0
header="# timecode format v2"
pts_default="extracted"
pts_name=""
times=[]
start_time=0
min_start=0 # unit s: 1 µs
max_start=2592000 # unit s: 30 d
default_start=0
interval=0
min_interval=0.000001 # unit s: 1 µs
max_interval=2592000 # unit s: 30 d
default_int=1
def get_time(start_time,first_mtime,mtime):
    # Milliseconds: configured start offset plus the mtime delta from the
    # first matched file, rounded to 3 decimals.
    return round(1000*(start_time+(mtime-first_mtime)),3)
def get_time_interval(start_time,interval,count):
    # Milliseconds: configured start offset plus (count-1) fixed intervals.
    return round(1000*(start_time+interval*(count-1)),3)
# Get current directory
print("ptsextract")
print("")
curdir=os.getcwd()
path=Path(curdir)
print("Current directory:")
print(curdir)
print("")
# Gather timing mode: either real file mtimes or a fixed interval.
start_time=inputValue("start time",min_start,max_start,default_start,"s","Value out of range!",False)
isMtime=inputYesNo("mtime mode","Use file mtime instead of interval",True)
if not isMtime:
    # NOTE(review): hard-coded default 1 here instead of default_int — confirm.
    interval=inputValue("interval",min_interval,max_interval,1,"s","Value out of range!",False)
# Prompt until a plausible extension is entered; prepend "." when missing.
l=0
while l==0:
    ext=input("File extension: ")
    ext=ext.strip()
    if len(ext)==0:
        print("Enter extension!")
        continue
    elif len(ext)==1 and ext.isalnum()==False:
        print("Invalid extension!")
        continue
    l=len(ext)
if ext[0]!=".":
    ext="."+ext
# Output name: current directory name, or the default when empty (fs root).
pts_name=os.path.basename(curdir)
if len(pts_name)==0:
    pts_name=pts_default
pts_name+=".pts"
times.append(header)
file=0
for p in sorted(path.iterdir()):
    suffix=p.suffix.lower()
    # NOTE(review): suffix is lower-cased above but the comparison uses
    # p.suffix — the match is case-sensitive; confirm intent.
    if p.is_file() and p.suffix==ext:
        file+=1
        fname=p.name
        mtime=os.path.getmtime(fname)
        if file==1:
            first_mtime=mtime  # first matched file anchors the timeline
        if isMtime:
            t=get_time(start_time,first_mtime,mtime)
        else:
            t=get_time_interval(start_time,interval,file)
        print(fname,'{:.3f}'.format(t))
        times.append('{:.3f}'.format(t))
# Write the .pts file only when at least one file matched.
if file>0:
    try:
        with open(pts_name, "w") as f:
            for row in times:
                f.write(row+"\n")
        print("")
        print(pts_name+" created with "+str(file)+" time stamps")
    except:
        print("Unable to create: "+pts_name)
else:
print("Files not found!\n") | 0.119434 | 0.09709 |
from typing import List, Any, Dict
import pandas as pd
import requests
from .settings import BASE_URL
import delta_sharing
class FidapClient:
    """
    Client for the Fidap data platform.

    Wraps three groups of functionality:
      * SQL execution against a configurable backend (``sql``, ``tickers``, ``api``)
      * delta-sharing table loads (``load_table_as_dataframe``)
      * catalog metadata CRUD (datasets / tables / fields)
    """
    _api_key = None
    _api_secret = None
    # Public delta-sharing profile consumed by load_table_as_dataframe().
    _file_path = "https://fidap.s3-us-west-2.amazonaws.com/fidap_data.share"
    _custom_source = None
    _headers = None

    def __init__(self, source, api_key, api_secret):
        """
        :param source: default backend identifier (e.g. 'bq', 'sf', 's3')
        :param api_key: API key, also sent as the "api-key" request header
        :param api_secret: API secret forwarded with query payloads
        """
        self._custom_source = source
        self._api_key = api_key
        self._api_secret = api_secret
        self._headers = {"api-key": api_key}

    @property
    def api_keys(self):
        """Credential/source payload merged into most request bodies."""
        return {'api_key': self._api_key, 'api_secret': self._api_secret, 'db': self._custom_source}

    def sql(self, sql, source=None):
        """
        :param sql: SQL query here in str
        :param source: optional backend override. NOTE: a truthy value
            permanently switches this client's default source, not just this
            one call (kept as-is for backward compatibility).
        :return: Pandas Dataframe
        """
        if source:
            self._custom_source = source
        return self.api({'sql_query': sql, **self.api_keys})

    def load_table_as_dataframe(self, share_name="fidap_share", schema_name=None, table_name=None, df_type='pandas'):
        """
        :param share_name: String, your share name default is fidap_share.
        :param schema_name: String, Schema name where table exist.
        :param table_name: String, Table name want to load.
        :param df_type: String, pandas or spark.
        :return: dataframe, or an error *string* for an unknown df_type
            (string return kept for backward compatibility).
        """
        _ = delta_sharing.SharingClient(self._file_path)
        table_url = self._file_path + "#" + f"{share_name}.{schema_name}.{table_name}"
        if df_type == 'spark':
            df = delta_sharing.load_as_spark(table_url)
        elif df_type == 'pandas':
            df = delta_sharing.load_as_pandas(table_url)
        else:
            df = "Invalid dataframe type."
        return df

    def tickers(self, field, ticker, source):
        """
        :param field: field for lookup
        :param ticker: ticker for specify a ticker type
        :param source: source connection type snowflake, bigquery etc.
        :return: Pandas Dataframe
        """
        # SECURITY: field/ticker are interpolated straight into SQL; do not
        # pass untrusted input here (server-side validation not guaranteed).
        query = dict(
            bq=f"select {field} from tickers where fidapschema.ticker='{ticker}'",
            sf=f"select {field} from tickers where ticker='{ticker}'",
            s3=f"select {field} from tickers where ticker='{ticker}'"
        )
        return self.sql(sql=query[source if source else self._custom_source], source=source)

    def api(self, json: Dict[str, Any]):
        """
        :param json: JSON contain function and sql values
        :return: Pandas Dataframe on success; on HTTP 400 the raw error
            payload, on HTTP 401 its 'detail' message.
        """
        response = requests.post(f"{BASE_URL}/api/v1/query/run/query/", json=json, headers=self._headers)
        if response.status_code == 400:
            return response.json()
        if response.status_code == 401:
            return response.json()['detail']
        df = pd.read_json(response.json()['data'])
        return df

    def send_email(self, df: pd.DataFrame, emails: List[str], file_name: str, rows: int = 1000, cols: int = 30) -> bool:
        """
        :param df: Pandas Dataframe (truncated to `rows` x `cols` before sending)
        :param emails: list of Emails
        :param file_name: It is CSV filename
        :param rows: Integer number of rows, default 1000
        :param cols: Integer number of cols, default 30
        :return: bool status, True = all sent, False = something wrong
        """
        df = df.iloc[0:rows, 0:cols]
        data = {
            'emails': emails,
            'df_data': df.to_json(),
            'file_name': file_name,
            **self.api_keys
        }
        response = requests.post(f"{BASE_URL}/api/v1/common/send/email/", json=data, headers=self._headers).json()
        return response['success']

    def create_dataset(self, name=None, description=None, source=None, project=None, dataset=None, public=False):
        """
        :param name: String field required, Dataset name it should be unique
        :param description: Description about dataset, optional
        :param source: optional; falls back to the client's default source
        :param project: Name of Bigquery or Snowflake project/dataset
        :param dataset: Name of bigquery dataset in the project, for snowflake it will be schema name
        :param public: whether the created dataset is public, default False
        :return: created object JSON, or the error payload on failure
        """
        json = dict(
            api_key=self._api_key,
            source=source if source else self.api_keys["db"],
            name=name,
            description=description,
            project=project,
            schema=dataset,
            is_public=public
        )
        response = requests.post(f"{BASE_URL}/api/v1/catalog/metadataset/", json=json, headers=self._headers)
        # Fix: both branches of the old `if response.ok` check returned the
        # same expression; the dead branch is removed.
        return response.json()

    def datasets(self, limit=100, json=False):
        """
        :param limit: limit the result. default is 100
        :param json: Boolean flag return json or dataframe default value is False.
        :return: results as json or DataFrame; on HTTP error, the error payload
        """
        response = requests.get(f"{BASE_URL}/api/v1/catalog/metadataset/?page=1&page_size={limit}", headers=self._headers)
        if response.ok and json:
            return response.json()['results']
        elif response.ok:
            return pd.DataFrame(response.json()['results'])
        return response.json()

    def dataset(self, dataset_id, json=False):
        """
        :param dataset_id: dataset id should be numeric.
        :param json: Boolean flag, if value is True return json else return dict of dataframe default value is False.
        :return: dataset info and tables list
        """
        dataset = requests.get(f"{BASE_URL}/api/v1/catalog/metadataset/{dataset_id}/", headers=self._headers).json()
        tables = requests.get(
            f"{BASE_URL}/api/v1/catalog/metatable/", params=dict(id=dataset_id), headers=self._headers
        ).json()
        if json:
            return dict(dataset=dataset, tables=tables)
        return dict(dataset=pd.DataFrame([dataset]), tables=pd.DataFrame(tables))

    def table(self, table_id, json=False):
        """
        :param table_id: table id should be numeric.
        :param json: Boolean flag, if value is True return json else return dict of dataframe default value is False.
        :return: table info and fields list
        """
        table = requests.get(f"{BASE_URL}/api/v1/catalog/metatable/{table_id}/", headers=self._headers).json()
        fields = requests.get(f"{BASE_URL}/api/v1/catalog/metafield/", params=dict(q_table=table_id), headers=self._headers).json()
        if json:
            return dict(table=table, fields=fields)
        return dict(table=pd.DataFrame([table]), fields=pd.DataFrame(fields))

    def field(self, field_id, json=False):
        """
        :param field_id: field id should be numeric.
        :param json: Boolean flag, if value is True return json else return dict of dataframe default value is False.
        :return: field info.
        """
        field = requests.get(f"{BASE_URL}/api/v1/catalog/metafield/{field_id}/", headers=self._headers).json()
        if json:
            return field
        return pd.DataFrame([field])

    def update_entity(self, entity, id, values):
        """
        :param entity: String , dataset, table, field
        :param id: Number, entity id
        :param values: dict of values, display_name, description, is_public
        :return: patched entity, or the string "Invalid entity"
        """
        if entity == "dataset":
            response = requests.patch(
                f"{BASE_URL}/api/v1/catalog/metadataset/{id}/", headers=self._headers, json=values
            ).json()
        elif entity == "table":
            response = requests.patch(
                f"{BASE_URL}/api/v1/catalog/metatable/{id}/",
                headers=self._headers,
                json=values
            ).json()
        elif entity == 'field':
            response = requests.patch(
                f"{BASE_URL}/api/v1/catalog/metafield/{id}/",
                headers=self._headers,
                json=values
            ).json()
        else:
            response = "Invalid entity"
        return response

    def update_dataset(self, dataset_id, values):
        """
        :param dataset_id: Number, dataset id
        :param values: dict of values, name, description, is_public
        :return: dataset
        """
        return self.update_entity('dataset', dataset_id, values)

    def update_table(self, table_id, values):
        """
        :param table_id: Number, table table_id
        :param values: dict of values, display_name, description, is_public
        :return: table
        """
        return self.update_entity('table', table_id, values)

    def update_field(self, field_id, values):
        """
        :param field_id: Number, field id
        :param values: dict of values, display_name, description
        :return: field
        """
        return self.update_entity('field', field_id, values)
def fidap_client(api_key, source='bq', api_secret=None):
"""
:param source: Sting
:param api_key: String
:param api_secret: String
:return:
"""
return FidapClient(source=source, api_key=api_key, api_secret=api_secret) | fidap/fidap.py | from typing import List, Any, Dict
import pandas as pd
import requests
from .settings import BASE_URL
import delta_sharing
class FidapClient:
    """
    Client for the Fidap data platform.

    Wraps three groups of functionality:
      * SQL execution against a configurable backend (``sql``, ``tickers``, ``api``)
      * delta-sharing table loads (``load_table_as_dataframe``)
      * catalog metadata CRUD (datasets / tables / fields)
    """
    _api_key = None
    _api_secret = None
    # Public delta-sharing profile consumed by load_table_as_dataframe().
    _file_path = "https://fidap.s3-us-west-2.amazonaws.com/fidap_data.share"
    _custom_source = None
    _headers = None

    def __init__(self, source, api_key, api_secret):
        """
        :param source: default backend identifier (e.g. 'bq', 'sf', 's3')
        :param api_key: API key, also sent as the "api-key" request header
        :param api_secret: API secret forwarded with query payloads
        """
        self._custom_source = source
        self._api_key = api_key
        self._api_secret = api_secret
        self._headers = {"api-key": api_key}

    @property
    def api_keys(self):
        """Credential/source payload merged into most request bodies."""
        return {'api_key': self._api_key, 'api_secret': self._api_secret, 'db': self._custom_source}

    def sql(self, sql, source=None):
        """
        :param sql: SQL query here in str
        :param source: optional backend override. NOTE: a truthy value
            permanently switches this client's default source, not just this
            one call (kept as-is for backward compatibility).
        :return: Pandas Dataframe
        """
        if source:
            self._custom_source = source
        return self.api({'sql_query': sql, **self.api_keys})

    def load_table_as_dataframe(self, share_name="fidap_share", schema_name=None, table_name=None, df_type='pandas'):
        """
        :param share_name: String, your share name default is fidap_share.
        :param schema_name: String, Schema name where table exist.
        :param table_name: String, Table name want to load.
        :param df_type: String, pandas or spark.
        :return: dataframe, or an error *string* for an unknown df_type
            (string return kept for backward compatibility).
        """
        _ = delta_sharing.SharingClient(self._file_path)
        table_url = self._file_path + "#" + f"{share_name}.{schema_name}.{table_name}"
        if df_type == 'spark':
            df = delta_sharing.load_as_spark(table_url)
        elif df_type == 'pandas':
            df = delta_sharing.load_as_pandas(table_url)
        else:
            df = "Invalid dataframe type."
        return df

    def tickers(self, field, ticker, source):
        """
        :param field: field for lookup
        :param ticker: ticker for specify a ticker type
        :param source: source connection type snowflake, bigquery etc.
        :return: Pandas Dataframe
        """
        # SECURITY: field/ticker are interpolated straight into SQL; do not
        # pass untrusted input here (server-side validation not guaranteed).
        query = dict(
            bq=f"select {field} from tickers where fidapschema.ticker='{ticker}'",
            sf=f"select {field} from tickers where ticker='{ticker}'",
            s3=f"select {field} from tickers where ticker='{ticker}'"
        )
        return self.sql(sql=query[source if source else self._custom_source], source=source)

    def api(self, json: Dict[str, Any]):
        """
        :param json: JSON contain function and sql values
        :return: Pandas Dataframe on success; on HTTP 400 the raw error
            payload, on HTTP 401 its 'detail' message.
        """
        response = requests.post(f"{BASE_URL}/api/v1/query/run/query/", json=json, headers=self._headers)
        if response.status_code == 400:
            return response.json()
        if response.status_code == 401:
            return response.json()['detail']
        df = pd.read_json(response.json()['data'])
        return df

    def send_email(self, df: pd.DataFrame, emails: List[str], file_name: str, rows: int = 1000, cols: int = 30) -> bool:
        """
        :param df: Pandas Dataframe (truncated to `rows` x `cols` before sending)
        :param emails: list of Emails
        :param file_name: It is CSV filename
        :param rows: Integer number of rows, default 1000
        :param cols: Integer number of cols, default 30
        :return: bool status, True = all sent, False = something wrong
        """
        df = df.iloc[0:rows, 0:cols]
        data = {
            'emails': emails,
            'df_data': df.to_json(),
            'file_name': file_name,
            **self.api_keys
        }
        response = requests.post(f"{BASE_URL}/api/v1/common/send/email/", json=data, headers=self._headers).json()
        return response['success']

    def create_dataset(self, name=None, description=None, source=None, project=None, dataset=None, public=False):
        """
        :param name: String field required, Dataset name it should be unique
        :param description: Description about dataset, optional
        :param source: optional; falls back to the client's default source
        :param project: Name of Bigquery or Snowflake project/dataset
        :param dataset: Name of bigquery dataset in the project, for snowflake it will be schema name
        :param public: whether the created dataset is public, default False
        :return: created object JSON, or the error payload on failure
        """
        json = dict(
            api_key=self._api_key,
            source=source if source else self.api_keys["db"],
            name=name,
            description=description,
            project=project,
            schema=dataset,
            is_public=public
        )
        response = requests.post(f"{BASE_URL}/api/v1/catalog/metadataset/", json=json, headers=self._headers)
        # Fix: both branches of the old `if response.ok` check returned the
        # same expression; the dead branch is removed.
        return response.json()

    def datasets(self, limit=100, json=False):
        """
        :param limit: limit the result. default is 100
        :param json: Boolean flag return json or dataframe default value is False.
        :return: results as json or DataFrame; on HTTP error, the error payload
        """
        response = requests.get(f"{BASE_URL}/api/v1/catalog/metadataset/?page=1&page_size={limit}", headers=self._headers)
        if response.ok and json:
            return response.json()['results']
        elif response.ok:
            return pd.DataFrame(response.json()['results'])
        return response.json()

    def dataset(self, dataset_id, json=False):
        """
        :param dataset_id: dataset id should be numeric.
        :param json: Boolean flag, if value is True return json else return dict of dataframe default value is False.
        :return: dataset info and tables list
        """
        dataset = requests.get(f"{BASE_URL}/api/v1/catalog/metadataset/{dataset_id}/", headers=self._headers).json()
        tables = requests.get(
            f"{BASE_URL}/api/v1/catalog/metatable/", params=dict(id=dataset_id), headers=self._headers
        ).json()
        if json:
            return dict(dataset=dataset, tables=tables)
        return dict(dataset=pd.DataFrame([dataset]), tables=pd.DataFrame(tables))

    def table(self, table_id, json=False):
        """
        :param table_id: table id should be numeric.
        :param json: Boolean flag, if value is True return json else return dict of dataframe default value is False.
        :return: table info and fields list
        """
        table = requests.get(f"{BASE_URL}/api/v1/catalog/metatable/{table_id}/", headers=self._headers).json()
        fields = requests.get(f"{BASE_URL}/api/v1/catalog/metafield/", params=dict(q_table=table_id), headers=self._headers).json()
        if json:
            return dict(table=table, fields=fields)
        return dict(table=pd.DataFrame([table]), fields=pd.DataFrame(fields))

    def field(self, field_id, json=False):
        """
        :param field_id: field id should be numeric.
        :param json: Boolean flag, if value is True return json else return dict of dataframe default value is False.
        :return: field info.
        """
        field = requests.get(f"{BASE_URL}/api/v1/catalog/metafield/{field_id}/", headers=self._headers).json()
        if json:
            return field
        return pd.DataFrame([field])

    def update_entity(self, entity, id, values):
        """
        :param entity: String , dataset, table, field
        :param id: Number, entity id
        :param values: dict of values, display_name, description, is_public
        :return: patched entity, or the string "Invalid entity"
        """
        if entity == "dataset":
            response = requests.patch(
                f"{BASE_URL}/api/v1/catalog/metadataset/{id}/", headers=self._headers, json=values
            ).json()
        elif entity == "table":
            response = requests.patch(
                f"{BASE_URL}/api/v1/catalog/metatable/{id}/",
                headers=self._headers,
                json=values
            ).json()
        elif entity == 'field':
            response = requests.patch(
                f"{BASE_URL}/api/v1/catalog/metafield/{id}/",
                headers=self._headers,
                json=values
            ).json()
        else:
            response = "Invalid entity"
        return response

    def update_dataset(self, dataset_id, values):
        """
        :param dataset_id: Number, dataset id
        :param values: dict of values, name, description, is_public
        :return: dataset
        """
        return self.update_entity('dataset', dataset_id, values)

    def update_table(self, table_id, values):
        """
        :param table_id: Number, table table_id
        :param values: dict of values, display_name, description, is_public
        :return: table
        """
        return self.update_entity('table', table_id, values)

    def update_field(self, field_id, values):
        """
        :param field_id: Number, field id
        :param values: dict of values, display_name, description
        :return: field
        """
        return self.update_entity('field', field_id, values)
def fidap_client(api_key, source='bq', api_secret=None):
"""
:param source: Sting
:param api_key: String
:param api_secret: String
:return:
"""
return FidapClient(source=source, api_key=api_key, api_secret=api_secret) | 0.827967 | 0.167934 |
import pyro
from ..gp import GP
from ._pyro_mixin import _PyroMixin
class PyroGP(GP, _PyroMixin):
    """
    An approximate GP whose parameters are trained with Pyro's inference
    tools (e.g. :obj:`pyro.infer.SVI`) instead of a GPyTorch
    :obj:`~gpytorch.mlls.MarginalLogLikelihood`.

    Use this base class to embed a GP inside a larger Pyro probabilistic
    model, or to combine it with likelihoods that carry additional
    variational/approximate distributions. See
    `the Pyro examples <examples/09_Pyro_Integration/index.html>`_.

    Args:
        :attr:`variational_strategy` (:obj:`~gpytorch.variational.VariationalStrategy`):
            Defines the variational distribution and the marginalization strategy.
        :attr:`likelihood` (:obj:`~gpytorch.likelihoods.Likelihood`):
            The likelihood for the model.
        :attr:`num_data` (int):
            Total number of training data points (needed for SGD minibatch scaling).
        :attr:`name_prefix` (str, optional):
            Prefix put in front of all pyro sample/plate site names.
        :attr:`beta` (float - default 1.):
            Multiplier on the KL divergence term. 1 recovers true variational
            inference (`Scalable Variational Gaussian Process Classification`_);
            values below 1 weaken the regularization (`the beta-VAE paper`_).

    Example:
        >>> likelihood = gpytorch.likelihoods.GaussianLikelihood()
        >>> model = MyVariationalGP(variational_strategy, likelihood, train_y.size())
        >>> optimizer = pyro.optim.Adam({"lr": 0.01})
        >>> elbo = pyro.infer.Trace_ELBO(num_particles=64, vectorize_particles=True)
        >>> svi = pyro.infer.SVI(model.model, model.guide, optimizer, elbo)
        >>> for _ in range(n_iter):
        >>>     loss = svi.step(train_x, train_y)

    .. _Scalable Variational Gaussian Process Classification:
        http://proceedings.mlr.press/v38/hensman15.pdf
    .. _the beta-VAE paper:
        https://openreview.net/pdf?id=Sy2fzU9gl
    """

    def __init__(self, variational_strategy, likelihood, num_data, name_prefix="", beta=1.0):
        super().__init__()
        self.variational_strategy = variational_strategy
        self.likelihood = likelihood
        self.num_data = num_data
        self.name_prefix = name_prefix
        self.beta = beta
        # The likelihood maintains its own pyro sites and needs the same
        # minibatch-scaling and naming information.
        self.likelihood.num_data = num_data
        self.likelihood.name_prefix = name_prefix

    def guide(self, input, target, *args, **kwargs):
        r"""
        Pyro guide: samples from q(f) and delegates to the likelihood's guide.

        :param torch.Tensor input: :math:`\mathbf X` The input values
        :param torch.Tensor target: :math:`\mathbf y` The target values
        :param args: Extra positional arguments for the likelihood's forward.
        :param kwargs: Extra keyword arguments for the likelihood's forward.
        """
        variational_dist_f = self.pyro_guide(input, beta=self.beta, name_prefix=self.name_prefix)
        return self.likelihood.pyro_guide(variational_dist_f, target, *args, **kwargs)

    def model(self, input, target, *args, **kwargs):
        r"""
        Pyro model: registers this module, samples from p(f), and delegates
        to the likelihood's model.

        :param torch.Tensor input: :math:`\mathbf X` The input values
        :param torch.Tensor target: :math:`\mathbf y` The target values
        :param args: Extra positional arguments for the likelihood's forward.
        :param kwargs: Extra keyword arguments for the likelihood's forward.
        """
        pyro.module(self.name_prefix + ".gp", self)
        prior_dist_f = self.pyro_model(input, beta=self.beta, name_prefix=self.name_prefix)
        return self.likelihood.pyro_model(prior_dist_f, target, *args, **kwargs)

    def __call__(self, inputs, prior=False):
        # Promote 1-D inputs to a trailing feature dimension so downstream
        # code always sees 2-D data.
        if inputs.dim() == 1:
            inputs = inputs.unsqueeze(-1)
        return self.variational_strategy(inputs, prior=prior)
import pyro
from ..gp import GP
from ._pyro_mixin import _PyroMixin
class PyroGP(GP, _PyroMixin):
"""
A :obj:`~gpytorch.models.ApproximateGP` designed to work with Pyro.
This module makes it possible to include GP models with more complex probablistic models,
or to use likelihood functions with additional variational/approximate distributions.
The parameters of these models are learned using Pyro's inference tools, unlike other models
that optimize models with respect to a :obj:`~gpytorch.mlls.MarginalLogLikelihood`.
See `the Pyro examples <examples/09_Pyro_Integration/index.html>`_ for detailed examples.
Args:
:attr:`variational_strategy` (:obj:`~gpytorch.variational.VariationalStrategy`):
The variational strategy that defines the variational distribution and
the marginalization strategy.
:attr:`likelihood` (:obj:`~gpytorch.likelihoods.Likelihood`):
The likelihood for the model
:attr:`num_data` (int):
The total number of training data points (necessary for SGD)
:attr:`name_prefix` (str, optional):
A prefix to put in front of pyro sample/plate sites
:attr:`beta` (float - default 1.):
A multiplicative factor for the KL divergence term.
Setting it to 1 (default) recovers true variational inference
(as derived in `Scalable Variational Gaussian Process Classification`_).
Setting it to anything less than 1 reduces the regularization effect of the model
(similarly to what was proposed in `the beta-VAE paper`_).
Example:
>>> class MyVariationalGP(gpytorch.models.PyroGP):
>>> # implementation
>>>
>>> # variational_strategy = ...
>>> likelihood = gpytorch.likelihoods.GaussianLikelihood()
>>> model = MyVariationalGP(variational_strategy, likelihood, train_y.size())
>>>
>>> optimizer = pyro.optim.Adam({"lr": 0.01})
>>> elbo = pyro.infer.Trace_ELBO(num_particles=64, vectorize_particles=True)
>>> svi = pyro.infer.SVI(model.model, model.guide, optimizer, elbo)
>>>
>>> # Optimize variational parameters
>>> for _ in range(n_iter):
>>> loss = svi.step(train_x, train_y)
.. _Scalable Variational Gaussian Process Classification:
http://proceedings.mlr.press/v38/hensman15.pdf
.. _the beta-VAE paper:
https://openreview.net/pdf?id=Sy2fzU9gl
"""
def __init__(self, variational_strategy, likelihood, num_data, name_prefix="", beta=1.0):
super().__init__()
self.variational_strategy = variational_strategy
self.name_prefix = name_prefix
self.likelihood = likelihood
self.num_data = num_data
self.beta = beta
# Set values for the likelihood
self.likelihood.num_data = num_data
self.likelihood.name_prefix = name_prefix
def guide(self, input, target, *args, **kwargs):
r"""
Guide function for Pyro inference.
Includes the guide for the GP's likelihood function as well.
:param torch.Tensor input: :math:`\mathbf X` The input values values
:param torch.Tensor target: :math:`\mathbf y` The target values
:param args: Additional arguments passed to the likelihood's forward function.
:param kwargs: Additional keyword arguments passed to the likelihood's forward function.
"""
# Get q(f)
function_dist = self.pyro_guide(input, beta=self.beta, name_prefix=self.name_prefix)
return self.likelihood.pyro_guide(function_dist, target, *args, **kwargs)
def model(self, input, target, *args, **kwargs):
r"""
Model function for Pyro inference.
Includes the model for the GP's likelihood function as well.
:param torch.Tensor input: :math:`\mathbf X` The input values values
:param torch.Tensor target: :math:`\mathbf y` The target values
:param args: Additional arguments passed to the likelihood's forward function.
:param kwargs: Additional keyword arguments passed to the likelihood's forward function.
"""
# Include module
pyro.module(self.name_prefix + ".gp", self)
# Get p(f)
function_dist = self.pyro_model(input, beta=self.beta, name_prefix=self.name_prefix)
return self.likelihood.pyro_model(function_dist, target, *args, **kwargs)
def __call__(self, inputs, prior=False):
if inputs.dim() == 1:
inputs = inputs.unsqueeze(-1)
return self.variational_strategy(inputs, prior=prior) | 0.950371 | 0.702849 |
import requests
url = "http://192.168.86.192/targetcmd/"  # target-command REST endpoint (local device)
ramRomPageWrBase = 0x70  # presumably the RAM/ROM paging-enable I/O port — confirm vs HW docs
ramRomPgenWrBase = 0x74  # presumably base port of the page-register bank — confirm vs HW docs
def takeControl():
    # Acquire the target bus: enable raw control, disable and clear wait
    # states, then take bus mastership. Each step is one GET to the REST API;
    # the call order is significant.
    req = requests.request("get", url + "rawBusControlOn")
    req = requests.request("get", url + "rawBusWaitDisable")
    req = requests.request("get", url + "rawBusWaitClear")
    req = requests.request("get", url + "rawBusTake")
def enablePaging():
    # Write 0x01 to the paging-enable port with an I/O write strobe:
    # address and data are set up first, then IORQ/WR are asserted low and
    # released in nested order (the call order is significant).
    req = requests.request("get", url + "rawBusSetAddress/" + f"{ramRomPageWrBase:04x}")
    req = requests.request("get", url + "rawBusSetData/01")
    req = requests.request("get", url + "rawBusSetLine/IORQ/0")
    req = requests.request("get", url + "rawBusSetLine/WR/0")
    req = requests.request("get", url + "rawBusSetLine/WR/1")
    req = requests.request("get", url + "rawBusSetLine/IORQ/1")
def writeRegister(regIdx, val):
    # Write `val` to page register `regIdx` (port ramRomPgenWrBase + regIdx)
    # using the same IORQ/WR strobe sequence as enablePaging().
    req = requests.request("get", url + "rawBusSetAddress/" + f"{(ramRomPgenWrBase + regIdx):04x}")
    req = requests.request("get", url + "rawBusSetData/" + f"{val:02x}")
    req = requests.request("get", url + "rawBusSetLine/IORQ/0")
    req = requests.request("get", url + "rawBusSetLine/WR/0")
    req = requests.request("get", url + "rawBusSetLine/WR/1")
    req = requests.request("get", url + "rawBusSetLine/IORQ/1")
def writeData(addr, data):
    """Write one byte `data` to target memory address `addr` via a MREQ/WR
    strobe sequence (set address/data, then assert and release MREQ/WR)."""
    # Fix: addr is a 16-bit address (set with 04x below); it was logged with
    # only 2 hex digits ({addr:02x}).
    print(f"Writing addr {addr:04x} data {data:02x}")
    req = requests.request("get", url + "rawBusSetAddress/" + f"{addr:04x}")
    req = requests.request("get", url + "rawBusSetData/" + f"{data:02x}")
    req = requests.request("get", url + "rawBusSetLine/MREQ/0")
    req = requests.request("get", url + "rawBusSetLine/WR/0")
    req = requests.request("get", url + "rawBusSetLine/WR/1")
    req = requests.request("get", url + "rawBusSetLine/MREQ/1")
def readData(addr):
    # Read one byte from target memory at `addr` via a MREQ/RD strobe.
    req = requests.request("get", url + "rawBusSetAddress/" + f"{addr:04x}")
    req = requests.request("get", url + "rawBusGetData")
    req = requests.request("get", url + "rawBusSetLine/MREQ/0")
    req = requests.request("get", url + "rawBusSetLine/RD/0")
    # NOTE(review): the rawBusGetData response above is overwritten; "pib"
    # is actually read from the RD/0 set-line response — confirm this is the
    # intended sampling point.
    dataVal = req.json()["pib"]
    req = requests.request("get", url + "rawBusSetLine/RD/1")
    req = requests.request("get", url + "rawBusSetLine/MREQ/1")
    return dataVal
# Demo sequence: take the bus, enable paging, then map four pages and write
# one marker byte into each 16 KB bank.
takeControl()
enablePaging()
for i in range(4):
    writeRegister(i, i + 1 + 0x20)  # page value for bank i — meaning per HW docs
    # NOTE(review): modulo 0xff (255), not 0x100 — confirm intended.
    dataToWrite = (0x44 + i * 57) % 0xff
    # print(f"Writing {dataToWrite:02x}")
    writeData(0x0000 + i * 0x4000, dataToWrite)
print("Read", readData(0x0000 + i * 0x4000)) | PiSw/examples/hardwareDebug/hwDebugRRRegSet.py | import requests
url = "http://192.168.86.192/targetcmd/"  # target-command REST endpoint (local device)
ramRomPageWrBase = 0x70  # presumably the RAM/ROM paging-enable I/O port — confirm vs HW docs
ramRomPgenWrBase = 0x74  # presumably base port of the page-register bank — confirm vs HW docs
def takeControl():
    # Acquire the target bus: enable raw control, disable/clear waits, take
    # bus mastership. Call order is significant.
    req = requests.request("get", url + "rawBusControlOn")
    req = requests.request("get", url + "rawBusWaitDisable")
    req = requests.request("get", url + "rawBusWaitClear")
    req = requests.request("get", url + "rawBusTake")
def enablePaging():
    # Write 0x01 to the paging-enable port via an IORQ/WR strobe sequence.
    req = requests.request("get", url + "rawBusSetAddress/" + f"{ramRomPageWrBase:04x}")
    req = requests.request("get", url + "rawBusSetData/01")
    req = requests.request("get", url + "rawBusSetLine/IORQ/0")
    req = requests.request("get", url + "rawBusSetLine/WR/0")
    req = requests.request("get", url + "rawBusSetLine/WR/1")
    req = requests.request("get", url + "rawBusSetLine/IORQ/1")
def writeRegister(regIdx, val):
    # Write `val` to page register `regIdx` (port ramRomPgenWrBase + regIdx).
    req = requests.request("get", url + "rawBusSetAddress/" + f"{(ramRomPgenWrBase + regIdx):04x}")
    req = requests.request("get", url + "rawBusSetData/" + f"{val:02x}")
    req = requests.request("get", url + "rawBusSetLine/IORQ/0")
    req = requests.request("get", url + "rawBusSetLine/WR/0")
    req = requests.request("get", url + "rawBusSetLine/WR/1")
    req = requests.request("get", url + "rawBusSetLine/IORQ/1")
def writeData(addr, data):
    # Write one byte to target memory via a MREQ/WR strobe sequence.
    print(f"Writing addr {addr:02x} data {data:02x}")
    req = requests.request("get", url + "rawBusSetAddress/" + f"{addr:04x}")
    req = requests.request("get", url + "rawBusSetData/" + f"{data:02x}")
    req = requests.request("get", url + "rawBusSetLine/MREQ/0")
    req = requests.request("get", url + "rawBusSetLine/WR/0")
    req = requests.request("get", url + "rawBusSetLine/WR/1")
    req = requests.request("get", url + "rawBusSetLine/MREQ/1")
def readData(addr):
    # Read one byte from target memory at `addr` via a MREQ/RD strobe.
    req = requests.request("get", url + "rawBusSetAddress/" + f"{addr:04x}")
    req = requests.request("get", url + "rawBusGetData")
    req = requests.request("get", url + "rawBusSetLine/MREQ/0")
    req = requests.request("get", url + "rawBusSetLine/RD/0")
    # NOTE(review): the rawBusGetData response is overwritten; "pib" is read
    # from the RD/0 response — confirm intended.
    dataVal = req.json()["pib"]
    req = requests.request("get", url + "rawBusSetLine/RD/1")
    req = requests.request("get", url + "rawBusSetLine/MREQ/1")
    return dataVal
# Demo sequence: take the bus, enable paging, then map four pages and write
# one marker byte into each 16 KB bank.
takeControl()
enablePaging()
for i in range(4):
    writeRegister(i, i + 1 + 0x20)  # page value for bank i — meaning per HW docs
    # NOTE(review): modulo 0xff (255), not 0x100 — confirm intended.
    dataToWrite = (0x44 + i * 57) % 0xff
    # print(f"Writing {dataToWrite:02x}")
    writeData(0x0000 + i * 0x4000, dataToWrite)
for i in range(4):
print("Read", readData(0x0000 + i * 0x4000)) | 0.06028 | 0.050894 |
from django.test import TestCase
from django.contrib.auth import get_user_model
from rest_framework.test import APIClient
from .models import Notion
# Resolve the active user model once so the tests honour AUTH_USER_MODEL.
User = get_user_model()
class NotionTestCase(TestCase):
    """Integration tests for the notion API endpoints."""

    def setUp(self):
        # Two users: self.user owns notions 1-2, self.user2 owns notion 3.
        self.user = User.objects.create_user(username='testuser',password='<PASSWORD>')
        self.user2 = User.objects.create_user(username='testuser2',password='<PASSWORD>')
        Notion.objects.create(content="my first notion", user=self.user)
        Notion.objects.create(content="my second notion", user=self.user)
        Notion.objects.create(content="my third notion", user=self.user2)
        self.currentCount = Notion.objects.all().count()

    def test_notion_created(self):
        """A new notion gets the next sequential id and keeps its owner."""
        notion_obj = Notion.objects.create(content="my second notion", user=self.user)
        self.assertEqual(notion_obj.id,4)
        self.assertEqual(notion_obj.user, self.user)

    def get_client(self):
        # Helper (not a test): an API client logged in as self.user.
        client = APIClient()
        client.login(username=self.user.username, password='<PASSWORD>')
        return client

    # Fix: this class defined test_notion_list twice; the first definition
    # (expecting 1 result) was silently shadowed by the second and never ran.
    # The dead duplicate has been removed; the surviving test expects all 3
    # notions created in setUp.
    def test_notion_list(self):
        client = self.get_client()
        response = client.get("/api/notions/")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.json()),3)

    def test_action_like(self):
        """Liking a notion returns the updated like count."""
        client = self.get_client()
        response = client.post("/api/notions/action/", {"id":1 ,"action":"like"})
        self.assertEqual(response.status_code, 200)
        like_count = response.json().get("likes")
        self.assertEqual(like_count, 1)
        print(response.json())

    def test_action_unlike(self):
        #like first then unlike
        client = self.get_client()
        response = client.post("/api/notions/action/", {"id":2 ,"action":"like"})
        self.assertEqual(response.status_code, 200)
        response = client.post("/api/notions/action/", {"id":2 ,"action":"unlike"})
        self.assertEqual(response.status_code, 200)
        like_count = response.json().get("likes")
        self.assertEqual(like_count, 0)

    def test_action_share(self):
        #share a post then count number of posts
        client = self.get_client()
        response = client.post("/api/notions/action/", {"id":2 ,"action":"share"})
        self.assertEqual(response.status_code, 201)
        data = response.json()
        new_notion_id = data.get("id")
        self.assertNotEqual(2,new_notion_id)
        self.assertEqual(self.currentCount +1, new_notion_id)

    def test_notion_create_api(self):
        """Creating a notion via the API yields the next sequential id."""
        request_data = {"content": "This is my test notion"}
        client = self.get_client()
        response = client.post("/api/notions/create/",request_data)
        self.assertEqual(response.status_code, 201)
        response_data = response.json()
        new_notion_id = response_data.get("id")
        self.assertEqual(self.currentCount + 1, new_notion_id)

    def test_notion_detail_api_view(self):
        client = self.get_client()
        response = client.get("/api/notions/1/")
        self.assertEqual(response.status_code, 200)
        data = response.json()
        _id = data.get("id")
        self.assertEqual(_id, 1)

    def test_notion_delete_api_view(self):
        """Delete succeeds once, 404s on repeat, 401s for a non-owner's notion."""
        client = self.get_client()
        response = client.delete("/api/notions/1/delete/")
        self.assertEqual(response.status_code, 200)
        client = self.get_client()
        response = client.delete("/api/notions/1/delete/")
        self.assertEqual(response.status_code, 404)
        response_incorrect_owner = client.delete("/api/notions/3/delete/")
self.assertEqual(response_incorrect_owner.status_code, 401) | myapp/tests.py | from django.test import TestCase
from django.contrib.auth import get_user_model
from rest_framework.test import APIClient
from .models import Notion
# Create your tests here.
User = get_user_model()
class NotionTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(username='testuser',password='<PASSWORD>')
self.user2 = User.objects.create_user(username='testuser2',password='<PASSWORD>')
Notion.objects.create(content="my first notion", user=self.user)
Notion.objects.create(content="my second notion", user=self.user)
Notion.objects.create(content="my third notion", user=self.user2)
self.currentCount = Notion.objects.all().count()
def test_notion_created(self):
notion_obj = Notion.objects.create(content="my second notion", user=self.user)
self.assertEqual(notion_obj.id,4)
self.assertEqual(notion_obj.user, self.user)
def get_client(self):
client = APIClient()
client.login(username=self.user.username, password='<PASSWORD>')
return client
def test_notion_list(self):
client = self.get_client()
response = client.get("/api/notions/")
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.json()),1)
def test_notion_list(self):
client = self.get_client()
response = client.get("/api/notions/")
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.json()),3)
def test_action_like(self):
client = self.get_client()
response = client.post("/api/notions/action/", {"id":1 ,"action":"like"})
self.assertEqual(response.status_code, 200)
like_count = response.json().get("likes")
self.assertEqual(like_count, 1)
print(response.json())
def test_action_unlike(self):
#like first then unlike
client = self.get_client()
response = client.post("/api/notions/action/", {"id":2 ,"action":"like"})
self.assertEqual(response.status_code, 200)
response = client.post("/api/notions/action/", {"id":2 ,"action":"unlike"})
self.assertEqual(response.status_code, 200)
like_count = response.json().get("likes")
self.assertEqual(like_count, 0)
def test_action_share(self):
#share a post then count number of posts
client = self.get_client()
response = client.post("/api/notions/action/", {"id":2 ,"action":"share"})
self.assertEqual(response.status_code, 201)
data = response.json()
new_notion_id = data.get("id")
self.assertNotEqual(2,new_notion_id)
self.assertEqual(self.currentCount +1, new_notion_id)
def test_notion_create_api(self):
request_data = {"content": "This is my test notion"}
client = self.get_client()
response = client.post("/api/notions/create/",request_data)
self.assertEqual(response.status_code, 201)
response_data = response.json()
new_notion_id = response_data.get("id")
self.assertEqual(self.currentCount + 1, new_notion_id)
def test_notion_detail_api_view(self):
client = self.get_client()
response = client.get("/api/notions/1/")
self.assertEqual(response.status_code, 200)
data = response.json()
_id = data.get("id")
self.assertEqual(_id, 1)
def test_notion_delete_api_view(self):
client = self.get_client()
response = client.delete("/api/notions/1/delete/")
self.assertEqual(response.status_code, 200)
client = self.get_client()
response = client.delete("/api/notions/1/delete/")
self.assertEqual(response.status_code, 404)
response_incorrect_owner = client.delete("/api/notions/3/delete/")
self.assertEqual(response_incorrect_owner.status_code, 401) | 0.348534 | 0.222352 |
import argparse
from corpus_readers import PandasBasedCorpus
import json
from cv_utils import CVManager, run_cv_evaluation
from global_constants import *
import os
from collections import defaultdict
from gram_matrix_extractors import compute_default_predefined_coling_gram_matrices
from corpus_readers import unpickle_precomputed_gram_matrices
import numpy as np
from global_utils import get_predictions
from eval_pd import evaluate_and_get_message
from cv_utils import evaluate_macro_performance_and_std
from sklearn.linear_model import LogisticRegression
from metaclassifier import get_metaclassifier_training_data_and_labels, get_metaclassifier_prediction_data
from results_writer import ResultsWriter
import logging
def get_arg_parser():
parser = argparse.ArgumentParser(description='Gets all the numbers for the emnlp paper')
parser.add_argument('-c', '--corpus_name', help='corpus_name', required=True)
parser.add_argument('-s', '--corpus_settings_filename', help='json file with the corpus settings ', required=True)
parser.add_argument('-m', '--precomputed_matrices_settings_filename', help='json file with the paths'
'to the precomputed gram matrices',
required=True)
parser.add_argument('-o', '--output_folder', help='folder to which to output the predictions data', required=True)
parser.add_argument('-r', '--remove_irrelevant', help='remove all positives/all negatives (NOTE: MUST BE THE SAME '
'SETTING '
'THAT YOU USED TO CREATE THE PRECOMPUTED GRAM MATRICES)',
dest='remove_irrelevant', default=False, action='store_true')
parser.add_argument('-k', '--kernel_settings_file', help='file with all the kernel settings')
return parser
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
parser = get_arg_parser()
args = parser.parse_args()
# running experiments
logging.info("Reading experiment settings from %s" %(args.kernel_settings_file))
experiments_config = json.load(open(args.kernel_settings_file))
corpus = PandasBasedCorpus(corpus_settings_path=args.corpus_settings_filename,
corpus_name=args.corpus_name)
dataset = corpus.dataset
labels = corpus.labels
#initializing the results writer
rwriter = ResultsWriter(args.corpus_settings_filename, corpus.dataset, args.output_folder)
#Reading the corpus configuration file
config = json.load(open(args.corpus_settings_filename))
#Computing predefined matrices which are fast to compute
basedir = corpus.basedir
matrix_folder = os.path.join(basedir, "data/gram-matrices/%s" % (args.corpus_name))
gram_matrix_dict = defaultdict(dict)
compute_default_predefined_coling_gram_matrices(gram_matrix_dict, corpus.dataset)
#reading the precomputed matrices
precomputed_matrices_file = os.path.join(corpus.basedir, args.precomputed_matrices_settings_filename)
unpickle_precomputed_gram_matrices(precomputed_matrices_file, gram_matrix_dict, corpus.basedir)
logging.info("total set of matrices available:")
for key in gram_matrix_dict:
logging.info(key)
logging.info("")
# RUNNING STANDALONE SYSTEMS
standalone_systems = [(k["id"], k["kernels"]) for k in experiments_config["standalone_experiments"]]
standalone_systems_dict = dict(standalone_systems)
standalone_predictions = dict([(k, get_predictions(v, gram_matrix_dict, labels)) for k, v in standalone_systems])
messages = []
for key in standalone_predictions:
message = evaluate_and_get_message(corpus.dataset, standalone_predictions[key],
label=key, show_test=True,
skip_all_positives_and_all_negatives=args.remove_irrelevant)
rwriter.write_results(key,standalone_predictions[key])
messages.append(message)
logging.info("Results obtained by the standalone systems: ")
for m in messages:
logging.info(m)
# SUMMING OUTPUTS OF SELECTED STANDALONE SYSTEMS
combo_rez = defaultdict(dict)
sum_messages = []
for experiment in experiments_config["standalone_experiments_to_sum"]:
standalone_system_names = set(experiment["kernels"])
relevant_standalone_predictions = dict([(k, standalone_predictions[k]) for k in standalone_system_names])
experiment_id = experiment["id"]
for mode in [DEV, TEST]:
combo_rez[experiment_id][mode] = sum([np.array(v[mode]) for k, v in relevant_standalone_predictions.items()]).tolist()
message = evaluate_and_get_message(corpus.dataset, combo_rez[experiment_id],
label=experiment_id, show_test=True,
skip_all_positives_and_all_negatives=args.remove_irrelevant)
rwriter.write_results(experiment_id, combo_rez[experiment_id])
sum_messages.append(message)
logging.info("Results obtained by the simple ensemble systems which simply sum outputs of the basic systems")
for m in sum_messages:
logging.info(m)
#RUNNING CROSS-VALIDATION
train_qids_file = [e['file'] for e in config['answers_files'] if e["mode"] == TRAIN][0]
cv_manager = CVManager(dataset[TRAIN], labels[TRAIN],
qid_file=os.path.join(config['basedir'], train_qids_file))
run_all_systems_in_cv = len(experiments_config["standalone_systems_to_run_cv_upon"])==1 \
and experiments_config["standalone_systems_to_run_cv_upon"][0]=="all"
cv_configs = standalone_systems_dict.keys() if run_all_systems_in_cv else experiments_config["standalone_systems_to_run_cv_upon"]
cv_predictions = dict()
for config_name in cv_configs:
cv_predictions[config_name] = run_cv_evaluation(standalone_systems_dict[config_name],
gram_matrix_dict,
cv_manager)
rwriter.write_cv_results(config_name, cv_predictions[config_name])
logging.info("Cross-validation results")
print "SYSTEM\tMRR\tMAP\tP@1"
for label, pred in cv_predictions.items():
macrop = evaluate_macro_performance_and_std(pred)
message = u"%s\t%5.2f \u00B1%5.2f\t%5.2f \u00B1%5.2f\t%5.2f \u00B1%5.2f" % (tuple([label]) + tuple(macrop))
print message.encode('utf-8')
#RUNNING ENSEMBLE SYSTEMS WITH LOGISTIC REGRESSION
ensemble_systems = [(k["id"], k["kernels"]) for k in experiments_config["ensembles"]]
meta_messages=[]
for system_name, feature_names in ensemble_systems:
train_X, train_Y = get_metaclassifier_training_data_and_labels(cv_predictions, feature_names)
X = get_metaclassifier_prediction_data(standalone_predictions, feature_names)
classifier = LogisticRegression()
classifier.fit(train_X, train_Y)
ensemble_scores = dict()
for mode in [DEV, TEST]:
ensemble_scores[mode] = classifier.predict_proba(X[mode])[:, 1]
rwriter.write_results(system_name, ensemble_scores)
meta_messages.append(evaluate_and_get_message(corpus.dataset, ensemble_scores, label=system_name, show_test=True))
print ""
print "************"
print "Results obtained by the standalone SVMs which sum several kernels with different features (please refer to the paper for the notation explanations)"
print "System\tMRR-DEV\tMAP-DEV\tP@1-DEV\tMRR-TEST\tMAP-TEST\tP@1-TEST"
for m in messages:
print m
print ""
print "Results obtained by the simple ensemble systems which simply sum outputs of the basic systems"
print "System\tMRR-DEV\tMAP-DEV\tP@1-DEV\tMRR-TEST\tMAP-TEST\tP@1-TEST"
for m in sum_messages:
print m
print ""
print "Results obtained by the logistic-regression meta_classifier"
print "System\tMRR-DEV\tMAP-DEV\tP@1-DEV\tMRR-TEST\tMAP-TEST\tP@1-TEST"
for m in meta_messages:
print m | scripts/emnlp2018/run_experiments.py | import argparse
from corpus_readers import PandasBasedCorpus
import json
from cv_utils import CVManager, run_cv_evaluation
from global_constants import *
import os
from collections import defaultdict
from gram_matrix_extractors import compute_default_predefined_coling_gram_matrices
from corpus_readers import unpickle_precomputed_gram_matrices
import numpy as np
from global_utils import get_predictions
from eval_pd import evaluate_and_get_message
from cv_utils import evaluate_macro_performance_and_std
from sklearn.linear_model import LogisticRegression
from metaclassifier import get_metaclassifier_training_data_and_labels, get_metaclassifier_prediction_data
from results_writer import ResultsWriter
import logging
def get_arg_parser():
parser = argparse.ArgumentParser(description='Gets all the numbers for the emnlp paper')
parser.add_argument('-c', '--corpus_name', help='corpus_name', required=True)
parser.add_argument('-s', '--corpus_settings_filename', help='json file with the corpus settings ', required=True)
parser.add_argument('-m', '--precomputed_matrices_settings_filename', help='json file with the paths'
'to the precomputed gram matrices',
required=True)
parser.add_argument('-o', '--output_folder', help='folder to which to output the predictions data', required=True)
parser.add_argument('-r', '--remove_irrelevant', help='remove all positives/all negatives (NOTE: MUST BE THE SAME '
'SETTING '
'THAT YOU USED TO CREATE THE PRECOMPUTED GRAM MATRICES)',
dest='remove_irrelevant', default=False, action='store_true')
parser.add_argument('-k', '--kernel_settings_file', help='file with all the kernel settings')
return parser
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
parser = get_arg_parser()
args = parser.parse_args()
# running experiments
logging.info("Reading experiment settings from %s" %(args.kernel_settings_file))
experiments_config = json.load(open(args.kernel_settings_file))
corpus = PandasBasedCorpus(corpus_settings_path=args.corpus_settings_filename,
corpus_name=args.corpus_name)
dataset = corpus.dataset
labels = corpus.labels
#initializing the results writer
rwriter = ResultsWriter(args.corpus_settings_filename, corpus.dataset, args.output_folder)
#Reading the corpus configuration file
config = json.load(open(args.corpus_settings_filename))
#Computing predefined matrices which are fast to compute
basedir = corpus.basedir
matrix_folder = os.path.join(basedir, "data/gram-matrices/%s" % (args.corpus_name))
gram_matrix_dict = defaultdict(dict)
compute_default_predefined_coling_gram_matrices(gram_matrix_dict, corpus.dataset)
#reading the precomputed matrices
precomputed_matrices_file = os.path.join(corpus.basedir, args.precomputed_matrices_settings_filename)
unpickle_precomputed_gram_matrices(precomputed_matrices_file, gram_matrix_dict, corpus.basedir)
logging.info("total set of matrices available:")
for key in gram_matrix_dict:
logging.info(key)
logging.info("")
# RUNNING STANDALONE SYSTEMS
standalone_systems = [(k["id"], k["kernels"]) for k in experiments_config["standalone_experiments"]]
standalone_systems_dict = dict(standalone_systems)
standalone_predictions = dict([(k, get_predictions(v, gram_matrix_dict, labels)) for k, v in standalone_systems])
messages = []
for key in standalone_predictions:
message = evaluate_and_get_message(corpus.dataset, standalone_predictions[key],
label=key, show_test=True,
skip_all_positives_and_all_negatives=args.remove_irrelevant)
rwriter.write_results(key,standalone_predictions[key])
messages.append(message)
logging.info("Results obtained by the standalone systems: ")
for m in messages:
logging.info(m)
# SUMMING OUTPUTS OF SELECTED STANDALONE SYSTEMS
combo_rez = defaultdict(dict)
sum_messages = []
for experiment in experiments_config["standalone_experiments_to_sum"]:
standalone_system_names = set(experiment["kernels"])
relevant_standalone_predictions = dict([(k, standalone_predictions[k]) for k in standalone_system_names])
experiment_id = experiment["id"]
for mode in [DEV, TEST]:
combo_rez[experiment_id][mode] = sum([np.array(v[mode]) for k, v in relevant_standalone_predictions.items()]).tolist()
message = evaluate_and_get_message(corpus.dataset, combo_rez[experiment_id],
label=experiment_id, show_test=True,
skip_all_positives_and_all_negatives=args.remove_irrelevant)
rwriter.write_results(experiment_id, combo_rez[experiment_id])
sum_messages.append(message)
logging.info("Results obtained by the simple ensemble systems which simply sum outputs of the basic systems")
for m in sum_messages:
logging.info(m)
#RUNNING CROSS-VALIDATION
train_qids_file = [e['file'] for e in config['answers_files'] if e["mode"] == TRAIN][0]
cv_manager = CVManager(dataset[TRAIN], labels[TRAIN],
qid_file=os.path.join(config['basedir'], train_qids_file))
run_all_systems_in_cv = len(experiments_config["standalone_systems_to_run_cv_upon"])==1 \
and experiments_config["standalone_systems_to_run_cv_upon"][0]=="all"
cv_configs = standalone_systems_dict.keys() if run_all_systems_in_cv else experiments_config["standalone_systems_to_run_cv_upon"]
cv_predictions = dict()
for config_name in cv_configs:
cv_predictions[config_name] = run_cv_evaluation(standalone_systems_dict[config_name],
gram_matrix_dict,
cv_manager)
rwriter.write_cv_results(config_name, cv_predictions[config_name])
logging.info("Cross-validation results")
print "SYSTEM\tMRR\tMAP\tP@1"
for label, pred in cv_predictions.items():
macrop = evaluate_macro_performance_and_std(pred)
message = u"%s\t%5.2f \u00B1%5.2f\t%5.2f \u00B1%5.2f\t%5.2f \u00B1%5.2f" % (tuple([label]) + tuple(macrop))
print message.encode('utf-8')
#RUNNING ENSEMBLE SYSTEMS WITH LOGISTIC REGRESSION
ensemble_systems = [(k["id"], k["kernels"]) for k in experiments_config["ensembles"]]
meta_messages=[]
for system_name, feature_names in ensemble_systems:
train_X, train_Y = get_metaclassifier_training_data_and_labels(cv_predictions, feature_names)
X = get_metaclassifier_prediction_data(standalone_predictions, feature_names)
classifier = LogisticRegression()
classifier.fit(train_X, train_Y)
ensemble_scores = dict()
for mode in [DEV, TEST]:
ensemble_scores[mode] = classifier.predict_proba(X[mode])[:, 1]
rwriter.write_results(system_name, ensemble_scores)
meta_messages.append(evaluate_and_get_message(corpus.dataset, ensemble_scores, label=system_name, show_test=True))
print ""
print "************"
print "Results obtained by the standalone SVMs which sum several kernels with different features (please refer to the paper for the notation explanations)"
print "System\tMRR-DEV\tMAP-DEV\tP@1-DEV\tMRR-TEST\tMAP-TEST\tP@1-TEST"
for m in messages:
print m
print ""
print "Results obtained by the simple ensemble systems which simply sum outputs of the basic systems"
print "System\tMRR-DEV\tMAP-DEV\tP@1-DEV\tMRR-TEST\tMAP-TEST\tP@1-TEST"
for m in sum_messages:
print m
print ""
print "Results obtained by the logistic-regression meta_classifier"
print "System\tMRR-DEV\tMAP-DEV\tP@1-DEV\tMRR-TEST\tMAP-TEST\tP@1-TEST"
for m in meta_messages:
print m | 0.480235 | 0.166879 |
from django.db import models
from cloudinary.models import CloudinaryField
from cloudinary.uploader import upload
from django.utils import timezone
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, BaseUserManager
class UserManager(BaseUserManager):
def create_user(self, first_name, last_name, username, email, password, **kwargs):
if not email:
raise ValueError('Email required')
if not first_name:
raise ValueError('First name required')
if not last_name:
raise ValueError('Last name required')
if not username:
raise ValueError('Username required')
if not password:
raise ValueError('Password required')
email = self.normalize_email(email)
user = self.model(first_name=first_name, last_name=last_name, username=username, email=email, password=password, **kwargs)
user.set_password(password)
user.save()
return user
def create_superuser(self, first_name, last_name, username, email, password, **kwargs):
kwargs.setdefault('is_staff', True)
kwargs.setdefault('is_superuser', True)
kwargs.setdefault('is_active', True)
if kwargs.get('is_staff') is not True:
raise ValueError('Superuser must have is_staff=True')
if kwargs.get('is_superuser') is not True:
raise ValueError('Superuser must have is_superuser=True')
if kwargs.get('is_active') is not True:
raise ValueError('Superuser must have is_active=True')
return self.create_user(first_name, last_name, username, email, password, **kwargs)
class User(AbstractBaseUser, PermissionsMixin):
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
username = models.CharField(max_length=255, unique=True)
email = models.EmailField(max_length=255, unique=True)
profile_pic = models.URLField(
default="https://res.cloudinary.com/victormainak/image/upload/v1606634881/icons8-male-user-100_zratap.png")
bio = models.TextField(blank=True)
website = models.URLField(null=True)
social_media = models.JSONField(null=True)
date_joined = models.CharField(max_length=255, default=timezone.now)
is_staff = models.BooleanField(default=False)
is_active = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email', 'first_name', 'last_name']
def __str__(self):
return f"{self.username} | ID: {self.id}"
def upload_profile_pic(self, file):
try:
link = upload(file)
print('CLOUDINARY URL: ', link.get('url'))
self.profile_pic = link.get('url')
self.save()
details = {'public_id': link.get('public_id'), 'url':link.get('url')}
return details
except Exception as e:
print("Cloudinary Error: ", e)
class Project(models.Model):
title = models.CharField(max_length=255)
landing_page_image = models.URLField(
default='https://res.cloudinary.com/victormainak/image/upload/v1606635375/default_image_01_x3tuoe.png')
description = models.TextField()
site_url = models.URLField()
user = models.ForeignKey(User, on_delete=models.CASCADE)
@property
def average_rating(self):
reviews = Review.find_by_project(self)
average_rating = 0
for review in reviews:
review_average = (review.design + review.usability + review.content)/3
average_rating += review_average
average_rating = average_rating/len(reviews)
return round(average_rating, 2)
def upload_landing_page(self, file):
try:
link = upload(file)
print('CLOUDINARY URL: ', link.get('url'))
self.landing_page_image = link.get('url')
self.save()
details = {'public_id': link.get(
'public_id'), 'url': link.get('url')}
return details
except Exception as e:
print("Cloudinary Error: ", e)
@classmethod
def find_by_id(cls, id):
"""
Returns single instance
"""
project = cls.objects.filter(id = id).first()
return project
@classmethod
def find_by_user(cls, user):
"""
Returns queryset by user
"""
projects = cls.objects.filter(user = user).all()
return projects
class Review(models.Model):
project = models.ForeignKey(Project, on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.CASCADE)
design = models.IntegerField(null=True)
usability = models.IntegerField(null=True)
content = models.IntegerField(null=True)
comment = models.TextField(blank=True)
@classmethod
def find_by_project(cls, project):
"""
Returns queryset of reviews by project
"""
reviews = Review.objects.filter(project = project).all()
return reviews | apps/api/models.py | from django.db import models
from cloudinary.models import CloudinaryField
from cloudinary.uploader import upload
from django.utils import timezone
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, BaseUserManager
class UserManager(BaseUserManager):
def create_user(self, first_name, last_name, username, email, password, **kwargs):
if not email:
raise ValueError('Email required')
if not first_name:
raise ValueError('First name required')
if not last_name:
raise ValueError('Last name required')
if not username:
raise ValueError('Username required')
if not password:
raise ValueError('Password required')
email = self.normalize_email(email)
user = self.model(first_name=first_name, last_name=last_name, username=username, email=email, password=password, **kwargs)
user.set_password(password)
user.save()
return user
def create_superuser(self, first_name, last_name, username, email, password, **kwargs):
kwargs.setdefault('is_staff', True)
kwargs.setdefault('is_superuser', True)
kwargs.setdefault('is_active', True)
if kwargs.get('is_staff') is not True:
raise ValueError('Superuser must have is_staff=True')
if kwargs.get('is_superuser') is not True:
raise ValueError('Superuser must have is_superuser=True')
if kwargs.get('is_active') is not True:
raise ValueError('Superuser must have is_active=True')
return self.create_user(first_name, last_name, username, email, password, **kwargs)
class User(AbstractBaseUser, PermissionsMixin):
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
username = models.CharField(max_length=255, unique=True)
email = models.EmailField(max_length=255, unique=True)
profile_pic = models.URLField(
default="https://res.cloudinary.com/victormainak/image/upload/v1606634881/icons8-male-user-100_zratap.png")
bio = models.TextField(blank=True)
website = models.URLField(null=True)
social_media = models.JSONField(null=True)
date_joined = models.CharField(max_length=255, default=timezone.now)
is_staff = models.BooleanField(default=False)
is_active = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email', 'first_name', 'last_name']
def __str__(self):
return f"{self.username} | ID: {self.id}"
def upload_profile_pic(self, file):
try:
link = upload(file)
print('CLOUDINARY URL: ', link.get('url'))
self.profile_pic = link.get('url')
self.save()
details = {'public_id': link.get('public_id'), 'url':link.get('url')}
return details
except Exception as e:
print("Cloudinary Error: ", e)
class Project(models.Model):
title = models.CharField(max_length=255)
landing_page_image = models.URLField(
default='https://res.cloudinary.com/victormainak/image/upload/v1606635375/default_image_01_x3tuoe.png')
description = models.TextField()
site_url = models.URLField()
user = models.ForeignKey(User, on_delete=models.CASCADE)
@property
def average_rating(self):
reviews = Review.find_by_project(self)
average_rating = 0
for review in reviews:
review_average = (review.design + review.usability + review.content)/3
average_rating += review_average
average_rating = average_rating/len(reviews)
return round(average_rating, 2)
def upload_landing_page(self, file):
try:
link = upload(file)
print('CLOUDINARY URL: ', link.get('url'))
self.landing_page_image = link.get('url')
self.save()
details = {'public_id': link.get(
'public_id'), 'url': link.get('url')}
return details
except Exception as e:
print("Cloudinary Error: ", e)
@classmethod
def find_by_id(cls, id):
"""
Returns single instance
"""
project = cls.objects.filter(id = id).first()
return project
@classmethod
def find_by_user(cls, user):
"""
Returns queryset by user
"""
projects = cls.objects.filter(user = user).all()
return projects
class Review(models.Model):
project = models.ForeignKey(Project, on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.CASCADE)
design = models.IntegerField(null=True)
usability = models.IntegerField(null=True)
content = models.IntegerField(null=True)
comment = models.TextField(blank=True)
@classmethod
def find_by_project(cls, project):
"""
Returns queryset of reviews by project
"""
reviews = Review.objects.filter(project = project).all()
return reviews | 0.558327 | 0.089018 |
from django.http import Http404
from django.contrib import messages
from django.core.paginator import Paginator
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404, redirect, render
from .models import Entry, Topic
from .forms import EntryForm, TopicForm
def check_topic_owner(current_user, topic_owner):
    """Abort with a 404 unless *current_user* owns the topic.

    Raising Http404 (rather than a 403) avoids revealing that another
    user's topic exists at all.
    """
    if current_user == topic_owner:
        return
    raise Http404
def index(request):
    """Render the Learning Log home page."""
    template_name = 'learning_log/index.html'
    return render(request, template_name)
@login_required
def topics(request):
    """List the logged-in user's topics, 20 per page.

    NOTE(review): no explicit order_by here — ordering presumably comes
    from the Topic model's Meta; confirm, since Paginator over an
    unordered queryset gives unstable pages.
    """
    owned_topics = Topic.objects.filter(owner=request.user)
    pager = Paginator(owned_topics, 20)  # 20 topics per page.
    page_obj = pager.get_page(request.GET.get('page'))
    return render(request, 'learning_log/topics.html', {'page_obj': page_obj})
def topic(request, topic_id):
    """Show one topic with its entries, 10 entries per page.

    A non-public topic is visible only to its owner; anyone else
    receives a 404.
    """
    requested = get_object_or_404(Topic, pk=topic_id)
    viewer_is_owner = request.user == requested.owner
    if not (requested.public or viewer_is_owner):
        raise Http404
    pager = Paginator(requested.entry_set.all(), 10)  # 10 entries per page.
    page_obj = pager.get_page(request.GET.get('page'))
    context = {
        'topic': requested,
        'page_obj': page_obj,
    }
    return render(request, 'learning_log/topic.html', context)
@login_required
def new_topic(request):
    """Create a new topic owned by the logged-in user."""
    if request.method == 'POST':
        # Submitted data: validate, attach the owner, then persist.
        form = TopicForm(request.POST)
        if form.is_valid():
            # commit=False defers the save until the owner is set.
            created = form.save(commit=False)
            created.owner = request.user
            created.save()
            # Jump straight to the freshly created topic.
            return redirect('learning_log:topic', topic_id=created.id)
    else:
        # First visit: present an empty form.
        form = TopicForm()
    context = {'form': form}
    return render(request, 'learning_log/new_topic.html', context)
@login_required
def edit_topic(request, topic_id):
    """Let the topic's owner change its fields."""
    existing = get_object_or_404(Topic, pk=topic_id)
    check_topic_owner(request.user, existing.owner)
    if request.method == 'POST':
        # Re-bind the form to the submitted data plus the instance,
        # so a valid save updates the row in place.
        form = TopicForm(request.POST, instance=existing)
        if form.is_valid():
            form.save()
            # Back to the (now edited) topic page.
            return redirect('learning_log:topic', topic_id=existing.id)
    else:
        # Pre-fill the form with the topic's current data.
        form = TopicForm(instance=existing)
    context = {
        'topic': existing,
        'form': form,
    }
    return render(request, 'learning_log/edit_topic.html', context)
@login_required
def delete_topic(request, topic_id):
    """Confirmation page (GET) and actual deletion (POST) of a topic.

    Deleting a topic also removes its entries via the FK cascade.
    """
    doomed = get_object_or_404(Topic, pk=topic_id)
    check_topic_owner(request.user, doomed.owner)
    if request.method == 'POST':
        # The confirmation form was submitted — delete for real and
        # leave a flash message behind.
        doomed.delete()
        messages.info(
            request,
            f'The topic "{doomed}" has successfully deleted.')
        return redirect('learning_log:topics')
    # GET: show the "are you sure?" page.
    context = {'topic': doomed}
    return render(request, 'learning_log/delete_topic.html', context)
@login_required
def new_entry(request, topic_id):
    """Add a new entry to one of the logged-in user's topics."""
    parent_topic = get_object_or_404(Topic, pk=topic_id)
    check_topic_owner(request.user, parent_topic.owner)
    if request.method == 'POST':
        # Validate the submitted entry, attach its topic, then save.
        form = EntryForm(request.POST)
        if form.is_valid():
            # commit=False defers the save until the topic FK is set.
            created = form.save(commit=False)
            created.topic = parent_topic
            created.save()
            # Show the topic page including the new entry.
            return redirect('learning_log:topic', topic_id=parent_topic.id)
    else:
        # First visit: present an empty form.
        form = EntryForm()
    context = {
        'topic': parent_topic,
        'form': form,
    }
    return render(request, 'learning_log/new_entry.html', context)
@login_required
def edit_entry(request, entry_id):
    """Let the owning user modify an existing entry."""
    entry = get_object_or_404(Entry, pk=entry_id)
    parent_topic = entry.topic
    # Ownership is checked via the entry's topic, not the entry itself.
    check_topic_owner(request.user, parent_topic.owner)
    if request.method == 'POST':
        # Bind submitted data to the existing instance so a valid
        # save updates the row in place.
        form = EntryForm(request.POST, instance=entry)
        if form.is_valid():
            form.save()
            return redirect('learning_log:topic', topic_id=parent_topic.id)
    else:
        # Pre-fill the form with the entry's current content.
        form = EntryForm(instance=entry)
    context = {
        'entry': entry,
        'topic': parent_topic,
        'form': form,
    }
    return render(request, 'learning_log/edit_entry.html', context)
@login_required
def delete_entry(request, entry_id):
    """Delete a single entry, then redirect to its topic.

    NOTE(review): unlike delete_topic, this view deletes on ANY request
    method — including GET — with no confirmation step. A destructive GET
    is unsafe against link prefetching and CSRF; consider requiring POST,
    but confirm the templates/links before changing the contract.
    """
    entry = get_object_or_404(Entry, pk=entry_id)
    topic = entry.topic
    # Only the owner of the entry's topic may delete it.
    check_topic_owner(request.user, topic.owner)
    # Delete immediately and leave a flash message behind.
    entry.delete()
    messages.info(
        request,
        f'The entry "{entry}" has successfully deleted.')
    # Redirects to the topic of the entry deleted.
    return redirect('learning_log:topic', topic_id=topic.id)
from django.contrib import messages
from django.core.paginator import Paginator
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404, redirect, render
from .models import Entry, Topic
from .forms import EntryForm, TopicForm
def check_topic_owner(current_user, topic_owner):
    """Raise Http404 unless *current_user* owns the topic."""
    if current_user == topic_owner:
        return
    raise Http404
def index(request):
    """Render the Learning Log home page."""
    return render(request, 'learning_log/index.html')
@login_required
def topics(request):
    """List the current user's topics, paginated 20 per page."""
    owned = Topic.objects.filter(owner=request.user)
    page_obj = Paginator(owned, 20).get_page(request.GET.get('page'))
    return render(request, 'learning_log/topics.html', {'page_obj': page_obj})
def topic(request, topic_id):
    """Show one topic and its entries, paginated 10 entries per page."""
    topic = get_object_or_404(Topic, pk=topic_id)
    # Non-public topics are visible to their owner only.
    if not topic.public and request.user != topic.owner:
        raise Http404
    page_obj = Paginator(topic.entry_set.all(), 10).get_page(
        request.GET.get('page'))
    return render(request, 'learning_log/topic.html', {
        'topic': topic,
        'page_obj': page_obj,
    })
@login_required
def new_topic(request):
    """Create a new topic owned by the current user."""
    if request.method == 'POST':
        # Validate the submitted data; on success attach the owner,
        # save, and jump to the new topic's page.
        form = TopicForm(request.POST)
        if form.is_valid():
            topic = form.save(commit=False)
            topic.owner = request.user
            topic.save()
            return redirect('learning_log:topic', topic_id=topic.id)
    else:
        # Initial GET: present an empty form.
        form = TopicForm()
    return render(request, 'learning_log/new_topic.html', {'form': form})
@login_required
def edit_topic(request, topic_id):
    """Edit an existing topic; only its owner may do so."""
    topic = get_object_or_404(Topic, pk=topic_id)
    check_topic_owner(request.user, topic.owner)
    if request.method == 'POST':
        # Bind the submitted data to the existing instance and save.
        form = TopicForm(request.POST, instance=topic)
        if form.is_valid():
            form.save()
            return redirect('learning_log:topic', topic_id=topic.id)
    else:
        # Pre-fill the form with the topic's current data.
        form = TopicForm(instance=topic)
    return render(request, 'learning_log/edit_topic.html', {
        'topic': topic,
        'form': form,
    })
@login_required
def delete_topic(request, topic_id):
    """Delete a topic and its entries after explicit confirmation."""
    topic = get_object_or_404(Topic, pk=topic_id)
    check_topic_owner(request.user, topic.owner)
    # A GET renders a confirmation page; the deletion itself only
    # happens when the confirmation form is submitted via POST.
    if request.method == 'POST':
        topic.delete()
        messages.info(
            request,
            f'The topic "{topic}" has successfully deleted.')
        return redirect('learning_log:topics')
    return render(request, 'learning_log/delete_topic.html', {'topic': topic})
@login_required
def new_entry(request, topic_id):
    """Add a new entry to the given topic; only its owner may do so."""
    topic = get_object_or_404(Topic, pk=topic_id)
    check_topic_owner(request.user, topic.owner)
    if request.method == 'POST':
        # Validate the submitted entry; on success attach it to the
        # topic before saving, then return to the topic page.
        form = EntryForm(request.POST)
        if form.is_valid():
            entry = form.save(commit=False)
            entry.topic = topic
            entry.save()
            return redirect('learning_log:topic', topic_id=topic.id)
    else:
        # Initial GET: present an empty form.
        form = EntryForm()
    return render(request, 'learning_log/new_entry.html', {
        'topic': topic,
        'form': form,
    })
@login_required
def edit_entry(request, entry_id):
    """Edit an existing entry; only the topic owner may do so."""
    entry = get_object_or_404(Entry, pk=entry_id)
    topic = entry.topic
    check_topic_owner(request.user, topic.owner)
    if request.method == 'POST':
        # Bind the submitted data to the existing entry and save.
        form = EntryForm(request.POST, instance=entry)
        if form.is_valid():
            form.save()
            return redirect('learning_log:topic', topic_id=topic.id)
    else:
        # Pre-fill the form with the entry's current content.
        form = EntryForm(instance=entry)
    return render(request, 'learning_log/edit_entry.html', {
        'entry': entry,
        'topic': topic,
        'form': form,
    })
@login_required
def delete_entry(request, entry_id):
    """Delete an entry permanently; only the topic owner may do so.

    NOTE(review): unlike delete_topic, this deletes on ANY request
    method (including GET) with no confirmation step -- confirm that
    is intended.
    """
    entry = get_object_or_404(Entry, pk=entry_id)
    topic = entry.topic
    check_topic_owner(request.user, topic.owner)
    # Delete the entry and queue a flash message confirming the deletion.
    entry.delete()
    messages.info(
        request,
        f'The entry "{entry}" has successfully deleted.')
    # Send the user back to the topic the entry belonged to.
    return redirect('learning_log:topic', topic_id=topic.id)
import json
from json import JSONDecodeError
import argparse
import urllib.request
from colorama import Fore
from prettytable import PrettyTable
statistics = "/var/log/dystopia/statistics.json"
key_file = "/var/log/dystopia/ipstack.key"
def print_message(message):
    """Print *message* with a green "[*]" status prefix."""
    print("{}[*] {}{}".format(Fore.GREEN, Fore.WHITE, message))
def print_error(message):
    """Print *message* with a red "[-]" error prefix."""
    print("{}[-] {}{}".format(Fore.RED, Fore.WHITE, message))
def print_warning(message):
    """Print *message* with a yellow "[!]" warning prefix."""
    print("{}[!] {}{}".format(Fore.YELLOW, Fore.WHITE, message))
def read_json_file(filename):
    """Load and return the parsed JSON content of *filename*.

    Prints an error and exits the process when *filename* is None,
    missing, or contains invalid JSON.
    """
    if filename is None:
        print_error("file was not found!")
        exit()
    try:
        with open(filename, "r") as infile:
            return json.load(infile)
    except JSONDecodeError as e:
        # Bug fix: report the file actually being read, not the global
        # `statistics` path.
        print_error(
            "file: " + filename + " might be corrupted! JSONDecodeError: " + str(e)
        )
        exit()
    except FileNotFoundError:
        print_error("file: '{}' was not found.".format(filename))
        exit()
def write_to_file(filename, data):
    """Append *data* to *filename*, creating the file if needed."""
    try:
        with open(filename, "a+") as out:
            out.write(data)
    except FileNotFoundError:
        print_error("file: '{}' was not found.".format(filename))
        exit()
def get_access_key():
    """Return the first line of the ipstack key file, or None if absent."""
    try:
        with open(key_file, "r") as handle:
            lines = handle.readlines()
        return lines[0]
    except FileNotFoundError:
        return None
def get_geo_data(address):
    """Query api.ipstack.com for geolocation data about *address*.

    Returns the decoded JSON dict, or None when no API key is
    configured. Prints an error and exits on connection failure.
    """
    key = get_access_key()
    # Bug fix: test for a missing key file BEFORE calling .strip();
    # the old order raised AttributeError when get_access_key()
    # returned None.
    if key is None:
        return None
    key = key.strip()
    if len(key) == 0:
        return None
    url = "http://api.ipstack.com/" + address.strip() + "?access_key=" + key
    try:
        # Keep `url` as the string (the old code shadowed it with the
        # response object, so the error message below could break).
        with urllib.request.urlopen(url) as response:
            return json.loads(response.read().decode())
    except urllib.error.URLError:
        print_error("Connection refused: " + url)
        exit()
class Statistics:
    """Builds report tables from the dystopia statistics JSON file.

    Relies on the module-level ``args`` namespace produced by argparse
    and on the module-level ``statistics`` path.
    """

    def __init__(self):
        self.ips = []
        self.sort = args.sort
        self.update = args.update
        if self.update:
            print_message("Updating geolocation data!")
        self.filename = args.filename
        self.table = PrettyTable()
        self.table.field_names = [
            "IP Address",
            "Times Connected",
            "Failed Logins",
            "Correct Logins",
            "Continent Name",
            "Country Name",
            "Region Name",
            "Zip",
            "latitude",
            "longitude",
        ]
        if args.address is not None:
            self.address = args.address
        self.data = read_json_file(statistics)
        # Remember every address present in the statistics file.
        for ip in self.data:
            self.ips.append(ip)

    def _row_for(self, ip):
        """Return the table row (list of cells) for one address."""
        stat = self.data[ip]
        return [
            ip,
            stat["Times Connected"],
            stat["Failed Logins"],
            stat["Correct Logins"],
            stat["Continent Name"],
            stat["Country Name"],
            stat["Region Name"],
            stat["Zip"],
            stat["latitude"],
            stat["longitude"],
        ]

    def show_report(self):
        """Print a table with one row per known address."""
        for ip in self.ips:
            self.table.add_row(self._row_for(ip))
        # Bug fix: 'sortKey' is not a valid PrettyTable option, and the
        # old lambda called int() on dotted-quad IP strings; sort by the
        # requested column only.
        print(self.table.get_string(sortby=self.sort))
        # Bug fix: the old code tested the nonexistent attribute
        # 'self.save' (save is a method), raising AttributeError.
        if self.filename is not None:
            self.save()

    def show_address_report(self):
        """Print a one-row table for the address given on the command line."""
        try:
            self.table.add_row(self._row_for(self.address))
        except KeyError:
            print_error("Address: " + self.address + " not found!")
            exit()
        print(self.table)
        if self.filename is not None:
            self.save()

    def geolocation(self):
        """Fill in geolocation fields for addresses that lack them.

        When --update is given, existing entries are refreshed as well.
        """
        for ip in self.ips:
            try:
                _ = self.data[ip]["Zip"]
                if self.update:
                    # Force a refresh by taking the fetch path below.
                    raise KeyError
            except KeyError:
                json_data = get_geo_data(ip)
                if json_data is None:
                    print_warning(
                        "Could not fetch geolocation data please put your api key here:"
                        + key_file
                    )
                    for field in ("Continent Name", "Country Name", "Region Name",
                                  "Zip", "latitude", "longitude"):
                        self.data[ip][field] = None
                else:
                    self.data[ip]["Continent Name"] = json_data["continent_name"]
                    self.data[ip]["Country Name"] = json_data["country_name"]
                    self.data[ip]["Region Name"] = json_data["region_name"]
                    self.data[ip]["Zip"] = json_data["zip"]
                    self.data[ip]["latitude"] = json_data["latitude"]
                    self.data[ip]["longitude"] = json_data["longitude"]

    def update_statistics_file(self):
        """Write the (possibly updated) statistics back to disk."""
        with open(statistics, "w+") as f:
            json.dump(self.data, f, indent=4, ensure_ascii=False)

    def save(self):
        """Export the current table to an HTML report file.

        Bug fix: the old code only wrote the file when the name already
        ended in '.html'; otherwise it appended the suffix but never
        wrote anything.
        """
        if self.filename is None:
            return
        if not self.filename.endswith(".html"):
            self.filename = self.filename + ".html"
        write_to_file(self.filename, self.table.get_html_string())
if __name__ == "__main__":
    # Command-line interface.
    parser = argparse.ArgumentParser(description="dstat | Statistics tool for Dystopia")
    parser.add_argument("--address", "-a", help="ip address to investigate")
    parser.add_argument(
        "--report", "-r",
        help="show a general report",
        action="store_true",
        default=False,
    )
    parser.add_argument("--sort", "-s", help="sort the report table by row name")
    parser.add_argument(
        "--update", "-U",
        help="update geolocation entries",
        action="store_true",
        default=False,
    )
    parser.add_argument("--filename", "-f", help="Filename of report file")
    args = parser.parse_args()
    # Build the statistics object, refresh geolocation data, persist it,
    # then print whichever report was requested.
    s = Statistics()
    s.geolocation()
    s.update_statistics_file()
    if args.report:
        s.show_report()
    elif args.address is not None:
        s.show_address_report()
from json import JSONDecodeError
import argparse
import urllib.request
from colorama import Fore
from prettytable import PrettyTable
statistics = "/var/log/dystopia/statistics.json"
key_file = "/var/log/dystopia/ipstack.key"
def print_message(message):
    """Print *message* with a green "[*]" status prefix."""
    print("{}[*] {}{}".format(Fore.GREEN, Fore.WHITE, message))
def print_error(message):
    """Print *message* with a red "[-]" error prefix."""
    print("{}[-] {}{}".format(Fore.RED, Fore.WHITE, message))
def print_warning(message):
    """Print *message* with a yellow "[!]" warning prefix."""
    print("{}[!] {}{}".format(Fore.YELLOW, Fore.WHITE, message))
def read_json_file(filename):
    """Load and return the parsed JSON content of *filename*.

    Prints an error and exits the process when *filename* is None,
    missing, or contains invalid JSON.
    """
    if filename is None:
        print_error("file was not found!")
        exit()
    try:
        with open(filename, "r") as infile:
            return json.load(infile)
    except JSONDecodeError as e:
        # Bug fix: report the file actually being read, not the global
        # `statistics` path.
        print_error(
            "file: " + filename + " might be corrupted! JSONDecodeError: " + str(e)
        )
        exit()
    except FileNotFoundError:
        print_error("file: '{}' was not found.".format(filename))
        exit()
def write_to_file(filename, data):
    """Append *data* to *filename*, creating the file if needed."""
    try:
        with open(filename, "a+") as out:
            out.write(data)
    except FileNotFoundError:
        print_error("file: '{}' was not found.".format(filename))
        exit()
def get_access_key():
    """Return the first line of the ipstack key file, or None if absent."""
    try:
        with open(key_file, "r") as handle:
            lines = handle.readlines()
        return lines[0]
    except FileNotFoundError:
        return None
def get_geo_data(address):
    """Query api.ipstack.com for geolocation data about *address*.

    Returns the decoded JSON dict, or None when no API key is
    configured. Prints an error and exits on connection failure.
    """
    key = get_access_key()
    # Bug fix: test for a missing key file BEFORE calling .strip();
    # the old order raised AttributeError when get_access_key()
    # returned None.
    if key is None:
        return None
    key = key.strip()
    if len(key) == 0:
        return None
    url = "http://api.ipstack.com/" + address.strip() + "?access_key=" + key
    try:
        # Keep `url` as the string (the old code shadowed it with the
        # response object, so the error message below could break).
        with urllib.request.urlopen(url) as response:
            return json.loads(response.read().decode())
    except urllib.error.URLError:
        print_error("Connection refused: " + url)
        exit()
class Statistics:
    """Builds report tables from the dystopia statistics JSON file.

    Relies on the module-level ``args`` namespace produced by argparse
    and on the module-level ``statistics`` path.
    """

    def __init__(self):
        self.ips = []
        self.sort = args.sort
        self.update = args.update
        if self.update:
            print_message("Updating geolocation data!")
        self.filename = args.filename
        self.table = PrettyTable()
        self.table.field_names = [
            "IP Address",
            "Times Connected",
            "Failed Logins",
            "Correct Logins",
            "Continent Name",
            "Country Name",
            "Region Name",
            "Zip",
            "latitude",
            "longitude",
        ]
        if args.address is not None:
            self.address = args.address
        self.data = read_json_file(statistics)
        # Remember every address present in the statistics file.
        for ip in self.data:
            self.ips.append(ip)

    def _row_for(self, ip):
        """Return the table row (list of cells) for one address."""
        stat = self.data[ip]
        return [
            ip,
            stat["Times Connected"],
            stat["Failed Logins"],
            stat["Correct Logins"],
            stat["Continent Name"],
            stat["Country Name"],
            stat["Region Name"],
            stat["Zip"],
            stat["latitude"],
            stat["longitude"],
        ]

    def show_report(self):
        """Print a table with one row per known address."""
        for ip in self.ips:
            self.table.add_row(self._row_for(ip))
        # Bug fix: 'sortKey' is not a valid PrettyTable option, and the
        # old lambda called int() on dotted-quad IP strings; sort by the
        # requested column only.
        print(self.table.get_string(sortby=self.sort))
        # Bug fix: the old code tested the nonexistent attribute
        # 'self.save' (save is a method), raising AttributeError.
        if self.filename is not None:
            self.save()

    def show_address_report(self):
        """Print a one-row table for the address given on the command line."""
        try:
            self.table.add_row(self._row_for(self.address))
        except KeyError:
            print_error("Address: " + self.address + " not found!")
            exit()
        print(self.table)
        if self.filename is not None:
            self.save()

    def geolocation(self):
        """Fill in geolocation fields for addresses that lack them.

        When --update is given, existing entries are refreshed as well.
        """
        for ip in self.ips:
            try:
                _ = self.data[ip]["Zip"]
                if self.update:
                    # Force a refresh by taking the fetch path below.
                    raise KeyError
            except KeyError:
                json_data = get_geo_data(ip)
                if json_data is None:
                    print_warning(
                        "Could not fetch geolocation data please put your api key here:"
                        + key_file
                    )
                    for field in ("Continent Name", "Country Name", "Region Name",
                                  "Zip", "latitude", "longitude"):
                        self.data[ip][field] = None
                else:
                    self.data[ip]["Continent Name"] = json_data["continent_name"]
                    self.data[ip]["Country Name"] = json_data["country_name"]
                    self.data[ip]["Region Name"] = json_data["region_name"]
                    self.data[ip]["Zip"] = json_data["zip"]
                    self.data[ip]["latitude"] = json_data["latitude"]
                    self.data[ip]["longitude"] = json_data["longitude"]

    def update_statistics_file(self):
        """Write the (possibly updated) statistics back to disk."""
        with open(statistics, "w+") as f:
            json.dump(self.data, f, indent=4, ensure_ascii=False)

    def save(self):
        """Export the current table to an HTML report file.

        Bug fix: the old code only wrote the file when the name already
        ended in '.html'; otherwise it appended the suffix but never
        wrote anything.
        """
        if self.filename is None:
            return
        if not self.filename.endswith(".html"):
            self.filename = self.filename + ".html"
        write_to_file(self.filename, self.table.get_html_string())
if __name__ == "__main__":
    # Command-line interface.
    parser = argparse.ArgumentParser(description="dstat | Statistics tool for Dystopia")
    parser.add_argument("--address", "-a", help="ip address to investigate")
    parser.add_argument(
        "--report", "-r",
        help="show a general report",
        action="store_true",
        default=False,
    )
    parser.add_argument("--sort", "-s", help="sort the report table by row name")
    parser.add_argument(
        "--update", "-U",
        help="update geolocation entries",
        action="store_true",
        default=False,
    )
    parser.add_argument("--filename", "-f", help="Filename of report file")
    args = parser.parse_args()
    # Build the statistics object, refresh geolocation data, persist it,
    # then print whichever report was requested.
    s = Statistics()
    s.geolocation()
    s.update_statistics_file()
    if args.report:
        s.show_report()
    elif args.address is not None:
        s.show_address_report()
import datetime
import jwt
from django import forms
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.shortcuts import redirect
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from django.views import View
from django.views.decorators.clickjacking import xframe_options_exempt
from pretix.base.forms import SettingsForm, SecretKeySettingsField
from pretix.base.models import Event, Order, Item
from pretix.base.reldate import RelativeDateTimeField
from pretix.control.views.event import EventSettingsFormView, EventSettingsViewMixin
from pretix.presale.views import EventViewMixin
from pretix.presale.views.order import OrderPositionDetailMixin
class VenuelessSettingsForm(SettingsForm):
    """Per-event settings form for the Venueless integration.

    All fields are optional. The secret/issuer/audience values are the
    JWT parameters used when signing access tokens (see
    OrderPositionJoin), and venueless_url is the target the user is
    redirected to.
    """
    venueless_url = forms.URLField(
        label=_("Venueless URL"),
        required=False,
    )
    venueless_secret = SecretKeySettingsField(
        label=_("Venueless secret"),
        required=False,
    )
    venueless_issuer = forms.CharField(
        label=_("Venueless issuer"),
        required=False,
    )
    venueless_audience = forms.CharField(
        label=_("Venueless audience"),
        required=False,
    )
    venueless_start = forms.Field()  # type: ignore[assignment]
    venueless_start = RelativeDateTimeField(
        label=_('Start of live event'),
        required=False,
    )
    venueless_allow_pending = forms.BooleanField(
        label=_('Allow users to access the live event before their order is paid'),
        required=False,
    )
    venueless_all_items = forms.BooleanField(
        label=_('Allow buyers of all admission products'),
        required=False
    )
    # Product whitelist; the widget is hidden client-side while
    # venueless_all_items is checked (data-inverse-dependency).
    venueless_items = forms.ModelMultipleChoiceField(
        widget=forms.CheckboxSelectMultiple(
            attrs={
                'class': 'scrolling-multiple-choice',
                'data-inverse-dependency': '<[name$=venueless_all_items]'
            }
        ),
        label=_('Limit to products'),
        required=False,
        queryset=Item.objects.none(),
        initial=None
    )
    def __init__(self, *args, **kwargs):
        # pretix passes the event as the 'obj' kwarg; use it to scope
        # the product choices to this event's items.
        event = kwargs['obj']
        super().__init__(*args, **kwargs)
        self.fields['venueless_items'].queryset = event.items.all()
    def clean(self):
        """Convert M2M selections to plain lists of primary keys.

        Presumably needed so the values are serializable by the
        settings store -- confirm.
        """
        data = super().clean()
        for k, v in self.fields.items():
            if isinstance(v, forms.ModelMultipleChoiceField):
                answstr = [o.pk for o in data[k]]
                data[k] = answstr
        return data
class SettingsView(EventSettingsViewMixin, EventSettingsFormView):
    """Renders and persists the Venueless settings for an event."""

    model = Event
    form_class = VenuelessSettingsForm
    template_name = 'pretix_venueless/settings.html'
    permission = 'can_change_settings'

    def get_success_url(self) -> str:
        """Redirect back to this settings page after a successful save."""
        event = self.request.event
        return reverse(
            'plugins:pretix_venueless:settings',
            kwargs={'organizer': event.organizer.slug, 'event': event.slug},
        )
@method_decorator(xframe_options_exempt, 'dispatch')
class OrderPositionJoin(EventViewMixin, OrderPositionDetailMixin, View):
    """Issues a Venueless JWT for one order position and redirects the
    visitor into the live event.

    POST-only; clickjacking protection is disabled for this view
    (xframe_options_exempt).
    """
    def post(self, request, *args, **kwargs):
        if not self.position:
            raise Http404(_('Unknown order code or not authorized to access this order.'))
        # Access requires a paid order (or a pending one when the event
        # explicitly allows it), a non-canceled position, and an
        # admission product.
        forbidden = (
            (self.order.status != Order.STATUS_PAID and not (self.order.status == Order.STATUS_PENDING and
                                                             request.event.settings.venueless_allow_pending))
            or self.position.canceled
            or not self.position.item.admission
        )
        if forbidden:
            raise PermissionDenied()
        # Deny access before the configured start time, resolved against
        # the position's subevent when there is one.
        if request.event.settings.venueless_start and request.event.settings.venueless_start.datetime(self.position.subevent or request.event) > now():
            raise PermissionDenied()
        # Token lifetime: 30 days from issuance.
        iat = datetime.datetime.utcnow()
        exp = iat + datetime.timedelta(days=30)
        profile = None
        if self.position.attendee_name:
            profile = {
                "display_name": self.position.attendee_name
            }
        payload = {
            "iss": request.event.settings.venueless_issuer,
            "aud": request.event.settings.venueless_audience,
            "exp": exp,
            "iat": iat,
            "uid": self.position.pseudonymization_id,
            "profile": profile,
            # Trait strings describing the purchase (event, subevent,
            # item, variation, category -- for the main position and all
            # add-ons); presumably matched against Venueless room
            # permissions -- confirm.
            "traits": list(
                {
                    'pretix-event-{}'.format(request.event.slug),
                    'pretix-subevent-{}'.format(self.position.subevent_id),
                    'pretix-item-{}'.format(self.position.item_id),
                    'pretix-variation-{}'.format(self.position.variation_id),
                    'pretix-category-{}'.format(self.position.item.category_id),
                } | {
                    'pretix-item-{}'.format(p.item_id)
                    for p in self.position.addons.all()
                } | {
                    'pretix-variation-{}'.format(p.variation_id)
                    for p in self.position.addons.all() if p.variation_id
                } | {
                    'pretix-category-{}'.format(p.item.category_id)
                    for p in self.position.addons.all() if p.item.category_id
                }
            )
        }
        # NOTE(review): .decode("utf-8") assumes PyJWT < 2.0, where
        # encode() returns bytes; PyJWT >= 2.0 returns str and this
        # would raise AttributeError -- confirm the pinned version.
        token = jwt.encode(
            payload, self.request.event.settings.venueless_secret, algorithm="HS256"
        ).decode("utf-8")
        # The replace() avoids a double slash when the configured URL
        # already ends with '/'.
        return redirect('{}/#token={}'.format(self.request.event.settings.venueless_url, token).replace("//#", "/#"))
import jwt
from django import forms
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.shortcuts import redirect
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from django.views import View
from django.views.decorators.clickjacking import xframe_options_exempt
from pretix.base.forms import SettingsForm, SecretKeySettingsField
from pretix.base.models import Event, Order, Item
from pretix.base.reldate import RelativeDateTimeField
from pretix.control.views.event import EventSettingsFormView, EventSettingsViewMixin
from pretix.presale.views import EventViewMixin
from pretix.presale.views.order import OrderPositionDetailMixin
class VenuelessSettingsForm(SettingsForm):
    """Per-event settings form for the Venueless integration.

    All fields are optional. The secret/issuer/audience values are the
    JWT parameters used when signing access tokens (see
    OrderPositionJoin), and venueless_url is the target the user is
    redirected to.
    """
    venueless_url = forms.URLField(
        label=_("Venueless URL"),
        required=False,
    )
    venueless_secret = SecretKeySettingsField(
        label=_("Venueless secret"),
        required=False,
    )
    venueless_issuer = forms.CharField(
        label=_("Venueless issuer"),
        required=False,
    )
    venueless_audience = forms.CharField(
        label=_("Venueless audience"),
        required=False,
    )
    venueless_start = RelativeDateTimeField(
        label=_('Start of live event'),
        required=False,
    )
    venueless_allow_pending = forms.BooleanField(
        label=_('Allow users to access the live event before their order is paid'),
        required=False,
    )
    venueless_all_items = forms.BooleanField(
        label=_('Allow buyers of all admission products'),
        required=False
    )
    # Product whitelist; the widget is hidden client-side while
    # venueless_all_items is checked (data-inverse-dependency).
    venueless_items = forms.ModelMultipleChoiceField(
        widget=forms.CheckboxSelectMultiple(
            attrs={
                'class': 'scrolling-multiple-choice',
                'data-inverse-dependency': '<[name$=venueless_all_items]'
            }
        ),
        label=_('Limit to products'),
        required=False,
        queryset=Item.objects.none(),
        initial=None
    )
    def __init__(self, *args, **kwargs):
        # pretix passes the event as the 'obj' kwarg; use it to scope
        # the product choices to this event's items.
        event = kwargs['obj']
        super().__init__(*args, **kwargs)
        self.fields['venueless_items'].queryset = event.items.all()
    def clean(self):
        """Convert M2M selections to plain lists of primary keys.

        Presumably needed so the values are serializable by the
        settings store -- confirm.
        """
        data = super().clean()
        for k, v in self.fields.items():
            if isinstance(v, forms.ModelMultipleChoiceField):
                answstr = [o.pk for o in data[k]]
                data[k] = answstr
        return data
class SettingsView(EventSettingsViewMixin, EventSettingsFormView):
    """Renders and persists the Venueless settings for an event."""

    model = Event
    form_class = VenuelessSettingsForm
    template_name = 'pretix_venueless/settings.html'
    permission = 'can_change_settings'

    def get_success_url(self) -> str:
        """Redirect back to this settings page after a successful save."""
        event = self.request.event
        return reverse(
            'plugins:pretix_venueless:settings',
            kwargs={'organizer': event.organizer.slug, 'event': event.slug},
        )
@method_decorator(xframe_options_exempt, 'dispatch')
class OrderPositionJoin(EventViewMixin, OrderPositionDetailMixin, View):
    """Issues a Venueless JWT for one order position and redirects the
    visitor into the live event.

    POST-only; clickjacking protection is disabled for this view
    (xframe_options_exempt).
    """
    def post(self, request, *args, **kwargs):
        if not self.position:
            raise Http404(_('Unknown order code or not authorized to access this order.'))
        # Access requires a paid order (or a pending one when the event
        # explicitly allows it), a non-canceled position, and an
        # admission product.
        forbidden = (
            (self.order.status != Order.STATUS_PAID and not (self.order.status == Order.STATUS_PENDING and
                                                             request.event.settings.venueless_allow_pending))
            or self.position.canceled
            or not self.position.item.admission
        )
        if forbidden:
            raise PermissionDenied()
        # Deny access before the configured start time, resolved against
        # the position's subevent when there is one.
        if request.event.settings.venueless_start and request.event.settings.venueless_start.datetime(self.position.subevent or request.event) > now():
            raise PermissionDenied()
        # Token lifetime: 30 days from issuance.
        iat = datetime.datetime.utcnow()
        exp = iat + datetime.timedelta(days=30)
        profile = None
        if self.position.attendee_name:
            profile = {
                "display_name": self.position.attendee_name
            }
        payload = {
            "iss": request.event.settings.venueless_issuer,
            "aud": request.event.settings.venueless_audience,
            "exp": exp,
            "iat": iat,
            "uid": self.position.pseudonymization_id,
            "profile": profile,
            # Trait strings describing the purchase (event, subevent,
            # item, variation, category -- for the main position and all
            # add-ons); presumably matched against Venueless room
            # permissions -- confirm.
            "traits": list(
                {
                    'pretix-event-{}'.format(request.event.slug),
                    'pretix-subevent-{}'.format(self.position.subevent_id),
                    'pretix-item-{}'.format(self.position.item_id),
                    'pretix-variation-{}'.format(self.position.variation_id),
                    'pretix-category-{}'.format(self.position.item.category_id),
                } | {
                    'pretix-item-{}'.format(p.item_id)
                    for p in self.position.addons.all()
                } | {
                    'pretix-variation-{}'.format(p.variation_id)
                    for p in self.position.addons.all() if p.variation_id
                } | {
                    'pretix-category-{}'.format(p.item.category_id)
                    for p in self.position.addons.all() if p.item.category_id
                }
            )
        }
        # NOTE(review): .decode("utf-8") assumes PyJWT < 2.0, where
        # encode() returns bytes; PyJWT >= 2.0 returns str and this
        # would raise AttributeError -- confirm the pinned version.
        token = jwt.encode(
            payload, self.request.event.settings.venueless_secret, algorithm="HS256"
        ).decode("utf-8")
        # The replace() avoids a double slash when the configured URL
        # already ends with '/'.
        return redirect('{}/#token={}'.format(self.request.event.settings.venueless_url, token).replace("//#", "/#"))
import json, urlparse, sys, os, signal
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from subprocess import call
from BitbucketParse import BitbucketParse
class GitAutoDeploy(BaseHTTPRequestHandler):
CONFIG_FILEPATH = './GitAutoDeploy.conf.json'
config = None
quiet = False
daemon = False
@classmethod
def getConfig(myClass):
if myClass.config is None:
try:
configString = open(myClass.CONFIG_FILEPATH).read()
except:
sys.exit('Could not load ' + myClass.CONFIG_FILEPATH + ' file')
try:
myClass.config = json.loads(configString)
except:
sys.exit(myClass.CONFIG_FILEPATH + ' file is not valid json')
for repository in myClass.config['repositories']:
if (not os.path.isdir(repository['path'])):
sys.exit('Directory ' + repository['path'] + ' not found')
# Check for a repository with a local or a remote GIT_WORK_DIR
if not os.path.isdir(os.path.join(repository['path'], '.git')) \
and not os.path.isdir(os.path.join(repository['path'], 'objects')):
sys.exit('Directory ' + repository['path'] + ' is not a Git repository')
return myClass.config
def do_POST(self):
config = self.getConfig()
if self.getEvent() != 1:
self.respond(304)
return
self.respond(204)
if self.server == 'bitbucket':
self.bitbucketRequest(config)
repo = self.getRepository(config, self.fullname)
if repo is None:
print "Not found repository"
return
deployBranch = repo['deploy-branch']
if self.branch == deployBranch:
self.fetch(self.path)
self.deploy(self.path)
file = open(self.name + "_" + self.branch + ".txt", "w+")
file.write(str(self.lastCommitHash))
def do_GET(self):
self.respond(200)
self.wfile("hello")
def getEvent(self):
event = self.headers.getheader('X-Event-Key')
if event is None:
event = self.headers.getheader('X-GitHub-Event')
self.server = 'github'
else:
self.server = 'bitbucket'
if 'push' not in event:
print('Not a push request')
return 304
else:
return 1
def getRepository(self, config, name):
for repository in config['repositories']:
if repository['full-name'] == name:
return repository
else:
return None
def bitbucketRequest(self, config):
bitbucket = BitbucketParse(config, self.headers, self.rfile)
bitbucket.parseRequest()
bitbucket.getMatchingPaths()
self.branch = bitbucket.branch
self.name = bitbucket.name
self.owner = bitbucket.owner
self.fullname = bitbucket.fullname
self.url = bitbucket.url
self.path = bitbucket.path
self.lastCommitHash = bitbucket.lastCommitHash
def respond(self, code):
self.send_response(code)
self.send_header('Content-type', 'text/plain')
self.end_headers()
def fetch(self, path):
if (not self.quiet):
print "\nPost push request received"
print 'Updating ' + path
call(['cd "' + path + '" && git pull origin ' + self.branch], shell=True)
print 'Completed'
def deploy(self, path):
config = self.getConfig()
for repository in config['repositories']:
if repository['path'] == path:
if 'deploy' in repository:
branch = None
if 'deploy-branch' in repository:
branch = repository['deploy-branch']
if branch is None or branch == self.branch:
if not self.quiet:
print 'Executing deploy command'
call(['cd "' + path + '" && ' + repository['deploy']], shell=True)
elif not self.quiet:
print 'Push to different branch (%s != %s), not deploying' % (branch, self.branch)
break
def reset(self, path, name, branch):
filename = name + "_" + branch + ".txt"
file = open(filename, 'r')
lastCommitHash = file.read()
call(['cd "' + path + '" && git reset --hard ' + lastCommitHash], shell=True)
def main():
    """Parse CLI flags, optionally daemonize, and serve webhook requests.

    Flags: -d/--daemon-mode (fork to background, implies quiet),
    -q/--quiet, -s/--stop (SIGKILL the daemon recorded in pid.txt).
    """
    try:
        server = None
        for arg in sys.argv:
            if (arg == '-d' or arg == '--daemon-mode'):
                GitAutoDeploy.daemon = True
                GitAutoDeploy.quiet = True
            if (arg == '-q' or arg == '--quiet'):
                GitAutoDeploy.quiet = True
            if (arg == '-s' or arg == '--stop'):
                # Stop mode: read the daemon's pid and kill it.
                file = open("pid.txt", "r")
                pid = file.read()
                if (not pid.isdigit()):
                    return
                else:
                    os.kill(int(pid), signal.SIGKILL)
                    print 'Stop Auto deploy'
                    return
        if (GitAutoDeploy.daemon):
            # Daemonization: fork, let the parent record the child pid
            # in pid.txt and exit, then detach the child with setsid().
            file = open("pid.txt", "w+")
            pid = os.fork()
            if (pid != 0):
                file.write(str(pid))
                sys.exit()
            os.setsid()
        if (not GitAutoDeploy.quiet):
            print 'Github Autodeploy Service started'
        else:
            print 'Github Autodeploy Service started in daemon mode'
        server = HTTPServer(('', GitAutoDeploy.getConfig()['port']), GitAutoDeploy)
        server.serve_forever()
    except (KeyboardInterrupt, SystemExit) as e:
        # Graceful shutdown: report the exception, close the socket.
        if (e): # wtf, why is this creating a new line?
            print >> sys.stderr, e
        if (not server is None):
            server.socket.close()
        if (not GitAutoDeploy.quiet):
            print 'Goodbye'
if __name__ == '__main__':
    main()
import json, urlparse, sys, os, signal
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from subprocess import call
from BitbucketParse import BitbucketParse
class GitAutoDeploy(BaseHTTPRequestHandler):
CONFIG_FILEPATH = './GitAutoDeploy.conf.json'
config = None
quiet = False
daemon = False
@classmethod
def getConfig(myClass):
if myClass.config is None:
try:
configString = open(myClass.CONFIG_FILEPATH).read()
except:
sys.exit('Could not load ' + myClass.CONFIG_FILEPATH + ' file')
try:
myClass.config = json.loads(configString)
except:
sys.exit(myClass.CONFIG_FILEPATH + ' file is not valid json')
for repository in myClass.config['repositories']:
if (not os.path.isdir(repository['path'])):
sys.exit('Directory ' + repository['path'] + ' not found')
# Check for a repository with a local or a remote GIT_WORK_DIR
if not os.path.isdir(os.path.join(repository['path'], '.git')) \
and not os.path.isdir(os.path.join(repository['path'], 'objects')):
sys.exit('Directory ' + repository['path'] + ' is not a Git repository')
return myClass.config
def do_POST(self):
config = self.getConfig()
if self.getEvent() != 1:
self.respond(304)
return
self.respond(204)
if self.server == 'bitbucket':
self.bitbucketRequest(config)
repo = self.getRepository(config, self.fullname)
if repo is None:
print "Not found repository"
return
deployBranch = repo['deploy-branch']
if self.branch == deployBranch:
self.fetch(self.path)
self.deploy(self.path)
file = open(self.name + "_" + self.branch + ".txt", "w+")
file.write(str(self.lastCommitHash))
def do_GET(self):
self.respond(200)
self.wfile("hello")
def getEvent(self):
event = self.headers.getheader('X-Event-Key')
if event is None:
event = self.headers.getheader('X-GitHub-Event')
self.server = 'github'
else:
self.server = 'bitbucket'
if 'push' not in event:
print('Not a push request')
return 304
else:
return 1
def getRepository(self, config, name):
for repository in config['repositories']:
if repository['full-name'] == name:
return repository
else:
return None
def bitbucketRequest(self, config):
bitbucket = BitbucketParse(config, self.headers, self.rfile)
bitbucket.parseRequest()
bitbucket.getMatchingPaths()
self.branch = bitbucket.branch
self.name = bitbucket.name
self.owner = bitbucket.owner
self.fullname = bitbucket.fullname
self.url = bitbucket.url
self.path = bitbucket.path
self.lastCommitHash = bitbucket.lastCommitHash
def respond(self, code):
self.send_response(code)
self.send_header('Content-type', 'text/plain')
self.end_headers()
def fetch(self, path):
    """Run `git pull origin <branch>` inside the repository at *path*."""
    if (not self.quiet):
        print "\nPost push request received"
        print 'Updating ' + path
    # NOTE(review): path and branch are interpolated into a shell string
    # (shell=True); this is safe only while they come from trusted local
    # config.  A subprocess argument list with cwd=path would avoid any
    # shell-injection risk -- confirm before hardening.
    call(['cd "' + path + '" && git pull origin ' + self.branch], shell=True)
    print 'Completed'
def deploy(self, path):
    """Run the configured deploy command for the repository at *path*.

    The command only runs when the repo entry has a 'deploy' key and the
    pushed branch matches the optional 'deploy-branch' restriction (no
    restriction means always deploy).
    """
    config = self.getConfig()
    for repository in config['repositories']:
        if repository['path'] == path:
            if 'deploy' in repository:
                # 'deploy-branch' is optional; None means no restriction.
                branch = None
                if 'deploy-branch' in repository:
                    branch = repository['deploy-branch']
                if branch is None or branch == self.branch:
                    if not self.quiet:
                        print 'Executing deploy command'
                    # Deploy command comes from local config and is run in a shell.
                    call(['cd "' + path + '" && ' + repository['deploy']], shell=True)
                elif not self.quiet:
                    print 'Push to different branch (%s != %s), not deploying' % (branch, self.branch)
            # Paths are unique per repo entry; stop after the first match.
            break
def reset(self, path, name, branch):
    """Hard-reset the repository at *path* to the last recorded commit.

    The commit hash is read from the '<name>_<branch>.txt' file written
    by do_POST.
    """
    filename = name + "_" + branch + ".txt"
    # BUG FIX: the file handle was never closed; use a context manager.
    with open(filename, 'r') as f:
        lastCommitHash = f.read()
    call(['cd "' + path + '" && git reset --hard ' + lastCommitHash], shell=True)
def main():
    """Entry point: parse CLI flags, optionally daemonize, run the server.

    Flags:
      -d / --daemon-mode : fork into the background (implies quiet)
      -q / --quiet       : suppress console output
      -s / --stop        : kill a previously daemonized instance using
                           the pid recorded in pid.txt, then exit
    """
    try:
        server = None
        for arg in sys.argv:
            if (arg == '-d' or arg == '--daemon-mode'):
                GitAutoDeploy.daemon = True
                GitAutoDeploy.quiet = True
            if (arg == '-q' or arg == '--quiet'):
                GitAutoDeploy.quiet = True
            if (arg == '-s' or arg == '--stop'):
                # Stop mode: read the daemon's pid and SIGKILL it.
                file = open("pid.txt", "r")
                pid = file.read()
                if (not pid.isdigit()):
                    return
                else:
                    os.kill(int(pid), signal.SIGKILL)
                    print 'Stop Auto deploy'
                    return
        if (GitAutoDeploy.daemon):
            # Single fork: the parent records the child's pid in pid.txt
            # and exits; the child detaches via setsid and keeps running.
            file = open("pid.txt", "w+")
            pid = os.fork()
            if (pid != 0):
                file.write(str(pid))
                sys.exit()
            os.setsid()
        if (not GitAutoDeploy.quiet):
            print 'Github Autodeploy Service started'
        else:
            print 'Github Autodeploy Service started in daemon mode'
        server = HTTPServer(('', GitAutoDeploy.getConfig()['port']), GitAutoDeploy)
        server.serve_forever()
    except (KeyboardInterrupt, SystemExit) as e:
        # SystemExit from the parent fork carries no payload; only echo
        # a truthy exception value to stderr.
        if (e):
            print >> sys.stderr, e
        if (not server is None):
            server.socket.close()
        if (not GitAutoDeploy.quiet):
            print 'Goodbye'
# Run the service only when executed directly, not when imported.
# BUG FIX: stray dataset columns ("| 0.200088 | 0.049982 |") were fused
# onto the call line, making it a syntax error; restored to a plain call.
if __name__ == '__main__':
    main()
# Core imports
import os
import copy
import sys
from datetime import datetime
# Scipy/numpy imports
import numpy as np
# Astropy imports
from astropy.table import Table
import astropy.units as u
from astropy.stats import sigma_clipped_stats
# Import astroimage
import astroimage as ai
# Select the instrument profile used by all astroimage reductions below.
ai.set_instrument('Mimir')

#==============================================================================
# *********************** CUSTOM USER CODE ************************************
# this is where the user specifies where the raw data is stored
# and some of the subdirectory structure to find the actual .FITS images
#==============================================================================
# This is the location where all pyPol data will be saved
pyPol_data = 'C:\\Users\\Jordan\\FITS_data\\Mimir_data\\pyPol_Reduced\\201611\\'

# These are the directories where polarimetry data are stored
polarimetryDir = os.path.join(pyPol_data, 'Polarimetry')
stokesDir = os.path.join(polarimetryDir, 'stokesImages')
if (not os.path.isdir(stokesDir)):
    os.mkdir(stokesDir, 0o755)

# Specify which (target, filter) pairs to process
targetFilterDict = {
    'NGC2023': ['H', 'Ks'],
    'NGC7023': ['H', 'Ks'],
    'M78': ['H', 'Ks']
}

# Loop through each target-filter pairing: read the Stokes I images,
# photometrically calibrate them, and write the results back to disk.
for thisTarget, filters in targetFilterDict.items():
    # Read the previously produced Stokes I image for every filter.
    stokesIdict = {}
    for thisFilter in filters:
        # Construct the expected filename for this (target, filter) pair.
        stokesIfilename = '_'.join([thisTarget, thisFilter, 'I']) + '.fits'
        stokesIfilename = os.path.join(stokesDir, stokesIfilename)
        stokesIdict[thisFilter] = ai.reduced.ReducedScience.read(
            stokesIfilename
        )

    # Construct a calibration object and run the calibration method.
    photCalibrator = ai.utilitywrappers.PhotometryCalibrator(
        stokesIdict
    )
    calImgDict = photCalibrator.calibrate_photometry()

    # Write the calibrated images to disk.
    for key, img in calImgDict.items():
        # Keys containing an underscore denote intensity ('I') images.
        keyParts = key.split('_')
        if len(keyParts) > 1:
            filename = os.path.join(stokesDir, '_'.join([thisTarget, keyParts[0], 'I', 'cal']) + '.fits')
        else:
            filename = os.path.join(stokesDir, '_'.join([thisTarget, key, 'cal']) + '.fits')
        # NOTE(review): `clobber` is deprecated in modern astropy in
        # favour of `overwrite` -- confirm the installed version before
        # renaming the keyword.
        img.write(filename, clobber=True)

# BUG FIX: stray dataset columns ("| 06b_photometricCalibration.py |")
# were fused onto this line; restored to a plain completion message.
print('Done!')
import os
import copy
import sys
from datetime import datetime
# Scipy/numpy imports
import numpy as np
# Astropy imports
from astropy.table import Table
import astropy.units as u
from astropy.stats import sigma_clipped_stats
# Import astroimage
import astroimage as ai
# Select the instrument profile used by all astroimage reductions below.
ai.set_instrument('Mimir')

#==============================================================================
# *********************** CUSTOM USER CODE ************************************
# this is where the user specifies where the raw data is stored
# and some of the subdirectory structure to find the actual .FITS images
#==============================================================================
# This is the location where all pyPol data will be saved
pyPol_data = 'C:\\Users\\Jordan\\FITS_data\\Mimir_data\\pyPol_Reduced\\201611\\'

# These are the directories where polarimetry data are stored
polarimetryDir = os.path.join(pyPol_data, 'Polarimetry')
stokesDir = os.path.join(polarimetryDir, 'stokesImages')
if (not os.path.isdir(stokesDir)):
    os.mkdir(stokesDir, 0o755)

# Specify which (target, filter) pairs to process
targetFilterDict = {
    'NGC2023': ['H', 'Ks'],
    'NGC7023': ['H', 'Ks'],
    'M78': ['H', 'Ks']
}

# Loop through each target-filter pairing: read the Stokes I images,
# photometrically calibrate them, and write the results back to disk.
for thisTarget, filters in targetFilterDict.items():
    # Read the previously produced Stokes I image for every filter.
    stokesIdict = {}
    for thisFilter in filters:
        # Construct the expected filename for this (target, filter) pair.
        stokesIfilename = '_'.join([thisTarget, thisFilter, 'I']) + '.fits'
        stokesIfilename = os.path.join(stokesDir, stokesIfilename)
        stokesIdict[thisFilter] = ai.reduced.ReducedScience.read(
            stokesIfilename
        )

    # Construct a calibration object and run the calibration method.
    photCalibrator = ai.utilitywrappers.PhotometryCalibrator(
        stokesIdict
    )
    calImgDict = photCalibrator.calibrate_photometry()

    # Write the calibrated images to disk.
    for key, img in calImgDict.items():
        # Keys containing an underscore denote intensity ('I') images.
        keyParts = key.split('_')
        if len(keyParts) > 1:
            filename = os.path.join(stokesDir, '_'.join([thisTarget, keyParts[0], 'I', 'cal']) + '.fits')
        else:
            filename = os.path.join(stokesDir, '_'.join([thisTarget, key, 'cal']) + '.fits')
        # NOTE(review): `clobber` is deprecated in modern astropy in
        # favour of `overwrite` -- confirm the installed version before
        # renaming the keyword.
        img.write(filename, clobber=True)

# BUG FIX: stray dataset columns ("| 0.40486 | 0.319626 |") were fused
# onto this line; restored to a plain completion message.
print('Done!')
import struct
from . import packet_base
from ryu.lib import addrconv
# Slow Protocol Multicast destination MAC address.
SLOW_PROTOCOL_MULTICAST = '01:80:c2:00:00:02'
# Slow Protocol subtype values (the first octet of a Slow Protocol frame;
# see the subtype table in the `slow` class docstring).
SLOW_SUBTYPE_LACP = 0x01    # Link Aggregation Control Protocol
SLOW_SUBTYPE_MARKER = 0x02  # Link Aggregation - Marker Protocol
SLOW_SUBTYPE_OAM = 0x03     # Operations, Administration, and Maintenance
SLOW_SUBTYPE_OSSP = 0x0a    # Organization Specific Slow Protocol
class slow(packet_base.PacketBase):
    """Slow Protocol header decoder class.

    This class has only the parser method.
    http://standards.ieee.org/getieee802/download/802.3-2012_section5.pdf

    Slow Protocols subtype values: 1 = Link Aggregation Control
    Protocol (LACP), 2 = Link Aggregation - Marker Protocol,
    3 = Operations, Administration, and Maintenance (OAM),
    10 = Organization Specific Slow Protocol (OSSP); 0, 4-9 and
    11-255 are unused/illegal values.
    """
    _PACK_STR = '!B'

    @classmethod
    def parser(cls, buf):
        # The first octet of the frame selects the subtype parser.
        (subtype, ) = struct.unpack_from(cls._PACK_STR, buf)
        if subtype == SLOW_SUBTYPE_LACP:
            return lacp.parser(buf)
        # TODO: make parsers of the other subtypes (Marker, OAM, OSSP);
        # until then those frames are left unparsed.
        return None, None, buf
class lacp(packet_base.PacketBase):
    """Link Aggregation Control Protocol(LACP, IEEE 802.1AX)
    header encoder/decoder class.

    http://standards.ieee.org/getieee802/download/802.1AX-2008.pdf

    An LACPDU is a fixed-size structure:

    ================================== ======
    LACPDU structure                   Octets
    ================================== ======
    Subtype = LACP                     1
    Version Number                     1
    Actor TLV (type=1, length=20)      20
    Partner TLV (type=2, length=20)    20
    Collector TLV (type=3, length=16)  16
    Terminator TLV (type=0, length=0)  52
    ================================== ======

    The Actor and Partner TLVs each carry: system priority (2),
    system ID as a MAC address (6), key (2), port priority (2),
    port (2), state (1) and 3 reserved octets after the TLV type and
    length octets.  The Collector TLV carries max delay (2) and 12
    reserved octets.  The Terminator uses a length value of 0 on
    purpose (common TLV practice) followed by 50 reserved octets.

    Actor_State and Partner_State are encoded as individual bits within
    a single octet:

    +------+------+------+------+------+------+------+------+
    |  7   |  6   |  5   |  4   |  3   |  2   |  1   |  0   |
    +======+======+======+======+======+======+======+======+
    | EXPR | DFLT | DIST | CLCT | SYNC | AGGR | TMO  | ACT  |
    +------+------+------+------+------+------+------+------+

    An instance has the following attributes at least.  Most of them are
    same to the on-wire counterparts but in host byte order.
    __init__ takes the corresponding args in this order.

    .. tabularcolumns:: |l|L|

    =============================== ====================================
    Attribute                       Description
    =============================== ====================================
    version                         LACP version; must be
                                    LACP_VERSION_NUMBER (i.e. 1).
    actor_system_priority           Priority assigned to this System.
    actor_system                    Actor's System ID (MAC address).
    actor_key                       Operational Key assigned by the
                                    Actor.
    actor_port_priority             Priority assigned to this port.
    actor_port                      Port number assigned by the Actor.
    actor_state_activity            LACP_STATE_ACTIVE(1) /
                                    LACP_STATE_PASSIVE(0).
    actor_state_timeout             LACP_STATE_SHORT_TIMEOUT(1) /
                                    LACP_STATE_LONG_TIMEOUT(0).
    actor_state_aggregation         LACP_STATE_AGGREGATEABLE(1) /
                                    LACP_STATE_INDIVIDUAL(0).
    actor_state_synchronization     LACP_STATE_IN_SYNC(1) /
                                    LACP_STATE_OUT_OF_SYNC(0).
    actor_state_collecting          Collecting of incoming frames
                                    enabled(1)/disabled(0).
    actor_state_distributing        Distributing of outgoing frames
                                    enabled(1)/disabled(0).
    actor_state_defaulted           Partner info is defaulted(1) or
                                    operational(0).
    actor_state_expired             Actor state expired(1)/not(0).
    partner_*                       The same fields, for the Partner.
    collector_max_delay             Maximum time the Frame Collector
                                    may delay.
    =============================== ====================================
    """
    LACP_VERSION_NUMBER = 1

    # LACP TLV type
    LACP_TLV_TYPE_ACTOR = 1
    LACP_TLV_TYPE_PARTNER = 2
    LACP_TLV_TYPE_COLLECTOR = 3
    LACP_TLV_TYPE_TERMINATOR = 0

    # LACP state(LACP_Activity)
    LACP_STATE_ACTIVE = 1
    LACP_STATE_PASSIVE = 0
    # LACP state(LACP_Timeout)
    LACP_STATE_SHORT_TIMEOUT = 1
    LACP_STATE_LONG_TIMEOUT = 0
    # LACP state(Aggregation)
    LACP_STATE_AGGREGATEABLE = 1
    LACP_STATE_INDIVIDUAL = 0
    # LACP state(Synchronization)
    LACP_STATE_IN_SYNC = 1
    LACP_STATE_OUT_OF_SYNC = 0
    # LACP state(Collecting)
    LACP_STATE_COLLECTING_ENABLED = 1
    LACP_STATE_COLLECTING_DISABLED = 0
    # FIX: the constant below was misspelled ("COLELCTING"); the old
    # name is kept as a backward-compatible alias.
    LACP_STATE_COLELCTING_DISABLED = LACP_STATE_COLLECTING_DISABLED
    # LACP state(Distributing)
    LACP_STATE_DISTRIBUTING_ENABLED = 1
    LACP_STATE_DISTRIBUTING_DISABLED = 0
    # LACP state(Defaulted)
    LACP_STATE_DEFAULTED_PARTNER = 1
    LACP_STATE_OPERATIONAL_PARTNER = 0
    # FIX: the constant below was misspelled ("DEFAULED"); the old
    # name is kept as a backward-compatible alias.
    LACP_STATE_DEFAULED_PARTNER = LACP_STATE_DEFAULTED_PARTNER
    # LACP state(Expired)
    LACP_STATE_EXPIRED = 1
    LACP_STATE_NOT_EXPIRED = 0

    # The number of seconds between periodic transmissions using
    # Short Timeouts.
    FAST_PERIODIC_TIME = 1
    # The number of seconds between periodic transmissions using
    # Long Timeouts.
    SLOW_PERIODIC_TIME = 30
    # The number of seconds before invalidating received LACPDU
    # information when using Short Timeouts(3 x Fast_Periodic_Time).
    SHORT_TIMEOUT_TIME = 3 * FAST_PERIODIC_TIME
    # The number of seconds before invalidating received LACPDU
    # information when using Long Timeouts (3 x Slow_Periodic_Time).
    LONG_TIMEOUT_TIME = 3 * SLOW_PERIODIC_TIME

    # struct formats for the fixed LACPDU layout (network byte order).
    _HLEN_PACK_STR = '!BB'
    _HLEN_PACK_LEN = struct.calcsize(_HLEN_PACK_STR)
    _ACTPRT_INFO_PACK_STR = '!BBH6sHHHB3x'
    _ACTPRT_INFO_PACK_LEN = struct.calcsize(_ACTPRT_INFO_PACK_STR)
    _COL_INFO_PACK_STR = '!BBH12x'
    _COL_INFO_PACK_LEN = struct.calcsize(_COL_INFO_PACK_STR)
    _TRM_PACK_STR = '!BB50x'
    _TRM_PACK_LEN = struct.calcsize(_TRM_PACK_STR)
    _ALL_PACK_LEN = _HLEN_PACK_LEN + _ACTPRT_INFO_PACK_LEN * 2 + \
        _COL_INFO_PACK_LEN + _TRM_PACK_LEN
    _MIN_LEN = _ALL_PACK_LEN
    _TYPE = {
        'ascii': [
            'actor_system', 'partner_system'
        ]
    }

    def __init__(self, version=LACP_VERSION_NUMBER,
                 actor_system_priority=0,
                 actor_system='00:00:00:00:00:00',
                 actor_key=0, actor_port_priority=0, actor_port=0,
                 actor_state_activity=0, actor_state_timeout=0,
                 actor_state_aggregation=0,
                 actor_state_synchronization=0,
                 actor_state_collecting=0, actor_state_distributing=0,
                 actor_state_defaulted=0, actor_state_expired=0,
                 partner_system_priority=0,
                 partner_system='00:00:00:00:00:00',
                 partner_key=0, partner_port_priority=0, partner_port=0,
                 partner_state_activity=0, partner_state_timeout=0,
                 partner_state_aggregation=0,
                 partner_state_synchronization=0,
                 partner_state_collecting=0,
                 partner_state_distributing=0,
                 partner_state_defaulted=0, partner_state_expired=0,
                 collector_max_delay=0):
        super(lacp, self).__init__()
        # parameter check: every state flag must be a single bit (0 or 1).
        assert (1 == actor_state_activity | 1)
        assert (1 == actor_state_timeout | 1)
        assert (1 == actor_state_aggregation | 1)
        assert (1 == actor_state_synchronization | 1)
        assert (1 == actor_state_collecting | 1)
        assert (1 == actor_state_distributing | 1)
        assert (1 == actor_state_defaulted | 1)
        assert (1 == actor_state_expired | 1)
        assert (1 == partner_state_activity | 1)
        assert (1 == partner_state_timeout | 1)
        assert (1 == partner_state_aggregation | 1)
        assert (1 == partner_state_synchronization | 1)
        assert (1 == partner_state_collecting | 1)
        assert (1 == partner_state_distributing | 1)
        assert (1 == partner_state_defaulted | 1)
        assert (1 == partner_state_expired | 1)
        # ------------------------------
        # Header
        # ------------------------------
        self._subtype = SLOW_SUBTYPE_LACP
        self.version = version
        # ------------------------------
        # Actor Information
        # ------------------------------
        self._actor_tag = self.LACP_TLV_TYPE_ACTOR
        self._actor_length = self._ACTPRT_INFO_PACK_LEN
        self.actor_system_priority = actor_system_priority
        self.actor_system = actor_system
        self.actor_key = actor_key
        self.actor_port_priority = actor_port_priority
        self.actor_port = actor_port
        self.actor_state_activity = actor_state_activity
        self.actor_state_timeout = actor_state_timeout
        self.actor_state_aggregation = actor_state_aggregation
        self.actor_state_synchronization = actor_state_synchronization
        self.actor_state_collecting = actor_state_collecting
        self.actor_state_distributing = actor_state_distributing
        self.actor_state_defaulted = actor_state_defaulted
        self.actor_state_expired = actor_state_expired
        # Pack the eight actor flag bits into the on-wire state octet.
        self._actor_state = (
            (self.actor_state_activity << 0) |
            (self.actor_state_timeout << 1) |
            (self.actor_state_aggregation << 2) |
            (self.actor_state_synchronization << 3) |
            (self.actor_state_collecting << 4) |
            (self.actor_state_distributing << 5) |
            (self.actor_state_defaulted << 6) |
            (self.actor_state_expired << 7))
        # ------------------------------
        # Partner Information
        # ------------------------------
        self._partner_tag = self.LACP_TLV_TYPE_PARTNER
        self._partner_length = self._ACTPRT_INFO_PACK_LEN
        self.partner_system_priority = partner_system_priority
        self.partner_system = partner_system
        self.partner_key = partner_key
        self.partner_port_priority = partner_port_priority
        self.partner_port = partner_port
        self.partner_state_activity = partner_state_activity
        self.partner_state_timeout = partner_state_timeout
        self.partner_state_aggregation = partner_state_aggregation
        self.partner_state_synchronization = \
            partner_state_synchronization
        self.partner_state_collecting = partner_state_collecting
        self.partner_state_distributing = partner_state_distributing
        self.partner_state_defaulted = partner_state_defaulted
        self.partner_state_expired = partner_state_expired
        # Pack the eight partner flag bits into the on-wire state octet.
        self._partner_state = (
            (self.partner_state_activity << 0) |
            (self.partner_state_timeout << 1) |
            (self.partner_state_aggregation << 2) |
            (self.partner_state_synchronization << 3) |
            (self.partner_state_collecting << 4) |
            (self.partner_state_distributing << 5) |
            (self.partner_state_defaulted << 6) |
            (self.partner_state_expired << 7))
        # ------------------------------
        # Collector Information
        # ------------------------------
        self._collector_tag = self.LACP_TLV_TYPE_COLLECTOR
        self._collector_length = self._COL_INFO_PACK_LEN
        self.collector_max_delay = collector_max_delay
        # ------------------------------
        # Terminator
        # ------------------------------
        self._terminator_tag = self.LACP_TLV_TYPE_TERMINATOR
        self._terminator_length = 0

    @classmethod
    def parser(cls, buf):
        """Decode an LACPDU from *buf*.

        Returns (lacp instance, None, remaining bytes).  Raises
        AssertionError on malformed input (wrong length, tags or
        lengths that do not match the fixed LACPDU layout).
        """
        assert cls._ALL_PACK_LEN == len(buf)
        offset = 0
        # ------------------------------
        # Header
        # ------------------------------
        (subtype, version
         ) = struct.unpack_from(cls._HLEN_PACK_STR, buf, offset)
        assert SLOW_SUBTYPE_LACP == subtype
        assert cls.LACP_VERSION_NUMBER == version
        offset += cls._HLEN_PACK_LEN
        # ------------------------------
        # Actor Information
        # ------------------------------
        (actor_tag, actor_length, actor_system_priority, actor_system,
         actor_key, actor_port_priority, actor_port, actor_state
         ) = struct.unpack_from(cls._ACTPRT_INFO_PACK_STR, buf, offset)
        assert cls.LACP_TLV_TYPE_ACTOR == actor_tag
        assert cls._ACTPRT_INFO_PACK_LEN == actor_length
        offset += cls._ACTPRT_INFO_PACK_LEN
        # Unpack the actor state octet into individual flag bits.
        actor_state_activity = (actor_state >> 0) & 1
        actor_state_timeout = (actor_state >> 1) & 1
        actor_state_aggregation = (actor_state >> 2) & 1
        actor_state_synchronization = (actor_state >> 3) & 1
        actor_state_collecting = (actor_state >> 4) & 1
        actor_state_distributing = (actor_state >> 5) & 1
        actor_state_defaulted = (actor_state >> 6) & 1
        actor_state_expired = (actor_state >> 7) & 1
        # ------------------------------
        # Partner Information
        # ------------------------------
        (partner_tag, partner_length, partner_system_priority,
         partner_system, partner_key, partner_port_priority,
         partner_port, partner_state
         ) = struct.unpack_from(cls._ACTPRT_INFO_PACK_STR, buf, offset)
        assert cls.LACP_TLV_TYPE_PARTNER == partner_tag
        assert cls._ACTPRT_INFO_PACK_LEN == partner_length
        offset += cls._ACTPRT_INFO_PACK_LEN
        # Unpack the partner state octet into individual flag bits.
        partner_state_activity = (partner_state >> 0) & 1
        partner_state_timeout = (partner_state >> 1) & 1
        partner_state_aggregation = (partner_state >> 2) & 1
        partner_state_synchronization = (partner_state >> 3) & 1
        partner_state_collecting = (partner_state >> 4) & 1
        partner_state_distributing = (partner_state >> 5) & 1
        partner_state_defaulted = (partner_state >> 6) & 1
        partner_state_expired = (partner_state >> 7) & 1
        # ------------------------------
        # Collector Information
        # ------------------------------
        (collector_tag, collector_length, collector_max_delay
         ) = struct.unpack_from(cls._COL_INFO_PACK_STR, buf, offset)
        assert cls.LACP_TLV_TYPE_COLLECTOR == collector_tag
        assert cls._COL_INFO_PACK_LEN == collector_length
        offset += cls._COL_INFO_PACK_LEN
        # ------------------------------
        # Terminator Information
        # ------------------------------
        (terminator_tag, terminator_length
         ) = struct.unpack_from(cls._TRM_PACK_STR, buf, offset)
        assert cls.LACP_TLV_TYPE_TERMINATOR == terminator_tag
        assert 0 == terminator_length
        # CONSISTENCY FIX: use cls instead of naming the class directly
        # so subclasses slice with their own pack length.
        return cls(version,
                   actor_system_priority,
                   addrconv.mac.bin_to_text(actor_system),
                   actor_key, actor_port_priority,
                   actor_port, actor_state_activity,
                   actor_state_timeout, actor_state_aggregation,
                   actor_state_synchronization, actor_state_collecting,
                   actor_state_distributing, actor_state_defaulted,
                   actor_state_expired, partner_system_priority,
                   addrconv.mac.bin_to_text(partner_system),
                   partner_key, partner_port_priority,
                   partner_port, partner_state_activity,
                   partner_state_timeout, partner_state_aggregation,
                   partner_state_synchronization,
                   partner_state_collecting, partner_state_distributing,
                   partner_state_defaulted, partner_state_expired,
                   collector_max_delay), None, buf[cls._ALL_PACK_LEN:]

    def serialize(self, payload, prev):
        """Encode this LACPDU into its fixed 110-octet wire format."""
        header = struct.pack(self._HLEN_PACK_STR, self._subtype,
                             self.version)
        actor = struct.pack(self._ACTPRT_INFO_PACK_STR,
                            self._actor_tag, self._actor_length,
                            self.actor_system_priority,
                            addrconv.mac.text_to_bin(self.actor_system),
                            self.actor_key,
                            self.actor_port_priority, self.actor_port,
                            self._actor_state)
        partner = struct.pack(self._ACTPRT_INFO_PACK_STR,
                              self._partner_tag, self._partner_length,
                              self.partner_system_priority,
                              addrconv.mac.text_to_bin(self.partner_system),
                              self.partner_key,
                              self.partner_port_priority,
                              self.partner_port, self._partner_state)
        collector = struct.pack(self._COL_INFO_PACK_STR,
                                self._collector_tag,
                                self._collector_length,
                                self.collector_max_delay)
        terminator = struct.pack(self._TRM_PACK_STR,
                                 self._terminator_tag,
                                 self._terminator_length)
        # BUG FIX: stray dataset columns ("| ryu/lib/packet/slow.py |")
        # were fused onto this return line; restored to a plain
        # concatenation of the five packed sections.
        return header + actor + partner + collector + terminator
import struct
from . import packet_base
from ryu.lib import addrconv
# Slow Protocol Multicast destination MAC address.
SLOW_PROTOCOL_MULTICAST = '01:80:c2:00:00:02'
# Slow Protocol subtype values (the first octet of a Slow Protocol frame;
# see the subtype table in the `slow` class docstring).
SLOW_SUBTYPE_LACP = 0x01    # Link Aggregation Control Protocol
SLOW_SUBTYPE_MARKER = 0x02  # Link Aggregation - Marker Protocol
SLOW_SUBTYPE_OAM = 0x03     # Operations, Administration, and Maintenance
SLOW_SUBTYPE_OSSP = 0x0a    # Organization Specific Slow Protocol
class slow(packet_base.PacketBase):
    """Slow Protocol header decoder class.

    This class has only the parser method.
    http://standards.ieee.org/getieee802/download/802.3-2012_section5.pdf

    Slow Protocols subtype values: 1 = Link Aggregation Control
    Protocol (LACP), 2 = Link Aggregation - Marker Protocol,
    3 = Operations, Administration, and Maintenance (OAM),
    10 = Organization Specific Slow Protocol (OSSP); 0, 4-9 and
    11-255 are unused/illegal values.
    """
    _PACK_STR = '!B'

    @classmethod
    def parser(cls, buf):
        # The first octet of the frame selects the subtype parser.
        (subtype, ) = struct.unpack_from(cls._PACK_STR, buf)
        if subtype == SLOW_SUBTYPE_LACP:
            return lacp.parser(buf)
        # TODO: make parsers of the other subtypes (Marker, OAM, OSSP);
        # until then those frames are left unparsed.
        return None, None, buf
class lacp(packet_base.PacketBase):
    """Link Aggregation Control Protocol(LACP, IEEE 802.1AX)
    header encoder/decoder class.

    http://standards.ieee.org/getieee802/download/802.1AX-2008.pdf

    An LACPDU is a fixed-length structure made of a 2-octet header
    (Subtype = LACP, Version Number) followed by four TLVs:

    - Actor Information (tag 1, length 20): system priority, system ID
      (a MAC address), key, port priority, port, state octet and 3
      reserved octets.
    - Partner Information (tag 2, length 20): the same layout as the
      Actor Information TLV.
    - Collector Information (tag 3, length 16): max delay and 12
      reserved octets.
    - Terminator (tag 0, length 0) followed by 50 reserved octets.
      The use of a Terminator_Length of 0 is intentional: in TLV
      encoding schemes it is common practice for the terminator
      encoding to be 0 both for the type and the length.

    Actor_State and Partner_State are encoded as individual bits within
    a single octet as follows::

        +------+------+------+------+------+------+------+------+
        |  7   |  6   |  5   |  4   |  3   |  2   |  1   |  0   |
        +======+======+======+======+======+======+======+======+
        | EXPR | DFLT | DIST | CLCT | SYNC | AGGR | TMO  | ACT  |
        +------+------+------+------+------+------+------+------+

        ACT  (bit 0) activity control value with regard to this link.
        TMO  (bit 1) timeout control value with regard to this link.
        AGGR (bit 2) how the system regards this link from the point
                     of view of the aggregation.
        SYNC (bit 3) how the system regards this link from the point
                     of view of the synchronization.
        CLCT (bit 4) collecting of incoming frames.
        DIST (bit 5) distributing of outgoing frames.
        DFLT (bit 6) the opposite system information which the system
                     uses.
        EXPR (bit 7) the expire state of the system.

    An instance has the following attributes at least.  Most of them
    are same to the on-wire counterparts but in host byte order.
    __init__ takes the corresponding args in this order.

    =============================== ====================================
    Attribute                       Description
    =============================== ====================================
    version                         LACP version.  This parameter must
                                    be set to LACP_VERSION_NUMBER
                                    (i.e. 1).
    actor_system_priority           The priority assigned to this
                                    System.
    actor_system                    The Actor's System ID, encoded as
                                    a MAC address.
    actor_key                       The operational Key value assigned
                                    to the port by the Actor.
    actor_port_priority             The priority assigned to this port.
    actor_port                      The port number assigned to the
                                    port by the Actor.
    actor_state_activity            LACP_STATE_ACTIVE(1) or
                                    LACP_STATE_PASSIVE(0).
    actor_state_timeout             LACP_STATE_SHORT_TIMEOUT(1) or
                                    LACP_STATE_LONG_TIMEOUT(0).
    actor_state_aggregation         LACP_STATE_AGGREGATEABLE(1) or
                                    LACP_STATE_INDIVIDUAL(0).
    actor_state_synchronization     LACP_STATE_IN_SYNC(1) or
                                    LACP_STATE_OUT_OF_SYNC(0).
    actor_state_collecting          LACP_STATE_COLLECTING_ENABLED(1) or
                                    LACP_STATE_COLLECTING_DISABLED(0).
    actor_state_distributing        LACP_STATE_DISTRIBUTING_ENABLED(1)
                                    or
                                    LACP_STATE_DISTRIBUTING_DISABLED(0).
    actor_state_defaulted           LACP_STATE_DEFAULTED_PARTNER(1) or
                                    LACP_STATE_OPERATIONAL_PARTNER(0).
    actor_state_expired             LACP_STATE_EXPIRED(1) or
                                    LACP_STATE_NOT_EXPIRED(0).
    partner_system_priority         The priority assigned to the
                                    Partner System.
    partner_system                  The Partner's System ID, encoded
                                    as a MAC address.
    partner_key                     The operational Key value assigned
                                    to the port by the Partner.
    partner_port_priority           The priority assigned to this port
                                    by the Partner.
    partner_port                    The port number assigned to the
                                    port by the Partner.
    partner_state_*                 Same semantics as the corresponding
                                    actor_state_* attribute, for the
                                    Partner.
    collector_max_delay             The maximum time that the Frame
                                    Collector may delay.
    =============================== ====================================
    """

    LACP_VERSION_NUMBER = 1

    # LACP TLV type
    LACP_TLV_TYPE_ACTOR = 1
    LACP_TLV_TYPE_PARTNER = 2
    LACP_TLV_TYPE_COLLECTOR = 3
    LACP_TLV_TYPE_TERMINATOR = 0

    # LACP state(LACP_Activity)
    LACP_STATE_ACTIVE = 1
    LACP_STATE_PASSIVE = 0
    # LACP state(LACP_Timeout)
    LACP_STATE_SHORT_TIMEOUT = 1
    LACP_STATE_LONG_TIMEOUT = 0
    # LACP state(Aggregation)
    LACP_STATE_AGGREGATEABLE = 1
    LACP_STATE_INDIVIDUAL = 0
    # LACP state(Synchronization)
    LACP_STATE_IN_SYNC = 1
    LACP_STATE_OUT_OF_SYNC = 0
    # LACP state(Collecting)
    LACP_STATE_COLLECTING_ENABLED = 1
    LACP_STATE_COLLECTING_DISABLED = 0
    # Backward-compatible alias for the historical misspelling.
    LACP_STATE_COLELCTING_DISABLED = LACP_STATE_COLLECTING_DISABLED
    # LACP state(Distributing)
    LACP_STATE_DISTRIBUTING_ENABLED = 1
    LACP_STATE_DISTRIBUTING_DISABLED = 0
    # LACP state(Defaulted)
    LACP_STATE_DEFAULTED_PARTNER = 1
    LACP_STATE_OPERATIONAL_PARTNER = 0
    # Backward-compatible alias for the historical misspelling.
    LACP_STATE_DEFAULED_PARTNER = LACP_STATE_DEFAULTED_PARTNER
    # LACP state(Expired)
    LACP_STATE_EXPIRED = 1
    LACP_STATE_NOT_EXPIRED = 0

    # The number of seconds between periodic transmissions using
    # Short Timeouts.
    FAST_PERIODIC_TIME = 1
    # The number of seconds between periodic transmissions using
    # Long Timeouts.
    SLOW_PERIODIC_TIME = 30
    # The number of seconds before invalidating received LACPDU
    # information when using Short Timeouts(3 x Fast_Periodic_Time).
    SHORT_TIMEOUT_TIME = 3 * FAST_PERIODIC_TIME
    # The number of seconds before invalidating received LACPDU
    # information when using Long Timeouts (3 x Slow_Periodic_Time).
    LONG_TIMEOUT_TIME = 3 * SLOW_PERIODIC_TIME

    # struct layouts for the header and the four TLVs.
    _HLEN_PACK_STR = '!BB'
    _HLEN_PACK_LEN = struct.calcsize(_HLEN_PACK_STR)
    _ACTPRT_INFO_PACK_STR = '!BBH6sHHHB3x'
    _ACTPRT_INFO_PACK_LEN = struct.calcsize(_ACTPRT_INFO_PACK_STR)
    _COL_INFO_PACK_STR = '!BBH12x'
    _COL_INFO_PACK_LEN = struct.calcsize(_COL_INFO_PACK_STR)
    _TRM_PACK_STR = '!BB50x'
    _TRM_PACK_LEN = struct.calcsize(_TRM_PACK_STR)
    _ALL_PACK_LEN = _HLEN_PACK_LEN + _ACTPRT_INFO_PACK_LEN * 2 + \
        _COL_INFO_PACK_LEN + _TRM_PACK_LEN
    _MIN_LEN = _ALL_PACK_LEN
    _TYPE = {
        'ascii': [
            'actor_system', 'partner_system'
        ]
    }

    @staticmethod
    def _encode_state(activity, timeout, aggregation, synchronization,
                      collecting, distributing, defaulted, expired):
        """Pack the eight one-bit state flags into a state octet
        (bit 0 = activity ... bit 7 = expired)."""
        return ((activity << 0) |
                (timeout << 1) |
                (aggregation << 2) |
                (synchronization << 3) |
                (collecting << 4) |
                (distributing << 5) |
                (defaulted << 6) |
                (expired << 7))

    @staticmethod
    def _decode_state(state):
        """Unpack a state octet into its eight one-bit flags, returned
        as a tuple ordered from bit 0 (activity) to bit 7 (expired)."""
        return tuple((state >> bit) & 1 for bit in range(8))

    def __init__(self, version=LACP_VERSION_NUMBER,
                 actor_system_priority=0,
                 actor_system='00:00:00:00:00:00',
                 actor_key=0, actor_port_priority=0, actor_port=0,
                 actor_state_activity=0, actor_state_timeout=0,
                 actor_state_aggregation=0,
                 actor_state_synchronization=0,
                 actor_state_collecting=0, actor_state_distributing=0,
                 actor_state_defaulted=0, actor_state_expired=0,
                 partner_system_priority=0,
                 partner_system='00:00:00:00:00:00',
                 partner_key=0, partner_port_priority=0, partner_port=0,
                 partner_state_activity=0, partner_state_timeout=0,
                 partner_state_aggregation=0,
                 partner_state_synchronization=0,
                 partner_state_collecting=0,
                 partner_state_distributing=0,
                 partner_state_defaulted=0, partner_state_expired=0,
                 collector_max_delay=0):
        super(lacp, self).__init__()
        # Every state flag occupies a single bit on the wire; reject
        # anything that is not 0 or 1 early.
        for flag in (actor_state_activity, actor_state_timeout,
                     actor_state_aggregation, actor_state_synchronization,
                     actor_state_collecting, actor_state_distributing,
                     actor_state_defaulted, actor_state_expired,
                     partner_state_activity, partner_state_timeout,
                     partner_state_aggregation,
                     partner_state_synchronization,
                     partner_state_collecting, partner_state_distributing,
                     partner_state_defaulted, partner_state_expired):
            assert flag in (0, 1)
        # ------------------------------
        # Header
        # ------------------------------
        self._subtype = SLOW_SUBTYPE_LACP
        self.version = version
        # ------------------------------
        # Actor Information
        # ------------------------------
        self._actor_tag = self.LACP_TLV_TYPE_ACTOR
        self._actor_length = self._ACTPRT_INFO_PACK_LEN
        self.actor_system_priority = actor_system_priority
        self.actor_system = actor_system
        self.actor_key = actor_key
        self.actor_port_priority = actor_port_priority
        self.actor_port = actor_port
        self.actor_state_activity = actor_state_activity
        self.actor_state_timeout = actor_state_timeout
        self.actor_state_aggregation = actor_state_aggregation
        self.actor_state_synchronization = actor_state_synchronization
        self.actor_state_collecting = actor_state_collecting
        self.actor_state_distributing = actor_state_distributing
        self.actor_state_defaulted = actor_state_defaulted
        self.actor_state_expired = actor_state_expired
        self._actor_state = self._encode_state(
            actor_state_activity, actor_state_timeout,
            actor_state_aggregation, actor_state_synchronization,
            actor_state_collecting, actor_state_distributing,
            actor_state_defaulted, actor_state_expired)
        # ------------------------------
        # Partner Information
        # ------------------------------
        self._partner_tag = self.LACP_TLV_TYPE_PARTNER
        self._partner_length = self._ACTPRT_INFO_PACK_LEN
        self.partner_system_priority = partner_system_priority
        self.partner_system = partner_system
        self.partner_key = partner_key
        self.partner_port_priority = partner_port_priority
        self.partner_port = partner_port
        self.partner_state_activity = partner_state_activity
        self.partner_state_timeout = partner_state_timeout
        self.partner_state_aggregation = partner_state_aggregation
        self.partner_state_synchronization = \
            partner_state_synchronization
        self.partner_state_collecting = partner_state_collecting
        self.partner_state_distributing = partner_state_distributing
        self.partner_state_defaulted = partner_state_defaulted
        self.partner_state_expired = partner_state_expired
        self._partner_state = self._encode_state(
            partner_state_activity, partner_state_timeout,
            partner_state_aggregation, partner_state_synchronization,
            partner_state_collecting, partner_state_distributing,
            partner_state_defaulted, partner_state_expired)
        # ------------------------------
        # Collector Information
        # ------------------------------
        self._collector_tag = self.LACP_TLV_TYPE_COLLECTOR
        self._collector_length = self._COL_INFO_PACK_LEN
        self.collector_max_delay = collector_max_delay
        # ------------------------------
        # Terminator
        # ------------------------------
        self._terminator_tag = self.LACP_TLV_TYPE_TERMINATOR
        self._terminator_length = 0

    @classmethod
    def parser(cls, buf):
        """Decode an LACPDU from ``buf``.

        Returns a ``(lacp instance, None, rest-of-buffer)`` tuple in
        the same style as the other packet parsers in this module.
        """
        assert cls._ALL_PACK_LEN == len(buf)
        offset = 0
        # ------------------------------
        # Header
        # ------------------------------
        (subtype, version
         ) = struct.unpack_from(cls._HLEN_PACK_STR, buf, offset)
        assert SLOW_SUBTYPE_LACP == subtype
        assert cls.LACP_VERSION_NUMBER == version
        offset += cls._HLEN_PACK_LEN
        # ------------------------------
        # Actor Information
        # ------------------------------
        (actor_tag, actor_length, actor_system_priority, actor_system,
         actor_key, actor_port_priority, actor_port, actor_state
         ) = struct.unpack_from(cls._ACTPRT_INFO_PACK_STR, buf, offset)
        assert cls.LACP_TLV_TYPE_ACTOR == actor_tag
        assert cls._ACTPRT_INFO_PACK_LEN == actor_length
        offset += cls._ACTPRT_INFO_PACK_LEN
        (actor_state_activity, actor_state_timeout,
         actor_state_aggregation, actor_state_synchronization,
         actor_state_collecting, actor_state_distributing,
         actor_state_defaulted, actor_state_expired
         ) = cls._decode_state(actor_state)
        # ------------------------------
        # Partner Information
        # ------------------------------
        (partner_tag, partner_length, partner_system_priority,
         partner_system, partner_key, partner_port_priority,
         partner_port, partner_state
         ) = struct.unpack_from(cls._ACTPRT_INFO_PACK_STR, buf, offset)
        assert cls.LACP_TLV_TYPE_PARTNER == partner_tag
        assert cls._ACTPRT_INFO_PACK_LEN == partner_length
        offset += cls._ACTPRT_INFO_PACK_LEN
        (partner_state_activity, partner_state_timeout,
         partner_state_aggregation, partner_state_synchronization,
         partner_state_collecting, partner_state_distributing,
         partner_state_defaulted, partner_state_expired
         ) = cls._decode_state(partner_state)
        # ------------------------------
        # Collector Information
        # ------------------------------
        (collector_tag, collector_length, collector_max_delay
         ) = struct.unpack_from(cls._COL_INFO_PACK_STR, buf, offset)
        assert cls.LACP_TLV_TYPE_COLLECTOR == collector_tag
        assert cls._COL_INFO_PACK_LEN == collector_length
        offset += cls._COL_INFO_PACK_LEN
        # ------------------------------
        # Terminator Information
        # ------------------------------
        (terminator_tag, terminator_length
         ) = struct.unpack_from(cls._TRM_PACK_STR, buf, offset)
        assert cls.LACP_TLV_TYPE_TERMINATOR == terminator_tag
        assert 0 == terminator_length
        return cls(version,
                   actor_system_priority,
                   addrconv.mac.bin_to_text(actor_system),
                   actor_key, actor_port_priority,
                   actor_port, actor_state_activity,
                   actor_state_timeout, actor_state_aggregation,
                   actor_state_synchronization, actor_state_collecting,
                   actor_state_distributing, actor_state_defaulted,
                   actor_state_expired, partner_system_priority,
                   addrconv.mac.bin_to_text(partner_system),
                   partner_key, partner_port_priority,
                   partner_port, partner_state_activity,
                   partner_state_timeout, partner_state_aggregation,
                   partner_state_synchronization,
                   partner_state_collecting, partner_state_distributing,
                   partner_state_defaulted, partner_state_expired,
                   collector_max_delay), None, buf[cls._ALL_PACK_LEN:]

    def serialize(self, payload, prev):
        """Encode this LACPDU into its on-wire byte representation.

        The actor/partner state octets are recomputed from the public
        ``*_state_*`` attributes so that flags changed after
        construction are reflected in the output (previously the octet
        cached at __init__ time was used, silently ignoring such
        changes).
        """
        actor_state = self._encode_state(
            self.actor_state_activity, self.actor_state_timeout,
            self.actor_state_aggregation,
            self.actor_state_synchronization,
            self.actor_state_collecting, self.actor_state_distributing,
            self.actor_state_defaulted, self.actor_state_expired)
        partner_state = self._encode_state(
            self.partner_state_activity, self.partner_state_timeout,
            self.partner_state_aggregation,
            self.partner_state_synchronization,
            self.partner_state_collecting,
            self.partner_state_distributing,
            self.partner_state_defaulted, self.partner_state_expired)
        header = struct.pack(self._HLEN_PACK_STR, self._subtype,
                             self.version)
        actor = struct.pack(self._ACTPRT_INFO_PACK_STR,
                            self._actor_tag, self._actor_length,
                            self.actor_system_priority,
                            addrconv.mac.text_to_bin(self.actor_system),
                            self.actor_key,
                            self.actor_port_priority, self.actor_port,
                            actor_state)
        partner = struct.pack(self._ACTPRT_INFO_PACK_STR,
                              self._partner_tag, self._partner_length,
                              self.partner_system_priority,
                              addrconv.mac.text_to_bin(self.partner_system),
                              self.partner_key,
                              self.partner_port_priority,
                              self.partner_port, partner_state)
        collector = struct.pack(self._COL_INFO_PACK_STR,
                                self._collector_tag,
                                self._collector_length,
                                self.collector_max_delay)
        terminator = struct.pack(self._TRM_PACK_STR,
                                 self._terminator_tag,
                                 self._terminator_length)
        return header + actor + partner + collector + terminator
import getopt
import os
import sys
from datetime import datetime
from os import listdir, stat, makedirs
from os.path import isfile, join, exists
from platform import platform
from time import strftime
def getProjectList(wd, inpfile):
    """Return the project names listed in the first CSV column of inpfile.

    The first line of the file is treated as a header and skipped; each
    remaining line contributes its first comma-separated field, with
    newlines and spaces stripped out.

    :param wd: directory containing the input file
    :param inpfile: name of the input file
    :return: list of project name strings
    """
    with open(join(wd, inpfile), 'r') as inp:
        lines = inp.readlines()
    projects = []
    for line in lines[1:]:
        # The original appended to an undefined name ``files`` and
        # always returned an empty list; accumulate into ``projects``.
        projects.append(line.split(',')[0].replace('\n', '').replace(' ', ''))
    return projects
def joinCSVs(wd, od, inpfile, csvname, globalcsv, dosort, col=0):
    """Concatenate the per-project CSV files into one global CSV.

    Reads ``csvname`` from each project's ``csv`` subdirectory, writes
    the header line of the first project's file to ``globalcsv`` in
    ``od``, then appends the data rows of every project.  Projects whose
    CSV file cannot be read are skipped.

    :param wd: working directory containing the project directories
    :param od: output directory for the joined CSV
    :param inpfile: input file listing the projects (see getProjectList)
    :param csvname: name of the per-project CSV file
    :param globalcsv: name of the joined output CSV file
    :param dosort: when True, sort rows numerically by column ``col``
    :param col: zero-based index of the sort column
    """
    sims = getProjectList(wd, inpfile)
    # Copy the header line of the first project's CSV to the output.
    with open(join(wd, sims[0], 'csv', csvname), 'r') as src:
        firstline = src.readlines()[0]
    with open(join(od, globalcsv), 'w') as out:
        out.write(firstline)
    data = []
    for sim in sims:
        try:
            with open(join(wd, sim, 'csv', csvname), 'r') as src:
                lines = src.readlines()
        except (IOError, OSError):
            # Best-effort: a project without this CSV is skipped.
            # (The original called the Python-2-only sys.exc_clear().)
            continue
        for line in lines[1:]:
            if dosort:
                parts = line.replace('\n', '').split(',')
                # Numeric conversion so the sort below is numeric, not
                # lexicographic.
                parts[col] = float(parts[col])
                data.append(parts)
            else:
                # Strip the newline here; one is re-added when writing
                # (the original kept it and produced blank lines).
                data.append(line.replace('\n', ''))
    if dosort:
        data.sort(key=lambda row: row[col])
        # Rebuild each row as a comma-separated line.  The original
        # iterated ``for j, element in data`` without enumerate and
        # crashed with a ValueError.
        data = [','.join(str(value) for value in row) for row in data]
    with open(join(od, globalcsv), 'a') as out:
        for line in data:
            out.write(line + '\n')
def main(argv):
    """Parse the command line and join the per-project CSV files.

    Mandatory options: -w working directory, -i input file, -c csv
    filename.  Optional: -s sort column, -o output directory (defaults
    to the working directory).  Exits with status 2 on a malformed
    command line.
    """
    workdir = None
    inputfile = None
    csvfile = None
    outdir = None
    column = None
    sort = False
    # Read the command line, throw error if no valid option is provided.
    # Note: value-taking options need ':' (short) / '=' (long) in the
    # getopt specification; the original omitted 'o:' and all '='
    # suffixes, so getopt rejected every value-taking option.
    try:
        opts, args = getopt.getopt(
            argv, 'hw:i:c:s:o:',
            ['help', 'Help', 'workdir=', 'workdirectory=', 'wdir=',
             'inputfile=', 'input=', 'csvfile=', 'csv=', 'sort=',
             'out=', 'outdir='])
    except getopt.GetoptError:
        print('joinCSVs.py -w <working directory> -i <input file> -c <csv filename> -s <sort by column s> -o <output directory>')
        sys.exit(2)
    # Parse the options and create corresponding variables
    for opt, arg in opts:
        if opt in ('-h', '--help', '--Help'):
            print(' ')
            print(' ')
            print('*****************************************************************************************************')
            print(' ')
            print(' ')
            print('          MECHANICS OF EXTREME THIN PLIES IN FIBER REINFORCED COMPOSITE LAMINATES')
            print(' ')
            print('    2D PLANE STRAIN MICROMECHANICAL PARAMETRIC SIMULATION OF REFERENCE VOLUME ELEMENTS')
            print(' ')
            print('                              JOIN DATA IN CSV FORMAT\n')
            print(' ')
            print('                                      by')
            print(' ')
            print('                            <NAME>, 2016-2017')
            print(' ')
            print(' ')
            print('*****************************************************************************************************')
            print(' ')
            print('Program syntax:')
            print('joinCSVs.py -w <working directory> -i <input file> -c <csv filename> -s <sort by column s> -o <output directory>')
            print(' ')
            print('Mandatory arguments:')
            print('-w <working directory>')
            print('-i <input file>')
            print('-c <csv filename>')
            print(' ')
            print('Optional arguments:')
            print('-s <sort by column s>')
            print('-o <output directory>')
            print(' ')
            print('Default values:')
            print('-s <sort by column s> ===> left unsorted')
            print('-o <output directory> ===> working directory')
            print(' ')
            print(' ')
            print(' ')
            sys.exit()
        elif opt in ('-w', '--workdir', '--workdirectory', '--wdir'):
            # Strip a single trailing slash, if present.
            workdir = arg[:-1] if arg.endswith('/') else arg
        elif opt in ('-i', '--inputfile', '--input'):
            # Default the extension to .inp when none is given.
            inputfile = arg if '.' in arg else arg + '.inp'
        elif opt in ('-c', '--csv', '--csvfile'):
            # Default the extension to .csv when none is given.
            csvfile = arg if '.' in arg else arg + '.csv'
        elif opt in ('-s', '--sort'):
            column = int(arg)
            sort = True
        elif opt in ('-o', '--out', '--outdir'):
            outdir = arg[:-1] if arg.endswith('/') else arg
    # Mandatory arguments: stop with an explanatory message when
    # missing.  (The original printed the copy-pasted "status file"
    # message for the input and csv files.)
    if workdir is None:
        print('Error: working directory not provided.')
        sys.exit()
    if inputfile is None:
        print('Error: input file not provided.')
        sys.exit()
    if csvfile is None:
        print('Error: csv file not provided.')
        sys.exit()
    if outdir is None:
        outdir = workdir
    globalCSVname = datetime.now().strftime('%Y-%m-%d') + '_JOINED_' + csvfile
    if sort:
        joinCSVs(workdir, outdir, inputfile, csvfile, globalCSVname, sort, column)
    else:
        joinCSVs(workdir, outdir, inputfile, csvfile, globalCSVname, sort)
# Script entry point: forward the command line (minus the program name)
# to main() when executed directly.  The stray text that had been fused
# onto this line (dataset-extraction residue) is removed.
if __name__ == "__main__":
    main(sys.argv[1:])
import os
from os.path import isfile, join, exists
from os import listdir, stat, makedirs
from datetime import datetime
from time import strftime
from platform import platform
def getProjectList(wd, inpfile):
    """Return the project names listed in the first CSV column of inpfile.

    The first line of the file is treated as a header and skipped; each
    remaining line contributes its first comma-separated field, with
    newlines and spaces stripped out.

    :param wd: directory containing the input file
    :param inpfile: name of the input file
    :return: list of project name strings
    """
    with open(join(wd, inpfile), 'r') as inp:
        lines = inp.readlines()
    projects = []
    for line in lines[1:]:
        # The original appended to an undefined name ``files`` and
        # always returned an empty list; accumulate into ``projects``.
        projects.append(line.split(',')[0].replace('\n', '').replace(' ', ''))
    return projects
def joinCSVs(wd, od, inpfile, csvname, globalcsv, dosort, col=0):
    """Concatenate the per-project CSV files into one global CSV.

    Reads ``csvname`` from each project's ``csv`` subdirectory, writes
    the header line of the first project's file to ``globalcsv`` in
    ``od``, then appends the data rows of every project.  Projects whose
    CSV file cannot be read are skipped.

    :param wd: working directory containing the project directories
    :param od: output directory for the joined CSV
    :param inpfile: input file listing the projects (see getProjectList)
    :param csvname: name of the per-project CSV file
    :param globalcsv: name of the joined output CSV file
    :param dosort: when True, sort rows numerically by column ``col``
    :param col: zero-based index of the sort column
    """
    sims = getProjectList(wd, inpfile)
    # Copy the header line of the first project's CSV to the output.
    with open(join(wd, sims[0], 'csv', csvname), 'r') as src:
        firstline = src.readlines()[0]
    with open(join(od, globalcsv), 'w') as out:
        out.write(firstline)
    data = []
    for sim in sims:
        try:
            with open(join(wd, sim, 'csv', csvname), 'r') as src:
                lines = src.readlines()
        except (IOError, OSError):
            # Best-effort: a project without this CSV is skipped.
            # (The original called the Python-2-only sys.exc_clear().)
            continue
        for line in lines[1:]:
            if dosort:
                parts = line.replace('\n', '').split(',')
                # Numeric conversion so the sort below is numeric, not
                # lexicographic.
                parts[col] = float(parts[col])
                data.append(parts)
            else:
                # Strip the newline here; one is re-added when writing
                # (the original kept it and produced blank lines).
                data.append(line.replace('\n', ''))
    if dosort:
        data.sort(key=lambda row: row[col])
        # Rebuild each row as a comma-separated line.  The original
        # iterated ``for j, element in data`` without enumerate and
        # crashed with a ValueError.
        data = [','.join(str(value) for value in row) for row in data]
    with open(join(od, globalcsv), 'a') as out:
        for line in data:
            out.write(line + '\n')
def main(argv):
    """Parse the command line and join the per-project CSV files.

    Mandatory options: -w working directory, -i input file, -c csv
    filename.  Optional: -s sort column, -o output directory (defaults
    to the working directory).  Exits with status 2 on a malformed
    command line.
    """
    workdir = None
    inputfile = None
    csvfile = None
    outdir = None
    column = None
    sort = False
    # Read the command line, throw error if no valid option is provided.
    # Note: value-taking options need ':' (short) / '=' (long) in the
    # getopt specification; the original omitted 'o:' and all '='
    # suffixes, so getopt rejected every value-taking option.
    try:
        opts, args = getopt.getopt(
            argv, 'hw:i:c:s:o:',
            ['help', 'Help', 'workdir=', 'workdirectory=', 'wdir=',
             'inputfile=', 'input=', 'csvfile=', 'csv=', 'sort=',
             'out=', 'outdir='])
    except getopt.GetoptError:
        print('joinCSVs.py -w <working directory> -i <input file> -c <csv filename> -s <sort by column s> -o <output directory>')
        sys.exit(2)
    # Parse the options and create corresponding variables
    for opt, arg in opts:
        if opt in ('-h', '--help', '--Help'):
            print(' ')
            print(' ')
            print('*****************************************************************************************************')
            print(' ')
            print(' ')
            print('          MECHANICS OF EXTREME THIN PLIES IN FIBER REINFORCED COMPOSITE LAMINATES')
            print(' ')
            print('    2D PLANE STRAIN MICROMECHANICAL PARAMETRIC SIMULATION OF REFERENCE VOLUME ELEMENTS')
            print(' ')
            print('                              JOIN DATA IN CSV FORMAT\n')
            print(' ')
            print('                                      by')
            print(' ')
            print('                            <NAME>, 2016-2017')
            print(' ')
            print(' ')
            print('*****************************************************************************************************')
            print(' ')
            print('Program syntax:')
            print('joinCSVs.py -w <working directory> -i <input file> -c <csv filename> -s <sort by column s> -o <output directory>')
            print(' ')
            print('Mandatory arguments:')
            print('-w <working directory>')
            print('-i <input file>')
            print('-c <csv filename>')
            print(' ')
            print('Optional arguments:')
            print('-s <sort by column s>')
            print('-o <output directory>')
            print(' ')
            print('Default values:')
            print('-s <sort by column s> ===> left unsorted')
            print('-o <output directory> ===> working directory')
            print(' ')
            print(' ')
            print(' ')
            sys.exit()
        elif opt in ('-w', '--workdir', '--workdirectory', '--wdir'):
            # Strip a single trailing slash, if present.
            workdir = arg[:-1] if arg.endswith('/') else arg
        elif opt in ('-i', '--inputfile', '--input'):
            # Default the extension to .inp when none is given.
            inputfile = arg if '.' in arg else arg + '.inp'
        elif opt in ('-c', '--csv', '--csvfile'):
            # Default the extension to .csv when none is given.
            csvfile = arg if '.' in arg else arg + '.csv'
        elif opt in ('-s', '--sort'):
            column = int(arg)
            sort = True
        elif opt in ('-o', '--out', '--outdir'):
            outdir = arg[:-1] if arg.endswith('/') else arg
    # Mandatory arguments: stop with an explanatory message when
    # missing.  (The original printed the copy-pasted "status file"
    # message for the input and csv files.)
    if workdir is None:
        print('Error: working directory not provided.')
        sys.exit()
    if inputfile is None:
        print('Error: input file not provided.')
        sys.exit()
    if csvfile is None:
        print('Error: csv file not provided.')
        sys.exit()
    if outdir is None:
        outdir = workdir
    globalCSVname = datetime.now().strftime('%Y-%m-%d') + '_JOINED_' + csvfile
    if sort:
        joinCSVs(workdir, outdir, inputfile, csvfile, globalCSVname, sort, column)
    else:
        joinCSVs(workdir, outdir, inputfile, csvfile, globalCSVname, sort)
# Script entry point: forward the command line (minus the program name)
# to main() when executed directly.
if __name__ == "__main__":
    main(sys.argv[1:])
import posixpath
import json
from ._2to3 import STRTYPE, iteritems_
from .index_constants import JSON_INDEX_TYPE
from .index_constants import TEXT_INDEX_TYPE
from .index_constants import SPECIAL_INDEX_TYPE
from .index_constants import TEXT_INDEX_ARGS
from .errors import CloudantArgumentError, CloudantException
class Index(object):
    """
    Provides an interface for managing a JSON query index.  Primarily
    meant to be used by the database convenience methods
    :func:`~cloudant.database.CloudantDatabase.create_query_index`,
    :func:`~cloudant.database.CloudantDatabase.delete_query_index`, and
    :func:`~cloudant.database.CloudantDatabase.get_query_indexes`.  It is
    recommended that you use those methods to manage an index rather than
    directly interfacing with Index objects.

    :param CloudantDatabase database: A Cloudant database instance used by the
        Index.
    :param str design_document_id: Optional identifier of the design document.
    :param str name: Optional name of the index.
    :param kwargs: Options used to construct the index definition for the
        purposes of index creation.  For more details on valid options See
        :func:`~cloudant.database.CloudantDatabase.create_query_index`.
    """

    def __init__(self, database, design_document_id=None, name=None, **kwargs):
        self._database = database
        self._r_session = self._database.r_session
        self._ddoc_id = design_document_id
        self._name = name
        self._type = JSON_INDEX_TYPE
        self._def = kwargs

    @property
    def index_url(self):
        """
        Constructs and returns the index URL.

        :returns: Index URL
        """
        return posixpath.join(self._database.database_url, '_index')

    @property
    def design_document_id(self):
        """
        Displays the design document id.

        :returns: Design document that this index belongs to
        """
        return self._ddoc_id

    @property
    def name(self):
        """
        Displays the index name.

        :returns: Name for this index
        """
        return self._name

    @property
    def type(self):
        """
        Displays the index type.

        :returns: Type of this index
        """
        return self._type

    @property
    def definition(self):
        """
        Displays the index definition.  This could be either the definition to
        be used to construct the index or the definition as it is returned by
        a GET request to the *_index* endpoint.

        :returns: Index definition as a dictionary
        """
        return self._def

    def as_a_dict(self):
        """
        Displays the index as a dictionary.  This includes the design document
        id, index name, index type, and index definition.

        :returns: Dictionary representation of the index as a dictionary
        """
        index_dict = {
            'ddoc': self._ddoc_id,
            'name': self._name,
            'type': self._type,
            'def': self._def
        }
        return index_dict

    def create(self):
        """
        Creates the current index in the remote database.

        On success the design document id and index name are refreshed
        from the server response.

        :raises CloudantArgumentError: if the design document id or the
            index name is set but is not a string, or if the index
            definition is invalid.
        """
        payload = {'type': self._type}
        # An empty string is falsy, so a plain truthiness test replaces
        # the redundant ``self._ddoc_id and self._ddoc_id != ''``.
        if self._ddoc_id:
            if not isinstance(self._ddoc_id, STRTYPE):
                msg = (
                    'The design document id: {0} is not a string.'
                ).format(self._ddoc_id)
                raise CloudantArgumentError(msg)
            if self._ddoc_id.startswith('_design/'):
                # The _index endpoint expects the id without the
                # '_design/' prefix.
                payload['ddoc'] = self._ddoc_id[8:]
            else:
                payload['ddoc'] = self._ddoc_id
        if self._name:
            if not isinstance(self._name, STRTYPE):
                msg = 'The index name: {0} is not a string.'.format(self._name)
                raise CloudantArgumentError(msg)
            payload['name'] = self._name
        self._def_check()
        payload['index'] = self._def
        headers = {'Content-Type': 'application/json'}
        resp = self._r_session.post(
            self.index_url,
            data=json.dumps(payload),
            headers=headers
        )
        resp.raise_for_status()
        # Parse the response body once instead of once per field.
        resp_json = resp.json()
        self._ddoc_id = resp_json['id']
        self._name = resp_json['name']

    def _def_check(self):
        """
        Checks that the only definition provided is a "fields" definition.
        """
        if list(self._def.keys()) != ['fields']:
            msg = (
                '{0} provided as argument(s). A JSON index requires that '
                'only a \'fields\' argument is provided.'
            ).format(self._def)
            raise CloudantArgumentError(msg)

    def delete(self):
        """
        Removes the current index from the remote database.

        :raises CloudantArgumentError: if the design document id or the
            index name is not set.
        """
        if not self._ddoc_id:
            msg = 'Deleting an index requires a design document id be provided.'
            raise CloudantArgumentError(msg)
        if not self._name:
            msg = 'Deleting an index requires an index name be provided.'
            raise CloudantArgumentError(msg)
        ddoc_id = self._ddoc_id
        if ddoc_id.startswith('_design/'):
            # The _index endpoint expects the id without the '_design/'
            # prefix.
            ddoc_id = ddoc_id[8:]
        url = posixpath.join(self.index_url, ddoc_id, self._type, self._name)
        resp = self._r_session.delete(url)
        resp.raise_for_status()
class TextIndex(Index):
    """
    Provides an interface for managing a text query index.  Primarily
    meant to be used by the database convenience methods
    :func:`~cloudant.database.CloudantDatabase.create_query_index`,
    :func:`~cloudant.database.CloudantDatabase.delete_query_index`, and
    :func:`~cloudant.database.CloudantDatabase.get_query_indexes`.  It is
    recommended that you use those methods to manage an index rather than
    directly interfacing with TextIndex objects.

    :param CloudantDatabase database: A Cloudant database instance used by the
        TextIndex.
    :param str design_document_id: Optional identifier of the design document.
    :param str name: Optional name of the index.
    :param kwargs: Options used to construct the index definition for the
        purposes of index creation.  For more details on valid options See
        :func:`~cloudant.database.CloudantDatabase.create_query_index`.
    """

    def __init__(self, database, design_document_id=None, name=None, **kwargs):
        super(TextIndex, self).__init__(
            database, design_document_id, name, **kwargs)
        # Identical construction to Index, but flagged as a text index.
        self._type = TEXT_INDEX_TYPE

    def _def_check(self):
        """
        Checks that the definition provided contains only valid arguments for a
        text index.
        """
        # An empty definition is valid.  Otherwise every argument must
        # be a known text index option of the expected type.
        for arg_name, arg_value in iteritems_(self._def):
            if arg_name not in TEXT_INDEX_ARGS:
                raise CloudantArgumentError(
                    'Invalid argument: {0}'.format(arg_name))
            if not isinstance(arg_value, TEXT_INDEX_ARGS[arg_name]):
                raise CloudantArgumentError((
                    'Argument {0} is not an instance of expected type: {1}'
                ).format(arg_name, TEXT_INDEX_ARGS[arg_name]))
class SpecialIndex(Index):
    """
    Provides an interface for viewing the "special" primary index of a
    database.  Primarily meant to be used by the database convenience method
    :func:`~cloudant.database.CloudantDatabase.get_query_indexes`.  It is
    recommended that you use that method to view the "special" index rather
    than directly interfacing with the SpecialIndex object.
    """

    def __init__(
            self,
            database,
            design_document_id=None,
            name='_all_docs',
            **kwargs
    ):
        super(SpecialIndex, self).__init__(
            database,
            design_document_id,
            name,
            **kwargs
        )
        self._type = SPECIAL_INDEX_TYPE

    def create(self):
        """
        A "special" index cannot be created.  This method is disabled for a
        SpecialIndex object.

        :raises CloudantException: always.
        """
        msg = 'Creating the \"special\" index is not allowed.'
        raise CloudantException(msg)

    def delete(self):
        """
        A "special" index cannot be deleted.  This method is disabled for a
        SpecialIndex object.

        :raises CloudantException: always.
        """
        # The stray dataset-residue text fused onto the original final
        # line is removed.
        msg = 'Deleting the \"special\" index is not allowed.'
        raise CloudantException(msg)
import json
from ._2to3 import STRTYPE, iteritems_
from .index_constants import JSON_INDEX_TYPE
from .index_constants import TEXT_INDEX_TYPE
from .index_constants import SPECIAL_INDEX_TYPE
from .index_constants import TEXT_INDEX_ARGS
from .errors import CloudantArgumentError, CloudantException
class Index(object):
"""
Provides an interface for managing a JSON query index. Primarily
meant to be used by the database convenience methods
:func:`~cloudant.database.CloudantDatabase.create_query_index`,
:func:`~cloudant.database.CloudantDatabase.delete_query_index`, and
:func:`~cloudant.database.CloudantDatabase.get_query_indexes`. It is
recommended that you use those methods to manage an index rather than
directly interfacing with Index objects.
:param CloudantDatabase database: A Cloudant database instance used by the
Index.
:param str design_document_id: Optional identifier of the design document.
:param str name: Optional name of the index.
:param kwargs: Options used to construct the index definition for the
purposes of index creation. For more details on valid options See
:func:`~cloudant.database.CloudantDatabase.create_query_index`.
"""
def __init__(self, database, design_document_id=None, name=None, **kwargs):
self._database = database
self._r_session = self._database.r_session
self._ddoc_id = design_document_id
self._name = name
self._type = JSON_INDEX_TYPE
self._def = kwargs
@property
def index_url(self):
"""
Constructs and returns the index URL.
:returns: Index URL
"""
return posixpath.join(self._database.database_url, '_index')
@property
def design_document_id(self):
"""
Displays the design document id.
:returns: Design document that this index belongs to
"""
return self._ddoc_id
@property
def name(self):
"""
Displays the index name.
:returns: Name for this index
"""
return self._name
@property
def type(self):
"""
Displays the index type.
:returns: Type of this index
"""
return self._type
@property
def definition(self):
"""
Displays the index definition. This could be either the definiton to
be used to construct the index or the definition as it is returned by
a GET request to the *_index* endpoint.
:returns: Index definition as a dictionary
"""
return self._def
def as_a_dict(self):
"""
Displays the index as a dictionary. This includes the design document
id, index name, index type, and index definition.
:returns: Dictionary representation of the index as a dictionary
"""
index_dict = {
'ddoc': self._ddoc_id,
'name': self._name,
'type': self._type,
'def': self._def
}
return index_dict
def create(self):
"""
Creates the current index in the remote database.
"""
payload = {'type': self._type}
if self._ddoc_id and self._ddoc_id != '':
if isinstance(self._ddoc_id, STRTYPE):
if self._ddoc_id.startswith('_design/'):
payload['ddoc'] = self._ddoc_id[8:]
else:
payload['ddoc'] = self._ddoc_id
else:
msg = (
'The design document id: {0} is not a string.'
).format(self._ddoc_id)
raise CloudantArgumentError(msg)
if self._name and self._name != '':
if isinstance(self._name, STRTYPE):
payload['name'] = self._name
else:
msg = 'The index name: {0} is not a string.'.format(self._name)
raise CloudantArgumentError(msg)
self._def_check()
payload['index'] = self._def
headers = {'Content-Type': 'application/json'}
resp = self._r_session.post(
self.index_url,
data=json.dumps(payload),
headers=headers
)
resp.raise_for_status()
self._ddoc_id = resp.json()['id']
self._name = resp.json()['name']
return
def _def_check(self):
"""
Checks that the only definition provided is a "fields" definition.
"""
if list(self._def.keys()) != ['fields']:
msg = (
'{0} provided as argument(s). A JSON index requires that '
'only a \'fields\' argument is provided.'
).format(self._def)
raise CloudantArgumentError(msg)
def delete(self):
"""
Removes the current index from the remote database.
"""
if not self._ddoc_id:
msg = 'Deleting an index requires a design document id be provided.'
raise CloudantArgumentError(msg)
if not self._name:
msg = 'Deleting an index requires an index name be provided.'
raise CloudantArgumentError(msg)
ddoc_id = self._ddoc_id
if ddoc_id.startswith('_design/'):
ddoc_id = ddoc_id[8:]
url = posixpath.join(self.index_url, ddoc_id, self._type, self._name)
resp = self._r_session.delete(url)
resp.raise_for_status()
return
class TextIndex(Index):
"""
Provides an interface for managing a text query index. Primarily
meant to be used by the database convenience methods
:func:`~cloudant.database.CloudantDatabase.create_query_index`,
:func:`~cloudant.database.CloudantDatabase.delete_query_index`, and
:func:`~cloudant.database.CloudantDatabase.get_query_indexes`. It is
recommended that you use those methods to manage an index rather than
directly interfacing with TextIndex objects.
:param CloudantDatabase database: A Cloudant database instance used by the
TextIndex.
:param str design_document_id: Optional identifier of the design document.
:param str name: Optional name of the index.
:param kwargs: Options used to construct the index definition for the
purposes of index creation. For more details on valid options See
:func:`~cloudant.database.CloudantDatabase.create_query_index`.
"""
def __init__(self, database, design_document_id=None, name=None, **kwargs):
super(TextIndex, self).__init__(
database,
design_document_id,
name,
**kwargs
)
self._type = TEXT_INDEX_TYPE
def _def_check(self):
"""
Checks that the definition provided contains only valid arguments for a
text index.
"""
if self._def != dict():
for key, val in iteritems_(self._def):
if key not in list(TEXT_INDEX_ARGS.keys()):
msg = 'Invalid argument: {0}'.format(key)
raise CloudantArgumentError(msg)
if not isinstance(val, TEXT_INDEX_ARGS[key]):
msg = (
'Argument {0} is not an instance of expected type: {1}'
).format(key, TEXT_INDEX_ARGS[key])
raise CloudantArgumentError(msg)
class SpecialIndex(Index):
"""
Provides an interface for viewing the "special" primary index of a database.
Primarily meant to be used by the database convenience method
:func:`~cloudant.database.CloudantDatabase.get_query_indexes`. It is
recommended that you use that method to view the "special" index rather than
directly interfacing with the SpecialIndex object.
"""
def __init__(
self,
database,
design_document_id=None,
name='_all_docs',
**kwargs
):
super(SpecialIndex, self).__init__(
database,
design_document_id,
name,
**kwargs
)
self._type = SPECIAL_INDEX_TYPE
def create(self):
"""
A "special" index cannot be created. This method is disabled for a
SpecialIndex object.
"""
msg = 'Creating the \"special\" index is not allowed.'
raise CloudantException(msg)
def delete(self):
"""
A "special" index cannot be deleted. This method is disabled for a
SpecialIndex object.
"""
msg = 'Deleting the \"special\" index is not allowed.'
raise CloudantException(msg) | 0.788624 | 0.283589 |
from __future__ import division
import sys
from pprint import pprint as pp
import requests
import re
import string
import operator
import getopt
import time
class hist():
def __init__(self, data):
self.mi = float(data['m'])
self.ma = float(data['M'])
self.num = int(data['n'])
self.data = data
del self.data['m']
del self.data['M']
del self.data['n']
data2 = {}
for key, value in self.data.iteritems():
data2[int(key)] = int(value)
self.data = data2
return
def subtract(self, h1):
for key, value in h1.data.iteritems():
try:
IntKey = int(key)
self.data[IntKey] = self.data[IntKey] - h1.data[IntKey]
except:
continue
return
def __str__(self):
max_width = 80
val_max = max(self.data.iteritems(), key=operator.itemgetter(1))[1]
s = ""
for x in range(0, self.num+2):
bucket_count = 0
val = self.data.get(x, 0)
if val_max > 0:
bucket_count = int(round((max_width * val) / val_max))
bar = '#' * bucket_count
if bucket_count == 0 and val > 0:
bar = '.'
bucket_desc = (x-1) * (self.ma-self.mi) / self.num
if x == 0:
bucket_desc = float('nan')
elif x == self.num+1:
bucket_desc = float('nan')
line = '% 8.2f %s\n' % (bucket_desc, bar)
s = s + line
return s
class hist_print():
def __init__(self):
return
def get_imetrics(self, name, use_interval, interval):
r = requests.get('http://localhost:8085/imetrics/varz:hist')
match = re.search('^'+name+' (.*)$', r.text, flags=re.MULTILINE)
hist_line = match.group(1)
split = string.split(hist_line)
data = {}
for item in split:
match = re.search('^(.*):(.*)$', item, flags=0)
data[match.group(1)] = match.group(2)
h = hist(data)
if use_interval:
time.sleep(interval)
h1none, h2 = self.get_imetrics(name, False, interval)
return (h, h2)
return (None, h)
if __name__ == '__main__':
try:
arglist, args = getopt.getopt(
sys.argv[
2:], "i:", [
"interval="])
except:
print "Invalid Option!"
exit(1)
use_interval = False
interval = 1.0
for (field, val) in arglist:
if field in ("-i", "--interval"):
use_interval = True
interval = float(val)
hp = hist_print()
name = sys.argv[1]
h1, h2 = hp.get_imetrics(name, use_interval, interval)
if h1 is not None:
h2.subtract(h1)
print '%s' % str(h2) | bin/hist_print.py | from __future__ import division
import sys
from pprint import pprint as pp
import requests
import re
import string
import operator
import getopt
import time
class hist():
def __init__(self, data):
self.mi = float(data['m'])
self.ma = float(data['M'])
self.num = int(data['n'])
self.data = data
del self.data['m']
del self.data['M']
del self.data['n']
data2 = {}
for key, value in self.data.iteritems():
data2[int(key)] = int(value)
self.data = data2
return
def subtract(self, h1):
for key, value in h1.data.iteritems():
try:
IntKey = int(key)
self.data[IntKey] = self.data[IntKey] - h1.data[IntKey]
except:
continue
return
def __str__(self):
max_width = 80
val_max = max(self.data.iteritems(), key=operator.itemgetter(1))[1]
s = ""
for x in range(0, self.num+2):
bucket_count = 0
val = self.data.get(x, 0)
if val_max > 0:
bucket_count = int(round((max_width * val) / val_max))
bar = '#' * bucket_count
if bucket_count == 0 and val > 0:
bar = '.'
bucket_desc = (x-1) * (self.ma-self.mi) / self.num
if x == 0:
bucket_desc = float('nan')
elif x == self.num+1:
bucket_desc = float('nan')
line = '% 8.2f %s\n' % (bucket_desc, bar)
s = s + line
return s
class hist_print():
def __init__(self):
return
def get_imetrics(self, name, use_interval, interval):
r = requests.get('http://localhost:8085/imetrics/varz:hist')
match = re.search('^'+name+' (.*)$', r.text, flags=re.MULTILINE)
hist_line = match.group(1)
split = string.split(hist_line)
data = {}
for item in split:
match = re.search('^(.*):(.*)$', item, flags=0)
data[match.group(1)] = match.group(2)
h = hist(data)
if use_interval:
time.sleep(interval)
h1none, h2 = self.get_imetrics(name, False, interval)
return (h, h2)
return (None, h)
if __name__ == '__main__':
try:
arglist, args = getopt.getopt(
sys.argv[
2:], "i:", [
"interval="])
except:
print "Invalid Option!"
exit(1)
use_interval = False
interval = 1.0
for (field, val) in arglist:
if field in ("-i", "--interval"):
use_interval = True
interval = float(val)
hp = hist_print()
name = sys.argv[1]
h1, h2 = hp.get_imetrics(name, use_interval, interval)
if h1 is not None:
h2.subtract(h1)
print '%s' % str(h2) | 0.410047 | 0.139016 |
import clr
# Import python sys module
import sys
# Import os module
import os
# Import System.IO for saving and opening files
from System.IO import *
# Import C compatible List and String
from System import String
from System.Collections.Generic import List
# Add needed dll references
sys.path.append(os.environ['LIGHTFIELD_ROOT'])
sys.path.append(os.environ['LIGHTFIELD_ROOT']+"\\AddInViews")
clr.AddReference('PrincetonInstruments.LightFieldViewV5')
clr.AddReference('PrincetonInstruments.LightField.AutomationV5')
clr.AddReference('PrincetonInstruments.LightFieldAddInSupportServices')
# PI imports
from PrincetonInstruments.LightField.Automation import Automation
from PrincetonInstruments.LightField.AddIns import CameraSettings
from PrincetonInstruments.LightField.AddIns import ExperimentSettings
from PrincetonInstruments.LightField.AddIns import GatingMode
from PrincetonInstruments.LightField.AddIns import Pulse, DeviceType
def set_sequential_gating(starting_width, starting_delay,
ending_width, ending_delay):
# Check Gating Mode existence
if (experiment.Exists(CameraSettings.GatingMode)):
# Set sequential gating mode
experiment.SetValue(CameraSettings.GatingMode,
GatingMode.Sequential)
pulser = []
# Add PI Pulse type with parameters to pulser list
pulser.append(Pulse(starting_width, starting_delay))
# Add PI Pulse type with parameters to pulser list
pulser.append(Pulse(ending_width,ending_delay))
# Set sequential starting gate
experiment.SetValue(
CameraSettings.GatingSequentialStartingGate,
pulser[0])
# Set sequential ending gate
experiment.SetValue(
CameraSettings.GatingSequentialEndingGate,
pulser[1])
else:
print("System not capable of Gating Mode")
def set_on_chip_accumulations(accumulations):
# Set On-chip accumulations
experiment.SetValue(
CameraSettings.ReadoutControlAccumulations, accumulations)
def get_on_chip_accumulations():
print(String.Format("{0} {1}", "Current On-Chip Accumulations:",
experiment.GetValue(
CameraSettings.ReadoutControlAccumulations)))
def device_found():
# Find connected device
for device in experiment.ExperimentDevices:
if (device.Type == DeviceType.Camera and
"PI-MAX" in device.Model):
return True
# If connected device is not a camera inform the user
print("Camera not found. Please add ",
"PI-MAX type camera to LightField.")
return False
# Create the LightField Application (true for visible)
# The 2nd parameter forces LF to load with no experiment
auto = Automation(True, List[String]())
# Get experiment object
experiment = auto.LightFieldApplication.Experiment
# If PI-MAX3 or 4 found continue
if (device_found()==True):
# Set on-chip accumulations
set_on_chip_accumulations(3)
# Print on-chip accumulations
get_on_chip_accumulations()
# Set sequential starting and ending
# widths and delays
set_sequential_gating(100, 50, 1000, 50)
# Set number of frames
experiment.SetValue(ExperimentSettings.AcquisitionFramesToStore, 10)
# Acquire image
experiment.Acquire()
#Result: This sample will set/get a value for on chip accumulations.
# Set starting and ending sequential pulse width and delay.
# Set number of frames.
# Acquire an image. | LFAutomation/Python/sequential_gating.py | import clr
# Import python sys module
import sys
# Import os module
import os
# Import System.IO for saving and opening files
from System.IO import *
# Import C compatible List and String
from System import String
from System.Collections.Generic import List
# Add needed dll references
sys.path.append(os.environ['LIGHTFIELD_ROOT'])
sys.path.append(os.environ['LIGHTFIELD_ROOT']+"\\AddInViews")
clr.AddReference('PrincetonInstruments.LightFieldViewV5')
clr.AddReference('PrincetonInstruments.LightField.AutomationV5')
clr.AddReference('PrincetonInstruments.LightFieldAddInSupportServices')
# PI imports
from PrincetonInstruments.LightField.Automation import Automation
from PrincetonInstruments.LightField.AddIns import CameraSettings
from PrincetonInstruments.LightField.AddIns import ExperimentSettings
from PrincetonInstruments.LightField.AddIns import GatingMode
from PrincetonInstruments.LightField.AddIns import Pulse, DeviceType
def set_sequential_gating(starting_width, starting_delay,
ending_width, ending_delay):
# Check Gating Mode existence
if (experiment.Exists(CameraSettings.GatingMode)):
# Set sequential gating mode
experiment.SetValue(CameraSettings.GatingMode,
GatingMode.Sequential)
pulser = []
# Add PI Pulse type with parameters to pulser list
pulser.append(Pulse(starting_width, starting_delay))
# Add PI Pulse type with parameters to pulser list
pulser.append(Pulse(ending_width,ending_delay))
# Set sequential starting gate
experiment.SetValue(
CameraSettings.GatingSequentialStartingGate,
pulser[0])
# Set sequential ending gate
experiment.SetValue(
CameraSettings.GatingSequentialEndingGate,
pulser[1])
else:
print("System not capable of Gating Mode")
def set_on_chip_accumulations(accumulations):
# Set On-chip accumulations
experiment.SetValue(
CameraSettings.ReadoutControlAccumulations, accumulations)
def get_on_chip_accumulations():
print(String.Format("{0} {1}", "Current On-Chip Accumulations:",
experiment.GetValue(
CameraSettings.ReadoutControlAccumulations)))
def device_found():
# Find connected device
for device in experiment.ExperimentDevices:
if (device.Type == DeviceType.Camera and
"PI-MAX" in device.Model):
return True
# If connected device is not a camera inform the user
print("Camera not found. Please add ",
"PI-MAX type camera to LightField.")
return False
# Create the LightField Application (true for visible)
# The 2nd parameter forces LF to load with no experiment
auto = Automation(True, List[String]())
# Get experiment object
experiment = auto.LightFieldApplication.Experiment
# If PI-MAX3 or 4 found continue
if (device_found()==True):
# Set on-chip accumulations
set_on_chip_accumulations(3)
# Print on-chip accumulations
get_on_chip_accumulations()
# Set sequential starting and ending
# widths and delays
set_sequential_gating(100, 50, 1000, 50)
# Set number of frames
experiment.SetValue(ExperimentSettings.AcquisitionFramesToStore, 10)
# Acquire image
experiment.Acquire()
#Result: This sample will set/get a value for on chip accumulations.
# Set starting and ending sequential pulse width and delay.
# Set number of frames.
# Acquire an image. | 0.356447 | 0.109992 |
import cv2
from PySide2.QtWidgets import QMainWindow, QFileDialog, QHBoxLayout
from main_interface import gui_main_interface
from PySide2.QtCore import QCoreApplication, Slot, Qt
from tools import add_tree_item, show_image_data, modify_graphics, widget_set
from opencv_function import function_warpaffine, function_cvtcolor, function_inrange, function_resize, function_getrotationmatrix2d
class MainInterface(QMainWindow):
'''主界面类,用来组织所有的功能
@属性说明:
# TODO
@方法说明:
# TODO
'''
_translate = QCoreApplication.translate # 起代替作用
def __init__(self, parent=None):
super().__init__(parent)
self.ui = gui_main_interface.Ui_main_interface()
self.ui.setupUi(self)
self.class_name = self.__class__.__name__ # 获取类名
self._graphics_view = modify_graphics.ModifyQGraphicsView()
self.__init_layout()
self.__init_tree_widget()
self._init_slot_connect()
def __init_layout(self):
'''初始化布局
@参数说明:
无
@返回值:
无
@注意:
无
'''
self.ui.horizontalLayout = QHBoxLayout()
self.ui.horizontalLayout.addWidget(self._graphics_view)
self.ui.horizontalLayout.addWidget(self.ui.table_view)
self.ui.horizontalLayout.addWidget(self.ui.tree_widget)
self.ui.horizontalLayout.setStretch(0,4)
self.ui.horizontalLayout.setStretch(1,4)
self.ui.horizontalLayout.setStretch(2,1)
self.ui.centralwidget.setLayout(self.ui.horizontalLayout)
def __init_tree_widget(self):
'''初始化目录树
@参数说明:
无
@返回值:
无
@注意:
无
'''
# 清空目录树
self.ui.tree_widget.clear() # 清空函数树
# 设置目录树头标签
text = self._translate("MainInterface", "函数")
self.ui.tree_widget.setHeaderLabel(text) # 设置目录树头标签
# 添加顶层节点
text = "OpenCV函数"
self.tree_top_item = add_tree_item.add_tree_item(self.ui.tree_widget, add_tree_item.TreeItemType.top_item.value,
self.class_name, text, tree_top=True)
# 添加组节点
text = "OpenCV图像处理"
self.tree_group_item = add_tree_item.add_tree_item(self.tree_top_item, add_tree_item.TreeItemType.group_item.value,
self.class_name, text)
# 添加函数节点
text = "cv.cvtColor()"
self.get_start_with_image_item = add_tree_item.add_tree_item(self.tree_group_item, add_tree_item.TreeItemType.function_item.value,
self.class_name, text)
text = "cv.inRange()"
self.get_start_with_image_item = add_tree_item.add_tree_item(self.tree_group_item, add_tree_item.TreeItemType.function_item.value,
self.class_name, text)
text = "cv.resize()"
self.get_start_with_image_item = add_tree_item.add_tree_item(self.tree_group_item, add_tree_item.TreeItemType.function_item.value,
self.class_name, text)
text = "cv.warpAffine()"
self.get_start_with_image_item = add_tree_item.add_tree_item(self.tree_group_item, add_tree_item.TreeItemType.function_item.value,
self.class_name, text)
text = "cv.getRotationMatrix2D()"
self.get_start_with_image_item = add_tree_item.add_tree_item(self.tree_group_item, add_tree_item.TreeItemType.function_item.value,
self.class_name, text)
def _init_slot_connect(self):
'''初始化槽函数连接
@参数说明:
无
@返回值:
无
@注意:
无
'''
self.ui.act_load_image.triggered.connect(self.load_image)
self.ui.tree_widget.itemDoubleClicked.connect(self.function_opencv)
self.ui.act_exit.triggered.connect(self.close)
@Slot()
def load_image(self):
'''槽函数,获取图片信息,显示图片并显示图片数据
@参数说明:
无
@返回值:
无
@注意:
无
'''
text1 = self._translate("MainInterface", "载入图片")
text2 = self._translate("MainInterface", "图片文件(*.bmp *.jpg *.png)")
# 获取文件的绝对路径
self._file_name_dir = QFileDialog.getOpenFileName(self, text1, ".", text2)[0]
# 获取图片数据
self._original_image_data = cv2.imread(self._file_name_dir, cv2.IMREAD_UNCHANGED)
# 获取图片的长和宽
self._original_image_h, self._original_image_w = self._original_image_data.shape[:2]
# 在 graphics_widget 里面显示图片
self._graphics_view.scanf_image_data(self._original_image_data)
self._graphics_view.dispaly_image()
# 在 table_view 里面显示图片数据
start_time = cv2.getTickCount()
table_view = show_image_data.TableView(self.ui.table_view, self._original_image_h,
self._original_image_w)
table_view.add_init_data(self._original_image_data, len(self._original_image_data.shape))
end_time = cv2.getTickCount()
print("Loading image spent time :", (end_time - start_time)/cv2.getTickFrequency())
def function_opencv(self, current_item):
'''槽函数,执行点击之后的函数
@参数说明:
无
@返回值:
无
@注意:
无
'''
# 获取点击的选项的文字
item_str = current_item.text(0)
# 进行匹配,来执行不同的函数
if item_str == "cv.cvtColor()":
cvt_color = function_cvtcolor.CvtColor(parent=self, input_image=self._original_image_data)
widget_set.widget_set(cvt_color, "cv.cvtColor()") # 窗口初始化设置
elif item_str == "cv.inRange()":
in_range = function_inrange.InRange(parent=self, input_image=self._original_image_data)
widget_set.widget_set(in_range , "cv.inRange()")
elif item_str == "cv.resize()":
resize = function_resize.Resize(parent=self, input_image=self._original_image_data)
widget_set.widget_set(resize, "cv.resize()")
elif item_str == "cv.warpAffine()":
warp_affine = function_warpaffine.WarpAffine(parent=self, input_image=self._original_image_data)
widget_set.widget_set(warp_affine, "cv.warpAffine()")
elif item_str == "cv.getRotationMatrix2D()":
resize = function_getrotationmatrix2d.GetRotationMatrix2D(parent=self)
widget_set.widget_set(resize, "cv.getRotationMatrix2D()") | src/main_interface/main_interface.py | import cv2
from PySide2.QtWidgets import QMainWindow, QFileDialog, QHBoxLayout
from main_interface import gui_main_interface
from PySide2.QtCore import QCoreApplication, Slot, Qt
from tools import add_tree_item, show_image_data, modify_graphics, widget_set
from opencv_function import function_warpaffine, function_cvtcolor, function_inrange, function_resize, function_getrotationmatrix2d
class MainInterface(QMainWindow):
'''主界面类,用来组织所有的功能
@属性说明:
# TODO
@方法说明:
# TODO
'''
_translate = QCoreApplication.translate # 起代替作用
def __init__(self, parent=None):
super().__init__(parent)
self.ui = gui_main_interface.Ui_main_interface()
self.ui.setupUi(self)
self.class_name = self.__class__.__name__ # 获取类名
self._graphics_view = modify_graphics.ModifyQGraphicsView()
self.__init_layout()
self.__init_tree_widget()
self._init_slot_connect()
def __init_layout(self):
'''初始化布局
@参数说明:
无
@返回值:
无
@注意:
无
'''
self.ui.horizontalLayout = QHBoxLayout()
self.ui.horizontalLayout.addWidget(self._graphics_view)
self.ui.horizontalLayout.addWidget(self.ui.table_view)
self.ui.horizontalLayout.addWidget(self.ui.tree_widget)
self.ui.horizontalLayout.setStretch(0,4)
self.ui.horizontalLayout.setStretch(1,4)
self.ui.horizontalLayout.setStretch(2,1)
self.ui.centralwidget.setLayout(self.ui.horizontalLayout)
def __init_tree_widget(self):
'''初始化目录树
@参数说明:
无
@返回值:
无
@注意:
无
'''
# 清空目录树
self.ui.tree_widget.clear() # 清空函数树
# 设置目录树头标签
text = self._translate("MainInterface", "函数")
self.ui.tree_widget.setHeaderLabel(text) # 设置目录树头标签
# 添加顶层节点
text = "OpenCV函数"
self.tree_top_item = add_tree_item.add_tree_item(self.ui.tree_widget, add_tree_item.TreeItemType.top_item.value,
self.class_name, text, tree_top=True)
# 添加组节点
text = "OpenCV图像处理"
self.tree_group_item = add_tree_item.add_tree_item(self.tree_top_item, add_tree_item.TreeItemType.group_item.value,
self.class_name, text)
# 添加函数节点
text = "cv.cvtColor()"
self.get_start_with_image_item = add_tree_item.add_tree_item(self.tree_group_item, add_tree_item.TreeItemType.function_item.value,
self.class_name, text)
text = "cv.inRange()"
self.get_start_with_image_item = add_tree_item.add_tree_item(self.tree_group_item, add_tree_item.TreeItemType.function_item.value,
self.class_name, text)
text = "cv.resize()"
self.get_start_with_image_item = add_tree_item.add_tree_item(self.tree_group_item, add_tree_item.TreeItemType.function_item.value,
self.class_name, text)
text = "cv.warpAffine()"
self.get_start_with_image_item = add_tree_item.add_tree_item(self.tree_group_item, add_tree_item.TreeItemType.function_item.value,
self.class_name, text)
text = "cv.getRotationMatrix2D()"
self.get_start_with_image_item = add_tree_item.add_tree_item(self.tree_group_item, add_tree_item.TreeItemType.function_item.value,
self.class_name, text)
def _init_slot_connect(self):
'''初始化槽函数连接
@参数说明:
无
@返回值:
无
@注意:
无
'''
self.ui.act_load_image.triggered.connect(self.load_image)
self.ui.tree_widget.itemDoubleClicked.connect(self.function_opencv)
self.ui.act_exit.triggered.connect(self.close)
@Slot()
def load_image(self):
'''槽函数,获取图片信息,显示图片并显示图片数据
@参数说明:
无
@返回值:
无
@注意:
无
'''
text1 = self._translate("MainInterface", "载入图片")
text2 = self._translate("MainInterface", "图片文件(*.bmp *.jpg *.png)")
# 获取文件的绝对路径
self._file_name_dir = QFileDialog.getOpenFileName(self, text1, ".", text2)[0]
# 获取图片数据
self._original_image_data = cv2.imread(self._file_name_dir, cv2.IMREAD_UNCHANGED)
# 获取图片的长和宽
self._original_image_h, self._original_image_w = self._original_image_data.shape[:2]
# 在 graphics_widget 里面显示图片
self._graphics_view.scanf_image_data(self._original_image_data)
self._graphics_view.dispaly_image()
# 在 table_view 里面显示图片数据
start_time = cv2.getTickCount()
table_view = show_image_data.TableView(self.ui.table_view, self._original_image_h,
self._original_image_w)
table_view.add_init_data(self._original_image_data, len(self._original_image_data.shape))
end_time = cv2.getTickCount()
print("Loading image spent time :", (end_time - start_time)/cv2.getTickFrequency())
def function_opencv(self, current_item):
'''槽函数,执行点击之后的函数
@参数说明:
无
@返回值:
无
@注意:
无
'''
# 获取点击的选项的文字
item_str = current_item.text(0)
# 进行匹配,来执行不同的函数
if item_str == "cv.cvtColor()":
cvt_color = function_cvtcolor.CvtColor(parent=self, input_image=self._original_image_data)
widget_set.widget_set(cvt_color, "cv.cvtColor()") # 窗口初始化设置
elif item_str == "cv.inRange()":
in_range = function_inrange.InRange(parent=self, input_image=self._original_image_data)
widget_set.widget_set(in_range , "cv.inRange()")
elif item_str == "cv.resize()":
resize = function_resize.Resize(parent=self, input_image=self._original_image_data)
widget_set.widget_set(resize, "cv.resize()")
elif item_str == "cv.warpAffine()":
warp_affine = function_warpaffine.WarpAffine(parent=self, input_image=self._original_image_data)
widget_set.widget_set(warp_affine, "cv.warpAffine()")
elif item_str == "cv.getRotationMatrix2D()":
resize = function_getrotationmatrix2d.GetRotationMatrix2D(parent=self)
widget_set.widget_set(resize, "cv.getRotationMatrix2D()") | 0.122418 | 0.114467 |
from sympy import oo
from sympy.core import igcd
from sympy.polys.monomials import monomial_min, monomial_div
from sympy.polys.orderings import monomial_key
import random
def poly_LC(f, K):
    """
    Return leading coefficient of ``f``.

    Examples
    ========

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import poly_LC

    >>> poly_LC([], ZZ)
    0
    >>> poly_LC([ZZ(1), ZZ(2), ZZ(3)], ZZ)
    1

    """
    # An empty coefficient list represents the zero polynomial, whose
    # leading coefficient is the domain's zero element.
    return f[0] if f else K.zero
def poly_TC(f, K):
    """
    Return trailing coefficient of ``f``.

    Examples
    ========

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import poly_TC

    >>> poly_TC([], ZZ)
    0
    >>> poly_TC([ZZ(1), ZZ(2), ZZ(3)], ZZ)
    3

    """
    # The trailing (constant-term) coefficient is the last list entry;
    # the zero polynomial has trailing coefficient ``K.zero``.
    return f[-1] if f else K.zero
# Extracting the leading/trailing coefficient works the same way on
# univariate (``dup_*``) and recursively nested multivariate (``dmp_*``)
# dense representations, so all four public names alias the two generic
# helpers above.
dup_LC = dmp_LC = poly_LC
dup_TC = dmp_TC = poly_TC
def dmp_ground_LC(f, u, K):
    """
    Return the ground leading coefficient.

    Examples
    ========

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_ground_LC

    >>> f = ZZ.map([[[1], [2, 3]]])

    >>> dmp_ground_LC(f, 2, ZZ)
    1

    """
    # Peel one level of nesting per variable until a univariate list
    # remains, then take its leading coefficient.
    for _ in range(u):
        f = dmp_LC(f, K)

    return dup_LC(f, K)
def dmp_ground_TC(f, u, K):
    """
    Return the ground trailing coefficient.

    Examples
    ========

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_ground_TC

    >>> f = ZZ.map([[[1], [2, 3]]])

    >>> dmp_ground_TC(f, 2, ZZ)
    3

    """
    # Descend through the trailing coefficient at each level until the
    # innermost univariate list is reached.
    for _ in range(u):
        f = dmp_TC(f, K)

    return dup_TC(f, K)
def dmp_true_LT(f, u, K):
    """
    Return the leading term ``c * x_1**n_1 ... x_k**n_k``.

    Examples
    ========

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_true_LT

    >>> f = ZZ.map([[4], [2, 0], [3, 0, 0]])

    >>> dmp_true_LT(f, 1, ZZ)
    ((2, 0), 4)

    """
    monom = []

    # Walk down the leading coefficient once per outer variable, recording
    # the degree (``len - 1``) at every level along the way.
    for _ in range(u):
        monom.append(len(f) - 1)
        f = f[0]

    # At the innermost level an empty list stands for zero, which
    # contributes exponent 0 for the last variable.
    monom.append(len(f) - 1 if f else 0)

    return tuple(monom), dup_LC(f, K)
def dup_degree(f):
    """
    Return the leading degree of ``f`` in ``K[x]``.

    Note that the degree of 0 is negative infinity (the SymPy object -oo).

    Examples
    ========

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dup_degree

    >>> f = ZZ.map([1, 2, 0, 3])

    >>> dup_degree(f)
    3

    """
    # Degree is one less than the coefficient count; the zero polynomial
    # (empty list) has degree -oo by convention.
    return len(f) - 1 if f else -oo
def dmp_degree(f, u):
    """
    Return the leading degree of ``f`` in ``x_0`` in ``K[X]``.

    Note that the degree of 0 is negative infinity (the SymPy object -oo).

    Examples
    ========

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_degree

    >>> dmp_degree([[[]]], 2)
    -oo

    >>> f = ZZ.map([[2], [1, 2, 3]])

    >>> dmp_degree(f, 1)
    1

    """
    # The zero polynomial (at any level of nesting) has degree -oo;
    # otherwise the degree in the main variable is ``len(f) - 1``.
    return -oo if dmp_zero_p(f, u) else len(f) - 1
def _rec_degree_in(g, v, i, j):
    """Recursive helper function for :func:`dmp_degree_in`."""
    if i == j:
        return dmp_degree(g, v)

    # Not yet at the target variable: recurse one level deeper into every
    # coefficient and keep the maximum degree found.
    return max(_rec_degree_in(c, v - 1, i + 1, j) for c in g)
def dmp_degree_in(f, j, u):
    """
    Return the leading degree of ``f`` in ``x_j`` in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_degree_in

    >>> f = ZZ.map([[2], [1, 2, 3]])

    >>> dmp_degree_in(f, 0, 1)
    1
    >>> dmp_degree_in(f, 1, 1)
    2

    """
    if not j:
        # The main variable needs no recursion.
        return dmp_degree(f, u)
    if not 0 <= j <= u:
        raise IndexError("0 <= j <= %s expected, got %s" % (u, j))

    return _rec_degree_in(f, u, 0, j)
def _rec_degree_list(g, v, i, degs):
    """Recursive helper for :func:`dmp_degree_list`."""
    # Raise the recorded maximum for variable ``i`` if this sub-polynomial
    # has a higher degree in it.
    degs[i] = max(degs[i], dmp_degree(g, v))

    if v > 0:
        for coeff in g:
            _rec_degree_list(coeff, v - 1, i + 1, degs)
def dmp_degree_list(f, u):
    """
    Return a list of degrees of ``f`` in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_degree_list

    >>> f = ZZ.map([[1], [1, 2, 3]])

    >>> dmp_degree_list(f, 1)
    (1, 2)

    """
    # Start every variable at -oo (degree of zero) and let the recursive
    # walk raise each entry to its true maximum.
    degs = [-oo for _ in range(u + 1)]
    _rec_degree_list(f, u, 0, degs)
    return tuple(degs)
def dup_strip(f):
    """
    Remove leading zeros from ``f`` in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys.densebasic import dup_strip

    >>> dup_strip([0, 0, 1, 2, 3, 0])
    [1, 2, 3, 0]

    """
    # Fast path: empty input, or the leading coefficient is already
    # nonzero — return the list unchanged (same object).
    if not f or f[0]:
        return f

    for idx, cf in enumerate(f):
        if cf:
            return f[idx:]

    # Every coefficient was zero.
    return []
def dmp_strip(f, u):
    """
    Remove leading zeros from ``f`` in ``K[X]``.
    Examples
    ========
    >>> from sympy.polys.densebasic import dmp_strip
    >>> dmp_strip([[], [0, 1, 2], [1]], 1)
    [[0, 1, 2], [1]]
    """
    if not u:
        return dup_strip(f)
    if dmp_zero_p(f, u):
        return f
    v = u - 1
    # Drop leading coefficients that are zero polynomials one level down;
    # return starting at the first non-zero one.
    for i, c in enumerate(f):
        if not dmp_zero_p(c, v):
            return f[i:]
    # Every coefficient was zero: canonical zero polynomial.
    return dmp_zero(u)
def _rec_validate(f, g, i, K):
"""Recursive helper for :func:`dmp_validate`."""
if type(g) is not list:
if K is not None and not K.of_type(g):
raise TypeError("%s in %s in not of type %s" % (g, f, K.dtype))
return {i - 1}
elif not g:
return {i}
else:
levels = set()
for c in g:
levels |= _rec_validate(f, c, i + 1, K)
return levels
def _rec_strip(g, v):
"""Recursive helper for :func:`_rec_strip`."""
if not v:
return dup_strip(g)
w = v - 1
return dmp_strip([ _rec_strip(c, w) for c in g ], v)
def dmp_validate(f, K=None):
"""
Return the number of levels in ``f`` and recursively strip it.
Examples
========
>>> from sympy.polys.densebasic import dmp_validate
>>> dmp_validate([[], [0, 1, 2], [1]])
([[1, 2], [1]], 1)
>>> dmp_validate([[1], 1])
Traceback (most recent call last):
...
ValueError: invalid data structure for a multivariate polynomial
"""
levels = _rec_validate(f, f, 0, K)
u = levels.pop()
if not levels:
return _rec_strip(f, u), u
else:
raise ValueError(
"invalid data structure for a multivariate polynomial")
def dup_reverse(f):
"""
Compute ``x**n * f(1/x)``, i.e.: reverse ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_reverse
>>> f = ZZ.map([1, 2, 3, 0])
>>> dup_reverse(f)
[3, 2, 1]
"""
return dup_strip(list(reversed(f)))
def dup_copy(f):
"""
Create a new copy of a polynomial ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_copy
>>> f = ZZ.map([1, 2, 3, 0])
>>> dup_copy([1, 2, 3, 0])
[1, 2, 3, 0]
"""
return list(f)
def dmp_copy(f, u):
"""
Create a new copy of a polynomial ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_copy
>>> f = ZZ.map([[1], [1, 2]])
>>> dmp_copy(f, 1)
[[1], [1, 2]]
"""
if not u:
return list(f)
v = u - 1
return [ dmp_copy(c, v) for c in f ]
def dup_to_tuple(f):
    """
    Convert `f` into a tuple.
    This is needed for hashing. This is similar to dup_copy().
    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dup_to_tuple
    >>> f = ZZ.map([1, 2, 3, 0])
    >>> dup_to_tuple(f)
    (1, 2, 3, 0)
    """
    # Tuples are hashable while lists are not, so this is the canonical
    # hashable form of a dense univariate polynomial.
    return tuple(f)
def dmp_to_tuple(f, u):
    """
    Convert `f` into a nested tuple of tuples.
    This is needed for hashing. This is similar to dmp_copy().
    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_to_tuple
    >>> f = ZZ.map([[1], [1, 2]])
    >>> dmp_to_tuple(f, 1)
    ((1,), (1, 2))
    """
    # Ground-level coefficients are already hashable, so a flat tuple
    # suffices there; otherwise recurse one nesting level down.
    if not u:
        return tuple(f)
    return tuple(dmp_to_tuple(c, u - 1) for c in f)
def dup_normal(f, K):
"""
Normalize univariate polynomial in the given domain.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_normal
>>> dup_normal([0, 1.5, 2, 3], ZZ)
[1, 2, 3]
"""
return dup_strip([ K.normal(c) for c in f ])
def dmp_normal(f, u, K):
"""
Normalize a multivariate polynomial in the given domain.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_normal
>>> dmp_normal([[], [0, 1.5, 2]], 1, ZZ)
[[1, 2]]
"""
if not u:
return dup_normal(f, K)
v = u - 1
return dmp_strip([ dmp_normal(c, v, K) for c in f ], u)
def dup_convert(f, K0, K1):
"""
Convert the ground domain of ``f`` from ``K0`` to ``K1``.
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_convert
>>> R, x = ring("x", ZZ)
>>> dup_convert([R(1), R(2)], R.to_domain(), ZZ)
[1, 2]
>>> dup_convert([ZZ(1), ZZ(2)], ZZ, R.to_domain())
[1, 2]
"""
if K0 is not None and K0 == K1:
return f
else:
return dup_strip([ K1.convert(c, K0) for c in f ])
def dmp_convert(f, u, K0, K1):
"""
Convert the ground domain of ``f`` from ``K0`` to ``K1``.
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_convert
>>> R, x = ring("x", ZZ)
>>> dmp_convert([[R(1)], [R(2)]], 1, R.to_domain(), ZZ)
[[1], [2]]
>>> dmp_convert([[ZZ(1)], [ZZ(2)]], 1, ZZ, R.to_domain())
[[1], [2]]
"""
if not u:
return dup_convert(f, K0, K1)
if K0 is not None and K0 == K1:
return f
v = u - 1
return dmp_strip([ dmp_convert(c, v, K0, K1) for c in f ], u)
def dup_from_sympy(f, K):
"""
Convert the ground domain of ``f`` from SymPy to ``K``.
Examples
========
>>> from sympy import S
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_from_sympy
>>> dup_from_sympy([S(1), S(2)], ZZ) == [ZZ(1), ZZ(2)]
True
"""
return dup_strip([ K.from_sympy(c) for c in f ])
def dmp_from_sympy(f, u, K):
"""
Convert the ground domain of ``f`` from SymPy to ``K``.
Examples
========
>>> from sympy import S
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_from_sympy
>>> dmp_from_sympy([[S(1)], [S(2)]], 1, ZZ) == [[ZZ(1)], [ZZ(2)]]
True
"""
if not u:
return dup_from_sympy(f, K)
v = u - 1
return dmp_strip([ dmp_from_sympy(c, v, K) for c in f ], u)
def dup_nth(f, n, K):
"""
Return the ``n``-th coefficient of ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_nth
>>> f = ZZ.map([1, 2, 3])
>>> dup_nth(f, 0, ZZ)
3
>>> dup_nth(f, 4, ZZ)
0
"""
if n < 0:
raise IndexError("'n' must be non-negative, got %i" % n)
elif n >= len(f):
return K.zero
else:
return f[dup_degree(f) - n]
def dmp_nth(f, n, u, K):
"""
Return the ``n``-th coefficient of ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_nth
>>> f = ZZ.map([[1], [2], [3]])
>>> dmp_nth(f, 0, 1, ZZ)
[3]
>>> dmp_nth(f, 4, 1, ZZ)
[]
"""
if n < 0:
raise IndexError("'n' must be non-negative, got %i" % n)
elif n >= len(f):
return dmp_zero(u - 1)
else:
return f[dmp_degree(f, u) - n]
def dmp_ground_nth(f, N, u, K):
"""
Return the ground ``n``-th coefficient of ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_ground_nth
>>> f = ZZ.map([[1], [2, 3]])
>>> dmp_ground_nth(f, (0, 1), 1, ZZ)
2
"""
v = u
for n in N:
if n < 0:
raise IndexError("`n` must be non-negative, got %i" % n)
elif n >= len(f):
return K.zero
else:
d = dmp_degree(f, v)
if d == -oo:
d = -1
f, v = f[d - n], v - 1
return f
def dmp_zero_p(f, u):
    """
    Return ``True`` if ``f`` is zero in ``K[X]``.
    Examples
    ========
    >>> from sympy.polys.densebasic import dmp_zero_p
    >>> dmp_zero_p([[[[[]]]]], 4)
    True
    >>> dmp_zero_p([[[[[1]]]]], 4)
    False
    """
    # A zero polynomial in u + 1 variables is a chain of u singleton
    # lists wrapped around an empty ground-level list.
    for _ in range(u):
        if len(f) != 1:
            return False
        f = f[0]
    return not f
def dmp_zero(u):
    """
    Return a multivariate zero.
    Examples
    ========
    >>> from sympy.polys.densebasic import dmp_zero
    >>> dmp_zero(4)
    [[[[[]]]]]
    """
    # Wrap an empty ground-level list in u levels of nesting.
    r = []
    while u > 0:
        r = [r]
        u -= 1
    return r
def dmp_one_p(f, u, K):
"""
Return ``True`` if ``f`` is one in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_one_p
>>> dmp_one_p([[[ZZ(1)]]], 2, ZZ)
True
"""
return dmp_ground_p(f, K.one, u)
def dmp_one(u, K):
"""
Return a multivariate one over ``K``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_one
>>> dmp_one(2, ZZ)
[[[1]]]
"""
return dmp_ground(K.one, u)
def dmp_ground_p(f, c, u):
"""
Return True if ``f`` is constant in ``K[X]``.
Examples
========
>>> from sympy.polys.densebasic import dmp_ground_p
>>> dmp_ground_p([[[3]]], 3, 2)
True
>>> dmp_ground_p([[[4]]], None, 2)
True
"""
if c is not None and not c:
return dmp_zero_p(f, u)
while u:
if len(f) != 1:
return False
f = f[0]
u -= 1
if c is None:
return len(f) <= 1
else:
return f == [c]
def dmp_ground(c, u):
    """
    Return a multivariate constant.
    Examples
    ========
    >>> from sympy.polys.densebasic import dmp_ground
    >>> dmp_ground(3, 5)
    [[[[[[3]]]]]]
    >>> dmp_ground(1, -1)
    1
    """
    # Zero is represented structurally (nested empty lists), never as [0].
    if not c:
        return dmp_zero(u)
    # u + 1 wrappings; u == -1 yields the bare ground coefficient.
    result = c
    for _ in range(u + 1):
        result = [result]
    return result
def dmp_zeros(n, u, K):
"""
Return a list of multivariate zeros.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_zeros
>>> dmp_zeros(3, 2, ZZ)
[[[[]]], [[[]]], [[[]]]]
>>> dmp_zeros(3, -1, ZZ)
[0, 0, 0]
"""
if not n:
return []
if u < 0:
return [K.zero]*n
else:
return [ dmp_zero(u) for i in range(n) ]
def dmp_grounds(c, n, u):
"""
Return a list of multivariate constants.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_grounds
>>> dmp_grounds(ZZ(4), 3, 2)
[[[[4]]], [[[4]]], [[[4]]]]
>>> dmp_grounds(ZZ(4), 3, -1)
[4, 4, 4]
"""
if not n:
return []
if u < 0:
return [c]*n
else:
return [ dmp_ground(c, u) for i in range(n) ]
def dmp_negative_p(f, u, K):
"""
Return ``True`` if ``LC(f)`` is negative.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_negative_p
>>> dmp_negative_p([[ZZ(1)], [-ZZ(1)]], 1, ZZ)
False
>>> dmp_negative_p([[-ZZ(1)], [ZZ(1)]], 1, ZZ)
True
"""
return K.is_negative(dmp_ground_LC(f, u, K))
def dmp_positive_p(f, u, K):
"""
Return ``True`` if ``LC(f)`` is positive.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_positive_p
>>> dmp_positive_p([[ZZ(1)], [-ZZ(1)]], 1, ZZ)
True
>>> dmp_positive_p([[-ZZ(1)], [ZZ(1)]], 1, ZZ)
False
"""
return K.is_positive(dmp_ground_LC(f, u, K))
def dup_from_dict(f, K):
"""
Create a ``K[x]`` polynomial from a ``dict``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_from_dict
>>> dup_from_dict({(0,): ZZ(7), (2,): ZZ(5), (4,): ZZ(1)}, ZZ)
[1, 0, 5, 0, 7]
>>> dup_from_dict({}, ZZ)
[]
"""
if not f:
return []
n, h = max(f.keys()), []
if type(n) is int:
for k in range(n, -1, -1):
h.append(f.get(k, K.zero))
else:
(n,) = n
for k in range(n, -1, -1):
h.append(f.get((k,), K.zero))
return dup_strip(h)
def dup_from_raw_dict(f, K):
"""
Create a ``K[x]`` polynomial from a raw ``dict``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_from_raw_dict
>>> dup_from_raw_dict({0: ZZ(7), 2: ZZ(5), 4: ZZ(1)}, ZZ)
[1, 0, 5, 0, 7]
"""
if not f:
return []
n, h = max(f.keys()), []
for k in range(n, -1, -1):
h.append(f.get(k, K.zero))
return dup_strip(h)
def dmp_from_dict(f, u, K):
"""
Create a ``K[X]`` polynomial from a ``dict``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_from_dict
>>> dmp_from_dict({(0, 0): ZZ(3), (0, 1): ZZ(2), (2, 1): ZZ(1)}, 1, ZZ)
[[1, 0], [], [2, 3]]
>>> dmp_from_dict({}, 0, ZZ)
[]
"""
if not u:
return dup_from_dict(f, K)
if not f:
return dmp_zero(u)
coeffs = {}
for monom, coeff in f.items():
head, tail = monom[0], monom[1:]
if head in coeffs:
coeffs[head][tail] = coeff
else:
coeffs[head] = { tail: coeff }
n, v, h = max(coeffs.keys()), u - 1, []
for k in range(n, -1, -1):
coeff = coeffs.get(k)
if coeff is not None:
h.append(dmp_from_dict(coeff, v, K))
else:
h.append(dmp_zero(v))
return dmp_strip(h, u)
def dup_to_dict(f, K=None, zero=False):
    """
    Convert ``K[x]`` polynomial to a ``dict``.
    Examples
    ========
    >>> from sympy.polys.densebasic import dup_to_dict
    >>> dup_to_dict([1, 0, 5, 0, 7])
    {(0,): 7, (2,): 5, (4,): 1}
    >>> dup_to_dict([])
    {}
    """
    if not f and zero:
        # Explicitly represent the zero polynomial when requested.
        return {(0,): K.zero}
    n = len(f) - 1
    result = {}
    # Walk from the trailing coefficient upwards so keys are inserted in
    # increasing degree order; only non-zero terms are kept.
    for i in range(n, -1, -1):
        if f[i]:
            result[(n - i,)] = f[i]
    return result
def dup_to_raw_dict(f, K=None, zero=False):
    """
    Convert a ``K[x]`` polynomial to a raw ``dict``.
    Examples
    ========
    >>> from sympy.polys.densebasic import dup_to_raw_dict
    >>> dup_to_raw_dict([1, 0, 5, 0, 7])
    {0: 7, 2: 5, 4: 1}
    """
    if not f and zero:
        # Explicitly represent the zero polynomial when requested.
        return {0: K.zero}
    n = len(f) - 1
    result = {}
    # Walk from the trailing coefficient upwards so keys are inserted in
    # increasing degree order; only non-zero terms are kept.
    for i in range(n, -1, -1):
        if f[i]:
            result[n - i] = f[i]
    return result
def dmp_to_dict(f, u, K=None, zero=False):
    """
    Convert a ``K[X]`` polynomial to a ``dict``.
    Examples
    ========
    >>> from sympy.polys.densebasic import dmp_to_dict
    >>> dmp_to_dict([[1, 0], [], [2, 3]], 1)
    {(0, 0): 3, (0, 1): 2, (2, 1): 1}
    >>> dmp_to_dict([], 0)
    {}
    """
    if not u:
        return dup_to_dict(f, K, zero=zero)
    if dmp_zero_p(f, u) and zero:
        # Explicitly represent the zero polynomial when requested.
        return {(0,)*(u + 1): K.zero}
    n, v, result = dmp_degree(f, u), u - 1, {}
    if n == -oo:
        # Zero polynomial with zero=False: fall through to an empty dict.
        n = -1
    for k in range(0, n + 1):
        # Recursively convert each coefficient, prefixing its exponents
        # with the degree k in the outermost variable.
        h = dmp_to_dict(f[n - k], v)
        for exp, coeff in h.items():
            result[(k,) + exp] = coeff
    return result
def dmp_swap(f, i, j, u, K):
"""
Transform ``K[..x_i..x_j..]`` to ``K[..x_j..x_i..]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_swap
>>> f = ZZ.map([[[2], [1, 0]], []])
>>> dmp_swap(f, 0, 1, 2, ZZ)
[[[2], []], [[1, 0], []]]
>>> dmp_swap(f, 1, 2, 2, ZZ)
[[[1], [2, 0]], [[]]]
>>> dmp_swap(f, 0, 2, 2, ZZ)
[[[1, 0]], [[2, 0], []]]
"""
if i < 0 or j < 0 or i > u or j > u:
raise IndexError("0 <= i < j <= %s expected" % u)
elif i == j:
return f
F, H = dmp_to_dict(f, u), {}
for exp, coeff in F.items():
H[exp[:i] + (exp[j],) +
exp[i + 1:j] +
(exp[i],) + exp[j + 1:]] = coeff
return dmp_from_dict(H, u, K)
def dmp_permute(f, P, u, K):
"""
Return a polynomial in ``K[x_{P(1)},..,x_{P(n)}]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_permute
>>> f = ZZ.map([[[2], [1, 0]], []])
>>> dmp_permute(f, [1, 0, 2], 2, ZZ)
[[[2], []], [[1, 0], []]]
>>> dmp_permute(f, [1, 2, 0], 2, ZZ)
[[[1], []], [[2, 0], []]]
"""
F, H = dmp_to_dict(f, u), {}
for exp, coeff in F.items():
new_exp = [0]*len(exp)
for e, p in zip(exp, P):
new_exp[p] = e
H[tuple(new_exp)] = coeff
return dmp_from_dict(H, u, K)
def dmp_nest(f, l, K):
"""
Return a multivariate value nested ``l``-levels.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_nest
>>> dmp_nest([[ZZ(1)]], 2, ZZ)
[[[[1]]]]
"""
if not isinstance(f, list):
return dmp_ground(f, l)
for i in range(l):
f = [f]
return f
def dmp_raise(f, l, u, K):
"""
Return a multivariate polynomial raised ``l``-levels.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_raise
>>> f = ZZ.map([[], [1, 2]])
>>> dmp_raise(f, 2, 1, ZZ)
[[[[]]], [[[1]], [[2]]]]
"""
if not l:
return f
if not u:
if not f:
return dmp_zero(l)
k = l - 1
return [ dmp_ground(c, k) for c in f ]
v = u - 1
return [ dmp_raise(c, l, v, K) for c in f ]
def dup_deflate(f, K):
"""
Map ``x**m`` to ``y`` in a polynomial in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_deflate
>>> f = ZZ.map([1, 0, 0, 1, 0, 0, 1])
>>> dup_deflate(f, ZZ)
(3, [1, 1, 1])
"""
if dup_degree(f) <= 0:
return 1, f
g = 0
for i in range(len(f)):
if not f[-i - 1]:
continue
g = igcd(g, i)
if g == 1:
return 1, f
return g, f[::g]
def dmp_deflate(f, u, K):
"""
Map ``x_i**m_i`` to ``y_i`` in a polynomial in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_deflate
>>> f = ZZ.map([[1, 0, 0, 2], [], [3, 0, 0, 4]])
>>> dmp_deflate(f, 1, ZZ)
((2, 3), [[1, 2], [3, 4]])
"""
if dmp_zero_p(f, u):
return (1,)*(u + 1), f
F = dmp_to_dict(f, u)
B = [0]*(u + 1)
for M in F.keys():
for i, m in enumerate(M):
B[i] = igcd(B[i], m)
for i, b in enumerate(B):
if not b:
B[i] = 1
B = tuple(B)
if all(b == 1 for b in B):
return B, f
H = {}
for A, coeff in F.items():
N = [ a // b for a, b in zip(A, B) ]
H[tuple(N)] = coeff
return B, dmp_from_dict(H, u, K)
def dup_multi_deflate(polys, K):
"""
Map ``x**m`` to ``y`` in a set of polynomials in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_multi_deflate
>>> f = ZZ.map([1, 0, 2, 0, 3])
>>> g = ZZ.map([4, 0, 0])
>>> dup_multi_deflate((f, g), ZZ)
(2, ([1, 2, 3], [4, 0]))
"""
G = 0
for p in polys:
if dup_degree(p) <= 0:
return 1, polys
g = 0
for i in range(len(p)):
if not p[-i - 1]:
continue
g = igcd(g, i)
if g == 1:
return 1, polys
G = igcd(G, g)
return G, tuple([ p[::G] for p in polys ])
def dmp_multi_deflate(polys, u, K):
"""
Map ``x_i**m_i`` to ``y_i`` in a set of polynomials in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_multi_deflate
>>> f = ZZ.map([[1, 0, 0, 2], [], [3, 0, 0, 4]])
>>> g = ZZ.map([[1, 0, 2], [], [3, 0, 4]])
>>> dmp_multi_deflate((f, g), 1, ZZ)
((2, 1), ([[1, 0, 0, 2], [3, 0, 0, 4]], [[1, 0, 2], [3, 0, 4]]))
"""
if not u:
M, H = dup_multi_deflate(polys, K)
return (M,), H
F, B = [], [0]*(u + 1)
for p in polys:
f = dmp_to_dict(p, u)
if not dmp_zero_p(p, u):
for M in f.keys():
for i, m in enumerate(M):
B[i] = igcd(B[i], m)
F.append(f)
for i, b in enumerate(B):
if not b:
B[i] = 1
B = tuple(B)
if all(b == 1 for b in B):
return B, polys
H = []
for f in F:
h = {}
for A, coeff in f.items():
N = [ a // b for a, b in zip(A, B) ]
h[tuple(N)] = coeff
H.append(dmp_from_dict(h, u, K))
return B, tuple(H)
def dup_inflate(f, m, K):
    """
    Map ``y`` to ``x**m`` in a polynomial in ``K[x]``.
    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dup_inflate
    >>> f = ZZ.map([1, 1, 1])
    >>> dup_inflate(f, 3, ZZ)
    [1, 0, 0, 1, 0, 0, 1]
    """
    if m <= 0:
        raise IndexError("'m' must be positive, got %s" % m)
    if m == 1 or not f:
        return f
    # Insert m - 1 zero coefficients between consecutive coefficients.
    result = [f[0]]
    pad = [K.zero]*(m - 1)
    for coeff in f[1:]:
        result += pad
        result.append(coeff)
    return result
def _rec_inflate(g, M, v, i, K):
"""Recursive helper for :func:`dmp_inflate`."""
if not v:
return dup_inflate(g, M[i], K)
if M[i] <= 0:
raise IndexError("all M[i] must be positive, got %s" % M[i])
w, j = v - 1, i + 1
g = [ _rec_inflate(c, M, w, j, K) for c in g ]
result = [g[0]]
for coeff in g[1:]:
for _ in range(1, M[i]):
result.append(dmp_zero(w))
result.append(coeff)
return result
def dmp_inflate(f, M, u, K):
"""
Map ``y_i`` to ``x_i**k_i`` in a polynomial in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_inflate
>>> f = ZZ.map([[1, 2], [3, 4]])
>>> dmp_inflate(f, (2, 3), 1, ZZ)
[[1, 0, 0, 2], [], [3, 0, 0, 4]]
"""
if not u:
return dup_inflate(f, M[0], K)
if all(m == 1 for m in M):
return f
else:
return _rec_inflate(f, M, u, 0, K)
def dmp_exclude(f, u, K):
"""
Exclude useless levels from ``f``.
Return the levels excluded, the new excluded ``f``, and the new ``u``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_exclude
>>> f = ZZ.map([[[1]], [[1], [2]]])
>>> dmp_exclude(f, 2, ZZ)
([2], [[1], [1, 2]], 1)
"""
if not u or dmp_ground_p(f, None, u):
return [], f, u
J, F = [], dmp_to_dict(f, u)
for j in range(0, u + 1):
for monom in F.keys():
if monom[j]:
break
else:
J.append(j)
if not J:
return [], f, u
f = {}
for monom, coeff in F.items():
monom = list(monom)
for j in reversed(J):
del monom[j]
f[tuple(monom)] = coeff
u -= len(J)
return J, dmp_from_dict(f, u, K), u
def dmp_include(f, J, u, K):
"""
Include useless levels in ``f``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_include
>>> f = ZZ.map([[1], [1, 2]])
>>> dmp_include(f, [2], 1, ZZ)
[[[1]], [[1], [2]]]
"""
if not J:
return f
F, f = dmp_to_dict(f, u), {}
for monom, coeff in F.items():
monom = list(monom)
for j in J:
monom.insert(j, 0)
f[tuple(monom)] = coeff
u += len(J)
return dmp_from_dict(f, u, K)
def dmp_inject(f, u, K, front=False):
"""
Convert ``f`` from ``K[X][Y]`` to ``K[X,Y]``.
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_inject
>>> R, x,y = ring("x,y", ZZ)
>>> dmp_inject([R(1), x + 2], 0, R.to_domain())
([[[1]], [[1], [2]]], 2)
>>> dmp_inject([R(1), x + 2], 0, R.to_domain(), front=True)
([[[1]], [[1, 2]]], 2)
"""
f, h = dmp_to_dict(f, u), {}
v = K.ngens - 1
for f_monom, g in f.items():
g = g.to_dict()
for g_monom, c in g.items():
if front:
h[g_monom + f_monom] = c
else:
h[f_monom + g_monom] = c
w = u + v + 1
return dmp_from_dict(h, w, K.dom), w
def dmp_eject(f, u, K, front=False):
"""
Convert ``f`` from ``K[X,Y]`` to ``K[X][Y]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_eject
>>> dmp_eject([[[1]], [[1], [2]]], 2, ZZ['x', 'y'])
[1, x + 2]
"""
f, h = dmp_to_dict(f, u), {}
n = K.ngens
v = u - K.ngens + 1
for monom, c in f.items():
if front:
g_monom, f_monom = monom[:n], monom[n:]
else:
g_monom, f_monom = monom[-n:], monom[:-n]
if f_monom in h:
h[f_monom][g_monom] = c
else:
h[f_monom] = {g_monom: c}
for monom, c in h.items():
h[monom] = K(c)
return dmp_from_dict(h, v - 1, K)
def dup_terms_gcd(f, K):
"""
Remove GCD of terms from ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_terms_gcd
>>> f = ZZ.map([1, 0, 1, 0, 0])
>>> dup_terms_gcd(f, ZZ)
(2, [1, 0, 1])
"""
if dup_TC(f, K) or not f:
return 0, f
i = 0
for c in reversed(f):
if not c:
i += 1
else:
break
return i, f[:-i]
def dmp_terms_gcd(f, u, K):
"""
Remove GCD of terms from ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_terms_gcd
>>> f = ZZ.map([[1, 0], [1, 0, 0], [], []])
>>> dmp_terms_gcd(f, 1, ZZ)
((2, 1), [[1], [1, 0]])
"""
if dmp_ground_TC(f, u, K) or dmp_zero_p(f, u):
return (0,)*(u + 1), f
F = dmp_to_dict(f, u)
G = monomial_min(*list(F.keys()))
if all(g == 0 for g in G):
return G, f
f = {}
for monom, coeff in F.items():
f[monomial_div(monom, G)] = coeff
return G, dmp_from_dict(f, u, K)
def _rec_list_terms(g, v, monom):
"""Recursive helper for :func:`dmp_list_terms`."""
d, terms = dmp_degree(g, v), []
if not v:
for i, c in enumerate(g):
if not c:
continue
terms.append((monom + (d - i,), c))
else:
w = v - 1
for i, c in enumerate(g):
terms.extend(_rec_list_terms(c, w, monom + (d - i,)))
return terms
def dmp_list_terms(f, u, K, order=None):
"""
List all non-zero terms from ``f`` in the given order ``order``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_list_terms
>>> f = ZZ.map([[1, 1], [2, 3]])
>>> dmp_list_terms(f, 1, ZZ)
[((1, 1), 1), ((1, 0), 1), ((0, 1), 2), ((0, 0), 3)]
>>> dmp_list_terms(f, 1, ZZ, order='grevlex')
[((1, 1), 1), ((1, 0), 1), ((0, 1), 2), ((0, 0), 3)]
"""
def sort(terms, O):
return sorted(terms, key=lambda term: O(term[0]), reverse=True)
terms = _rec_list_terms(f, u, ())
if not terms:
return [((0,)*(u + 1), K.zero)]
if order is None:
return terms
else:
return sort(terms, monomial_key(order))
def dup_apply_pairs(f, g, h, args, K):
"""
Apply ``h`` to pairs of coefficients of ``f`` and ``g``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_apply_pairs
>>> h = lambda x, y, z: 2*x + y - z
>>> dup_apply_pairs([1, 2, 3], [3, 2, 1], h, (1,), ZZ)
[4, 5, 6]
"""
n, m = len(f), len(g)
if n != m:
if n > m:
g = [K.zero]*(n - m) + g
else:
f = [K.zero]*(m - n) + f
result = []
for a, b in zip(f, g):
result.append(h(a, b, *args))
return dup_strip(result)
def dmp_apply_pairs(f, g, h, args, u, K):
"""
Apply ``h`` to pairs of coefficients of ``f`` and ``g``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_apply_pairs
>>> h = lambda x, y, z: 2*x + y - z
>>> dmp_apply_pairs([[1], [2, 3]], [[3], [2, 1]], h, (1,), 1, ZZ)
[[4], [5, 6]]
"""
if not u:
return dup_apply_pairs(f, g, h, args, K)
n, m, v = len(f), len(g), u - 1
if n != m:
if n > m:
g = dmp_zeros(n - m, v, K) + g
else:
f = dmp_zeros(m - n, v, K) + f
result = []
for a, b in zip(f, g):
result.append(dmp_apply_pairs(a, b, h, args, v, K))
return dmp_strip(result, u)
def dup_slice(f, m, n, K):
    """
    Take a continuous subsequence of terms of ``f`` in ``K[x]``.

    Returns the polynomial formed by the terms of ``f`` whose degree ``d``
    satisfies ``m <= d < n``; all other terms are dropped.
    """
    k = len(f)
    # Translate the degree bounds [m, n) into list indices; coefficients
    # are stored from the highest degree downwards.
    if k >= m:
        M = k - m
    else:
        M = 0
    if k >= n:
        N = k - n
    else:
        N = 0
    f = f[N:M]
    # The sliced segment may start with zero coefficients; strip them so
    # the result is in canonical (normalized) dense form.
    while f and not f[0]:
        f = f[1:]
    if not f:
        return []
    else:
        # Pad with m zeros to restore the degrees of the kept terms.
        return f + [K.zero]*m
def dmp_slice(f, m, n, u, K):
"""Take a continuous subsequence of terms of ``f`` in ``K[X]``. """
return dmp_slice_in(f, m, n, 0, u, K)
def dmp_slice_in(f, m, n, j, u, K):
"""Take a continuous subsequence of terms of ``f`` in ``x_j`` in ``K[X]``. """
if j < 0 or j > u:
raise IndexError("-%s <= j < %s expected, got %s" % (u, u, j))
if not u:
return dup_slice(f, m, n, K)
f, g = dmp_to_dict(f, u), {}
for monom, coeff in f.items():
k = monom[j]
if k < m or k >= n:
monom = monom[:j] + (0,) + monom[j + 1:]
if monom in g:
g[monom] += coeff
else:
g[monom] = coeff
return dmp_from_dict(g, u, K)
def dup_random(n, a, b, K):
"""
Return a polynomial of degree ``n`` with coefficients in ``[a, b]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_random
>>> dup_random(3, -10, 10, ZZ) #doctest: +SKIP
[-2, -8, 9, -4]
"""
f = [ K.convert(random.randint(a, b)) for _ in range(0, n + 1) ]
while not f[0]:
f[0] = K.convert(random.randint(a, b))
    return f
from sympy import oo
from sympy.core import igcd
from sympy.polys.monomials import monomial_min, monomial_div
from sympy.polys.orderings import monomial_key
import random
def poly_LC(f, K):
    """
    Return leading coefficient of ``f``.
    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import poly_LC
    >>> poly_LC([], ZZ)
    0
    >>> poly_LC([ZZ(1), ZZ(2), ZZ(3)], ZZ)
    1
    """
    # The first entry holds the highest-degree coefficient; the zero
    # polynomial (empty list) has leading coefficient zero in K.
    return f[0] if f else K.zero
def poly_TC(f, K):
    """
    Return trailing coefficient of ``f``.
    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import poly_TC
    >>> poly_TC([], ZZ)
    0
    >>> poly_TC([ZZ(1), ZZ(2), ZZ(3)], ZZ)
    3
    """
    # The last entry holds the constant term; the zero polynomial (empty
    # list) has trailing coefficient zero in K.
    return f[-1] if f else K.zero
# The dup_* (univariate) and dmp_* (multivariate) leading/trailing
# coefficient accessors share a single implementation: only the
# representation of the coefficients differs, not the access pattern.
dup_LC = dmp_LC = poly_LC
dup_TC = dmp_TC = poly_TC
def dmp_ground_LC(f, u, K):
    """
    Return the ground leading coefficient.
    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_ground_LC
    >>> f = ZZ.map([[[1], [2, 3]]])
    >>> dmp_ground_LC(f, 2, ZZ)
    1
    """
    # Descend through all u nesting levels, always taking the leading
    # coefficient, until the ground domain is reached.
    for _ in range(u):
        f = dmp_LC(f, K)
    return dup_LC(f, K)
def dmp_ground_TC(f, u, K):
"""
Return the ground trailing coefficient.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_ground_TC
>>> f = ZZ.map([[[1], [2, 3]]])
>>> dmp_ground_TC(f, 2, ZZ)
3
"""
while u:
f = dmp_TC(f, K)
u -= 1
return dup_TC(f, K)
def dmp_true_LT(f, u, K):
"""
Return the leading term ``c * x_1**n_1 ... x_k**n_k``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_true_LT
>>> f = ZZ.map([[4], [2, 0], [3, 0, 0]])
>>> dmp_true_LT(f, 1, ZZ)
((2, 0), 4)
"""
monom = []
while u:
monom.append(len(f) - 1)
f, u = f[0], u - 1
if not f:
monom.append(0)
else:
monom.append(len(f) - 1)
return tuple(monom), dup_LC(f, K)
def dup_degree(f):
    """
    Return the leading degree of ``f`` in ``K[x]``.
    Note that the degree of 0 is negative infinity (the SymPy object -oo).
    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dup_degree
    >>> f = ZZ.map([1, 2, 0, 3])
    >>> dup_degree(f)
    3
    """
    # One coefficient per degree, highest first, so the degree is one less
    # than the length; the zero polynomial is the empty list.
    return len(f) - 1 if f else -oo
def dmp_degree(f, u):
"""
Return the leading degree of ``f`` in ``x_0`` in ``K[X]``.
Note that the degree of 0 is negative infinity (the SymPy object -oo).
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_degree
>>> dmp_degree([[[]]], 2)
-oo
>>> f = ZZ.map([[2], [1, 2, 3]])
>>> dmp_degree(f, 1)
1
"""
if dmp_zero_p(f, u):
return -oo
else:
return len(f) - 1
def _rec_degree_in(g, v, i, j):
"""Recursive helper function for :func:`dmp_degree_in`."""
if i == j:
return dmp_degree(g, v)
v, i = v - 1, i + 1
return max([ _rec_degree_in(c, v, i, j) for c in g ])
def dmp_degree_in(f, j, u):
"""
Return the leading degree of ``f`` in ``x_j`` in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_degree_in
>>> f = ZZ.map([[2], [1, 2, 3]])
>>> dmp_degree_in(f, 0, 1)
1
>>> dmp_degree_in(f, 1, 1)
2
"""
if not j:
return dmp_degree(f, u)
if j < 0 or j > u:
raise IndexError("0 <= j <= %s expected, got %s" % (u, j))
return _rec_degree_in(f, u, 0, j)
def _rec_degree_list(g, v, i, degs):
"""Recursive helper for :func:`dmp_degree_list`."""
degs[i] = max(degs[i], dmp_degree(g, v))
if v > 0:
v, i = v - 1, i + 1
for c in g:
_rec_degree_list(c, v, i, degs)
def dmp_degree_list(f, u):
"""
Return a list of degrees of ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_degree_list
>>> f = ZZ.map([[1], [1, 2, 3]])
>>> dmp_degree_list(f, 1)
(1, 2)
"""
degs = [-oo]*(u + 1)
_rec_degree_list(f, u, 0, degs)
return tuple(degs)
def dup_strip(f):
"""
Remove leading zeros from ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.densebasic import dup_strip
>>> dup_strip([0, 0, 1, 2, 3, 0])
[1, 2, 3, 0]
"""
if not f or f[0]:
return f
i = 0
for cf in f:
if cf:
break
else:
i += 1
return f[i:]
def dmp_strip(f, u):
"""
Remove leading zeros from ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys.densebasic import dmp_strip
>>> dmp_strip([[], [0, 1, 2], [1]], 1)
[[0, 1, 2], [1]]
"""
if not u:
return dup_strip(f)
if dmp_zero_p(f, u):
return f
i, v = 0, u - 1
for c in f:
if not dmp_zero_p(c, v):
break
else:
i += 1
if i == len(f):
return dmp_zero(u)
else:
return f[i:]
def _rec_validate(f, g, i, K):
"""Recursive helper for :func:`dmp_validate`."""
if type(g) is not list:
if K is not None and not K.of_type(g):
raise TypeError("%s in %s in not of type %s" % (g, f, K.dtype))
return {i - 1}
elif not g:
return {i}
else:
levels = set()
for c in g:
levels |= _rec_validate(f, c, i + 1, K)
return levels
def _rec_strip(g, v):
"""Recursive helper for :func:`_rec_strip`."""
if not v:
return dup_strip(g)
w = v - 1
return dmp_strip([ _rec_strip(c, w) for c in g ], v)
def dmp_validate(f, K=None):
"""
Return the number of levels in ``f`` and recursively strip it.
Examples
========
>>> from sympy.polys.densebasic import dmp_validate
>>> dmp_validate([[], [0, 1, 2], [1]])
([[1, 2], [1]], 1)
>>> dmp_validate([[1], 1])
Traceback (most recent call last):
...
ValueError: invalid data structure for a multivariate polynomial
"""
levels = _rec_validate(f, f, 0, K)
u = levels.pop()
if not levels:
return _rec_strip(f, u), u
else:
raise ValueError(
"invalid data structure for a multivariate polynomial")
def dup_reverse(f):
"""
Compute ``x**n * f(1/x)``, i.e.: reverse ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_reverse
>>> f = ZZ.map([1, 2, 3, 0])
>>> dup_reverse(f)
[3, 2, 1]
"""
return dup_strip(list(reversed(f)))
def dup_copy(f):
"""
Create a new copy of a polynomial ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_copy
>>> f = ZZ.map([1, 2, 3, 0])
>>> dup_copy([1, 2, 3, 0])
[1, 2, 3, 0]
"""
return list(f)
def dmp_copy(f, u):
"""
Create a new copy of a polynomial ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_copy
>>> f = ZZ.map([[1], [1, 2]])
>>> dmp_copy(f, 1)
[[1], [1, 2]]
"""
if not u:
return list(f)
v = u - 1
return [ dmp_copy(c, v) for c in f ]
def dup_to_tuple(f):
    """
    Convert `f` into a tuple.
    This is needed for hashing. This is similar to dup_copy().
    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dup_to_tuple
    >>> f = ZZ.map([1, 2, 3, 0])
    >>> dup_to_tuple(f)
    (1, 2, 3, 0)
    """
    # Tuples are hashable while lists are not, so this is the canonical
    # hashable form of a dense univariate polynomial.
    return tuple(f)
def dmp_to_tuple(f, u):
"""
Convert `f` into a nested tuple of tuples.
This is needed for hashing. This is similar to dmp_copy().
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_to_tuple
>>> f = ZZ.map([[1], [1, 2]])
>>> dmp_to_tuple(f, 1)
((1,), (1, 2))
"""
if not u:
return tuple(f)
v = u - 1
return tuple(dmp_to_tuple(c, v) for c in f)
def dup_normal(f, K):
    """
    Normalize univariate polynomial in the given domain.

    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dup_normal
    >>> dup_normal([0, 1.5, 2, 3], ZZ)
    [1, 2, 3]
    """
    normalized = [K.normal(c) for c in f]
    return dup_strip(normalized)
def dmp_normal(f, u, K):
    """
    Normalize a multivariate polynomial in the given domain.

    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_normal
    >>> dmp_normal([[], [0, 1.5, 2]], 1, ZZ)
    [[1, 2]]
    """
    if u:
        return dmp_strip([dmp_normal(c, u - 1, K) for c in f], u)
    return dup_normal(f, K)
def dup_convert(f, K0, K1):
    """
    Convert the ground domain of ``f`` from ``K0`` to ``K1``.

    Examples
    ========
    >>> from sympy.polys.rings import ring
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dup_convert
    >>> R, x = ring("x", ZZ)
    >>> dup_convert([R(1), R(2)], R.to_domain(), ZZ)
    [1, 2]
    >>> dup_convert([ZZ(1), ZZ(2)], ZZ, R.to_domain())
    [1, 2]
    """
    # Identical domains: nothing to convert.
    if K0 is not None and K0 == K1:
        return f
    return dup_strip([K1.convert(c, K0) for c in f])
def dmp_convert(f, u, K0, K1):
    """
    Convert the ground domain of ``f`` from ``K0`` to ``K1``.

    Examples
    ========
    >>> from sympy.polys.rings import ring
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_convert
    >>> R, x = ring("x", ZZ)
    >>> dmp_convert([[R(1)], [R(2)]], 1, R.to_domain(), ZZ)
    [[1], [2]]
    >>> dmp_convert([[ZZ(1)], [ZZ(2)]], 1, ZZ, R.to_domain())
    [[1], [2]]
    """
    if not u:
        return dup_convert(f, K0, K1)
    # Identical domains: nothing to convert.
    if K0 is not None and K0 == K1:
        return f
    return dmp_strip([dmp_convert(c, u - 1, K0, K1) for c in f], u)
def dup_from_sympy(f, K):
    """
    Convert the ground domain of ``f`` from SymPy to ``K``.

    Examples
    ========
    >>> from sympy import S
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dup_from_sympy
    >>> dup_from_sympy([S(1), S(2)], ZZ) == [ZZ(1), ZZ(2)]
    True
    """
    return dup_strip(list(map(K.from_sympy, f)))
def dmp_from_sympy(f, u, K):
    """
    Convert the ground domain of ``f`` from SymPy to ``K``.

    Examples
    ========
    >>> from sympy import S
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_from_sympy
    >>> dmp_from_sympy([[S(1)], [S(2)]], 1, ZZ) == [[ZZ(1)], [ZZ(2)]]
    True
    """
    if u:
        return dmp_strip([dmp_from_sympy(c, u - 1, K) for c in f], u)
    return dup_from_sympy(f, K)
def dup_nth(f, n, K):
    """
    Return the ``n``-th coefficient of ``f`` in ``K[x]``.

    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dup_nth
    >>> f = ZZ.map([1, 2, 3])
    >>> dup_nth(f, 0, ZZ)
    3
    >>> dup_nth(f, 4, ZZ)
    0
    """
    # Guard clauses: negative index is an error, index past the degree
    # addresses an absent (zero) term.
    if n < 0:
        raise IndexError("'n' must be non-negative, got %i" % n)
    if n >= len(f):
        return K.zero
    return f[dup_degree(f) - n]
def dmp_nth(f, n, u, K):
    """
    Return the ``n``-th coefficient of ``f`` in ``K[x]``.

    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_nth
    >>> f = ZZ.map([[1], [2], [3]])
    >>> dmp_nth(f, 0, 1, ZZ)
    [3]
    >>> dmp_nth(f, 4, 1, ZZ)
    []
    """
    # Guard clauses: negative index is an error, index past the degree
    # yields the zero polynomial one level down.
    if n < 0:
        raise IndexError("'n' must be non-negative, got %i" % n)
    if n >= len(f):
        return dmp_zero(u - 1)
    return f[dmp_degree(f, u) - n]
def dmp_ground_nth(f, N, u, K):
    """
    Return the ground ``n``-th coefficient of ``f`` in ``K[x]``.

    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_ground_nth
    >>> f = ZZ.map([[1], [2, 3]])
    >>> dmp_ground_nth(f, (0, 1), 1, ZZ)
    2
    """
    v = u
    # Descend one level per index in ``N`` until a ground coefficient
    # is reached.
    for n in N:
        if n < 0:
            raise IndexError("`n` must be non-negative, got %i" % n)
        if n >= len(f):
            return K.zero
        d = dmp_degree(f, v)
        if d == -oo:
            d = -1
        f, v = f[d - n], v - 1
    return f
def dmp_zero_p(f, u):
    """
    Return ``True`` if ``f`` is zero in ``K[X]``.

    Examples
    ========
    >>> from sympy.polys.densebasic import dmp_zero_p
    >>> dmp_zero_p([[[[[]]]]], 4)
    True
    >>> dmp_zero_p([[[[[1]]]]], 4)
    False
    """
    # The zero polynomial in u+1 variables is a chain of singleton
    # lists ending in an empty list.
    for _ in range(u):
        if len(f) != 1:
            return False
        f = f[0]
    return not f
def dmp_zero(u):
    """
    Return a multivariate zero.

    Examples
    ========
    >>> from sympy.polys.densebasic import dmp_zero
    >>> dmp_zero(4)
    [[[[[]]]]]
    """
    result = []
    while u > 0:
        result = [result]
        u -= 1
    return result
def dmp_one_p(f, u, K):
    """
    Return ``True`` if ``f`` is one in ``K[X]``.
    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_one_p
    >>> dmp_one_p([[[ZZ(1)]]], 2, ZZ)
    True
    """
    # ``f`` is one exactly when it is the ground constant ``K.one``.
    return dmp_ground_p(f, K.one, u)
def dmp_one(u, K):
    """
    Return a multivariate one over ``K``.
    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_one
    >>> dmp_one(2, ZZ)
    [[[1]]]
    """
    # The multiplicative identity is ``K.one`` nested ``u`` levels deep.
    return dmp_ground(K.one, u)
def dmp_ground_p(f, c, u):
    """
    Return True if ``f`` is constant in ``K[X]``.

    If ``c`` is ``None``, any ground constant matches; otherwise the
    constant must equal ``c``.

    Examples
    ========
    >>> from sympy.polys.densebasic import dmp_ground_p
    >>> dmp_ground_p([[[3]]], 3, 2)
    True
    >>> dmp_ground_p([[[4]]], None, 2)
    True
    """
    # A falsy (zero) target constant reduces to a zero test.
    if c is not None and not c:
        return dmp_zero_p(f, u)
    for _ in range(u):
        if len(f) != 1:
            return False
        f = f[0]
    return len(f) <= 1 if c is None else f == [c]
def dmp_ground(c, u):
    """
    Return a multivariate constant.

    Examples
    ========
    >>> from sympy.polys.densebasic import dmp_ground
    >>> dmp_ground(3, 5)
    [[[[[[3]]]]]]
    >>> dmp_ground(1, -1)
    1
    """
    if not c:
        return dmp_zero(u)
    # Wrap the constant u+1 times; for u == -1 it is returned bare.
    result = c
    for _ in range(u + 1):
        result = [result]
    return result
def dmp_zeros(n, u, K):
    """
    Return a list of multivariate zeros.

    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_zeros
    >>> dmp_zeros(3, 2, ZZ)
    [[[[]]], [[[]]], [[[]]]]
    >>> dmp_zeros(3, -1, ZZ)
    [0, 0, 0]
    """
    if not n:
        return []
    # Negative level means ground elements, not nested lists.
    if u < 0:
        return [K.zero]*n
    return [dmp_zero(u) for _ in range(n)]
def dmp_grounds(c, n, u):
    """
    Return a list of multivariate constants.

    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_grounds
    >>> dmp_grounds(ZZ(4), 3, 2)
    [[[[4]]], [[[4]]], [[[4]]]]
    >>> dmp_grounds(ZZ(4), 3, -1)
    [4, 4, 4]
    """
    if not n:
        return []
    # Negative level means bare ground elements.
    if u < 0:
        return [c]*n
    return [dmp_ground(c, u) for _ in range(n)]
def dmp_negative_p(f, u, K):
    """
    Return ``True`` if ``LC(f)`` is negative.
    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_negative_p
    >>> dmp_negative_p([[ZZ(1)], [-ZZ(1)]], 1, ZZ)
    False
    >>> dmp_negative_p([[-ZZ(1)], [ZZ(1)]], 1, ZZ)
    True
    """
    # The sign of a multivariate polynomial is that of its ground
    # leading coefficient, as judged by the domain.
    return K.is_negative(dmp_ground_LC(f, u, K))
def dmp_positive_p(f, u, K):
    """
    Return ``True`` if ``LC(f)`` is positive.
    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_positive_p
    >>> dmp_positive_p([[ZZ(1)], [-ZZ(1)]], 1, ZZ)
    True
    >>> dmp_positive_p([[-ZZ(1)], [ZZ(1)]], 1, ZZ)
    False
    """
    # Mirror of ``dmp_negative_p``: test the ground leading coefficient.
    return K.is_positive(dmp_ground_LC(f, u, K))
def dup_from_dict(f, K):
    """
    Create a ``K[x]`` polynomial from a ``dict``.

    Keys may be plain integers or 1-tuples of integers.

    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dup_from_dict
    >>> dup_from_dict({(0,): ZZ(7), (2,): ZZ(5), (4,): ZZ(1)}, ZZ)
    [1, 0, 5, 0, 7]
    >>> dup_from_dict({}, ZZ)
    []
    """
    if not f:
        return []
    n = max(f.keys())
    if type(n) is int:
        coeffs = [f.get(k, K.zero) for k in range(n, -1, -1)]
    else:
        (n,) = n
        coeffs = [f.get((k,), K.zero) for k in range(n, -1, -1)]
    return dup_strip(coeffs)
def dup_from_raw_dict(f, K):
    """
    Create a ``K[x]`` polynomial from a raw ``dict`` (integer keys).

    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dup_from_raw_dict
    >>> dup_from_raw_dict({0: ZZ(7), 2: ZZ(5), 4: ZZ(1)}, ZZ)
    [1, 0, 5, 0, 7]
    """
    if not f:
        return []
    n = max(f.keys())
    return dup_strip([f.get(k, K.zero) for k in range(n, -1, -1)])
def dmp_from_dict(f, u, K):
    """
    Create a ``K[X]`` polynomial from a ``dict``.

    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_from_dict
    >>> dmp_from_dict({(0, 0): ZZ(3), (0, 1): ZZ(2), (2, 1): ZZ(1)}, 1, ZZ)
    [[1, 0], [], [2, 3]]
    >>> dmp_from_dict({}, 0, ZZ)
    []
    """
    if not u:
        return dup_from_dict(f, K)
    if not f:
        return dmp_zero(u)
    # Group coefficients by the exponent of the outermost variable;
    # the remaining exponents form sub-dicts handled recursively.
    coeffs = {}
    for monom, coeff in f.items():
        coeffs.setdefault(monom[0], {})[monom[1:]] = coeff
    n, v = max(coeffs), u - 1
    h = []
    for k in range(n, -1, -1):
        sub = coeffs.get(k)
        h.append(dmp_from_dict(sub, v, K) if sub is not None else dmp_zero(v))
    return dmp_strip(h, u)
def dup_to_dict(f, K=None, zero=False):
    """
    Convert ``K[x]`` polynomial to a ``dict``.

    Examples
    ========
    >>> from sympy.polys.densebasic import dup_to_dict
    >>> dup_to_dict([1, 0, 5, 0, 7])
    {(0,): 7, (2,): 5, (4,): 1}
    >>> dup_to_dict([])
    {}
    """
    # ``zero=True`` asks for an explicit zero entry for the empty poly.
    if not f and zero:
        return {(0,): K.zero}
    n = len(f) - 1
    return {(n - i,): c for i, c in enumerate(f) if c}
def dup_to_raw_dict(f, K=None, zero=False):
    """
    Convert a ``K[x]`` polynomial to a raw ``dict`` (integer keys).

    Examples
    ========
    >>> from sympy.polys.densebasic import dup_to_raw_dict
    >>> dup_to_raw_dict([1, 0, 5, 0, 7])
    {0: 7, 2: 5, 4: 1}
    """
    # ``zero=True`` asks for an explicit zero entry for the empty poly.
    if not f and zero:
        return {0: K.zero}
    n = len(f) - 1
    return {n - i: c for i, c in enumerate(f) if c}
def dmp_to_dict(f, u, K=None, zero=False):
    """
    Convert a ``K[X]`` polynomial to a ``dict``.

    Examples
    ========
    >>> from sympy.polys.densebasic import dmp_to_dict
    >>> dmp_to_dict([[1, 0], [], [2, 3]], 1)
    {(0, 0): 3, (0, 1): 2, (2, 1): 1}
    >>> dmp_to_dict([], 0)
    {}
    """
    if not u:
        return dup_to_dict(f, K, zero=zero)
    if dmp_zero_p(f, u) and zero:
        return {(0,)*(u + 1): K.zero}
    n = dmp_degree(f, u)
    if n == -oo:
        n = -1
    result, v = {}, u - 1
    # Prefix each inner monomial with this level's exponent ``k``.
    for k in range(n + 1):
        for exp, coeff in dmp_to_dict(f[n - k], v).items():
            result[(k,) + exp] = coeff
    return result
def dmp_swap(f, i, j, u, K):
    """
    Transform ``K[..x_i..x_j..]`` to ``K[..x_j..x_i..]``.

    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_swap
    >>> f = ZZ.map([[[2], [1, 0]], []])
    >>> dmp_swap(f, 0, 1, 2, ZZ)
    [[[2], []], [[1, 0], []]]
    >>> dmp_swap(f, 1, 2, 2, ZZ)
    [[[1], [2, 0]], [[]]]
    >>> dmp_swap(f, 0, 2, 2, ZZ)
    [[[1, 0]], [[2, 0], []]]
    """
    if i < 0 or j < 0 or i > u or j > u:
        raise IndexError("0 <= i < j <= %s expected" % u)
    elif i == j:
        return f
    F, H = dmp_to_dict(f, u), {}
    for exp, coeff in F.items():
        # Swap the two exponent positions symmetrically.  The previous
        # slice-based reconstruction silently produced corrupt monomials
        # when ``i > j``; this form is correct for any order of i, j and
        # identical to the old one for the documented ``i < j`` case.
        swapped = list(exp)
        swapped[i], swapped[j] = swapped[j], swapped[i]
        H[tuple(swapped)] = coeff
    return dmp_from_dict(H, u, K)
def dmp_permute(f, P, u, K):
    """
    Return a polynomial in ``K[x_{P(1)},..,x_{P(n)}]``.

    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_permute
    >>> f = ZZ.map([[[2], [1, 0]], []])
    >>> dmp_permute(f, [1, 0, 2], 2, ZZ)
    [[[2], []], [[1, 0], []]]
    >>> dmp_permute(f, [1, 2, 0], 2, ZZ)
    [[[1], []], [[2, 0], []]]
    """
    H = {}
    # Move the exponent at position k of each monomial to position P[k].
    for exp, coeff in dmp_to_dict(f, u).items():
        image = [0]*len(exp)
        for e, p in zip(exp, P):
            image[p] = e
        H[tuple(image)] = coeff
    return dmp_from_dict(H, u, K)
def dmp_nest(f, l, K):
    """
    Return a multivariate value nested ``l``-levels.

    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_nest
    >>> dmp_nest([[ZZ(1)]], 2, ZZ)
    [[[[1]]]]
    """
    # Non-list input is a ground element: build the constant directly.
    if not isinstance(f, list):
        return dmp_ground(f, l)
    result = f
    for _ in range(l):
        result = [result]
    return result
def dmp_raise(f, l, u, K):
    """
    Return a multivariate polynomial raised ``l``-levels.

    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_raise
    >>> f = ZZ.map([[], [1, 2]])
    >>> dmp_raise(f, 2, 1, ZZ)
    [[[[]]], [[[1]], [[2]]]]
    """
    if not l:
        return f
    # At the ground level, lift each coefficient into an l-1 deep constant.
    if not u:
        if not f:
            return dmp_zero(l)
        return [dmp_ground(c, l - 1) for c in f]
    return [dmp_raise(c, l, u - 1, K) for c in f]
def dup_deflate(f, K):
    """
    Map ``x**m`` to ``y`` in a polynomial in ``K[x]``.

    Returns the largest ``m`` dividing every occurring exponent, together
    with the deflated polynomial.

    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dup_deflate
    >>> f = ZZ.map([1, 0, 0, 1, 0, 0, 1])
    >>> dup_deflate(f, ZZ)
    (3, [1, 1, 1])
    """
    if dup_degree(f) <= 0:
        return 1, f
    g = 0
    # GCD of all exponents carrying a non-zero coefficient; bail out as
    # soon as it collapses to 1.
    for i, c in enumerate(reversed(f)):
        if not c:
            continue
        g = igcd(g, i)
        if g == 1:
            return 1, f
    return g, f[::g]
def dmp_deflate(f, u, K):
    """
    Map ``x_i**m_i`` to ``y_i`` in a polynomial in ``K[X]``.

    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_deflate
    >>> f = ZZ.map([[1, 0, 0, 2], [], [3, 0, 0, 4]])
    >>> dmp_deflate(f, 1, ZZ)
    ((2, 3), [[1, 2], [3, 4]])
    """
    if dmp_zero_p(f, u):
        return (1,)*(u + 1), f
    F = dmp_to_dict(f, u)
    # Per-variable GCD of exponents; 0 (variable absent) maps to 1.
    B = [0]*(u + 1)
    for monom in F:
        for i, m in enumerate(monom):
            B[i] = igcd(B[i], m)
    B = tuple(b if b else 1 for b in B)
    if all(b == 1 for b in B):
        return B, f
    H = {tuple(a // b for a, b in zip(A, B)): coeff for A, coeff in F.items()}
    return B, dmp_from_dict(H, u, K)
def dup_multi_deflate(polys, K):
    """
    Map ``x**m`` to ``y`` in a set of polynomials in ``K[x]``.

    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dup_multi_deflate
    >>> f = ZZ.map([1, 0, 2, 0, 3])
    >>> g = ZZ.map([4, 0, 0])
    >>> dup_multi_deflate((f, g), ZZ)
    (2, ([1, 2, 3], [4, 0]))
    """
    G = 0
    for p in polys:
        # A constant (or zero) member forces the trivial deflation.
        if dup_degree(p) <= 0:
            return 1, polys
        g = 0
        for i, c in enumerate(reversed(p)):
            if not c:
                continue
            g = igcd(g, i)
            if g == 1:
                return 1, polys
        G = igcd(G, g)
    return G, tuple(p[::G] for p in polys)
def dmp_multi_deflate(polys, u, K):
    """
    Map ``x_i**m_i`` to ``y_i`` in a set of polynomials in ``K[X]``.
    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_multi_deflate
    >>> f = ZZ.map([[1, 0, 0, 2], [], [3, 0, 0, 4]])
    >>> g = ZZ.map([[1, 0, 2], [], [3, 0, 4]])
    >>> dmp_multi_deflate((f, g), 1, ZZ)
    ((2, 1), ([[1, 0, 0, 2], [3, 0, 0, 4]], [[1, 0, 2], [3, 0, 4]]))
    """
    # Univariate case: delegate and wrap the single exponent in a tuple.
    if not u:
        M, H = dup_multi_deflate(polys, K)
        return (M,), H
    # B[i] accumulates the GCD of the i-th exponents over ALL polynomials;
    # F caches each polynomial's dict form for reuse below.
    F, B = [], [0]*(u + 1)
    for p in polys:
        f = dmp_to_dict(p, u)
        # Zero polynomials contribute no exponents to the GCDs.
        if not dmp_zero_p(p, u):
            for M in f.keys():
                for i, m in enumerate(M):
                    B[i] = igcd(B[i], m)
        F.append(f)
    # A GCD of 0 means the variable never occurred: leave it unscaled.
    for i, b in enumerate(B):
        if not b:
            B[i] = 1
    B = tuple(B)
    # Nothing to deflate: hand back the inputs untouched.
    if all(b == 1 for b in B):
        return B, polys
    # Divide every exponent vector componentwise by B and rebuild.
    H = []
    for f in F:
        h = {}
        for A, coeff in f.items():
            N = [ a // b for a, b in zip(A, B) ]
            h[tuple(N)] = coeff
        H.append(dmp_from_dict(h, u, K))
    return B, tuple(H)
def dup_inflate(f, m, K):
    """
    Map ``y`` to ``x**m`` in a polynomial in ``K[x]``.

    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dup_inflate
    >>> f = ZZ.map([1, 1, 1])
    >>> dup_inflate(f, 3, ZZ)
    [1, 0, 0, 1, 0, 0, 1]
    """
    if m <= 0:
        raise IndexError("'m' must be positive, got %s" % m)
    if m == 1 or not f:
        return f
    # Insert m-1 zeros between consecutive coefficients.
    gap = [K.zero]*(m - 1)
    result = [f[0]]
    for coeff in f[1:]:
        result += gap + [coeff]
    return result
def _rec_inflate(g, M, v, i, K):
    """Recursive helper for :func:`dmp_inflate`: inflate level ``i`` of
    ``g`` by ``M[i]`` after inflating all deeper levels."""
    if not v:
        return dup_inflate(g, M[i], K)
    if M[i] <= 0:
        raise IndexError("all M[i] must be positive, got %s" % M[i])
    w = v - 1
    inflated = [_rec_inflate(c, M, w, i + 1, K) for c in g]
    # Insert M[i]-1 zero polynomials between consecutive coefficients.
    result = [inflated[0]]
    for coeff in inflated[1:]:
        result.extend(dmp_zero(w) for _ in range(M[i] - 1))
        result.append(coeff)
    return result
def dmp_inflate(f, M, u, K):
    """
    Map ``y_i`` to ``x_i**k_i`` in a polynomial in ``K[X]``.

    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_inflate
    >>> f = ZZ.map([[1, 2], [3, 4]])
    >>> dmp_inflate(f, (2, 3), 1, ZZ)
    [[1, 0, 0, 2], [], [3, 0, 0, 4]]
    """
    if not u:
        return dup_inflate(f, M[0], K)
    # Any multiplier above 1 requires the recursive inflation pass.
    if any(m != 1 for m in M):
        return _rec_inflate(f, M, u, 0, K)
    return f
def dmp_exclude(f, u, K):
    """
    Exclude useless levels from ``f``.
    Return the levels excluded, the new excluded ``f``, and the new ``u``.
    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_exclude
    >>> f = ZZ.map([[[1]], [[1], [2]]])
    >>> dmp_exclude(f, 2, ZZ)
    ([2], [[1], [1, 2]], 1)
    """
    # Univariate or constant polynomials have no levels worth dropping.
    if not u or dmp_ground_p(f, None, u):
        return [], f, u
    J, F = [], dmp_to_dict(f, u)
    # Collect indices j whose exponent is zero in EVERY monomial
    # (for-else: the else runs only when no monomial broke the loop,
    # i.e. variable j never occurs).
    for j in range(0, u + 1):
        for monom in F.keys():
            if monom[j]:
                break
        else:
            J.append(j)
    if not J:
        return [], f, u
    # Rebuild monomials with the unused positions removed; deleting from
    # the right (reversed J) keeps the earlier indices valid.
    f = {}
    for monom, coeff in F.items():
        monom = list(monom)
        for j in reversed(J):
            del monom[j]
        f[tuple(monom)] = coeff
    u -= len(J)
    return J, dmp_from_dict(f, u, K), u
def dmp_include(f, J, u, K):
    """
    Include useless levels in ``f`` (inverse of :func:`dmp_exclude`).

    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_include
    >>> f = ZZ.map([[1], [1, 2]])
    >>> dmp_include(f, [2], 1, ZZ)
    [[[1]], [[1], [2]]]
    """
    if not J:
        return f
    result = {}
    # Re-insert a zero exponent at each requested position.
    for monom, coeff in dmp_to_dict(f, u).items():
        expanded = list(monom)
        for j in J:
            expanded.insert(j, 0)
        result[tuple(expanded)] = coeff
    return dmp_from_dict(result, u + len(J), K)
def dmp_inject(f, u, K, front=False):
    """
    Convert ``f`` from ``K[X][Y]`` to ``K[X,Y]``.
    Examples
    ========
    >>> from sympy.polys.rings import ring
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_inject
    >>> R, x,y = ring("x,y", ZZ)
    >>> dmp_inject([R(1), x + 2], 0, R.to_domain())
    ([[[1]], [[1], [2]]], 2)
    >>> dmp_inject([R(1), x + 2], 0, R.to_domain(), front=True)
    ([[[1]], [[1, 2]]], 2)
    """
    f, h = dmp_to_dict(f, u), {}
    # Extra levels contributed by the ground ring's generators.
    v = K.ngens - 1
    for f_monom, g in f.items():
        g = g.to_dict()
        # Concatenate each ground coefficient's monomial with the outer
        # monomial; ``front`` decides which variable block comes first.
        for g_monom, c in g.items():
            if front:
                h[g_monom + f_monom] = c
            else:
                h[f_monom + g_monom] = c
    w = u + v + 1
    return dmp_from_dict(h, w, K.dom), w
def dmp_eject(f, u, K, front=False):
    """
    Convert ``f`` from ``K[X,Y]`` to ``K[X][Y]``.
    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_eject
    >>> dmp_eject([[[1]], [[1], [2]]], 2, ZZ['x', 'y'])
    [1, x + 2]
    """
    f, h = dmp_to_dict(f, u), {}
    n = K.ngens
    # Number of levels left once the ground generators are ejected.
    v = u - K.ngens + 1
    for monom, c in f.items():
        # Split each monomial into the part absorbed into the ground
        # ring (g_monom) and the part that stays polynomial (f_monom).
        if front:
            g_monom, f_monom = monom[:n], monom[n:]
        else:
            g_monom, f_monom = monom[-n:], monom[:-n]
        if f_monom in h:
            h[f_monom][g_monom] = c
        else:
            h[f_monom] = {g_monom: c}
    # Convert each collected dict of ground monomials into a ground
    # element (replacing values in-place is safe during items()).
    for monom, c in h.items():
        h[monom] = K(c)
    return dmp_from_dict(h, v - 1, K)
def dup_terms_gcd(f, K):
    """
    Remove GCD of terms from ``f`` in ``K[x]``.

    Returns the number of trailing zero coefficients (the power of ``x``
    dividing all terms) and the quotient polynomial.

    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dup_terms_gcd
    >>> f = ZZ.map([1, 0, 1, 0, 0])
    >>> dup_terms_gcd(f, ZZ)
    (2, [1, 0, 1])
    """
    # Non-zero trailing coefficient (or empty poly): nothing to factor out.
    if dup_TC(f, K) or not f:
        return 0, f
    count = 0
    for c in reversed(f):
        if c:
            break
        count += 1
    return count, f[:-count]
def dmp_terms_gcd(f, u, K):
    """
    Remove GCD of terms from ``f`` in ``K[X]``.

    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_terms_gcd
    >>> f = ZZ.map([[1, 0], [1, 0, 0], [], []])
    >>> dmp_terms_gcd(f, 1, ZZ)
    ((2, 1), [[1], [1, 0]])
    """
    if dmp_ground_TC(f, u, K) or dmp_zero_p(f, u):
        return (0,)*(u + 1), f
    F = dmp_to_dict(f, u)
    # Componentwise minimum of all monomials is the monomial GCD.
    G = monomial_min(*list(F.keys()))
    if not any(G):
        return G, f
    H = {monomial_div(monom, G): coeff for monom, coeff in F.items()}
    return G, dmp_from_dict(H, u, K)
def _rec_list_terms(g, v, monom):
    """Recursive helper for :func:`dmp_list_terms`: collect the non-zero
    terms of ``g`` with ``monom`` as the exponent prefix."""
    d = dmp_degree(g, v)
    if not v:
        # Ground level: position i holds the coefficient of degree d - i.
        return [(monom + (d - i,), c) for i, c in enumerate(g) if c]
    terms = []
    for i, c in enumerate(g):
        terms.extend(_rec_list_terms(c, v - 1, monom + (d - i,)))
    return terms
def dmp_list_terms(f, u, K, order=None):
    """
    List all non-zero terms from ``f`` in the given order ``order``.

    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_list_terms
    >>> f = ZZ.map([[1, 1], [2, 3]])
    >>> dmp_list_terms(f, 1, ZZ)
    [((1, 1), 1), ((1, 0), 1), ((0, 1), 2), ((0, 0), 3)]
    >>> dmp_list_terms(f, 1, ZZ, order='grevlex')
    [((1, 1), 1), ((1, 0), 1), ((0, 1), 2), ((0, 0), 3)]
    """
    terms = _rec_list_terms(f, u, ())
    # The zero polynomial is reported as a single explicit zero term.
    if not terms:
        return [((0,)*(u + 1), K.zero)]
    if order is None:
        return terms
    O = monomial_key(order)
    return sorted(terms, key=lambda term: O(term[0]), reverse=True)
def dup_apply_pairs(f, g, h, args, K):
    """
    Apply ``h`` to pairs of coefficients of ``f`` and ``g``.

    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dup_apply_pairs
    >>> h = lambda x, y, z: 2*x + y - z
    >>> dup_apply_pairs([1, 2, 3], [3, 2, 1], h, (1,), ZZ)
    [4, 5, 6]
    """
    # Left-pad the shorter polynomial with zeros so terms line up.
    n, m = len(f), len(g)
    if n > m:
        g = [K.zero]*(n - m) + g
    elif m > n:
        f = [K.zero]*(m - n) + f
    return dup_strip([h(a, b, *args) for a, b in zip(f, g)])
def dmp_apply_pairs(f, g, h, args, u, K):
    """
    Apply ``h`` to pairs of coefficients of ``f`` and ``g``.

    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_apply_pairs
    >>> h = lambda x, y, z: 2*x + y - z
    >>> dmp_apply_pairs([[1], [2, 3]], [[3], [2, 1]], h, (1,), 1, ZZ)
    [[4], [5, 6]]
    """
    if not u:
        return dup_apply_pairs(f, g, h, args, K)
    v = u - 1
    # Left-pad the shorter polynomial with zeros so terms line up.
    n, m = len(f), len(g)
    if n > m:
        g = dmp_zeros(n - m, v, K) + g
    elif m > n:
        f = dmp_zeros(m - n, v, K) + f
    paired = [dmp_apply_pairs(a, b, h, args, v, K) for a, b in zip(f, g)]
    return dmp_strip(paired, u)
def dup_slice(f, m, n, K):
    """Take a continuous subsequence of terms of ``f`` in ``K[x]``.

    Keeps the terms of degree ``m`` (inclusive) up to ``n`` (exclusive)
    and pads ``m`` low-order zeros so degrees stay aligned.
    """
    k = len(f)
    M = k - m if k >= m else 0
    N = k - n if k >= n else 0
    window = f[N:M]
    if not window:
        return []
    return window + [K.zero]*m
def dmp_slice(f, m, n, u, K):
    """Take a continuous subsequence of terms of ``f`` in ``K[X]``. """
    # Slice with respect to the outermost variable (index 0).
    return dmp_slice_in(f, m, n, 0, u, K)
def dmp_slice_in(f, m, n, j, u, K):
    """Take a continuous subsequence of terms of ``f`` in ``x_j`` in ``K[X]``.

    Terms whose ``x_j`` exponent falls outside ``[m, n)`` have that
    exponent zeroed, merging them into the constant slice.
    """
    if j < 0 or j > u:
        raise IndexError("-%s <= j < %s expected, got %s" % (u, u, j))
    if not u:
        return dup_slice(f, m, n, K)
    g = {}
    for monom, coeff in dmp_to_dict(f, u).items():
        k = monom[j]
        if not (m <= k < n):
            monom = monom[:j] + (0,) + monom[j + 1:]
        if monom in g:
            g[monom] += coeff
        else:
            g[monom] = coeff
    return dmp_from_dict(g, u, K)
def dup_random(n, a, b, K):
    """
    Return a polynomial of degree ``n`` with coefficients in ``[a, b]``.

    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dup_random
    >>> dup_random(3, -10, 10, ZZ) #doctest: +SKIP
    [-2, -8, 9, -4]
    """
    f = [ K.convert(random.randint(a, b)) for _ in range(0, n + 1) ]
    # Redraw the leading coefficient until it is non-zero, so the result
    # really has degree ``n``.
    while not f[0]:
        f[0] = K.convert(random.randint(a, b))
    # Fixed: the return statement had dataset table columns fused into it
    # ("return f | 0.861756 | 0.450239"), which would raise TypeError.
    return f
import mmcv
import numpy as np
import trimesh
from os import path as osp
def _write_ply(points, out_filename):
"""Write points into ``ply`` format for meshlab visualization.
Args:
points (np.ndarray): Points in shape (N, dim).
out_filename (str): Filename to be saved.
"""
N = points.shape[0]
fout = open(out_filename, 'w')
for i in range(N):
if points.shape[1] == 6:
c = points[i, 3:].astype(int)
fout.write(
'v %f %f %f %d %d %d\n' %
(points[i, 0], points[i, 1], points[i, 2], c[0], c[1], c[2]))
else:
fout.write('v %f %f %f\n' %
(points[i, 0], points[i, 1], points[i, 2]))
fout.close()
def _write_oriented_bbox(scene_bbox, out_filename):
    """Export oriented (around Z axis) scene bbox to meshes.
    Args:
        scene_bbox(list[ndarray] or ndarray): xyz pos of center and
            3 lengths (dx,dy,dz) and heading angle around Z axis.
            Y forward, X right, Z upward. heading angle of positive X is 0,
            heading angle of positive Y is 90 degrees.
        out_filename(str): Filename.
    """
    def heading2rotmat(heading_angle):
        # Rotation matrix for a rotation about the Z axis (radians).
        rotmat = np.zeros((3, 3))
        rotmat[2, 2] = 1
        cosval = np.cos(heading_angle)
        sinval = np.sin(heading_angle)
        rotmat[0:2, 0:2] = np.array([[cosval, -sinval], [sinval, cosval]])
        return rotmat
    def convert_oriented_box_to_trimesh_fmt(box):
        # box layout: [cx, cy, cz, dx, dy, dz, heading]
        ctr = box[:3]
        lengths = box[3:6]
        trns = np.eye(4)
        trns[0:3, 3] = ctr
        trns[3, 3] = 1.0
        trns[0:3, 0:3] = heading2rotmat(box[6])
        box_trimesh_fmt = trimesh.creation.box(lengths, trns)
        return box_trimesh_fmt
    # An empty scene still needs one (degenerate) box so trimesh can
    # emit a valid mesh file.
    if len(scene_bbox) == 0:
        scene_bbox = np.zeros((1, 7))
    scene = trimesh.scene.Scene()
    for box in scene_bbox:
        scene.add_geometry(convert_oriented_box_to_trimesh_fmt(box))
    mesh_list = trimesh.util.concatenate(scene.dump())
    # save to ply file
    # NOTE(review): ``trimesh.io.export`` is trimesh's legacy export
    # path — confirm it still exists in the pinned trimesh version.
    trimesh.io.export.export_mesh(mesh_list, out_filename, file_type='ply')
    return
def show_result(points, gt_bboxes, pred_bboxes, out_dir, filename, show=True):
    """Convert results into format that is directly readable for meshlab.

    Args:
        points (np.ndarray): Points.
        gt_bboxes (np.ndarray): Ground truth boxes.
        pred_bboxes (np.ndarray): Predicted boxes.
        out_dir (str): Path of output directory
        filename (str): Filename of the current frame.
        show (bool): Visualize the results online.

    NOTE(review): ``gt_bboxes``/``pred_bboxes`` are modified in place
    below (center shift and yaw flip) — callers should pass copies if
    they reuse the arrays.
    """
    if show:
        from .open3d_vis import Visualizer
        vis = Visualizer(points)
        if pred_bboxes is not None:
            vis.add_bboxes(bbox3d=pred_bboxes)
        if gt_bboxes is not None:
            vis.add_bboxes(bbox3d=gt_bboxes, bbox_color=(0, 0, 1))
        vis.show()
    result_path = osp.join(out_dir, filename)
    mmcv.mkdir_or_exist(result_path)
    # Fixed: the output names were placeholder-less f-strings (the
    # ``{filename}`` interpolation had been lost), so every frame
    # overwrote the same files; restored per upstream mmdet3d. Also
    # removed stray non-Python residue fused onto the last line.
    if points is not None:
        _write_ply(points, osp.join(result_path, f'{filename}_points.obj'))
    if gt_bboxes is not None:
        # bottom center to gravity center
        gt_bboxes[..., 2] += gt_bboxes[..., 5] / 2
        # the positive direction for yaw in meshlab is clockwise
        gt_bboxes[:, 6] *= -1
        _write_oriented_bbox(gt_bboxes,
                             osp.join(result_path, f'{filename}_gt.ply'))
    if pred_bboxes is not None:
        # bottom center to gravity center
        pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
        # the positive direction for yaw in meshlab is clockwise
        pred_bboxes[:, 6] *= -1
        _write_oriented_bbox(pred_bboxes,
                             osp.join(result_path, f'{filename}_pred.ply'))
import numpy as np
import trimesh
from os import path as osp
def _write_ply(points, out_filename):
"""Write points into ``ply`` format for meshlab visualization.
Args:
points (np.ndarray): Points in shape (N, dim).
out_filename (str): Filename to be saved.
"""
N = points.shape[0]
fout = open(out_filename, 'w')
for i in range(N):
if points.shape[1] == 6:
c = points[i, 3:].astype(int)
fout.write(
'v %f %f %f %d %d %d\n' %
(points[i, 0], points[i, 1], points[i, 2], c[0], c[1], c[2]))
else:
fout.write('v %f %f %f\n' %
(points[i, 0], points[i, 1], points[i, 2]))
fout.close()
def _write_oriented_bbox(scene_bbox, out_filename):
    """Export oriented (around Z axis) scene bbox to meshes.
    Args:
        scene_bbox(list[ndarray] or ndarray): xyz pos of center and
            3 lengths (dx,dy,dz) and heading angle around Z axis.
            Y forward, X right, Z upward. heading angle of positive X is 0,
            heading angle of positive Y is 90 degrees.
        out_filename(str): Filename.
    """
    def heading2rotmat(heading_angle):
        # Rotation matrix for a rotation about the Z axis (radians).
        rotmat = np.zeros((3, 3))
        rotmat[2, 2] = 1
        cosval = np.cos(heading_angle)
        sinval = np.sin(heading_angle)
        rotmat[0:2, 0:2] = np.array([[cosval, -sinval], [sinval, cosval]])
        return rotmat
    def convert_oriented_box_to_trimesh_fmt(box):
        # box layout: [cx, cy, cz, dx, dy, dz, heading]
        ctr = box[:3]
        lengths = box[3:6]
        trns = np.eye(4)
        trns[0:3, 3] = ctr
        trns[3, 3] = 1.0
        trns[0:3, 0:3] = heading2rotmat(box[6])
        box_trimesh_fmt = trimesh.creation.box(lengths, trns)
        return box_trimesh_fmt
    # An empty scene still needs one (degenerate) box so trimesh can
    # emit a valid mesh file.
    if len(scene_bbox) == 0:
        scene_bbox = np.zeros((1, 7))
    scene = trimesh.scene.Scene()
    for box in scene_bbox:
        scene.add_geometry(convert_oriented_box_to_trimesh_fmt(box))
    mesh_list = trimesh.util.concatenate(scene.dump())
    # save to ply file
    # NOTE(review): ``trimesh.io.export`` is trimesh's legacy export
    # path — confirm it still exists in the pinned trimesh version.
    trimesh.io.export.export_mesh(mesh_list, out_filename, file_type='ply')
    return
def show_result(points, gt_bboxes, pred_bboxes, out_dir, filename, show=True):
    """Convert results into format that is directly readable for meshlab.

    Args:
        points (np.ndarray): Points.
        gt_bboxes (np.ndarray): Ground truth boxes.
        pred_bboxes (np.ndarray): Predicted boxes.
        out_dir (str): Path of output directory
        filename (str): Filename of the current frame.
        show (bool): Visualize the results online.

    NOTE(review): ``gt_bboxes``/``pred_bboxes`` are modified in place
    below (center shift and yaw flip) — callers should pass copies if
    they reuse the arrays.
    """
    if show:
        from .open3d_vis import Visualizer
        vis = Visualizer(points)
        if pred_bboxes is not None:
            vis.add_bboxes(bbox3d=pred_bboxes)
        if gt_bboxes is not None:
            vis.add_bboxes(bbox3d=gt_bboxes, bbox_color=(0, 0, 1))
        vis.show()
    result_path = osp.join(out_dir, filename)
    mmcv.mkdir_or_exist(result_path)
    # Fixed: the output names were placeholder-less f-strings (the
    # ``{filename}`` interpolation had been lost), so every frame
    # overwrote the same files; restored per upstream mmdet3d. Also
    # removed stray non-Python residue fused onto the last line.
    if points is not None:
        _write_ply(points, osp.join(result_path, f'{filename}_points.obj'))
    if gt_bboxes is not None:
        # bottom center to gravity center
        gt_bboxes[..., 2] += gt_bboxes[..., 5] / 2
        # the positive direction for yaw in meshlab is clockwise
        gt_bboxes[:, 6] *= -1
        _write_oriented_bbox(gt_bboxes,
                             osp.join(result_path, f'{filename}_gt.ply'))
    if pred_bboxes is not None:
        # bottom center to gravity center
        pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
        # the positive direction for yaw in meshlab is clockwise
        pred_bboxes[:, 6] *= -1
        _write_oriented_bbox(pred_bboxes,
                             osp.join(result_path, f'{filename}_pred.ply'))
import asyncio
import os
import aiohttp
import discord
import orjson
import uvloop
from discord.ext import commands
from dotenv import load_dotenv
load_dotenv()
Tenor_API_Key = os.getenv("Tenor_API_Key")
class TenorV1(commands.Cog):
    """Cog exposing ``tenor-search-multiple``: posts the top 5 Tenor GIFs
    matching a search query, one embed per result."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(name="tenor-search-multiple", aliases=["tsm"])
    async def tenor_search(self, ctx, *, search: str):
        """Search Tenor for ``search`` and send up to 5 GIF results."""
        async with aiohttp.ClientSession(json_serialize=orjson.dumps) as session:
            params = {
                "q": search,
                "key": Tenor_API_Key,
                "contentfilter": "medium",
                "limit": 5,
                "media_filter": "minimal",
            }
            async with session.get("https://g.tenor.com/v1/search", params=params) as r:
                data = await r.json()
            try:
                # One loop replaces five copy-pasted embed blocks; slicing
                # also stops the old IndexError (and spurious error embed)
                # when Tenor returns fewer than 5 results.
                for item in data["results"][:5]:
                    embed = discord.Embed()
                    embed.title = item["content_description"]
                    embed.set_image(url=item["media"][0]["gif"]["url"])
                    await ctx.send(embed=embed)
            except Exception as e:
                embedVar = discord.Embed()
                embedVar.description = f"Sorry, but the search for {search} has failed. Please try again..."
                embedVar.add_field(name="Reason", value=e, inline=True)
                await ctx.send(embed=embedVar)
        # NOTE(review): setting the event-loop policy from inside a running
        # coroutine does not affect the current loop; kept for parity, but
        # this belongs at program start-up.
        asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())

    @tenor_search.error
    async def on_message_error(
        self, ctx: commands.Context, error: commands.CommandError
    ):
        """Report a missing ``search`` argument back to the invoker."""
        if isinstance(error, commands.MissingRequiredArgument):
            embedVar = discord.Embed(color=discord.Color.from_rgb(255, 51, 51))
            embedVar.description = f"Missing a required argument: {error.param}"
            msg = await ctx.send(embed=embedVar, delete_after=10)
            await msg.delete(delay=10)
        asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
class TenorV2(commands.Cog):
    """Cog exposing ``tenor-search-one``: posts the top Tenor GIF for a
    search query."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(name="tenor-search-one", aliases=["tso"])
    async def tenor_search_one(self, ctx, *, search_one: str):
        """Search Tenor for ``search_one`` and send the first GIF result."""
        async with aiohttp.ClientSession(json_serialize=orjson.dumps) as session:
            params = {
                "q": search_one,
                "key": Tenor_API_Key,
                "contentfilter": "medium",
                "limit": 2,
                "media_filter": "minimal",
            }
            async with session.get(
                "https://g.tenor.com/v1/search", params=params
            ) as re:
                data2 = await re.json()
            try:
                embedVar1 = discord.Embed()
                embedVar1.title = data2["results"][0]["content_description"]
                embedVar1.set_image(
                    url=data2["results"][0]["media"][0]["gif"]["url"]
                )
                await ctx.send(embed=embedVar1)
            except Exception as e:
                embedVar = discord.Embed()
                # BUG FIX: this was a plain string, so users saw the literal
                # text "{search}"; interpolate the actual query variable.
                embedVar.description = f"Sorry, but the search for {search_one} has failed. Please try again..."
                embedVar.add_field(name="Reason", value=e, inline=True)
                await ctx.send(embed=embedVar)
        # NOTE(review): policy change here does not affect the running loop.
        asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())

    @tenor_search_one.error
    async def on_message_error(
        self, ctx: commands.Context, error: commands.CommandError
    ):
        """Report a missing ``search_one`` argument back to the invoker."""
        if isinstance(error, commands.MissingRequiredArgument):
            embedVar = discord.Embed(color=discord.Color.from_rgb(255, 51, 51))
            embedVar.description = f"Missing a required argument: {error.param}"
            msg = await ctx.send(embed=embedVar, delete_after=10)
            await msg.delete(delay=10)
        asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
class TenorV3(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name="tenor-trending", aliases=["tt"])
async def tenor_trending(self, ctx):
async with aiohttp.ClientSession(json_serialize=orjson.dumps) as session:
params = {
"key": Tenor_API_Key,
"contentfilter": "medium",
"limit": 5,
"media_filter": "minimal",
}
async with session.get(
"https://g.tenor.com/v1/trending", params=params
) as response:
data3 = await response.json()
try:
embed1 = discord.Embed()
embed1.title = data3["results"][0]["content_description"]
embed1.set_image(
url=data3["results"][0]["media"][0]["gif"]["url"])
await ctx.send(embed=embed1)
embed2 = discord.Embed()
embed2.title = data3["results"][1]["content_description"]
embed2.set_image(
url=data3["results"][1]["media"][0]["gif"]["url"])
await ctx.send(embed=embed2)
embed3 = discord.Embed()
embed3.title = data3["results"][2]["content_description"]
embed3.set_image(
url=data3["results"][2]["media"][0]["gif"]["url"])
await ctx.send(embed=embed3)
embed4 = discord.Embed()
embed4.title = data3["results"][3]["content_description"]
embed4.set_image(
url=data3["results"][3]["media"][0]["gif"]["url"])
await ctx.send(embed=embed4)
embed5 = discord.Embed()
embed5.title = data3["results"][4]["content_description"]
embed5.set_image(
url=data3["results"][4]["media"][0]["gif"]["url"])
await ctx.send(embed=embed5)
except Exception as e:
embedVar = discord.Embed()
embedVar.description = "Sorry, but the search for {search} has failed. Please try again..."
embedVar.add_field(name="Reason", value=e, inline=True)
await ctx.send(embed=embedVar)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
class TenorV4(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name="tenor-search-suggestions", aliases=["tss"])
async def tenor_search_suggestions(self, ctx, *, search_suggestion: str):
async with aiohttp.ClientSession(json_serialize=orjson.dumps) as session:
params = {"key": Tenor_API_Key,
"q": search_suggestion, "limit": 25}
async with session.get(
"https://g.tenor.com/v1/search_suggestions", params=params
) as resp:
data5 = await resp.json()
try:
embedVar = discord.Embed()
embedVar.title = "Search Suggestions"
embedVar.description = str(
[items for items in data5["results"]]
).replace("'", "")
await ctx.send(embed=embedVar)
except Exception as e:
embedVar = discord.Embed()
embedVar.description = "Sorry, but the search for {search} has failed. Please try again..."
embedVar.add_field(name="Reason", value=e, inline=True)
await ctx.send(embed=embedVar)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
@tenor_search_suggestions.error
async def on_message_error(
self, ctx: commands.Context, error: commands.CommandError
):
if isinstance(error, commands.MissingRequiredArgument):
embedVar = discord.Embed(color=discord.Color.from_rgb(255, 51, 51))
embedVar.description = f"Missing a required argument: {error.param}"
msg = await ctx.send(embed=embedVar, delete_after=10)
await msg.delete(delay=10)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
class TenorV5(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name="tenor-trending-terms", aliases=["tt-terms"])
async def tenor_trending_terms(self, ctx):
async with aiohttp.ClientSession(json_serialize=orjson.dumps) as session:
params = {"key": Tenor_API_Key, "limit": 25}
async with session.get(
"https://g.tenor.com/v1/trending_terms", params=params
) as rep:
data6 = await rep.json()
try:
embedVar = discord.Embed()
embedVar.title = "Trending Search Terms"
embedVar.description = str(
[items for items in data6["results"]]
).replace("'", "")
await ctx.send(embed=embedVar)
except Exception as e:
embedVar = discord.Embed()
embedVar.description = "Sorry, but the search for {search} has failed. Please try again..."
embedVar.add_field(name="Reason", value=e, inline=True)
await ctx.send(embed=embedVar)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
@tenor_trending_terms.error
async def on_message_error(
self, ctx: commands.Context, error: commands.CommandError
):
if isinstance(error, commands.MissingRequiredArgument):
embedVar = discord.Embed(color=discord.Color.from_rgb(255, 51, 51))
embedVar.description = f"Missing a required argument: {error.param}"
msg = await ctx.send(embed=embedVar, delete_after=10)
await msg.delete(delay=10)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
class TenorV6(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name="tenor-gif", aliases=["tg"])
async def tenor_gif(self, ctx, *, search_gif: str):
async with aiohttp.ClientSession(json_serialize=orjson.dumps) as session:
params = {
"key": Tenor_API_Key,
"q": search_gif,
"limit": 1,
"media_filter": "minimal",
}
async with session.get(
"https://g.tenor.com/v1/gifs", params=params
) as respon:
data7 = await respon.json()
try:
embedVar = discord.Embed()
embedVar.title = data7["results"][0]["content_description"]
embedVar.add_field(
name="GIF ID", value=data7["results"][0]["id"], inline=True
)
embedVar.add_field(
name="Item URL",
value=data7["results"][0]["itemurl"],
inline=True,
)
embedVar.add_field(
name="Tags",
value=[items for items in data7["results"][0]["tags"]],
inline=True,
)
embedVar.add_field(
names="Flags",
value=[items for items in data7["results"][0]["flags"]],
inline=True,
)
embedVar.add_field(
name="Shares", value=data7["results"][0]["shares"], inline=True
)
embedVar.add_field(
name="Has Audio",
value=data7["results"][0]["has_audio"],
inline=True,
)
embedVar.set_image(
url=data7["results"][0]["media"][0]["gif"]["url"]
)
await ctx.send(embed=embedVar)
except Exception as e:
embedVar = discord.Embed()
embedVar.description = (
"Sorry, but the query failed. Please try again..."
)
embedVar.add_field(name="Reason", value=e, inline=True)
await ctx.send(embed=embedVar)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
@tenor_gif.error
async def on_message_error(
self, ctx: commands.Context, error: commands.CommandError
):
if isinstance(error, commands.MissingRequiredArgument):
embedVar = discord.Embed(color=discord.Color.from_rgb(255, 51, 51))
embedVar.description = f"Missing a required argument: {error.param}"
msg = await ctx.send(embed=embedVar, delete_after=10)
await msg.delete(delay=10)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
class TenorV7(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name="tenor-random", aliases=["tr"])
async def tenor_random(self, ctx, *, search_random: str):
async with aiohttp.ClientSession(json_serialize=orjson.dumps) as session:
params = {
"key": Tenor_API_Key,
"limit": 1,
"media_filter": "minimal",
"contentfilter": "medium",
"q": search_random,
}
async with session.get(
"https://g.tenor.com/v1/random", params=params
) as object3:
data8 = await object3.json()
try:
embedVar = discord.Embed()
embedVar.title = data8["results"][0]["content_description"]
embedVar.add_field(
name="GIF ID", value=data8["results"][0]["id"], inline=True
)
embedVar.add_field(
name="Item URL",
value=data8["results"][0]["itemurl"],
inline=True,
)
embedVar.set_image(
url=data8["results"][0]["media"][0]["gif"]["url"]
)
await ctx.send(embed=embedVar)
except Exception as e:
embedVar = discord.Embed()
embedVar.description = (
"Sorry, but the query failed. Please try again..."
)
embedVar.add_field(name="Reason", value=e, inline=True)
await ctx.send(embed=embedVar)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
@tenor_random.error
async def on_message_error(
self, ctx: commands.Context, error: commands.CommandError
):
if isinstance(error, commands.MissingRequiredArgument):
embedVar = discord.Embed(color=discord.Color.from_rgb(255, 51, 51))
embedVar.description = f"Missing a required argument: {error.param}"
msg = await ctx.send(embed=embedVar, delete_after=10)
await msg.delete(delay=10)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
def setup(bot):
bot.add_cog(TenorV1(bot))
bot.add_cog(TenorV2(bot))
bot.add_cog(TenorV3(bot))
bot.add_cog(TenorV4(bot))
bot.add_cog(TenorV5(bot))
bot.add_cog(TenorV6(bot))
bot.add_cog(TenorV7(bot)) | Bot/Cogs/tenor.py | import asyncio
import os
import aiohttp
import discord
import orjson
import uvloop
from discord.ext import commands
from dotenv import load_dotenv
load_dotenv()
Tenor_API_Key = os.getenv("Tenor_API_Key")
class TenorV1(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name="tenor-search-multiple", aliases=["tsm"])
async def tenor_search(self, ctx, *, search: str):
async with aiohttp.ClientSession(json_serialize=orjson.dumps) as session:
params = {
"q": search,
"key": Tenor_API_Key,
"contentfilter": "medium",
"limit": 5,
"media_filter": "minimal",
}
async with session.get("https://g.tenor.com/v1/search", params=params) as r:
data = await r.json()
try:
embed1 = discord.Embed()
embed1.title = data["results"][0]["content_description"]
embed1.set_image(
url=data["results"][0]["media"][0]["gif"]["url"])
await ctx.send(embed=embed1)
embed2 = discord.Embed()
embed2.title = data["results"][1]["content_description"]
embed2.set_image(
url=data["results"][1]["media"][0]["gif"]["url"])
await ctx.send(embed=embed2)
embed3 = discord.Embed()
embed3.title = data["results"][2]["content_description"]
embed3.set_image(
url=data["results"][2]["media"][0]["gif"]["url"])
await ctx.send(embed=embed3)
embed4 = discord.Embed()
embed4.title = data["results"][3]["content_description"]
embed4.set_image(
url=data["results"][3]["media"][0]["gif"]["url"])
await ctx.send(embed=embed4)
embed5 = discord.Embed()
embed5.title = data["results"][4]["content_description"]
embed5.set_image(
url=data["results"][4]["media"][0]["gif"]["url"])
await ctx.send(embed=embed5)
except Exception as e:
embedVar = discord.Embed()
embedVar.description = f"Sorry, but the search for {search} has failed. Please try again..."
embedVar.add_field(name="Reason", value=e, inline=True)
await ctx.send(embed=embedVar)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
@tenor_search.error
async def on_message_error(
self, ctx: commands.Context, error: commands.CommandError
):
if isinstance(error, commands.MissingRequiredArgument):
embedVar = discord.Embed(color=discord.Color.from_rgb(255, 51, 51))
embedVar.description = f"Missing a required argument: {error.param}"
msg = await ctx.send(embed=embedVar, delete_after=10)
await msg.delete(delay=10)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
class TenorV2(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name="tenor-search-one", aliases=["tso"])
async def tenor_search_one(self, ctx, *, search_one: str):
async with aiohttp.ClientSession(json_serialize=orjson.dumps) as session:
params = {
"q": search_one,
"key": Tenor_API_Key,
"contentfilter": "medium",
"limit": 2,
"media_filter": "minimal",
}
async with session.get(
"https://g.tenor.com/v1/search", params=params
) as re:
data2 = await re.json()
try:
embedVar1 = discord.Embed()
embedVar1.title = data2["results"][0]["content_description"]
embedVar1.set_image(
url=data2["results"][0]["media"][0]["gif"]["url"]
)
await ctx.send(embed=embedVar1)
except Exception as e:
embedVar = discord.Embed()
embedVar.description = "Sorry, but the search for {search} has failed. Please try again..."
embedVar.add_field(name="Reason", value=e, inline=True)
await ctx.send(embed=embedVar)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
@tenor_search_one.error
async def on_message_error(
self, ctx: commands.Context, error: commands.CommandError
):
if isinstance(error, commands.MissingRequiredArgument):
embedVar = discord.Embed(color=discord.Color.from_rgb(255, 51, 51))
embedVar.description = f"Missing a required argument: {error.param}"
msg = await ctx.send(embed=embedVar, delete_after=10)
await msg.delete(delay=10)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
class TenorV3(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name="tenor-trending", aliases=["tt"])
async def tenor_trending(self, ctx):
async with aiohttp.ClientSession(json_serialize=orjson.dumps) as session:
params = {
"key": Tenor_API_Key,
"contentfilter": "medium",
"limit": 5,
"media_filter": "minimal",
}
async with session.get(
"https://g.tenor.com/v1/trending", params=params
) as response:
data3 = await response.json()
try:
embed1 = discord.Embed()
embed1.title = data3["results"][0]["content_description"]
embed1.set_image(
url=data3["results"][0]["media"][0]["gif"]["url"])
await ctx.send(embed=embed1)
embed2 = discord.Embed()
embed2.title = data3["results"][1]["content_description"]
embed2.set_image(
url=data3["results"][1]["media"][0]["gif"]["url"])
await ctx.send(embed=embed2)
embed3 = discord.Embed()
embed3.title = data3["results"][2]["content_description"]
embed3.set_image(
url=data3["results"][2]["media"][0]["gif"]["url"])
await ctx.send(embed=embed3)
embed4 = discord.Embed()
embed4.title = data3["results"][3]["content_description"]
embed4.set_image(
url=data3["results"][3]["media"][0]["gif"]["url"])
await ctx.send(embed=embed4)
embed5 = discord.Embed()
embed5.title = data3["results"][4]["content_description"]
embed5.set_image(
url=data3["results"][4]["media"][0]["gif"]["url"])
await ctx.send(embed=embed5)
except Exception as e:
embedVar = discord.Embed()
embedVar.description = "Sorry, but the search for {search} has failed. Please try again..."
embedVar.add_field(name="Reason", value=e, inline=True)
await ctx.send(embed=embedVar)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
class TenorV4(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name="tenor-search-suggestions", aliases=["tss"])
async def tenor_search_suggestions(self, ctx, *, search_suggestion: str):
async with aiohttp.ClientSession(json_serialize=orjson.dumps) as session:
params = {"key": Tenor_API_Key,
"q": search_suggestion, "limit": 25}
async with session.get(
"https://g.tenor.com/v1/search_suggestions", params=params
) as resp:
data5 = await resp.json()
try:
embedVar = discord.Embed()
embedVar.title = "Search Suggestions"
embedVar.description = str(
[items for items in data5["results"]]
).replace("'", "")
await ctx.send(embed=embedVar)
except Exception as e:
embedVar = discord.Embed()
embedVar.description = "Sorry, but the search for {search} has failed. Please try again..."
embedVar.add_field(name="Reason", value=e, inline=True)
await ctx.send(embed=embedVar)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
@tenor_search_suggestions.error
async def on_message_error(
self, ctx: commands.Context, error: commands.CommandError
):
if isinstance(error, commands.MissingRequiredArgument):
embedVar = discord.Embed(color=discord.Color.from_rgb(255, 51, 51))
embedVar.description = f"Missing a required argument: {error.param}"
msg = await ctx.send(embed=embedVar, delete_after=10)
await msg.delete(delay=10)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
class TenorV5(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name="tenor-trending-terms", aliases=["tt-terms"])
async def tenor_trending_terms(self, ctx):
async with aiohttp.ClientSession(json_serialize=orjson.dumps) as session:
params = {"key": Tenor_API_Key, "limit": 25}
async with session.get(
"https://g.tenor.com/v1/trending_terms", params=params
) as rep:
data6 = await rep.json()
try:
embedVar = discord.Embed()
embedVar.title = "Trending Search Terms"
embedVar.description = str(
[items for items in data6["results"]]
).replace("'", "")
await ctx.send(embed=embedVar)
except Exception as e:
embedVar = discord.Embed()
embedVar.description = "Sorry, but the search for {search} has failed. Please try again..."
embedVar.add_field(name="Reason", value=e, inline=True)
await ctx.send(embed=embedVar)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
@tenor_trending_terms.error
async def on_message_error(
self, ctx: commands.Context, error: commands.CommandError
):
if isinstance(error, commands.MissingRequiredArgument):
embedVar = discord.Embed(color=discord.Color.from_rgb(255, 51, 51))
embedVar.description = f"Missing a required argument: {error.param}"
msg = await ctx.send(embed=embedVar, delete_after=10)
await msg.delete(delay=10)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
class TenorV6(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name="tenor-gif", aliases=["tg"])
async def tenor_gif(self, ctx, *, search_gif: str):
async with aiohttp.ClientSession(json_serialize=orjson.dumps) as session:
params = {
"key": Tenor_API_Key,
"q": search_gif,
"limit": 1,
"media_filter": "minimal",
}
async with session.get(
"https://g.tenor.com/v1/gifs", params=params
) as respon:
data7 = await respon.json()
try:
embedVar = discord.Embed()
embedVar.title = data7["results"][0]["content_description"]
embedVar.add_field(
name="GIF ID", value=data7["results"][0]["id"], inline=True
)
embedVar.add_field(
name="Item URL",
value=data7["results"][0]["itemurl"],
inline=True,
)
embedVar.add_field(
name="Tags",
value=[items for items in data7["results"][0]["tags"]],
inline=True,
)
embedVar.add_field(
names="Flags",
value=[items for items in data7["results"][0]["flags"]],
inline=True,
)
embedVar.add_field(
name="Shares", value=data7["results"][0]["shares"], inline=True
)
embedVar.add_field(
name="Has Audio",
value=data7["results"][0]["has_audio"],
inline=True,
)
embedVar.set_image(
url=data7["results"][0]["media"][0]["gif"]["url"]
)
await ctx.send(embed=embedVar)
except Exception as e:
embedVar = discord.Embed()
embedVar.description = (
"Sorry, but the query failed. Please try again..."
)
embedVar.add_field(name="Reason", value=e, inline=True)
await ctx.send(embed=embedVar)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
@tenor_gif.error
async def on_message_error(
self, ctx: commands.Context, error: commands.CommandError
):
if isinstance(error, commands.MissingRequiredArgument):
embedVar = discord.Embed(color=discord.Color.from_rgb(255, 51, 51))
embedVar.description = f"Missing a required argument: {error.param}"
msg = await ctx.send(embed=embedVar, delete_after=10)
await msg.delete(delay=10)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
class TenorV7(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name="tenor-random", aliases=["tr"])
async def tenor_random(self, ctx, *, search_random: str):
async with aiohttp.ClientSession(json_serialize=orjson.dumps) as session:
params = {
"key": Tenor_API_Key,
"limit": 1,
"media_filter": "minimal",
"contentfilter": "medium",
"q": search_random,
}
async with session.get(
"https://g.tenor.com/v1/random", params=params
) as object3:
data8 = await object3.json()
try:
embedVar = discord.Embed()
embedVar.title = data8["results"][0]["content_description"]
embedVar.add_field(
name="GIF ID", value=data8["results"][0]["id"], inline=True
)
embedVar.add_field(
name="Item URL",
value=data8["results"][0]["itemurl"],
inline=True,
)
embedVar.set_image(
url=data8["results"][0]["media"][0]["gif"]["url"]
)
await ctx.send(embed=embedVar)
except Exception as e:
embedVar = discord.Embed()
embedVar.description = (
"Sorry, but the query failed. Please try again..."
)
embedVar.add_field(name="Reason", value=e, inline=True)
await ctx.send(embed=embedVar)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
@tenor_random.error
async def on_message_error(
self, ctx: commands.Context, error: commands.CommandError
):
if isinstance(error, commands.MissingRequiredArgument):
embedVar = discord.Embed(color=discord.Color.from_rgb(255, 51, 51))
embedVar.description = f"Missing a required argument: {error.param}"
msg = await ctx.send(embed=embedVar, delete_after=10)
await msg.delete(delay=10)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
def setup(bot):
bot.add_cog(TenorV1(bot))
bot.add_cog(TenorV2(bot))
bot.add_cog(TenorV3(bot))
bot.add_cog(TenorV4(bot))
bot.add_cog(TenorV5(bot))
bot.add_cog(TenorV6(bot))
bot.add_cog(TenorV7(bot)) | 0.275909 | 0.080105 |
from io import BytesIO
import factory
import pytest
from django.core.management import call_command
from reversion.models import Version
from datahub.metadata.test.factories import SectorFactory
pytestmark = pytest.mark.django_db
def test_happy_path(s3_stubber):
"""Test that the command updates the specified records."""
old_sectors = ['sector_1_old', 'sector_2_old', 'sector_3_old']
new_sectors = ['sector_1_new', 'sector_2_new', 'sector_3_new']
sectors = SectorFactory.create_batch(
3,
segment=factory.Iterator(old_sectors),
)
bucket = 'test_bucket'
object_key = 'test_key'
csv_content = f"""id,old_sector_segment,new_sector_segment
{sectors[0].pk},{old_sectors[0]},{new_sectors[0]}
{sectors[1].pk},{old_sectors[1]},{new_sectors[1]}
{sectors[2].pk},{old_sectors[2]},{new_sectors[2]}
"""
s3_stubber.add_response(
'get_object',
{
'Body': BytesIO(csv_content.encode(encoding='utf-8')),
},
expected_params={
'Bucket': bucket,
'Key': object_key,
},
)
call_command('update_sector_segment', bucket, object_key)
for sector in sectors:
sector.refresh_from_db()
assert [sector.segment for sector in sectors] == new_sectors
def test_non_existent_sector(s3_stubber, caplog):
"""Test that the command logs an error when PK does not exist."""
caplog.set_level('ERROR')
old_sectors = ['sector_1_old', 'sector_2_old', 'sector_3_old']
new_sectors = ['sector_1_new', 'sector_2_new', 'sector_3_new']
sectors = SectorFactory.create_batch(
3,
segment=factory.Iterator(old_sectors),
)
bucket = 'test_bucket'
object_key = 'test_key'
csv_content = f"""id,old_sector_segment,new_sector_segment
{sectors[0].pk},{old_sectors[0]},{new_sectors[0]}
00000000-0000-0000-0000-000000000000,{old_sectors[1]},{new_sectors[1]}
{sectors[2].pk},{old_sectors[2]},{new_sectors[2]}
"""
s3_stubber.add_response(
'get_object',
{
'Body': BytesIO(csv_content.encode(encoding='utf-8')),
},
expected_params={
'Bucket': bucket,
'Key': object_key,
},
)
call_command('update_sector_segment', bucket, object_key)
for sector in sectors:
sector.refresh_from_db()
assert 'Sector matching query does not exist' in caplog.text
assert len(caplog.records) == 1
assert [sector.segment for sector in sectors] == [
new_sectors[0], old_sectors[1], new_sectors[2],
]
def test_no_change(s3_stubber, caplog):
"""Test that the command ignores records that haven't changed
or records with incorrect current values.
"""
caplog.set_level('WARNING')
old_sectors = ['sector_1_old', 'sector_2_old', 'sector_3_old']
new_sectors = ['sector_1_new', 'sector_2_new', 'sector_3_new']
sectors = SectorFactory.create_batch(
3,
segment=factory.Iterator(old_sectors),
)
bucket = 'test_bucket'
object_key = 'test_key'
csv_content = f"""id,old_sector_segment,new_sector_segment
{sectors[0].pk},{old_sectors[0]},{new_sectors[0]}
{sectors[1].pk},{old_sectors[1]},{old_sectors[1]}
{sectors[2].pk},bla,{new_sectors[2]}
"""
s3_stubber.add_response(
'get_object',
{
'Body': BytesIO(csv_content.encode(encoding='utf-8')),
},
expected_params={
'Bucket': bucket,
'Key': object_key,
},
)
call_command('update_sector_segment', bucket, object_key)
for sector in sectors:
sector.refresh_from_db()
assert f'Not updating sector {sectors[1]} as its segment has not changed' in caplog.text
assert f'Not updating sector {sectors[2]} as its segment has not changed' in caplog.text
assert len(caplog.records) == 2
assert [sector.segment for sector in sectors] == [
new_sectors[0], old_sectors[1], old_sectors[2],
]
def test_simulate(s3_stubber):
"""Test that the command simulates updates if --simulate is passed in."""
old_sectors = ['sector_1_old', 'sector_2_old', 'sector_3_old']
new_sectors = ['sector_1_new', 'sector_2_new', 'sector_3_new']
sectors = SectorFactory.create_batch(
3,
segment=factory.Iterator(old_sectors),
)
bucket = 'test_bucket'
object_key = 'test_key'
csv_content = f"""id,old_sector_segment,new_sector_segment
{sectors[0].pk},{old_sectors[0]},{new_sectors[0]}
{sectors[1].pk},{old_sectors[1]},{new_sectors[1]}
{sectors[2].pk},{old_sectors[2]},{new_sectors[2]}
"""
s3_stubber.add_response(
'get_object',
{
'Body': BytesIO(csv_content.encode(encoding='utf-8')),
},
expected_params={
'Bucket': bucket,
'Key': object_key,
},
)
call_command('update_sector_segment', bucket, object_key, simulate=True)
for sector in sectors:
sector.refresh_from_db()
assert [sector.segment for sector in sectors] == old_sectors
def test_audit_log(s3_stubber):
"""Test that reversion revisions are created."""
sector_without_change = SectorFactory(
segment='sector_1',
)
sector_with_change = SectorFactory(
segment='sector_2',
)
bucket = 'test_bucket'
object_key = 'test_key'
csv_content = f"""id,old_sector_segment,new_sector_segment
{sector_without_change.pk},{sector_without_change.segment},{sector_without_change.segment}
{sector_with_change.pk},{sector_with_change.segment},sector_new
"""
s3_stubber.add_response(
'get_object',
{
'Body': BytesIO(csv_content.encode(encoding='utf-8')),
},
expected_params={
'Bucket': bucket,
'Key': object_key,
},
)
call_command('update_sector_segment', bucket, object_key)
versions = Version.objects.get_for_object(sector_without_change)
assert versions.count() == 0
versions = Version.objects.get_for_object(sector_with_change)
assert versions.count() == 1
assert versions[0].revision.get_comment() == 'Sector segment correction.' | datahub/dbmaintenance/test/commands/test_update_sector_segment.py | from io import BytesIO
import factory
import pytest
from django.core.management import call_command
from reversion.models import Version
from datahub.metadata.test.factories import SectorFactory
pytestmark = pytest.mark.django_db
def test_happy_path(s3_stubber):
"""Test that the command updates the specified records."""
old_sectors = ['sector_1_old', 'sector_2_old', 'sector_3_old']
new_sectors = ['sector_1_new', 'sector_2_new', 'sector_3_new']
sectors = SectorFactory.create_batch(
3,
segment=factory.Iterator(old_sectors),
)
bucket = 'test_bucket'
object_key = 'test_key'
csv_content = f"""id,old_sector_segment,new_sector_segment
{sectors[0].pk},{old_sectors[0]},{new_sectors[0]}
{sectors[1].pk},{old_sectors[1]},{new_sectors[1]}
{sectors[2].pk},{old_sectors[2]},{new_sectors[2]}
"""
s3_stubber.add_response(
'get_object',
{
'Body': BytesIO(csv_content.encode(encoding='utf-8')),
},
expected_params={
'Bucket': bucket,
'Key': object_key,
},
)
call_command('update_sector_segment', bucket, object_key)
for sector in sectors:
sector.refresh_from_db()
assert [sector.segment for sector in sectors] == new_sectors
def test_non_existent_sector(s3_stubber, caplog):
"""Test that the command logs an error when PK does not exist."""
caplog.set_level('ERROR')
old_sectors = ['sector_1_old', 'sector_2_old', 'sector_3_old']
new_sectors = ['sector_1_new', 'sector_2_new', 'sector_3_new']
sectors = SectorFactory.create_batch(
3,
segment=factory.Iterator(old_sectors),
)
bucket = 'test_bucket'
object_key = 'test_key'
csv_content = f"""id,old_sector_segment,new_sector_segment
{sectors[0].pk},{old_sectors[0]},{new_sectors[0]}
00000000-0000-0000-0000-000000000000,{old_sectors[1]},{new_sectors[1]}
{sectors[2].pk},{old_sectors[2]},{new_sectors[2]}
"""
s3_stubber.add_response(
'get_object',
{
'Body': BytesIO(csv_content.encode(encoding='utf-8')),
},
expected_params={
'Bucket': bucket,
'Key': object_key,
},
)
call_command('update_sector_segment', bucket, object_key)
for sector in sectors:
sector.refresh_from_db()
assert 'Sector matching query does not exist' in caplog.text
assert len(caplog.records) == 1
assert [sector.segment for sector in sectors] == [
new_sectors[0], old_sectors[1], new_sectors[2],
]
def test_no_change(s3_stubber, caplog):
"""Test that the command ignores records that haven't changed
or records with incorrect current values.
"""
caplog.set_level('WARNING')
old_sectors = ['sector_1_old', 'sector_2_old', 'sector_3_old']
new_sectors = ['sector_1_new', 'sector_2_new', 'sector_3_new']
sectors = SectorFactory.create_batch(
3,
segment=factory.Iterator(old_sectors),
)
bucket = 'test_bucket'
object_key = 'test_key'
csv_content = f"""id,old_sector_segment,new_sector_segment
{sectors[0].pk},{old_sectors[0]},{new_sectors[0]}
{sectors[1].pk},{old_sectors[1]},{old_sectors[1]}
{sectors[2].pk},bla,{new_sectors[2]}
"""
s3_stubber.add_response(
'get_object',
{
'Body': BytesIO(csv_content.encode(encoding='utf-8')),
},
expected_params={
'Bucket': bucket,
'Key': object_key,
},
)
call_command('update_sector_segment', bucket, object_key)
for sector in sectors:
sector.refresh_from_db()
assert f'Not updating sector {sectors[1]} as its segment has not changed' in caplog.text
assert f'Not updating sector {sectors[2]} as its segment has not changed' in caplog.text
assert len(caplog.records) == 2
assert [sector.segment for sector in sectors] == [
new_sectors[0], old_sectors[1], old_sectors[2],
]
def test_simulate(s3_stubber):
"""Test that the command simulates updates if --simulate is passed in."""
old_sectors = ['sector_1_old', 'sector_2_old', 'sector_3_old']
new_sectors = ['sector_1_new', 'sector_2_new', 'sector_3_new']
sectors = SectorFactory.create_batch(
3,
segment=factory.Iterator(old_sectors),
)
bucket = 'test_bucket'
object_key = 'test_key'
csv_content = f"""id,old_sector_segment,new_sector_segment
{sectors[0].pk},{old_sectors[0]},{new_sectors[0]}
{sectors[1].pk},{old_sectors[1]},{new_sectors[1]}
{sectors[2].pk},{old_sectors[2]},{new_sectors[2]}
"""
s3_stubber.add_response(
'get_object',
{
'Body': BytesIO(csv_content.encode(encoding='utf-8')),
},
expected_params={
'Bucket': bucket,
'Key': object_key,
},
)
call_command('update_sector_segment', bucket, object_key, simulate=True)
for sector in sectors:
sector.refresh_from_db()
assert [sector.segment for sector in sectors] == old_sectors
def test_audit_log(s3_stubber):
"""Test that reversion revisions are created."""
sector_without_change = SectorFactory(
segment='sector_1',
)
sector_with_change = SectorFactory(
segment='sector_2',
)
bucket = 'test_bucket'
object_key = 'test_key'
csv_content = f"""id,old_sector_segment,new_sector_segment
{sector_without_change.pk},{sector_without_change.segment},{sector_without_change.segment}
{sector_with_change.pk},{sector_with_change.segment},sector_new
"""
s3_stubber.add_response(
'get_object',
{
'Body': BytesIO(csv_content.encode(encoding='utf-8')),
},
expected_params={
'Bucket': bucket,
'Key': object_key,
},
)
call_command('update_sector_segment', bucket, object_key)
versions = Version.objects.get_for_object(sector_without_change)
assert versions.count() == 0
versions = Version.objects.get_for_object(sector_with_change)
assert versions.count() == 1
assert versions[0].revision.get_comment() == 'Sector segment correction.' | 0.788054 | 0.310172 |
import collections
import json
import os
import subprocess
import sys
import urllib
import constants
import io_stats_parser
class DeviceStatsMonitor(object):
"""Class for collecting device stats such as IO/CPU usage.
Args:
adb: Instance of AndroidComannds.
hz: Frequency at which to sample device stats.
"""
DEVICE_PATH = constants.TEST_EXECUTABLE_DIR + '/device_stats_monitor'
PROFILE_PATH = '/sdcard/Download/device_stats_monitor.profile'
RESULT_VIEWER_PATH = os.path.abspath(os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'device_stats_monitor.html'))
def __init__(self, adb, hz, build_type):
self._adb = adb
host_path = os.path.abspath(os.path.join(
constants.CHROME_DIR, 'out', build_type, 'device_stats_monitor'))
self._adb.PushIfNeeded(host_path, DeviceStatsMonitor.DEVICE_PATH)
self._hz = hz
def Start(self):
"""Starts device stats monitor on the device."""
self._adb.SetFileContents(DeviceStatsMonitor.PROFILE_PATH, '')
self._process = subprocess.Popen(
['adb', 'shell', '%s --hz=%d %s' % (
DeviceStatsMonitor.DEVICE_PATH, self._hz,
DeviceStatsMonitor.PROFILE_PATH)])
def StopAndCollect(self, output_path):
"""Stops monitoring and saves results.
Args:
output_path: Path to save results.
Returns:
String of URL to load results in browser.
"""
assert self._process
self._adb.KillAll(DeviceStatsMonitor.DEVICE_PATH)
self._process.wait()
profile = self._adb.GetFileContents(DeviceStatsMonitor.PROFILE_PATH)
results = collections.defaultdict(list)
last_io_stats = None
last_cpu_stats = None
for line in profile:
if ' mmcblk0 ' in line:
stats = io_stats_parser.ParseIoStatsLine(line)
if last_io_stats:
results['sectors_read'].append(stats.num_sectors_read -
last_io_stats.num_sectors_read)
results['sectors_written'].append(stats.num_sectors_written -
last_io_stats.num_sectors_written)
last_io_stats = stats
elif line.startswith('cpu '):
stats = self._ParseCpuStatsLine(line)
if last_cpu_stats:
results['user'].append(stats.user - last_cpu_stats.user)
results['nice'].append(stats.nice - last_cpu_stats.nice)
results['system'].append(stats.system - last_cpu_stats.system)
results['idle'].append(stats.idle - last_cpu_stats.idle)
results['iowait'].append(stats.iowait - last_cpu_stats.iowait)
results['irq'].append(stats.irq - last_cpu_stats.irq)
results['softirq'].append(stats.softirq- last_cpu_stats.softirq)
last_cpu_stats = stats
units = {
'sectors_read': 'sectors',
'sectors_written': 'sectors',
'user': 'jiffies',
'nice': 'jiffies',
'system': 'jiffies',
'idle': 'jiffies',
'iowait': 'jiffies',
'irq': 'jiffies',
'softirq': 'jiffies',
}
with open(output_path, 'w') as f:
f.write('display(%d, %s, %s);' % (self._hz, json.dumps(results), units))
return 'file://%s?results=file://%s' % (
DeviceStatsMonitor.RESULT_VIEWER_PATH, urllib.quote(output_path))
@staticmethod
def _ParseCpuStatsLine(line):
"""Parses a line of cpu stats into a CpuStats named tuple."""
# Field definitions: http://www.linuxhowtos.org/System/procstat.htm
cpu_stats = collections.namedtuple('CpuStats',
['device',
'user',
'nice',
'system',
'idle',
'iowait',
'irq',
'softirq',
])
fields = line.split()
return cpu_stats._make([fields[0]] + [int(f) for f in fields[1:8]]) | build/android/pylib/device_stats_monitor.py | import collections
import json
import os
import subprocess
import sys
import urllib
import constants
import io_stats_parser
class DeviceStatsMonitor(object):
"""Class for collecting device stats such as IO/CPU usage.
Args:
adb: Instance of AndroidComannds.
hz: Frequency at which to sample device stats.
"""
DEVICE_PATH = constants.TEST_EXECUTABLE_DIR + '/device_stats_monitor'
PROFILE_PATH = '/sdcard/Download/device_stats_monitor.profile'
RESULT_VIEWER_PATH = os.path.abspath(os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'device_stats_monitor.html'))
def __init__(self, adb, hz, build_type):
self._adb = adb
host_path = os.path.abspath(os.path.join(
constants.CHROME_DIR, 'out', build_type, 'device_stats_monitor'))
self._adb.PushIfNeeded(host_path, DeviceStatsMonitor.DEVICE_PATH)
self._hz = hz
def Start(self):
"""Starts device stats monitor on the device."""
self._adb.SetFileContents(DeviceStatsMonitor.PROFILE_PATH, '')
self._process = subprocess.Popen(
['adb', 'shell', '%s --hz=%d %s' % (
DeviceStatsMonitor.DEVICE_PATH, self._hz,
DeviceStatsMonitor.PROFILE_PATH)])
def StopAndCollect(self, output_path):
"""Stops monitoring and saves results.
Args:
output_path: Path to save results.
Returns:
String of URL to load results in browser.
"""
assert self._process
self._adb.KillAll(DeviceStatsMonitor.DEVICE_PATH)
self._process.wait()
profile = self._adb.GetFileContents(DeviceStatsMonitor.PROFILE_PATH)
results = collections.defaultdict(list)
last_io_stats = None
last_cpu_stats = None
for line in profile:
if ' mmcblk0 ' in line:
stats = io_stats_parser.ParseIoStatsLine(line)
if last_io_stats:
results['sectors_read'].append(stats.num_sectors_read -
last_io_stats.num_sectors_read)
results['sectors_written'].append(stats.num_sectors_written -
last_io_stats.num_sectors_written)
last_io_stats = stats
elif line.startswith('cpu '):
stats = self._ParseCpuStatsLine(line)
if last_cpu_stats:
results['user'].append(stats.user - last_cpu_stats.user)
results['nice'].append(stats.nice - last_cpu_stats.nice)
results['system'].append(stats.system - last_cpu_stats.system)
results['idle'].append(stats.idle - last_cpu_stats.idle)
results['iowait'].append(stats.iowait - last_cpu_stats.iowait)
results['irq'].append(stats.irq - last_cpu_stats.irq)
results['softirq'].append(stats.softirq- last_cpu_stats.softirq)
last_cpu_stats = stats
units = {
'sectors_read': 'sectors',
'sectors_written': 'sectors',
'user': 'jiffies',
'nice': 'jiffies',
'system': 'jiffies',
'idle': 'jiffies',
'iowait': 'jiffies',
'irq': 'jiffies',
'softirq': 'jiffies',
}
with open(output_path, 'w') as f:
f.write('display(%d, %s, %s);' % (self._hz, json.dumps(results), units))
return 'file://%s?results=file://%s' % (
DeviceStatsMonitor.RESULT_VIEWER_PATH, urllib.quote(output_path))
@staticmethod
def _ParseCpuStatsLine(line):
"""Parses a line of cpu stats into a CpuStats named tuple."""
# Field definitions: http://www.linuxhowtos.org/System/procstat.htm
cpu_stats = collections.namedtuple('CpuStats',
['device',
'user',
'nice',
'system',
'idle',
'iowait',
'irq',
'softirq',
])
fields = line.split()
return cpu_stats._make([fields[0]] + [int(f) for f in fields[1:8]]) | 0.44553 | 0.176388 |
import functools
import numpy as np
from arch.api.proto.feature_scale_meta_pb2 import ScaleMeta
from arch.api.proto.feature_scale_param_pb2 import ScaleParam
from arch.api.proto.feature_scale_param_pb2 import ColumnScaleParam
from arch.api.utils import log_utils
from federatedml.feature.feature_scale.base_scale import BaseScale
LOGGER = log_utils.getLogger()
class MinMaxScale(BaseScale):
"""
Transforms features by scaling each feature to a given range,e.g.between minimum and maximum. The transformation is given by:
X_scale = (X - X.min) / (X.max - X.min), while X.min is the minimum value of feature, and X.max is the maximum
"""
def __init__(self, params):
super().__init__(params)
self.mode = params.mode
self.column_range = None
@staticmethod
def __scale(data, max_value_list, min_value_list, scale_value_list, process_cols_list):
"""
Scale operator for each column. The input data type is data_instance
"""
for i in process_cols_list:
value = data.features[i]
if value > max_value_list[i]:
value = max_value_list[i]
elif value < min_value_list[i]:
value = min_value_list[i]
data.features[i] = np.around((value - min_value_list[i]) / scale_value_list[i], 6)
return data
def fit(self, data):
"""
Apply min-max scale for input data
Parameters
----------
data: data_instance, input data
Returns
----------
fit_data:data_instance, data after scale
"""
self.column_min_value, self.column_max_value = self._get_min_max_value(data)
self.scale_column_idx = self._get_scale_column_idx(data)
self.header = self._get_header(data)
self.column_range = []
for i in range(len(self.column_max_value)):
scale = self.column_max_value[i] - self.column_min_value[i]
if scale < 0:
raise ValueError("scale value should large than 0")
elif np.abs(scale - 0) < 1e-6:
scale = 1
self.column_range.append(scale)
f = functools.partial(MinMaxScale.__scale, max_value_list=self.column_max_value,
min_value_list=self.column_min_value, scale_value_list=self.column_range,
process_cols_list=self.scale_column_idx)
fit_data = data.mapValues(f)
return fit_data
def transform(self, data):
"""
Transform input data using min-max scale with fit results
Parameters
----------
data: data_instance, input data
Returns
----------
transform_data:data_instance, data after transform
"""
self.column_range = []
for i in range(len(self.column_max_value)):
scale = self.column_max_value[i] - self.column_min_value[i]
if scale < 0:
raise ValueError("scale value should large than 0")
elif np.abs(scale - 0) < 1e-6:
scale = 1
self.column_range.append(scale)
f = functools.partial(MinMaxScale.__scale, max_value_list=self.column_max_value,
min_value_list=self.column_min_value, scale_value_list=self.column_range,
process_cols_list=self.scale_column_idx)
transform_data = data.mapValues(f)
return transform_data
def __get_meta(self):
if self.header:
scale_column = [self.header[i] for i in self.scale_column_idx]
else:
scale_column = ["_".join(["col", str(i)]) for i in self.scale_column_idx]
if not self.data_shape:
self.data_shape = -1
meta_proto_obj = ScaleMeta(method="min_max_scale",
mode=self.mode,
area=self.area,
scale_column=scale_column,
feat_upper=self._get_upper(self.data_shape),
feat_lower=self._get_lower(self.data_shape)
)
return meta_proto_obj
def __get_param(self, need_run):
min_max_scale_param_dict = {}
if self.header:
for i, header in enumerate(self.header):
if i in self.scale_column_idx:
param_obj = ColumnScaleParam(column_upper=self.column_max_value[i],
column_lower=self.column_min_value[i])
min_max_scale_param_dict[header] = param_obj
param_proto_obj = ScaleParam(col_scale_param=min_max_scale_param_dict,
header=self.header,
need_run=need_run)
return param_proto_obj
def export_model(self, need_run):
meta_obj = self.__get_meta()
param_obj = self.__get_param(need_run)
result = {
self.model_meta_name: meta_obj,
self.model_param_name: param_obj
}
return result | federatedml/feature/feature_scale/min_max_scale.py |
import functools
import numpy as np
from arch.api.proto.feature_scale_meta_pb2 import ScaleMeta
from arch.api.proto.feature_scale_param_pb2 import ScaleParam
from arch.api.proto.feature_scale_param_pb2 import ColumnScaleParam
from arch.api.utils import log_utils
from federatedml.feature.feature_scale.base_scale import BaseScale
LOGGER = log_utils.getLogger()
class MinMaxScale(BaseScale):
"""
Transforms features by scaling each feature to a given range,e.g.between minimum and maximum. The transformation is given by:
X_scale = (X - X.min) / (X.max - X.min), while X.min is the minimum value of feature, and X.max is the maximum
"""
def __init__(self, params):
super().__init__(params)
self.mode = params.mode
self.column_range = None
@staticmethod
def __scale(data, max_value_list, min_value_list, scale_value_list, process_cols_list):
"""
Scale operator for each column. The input data type is data_instance
"""
for i in process_cols_list:
value = data.features[i]
if value > max_value_list[i]:
value = max_value_list[i]
elif value < min_value_list[i]:
value = min_value_list[i]
data.features[i] = np.around((value - min_value_list[i]) / scale_value_list[i], 6)
return data
def fit(self, data):
"""
Apply min-max scale for input data
Parameters
----------
data: data_instance, input data
Returns
----------
fit_data:data_instance, data after scale
"""
self.column_min_value, self.column_max_value = self._get_min_max_value(data)
self.scale_column_idx = self._get_scale_column_idx(data)
self.header = self._get_header(data)
self.column_range = []
for i in range(len(self.column_max_value)):
scale = self.column_max_value[i] - self.column_min_value[i]
if scale < 0:
raise ValueError("scale value should large than 0")
elif np.abs(scale - 0) < 1e-6:
scale = 1
self.column_range.append(scale)
f = functools.partial(MinMaxScale.__scale, max_value_list=self.column_max_value,
min_value_list=self.column_min_value, scale_value_list=self.column_range,
process_cols_list=self.scale_column_idx)
fit_data = data.mapValues(f)
return fit_data
def transform(self, data):
"""
Transform input data using min-max scale with fit results
Parameters
----------
data: data_instance, input data
Returns
----------
transform_data:data_instance, data after transform
"""
self.column_range = []
for i in range(len(self.column_max_value)):
scale = self.column_max_value[i] - self.column_min_value[i]
if scale < 0:
raise ValueError("scale value should large than 0")
elif np.abs(scale - 0) < 1e-6:
scale = 1
self.column_range.append(scale)
f = functools.partial(MinMaxScale.__scale, max_value_list=self.column_max_value,
min_value_list=self.column_min_value, scale_value_list=self.column_range,
process_cols_list=self.scale_column_idx)
transform_data = data.mapValues(f)
return transform_data
def __get_meta(self):
if self.header:
scale_column = [self.header[i] for i in self.scale_column_idx]
else:
scale_column = ["_".join(["col", str(i)]) for i in self.scale_column_idx]
if not self.data_shape:
self.data_shape = -1
meta_proto_obj = ScaleMeta(method="min_max_scale",
mode=self.mode,
area=self.area,
scale_column=scale_column,
feat_upper=self._get_upper(self.data_shape),
feat_lower=self._get_lower(self.data_shape)
)
return meta_proto_obj
def __get_param(self, need_run):
min_max_scale_param_dict = {}
if self.header:
for i, header in enumerate(self.header):
if i in self.scale_column_idx:
param_obj = ColumnScaleParam(column_upper=self.column_max_value[i],
column_lower=self.column_min_value[i])
min_max_scale_param_dict[header] = param_obj
param_proto_obj = ScaleParam(col_scale_param=min_max_scale_param_dict,
header=self.header,
need_run=need_run)
return param_proto_obj
def export_model(self, need_run):
meta_obj = self.__get_meta()
param_obj = self.__get_param(need_run)
result = {
self.model_meta_name: meta_obj,
self.model_param_name: param_obj
}
return result | 0.792183 | 0.308835 |
from functools import partial
from kivy.clock import Clock
from kivy.graphics import Rectangle
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.spinner import Spinner
from kivy.uix.scrollview import ScrollView
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.label import Label
from kivy.uix.popup import Popup
class MatchingTiles(Popup):
def __init__(self, **kwargs):
super(MatchingTiles, self).__init__(**kwargs)
def display_matching_tiles(self):
# Check if Wang Tiles Map in Place
if self.wang_tiles_map:
self.wang_tiles_map.disable_user_interaction()
else:
print('Wang Tiles Map does not exist!')
return
popup_content = FloatLayout(size_hint=(1, 1), pos_hint={'center_x': 0.5, 'center_y': 0.5})
if len(self.wang_tiles_map.tiles.keys()) < 1:
print('No tiles found in Wang Tiles Map!')
print('Tiles:', self.wang_tiles_map.tiles)
return
# Add Widgets
popup = MatchingTiles(content=popup_content)
main_tile = Label(
pos_hint={'x': 0, 'top': 1.0},
size_hint=(None, None),
size=(200, 33),
text='_',
markup=True,
valign='middle',
halign='left'
)
main_tile.text_size = main_tile.size
with main_tile.canvas:
main_tile_rect = Rectangle(pos=(main_tile.pos[0] + 25, main_tile.pos[1]),
size=(self.displayed_size, self.displayed_size))
self.bind(pos=partial(self.update_lbl_rect, main_tile, main_tile_rect),
size=partial(self.update_lbl_rect, main_tile, main_tile_rect))
grid = GridLayout(rows=4, spacing=0, size_hint=(None, 1.0), pos_hint={'x': 0.0, 'y': 0.0})
north_lbl = Label(text='North\nMatches:', pos_hint={'x': 0, 'top': 0.8}, size_hint=(0.25, 0.2))
east_lbl = Label(text='East\nMatches:', pos_hint={'x': 0, 'top': 0.6}, size_hint=(0.25, 0.2))
south_lbl = Label(text='South\natches:', pos_hint={'x': 0, 'top': 0.4}, size_hint=(0.25, 0.2))
west_lbl = Label(text='West\nMatches:', pos_hint={'x': 0, 'top': 0.2}, size_hint=(0.25, 0.2))
popup_content.add_widget(north_lbl)
popup_content.add_widget(east_lbl)
popup_content.add_widget(south_lbl)
popup_content.add_widget(west_lbl)
north_box = BoxLayout(size_hint=(1, 0.25), orientation='horizontal')
east_box = BoxLayout(size_hint=(1, 0.25), orientation='horizontal')
south_box = BoxLayout(size_hint=(1, 0.25), orientation='horizontal')
west_box = BoxLayout(size_hint=(1, 0.25), orientation='horizontal')
# north_box.bind(size_hint_min_x=west_box.setter('width'))
# east_box.bind(size_hint_min_x=west_box.setter('width'))
# south_box.bind(size_hint_min_x=west_box.setter('width'))
# west_box.bind(size_hint_min_x=west_box.setter('width'))
# grid.add_widget(north_box)
# grid.add_widget(east_box)
# grid.add_widget(south_box)
# grid.add_widget(west_box)
grid.bind(minimum_width=grid.setter('width'))
scrollview = ScrollView(size_hint=(0.8, 0.8), size=popup.size, do_scroll_x=True, do_scroll_y=False,
pos_hint={'x': 0.2, 'y': 0})
scrollview.add_widget(grid)
popup_content.add_widget(scrollview)
spinner = Spinner(
pos_hint={'right': 1.0, 'top': 1.0},
size_hint=(None, None),
size=(100, 33),
values=sorted(self.wang_tiles_map.tiles.keys())
)
spinner.bind(
text=partial(self._update_match_spinner, main_tile, main_tile_rect, grid, north_box, east_box, south_box,
west_box))
popup_content.add_widget(main_tile)
popup_content.add_widget(spinner)
popup.bind(on_dismiss=self.wang_tiles_map.enable_user_interaction)
popup.open()
Clock.schedule_once(partial(self.update_lbl_rect, main_tile, main_tile_rect), 0.05)
class TileProbability(Popup):
def __init__(self, **kwargs):
super(TileProbability, self).__init__(**kwargs)
class TilesetChooser(Popup):
def __init__(self, **kwargs):
super(TilesetChooser, self).__init__(**kwargs)
class MapSizePopup(Popup):
def __init__(self, **kwargs):
super(MapSizePopup, self).__init__(**kwargs) | ui/popups.py | from functools import partial
from kivy.clock import Clock
from kivy.graphics import Rectangle
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.spinner import Spinner
from kivy.uix.scrollview import ScrollView
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.label import Label
from kivy.uix.popup import Popup
class MatchingTiles(Popup):
def __init__(self, **kwargs):
super(MatchingTiles, self).__init__(**kwargs)
def display_matching_tiles(self):
# Check if Wang Tiles Map in Place
if self.wang_tiles_map:
self.wang_tiles_map.disable_user_interaction()
else:
print('Wang Tiles Map does not exist!')
return
popup_content = FloatLayout(size_hint=(1, 1), pos_hint={'center_x': 0.5, 'center_y': 0.5})
if len(self.wang_tiles_map.tiles.keys()) < 1:
print('No tiles found in Wang Tiles Map!')
print('Tiles:', self.wang_tiles_map.tiles)
return
# Add Widgets
popup = MatchingTiles(content=popup_content)
main_tile = Label(
pos_hint={'x': 0, 'top': 1.0},
size_hint=(None, None),
size=(200, 33),
text='_',
markup=True,
valign='middle',
halign='left'
)
main_tile.text_size = main_tile.size
with main_tile.canvas:
main_tile_rect = Rectangle(pos=(main_tile.pos[0] + 25, main_tile.pos[1]),
size=(self.displayed_size, self.displayed_size))
self.bind(pos=partial(self.update_lbl_rect, main_tile, main_tile_rect),
size=partial(self.update_lbl_rect, main_tile, main_tile_rect))
grid = GridLayout(rows=4, spacing=0, size_hint=(None, 1.0), pos_hint={'x': 0.0, 'y': 0.0})
north_lbl = Label(text='North\nMatches:', pos_hint={'x': 0, 'top': 0.8}, size_hint=(0.25, 0.2))
east_lbl = Label(text='East\nMatches:', pos_hint={'x': 0, 'top': 0.6}, size_hint=(0.25, 0.2))
south_lbl = Label(text='South\natches:', pos_hint={'x': 0, 'top': 0.4}, size_hint=(0.25, 0.2))
west_lbl = Label(text='West\nMatches:', pos_hint={'x': 0, 'top': 0.2}, size_hint=(0.25, 0.2))
popup_content.add_widget(north_lbl)
popup_content.add_widget(east_lbl)
popup_content.add_widget(south_lbl)
popup_content.add_widget(west_lbl)
north_box = BoxLayout(size_hint=(1, 0.25), orientation='horizontal')
east_box = BoxLayout(size_hint=(1, 0.25), orientation='horizontal')
south_box = BoxLayout(size_hint=(1, 0.25), orientation='horizontal')
west_box = BoxLayout(size_hint=(1, 0.25), orientation='horizontal')
# north_box.bind(size_hint_min_x=west_box.setter('width'))
# east_box.bind(size_hint_min_x=west_box.setter('width'))
# south_box.bind(size_hint_min_x=west_box.setter('width'))
# west_box.bind(size_hint_min_x=west_box.setter('width'))
# grid.add_widget(north_box)
# grid.add_widget(east_box)
# grid.add_widget(south_box)
# grid.add_widget(west_box)
grid.bind(minimum_width=grid.setter('width'))
scrollview = ScrollView(size_hint=(0.8, 0.8), size=popup.size, do_scroll_x=True, do_scroll_y=False,
pos_hint={'x': 0.2, 'y': 0})
scrollview.add_widget(grid)
popup_content.add_widget(scrollview)
spinner = Spinner(
pos_hint={'right': 1.0, 'top': 1.0},
size_hint=(None, None),
size=(100, 33),
values=sorted(self.wang_tiles_map.tiles.keys())
)
spinner.bind(
text=partial(self._update_match_spinner, main_tile, main_tile_rect, grid, north_box, east_box, south_box,
west_box))
popup_content.add_widget(main_tile)
popup_content.add_widget(spinner)
popup.bind(on_dismiss=self.wang_tiles_map.enable_user_interaction)
popup.open()
Clock.schedule_once(partial(self.update_lbl_rect, main_tile, main_tile_rect), 0.05)
class TileProbability(Popup):
def __init__(self, **kwargs):
super(TileProbability, self).__init__(**kwargs)
class TilesetChooser(Popup):
def __init__(self, **kwargs):
super(TilesetChooser, self).__init__(**kwargs)
class MapSizePopup(Popup):
def __init__(self, **kwargs):
super(MapSizePopup, self).__init__(**kwargs) | 0.608245 | 0.172939 |
from __future__ import absolute_import
from __future__ import unicode_literals
import fluff
from casexml.apps.case.models import CommCareCase
from corehq.fluff.calculators.case import CasePropertyFilter
from custom.care_pathways import DOMAINS
from custom.care_pathways.utils import get_domain_configuration
# This calculator is necessary to generate 'date' field which is required in the database
from custom.utils.utils import flat_field
class Numerator(fluff.Calculator):
@fluff.null_emitter
def numerator(self, case):
yield None
class Property(fluff.Calculator):
@fluff.date_emitter
def value(self, case):
config = get_domain_configuration(case.domain).by_type_hierarchy
val = (case.get_case_property('crop_id') or case.get_case_property('crop_name') or '')
for chain in config:
if chain.val == val.lower():
for domain in chain.next:
for practice in domain.next:
ppt_prop = case.get_case_property(practice.val)
if ppt_prop:
yield {
'date': case.opened_on,
'value': 1 if ppt_prop == 'Y' else 0,
'group_by': [case.domain, chain.val, domain.val, practice.val]
}
def get_property(case, property):
configuration = get_domain_configuration(case.domain)
if property in configuration.geography_hierarchy:
result = case.get_case_property(configuration.geography_hierarchy[property]['prop'])
return result.lower() if result else result
return None
def get_mapping(case):
value_chains = get_domain_configuration(case.domain).by_type_hierarchy
return list({vc.val for vc in value_chains})
def get_domains_with_next(case):
configuration = get_domain_configuration(case.domain).by_type_hierarchy
domains = []
for chain in configuration:
domains.extend(chain.next)
return domains
def get_domains(case):
domains = get_domains_with_next(case)
return list({d.val for d in domains})
def get_practices(case):
domains = get_domains_with_next(case)
practices = []
for domain in domains:
practices.extend(domain.next)
return list({p.val for p in practices})
def get_gender(case):
gender = case.get_case_property('farmer_gender')
return '1' if gender and gender[0].lower() == 'f' else '0'
def get_ppt_year(case):
ppt_year = case.get_case_property('ppt_year')
return ppt_year.split('/')[0]
def get_group_leadership(case):
if case.domain in ('care-macf-malawi', 'care-macf-bangladesh'):
return 'Y' if case.get_case_property('farmer_role') == 'office_bearer' else 'N'
return case.get_case_property('farmer_is_leader')
def case_property(property):
return flat_field(lambda case: get_property(case, property))
class GeographyFluff(fluff.IndicatorDocument):
document_class = CommCareCase
document_filter = CasePropertyFilter(type='farmer_record')
domains = DOMAINS
group_by = ('domain',)
numerator = Numerator()
lvl_1 = case_property('lvl_1')
lvl_2 = case_property('lvl_2')
lvl_3 = case_property('lvl_3')
lvl_4 = case_property('lvl_4')
lvl_5 = case_property("lvl_5")
class FarmerRecordFluff(fluff.IndicatorDocument):
document_class = CommCareCase
document_filter = CasePropertyFilter(type='farmer_record')
domains = DOMAINS
group_by = ('domain',
fluff.AttributeGetter('value_chain', lambda c: get_mapping(c)),
fluff.AttributeGetter('domains', lambda c: get_domains(c)),
fluff.AttributeGetter('practices', lambda c: get_practices(c)))
lvl_1 = case_property('lvl_1')
lvl_2 = case_property('lvl_2')
lvl_3 = case_property('lvl_3')
lvl_4 = case_property('lvl_4')
lvl_5 = case_property("lvl_5")
case_status = flat_field(lambda c: c.get_case_property('case_status'))
group_id = flat_field(lambda c: c.get_case_property('group_id'))
group_name = flat_field(lambda c: c.get_case_property('group_name'))
ppt_year = flat_field(lambda c: get_ppt_year(c))
owner_id = flat_field(lambda c: c.get_case_property('owner_id'))
gender = flat_field(lambda c: get_gender(c))
group_leadership = flat_field(get_group_leadership)
real_or_test = flat_field(lambda c: c.get_case_property('test_or_real'))
schedule = flat_field(lambda c: (c.get_case_property('farmer_social_category') or '').lower())
group_case_id = flat_field(lambda c: c.get_case_property('group_case_id'))
prop = Property()
GeographyFluffPillow = GeographyFluff.pillow()
FarmerRecordFluffPillow = FarmerRecordFluff.pillow() | custom/care_pathways/models.py | from __future__ import absolute_import
from __future__ import unicode_literals
import fluff
from casexml.apps.case.models import CommCareCase
from corehq.fluff.calculators.case import CasePropertyFilter
from custom.care_pathways import DOMAINS
from custom.care_pathways.utils import get_domain_configuration
# This calculator is necessary to generate 'date' field which is required in the database
from custom.utils.utils import flat_field
class Numerator(fluff.Calculator):
@fluff.null_emitter
def numerator(self, case):
yield None
class Property(fluff.Calculator):
@fluff.date_emitter
def value(self, case):
config = get_domain_configuration(case.domain).by_type_hierarchy
val = (case.get_case_property('crop_id') or case.get_case_property('crop_name') or '')
for chain in config:
if chain.val == val.lower():
for domain in chain.next:
for practice in domain.next:
ppt_prop = case.get_case_property(practice.val)
if ppt_prop:
yield {
'date': case.opened_on,
'value': 1 if ppt_prop == 'Y' else 0,
'group_by': [case.domain, chain.val, domain.val, practice.val]
}
def get_property(case, property):
configuration = get_domain_configuration(case.domain)
if property in configuration.geography_hierarchy:
result = case.get_case_property(configuration.geography_hierarchy[property]['prop'])
return result.lower() if result else result
return None
def get_mapping(case):
value_chains = get_domain_configuration(case.domain).by_type_hierarchy
return list({vc.val for vc in value_chains})
def get_domains_with_next(case):
configuration = get_domain_configuration(case.domain).by_type_hierarchy
domains = []
for chain in configuration:
domains.extend(chain.next)
return domains
def get_domains(case):
domains = get_domains_with_next(case)
return list({d.val for d in domains})
def get_practices(case):
domains = get_domains_with_next(case)
practices = []
for domain in domains:
practices.extend(domain.next)
return list({p.val for p in practices})
def get_gender(case):
gender = case.get_case_property('farmer_gender')
return '1' if gender and gender[0].lower() == 'f' else '0'
def get_ppt_year(case):
ppt_year = case.get_case_property('ppt_year')
return ppt_year.split('/')[0]
def get_group_leadership(case):
if case.domain in ('care-macf-malawi', 'care-macf-bangladesh'):
return 'Y' if case.get_case_property('farmer_role') == 'office_bearer' else 'N'
return case.get_case_property('farmer_is_leader')
def case_property(property):
return flat_field(lambda case: get_property(case, property))
class GeographyFluff(fluff.IndicatorDocument):
document_class = CommCareCase
document_filter = CasePropertyFilter(type='farmer_record')
domains = DOMAINS
group_by = ('domain',)
numerator = Numerator()
lvl_1 = case_property('lvl_1')
lvl_2 = case_property('lvl_2')
lvl_3 = case_property('lvl_3')
lvl_4 = case_property('lvl_4')
lvl_5 = case_property("lvl_5")
class FarmerRecordFluff(fluff.IndicatorDocument):
document_class = CommCareCase
document_filter = CasePropertyFilter(type='farmer_record')
domains = DOMAINS
group_by = ('domain',
fluff.AttributeGetter('value_chain', lambda c: get_mapping(c)),
fluff.AttributeGetter('domains', lambda c: get_domains(c)),
fluff.AttributeGetter('practices', lambda c: get_practices(c)))
lvl_1 = case_property('lvl_1')
lvl_2 = case_property('lvl_2')
lvl_3 = case_property('lvl_3')
lvl_4 = case_property('lvl_4')
lvl_5 = case_property("lvl_5")
case_status = flat_field(lambda c: c.get_case_property('case_status'))
group_id = flat_field(lambda c: c.get_case_property('group_id'))
group_name = flat_field(lambda c: c.get_case_property('group_name'))
ppt_year = flat_field(lambda c: get_ppt_year(c))
owner_id = flat_field(lambda c: c.get_case_property('owner_id'))
gender = flat_field(lambda c: get_gender(c))
group_leadership = flat_field(get_group_leadership)
real_or_test = flat_field(lambda c: c.get_case_property('test_or_real'))
schedule = flat_field(lambda c: (c.get_case_property('farmer_social_category') or '').lower())
group_case_id = flat_field(lambda c: c.get_case_property('group_case_id'))
prop = Property()
GeographyFluffPillow = GeographyFluff.pillow()
FarmerRecordFluffPillow = FarmerRecordFluff.pillow() | 0.534612 | 0.172137 |
from context import _loss_func_semi_vectorized
from context import _loss_func_theano
import unittest
import sklearn.preprocessing
import theano
import numpy as np
class TheanoLossFunctionsTestSuite(unittest.TestCase):
"""Advanced test cases."""
def test_hinge_loss(self):
W = np.random.random((10, 1000)).astype(theano.config.floatX)
X = np.random.random((10000, 1000)).astype(theano.config.floatX)
Y = np.random.randint(0, 10, 10000).astype(np.int32)[:, np.newaxis]
to_binary_label = sklearn.preprocessing.MultiLabelBinarizer()
Y = to_binary_label.fit_transform(Y).astype(theano.config.floatX).T
reference_loss = _loss_func_semi_vectorized.hinge_loss(W, X, Y)
reference_gradient = _loss_func_semi_vectorized.hinge_loss_derivatives(W, X, Y)
hinge_loss, hinge_loss_derivatives = _loss_func_theano._compile_hinge_loss_func(compile_=True)
loss = hinge_loss(W, X, Y)
gradient = hinge_loss_derivatives(W, X, Y)
np.testing.assert_almost_equal(reference_loss, loss)
np.testing.assert_array_almost_equal(reference_gradient, gradient)
def test_softmax_loss(self):
W = np.random.random((10, 1000)).astype(theano.config.floatX)
X = np.random.random((10000, 1000)).astype(theano.config.floatX)
Y = np.random.randint(0, 10, 10000).astype(np.int32)[:, np.newaxis]
to_binary_label = sklearn.preprocessing.MultiLabelBinarizer()
Y = to_binary_label.fit_transform(Y).astype(theano.config.floatX).T
reference_loss = _loss_func_semi_vectorized.softmax_loss(W, X, Y)
reference_gradient = _loss_func_semi_vectorized.softmax_loss_derivatives(W, X, Y)
softmax_loss, softmax_loss_derivatives = _loss_func_theano._compile_softmax_loss_func(compile_=True)
loss = softmax_loss(W, X, Y)
gradient = softmax_loss_derivatives(W, X, Y)
np.testing.assert_almost_equal(reference_loss, loss)
np.testing.assert_array_almost_equal(reference_gradient, gradient)
def test_l1_penalty(self):
W = np.random.random((10, 1000))
reference_loss = _loss_func_semi_vectorized.l1_penalty(W)
reference_gradient = _loss_func_semi_vectorized.l1_penalty_der(W)
l1_penalty, l1_penalty_der = _loss_func_theano._compile_l1_penalty_func(compile_=True)
loss = l1_penalty(W)
gradient = l1_penalty_der(W)
np.testing.assert_almost_equal(reference_loss, loss)
np.testing.assert_array_almost_equal(reference_gradient, gradient)
def test_l2_penalty(self):
W = np.random.random((10, 1000))
reference_loss = _loss_func_semi_vectorized.l2_penalty(W)
reference_gradient = _loss_func_semi_vectorized.l2_penalty_der(W)
loss_func, gradient_func = _loss_func_theano._compile_l2_penalty_func(compile_=True)
loss = loss_func(W)
gradient = gradient_func(W)
np.testing.assert_almost_equal(reference_loss, loss)
np.testing.assert_array_almost_equal(reference_gradient, gradient)
    def test_get_loss_function(self):
        """Composed loss functions must agree for every loss/penalty/reg combo."""
        # W is raveled here, unlike the per-loss tests — presumably the composed
        # get_loss_function() API expects a 1-D parameter vector; confirm against
        # _loss_func_semi_vectorized.get_loss_function.
        W = np.random.random((10, 1000)).astype(theano.config.floatX).ravel()
        X = np.random.random((10000, 1000)).astype(theano.config.floatX)
        Y = np.random.randint(0, 10, 10000).astype(np.int32)[:, np.newaxis]
        to_binary_label = sklearn.preprocessing.MultiLabelBinarizer()
        Y = to_binary_label.fit_transform(Y).astype(theano.config.floatX).T
        reg_values = [0, 0.5]
        loss_values = ['softmax', 'hinge']
        penalty_values = ['L1', 'L2']
        # Exercise the full cross-product of regularization strength, loss and
        # penalty, comparing theano against the reference for each combination.
        for reg in reg_values:
            for loss_fn_name in loss_values:
                for penalty in penalty_values:
                    loss_ref_fun, loss_der_ref_fun = _loss_func_semi_vectorized.get_loss_function(loss_fn_name, penalty)
                    reference_loss = loss_ref_fun(W, X, Y, reg)
                    reference_gradient = loss_der_ref_fun(W, X, Y, reg)
                    loss_fun, loss_der_fun = _loss_func_theano.get_loss_function(loss_fn_name, penalty)
                    loss = loss_fun(W, X, Y, reg)
                    gradient = loss_der_fun(W, X, Y, reg)
                    np.testing.assert_almost_equal(reference_loss, loss)
                    np.testing.assert_array_almost_equal(reference_gradient, gradient)
# Run the suite directly: `python test_theano_loss_functions.py`.
# (Fix: dataset-extraction residue was fused onto the unittest.main() line.)
if __name__ == '__main__':
    unittest.main()
from context import _loss_func_semi_vectorized
from context import _loss_func_theano
import unittest
import sklearn.preprocessing
import theano
import numpy as np
class TheanoLossFunctionsTestSuite(unittest.TestCase):
    """Advanced test cases: theano-compiled losses vs. the reference impl."""

    @staticmethod
    def _make_wxy(flatten_w=False):
        """Build random weights, inputs and one-hot labels in floatX."""
        W = np.random.random((10, 1000)).astype(theano.config.floatX)
        if flatten_w:
            W = W.ravel()
        X = np.random.random((10000, 1000)).astype(theano.config.floatX)
        labels = np.random.randint(0, 10, 10000).astype(np.int32)[:, np.newaxis]
        binarizer = sklearn.preprocessing.MultiLabelBinarizer()
        Y = binarizer.fit_transform(labels).astype(theano.config.floatX).T
        return W, X, Y

    @staticmethod
    def _assert_matches(reference_loss, reference_gradient, loss, gradient):
        """Assert the theano results agree with the reference implementation."""
        np.testing.assert_almost_equal(reference_loss, loss)
        np.testing.assert_array_almost_equal(reference_gradient, gradient)

    def test_hinge_loss(self):
        """Theano hinge loss/gradient must match the reference."""
        W, X, Y = self._make_wxy()
        ref_loss = _loss_func_semi_vectorized.hinge_loss(W, X, Y)
        ref_grad = _loss_func_semi_vectorized.hinge_loss_derivatives(W, X, Y)
        loss_fn, grad_fn = _loss_func_theano._compile_hinge_loss_func(compile_=True)
        self._assert_matches(ref_loss, ref_grad, loss_fn(W, X, Y), grad_fn(W, X, Y))

    def test_softmax_loss(self):
        """Theano softmax loss/gradient must match the reference."""
        W, X, Y = self._make_wxy()
        ref_loss = _loss_func_semi_vectorized.softmax_loss(W, X, Y)
        ref_grad = _loss_func_semi_vectorized.softmax_loss_derivatives(W, X, Y)
        loss_fn, grad_fn = _loss_func_theano._compile_softmax_loss_func(compile_=True)
        self._assert_matches(ref_loss, ref_grad, loss_fn(W, X, Y), grad_fn(W, X, Y))

    def test_l1_penalty(self):
        """Theano L1 penalty/derivative must match the reference."""
        W = np.random.random((10, 1000))
        ref_loss = _loss_func_semi_vectorized.l1_penalty(W)
        ref_grad = _loss_func_semi_vectorized.l1_penalty_der(W)
        loss_fn, grad_fn = _loss_func_theano._compile_l1_penalty_func(compile_=True)
        self._assert_matches(ref_loss, ref_grad, loss_fn(W), grad_fn(W))

    def test_l2_penalty(self):
        """Theano L2 penalty/derivative must match the reference."""
        W = np.random.random((10, 1000))
        ref_loss = _loss_func_semi_vectorized.l2_penalty(W)
        ref_grad = _loss_func_semi_vectorized.l2_penalty_der(W)
        loss_fn, grad_fn = _loss_func_theano._compile_l2_penalty_func(compile_=True)
        self._assert_matches(ref_loss, ref_grad, loss_fn(W), grad_fn(W))

    def test_get_loss_function(self):
        """Composed losses must agree for every loss/penalty/reg combination."""
        # Raveled weights: the composed API works on a 1-D parameter vector.
        W, X, Y = self._make_wxy(flatten_w=True)
        for reg in [0, 0.5]:
            for loss_fn_name in ['softmax', 'hinge']:
                for penalty in ['L1', 'L2']:
                    ref_loss_fn, ref_grad_fn = _loss_func_semi_vectorized.get_loss_function(loss_fn_name, penalty)
                    ref_loss = ref_loss_fn(W, X, Y, reg)
                    ref_grad = ref_grad_fn(W, X, Y, reg)
                    loss_fn, grad_fn = _loss_func_theano.get_loss_function(loss_fn_name, penalty)
                    self._assert_matches(
                        ref_loss, ref_grad, loss_fn(W, X, Y, reg), grad_fn(W, X, Y, reg)
                    )
# Run the suite directly: `python test_theano_loss_functions.py`.
# (Fix: dataset-extraction residue "| 0.841403 | 0.665438 |" was fused onto
# the unittest.main() line.)
if __name__ == '__main__':
    unittest.main()
import asyncio
import audioop
import enum
import functools
import logging
from typing import Callable, Dict, Optional, Union
import discord
from concord.ext.audio.exceptions import AudioExtensionError
log = logging.getLogger(__name__)
class State:
    """Global registry mapping voice-client key IDs to audio states."""

    _audio_states: Dict[int, "AudioState"]

    def __init__(self):
        self._audio_states = {}

    def get_audio_state(
        self, voice_client_source: Union[discord.Guild, discord.abc.Connectable]
    ) -> "AudioState":
        """Returns audio state for given voice client source.

        Take a note, that returned audio state may be connected to another
        channel, due to it's voice client key is equal to given channel's key.
        Check for connected channel and move to desired one, if needed.

        Audio state will be created, if isn't created yet.

        Args:
            voice_client_source: The source, by which voice client can be
                identified (where voice client is in using) and audio state can
                be found.

        Returns:
            Audio state instance.
        """
        if isinstance(voice_client_source, discord.Guild):
            key_id = voice_client_source.id
        elif isinstance(voice_client_source, discord.abc.Connectable):
            key_id, _ = voice_client_source._get_voice_client_key()
        else:
            key_id = None
        try:
            return self._audio_states[key_id]
        except KeyError:
            state = self._audio_states[key_id] = AudioState(key_id)
            return state
class AudioStatus(enum.Enum):
    """Reason passed to an audio source's finalizer when the source stops."""
    SOURCE_ENDED = enum.auto()  # source returned no data in read()
    SOURCE_CLEANED = enum.auto()  # master source cleanup()
    SOURCE_REMOVED = enum.auto()  # explicit remove_source() (default reason)
    VOICE_CLIENT_DISCONNECTED = enum.auto()  # voice client disconnected
    VOICE_CLIENT_REMOVED = enum.auto()  # remove_voice_client() was called
class AudioState(discord.AudioSource):
    """Audio state class: mixes registered sources into one PCM stream.

    Contains audio sources, the voice client and other connection-related
    information for each active voice connection.  The instance is itself a
    :class:`discord.AudioSource`; :meth:`read` mixes all registered sources.

    .. warning::

        Public API is not thread safe.

    Attributes:
        _key_id: The voice client key ID this state is associated to.
        _voice_client: Voice client instance.
        _voice_client_disconnect_source: The original ``disconnect`` method of
            the voice client.  It is kept because ``disconnect`` is replaced
            with a listener while the voice client is owned by this state.
        _loop: Loop where main tasks of the audio state should happen.
        _audio_sources: Mapping of audio source -> finalizer (or ``None``).
        _master_source: Built master source (volume wrapper around ``self``).
    """

    _key_id: int
    _voice_client: Optional[discord.VoiceClient]
    _voice_client_disconnect_source: Optional[Callable]
    _loop: asyncio.AbstractEventLoop
    _audio_sources: Dict[discord.AudioSource, Callable]
    _master_source: discord.PCMVolumeTransformer
    def __init__(self, key_id):
        """Create an empty audio state bound to voice-client key *key_id*."""
        self._key_id = key_id
        self._voice_client = None
        self._voice_client_disconnect_source = None
        self._loop = None
        self._audio_sources = {}
        # The state itself is the mixing source; wrapping it in a
        # PCMVolumeTransformer provides the master volume control.
        self._master_source = discord.PCMVolumeTransformer(self)
        log.info(
            f"Audio state initialized (Voice client key ID #{self._key_id})"
        )
    @property
    def voice_client(self) -> Optional[discord.VoiceClient]:
        """Voice client of the state.

        It can be ``None`` if the voice client is not created yet, or if it
        was removed by disconnecting.
        """
        return self._voice_client

    @property
    def channel(self) -> discord.abc.Connectable:
        """Channel currently connected to (``None`` without a voice client)."""
        return self._voice_client.channel if self._voice_client else None

    @property
    def guild(self) -> Optional[discord.Guild]:
        """Guild currently connected to, if applicable."""
        return self._voice_client.guild if self._voice_client else None

    @property
    def master_volume(self) -> float:
        """Master volume for all audio sources.

        Each audio source can have its own volume, if needed.  Master volume
        and the audio sources' volumes are independent.

        Value is a float and can be from 0.0 to 2.0.
        """
        return self._master_source.volume

    @master_volume.setter
    def master_volume(self, value: float):
        # Clamp the requested value into the documented [0.0, 2.0] range.
        self._master_source.volume = float(max(min(value, 2.0), 0.0))
    def set_voice_client(self, voice_client: discord.VoiceClient):
        """Set a new voice client on the state.

        If the same client is provided, does nothing.

        If another voice client is present, it is removed first and all
        playing audio sources are immediately finished.

        TODO: Hey, we can change voice client, that is owned by guild/channel
        with a voice client key != our voice client key. Do something!

        Args:
            voice_client: Voice client to set.

        Raises:
            ValueError: If not a :class:`discord.VoiceClient` provided, or
                the voice client is not connected.
        """
        if not isinstance(voice_client, discord.VoiceClient):
            raise ValueError("Not a voice client")
        if voice_client == self._voice_client:
            return
        if not voice_client.is_connected():
            raise ValueError("Voice client is not connected")
        if self._voice_client is not None:
            self.remove_voice_client()
        self._loop = voice_client.loop
        self._voice_client = voice_client
        # Monkey-patch disconnect: _on_disconnect awaits the original method,
        # then finalizes all sources.  Restored in remove_voice_client().
        self._voice_client_disconnect_source = voice_client.disconnect
        voice_client.disconnect = self._on_disconnect
        log.debug(f"Voice client has set (Voice client key ID #{self._key_id})")
    def remove_voice_client(self):
        """Remove the voice client from the state.

        All currently playing audio sources are immediately finished.
        """
        if self._voice_client is None:
            return
        self._on_end(reason=AudioStatus.VOICE_CLIENT_REMOVED)
        self._voice_client.stop()
        # Restore the disconnect method patched in set_voice_client().
        self._voice_client.disconnect = self._voice_client_disconnect_source
        self._voice_client_disconnect_source = None
        self._voice_client = None
        self._loop = None
        log.debug(
            f"Voice client has removed (Voice client key ID #{self._key_id})"
        )
    def add_source(
        self,
        source: discord.AudioSource,
        *,
        finalizer: Optional[Callable] = None,
    ):
        """Add an audio source and transmit it via the voice client.

        If the audio source is already present, the ``finalizer`` is replaced.

        Args:
            source: Audio source to add.
            finalizer: Callable invoked as ``finalizer(source, reason)`` when
                the source is removed.  Possible reasons are enumerated in
                :class:`AudioStatus`.

        Raises:
            ValueError: If not an :class:`AudioSource` instance provided.
            concord.ext.audio.exceptions.AudioExtensionError: If the voice
                client is not present.
        """
        if not isinstance(source, discord.AudioSource):
            raise ValueError("Not an audio source")
        if self._voice_client is None:
            raise AudioExtensionError("Voice client is not present")
        self._audio_sources[source] = finalizer
        log.debug(f"Source has added (Voice client key ID #{self._key_id})")
        # TODO: Fast adding after player stopping can clean this source as well.
        # Kick off playback of the mixing master source if nothing is playing
        # (peeks at the private _player attribute of discord.VoiceClient).
        if self._voice_client._player is None:
            self._voice_client.play(self._master_source)
def remove_source(
self, source: discord.AudioSource, *, reason=AudioStatus.SOURCE_REMOVED
):
"""Remove audio source and stop transmit it via voice client.
Args:
source: Audio source to remove.
reason: Reason, provided to the audio source's finalizer.
Raises:
KeyError: If source is not present.
"""
finalizer = self._audio_sources.pop(source)
finalizer(source, reason)
log.debug(f"Source has removed (Voice client key ID #{self._key_id})")
def _on_end(self, *, reason=AudioStatus.SOURCE_REMOVED):
while len(self._audio_sources) > 0:
for source in self._audio_sources:
try:
self.remove_source(source, reason=reason)
except KeyError:
continue
    async def _on_disconnect(self, *args, **kwargs):
        """Listener installed in place of ``VoiceClient.disconnect``."""
        # Run the real disconnect first, then finalize sources and drop the
        # (now disconnected) voice client.
        await self._voice_client_disconnect_source(*args, **kwargs)
        self._on_end(reason=AudioStatus.VOICE_CLIENT_DISCONNECTED)
        self.remove_voice_client()
    def read(self) -> bytes:
        """Mix one frame from every active source into a single PCM frame.

        Called from the voice client's player thread; exhausted sources are
        scheduled for removal on the event loop thread, not removed inline.
        """
        fragments = []
        # TODO: We need to fix this somehow...
        # Copying dict each time is not a good way
        for source in self._audio_sources.copy():
            fragment = source.read()
            if len(fragment) == 0:
                # Source is exhausted; removal must happen on the loop thread.
                self._loop.call_soon_threadsafe(
                    functools.partial(
                        self.remove_source,
                        source,
                        reason=AudioStatus.SOURCE_ENDED,
                    )
                )
                continue
            fragments.append(fragment)
        if len(fragments) == 0:
            return b""
        # Truncate all fragments to the shortest one so they can be summed.
        min_size = functools.reduce(
            lambda x, y: min(x, len(y)), fragments, len(fragments[0])
        )
        fragments = [
            fragment[0:min_size] if len(fragment) > min_size else fragment
            for fragment in fragments
        ]
        # audioop.add with width=2 mixes the fragments as 16-bit PCM samples.
        return functools.reduce(lambda x, y: audioop.add(x, y, 2), fragments)
def cleanup(self):
self._voice_client.stop()
self._loop.call_soon_threadsafe(
functools.partial(self._on_end, reason=AudioStatus.SOURCE_CLEANED)
) | concord/ext/audio/state.py | import asyncio
import audioop
import enum
import functools
import logging
from typing import Callable, Dict, Optional, Union
import discord
from concord.ext.audio.exceptions import AudioExtensionError
log = logging.getLogger(__name__)
class State:
    """Global state with guild's audio states (key ID -> AudioState)."""

    _audio_states: Dict[int, "AudioState"]

    def __init__(self):
        self._audio_states = {}

    def get_audio_state(
        self, voice_client_source: Union[discord.Guild, discord.abc.Connectable]
    ) -> "AudioState":
        """Returns audio state for given voice client source.

        Take a note, that returned audio state may be connected to another
        channel, due to it's voice client key is equal to given channel's key.
        Check for connected channel and move to desired one, if needed.

        Audio state will be created, if isn't created yet.

        Args:
            voice_client_source: The source, by which voice client can be
                identified (where voice client is in using) and audio state can
                be found.

        Returns:
            Audio state instance.
        """
        key_id = None
        if isinstance(voice_client_source, discord.Guild):
            key_id = voice_client_source.id
        elif isinstance(voice_client_source, discord.abc.Connectable):
            # Private discord.py API: (key_id, key_type) for the connectable.
            key_id, _ = voice_client_source._get_voice_client_key()
        audio_state = self._audio_states.get(key_id)
        if audio_state is None:
            # Lazily create and cache the state on first request.
            audio_state = self._audio_states[key_id] = AudioState(key_id)
        return audio_state
class AudioStatus(enum.Enum):
    """Reason passed to an audio source's finalizer when the source stops."""
    SOURCE_ENDED = enum.auto()  # source returned no data in read()
    SOURCE_CLEANED = enum.auto()  # master source cleanup()
    SOURCE_REMOVED = enum.auto()  # explicit remove_source() (default reason)
    VOICE_CLIENT_DISCONNECTED = enum.auto()  # voice client disconnected
    VOICE_CLIENT_REMOVED = enum.auto()  # remove_voice_client() was called
class AudioState(discord.AudioSource):
    """Audio state class: mixes registered sources into one PCM stream.

    Contains audio sources, the voice client and other connection-related
    information for each active voice connection.  The instance is itself a
    :class:`discord.AudioSource`; :meth:`read` mixes all registered sources.

    .. warning::

        Public API is not thread safe.

    Attributes:
        _key_id: The voice client key ID this state is associated to.
        _voice_client: Voice client instance.
        _voice_client_disconnect_source: The original ``disconnect`` method of
            the voice client; it is replaced with a listener while the voice
            client is owned by this state.
        _loop: Loop where main tasks of the audio state should happen.
        _audio_sources: Mapping of audio source -> finalizer (or ``None``).
        _master_source: Built master source (volume wrapper around ``self``).
    """

    _key_id: int
    _voice_client: Optional[discord.VoiceClient]
    _voice_client_disconnect_source: Optional[Callable]
    _loop: asyncio.AbstractEventLoop
    _audio_sources: Dict[discord.AudioSource, Callable]
    _master_source: discord.PCMVolumeTransformer
    def __init__(self, key_id):
        """Create an empty audio state bound to voice-client key *key_id*."""
        self._key_id = key_id
        self._voice_client = None
        self._voice_client_disconnect_source = None
        self._loop = None
        self._audio_sources = {}
        # The state itself is the mixing source; the transformer adds volume.
        self._master_source = discord.PCMVolumeTransformer(self)
        log.info(
            f"Audio state initialized (Voice client key ID #{self._key_id})"
        )
    @property
    def voice_client(self) -> Optional[discord.VoiceClient]:
        """Voice client of the state.

        It can be ``None`` if the voice client is not created yet, or if it
        was removed by disconnecting.
        """
        return self._voice_client

    @property
    def channel(self) -> discord.abc.Connectable:
        """Channel currently connected to (``None`` without a voice client)."""
        return self._voice_client.channel if self._voice_client else None

    @property
    def guild(self) -> Optional[discord.Guild]:
        """Guild currently connected to, if applicable."""
        return self._voice_client.guild if self._voice_client else None

    @property
    def master_volume(self) -> float:
        """Master volume for all audio sources.

        Each audio source can have its own volume, if needed.  Master volume
        and the audio sources' volumes are independent.

        Value is a float and can be from 0.0 to 2.0.
        """
        return self._master_source.volume

    @master_volume.setter
    def master_volume(self, value: float):
        # Clamp the requested value into the documented [0.0, 2.0] range.
        self._master_source.volume = float(max(min(value, 2.0), 0.0))
    def set_voice_client(self, voice_client: discord.VoiceClient):
        """Set a new voice client on the state.

        If the same client is provided, does nothing.

        If another voice client is present, it is removed first and all
        playing audio sources are immediately finished.

        TODO: Hey, we can change voice client, that is owned by guild/channel
        with a voice client key != our voice client key. Do something!

        Args:
            voice_client: Voice client to set.

        Raises:
            ValueError: If not a :class:`discord.VoiceClient` provided, or
                the voice client is not connected.
        """
        if not isinstance(voice_client, discord.VoiceClient):
            raise ValueError("Not a voice client")
        if voice_client == self._voice_client:
            return
        if not voice_client.is_connected():
            raise ValueError("Voice client is not connected")
        if self._voice_client is not None:
            self.remove_voice_client()
        self._loop = voice_client.loop
        self._voice_client = voice_client
        # Monkey-patch disconnect: _on_disconnect awaits the original method,
        # then finalizes all sources.  Restored in remove_voice_client().
        self._voice_client_disconnect_source = voice_client.disconnect
        voice_client.disconnect = self._on_disconnect
        log.debug(f"Voice client has set (Voice client key ID #{self._key_id})")
    def remove_voice_client(self):
        """Remove the voice client from the state.

        All currently playing audio sources are immediately finished.
        """
        if self._voice_client is None:
            return
        self._on_end(reason=AudioStatus.VOICE_CLIENT_REMOVED)
        self._voice_client.stop()
        # Restore the disconnect method patched in set_voice_client().
        self._voice_client.disconnect = self._voice_client_disconnect_source
        self._voice_client_disconnect_source = None
        self._voice_client = None
        self._loop = None
        log.debug(
            f"Voice client has removed (Voice client key ID #{self._key_id})"
        )
    def add_source(
        self,
        source: discord.AudioSource,
        *,
        finalizer: Optional[Callable] = None,
    ):
        """Add an audio source and transmit it via the voice client.

        If the audio source is already present, the ``finalizer`` is replaced.

        Args:
            source: Audio source to add.
            finalizer: Callable invoked as ``finalizer(source, reason)`` when
                the source is removed.  Possible reasons are enumerated in
                :class:`AudioStatus`.

        Raises:
            ValueError: If not an :class:`AudioSource` instance provided.
            concord.ext.audio.exceptions.AudioExtensionError: If the voice
                client is not present.
        """
        if not isinstance(source, discord.AudioSource):
            raise ValueError("Not an audio source")
        if self._voice_client is None:
            raise AudioExtensionError("Voice client is not present")
        self._audio_sources[source] = finalizer
        log.debug(f"Source has added (Voice client key ID #{self._key_id})")
        # TODO: Fast adding after player stopping can clean this source as well.
        # Kick off playback of the mixing master source if nothing is playing
        # (peeks at the private _player attribute of discord.VoiceClient).
        if self._voice_client._player is None:
            self._voice_client.play(self._master_source)
def remove_source(
self, source: discord.AudioSource, *, reason=AudioStatus.SOURCE_REMOVED
):
"""Remove audio source and stop transmit it via voice client.
Args:
source: Audio source to remove.
reason: Reason, provided to the audio source's finalizer.
Raises:
KeyError: If source is not present.
"""
finalizer = self._audio_sources.pop(source)
finalizer(source, reason)
log.debug(f"Source has removed (Voice client key ID #{self._key_id})")
def _on_end(self, *, reason=AudioStatus.SOURCE_REMOVED):
while len(self._audio_sources) > 0:
for source in self._audio_sources:
try:
self.remove_source(source, reason=reason)
except KeyError:
continue
    async def _on_disconnect(self, *args, **kwargs):
        """Listener installed in place of ``VoiceClient.disconnect``."""
        # Run the real disconnect first, then finalize sources and drop the
        # (now disconnected) voice client.
        await self._voice_client_disconnect_source(*args, **kwargs)
        self._on_end(reason=AudioStatus.VOICE_CLIENT_DISCONNECTED)
        self.remove_voice_client()
    def read(self) -> bytes:
        """Mix one frame from every active source into a single PCM frame.

        Called from the voice client's player thread; exhausted sources are
        scheduled for removal on the event loop thread, not removed inline.
        """
        fragments = []
        # TODO: We need to fix this somehow...
        # Copying dict each time is not a good way
        for source in self._audio_sources.copy():
            fragment = source.read()
            if len(fragment) == 0:
                # Source is exhausted; removal must happen on the loop thread.
                self._loop.call_soon_threadsafe(
                    functools.partial(
                        self.remove_source,
                        source,
                        reason=AudioStatus.SOURCE_ENDED,
                    )
                )
                continue
            fragments.append(fragment)
        if len(fragments) == 0:
            return b""
        # Truncate all fragments to the shortest one so they can be summed.
        min_size = functools.reduce(
            lambda x, y: min(x, len(y)), fragments, len(fragments[0])
        )
        fragments = [
            fragment[0:min_size] if len(fragment) > min_size else fragment
            for fragment in fragments
        ]
        # audioop.add with width=2 mixes the fragments as 16-bit PCM samples.
        return functools.reduce(lambda x, y: audioop.add(x, y, 2), fragments)
    def cleanup(self):
        """Stop playback and finalize all sources with SOURCE_CLEANED."""
        self._voice_client.stop()
        # Finalizers must run on the event loop thread, not the caller's.
        self._loop.call_soon_threadsafe(
            functools.partial(self._on_end, reason=AudioStatus.SOURCE_CLEANED)
        )
from zvt.domain import FinanceDebtpayingAbility
from zvt.recorders.emquantapi.finance.base_china_stock_finance_recorder import EmBaseChinaStockFinanceRecorder
from zvt.utils.utils import add_func_to_value, first_item_to_float
# Mapping of zvt schema field names to EM Quant API indicator codes.
# Commented-out entries have no indicator code assigned yet.
financial_debtpayingability_map = {
    'debt_asset_ratio': 'LIBILITYTOASSET',  # debt-to-asset ratio
    'conservative_quick_ratio': 'CONSERVQUICKRATIO',  # conservative quick ratio
    'equity_ratio': 'LIBILITYTOEQUITY',  # equity (debt-to-equity) ratio
    'equity_to_interest_libility': 'EQUITYTOINTERESTLIBILITY',  # parent shareholders' equity / interest-bearing debt
    'equity_to_libility': 'EQUITYTOLIBILITY',  # parent shareholders' equity / total liabilities
    # 'cash_to_current_libility': 'CASHTOCL',  # cash / current liabilities
    'cfo_to_interest_libility': 'CFOTOINTERESTLIBILITY',  # net operating cash flow / interest-bearing debt
    'cfo_to_libility': 'CFOTOLIBILITY',  # net operating cash flow / total liabilities
    'cfo_to_net_libility': 'CFOTONETLIBILITY',  # net operating cash flow / net debt
    'cfo_to_cl': 'CFOTOSHORTLIBILITY',  # net operating cash flow / current liabilities
    'current_ratio': 'CURRENTTATIO',  # current ratio
    'quick_ratio': 'QUICKTATIO',  # quick ratio
    # 'ebitda_to_int_libility': 'EBITDATOINTLIBILITY',  # EBITDA / interest-bearing debt
    'ebitda_to_libility': 'EBITDATOLIBILITY',  # EBITDA / total liabilities
    # 'op_to_libility': 'OPTOLIBILITY',  # operating profit / total liabilities
    # 'op_to_cl': 'OPTOCL',  # operating profit / current liabilities
    'tangible_asset_to_interest_libility': 'TANGIBLEASSETTOINTERESTLIBILITY',  # tangible assets / interest-bearing debt
    'tangible_asset_to_libility': 'TANGIBLEASSETTOLIBILITY',  # tangible assets / total liabilities
    'tangible_asset_to_net_libility': 'TANGIBLEASSETTONETLIBILITY',  # tangible assets / net debt
    # 'times_inte_cf': '',  # cash-flow interest coverage ratio
    # 'n_cf_opa_ncl': '',  # net operating cash flow / non-current liabilities
    # 'cash_icl': '',  # cash / interest-bearing current liabilities
    # 'tl_teap': '',  # total liabilities / equity attributable to parent
    # 'ncl_wc': '',  # non-current liabilities / working capital
    # 'n_cf_nfa_cl': '',  # net non-financing cash flow / current liabilities
    # 'n_cf_nfa_liab': '',  # net non-financing cash flow / total liabilities
    # 'times_inte_ebit': '',  # EBIT interest coverage ratio
    # 'times_inte_ebitda': '',  # EBITDA interest coverage ratio
}
# Presumably wraps each mapped value with the first_item_to_float parser
# (see zvt.utils.utils.add_func_to_value) — confirm against that helper.
add_func_to_value(financial_debtpayingability_map, first_item_to_float)
class ChinaStockFinanceDebtpayingAbilityRecorder(EmBaseChinaStockFinanceRecorder):
    """
    Financial indicators — debt-paying (solvency) ability.
    """
    data_schema = FinanceDebtpayingAbility
    finance_report_type = 'FinanceDebtpayingAbility'
    data_type = 5

    def get_data_map(self):
        # Schema field -> EM Quant API indicator code mapping used by the base
        # recorder when fetching and persisting rows.
        return financial_debtpayingability_map
__all__ = ['ChinaStockFinanceDebtpayingAbilityRecorder']

# Manual smoke run for a single stock code.
# (Fix: dataset-extraction residue was fused onto the recorder.run() line.)
if __name__ == '__main__':
    # init_log('income_statement.log')
    recorder = ChinaStockFinanceDebtpayingAbilityRecorder(codes=['002572'])
    recorder.run()
from zvt.recorders.emquantapi.finance.base_china_stock_finance_recorder import EmBaseChinaStockFinanceRecorder
from zvt.utils.utils import add_func_to_value, first_item_to_float
# Mapping of zvt schema field names to EM Quant API indicator codes.
# Commented-out entries have no indicator code assigned yet.
financial_debtpayingability_map = {
    'debt_asset_ratio': 'LIBILITYTOASSET',  # debt-to-asset ratio
    'conservative_quick_ratio': 'CONSERVQUICKRATIO',  # conservative quick ratio
    'equity_ratio': 'LIBILITYTOEQUITY',  # equity (debt-to-equity) ratio
    'equity_to_interest_libility': 'EQUITYTOINTERESTLIBILITY',  # parent shareholders' equity / interest-bearing debt
    'equity_to_libility': 'EQUITYTOLIBILITY',  # parent shareholders' equity / total liabilities
    # 'cash_to_current_libility': 'CASHTOCL',  # cash / current liabilities
    'cfo_to_interest_libility': 'CFOTOINTERESTLIBILITY',  # net operating cash flow / interest-bearing debt
    'cfo_to_libility': 'CFOTOLIBILITY',  # net operating cash flow / total liabilities
    'cfo_to_net_libility': 'CFOTONETLIBILITY',  # net operating cash flow / net debt
    'cfo_to_cl': 'CFOTOSHORTLIBILITY',  # net operating cash flow / current liabilities
    'current_ratio': 'CURRENTTATIO',  # current ratio
    'quick_ratio': 'QUICKTATIO',  # quick ratio
    # 'ebitda_to_int_libility': 'EBITDATOINTLIBILITY',  # EBITDA / interest-bearing debt
    'ebitda_to_libility': 'EBITDATOLIBILITY',  # EBITDA / total liabilities
    # 'op_to_libility': 'OPTOLIBILITY',  # operating profit / total liabilities
    # 'op_to_cl': 'OPTOCL',  # operating profit / current liabilities
    'tangible_asset_to_interest_libility': 'TANGIBLEASSETTOINTERESTLIBILITY',  # tangible assets / interest-bearing debt
    'tangible_asset_to_libility': 'TANGIBLEASSETTOLIBILITY',  # tangible assets / total liabilities
    'tangible_asset_to_net_libility': 'TANGIBLEASSETTONETLIBILITY',  # tangible assets / net debt
    # 'times_inte_cf': '',  # cash-flow interest coverage ratio
    # 'n_cf_opa_ncl': '',  # net operating cash flow / non-current liabilities
    # 'cash_icl': '',  # cash / interest-bearing current liabilities
    # 'tl_teap': '',  # total liabilities / equity attributable to parent
    # 'ncl_wc': '',  # non-current liabilities / working capital
    # 'n_cf_nfa_cl': '',  # net non-financing cash flow / current liabilities
    # 'n_cf_nfa_liab': '',  # net non-financing cash flow / total liabilities
    # 'times_inte_ebit': '',  # EBIT interest coverage ratio
    # 'times_inte_ebitda': '',  # EBITDA interest coverage ratio
}
# Presumably wraps each mapped value with the first_item_to_float parser
# (see zvt.utils.utils.add_func_to_value) — confirm against that helper.
add_func_to_value(financial_debtpayingability_map, first_item_to_float)
class ChinaStockFinanceDebtpayingAbilityRecorder(EmBaseChinaStockFinanceRecorder):
    """
    Financial indicators — debt-paying (solvency) ability.
    """
    data_schema = FinanceDebtpayingAbility
    finance_report_type = 'FinanceDebtpayingAbility'
    data_type = 5

    def get_data_map(self):
        # Schema field -> EM Quant API indicator code mapping used by the base
        # recorder when fetching and persisting rows.
        return financial_debtpayingability_map
__all__ = ['ChinaStockFinanceDebtpayingAbilityRecorder']

# Manual smoke run for a single stock code.
# (Fix: dataset-extraction residue "| 0.310172 | 0.242923 |" was fused onto
# the recorder.run() line.)
if __name__ == '__main__':
    # init_log('income_statement.log')
    recorder = ChinaStockFinanceDebtpayingAbilityRecorder(codes=['002572'])
    recorder.run()
import re
from collections import namedtuple
from itertools import chain
from pokertools import (
CANONICAL_HOLECARDS_NAMES,
SUIT_COMBINATIONS,
SUIT_PERMUATIONS,
SUITS,
get_numerical_rank,
get_string_rank,
holecards,
)
#------------------------------------------------------------------------------
# Tokeniser
# NOTE: the backreferences (\2, \4, \5, \7) are numbered against the capture
# groups of the COMBINED master pattern below, not each entry in isolation —
# reordering or editing entries requires renumbering them.
token_specification = [                                 # Examples:
    ("RANGE", r"[2-9AKQJT]{2}(s|o)-[2-9AKQJT]{2}\2"),   # AKs-A2s
    ("RANGE_PAIR", r"([2-9AKQJT])\4-([2-9AKQJT])\5"),   # 99-55
    ("PAIR", r"([2-9AKQJT])\7\+?"),                     # 33
    ("SINGLE_COMBO", r"([2-9AKQJT][cdhs]){2}"),         # AhKh
    ("MULTI_COMBO", r"[2-9AKQJT]{2}(s|o)\+?"),          # QJo
    ("SEPERATOR", r"\s*,\s*"),
    ("CATCHALL", r".+")                                 # anything else = error
]
# One alternation of named groups; Match.lastgroup yields the token type.
master_pat = re.compile("|".join("(?P<{}>{})".format(*pair) for pair in token_specification))
# A lexed token: ``type`` is the matched named group, ``value`` the matched text.
Token = namedtuple("Token", ["type", "value"])


class TokeniserError(Exception):
    """Raised when the input text does not match the expected grammar."""
    pass


def generate_tokens(pattern, text):
    """Yield a Token for each successive match of *pattern* across *text*."""
    scan = pattern.scanner(text)
    match = scan.match()
    while match is not None:
        yield Token(match.lastgroup, match.group())
        match = scan.match()
def canonise(holecards):
    """
    Takes a single pair of cards and returns the canonical representation of
    that pair according to CANONICAL_HOLECARDS_NAMES (swapping the two card
    names when the given order is not the canonical one).
    """
    if holecards in CANONICAL_HOLECARDS_NAMES:
        return holecards
    # Swap "Xx Yy" -> "Yy Xx" using the fixed 2-char card / 1-space layout.
    return f"{holecards[3:5]} {holecards[0:2]}"
def process_one_name(stove_name):
    """
    Translates a single PokerStove-style name of holecards into an
    expanded list of pokertools-style names.

    For example:
        "AKs" -> ["Ac Kc", "Ad Kd", "Ah Kh", "As Ks"]
        "66" -> ["6c 6d", "6c 6h", "6c 6s", "6d 6h", "6d 6s", "6h 6s"]
    """
    if len(stove_name) == 3:
        # Three characters: two ranks plus an "s"(uited)/"o"(ffsuit) marker.
        rank1, rank2, suit_mark = stove_name
        if suit_mark == "s":
            # Suited: one combo per suit.
            return [
                "{}{} {}{}".format(rank1, suit, rank2, suit)
                for suit in SUITS
            ]
        elif suit_mark == "o":
            # Offsuit: every ordered pair of distinct suits.
            return [
                "{}{} {}{}".format(rank1, suit1, rank2, suit2)
                for suit1, suit2 in SUIT_PERMUATIONS
            ]
        else:
            raise TokeniserError("incorrect suit_mark in stove_name: {}".format(stove_name))
    else:
        # Two characters: must be a pocket pair such as "66".
        rank1, rank2 = stove_name
        if rank1 == rank2:
            # Pair: every unordered pair of distinct suits.
            return [
                "{}{} {}{}".format(rank1, suit1, rank2, suit2)
                for suit1, suit2 in SUIT_COMBINATIONS
            ]
        else:
            raise TokeniserError("rank1 != rank2 in stove_name: {}".format(stove_name))
def process_one_token(token):
    """
    Translates any given single token. For example:
        "77-55" -> ["7c 7d", "7c 7h", "7c 7s", "7d 7h", "7d 7s", "7h 7s",
                    "6c 6d", "6c 6h", "6c 6s", "6d 6h", "6d 6s", "6h 6s",
                    "5c 5d", "5c 5h", "5c 5s", "5d 5h", "5d 5s", "5h 5s"]
    """
    # Let's say token.value is "A5s-A2s". Our naming convention is this:
    #     'A' is the 'const_rank'
    #     '5' is the 'high_rank'
    #     '2' is the 'low_rank'
    #     's' is the 'suit_mark'
    if token.type == "RANGE":
        const_rank, high_rank, low_rank, suit_mark = token.value[0], token.value[1], token.value[5], token.value[2]
        high = get_numerical_rank(high_rank)
        low = get_numerical_rank(low_rank)
        # Produce a list such as ["A5s", "A4s", "A3s", "A2s"] for processing
        names = [
            "{}{}{}".format(const_rank, get_string_rank(i), suit_mark)
            for i in range(high, (low - 1), -1)
        ]
        return list(chain.from_iterable(process_one_name(name) for name in names))
    elif token.type == "RANGE_PAIR":
        high_rank, low_rank = token.value[1], token.value[3]
        high = get_numerical_rank(high_rank)
        low = get_numerical_rank(low_rank)
        # Produce a list such as ["77", "66", "55"] for processing
        names = [
            get_string_rank(i) * 2
            for i in range(high, (low - 1), -1)
        ]
        return list(chain.from_iterable(process_one_name(name) for name in names))
    elif token.type == "PAIR":
        if token.value.endswith("+"):
            # '55+' is equivalent to 'AA-55'
            return process_one_token(Token("RANGE_PAIR", "AA" + "-" + token.value[0:2]))
        else:
            return process_one_name(token.value)
    elif token.type == "SINGLE_COMBO":
        # A fully specified pair of cards, e.g. "AhKh" -> ["Ah Kh"].
        card1, card2 = token.value[0:2], token.value[2:4]
        return ["{} {}".format(card1, card2)]
    elif token.type == "MULTI_COMBO":
        if token.value.endswith("+"):
            # 'Q2s+' is equivalent to 'QJs-Q2s'
            const_rank, low_rank, suit_mark = token.value[0], token.value[1], token.value[2]
            const = get_numerical_rank(const_rank)
            # Highest second rank is one below the constant rank.
            high_rank = get_string_rank(const - 1)
            new_token = Token("RANGE", "{}{}{}-{}{}{}".format(
                const_rank, high_rank, suit_mark,
                const_rank, low_rank, suit_mark
            ))
            return process_one_token(new_token)
        else:
            return process_one_name(token.value)
    else:
        raise TokeniserError("unexpected token: {}".format(token))
def translate(text):
    """
    Translates a string of PokerStove-style names of holecards into the
    corresponding string of names from CANONICAL_HOLECARDS_NAMES.

    >>> stove_string = "JJ+, 66-22, A5s-A2s, Q9s+, J9s+, 8d7d, ATo+, KTo+"
    >>> len(list(translate(stove_string)))
    175
    """
    tokens = list(generate_tokens(master_pat, text))
    # Anything the grammar could not match falls into CATCHALL -> error.
    errors = [t for t in tokens if t.type == "CATCHALL"]
    if errors:
        raise TokeniserError("unexpected tokens: {}".format(errors))
    for token in tokens:
        if token.type != "SEPERATOR":
            yield from (canonise(name) for name in process_one_token(token))
def to_cards(text):
    """Convert a PokerStove-style range string to pokertools holecards objects.

    (Fix: dataset-extraction residue was fused onto the return line.)
    """
    return [holecards(name) for name in translate(text)]
from collections import namedtuple
from itertools import chain
from pokertools import (
CANONICAL_HOLECARDS_NAMES,
SUIT_COMBINATIONS,
SUIT_PERMUATIONS,
SUITS,
get_numerical_rank,
get_string_rank,
holecards,
)
#------------------------------------------------------------------------------
# Tokeniser
# NOTE: the backreferences (\2, \4, \5, \7) are numbered against the capture
# groups of the COMBINED master pattern below, not each entry in isolation —
# reordering or editing entries requires renumbering them.
token_specification = [                                 # Examples:
    ("RANGE", r"[2-9AKQJT]{2}(s|o)-[2-9AKQJT]{2}\2"),   # AKs-A2s
    ("RANGE_PAIR", r"([2-9AKQJT])\4-([2-9AKQJT])\5"),   # 99-55
    ("PAIR", r"([2-9AKQJT])\7\+?"),                     # 33
    ("SINGLE_COMBO", r"([2-9AKQJT][cdhs]){2}"),         # AhKh
    ("MULTI_COMBO", r"[2-9AKQJT]{2}(s|o)\+?"),          # QJo
    ("SEPERATOR", r"\s*,\s*"),
    ("CATCHALL", r".+")                                 # anything else = error
]
# One alternation of named groups; Match.lastgroup yields the token type.
master_pat = re.compile("|".join("(?P<{}>{})".format(*pair) for pair in token_specification))
# A lexed token: ``type`` is the matched named group, ``value`` the matched text.
Token = namedtuple("Token", ["type", "value"])


class TokeniserError(Exception):
    """Raised when the input text does not match the expected grammar."""
    pass


def generate_tokens(pattern, text):
    """Yield a Token for each successive match of *pattern* across *text*."""
    scanner = pattern.scanner(text)
    # Pattern.scanner().match steps through the text; None signals the end.
    for m in iter(scanner.match, None):
        token = Token(m.lastgroup, m.group())
        yield token
def canonise(holecards):
    """
    Takes a single pair of cards and returns the canonical representation of
    that pair according to CANONICAL_HOLECARDS_NAMES.
    """
    if holecards in CANONICAL_HOLECARDS_NAMES:
        return holecards
    else:
        # Swap "Xx Yy" -> "Yy Xx" using the fixed 2-char card / 1-space layout.
        return "{} {}".format(holecards[3:5], holecards[0:2])
def process_one_name(stove_name):
"""
Translates a single PokerStove-style name of holecards into an
expanded list of pokertools-style names.
For example:
"AKs" -> ["Ac Kc", "Ad Kd", "Ah Kh", "As Ks"]
"66" -> ["6c 6d", "6c 6h", "6c 6s", "6d 6h", "6d 6s", "6c 6d"]
"""
if len(stove_name) == 3:
rank1, rank2, suit_mark = stove_name
if suit_mark == "s":
return [
"{}{} {}{}".format(rank1, suit, rank2, suit)
for suit in SUITS
]
elif suit_mark == "o":
return [
"{}{} {}{}".format(rank1, suit1, rank2, suit2)
for suit1, suit2 in SUIT_PERMUATIONS
]
else:
raise TokeniserError("incorrect suit_mark in stove_name: {}".format(stove_name))
else:
rank1, rank2 = stove_name
if rank1 == rank2:
return [
"{}{} {}{}".format(rank1, suit1, rank2, suit2)
for suit1, suit2 in SUIT_COMBINATIONS
]
else:
raise TokeniserError("rank1 != rank2 in stove_name: {}".format(stove_name))
def process_one_token(token):
"""
Translates any given single token. For example:
"77-55" -> ["7c 7d", "7c 7h", "7c 7s", "7d 7h", "7d 7s", "7c 7d",
"6c 6d", "6c 6h", "6c 6s", "6d 6h", "6d 6s", "6c 6d",
"5c 5d", "5c 5h", "5c 5s", "5d 5h", "5d 5s", "5c 5d"]
"""
# Let's say token.value is "A5s-A2s". Our naming convention is this:
# 'A' is the 'const_rank'
# '5' is the 'high_rank'
# '2' is the 'low_rank'
# 's' is the 'suit_mark'
if token.type == "RANGE":
const_rank, high_rank, low_rank, suit_mark = token.value[0], token.value[1], token.value[5], token.value[2]
high = get_numerical_rank(high_rank)
low = get_numerical_rank(low_rank)
# Produce a list such as ["A5s", "A4s", "A3s", "A2s"] for processing
names = [
"{}{}{}".format(const_rank, get_string_rank(i), suit_mark)
for i in range(high, (low - 1), -1)
]
return list(chain.from_iterable(process_one_name(name) for name in names))
elif token.type == "RANGE_PAIR":
high_rank, low_rank = token.value[1], token.value[3]
high = get_numerical_rank(high_rank)
low = get_numerical_rank(low_rank)
# Produce a list such as ["77", "66", "55"] for processing
names = [
get_string_rank(i) * 2
for i in range(high, (low - 1), -1)
]
return list(chain.from_iterable(process_one_name(name) for name in names))
elif token.type == "PAIR":
if token.value.endswith("+"):
# '55+' is equivalent to 'AA-55'
return process_one_token(Token("RANGE_PAIR", "AA" + "-" + token.value[0:2]))
else:
return process_one_name(token.value)
elif token.type == "SINGLE_COMBO":
card1, card2 = token.value[0:2], token.value[2:4]
return ["{} {}".format(card1, card2)]
elif token.type == "MULTI_COMBO":
if token.value.endswith("+"):
# 'Q2s+' is equivalent to 'QJs-Q2s'
const_rank, low_rank, suit_mark = token.value[0], token.value[1], token.value[2]
const = get_numerical_rank(const_rank)
high_rank = get_string_rank(const - 1)
new_token = Token("RANGE", "{}{}{}-{}{}{}".format(
const_rank, high_rank, suit_mark,
const_rank, low_rank, suit_mark
))
return process_one_token(new_token)
else:
return process_one_name(token.value)
else:
raise TokeniserError("unexpected token: {}".format(token))
def translate(text):
"""
Translates a string of PokerStove-style names of holecards into the
corresponding string of names from CANONICAL_HOLECARDS_NAMES.
>>> stove_string = "JJ+, 66-22, A5s-A2s, Q9s+, J9s+, 8d7d, ATo+, KTo+"
>>> len(list(translate(stove_string)))
175
"""
tokens = list(generate_tokens(master_pat, text))
errors = [t for t in tokens if t.type == "CATCHALL"]
if errors:
raise TokeniserError("unexpected tokens: {}".format(errors))
for token in tokens:
if token.type != "SEPERATOR":
yield from (canonise(name) for name in process_one_token(token))
def to_cards(text):
return [holecards(name) for name in translate(text)] | 0.538498 | 0.310407 |
from django.shortcuts import render
from push_notifications.api.rest_framework import GCMDeviceSerializer
from push_notifications.models import GCMDevice
from rest_framework import status
from rest_framework.authentication import TokenAuthentication
from rest_framework.generics import CreateAPIView, DestroyAPIView, ListAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from members.models import User
# device 등록
from notification.models import Notification
from notification.serializers import NotificationSerializer, NotificationNoticeSerializer
class ApiFcmDeviceRegister(CreateAPIView):
serializer_class = GCMDeviceSerializer
authentication_classes = [TokenAuthentication]
def create(self, request, *args, **kwargs):
print('ApiFcmDeviceRegister')
user = self.request.user
registration_id = request.data.get('registration_id')
data = {'name': user.username,
'registration_id': registration_id,
'cloud_message_type': 'FCM', }
print('data : ', data)
serializer = self.get_serializer(data=data)
if serializer.is_valid():
serializer.save(user=user)
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.HTTP_409_CONFLICT)
# FCM 발송
class ApiSendFcm(CreateAPIView):
serializer_class = NotificationSerializer
authentication_classes = [TokenAuthentication]
def create(self, request, *args, **kwargs):
user = self.request.user
sender = GCMDevice.objects.get(name=user.username)
receiver = GCMDevice.objects.get(name=user.username)
serializer = self.get_serializer(data=request.data)
if serializer.is_valid():
notification = serializer.save(sender=sender, receiver=receiver)
receiver.send_message(notification.body, extra={"title": notification.title})
return Response(status=status.HTTP_200_OK)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# 알림 발송
class ApiSendNotice(CreateAPIView):
serializer_class = NotificationNoticeSerializer
authentication_classes = [TokenAuthentication]
def create(self, request, *args, **kwargs):
user = self.request.user
if not user.is_staff:
return Response('this user is not admin', status=status.HTTP_401_UNAUTHORIZED)
sender = GCMDevice.objects.get(name='관리자')
receivers = GCMDevice.objects.all().exclude(name='관리자')
serializer = self.get_serializer(data=request.data)
if serializer.is_valid():
for receiver in receivers:
notification = serializer.save(sender=sender, receiver=receiver)
if notification.title == '':
receiver.send_message(notification.body)
else:
receiver.send_message(notification.body,
extra={"title": notification.title, "type": "notice"},
badge=1)
return Response(status=status.HTTP_200_OK)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# 채팅 알림 발송
class ApiSendChat(CreateAPIView):
serializer_class = NotificationSerializer
authentication_classes = [TokenAuthentication]
def create(self, request, *args, **kwargs):
user = self.request.user
receiver_username = request.data.get('receiver')
sender = GCMDevice.objects.get(name=user.username)
receiver = GCMDevice.objects.get(name=receiver_username)
notification = Notification.objects.create(sender=sender,
receiver=receiver,
title='채팅 알람이 들어왔습니다.',
body='채팅을 확인해주세요.',
type='chat',)
receiver.send_message(notification.body,
extra={"title": notification.title, "type": "chat"},
badge=1)
return Response(status=status.HTTP_200_OK)
# 알림 리스트 가져오기
class ApiListNotice(ListAPIView):
serializer_class = NotificationSerializer
def get_queryset(self):
user = self.request.user
receiver = GCMDevice.objects.get(name=user.username)
notifications = Notification.objects.filter(receiver=receiver, type='notice').order_by('created')
return notifications
# 알림 삭제
class ApiDeleteNotice(DestroyAPIView):
def destroy(self, request, *args, **kwargs):
pass | app/notification/views.py | from django.shortcuts import render
from push_notifications.api.rest_framework import GCMDeviceSerializer
from push_notifications.models import GCMDevice
from rest_framework import status
from rest_framework.authentication import TokenAuthentication
from rest_framework.generics import CreateAPIView, DestroyAPIView, ListAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from members.models import User
# device 등록
from notification.models import Notification
from notification.serializers import NotificationSerializer, NotificationNoticeSerializer
class ApiFcmDeviceRegister(CreateAPIView):
serializer_class = GCMDeviceSerializer
authentication_classes = [TokenAuthentication]
def create(self, request, *args, **kwargs):
print('ApiFcmDeviceRegister')
user = self.request.user
registration_id = request.data.get('registration_id')
data = {'name': user.username,
'registration_id': registration_id,
'cloud_message_type': 'FCM', }
print('data : ', data)
serializer = self.get_serializer(data=data)
if serializer.is_valid():
serializer.save(user=user)
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.HTTP_409_CONFLICT)
# FCM 발송
class ApiSendFcm(CreateAPIView):
serializer_class = NotificationSerializer
authentication_classes = [TokenAuthentication]
def create(self, request, *args, **kwargs):
user = self.request.user
sender = GCMDevice.objects.get(name=user.username)
receiver = GCMDevice.objects.get(name=user.username)
serializer = self.get_serializer(data=request.data)
if serializer.is_valid():
notification = serializer.save(sender=sender, receiver=receiver)
receiver.send_message(notification.body, extra={"title": notification.title})
return Response(status=status.HTTP_200_OK)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# 알림 발송
class ApiSendNotice(CreateAPIView):
serializer_class = NotificationNoticeSerializer
authentication_classes = [TokenAuthentication]
def create(self, request, *args, **kwargs):
user = self.request.user
if not user.is_staff:
return Response('this user is not admin', status=status.HTTP_401_UNAUTHORIZED)
sender = GCMDevice.objects.get(name='관리자')
receivers = GCMDevice.objects.all().exclude(name='관리자')
serializer = self.get_serializer(data=request.data)
if serializer.is_valid():
for receiver in receivers:
notification = serializer.save(sender=sender, receiver=receiver)
if notification.title == '':
receiver.send_message(notification.body)
else:
receiver.send_message(notification.body,
extra={"title": notification.title, "type": "notice"},
badge=1)
return Response(status=status.HTTP_200_OK)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# 채팅 알림 발송
class ApiSendChat(CreateAPIView):
serializer_class = NotificationSerializer
authentication_classes = [TokenAuthentication]
def create(self, request, *args, **kwargs):
user = self.request.user
receiver_username = request.data.get('receiver')
sender = GCMDevice.objects.get(name=user.username)
receiver = GCMDevice.objects.get(name=receiver_username)
notification = Notification.objects.create(sender=sender,
receiver=receiver,
title='채팅 알람이 들어왔습니다.',
body='채팅을 확인해주세요.',
type='chat',)
receiver.send_message(notification.body,
extra={"title": notification.title, "type": "chat"},
badge=1)
return Response(status=status.HTTP_200_OK)
# 알림 리스트 가져오기
class ApiListNotice(ListAPIView):
serializer_class = NotificationSerializer
def get_queryset(self):
user = self.request.user
receiver = GCMDevice.objects.get(name=user.username)
notifications = Notification.objects.filter(receiver=receiver, type='notice').order_by('created')
return notifications
# 알림 삭제
class ApiDeleteNotice(DestroyAPIView):
def destroy(self, request, *args, **kwargs):
pass | 0.561215 | 0.055618 |
from abc import abstractmethod
from ..radiosonde import Radiosonde
from ..radiosonde import RadiosondeList
class BaseSondeLoader:
"""Base loader interface for radiosonde to define common loading
interface
To use:
>>> loader = SondeLoader(filepath=path)
>>> loader.list_vars()
['U','UDir', 'z', 'p' ..]
>>> loader.available()
TODO: Some representation of radiosonde here.
>>> loader.load(start='2018-09-13 11:00:00', '2018-09-13 20:00:00')
TODO: Some representation of radiosonde here.
"""
def __init__(self, filepath):
"""Init.
Args:
filepath (str) : filepath or directory path (for list of files)
"""
self.filepath = filepath
@abstractmethod
def list_vars(self):
"""Return a list of available parameters in the given source
TODO: There should be a check to ensure uniformity of all files.
Returns:
list[str] : variables names
"""
assert self._is_var_uniform(), "varaibles needs to be uniform"
def _is_var_uniform(self):
"""Check if variables names across files are the same.
TODO
Note:
To implement, make sure clear criteria is sticked to.
"""
return True
def _is_criteria_valid(self):
"""Check if the criteria passed to available and load are valid
TODO
"""
return True
@abstractmethod
def available(self, criteria : dict):
"""Peek first understand available radiosondes based on certain criteria.
Args:
criteria (dict) : dictionary for criteria when testing radiosonde
data
Notes:
criteria is arbitrary based on the available vars and data format
(for recursing into all files). Specific implementation is
required.
"""
pass
@abstractmethod
def load_one(self, launchtime):
"""Abstract interface for radiosonde loader
Args:
start, end (str) : iso_format timestring used for bounding sounde
launchtimes
Returns:
Radiosonde : The composite type of radiosonde collections.
"""
pass
def load_many(self, launchtime_list, verbose=True):
sondeList = RadiosondeList()
for time in launchtime_list:
try:
sondeList.add(self.load_one(time))
print(f"Sonde at {time} downloaded succesfully")
except:
print(f"[Error] Somethign went wrong for {time}. Skipped...")
return sondeList | src/radiosonde/loader/base_loader.py | from abc import abstractmethod
from ..radiosonde import Radiosonde
from ..radiosonde import RadiosondeList
class BaseSondeLoader:
"""Base loader interface for radiosonde to define common loading
interface
To use:
>>> loader = SondeLoader(filepath=path)
>>> loader.list_vars()
['U','UDir', 'z', 'p' ..]
>>> loader.available()
TODO: Some representation of radiosonde here.
>>> loader.load(start='2018-09-13 11:00:00', '2018-09-13 20:00:00')
TODO: Some representation of radiosonde here.
"""
def __init__(self, filepath):
"""Init.
Args:
filepath (str) : filepath or directory path (for list of files)
"""
self.filepath = filepath
@abstractmethod
def list_vars(self):
"""Return a list of available parameters in the given source
TODO: There should be a check to ensure uniformity of all files.
Returns:
list[str] : variables names
"""
assert self._is_var_uniform(), "varaibles needs to be uniform"
def _is_var_uniform(self):
"""Check if variables names across files are the same.
TODO
Note:
To implement, make sure clear criteria is sticked to.
"""
return True
def _is_criteria_valid(self):
"""Check if the criteria passed to available and load are valid
TODO
"""
return True
@abstractmethod
def available(self, criteria : dict):
"""Peek first understand available radiosondes based on certain criteria.
Args:
criteria (dict) : dictionary for criteria when testing radiosonde
data
Notes:
criteria is arbitrary based on the available vars and data format
(for recursing into all files). Specific implementation is
required.
"""
pass
@abstractmethod
def load_one(self, launchtime):
"""Abstract interface for radiosonde loader
Args:
start, end (str) : iso_format timestring used for bounding sounde
launchtimes
Returns:
Radiosonde : The composite type of radiosonde collections.
"""
pass
def load_many(self, launchtime_list, verbose=True):
sondeList = RadiosondeList()
for time in launchtime_list:
try:
sondeList.add(self.load_one(time))
print(f"Sonde at {time} downloaded succesfully")
except:
print(f"[Error] Somethign went wrong for {time}. Skipped...")
return sondeList | 0.429429 | 0.476823 |
import asyncio
import aiohttp
from rest_framework import status
from presqt.targets.osf.utilities import OSFForbiddenError, OSFNotFoundError
from presqt.targets.utilities import get_page_total, run_urls_async
from presqt.utilities import PresQTResponseException
from presqt.targets.utilities.utils.session import PresQTSession
class OSFBase(object):
"""
Base class for all OSF classes and the main OSF object.
"""
def __init__(self, json, session=None):
# Set the session attribute with the existing session or a new one if one doesn't exist.
if session is None:
self.session = PresQTSession('https://api.osf.io/v2')
else:
self.session = session
def _json(self, response):
"""
Extract JSON from response.
"""
return response.json()
def _get_all_paginated_data(self, url):
"""
Get all data for the requesting user.
Parameters
----------
url : str
URL to the current data to get
Returns
-------
Data dictionary of the data points gathered up until now.
"""
# Get initial data
response_json = self._json(self.get(url))
data = response_json['data']
meta = response_json['links']['meta']
# Calculate pagination pages
if '?filter' in url or '?page' in url:
# We already have all the data we need for this request
return data
else:
page_total = get_page_total(meta['total'], meta['per_page'])
url_list = ['{}?page={}'.format(url, number) for number in range(2, page_total + 1)]
# Call all pagination pages asynchronously
children_data = run_urls_async(self, url_list)
[data.extend(child['data']) for child in children_data]
return data
@staticmethod
def _get_follow_next_urls(data_list):
"""
Get a list of 'next' urls to run asynchronously.
Parameters
----------
data_list: list
List of json data.
Returns
-------
List of urls
"""
url_list = []
for data in data_list:
if data: #ToDo: doing this to avoid private file errors look into it
meta = data['links']['meta']
next_url = data['links']['next']
if next_url:
page_total = get_page_total(meta['total'], meta['per_page'])
[url_list.append('{}{}'.format(
next_url[:-1], number)) for number in range(2, page_total + 1)]
return url_list
def get(self, url, *args, **kwargs):
"""
Handle any errors that may pop up while making GET requests through the session.
Parameters
----------
url: str
URL to make the GET request to.
Returns
-------
HTTP Response object
"""
response = self.session.get(url, *args, **kwargs)
if response.status_code == 200:
return response
elif response.status_code == 410:
raise PresQTResponseException("The requested resource is no longer available.", status.HTTP_410_GONE)
elif response.status_code == 404:
raise OSFNotFoundError("Resource not found.", status.HTTP_404_NOT_FOUND)
elif response.status_code == 403:
raise OSFForbiddenError(
"User does not have access to this resource with the token provided.", status.HTTP_403_FORBIDDEN)
def put(self, url, *args, **kwargs):
"""
Handle any errors that may pop up while making PUT requests through the session.
Parameters
----------
url: str
URL to make the PUT request to.
Returns
-------
HTTP Response object
"""
response = self.session.put(url, *args, **kwargs)
return response
def post(self, url, *args, **kwargs):
"""
Handle any errors that may pop up while making POST requests through the session.
Parameters
----------
url: str
URL to make the POST request to.
Returns
-------
HTTP Response object
"""
response = self.session.post(url, *args, **kwargs)
return response | presqt/targets/osf/classes/base.py | import asyncio
import aiohttp
from rest_framework import status
from presqt.targets.osf.utilities import OSFForbiddenError, OSFNotFoundError
from presqt.targets.utilities import get_page_total, run_urls_async
from presqt.utilities import PresQTResponseException
from presqt.targets.utilities.utils.session import PresQTSession
class OSFBase(object):
"""
Base class for all OSF classes and the main OSF object.
"""
def __init__(self, json, session=None):
# Set the session attribute with the existing session or a new one if one doesn't exist.
if session is None:
self.session = PresQTSession('https://api.osf.io/v2')
else:
self.session = session
def _json(self, response):
"""
Extract JSON from response.
"""
return response.json()
def _get_all_paginated_data(self, url):
"""
Get all data for the requesting user.
Parameters
----------
url : str
URL to the current data to get
Returns
-------
Data dictionary of the data points gathered up until now.
"""
# Get initial data
response_json = self._json(self.get(url))
data = response_json['data']
meta = response_json['links']['meta']
# Calculate pagination pages
if '?filter' in url or '?page' in url:
# We already have all the data we need for this request
return data
else:
page_total = get_page_total(meta['total'], meta['per_page'])
url_list = ['{}?page={}'.format(url, number) for number in range(2, page_total + 1)]
# Call all pagination pages asynchronously
children_data = run_urls_async(self, url_list)
[data.extend(child['data']) for child in children_data]
return data
@staticmethod
def _get_follow_next_urls(data_list):
"""
Get a list of 'next' urls to run asynchronously.
Parameters
----------
data_list: list
List of json data.
Returns
-------
List of urls
"""
url_list = []
for data in data_list:
if data: #ToDo: doing this to avoid private file errors look into it
meta = data['links']['meta']
next_url = data['links']['next']
if next_url:
page_total = get_page_total(meta['total'], meta['per_page'])
[url_list.append('{}{}'.format(
next_url[:-1], number)) for number in range(2, page_total + 1)]
return url_list
def get(self, url, *args, **kwargs):
"""
Handle any errors that may pop up while making GET requests through the session.
Parameters
----------
url: str
URL to make the GET request to.
Returns
-------
HTTP Response object
"""
response = self.session.get(url, *args, **kwargs)
if response.status_code == 200:
return response
elif response.status_code == 410:
raise PresQTResponseException("The requested resource is no longer available.", status.HTTP_410_GONE)
elif response.status_code == 404:
raise OSFNotFoundError("Resource not found.", status.HTTP_404_NOT_FOUND)
elif response.status_code == 403:
raise OSFForbiddenError(
"User does not have access to this resource with the token provided.", status.HTTP_403_FORBIDDEN)
def put(self, url, *args, **kwargs):
"""
Handle any errors that may pop up while making PUT requests through the session.
Parameters
----------
url: str
URL to make the PUT request to.
Returns
-------
HTTP Response object
"""
response = self.session.put(url, *args, **kwargs)
return response
def post(self, url, *args, **kwargs):
"""
Handle any errors that may pop up while making POST requests through the session.
Parameters
----------
url: str
URL to make the POST request to.
Returns
-------
HTTP Response object
"""
response = self.session.post(url, *args, **kwargs)
return response | 0.62395 | 0.175786 |
import random
from typing import List
from unittest.mock import Mock, mock_open, patch
import pytest
from hypothesis import given
from hypothesis.strategies import builds, integers, lists
from rplugin.python3.ultest.handler.finder import TestFinder
from rplugin.python3.ultest.models.test import Test
from tests.mocks.test_files import mock_python_file
def sorted_tests(
min_line: int = 1, max_line: int = 1000, min_length: int = 10, max_length: int = 20
):
return lists(
builds(
Test,
line=integers(min_value=min_line, max_value=max_line).map(
lambda line: line * 2
),
),
min_size=min_length,
max_size=max_length,
unique_by=lambda test: test.line, # type: ignore
).map(lambda tests: sorted(tests, key=lambda test: test.line))
vim = Mock()
vim.launch = lambda f, _: f()
finder = TestFinder(vim)
@given(sorted_tests())
def test_get_nearest_from_strict_match(tests: List[Test]):
test_i = int(random.random() * len(tests))
expected = tests[test_i]
result = finder.get_nearest_from(expected.line, tests, strict=True)
assert expected == result
@given(sorted_tests())
def test_get_nearest_from_strict_no_match(tests: List[Test]):
test_i = int(random.random() * len(tests))
result = finder.get_nearest_from(tests[test_i].line + 1, tests, strict=True)
assert result is None
@given(sorted_tests())
def test_get_nearest_from_non_strict_match(tests: List[Test]):
test_i = int(random.random() * len(tests))
expected = tests[test_i]
result = finder.get_nearest_from(expected.line + 1, tests, strict=False)
assert expected == result
@given(sorted_tests(min_line=20))
def test_get_nearest_from_non_strict_no_match(tests: List[Test]):
line = 10
result = finder.get_nearest_from(line, tests, strict=False)
assert result is None
@patch("builtins.open", mock_open(read_data=mock_python_file))
@patch("builtins.hash", lambda o: len(".".join(o)))
@patch("os.path.isfile", lambda _: True)
@pytest.mark.asyncio
async def test_find_python_tests():
patterns = {
"test": [r"\v^\s*%(async )?def (test_\w+)"],
"namespace": [r"\v^\s*class (\w+)"],
}
expected = [
Test(
id="test_a3025",
name="test_a30",
file="",
line=4,
col=1,
running=0,
),
Test(
id="test_a4341",
name="test_a43",
file="",
line=7,
col=1,
running=0,
),
]
result = await finder.find_all("", patterns)
assert result == expected | tests/unit/handler/test_finder.py | import random
from typing import List
from unittest.mock import Mock, mock_open, patch
import pytest
from hypothesis import given
from hypothesis.strategies import builds, integers, lists
from rplugin.python3.ultest.handler.finder import TestFinder
from rplugin.python3.ultest.models.test import Test
from tests.mocks.test_files import mock_python_file
def sorted_tests(
min_line: int = 1, max_line: int = 1000, min_length: int = 10, max_length: int = 20
):
return lists(
builds(
Test,
line=integers(min_value=min_line, max_value=max_line).map(
lambda line: line * 2
),
),
min_size=min_length,
max_size=max_length,
unique_by=lambda test: test.line, # type: ignore
).map(lambda tests: sorted(tests, key=lambda test: test.line))
vim = Mock()
vim.launch = lambda f, _: f()
finder = TestFinder(vim)
@given(sorted_tests())
def test_get_nearest_from_strict_match(tests: List[Test]):
test_i = int(random.random() * len(tests))
expected = tests[test_i]
result = finder.get_nearest_from(expected.line, tests, strict=True)
assert expected == result
@given(sorted_tests())
def test_get_nearest_from_strict_no_match(tests: List[Test]):
test_i = int(random.random() * len(tests))
result = finder.get_nearest_from(tests[test_i].line + 1, tests, strict=True)
assert result is None
@given(sorted_tests())
def test_get_nearest_from_non_strict_match(tests: List[Test]):
test_i = int(random.random() * len(tests))
expected = tests[test_i]
result = finder.get_nearest_from(expected.line + 1, tests, strict=False)
assert expected == result
@given(sorted_tests(min_line=20))
def test_get_nearest_from_non_strict_no_match(tests: List[Test]):
line = 10
result = finder.get_nearest_from(line, tests, strict=False)
assert result is None
@patch("builtins.open", mock_open(read_data=mock_python_file))
@patch("builtins.hash", lambda o: len(".".join(o)))
@patch("os.path.isfile", lambda _: True)
@pytest.mark.asyncio
async def test_find_python_tests():
patterns = {
"test": [r"\v^\s*%(async )?def (test_\w+)"],
"namespace": [r"\v^\s*class (\w+)"],
}
expected = [
Test(
id="test_a3025",
name="test_a30",
file="",
line=4,
col=1,
running=0,
),
Test(
id="test_a4341",
name="test_a43",
file="",
line=7,
col=1,
running=0,
),
]
result = await finder.find_all("", patterns)
assert result == expected | 0.730482 | 0.649516 |
import shutil
import subprocess
from os import path, getenv
import requests
from requests.exceptions import ConnectionError
def is_responsive(url):
"""Check if something responds to ``url``."""
try:
response = requests.get(url)
if response.status_code == 204:
return True
except ConnectionError:
return False
def test_main_fixtures_work(docker_ip, docker_services):
"""Showcase the power of our Docker fixtures!"""
# Build URL to service listening on random port.
url = "http://%s:%d/" % (docker_ip, docker_services.port_for("hello", 80))
assert not getenv('DOCKER_HOST')
assert not getenv('PYTEST_DOCKER_HOST')
endpoint_host, endpoint_port = docker_services.endpoint_for("hello", 80)
assert endpoint_host == '127.0.0.1'
assert endpoint_port > 80
docker_services.wait_until_responsive(
check=lambda: is_responsive(url), timeout=30.0, pause=0.1
)
# Contact the service.
response = requests.get(url)
# this is set up in the test image
assert response.status_code == 204
def test_containers_and_volumes_get_cleaned_up(
testdir, tmpdir, docker_compose_file
):
_copy_compose_files_to_testdir(testdir, docker_compose_file)
project_name_file_path = path.join(str(tmpdir), "project_name.txt")
testdir.makepyfile(
"""
import subprocess
def _check_volume_exists(project_name):
check_proc = subprocess.Popen(
"docker volume ls".split(),
stdout=subprocess.PIPE,
)
assert project_name.encode() in check_proc.stdout.read()
def _check_container_exists(project_name):
check_proc = subprocess.Popen(
"docker ps".split(),
stdout=subprocess.PIPE,
)
assert project_name.encode() in check_proc.stdout.read()
def test_whatever(docker_services, docker_compose_project_name):
_check_volume_exists(docker_compose_project_name)
_check_container_exists(docker_compose_project_name)
with open('{}', 'w') as project_name_file:
project_name_file.write(docker_compose_project_name)
""".format(
str(project_name_file_path)
)
)
result = testdir.runpytest()
result.assert_outcomes(passed=1)
with open(str(project_name_file_path), "rb") as project_name_file:
compose_project_name = project_name_file.read().decode()
_check_volume_is_gone(compose_project_name)
_check_container_is_gone(compose_project_name)
def _copy_compose_files_to_testdir(testdir, compose_file_path):
directory_for_compose_files = testdir.mkdir("tests")
shutil.copy(compose_file_path, str(directory_for_compose_files))
container_build_files_dir = path.realpath(
path.join(compose_file_path, "../containers")
)
shutil.copytree(
container_build_files_dir, str(directory_for_compose_files) + "/containers"
)
def _check_volume_is_gone(project_name):
check_proc = subprocess.Popen("docker volume ls".split(), stdout=subprocess.PIPE)
assert project_name.encode() not in check_proc.stdout.read()
def _check_container_is_gone(project_name):
check_proc = subprocess.Popen("docker ps".split(), stdout=subprocess.PIPE)
assert project_name.encode() not in check_proc.stdout.read() | tests/test_integration.py | import shutil
import subprocess
from os import path, getenv
import requests
from requests.exceptions import ConnectionError
def is_responsive(url):
"""Check if something responds to ``url``."""
try:
response = requests.get(url)
if response.status_code == 204:
return True
except ConnectionError:
return False
def test_main_fixtures_work(docker_ip, docker_services):
"""Showcase the power of our Docker fixtures!"""
# Build URL to service listening on random port.
url = "http://%s:%d/" % (docker_ip, docker_services.port_for("hello", 80))
assert not getenv('DOCKER_HOST')
assert not getenv('PYTEST_DOCKER_HOST')
endpoint_host, endpoint_port = docker_services.endpoint_for("hello", 80)
assert endpoint_host == '127.0.0.1'
assert endpoint_port > 80
docker_services.wait_until_responsive(
check=lambda: is_responsive(url), timeout=30.0, pause=0.1
)
# Contact the service.
response = requests.get(url)
# this is set up in the test image
assert response.status_code == 204
def test_containers_and_volumes_get_cleaned_up(
testdir, tmpdir, docker_compose_file
):
_copy_compose_files_to_testdir(testdir, docker_compose_file)
project_name_file_path = path.join(str(tmpdir), "project_name.txt")
testdir.makepyfile(
"""
import subprocess
def _check_volume_exists(project_name):
check_proc = subprocess.Popen(
"docker volume ls".split(),
stdout=subprocess.PIPE,
)
assert project_name.encode() in check_proc.stdout.read()
def _check_container_exists(project_name):
check_proc = subprocess.Popen(
"docker ps".split(),
stdout=subprocess.PIPE,
)
assert project_name.encode() in check_proc.stdout.read()
def test_whatever(docker_services, docker_compose_project_name):
_check_volume_exists(docker_compose_project_name)
_check_container_exists(docker_compose_project_name)
with open('{}', 'w') as project_name_file:
project_name_file.write(docker_compose_project_name)
""".format(
str(project_name_file_path)
)
)
result = testdir.runpytest()
result.assert_outcomes(passed=1)
with open(str(project_name_file_path), "rb") as project_name_file:
compose_project_name = project_name_file.read().decode()
_check_volume_is_gone(compose_project_name)
_check_container_is_gone(compose_project_name)
def _copy_compose_files_to_testdir(testdir, compose_file_path):
directory_for_compose_files = testdir.mkdir("tests")
shutil.copy(compose_file_path, str(directory_for_compose_files))
container_build_files_dir = path.realpath(
path.join(compose_file_path, "../containers")
)
shutil.copytree(
container_build_files_dir, str(directory_for_compose_files) + "/containers"
)
def _check_volume_is_gone(project_name):
check_proc = subprocess.Popen("docker volume ls".split(), stdout=subprocess.PIPE)
assert project_name.encode() not in check_proc.stdout.read()
def _check_container_is_gone(project_name):
check_proc = subprocess.Popen("docker ps".split(), stdout=subprocess.PIPE)
assert project_name.encode() not in check_proc.stdout.read() | 0.482673 | 0.229417 |
"""Bound on integer range."""
from typing import List, TYPE_CHECKING
from chb.invariants.FnDictionaryRecord import FnXprDictionaryRecord, xprregistry
from chb.invariants.XNumerical import XNumerical
import chb.util.fileutil as UF
from chb.util.IndexedTable import IndexedTableValue
if TYPE_CHECKING:
from chb.invariants.FnXprDictionary import FnXprDictionary
class XBound(FnXprDictionaryRecord):
def __init__(
self,
xd: "FnXprDictionary",
ixval: IndexedTableValue) -> None:
FnXprDictionaryRecord.__init__(self, xd, ixval)
@property
def is_min_inf(self) -> bool:
return False
@property
def is_max_inf(self) -> bool:
return False
@property
def is_bounded(self) -> bool:
return False
@property
def bound(self) -> XNumerical:
raise UF.CHBError("bound not defined on " + str(self))
@xprregistry.register_tag("m", XBound)
class XMinusInfBound(XBound):
"""Minus infinity bound."""
def __init__(
self,
xd: "FnXprDictionary",
ixval: IndexedTableValue) -> None:
XBound.__init__(self, xd, ixval)
@property
def is_min_inf(self) -> bool:
return True
def __str__(self) -> str:
return "minus infinity"
@xprregistry.register_tag("p", XBound)
class XPlusInfBound(XBound):
"""Plus infinity bound."""
def __init__(
self,
xd: "FnXprDictionary",
ixval: IndexedTableValue) -> None:
XBound.__init__(self, xd, ixval)
@property
def is_max_inf(self) -> bool:
return True
def __str__(self) -> str:
return "plus infinity"
@xprregistry.register_tag("n", XBound)
class XNumberBound(XBound):
"""Numerical bound.
args[0]: index of numerical in xd
"""
def __init__(
self,
xd: "FnXprDictionary",
ixval: IndexedTableValue) -> None:
XBound.__init__(self, xd, ixval)
@property
def is_bounded(self) -> bool:
return True
@property
def bound(self) -> XNumerical:
return self.xd.numerical(self.args[0])
def __str__(self) -> str:
return str(self.bound) | chb/invariants/XBound.py | """Bound on integer range."""
from typing import List, TYPE_CHECKING
from chb.invariants.FnDictionaryRecord import FnXprDictionaryRecord, xprregistry
from chb.invariants.XNumerical import XNumerical
import chb.util.fileutil as UF
from chb.util.IndexedTable import IndexedTableValue
if TYPE_CHECKING:
from chb.invariants.FnXprDictionary import FnXprDictionary
class XBound(FnXprDictionaryRecord):
def __init__(
self,
xd: "FnXprDictionary",
ixval: IndexedTableValue) -> None:
FnXprDictionaryRecord.__init__(self, xd, ixval)
@property
def is_min_inf(self) -> bool:
return False
@property
def is_max_inf(self) -> bool:
return False
@property
def is_bounded(self) -> bool:
return False
@property
def bound(self) -> XNumerical:
raise UF.CHBError("bound not defined on " + str(self))
@xprregistry.register_tag("m", XBound)
class XMinusInfBound(XBound):
"""Minus infinity bound."""
def __init__(
self,
xd: "FnXprDictionary",
ixval: IndexedTableValue) -> None:
XBound.__init__(self, xd, ixval)
@property
def is_min_inf(self) -> bool:
return True
def __str__(self) -> str:
return "minus infinity"
@xprregistry.register_tag("p", XBound)
class XPlusInfBound(XBound):
"""Plus infinity bound."""
def __init__(
self,
xd: "FnXprDictionary",
ixval: IndexedTableValue) -> None:
XBound.__init__(self, xd, ixval)
@property
def is_max_inf(self) -> bool:
return True
def __str__(self) -> str:
return "plus infinity"
@xprregistry.register_tag("n", XBound)
class XNumberBound(XBound):
"""Numerical bound.
args[0]: index of numerical in xd
"""
def __init__(
self,
xd: "FnXprDictionary",
ixval: IndexedTableValue) -> None:
XBound.__init__(self, xd, ixval)
@property
def is_bounded(self) -> bool:
return True
@property
def bound(self) -> XNumerical:
return self.xd.numerical(self.args[0])
def __str__(self) -> str:
return str(self.bound) | 0.934932 | 0.37502 |
from bs4 import BeautifulSoup
from dotenv import load_dotenv
from email.message import EmailMessage
from email.utils import make_msgid
import chevron
import io
import json
import logging
import math
import os
import requests
import smtplib
import sys
import traceback
BASE_URL = "https://www.hasznaltauto.hu"
PAGE_SIZE = 20
load_dotenv()
log = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
payload = {
"HirdetesSzemelyautoSearch[evjarat_min]": os.getenv("CAR_MIN_YEAR"),
"HirdetesSzemelyautoSearch[futottkm_min]": os.getenv("CAR_DISTANCE_MIN"),
"HirdetesSzemelyautoSearch[futottkm_max]": os.getenv("CAR_DISTANCE_MAX"),
"HirdetesSzemelyautoSearch[kivitel][]": os.getenv("CAR_BODY_TYPE"),
"HirdetesSzemelyautoSearch[marka_id]": os.getenv("CAR_MAKE"),
"HirdetesSzemelyautoSearch[modell_id]": os.getenv("CAR_MODEL"),
"HirdetesSzemelyautoSearch[vetelar_max]": os.getenv("CAR_MAX_PRICE"),
"HirdetesSzemelyautoSearch[vetelar_min]": os.getenv("CAR_MIN_PRICE"),
"HirdetesSzemelyautoSearch[uzemanyag][]": os.getenv("CAR_FUEL_TYPE"),
}
def __crawl(url):
divs = BeautifulSoup(__get(url).content, "html.parser").find_all(
"div", {"class": "talalati-sor"}
)
if len(divs) > 0:
return __parse(divs)
return []
def __create_message(car):
msg = EmailMessage()
msg["From"] = "Putt-Putt <{}>".format(os.getenv("SMTP_FROM"))
msg["Subject"] = "[{0}] {1}".format(car.get("id"), car.get("title"))
msg["To"] = os.getenv("SMTP_TO")
msg["X-Priority"] = "2"
with io.open(
os.path.join(os.path.dirname(__file__), "templates/mail.mustache"),
"r",
encoding="utf-8",
) as f:
if car["image"]:
img_id = make_msgid()
car["img_id"] = img_id[1:-1]
msg.add_alternative(chevron.render(f, car), subtype="html")
msg.get_payload()[0].add_related(
car.get("image").read(), "image", "jpeg", cid=img_id
)
else:
msg.add_alternative(chevron.render(f, car), subtype="html")
return msg
def __get(url, stream=False):
res = requests.get(
url, headers={"User-Agent": os.getenv("USER_AGENT")}, stream=stream
)
if res.ok:
return res
else:
log.error(res.status_code)
sys.exit(res.status_code)
def __get_image(url):
res = __get(url, True)
if res.ok:
res.raw.decode_content = True
return res.raw
else:
log.error(res.status_code)
sys.exit(res.status_code)
def __get_search_key():
headers = {
"Accept": "application/json, text/javascript, */*, q=0.01",
"X-Requested-With": "XMLHttpRequest",
"User-Agent": os.getenv("USER_AGENT"),
"Content-Type": "Application/X-WWW-Form-URLEncoded; Charset=UTF-8",
}
return json.loads(
__post(
BASE_URL + "/egyszeru/szemelyauto", dict(payload, getSearchUrl=1), headers
).content
)["formUrl"].rsplit("/", 1)[-1]
def __get_total():
headers = {
"Accept": "application/json, text/javascript, */*, q=0.01",
"X-Requested-With": "XMLHttpRequest",
"User-Agent": os.getenv("USER_AGENT"),
"Content-Type": "Application/X-WWW-Form-URLEncoded; Charset=UTF-8",
}
return json.loads(
__post(BASE_URL + "/egyszeru/szemelyauto", payload, headers).content
)["totalCount"]
def __parse(divs):
cars = []
for div in divs:
a = div.find("h3").find("a", href=True)
cars.append(
{
"details": div.find("div", {"class": "talalatisor-info adatok"}).text,
"id": div.find("div", {"class": "talalatisor-hirkod"}).text.split()[1][
:-1
],
"image": __get_image(
div.find("img", {"class": "img-responsive"})[
"data-lazyurl"
].replace("_1t", "")
)
if div.find("img", {"class": "img-responsive"}).has_attr("data-lazyurl")
else None,
"price": div.find("div", {"class": "vetelar"}).text,
"title": a.text,
"url": a["href"],
}
)
return cars
def __post(url, payload, headers={"User-Agent": os.getenv("USER_AGENT")}):
res = requests.post(url, data=payload, headers=headers)
if res.ok:
return res
else:
log.error(res.status_code)
sys.exit(res.status_code)
def __load_database():
if os.path.exists(os.getenv("DB_PATH")):
with open(os.getenv("DB_PATH"), "r") as f:
try:
return json.load(f)
except:
log.error(traceback.format_exc())
else:
log.error(
'The given path "{}" is not a valid path'.format(os.getenv("DB_PATH"))
)
return []
def __save_database(data):
with open(os.getenv("DB_PATH"), "w") as f:
try:
json.dump(data, f)
except:
log.error(traceback.format_exc())
sys.exit(1)
def __update_database(cars):
diff = []
db = __load_database()
for c in cars:
t = c.copy()
t.pop("image")
if t not in db:
db.append(t)
diff.append(c)
if len(diff) > 0:
__save_database(db)
return diff
def __send_mails(cars):
log.info("Sending email(s)...")
try:
server = smtplib.SMTP(os.getenv("SMTP_HOST"), os.getenv("SMTP_PORT"))
server.starttls()
server.login(os.getenv("SMTP_USERNAME"), os.getenv("SMTP_PASSWORD"))
for car in cars:
server.sendmail(
os.getenv("SMTP_FROM"),
os.getenv("SMTP_TO"),
__create_message(car).as_string(),
)
server.close()
except:
log.error(traceback.format_exc())
sys.exit(1)
def main():
cars = []
curr = 0
last = math.ceil(__get_total() / PAGE_SIZE)
search_key = __get_search_key()
while curr < last:
cars += __crawl(
"{}/talalatilista/{}/page{}".format(BASE_URL, search_key, str(curr + 1))
)
curr += 1
if len(cars) > 0:
diff = __update_database(cars)
log.info('Found "{}" new car(s)'.format(len(diff)))
if len(diff) > 0:
__send_mails(diff)
else:
log.info("The search returned no results")
if __name__ == "__main__":
main() | hahu/main.py | from bs4 import BeautifulSoup
from dotenv import load_dotenv
from email.message import EmailMessage
from email.utils import make_msgid
import chevron
import io
import json
import logging
import math
import os
import requests
import smtplib
import sys
import traceback
BASE_URL = "https://www.hasznaltauto.hu"
PAGE_SIZE = 20
load_dotenv()
log = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
payload = {
"HirdetesSzemelyautoSearch[evjarat_min]": os.getenv("CAR_MIN_YEAR"),
"HirdetesSzemelyautoSearch[futottkm_min]": os.getenv("CAR_DISTANCE_MIN"),
"HirdetesSzemelyautoSearch[futottkm_max]": os.getenv("CAR_DISTANCE_MAX"),
"HirdetesSzemelyautoSearch[kivitel][]": os.getenv("CAR_BODY_TYPE"),
"HirdetesSzemelyautoSearch[marka_id]": os.getenv("CAR_MAKE"),
"HirdetesSzemelyautoSearch[modell_id]": os.getenv("CAR_MODEL"),
"HirdetesSzemelyautoSearch[vetelar_max]": os.getenv("CAR_MAX_PRICE"),
"HirdetesSzemelyautoSearch[vetelar_min]": os.getenv("CAR_MIN_PRICE"),
"HirdetesSzemelyautoSearch[uzemanyag][]": os.getenv("CAR_FUEL_TYPE"),
}
def __crawl(url):
divs = BeautifulSoup(__get(url).content, "html.parser").find_all(
"div", {"class": "talalati-sor"}
)
if len(divs) > 0:
return __parse(divs)
return []
def __create_message(car):
msg = EmailMessage()
msg["From"] = "Putt-Putt <{}>".format(os.getenv("SMTP_FROM"))
msg["Subject"] = "[{0}] {1}".format(car.get("id"), car.get("title"))
msg["To"] = os.getenv("SMTP_TO")
msg["X-Priority"] = "2"
with io.open(
os.path.join(os.path.dirname(__file__), "templates/mail.mustache"),
"r",
encoding="utf-8",
) as f:
if car["image"]:
img_id = make_msgid()
car["img_id"] = img_id[1:-1]
msg.add_alternative(chevron.render(f, car), subtype="html")
msg.get_payload()[0].add_related(
car.get("image").read(), "image", "jpeg", cid=img_id
)
else:
msg.add_alternative(chevron.render(f, car), subtype="html")
return msg
def __get(url, stream=False):
res = requests.get(
url, headers={"User-Agent": os.getenv("USER_AGENT")}, stream=stream
)
if res.ok:
return res
else:
log.error(res.status_code)
sys.exit(res.status_code)
def __get_image(url):
res = __get(url, True)
if res.ok:
res.raw.decode_content = True
return res.raw
else:
log.error(res.status_code)
sys.exit(res.status_code)
def __get_search_key():
headers = {
"Accept": "application/json, text/javascript, */*, q=0.01",
"X-Requested-With": "XMLHttpRequest",
"User-Agent": os.getenv("USER_AGENT"),
"Content-Type": "Application/X-WWW-Form-URLEncoded; Charset=UTF-8",
}
return json.loads(
__post(
BASE_URL + "/egyszeru/szemelyauto", dict(payload, getSearchUrl=1), headers
).content
)["formUrl"].rsplit("/", 1)[-1]
def __get_total():
headers = {
"Accept": "application/json, text/javascript, */*, q=0.01",
"X-Requested-With": "XMLHttpRequest",
"User-Agent": os.getenv("USER_AGENT"),
"Content-Type": "Application/X-WWW-Form-URLEncoded; Charset=UTF-8",
}
return json.loads(
__post(BASE_URL + "/egyszeru/szemelyauto", payload, headers).content
)["totalCount"]
def __parse(divs):
cars = []
for div in divs:
a = div.find("h3").find("a", href=True)
cars.append(
{
"details": div.find("div", {"class": "talalatisor-info adatok"}).text,
"id": div.find("div", {"class": "talalatisor-hirkod"}).text.split()[1][
:-1
],
"image": __get_image(
div.find("img", {"class": "img-responsive"})[
"data-lazyurl"
].replace("_1t", "")
)
if div.find("img", {"class": "img-responsive"}).has_attr("data-lazyurl")
else None,
"price": div.find("div", {"class": "vetelar"}).text,
"title": a.text,
"url": a["href"],
}
)
return cars
def __post(url, payload, headers={"User-Agent": os.getenv("USER_AGENT")}):
res = requests.post(url, data=payload, headers=headers)
if res.ok:
return res
else:
log.error(res.status_code)
sys.exit(res.status_code)
def __load_database():
if os.path.exists(os.getenv("DB_PATH")):
with open(os.getenv("DB_PATH"), "r") as f:
try:
return json.load(f)
except:
log.error(traceback.format_exc())
else:
log.error(
'The given path "{}" is not a valid path'.format(os.getenv("DB_PATH"))
)
return []
def __save_database(data):
with open(os.getenv("DB_PATH"), "w") as f:
try:
json.dump(data, f)
except:
log.error(traceback.format_exc())
sys.exit(1)
def __update_database(cars):
diff = []
db = __load_database()
for c in cars:
t = c.copy()
t.pop("image")
if t not in db:
db.append(t)
diff.append(c)
if len(diff) > 0:
__save_database(db)
return diff
def __send_mails(cars):
log.info("Sending email(s)...")
try:
server = smtplib.SMTP(os.getenv("SMTP_HOST"), os.getenv("SMTP_PORT"))
server.starttls()
server.login(os.getenv("SMTP_USERNAME"), os.getenv("SMTP_PASSWORD"))
for car in cars:
server.sendmail(
os.getenv("SMTP_FROM"),
os.getenv("SMTP_TO"),
__create_message(car).as_string(),
)
server.close()
except:
log.error(traceback.format_exc())
sys.exit(1)
def main():
cars = []
curr = 0
last = math.ceil(__get_total() / PAGE_SIZE)
search_key = __get_search_key()
while curr < last:
cars += __crawl(
"{}/talalatilista/{}/page{}".format(BASE_URL, search_key, str(curr + 1))
)
curr += 1
if len(cars) > 0:
diff = __update_database(cars)
log.info('Found "{}" new car(s)'.format(len(diff)))
if len(diff) > 0:
__send_mails(diff)
else:
log.info("The search returned no results")
if __name__ == "__main__":
main() | 0.209712 | 0.088072 |
import numpy as np
from ... import opcodes as OperandDef
from ...core import TilesError
from ...serialize import KeyField, BoolField
from ...utils import check_chunks_unknown_shape
from ..operands import TensorOperand, TensorOperandMixin
from ..datasource import tensor as astensor
from ..array_utils import as_same_device, device
from ..core import TensorOrder
from .ravel import ravel
class TensorIsIn(TensorOperand, TensorOperandMixin):
_op_type_ = OperandDef.ISIN
_element = KeyField('element')
_test_elements = KeyField('test_elements')
_assume_unique = BoolField('assume_unique')
_invert = BoolField('invert')
def __init__(self, assume_unique=None, invert=None, dtype=None, **kw):
dtype = np.dtype(bool) if dtype is None else dtype
super().__init__(_assume_unique=assume_unique, _invert=invert,
dtype=dtype, **kw)
@property
def element(self):
return self._element
@property
def test_elements(self):
return self._test_elements
@property
def assume_unique(self):
return self._assume_unique
@property
def invert(self):
return self._invert
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
self._element = self._inputs[0]
self._test_elements = self._inputs[1]
def __call__(self, element, test_elements):
element, test_elements = astensor(element), ravel(astensor(test_elements))
return self.new_tensor([element, test_elements], element.shape, order=TensorOrder.C_ORDER)
@classmethod
def tile(cls, op):
in_tensor = op.element
test_elements = op.test_elements
out_tensor = op.outputs[0]
if len(test_elements.chunks) != 1:
check_chunks_unknown_shape([test_elements], TilesError)
test_elements = test_elements.rechunk(len(test_elements))._inplace_tile()
test_elements_chunk = test_elements.chunks[0]
out_chunks = []
for c in in_tensor.chunks:
chunk_op = op.copy().reset_key()
out_chunk = chunk_op.new_chunk([c, test_elements_chunk], shape=c.shape,
index=c.index, order=out_tensor.order)
out_chunks.append(out_chunk)
new_op = op.copy()
return new_op.new_tensors([in_tensor, test_elements], out_tensor.shape,
order=out_tensor.order, chunks=out_chunks,
nsplits=in_tensor.nsplits)
@classmethod
def execute(cls, ctx, op):
(element, test_elements), device_id, xp = as_same_device(
[ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True)
with device(device_id):
ctx[op.outputs[0].key] = xp.isin(element, test_elements,
assume_unique=op.assume_unique,
invert=op.invert)
def isin(element, test_elements, assume_unique=False, invert=False):
"""
Calculates `element in test_elements`, broadcasting over `element` only.
Returns a boolean array of the same shape as `element` that is True
where an element of `element` is in `test_elements` and False otherwise.
Parameters
----------
element : array_like
Input tensor.
test_elements : array_like
The values against which to test each value of `element`.
This argument is flattened if it is a tensor or array_like.
See notes for behavior with non-array-like parameters.
assume_unique : bool, optional
If True, the input tensors are both assumed to be unique, which
can speed up the calculation. Default is False.
invert : bool, optional
If True, the values in the returned tensor are inverted, as if
calculating `element not in test_elements`. Default is False.
``mt.isin(a, b, invert=True)`` is equivalent to (but faster
than) ``mt.invert(mt.isin(a, b))``.
Returns
-------
isin : Tensor, bool
Has the same shape as `element`. The values `element[isin]`
are in `test_elements`.
See Also
--------
in1d : Flattened version of this function.
Notes
-----
`isin` is an element-wise function version of the python keyword `in`.
``isin(a, b)`` is roughly equivalent to
``mt.array([item in b for item in a])`` if `a` and `b` are 1-D sequences.
`element` and `test_elements` are converted to tensors if they are not
already. If `test_elements` is a set (or other non-sequence collection)
it will be converted to an object tensor with one element, rather than a
tensor of the values contained in `test_elements`. This is a consequence
of the `tensor` constructor's way of handling non-sequence collections.
Converting the set to a list usually gives the desired behavior.
Examples
--------
>>> import mars.tensor as mt
>>> element = 2*mt.arange(4).reshape((2, 2))
>>> element.execute()
array([[0, 2],
[4, 6]])
>>> test_elements = [1, 2, 4, 8]
>>> mask = mt.isin(element, test_elements)
>>> mask.execute()
array([[ False, True],
[ True, False]])
>>> element[mask].execute()
array([2, 4])
>>> mask = mt.isin(element, test_elements, invert=True)
>>> mask.execute()
array([[ True, False],
[ False, True]])
>>> element[mask]
array([0, 6])
Because of how `array` handles sets, the following does not
work as expected:
>>> test_set = {1, 2, 4, 8}
>>> mt.isin(element, test_set).execute()
array([[ False, False],
[ False, False]])
Casting the set to a list gives the expected result:
>>> mt.isin(element, list(test_set)).execute()
array([[ False, True],
[ True, False]])
"""
op = TensorIsIn(assume_unique, invert)
return op(element, test_elements) | mars/tensor/base/isin.py |
import numpy as np
from ... import opcodes as OperandDef
from ...core import TilesError
from ...serialize import KeyField, BoolField
from ...utils import check_chunks_unknown_shape
from ..operands import TensorOperand, TensorOperandMixin
from ..datasource import tensor as astensor
from ..array_utils import as_same_device, device
from ..core import TensorOrder
from .ravel import ravel
class TensorIsIn(TensorOperand, TensorOperandMixin):
_op_type_ = OperandDef.ISIN
_element = KeyField('element')
_test_elements = KeyField('test_elements')
_assume_unique = BoolField('assume_unique')
_invert = BoolField('invert')
def __init__(self, assume_unique=None, invert=None, dtype=None, **kw):
dtype = np.dtype(bool) if dtype is None else dtype
super().__init__(_assume_unique=assume_unique, _invert=invert,
dtype=dtype, **kw)
@property
def element(self):
return self._element
@property
def test_elements(self):
return self._test_elements
@property
def assume_unique(self):
return self._assume_unique
@property
def invert(self):
return self._invert
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
self._element = self._inputs[0]
self._test_elements = self._inputs[1]
def __call__(self, element, test_elements):
element, test_elements = astensor(element), ravel(astensor(test_elements))
return self.new_tensor([element, test_elements], element.shape, order=TensorOrder.C_ORDER)
@classmethod
def tile(cls, op):
in_tensor = op.element
test_elements = op.test_elements
out_tensor = op.outputs[0]
if len(test_elements.chunks) != 1:
check_chunks_unknown_shape([test_elements], TilesError)
test_elements = test_elements.rechunk(len(test_elements))._inplace_tile()
test_elements_chunk = test_elements.chunks[0]
out_chunks = []
for c in in_tensor.chunks:
chunk_op = op.copy().reset_key()
out_chunk = chunk_op.new_chunk([c, test_elements_chunk], shape=c.shape,
index=c.index, order=out_tensor.order)
out_chunks.append(out_chunk)
new_op = op.copy()
return new_op.new_tensors([in_tensor, test_elements], out_tensor.shape,
order=out_tensor.order, chunks=out_chunks,
nsplits=in_tensor.nsplits)
@classmethod
def execute(cls, ctx, op):
(element, test_elements), device_id, xp = as_same_device(
[ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True)
with device(device_id):
ctx[op.outputs[0].key] = xp.isin(element, test_elements,
assume_unique=op.assume_unique,
invert=op.invert)
def isin(element, test_elements, assume_unique=False, invert=False):
"""
Calculates `element in test_elements`, broadcasting over `element` only.
Returns a boolean array of the same shape as `element` that is True
where an element of `element` is in `test_elements` and False otherwise.
Parameters
----------
element : array_like
Input tensor.
test_elements : array_like
The values against which to test each value of `element`.
This argument is flattened if it is a tensor or array_like.
See notes for behavior with non-array-like parameters.
assume_unique : bool, optional
If True, the input tensors are both assumed to be unique, which
can speed up the calculation. Default is False.
invert : bool, optional
If True, the values in the returned tensor are inverted, as if
calculating `element not in test_elements`. Default is False.
``mt.isin(a, b, invert=True)`` is equivalent to (but faster
than) ``mt.invert(mt.isin(a, b))``.
Returns
-------
isin : Tensor, bool
Has the same shape as `element`. The values `element[isin]`
are in `test_elements`.
See Also
--------
in1d : Flattened version of this function.
Notes
-----
`isin` is an element-wise function version of the python keyword `in`.
``isin(a, b)`` is roughly equivalent to
``mt.array([item in b for item in a])`` if `a` and `b` are 1-D sequences.
`element` and `test_elements` are converted to tensors if they are not
already. If `test_elements` is a set (or other non-sequence collection)
it will be converted to an object tensor with one element, rather than a
tensor of the values contained in `test_elements`. This is a consequence
of the `tensor` constructor's way of handling non-sequence collections.
Converting the set to a list usually gives the desired behavior.
Examples
--------
>>> import mars.tensor as mt
>>> element = 2*mt.arange(4).reshape((2, 2))
>>> element.execute()
array([[0, 2],
[4, 6]])
>>> test_elements = [1, 2, 4, 8]
>>> mask = mt.isin(element, test_elements)
>>> mask.execute()
array([[ False, True],
[ True, False]])
>>> element[mask].execute()
array([2, 4])
>>> mask = mt.isin(element, test_elements, invert=True)
>>> mask.execute()
array([[ True, False],
[ False, True]])
>>> element[mask]
array([0, 6])
Because of how `array` handles sets, the following does not
work as expected:
>>> test_set = {1, 2, 4, 8}
>>> mt.isin(element, test_set).execute()
array([[ False, False],
[ False, False]])
Casting the set to a list gives the expected result:
>>> mt.isin(element, list(test_set)).execute()
array([[ False, True],
[ True, False]])
"""
op = TensorIsIn(assume_unique, invert)
return op(element, test_elements) | 0.878731 | 0.581184 |
import itertools
from fractions import Fraction
from io import BytesIO
from typing import Callable, Tuple
import cairosvg
import filetype
from PIL import Image, ImageSequence
from streamdeck_ui.display.filter import Filter
class ImageFilter(Filter):
"""
Represents a static image. It transforms the input image by replacing it with a static image.
"""
def __init__(self, file: str):
super(ImageFilter, self).__init__()
self.file = file
def initialize(self, size: Tuple[int, int]):
# Each frame needs to have a unique hashcode. Start with file name as baseline.
image_hash = hash((self.__class__, self.file))
frame_duration = []
frame_hash = []
try:
kind = filetype.guess(self.file)
if kind is None:
svg_code = open(self.file).read()
png = cairosvg.svg2png(svg_code, output_height=size[1], output_width=size[0])
image_file = BytesIO(png)
image = Image.open(image_file)
frame_duration.append(-1)
frame_hash.append(image_hash)
else:
image = Image.open(self.file)
image.seek(0)
# Frame number is used to create unique hash
frame_number = 1
while True:
try:
frame_duration.append(image.info["duration"])
# Create tuple and hash it, to combine the image and frame hashcodes
frame_hash.append(hash((image_hash, frame_number)))
image.seek(image.tell() + 1)
frame_number += 1
except EOFError:
# Reached the final frame
break
except KeyError:
# If the key 'duration' can't be found, it's not an animation
frame_duration.append(-1)
frame_hash.append(image_hash)
break
except (OSError, IOError) as icon_error:
# FIXME: caller should handle this?
print(f"Unable to load icon {self.file} with error {icon_error}")
image = Image.new("RGB", size)
frame_duration.append(-1)
frame_hash.append(image_hash)
frames = ImageSequence.Iterator(image)
# Scale all the frames to the target size
self.frames = []
for frame, milliseconds, hashcode in zip(frames, frame_duration, frame_hash):
frame = frame.copy()
frame.thumbnail(size, Image.LANCZOS)
self.frames.append((frame, milliseconds, hashcode))
self.frame_cycle = itertools.cycle(self.frames)
self.current_frame = next(self.frame_cycle)
self.frame_time = Fraction()
def transform(self, get_input: Callable[[], Image.Image], get_output: Callable[[int], Image.Image], input_changed: bool, time: Fraction) -> Tuple[Image.Image, int]:
"""
The transformation returns the loaded image, ando overwrites whatever came before.
"""
# Unpack tuple to make code a bit easier to understand
frame, duration, hashcode = self.current_frame
if duration >= 0 and time - self.frame_time > duration / 1000:
self.frame_time = time
self.current_frame = next(self.frame_cycle)
# Unpack updated value
frame, duration, hashcode = self.current_frame
image = get_output(hashcode)
if image:
return (image, hashcode)
input = get_input()
if frame.mode == "RGBA":
# Use the transparency mask of the image to paste
input.paste(frame, frame)
else:
input.paste(frame)
return (input, hashcode)
if input_changed:
image = get_output(hashcode)
if image:
return (image, hashcode)
input = get_input()
if frame.mode == "RGBA":
# Use the transparency mask of the image to paste
input.paste(frame, frame)
else:
input.paste(frame)
return (input, hashcode)
else:
return (None, hashcode) | streamdeck_ui/display/image_filter.py | import itertools
from fractions import Fraction
from io import BytesIO
from typing import Callable, Tuple
import cairosvg
import filetype
from PIL import Image, ImageSequence
from streamdeck_ui.display.filter import Filter
class ImageFilter(Filter):
"""
Represents a static image. It transforms the input image by replacing it with a static image.
"""
def __init__(self, file: str):
super(ImageFilter, self).__init__()
self.file = file
def initialize(self, size: Tuple[int, int]):
# Each frame needs to have a unique hashcode. Start with file name as baseline.
image_hash = hash((self.__class__, self.file))
frame_duration = []
frame_hash = []
try:
kind = filetype.guess(self.file)
if kind is None:
svg_code = open(self.file).read()
png = cairosvg.svg2png(svg_code, output_height=size[1], output_width=size[0])
image_file = BytesIO(png)
image = Image.open(image_file)
frame_duration.append(-1)
frame_hash.append(image_hash)
else:
image = Image.open(self.file)
image.seek(0)
# Frame number is used to create unique hash
frame_number = 1
while True:
try:
frame_duration.append(image.info["duration"])
# Create tuple and hash it, to combine the image and frame hashcodes
frame_hash.append(hash((image_hash, frame_number)))
image.seek(image.tell() + 1)
frame_number += 1
except EOFError:
# Reached the final frame
break
except KeyError:
# If the key 'duration' can't be found, it's not an animation
frame_duration.append(-1)
frame_hash.append(image_hash)
break
except (OSError, IOError) as icon_error:
# FIXME: caller should handle this?
print(f"Unable to load icon {self.file} with error {icon_error}")
image = Image.new("RGB", size)
frame_duration.append(-1)
frame_hash.append(image_hash)
frames = ImageSequence.Iterator(image)
# Scale all the frames to the target size
self.frames = []
for frame, milliseconds, hashcode in zip(frames, frame_duration, frame_hash):
frame = frame.copy()
frame.thumbnail(size, Image.LANCZOS)
self.frames.append((frame, milliseconds, hashcode))
self.frame_cycle = itertools.cycle(self.frames)
self.current_frame = next(self.frame_cycle)
self.frame_time = Fraction()
def transform(self, get_input: Callable[[], Image.Image], get_output: Callable[[int], Image.Image], input_changed: bool, time: Fraction) -> Tuple[Image.Image, int]:
"""
The transformation returns the loaded image, ando overwrites whatever came before.
"""
# Unpack tuple to make code a bit easier to understand
frame, duration, hashcode = self.current_frame
if duration >= 0 and time - self.frame_time > duration / 1000:
self.frame_time = time
self.current_frame = next(self.frame_cycle)
# Unpack updated value
frame, duration, hashcode = self.current_frame
image = get_output(hashcode)
if image:
return (image, hashcode)
input = get_input()
if frame.mode == "RGBA":
# Use the transparency mask of the image to paste
input.paste(frame, frame)
else:
input.paste(frame)
return (input, hashcode)
if input_changed:
image = get_output(hashcode)
if image:
return (image, hashcode)
input = get_input()
if frame.mode == "RGBA":
# Use the transparency mask of the image to paste
input.paste(frame, frame)
else:
input.paste(frame)
return (input, hashcode)
else:
return (None, hashcode) | 0.705176 | 0.264486 |
from .util import UrsadbTestContext, store_files, check_query, get_index_hash, UrsadbConfig
from .util import ursadb # noqa
import pytest
def test_indexing_small(ursadb: UrsadbTestContext):
    """Index a single tiny file, then verify topology and two simple queries."""
    store_files(ursadb, "gram3", {"kot": b"Ala ma kota ale czy kot ma Ale?"})
    # "#UNK#" entries are wildcards matched by the test harness.
    expected_topology = {
        "datasets": {
            "#UNK#": {
                "file_count": 1,
                "indexes": [{"type": "gram3", "size": "#UNK#"}],
                "size": "#UNK#",
                "taints": [],
            }
        }
    }
    ursadb.check_request("topology;", expected_topology)
    # A present trigram hits the stored file; an absent one yields nothing.
    check_query(ursadb, '"ale"', ["kot"])
    check_query(ursadb, '":hmm:"', [])
def test_indexing_big(ursadb: UrsadbTestContext):
    """Index a ~20 MiB file so the bitmap index builder code path is exercised."""
    # Large filler prefix followed by a small searchable suffix.
    filler = b"!" * (20 * 1024 * 1024)
    store_files(ursadb, "gram3", {"kot": filler + b"ale bitmap index builder here!"})
    check_query(ursadb, '"ale"', ["kot"])
    check_query(ursadb, '":hmm:"', [])
def test_indexing_list(ursadb: UrsadbTestContext):
    """`index from list` indexes every directory named in the list file."""
    tmpdir = ursadb.tmpdir()
    (tmpdir / "test").mkdir()
    (tmpdir / "test" / "file").write_bytes(b"asdfgh")
    # The list file contains one directory path to index.
    (tmpdir / "list.txt").write_text(str(tmpdir / "test"))
    ursadb.check_request(f"index from list \"{str(tmpdir / 'list.txt')}\";")
    check_query(ursadb, '"asdfgh"', ["file"])
def test_gram3_index_works_as_expected(ursadb: UrsadbTestContext):
    """Sanity-check gram3 index matches (text and raw bytes) and its hash."""
    store_files(
        ursadb,
        "gram3",
        {
            "kot": b"aaaaabb bbccccc",
            "zzz": b"aaaaabbccccc",
            "yyy": b"\xff\xff\xff",
        },
    )
    check_query(ursadb, '"abbc"', ["kot", "zzz"])
    check_query(ursadb, "{ff ff ff}", ["yyy"])
    # Golden hash guards against accidental index-format changes.
    assert get_index_hash(ursadb, "gram3")[:16] == "ca4a0662863a42b9"
def test_text4_index_works_as_expected(ursadb: UrsadbTestContext):
    """Sanity-check text4 index matches and its on-disk hash."""
    store_files(
        ursadb,
        "text4",
        {
            "kot": b"aaaaabb bbccccc",
            "zzz": b"aaaaabbccccc",
            "yyy": b"\xff\xff\xff",
        },
    )
    check_query(ursadb, '"abbc"', ["zzz"])
    check_query(ursadb, "{ff ff ff}", ["kot", "zzz", "yyy"])
    # Golden hash guards against accidental index-format changes.
    assert get_index_hash(ursadb, "text4")[:16] == "32078e5136ea7705"
@pytest.mark.parametrize(
    "ursadb",
    [UrsadbConfig(query_max_ngram=256)],
    indirect=["ursadb"],
)
def test_wide8_index_works_as_expected(ursadb: UrsadbTestContext):
    """Sanity-check wide8 matches (incl. NUL-interleaved strings) and hash."""
    store_files(
        ursadb,
        "wide8",
        {
            "kot": b"aaaaabb bbccccc",
            "zzz": b"aaaaabbccccc",
            "yyy": b"\xff\xff\xff",
            "vvv": b"a\x00b\x00c\x00d\x00efgh",
            "qqq": b"a\x00c\x00b\x00d\x00efgh",
        },
    )
    check_query(ursadb, '"abbc"', ["kot", "zzz", "yyy", "vvv", "qqq"])
    check_query(ursadb, "{ff ff ff}", ["kot", "zzz", "yyy", "vvv", "qqq"])
    check_query(ursadb, '"a\\x00b\\x00c\\x00d\\x00"', ["vvv"])
    check_query(
        ursadb,
        "{61 (00|01) (62|63) (00|01) (63|62) (00|01) 64 00}",
        ["vvv", "qqq"],
    )
    # Golden hash guards against accidental index-format changes.
    assert get_index_hash(ursadb, "wide8")[:16] == "c73b55c36445ca6b"
def test_select_with_taints(ursadb: UrsadbTestContext):
    """Taints on a dataset restrict which files `with taints [...]` matches."""
    store_files(ursadb, "gram3", {"tainted": b"test",})
    # The only dataset so far is the one just created; remember its name.
    topology = ursadb.check_request("topology;")
    dsname = next(iter(topology["result"]["datasets"]))
    store_files(ursadb, "gram3", {"untainted": b"test",})
    # Attach a taint to the first dataset only.
    ursadb.check_request(f'dataset "{dsname}" taint "test";')
    check_query(ursadb, 'with taints [] "test"', ["tainted", "untainted"])
    check_query(ursadb, 'with taints ["test"] "test"', ["tainted"])
    check_query(ursadb, 'with taints ["other"] "test"', [])
    check_query(ursadb, 'with taints ["test", "other"] "test"', [])
def test_select_with_datasets(ursadb: UrsadbTestContext):
    """`with datasets [...]` restricts matches to the named dataset only."""
    store_files(ursadb, "gram3", {"first": b"test",})
    # Capture the dataset name for "first" before a second dataset appears.
    topology = ursadb.check_request("topology;")
    dsname = next(iter(topology["result"]["datasets"]))
    store_files(ursadb, "gram3", {"second": b"test",})
    check_query(ursadb, f'with datasets ["{dsname}"] "test"', ["first"])
def test_index_with_taints(ursadb: UrsadbTestContext):
    """Files indexed with taints expose those taints on their dataset."""
    store_files(ursadb, "gram3", {"kot": b"Random file"}, taints=["taint"])
    ursadb.check_request(
        "topology;",
        {
            "datasets": {
                "#UNK#": {
                    "file_count": 1,
                    "indexes": [{"type": "gram3", "size": "#UNK#"}],
                    "size": "#UNK#",
                    "taints": ["taint"],
                }
            }
        },
    )
    check_query(ursadb, '"file"', ["kot"])
    check_query(ursadb, 'with taints ["taint"] "file"', ["kot"])
    check_query(ursadb, 'with taints ["zzz"] "file"', [])
from .util import ursadb # noqa
import pytest
def test_indexing_small(ursadb: UrsadbTestContext):
    """Index one tiny file, then verify the topology and two basic queries."""
    store_files(ursadb, "gram3", {"kot": b"Ala ma kota ale czy kot ma Ale?"})
    # Expect exactly one dataset with one file and a single gram3 index.
    ursadb.check_request(
        "topology;",
        {
            "datasets": {
                "#UNK#": {
                    "file_count": 1,
                    "indexes": [{"type": "gram3", "size": "#UNK#"}],
                    "size": "#UNK#",
                    "taints": [],
                }
            }
        },
    )
    check_query(ursadb, '"ale"', ["kot"])
    # Content that never occurred must match nothing.
    check_query(ursadb, '":hmm:"', [])
def test_indexing_big(ursadb: UrsadbTestContext):
    """Index a 20 MiB file, exercising the bitmap index builder code path."""
    store_files(
        ursadb,
        "gram3",
        {"kot": b"!" * 1024 * 1024 * 20 + b"ale bitmap index builder here!"},
    )
    check_query(ursadb, '"ale"', ["kot"])
    check_query(ursadb, '":hmm:"', [])
def test_indexing_list(ursadb: UrsadbTestContext):
    """`index from list` indexes every directory named in the list file."""
    tmpdir = ursadb.tmpdir()
    (tmpdir / "test").mkdir()
    (tmpdir / "test" / "file").write_bytes(b"asdfgh")
    # The list file contains one directory path to index.
    (tmpdir / "list.txt").write_text(str(tmpdir / "test"))
    ursadb.check_request(f"index from list \"{str(tmpdir / 'list.txt')}\";")
    check_query(ursadb, '"asdfgh"', ["file"])
def test_gram3_index_works_as_expected(ursadb: UrsadbTestContext):
    """Sanity-check gram3 index matches (text and raw bytes) and its hash."""
    store_files(
        ursadb,
        "gram3",
        {
            "kot": b"aaaaabb bbccccc",
            "zzz": b"aaaaabbccccc",
            "yyy": b"\xff\xff\xff",
        },
    )
    check_query(ursadb, '"abbc"', ["kot", "zzz"])
    check_query(ursadb, "{ff ff ff}", ["yyy"])
    # Golden hash guards against accidental index-format changes.
    assert get_index_hash(ursadb, "gram3")[:16] == "ca4a0662863a42b9"
def test_text4_index_works_as_expected(ursadb: UrsadbTestContext):
    """Sanity-check text4 index matches and its on-disk hash."""
    store_files(
        ursadb,
        "text4",
        {
            "kot": b"aaaaabb bbccccc",
            "zzz": b"aaaaabbccccc",
            "yyy": b"\xff\xff\xff",
        },
    )
    check_query(ursadb, '"abbc"', ["zzz"])
    check_query(ursadb, "{ff ff ff}", ["kot", "zzz", "yyy"])
    # Golden hash guards against accidental index-format changes.
    assert get_index_hash(ursadb, "text4")[:16] == "32078e5136ea7705"
@pytest.mark.parametrize(
    "ursadb",
    [UrsadbConfig(query_max_ngram=256)],
    indirect=["ursadb"],
)
def test_wide8_index_works_as_expected(ursadb: UrsadbTestContext):
    """Sanity-check wide8 matches (incl. NUL-interleaved strings) and hash."""
    store_files(
        ursadb,
        "wide8",
        {
            "kot": b"aaaaabb bbccccc",
            "zzz": b"aaaaabbccccc",
            "yyy": b"\xff\xff\xff",
            "vvv": b"a\x00b\x00c\x00d\x00efgh",
            "qqq": b"a\x00c\x00b\x00d\x00efgh",
        },
    )
    check_query(ursadb, '"abbc"', ["kot", "zzz", "yyy", "vvv", "qqq"])
    check_query(ursadb, "{ff ff ff}", ["kot", "zzz", "yyy", "vvv", "qqq"])
    check_query(ursadb, '"a\\x00b\\x00c\\x00d\\x00"', ["vvv"])
    check_query(
        ursadb,
        "{61 (00|01) (62|63) (00|01) (63|62) (00|01) 64 00}",
        ["vvv", "qqq"],
    )
    # Golden hash guards against accidental index-format changes.
    assert get_index_hash(ursadb, "wide8")[:16] == "c73b55c36445ca6b"
def test_select_with_taints(ursadb: UrsadbTestContext):
    """Taints on a dataset restrict which files `with taints [...]` matches."""
    store_files(ursadb, "gram3", {"tainted": b"test",})
    # The only dataset so far is the one just created; remember its name.
    topology = ursadb.check_request("topology;")
    dsname = next(iter(topology["result"]["datasets"]))
    store_files(ursadb, "gram3", {"untainted": b"test",})
    # Attach a taint to the first dataset only.
    ursadb.check_request(f'dataset "{dsname}" taint "test";')
    check_query(ursadb, 'with taints [] "test"', ["tainted", "untainted"])
    check_query(ursadb, 'with taints ["test"] "test"', ["tainted"])
    check_query(ursadb, 'with taints ["other"] "test"', [])
    check_query(ursadb, 'with taints ["test", "other"] "test"', [])
def test_select_with_datasets(ursadb: UrsadbTestContext):
    """`with datasets [...]` restricts matches to the named dataset only."""
    store_files(ursadb, "gram3", {"first": b"test",})
    # Capture the dataset name for "first" before a second dataset appears.
    topology = ursadb.check_request("topology;")
    dsname = next(iter(topology["result"]["datasets"]))
    store_files(ursadb, "gram3", {"second": b"test",})
    check_query(ursadb, f'with datasets ["{dsname}"] "test"', ["first"])
def test_index_with_taints(ursadb: UrsadbTestContext):
    """Files indexed with taints expose those taints on their dataset."""
    store_files(ursadb, "gram3", {"kot": b"Random file"}, taints=["taint"])
    ursadb.check_request(
        "topology;",
        {
            "datasets": {
                "#UNK#": {
                    "file_count": 1,
                    "indexes": [{"type": "gram3", "size": "#UNK#"}],
                    "size": "#UNK#",
                    "taints": ["taint"],
                }
            }
        },
    )
    check_query(ursadb, '"file"', ["kot"])
    check_query(ursadb, 'with taints ["taint"] "file"', ["kot"])
    check_query(ursadb, 'with taints ["zzz"] "file"', [])
from jqdatapy.api import run_query
from zvt.contract.recorder import TimeSeriesDataRecorder
from zvt.domain import Index
from zvt.domain import StockSummary
from zvt.utils.time_utils import to_time_str
from zvt.utils.utils import multiple_number
# JoinQuant exchange_code legend:
# 322001 Shanghai market (whole)
# 322002 Shanghai A-shares
# 322003 Shanghai B-shares
# 322004 Shenzhen market (the exchange does not publish volume and trade count)
# 322005 Shenzhen main board
# 322006 SME (small & medium enterprise) board
# 322007 ChiNext board
# zvt index code -> JoinQuant exchange_code (see the legend above).
code_map_jq = {
    '000001': '322002',
    '399106': '322004',
    '399001': '322005',
    '399005': '322006',
    '399006': '322007'
}
class StockSummaryRecorder(TimeSeriesDataRecorder):
    """Record daily market-wide trading summaries (PE, market cap, volume,
    turnover, ...) for the major Chinese exchange boards, sourced from
    JoinQuant's ``finance.STK_EXCHANGE_TRADE_INFO`` table."""

    entity_provider = 'exchange'
    entity_schema = Index

    provider = 'joinquant'
    data_schema = StockSummary

    def __init__(self, force_update=False, sleeping_time=5, exchanges=None, entity_ids=None, day_data=False,
                 entity_filters=None, ignore_failed=True, real_time=False, fix_duplicate_way='add',
                 start_timestamp=None, end_timestamp=None) -> None:
        # Shanghai A-shares, Shenzhen market, SZSE component, SME board, ChiNext.
        codes = ['000001', '399106', '399001', '399005', '399006']
        super().__init__(force_update, sleeping_time, exchanges, entity_ids, codes=codes, day_data=day_data,
                         entity_filters=entity_filters, ignore_failed=ignore_failed, real_time=real_time,
                         fix_duplicate_way=fix_duplicate_way, start_timestamp=start_timestamp,
                         end_timestamp=end_timestamp)

    def record(self, entity, start, end, size, timestamps):
        """Fetch exchange summary rows since ``start`` and convert them to
        ``StockSummary`` dicts.  Money-like fields are multiplied by 1e8 and
        volume by 1e4 (presumably the upstream units are 亿元/万手 — confirm
        against the JoinQuant table docs)."""
        jq_code = code_map_jq.get(entity.code)

        # fix: removed a leftover debug `print(df)` that dumped every fetched
        # dataframe page to stdout.
        df = run_query(table='finance.STK_EXCHANGE_TRADE_INFO',
                       conditions=f'exchange_code#=#{jq_code}&date#>=#{to_time_str(start)}', parse_dates=['date'])

        json_results = []
        for item in df.to_dict(orient='records'):
            result = {
                'provider': self.provider,
                'timestamp': item['date'],
                'name': entity.name,
                'pe': item['pe_average'],
                'total_value': multiple_number(item['total_market_cap'], 100000000),
                # NOTE: 'vaule' (sic) matches the StockSummary schema field
                # name and must not be corrected here alone.
                'total_tradable_vaule': multiple_number(item['circulating_market_cap'], 100000000),
                'volume': multiple_number(item['volume'], 10000),
                'turnover': multiple_number(item['money'], 100000000),
                'turnover_rate': item['turnover_ratio']
            }
            json_results.append(result)

        # Fewer than 100 rows: presumably the last page — mark as one-shot.
        if len(json_results) < 100:
            self.one_shot = True

        return json_results

    def get_data_map(self):
        # Records are built manually in record(); no column mapping is needed.
        return None
if __name__ == '__main__':
    # Manual entry point: run one full recording pass.
    StockSummaryRecorder().run()
# the __all__ is generated
__all__ = ['StockSummaryRecorder']
from zvt.contract.recorder import TimeSeriesDataRecorder
from zvt.domain import Index
from zvt.domain import StockSummary
from zvt.utils.time_utils import to_time_str
from zvt.utils.utils import multiple_number
# JoinQuant exchange_code legend:
# 322001 Shanghai market (whole)
# 322002 Shanghai A-shares
# 322003 Shanghai B-shares
# 322004 Shenzhen market (the exchange does not publish volume and trade count)
# 322005 Shenzhen main board
# 322006 SME (small & medium enterprise) board
# 322007 ChiNext board
# zvt index code -> JoinQuant exchange_code (see the legend above).
code_map_jq = {
    '000001': '322002',
    '399106': '322004',
    '399001': '322005',
    '399005': '322006',
    '399006': '322007'
}
class StockSummaryRecorder(TimeSeriesDataRecorder):
    """Record daily market-wide trading summaries (PE, market cap, volume,
    turnover, ...) for the major Chinese exchange boards, sourced from
    JoinQuant's ``finance.STK_EXCHANGE_TRADE_INFO`` table."""

    entity_provider = 'exchange'
    entity_schema = Index

    provider = 'joinquant'
    data_schema = StockSummary

    def __init__(self, force_update=False, sleeping_time=5, exchanges=None, entity_ids=None, day_data=False,
                 entity_filters=None, ignore_failed=True, real_time=False, fix_duplicate_way='add',
                 start_timestamp=None, end_timestamp=None) -> None:
        # Shanghai A-shares, Shenzhen market, SZSE component, SME board, ChiNext.
        codes = ['000001', '399106', '399001', '399005', '399006']
        super().__init__(force_update, sleeping_time, exchanges, entity_ids, codes=codes, day_data=day_data,
                         entity_filters=entity_filters, ignore_failed=ignore_failed, real_time=real_time,
                         fix_duplicate_way=fix_duplicate_way, start_timestamp=start_timestamp,
                         end_timestamp=end_timestamp)

    def record(self, entity, start, end, size, timestamps):
        """Fetch exchange summary rows since ``start`` and convert them to
        ``StockSummary`` dicts.  Money-like fields are multiplied by 1e8 and
        volume by 1e4 (presumably the upstream units are 亿元/万手 — confirm
        against the JoinQuant table docs)."""
        jq_code = code_map_jq.get(entity.code)

        # fix: removed a leftover debug `print(df)` that dumped every fetched
        # dataframe page to stdout.
        df = run_query(table='finance.STK_EXCHANGE_TRADE_INFO',
                       conditions=f'exchange_code#=#{jq_code}&date#>=#{to_time_str(start)}', parse_dates=['date'])

        json_results = []
        for item in df.to_dict(orient='records'):
            result = {
                'provider': self.provider,
                'timestamp': item['date'],
                'name': entity.name,
                'pe': item['pe_average'],
                'total_value': multiple_number(item['total_market_cap'], 100000000),
                # NOTE: 'vaule' (sic) matches the StockSummary schema field
                # name and must not be corrected here alone.
                'total_tradable_vaule': multiple_number(item['circulating_market_cap'], 100000000),
                'volume': multiple_number(item['volume'], 10000),
                'turnover': multiple_number(item['money'], 100000000),
                'turnover_rate': item['turnover_ratio']
            }
            json_results.append(result)

        # Fewer than 100 rows: presumably the last page — mark as one-shot.
        if len(json_results) < 100:
            self.one_shot = True

        return json_results

    def get_data_map(self):
        # Records are built manually in record(); no column mapping is needed.
        return None
if __name__ == '__main__':
    # Manual entry point: run one full recording pass.
    StockSummaryRecorder().run()
# the __all__ is generated
__all__ = ['StockSummaryRecorder']
import cgi
import datetime
import time
from tempfile import NamedTemporaryFile
from fabric.api import *
from fabric import colors
@task
def update():
    """Requires code_root env variable. Does a git pull and restarts the web server"""
    # Fail fast if the deploy target was not configured.
    require('code_root')
    git_pull()
    restart_web_server()
@task
def git_pull():
    """Does a git stash then a git pull on the project"""
    # Stash first so stray local edits on the server never block the pull.
    run('cd %s; git stash; git pull' % (env.code_root))
@task
def restart_web_server():
    """Restart the web server"""
    # Uses the apache instance installed next to the code checkout.
    run('%s/apache2/bin/restart' % env.code_root_parent)
@task
def migrate():
    """Runs python manage.py migrate (applies outstanding DB migrations)"""
    run('cd %s; python manage.py migrate --settings=%s' % (env.code_root, env.settings_file))
@task
def collect_static():
    """Runs python manage.py collectstatic --noinput"""
    run('cd %s; python manage.py collectstatic --settings=%s --noinput' % (env.code_root, env.settings_file))
@task
def pip_install():
    """Runs pip install -r requirements/frozen.txt (for example site)"""
    run('cd %s; pip install -r requirements/frozen.txt' % (env.code_root))
@task
def publish_changes():
    """Runs these functions in order (git_pull, pip_install, migrate, collect_static, restart_web_server)"""
    # Order matters: code first, then deps, schema, assets, and the restart last.
    git_pull()
    pip_install()
    migrate()
    collect_static()
    restart_web_server()
@task
def do_nothing():
    """Demo task: print a counter, wait on a prompt, print again (Python 2)."""
    for x in range(0, 20):
        print 'nothing {}'.format(x)
        time.sleep(0.2)
    input = prompt('Enter something:')  # NOTE: shadows the builtin `input`
    for x in range(0, 20):
        print 'nothing {} - {}'.format(x, input)
        time.sleep(0.2)
@task
def color_test():
number = 1
for x in range(0, 2):
print colors.blue('{}: Blue text'.format(number), bold=False)
number += 1
time.sleep(0.2)
print colors.cyan('{}: cyan text'.format(number), bold=False)
number += 1
time.sleep(0.2)
print colors.green('{}: green text'.format(number), bold=False)
number += 1
time.sleep(0.2)
print colors.magenta('{}: magenta text'.format(number), bold=False)
number += 1
time.sleep(0.2)
print colors.red('{}: red text'.format(number), bold=False)
number += 1
time.sleep(0.2)
print colors.white('{}: white text'.format(number), bold=False)
number += 1
time.sleep(0.2)
print colors.yellow('{}: yellow text'.format(number), bold=False)
number += 1
time.sleep(0.2)
print colors.blue('{}: Blue text bold'.format(number), bold=True)
number += 1
time.sleep(0.2)
print colors.cyan('{}: cyan text bold'.format(number), bold=True)
number += 1
time.sleep(0.2)
print colors.green('{}: green text bold'.format(number), bold=True)
number += 1
time.sleep(0.2)
print colors.magenta('{}: magenta text bold'.format(number), bold=True)
number += 1
time.sleep(0.2)
print colors.red('{}: red text bold'.format(number), bold=True)
number += 1
time.sleep(0.2)
print colors.white('{}: white text bold'.format(number), bold=True)
number += 1
time.sleep(0.2)
print colors.yellow('{}: yellow text bold'.format(number), bold=True)
number += 1
time.sleep(0.2)
print
@task
def test_env(argument="nothing"):
    """Debug task: echo the given argument, then dump the fabric env (Python 2)."""
    print("Task Arguments:")
    print argument
    print
    print("Task Env:")
    # iteritems() is Python 2 only; env behaves like a dict here.
    for x, y in env.iteritems():
        print '{}: {}'.format(x, y)
@task
def update_sandbox_site(comment_text):
    """Puts a small status page on the server noting deploy time and comment"""
    # delete=False: the file must survive close() so put() can still read it.
    # NOTE(review): the temp file is never removed afterwards — confirm the
    # leak is acceptable for this sandbox task.
    file_to_deliver = NamedTemporaryFile(delete=False)
    file_text = "Deployed at: {} <br /> Comment: {}".format(datetime.datetime.now().strftime('%c'), cgi.escape(comment_text))
    file_to_deliver.write(file_text)
    file_to_deliver.close()
    put(file_to_deliver.name, '/var/www/html/index.html', use_sudo=True)
import datetime
import time
from tempfile import NamedTemporaryFile
from fabric.api import *
from fabric import colors
@task
def update():
    """Requires code_root env variable. Does a git pull and restarts the web server"""
    # Fail fast if the deploy target was not configured.
    require('code_root')
    git_pull()
    restart_web_server()
@task
def git_pull():
    """Does a git stash then a git pull on the project"""
    # Stash first so stray local edits on the server never block the pull.
    run('cd %s; git stash; git pull' % (env.code_root))
@task
def restart_web_server():
    """Restart the web server"""
    # Uses the apache instance installed next to the code checkout.
    run('%s/apache2/bin/restart' % env.code_root_parent)
@task
def migrate():
    """Runs python manage.py migrate (applies outstanding DB migrations)"""
    run('cd %s; python manage.py migrate --settings=%s' % (env.code_root, env.settings_file))
@task
def collect_static():
    """Runs python manage.py collectstatic --noinput"""
    run('cd %s; python manage.py collectstatic --settings=%s --noinput' % (env.code_root, env.settings_file))
@task
def pip_install():
    """Runs pip install -r requirements/frozen.txt (for example site)"""
    run('cd %s; pip install -r requirements/frozen.txt' % (env.code_root))
@task
def publish_changes():
    """Runs these functions in order (git_pull, pip_install, migrate, collect_static, restart_web_server)"""
    # Order matters: code first, then deps, schema, assets, and the restart last.
    git_pull()
    pip_install()
    migrate()
    collect_static()
    restart_web_server()
@task
def do_nothing():
    """Demo task: print a counter, wait on a prompt, print again (Python 2)."""
    for x in range(0, 20):
        print 'nothing {}'.format(x)
        time.sleep(0.2)
    input = prompt('Enter something:')  # NOTE: shadows the builtin `input`
    for x in range(0, 20):
        print 'nothing {} - {}'.format(x, input)
        time.sleep(0.2)
@task
def color_test():
number = 1
for x in range(0, 2):
print colors.blue('{}: Blue text'.format(number), bold=False)
number += 1
time.sleep(0.2)
print colors.cyan('{}: cyan text'.format(number), bold=False)
number += 1
time.sleep(0.2)
print colors.green('{}: green text'.format(number), bold=False)
number += 1
time.sleep(0.2)
print colors.magenta('{}: magenta text'.format(number), bold=False)
number += 1
time.sleep(0.2)
print colors.red('{}: red text'.format(number), bold=False)
number += 1
time.sleep(0.2)
print colors.white('{}: white text'.format(number), bold=False)
number += 1
time.sleep(0.2)
print colors.yellow('{}: yellow text'.format(number), bold=False)
number += 1
time.sleep(0.2)
print colors.blue('{}: Blue text bold'.format(number), bold=True)
number += 1
time.sleep(0.2)
print colors.cyan('{}: cyan text bold'.format(number), bold=True)
number += 1
time.sleep(0.2)
print colors.green('{}: green text bold'.format(number), bold=True)
number += 1
time.sleep(0.2)
print colors.magenta('{}: magenta text bold'.format(number), bold=True)
number += 1
time.sleep(0.2)
print colors.red('{}: red text bold'.format(number), bold=True)
number += 1
time.sleep(0.2)
print colors.white('{}: white text bold'.format(number), bold=True)
number += 1
time.sleep(0.2)
print colors.yellow('{}: yellow text bold'.format(number), bold=True)
number += 1
time.sleep(0.2)
print
@task
def test_env(argument="nothing"):
    """Debug task: echo the given argument, then dump the fabric env (Python 2)."""
    print("Task Arguments:")
    print argument
    print
    print("Task Env:")
    # iteritems() is Python 2 only; env behaves like a dict here.
    for x, y in env.iteritems():
        print '{}: {}'.format(x, y)
@task
def update_sandbox_site(comment_text):
    """Puts a small status page on the server noting deploy time and comment"""
    # delete=False: the file must survive close() so put() can still read it.
    # NOTE(review): the temp file is never removed afterwards — confirm the
    # leak is acceptable for this sandbox task.
    file_to_deliver = NamedTemporaryFile(delete=False)
    file_text = "Deployed at: {} <br /> Comment: {}".format(datetime.datetime.now().strftime('%c'), cgi.escape(comment_text))
    file_to_deliver.write(file_text)
    file_to_deliver.close()
    put(file_to_deliver.name, '/var/www/html/index.html', use_sudo=True)
from __future__ import annotations
import asyncio
from typing import TYPE_CHECKING
from warnings import warn
from sanic.exceptions import SanicException
if TYPE_CHECKING:
from sanic import Sanic
class AsyncioServer:
    """
    Wraps an asyncio server with functionality that might be useful to
    a user who needs to manage the server lifecycle manually.
    """

    __slots__ = ("app", "connections", "loop", "serve_coro", "server")

    def __init__(
        self,
        app: Sanic,
        loop,
        serve_coro,
        connections,
    ):
        # Note, Sanic already called "before_server_start" events
        # before this helper was even created. So we don't need it here.
        self.app = app
        self.connections = connections
        self.loop = loop
        self.serve_coro = serve_coro
        # Set by __await__ once serve_coro has produced a server; None until then.
        self.server = None

    @property
    def init(self):
        # Deprecated accessor retained only for backward compatibility.
        warn(
            "AsyncioServer.init has been deprecated and will be removed "
            "in v22.6. Use Sanic.state.is_started instead.",
            DeprecationWarning,
        )
        return self.app.state.is_started

    def startup(self):
        """
        Trigger "before_server_start" events
        """
        return self.app._startup()

    def before_start(self):
        """
        Trigger "before_server_start" events
        """
        return self._server_event("init", "before")

    def after_start(self):
        """
        Trigger "after_server_start" events
        """
        return self._server_event("init", "after")

    def before_stop(self):
        """
        Trigger "before_server_stop" events
        """
        return self._server_event("shutdown", "before")

    def after_stop(self):
        """
        Trigger "after_server_stop" events
        """
        return self._server_event("shutdown", "after")

    def is_serving(self) -> bool:
        # False when the underlying server was never created or not started.
        if self.server:
            return self.server.is_serving()
        return False

    def wait_closed(self):
        # Returns None when there is no server yet.
        if self.server:
            return self.server.wait_closed()

    def close(self):
        # NOTE(review): if called before the server exists, wait_closed()
        # returns None and ensure_future(None) raises — presumably close()
        # is only used after awaiting self; verify.
        # NOTE(review): the explicit loop= argument to ensure_future was
        # removed in Python 3.10 — confirm the supported Python range.
        if self.server:
            self.server.close()
        coro = self.wait_closed()
        task = asyncio.ensure_future(coro, loop=self.loop)
        return task

    def start_serving(self):
        return self._serve(self.server.start_serving)

    def serve_forever(self):
        return self._serve(self.server.serve_forever)

    def _serve(self, serve_func):
        # Silently returns None when no server exists yet.
        if self.server:
            if not self.app.state.is_started:
                raise SanicException(
                    "Cannot run Sanic server without first running "
                    "await server.startup()"
                )
            try:
                return serve_func()
            except AttributeError:
                # Older asyncio/uvloop versions lack start_serving /
                # serve_forever on the server object.
                name = serve_func.__name__
                raise NotImplementedError(
                    f"server.{name} not available in this version "
                    "of asyncio or uvloop."
                )

    def _server_event(self, concern: str, action: str):
        if not self.app.state.is_started:
            raise SanicException(
                "Cannot dispatch server event without "
                "first running await server.startup()"
            )
        return self.app._server_event(concern, action, loop=self.loop)

    def __await__(self):
        """
        Starts the asyncio server, returns AsyncServerCoro
        """
        task = asyncio.ensure_future(self.serve_coro)
        # Cooperatively yield until the serve coroutine has finished starting.
        while not task.done():
            yield
        self.server = task.result()
        return self
import asyncio
from typing import TYPE_CHECKING
from warnings import warn
from sanic.exceptions import SanicException
if TYPE_CHECKING:
from sanic import Sanic
class AsyncioServer:
    """
    Wraps an asyncio server with functionality that might be useful to
    a user who needs to manage the server lifecycle manually.
    """

    __slots__ = ("app", "connections", "loop", "serve_coro", "server")

    def __init__(
        self,
        app: Sanic,
        loop,
        serve_coro,
        connections,
    ):
        # Note, Sanic already called "before_server_start" events
        # before this helper was even created. So we don't need it here.
        self.app = app
        self.connections = connections
        self.loop = loop
        self.serve_coro = serve_coro
        # Set by __await__ once serve_coro has produced a server; None until then.
        self.server = None

    @property
    def init(self):
        # Deprecated accessor retained only for backward compatibility.
        warn(
            "AsyncioServer.init has been deprecated and will be removed "
            "in v22.6. Use Sanic.state.is_started instead.",
            DeprecationWarning,
        )
        return self.app.state.is_started

    def startup(self):
        """
        Trigger "before_server_start" events
        """
        return self.app._startup()

    def before_start(self):
        """
        Trigger "before_server_start" events
        """
        return self._server_event("init", "before")

    def after_start(self):
        """
        Trigger "after_server_start" events
        """
        return self._server_event("init", "after")

    def before_stop(self):
        """
        Trigger "before_server_stop" events
        """
        return self._server_event("shutdown", "before")

    def after_stop(self):
        """
        Trigger "after_server_stop" events
        """
        return self._server_event("shutdown", "after")

    def is_serving(self) -> bool:
        # False when the underlying server was never created or not started.
        if self.server:
            return self.server.is_serving()
        return False

    def wait_closed(self):
        # Returns None when there is no server yet.
        if self.server:
            return self.server.wait_closed()

    def close(self):
        # NOTE(review): if called before the server exists, wait_closed()
        # returns None and ensure_future(None) raises — presumably close()
        # is only used after awaiting self; verify.
        # NOTE(review): the explicit loop= argument to ensure_future was
        # removed in Python 3.10 — confirm the supported Python range.
        if self.server:
            self.server.close()
        coro = self.wait_closed()
        task = asyncio.ensure_future(coro, loop=self.loop)
        return task

    def start_serving(self):
        return self._serve(self.server.start_serving)

    def serve_forever(self):
        return self._serve(self.server.serve_forever)

    def _serve(self, serve_func):
        # Silently returns None when no server exists yet.
        if self.server:
            if not self.app.state.is_started:
                raise SanicException(
                    "Cannot run Sanic server without first running "
                    "await server.startup()"
                )
            try:
                return serve_func()
            except AttributeError:
                # Older asyncio/uvloop versions lack start_serving /
                # serve_forever on the server object.
                name = serve_func.__name__
                raise NotImplementedError(
                    f"server.{name} not available in this version "
                    "of asyncio or uvloop."
                )

    def _server_event(self, concern: str, action: str):
        if not self.app.state.is_started:
            raise SanicException(
                "Cannot dispatch server event without "
                "first running await server.startup()"
            )
        return self.app._server_event(concern, action, loop=self.loop)

    def __await__(self):
        """
        Starts the asyncio server, returns AsyncServerCoro
        """
        task = asyncio.ensure_future(self.serve_coro)
        # Cooperatively yield until the serve coroutine has finished starting.
        while not task.done():
            yield
        self.server = task.result()
        return self
from more_or_less import PageOfHeight
from more_or_less.fixed_size_screen import FixedSizeScreen
from more_or_less.input import Input
from more_or_less.more_page_builder import MorePageBuilder
from more_or_less.output import Output
from more_or_less.page_builder import StopOutput
from more_or_less.wrapped_page import WrappedPage
from unittest.mock import Mock
import unittest
class TestUtil(unittest.TestCase):
    """Shared assertion helpers and factories for MorePageBuilder tests."""

    def assertIsPageOfType(self, page, page_type):
        ''' assertIsInstance, but will first strip page-wrappers '''
        self.assertIsInstance(_skip_page_wrappers(page), page_type)

    def assertIsPageOfHeight(self, page, height):
        """Assert that page is a PageOfHeight of exactly `height` lines."""
        self.assertIsPageOfType(page, PageOfHeight)
        self.assertEqual(height, page.height)

    def assertIsFullscreenPage(self, page, screen_height=1000):
        """Assert that page fills the screen minus the prompt line."""
        self.assertIsPageOfHeight(page, _page_height_for_screen(screen_height))

    def get_more_page_builder(self, output=None, input=None, plugins=None, screen_height=1000):
        """Build a MorePageBuilder, mocking any dependency not supplied."""
        return MorePageBuilder(
            input=input if input else Mock(Input),
            output=output if output else Mock(Output),
            screen_dimensions=FixedSizeScreen(height=screen_height),
            plugins=plugins,
        )
class TestMorePageBuilder(TestUtil):
    """Behavioral tests for MorePageBuilder's paging and prompt handling."""

    def test_build_first_page_returns_page_of_screen_height_minus_one(self):
        screen_height = 10
        builder = self.get_more_page_builder(screen_height=screen_height)
        page = builder.build_first_page()
        # One line of the screen is reserved for the '--More--' prompt.
        self.assertIsPageOfHeight(page, screen_height - 1)

    def test_build_next_page_prompts_user_for_action(self):
        input = Mock(Input)
        input.get_character.return_value = ' '
        builder = self.get_more_page_builder(input=input)
        builder.build_next_page()
        input.get_character.assert_called_once_with('--More--')

    def test_returns_full_screen_page_if_user_presses_space(self):
        screen_height = 10
        input = Mock(Input)
        builder = self.get_more_page_builder(input=input, screen_height=10)
        input.get_character.return_value = ' '
        page = builder.build_next_page()
        self.assertIsFullscreenPage(page, screen_height)

    def test_returns_one_line_page_if_user_presses_enter(self):
        input = Mock(Input)
        builder = self.get_more_page_builder(input=input)
        input.get_character.return_value = '\r'
        page = builder.build_next_page()
        self.assertIsPageOfHeight(page, 1)

    def test_enter_works_both_on_newline_and_carriage_return(self):
        input = Mock(Input)
        builder = self.get_more_page_builder(input=input)
        input.get_character.return_value = '\n'
        page = builder.build_next_page()
        self.assertIsPageOfHeight(page, 1)

    def test_stops_output_if_user_presses_q(self):
        input = Mock(Input)
        builder = self.get_more_page_builder(input=input)
        input.get_character.return_value = 'q'
        with self.assertRaises(StopOutput):
            builder.build_next_page()

    def test_stops_output_if_user_presses_Q(self):
        input = Mock(Input)
        builder = self.get_more_page_builder(input=input)
        input.get_character.return_value = 'Q'
        with self.assertRaises(StopOutput):
            builder.build_next_page()

    def test_stops_output_on_ctrl_c(self):
        # Ctrl-C surfaces as KeyboardInterrupt from the input layer.
        input = Mock(Input)
        builder = self.get_more_page_builder(input=input)
        input.get_character.side_effect = KeyboardInterrupt
        with self.assertRaises(StopOutput):
            builder.build_next_page()

    def test_ignores_unexpected_user_input(self):
        input = Mock(Input)
        builder = self.get_more_page_builder(input=input)
        # Unknown keys are skipped; the prompt repeats until a valid key.
        input.get_character.side_effect = ['a', 'b', 'c', '\r']
        builder.build_next_page()
        self.assertEqual(4, input.get_character.call_count)

    def test_user_can_enter_count_before_enter(self):
        input = Mock(Input)
        builder = self.get_more_page_builder(input=input)
        input.get_character.side_effect = ['5', '\n']
        page = builder.build_next_page()
        self.assertIsPageOfHeight(page, 5)

    def test_count_becomes_the_new_default_for_enter(self):
        input = Mock(Input)
        builder = self.get_more_page_builder(input=input)
        input.get_character.side_effect = ['5', '\n']
        builder.build_next_page()
        input.get_character.side_effect = ['\n']
        second_page = builder.build_next_page()
        self.assertIsPageOfHeight(second_page, 5)

    def test_can_specify_count_bigger_than_10(self):
        input = Mock(Input)
        builder = self.get_more_page_builder(input=input)
        # Digits accumulate into a multi-digit count before the command key.
        input.get_character.side_effect = ['5', '0', '0', '\n']
        page = builder.build_next_page()
        self.assertIsPageOfHeight(page, 500)

    def test_user_can_enter_count_before_space(self):
        input = Mock(Input)
        builder = self.get_more_page_builder(input=input)
        input.get_character.side_effect = ['5', ' ']
        page = builder.build_next_page()
        self.assertIsPageOfHeight(page, 5)

    def test_count_does_not_become_the_new_default_for_space(self):
        input = Mock(Input)
        screen_height = 666
        builder = self.get_more_page_builder(input=input, screen_height=screen_height)
        input.get_character.side_effect = ['5', ' ']
        builder.build_next_page()
        input.get_character.side_effect = [' ']
        second_page = builder.build_next_page()
        self.assertIsFullscreenPage(second_page, screen_height)
def _page_height_for_screen(screen_height):
height_reserved_for_more_prompt = 1
return screen_height - height_reserved_for_more_prompt
def _skip_page_wrappers(page):
    """Unwrap nested WrappedPage layers and return the innermost page."""
    inner = page
    while isinstance(inner, WrappedPage):
        inner = inner.wrapped_page
    return inner
from more_or_less.fixed_size_screen import FixedSizeScreen
from more_or_less.input import Input
from more_or_less.more_page_builder import MorePageBuilder
from more_or_less.output import Output
from more_or_less.page_builder import StopOutput
from more_or_less.wrapped_page import WrappedPage
from unittest.mock import Mock
import unittest
class TestUtil(unittest.TestCase):
    """Shared assertion helpers and factories for MorePageBuilder tests."""

    def assertIsPageOfType(self, page, page_type):
        ''' assertIsInstance, but will first strip page-wrappers '''
        self.assertIsInstance(_skip_page_wrappers(page), page_type)

    def assertIsPageOfHeight(self, page, height):
        """Assert that page is a PageOfHeight of exactly `height` lines."""
        self.assertIsPageOfType(page, PageOfHeight)
        self.assertEqual(height, page.height)

    def assertIsFullscreenPage(self, page, screen_height=1000):
        """Assert that page fills the screen minus the prompt line."""
        self.assertIsPageOfHeight(page, _page_height_for_screen(screen_height))

    def get_more_page_builder(self, output=None, input=None, plugins=None, screen_height=1000):
        """Build a MorePageBuilder, mocking any dependency not supplied."""
        return MorePageBuilder(
            input=input if input else Mock(Input),
            output=output if output else Mock(Output),
            screen_dimensions=FixedSizeScreen(height=screen_height),
            plugins=plugins,
        )
class TestMorePageBuilder(TestUtil):
def test_build_first_page_returns_page_of_screen_height_minus_one(self):
screen_height = 10
builder = self.get_more_page_builder(screen_height=screen_height)
page = builder.build_first_page()
self.assertIsPageOfHeight(page, screen_height - 1)
def test_build_next_page_prompts_user_for_action(self):
input = Mock(Input)
input.get_character.return_value = ' '
builder = self.get_more_page_builder(input=input)
builder.build_next_page()
input.get_character.assert_called_once_with('--More--')
def test_returns_full_screen_page_if_user_presses_space(self):
screen_height = 10
input = Mock(Input)
builder = self.get_more_page_builder(input=input, screen_height=10)
input.get_character.return_value = ' '
page = builder.build_next_page()
self.assertIsFullscreenPage(page, screen_height)
def test_returns_one_line_page_if_user_presses_enter(self):
input = Mock(Input)
builder = self.get_more_page_builder(input=input)
input.get_character.return_value = '\r'
page = builder.build_next_page()
self.assertIsPageOfHeight(page, 1)
def test_enter_works_both_on_newline_and_carriage_return(self):
input = Mock(Input)
builder = self.get_more_page_builder(input=input)
input.get_character.return_value = '\n'
page = builder.build_next_page()
self.assertIsPageOfHeight(page, 1)
def test_stops_output_if_user_presses_q(self):
input = Mock(Input)
builder = self.get_more_page_builder(input=input)
input.get_character.return_value = 'q'
with self.assertRaises(StopOutput):
builder.build_next_page()
def test_stops_output_if_user_presses_Q(self):
input = Mock(Input)
builder = self.get_more_page_builder(input=input)
input.get_character.return_value = 'Q'
with self.assertRaises(StopOutput):
builder.build_next_page()
def test_stops_output_on_ctrl_c(self):
input = Mock(Input)
builder = self.get_more_page_builder(input=input)
input.get_character.side_effect = KeyboardInterrupt
with self.assertRaises(StopOutput):
builder.build_next_page()
def test_ignores_unexpected_user_input(self):
input = Mock(Input)
builder = self.get_more_page_builder(input=input)
input.get_character.side_effect = ['a', 'b', 'c', '\r']
builder.build_next_page()
self.assertEqual(4, input.get_character.call_count)
def test_user_can_enter_count_before_enter(self):
    """Typing a digit before enter scrolls that many lines."""
    user_input = Mock(Input)
    user_input.get_character.side_effect = ['5', '\n']
    builder = self.get_more_page_builder(input=user_input)
    page = builder.build_next_page()
    self.assertIsPageOfHeight(page, 5)
def test_count_becomes_the_new_default_for_enter(self):
    """A count given with enter is remembered for subsequent enters."""
    user_input = Mock(Input)
    builder = self.get_more_page_builder(input=user_input)
    user_input.get_character.side_effect = ['5', '\n']
    builder.build_next_page()
    # A bare enter now reuses the previously supplied count.
    user_input.get_character.side_effect = ['\n']
    second_page = builder.build_next_page()
    self.assertIsPageOfHeight(second_page, 5)
def test_can_specify_count_bigger_than_10(self):
    """Multiple digits accumulate into one multi-digit count."""
    user_input = Mock(Input)
    user_input.get_character.side_effect = ['5', '0', '0', '\n']
    builder = self.get_more_page_builder(input=user_input)
    page = builder.build_next_page()
    self.assertIsPageOfHeight(page, 500)
def test_user_can_enter_count_before_space(self):
    """Typing a digit before space scrolls that many lines instead of a page."""
    user_input = Mock(Input)
    user_input.get_character.side_effect = ['5', ' ']
    builder = self.get_more_page_builder(input=user_input)
    page = builder.build_next_page()
    self.assertIsPageOfHeight(page, 5)
def test_count_does_not_become_the_new_default_for_space(self):
    """A count typed before space applies once; the next space is a full screen."""
    screen_height = 666
    user_input = Mock(Input)
    builder = self.get_more_page_builder(input=user_input, screen_height=screen_height)
    user_input.get_character.side_effect = ['5', ' ']
    builder.build_next_page()
    # Unlike enter, space forgets the one-off count.
    user_input.get_character.side_effect = [' ']
    second_page = builder.build_next_page()
    self.assertIsFullscreenPage(second_page, screen_height)
def _page_height_for_screen(screen_height):
height_reserved_for_more_prompt = 1
return screen_height - height_reserved_for_more_prompt
def _skip_page_wrappers(page):
    """Unwrap nested WrappedPage decorators and return the innermost page."""
    current = page
    while isinstance(current, WrappedPage):
        current = current.wrapped_page
    return current
import os
import numpy as np
import pandas as pd
from keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping, ReduceLROnPlateau
from keras.layers import Conv2D, Concatenate, MaxPooling2D, Conv2DTranspose, UpSampling2D, Dropout, BatchNormalization
from keras.models import Input, Model
from keras.optimizers import Adam
from img_segmentation.image_gen import ImageGenerator
from img_segmentation.utils import f1_loss, f1_np, iou_np, precision_np, recall_np, error_np, load_images, channel_mean_stdev, \
store_prediction, load_img_msk_paths
def conv_block(m, dim, acti, bn, res, do=0):
    """Two 3x3 convolutions with optional batch-norm, dropout and residual concat.

    Args:
        m: input tensor.
        dim: number of filters for both convolutions.
        acti: activation function name.
        bn: apply BatchNormalization after each convolution if truthy.
        res: concatenate the block input to its output (residual) if truthy.
        do: dropout rate applied between the two convolutions (0 disables it).
    """
    x = Conv2D(dim, 3, activation=acti, padding='same')(m)
    if bn:
        x = BatchNormalization()(x)
    if do:
        x = Dropout(do)(x)
    x = Conv2D(dim, 3, activation=acti, padding='same')(x)
    if bn:
        x = BatchNormalization()(x)
    if res:
        return Concatenate()([m, x])
    return x
def level_block(m, dim, depth, inc, acti, do, bn, mp, up, res):
    """Recursively assemble one encoder/decoder level of the U-Net.

    Each level: conv block -> downsample -> recurse -> upsample -> skip
    concatenation -> conv block. Depth 0 is the bottleneck, the only place
    where dropout is applied.
    """
    if depth <= 0:
        # Bottleneck block.
        return conv_block(m, dim, acti, bn, res, do)
    skip = conv_block(m, dim, acti, bn, res)
    down = MaxPooling2D()(skip) if mp else Conv2D(dim, 3, strides=2, padding='same')(skip)
    inner = level_block(down, int(inc * dim), depth - 1, inc, acti, do, bn, mp, up, res)
    if up:
        # Upsampling followed by a 2x2 convolution.
        decoded = UpSampling2D()(inner)
        decoded = Conv2D(dim, 2, activation=acti, padding='same')(decoded)
    else:
        decoded = Conv2DTranspose(dim, 3, strides=2, activation=acti, padding='same')(inner)
    merged = Concatenate()([skip, decoded])
    return conv_block(merged, dim, acti, bn, res)
class UNet(object):
    """Builds, trains, evaluates and runs a U-Net segmentation model.

    U-Net: Convolutional Networks for Biomedical Image Segmentation
    (https://arxiv.org/abs/1505.04597)

    Arguments:
        img_shape: (height, width, channels) of the input images.
        n_class: number of output channels, classes to predict in one-hot coding.
        root_features: number of channels of the first conv.
        layers: zero indexed depth of the U-structure, number of layers.
        inc_rate: rate at which the conv channels will increase.
        activation: activation function after convolutions.
        dropout: amount of dropout in the contracting part.
        batch_norm: adds Batch Normalization if true.
        max_pool: use strided conv instead of maxpooling if false.
        up_conv: use transposed conv instead of upsampling + conv if false.
        residual: add residual connections around each conv block if true.
    """

    def __init__(self, img_shape, n_class=2, root_features=64, layers=4, inc_rate=1., activation='relu', dropout=0.5,
                 batch_norm=False, max_pool=True, up_conv=True, residual=False):
        self.img_shape = img_shape
        self.n_class = n_class
        self.root_features = root_features
        self.layers = layers
        self.inc_rate = inc_rate
        self.activation = activation
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.max_pool = max_pool
        self.up_conv = up_conv
        self.residual = residual
        # Per-channel normalisation statistics; filled in lazily by normalize().
        self.tr_mean = None
        self.tr_std = None
        # Assemble the model graph once at construction time.
        i = Input(shape=img_shape)
        o = level_block(i, root_features, layers, inc_rate, activation, dropout, batch_norm, max_pool, up_conv, residual)
        o = Conv2D(n_class, 1, activation='sigmoid')(o)
        self.model = Model(inputs=i, outputs=o)

    def normalize(self, x):
        """Standardize images channel-wise with the training mean/stdev.

        The statistics are computed from `x` on first use and cached on the
        instance, so subsequent calls reuse the same training statistics.

        Args:
            x: image array of shape (n, height, width, channels).

        Returns:
            The standardized array, same shape as `x`.
        """
        if self.tr_mean is None:
            print('mean and standard deviation of training pictures not calculated yet, calculating...')
            self.tr_mean, self.tr_std = channel_mean_stdev(x)
            print('mean: ', self.tr_mean, 'std: ', self.tr_std)
        x_norm = (x - self.tr_mean.astype('float32')) / self.tr_std.astype('float32')
        return x_norm

    def train(self, model_dir, train_dir, valid_dir, epochs=20, batch_size=3, augmentation=True, normalisation=True, base_dir=None, trainable_index=14, save_aug=False, learning_rate=0.01):
        """Train the U-Net with on-line data augmentation per batch.

        Args:
            model_dir: directory for the checkpoint ('model.h5') and TB logs.
            train_dir: directory (or directories) with training images/masks.
            valid_dir: directory (or directories) with validation images/masks.
            epochs: number of training epochs.
            batch_size: samples per gradient update.
            augmentation: apply on-line augmentation to each batch if True.
            normalisation: kept for interface compatibility; currently unused —
                the generators always apply 'std_norm' normalisation.
            base_dir: if given, load weights from this directory and freeze all
                but the last `trainable_index` layers (transfer learning).
            trainable_index: number of trailing layers left trainable when
                fine-tuning from `base_dir`.
            save_aug: store augmented samples under model_dir/augmentations.
            learning_rate: initial learning rate for the Adam optimizer.
        """
        # Callbacks: checkpoint best model, stop/adapt on val_loss plateaus.
        mc = ModelCheckpoint(os.path.join(model_dir, 'model.h5'), save_best_only=True, save_weights_only=False)
        es = EarlyStopping(monitor='val_loss', patience=30)
        tb = TensorBoard(log_dir=model_dir, write_graph=True)
        lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=20, verbose=1, min_lr=0.0000001)
        # NOTE(review): per-class weighting is not supported by keras for
        # segmentation outputs, hence no class_weight argument below.
        if base_dir is not None:
            # Transfer learning: start from previous weights, freeze the tail.
            self.model.load_weights(os.path.join(base_dir, 'model.h5'))
            for layer in self.model.layers[:-trainable_index]:
                layer.trainable = False
        # Report the trainable status of the individual layers.
        for layer in self.model.layers:
            print(layer.name, layer.trainable)
        self.model.compile(optimizer=Adam(lr=learning_rate), loss=f1_loss,
                           metrics=['acc', 'categorical_crossentropy'])
        self.model.summary()
        path_tr = load_img_msk_paths(train_dir)
        path_va = load_img_msk_paths(valid_dir)
        if save_aug:
            aug_path = os.path.join(model_dir, 'augmentations')
            if not os.path.exists(aug_path):
                print('created augmentation dir', aug_path)
                os.makedirs(aug_path)
        else:
            aug_path = None
        # Augmentation parameters; tune here.
        aug_dict = dict(horizontal_flip=0.5, vertical_flip=0.0, rotation_range=(0.0, 0.0),
                        width_shift_range=(-0.2, 0.2), height_shift_range=(-0.2, 0.2), contrast_range=(0.5, 1.5),
                        zoom_range=(1.0, 1.33), grayscale_range=(0.0, 0.8), brightness_range=(-80, 20),
                        crop_range=(0, 0), blur_range=(0.0, 1.0), shear_range=(0.0, 0.0), prob=0.2)
        train_generator = ImageGenerator(list(path_tr.keys()), masks=path_tr, batch_size=batch_size, dim=(512, 512), shuffle=True,
                                         normalize='std_norm', save_to_dir=aug_path, augmentation=augmentation, aug_dict=aug_dict)
        valid_generator = ImageGenerator(list(path_va.keys()), masks=path_va, batch_size=batch_size, dim=(512, 512), shuffle=True,
                                         normalize='std_norm', augmentation=augmentation, aug_dict=aug_dict)
        self.model.fit_generator(train_generator,
                                 validation_data=valid_generator,
                                 epochs=epochs,
                                 verbose=1,
                                 callbacks=[mc, tb, es, lr],
                                 use_multiprocessing=False,
                                 workers=4)
        print('Training completed')

    def test(self, model_dir, test_img_dirs, output_dir, csv_path=None, roi=None):
        """Evaluate a trained model on a labelled test set and store metrics.

        Args:
            model_dir: directory containing the trained 'model.h5'.
            test_img_dirs: directory (or directories) with test images/masks.
            output_dir: directory where prediction images are stored.
            csv_path: optional metrics CSV path; defaults to model_dir/result.csv.
            roi: optional (x, y, width, height) region the metrics are
                restricted to.
        """
        path_test = load_img_msk_paths(test_img_dirs)
        # One generator with normalisation for the network, one without so the
        # stored prediction overlays keep the original pixel values.
        img_gen_norm = ImageGenerator(list(path_test.keys()), masks=path_test, batch_size=1, shuffle=False, normalize='std_norm', augmentation=False)
        img_gen = ImageGenerator(list(path_test.keys()), masks=path_test, batch_size=1, shuffle=False, normalize=None, augmentation=False)
        n = len(img_gen)
        # assumes 512x512 RGB images with 2-channel one-hot masks — matches the
        # generator dims used in train().
        x_va = np.empty((n, 512, 512, 3))
        y_va = np.empty((n, 512, 512, 2))
        for i in range(n):
            x_va[i], y_va[i] = img_gen[i]
        self.model.compile(optimizer=Adam(lr=0.001), loss=f1_loss, metrics=['acc', 'categorical_crossentropy'])
        self.model.load_weights(os.path.join(model_dir, 'model.h5'))
        p_va = self.model.predict_generator(generator=img_gen_norm, verbose=1)
        scores = self.model.evaluate_generator(img_gen_norm, steps=None, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=1)
        store_prediction(p_va, x_va, output_dir)
        if roi is not None:
            # Restrict metric computation to the region of interest.
            y_va = y_va[:, roi[1]:(roi[1] + roi[3]), roi[0]:(roi[0] + roi[2]), :]
            p_va = p_va[:, roi[1]:(roi[1] + roi[3]), roi[0]:(roi[0] + roi[2]), :]
        res = {'DICE': [f1_np(y_va, p_va)], 'IoU': [iou_np(y_va, p_va)], 'Precision': [precision_np(y_va, p_va)],
               'Recall': [recall_np(y_va, p_va)], 'Error': [error_np(y_va, p_va)]}
        csv_target = os.path.join(model_dir, 'result.csv') if csv_path is None else csv_path
        pd.DataFrame(res).to_csv(csv_target)
        print('DICE: ' + str(f1_np(y_va, p_va)))
        print('IoU: ' + str(iou_np(y_va, p_va)))
        print('Precision: ' + str(precision_np(y_va, p_va)))
        print('Recall: ' + str(recall_np(y_va, p_va)))
        print('Error: ' + str(error_np(y_va, p_va)))
        print('Scores: ', scores)

    def predict(self, model_dir, img_dir, output_dir, batch_size=4, train_dir=None):
        """Segment every image in `img_dir` with a trained model.

        Args:
            model_dir: directory containing the trained 'model.h5'.
            img_dir: directory with the images to segment.
            output_dir: directory where predictions are stored.
            batch_size: prediction batch size.
            train_dir: optional training-image directory used to derive the
                normalisation statistics; without it, precomputed statistics
                of the original training set are used.
        """
        x_va = load_images(os.path.join(img_dir), sort=True, target_size=(512, 512))
        if train_dir is not None and self.tr_mean is None:
            # Bug fix: the keyword here used to be `target` (load_images takes
            # `target_size`), and the hard-coded statistics below were assigned
            # first, which made this branch unreachable.
            x_tr = load_images(os.path.join(train_dir), sort=True, target_size=(512, 512))
            self.normalize(x_tr)
        if self.tr_mean is None:
            # Fall back to precomputed per-channel statistics.
            self.tr_mean = np.array([69.739934, 69.88847943, 65.16021837])
            self.tr_std = np.array([72.98415532, 72.33742881, 71.6508131])
        x_va_norm = self.normalize(x_va)
        self.model.compile(optimizer=Adam(lr=0.001), loss=f1_loss, metrics=['acc', 'categorical_crossentropy'])
        self.model.load_weights(os.path.join(model_dir, 'model.h5'))
        p_va = self.model.predict(x_va_norm, batch_size=batch_size, verbose=1)
        store_prediction(p_va, x_va, output_dir)
import numpy as np
import pandas as pd
from keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping, ReduceLROnPlateau
from keras.layers import Conv2D, Concatenate, MaxPooling2D, Conv2DTranspose, UpSampling2D, Dropout, BatchNormalization
from keras.models import Input, Model
from keras.optimizers import Adam
from img_segmentation.image_gen import ImageGenerator
from img_segmentation.utils import f1_loss, f1_np, iou_np, precision_np, recall_np, error_np, load_images, channel_mean_stdev, \
store_prediction, load_img_msk_paths
def conv_block(m, dim, acti, bn, res, do=0):
""" creates convolutional block for creating u-net
"""
n = Conv2D(dim, 3, activation=acti, padding='same')(m)
n = BatchNormalization()(n) if bn else n
n = Dropout(do)(n) if do else n
n = Conv2D(dim, 3, activation=acti, padding='same')(n)
n = BatchNormalization()(n) if bn else n
return Concatenate()([m, n]) if res else n
def level_block(m, dim, depth, inc, acti, do, bn, mp, up, res):
if depth > 0:
n = conv_block(m, dim, acti, bn, res)
m = MaxPooling2D()(n) if mp else Conv2D(dim, 3, strides=2, padding='same')(n)
m = level_block(m, int(inc*dim), depth-1, inc, acti, do, bn, mp, up, res)
if up:
m = UpSampling2D()(m)
m = Conv2D(dim, 2, activation=acti, padding='same')(m)
else:
m = Conv2DTranspose(dim, 3, strides=2, activation=acti, padding='same')(m)
n = Concatenate()([n, m])
m = conv_block(n, dim, acti, bn, res)
else:
m = conv_block(m, dim, acti, bn, res, do)
return m
class UNet(object):
""" Class which create UNet model and trains it and test it
U-Net: Convolutional Networks for Biomedical Image Segmentation
(https://arxiv.org/abs/1505.04597)
Arguments:
img_shape: (height, width, channels)
n_class: number of output channels, classes to predict in one-hot coding
root_features: number of channels of the first conv
layers: zero indexed depth of the U-structure, number of layers
inc_rate: rate at which the conv channels will increase
activation: activation function after convolutions
dropout: amount of dropout in the contracting part
batch_norm: adds Batch Normalization if true
max_pool: use strided conv instead of maxpooling if false
up_conv: use transposed conv instead of upsamping + conv if false
residual: add residual connections around each conv block if true
"""
def __init__(self, img_shape, n_class=2, root_features=64, layers=4, inc_rate=1., activation='relu', dropout=0.5,
batch_norm=False, max_pool=True, up_conv=True, residual=False):
self.img_shape = img_shape
self.n_class = n_class
self.root_features = root_features
self.layers = layers
self.inc_rate = inc_rate
self.activation = activation
self.dropout = dropout
self.batch_norm = batch_norm
self.max_pool = max_pool
self.up_conv = up_conv
self.residual = residual
self.tr_mean = None
self.tr_std = None
# define model
i = Input(shape=img_shape)
o = level_block(i, root_features, layers, inc_rate, activation, dropout, batch_norm, max_pool, up_conv, residual)
o = Conv2D(n_class, 1, activation='sigmoid')(o)
self.model = Model(inputs=i, outputs=o)
def normalize(self, x):
#self.tr_mean = np.array([69.7399, 69.8885, 65.1602])
#self.tr_std = np.array([72.9841, 72.3374, 71.6508])
if self.tr_mean is None:
print('mean and standard deviation of training pictures not calculated yet, calculating...')
self.tr_mean, self.tr_std = channel_mean_stdev(x)
print('mean: ', self.tr_mean, 'std: ', self.tr_std)
x_norm = (x - self.tr_mean.astype('float32')) / self.tr_std.astype('float32')
# x_norm = (x - np.amin(x)) / np.amax(x)
# img_eq = exposure.equalize_hist(x_norm)
return x_norm
def train(self, model_dir, train_dir, valid_dir, epochs=20, batch_size=3, augmentation=True, normalisation=True, base_dir=None, trainable_index=14, save_aug=False, learning_rate=0.01):
""" trains a unet instance on keras. With on-line data augmentation to diversify training samples in each batch.
example of defining paths
train_dir = "E:\\watson_for_trend\\3_select_for_labelling\\train_cityscape\\"
model_dir = "E:\\watson_for_trend\\5_train\\cityscape_l5f64c3n8e20\\"
"""
# define callbacks
mc = ModelCheckpoint(os.path.join(model_dir, 'model.h5'), save_best_only=True, save_weights_only=False)
es = EarlyStopping(monitor='val_loss', patience=30)
tb = TensorBoard(log_dir=model_dir, write_graph=True) # write_images=True, write_grads=True, histogram_freq=5
lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=20, verbose=1, min_lr=0.0000001)
# define weights (not used now, keras does not support it with segmentation)
class_weights = {0: 0.5, 1: 0.5}
if base_dir is not None:
self.model.load_weights(os.path.join(base_dir, 'model.h5'))
for layer in self.model.layers[:-trainable_index]:
layer.trainable = False
# Check the trainable status of the individual layers
for layer in self.model.layers:
print(layer.name, layer.trainable)
# compile model with optimizer and loss function
self.model.compile(optimizer=Adam(lr=learning_rate), loss=f1_loss,
metrics=['acc', 'categorical_crossentropy'])
# summary of parameters in each layer
self.model.summary()
path_tr = load_img_msk_paths(train_dir)
path_va = load_img_msk_paths(valid_dir)
if save_aug is True:
aug_path = os.path.join(model_dir, 'augmentations')
if not os.path.exists(aug_path):
print('created augmentation dir', aug_path)
os.makedirs(aug_path)
else:
aug_path = None
# augmentation are defined here and can be changed
aug_dict = dict(horizontal_flip=0.5, vertical_flip=0.0, rotation_range=(0.0, 0.0),
width_shift_range=(-0.2, 0.2), height_shift_range=(-0.2, 0.2), contrast_range=(0.5, 1.5),
zoom_range=(1.0, 1.33), grayscale_range=(0.0, 0.8), brightness_range=(-80, 20),
crop_range=(0, 0), blur_range=(0.0, 1.0), shear_range=(0.0, 0.0), prob=0.2)
train_generator = ImageGenerator(list(path_tr.keys()), masks=path_tr, batch_size=batch_size, dim=(512, 512), shuffle=True,
normalize='std_norm', save_to_dir=aug_path, augmentation=augmentation, aug_dict=aug_dict)
valid_generator = ImageGenerator(list(path_va.keys()), masks=path_va, batch_size=batch_size, dim=(512, 512), shuffle=True,
normalize='std_norm', augmentation=augmentation, aug_dict=aug_dict)
# train unet with image_generator
self.model.fit_generator(train_generator,
validation_data=valid_generator,
epochs=epochs,
verbose=1,
callbacks=[mc, tb, es, lr],
use_multiprocessing=False,
workers=4)
print('Training completed')
def test(self, model_dir, test_img_dirs, output_dir, csv_path=None, roi=None):
path_test = load_img_msk_paths(test_img_dirs)
img_gen_norm = ImageGenerator(list(path_test.keys()), masks=path_test, batch_size=1, shuffle=False, normalize='std_norm', augmentation=False)
img_gen = ImageGenerator(list(path_test.keys()), masks=path_test, batch_size=1, shuffle=False, normalize=None, augmentation=False)
n = len(img_gen)
x_va = np.empty((n, 512, 512, 3))
y_va = np.empty((n, 512, 512, 2))
for i in range(n):
x_va[i, ], y_va[i,] = img_gen[i]
self.model.compile(optimizer=Adam(lr=0.001), loss=f1_loss, metrics=['acc', 'categorical_crossentropy'])
self.model.load_weights(os.path.join(model_dir, 'model.h5'))
p_va = self.model.predict_generator(generator=img_gen_norm, verbose=1)
scores = self.model.evaluate_generator(img_gen_norm, steps=None, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=1)
store_prediction(p_va, x_va, output_dir)
if roi is not None:
y_va = y_va[:,roi[1]:(roi[1] + roi[3]), roi[0]:(roi[0] + roi[2]),:]
p_va = p_va[:,roi[1]:(roi[1] + roi[3]), roi[0]:(roi[0] + roi[2]),:]
res = {'DICE': [f1_np(y_va, p_va)], 'IoU': [iou_np(y_va, p_va)], 'Precision': [precision_np(y_va, p_va)],
'Recall': [recall_np(y_va, p_va)], 'Error': [error_np(y_va, p_va)]}
if csv_path is None:
pd.DataFrame(res).to_csv(os.path.join(model_dir, 'result.csv'))
else:
pd.DataFrame(res).to_csv(os.path.join(csv_path))
print('DICE: ' + str(f1_np(y_va, p_va)))
print('IoU: ' + str(iou_np(y_va, p_va)))
print('Precision: ' + str(precision_np(y_va, p_va)))
print('Recall: ' + str(recall_np(y_va, p_va)))
print('Error: ' + str(error_np(y_va, p_va)))
print('Scores: ', scores)
def predict(self, model_dir, img_dir, output_dir, batch_size=4, train_dir=None):
x_va = load_images(os.path.join(img_dir), sort=True, target_size=(512, 512))
self.tr_mean = np.array([69.739934, 69.88847943, 65.16021837])
self.tr_std = np.array([72.98415532, 72.33742881, 71.6508131])
if train_dir is not None and self.tr_mean is None:
x_tr = load_images(os.path.join(train_dir), sort=True, target=(512, 512))
self.normalize(x_tr)
# pre-process
if self.tr_mean is not None:
x_va_norm = self.normalize(x_va)
self.model.compile(optimizer=Adam(lr=0.001), loss=f1_loss, metrics=['acc', 'categorical_crossentropy'])
self.model.load_weights(os.path.join(model_dir, 'model.h5'))
p_va = self.model.predict(x_va_norm, batch_size=batch_size, verbose=1)
store_prediction(p_va, x_va, output_dir) | 0.897907 | 0.419826 |
from __future__ import unicode_literals
import argparse
import json
import sys
import time
from googleapiclient import discovery
from googleapiclient import errors as apierrors
#pylint: disable=no-member
class SlaveManager(object):
  """Class for managing Jenkins slave instances on Google Compute Engine."""

  # Default OAuth scopes granted to newly created instances.
  DEFAULT_SCOPES = ['https://www.googleapis.com/auth/devstorage.read_write']

  def __init__(self, project, zone=None):
    """Create a new SlaveManager.

    Args:
      project (str): the GCE project name.
      zone (str): the destination GCP zone.
    """
    self._project = project
    self._zone = zone
    self._client = self._CreateComputeClient()

  def _CreateComputeClient(self):
    """Creates an API client to do compute operations with.

    Returns:
      Resource: an object with methods for interacting with the service.
    """
    return discovery.build('compute', 'v1')

  def _WaitForOperation(self, operation):
    """Polls a zonal API operation until it completes.

    Args:
      operation (dict): the API request.

    Returns:
      dict: the API call response.

    Raises:
      Exception: if the finished operation reports an error.
    """
    while True:
      result = self._client.zoneOperations().get(
          project=self._project, zone=self._zone, operation=operation['name']
      ).execute()
      if result['status'] == 'DONE':
        if 'error' in result:
          raise Exception(result['error'])
        return result
      # Poll once per second to avoid hammering the API.
      time.sleep(1)

  def _BuildPersistentDiskList(self, persistent_disks):
    """Builds a list of dicts describing all disks to attach.

    Args:
      persistent_disks (dict(str:str)): disks to attach, in the form
          {'persistent_disk_name': 'device_name'}.

    Returns:
      list(dict): the list of disk attachment descriptions.
    """
    disk_list = list()
    # Shared disks are attached read-only.
    mode = 'READ_ONLY'
    if persistent_disks:
      for disk_name, device in persistent_disks.items():
        source_url = (
            'https://www.googleapis.com/compute/v1/projects/{0:s}/zones/{1:s}/'
            'disks/{2:s}').format(self._project, self._zone, disk_name)
        disk_list.append(
            {
                'deviceName': device,
                'source': source_url,
                'mode': mode
            }
        )
    return disk_list

  def CreateInstance(
      self, instance_name, disk_size=None, source_image=None, machinetype=None,
      metadata=None, network=None, persistent_disks=None, scopes=None):
    """Creates a GCE instance and blocks until the operation completes.

    Args:
      instance_name (str): the name to give to the instance.
      disk_size (Optional[int]): the size of the system disk, in GB.
          Must be larger than the image size.
      source_image (Optional[str]): the path to the disk image to use.
          Must be in the form: '/projects/<project_name>/zones/images/...'
      machinetype (Optional[str]): the type of the machine to use.
          For a list of valid values, see:
          https://cloud.google.com/compute/docs/machine-types
      metadata (Optional[dict]): optional metadata to set for the instance.
      network (Optional[str]): type of network to use (default: 'default')
      persistent_disks (Optional[dict(str:str)]): list of disks to attach to
          the instance, in the form {'persistent_disk_name': 'device_name'}.
      scopes (Optional[list[str]]): the list of scopes to set for the instance
    """
    scopes = scopes or self.DEFAULT_SCOPES
    # print() call form works identically on Python 2 and 3 for one argument.
    print('Creating new instance {0:s}'.format(instance_name))
    project_url = 'compute/v1/projects/{0:s}'.format(self._project)
    machine_type_url = '{0:s}/zones/{1:s}/machineTypes/{2:s}'.format(
        project_url, self._zone, machinetype)
    network_url = '{0:s}/global/networks/{1:s}'.format(project_url, network)
    # Boot disk first; it is deleted together with the instance.
    disks = [
        {
            'index': 0,
            'boot': True,
            'mode': 'READ_WRITE',
            'autoDelete': True,
            'initializeParams': {
                'diskName': '{0:s}-bootdisk'.format(instance_name),
                'diskSizeGb': disk_size,
                'sourceImage': source_image,
            }
        }
    ]
    persistent_disks = self._BuildPersistentDiskList(persistent_disks)
    for persistent_disk in persistent_disks:
      disks.append(persistent_disk)
    instance_dict = {
        'name': instance_name,
        'machineType': machine_type_url,
        'disks': disks,
        'networkInterfaces': [{
            'accessConfigs': [{
                'type': 'ONE_TO_ONE_NAT', 'name': 'External NAT'}],
            'network': network_url, }],
        'serviceAccounts': [{
            'email': 'default',
            'scopes': scopes,
        }],
    }
    if metadata:
      instance_dict['metadata'] = metadata
    operation = self._client.instances().insert(
        project=self._project, body=instance_dict, zone=self._zone).execute()
    self._WaitForOperation(operation)
if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--attach_persistent_disk', action='append', required=False,
      metavar=('PERSISTENT_DISK'), help=(
          'Attach PERSISTENT_DISK to the instance (ie: "evidence-images"). '
          'It will be attached as /dev/disk/by-id/google-PERSISTENT_DISK')
  )
  parser.add_argument(
      '--attach_persistent_disk_with_name', action='append', required=False,
      metavar=('PERSISTENT_DISK:DEVICE_NAME'), help=(
          'Attach PERSISTENT_DISK to the instance (ie: "evidence-images"). '
          'It will be attached as /dev/disk/by-id/google-DEVICE_NAME')
  )
  parser.add_argument(
      '--disk_size', action='store', required=False, default=200, type=int,
      help='Boot disk size, in GB (Default: %(default)s)')
  parser.add_argument(
      '--instance_name', action='store', required=True, help='Name of instance')
  parser.add_argument(
      '--source_image', action='store', required=True,
      help='Path to the image, ie: /projects/<project_name>/zones/images/...')
  parser.add_argument(
      '--linux_startup_script_url', action='store', required=False,
      metavar=('SCRIPT_URL'),
      help='GCS url to a startup script for a Linux instance')
  parser.add_argument(
      '--machine_type', action='store', required=False, default='n1-standard-8',
      help=('Type of machine (Default: "%(default)s)". For a list of valid '
            'values, see https://cloud.google.com/compute/docs/machine-types'))
  parser.add_argument(
      '--network', action='store', required=False, default='default',
      help='Type of network to use (Default: "%(default)s")')
  parser.add_argument(
      '--project', action='store', required=True, help='Name of the project')
  parser.add_argument(
      '--ssh_public_key', action='append', required=False,
      help=('Specify SSH public keys to use. '
            'Example: \'root:ssh-rsa AAAA... root\''))
  parser.add_argument(
      '--windows_startup_script_url', action='store', required=False,
      metavar=('SCRIPT_URL'),
      help='GCS url to a startup script for a Windows instance')
  parser.add_argument(
      '--zone', action='store', required=True, help='The zone for the instance')
  flags = parser.parse_args(sys.argv[1:])

  manager = SlaveManager(project=flags.project, zone=flags.zone)

  instance_metadata = {'items': []}
  if flags.windows_startup_script_url:
    instance_metadata['items'].append({
        'key': 'windows-startup-script-url',
        'value': flags.windows_startup_script_url
    })
  if flags.linux_startup_script_url:
    instance_metadata['items'].append({
        'key': 'startup-script-url',
        'value': flags.linux_startup_script_url
    })
  if flags.ssh_public_key:
    instance_metadata['items'].append({
        'key': 'ssh-keys',
        'value': '\n'.join(flags.ssh_public_key)
    })

  persistent_disks_dict = {}
  # Bug fix: both flags use action='append', so their values are lists (or
  # None) — the old code used the list itself as a dict key / called
  # .split(':') on the list. Iterate over the entries instead.
  for pd_name in flags.attach_persistent_disk or []:
    persistent_disks_dict[pd_name] = pd_name
  for pd_spec in flags.attach_persistent_disk_with_name or []:
    pd_name, device_name = pd_spec.split(':')
    # Bug fix: CreateInstance expects {disk_name: device_name}; the old code
    # built the mapping inverted, so the disk URL was built from the device
    # name.
    persistent_disks_dict[pd_name] = device_name

  try:
    manager.CreateInstance(
        flags.instance_name, persistent_disks=persistent_disks_dict,
        disk_size=flags.disk_size,
        source_image=flags.source_image, machinetype=flags.machine_type,
        metadata=instance_metadata, network=flags.network)
  except apierrors.HttpError as error:
    error_dict = json.loads(error.content)
    status = error_dict['error'].get('code', None)
    error_message = error_dict['error'].get('message', '')
    if status == 409 and error_message.endswith('already exists'):
      # Instance already exists: treat as success.
      print(error_message)
    # Bug fix: this must be elif — with two independent ifs, a handled 409
    # still fell into the second if's else branch and was re-raised.
    elif status == 400 and error_message.endswith(
        'The referenced image resource cannot be found.'):
      print(error_message)
    else:
      raise
from __future__ import unicode_literals
import argparse
import json
import sys
import time
from googleapiclient import discovery
from googleapiclient import errors as apierrors
#pylint: disable=no-member
class SlaveManager(object):
"""Class for managing Jenkins Slaves."""
DEFAULT_SCOPES = ['https://www.googleapis.com/auth/devstorage.read_write']
def __init__(self, project, zone=None):
"""Create a new SlaveManager.
Args:
project (str): the GCE project name.
zone (str): the destination GCP zone.
"""
self._project = project
self._zone = zone
self._client = self._CreateComputeClient()
def _CreateComputeClient(self):
"""Creates an API client to do compute operations with.
Returns:
Resource: an object with methods for interacting with the service.
"""
return discovery.build('compute', 'v1')
def _WaitForOperation(self, operation):
"""Waits for an API operation to complete.
Args:
operation (dict): the API request.
Returns:
dict: the API call response.
"""
while True:
result = self._client.zoneOperations().get(
project=self._project, zone=self._zone, operation=operation['name']
).execute()
if result['status'] == 'DONE':
if 'error' in result:
raise Exception(result['error'])
return result
time.sleep(1)
def _BuildPersistentDiskList(self, persistent_disks):
"""Builds a list of dicts describing all disks to attach.
Args:
persistent_disks (dict(str:str)]): list of disks to attach, in the form
{'persistent_disk_name': 'device_name'}.
Returns:
list (dict): the list of disks to attach.
"""
disk_list = list()
mode = 'READ_ONLY'
if persistent_disks:
for disk_name, device in persistent_disks.items():
source_url = (
'https://www.googleapis.com/compute/v1/projects/{0:s}/zones/{1:s}/'
'disks/{2:s}').format(self._project, self._zone, disk_name)
disk_list.append(
{
'deviceName': device,
'source': source_url,
'mode': mode
}
)
return disk_list
def CreateInstance(
self, instance_name, disk_size=None, source_image=None, machinetype=None,
metadata=None, network=None, persistent_disks=None, scopes=None):
"""Creates a GCE instance.
Args:
instance_name (str): the name to give to the instance.
disk_size (Optional[int]): the size of the system disk, in GB.
Must be larger than the image size.
source_image (Optional[str]): the path to the disk image to use.
Must be in the form: '/projects/<project_name>/zones/images/...'])
machinetype (Optional[str]): the type of the machine to use.
For a list of valid values, see:
https://cloud.google.com/compute/docs/machine-types
metadata (Optional[dict]): optional metadata to set for the instance.
network (Optional[str]): type of network to use (default: 'default')
persistent_disks (Optional[dict(str:str)]): list of disks to attach to
the instance, in the form {'persistent_disk_name': 'device_name'}.
scopes (Optional[list[str]]): the list of scopes to set for the instance
"""
scopes = scopes or self.DEFAULT_SCOPES
print 'Creating new instance {0:s}'.format(instance_name)
project_url = 'compute/v1/projects/{0:s}'.format(self._project)
machine_type_url = '{0:s}/zones/{1:s}/machineTypes/{2:s}'.format(
project_url, self._zone, machinetype)
network_url = '{0:s}/global/networks/{1:s}'.format(project_url, network)
disks = [
{
'index': 0,
'boot': True,
'mode': 'READ_WRITE',
'autoDelete': True,
'initializeParams': {
'diskName': '{0:s}-bootdisk'.format(instance_name),
'diskSizeGb': disk_size,
'sourceImage': source_image,
}
}
]
persistent_disks = self._BuildPersistentDiskList(persistent_disks)
for persistent_disk in persistent_disks:
disks.append(persistent_disk)
instance_dict = {
'name': instance_name,
'machineType': machine_type_url,
'disks': disks,
'networkInterfaces': [{
'accessConfigs': [{
'type': 'ONE_TO_ONE_NAT', 'name': 'External NAT'}],
'network': network_url, }],
'serviceAccounts': [{
'email': 'default',
'scopes': scopes,
}],
}
if metadata:
instance_dict['metadata'] = metadata
operation = self._client.instances().insert(
project=self._project, body=instance_dict, zone=self._zone).execute()
self._WaitForOperation(operation)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--attach_persistent_disk', action='append', required=False,
metavar=('PERSISTENT_DISK'), help=(
'Attach PERSISTENT_DISK to the instance (ie: "evidence-images"). '
'It will be attached as /dev/disk/by-id/google-PERSISTENT_DISK')
)
parser.add_argument(
'--attach_persistent_disk_with_name', action='append', required=False,
metavar=('PERSISTENT_DISK:DEVICE_NAME'), help=(
'Attach PERSISTENT_DISK to the instance (ie: "evidence-images"). '
'It will be attached as /dev/disk/by-id/google-DEVICE_NAME')
)
parser.add_argument(
'--disk_size', action='store', required=False, default=200, type=int,
help='Boot disk size, in GB (Default: %(default)s)')
parser.add_argument(
'--instance_name', action='store', required=True, help='Name of instance')
parser.add_argument(
'--source_image', action='store', required=True,
help='Path to the image, ie: /projects/<project_name>/zones/images/...')
parser.add_argument(
'--linux_startup_script_url', action='store', required=False,
metavar=('SCRIPT_URL'),
help='GCS url to a startup script for a Linux instance')
parser.add_argument(
'--machine_type', action='store', required=False, default='n1-standard-8',
help=('Type of machine (Default: "%(default)s)". For a list of valid '
'values, see https://cloud.google.com/compute/docs/machine-types'))
parser.add_argument(
'--network', action='store', required=False, default='default',
help='Type of network to use (Default: "%(default)s")')
parser.add_argument(
'--project', action='store', required=True, help='Name of the project')
parser.add_argument(
'--ssh_public_key', action='append', required=False,
help=('Specify SSH public keys to use. '
'Example: \'root:ssh-rsa AAAA... root\''))
parser.add_argument(
'--windows_startup_script_url', action='store', required=False,
metavar=('SCRIPT_URL'),
help='GCS url to a startup script for a Windows instance')
parser.add_argument(
'--zone', action='store', required=True, help='The zone for the instance')
flags = parser.parse_args(sys.argv[1:])
instance_metadata = None
manager = SlaveManager(project=flags.project, zone=flags.zone)
instance_metadata = {'items': []}
if flags.windows_startup_script_url:
startup_item = {
'key': 'windows-startup-script-url',
'value': flags.windows_startup_script_url
}
instance_metadata['items'].append(startup_item)
if flags.linux_startup_script_url:
startup_item = {
'key': 'startup-script-url',
'value': flags.linux_startup_script_url
}
instance_metadata['items'].append(startup_item)
if flags.ssh_public_key:
ssh_key_item = {
'key': 'ssh-keys',
'value': '\n'.join(flags.ssh_public_key)
}
instance_metadata['items'].append(ssh_key_item)
persistent_disks_dict = {}
pd_name = flags.attach_persistent_disk
if pd_name:
persistent_disks_dict[pd_name] = pd_name
if flags.attach_persistent_disk_with_name:
pd_name, device_name = flags.attach_persistent_disk_with_name.split(':')
persistent_disks_dict[device_name] = pd_name
try:
manager.CreateInstance(
flags.instance_name, persistent_disks=persistent_disks_dict,
source_image=flags.source_image, machinetype=flags.machine_type,
metadata=instance_metadata, network=flags.network)
except apierrors.HttpError as error:
error_dict = json.loads(error.content)
status = error_dict['error'].get('code', None)
error_message = error_dict['error'].get('message', '')
if status == 409 and error_message.endswith('already exists'):
print error_message
if status == 400 and error_message.endswith(
'The referenced image resource cannot be found.'):
print error_message
else:
raise error | 0.639511 | 0.179315 |
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from froi.algorithm.imtool import merge
class ROIMergeDialog(QDialog):
    """A dialog for ROI selection and merging.

    Shows one checkbox per volume held by *model*; on Run, the checked
    volumes' data are merged pairwise and the result is added back to
    the model as a new item.
    """

    def __init__(self, model, parent=None):
        """Store the data model and build the dialog.

        model  -- volume model; must provide getItemList/data/index/addItem
        parent -- optional parent widget
        """
        super(ROIMergeDialog, self).__init__(parent)
        self._model = model
        self._init_gui()
        self._create_actions()

    def _init_gui(self):
        """Create one checkbox per volume plus Run/Cancel buttons."""
        # Typo fixed: was 'Selecet Volumes'.
        self.setWindowTitle('Select Volumes')
        # One checkbox per model item; kept for index-aligned lookups later.
        self.imgs = [QCheckBox(item) for item in self._model.getItemList()]
        check_layout = QVBoxLayout()
        for box in self.imgs:
            check_layout.addWidget(box)
        self.run_button = QPushButton("Run")
        self.cancel_button = QPushButton("Cancel")
        button_layout = QHBoxLayout()
        button_layout.addWidget(self.run_button)
        button_layout.addWidget(self.cancel_button)
        main_layout = QVBoxLayout()
        main_layout.addLayout(check_layout)
        main_layout.addLayout(button_layout)
        self.setLayout(main_layout)

    def _create_actions(self):
        """Wire button signals to their handlers."""
        self.run_button.clicked.connect(self._merge)
        # reject() closes with result 0, exactly what the old
        # clicked -> done wiring produced (clicked passes checked=False).
        self.cancel_button.clicked.connect(self.reject)

    def _merge(self):
        """Merge all checked volumes' data and add the result to the model."""
        img_iter = enumerate(self.imgs)
        first_data = None
        vol_name = []
        # Find the first checked volume; its data seeds the merge.
        for idx, box in img_iter:
            if box.isChecked():
                first_data = self._model.data(self._model.index(idx),
                                              Qt.UserRole + 5)
                vol_name.append(self.imgs[idx].text())
                break
        if first_data is not None:
            # img_iter is shared, so this resumes after the seed item.
            for idx, box in img_iter:
                if box.isChecked():
                    data = self._model.data(self._model.index(idx),
                                            Qt.UserRole + 5)
                    try:
                        first_data = merge(first_data, data)
                        vol_name.append(self.imgs[idx].text())
                    except ValueError:
                        # Typo fixed: was 'Conflicts dectected'.
                        QMessageBox.critical(self, "Conflicts detected %s" %
                                             self.imgs[idx].text(),
                                             "Please modify ROI by hands")
                        return
            self._model.addItem(first_data,
                                None,
                                '_'.join(map(str, vol_name)),
                                self._model._data[0].get_header(),
                                None, None, 255, 'rainbow')
        self.done(0)
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from froi.algorithm.imtool import merge
class ROIMergeDialog(QDialog):
    """A dialog for ROI selection and merging.

    Shows one checkbox per volume held by *model*; on Run, the checked
    volumes' data are merged pairwise and the result is added back to
    the model as a new item.
    """

    def __init__(self, model, parent=None):
        # model must provide getItemList/data/index/addItem.
        super(ROIMergeDialog, self).__init__(parent)
        self._model = model
        self._init_gui()
        self._create_actions()

    def _init_gui(self):
        """Initialize GUI."""
        # NOTE(review): window title has a typo ('Selecet') — fix upstream.
        self.setWindowTitle('Selecet Volumes')
        imgs = []
        vol_list = self._model.getItemList()
        for item in vol_list:
            imgs.append(QCheckBox(item))
        self.imgs = imgs
        vboxlayout = QVBoxLayout()
        # NOTE(review): hboxlayout below is never used (hbox_layout is the
        # one actually added); candidate for removal.
        hboxlayout = QHBoxLayout()
        for item in imgs:
            vboxlayout.addWidget(item)
        self.run_button = QPushButton("Run")
        self.cancel_button = QPushButton("Cancel")
        hbox_layout = QHBoxLayout()
        hbox_layout.addWidget(self.run_button)
        hbox_layout.addWidget(self.cancel_button)
        vbox_layout = QVBoxLayout()
        vbox_layout.addLayout(vboxlayout)
        vbox_layout.addLayout(hbox_layout)
        self.setLayout(vbox_layout)

    def _create_actions(self):
        # Run triggers the merge; Cancel closes via done (clicked passes
        # checked=False, so this is effectively done(0)).
        self.run_button.clicked.connect(self._merge)
        self.cancel_button.clicked.connect(self.done)

    def _merge(self):
        """Merge the data of every checked volume into a new model item."""
        img_iter = enumerate(self.imgs)
        first_data, tmp_idx, vol_name = (None, None, [])
        # Locate the first checked checkbox; its data seeds the merge.
        for idx, first in img_iter:
            if first.isChecked():
                first_data = self._model.data(self._model.index(idx),
                                              Qt.UserRole + 5)
                tmp_idx = idx
                vol_name.append(self.imgs[idx].text())
                break
        if first_data is not None:
            # img_iter is shared with the loop above, so this resumes
            # at the item after the seed.
            for idx, item in img_iter:
                if item.isChecked():
                    data = self._model.data(self._model.index(idx),
                                            Qt.UserRole + 5)
                    try:
                        first_data = merge(first_data, data)
                        vol_name.append(self.imgs[idx].text())
                    except ValueError:
                        # Overlapping (conflicting) ROIs cannot be merged.
                        QMessageBox.critical(self, "Conflicts dectected %s" %
                                             self.imgs[idx].text(),
                                             "Please modify ROI by hands")
                        return
            self._model.addItem(first_data,
                                None,
                                '_'.join(map(str, vol_name)),
                                self._model._data[0].get_header(),
                                None, None, 255, 'rainbow')
        self.done(0)
from datetime import datetime
from six.moves import http_client
from django.core.urlresolvers import reverse
from freezegun import freeze_time
from common.test_utils import CassandraTestCase
from common.models import CassandraThingMultiplePK
@freeze_time('14-06-15 15:44:25')
def create_thing():
    """Insert and return one CassandraThingMultiplePK with a frozen timestamp."""
    frozen_now = datetime.now()
    return CassandraThingMultiplePK.objects.create(created_on=frozen_now)
@freeze_time('14-06-15 15:44:25')
class TestViewSet(CassandraTestCase):
    """GET behaviour of the thing viewset endpoint."""

    def test_get_when_no_records_exist(self):
        """An empty table serializes to an empty JSON list."""
        resp = self.client.get(reverse('thing_viewset_api'))
        self.assertEqual(resp.status_code, http_client.OK)
        self.assertEqual(resp.json(), [])

    def test_get(self):
        """A single stored thing is serialized with all four fields."""
        thing = create_thing()
        resp = self.client.get(reverse('thing_viewset_api'))
        self.assertEqual(resp.status_code, http_client.OK)
        expected = [
            {
                'created_on': '2015-06-14T15:44:25',
                'data_abstract': None,
                'another_id': str(thing.another_id),
                'id': str(thing.id),
            },
        ]
        self.assertEqual(resp.json(), expected)
@freeze_time('14-06-15 15:44:25')
class TestListCreateAPIView(CassandraTestCase):
    """GET/POST behaviour of the list-create endpoint."""

    def test_get_when_no_records_exist(self):
        """An empty table serializes to an empty JSON list."""
        response = self.client.get(reverse('thing_listcreate_api'))
        self.assertEqual(response.status_code, http_client.OK)
        self.assertEqual(response.json(), [])

    def test_post(self):
        """POSTing a created_on timestamp stores exactly one record."""
        response = self.client.post(
            reverse('thing_listcreate_api'),
            {
                'created_on': '2015-06-14T15:44:25'
            }
        )
        self.assertEqual(response.status_code, http_client.CREATED)
        # unittest assertion (not a bare ``assert``) so the check survives
        # ``python -O`` and matches the rest of this module's style;
        # ``.count()`` drops the redundant ``.all()``.
        self.assertEqual(CassandraThingMultiplePK.objects.count(), 1)
@freeze_time('14-06-15 15:44:25')
class TestListAPIView(CassandraTestCase):
    """GET behaviour of the read-only list endpoint."""

    def test_get(self):
        """A single stored thing is serialized with all four fields."""
        thing = create_thing()
        resp = self.client.get(reverse('thing_listview_api'))
        self.assertEqual(resp.status_code, http_client.OK)
        expected = [
            {
                'created_on': '2015-06-14T15:44:25',
                'data_abstract': None,
                'another_id': str(thing.another_id),
                'id': str(thing.id),
            },
        ]
        self.assertEqual(resp.json(), expected)
from six.moves import http_client
from django.core.urlresolvers import reverse
from freezegun import freeze_time
from common.test_utils import CassandraTestCase
from common.models import CassandraThingMultiplePK
@freeze_time('14-06-15 15:44:25')
def create_thing():
    # Shared fixture helper: the frozen clock makes datetime.now()
    # deterministic ('14-06-15' parses as 2015-06-14 15:44:25).
    return CassandraThingMultiplePK.objects.create(created_on=datetime.now())
@freeze_time('14-06-15 15:44:25')
class TestViewSet(CassandraTestCase):
    """GET behaviour of the thing viewset endpoint."""

    def test_get_when_no_records_exist(self):
        # Empty table -> empty JSON list.
        response = self.client.get(reverse('thing_viewset_api'))
        self.assertEqual(response.status_code, http_client.OK)
        self.assertEqual(response.json(), [])

    def test_get(self):
        # One stored thing is serialized with all four fields.
        thing = create_thing()
        response = self.client.get(reverse('thing_viewset_api'))
        self.assertEqual(response.status_code, http_client.OK)
        expected_response = [{
            'created_on': '2015-06-14T15:44:25',
            'data_abstract': None,
            'another_id': str(thing.another_id),
            'id': str(thing.id)}
        ]
        self.assertEqual(response.json(), expected_response)
@freeze_time('14-06-15 15:44:25')
class TestListCreateAPIView(CassandraTestCase):
    """GET/POST behaviour of the list-create endpoint."""

    def test_get_when_no_records_exist(self):
        # Empty table -> empty JSON list.
        response = self.client.get(reverse('thing_listcreate_api'))
        self.assertEqual(response.status_code, http_client.OK)
        self.assertEqual(response.json(), [])

    def test_post(self):
        # POSTing a created_on timestamp should store exactly one record.
        response = self.client.post(
            reverse('thing_listcreate_api'),
            {
                'created_on': '2015-06-14T15:44:25'
            }
        )
        self.assertEqual(response.status_code, http_client.CREATED)
        # NOTE(review): bare assert is stripped under `python -O`;
        # prefer self.assertEqual like the rest of the module.
        assert CassandraThingMultiplePK.objects.all().count() == 1
@freeze_time('14-06-15 15:44:25')
class TestListAPIView(CassandraTestCase):
    """GET behaviour of the read-only list endpoint."""

    def test_get(self):
        # One stored thing is serialized with all four fields.
        thing = create_thing()
        response = self.client.get(reverse('thing_listview_api'))
        self.assertEqual(response.status_code, http_client.OK)
        expected_response = [{
            'created_on': '2015-06-14T15:44:25',
            'data_abstract': None,
            'another_id': str(thing.another_id),
            'id': str(thing.id)}
        ]
        self.assertEqual(response.json(), expected_response)
import numpy as np
import torch
from torch.nn import BCEWithLogitsLoss as _BCEWithLogitsLoss
# pylint: disable=too-few-public-methods
class BCELoss:
    """
    Applies a BCE Loss function to the model.

    Internally this resolves to ``BCEWithLogitsLoss``, which fuses a
    Sigmoid layer with the binary cross-entropy loss, so there is no
    need to add a Sigmoid layer at the end of the model.

    Supported Arguments
        weight=None : (Numpy Array | List) Manual rescaling of classes
        reduction='mean' : (String) Specifies the reduction
            that is to be applied to the output.
        pos_weight=None : (Numpy Array | List) A weight of positive examples
    """

    @staticmethod
    def __is_array_like(value):
        """Return True for a list or any numpy-typed value (array or scalar)."""
        return isinstance(value, list) or type(value).__module__ == np.__name__

    def __init__(self, weight=None, reduction='mean', pos_weight=None):
        """
        __init__ method for BCELoss

        Supported Arguments
            weight=None : (Numpy Array | List) Manual rescaling of classes
            reduction='mean' : (String) Specifies the reduction
                that is to be applied to the output.
            pos_weight=None : (Numpy Array | List) A weight of positive examples

        Raises:
            ValueError: for an argument of unsupported type or value.
        """
        if weight is not None and not self.__is_array_like(weight):
            raise ValueError("Invalid weight")
        if reduction not in ("none", "mean", "sum"):
            raise ValueError("Invalid reduction")
        if pos_weight is not None and not self.__is_array_like(pos_weight):
            raise ValueError("Invalid pos_weight")
        self.__weight = weight
        self.__reduction = reduction
        self.__pos_weight = pos_weight

    def get_loss_function(self):
        """
        Returns the details of the loss function as a dict with keys
        'loss_function' and 'keyword_arguments'.

        There is no need to call this method as this is used by the
        Sequential model to build the model
        """
        # Convert the rescaling weights to float tensors only when supplied.
        # pylint: disable=not-callable
        weight = None
        if self.__weight is not None:
            weight = torch.tensor(self.__weight).float()
        pos_weight = None
        if self.__pos_weight is not None:
            pos_weight = torch.tensor(self.__pos_weight).float()
        return {
            'loss_function': _BCEWithLogitsLoss,
            'keyword_arguments': {
                'weight': weight,
                'reduction': self.__reduction,
                'pos_weight': pos_weight
            }
        }
import numpy as np
import torch
from torch.nn import BCEWithLogitsLoss as _BCEWithLogitsLoss
# pylint: disable=too-few-public-methods
class BCELoss:
    """
    Binary cross-entropy loss specification.

    Resolves to ``BCEWithLogitsLoss``, which already includes the
    Sigmoid layer, so the model itself should not end with a Sigmoid.

    Supported Arguments
        weight=None : (Numpy Array | List) Manual rescaling of classes
        reduction='mean' : (String) Specifies the reduction
            that is to be applied to the output.
        pos_weight=None : (Numpy Array | List) A weight of positive examples
    """

    def __init__(self, weight=None, reduction='mean', pos_weight=None):
        """
        Validate and store the loss configuration.

        Supported Arguments
            weight=None : (Numpy Array | List) Manual rescaling of classes
            reduction='mean' : (String) Specifies the reduction
                that is to be applied to the output.
            pos_weight=None : (Numpy Array | List) A weight of positive examples
        """
        def _array_like(value):
            # Accept Python lists and anything from the numpy module.
            return isinstance(value, list) or type(value).__module__ == np.__name__

        if weight is not None and not _array_like(weight):
            raise ValueError("Invalid weight")
        if reduction not in ("none", "mean", "sum"):
            raise ValueError("Invalid reduction")
        if pos_weight is not None and not _array_like(pos_weight):
            raise ValueError("Invalid pos_weight")
        self.__weight = weight
        self.__reduction = reduction
        self.__pos_weight = pos_weight

    def get_loss_function(self):
        """
        Return the loss class and its keyword arguments as a dict.

        Called by the Sequential model while building; not meant to be
        invoked directly by users.
        """
        # pylint: disable=not-callable
        def _to_float_tensor(value):
            # None passes through; everything else becomes a float tensor.
            return None if value is None else torch.tensor(value).float()

        keyword_arguments = {
            'weight': _to_float_tensor(self.__weight),
            'reduction': self.__reduction,
            'pos_weight': _to_float_tensor(self.__pos_weight),
        }
        return {
            'loss_function': _BCEWithLogitsLoss,
            'keyword_arguments': keyword_arguments,
        }
"""Command for deleting a service."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.events import eventflow_operations
from googlecloudsdk.command_lib.events import exceptions
from googlecloudsdk.command_lib.events import resource_args
from googlecloudsdk.command_lib.events import util
from googlecloudsdk.command_lib.run import connection_context
from googlecloudsdk.command_lib.run import flags as serverless_flags
from googlecloudsdk.command_lib.util.concepts import concept_parsers
from googlecloudsdk.command_lib.util.concepts import presentation_specs
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_io
class Delete(base.Command):
    """Delete a trigger."""

    detailed_help = {
        'DESCRIPTION': """\
        {description}
        """,
        'EXAMPLES': """\
        To delete a trigger:
            $ {command} TRIGGER
        """,
    }

    @staticmethod
    def CommonArgs(parser):
        """Defines arguments common to all release tracks."""
        presentation = presentation_specs.ResourcePresentationSpec(
            'trigger',
            resource_args.GetTriggerResourceSpec(),
            'Name of the trigger to delete',
            required=True)
        concept_parsers.ConceptParser([presentation]).AddToParser(parser)

    @staticmethod
    def Args(parser):
        """Registers this track's flags; delegates to CommonArgs."""
        Delete.CommonArgs(parser)

    @staticmethod
    def _DeleteSourceForTrigger(client, trigger_ref):
        """Deletes the event source backing trigger_ref, if one exists."""
        # TODO(b/147308604): Don't delete source when Odin supports ownerRefs
        trigger_obj = client.GetTrigger(trigger_ref)
        if trigger_obj is None:
            return
        source_crds = client.ListSourceCustomResourceDefinitions()
        source_ref, source_crd = util.GetSourceRefAndCrdForTrigger(
            trigger_obj, source_crds)
        if source_ref and source_crd:
            # Delete the source before the trigger because we need the
            # trigger to exist to be able to find the source. Otherwise, we
            # could end up losing a reference to the source if trigger
            # deletion succeeds but source deletion fails.
            try:
                client.DeleteSource(source_ref, source_crd)
            except exceptions.SourceNotFound:
                # Source could have been deleted but trigger deletion failed
                # and this command was re-run, which is fine.
                pass

    def Run(self, args):
        """Executes when the user runs the delete command."""
        conn_context = connection_context.GetConnectionContext(
            args, product=connection_context.Product.EVENTS)
        trigger_ref = args.CONCEPTS.trigger.Parse()
        console_io.PromptContinue(
            message='Trigger [{}] will be deleted.'.format(trigger_ref.Name()),
            throw_if_unattended=True,
            cancel_on_no=True)
        with eventflow_operations.Connect(conn_context) as client:
            if serverless_flags.GetPlatform() == serverless_flags.PLATFORM_MANAGED:
                self._DeleteSourceForTrigger(client, trigger_ref)
            client.DeleteTrigger(trigger_ref)
        log.DeletedResource(trigger_ref.Name(), 'trigger')
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.events import eventflow_operations
from googlecloudsdk.command_lib.events import exceptions
from googlecloudsdk.command_lib.events import resource_args
from googlecloudsdk.command_lib.events import util
from googlecloudsdk.command_lib.run import connection_context
from googlecloudsdk.command_lib.run import flags as serverless_flags
from googlecloudsdk.command_lib.util.concepts import concept_parsers
from googlecloudsdk.command_lib.util.concepts import presentation_specs
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_io
class Delete(base.Command):
    """Delete a trigger."""

    detailed_help = {
        'DESCRIPTION': """\
        {description}
        """,
        'EXAMPLES': """\
        To delete a trigger:
            $ {command} TRIGGER
        """,
    }

    @staticmethod
    def CommonArgs(parser):
        """Defines arguments common to all release tracks."""
        trigger_presentation = presentation_specs.ResourcePresentationSpec(
            'trigger',
            resource_args.GetTriggerResourceSpec(),
            'Name of the trigger to delete',
            required=True)
        concept_parsers.ConceptParser([trigger_presentation]).AddToParser(parser)

    @staticmethod
    def Args(parser):
        # Delegates to CommonArgs; release-track subclasses may extend.
        Delete.CommonArgs(parser)

    def Run(self, args):
        """Executes when the user runs the delete command."""
        conn_context = connection_context.GetConnectionContext(
            args, product=connection_context.Product.EVENTS)
        trigger_ref = args.CONCEPTS.trigger.Parse()
        # Interactive confirmation; aborts the command on 'no' or when
        # running unattended.
        console_io.PromptContinue(
            message='Trigger [{}] will be deleted.'.format(trigger_ref.Name()),
            throw_if_unattended=True,
            cancel_on_no=True)
        with eventflow_operations.Connect(conn_context) as client:
            # TODO(b/147308604): Don't delete source when Odin supports ownerRefs
            if serverless_flags.GetPlatform() == serverless_flags.PLATFORM_MANAGED:
                trigger_obj = client.GetTrigger(trigger_ref)
                if trigger_obj is not None:
                    source_crds = client.ListSourceCustomResourceDefinitions()
                    source_ref, source_crd = util.GetSourceRefAndCrdForTrigger(
                        trigger_obj, source_crds)
                    if source_ref and source_crd:
                        # Delete the source before the trigger because we need the trigger
                        # to exist to be able to find the source. Otherwise, we could end up
                        # losing a reference to the source if trigger deletion succeeds but
                        # source deletion fails.
                        try:
                            client.DeleteSource(source_ref, source_crd)
                        except exceptions.SourceNotFound:
                            # Source could have been deleted but trigger deletion failed
                            # and this command was re-run, which is fine.
                            pass
            client.DeleteTrigger(trigger_ref)
        log.DeletedResource(trigger_ref.Name(), 'trigger')
from redditimagespider.items import RedditImageFileItem
import scrapy
import json
class RedditSpider(scrapy.Spider):
    """Crawls /r/gifs listing pages and yields direct media URLs.

    Walks the reddit desktop-API listing page by page (up to
    ``page_limit`` pages) and resolves gfycat/giphy/imgur posts to
    downloadable file URLs.
    """

    name = 'reddit-spider'
    start_urls = ["https://gateway.reddit.com/desktopapi/v1/subreddits/gifs?sort=new&allow_over18=1"]
    page_limit = 10  # maximum number of listing pages to fetch
    i = 0  # pages fetched so far

    def parse(self, response):
        """Parse one listing page; yield items plus the next-page request."""
        self.i += 1
        data = json.loads(response.text)
        for post_key in data['posts']:
            post = data['posts'][post_key]
            if post['media'] is None:
                continue
            media_type = post['media']['type']
            if media_type == 'text':
                continue
            title = post['title']
            post_id = post['id']  # renamed from 'id' (shadowed the builtin)
            subreddit_name = post['permalink'].split('/')[4]
            meta = {'title': title, 'id': post_id,
                    'subreddit_name': subreddit_name, 'type': media_type}
            if 'gfycat' in post['domain']:
                url = post['source']['url']
                if 'thumbs.gfycat' in url:
                    # Thumb URLs already point directly at the media file.
                    yield RedditImageFileItem(id=post_id, title=title,
                                              file_urls=[url],
                                              subreddit_name=subreddit_name,
                                              media_type=media_type)
                else:
                    yield scrapy.Request(url, callback=self.parse_gfycat,
                                         meta=meta)
            elif 'giphy' in post['domain']:
                url = post['source']['url']
                # Rewrite .../media/<id>/<file> to i.giphy.com/<id>/giphy.webp
                slash_indices = [k for k, ch in enumerate(url) if ch == '/']
                url = url.replace(url[slash_indices[4]:], '/giphy.webp')
                url = url.replace(url[slash_indices[1]:slash_indices[2]],
                                  '/i.giphy.com')
                yield RedditImageFileItem(id=post_id, title=title,
                                          file_urls=[url],
                                          subreddit_name=subreddit_name,
                                          media_type=media_type)
            elif 'imgur' in post['domain']:
                yield scrapy.Request(post['source']['url'],
                                     callback=self.parse_imgur, meta=meta)
            else:
                yield RedditImageFileItem(id=post_id, title=title,
                                          file_urls=[post['media']['content']],
                                          subreddit_name=subreddit_name,
                                          media_type=media_type)
        if self.i < self.page_limit:
            last_id = data['postIds'][-1]
            url = response.url
            if 'after' in response.url:
                # Drop the previous 'after' cursor before appending a new one.
                url = response.url[:response.url.rfind('&')]
            yield scrapy.Request(url + '&after={}'.format(last_id), self.parse)

    def parse_gfycat(self, response):
        """Extract the direct gif URL from a gfycat page."""
        image_url = response.css('.actual-gif-image').xpath('@src').get()
        yield RedditImageFileItem(id=response.meta['id'],
                                  title=response.meta['title'],
                                  subreddit_name=response.meta['subreddit_name'],
                                  file_urls=[image_url],
                                  media_type=response.meta['type'])

    def parse_imgur(self, response):
        """Resolve an imgur post/album page into direct media URLs.

        Bug fix: the original reassigned the reddit post id to each
        album-container id inside the loop, so every entry after the
        first was keyed on the *previous* container's id instead of
        the reddit post id.
        """
        image_urls = {}
        post_id = response.meta['id']
        title = response.meta['title']
        subreddit_name = response.meta['subreddit_name']
        media_type = response.meta['type']
        if media_type == 'embed':
            # Album/embed page: collect every image container.
            for image_container in response.css('.post-image-container'):
                container_id = image_container.xpath('@id').get()
                name = post_id + '_{}'.format(container_id)
                image_type = image_container.xpath('@itemtype').get()
                ext = 'jpg'
                if ('VideoObject' in image_type
                        or 'MusicVideoObject' in image_type
                        or 'Clip' in image_type):
                    ext = 'gifv'
                image_urls[name] = 'https://i.imgur.com/{}.{}'.format(
                    container_id, ext)
        else:
            image_urls[post_id] = response.url
        for image_id, image_url in image_urls.items():
            if 'gif' in image_url:
                content_type = response.headers['Content-Type'].decode('utf-8')
                if 'image' not in content_type and 'video' not in content_type:
                    # The response is an HTML page, not the media itself;
                    # pull the real source from the embedded video element.
                    src = response.css('.video-elements').xpath('source/@src')
                    if src.get() is not None:
                        image_url = 'https:' + src.get()
            yield RedditImageFileItem(id=image_id, title=title,
                                      subreddit_name=subreddit_name,
                                      file_urls=[image_url],
                                      media_type=media_type)
import scrapy
import json
class RedditSpider(scrapy.Spider):
    """Crawls /r/gifs listing pages and yields direct media URLs."""

    name = 'reddit-spider'
    start_urls = ["https://gateway.reddit.com/desktopapi/v1/subreddits/gifs?sort=new&allow_over18=1"]
    page_limit = 10  # maximum number of listing pages to fetch
    i = 0  # pages fetched so far

    def parse(self, response):
        # Parse one desktop-API listing page (JSON).
        self.i += 1
        data = json.loads(response.text)
        last_id = ''
        for postId in data['posts']:
            if data['posts'][postId]['media'] is not None:
                media_type = data['posts'][postId]['media']['type']
                if media_type != 'text':
                    title = data['posts'][postId]['title']
                    # NOTE(review): 'id' shadows the builtin; consider renaming.
                    id = data['posts'][postId]['id']
                    subreddit_name = data['posts'][postId]['permalink'].split('/')[4]
                    meta = {'title': title, 'id': id, 'subreddit_name': subreddit_name, 'type': media_type}
                    if 'gfycat' in data['posts'][postId]['domain']:
                        url = data['posts'][postId]['source']['url']
                        if 'thumbs.gfycat' in url:
                            # Thumb URLs already point directly at the media file.
                            yield RedditImageFileItem(id=id, title=title, file_urls=[url],
                                                      subreddit_name=subreddit_name, media_type=media_type)
                        else:
                            yield scrapy.Request(url, callback=self.parse_gfycat, meta=meta)
                    elif 'giphy' in data['posts'][postId]['domain']:
                        url = data['posts'][postId]['source']['url']
                        # Rewrite .../media/<id>/<file> to i.giphy.com/<id>/giphy.webp
                        slash_indices = [i for i, a in enumerate(url) if a == '/']
                        url = url.replace(url[slash_indices[4]:], '/giphy.webp')
                        url = url.replace(url[slash_indices[1]:slash_indices[2]], '/i.giphy.com')
                        yield RedditImageFileItem(id=id, title=title, file_urls=[url],
                                                  subreddit_name=subreddit_name, media_type=media_type)
                    elif 'imgur' in data['posts'][postId]['domain']:
                        url = data['posts'][postId]['source']['url']
                        yield scrapy.Request(url, callback=self.parse_imgur, meta=meta)
                    else:
                        image_url = data['posts'][postId]['media']['content']
                        yield RedditImageFileItem(id=id, title=title, file_urls=[image_url],
                                                  subreddit_name=subreddit_name, media_type=media_type)
        if self.i < self.page_limit:
            # Follow the listing cursor to the next page.
            last_id = data['postIds'][-1]
            url = response.url
            if 'after' in response.url:
                # Drop the previous 'after' cursor before appending a new one.
                url = response.url[:response.url.rfind('&')]
            yield scrapy.Request(url + '&after={}'.format(last_id), self.parse)

    def parse_gfycat(self, response):
        # Extract the direct gif URL from a gfycat page.
        image_url = response.css('.actual-gif-image').xpath('@src').get()
        yield RedditImageFileItem(id=response.meta['id'], title=response.meta['title'],
                                  subreddit_name=response.meta['subreddit_name'],
                                  file_urls=[image_url], media_type=response.meta['type'])

    def parse_imgur(self, response):
        # Resolve an imgur post/album page into direct media URLs.
        image_urls = {}
        id = response.meta['id']
        title = response.meta['title']
        subreddit_name = response.meta['subreddit_name']
        media_type = response.meta['type']
        if media_type == 'embed':
            image_containers = response.css('.post-image-container')
            for image_container in image_containers:
                name = id + '_{}'.format(image_container.xpath('@id').get())
                # NOTE(review): this reassignment makes every 'name' after the
                # first use the PREVIOUS container's id instead of the reddit
                # post id — likely a bug; keep the post id in its own variable.
                id = image_container.xpath('@id').get()
                image_type = image_container.xpath('@itemtype').get()
                ext = 'jpg'
                if 'VideoObject' in image_type or 'MusicVideoObject' in image_type or 'Clip' in image_type:
                    ext = 'gifv'
                image_urls[name] = 'https://i.imgur.com/{}.{}'.format(id, ext)
        else:
            image_urls[id] = response.url
        for image_id, image_url in image_urls.items():
            if 'gif' in image_url:
                content_type = response.headers['Content-Type'].decode('utf-8')
                if 'image' not in content_type and 'video' not in content_type:
                    # HTML page, not the media itself; pull the real source
                    # from the embedded video element.
                    src = response.css('.video-elements').xpath('source/@src')
                    if src.get() is not None:
                        image_url = 'https:' + src.get()
            yield RedditImageFileItem(id=image_id, title=title,
                                      subreddit_name=subreddit_name,
                                      file_urls=[image_url], media_type=media_type)
# setup the paths
from opentamiltests import *
import tamil.utf8 as utf8
from tamil.tscii import TSCII
import codecs
if PYTHON3:
    # Python 3 removed the ``long`` type; alias it to ``int`` so the
    # numeric literals below (e.g. ``long(1e15 - 1)``) work unchanged.
    class long(int):
        pass
class NumeralStringLimitTests(unittest.TestCase):
    """Fractional number strings spelled out in Tamil (Indian and US styles)."""

    def test_case_basic(self):
        """Indian-style converter on purely fractional inputs."""
        expectations = (
            ('0.33', u"புள்ளி மூன்று மூன்று"),
            ('0.9876', u"புள்ளி ஒன்பது எட்டு ஏழு ஆறு"),
        )
        for number, words in expectations:
            self.assertEqual(words, tamil.numeral.num2tamilstr(number))

    def test_case_american(self):
        """US-style converter on purely fractional inputs."""
        expectations = (
            ('0.33', u"புள்ளி மூன்று மூன்று"),
            ('0.9876', u"புள்ளி ஒன்பது எட்டு ஏழு ஆறு"),
        )
        for number, words in expectations:
            self.assertEqual(words, tamil.numeral.num2tamilstr_american(number))
class NumeralTestAmerican(unittest.TestCase):
    """num2tamilstr_american: US-style (million/billion/trillion) grouping."""

    def runTest(self, var, nos):
        # Check each expected Tamil string against the converter output.
        for numerStr, num in zip(var, nos):
            print('Testing ---> ', num)
            self.assertEqual(numerStr, tamil.numeral.num2tamilstr_american(num), num)
        return

    def test_friend_of_rama(self):
        # 1729 — the Hardy–Ramanujan number; note this exercises the
        # Indian-style converter, not the American one.
        ramanujan = 1729
        gometra = tamil.numeral.num2tamilstr(ramanujan)
        expected = u"ஓர் ஆயிரத்து எழுநூற்று இருபத்தொன்பது"
        self.assertEqual(gometra, expected)

    def test_units(self):
        units = (u'பூஜ்ஜியம்', u'ஒன்று', u'இரண்டு', u'மூன்று', u'நான்கு', u'ஐந்து', u'ஆறு', u'ஏழு', u'எட்டு', u'ஒன்பது', u'பத்து')  # 0-10
        self.runTest(units, range(0, 11))
        return

    def test_basic_pulli(self):
        # x.5 for x in 0..10 — 'புள்ளி' is the decimal-point word.
        numerals = (u'புள்ளி ஐந்து', u'ஒன்று புள்ளி ஐந்து', u'இரண்டு புள்ளி ஐந்து', u'மூன்று புள்ளி ஐந்து', u'நான்கு புள்ளி ஐந்து', u'ஐந்து புள்ளி ஐந்து', u'ஆறு புள்ளி ஐந்து', u'ஏழு புள்ளி ஐந்து', u'எட்டு புள்ளி ஐந்து', u'ஒன்பது புள்ளி ஐந்து', u'பத்து புள்ளி ஐந்து')
        numbers = [i + 0.5 for i in range(0, 11)]
        self.runTest(numerals, numbers)
        return

    def test_teens(self):
        teens = (u'பதினொன்று ', u'பனிரண்டு ', u'பதிமூன்று ', u'பதினான்கு ', u'பதினைந்து ', u'பதினாறு ', u'பதினேழு ', u'பதினெட்டு ', u'பத்தொன்பது ')  # 11-19
        self.runTest(teens, range(11, 20))
        return

    def test_tens(self):
        tens = (u'பத்து', u'இருபது', u'முப்பது', u'நாற்பது', u'ஐம்பது', u'அறுபது', u'எழுபது', u'எண்பது', u'தொன்னூறு')  # 10-90
        self.runTest(tens, range(10, 100, 10))
        return

    def test_100s(self):
        hundreds = (u'நூறு', u'இருநூறு ', u'முன்னூறு ', u'நாநூறு ', u'ஐநூறு ', u'அறுநூறு ', u'எழுநூறு ', u'எண்ணூறு ', u'தொள்ளாயிரம் ')  # 100 - 900
        self.runTest(hundreds, range(100, 1000, 100))
        return

    def test_max(self):
        # Largest supported US-style value: one less than a quadrillion.
        maxno = long(1e15 - 1)
        expected = u'தொள்ளாயிரத்து தொன்னூற்றொன்பது டிரில்லியன் தொள்ளாயிரத்து தொன்னூற்றொன்பது பில்லியன் தொள்ளாயிரத்து தொன்னூற்றொன்பது மில்லியன் தொள்ளாயிரத்து தொன்னூற்றொன்பது ஆயிரத்து தொள்ளாயிரத்து தொன்னூற்றொன்பது'
        self.assertEqual(tamil.numeral.num2tamilstr_american(maxno), expected)
        return

    def test_numerals(self):
        # Spot checks across magnitudes, keyed number -> expected string.
        var = {0: u"பூஜ்ஜியம்",
               long(1e7): u"பத்து மில்லியன்",
               long(1e9 - 1): u"தொள்ளாயிரத்து தொன்னூற்றொன்பது மில்லியன் தொள்ளாயிரத்து தொன்னூற்றொன்பது ஆயிரத்து தொள்ளாயிரத்து தொன்னூற்றொன்பது",
               3060: u"மூன்று ஆயிரத்து அறுபது",
               1: u"ஒன்று",
               2: u"இரண்டு",
               3: u"மூன்று",
               5: u"ஐந்து",
               10: u"பத்து",
               11: u"பதினொன்று ",
               17: u"பதினேழு ",
               19: u"பத்தொன்பது ",
               20: u"இருபது",
               21: u"இருபத்தொன்று",
               1051: u"ஓர் ஆயிரத்து ஐம்பத்தொன்று",
               100000: u"நூறு ஆயிரம்",
               100001: u"நூறு ஆயிரத்து ஒன்று",
               10011: u"பத்து ஆயிரத்து பதினொன்று ",
               49: u"நாற்பத்தொன்பது",
               50: u"ஐம்பது",
               55: u"ஐம்பத்தைந்து",
               1000001: u"ஒரு மில்லியன் ஒன்று",
               90: u"தொன்னூறு",
               99: u"தொன்னூற்றொன்பது",
               100: u"நூறு",
               101: u"நூற்றி ஒன்று",
               1000: u"ஓர் ஆயிரம்",
               111: u"நூற்றி பதினொன்று ",
               1000000000000: u"ஒரு டிரில்லியன்",
               1011: u"ஓர் ஆயிரத்து பதினொன்று "}
        for k, actual_v in var.items():
            v = tamil.numeral.num2tamilstr_american(k)
            print('verifying => # %d' % k)
            self.assertEqual(v, actual_v, k)
        return
class NumeralTest(unittest.TestCase):
    """num2tamilstr: Indian-style (lakh/crore) grouping."""

    def runTest(self, var, nos):
        # Check each expected Tamil string against the converter output.
        for numerStr, num in zip(var, nos):
            print('Testing ---> ', num)
            self.assertEqual(numerStr, tamil.numeral.num2tamilstr(num), num)
        return

    def test_units(self):
        units = (u'பூஜ்ஜியம்', u'ஒன்று', u'இரண்டு', u'மூன்று', u'நான்கு', u'ஐந்து', u'ஆறு', u'ஏழு', u'எட்டு', u'ஒன்பது', u'பத்து')  # 0-10
        self.runTest(units, range(0, 11))
        return

    def test_teens(self):
        teens = (u'பதினொன்று ', u'பனிரண்டு ', u'பதிமூன்று ', u'பதினான்கு ', u'பதினைந்து ', u'பதினாறு ', u'பதினேழு ', u'பதினெட்டு ', u'பத்தொன்பது ')  # 11-19
        self.runTest(teens, range(11, 20))
        return

    def test_tens(self):
        tens = (u'பத்து', u'இருபது', u'முப்பது', u'நாற்பது', u'ஐம்பது', u'அறுபது', u'எழுபது', u'எண்பது', u'தொன்னூறு')  # 10-90
        self.runTest(tens, range(10, 100, 10))
        return

    def test_100s(self):
        hundreds = (u'நூறு', u'இருநூறு ', u'முன்னூறு ', u'நாநூறு ', u'ஐநூறு ', u'அறுநூறு ', u'எழுநூறு ', u'எண்ணூறு ', u'தொள்ளாயிரம் ')  # 100 - 900
        self.runTest(hundreds, range(100, 1000, 100))
        return

    def test_max(self):
        # Largest supported Indian-style value: one less than a trillion.
        maxno = long(1e12 - 1)
        expected = u'தொன்னூற்றொன்பது ஆயிரத்து தொள்ளாயிரத்து தொன்னூற்றொன்பது கோடியே தொன்னூற்றொன்பது இலட்சத்து தொன்னூற்றொன்பது ஆயிரத்து தொள்ளாயிரத்து தொன்னூற்றொன்பது'
        self.assertEqual(tamil.numeral.num2tamilstr(maxno), expected)
        return

    def test_numerals(self):
        # Spot checks across magnitudes, keyed number -> expected string.
        var = {0: u"பூஜ்ஜியம்",
               3060: u"மூன்று ஆயிரத்து அறுபது",
               1: u"ஒன்று",
               2: u"இரண்டு",
               3: u"மூன்று",
               5: u"ஐந்து",
               10: u"பத்து",
               11: u"பதினொன்று ",
               17: u"பதினேழு ",
               19: u"பத்தொன்பது ",
               20: u"இருபது",
               21: u"இருபத்தொன்று",
               1051: u"ஓர் ஆயிரத்து ஐம்பத்தொன்று",
               100000: u"ஒரு இலட்சம்",
               100001: u"ஒரு இலட்சத்து ஒன்று",
               10011: u"பத்து ஆயிரத்து பதினொன்று ",
               49: u"நாற்பத்தொன்பது",
               50: u"ஐம்பது",
               55: u"ஐம்பத்தைந்து",
               1000001: u"பத்து இலட்சத்து ஒன்று",
               90: u"தொன்னூறு",
               99: u"தொன்னூற்றொன்பது",
               100: u"நூறு",
               101: u"நூற்றி ஒன்று",
               1000: u"ஓர் ஆயிரம்",
               111: u"நூற்றி பதினொன்று ",
               1000000000000: u"ஒரு இலட்சம் கோடி ",
               1011: u"ஓர் ஆயிரத்து பதினொன்று "}
        for k, actual_v in var.items():
            v = tamil.numeral.num2tamilstr(k)
            print('verifying => # %d' % k)
            self.assertEqual(v, actual_v, k)
        return
class NumeralNegTest(unittest.TestCase):
    """Negative numbers, fractions and error handling for num2tamilstr."""

    def runTest(self, var, nos):
        # Check each expected Tamil string against the converter output.
        for numerStr, num in zip(var, nos):
            print('Testing ---> ', num)
            print('NumerString', numerStr)
            self.maxDiff = None
            self.assertEqual(numerStr, tamil.numeral.num2tamilstr(num), num)
        return

    def test_100s(self):
        # -100 .. -900; negatives are rendered with a leading '- '.
        hundreds = (u'- நூறு', u'- இருநூறு ', u'- முன்னூறு ', u'- நாநூறு ', u'- ஐநூறு ', u'- அறுநூறு ', u'- எழுநூறு ', u'- எண்ணூறு ', u'- தொள்ளாயிரம் ')  # 100 - 900
        self.runTest(hundreds, range(-100, -1000, -100))
        return

    def test_USA(self):
        ramanujan = -1729
        gometra = tamil.numeral.num2tamilstr(ramanujan)
        expected = u"- ஓர் ஆயிரத்து எழுநூற்று இருபத்தொன்பது"
        self.assertEqual(gometra, expected)

    def test_3LKPLUS1(self):
        # Floats with a zero fraction render like the integer (3 lakh + 1).
        x1 = 3e5 + 1
        actual = tamil.numeral.num2tamilstr(x1)
        expected = u'மூன்று இலட்சத்து ஒன்று'
        self.assertEqual(actual, expected)

    def test_PI(self):
        if PYTHON3:
            print("Python3 has different rounding")
            return
        pie = 3.1415
        expected = u'மூன்று புள்ளி ஒன்று நான்கு ஒன்று ஐந்து'
        actual = tamil.numeral.num2tamilstr(pie)
        actual_USA = tamil.numeral.num2tamilstr_american(pie)
        self.assertEqual(actual, expected)
        self.assertEqual(actual_USA, expected)

    def test_PI_million(self):
        # Only prefix-compare: trailing digits depend on float rounding.
        pie = 3e6 + 0.1415
        expected = u'மூன்று மில்லியன் புள்ளி ஒன்று நான்கு ஒன்று'
        actual_USA = tamil.numeral.num2tamilstr_american(pie)
        self.assertEqual(actual_USA[0:len(expected)], expected)

    def test_PI_lakshalu(self):
        # Only prefix-compare: trailing digits depend on float rounding.
        pie = 3e5 + 0.1415
        expected = u'மூன்று இலட்சம் புள்ளி ஒன்று நான்கு ஒன்று ஐந்து'
        actual_IN = tamil.numeral.num2tamilstr(pie)
        self.assertEqual(actual_IN[0:len(expected)], expected)

    # Restored: the decorator was mangled to '<EMAIL>If(...)' (redaction
    # artifact) and made the module unparseable; unittest.skipIf skips this
    # rounding-sensitive test on Python 3, replacing the old inline guard.
    @unittest.skipIf(PYTHON3, "Python3 has different rounding")
    def test_INFRAC(self):
        exp2 = u'ஓர் ஆயிரத்து ஒன்று புள்ளி நான்கு ஐந்து'
        actual_IN2 = tamil.numeral.num2tamilstr(1001 + 0.45)
        self.assertEqual(actual_IN2, exp2)
        exp2 = u'ஓர் ஆயிரம் புள்ளி நான்கு ஐந்து'
        actual_IN2 = tamil.numeral.num2tamilstr(1000 + 0.45)
        self.assertEqual(actual_IN2, exp2)

    def test_VITHIVILAKKU(self):
        if PYTHON2_6:
            # assertRaises context-manager API is missing in Python 2.6.
            return
        # Non-numeric input must raise.
        with self.assertRaises(Exception):
            tamil.numeral.num2tamilstr(complex(5, 6))
        with self.assertRaises(Exception):
            tamil.numeral.num2tamilstr('mannagatti')
if __name__ == '__main__':
    # Discover and run every TestCase defined in this module.
    unittest.main()
# setup the paths
from opentamiltests import *
import tamil.utf8 as utf8
from tamil.tscii import TSCII
import codecs
if PYTHON3:
# Python 3 removed the `long` type; provide a stand-in subclass of `int`
# so the tests below can build `long(...)` values on either major version.
class long(int):
pass
class NumeralStringLimitTests(unittest.TestCase):
    """Fraction-only numeric strings ('0.33', '0.9876') in both numbering styles."""

    def test_case_basic(self):
        """Indian-style conversion of fractional numeric strings."""
        cases = (('0.33', u"புள்ளி மூன்று மூன்று"),
                 ('0.9876', u"புள்ளி ஒன்பது எட்டு ஏழு ஆறு"))
        for text, expected in cases:
            self.assertEqual(expected, tamil.numeral.num2tamilstr(text))

    def test_case_american(self):
        """American-style conversion of fractional numeric strings."""
        cases = (('0.33', u"புள்ளி மூன்று மூன்று"),
                 ('0.9876', u"புள்ளி ஒன்பது எட்டு ஏழு ஆறு"))
        for text, expected in cases:
            self.assertEqual(expected, tamil.numeral.num2tamilstr_american(text))
class NumeralTestAmerican(unittest.TestCase):
# Exercises tamil.numeral.num2tamilstr_american — the short-scale
# (thousand/million/billion/trillion) Tamil reading of numbers.
def runTest(self,var,nos):
# Shared driver: expected Tamil strings in `var`, matching numbers in `nos`,
# walked in lockstep and compared pairwise (the number is the failure msg).
for numerStr,num in zip(var,nos):
print('Testing ---> ',num)
self.assertEqual( numerStr, tamil.numeral.num2tamilstr_american( num ), num )
return
def test_friend_of_rama( self ):
# 1729 — the Hardy-Ramanujan "taxicab" number (Indian-style call here).
ramanujan = 1729
gometra = tamil.numeral.num2tamilstr( ramanujan )
expected = u"ஓர் ஆயிரத்து எழுநூற்று இருபத்தொன்பது"
self.assertEqual( gometra, expected )
def test_units( self ):
units = (u'பூஜ்ஜியம்', u'ஒன்று', u'இரண்டு', u'மூன்று', u'நான்கு', u'ஐந்து', u'ஆறு', u'ஏழு', u'எட்டு', u'ஒன்பது', u'பத்து') # 0-10
self.runTest( units, range(0,11) )
return
def test_basic_pulli(self):
# "pulli" is the decimal point; every value here is n + 0.5 for n in 0..10.
numerals = (u'புள்ளி ஐந்து', u'ஒன்று புள்ளி ஐந்து', u'இரண்டு புள்ளி ஐந்து', u'மூன்று புள்ளி ஐந்து', u'நான்கு புள்ளி ஐந்து', u'ஐந்து புள்ளி ஐந்து', u'ஆறு புள்ளி ஐந்து', u'ஏழு புள்ளி ஐந்து', u'எட்டு புள்ளி ஐந்து', u'ஒன்பது புள்ளி ஐந்து', u'பத்து புள்ளி ஐந்து')
numbers = [i+0.5 for i in range(0,11)]
self.runTest( numerals, numbers )
return
def test_teens( self ):
teens = (u'பதினொன்று ', u'பனிரண்டு ', u'பதிமூன்று ', u'பதினான்கு ', u'பதினைந்து ',u'பதினாறு ', u'பதினேழு ', u'பதினெட்டு ', u'பத்தொன்பது ') # 11-19
self.runTest( teens, range(11,20) )
return
def test_tens ( self ):
tens = (u'பத்து', u'இருபது', u'முப்பது', u'நாற்பது', u'ஐம்பது',u'அறுபது', u'எழுபது', u'எண்பது', u'தொன்னூறு') # 10-90
self.runTest( tens, range(10,100,10) )
return
def test_100s( self ):
hundreds = ( u'நூறு', u'இருநூறு ', u'முன்னூறு ', u'நாநூறு ',u'ஐநூறு ', u'அறுநூறு ', u'எழுநூறு ', u'எண்ணூறு ', u'தொள்ளாயிரம் ') #100 - 900
self.runTest( hundreds, range(100,1000,100) )
return
def test_max( self ):
# Largest value this scale supports: 1e15 - 1 (999 trillion 999 billion ...).
maxno = long(1e15 - 1)
expected = u'தொள்ளாயிரத்து தொன்னூற்றொன்பது டிரில்லியன் தொள்ளாயிரத்து தொன்னூற்றொன்பது பில்லியன் தொள்ளாயிரத்து தொன்னூற்றொன்பது மில்லியன் தொள்ளாயிரத்து தொன்னூற்றொன்பது ஆயிரத்து தொள்ளாயிரத்து தொன்னூற்றொன்பது'
self.assertEqual( tamil.numeral.num2tamilstr_american( maxno ), expected )
return
def test_numerals(self):
# Spot checks across the supported range: keys are inputs, values the
# expected American-scale Tamil text (some entries keep trailing spaces).
var = {0:u"பூஜ்ஜியம்",
long(1e7):u"பத்து மில்லியன்",
long(1e9-1):u"தொள்ளாயிரத்து தொன்னூற்றொன்பது மில்லியன் தொள்ளாயிரத்து தொன்னூற்றொன்பது ஆயிரத்து தொள்ளாயிரத்து தொன்னூற்றொன்பது",
3060:u"மூன்று ஆயிரத்து அறுபது",
1:u"ஒன்று",
2:u"இரண்டு",
3:u"மூன்று",
5:u"ஐந்து",
10:u"பத்து",
11:u"பதினொன்று ",
17:u"பதினேழு ",
19:u"பத்தொன்பது ",
20:u"இருபது",
21:u"இருபத்தொன்று",
1051:u"ஓர் ஆயிரத்து ஐம்பத்தொன்று",
100000:u"நூறு ஆயிரம்",
100001:u"நூறு ஆயிரத்து ஒன்று",
10011:u"பத்து ஆயிரத்து பதினொன்று ",
49:u"நாற்பத்தொன்பது",
50:u"ஐம்பது",
55:u"ஐம்பத்தைந்து",
1000001:u"ஒரு மில்லியன் ஒன்று",
90:u"தொன்னூறு",
99:u"தொன்னூற்றொன்பது",
100:u"நூறு",
101:u"நூற்றி ஒன்று",
1000:u"ஓர் ஆயிரம்",
111:u"நூற்றி பதினொன்று ",
1000000000000:u"ஒரு டிரில்லியன்",
1011:u"ஓர் ஆயிரத்து பதினொன்று "}
for k,actual_v in var.items():
v = tamil.numeral.num2tamilstr_american(k)
print('verifying => # %d'%k)
self.assertEqual(v,actual_v,k)
return
class NumeralTest(unittest.TestCase):
# Exercises tamil.numeral.num2tamilstr — the Indian-scale
# (hundred/thousand/lakh/crore) Tamil reading of numbers.
def runTest(self,var,nos):
# Shared driver: expected Tamil strings in `var`, matching numbers in `nos`.
for numerStr,num in zip(var,nos):
print('Testing ---> ',num)
self.assertEqual( numerStr, tamil.numeral.num2tamilstr( num ), num )
return
def test_units( self ):
units = (u'பூஜ்ஜியம்', u'ஒன்று', u'இரண்டு', u'மூன்று', u'நான்கு', u'ஐந்து', u'ஆறு', u'ஏழு', u'எட்டு', u'ஒன்பது', u'பத்து') # 0-10
self.runTest( units, range(0,11) )
return
def test_teens( self ):
teens = (u'பதினொன்று ', u'பனிரண்டு ', u'பதிமூன்று ', u'பதினான்கு ', u'பதினைந்து ',u'பதினாறு ', u'பதினேழு ', u'பதினெட்டு ', u'பத்தொன்பது ') # 11-19
self.runTest( teens, range(11,20) )
return
def test_tens ( self ):
tens = (u'பத்து', u'இருபது', u'முப்பது', u'நாற்பது', u'ஐம்பது',u'அறுபது', u'எழுபது', u'எண்பது', u'தொன்னூறு') # 10-90
self.runTest( tens, range(10,100,10) )
return
def test_100s( self ):
hundreds = ( u'நூறு', u'இருநூறு ', u'முன்னூறு ', u'நாநூறு ',u'ஐநூறு ', u'அறுநூறு ', u'எழுநூறு ', u'எண்ணூறு ', u'தொள்ளாயிரம் ') #100 - 900
self.runTest( hundreds, range(100,1000,100) )
return
def test_max( self ):
# Largest value the Indian scale supports here: 1e12 - 1.
maxno = long(1e12 - 1 )
expected = u'தொன்னூற்றொன்பது ஆயிரத்து தொள்ளாயிரத்து தொன்னூற்றொன்பது கோடியே தொன்னூற்றொன்பது இலட்சத்து தொன்னூற்றொன்பது ஆயிரத்து தொள்ளாயிரத்து தொன்னூற்றொன்பது'
self.assertEqual( tamil.numeral.num2tamilstr( maxno ), expected )
return
def test_numerals(self):
# Spot checks: keys are inputs, values the expected Indian-scale Tamil text
# (some entries deliberately keep trailing spaces).
var = {0:u"பூஜ்ஜியம்",
3060:u"மூன்று ஆயிரத்து அறுபது",
1:u"ஒன்று",
2:u"இரண்டு",
3:u"மூன்று",
5:u"ஐந்து",
10:u"பத்து",
11:u"பதினொன்று ",
17:u"பதினேழு ",
19:u"பத்தொன்பது ",
20:u"இருபது",
21:u"இருபத்தொன்று",
1051:u"ஓர் ஆயிரத்து ஐம்பத்தொன்று",
100000:u"ஒரு இலட்சம்",
100001:u"ஒரு இலட்சத்து ஒன்று",
10011:u"பத்து ஆயிரத்து பதினொன்று ",
49:u"நாற்பத்தொன்பது",
50:u"ஐம்பது",
55:u"ஐம்பத்தைந்து",
1000001:u"பத்து இலட்சத்து ஒன்று",
90:u"தொன்னூறு",
99:u"தொன்னூற்றொன்பது",
100:u"நூறு",
101:u"நூற்றி ஒன்று",
1000:u"ஓர் ஆயிரம்",
111:u"நூற்றி பதினொன்று ",
1000000000000:u"ஒரு இலட்சம் கோடி ",
1011:u"ஓர் ஆயிரத்து பதினொன்று "}
for k,actual_v in var.items():
v = tamil.numeral.num2tamilstr(k)
print('verifying => # %d'%k)
self.assertEqual(v,actual_v,k)
return
class NumeralNegTest(unittest.TestCase):
    """Negative numbers: rendered as the positive reading with a '- ' prefix."""

    def runTest(self, var, nos):
        """Compare each expected Tamil string in `var` against num2tamilstr(num)."""
        for numerStr, num in zip(var, nos):
            print('Testing ---> ', num)
            print('NumerString', numerStr)
            self.maxDiff = None  # show full diffs for long Tamil strings
            self.assertEqual(numerStr, tamil.numeral.num2tamilstr(num), num)
        return

    def test_100s(self):
        # -100 .. -900 in steps of -100.
        hundreds = (u'- நூறு', u'- இருநூறு ', u'- முன்னூறு ', u'- நாநூறு ', u'- ஐநூறு ', u'- அறுநூறு ', u'- எழுநூறு ', u'- எண்ணூறு ', u'- தொள்ளாயிரம் ')
        self.runTest(hundreds, range(-100, -1000, -100))
        return

    def test_USA(self):
        """-1729 renders as minus plus the usual Ramanujan-number reading."""
        ramanujan = -1729
        gometra = tamil.numeral.num2tamilstr(ramanujan)
        expected = u"- ஓர் ஆயிரத்து எழுநூற்று இருபத்தொன்பது"
        self.assertEqual(gometra, expected)

    def test_3LKPLUS1(self):
        """3 lakh + 1 supplied as a float still reads as an exact integer."""
        x1 = 3e5 + 1
        actual = tamil.numeral.num2tamilstr(x1)
        expected = u'மூன்று இலட்சத்து ஒன்று'
        self.assertEqual(actual, expected)

    def test_PI(self):
        """3.1415 renders digit-by-digit after the point (Python 2 only)."""
        if PYTHON3:
            print("Python3 has different rounding")
            return
        pie = 3.1415
        expected = u'மூன்று புள்ளி ஒன்று நான்கு ஒன்று ஐந்து'
        actual = tamil.numeral.num2tamilstr(pie)
        actual_USA = tamil.numeral.num2tamilstr_american(pie)
        self.assertEqual(actual, expected)
        self.assertEqual(actual_USA, expected)

    def test_PI_million(self):
        """American-scale 3e6 + 0.1415; only the stable prefix is compared."""
        pie = 3e6 + 0.1415
        expected = u'மூன்று மில்லியன் புள்ளி ஒன்று நான்கு ஒன்று'
        actual_USA = tamil.numeral.num2tamilstr_american(pie)
        self.assertEqual(actual_USA[0:len(expected)], expected)

    def test_PI_lakshalu(self):
        """Indian-scale 3e5 + 0.1415; only the stable prefix is compared."""
        pie = 3e5 + 0.1415
        expected = u'மூன்று இலட்சம் புள்ளி ஒன்று நான்கு ஒன்று ஐந்து'
        actual_IN = tamil.numeral.num2tamilstr(pie)
        self.assertEqual(actual_IN[0:len(expected)], expected)

    # BUG FIX(review): this decorator had been mangled to "<EMAIL>If(...)" by an
    # automated e-mail redaction; restored as unittest.skipIf. The in-body
    # PYTHON3 guard is kept as a fallback for runners ignoring skip decorators.
    @unittest.skipIf(PYTHON3, "Python3 has different rounding")
    def test_INFRAC(self):
        """Integer-plus-fraction inputs keep the decimal-point section."""
        if PYTHON3:
            print("Python3 has different rounding")
            return
        exp2 = u'ஓர் ஆயிரத்து ஒன்று புள்ளி நான்கு ஐந்து'
        actual_IN2 = tamil.numeral.num2tamilstr(1001 + 0.45)
        self.assertEqual(actual_IN2, exp2)
        exp2 = u'ஓர் ஆயிரம் புள்ளி நான்கு ஐந்து'
        actual_IN2 = tamil.numeral.num2tamilstr(1000 + 0.45)
        self.assertEqual(actual_IN2, exp2)

    def test_VITHIVILAKKU(self):
        """Unsupported inputs (complex numbers, junk strings) must raise."""
        if PYTHON2_6:
            # exception API is different in Python 2.6
            return
        with self.assertRaises(Exception):
            tamil.numeral.num2tamilstr(complex(5, 6))
        with self.assertRaises(Exception):
            tamil.numeral.num2tamilstr('mannagatti')
if __name__ == '__main__':
unittest.main() | 0.21892 | 0.336467 |
import json
import os
from pathlib import Path
from typing import List
from canvasxpress.util.example.generator import \
generate_canvasxpress_code_from_json_file
JSON_DIR_PATH = f"{os.getcwd()}/../../../tutorials/reproducible_json/"
JUPYTER_TEMPLATE_PATH = f"{os.getcwd()}/../../../canvasxpress/util/" \
f"example/template_tutorials.ipynb"
JUPYTER_EXAMPLES_DIR_PATH = f"{os.getcwd()}/../../../tutorials/notebook/" \
f"cx_site_chart_examples/"
def get_json_file_paths() -> List[str]:
    """
    Returns a list of all reproducible JSON files tracked for tutorials.
    :returns: `list[str]`
        The file paths (JSON_DIR_PATH joined with each *.json entry), sorted.
    """
    return sorted(
        os.path.join(JSON_DIR_PATH, entry)
        for entry in os.listdir(JSON_DIR_PATH)
        if entry.endswith(".json")
    )
def get_type_from_filename(
    file_name: str
) -> str:
    """
    Returns the type of chart from a reproducible JSON filename.
    :param file_name: `str`
        The name of the file without parent path.
    :returns: `str`
        The name of the chart (e.g., bar) or an empty string.
    """
    stem = file_name.replace(".json", "")
    # The chart index is the trailing run of numeric characters; everything
    # before that run is the chart type.
    end = len(stem)
    while end > 0 and stem[end - 1].isnumeric():
        end -= 1
    return stem[:end]
def get_index_from_filename(
    file_name: str
) -> str:
    """
    Returns the index of chart from a reproducible JSON filename.
    :param file_name: `str`
        The name of the file without parent path.
    :returns: `str`
        The index of the chart (e.g., 1) or an empty string.
    """
    stem = file_name.replace(".json", "")
    # The index is the maximal run of numeric characters at the end of the stem.
    start = len(stem)
    while start > 0 and stem[start - 1].isnumeric():
        start -= 1
    return stem[start:]
def create_jupyer_template_text(
    chart_type: str,
    chart_index: str,
    chart_code: str
) -> str:
    """
    Generates the text for a Jupyter Notebook example given a chart's type,
    index, and code.
    :param chart_type: `str`
        The type text (e.g., bar) for the chart.
    :param chart_index: `str`
        The index text (e.g., 1) for the chart.
    :param chart_code: `str`
        The chart source code.
    :returns: `str`
        The text for the full example and instruction.
    """
    with open(JUPYTER_TEMPLATE_PATH, 'r') as template_file:
        template_text = template_file.read()
    # Fill in the placeholders used by the notebook template.
    filled_text = template_text.replace("@type@", chart_type)
    filled_text = filled_text.replace("@index@", chart_index)
    notebook = json.loads(filled_text)
    # Rewrite the generic render call so the example writes an HTML file
    # named after the chart, then append each code line to the code cell.
    explicit_render = f'display.render(output_file="{chart_type}_{chart_index}.html")'
    code_cell_source = notebook['cells'][1]['source']
    for code_line in chart_code.splitlines():
        code_cell_source.append(
            code_line.replace("display.render()", explicit_render) + '\n'
        )
    return json.dumps(notebook)
if __name__ == "__main__":
# Regenerate one example notebook per tracked reproducible JSON file.
json_paths = get_json_file_paths()
for json_path in json_paths:
try:
# Derive the chart type/index from the file name (e.g. "bar3.json").
json_name = Path(json_path).name
chart_type = get_type_from_filename(json_name)
chart_index = get_index_from_filename(json_name)
jupyter_notebook_content = create_jupyer_template_text(
chart_type,
chart_index,
generate_canvasxpress_code_from_json_file(
json_path,
document_jupyter_render=True
)
)
# Write the generated notebook into the site examples directory.
example_file_name = f"{chart_type}_{chart_index}.ipynb"
example_file_path = str(
Path(JUPYTER_EXAMPLES_DIR_PATH).joinpath(example_file_name)
)
with open(example_file_path, 'w') as example_file:
example_file.write(jupyter_notebook_content)
except Exception as e:
# Best-effort batch: report the failure and continue with the rest.
print(f"Cannot process file: {json_path}")
print(f"Exception: {e}") | canvasxpress/util/example/generate_tutorials.py | import json
import os
from pathlib import Path
from typing import List
from canvasxpress.util.example.generator import \
generate_canvasxpress_code_from_json_file
JSON_DIR_PATH = f"{os.getcwd()}/../../../tutorials/reproducible_json/"
JUPYTER_TEMPLATE_PATH = f"{os.getcwd()}/../../../canvasxpress/util/" \
f"example/template_tutorials.ipynb"
JUPYTER_EXAMPLES_DIR_PATH = f"{os.getcwd()}/../../../tutorials/notebook/" \
f"cx_site_chart_examples/"
def get_json_file_paths() -> List[str]:
"""
Returns a list of all reproducible JSON files tracked for tutorials.
:returns: `list[str]`
The file paths as a list of strings.
"""
json_files = list()
for file in os.listdir(JSON_DIR_PATH):
if file.endswith(".json"):
json_files.append(
os.path.join(JSON_DIR_PATH, file)
)
return sorted(json_files)
def get_type_from_filename(
file_name: str
) -> str:
"""
Returns the type of chart from a reproducible JSON filename.
:param file_name: `str`
The name of the file without parent path.
:returns: `str`
The name of the chart (e.g., bar) or an empty string.
"""
assembled_type = ""
started_type = False
# Walk the name (minus ".json") from the end: skip the trailing numeric
# index characters, then collect every remaining character.
for name_char in file_name.replace(".json", "")[::-1]:
if not started_type and name_char.isnumeric():
continue
else:
started_type = True
assembled_type += name_char
# Characters were gathered in reverse order; flip them back.
return assembled_type[::-1]
def get_index_from_filename(
file_name: str
) -> str:
"""
Returns the index of chart from a reproducible JSON filename.
:param file_name: `str`
The name of the file without parent path.
:returns: `str`
The index of the chart (e.g., 1) or an empty string.
"""
assembled_index = ""
# Collect the trailing run of numeric characters (reversed), stopping at
# the first non-numeric character.
for name_char in file_name.replace(".json", "")[::-1]:
if name_char.isnumeric():
assembled_index += name_char
else:
break
# Digits were gathered in reverse order; flip them back.
return assembled_index[::-1]
def create_jupyer_template_text(
chart_type: str,
chart_index: str,
chart_code: str
) -> str:
"""
Generates the text for a Jupyter Notebook example given a chart's type,
index, and code.
:param: chart_type: `str`
The type text (e.g., bar) for the chart.
:param chart_index: `str`
The index text (e.g., 1) for the chart.
:param chart_code: `str`
The chart source code.
:returns: `str`
The text for the full example and instruction.
"""
with open(JUPYTER_TEMPLATE_PATH, 'r') as template_file:
example_text = template_file.read()
example_text = example_text.replace("@type@", chart_type)
example_text = example_text.replace("@index@", chart_index)
ipython_json = json.loads(example_text)
for line in chart_code.splitlines():
candidate = line
# Convert render statement to explicit output
if "display.render()" in candidate:
candidate = candidate.replace(
"display.render()",
f'display.render(output_file="{chart_type}_{chart_index}.html")'
)
# Add the source line to the document
ipython_json['cells'][1]['source'].append(candidate + '\n')
ipython_text = json.dumps(ipython_json)
return ipython_text
if __name__ == "__main__":
json_paths = get_json_file_paths()
for json_path in json_paths:
try:
json_name = Path(json_path).name
chart_type = get_type_from_filename(json_name)
chart_index = get_index_from_filename(json_name)
jupyter_notebook_content = create_jupyer_template_text(
chart_type,
chart_index,
generate_canvasxpress_code_from_json_file(
json_path,
document_jupyter_render=True
)
)
example_file_name = f"{chart_type}_{chart_index}.ipynb"
example_file_path = str(
Path(JUPYTER_EXAMPLES_DIR_PATH).joinpath(example_file_name)
)
with open(example_file_path, 'w') as example_file:
example_file.write(jupyter_notebook_content)
except Exception as e:
print(f"Cannot process file: {json_path}")
print(f"Exception: {e}") | 0.713531 | 0.357876 |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Campaign',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('dateCreated', models.DateTimeField(auto_now_add=True, verbose_name='Date created')),
('dateActivated', models.DateTimeField(blank=True, null=True, verbose_name='Date activated')),
('dateCompleted', models.DateTimeField(blank=True, null=True, verbose_name='Date completed')),
('dateRetired', models.DateTimeField(blank=True, null=True, verbose_name='Date retired')),
('dateModified', models.DateTimeField(blank=True, null=True, verbose_name='Date modified')),
('activated', models.BooleanField(default=False, verbose_name='Activated?')),
('completed', models.BooleanField(default=False, verbose_name='Completed?')),
('retired', models.BooleanField(default=False, verbose_name='Retired?')),
('rawData', models.TextField(blank=True, editable=False, verbose_name='Raw data')),
('campaignName', models.CharField(help_text='(max. 50 characters)', max_length=50, verbose_name='Campaign name')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='CampaignData',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('dateCreated', models.DateTimeField(auto_now_add=True, verbose_name='Date created')),
('dateActivated', models.DateTimeField(blank=True, null=True, verbose_name='Date activated')),
('dateCompleted', models.DateTimeField(blank=True, null=True, verbose_name='Date completed')),
('dateRetired', models.DateTimeField(blank=True, null=True, verbose_name='Date retired')),
('dateModified', models.DateTimeField(blank=True, null=True, verbose_name='Date modified')),
('activated', models.BooleanField(default=False, verbose_name='Activated?')),
('completed', models.BooleanField(default=False, verbose_name='Completed?')),
('retired', models.BooleanField(default=False, verbose_name='Retired?')),
('rawData', models.TextField(blank=True, editable=False, verbose_name='Raw data')),
('dataFile', models.FileField(upload_to='Batches', verbose_name='Data file')),
('dataValid', models.BooleanField(default=False, editable=False, verbose_name='Data valid?')),
('dataReady', models.BooleanField(default=False, editable=False, verbose_name='Data ready?')),
('activatedBy', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='campaign_campaigndata_activated_by', related_query_name='campaign_campaigndatas', to=settings.AUTH_USER_MODEL, verbose_name='Activated by')),
('completedBy', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='campaign_campaigndata_completed_by', related_query_name='campaign_campaigndatas', to=settings.AUTH_USER_MODEL, verbose_name='Completed by')),
('createdBy', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.PROTECT, related_name='campaign_campaigndata_created_by', related_query_name='campaign_campaigndatas', to=settings.AUTH_USER_MODEL, verbose_name='Created by')),
],
options={
'verbose_name_plural': 'Batches',
'verbose_name': 'Batch',
},
),
migrations.CreateModel(
name='CampaignTeam',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('dateCreated', models.DateTimeField(auto_now_add=True, verbose_name='Date created')),
('dateActivated', models.DateTimeField(blank=True, null=True, verbose_name='Date activated')),
('dateCompleted', models.DateTimeField(blank=True, null=True, verbose_name='Date completed')),
('dateRetired', models.DateTimeField(blank=True, null=True, verbose_name='Date retired')),
('dateModified', models.DateTimeField(blank=True, null=True, verbose_name='Date modified')),
('activated', models.BooleanField(default=False, verbose_name='Activated?')),
('completed', models.BooleanField(default=False, verbose_name='Completed?')),
('retired', models.BooleanField(default=False, verbose_name='Retired?')),
('rawData', models.TextField(blank=True, editable=False, verbose_name='Raw data')),
('teamName', models.CharField(help_text='(max. 50 characters)', max_length=50, verbose_name='Team name')),
('requiredAnnotations', models.PositiveSmallIntegerField(help_text='(value in range=[1,32767])', verbose_name='Required annotations')),
('requiredHours', models.PositiveSmallIntegerField(help_text='(value in range=[1,32767])', verbose_name='Required hours')),
('activatedBy', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='campaign_campaignteam_activated_by', related_query_name='campaign_campaignteams', to=settings.AUTH_USER_MODEL, verbose_name='Activated by')),
('completedBy', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='campaign_campaignteam_completed_by', related_query_name='campaign_campaignteams', to=settings.AUTH_USER_MODEL, verbose_name='Completed by')),
('createdBy', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.PROTECT, related_name='campaign_campaignteam_created_by', related_query_name='campaign_campaignteams', to=settings.AUTH_USER_MODEL, verbose_name='Created by')),
('members', models.ManyToManyField(related_name='campaign_campaignteam_members', related_query_name='campaign_campaignteams', to=settings.AUTH_USER_MODEL, verbose_name='Team members')),
('modifiedBy', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='campaign_campaignteam_modified_by', related_query_name='campaign_campaignteams', to=settings.AUTH_USER_MODEL, verbose_name='Modified by')),
('owner', models.ForeignKey(help_text='(must be staff member)', on_delete=django.db.models.deletion.PROTECT, related_name='campaign_campaignteam_owner', related_query_name='campaign_campaignteams', to=settings.AUTH_USER_MODEL, verbose_name='Team owner')),
('retiredBy', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='campaign_campaignteam_retired_by', related_query_name='campaign_campaignteams', to=settings.AUTH_USER_MODEL, verbose_name='Retired by')),
],
options={
'verbose_name_plural': 'Teams',
'verbose_name': 'Team',
},
),
] | Campaign/migrations/0001_initial.py | from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Campaign',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('dateCreated', models.DateTimeField(auto_now_add=True, verbose_name='Date created')),
('dateActivated', models.DateTimeField(blank=True, null=True, verbose_name='Date activated')),
('dateCompleted', models.DateTimeField(blank=True, null=True, verbose_name='Date completed')),
('dateRetired', models.DateTimeField(blank=True, null=True, verbose_name='Date retired')),
('dateModified', models.DateTimeField(blank=True, null=True, verbose_name='Date modified')),
('activated', models.BooleanField(default=False, verbose_name='Activated?')),
('completed', models.BooleanField(default=False, verbose_name='Completed?')),
('retired', models.BooleanField(default=False, verbose_name='Retired?')),
('rawData', models.TextField(blank=True, editable=False, verbose_name='Raw data')),
('campaignName', models.CharField(help_text='(max. 50 characters)', max_length=50, verbose_name='Campaign name')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='CampaignData',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('dateCreated', models.DateTimeField(auto_now_add=True, verbose_name='Date created')),
('dateActivated', models.DateTimeField(blank=True, null=True, verbose_name='Date activated')),
('dateCompleted', models.DateTimeField(blank=True, null=True, verbose_name='Date completed')),
('dateRetired', models.DateTimeField(blank=True, null=True, verbose_name='Date retired')),
('dateModified', models.DateTimeField(blank=True, null=True, verbose_name='Date modified')),
('activated', models.BooleanField(default=False, verbose_name='Activated?')),
('completed', models.BooleanField(default=False, verbose_name='Completed?')),
('retired', models.BooleanField(default=False, verbose_name='Retired?')),
('rawData', models.TextField(blank=True, editable=False, verbose_name='Raw data')),
('dataFile', models.FileField(upload_to='Batches', verbose_name='Data file')),
('dataValid', models.BooleanField(default=False, editable=False, verbose_name='Data valid?')),
('dataReady', models.BooleanField(default=False, editable=False, verbose_name='Data ready?')),
('activatedBy', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='campaign_campaigndata_activated_by', related_query_name='campaign_campaigndatas', to=settings.AUTH_USER_MODEL, verbose_name='Activated by')),
('completedBy', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='campaign_campaigndata_completed_by', related_query_name='campaign_campaigndatas', to=settings.AUTH_USER_MODEL, verbose_name='Completed by')),
('createdBy', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.PROTECT, related_name='campaign_campaigndata_created_by', related_query_name='campaign_campaigndatas', to=settings.AUTH_USER_MODEL, verbose_name='Created by')),
],
options={
'verbose_name_plural': 'Batches',
'verbose_name': 'Batch',
},
),
migrations.CreateModel(
name='CampaignTeam',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('dateCreated', models.DateTimeField(auto_now_add=True, verbose_name='Date created')),
('dateActivated', models.DateTimeField(blank=True, null=True, verbose_name='Date activated')),
('dateCompleted', models.DateTimeField(blank=True, null=True, verbose_name='Date completed')),
('dateRetired', models.DateTimeField(blank=True, null=True, verbose_name='Date retired')),
('dateModified', models.DateTimeField(blank=True, null=True, verbose_name='Date modified')),
('activated', models.BooleanField(default=False, verbose_name='Activated?')),
('completed', models.BooleanField(default=False, verbose_name='Completed?')),
('retired', models.BooleanField(default=False, verbose_name='Retired?')),
('rawData', models.TextField(blank=True, editable=False, verbose_name='Raw data')),
('teamName', models.CharField(help_text='(max. 50 characters)', max_length=50, verbose_name='Team name')),
('requiredAnnotations', models.PositiveSmallIntegerField(help_text='(value in range=[1,32767])', verbose_name='Required annotations')),
('requiredHours', models.PositiveSmallIntegerField(help_text='(value in range=[1,32767])', verbose_name='Required hours')),
('activatedBy', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='campaign_campaignteam_activated_by', related_query_name='campaign_campaignteams', to=settings.AUTH_USER_MODEL, verbose_name='Activated by')),
('completedBy', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='campaign_campaignteam_completed_by', related_query_name='campaign_campaignteams', to=settings.AUTH_USER_MODEL, verbose_name='Completed by')),
('createdBy', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.PROTECT, related_name='campaign_campaignteam_created_by', related_query_name='campaign_campaignteams', to=settings.AUTH_USER_MODEL, verbose_name='Created by')),
('members', models.ManyToManyField(related_name='campaign_campaignteam_members', related_query_name='campaign_campaignteams', to=settings.AUTH_USER_MODEL, verbose_name='Team members')),
('modifiedBy', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='campaign_campaignteam_modified_by', related_query_name='campaign_campaignteams', to=settings.AUTH_USER_MODEL, verbose_name='Modified by')),
('owner', models.ForeignKey(help_text='(must be staff member)', on_delete=django.db.models.deletion.PROTECT, related_name='campaign_campaignteam_owner', related_query_name='campaign_campaignteams', to=settings.AUTH_USER_MODEL, verbose_name='Team owner')),
('retiredBy', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='campaign_campaignteam_retired_by', related_query_name='campaign_campaignteams', to=settings.AUTH_USER_MODEL, verbose_name='Retired by')),
],
options={
'verbose_name_plural': 'Teams',
'verbose_name': 'Team',
},
),
] | 0.558327 | 0.11353 |
class SearchAlgorithm:
"""Interface of an event handler API for hyperparameter search.
Unlike TrialSchedulers, SearchAlgorithms will not have the ability
to modify the execution (i.e., stop and pause trials).
Trials added manually (i.e., via the Client API) will also notify
this class upon new events, so custom search algorithms should
maintain a list of trials ID generated from this class.
See also: `ray.tune.suggest.BasicVariantGenerator`.
"""
# Abstract hook — concrete search algorithms must override.
def add_configurations(self, experiments):
"""Tracks given experiment specifications.
Arguments:
experiments (Experiment | list | dict): Experiments to run.
"""
raise NotImplementedError
# Abstract hook — concrete search algorithms must override.
def next_trials(self):
"""Provides Trial objects to be queued into the TrialRunner.
Returns:
trials (list): Returns a list of trials.
"""
raise NotImplementedError
# Optional hook — default implementation ignores intermediate results.
def on_trial_result(self, trial_id, result):
"""Called on each intermediate result returned by a trial.
This will only be called when the trial is in the RUNNING state.
Arguments:
trial_id: Identifier for the trial.
"""
pass
# Optional hook — default implementation ignores trial completion.
def on_trial_complete(self,
trial_id,
result=None,
error=False,
early_terminated=False):
"""Notification for the completion of trial.
Arguments:
trial_id: Identifier for the trial.
result (dict): Defaults to None. A dict will
be provided with this notification when the trial is in
the RUNNING state AND either completes naturally or
by manual termination.
error (bool): Defaults to False. True if the trial is in
the RUNNING state and errors.
early_terminated (bool): Defaults to False. True if the trial
is stopped while in PAUSED or PENDING state.
"""
pass
# Abstract hook — concrete search algorithms must override.
def is_finished(self):
"""Returns True if no trials left to be queued into TrialRunner.
Can return True before all trials have finished executing.
"""
raise NotImplementedError | python/ray/tune/suggest/search.py | class SearchAlgorithm:
"""Interface of an event handler API for hyperparameter search.
Unlike TrialSchedulers, SearchAlgorithms will not have the ability
to modify the execution (i.e., stop and pause trials).
Trials added manually (i.e., via the Client API) will also notify
this class upon new events, so custom search algorithms should
maintain a list of trials ID generated from this class.
See also: `ray.tune.suggest.BasicVariantGenerator`.
"""
def add_configurations(self, experiments):
"""Tracks given experiment specifications.
Arguments:
experiments (Experiment | list | dict): Experiments to run.
"""
raise NotImplementedError
def next_trials(self):
"""Provides Trial objects to be queued into the TrialRunner.
Returns:
trials (list): Returns a list of trials.
"""
raise NotImplementedError
def on_trial_result(self, trial_id, result):
"""Called on each intermediate result returned by a trial.
This will only be called when the trial is in the RUNNING state.
Arguments:
trial_id: Identifier for the trial.
"""
pass
def on_trial_complete(self,
trial_id,
result=None,
error=False,
early_terminated=False):
"""Notification for the completion of trial.
Arguments:
trial_id: Identifier for the trial.
result (dict): Defaults to None. A dict will
be provided with this notification when the trial is in
the RUNNING state AND either completes naturally or
by manual termination.
error (bool): Defaults to False. True if the trial is in
the RUNNING state and errors.
early_terminated (bool): Defaults to False. True if the trial
is stopped while in PAUSED or PENDING state.
"""
pass
def is_finished(self):
"""Returns True if no trials left to be queued into TrialRunner.
Can return True before all trials have finished executing.
"""
raise NotImplementedError | 0.891687 | 0.668218 |
from testlib.custom_exceptions import UICmdException
from testlib.linux import service_lib
from testlib.ui_onpss_shell.switch_driver import SwitchDriver
class Dcrpd(object):
# Thin wrapper around the systemd-managed `dcrpd` service: pairs service
# start/stop with bringing the switch CPU port administratively Up/Down.
SERVICE = 'dcrpd'
CONFIG_PATH = "/usr/lib/systemd/system/"
MANIFEST_FILE = CONFIG_PATH + "dcrpd.service"
def __init__(self, run_command, switch):
"""Initialize Dcrpd class.
Args:
run_command(function): function that runs the actual commands
switch: switch object whose UI controls the CPU port
"""
super(Dcrpd, self).__init__()
self.run_command = run_command
self.switch = switch
self.switch_driver = SwitchDriver(self, switch)
self.service_manager = service_lib.SpecificServiceManager(self.SERVICE, self.run_command)
def start(self):
"""Start dcrpd process.
Raises:
UICmdException: On non-zero return code
"""
# The CPU port is brought Up before starting the daemon.
self.switch.ui.modify_ports(ports=[self.switch.ui.cpu_port], adminMode='Up')
self.service_manager.start()
def stop(self):
"""Stop dcrpd process.
Raises:
UICmdException: On non-zero return code
"""
# Stop the daemon first, then take the CPU port Down (reverse of start()).
self.service_manager.stop()
self.switch.ui.modify_ports(ports=[self.switch.ui.cpu_port], adminMode='Down')
def restart(self):
"""Restarting dcrpd process.
Raises:
UICmdException: On non-zero return code
"""
self.service_manager.restart()
def force_reload(self):
"""Restarting the switch driver and then the dcrpd process.
Raises:
UICmdException: On non-zero return code
"""
# Reloading the switch driver resets port state, so the CPU port must be
# re-enabled before the daemon is restarted.
self.switch_driver.force_reload()
self.switch.ui.modify_ports(ports=[self.switch.ui.cpu_port], adminMode='Up')
self.restart()
def enable(self):
"""Enabling dcrpd service on start.
Raises:
UICmdException: On non-zero return code
"""
self.service_manager.enable()
def disable(self):
"""Disabling dcrpd service on start.
Raises:
UICmdException: On non-zero return code
"""
self.service_manager.disable()
def get_status(self):
"""Get dcrpd process status.
Raises:
UICmdException: On non-zero or non-three return code
Returns:
str
"""
try:
result = self.service_manager.status()
except UICmdException as err:
if err.rc == 3:
# If service is not active
return err.stdout
else:
raise
return result.stdout | taf/testlib/linux/dcrpd/dcrpd.py | from testlib.custom_exceptions import UICmdException
from testlib.linux import service_lib
from testlib.ui_onpss_shell.switch_driver import SwitchDriver
class Dcrpd(object):
SERVICE = 'dcrpd'
CONFIG_PATH = "/usr/lib/systemd/system/"
MANIFEST_FILE = CONFIG_PATH + "dcrpd.service"
def __init__(self, run_command, switch):
"""Initialize Dcrpd class.
Args:
run_command(function): function that runs the actual commands
"""
super(Dcrpd, self).__init__()
self.run_command = run_command
self.switch = switch
self.switch_driver = SwitchDriver(self, switch)
self.service_manager = service_lib.SpecificServiceManager(self.SERVICE, self.run_command)
def start(self):
"""Start dcrpd process.
Raises:
UICmdException: On non-zero return code
"""
self.switch.ui.modify_ports(ports=[self.switch.ui.cpu_port], adminMode='Up')
self.service_manager.start()
def stop(self):
"""Stop dcrpd process.
Raises:
UICmdException: On non-zero return code
"""
self.service_manager.stop()
self.switch.ui.modify_ports(ports=[self.switch.ui.cpu_port], adminMode='Down')
def restart(self):
"""Restarting dcrpd process.
Raises:
UICmdException: On non-zero return code
"""
self.service_manager.restart()
def force_reload(self):
"""Restarting the switch driver and then the dcrpd process.
Raises:
UICmdException: On non-zero return code
"""
self.switch_driver.force_reload()
self.switch.ui.modify_ports(ports=[self.switch.ui.cpu_port], adminMode='Up')
self.restart()
def enable(self):
"""Enabling dcrpd service on start.
Raises:
UICmdException: On non-zero return code
"""
self.service_manager.enable()
def disable(self):
"""Disabling dcrpd service on start.
Raises:
UICmdException: On non-zero return code
"""
self.service_manager.disable()
def get_status(self):
"""Get dcrpd process status.
Raises:
UICmdException: On non-zero or non-three return code
Returns:
str
"""
try:
result = self.service_manager.status()
except UICmdException as err:
if err.rc == 3:
# If service is not active
return err.stdout
else:
raise
return result.stdout | 0.573678 | 0.126839 |
import cv2
import os
#Creating/Checking a Database Dir
if os.path.exists('database'):
pass
else:
os.mkdir('database')
#Reseting Counter and Users
dir = os.listdir('database')
if len(dir) == 0:
users = open('resources/user.txt','w')
ids = open('resources/count.txt','w')
users.write('None')
ids.write('0')
users.close()
ids.close()
if os.path.exists('resources/trained_model.yml'):
os.remove('resources/trained_model.yml')
else:
pass
else:
pass
#Video Dimensions
camera = cv2.VideoCapture(0)
camera.set(3, 1280)
camera.set(4, 1024)
#Cascade for Frontal Face Detection
#Credits: https://github.com/opencv/opencv
detector = cv2.CascadeClassifier('resources/haarcascade_frontalface_default.xml')
#Read Pre-Existing Users
users = open('resources/user.txt','r')
names = users.read().splitlines()
users.close()
#Name Input
flag = 1
while(flag == 1):
face_name = input('Enter the name of the person:')
if face_name in names:
print('\nSorry, the name already exists, please enter another name.\n')
flag = 1
else:
users = open('resources/user.txt','a')
users.write("\n" + face_name)
users.close()
flag = 0
#ID Counter Increment
ids = open('resources/count.txt','r')
read = ids.read()
count = int(read)
face_id = count + 1
ids.close()
ids = open('resources/count.txt','w')
write = str(face_id)
ids.write(write)
ids.close()
#Instructions
print("Please make sure that you are in a well lit environment.\nThis will capture several photos of you, so make sure to give different angles.\n\nCapturing will start automatically within 10 seconds.")
#Camera Capturing Initiated
count = 0
flag = 0
print("\nFace Caputuring Initiated. Please look into the camera.\nThis might take a while.")
while(flag == 0):
ret, img = camera.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
face = detector.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in face:
cv2.rectangle(img, (x,y), (x+w,y+h), (255,0,0), 2)
count += 1
cv2.imwrite("database/User." + str(face_id) + '.' + str(count) + ".jpg", gray[y:y+h,x:x+w])
cv2.imshow('image', img)
#Escape key Protocol
key = cv2.waitKey(10) & 0xff
if key == 27:
flag = 1
elif count >= 400:
flag = 1
#Training the Dataset
import training
#Exiting
camera.release()
cv2.destroyAllWindows() | Recognition/database.py | import cv2
import os
#Creating/Checking a Database Dir
if os.path.exists('database'):
pass
else:
os.mkdir('database')
#Reseting Counter and Users
dir = os.listdir('database')
if len(dir) == 0:
users = open('resources/user.txt','w')
ids = open('resources/count.txt','w')
users.write('None')
ids.write('0')
users.close()
ids.close()
if os.path.exists('resources/trained_model.yml'):
os.remove('resources/trained_model.yml')
else:
pass
else:
pass
#Video Dimensions
camera = cv2.VideoCapture(0)
camera.set(3, 1280)
camera.set(4, 1024)
#Cascade for Frontal Face Detection
#Credits: https://github.com/opencv/opencv
detector = cv2.CascadeClassifier('resources/haarcascade_frontalface_default.xml')
#Read Pre-Existing Users
users = open('resources/user.txt','r')
names = users.read().splitlines()
users.close()
#Name Input
flag = 1
while(flag == 1):
face_name = input('Enter the name of the person:')
if face_name in names:
print('\nSorry, the name already exists, please enter another name.\n')
flag = 1
else:
users = open('resources/user.txt','a')
users.write("\n" + face_name)
users.close()
flag = 0
#ID Counter Increment
ids = open('resources/count.txt','r')
read = ids.read()
count = int(read)
face_id = count + 1
ids.close()
ids = open('resources/count.txt','w')
write = str(face_id)
ids.write(write)
ids.close()
#Instructions
print("Please make sure that you are in a well lit environment.\nThis will capture several photos of you, so make sure to give different angles.\n\nCapturing will start automatically within 10 seconds.")
#Camera Capturing Initiated
count = 0
flag = 0
print("\nFace Caputuring Initiated. Please look into the camera.\nThis might take a while.")
while(flag == 0):
ret, img = camera.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
face = detector.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in face:
cv2.rectangle(img, (x,y), (x+w,y+h), (255,0,0), 2)
count += 1
cv2.imwrite("database/User." + str(face_id) + '.' + str(count) + ".jpg", gray[y:y+h,x:x+w])
cv2.imshow('image', img)
#Escape key Protocol
key = cv2.waitKey(10) & 0xff
if key == 27:
flag = 1
elif count >= 400:
flag = 1
#Training the Dataset
import training
#Exiting
camera.release()
cv2.destroyAllWindows() | 0.10917 | 0.075585 |
import pytest
from plenum.common.constants import STEWARD_STRING, VALIDATOR
from pytest import fixture
from plenum.common.throughput_measurements import RevivalSpikeResistantEMAThroughputMeasurement
from plenum.common.util import getMaxFailures
from plenum.test.helper import sdk_send_random_and_check, assertExp, sdk_get_and_check_replies
from plenum.test.node_catchup.helper import waitNodeDataEquality
from plenum.test.pool_transactions.conftest import sdk_node_theta_added
from plenum.test.pool_transactions.helper import sdk_add_new_nym, prepare_new_node_data, prepare_node_request, \
sdk_sign_and_send_prepared_request, create_and_start_new_node
from plenum.test.test_node import checkNodesConnected, TestNode
from stp_core.loop.eventually import eventually
nodeCount = 6
def _send_txn_for_creating_node(looper, sdk_pool_handle, sdk_wallet_steward, tdir, new_node_name, clientIp,
clientPort, nodeIp, nodePort, bls_key, sigseed, key_proof):
new_steward_name = "testClientSteward"
new_steward_wallet_handle = sdk_add_new_nym(looper,
sdk_pool_handle,
sdk_wallet_steward,
alias=new_steward_name,
role=STEWARD_STRING)
# filling node request
_, steward_did = new_steward_wallet_handle
node_request = looper.loop.run_until_complete(
prepare_node_request(steward_did,
new_node_name=new_node_name,
clientIp=clientIp,
clientPort=clientPort,
nodeIp=nodeIp,
nodePort=nodePort,
bls_key=bls_key,
sigseed=sigseed,
services=[VALIDATOR],
key_proof=key_proof))
# sending request using 'sdk_' functions
request_couple = sdk_sign_and_send_prepared_request(looper, new_steward_wallet_handle,
sdk_pool_handle, node_request)
# waiting for replies
sdk_get_and_check_replies(looper, [request_couple])
def test_catchup_after_replica_addition(looper, sdk_pool_handle, txnPoolNodeSet,
sdk_wallet_steward, tdir, tconf, allPluginsPath):
view_no = txnPoolNodeSet[-1].viewNo
sdk_send_random_and_check(looper, txnPoolNodeSet,
sdk_pool_handle, sdk_wallet_steward, 1)
waitNodeDataEquality(looper, *txnPoolNodeSet)
# create node
new_node_name = "Theta"
sigseed, verkey, bls_key, nodeIp, nodePort, clientIp, clientPort, key_proof = \
prepare_new_node_data(tconf, tdir, new_node_name)
new_node = create_and_start_new_node(looper=looper, node_name=new_node_name,
tdir=tdir, sigseed=sigseed,
node_ha=(nodeIp, nodePort), client_ha=(clientIp, clientPort),
tconf=tconf, auto_start=True, plugin_path=allPluginsPath,
nodeClass=TestNode)
_send_txn_for_creating_node(looper, sdk_pool_handle, sdk_wallet_steward, tdir, new_node_name, clientIp,
clientPort, nodeIp, nodePort, bls_key, sigseed, key_proof)
txnPoolNodeSet.append(new_node)
looper.run(checkNodesConnected(txnPoolNodeSet))
looper.run(eventually(lambda: assertExp(n.viewNo == view_no + 1 for n in txnPoolNodeSet)))
waitNodeDataEquality(looper, *txnPoolNodeSet)
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_steward, 1)
waitNodeDataEquality(looper, *txnPoolNodeSet, exclude_from_check=['check_last_ordered_3pc']) | plenum/test/replica/test_catchup_after_replica_addition.py | import pytest
from plenum.common.constants import STEWARD_STRING, VALIDATOR
from pytest import fixture
from plenum.common.throughput_measurements import RevivalSpikeResistantEMAThroughputMeasurement
from plenum.common.util import getMaxFailures
from plenum.test.helper import sdk_send_random_and_check, assertExp, sdk_get_and_check_replies
from plenum.test.node_catchup.helper import waitNodeDataEquality
from plenum.test.pool_transactions.conftest import sdk_node_theta_added
from plenum.test.pool_transactions.helper import sdk_add_new_nym, prepare_new_node_data, prepare_node_request, \
sdk_sign_and_send_prepared_request, create_and_start_new_node
from plenum.test.test_node import checkNodesConnected, TestNode
from stp_core.loop.eventually import eventually
nodeCount = 6
def _send_txn_for_creating_node(looper, sdk_pool_handle, sdk_wallet_steward, tdir, new_node_name, clientIp,
clientPort, nodeIp, nodePort, bls_key, sigseed, key_proof):
new_steward_name = "testClientSteward"
new_steward_wallet_handle = sdk_add_new_nym(looper,
sdk_pool_handle,
sdk_wallet_steward,
alias=new_steward_name,
role=STEWARD_STRING)
# filling node request
_, steward_did = new_steward_wallet_handle
node_request = looper.loop.run_until_complete(
prepare_node_request(steward_did,
new_node_name=new_node_name,
clientIp=clientIp,
clientPort=clientPort,
nodeIp=nodeIp,
nodePort=nodePort,
bls_key=bls_key,
sigseed=sigseed,
services=[VALIDATOR],
key_proof=key_proof))
# sending request using 'sdk_' functions
request_couple = sdk_sign_and_send_prepared_request(looper, new_steward_wallet_handle,
sdk_pool_handle, node_request)
# waiting for replies
sdk_get_and_check_replies(looper, [request_couple])
def test_catchup_after_replica_addition(looper, sdk_pool_handle, txnPoolNodeSet,
sdk_wallet_steward, tdir, tconf, allPluginsPath):
view_no = txnPoolNodeSet[-1].viewNo
sdk_send_random_and_check(looper, txnPoolNodeSet,
sdk_pool_handle, sdk_wallet_steward, 1)
waitNodeDataEquality(looper, *txnPoolNodeSet)
# create node
new_node_name = "Theta"
sigseed, verkey, bls_key, nodeIp, nodePort, clientIp, clientPort, key_proof = \
prepare_new_node_data(tconf, tdir, new_node_name)
new_node = create_and_start_new_node(looper=looper, node_name=new_node_name,
tdir=tdir, sigseed=sigseed,
node_ha=(nodeIp, nodePort), client_ha=(clientIp, clientPort),
tconf=tconf, auto_start=True, plugin_path=allPluginsPath,
nodeClass=TestNode)
_send_txn_for_creating_node(looper, sdk_pool_handle, sdk_wallet_steward, tdir, new_node_name, clientIp,
clientPort, nodeIp, nodePort, bls_key, sigseed, key_proof)
txnPoolNodeSet.append(new_node)
looper.run(checkNodesConnected(txnPoolNodeSet))
looper.run(eventually(lambda: assertExp(n.viewNo == view_no + 1 for n in txnPoolNodeSet)))
waitNodeDataEquality(looper, *txnPoolNodeSet)
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_steward, 1)
waitNodeDataEquality(looper, *txnPoolNodeSet, exclude_from_check=['check_last_ordered_3pc']) | 0.313525 | 0.35209 |
import logging
import textwrap
from datetime import datetime, timedelta
from airflow import DAG # noqa
from airflow import macros # noqa
from airflow.operators.python_operator import PythonOperator # noqa
from pyhocon import ConfigFactory
from databuilder.extractor.hive_table_metadata_extractor import HiveTableMetadataExtractor
from databuilder.extractor.sql_alchemy_extractor import SQLAlchemyExtractor
from databuilder.job.job import DefaultJob
from databuilder.models.table_metadata import DESCRIPTION_NODE_LABEL
from databuilder.loader.file_system_neo4j_csv_loader import FsNeo4jCSVLoader
from databuilder.publisher import neo4j_csv_publisher
from databuilder.publisher.neo4j_csv_publisher import Neo4jCsvPublisher
from databuilder.task.task import DefaultTask
from databuilder.transformer.base_transformer import NoopTransformer
dag_args = {
'concurrency': 10,
# One dagrun at a time
'max_active_runs': 1,
# 4AM, 4PM PST
'schedule_interval': '0 11 * * *',
'catchup': False
}
default_args = {
'owner': 'amundsen',
'start_date': datetime(2018, 6, 18),
'depends_on_past': False,
'email': [''],
'email_on_failure': False,
'email_on_retry': False,
'retries': 3,
'priority_weight': 10,
'retry_delay': timedelta(minutes=5),
'execution_timeout': timedelta(minutes=120)
}
# NEO4J cluster endpoints
NEO4J_ENDPOINT = 'bolt://localhost:7687'
neo4j_endpoint = NEO4J_ENDPOINT
neo4j_user = 'neo4j'
neo4j_password = '<PASSWORD>'
# Todo: user provides a list of schema for indexing
SUPPORTED_HIVE_SCHEMAS = ['hive']
# Global used in all Hive metastore queries.
# String format - ('schema1', schema2', .... 'schemaN')
SUPPORTED_HIVE_SCHEMA_SQL_IN_CLAUSE = "('{schemas}')".format(schemas="', '".join(SUPPORTED_HIVE_SCHEMAS))
# Todo: user needs to modify and provide a hivemetastore connection string
def connection_string():
return 'hivemetastore.connection'
def create_table_wm_job(**kwargs):
sql = textwrap.dedent("""
SELECT From_unixtime(A0.create_time) as create_time,
C0.NAME as schema_name,
B0.tbl_name as table_name,
{func}(A0.part_name) as part_name,
{watermark} as part_type
FROM PARTITIONS A0
LEFT OUTER JOIN TBLS B0
ON A0.tbl_id = B0.tbl_id
LEFT OUTER JOIN DBS C0
ON B0.db_id = C0.db_id
WHERE C0.NAME IN {schemas}
AND B0.tbl_type IN ( 'EXTERNAL_TABLE', 'MANAGED_TABLE' )
AND A0.PART_NAME NOT LIKE '%%__HIVE_DEFAULT_PARTITION__%%'
GROUP BY C0.NAME, B0.tbl_name
ORDER by create_time desc
""").format(func=kwargs['templates_dict'].get('agg_func'),
watermark=kwargs['templates_dict'].get('watermark_type'),
schemas=SUPPORTED_HIVE_SCHEMA_SQL_IN_CLAUSE)
logging.info('SQL query: {}'.format(sql))
tmp_folder = '/var/tmp/amundsen/table_{hwm}'.format(hwm=kwargs['templates_dict']
.get('watermark_type').strip("\""))
node_files_folder = '{tmp_folder}/nodes'.format(tmp_folder=tmp_folder)
relationship_files_folder = '{tmp_folder}/relationships'.format(tmp_folder=tmp_folder)
hwm_extractor = SQLAlchemyExtractor()
csv_loader = FsNeo4jCSVLoader()
task = DefaultTask(extractor=hwm_extractor,
loader=csv_loader,
transformer=NoopTransformer())
job_config = ConfigFactory.from_dict({
'extractor.sqlalchemy.{}'.format(SQLAlchemyExtractor.CONN_STRING): connection_string(),
'extractor.sqlalchemy.{}'.format(SQLAlchemyExtractor.EXTRACT_SQL): sql,
'extractor.sqlalchemy.model_class': 'databuilder.models.hive_watermark.HiveWatermark',
'loader.filesystem_csv_neo4j.{}'.format(FsNeo4jCSVLoader.NODE_DIR_PATH):
node_files_folder,
'loader.filesystem_csv_neo4j.{}'.format(FsNeo4jCSVLoader.RELATION_DIR_PATH):
relationship_files_folder,
'publisher.neo4j.{}'.format(neo4j_csv_publisher.NODE_FILES_DIR):
node_files_folder,
'publisher.neo4j.{}'.format(neo4j_csv_publisher.RELATION_FILES_DIR):
relationship_files_folder,
'publisher.neo4j.{}'.format(neo4j_csv_publisher.NEO4J_END_POINT_KEY):
neo4j_endpoint,
'publisher.neo4j.{}'.format(neo4j_csv_publisher.NEO4J_USER):
neo4j_user,
'publisher.neo4j.{}'.format(neo4j_csv_publisher.NEO4J_PASSWORD):
neo4j_password,
})
job = DefaultJob(conf=job_config,
task=task,
publisher=Neo4jCsvPublisher())
job.launch()
def create_table_metadata_databuilder_job():
"""
Launches data builder job that extracts table and column metadata from MySQL Hive metastore database,
and publishes to Neo4j.
@param kwargs:
@return:
"""
# Adding to where clause to scope schema, filter out temp tables which start with numbers and views
where_clause_suffix = textwrap.dedent("""
WHERE d.NAME IN {schemas}
AND t.TBL_NAME NOT REGEXP '^[0-9]+'
AND t.TBL_TYPE IN ( 'EXTERNAL_TABLE', 'MANAGED_TABLE' )
""").format(schemas=SUPPORTED_HIVE_SCHEMA_SQL_IN_CLAUSE)
tmp_folder = '/var/tmp/amundsen/table_metadata'
node_files_folder = '{tmp_folder}/nodes/'.format(tmp_folder=tmp_folder)
relationship_files_folder = '{tmp_folder}/relationships/'.format(tmp_folder=tmp_folder)
job_config = ConfigFactory.from_dict({
'extractor.hive_table_metadata.{}'.format(HiveTableMetadataExtractor.WHERE_CLAUSE_SUFFIX_KEY):
where_clause_suffix,
'extractor.hive_table_metadata.extractor.sqlalchemy.{}'.format(SQLAlchemyExtractor.CONN_STRING):
connection_string(),
'loader.filesystem_csv_neo4j.{}'.format(FsNeo4jCSVLoader.NODE_DIR_PATH):
node_files_folder,
'loader.filesystem_csv_neo4j.{}'.format(FsNeo4jCSVLoader.RELATION_DIR_PATH):
relationship_files_folder,
'publisher.neo4j.{}'.format(neo4j_csv_publisher.NODE_FILES_DIR):
node_files_folder,
'publisher.neo4j.{}'.format(neo4j_csv_publisher.RELATION_FILES_DIR):
relationship_files_folder,
'publisher.neo4j.{}'.format(neo4j_csv_publisher.NEO4J_END_POINT_KEY):
neo4j_endpoint,
'publisher.neo4j.{}'.format(neo4j_csv_publisher.NEO4J_USER):
neo4j_user,
'publisher.neo4j.{}'.format(neo4j_csv_publisher.NEO4J_PASSWORD):
neo4j_password,
'publisher.neo4j.{}'.format(neo4j_csv_publisher.NEO4J_CREATE_ONLY_NODES):
[DESCRIPTION_NODE_LABEL],
})
job = DefaultJob(conf=job_config,
task=DefaultTask(extractor=HiveTableMetadataExtractor(), loader=FsNeo4jCSVLoader()),
publisher=Neo4jCsvPublisher())
job.launch()
with DAG('amundsen_databuilder', default_args=default_args, **dag_args) as dag:
amundsen_databuilder_table_metadata_job = PythonOperator(
task_id='amundsen_databuilder_table_metadata_job',
python_callable=create_table_metadata_databuilder_job
)
# calculate hive high watermark
amundsen_hwm_job = PythonOperator(
task_id='amundsen_hwm_job',
python_callable=create_table_wm_job,
provide_context=True,
templates_dict={'agg_func': 'max',
'watermark_type': '"high_watermark"',
'part_regex': '{}'.format('{{ ds }}')}
)
# calculate hive low watermark
amundsen_lwm_job = PythonOperator(
task_id='amundsen_lwm_job',
python_callable=create_table_wm_job,
provide_context=True,
templates_dict={'agg_func': 'min',
'watermark_type': '"low_watermark"',
'part_regex': '{}'.format('{{ ds }}')}
) | example/dags/sample_dag.py | import logging
import textwrap
from datetime import datetime, timedelta
from airflow import DAG # noqa
from airflow import macros # noqa
from airflow.operators.python_operator import PythonOperator # noqa
from pyhocon import ConfigFactory
from databuilder.extractor.hive_table_metadata_extractor import HiveTableMetadataExtractor
from databuilder.extractor.sql_alchemy_extractor import SQLAlchemyExtractor
from databuilder.job.job import DefaultJob
from databuilder.models.table_metadata import DESCRIPTION_NODE_LABEL
from databuilder.loader.file_system_neo4j_csv_loader import FsNeo4jCSVLoader
from databuilder.publisher import neo4j_csv_publisher
from databuilder.publisher.neo4j_csv_publisher import Neo4jCsvPublisher
from databuilder.task.task import DefaultTask
from databuilder.transformer.base_transformer import NoopTransformer
dag_args = {
'concurrency': 10,
# One dagrun at a time
'max_active_runs': 1,
# 4AM, 4PM PST
'schedule_interval': '0 11 * * *',
'catchup': False
}
default_args = {
'owner': 'amundsen',
'start_date': datetime(2018, 6, 18),
'depends_on_past': False,
'email': [''],
'email_on_failure': False,
'email_on_retry': False,
'retries': 3,
'priority_weight': 10,
'retry_delay': timedelta(minutes=5),
'execution_timeout': timedelta(minutes=120)
}
# NEO4J cluster endpoints
NEO4J_ENDPOINT = 'bolt://localhost:7687'
neo4j_endpoint = NEO4J_ENDPOINT
neo4j_user = 'neo4j'
neo4j_password = '<PASSWORD>'
# Todo: user provides a list of schema for indexing
SUPPORTED_HIVE_SCHEMAS = ['hive']
# Global used in all Hive metastore queries.
# String format - ('schema1', schema2', .... 'schemaN')
SUPPORTED_HIVE_SCHEMA_SQL_IN_CLAUSE = "('{schemas}')".format(schemas="', '".join(SUPPORTED_HIVE_SCHEMAS))
# Todo: user needs to modify and provide a hivemetastore connection string
def connection_string():
return 'hivemetastore.connection'
def create_table_wm_job(**kwargs):
sql = textwrap.dedent("""
SELECT From_unixtime(A0.create_time) as create_time,
C0.NAME as schema_name,
B0.tbl_name as table_name,
{func}(A0.part_name) as part_name,
{watermark} as part_type
FROM PARTITIONS A0
LEFT OUTER JOIN TBLS B0
ON A0.tbl_id = B0.tbl_id
LEFT OUTER JOIN DBS C0
ON B0.db_id = C0.db_id
WHERE C0.NAME IN {schemas}
AND B0.tbl_type IN ( 'EXTERNAL_TABLE', 'MANAGED_TABLE' )
AND A0.PART_NAME NOT LIKE '%%__HIVE_DEFAULT_PARTITION__%%'
GROUP BY C0.NAME, B0.tbl_name
ORDER by create_time desc
""").format(func=kwargs['templates_dict'].get('agg_func'),
watermark=kwargs['templates_dict'].get('watermark_type'),
schemas=SUPPORTED_HIVE_SCHEMA_SQL_IN_CLAUSE)
logging.info('SQL query: {}'.format(sql))
tmp_folder = '/var/tmp/amundsen/table_{hwm}'.format(hwm=kwargs['templates_dict']
.get('watermark_type').strip("\""))
node_files_folder = '{tmp_folder}/nodes'.format(tmp_folder=tmp_folder)
relationship_files_folder = '{tmp_folder}/relationships'.format(tmp_folder=tmp_folder)
hwm_extractor = SQLAlchemyExtractor()
csv_loader = FsNeo4jCSVLoader()
task = DefaultTask(extractor=hwm_extractor,
loader=csv_loader,
transformer=NoopTransformer())
job_config = ConfigFactory.from_dict({
'extractor.sqlalchemy.{}'.format(SQLAlchemyExtractor.CONN_STRING): connection_string(),
'extractor.sqlalchemy.{}'.format(SQLAlchemyExtractor.EXTRACT_SQL): sql,
'extractor.sqlalchemy.model_class': 'databuilder.models.hive_watermark.HiveWatermark',
'loader.filesystem_csv_neo4j.{}'.format(FsNeo4jCSVLoader.NODE_DIR_PATH):
node_files_folder,
'loader.filesystem_csv_neo4j.{}'.format(FsNeo4jCSVLoader.RELATION_DIR_PATH):
relationship_files_folder,
'publisher.neo4j.{}'.format(neo4j_csv_publisher.NODE_FILES_DIR):
node_files_folder,
'publisher.neo4j.{}'.format(neo4j_csv_publisher.RELATION_FILES_DIR):
relationship_files_folder,
'publisher.neo4j.{}'.format(neo4j_csv_publisher.NEO4J_END_POINT_KEY):
neo4j_endpoint,
'publisher.neo4j.{}'.format(neo4j_csv_publisher.NEO4J_USER):
neo4j_user,
'publisher.neo4j.{}'.format(neo4j_csv_publisher.NEO4J_PASSWORD):
neo4j_password,
})
job = DefaultJob(conf=job_config,
task=task,
publisher=Neo4jCsvPublisher())
job.launch()
def create_table_metadata_databuilder_job():
"""
Launches data builder job that extracts table and column metadata from MySQL Hive metastore database,
and publishes to Neo4j.
@param kwargs:
@return:
"""
# Adding to where clause to scope schema, filter out temp tables which start with numbers and views
where_clause_suffix = textwrap.dedent("""
WHERE d.NAME IN {schemas}
AND t.TBL_NAME NOT REGEXP '^[0-9]+'
AND t.TBL_TYPE IN ( 'EXTERNAL_TABLE', 'MANAGED_TABLE' )
""").format(schemas=SUPPORTED_HIVE_SCHEMA_SQL_IN_CLAUSE)
tmp_folder = '/var/tmp/amundsen/table_metadata'
node_files_folder = '{tmp_folder}/nodes/'.format(tmp_folder=tmp_folder)
relationship_files_folder = '{tmp_folder}/relationships/'.format(tmp_folder=tmp_folder)
job_config = ConfigFactory.from_dict({
'extractor.hive_table_metadata.{}'.format(HiveTableMetadataExtractor.WHERE_CLAUSE_SUFFIX_KEY):
where_clause_suffix,
'extractor.hive_table_metadata.extractor.sqlalchemy.{}'.format(SQLAlchemyExtractor.CONN_STRING):
connection_string(),
'loader.filesystem_csv_neo4j.{}'.format(FsNeo4jCSVLoader.NODE_DIR_PATH):
node_files_folder,
'loader.filesystem_csv_neo4j.{}'.format(FsNeo4jCSVLoader.RELATION_DIR_PATH):
relationship_files_folder,
'publisher.neo4j.{}'.format(neo4j_csv_publisher.NODE_FILES_DIR):
node_files_folder,
'publisher.neo4j.{}'.format(neo4j_csv_publisher.RELATION_FILES_DIR):
relationship_files_folder,
'publisher.neo4j.{}'.format(neo4j_csv_publisher.NEO4J_END_POINT_KEY):
neo4j_endpoint,
'publisher.neo4j.{}'.format(neo4j_csv_publisher.NEO4J_USER):
neo4j_user,
'publisher.neo4j.{}'.format(neo4j_csv_publisher.NEO4J_PASSWORD):
neo4j_password,
'publisher.neo4j.{}'.format(neo4j_csv_publisher.NEO4J_CREATE_ONLY_NODES):
[DESCRIPTION_NODE_LABEL],
})
job = DefaultJob(conf=job_config,
task=DefaultTask(extractor=HiveTableMetadataExtractor(), loader=FsNeo4jCSVLoader()),
publisher=Neo4jCsvPublisher())
job.launch()
with DAG('amundsen_databuilder', default_args=default_args, **dag_args) as dag:
amundsen_databuilder_table_metadata_job = PythonOperator(
task_id='amundsen_databuilder_table_metadata_job',
python_callable=create_table_metadata_databuilder_job
)
# calculate hive high watermark
amundsen_hwm_job = PythonOperator(
task_id='amundsen_hwm_job',
python_callable=create_table_wm_job,
provide_context=True,
templates_dict={'agg_func': 'max',
'watermark_type': '"high_watermark"',
'part_regex': '{}'.format('{{ ds }}')}
)
# calculate hive low watermark
amundsen_lwm_job = PythonOperator(
task_id='amundsen_lwm_job',
python_callable=create_table_wm_job,
provide_context=True,
templates_dict={'agg_func': 'min',
'watermark_type': '"low_watermark"',
'part_regex': '{}'.format('{{ ds }}')}
) | 0.38122 | 0.082475 |
import numpy as np
from rlgym.utils import RewardFunction
from rlgym.utils.common_values import CEILING_Z, BALL_MAX_SPEED, CAR_MAX_SPEED, BLUE_TEAM, BLUE_GOAL_BACK, \
BLUE_GOAL_CENTER, ORANGE_GOAL_BACK, ORANGE_GOAL_CENTER, BALL_RADIUS, ORANGE_TEAM
from rlgym.utils.gamestates import GameState, PlayerData
from rlgym.utils.math import cosine_similarity
from numpy import exp
from numpy.linalg import norm
class NectoRewardFunction(RewardFunction):
    """Dense team-spirit reward used for Necto training.

    Rewards are computed once per new ``GameState`` for *all* players and
    cached; ``get_reward`` then hands each player its slot in order.
    Continuous shaping terms are potential differences (current quality
    minus previous quality), so they roughly cancel over an episode, while
    discrete events (ball touches, demolitions, goals) are added directly.

    NOTE(review): the goal and team-spirit bookkeeping assumes
    ``state.players`` lists all blue players first, then all orange
    players (the ``mid`` split below) — confirm against the match setup.
    """

    # Aim point per net: midway between the back of the goal and the
    # center of the goal line.
    BLUE_GOAL = (np.array(BLUE_GOAL_BACK) + np.array(BLUE_GOAL_CENTER)) / 2
    ORANGE_GOAL = (np.array(ORANGE_GOAL_BACK) + np.array(ORANGE_GOAL_CENTER)) / 2

    def __init__(
            self,
            team_spirit=0.3,
            goal_w=10,
            goal_dist_w=10,
            goal_speed_bonus_w=2.5,
            goal_dist_bonus_w=2.5,
            demo_w=5,
            dist_w=0.75,  # Changed from 1
            align_w=0.5,
            boost_w=1,  # Changed from 0.5
            touch_height_w=1,  # Changed from 0.5
            touch_accel_w=0.5,  # Changed from 1
    ):
        """Store reward weights and initialize per-episode bookkeeping.

        :param team_spirit: 0 -> fully selfish rewards, 1 -> fully
            team-averaged rewards (opponent mean is always subtracted).
        :param goal_w: flat reward per goal scored.
        :param goal_dist_w: weight of the ball-distance-to-net potential.
        :param goal_speed_bonus_w: bonus for the scoring team scaling with
            ball speed at the moment of the goal.
        :param goal_dist_bonus_w: penalty for the conceding team scaling
            with each defender's distance to the ball at the goal.
        :param demo_w: demolition weight (half to the demolisher as reward,
            half to the victim as penalty).
        :param dist_w: player-to-ball closeness potential weight.
        :param align_w: player/ball/net alignment potential weight.
        :param boost_w: boost-amount potential weight (sqrt-scaled).
        :param touch_height_w: touch reward scaling with ball height.
        :param touch_accel_w: touch reward scaling with imparted ball speed.
        """
        self.team_spirit = team_spirit
        self.current_state = None
        self.last_state = None
        # Index of the next player to receive a cached reward slot.
        self.n = 0
        self.goal_w = goal_w
        self.goal_dist_w = goal_dist_w
        self.goal_speed_bonus_w = goal_speed_bonus_w
        self.goal_dist_bonus_w = goal_dist_bonus_w
        self.demo_w = demo_w
        self.dist_w = dist_w
        self.align_w = align_w
        self.boost_w = boost_w
        self.touch_height_w = touch_height_w
        self.touch_accel_w = touch_accel_w
        # Cached potentials from the previous state, used for differencing.
        self.state_quality = None
        self.player_qualities = None
        self.rewards = None

    def _state_qualities(self, state: GameState):
        """Return (state_quality, player_qualities) potentials for *state*.

        ``state_quality`` is blue-positive: higher when the ball is close
        to the orange net. ``player_qualities`` holds each player's own
        (team-relative) quality.
        """
        ball_pos = state.ball.position
        # Ball proximity to each net, squashed with exp(-d / CAR_MAX_SPEED).
        state_quality = self.goal_dist_w * (exp(-norm(self.ORANGE_GOAL - ball_pos) / CAR_MAX_SPEED)
                                            - exp(-norm(self.BLUE_GOAL - ball_pos) / CAR_MAX_SPEED))
        player_qualities = np.zeros(len(state.players))
        for i, player in enumerate(state.players):
            pos = player.car_data.position
            # Align player->ball and player->net vectors (attack minus defend).
            alignment = 0.5 * (cosine_similarity(ball_pos - pos, ORANGE_GOAL_BACK - pos)
                               - cosine_similarity(ball_pos - pos, BLUE_GOAL_BACK - pos))
            if player.team_num == ORANGE_TEAM:
                # Flip into the orange player's own frame.
                alignment *= -1
            liu_dist = exp(-norm(ball_pos - pos) / 1410)  # 1410 = max driving speed
            player_qualities[i] = (self.dist_w * liu_dist + self.align_w * alignment
                                   + self.boost_w * np.sqrt(player.boost_amount))
        # TODO use only dist of closest player for entire team
        return state_quality, player_qualities

    def _calculate_rewards(self, state: GameState):
        """Compute and cache one reward per player for *state*.

        Combines potential differences, touch/demo events, goal handling
        and team-spirit blending; results land in ``self.rewards``.
        """
        # Calculate rewards; state_quality is positive for blue, negative
        # for orange.
        state_quality, player_qualities = self._state_qualities(state)
        player_rewards = np.zeros_like(player_qualities)
        for i, player in enumerate(state.players):
            last = self.last_state.players[i]
            if player.ball_touched:
                curr_vel = self.current_state.ball.linear_velocity
                last_vel = self.last_state.ball.linear_velocity
                # On ground it gets about 0.05 just for touching, as well as
                # some extra for the speed it produces. Close to 20 in the
                # limit with ball on top, but opponents should learn to
                # challenge way before that.
                player_rewards[i] += (self.touch_height_w * state.ball.position[2] / CEILING_Z +
                                      self.touch_accel_w * norm(curr_vel - last_vel) / BALL_MAX_SPEED)
            # Demo weight is split: half penalty for being demoed, half
            # reward for landing a demo.
            if player.is_demoed and not last.is_demoed:
                player_rewards[i] -= self.demo_w / 2
            if player.match_demolishes > last.match_demolishes:
                player_rewards[i] += self.demo_w / 2
        mid = len(player_rewards) // 2  # blue players assumed in [:mid]
        # Potential differences: per-player quality plus team-framed state
        # quality (blue adds it, orange subtracts it).
        player_rewards += player_qualities - self.player_qualities
        player_rewards[:mid] += state_quality - self.state_quality
        player_rewards[mid:] -= state_quality - self.state_quality
        self.player_qualities = player_qualities
        self.state_quality = state_quality
        # Handle goals with no scorer for critic consistency,
        # random state could send ball straight into goal.
        d_blue = state.blue_score - self.last_state.blue_score
        d_orange = state.orange_score - self.last_state.orange_score
        if d_blue > 0:
            goal_speed = norm(self.last_state.ball.linear_velocity)
            distances = norm(
                np.stack([p.car_data.position for p in state.players[mid:]])
                - self.last_state.ball.position,
                axis=-1
            )
            # Conceders are penalized more the farther they were from the
            # ball when it went in.
            player_rewards[mid:] = -self.goal_dist_bonus_w * (1 - exp(-distances / CAR_MAX_SPEED))
            # Fixed: the goal-speed bonus now uses goal_speed_bonus_w; it
            # was previously weighted by goal_dist_bonus_w, leaving the
            # goal_speed_bonus_w parameter unused.
            player_rewards[:mid] = (self.goal_w * d_blue
                                    + self.goal_speed_bonus_w * goal_speed / BALL_MAX_SPEED)
        if d_orange > 0:
            goal_speed = norm(self.last_state.ball.linear_velocity)
            distances = norm(
                np.stack([p.car_data.position for p in state.players[:mid]])
                - self.last_state.ball.position,
                axis=-1
            )
            player_rewards[:mid] = -self.goal_dist_bonus_w * (1 - exp(-distances / CAR_MAX_SPEED))
            # Fixed: goal_speed_bonus_w (see blue branch above).
            player_rewards[mid:] = (self.goal_w * d_orange
                                    + self.goal_speed_bonus_w * goal_speed / BALL_MAX_SPEED)
        # Team-spirit blending: mix own reward with team mean, subtract the
        # opposing team's mean (zero-sum-ish). nan_to_num guards an empty
        # team (mean of empty slice is NaN).
        blue = player_rewards[:mid]
        orange = player_rewards[mid:]
        bm = np.nan_to_num(blue.mean())
        om = np.nan_to_num(orange.mean())
        player_rewards[:mid] = (1 - self.team_spirit) * blue + self.team_spirit * bm - om
        player_rewards[mid:] = (1 - self.team_spirit) * orange + self.team_spirit * om - bm
        self.last_state = state
        self.rewards = player_rewards

    def reset(self, initial_state: GameState):
        """Reset per-episode bookkeeping and seed potentials from *initial_state*."""
        self.n = 0
        self.last_state = None
        self.rewards = None
        self.current_state = initial_state
        self.state_quality, self.player_qualities = self._state_qualities(initial_state)

    def get_reward(self, player: PlayerData, state: GameState, previous_action: np.ndarray) -> float:
        """Return this player's reward for *state*.

        Rewards for all players are computed once when a new state is
        seen; successive calls hand out the cached slots in player order.
        """
        if state != self.current_state:
            self.last_state = self.current_state
            self.current_state = state
            self._calculate_rewards(state)
            self.n = 0
        rew = self.rewards[self.n]
        self.n += 1
        return float(rew)
from rlgym.utils import RewardFunction
from rlgym.utils.common_values import CEILING_Z, BALL_MAX_SPEED, CAR_MAX_SPEED, BLUE_TEAM, BLUE_GOAL_BACK, \
BLUE_GOAL_CENTER, ORANGE_GOAL_BACK, ORANGE_GOAL_CENTER, BALL_RADIUS, ORANGE_TEAM
from rlgym.utils.gamestates import GameState, PlayerData
from rlgym.utils.math import cosine_similarity
from numpy import exp
from numpy.linalg import norm
class NectoRewardFunction(RewardFunction):
    """Dense team reward for Rocket League self-play.

    Combines potential-based shaping (ball-to-goal distance; per-player
    distance/alignment/boost), event rewards (touches, demolitions, goals)
    and team-spirit mixing between individual and team-average rewards.
    Rewards are computed once per new state for all players; blue players
    are assumed to occupy the first half of ``state.players`` and orange
    the second half.
    """

    # Target points for the ball-to-goal distance potential: midpoint of
    # the back of each net and its goal-line center.
    BLUE_GOAL = (np.array(BLUE_GOAL_BACK) + np.array(BLUE_GOAL_CENTER)) / 2
    ORANGE_GOAL = (np.array(ORANGE_GOAL_BACK) + np.array(ORANGE_GOAL_CENTER)) / 2

    def __init__(
            self,
            team_spirit=0.3,
            goal_w=10,
            goal_dist_w=10,
            goal_speed_bonus_w=2.5,
            goal_dist_bonus_w=2.5,
            demo_w=5,
            dist_w=0.75,  # Changed from 1
            align_w=0.5,
            boost_w=1,  # Changed from 0.5
            touch_height_w=1,  # Changed from 0.5
            touch_accel_w=0.5,  # Changed from 1
    ):
        self.team_spirit = team_spirit
        self.current_state = None
        self.last_state = None
        # Index of the next player to hand a cached reward to (see get_reward).
        self.n = 0
        self.goal_w = goal_w
        self.goal_dist_w = goal_dist_w
        self.goal_speed_bonus_w = goal_speed_bonus_w
        self.goal_dist_bonus_w = goal_dist_bonus_w
        self.demo_w = demo_w
        self.dist_w = dist_w
        self.align_w = align_w
        self.boost_w = boost_w
        self.touch_height_w = touch_height_w
        self.touch_accel_w = touch_accel_w
        self.state_quality = None
        self.player_qualities = None
        self.rewards = None

    def _state_qualities(self, state: GameState):
        """Return ``(state_quality, player_qualities)`` potentials for state.

        state_quality is positive when the ball is near the orange net
        (good for blue); player_qualities holds per-player shaping terms.
        """
        ball_pos = state.ball.position
        state_quality = self.goal_dist_w * (exp(-norm(self.ORANGE_GOAL - ball_pos) / CAR_MAX_SPEED)
                                            - exp(-norm(self.BLUE_GOAL - ball_pos) / CAR_MAX_SPEED))
        player_qualities = np.zeros(len(state.players))
        for i, player in enumerate(state.players):
            pos = player.car_data.position
            # Align player->ball and player->net vectors; sign flipped for orange
            # so positive alignment always means "towards the opponent net".
            alignment = 0.5 * (cosine_similarity(ball_pos - pos, ORANGE_GOAL_BACK - pos)
                               - cosine_similarity(ball_pos - pos, BLUE_GOAL_BACK - pos))
            if player.team_num == ORANGE_TEAM:
                alignment *= -1
            liu_dist = exp(-norm(ball_pos - pos) / 1410)  # Max driving speed
            player_qualities[i] = (self.dist_w * liu_dist + self.align_w * alignment
                                   + self.boost_w * np.sqrt(player.boost_amount))
        # TODO use only dist of closest player for entire team
        return state_quality, player_qualities

    def _calculate_rewards(self, state: GameState):
        """Compute rewards for the transition ``last_state -> state``.

        Shaping terms are expressed positive-for-blue and negated for the
        orange half by the slicing below; results are cached in self.rewards.
        """
        state_quality, player_qualities = self._state_qualities(state)
        player_rewards = np.zeros_like(player_qualities)
        for i, player in enumerate(state.players):
            last = self.last_state.players[i]
            if player.ball_touched:
                curr_vel = self.current_state.ball.linear_velocity
                last_vel = self.last_state.ball.linear_velocity
                # On ground it gets about 0.05 just for touching, as well as some extra for the speed it produces
                # Close to 20 in the limit with ball on top, but opponents should learn to challenge way before that
                player_rewards[i] += (self.touch_height_w * state.ball.position[2] / CEILING_Z +
                                      self.touch_accel_w * norm(curr_vel - last_vel) / BALL_MAX_SPEED)
            if player.is_demoed and not last.is_demoed:
                player_rewards[i] -= self.demo_w / 2
            if player.match_demolishes > last.match_demolishes:
                player_rewards[i] += self.demo_w / 2
        mid = len(player_rewards) // 2  # blue team first, orange second
        # Potential-based shaping: difference of qualities between states.
        player_rewards += player_qualities - self.player_qualities
        player_rewards[:mid] += state_quality - self.state_quality
        player_rewards[mid:] -= state_quality - self.state_quality
        self.player_qualities = player_qualities
        self.state_quality = state_quality
        # Handle goals with no scorer for critic consistency,
        # random state could send ball straight into goal
        d_blue = state.blue_score - self.last_state.blue_score
        d_orange = state.orange_score - self.last_state.orange_score
        if d_blue > 0:
            goal_speed = norm(self.last_state.ball.linear_velocity)
            distances = norm(
                np.stack([p.car_data.position for p in state.players[mid:]])
                - self.last_state.ball.position,
                axis=-1
            )
            # Conceding team: penalty that shrinks with proximity to the ball.
            player_rewards[mid:] = -self.goal_dist_bonus_w * (1 - exp(-distances / CAR_MAX_SPEED))
            # Scoring team: goal reward plus a shot-speed bonus.
            # FIX: the speed bonus previously multiplied by goal_dist_bonus_w,
            # which left the goal_speed_bonus_w parameter completely unused.
            player_rewards[:mid] = (self.goal_w * d_blue
                                    + self.goal_speed_bonus_w * goal_speed / BALL_MAX_SPEED)
        if d_orange > 0:
            goal_speed = norm(self.last_state.ball.linear_velocity)
            distances = norm(
                np.stack([p.car_data.position for p in state.players[:mid]])
                - self.last_state.ball.position,
                axis=-1
            )
            player_rewards[:mid] = -self.goal_dist_bonus_w * (1 - exp(-distances / CAR_MAX_SPEED))
            player_rewards[mid:] = (self.goal_w * d_orange
                                    + self.goal_speed_bonus_w * goal_speed / BALL_MAX_SPEED)
        # Team spirit: blend own reward with the team mean, minus the
        # opposing team's mean (zero-sum-ish between the two halves).
        blue = player_rewards[:mid]
        orange = player_rewards[mid:]
        bm = np.nan_to_num(blue.mean())
        om = np.nan_to_num(orange.mean())
        player_rewards[:mid] = (1 - self.team_spirit) * blue + self.team_spirit * bm - om
        player_rewards[mid:] = (1 - self.team_spirit) * orange + self.team_spirit * om - bm
        self.last_state = state
        self.rewards = player_rewards

    def reset(self, initial_state: GameState):
        """Reinitialize bookkeeping at the start of an episode."""
        self.n = 0
        self.last_state = None
        self.rewards = None
        self.current_state = initial_state
        self.state_quality, self.player_qualities = self._state_qualities(initial_state)

    def get_reward(self, player: PlayerData, state: GameState, previous_action: np.ndarray) -> float:
        """Return the reward for one player.

        On the first call of a new state, all players' rewards are computed
        in one pass; subsequent calls hand them out in order.  Relies on
        rlgym querying players in the same fixed order each step -- TODO confirm.
        """
        if state != self.current_state:
            self.last_state = self.current_state
            self.current_state = state
            self._calculate_rewards(state)
            self.n = 0
        rew = self.rewards[self.n]
        self.n += 1
        return float(rew)
from pathlib import Path
import pytest
import xlwings as xw
this_dir = Path(__file__).resolve().parent
@pytest.fixture(scope="module")
def app():
with xw.App(visible=False) as app:
yield app
for f in Path(".").glob("tempfile*"):
f.unlink()
for f in Path("temp").glob("tempfile*"):
f.unlink()
def test_save_new_book_defaults(app):
book = app.books.add()
if Path(book.name + ".xlsx").is_file():
Path(book.name + ".xlsx").unlink()
book.save()
assert Path(book.name).is_file()
# TODO: xlam and xltx fail
@pytest.mark.parametrize(
"name",
[
"tempfile.xlsx",
"tempfile.xlsm",
"tempfile.xlsb",
"tempfile.xltm",
"tempfile.xls",
"tempfile.xlt",
"tempfile.xla",
],
)
def test_save_new_book_no_path(app, name):
book = app.books.add()
book.save(name)
assert book.name == name
assert Path(name).is_file()
@pytest.mark.parametrize(
"name",
[
"tempfile2.xlsx",
"tempfile2.xlsm",
"tempfile2.xlsb",
"tempfile2.xltm",
"tempfile2.xls",
"tempfile2.xlt",
"tempfile2.xla",
],
)
def test_save_new_book_with_path(app, name):
Path("temp").mkdir(exist_ok=True)
book = app.books.add()
fullname = Path(".").resolve() / "temp" / name
book.save(fullname)
assert book.fullname == str(fullname)
assert Path(fullname).is_file()
@pytest.mark.parametrize(
"name",
[
"tempfile3.xlsx",
"tempfile3.xlsm",
"tempfile3.xlsb",
"tempfile3.xltm",
"tempfile3.xls",
"tempfile3.xlt",
"tempfile3.xla",
],
)
def test_save_existing_book_no_path(app, name):
book = app.books.open(this_dir / "test book.xlsx")
book.save(name)
book.save()
assert book.name == name
assert Path(name).is_file()
@pytest.mark.parametrize(
"name",
[
"tempfile4.xlsx",
"tempfile4.xlsm",
"tempfile4.xlsb",
"tempfile4.xltm",
"tempfile4.xls",
"tempfile4.xlt",
"tempfile4.xla",
],
)
def test_save_existing_book_with_path(app, name):
Path("temp").mkdir(exist_ok=True)
book = app.books.open(this_dir / "test book.xlsx")
fullname = Path(".").resolve() / "temp" / name
book.save(fullname)
book.save()
assert book.fullname == str(fullname)
assert Path(fullname).is_file() | tests/test_fileformats.py | from pathlib import Path
import pytest
import xlwings as xw
# Directory containing this test module (and the "test book.xlsx" fixture file).
this_dir = Path(__file__).resolve().parent
@pytest.fixture(scope="module")
def app():
    """Yield a hidden Excel instance; delete tempfile* artifacts on teardown."""
    with xw.App(visible=False) as app:
        yield app
        # Teardown (still inside the app context): remove files created by
        # the save tests, both in the cwd and in the temp/ subdirectory.
        for directory in (Path("."), Path("temp")):
            for leftover in directory.glob("tempfile*"):
                leftover.unlink()
def test_save_new_book_defaults(app):
    """save() with no arguments on a fresh book must create a file on disk."""
    book = app.books.add()
    stale = Path(book.name + ".xlsx")
    # Clear leftovers from a previous run so save() takes the new-file path.
    if stale.is_file():
        stale.unlink()
    book.save()
    assert Path(book.name).is_file()
# TODO: xlam and xltx fail
@pytest.mark.parametrize(
    "name",
    ["tempfile." + ext for ext in ("xlsx", "xlsm", "xlsb", "xltm", "xls", "xlt", "xla")],
)
def test_save_new_book_no_path(app, name):
    """save(name) on a fresh book writes <name> into the current directory."""
    book = app.books.add()
    book.save(name)
    assert book.name == name
    assert Path(name).is_file()
@pytest.mark.parametrize(
    "name",
    ["tempfile2." + ext for ext in ("xlsx", "xlsm", "xlsb", "xltm", "xls", "xlt", "xla")],
)
def test_save_new_book_with_path(app, name):
    """save(full_path) on a fresh book writes the file at that location."""
    Path("temp").mkdir(exist_ok=True)
    book = app.books.add()
    target = Path(".").resolve() / "temp" / name
    book.save(target)
    assert book.fullname == str(target)
    assert target.is_file()
@pytest.mark.parametrize(
    "name",
    ["tempfile3." + ext for ext in ("xlsx", "xlsm", "xlsb", "xltm", "xls", "xlt", "xla")],
)
def test_save_existing_book_no_path(app, name):
    """save(name) converts an opened book; a follow-up save() re-saves in place."""
    book = app.books.open(this_dir / "test book.xlsx")
    book.save(name)
    book.save()
    assert book.name == name
    assert Path(name).is_file()
@pytest.mark.parametrize(
    "name",
    ["tempfile4." + ext for ext in ("xlsx", "xlsm", "xlsb", "xltm", "xls", "xlt", "xla")],
)
def test_save_existing_book_with_path(app, name):
    """save(full_path) converts an opened book; a follow-up save() keeps it there."""
    Path("temp").mkdir(exist_ok=True)
    book = app.books.open(this_dir / "test book.xlsx")
    target = Path(".").resolve() / "temp" / name
    book.save(target)
    book.save()
    assert book.fullname == str(target)
    assert target.is_file()
from sklearn.linear_model.stochastic_gradient import SGDClassifier, SGDRegressor
from sklearn.linear_model.passive_aggressive import PassiveAggressiveClassifier
from sklearn.linear_model.perceptron import Perceptron
from skmultiflow.classification.perceptron import PerceptronMask
from skmultiflow.classification.lazy.knn_adwin import KNNAdwin
from skmultiflow.classification.lazy.knn import KNN
from skmultiflow.classification.meta.oza_bagging_adwin import OzaBaggingAdwin
from skmultiflow.core.pipeline import Pipeline
from skmultiflow.data.file_stream import FileStream
from skmultiflow.data.generators.waveform_generator import WaveformGenerator
from skmultiflow.evaluation.evaluate_prequential import EvaluatePrequential
def demo(output_file=None, instances=40000):
""" _test_prequential
This demo shows how to produce a prequential evaluation.
The first thing needed is a stream. For this case we use a file stream
which gets its samples from the sea_big.csv file, inside the datasets
folder.
Then we need to setup a classifier, which in this case is an instance
of sklearn's PassiveAggressiveClassifier. Then, optionally we create a
pipeline structure, initialized on that classifier.
The evaluation is then run.
Parameters
----------
output_file: string
The name of the csv output file
instances: int
The evaluation's max number of instances
"""
# Setup the File Stream
stream = FileStream("../datasets/sea_big.csv", -1, 1)
# stream = WaveformGenerator()
stream.prepare_for_use()
# Setup the classifier
# classifier = SGDClassifier()
# classifier = KNNAdwin(k=8, max_window_size=2000,leaf_size=40, categorical_list=None)
# classifier = OzaBaggingAdwin(h=KNN(k=8, max_window_size=2000, leaf_size=30, categorical_list=None))
classifier = PassiveAggressiveClassifier()
# classifier = SGDRegressor()
# classifier = PerceptronMask()
# Setup the pipeline
pipe = Pipeline([('Classifier', classifier)])
# Setup the evaluator
evaluator = EvaluatePrequential(pretrain_size=200, max_samples=instances, batch_size=1, n_wait=100, max_time=1000,
output_file=output_file, show_plot=True,
metrics=['kappa', 'kappa_t', 'performance'])
# Evaluate
evaluator.evaluate(stream=stream, model=pipe)
if __name__ == '__main__':
demo('log_test_prequential.csv', 20000) | src/skmultiflow/demos/_test_prequential.py | from sklearn.linear_model.stochastic_gradient import SGDClassifier, SGDRegressor
from sklearn.linear_model.passive_aggressive import PassiveAggressiveClassifier
from sklearn.linear_model.perceptron import Perceptron
from skmultiflow.classification.perceptron import PerceptronMask
from skmultiflow.classification.lazy.knn_adwin import KNNAdwin
from skmultiflow.classification.lazy.knn import KNN
from skmultiflow.classification.meta.oza_bagging_adwin import OzaBaggingAdwin
from skmultiflow.core.pipeline import Pipeline
from skmultiflow.data.file_stream import FileStream
from skmultiflow.data.generators.waveform_generator import WaveformGenerator
from skmultiflow.evaluation.evaluate_prequential import EvaluatePrequential
def demo(output_file=None, instances=40000):
    """Run a prequential evaluation demo.

    Builds a FileStream over ``../datasets/sea_big.csv``, wraps a
    PassiveAggressiveClassifier in a single-stage Pipeline, and evaluates
    it prequentially with plotting and optional CSV logging.

    Parameters
    ----------
    output_file: string
        The name of the csv output file (None disables file output).
    instances: int
        The evaluation's max number of instances.
    """
    # Stream: the class label is the last column (-1), with one target (1).
    stream = FileStream("../datasets/sea_big.csv", -1, 1)
    stream.prepare_for_use()
    # Learner wrapped in a pipeline (other classifiers can be swapped in here).
    classifier = PassiveAggressiveClassifier()
    pipe = Pipeline([('Classifier', classifier)])
    # Evaluator: pretrain on 200 samples, then evaluate one sample at a time.
    evaluator = EvaluatePrequential(
        pretrain_size=200,
        max_samples=instances,
        batch_size=1,
        n_wait=100,
        max_time=1000,
        output_file=output_file,
        show_plot=True,
        metrics=['kappa', 'kappa_t', 'performance'],
    )
    evaluator.evaluate(stream=stream, model=pipe)
if __name__ == '__main__':
demo('log_test_prequential.csv', 20000) | 0.825238 | 0.325534 |
from __future__ import unicode_literals
import codecs
import os
import sys
import re
sys.path.append(os.path.abspath(os.path.join(__file__, os.pardir, os.pardir, 'DropPy.Common')))
from file_tools import get_file_paths_from_directory
H1_SETEX_STYLE_REGEX = re.compile(r'^-+$')
H2_SETEX_STYLE_REGEX = re.compile(r'^=+$')
ATX_STYLE_REGEX = re.compile(r'^#{1,6} .*$')
class Task(object):
"""
Documentation: https://docs.droppyapp.com/tasks/markdown-add-toc
"""
def __init__(self, input_dir, output_dir, **kwargs):
# Get keyword arguments.
toc_header = kwargs.get(str('toc_header'), '# Table of Contents')
# Process files and directories.
for item_name in os.listdir(input_dir):
item_path = os.path.join(input_dir, item_name)
if os.path.isfile(item_path):
self.add_toc(item_path, output_dir, toc_header)
elif os.path.isdir(item_path):
output_sub_dir = os.path.join(output_dir, item_name)
os.makedirs(output_sub_dir)
contained_files = get_file_paths_from_directory(item_path)
for contained_file in contained_files:
self.add_toc(contained_file, output_sub_dir, toc_header)
def add_toc(self, input_file, output_dir, toc_header):
headers = []
previous_line_content = ''
# Parse source file.
with codecs.open(input_file, encoding='utf-8', mode='r') as input_file_handler:
inside_code_block = False
for line in input_file_handler:
# Skip fenced code blocks.
if inside_code_block:
if line.startswith('```') or line.startswith('~~~'):
# We were in a code block but this line ended it, toggle mode and continue with next line.
inside_code_block = False
continue
else:
# We are still in a code block, continue with next line.
continue
else:
if line.startswith('```') or line.startswith('~~~'):
# We are now in a code block, toggle mode and continue with next line.
inside_code_block = True
continue
# Ignore lines indented by tab or 4x spaces.
if line.startswith('\t') or line.startswith(' '):
continue
# Detect the two types of headers.
header = None
if H1_SETEX_STYLE_REGEX.match(line):
header, previous_line_content = self.get_header(kind='setex',
previous_line_content=previous_line_content,
text=previous_line_content.strip(),
level=1)
elif H2_SETEX_STYLE_REGEX.match(line):
header, previous_line_content = self.get_header(kind='setex',
previous_line_content=previous_line_content,
text=previous_line_content.strip(),
level=2)
elif ATX_STYLE_REGEX.match(line):
header, previous_line_content = self.get_header(kind='atx',
previous_line_content=previous_line_content,
text=self.clean_atx_content(line.strip()),
level=self.detect_atx_level(line))
else:
# Remember line's content when checking the next line (if setex style header is detected then).
previous_line_content = line.strip()
if header:
headers.append(header)
# Write target file.
output_file_name = os.path.basename(input_file)
output_file = os.path.join(output_dir, output_file_name)
with codecs.open(output_file, encoding='utf-8', mode='w') as output_file_handler:
# Start the new file with the TOC header.
output_file_handler.write('%s\n\n' % toc_header)
# Add the TOC itself.
previous_level = 1
level_array = [0, 0, 0, 0, 0, 0]
for text, current_level in headers:
level_array[current_level - 1] += 1
if current_level < previous_level:
for index in range(current_level, len(level_array)):
level_array[index] = 0
output_file_handler.write('%s %d. [%s](#%s)\n' % ('\t' * (current_level - 1),
level_array[current_level-1],
text,
self.generate_anchor(text)))
# For next item.
previous_level = current_level
# Then add a horizontal rule.
output_file_handler.write('\n---\n\n')
# Finally add the rest of the content of the source file.
with codecs.open(input_file, encoding='utf-8', mode='r') as source_file_handler:
for line in source_file_handler:
output_file_handler.write(line)
@staticmethod
def get_header(kind, previous_line_content, text, level):
if kind == 'setex':
if previous_line_content == '': # ignore horizontal rules (also matches the regex)
return None, previous_line_content
header = [text, level]
previous_line_content = ''
return header, previous_line_content
@staticmethod
def generate_anchor(text):
# Convert spaces to hyphens and lowercase.
anchor_text = text.lower().replace(' ', '-')
# Remove every special character except hyphens, but kepp the usual unicode characters.
return re.sub('([^\w\- üöäßéèêáàâóòô]|_)+', '', anchor_text)
@staticmethod
def detect_atx_level(line_content):
for m, character in enumerate(line_content):
if character == ' ':
return m
@staticmethod
def clean_atx_content(line_content):
clean_line = line_content
for character in clean_line:
if character == '#':
clean_line = clean_line[1:]
else:
break
for character in reversed(clean_line):
if character == '#':
clean_line = clean_line[:-1]
else:
break
return clean_line.strip() | Tasks/Markdown.AddToc/task.py |
from __future__ import unicode_literals
import codecs
import os
import sys
import re
sys.path.append(os.path.abspath(os.path.join(__file__, os.pardir, os.pardir, 'DropPy.Common')))
from file_tools import get_file_paths_from_directory
# Setex-style underline detection: a line of only '-' is treated as level 1
# and a line of only '=' as level 2.
# NOTE(review): standard Markdown maps '=' underlines to H1 and '-' to H2,
# the opposite of these labels -- confirm whether this mapping is intended.
H1_SETEX_STYLE_REGEX = re.compile(r'^-+$')
H2_SETEX_STYLE_REGEX = re.compile(r'^=+$')
# ATX-style headers: one to six '#' characters, a space, then the title.
ATX_STYLE_REGEX = re.compile(r'^#{1,6} .*$')
class Task(object):
    """Prepend a numbered table of contents to Markdown files.

    Documentation: https://docs.droppyapp.com/tasks/markdown-add-toc
    """

    def __init__(self, input_dir, output_dir, **kwargs):
        # Get keyword arguments (str() keeps the key a native str on Python 2).
        toc_header = kwargs.get(str('toc_header'), '# Table of Contents')
        # Process top-level files and one level of sub-directories.
        for item_name in os.listdir(input_dir):
            item_path = os.path.join(input_dir, item_name)
            if os.path.isfile(item_path):
                self.add_toc(item_path, output_dir, toc_header)
            elif os.path.isdir(item_path):
                output_sub_dir = os.path.join(output_dir, item_name)
                os.makedirs(output_sub_dir)
                contained_files = get_file_paths_from_directory(item_path)
                for contained_file in contained_files:
                    self.add_toc(contained_file, output_sub_dir, toc_header)

    def add_toc(self, input_file, output_dir, toc_header):
        """Write a copy of input_file into output_dir with a TOC prepended.

        The TOC is built from setex and ATX headers found outside fenced
        code blocks and outside tab/4-space indented blocks.
        """
        headers = []
        previous_line_content = ''
        # First pass: collect (text, level) pairs for every header.
        with codecs.open(input_file, encoding='utf-8', mode='r') as input_file_handler:
            inside_code_block = False
            for line in input_file_handler:
                # Skip fenced code blocks; a fence line toggles the state.
                if inside_code_block:
                    if line.startswith('```') or line.startswith('~~~'):
                        inside_code_block = False
                    continue
                else:
                    if line.startswith('```') or line.startswith('~~~'):
                        inside_code_block = True
                        continue
                # Ignore lines indented by tab or 4x spaces (indented code).
                if line.startswith('\t') or line.startswith('    '):
                    continue
                # Detect the two types of headers.
                header = None
                if H1_SETEX_STYLE_REGEX.match(line):
                    header, previous_line_content = self.get_header(kind='setex',
                                                                    previous_line_content=previous_line_content,
                                                                    text=previous_line_content.strip(),
                                                                    level=1)
                elif H2_SETEX_STYLE_REGEX.match(line):
                    header, previous_line_content = self.get_header(kind='setex',
                                                                    previous_line_content=previous_line_content,
                                                                    text=previous_line_content.strip(),
                                                                    level=2)
                elif ATX_STYLE_REGEX.match(line):
                    header, previous_line_content = self.get_header(kind='atx',
                                                                    previous_line_content=previous_line_content,
                                                                    text=self.clean_atx_content(line.strip()),
                                                                    level=self.detect_atx_level(line))
                else:
                    # Remember this line in case the next one is a setex underline.
                    previous_line_content = line.strip()
                if header:
                    headers.append(header)
        # Second pass: write TOC header, TOC, rule, then the original content.
        output_file_name = os.path.basename(input_file)
        output_file = os.path.join(output_dir, output_file_name)
        with codecs.open(output_file, encoding='utf-8', mode='w') as output_file_handler:
            output_file_handler.write('%s\n\n' % toc_header)
            previous_level = 1
            level_array = [0, 0, 0, 0, 0, 0]  # running counter per header level
            for text, current_level in headers:
                level_array[current_level - 1] += 1
                # Going back to a shallower level resets the deeper counters.
                if current_level < previous_level:
                    for index in range(current_level, len(level_array)):
                        level_array[index] = 0
                output_file_handler.write('%s %d. [%s](#%s)\n' % ('\t' * (current_level - 1),
                                                                  level_array[current_level - 1],
                                                                  text,
                                                                  self.generate_anchor(text)))
                previous_level = current_level
            output_file_handler.write('\n---\n\n')
            # Finally append the unmodified content of the source file.
            with codecs.open(input_file, encoding='utf-8', mode='r') as source_file_handler:
                for line in source_file_handler:
                    output_file_handler.write(line)

    @staticmethod
    def get_header(kind, previous_line_content, text, level):
        """Return ``([text, level], '')`` for a detected header.

        A setex underline following an empty line is a horizontal rule, not
        a header, and yields ``(None, previous_line_content)`` instead.
        """
        if kind == 'setex':
            if previous_line_content == '':  # ignore horizontal rules (also matches the regex)
                return None, previous_line_content
        header = [text, level]
        previous_line_content = ''
        return header, previous_line_content

    @staticmethod
    def generate_anchor(text):
        """Derive a link anchor: lowercase, spaces to hyphens, specials removed."""
        anchor_text = text.lower().replace(' ', '-')
        # Remove every special character except hyphens, but keep the usual
        # unicode characters.  FIX: pattern is now a raw string; the previous
        # non-raw literal relied on '\w' / '\-' being invalid escapes passed
        # through verbatim (a SyntaxWarning on modern Python). Behavior is
        # unchanged.
        return re.sub(r'([^\w\- üöäßéèêáàâóòô]|_)+', '', anchor_text)

    @staticmethod
    def detect_atx_level(line_content):
        """Header level == number of leading '#' == index of the first space."""
        for m, character in enumerate(line_content):
            if character == ' ':
                return m

    @staticmethod
    def clean_atx_content(line_content):
        """Strip leading and trailing '#' runs plus surrounding whitespace."""
        clean_line = line_content
        for character in clean_line:
            if character == '#':
                clean_line = clean_line[1:]
            else:
                break
        for character in reversed(clean_line):
            if character == '#':
                clean_line = clean_line[:-1]
            else:
                break
        return clean_line.strip()
from ctypes import CFUNCTYPE, c_void_p, c_char_p
from objc_util import retain_global, ObjCInstance, UIApplication, c, ns, on_main_thread, sel, ObjCClass
from blackmamba.util.runtime import swizzle
from blackmamba.log import error, info
import blackmamba.system as system
from enum import Enum, IntEnum
from typing import Union, Callable, List
# UIKeyboardImpl is an iOS-only class; on other platforms fall back to None
# so is_in_hardware_keyboard_mode() can degrade gracefully.
if system.IOS:
    _UIKeyboardImpl = ObjCClass('UIKeyboardImpl')
else:
    _UIKeyboardImpl = None
def is_in_hardware_keyboard_mode() -> bool:
    """Check if a hardware keyboard is connected.

    Returns:
        True if a HW keyboard is connected; False otherwise (including on
        platforms where UIKeyboardImpl is unavailable).
    """
    impl = _UIKeyboardImpl
    if not impl:
        return False
    return impl.sharedInstance().isInHardwareKeyboardMode()
UIKeyCommand = ObjCClass('UIKeyCommand')
class UIKeyModifier(IntEnum):
    """Key modifiers.
    Modifiers can be combined like::
    UIKeyModifier.COMMAND | UIKeyModifier.SHIFT
    * `NONE` - No modifier key.
    * `ALPHA_SHIFT` - CapsLock.
    * `SHIFT` - Shift key.
    * `CONTROL` - Control key.
    * `ALTERNATE` - Option key.
    * `COMMAND` - Command key.
    * `NUMERIC_PAD` - Key is on a numeric pad.
    .. note:: Camel case constants deprecated in 1.4.4, will be removed in 2.0.0.
    Use UPPER_CASE variants.
    See also:
    * `register_key_command`
    * `register_key_event_handler`
    """
    NONE = 0
    ALPHA_SHIFT = 1 << 16
    SHIFT = 1 << 17
    CONTROL = 1 << 18
    ALTERNATE = 1 << 19
    COMMAND = 1 << 20
    NUMERIC_PAD = 1 << 21
    # Deprecated camelCase spellings: identical values, so Enum treats them
    # as aliases of the members above (removal planned for 2.0.0).
    none = 0
    alphaShift = 1 << 16
    shift = 1 << 17
    control = 1 << 18
    alternate = 1 << 19
    command = 1 << 20
    numericPad = 1 << 21
class UIEventType(IntEnum):
    """Raw UIEvent type values as compared against ``event.type()`` in the
    key-event hook below (PHYSICAL_KEYBOARD identifies HW key presses)."""
    TOUCHES = 0
    MOTION = 1
    REMOTE_CONTROL = 2
    PRESSES = 3
    PHYSICAL_KEYBOARD = 4
class UIEventSubtype(IntEnum):
    """Raw UIEvent subtype values; only NONE is needed by the key-event hook."""
    NONE = 0
class UIEventKeyCode(IntEnum):
    """Event key codes.
    Not all key codes are listed / included here. Feel free to create pull request with more
    key codes if you'd like to use them.
    * `RIGHT` - Right arrow key.
    * `LEFT` - Left arrow key.
    * `DOWN` - Down arrow key.
    * `UP` - Up arrow key.
    * `ENTER` - Enter / Return key.
    * `SPACE` - Space key.
    * `BACKSPACE` - Backspace key.
    * `ESCAPE` - Escape key.
    * `LEFT_SQUARE_BRACKET` - Left square bracket key.
    * `DOT` - Dot key.
    .. note:: Camel case constants deprecated in 1.4.4, will be removed in 2.0.0.
    Use UPPER_CASE variants.
    See also:
    * `register_key_event_handler`
    """
    RIGHT = 79
    LEFT = 80
    DOWN = 81
    UP = 82
    ENTER = 40
    SPACE = 44
    BACKSPACE = 42
    ESCAPE = 41
    LEFT_SQUARE_BRACKET = 47
    DOT = 55
    # Deprecated camelCase spellings: identical values, so Enum treats them
    # as aliases of the members above (removal planned for 2.0.0).
    right = 79
    left = 80
    down = 81
    up = 82
    enter = 40
    space = 44
    backspace = 42
    escape = 41
    leftSquareBracket = 47
    dot = 55
class UIKeyInput(str, Enum):
    """Enumeration of special key input values.
    * `LEFT_ARROW` - Left arrow key.
    * `RIGHT_ARROW` - Right arrow key.
    * `UP_ARROW` - Up arrow key.
    * `DOWN_ARROW` - Down arrow key.
    .. note:: Camel case constants deprecated in 1.4.4, will be removed in 2.0.0.
    Use UPPER_CASE variants.
    See also:
    * `register_key_command`
    """
    LEFT_ARROW = 'UIKeyInputLeftArrow'
    RIGHT_ARROW = 'UIKeyInputRightArrow'
    UP_ARROW = 'UIKeyInputUpArrow'
    DOWN_ARROW = 'UIKeyInputDownArrow'
    # Deprecated camelCase spellings: identical values, so Enum treats them
    # as aliases of the members above (removal planned for 2.0.0).
    leftArrow = 'UIKeyInputLeftArrow'
    rightArrow = 'UIKeyInputRightArrow'
    upArrow = 'UIKeyInputUpArrow'
    downArrow = 'UIKeyInputDownArrow'
    @property
    def selector_name(self):
        # Fragment used when building handler selector names.
        # NOTE(review): 'LEFT_ARROW'.replace('_', '').title() yields
        # 'Leftarrow', not 'LeftArrow'. This is harmless because both
        # registration and dispatch build selectors through this same
        # property, but confirm before relying on exact selector spelling.
        return self.name.replace('_', '').title()
# Maps punctuation/whitespace key inputs to the CamelCase fragments used by
# _input_selector_name() when building Objective-C selector names.
_UIKeyInputNames = {
    '/': 'Slash',
    '.': 'Dot',
    ',': 'Comma',
    '+': 'Plus',
    '-': 'Minus',
    ' ': 'Space',
    '_': 'Underscore',
    '\t': 'Tab',
    '[': 'LeftSquareBracket',
    ']': 'RightSquareBracket',
    '?': 'QuestionMark'
}
# UIKeyCommand objects created by register_key_command(); appended to the
# result of the original keyCommands() by the swizzled method below.
_key_commands = []
def _blackmamba_keyCommands(_self, _cmd):
    """Swizzled version of keyCommands(). It calls original method to
    get Pythonista shortcuts and then appends custom ones."""
    obj = ObjCInstance(_self)
    # originalkeyCommands() can return None; fall back to an empty list.
    commands = list(obj.originalkeyCommands() or [])
    commands.extend(_key_commands)
    # Return the raw pointer expected by the Objective-C runtime.
    return ns(commands).ptr
def _input_selector_name(input):
    """Return the selector-name fragment for a key command input.

    Args:
        input: Either a `UIKeyInput` member or a string -- a single letter
            or digit, or one of the characters in `_UIKeyInputNames`.

    Returns:
        CamelCase fragment used to build the handler selector name.

    Raises:
        TypeError: If input is neither `UIKeyInput` nor `str`.
        ValueError: If the string has no known selector-name mapping.
    """
    if isinstance(input, UIKeyInput):
        return input.selector_name
    # FIX: was a bare `assert`, which is stripped under `python -O`;
    # raise an explicit error instead.
    if not isinstance(input, str):
        raise TypeError('Unsupported key command input type: {}'.format(type(input)))
    if len(input) == 1:
        input = input.upper()
        if (input >= 'A' and input <= 'Z') or (input >= '0' and input <= '9'):
            return input
    if input not in _UIKeyInputNames:
        raise ValueError('Unsupported key command input: {}'.format(input))
    return _UIKeyInputNames[input]
def _modifier_selector_name(modifier):
    """Return the concatenated CamelCase names of all set modifier flags.

    Args:
        modifier: `UIKeyModifier` member or raw int bitmask.

    Returns:
        E.g. 'ShiftCommand' for SHIFT | COMMAND, '' when no flags are set.
        Concatenation follows the fixed order of the table below.
    """
    # FIX: the original table listed every member twice (camelCase and
    # UPPER_CASE spellings). Those are enum *aliases* -- the same objects --
    # so the duplicate dict keys collapsed to these six entries anyway;
    # the dead duplicates are removed.
    _names = {
        UIKeyModifier.ALPHA_SHIFT: 'AlphaShift',
        UIKeyModifier.SHIFT: 'Shift',
        UIKeyModifier.CONTROL: 'Control',
        UIKeyModifier.ALTERNATE: 'Alternate',
        UIKeyModifier.COMMAND: 'Command',
        UIKeyModifier.NUMERIC_PAD: 'NumericPad',
    }
    if isinstance(modifier, UIKeyModifier):
        modifier = modifier.value
    flags = [
        name
        for mod, name in _names.items()
        if mod.value & modifier
    ]
    if flags:
        return ''.join(flags)
    else:
        return ''
def _key_command_selector_name(input, modifier):
    """Build the Objective-C selector name for a registered key command."""
    modifier_part = _modifier_selector_name(modifier)
    input_part = _input_selector_name(input)
    return 'blackMambaHandleKey' + modifier_part + input_part
def _shortcut_name(input, modifier):
    """Human-readable '<Modifiers> <Input>' label used in log messages."""
    return '{} {}'.format(_modifier_selector_name(modifier),
                          _input_selector_name(input))
@system.Pythonista(appex=False)
@on_main_thread
def _register_key_command(input, modifier_flags, function, title=None):
    # Implementation behind register_key_command(); runs on the main thread.
    # NOTE(review): the Pythonista(appex=False) decorator presumably disables
    # this in app-extension context -- confirm in blackmamba.system.
    # Install the keyCommands() swizzle once, on first registration.
    if not UIApplication.sharedApplication().respondsToSelector_(sel('originalkeyCommands')):
        swizzle('UIApplication', 'keyCommands', _blackmamba_keyCommands)
    selector_name = _key_command_selector_name(input, modifier_flags)
    selector = sel(selector_name)
    obj = UIApplication.sharedApplication()
    info('Registering key command "{}" ({})'.format(
        _shortcut_name(input, modifier_flags),
        title or 'No discoverability title'
    ))
    if not callable(function):
        error('Skipping, provided function is not callable')
        return False
    # One selector per (input, modifier) pair; refuse duplicate registration.
    if obj.respondsToSelector_(selector):
        error('Skipping, method {} already registered'.format(selector_name))
        return False
    def key_command_action(_sel, _cmd, sender):
        function()
    # Create an Objective-C IMP backed by the Python callback; retain it
    # globally so it is not garbage collected while UIKit can still call it.
    IMPTYPE = CFUNCTYPE(None, c_void_p, c_void_p, c_void_p)
    imp = IMPTYPE(key_command_action)
    retain_global(imp)
    cls = c.object_getClass(obj.ptr)
    # Type encoding 'v@:@': returns void, takes (self, _cmd, sender).
    type_encoding = c_char_p('v@:@'.encode('utf-8'))
    did_add = c.class_addMethod(cls, selector, imp, type_encoding)
    if not did_add:
        error('Failed to add key command method {}'.format(selector_name))
        return False
    if isinstance(modifier_flags, UIKeyModifier):
        modifier_flags = modifier_flags.value
    if title:
        kc = UIKeyCommand.keyCommandWithInput_modifierFlags_action_discoverabilityTitle_(
            ns(input), modifier_flags, selector, ns(title))
    else:
        kc = UIKeyCommand.keyCommandWithInput_modifierFlags_action_(ns(input), modifier_flags, selector)
    # Picked up by the swizzled keyCommands() on the next query.
    _key_commands.append(kc)
    return True
def register_key_command(input: Union[str, UIKeyInput], modifier_flags: UIKeyModifier,
                         function: Callable[[], None], title: str=None) -> bool:
    """Register a global key command.

    .. note:: There is no way to unregister a key command once added.

    Args:
        input: A string such as ``A`` or a special `UIKeyInput` value
        modifier_flags: Modifier flags
        function: Function to call when the shortcut fires
        title: Discoverability title

    Returns:
        True if the key command was registered.
    """
    return _register_key_command(input, modifier_flags, function, title)
# Registered KeyEventHandler records consulted by the swizzled handleKeyUIEvent:.
_key_event_handlers = []
class KeyEventHandler(object):
    """Key event handler registration record.

    .. note:: Hold on to this object only to pass it to
        `unregister_key_event_handler`.

    Attributes:
        key_code (int): Raw key code (a `UIEventKeyCode` is coerced to its value)
        modifier (int): Raw modifier bitmask (a `UIKeyModifier` is coerced to its value)
        fn (Callable): Zero-argument callable invoked for matching events
    """
    def __init__(self, key_code: UIEventKeyCode, modifier: UIKeyModifier, fn: Callable[[], None]):
        self.key_code = key_code.value if isinstance(key_code, UIEventKeyCode) else key_code
        self.modifier = modifier.value if isinstance(modifier, UIKeyModifier) else modifier
        self.fn = fn
def _blackmamba_handleKeyUIEvent(_self, _cmd, event):
    """Swizzled handleKeyUIEvent: dispatches registered key event handlers.

    Physical-keyboard events that exactly match a registered handler's key
    code and modifier bitmask are consumed (the original implementation is
    not called); the handler itself fires on key-up only.  All other events
    fall through to the original UIKit implementation.
    """
    e = ObjCInstance(event)
    if e.type() == UIEventType.PHYSICAL_KEYBOARD.value and e.subtype() == UIEventSubtype.NONE.value:
        for h in _key_event_handlers:
            if h.key_code == e._keyCode() and h.modifier == e._modifierFlags():
                # Fire on key release only; the key-down event is swallowed too.
                if not e._isKeyDown():
                    h.fn()
                return
    ObjCInstance(_self).originalhandleKeyUIEvent_(e)
@on_main_thread
def _register_key_event_handler(key_code, func, *, modifier=UIKeyModifier.NONE):
    # Implementation behind register_key_event_handler(); installs the
    # handleKeyUIEvent: swizzle once, then records the handler.
    if not UIApplication.sharedApplication().respondsToSelector_(sel('originalhandleKeyUIEvent:')):
        swizzle('UIApplication', 'handleKeyUIEvent:', _blackmamba_handleKeyUIEvent)
    # Wrap the callback so exceptions raised inside it are caught by
    # system.catch_exceptions instead of propagating into the ObjC runtime.
    @system.catch_exceptions
    def invoke_func():
        func()
    handler = KeyEventHandler(key_code, modifier, invoke_func)
    _key_event_handlers.append(handler)
    return handler
def register_key_event_handler(key_code: UIEventKeyCode, func: Callable[[], None],
                               *, modifier: UIKeyModifier = UIKeyModifier.NONE) -> KeyEventHandler:
    """Register key event handler.

    Usable in dialogs for example. Do not forget to unregister key event
    handler in ``will_close`` function of your ``ui.View``.

    Args:
        key_code: Key code
        func: Function to call
        modifier: Modifier flags

    Returns:
        `KeyEventHandler` to use in `unregister_key_event_handler`.
    """
    return _register_key_event_handler(key_code, func, modifier=modifier)
@on_main_thread
def _unregister_key_event_handler(handler):
    # Removing an unknown handler is deliberately a no-op.
    try:
        _key_event_handlers.remove(handler)
    except ValueError:
        pass
def unregister_key_event_handler(handler: KeyEventHandler):
    """Unregister key event handler.

    It is safe to call this function multiple times with the same handler.
    Handler is silently ignored if it's not registered.

    Args:
        handler: Key event handler to unregister
    """
    _unregister_key_event_handler(handler)
def unregister_key_event_handlers(handlers: List[KeyEventHandler]):
    """Unregister list of key event handlers.

    Convenience function, it just calls `unregister_key_event_handler`
    for every handler.

    Args:
        handlers: List of handlers
    """
    for handler in handlers:
        unregister_key_event_handler(handler)
from objc_util import retain_global, ObjCInstance, UIApplication, c, ns, on_main_thread, sel, ObjCClass
from blackmamba.util.runtime import swizzle
from blackmamba.log import error, info
import blackmamba.system as system
from enum import Enum, IntEnum
from typing import Union, Callable, List
# UIKeyboardImpl is a private UIKit class; it is only resolvable when
# running on iOS (inside Pythonista), hence the platform guard.
if system.IOS:
    _UIKeyboardImpl = ObjCClass('UIKeyboardImpl')
else:
    _UIKeyboardImpl = None
def is_in_hardware_keyboard_mode() -> bool:
    """Check if HW keyboard is connected.

    Returns:
        True if HW keyboard is connected.
    """
    # Off-iOS (no UIKeyboardImpl available) there can be no HW keyboard.
    if not _UIKeyboardImpl:
        return False
    return _UIKeyboardImpl.sharedInstance().isInHardwareKeyboardMode()
# UIKit class used to build keyboard shortcut objects returned by keyCommands.
UIKeyCommand = ObjCClass('UIKeyCommand')
class UIKeyModifier(IntEnum):
    """Key modifiers.

    Modifiers can be combined like::

        UIKeyModifier.COMMAND | UIKeyModifier.SHIFT

    * `NONE` - No modifier key.
    * `ALPHA_SHIFT` - CapsLock.
    * `SHIFT` - Shift key.
    * `CONTROL` - Control key.
    * `ALTERNATE` - Option key.
    * `COMMAND` - Command key.
    * `NUMERIC_PAD` - Key is on a numeric pad.

    .. note:: Camel case constants deprecated in 1.4.4, will be removed in 2.0.0.
        Use UPPER_CASE variants.

    See also:
        * `register_key_command`
        * `register_key_event_handler`
    """
    NONE = 0
    ALPHA_SHIFT = 1 << 16
    SHIFT = 1 << 17
    CONTROL = 1 << 18
    ALTERNATE = 1 << 19
    COMMAND = 1 << 20
    NUMERIC_PAD = 1 << 21
    # Deprecated camelCase names; identical values make them enum aliases
    # of the UPPER_CASE members above.
    none = 0
    alphaShift = 1 << 16
    shift = 1 << 17
    control = 1 << 18
    alternate = 1 << 19
    command = 1 << 20
    numericPad = 1 << 21
class UIEventType(IntEnum):
    """Subset of UIKit's UIEventType values; PHYSICAL_KEYBOARD is the one
    used by the key event dispatch code."""
    TOUCHES = 0
    MOTION = 1
    REMOTE_CONTROL = 2
    PRESSES = 3
    PHYSICAL_KEYBOARD = 4
class UIEventSubtype(IntEnum):
    """UIKit event subtype; only the "no subtype" value is needed here."""
    NONE = 0
class UIEventKeyCode(IntEnum):
    """Event key codes.

    Not all key codes are listed / included here. Feel free to create pull
    request with more key codes if you'd like to use them.

    * `RIGHT` - Right arrow key.
    * `LEFT` - Left arrow key.
    * `DOWN` - Down arrow key.
    * `UP` - Up arrow key.
    * `ENTER` - Enter / Return key.
    * `SPACE` - Space key.
    * `BACKSPACE` - Backspace key.
    * `ESCAPE` - Escape key.
    * `LEFT_SQUARE_BRACKET` - Left square bracket key.
    * `DOT` - Dot key.

    .. note:: Camel case constants deprecated in 1.4.4, will be removed in 2.0.0.
        Use UPPER_CASE variants.

    See also:
        * `register_key_event_handler`
    """
    RIGHT = 79
    LEFT = 80
    DOWN = 81
    UP = 82
    ENTER = 40
    SPACE = 44
    BACKSPACE = 42
    ESCAPE = 41
    LEFT_SQUARE_BRACKET = 47
    DOT = 55
    # Deprecated camelCase aliases of the members above.
    right = 79
    left = 80
    down = 81
    up = 82
    enter = 40
    space = 44
    backspace = 42
    escape = 41
    leftSquareBracket = 47
    dot = 55
class UIKeyInput(str, Enum):
    """Enumeration of special key input values.

    * `LEFT_ARROW` - Left arrow key.
    * `RIGHT_ARROW` - Right arrow key.
    * `UP_ARROW` - Up arrow key.
    * `DOWN_ARROW` - Down arrow key.

    .. note:: Camel case constants deprecated in 1.4.4, will be removed in 2.0.0.
        Use UPPER_CASE variants.

    See also:
        * `register_key_command`
    """
    LEFT_ARROW = 'UIKeyInputLeftArrow'
    RIGHT_ARROW = 'UIKeyInputRightArrow'
    UP_ARROW = 'UIKeyInputUpArrow'
    DOWN_ARROW = 'UIKeyInputDownArrow'
    # Deprecated camelCase aliases of the members above.
    leftArrow = 'UIKeyInputLeftArrow'
    rightArrow = 'UIKeyInputRightArrow'
    upArrow = 'UIKeyInputUpArrow'
    downArrow = 'UIKeyInputDownArrow'

    @property
    def selector_name(self):
        # Canonical member name is the UPPER_CASE one (aliases resolve to
        # it), e.g. 'LEFT_ARROW' -> 'LEFTARROW' -> 'Leftarrow'.
        return self.name.replace('_', '').title()
# Maps single-character key inputs without an alphanumeric selector-safe
# form to the CamelCase fragment used when building selector names.
_UIKeyInputNames = {
    '/': 'Slash',
    '.': 'Dot',
    ',': 'Comma',
    '+': 'Plus',
    '-': 'Minus',
    ' ': 'Space',
    '_': 'Underscore',
    '\t': 'Tab',
    '[': 'LeftSquareBracket',
    ']': 'RightSquareBracket',
    '?': 'QuestionMark'
}
# UIKeyCommand objects appended by _register_key_command and returned by
# the swizzled keyCommands implementation.
_key_commands = []
def _blackmamba_keyCommands(_self, _cmd):
    """Swizzled version of keyCommands(). It calls original method to
    get Pythonista shortcuts and then appends custom ones."""
    obj = ObjCInstance(_self)
    # originalkeyCommands() may return nil; fall back to an empty list.
    commands = list(obj.originalkeyCommands() or [])
    commands.extend(_key_commands)
    return ns(commands).ptr
def _input_selector_name(input):
    """Return the CamelCase fragment of the selector name for *input*.

    Raises:
        ValueError: for inputs with no known selector-safe name.
    """
    if isinstance(input, UIKeyInput):
        return input.selector_name

    assert isinstance(input, str)

    # Single alphanumeric characters are selector-safe as-is (uppercased).
    if len(input) == 1:
        input = input.upper()
        if (input >= 'A' and input <= 'Z') or (input >= '0' and input <= '9'):
            return input

    if input not in _UIKeyInputNames:
        raise ValueError('Unsupported key command input: {}'.format(input))

    return _UIKeyInputNames[input]
def _modifier_selector_name(modifier):
    """Return the concatenated CamelCase names of all set modifier flags.

    Returns an empty string when no known flag is set.
    """
    # camelCase members are enum aliases of the UPPER_CASE ones (same
    # values), so this literal collapses to six distinct keys.
    _names = {
        UIKeyModifier.alphaShift: 'AlphaShift',
        UIKeyModifier.shift: 'Shift',
        UIKeyModifier.control: 'Control',
        UIKeyModifier.alternate: 'Alternate',
        UIKeyModifier.command: 'Command',
        UIKeyModifier.numericPad: 'NumericPad',
        UIKeyModifier.ALPHA_SHIFT: 'AlphaShift',
        UIKeyModifier.SHIFT: 'Shift',
        UIKeyModifier.CONTROL: 'Control',
        UIKeyModifier.ALTERNATE: 'Alternate',
        UIKeyModifier.COMMAND: 'Command',
        UIKeyModifier.NUMERIC_PAD: 'NumericPad'
    }
    if isinstance(modifier, UIKeyModifier):
        modifier = modifier.value
    flags = [
        name
        for mod, name in _names.items()
        if mod.value & modifier
    ]
    if flags:
        return ''.join(flags)
    else:
        return ''
def _key_command_selector_name(input, modifier):
    # Unique per (modifier, input) pair, e.g. 'blackMambaHandleKeyCommandS'.
    return 'blackMambaHandleKey{}{}'.format(
        _modifier_selector_name(modifier),
        _input_selector_name(input)
    )
def _shortcut_name(input, modifier):
    # Human-readable shortcut description for log messages only.
    return '{} {}'.format(
        _modifier_selector_name(modifier),
        _input_selector_name(input)
    )
@system.Pythonista(appex=False)
@on_main_thread
def _register_key_command(input, modifier_flags, function, title=None):
    """Add an ObjC action method to UIApplication and a matching
    UIKeyCommand so *function* runs on the shortcut.

    Returns True on success, False when the input is not callable or the
    selector is already registered.
    """
    # Swizzle UIApplication.keyCommands once, on first registration.
    if not UIApplication.sharedApplication().respondsToSelector_(sel('originalkeyCommands')):
        swizzle('UIApplication', 'keyCommands', _blackmamba_keyCommands)

    selector_name = _key_command_selector_name(input, modifier_flags)
    selector = sel(selector_name)
    obj = UIApplication.sharedApplication()

    info('Registering key command "{}" ({})'.format(
        _shortcut_name(input, modifier_flags),
        title or 'No discoverability title'
    ))

    if not callable(function):
        error('Skipping, provided function is not callable')
        return False

    if obj.respondsToSelector_(selector):
        error('Skipping, method {} already registered'.format(selector_name))
        return False

    def key_command_action(_sel, _cmd, sender):
        function()

    # Keep the IMP alive for the process lifetime; the ObjC runtime does
    # not retain the Python-side function pointer.
    IMPTYPE = CFUNCTYPE(None, c_void_p, c_void_p, c_void_p)
    imp = IMPTYPE(key_command_action)
    retain_global(imp)

    cls = c.object_getClass(obj.ptr)
    type_encoding = c_char_p('v@:@'.encode('utf-8'))
    did_add = c.class_addMethod(cls, selector, imp, type_encoding)
    if not did_add:
        error('Failed to add key command method {}'.format(selector_name))
        return False

    if isinstance(modifier_flags, UIKeyModifier):
        modifier_flags = modifier_flags.value

    if title:
        kc = UIKeyCommand.keyCommandWithInput_modifierFlags_action_discoverabilityTitle_(
            ns(input), modifier_flags, selector, ns(title))
    else:
        kc = UIKeyCommand.keyCommandWithInput_modifierFlags_action_(ns(input), modifier_flags, selector)

    _key_commands.append(kc)
    return True
def register_key_command(input: Union[str, UIKeyInput], modifier_flags: UIKeyModifier,
                         function: Callable[[], None], title: str = None) -> bool:
    """Register key command.

    .. note:: There's no function to unregister key commands.

    Args:
        input: String like ``A`` or special `UIKeyInput` value
        modifier_flags: Modifier flags
        function: Function to call
        title: Discoverability title

    Returns:
        True if key command was registered.
    """
    return _register_key_command(input, modifier_flags, function, title)
# Registered KeyEventHandler objects consulted by the swizzled
# handleKeyUIEvent: implementation.
_key_event_handlers = []
class KeyEventHandler(object):
    """Key event handler object.

    .. note:: Use it only and only for key event deregistration
        (`unregister_key_event_handler`).

    Attributes:
        key_code (UIEventKeyCode): Key code
        modifier (UIKeyModifier): Modifier flags
        fn (Callable): Function to call
    """
    def __init__(self, key_code: UIEventKeyCode, modifier: UIKeyModifier, fn: Callable[[], None]):
        # Store plain integer values so later comparisons against raw
        # ObjC event attributes (ints) work for both enum and int inputs.
        if isinstance(key_code, UIEventKeyCode):
            self.key_code = key_code.value
        else:
            self.key_code = key_code
        if isinstance(modifier, UIKeyModifier):
            self.modifier = modifier.value
        else:
            self.modifier = modifier
        self.fn = fn
def _blackmamba_handleKeyUIEvent(_self, _cmd, event):
    """Swizzled handleKeyUIEvent: implementation.

    Dispatches physical-keyboard events to registered handlers; handlers
    fire on key-up only (``not e._isKeyDown()``).  Unmatched events fall
    through to the original Pythonista implementation.
    """
    e = ObjCInstance(event)
    if e.type() == UIEventType.PHYSICAL_KEYBOARD.value and e.subtype() == UIEventSubtype.NONE.value:
        for h in _key_event_handlers:
            if h.key_code == e._keyCode() and h.modifier == e._modifierFlags():
                if not e._isKeyDown():
                    h.fn()
                # Matched events are consumed even on key-down.
                return
    ObjCInstance(_self).originalhandleKeyUIEvent_(e)
@on_main_thread
def _register_key_event_handler(key_code, func, *, modifier=UIKeyModifier.NONE):
    # Swizzle UIApplication.handleKeyUIEvent: once, on first registration.
    if not UIApplication.sharedApplication().respondsToSelector_(sel('originalhandleKeyUIEvent:')):
        swizzle('UIApplication', 'handleKeyUIEvent:', _blackmamba_handleKeyUIEvent)

    # Wrap the callback so exceptions don't propagate into the ObjC runtime.
    @system.catch_exceptions
    def invoke_func():
        func()

    handler = KeyEventHandler(key_code, modifier, invoke_func)
    _key_event_handlers.append(handler)
    return handler
def register_key_event_handler(key_code: UIEventKeyCode, func: Callable[[], None],
                               *, modifier: UIKeyModifier = UIKeyModifier.NONE) -> KeyEventHandler:
    """Register key event handler.

    Usable in dialogs for example. Do not forget to unregister key event
    handler in ``will_close`` function of your ``ui.View``.

    Args:
        key_code: Key code
        func: Function to call
        modifier: Modifier flags

    Returns:
        `KeyEventHandler` to use in `unregister_key_event_handler`.
    """
    return _register_key_event_handler(key_code, func, modifier=modifier)
@on_main_thread
def _unregister_key_event_handler(handler):
    # Removing an unknown handler is deliberately a no-op.
    try:
        _key_event_handlers.remove(handler)
    except ValueError:
        pass
def unregister_key_event_handler(handler: KeyEventHandler):
    """Unregister key event handler.

    It is safe to call this function multiple times with the same handler.
    Handler is silently ignored if it's not registered.

    Args:
        handler: Key event handler to unregister
    """
    _unregister_key_event_handler(handler)
def unregister_key_event_handlers(handlers: List[KeyEventHandler]):
    """Unregister list of key event handlers.

    Convenience function, it just calls `unregister_key_event_handler`
    for every handler.

    Args:
        handlers: List of handlers
    """
    for handler in handlers:
        unregister_key_event_handler(handler)
import miepy
import numpy as np
from .get_tmatrix import nfmds_solver, tmatrix_solvers
def tmatrix_sphere(radius, wavelength, eps, eps_m, lmax, conducting=False):
    """Compute the T-matrix of a sphere, using regular Mie theory

    Arguments:
        radius      sphere radius
        wavelength  incident wavelength
        eps         particle permittivity
        eps_m       medium permittivity
        lmax        maximum number of multipoles
        conducting  if True, calculate for conducting sphere (default: False)
    """
    rmax = miepy.vsh.lmax_to_rmax(lmax)
    tmatrix = np.zeros([2, rmax, 2, rmax], dtype=complex)
    k_medium = 2*np.pi*eps_m**0.5/wavelength

    # For a sphere the T-matrix is diagonal in the (n, m) mode index.
    for i, n, m in miepy.mode_indices(lmax):
        an, bn = miepy.mie_single.mie_sphere_scattering_coefficients(
            radius, n, eps, 1, eps_m, 1, k_medium, conducting=conducting)
        tmatrix[0, i, 0, i] = an
        tmatrix[1, i, 1, i] = bn

    return tmatrix
def tmatrix_core_shell(radius, thickness, wavelength, eps_core, eps_shell, eps_m, lmax):
    """Compute the T-matrix of a core-shell particle, using regular Mie theory

    Arguments:
        radius      core radius
        thickness   shell thickness
        wavelength  incident wavelength
        eps_core    core permittivity
        eps_shell   shell permittivity
        eps_m       medium permittivity
        lmax        maximum number of multipoles
    """
    rmax = miepy.vsh.lmax_to_rmax(lmax)
    tmatrix = np.zeros([2, rmax, 2, rmax], dtype=complex)
    k_medium = 2*np.pi*eps_m**0.5/wavelength

    particle = miepy.single_mie_core_shell(radius, radius + thickness,
                                           material_in=miepy.dielectric(eps=eps_core),
                                           material_out=miepy.dielectric(eps=eps_shell),
                                           medium=miepy.dielectric(eps=eps_m),
                                           lmax=lmax,
                                           wavelength=wavelength)
    particle.solve()

    # Diagonal T-matrix. NOTE(review): the -1j factor presumably converts
    # the solver's a_n/b_n to the T-matrix sign convention used by
    # tmatrix_sphere — confirm against the miepy Mie coefficient docs.
    for i, n, m in miepy.mode_indices(lmax):
        tmatrix[0, i, 0, i] = -1j*particle.an[0, n-1]
        tmatrix[1, i, 1, i] = -1j*particle.bn[0, n-1]

    return tmatrix
def tmatrix_spheroid(axis_xy, axis_z, wavelength, eps, eps_m, lmax, extended_precision=False, **kwargs):
    """Compute the T-matrix of a spheroid

    Arguments:
        axis_xy     length of semiaxes perpendicular to the axis of symmetry
        axis_z      length of semiaxis along axis of symmetry
        wavelength  incident wavelength
        eps         particle permittivity
        eps_m       medium permittivity
        lmax        maximum number of multipoles
        extended_precision   (bool) whether to use extended precision (default: False)
        kwargs      additional keywords passed to axisymmetric_file function
    """
    # Complex-plane integration is enabled for oblate shapes (axis_xy > axis_z).
    complex_plane = axis_xy > axis_z
    parameters = dict(geometry_type=1, geometry_parameters=[axis_z, axis_xy],
                      wavelength=wavelength, index=eps**0.5, index_m=eps_m**0.5,
                      complex_plane=complex_plane, Nparam=1)
    parameters.update(kwargs)
    return nfmds_solver(lmax, parameters, extended_precision=extended_precision)
def tmatrix_cylinder(radius, height, wavelength, eps, eps_m, lmax, rounded=False, extended_precision=False, **kwargs):
    """Compute the T-matrix of a cylinder, with sharp or rounded (if oblate) edges

    Arguments:
        radius      radius of cylinder
        height      height of cylinder
        wavelength  incident wavelength
        eps         particle permittivity
        eps_m       medium permittivity
        lmax        maximum number of multipoles
        rounded     (bool) if True, and cylinder is oblate, the cylinder's edges are rounded (default: False)
        extended_precision   (bool) whether to use extended precision (default: False)
        kwargs      additional keywords passed to axisymmetric_file function

    Raises:
        ValueError: if ``rounded`` is requested for a prolate cylinder.
    """
    if rounded and height >= 2*radius:
        raise ValueError('prolate cylinders (height >= diameter) cannot be rounded')

    # Complex-plane integration is enabled for oblate shapes (diameter > height).
    complex_plane = 2*radius > height
    geometry_type = 3 if rounded else 2
    parameters = dict(geometry_type=geometry_type, geometry_parameters=[height/2, radius],
                      wavelength=wavelength, index=eps**0.5, index_m=eps_m**0.5,
                      complex_plane=complex_plane, Nparam=3)
    parameters.update(kwargs)
    return nfmds_solver(lmax, parameters, extended_precision=extended_precision)
def tmatrix_ellipsoid(rx, ry, rz, wavelength, eps, eps_m, lmax, extended_precision=False, **kwargs):
    """Compute the T-matrix of an ellipsoid

    Arguments:
        rx,ry,rz    radii of the 3 axes
        wavelength  incident wavelength
        eps         particle permittivity
        eps_m       medium permittivity
        lmax        maximum number of multipoles
        extended_precision   (bool) whether to use extended precision (default: False)
        kwargs      additional keywords passed to the non-axisymmetric solver
    """
    parameters = dict(geometry_type=1, geometry_parameters=[rx, ry, rz],
                      wavelength=wavelength, index=eps**0.5, index_m=eps_m**0.5,
                      Nparam=1, Mrank=lmax, R_symmetry=0)
    parameters.update(kwargs)
    return nfmds_solver(lmax, parameters, solver=tmatrix_solvers.non_axisymmetric,
                        extended_precision=extended_precision)
# NOTE(review): exact duplicate of the tmatrix_ellipsoid definition above;
# this second definition shadows the first. Remove one of them.
def tmatrix_ellipsoid(rx, ry, rz, wavelength, eps, eps_m, lmax, extended_precision=False, **kwargs):
    """Compute the T-matrix of an ellipsoid

    Arguments:
        rx,ry,rz    radii of the 3 axes
        wavelength  incident wavelength
        eps         particle permittivity
        eps_m       medium permittivity
        lmax        maximum number of multipoles
        extended_precision   (bool) whether to use extended precision (default: False)
        kwargs      additional keywords passed to the non-axisymmetric solver
    """
    parameters = dict(geometry_type=1, geometry_parameters=[rx, ry, rz],
                      wavelength=wavelength, index=eps**0.5, index_m=eps_m**0.5,
                      Nparam=1, Mrank=lmax, R_symmetry=0)
    parameters.update(kwargs)
    return nfmds_solver(lmax, parameters, solver=tmatrix_solvers.non_axisymmetric,
                        extended_precision=extended_precision)
def tmatrix_square_prism(side, height, wavelength, eps, eps_m, lmax, extended_precision=False, **kwargs):
    """Compute the T-matrix of a square prism

    Arguments:
        side        side width of the prism
        height      height of the prism
        wavelength  incident wavelength
        eps         particle permittivity
        eps_m       medium permittivity
        lmax        maximum number of multipoles
        extended_precision   (bool) whether to use extended precision (default: False)
        kwargs      additional keywords passed to the non-axisymmetric solver
    """
    parameters = dict(geometry_type=2, geometry_parameters=[side/2, height/2],
                      wavelength=wavelength, index=eps**0.5, index_m=eps_m**0.5,
                      Nparam=6, Mrank=lmax, R_symmetry=0)
    parameters.update(kwargs)
    return nfmds_solver(lmax, parameters, solver=tmatrix_solvers.non_axisymmetric,
                        extended_precision=extended_precision)
def tmatrix_regular_prism(N, side, height, wavelength, eps, eps_m, lmax, extended_precision=False, **kwargs):
    """Compute the T-matrix of a regular N-sided prism

    Arguments:
        N           number of vertices
        side        side width of the prism
        height      height of the prism
        wavelength  incident wavelength
        eps         particle permittivity
        eps_m       medium permittivity
        lmax        maximum number of multipoles
        extended_precision   (bool) whether to use extended precision (default: False)
        kwargs      additional keywords passed to the non-axisymmetric solver
    """
    # R_symmetry=N declares the prism's N-fold rotational symmetry to the solver.
    parameters = dict(geometry_type=3, geometry_parameters=[side/2, height/2],
                      wavelength=wavelength, index=eps**0.5, index_m=eps_m**0.5,
                      Nparam=2, Mrank=lmax, R_symmetry=N)
    parameters.update(kwargs)
    return nfmds_solver(lmax, parameters, solver=tmatrix_solvers.non_axisymmetric,
                        extended_precision=extended_precision)
def tmatrix_sphere_cluster(pos, radii, lmax, lmax_cluster, wavelength, eps, eps_m, extended_precision=False, **kwargs):
    """Compute the combined T-matrix of a cluster of spheres.

    Arguments:
        pos           sphere positions
        radii         sphere radii
        lmax          maximum number of multipoles per sphere
        lmax_cluster  maximum number of multipoles of the cluster T-matrix
        wavelength    incident wavelength
        eps           particle permittivity
        eps_m         medium permittivity
        extended_precision   (bool) whether to use extended precision (default: False)
        kwargs        additional keywords passed to the sphere-cluster solver
    """
    parameters = dict(pos=pos, radii=radii, Nrank_particles=lmax,
                      wavelength=wavelength, index=eps**0.5, index_m=eps_m**0.5)
    parameters.update(kwargs)
    return nfmds_solver(lmax_cluster, parameters, solver=tmatrix_solvers.sphere_cluster,
                        extended_precision=extended_precision)
import numpy as np
from .get_tmatrix import nfmds_solver, tmatrix_solvers
def tmatrix_sphere(radius, wavelength, eps, eps_m, lmax, conducting=False):
    """Compute the T-matrix of a sphere, using regular Mie theory

    Arguments:
        radius      sphere radius
        wavelength  incident wavelength
        eps         particle permittivity
        eps_m       medium permittivity
        lmax        maximum number of multipoles
        conducting  if True, calculate for conducting sphere (default: False)
    """
    rmax = miepy.vsh.lmax_to_rmax(lmax)
    tmatrix = np.zeros([2, rmax, 2, rmax], dtype=complex)
    k_medium = 2*np.pi*eps_m**0.5/wavelength

    # For a sphere the T-matrix is diagonal in the (n, m) mode index.
    for i, n, m in miepy.mode_indices(lmax):
        an, bn = miepy.mie_single.mie_sphere_scattering_coefficients(
            radius, n, eps, 1, eps_m, 1, k_medium, conducting=conducting)
        tmatrix[0, i, 0, i] = an
        tmatrix[1, i, 1, i] = bn

    return tmatrix
def tmatrix_core_shell(radius, thickness, wavelength, eps_core, eps_shell, eps_m, lmax):
    """Compute the T-matrix of a core-shell particle, using regular Mie theory

    Arguments:
        radius      core radius
        thickness   shell thickness
        wavelength  incident wavelength
        eps_core    core permittivity
        eps_shell   shell permittivity
        eps_m       medium permittivity
        lmax        maximum number of multipoles
    """
    rmax = miepy.vsh.lmax_to_rmax(lmax)
    tmatrix = np.zeros([2, rmax, 2, rmax], dtype=complex)
    k_medium = 2*np.pi*eps_m**0.5/wavelength

    particle = miepy.single_mie_core_shell(radius, radius + thickness,
                                           material_in=miepy.dielectric(eps=eps_core),
                                           material_out=miepy.dielectric(eps=eps_shell),
                                           medium=miepy.dielectric(eps=eps_m),
                                           lmax=lmax,
                                           wavelength=wavelength)
    particle.solve()

    # Diagonal T-matrix. NOTE(review): the -1j factor presumably converts
    # the solver's a_n/b_n to the T-matrix sign convention used by
    # tmatrix_sphere — confirm against the miepy Mie coefficient docs.
    for i, n, m in miepy.mode_indices(lmax):
        tmatrix[0, i, 0, i] = -1j*particle.an[0, n-1]
        tmatrix[1, i, 1, i] = -1j*particle.bn[0, n-1]

    return tmatrix
def tmatrix_spheroid(axis_xy, axis_z, wavelength, eps, eps_m, lmax, extended_precision=False, **kwargs):
    """Compute the T-matrix of a spheroid

    Arguments:
        axis_xy     length of semiaxes perpendicular to the axis of symmetry
        axis_z      length of semiaxis along axis of symmetry
        wavelength  incident wavelength
        eps         particle permittivity
        eps_m       medium permittivity
        lmax        maximum number of multipoles
        extended_precision   (bool) whether to use extended precision (default: False)
        kwargs      additional keywords passed to axisymmetric_file function
    """
    # Complex-plane integration is enabled for oblate shapes (axis_xy > axis_z).
    complex_plane = axis_xy > axis_z
    parameters = dict(geometry_type=1, geometry_parameters=[axis_z, axis_xy],
                      wavelength=wavelength, index=eps**0.5, index_m=eps_m**0.5,
                      complex_plane=complex_plane, Nparam=1)
    parameters.update(kwargs)
    return nfmds_solver(lmax, parameters, extended_precision=extended_precision)
def tmatrix_cylinder(radius, height, wavelength, eps, eps_m, lmax, rounded=False, extended_precision=False, **kwargs):
    """Compute the T-matrix of a cylinder, with sharp or rounded (if oblate) edges

    Arguments:
        radius      radius of cylinder
        height      height of cylinder
        wavelength  incident wavelength
        eps         particle permittivity
        eps_m       medium permittivity
        lmax        maximum number of multipoles
        rounded     (bool) if True, and cylinder is oblate, the cylinder's edges are rounded (default: False)
        extended_precision   (bool) whether to use extended precision (default: False)
        kwargs      additional keywords passed to axisymmetric_file function

    Raises:
        ValueError: if ``rounded`` is requested for a prolate cylinder.
    """
    if rounded and height >= 2*radius:
        raise ValueError('prolate cylinders (height >= diameter) cannot be rounded')

    # Complex-plane integration is enabled for oblate shapes (diameter > height).
    complex_plane = 2*radius > height
    geometry_type = 3 if rounded else 2
    parameters = dict(geometry_type=geometry_type, geometry_parameters=[height/2, radius],
                      wavelength=wavelength, index=eps**0.5, index_m=eps_m**0.5,
                      complex_plane=complex_plane, Nparam=3)
    parameters.update(kwargs)
    return nfmds_solver(lmax, parameters, extended_precision=extended_precision)
def tmatrix_ellipsoid(rx, ry, rz, wavelength, eps, eps_m, lmax, extended_precision=False, **kwargs):
    """Compute the T-matrix of an ellipsoid

    Arguments:
        rx,ry,rz    radii of the 3 axes
        wavelength  incident wavelength
        eps         particle permittivity
        eps_m       medium permittivity
        lmax        maximum number of multipoles
        extended_precision   (bool) whether to use extended precision (default: False)
        kwargs      additional keywords passed to the non-axisymmetric solver
    """
    parameters = dict(geometry_type=1, geometry_parameters=[rx, ry, rz],
                      wavelength=wavelength, index=eps**0.5, index_m=eps_m**0.5,
                      Nparam=1, Mrank=lmax, R_symmetry=0)
    parameters.update(kwargs)
    return nfmds_solver(lmax, parameters, solver=tmatrix_solvers.non_axisymmetric,
                        extended_precision=extended_precision)
# NOTE(review): exact duplicate of the tmatrix_ellipsoid definition above;
# this second definition shadows the first. Remove one of them.
def tmatrix_ellipsoid(rx, ry, rz, wavelength, eps, eps_m, lmax, extended_precision=False, **kwargs):
    """Compute the T-matrix of an ellipsoid

    Arguments:
        rx,ry,rz    radii of the 3 axes
        wavelength  incident wavelength
        eps         particle permittivity
        eps_m       medium permittivity
        lmax        maximum number of multipoles
        extended_precision   (bool) whether to use extended precision (default: False)
        kwargs      additional keywords passed to the non-axisymmetric solver
    """
    parameters = dict(geometry_type=1, geometry_parameters=[rx, ry, rz],
                      wavelength=wavelength, index=eps**0.5, index_m=eps_m**0.5,
                      Nparam=1, Mrank=lmax, R_symmetry=0)
    parameters.update(kwargs)
    return nfmds_solver(lmax, parameters, solver=tmatrix_solvers.non_axisymmetric,
                        extended_precision=extended_precision)
def tmatrix_square_prism(side, height, wavelength, eps, eps_m, lmax, extended_precision=False, **kwargs):
    """Compute the T-matrix of a square prism

    Arguments:
        side        side width of the prism
        height      height of the prism
        wavelength  incident wavelength
        eps         particle permittivity
        eps_m       medium permittivity
        lmax        maximum number of multipoles
        extended_precision   (bool) whether to use extended precision (default: False)
        kwargs      additional keywords passed to the non-axisymmetric solver
    """
    parameters = dict(geometry_type=2, geometry_parameters=[side/2, height/2],
                      wavelength=wavelength, index=eps**0.5, index_m=eps_m**0.5,
                      Nparam=6, Mrank=lmax, R_symmetry=0)
    parameters.update(kwargs)
    return nfmds_solver(lmax, parameters, solver=tmatrix_solvers.non_axisymmetric,
                        extended_precision=extended_precision)
def tmatrix_regular_prism(N, side, height, wavelength, eps, eps_m, lmax, extended_precision=False, **kwargs):
    """Compute the T-matrix of a regular N-sided prism

    Arguments:
        N           number of vertices
        side        side width of the prism
        height      height of the prism
        wavelength  incident wavelength
        eps         particle permittivity
        eps_m       medium permittivity
        lmax        maximum number of multipoles
        extended_precision   (bool) whether to use extended precision (default: False)
        kwargs      additional keywords passed to the non-axisymmetric solver
    """
    # R_symmetry=N declares the prism's N-fold rotational symmetry to the solver.
    parameters = dict(geometry_type=3, geometry_parameters=[side/2, height/2],
                      wavelength=wavelength, index=eps**0.5, index_m=eps_m**0.5,
                      Nparam=2, Mrank=lmax, R_symmetry=N)
    parameters.update(kwargs)
    return nfmds_solver(lmax, parameters, solver=tmatrix_solvers.non_axisymmetric,
                        extended_precision=extended_precision)
def tmatrix_sphere_cluster(pos, radii, lmax, lmax_cluster, wavelength, eps, eps_m, extended_precision=False, **kwargs):
    """Compute the combined T-matrix of a cluster of spheres.

    Arguments:
        pos           sphere positions
        radii         sphere radii
        lmax          maximum number of multipoles per sphere
        lmax_cluster  maximum number of multipoles of the cluster T-matrix
        wavelength    incident wavelength
        eps           particle permittivity
        eps_m         medium permittivity
        extended_precision   (bool) whether to use extended precision (default: False)
        kwargs        additional keywords passed to the sphere-cluster solver
    """
    parameters = dict(pos=pos, radii=radii, Nrank_particles=lmax,
                      wavelength=wavelength, index=eps**0.5, index_m=eps_m**0.5)
    parameters.update(kwargs)
    return nfmds_solver(lmax_cluster, parameters, solver=tmatrix_solvers.sphere_cluster,
                        extended_precision=extended_precision)