gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
#!/usr/bin/env python
from __future__ import print_function
"""
test_with_logger.py
"""
import os
# Scratch directory named after this script (extension stripped); all test
# files live here so tearDown can remove everything in one rmtree.
tempdir = os.path.relpath(os.path.abspath(os.path.splitext(__file__)[0])) + "/"
input_file_names = [os.path.join(tempdir, "%d.1" % fn) for fn in range(20)]
final_file_name = os.path.join(tempdir, "final.result")
try:
    os.makedirs(tempdir)
except OSError:
    # Directory already exists (or is not creatable here); the unittest
    # fixture recreates it in setUp(), so best-effort is fine.  A bare
    # "except:" previously swallowed everything, including SystemExit.
    pass
import sys
# add grandparent to search path for testing
grandparent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
sys.path.insert(0, grandparent_dir)
# module name = script name without extension
module_name = os.path.splitext(os.path.basename(__file__))[0]
# funky code to import by file name
import ruffus
from ruffus import originate, transform, suffix, merge, pipeline_run, Pipeline
from ruffus.proxy_logger import make_shared_logger_and_proxy, setup_std_shared_logger
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# imports
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
import unittest
import re
import logging
import sys
import os
import json
import shutil
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Tasks
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
def write_input_output_filenames_to_output(infiles, outfile, logger_proxy, logging_mutex):
    """
    Helper function: writes the contents of every input file to *outfile*,
    followed by an indented "infiles -> outfile" trailer line, and logs the
    same trailer string through the shared logger.

    Parameters
    ----------
    infiles : None | str | list of str
        Input file path(s). ``None`` means no inputs; a bare string is
        treated as a one-element list.
    outfile : str
        Output file path; always created/truncated.
    logger_proxy, logging_mutex
        Pair returned by ``make_shared_logger_and_proxy``; the mutex
        serialises logger access across worker processes.
    """
    with open(outfile, "w") as oo:
        # Build the trailer string *before* infiles is normalised to a list,
        # so it reflects exactly what the caller passed in.
        fn_str = "%s -> %s" % (infiles, outfile)
        # Normalise: None -> [], single string -> [string]
        if infiles is None:
            infiles = []
        if not isinstance(infiles, list):
            infiles = [infiles]
        # Deepest leading-whitespace depth seen in the copied content.
        # Starts at -2 so that with no input lines the trailer lands at
        # column 0 (the "+ 2" below cancels it out).
        max_white_space = -2
        # Copy the content of every input file verbatim
        for infile in infiles:
            with open(infile) as ii:
                for line in ii:
                    oo.write(line)
                    # max(a, b) instead of max([a, b]): no throwaway list
                    max_white_space = max(max_white_space,
                                          len(line) - len(line.lstrip()))
        # Indent the trailer two spaces deeper than the deepest copied line
        oo.write(" " * (max_white_space + 2) + fn_str + "\n")
    with logging_mutex:
        logger_proxy.info(fn_str)
#
#   Make logger
#
#import logging
# Configuration for the shared rotating file logger used by every task;
# consumed by setup_std_shared_logger via make_shared_logger_and_proxy.
args=dict()
args["file_name"] = os.path.join(tempdir, module_name + ".log")
args["level"] = logging.DEBUG
args["rotating"] = True
args["maxBytes"]=20000
args["backupCount"]=10
args["formatter"]="%(asctime)s - %(name)s - %(levelname)6s - %(message)s"
# Python 3.2 only: creating the shared logger at import time deadlocks when
# this file is loaded as a module (not __main__) -- see message below.
if sys.version_info[0] == 3 and sys.version_info[1] == 2 and __name__ != "__main__":
    print (
"""
888888888888888888888888888888888888888888888888888888888888888888888888888
ERROR:
This unit test can not be run as a python module (python -m unittest xxx)
due to the interaction of bugs / misfeatures in the multiprocessing module
and python3.2
See http://bugs.python.org/issue15914
http://bugs.python.org/issue9573
In detail:
Making a shared logger calls code within the multiprocessing module.
This in turn tries to import the hmac module inside deliver_challenge().
This hangs if it happens after a module fork.
The only way around this is to only make calls to multiprocessing
(i.e. make_shared_logger_and_proxy(...)) after the import phase of
module loading.
This python bug will be triggered if your make_shared_logger_and_proxy()
call is at global scope in a module (i.e. not __main__) and only for
python version 3.2
888888888888888888888888888888888888888888888888888888888888888888888888888
""")
    sys.exit()
# Shared logger proxy plus the inter-process mutex guarding it; every task
# must hold the mutex while logging.
(logger_proxy,
 logging_mutex) = make_shared_logger_and_proxy (setup_std_shared_logger,
                                                "my_logger", args)
#
#    task1
#
@originate(input_file_names, logger_proxy, logging_mutex)
def task1(outfile, logger_proxy, logging_mutex):
    """Pipeline head: create each starting ``*.1`` file from nothing."""
    write_input_output_filenames_to_output(None, outfile, logger_proxy, logging_mutex)
#
#    task2
#
@transform(task1, suffix(".1"), ".2", logger_proxy, logging_mutex)
def task2(infile, outfile, logger_proxy, logging_mutex):
    """Second task: turn each ``*.1`` into a ``*.2``."""
    write_input_output_filenames_to_output(infile, outfile, logger_proxy, logging_mutex)
#
#    task3
#
@transform(task2, suffix(".2"), ".3", logger_proxy, logging_mutex)
def task3(infile, outfile, logger_proxy, logging_mutex):
    """
    Third task: turn each ``*.2`` into a ``*.3``.
    """
    write_input_output_filenames_to_output(infile, outfile, logger_proxy, logging_mutex)
#
#    task4
#
@merge(task3, final_file_name, logger_proxy, logging_mutex)
def task4(infile, outfile, logger_proxy, logging_mutex):
    """
    Fourth task: merge all ``*.3`` files into the single final result.
    """
    write_input_output_filenames_to_output(infile, outfile, logger_proxy, logging_mutex)
class Test_ruffus(unittest.TestCase):
    """Runs the four-task pipeline twice: once via the module-scope
    decorators, once via the new-style ``Pipeline`` object API."""

    def setUp(self):
        # Start each test from an empty scratch directory.
        self.tearDown()
        try:
            os.makedirs(tempdir)
            #sys.stderr.write("    Created %s\n" % tempdir)
        except:
            # Best-effort: directory may already exist.  NOTE(review): a
            # bare except also hides real failures -- consider OSError.
            pass

    def tearDown(self):
        try:
            shutil.rmtree(tempdir)
            #sys.stderr.write("    Removed %s\n" % tempdir)
            pass
        except:
            # Best-effort cleanup; nothing to do if the tree is gone.
            pass

    def test_simpler (self):
        # Decorator-style pipeline defined at module scope ("main").
        pipeline_run(multiprocess = 500, verbose = 0, pipeline= "main")

    def test_newstyle_simpler (self):
        # Same task functions wired up through the object-oriented API.
        test_pipeline = Pipeline("test")
        test_pipeline.originate(task1, input_file_names, extras = [logger_proxy, logging_mutex])
        test_pipeline.transform(task2, task1, suffix(".1"), ".2", extras = [logger_proxy, logging_mutex])
        test_pipeline.transform(task3, task2, suffix(".2"), ".3", extras = [logger_proxy, logging_mutex])
        test_pipeline.merge(task4, task3, final_file_name, extras = [logger_proxy, logging_mutex])
        #test_pipeline.merge(task4, task3, final_file_name, extras = {"logger_proxy": logger_proxy, "logging_mutex": logging_mutex})
        test_pipeline.run(multiprocess = 500, verbose = 0)


if __name__ == '__main__':
    unittest.main()
| |
"""
Unit tests for optimization routines from optimize.py
Authors:
Ed Schofield, Nov 2005
Andrew Straw, April 2008
To run it in its simplest form::
nosetests test_optimize.py
"""
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy.testing import (assert_raises, assert_allclose, assert_equal,
assert_, TestCase, run_module_suite, dec,
assert_almost_equal)
from scipy import optimize
def test_check_grad():
    # check_grad() should closely reproduce the analytic derivative of the
    # logistic (sigmoid) function.
    def logit(x):
        return 1 / (1 + np.exp(-x))

    def der_logit(x):
        return np.exp(-x) / (1 + np.exp(-x))**2

    x0 = np.array([1.5])

    assert_almost_equal(optimize.check_grad(logit, der_logit, x0), 0)
    assert_almost_equal(
        optimize.check_grad(logit, der_logit, x0, epsilon=1e-6), 0)

    # A coarse finite-difference step must visibly degrade the estimate,
    # proving the epsilon parameter is actually honoured.
    err = abs(optimize.check_grad(logit, der_logit, x0, epsilon=1e-1) - 0)
    assert_(err > 1e-7)
class TestOptimize(object):
    """ Test case for a simple constrained entropy maximization problem
    (the machine translation example of Berger et al in
    Computational Linguistics, vol 22, num 1, pp 39--72, 1996.)
    """
    def setUp(self):
        # Feature matrix F, constraint vector K and the known optimum; the
        # call-count and trace bookkeeping is reset so each test starts
        # from a clean slate.
        self.F = np.array([[1,1,1],[1,1,0],[1,0,1],[1,0,0],[1,0,0]])
        self.K = np.array([1., 0.3, 0.5])
        self.startparams = np.zeros(3, np.float64)
        self.solution = np.array([0., -0.524869316, 0.487525860])
        self.maxiter = 1000
        self.funccalls = 0
        self.gradcalls = 0
        self.trace = []

    def func(self, x):
        # Objective: log-partition function minus the linear term.  Counts
        # every call and records the iterate so the tests below can pin
        # solver behaviour; raises if a solver runs away.
        self.funccalls += 1
        if self.funccalls > 6000:
            raise RuntimeError("too many iterations in optimization routine")
        log_pdot = np.dot(self.F, x)
        logZ = np.log(sum(np.exp(log_pdot)))
        f = logZ - np.dot(self.K, x)
        self.trace.append(x)
        return f

    def grad(self, x):
        # Analytic gradient of func(); also counted.
        self.gradcalls += 1
        log_pdot = np.dot(self.F, x)
        logZ = np.log(sum(np.exp(log_pdot)))
        p = np.exp(log_pdot - logZ)
        return np.dot(self.F.transpose(), p) - self.K

    def hess(self, x):
        # Analytic Hessian of func().
        log_pdot = np.dot(self.F, x)
        logZ = np.log(sum(np.exp(log_pdot)))
        p = np.exp(log_pdot - logZ)
        return np.dot(self.F.T,
                      np.dot(np.diag(p), self.F - np.dot(self.F.T, p)))

    def hessp(self, x, p):
        # Hessian-vector product built on hess().
        return np.dot(self.hess(x), p)

    def test_cg(self, use_wrapper=False):
        """ conjugate gradient optimization routine """
        if use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': False,
                    'return_all': False}
            res = optimize.minimize(self.func, self.startparams, args=(),
                                    method='CG', jac=self.grad,
                                    options=opts)
            params, fopt, func_calls, grad_calls, warnflag = \
                res['x'], res['fun'], res['nfev'], res['njev'], res['status']
        else:
            retval = optimize.fmin_cg(self.func, self.startparams, self.grad, (),
                                      maxiter=self.maxiter,
                                      full_output=True, disp=False, retall=False)
            (params, fopt, func_calls, grad_calls, warnflag) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 9, self.funccalls)
        assert_(self.gradcalls == 7, self.gradcalls)

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[2:4],
                        [[0, -0.5, 0.5],
                         [0, -5.05700028e-01, 4.95985862e-01]],
                        atol=1e-14, rtol=1e-7)

    def test_bfgs(self, use_wrapper=False):
        """ Broyden-Fletcher-Goldfarb-Shanno optimization routine """
        if use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': False,
                    'return_all': False}
            res = optimize.minimize(self.func, self.startparams,
                                    jac=self.grad, method='BFGS', args=(),
                                    options=opts)
            params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag = \
                res['x'], res['fun'], res['jac'], res['hess_inv'], \
                res['nfev'], res['njev'], res['status']
        else:
            retval = optimize.fmin_bfgs(self.func, self.startparams, self.grad,
                                        args=(), maxiter=self.maxiter,
                                        full_output=True, disp=False, retall=False)
            (params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 10, self.funccalls)
        assert_(self.gradcalls == 8, self.gradcalls)

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[6:8],
                        [[0, -5.25060743e-01, 4.87748473e-01],
                         [0, -5.24885582e-01, 4.87530347e-01]],
                        atol=1e-14, rtol=1e-7)

    def test_bfgs_nan(self):
        """Test corner case where nan is fed to optimizer.  See #1542."""
        func = lambda x: x
        fprime = lambda x: np.ones_like(x)
        x0 = [np.nan]
        with np.errstate(over='ignore', invalid='ignore'):
            x = optimize.fmin_bfgs(func, x0, fprime, disp=False)
            assert_(np.isnan(func(x)))

    def test_bfgs_numerical_jacobian(self):
        """ BFGS with numerical jacobian and a vector epsilon parameter """
        # define the epsilon parameter using a random vector
        epsilon = np.sqrt(np.finfo(float).eps) * np.random.rand(len(self.solution))

        params = optimize.fmin_bfgs(self.func, self.startparams,
                                    epsilon=epsilon, args=(),
                                    maxiter=self.maxiter, disp=False)

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

    def test_bfgs_infinite(self, use_wrapper=False):
        """Test corner case where -Inf is the minimum.  See #1494."""
        func = lambda x: -np.e**-x
        fprime = lambda x: -func(x)
        x0 = [0]
        olderr = np.seterr(over='ignore')
        try:
            if use_wrapper:
                opts = {'disp': False}
                x = optimize.minimize(func, x0, jac=fprime, method='BFGS',
                                      args=(), options=opts)['x']
            else:
                x = optimize.fmin_bfgs(func, x0, fprime, disp=False)
            assert_(not np.isfinite(func(x)))
        finally:
            np.seterr(**olderr)

    def test_bfgs_gh_2169(self):
        # Discontinuous objective (huge penalty for x < 0); BFGS should
        # still find the smooth minimum at x == 1.
        def f(x):
            if x < 0:
                return 1.79769313e+308
            else:
                return x + 1./x
        xs = optimize.fmin_bfgs(f, [10.], disp=False)
        assert_allclose(xs, 1.0, rtol=1e-4, atol=1e-4)

    def test_powell(self, use_wrapper=False):
        """ Powell (direction set) optimization routine
        """
        if use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': False,
                    'return_all': False}
            res = optimize.minimize(self.func, self.startparams, args=(),
                                    method='Powell', options=opts)
            params, fopt, direc, numiter, func_calls, warnflag = \
                res['x'], res['fun'], res['direc'], res['nit'], \
                res['nfev'], res['status']
        else:
            retval = optimize.fmin_powell(self.func, self.startparams,
                                          args=(), maxiter=self.maxiter,
                                          full_output=True, disp=False, retall=False)
            (params, fopt, direc, numiter, func_calls, warnflag) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        #
        # However, some leeway must be added: the exact evaluation
        # count is sensitive to numerical error, and floating-point
        # computations are not bit-for-bit reproducible across
        # machines, and when using e.g. MKL, data alignment
        # etc. affect the rounding error.
        #
        assert_(self.funccalls <= 116 + 20, self.funccalls)
        assert_(self.gradcalls == 0, self.gradcalls)

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[34:39],
                        [[0.72949016, -0.44156936, 0.47100962],
                         [0.72949016, -0.44156936, 0.48052496],
                         [1.45898031, -0.88313872, 0.95153458],
                         [0.72949016, -0.44156936, 0.47576729],
                         [1.72949016, -0.44156936, 0.47576729]],
                        atol=1e-14, rtol=1e-7)

    def test_neldermead(self, use_wrapper=False):
        """ Nelder-Mead simplex algorithm
        """
        if use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': False,
                    'return_all': False}
            res = optimize.minimize(self.func, self.startparams, args=(),
                                    method='Nelder-mead', options=opts)
            params, fopt, numiter, func_calls, warnflag = \
                res['x'], res['fun'], res['nit'], res['nfev'], \
                res['status']
        else:
            retval = optimize.fmin(self.func, self.startparams,
                                   args=(), maxiter=self.maxiter,
                                   full_output=True, disp=False, retall=False)
            (params, fopt, numiter, func_calls, warnflag) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 167, self.funccalls)
        assert_(self.gradcalls == 0, self.gradcalls)

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[76:78],
                        [[0.1928968, -0.62780447, 0.35166118],
                         [0.19572515, -0.63648426, 0.35838135]],
                        atol=1e-14, rtol=1e-7)

    def test_ncg(self, use_wrapper=False):
        """ line-search Newton conjugate gradient optimization routine
        """
        if use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': False,
                    'return_all': False}
            retval = optimize.minimize(self.func, self.startparams,
                                       method='Newton-CG', jac=self.grad,
                                       args=(), options=opts)['x']
        else:
            retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
                                       args=(), maxiter=self.maxiter,
                                       full_output=False, disp=False,
                                       retall=False)

        params = retval
        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 7, self.funccalls)
        assert_(self.gradcalls <= 22, self.gradcalls)  # 0.13.0
        #assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0
        #assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0
        #assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[3:5],
                        [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
                         [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
                        atol=1e-6, rtol=1e-7)

    def test_ncg_hess(self, use_wrapper=False):
        """ Newton conjugate gradient with Hessian """
        if use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': False,
                    'return_all': False}
            retval = optimize.minimize(self.func, self.startparams,
                                       method='Newton-CG', jac=self.grad,
                                       hess=self.hess,
                                       args=(), options=opts)['x']
        else:
            retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
                                       fhess=self.hess,
                                       args=(), maxiter=self.maxiter,
                                       full_output=False, disp=False,
                                       retall=False)

        params = retval
        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 7, self.funccalls)
        assert_(self.gradcalls <= 18, self.gradcalls)  # 0.9.0
        # assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0
        # assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[3:5],
                        [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
                         [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
                        atol=1e-6, rtol=1e-7)

    def test_ncg_hessp(self, use_wrapper=False):
        """ Newton conjugate gradient with Hessian times a vector p """
        if use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': False,
                    'return_all': False}
            retval = optimize.minimize(self.func, self.startparams,
                                       method='Newton-CG', jac=self.grad,
                                       hessp=self.hessp,
                                       args=(), options=opts)['x']
        else:
            retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
                                       fhess_p=self.hessp,
                                       args=(), maxiter=self.maxiter,
                                       full_output=False, disp=False,
                                       retall=False)

        params = retval
        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 7, self.funccalls)
        assert_(self.gradcalls <= 18, self.gradcalls)  # 0.9.0
        # assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0
        # assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[3:5],
                        [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
                         [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
                        atol=1e-6, rtol=1e-7)

    def test_l_bfgs_b(self):
        """ limited-memory bound-constrained BFGS algorithm
        """
        retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
                                        self.grad, args=(),
                                        maxiter=self.maxiter)

        (params, fopt, d) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 7, self.funccalls)
        assert_(self.gradcalls == 5, self.gradcalls)

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[3:5],
                        [[0., -0.52489628, 0.48753042],
                         [0., -0.52489628, 0.48753042]],
                        atol=1e-14, rtol=1e-7)

    def test_l_bfgs_b_numjac(self):
        """ L-BFGS-B with numerical jacobian """
        retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
                                        approx_grad=True,
                                        maxiter=self.maxiter)

        (params, fopt, d) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

    def test_l_bfgs_b_funjac(self):
        """ L-BFGS-B with combined objective function and jacobian """
        def fun(x):
            return self.func(x), self.grad(x)

        retval = optimize.fmin_l_bfgs_b(fun, self.startparams,
                                        maxiter=self.maxiter)

        (params, fopt, d) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

    def test_minimize_l_bfgs_b(self):
        """ Minimize with L-BFGS-B method """
        opts = {'disp': False, 'maxiter': self.maxiter}
        r = optimize.minimize(self.func, self.startparams,
                              method='L-BFGS-B', jac=self.grad,
                              options=opts)
        assert_allclose(self.func(r.x), self.func(self.solution),
                        atol=1e-6)
        # approximate jacobian
        ra = optimize.minimize(self.func, self.startparams,
                               method='L-BFGS-B', options=opts)
        assert_allclose(self.func(ra.x), self.func(self.solution),
                        atol=1e-6)
        # check that function evaluations in approximate jacobian are counted
        assert_(ra.nfev > r.nfev)

    def test_minimize_l_bfgs_b_ftol(self):
        # Check that the `ftol` parameter in l_bfgs_b works as expected:
        # tighter tolerances must give monotonically better objectives.
        v0 = None
        for tol in [1e-1, 1e-4, 1e-7, 1e-10]:
            opts = {'disp': False, 'maxiter': self.maxiter, 'ftol': tol}
            sol = optimize.minimize(self.func, self.startparams,
                                    method='L-BFGS-B', jac=self.grad,
                                    options=opts)
            v = self.func(sol.x)

            if v0 is None:
                v0 = v
            else:
                assert_(v < v0)

            assert_allclose(v, self.func(self.solution), rtol=tol)

    def test_custom(self):
        # This function comes from the documentation example:
        # a user-supplied coordinate-descent minimizer passed as
        # the `method` callable of optimize.minimize.
        def custmin(fun, x0, args=(), maxfev=None, stepsize=0.1,
                    maxiter=100, callback=None, **options):
            bestx = x0
            besty = fun(x0)
            funcalls = 1
            niter = 0
            improved = True
            stop = False

            while improved and not stop and niter < maxiter:
                improved = False
                niter += 1
                for dim in range(np.size(x0)):
                    for s in [bestx[dim] - stepsize, bestx[dim] + stepsize]:
                        testx = np.copy(bestx)
                        testx[dim] = s
                        testy = fun(testx, *args)
                        funcalls += 1
                        if testy < besty:
                            besty = testy
                            bestx = testx
                            improved = True
                if callback is not None:
                    callback(bestx)
                if maxfev is not None and funcalls >= maxfev:
                    stop = True
                    break

            return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter,
                                           nfev=funcalls, success=(niter > 1))

        x0 = [1.35, 0.9, 0.8, 1.1, 1.2]
        res = optimize.minimize(optimize.rosen, x0, method=custmin,
                                options=dict(stepsize=0.05))
        assert_allclose(res.x, 1.0, rtol=1e-4, atol=1e-4)

    def test_minimize(self):
        """Tests for the minimize wrapper."""
        # Re-run each solver test through optimize.minimize; setUp() is
        # called before each to reset the call counters and trace.
        self.setUp()
        self.test_bfgs(True)
        self.setUp()
        self.test_bfgs_infinite(True)
        self.setUp()
        self.test_cg(True)
        self.setUp()
        self.test_ncg(True)
        self.setUp()
        self.test_ncg_hess(True)
        self.setUp()
        self.test_ncg_hessp(True)
        self.setUp()
        self.test_neldermead(True)
        self.setUp()
        self.test_powell(True)
        self.setUp()
        self.test_custom()

    def test_minimize_tol_parameter(self):
        # Check that the minimize() tol= argument does something
        def func(z):
            x, y = z
            return x**2*y**2 + x**4 + 1

        def dfunc(z):
            x, y = z
            return np.array([2*x*y**2 + 4*x**3, 2*x**2*y])

        for method in ['nelder-mead', 'powell', 'cg', 'bfgs',
                       'newton-cg', 'anneal', 'l-bfgs-b', 'tnc',
                       'cobyla', 'slsqp']:
            # Derivative-free methods get no jacobian.
            if method in ('nelder-mead', 'powell', 'anneal', 'cobyla'):
                jac = None
            else:
                jac = dfunc

            with warnings.catch_warnings():
                # suppress deprecation warning for 'anneal'
                warnings.filterwarnings('ignore', category=DeprecationWarning)
                sol1 = optimize.minimize(func, [1,1], jac=jac, tol=1e-10,
                                         method=method)
                sol2 = optimize.minimize(func, [1,1], jac=jac, tol=1.0,
                                         method=method)
            # Tighter tolerance must give a strictly better solution.
            assert_(func(sol1.x) < func(sol2.x),
                    "%s: %s vs. %s" % (method, func(sol1.x), func(sol2.x)))

    def test_no_increase(self):
        # Check that the solver doesn't return a value worse than the
        # initial point.
        def func(x):
            return (x - 1)**2

        def bad_grad(x):
            # purposefully invalid gradient function, simulates a case
            # where line searches start failing
            return 2*(x - 1) * (-1) - 2

        def check(method):
            x0 = np.array([2.0])
            f0 = func(x0)
            jac = bad_grad
            if method in ['nelder-mead', 'powell', 'anneal', 'cobyla']:
                jac = None
            sol = optimize.minimize(func, x0, jac=jac, method=method,
                                    options=dict(maxiter=20))
            assert_equal(func(sol.x), sol.fun)
            # NOTE(review): decorating a no-op lambda and calling it has no
            # effect as written; presumably the intent was to mark 'slsqp'
            # as a known failure for the following assertion -- confirm.
            dec.knownfailureif(method == 'slsqp', "SLSQP returns slightly worse")(lambda: None)()
            assert_(func(sol.x) <= f0)

        # nose-style generator test: yields one check per method.
        for method in ['nelder-mead', 'powell', 'cg', 'bfgs',
                       'newton-cg', 'l-bfgs-b', 'tnc',
                       'cobyla', 'slsqp']:
            yield check, method

        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', category=DeprecationWarning)
            yield check, 'anneal'

    def test_slsqp_respect_bounds(self):
        # github issue 3108
        def f(x):
            return sum((x - np.array([1., 2., 3., 4.]))**2)

        def cons(x):
            a = np.array([[-1, -1, -1, -1], [-3, -3, -2, -1]])
            return np.concatenate([np.dot(a, x) + np.array([5, 10]), x])

        x0 = np.array([0.5, 1., 1.5, 2.])
        res = optimize.minimize(f, x0, method='slsqp',
                                constraints={'type': 'ineq', 'fun': cons})
        assert_allclose(res.x, np.array([0., 2, 5, 8])/3, atol=1e-12)

    def test_minimize_automethod(self):
        # minimize() must pick a sensible default method depending on the
        # presence of constraints and bounds.
        def f(x):
            return x**2

        def cons(x):
            return x - 2

        x0 = np.array([10.])
        sol_0 = optimize.minimize(f, x0)
        sol_1 = optimize.minimize(f, x0, constraints=[{'type': 'ineq', 'fun': cons}])
        sol_2 = optimize.minimize(f, x0, bounds=[(5, 10)])
        sol_3 = optimize.minimize(f, x0, constraints=[{'type': 'ineq', 'fun': cons}], bounds=[(5, 10)])
        sol_4 = optimize.minimize(f, x0, constraints=[{'type': 'ineq', 'fun': cons}], bounds=[(1, 10)])
        for sol in [sol_0, sol_1, sol_2, sol_3, sol_4]:
            assert_(sol.success)
        assert_allclose(sol_0.x, 0, atol=1e-8)
        assert_allclose(sol_1.x, 2, atol=1e-8)
        assert_allclose(sol_2.x, 5, atol=1e-8)
        assert_allclose(sol_3.x, 5, atol=1e-8)
        assert_allclose(sol_4.x, 2, atol=1e-8)

    def test_minimize_coerce_args_param(self):
        # github issue #3503: args=(c) is NOT a tuple -- minimize must
        # coerce a bare argument into one instead of crashing.
        def Y(x, c):
            return np.sum((x-c)**2)

        def dY_dx(x, c=None):
            return 2*(x-c)
        c = np.array([3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5])
        xinit = np.random.randn(len(c))
        optimize.minimize(Y, xinit, jac=dY_dx, args=(c), method="BFGS")
class TestLBFGSBBounds(TestCase):
    """ Tests for L-BFGS-B with bounds """

    def setUp(self):
        # x[0] is bounded below by 1; x[1] is free.  The constrained
        # minimum of the quadratic objective is therefore (1, 0).
        self.bounds = ((1, None), (None, None))
        self.solution = (1, 0)

    def fun(self, x, p=2.0):
        """Objective: (x0**p + x1**p) / p."""
        total = x[0]**p + x[1]**p
        return 1.0 / p * total

    def jac(self, x, p=2.0):
        """Gradient of :meth:`fun` for the same exponent."""
        return x**(p - 1)

    def fj(self, x, p=2.0):
        """Objective and gradient in a single call."""
        return (self.fun(x, p), self.jac(x, p))

    def test_l_bfgs_b_bounds(self):
        """ L-BFGS-B with bounds """
        x, f, d = optimize.fmin_l_bfgs_b(self.fun, [0, -1],
                                         fprime=self.jac,
                                         bounds=self.bounds)
        assert_(d['warnflag'] == 0, d['task'])
        assert_allclose(x, self.solution, atol=1e-6)

    def test_l_bfgs_b_funjac(self):
        """ L-BFGS-B with fun and jac combined and extra arguments """
        x, f, d = optimize.fmin_l_bfgs_b(self.fj, [0, -1], args=(2.0, ),
                                         bounds=self.bounds)
        assert_(d['warnflag'] == 0, d['task'])
        assert_allclose(x, self.solution, atol=1e-6)

    def test_minimize_l_bfgs_b_bounds(self):
        """ Minimize with method='L-BFGS-B' with bounds """
        res = optimize.minimize(self.fun, [0, -1], method='L-BFGS-B',
                                jac=self.jac, bounds=self.bounds)
        assert_(res['success'], res['message'])
        assert_allclose(res.x, self.solution, atol=1e-6)
class TestOptimizeScalar(TestCase):
    """Tests for scalar optimizers"""
    def setUp(self):
        # Minimum of the parabola below (with default shift a=1.5).
        self.solution = 1.5

    def fun(self, x, a=1.5):
        """Objective function"""
        # Shifted parabola: minimum value -0.8 at x == a.
        return (x - a)**2 - 0.8

    def test_brent(self):
        """ brent algorithm """
        x = optimize.brent(self.fun)
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.brent(self.fun, brack=(-3, -2))
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.brent(self.fun, full_output=True)
        assert_allclose(x[0], self.solution, atol=1e-6)

        x = optimize.brent(self.fun, brack=(-15, -1, 15))
        assert_allclose(x, self.solution, atol=1e-6)

    def test_golden(self):
        """ golden algorithm """
        x = optimize.golden(self.fun)
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.golden(self.fun, brack=(-3, -2))
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.golden(self.fun, full_output=True)
        assert_allclose(x[0], self.solution, atol=1e-6)

        x = optimize.golden(self.fun, brack=(-15, -1, 15))
        assert_allclose(x, self.solution, atol=1e-6)

    def test_fminbound(self):
        """Test fminbound """
        # When the interval excludes the true minimum, the nearer bound wins.
        x = optimize.fminbound(self.fun, 0, 1)
        assert_allclose(x, 1, atol=1e-4)

        x = optimize.fminbound(self.fun, 1, 5)
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.fminbound(self.fun, np.array([1]), np.array([5]))
        assert_allclose(x, self.solution, atol=1e-6)
        # Reversed bounds are rejected.
        assert_raises(ValueError, optimize.fminbound, self.fun, 5, 1)

    def test_fminbound_scalar(self):
        # Non-scalar bounds must raise with a clear message.
        try:
            optimize.fminbound(self.fun, np.zeros((1, 2)), 1)
            self.fail("exception not raised")
        except ValueError as e:
            assert_('must be scalar' in str(e))

        # 0-d arrays count as scalars.
        x = optimize.fminbound(self.fun, 1, np.array(5))
        assert_allclose(x, self.solution, atol=1e-6)

    def test_minimize_scalar(self):
        # combine all tests above for the minimize_scalar wrapper
        x = optimize.minimize_scalar(self.fun).x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, bracket=(-3, -2),
                                     args=(1.5, ), method='Brent').x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, method='Brent',
                                     args=(1.5,)).x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15),
                                     args=(1.5, ), method='Brent').x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, bracket=(-3, -2),
                                     args=(1.5, ), method='golden').x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, method='golden',
                                     args=(1.5,)).x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15),
                                     args=(1.5, ), method='golden').x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, bounds=(0, 1), args=(1.5,),
                                     method='Bounded').x
        assert_allclose(x, 1, atol=1e-4)

        x = optimize.minimize_scalar(self.fun, bounds=(1, 5), args=(1.5, ),
                                     method='bounded').x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, bounds=(np.array([1]),
                                                       np.array([5])),
                                     args=(np.array([1.5]), ),
                                     method='bounded').x
        assert_allclose(x, self.solution, atol=1e-6)

        assert_raises(ValueError, optimize.minimize_scalar, self.fun,
                      bounds=(5, 1), method='bounded', args=(1.5, ))

        assert_raises(ValueError, optimize.minimize_scalar, self.fun,
                      bounds=(np.zeros(2), 1), method='bounded', args=(1.5, ))

        x = optimize.minimize_scalar(self.fun, bounds=(1, np.array(5)),
                                     method='bounded').x
        assert_allclose(x, self.solution, atol=1e-6)

    def test_minimize_scalar_custom(self):
        # This function comes from the documentation example: a
        # user-supplied stepping minimizer passed as the `method` callable.
        def custmin(fun, bracket, args=(), maxfev=None, stepsize=0.1,
                    maxiter=100, callback=None, **options):
            bestx = (bracket[1] + bracket[0]) / 2.0
            besty = fun(bestx)
            funcalls = 1
            niter = 0
            improved = True
            stop = False

            while improved and not stop and niter < maxiter:
                improved = False
                niter += 1
                for testx in [bestx - stepsize, bestx + stepsize]:
                    testy = fun(testx, *args)
                    funcalls += 1
                    if testy < besty:
                        besty = testy
                        bestx = testx
                        improved = True
                if callback is not None:
                    callback(bestx)
                if maxfev is not None and funcalls >= maxfev:
                    stop = True
                    break

            return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter,
                                           nfev=funcalls, success=(niter > 1))

        res = optimize.minimize_scalar(self.fun, bracket=(0, 4), method=custmin,
                                       options=dict(stepsize=0.05))
        assert_allclose(res.x, self.solution, atol=1e-6)

    def test_minimize_scalar_coerce_args_param(self):
        # github issue #3503: a bare (non-tuple) args value must be coerced.
        optimize.minimize_scalar(self.fun, args=1.5)
class TestNewtonCg(object):
    # Smoke tests for method='Newton-CG' on two classic test functions.
    def test_rosenbrock(self):
        x0 = np.array([-1.2, 1.0])
        sol = optimize.minimize(optimize.rosen, x0,
                                jac=optimize.rosen_der,
                                hess=optimize.rosen_hess,
                                tol=1e-5,
                                method='Newton-CG')
        assert_(sol.success, sol.message)
        assert_allclose(sol.x, np.array([1, 1]), rtol=1e-4)

    def test_himmelblau(self):
        # Uses the himmelblau* helpers and constants defined later in this
        # module; the forward reference is fine because names are resolved
        # at call time.
        x0 = np.array(himmelblau_x0)
        sol = optimize.minimize(himmelblau,
                                x0,
                                jac=himmelblau_grad,
                                hess=himmelblau_hess,
                                method='Newton-CG',
                                tol=1e-6)
        assert_(sol.success, sol.message)
        assert_allclose(sol.x, himmelblau_xopt, rtol=1e-4)
        assert_allclose(sol.fun, himmelblau_min, atol=1e-4)
class TestRosen(TestCase):

    def test_hess(self):
        """Compare rosen_hess(x) times p with rosen_hess_prod(x,p) (ticket #1248)"""
        point = np.array([3, 4, 5])
        vec = np.array([2, 2, 2])
        # The dedicated product routine must agree exactly with an explicit
        # Hessian-matrix / vector multiplication.
        direct = optimize.rosen_hess_prod(point, vec)
        via_matrix = np.dot(optimize.rosen_hess(point), vec)
        assert_equal(direct, via_matrix)
def himmelblau(p):
    """
    R^2 -> R^1 test function for optimization. The function has four local
    minima where himmelblau(xopt) == 0.
    """
    x, y = p
    first = x * x + y - 11
    second = x + y * y - 7
    return first * first + second * second
def himmelblau_grad(p):
    """Analytic gradient of :func:`himmelblau`."""
    x, y = p
    dx = 4*x**3 + 4*x*y - 42*x + 2*y**2 - 14
    dy = 2*x**2 + 4*x*y + 4*y**3 - 26*y - 22
    return np.array([dx, dy])
def himmelblau_hess(p):
    """Analytic Hessian of :func:`himmelblau` (symmetric 2x2)."""
    x, y = p
    dxx = 12*x**2 + 4*y - 42
    dxy = 4*x + 4*y
    dyy = 4*x + 12*y**2 - 26
    return np.array([[dxx, dxy],
                     [dxy, dyy]])
# Starting point, known optimum and optimal value used by TestNewtonCg.
himmelblau_x0 = [-0.27, -0.9]
himmelblau_xopt = [3, 2]
himmelblau_min = 0.0
def test_minimize_multiple_constraints():
    # Regression test for gh-4240: SLSQP must honour every constraint in a
    # sequence, not just the first one.
    def cons_budget(x):
        return np.array([25 - 0.2 * x[0] - 0.4 * x[1] - 0.33 * x[2]])

    def cons_y(x):
        return np.array([x[1]])

    def cons_z(x):
        return np.array([x[2]])

    constraints = ({'type': 'ineq', 'fun': cons_budget},
                   {'type': 'ineq', 'fun': cons_y},
                   {'type': 'ineq', 'fun': cons_z})

    def objective(x):
        return -1 * (x[0] + x[1] + x[2])

    res = optimize.minimize(objective, [0, 0, 0], method='SLSQP',
                            constraints=constraints)
    assert_allclose(res.x, [125, 0, 0], atol=1e-10)
if __name__ == "__main__":
    # NOTE(review): run_module_suite is the nose-era numpy.testing entry
    # point; removed from modern numpy -- confirm target numpy version.
    run_module_suite()
| |
from collections import defaultdict
import atexit
import difflib
import jinja2
import json
import os
import socket
import subprocess
# iptables-restore input rendered from the chains/rules the caller supplies.
# The *nat table is emitted only when nat rules exist; the *filter table is
# always emitted, with DROP policies on INPUT/FORWARD and a caller-chosen
# OUTPUT policy.  Template variables: nat_rules, nat_chains, filter_rules,
# filter_chains, output_policy.
RULES_TEMPLATE = jinja2.Template('''
{% if nat_rules %}
*nat
:PREROUTING ACCEPT [0:0]
:INPUT ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
:POSTROUTING ACCEPT [0:0]
{% for chain in nat_chains|default([]) -%}
:{{ chain }} - [0:0]
{% endfor %}
{% for rule in nat_rules|default([]) -%}
{{ rule }}
{% endfor -%}
COMMIT
{% endif %}
*filter
:INPUT DROP [0:0]
:FORWARD DROP [0:0]
:OUTPUT {{ output_policy }} [0:0]
{% for chain in filter_chains|default([]) -%}
:{{ chain }} - [0:0]
{% endfor %}
{% for rule in filter_rules|default([]) -%}
{{ rule }}
{% endfor -%}
COMMIT
''')
def register_cleanup_of_file(file_path):
    """ Clean up rules on disk when exiting to prevent them from leaking between runs.
    Since a state run might be aborted before apply() runs we have do it even though
    it might not have been read.
    """
    @atexit.register
    def cleanup():
        try:
            os.remove(file_path)
        except OSError:
            # The file may never have been created, or apply() already
            # removed it; either way there is nothing left to clean up.
            # (Was a bare `except:`, which also swallowed SystemExit etc.)
            pass
def _add_rule(target_file, key, rule):
    """Append one rule entry to target_file as a single-line JSON object.

    Each line of the file maps a key (e.g. 'filter_rules', 'nat_chains') to
    one rule/chain string. The file is scheduled for deletion at interpreter
    exit exactly once, using salt's __context__ to remember which files have
    already been scheduled.
    """
    object_to_store = {
        key: rule,
    }
    scheduled_deletions = __context__.get('firewall.scheduled_file_deletion', [])
    if target_file not in scheduled_deletions:
        register_cleanup_of_file(target_file)
        scheduled_deletions.append(target_file)
        __context__['firewall.scheduled_file_deletion'] = scheduled_deletions
    with open(target_file, 'a') as fh:
        json.dump(object_to_store, fh)
        fh.write('\n')
def append(name, chain='INPUT', table='filter', family='ipv4', **kwargs):
    """Schedule an iptables rule for `chain` in `table`, to be applied later.

    Builds the rule text via salt's iptables.build_rule and caches it in the
    per-family rule file consumed by apply(). Destination conveniences:
    'system_dns' expands to the configured nameservers (or allows all
    destinations when none can be determined), wrong-family addresses make
    the rule a successful no-op, and unparseable destinations (assumed
    hostnames) fall back to allowing all destinations.
    """
    assert family in ('ipv4', 'ipv6')
    assert table in ('filter', 'nat')
    destination = kwargs.get('destination')
    # Some convenience utilities for destinations here, first we allow specifying that the
    # intended destination is the system dns servers, which will figure out which those are
    # and add the correct IPs, but allow all traffic if we can't determine their IPs
    if destination == 'system_dns':
        # Grain key becomes e.g. 'dns:ip4_nameservers' / 'dns:ip6_nameservers'
        grain_lookup = 'dns:%s_nameservers' % family.replace('v', '')
        dns_servers = __salt__['grains.get'](grain_lookup)
        if dns_servers:
            kwargs['destination'] = ','.join(dns_servers)
        else:
            del kwargs['destination']
    elif _is_ipv4(destination) and family == 'ipv6' or _is_ipv6(destination) and family == 'ipv4':
        # Address family mismatch: report success without recording anything
        return {
            'name': name,
            'comment': 'Ignored due to wrong family for destination %s' % destination,
            'result': True,
            'changes': {},
        }
    elif destination and not _is_ipv6(destination) and not _is_ipv4(destination):
        # not a valid address, assume hostname and allow all destinations
        del kwargs['destination']
    partial_rule = __salt__['iptables.build_rule'](**kwargs)
    full_rule = '-A %s %s' % (chain, partial_rule)
    # family[-2:] maps 'ipv4'/'ipv6' to the 'v4'/'v6' file suffix
    file_target = get_cached_rule_file_for_family(family[-2:])
    _add_rule(file_target, '%s_rules' % table, full_rule)
    return {
        'name': name,
        'comment': '',
        'result': True,
        'changes': {},
    }
def chain_present(name, table='filter', family='ipv4', **kwargs):
    """Record a custom chain named `name` so it is created when the cached
    ruleset for `family` is rendered and applied."""
    assert table in ('filter', 'nat')
    assert family in ('ipv4', 'ipv6')
    rules_file = get_cached_rule_file_for_family(family[-2:])
    _add_rule(rules_file, '%s_chains' % table, name)
    return dict(name=name, result=True, changes={}, comment='')
def get_cached_rule_file_for_family(family):
    """Return the path of the cached rule file for 'v4' or 'v6' inside
    salt's cache directory."""
    assert family in ('v4', 'v6')
    return os.path.join(
        __opts__['cachedir'], 'firewall-rules-%s.json' % family)
def _get_rules(path):
try:
fh = open(path)
except OSError:
return {}
with fh:
all_rules = defaultdict(list)
for line in fh:
parsed_line = json.loads(line)
for key, value in parsed_line.items():
all_rules[key].append(value)
return all_rules
def apply(name, output_policy='ACCEPT', apply=True):
    '''
    Build and apply the rules.

    Renders the cached v4/v6 rule files through RULES_TEMPLATE, writes the
    result under /etc/iptables and (optionally) feeds it to
    iptables-restore / ip6tables-restore.

    :param output_policy: policy for the OUTPUT chain in the filter table
    :param apply: Set this to False to only build the ruleset on disk.
    '''
    comment = []
    if not apply:
        comment.append('Built only, not applied')
    changes = {}
    success = True
    for family in ('v4', 'v6'):
        file_target = get_cached_rule_file_for_family(family)
        context = {
            'output_policy': output_policy,
        }
        context.update(_get_rules(file_target))
        result, stderr, rule_changes = _apply_rule_for_family('rules.%s' % family,
            context, 'ip%stables-restore' % ('' if family == 'v4' else '6'), apply)
        if stderr:
            comment.append(stderr)
        if rule_changes:
            changes['ip%s' % family] = rule_changes
        if result != 0:
            success = False
        # Clear out the rules on disk (will also be done on exit if run stops before applying the rules)
        try:
            os.remove(file_target)
        except OSError:
            # No rules were cached for this family, so there is no file to
            # remove (previously this raised and aborted the state).
            pass
    return {
        'name': name,
        'comment': '\n'.join(comment),
        'result': success,
        'changes': changes,
    }
def _apply_rule_for_family(filename, context, restore_command, apply):
    """Render the ruleset, persist it under /etc/iptables, optionally apply it.

    :param filename: rules file name under /etc/iptables (e.g. 'rules.v4')
    :param context: template context (output policy plus collected chains/rules)
    :param restore_command: 'iptables-restore' or 'ip6tables-restore'
    :param apply: when False, only write the file and report success
    :return: (exit status, decoded stderr, unified diff against the old file)
    """
    rendered_rules = RULES_TEMPLATE.render(context)
    # iptables-restore fails to parse if the rules doesnt end with newline
    if not rendered_rules[-1] == '\n':
        rendered_rules += '\n'
    # Ensure that the target directory exists
    if not os.path.exists('/etc/iptables'):
        os.makedirs('/etc/iptables')
    # First, read old content so that we can compute a diff (but might not exist already)
    target_file = '/etc/iptables/%s' % filename
    try:
        with open(target_file) as fh:
            old_content = fh.readlines()
    except IOError:
        old_content = []
    with open(target_file, 'w') as fh:
        fh.write(rendered_rules)
    new_content = [line + '\n' for line in rendered_rules[:-1].split('\n')]
    changes = ''.join(difflib.unified_diff(old_content, new_content))
    if apply:
        restore_process = subprocess.Popen([restore_command],
                                           stdin=subprocess.PIPE,
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE,
                                           )
        _, stderr = restore_process.communicate(rendered_rules.encode('utf-8'))
        result = restore_process.wait()
    else:
        result = 0
        # Must be bytes: the shared return path below calls .decode('utf-8'),
        # and communicate() yields bytes on the applied path. A plain ''
        # would raise AttributeError on Python 3.
        stderr = b''
    return (result, stderr.decode('utf-8'), changes)
def _is_ipv4(address):
    '''
    Return True when `address` (an optional /CIDR suffix is allowed) parses
    as an IPv4 address; False for None, empty or non-IPv4 input.

    >>> _is_ipv4('1.1.1.1')
    True
    >>> _is_ipv4('2.2.2.2/32')
    True
    >>> _is_ipv4(None)
    False
    >>> _is_ipv4('2001:db8::')
    False
    '''
    return _is_ip_family(socket.AF_INET, address)
def _is_ipv6(address):
    '''
    Return True when `address` (an optional /CIDR suffix is allowed) parses
    as an IPv6 address; False for None, empty or non-IPv6 input.

    >>> _is_ipv6('2001:db8::')
    True
    >>> _is_ipv6('2001:db8::/64')
    True
    >>> _is_ipv6(None)
    False
    >>> _is_ipv6('1.1.1.1')
    False
    '''
    return _is_ip_family(socket.AF_INET6, address)
def _is_ip_family(family, address):
# Asssumes that inet_pton exists, which is fair since this state only works
# on systems with iptables anyway
if not address:
return False
# Trim CIDR ranges
address = address.split('/', 1)[0]
try:
socket.inet_pton(family, address)
except socket.error: # not a valid address
return False
return True
| |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tempfile
import tensorflow as tf
import shutil
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.platform import gfile
from bigdl.dllib.nn.layer import Model
from bigdl.dllib.utils.common import JTensor
from bigdl.dllib.utils.common import callBigDlFunc
import os
def get_path(output_name, sess=None):
    """Freeze the current TF1 graph into a standalone GraphDef .pb file.

    Saves a checkpoint and a text-format graph into a temp directory, then
    merges variable values into constants via merge_checkpoint.

    :param output_name: name of the single output node to keep
    :param sess: optional session; when None a new one is created and all
        global variables are initialized
    :return: path of the frozen model.pb
    """
    if sess is None:
        sess = tf.Session()
        init = tf.global_variables_initializer()
        sess.run(init)
    temp = tempfile.mkdtemp()
    saver = tf.train.Saver()
    saver.save(sess, temp + '/model.chkp')
    tf.train.write_graph(sess.graph, temp, 'model.pbtxt')
    merge_checkpoint(temp + '/model.pbtxt',
                     temp + '/model.chkp',
                     [output_name],
                     temp + '/model.pb', sess)
    return temp + '/model.pb'
def convert(input_ops, output_ops, byte_order, bigdl_type):
    """
    Convert tensorflow model to bigdl model
    :param input_ops: operation list used for input, should be placeholders
    :param output_ops: operations list used for output
    :param byte_order: byte order used when loading the serialized weights
    :param bigdl_type: model variable numeric type, e.g. "float"
    :return: bigdl model
    """
    # `errno` is used in the cleanup handler below but was never imported at
    # module level, which made the OSError branch raise NameError.
    import errno
    input_names = map(lambda x: x.name.split(":")[0], input_ops)
    output_names = map(lambda x: x.name.split(":")[0], output_ops)
    temp = tempfile.mkdtemp()
    dump_model(path=temp)
    model_path = temp + '/model.pb'
    bin_path = temp + '/model.bin'
    model = Model.load_tensorflow(model_path, input_names, output_names,
                                  byte_order, bin_path, bigdl_type)
    try:
        shutil.rmtree(temp)
    except OSError as e:
        # Ignore "already removed"; re-raise any other filesystem error.
        if e.errno != errno.ENOENT:
            raise
    return model
def export_checkpoint(checkpoint_path):
    """
    Export variable tensors from the checkpoint files.
    :param checkpoint_path: tensorflow checkpoint path
    :return: dictionary of tensor. The key is the variable name and the value is the numpy
    """
    reader = tf.train.NewCheckpointReader(checkpoint_path)
    tensors = {}
    for variable_name in reader.get_variable_to_shape_map().keys():
        if variable_name == 'global_step':
            # The step counter is training bookkeeping, not a model weight.
            continue
        tensors[variable_name] = reader.get_tensor(variable_name)
    return tensors
def save_variable_bigdl(tensors, target_path, bigdl_type="float"):
    """
    Save a variable dictionary to a Java object file, so it can be read by BigDL
    :param tensors: tensor dictionary
    :param target_path: where is the Java object file store
    :param bigdl_type: model variable numeric type
    :return: nothing
    """
    import numpy as np
    jtensors = {}
    for name in tensors.keys():
        raw = tensors[name]
        # Coerce anything that is not already an ndarray
        array = raw if isinstance(raw, np.ndarray) else np.array(raw)
        jtensors[name] = JTensor.from_ndarray(array)
    callBigDlFunc(bigdl_type, "saveTensorDictionary", jtensors, target_path)
def dump_model(path, graph=None, sess=None, ckpt_file=None, bigdl_type="float"):
    """
    Dump a tensorflow model to files. The graph will be dumped to path/model.pb, and the checkpoint
    will be dumped to path/model.bin
    :param path: dump folder path
    :param sess: if user pass in session, we assume that the variable of the graph in the session
    has been inited
    :param graph: tensorflow graph. Default use the default graph of the session
    :param ckpt_file: existing checkpoint to export; when None a fresh one is
    saved from the (possibly newly created) session
    :param bigdl_type: model variable numeric type
    :return: nothing
    """
    # `errno` is used in the cleanup below but was never imported at module
    # level, which made the OSError branch raise NameError.
    import errno
    if not os.path.isdir(path):
        raise ValueError("Folder " + path + " does not exist")
    temp = None
    if ckpt_file is None:
        if sess is None:
            sess = tf.Session()
            init = tf.global_variables_initializer()
            sess.run(init)
        temp = tempfile.mkdtemp()
        ckpt_file = temp
        # dump checkpoint to temp files
        saver = tf.train.Saver()
        saver.save(sess, ckpt_file)
    # generate bin files
    tensors = export_checkpoint(ckpt_file)
    save_variable_bigdl(tensors, path + "/model.bin", bigdl_type)
    # dump graph to pb file
    # NOTE(review): when ckpt_file is given but sess and graph are both None,
    # sess.graph below would fail — confirm callers always pass sess or graph.
    graph = sess.graph if graph is None else graph
    with gfile.GFile(path + "/model.pb", "wb") as f:
        f.write(graph.as_graph_def().SerializeToString())
    if temp is not None:
        try:
            shutil.rmtree(temp)
        except OSError as e:
            # Ignore "already removed"; re-raise any other filesystem error.
            if e.errno != errno.ENOENT:
                raise
def merge_checkpoint(input_graph,
                     checkpoint,
                     output_node_names,
                     output_graph,
                     sess):
    """
    Get the variable values from the checkpoint file, and merge them to the GraphDef file
    Args:
        input_graph: the GraphDef file, doesn't contain variable values
        checkpoint: the checkpoint file
        output_node_names: A list of string, the output names
        output_graph: String of the location and the name of the
        output graph
        sess: session used to restore the checkpoint variables into
    """
    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    input_graph_def = graph_pb2.GraphDef()
    # Parse the text-format GraphDef written by tf.train.write_graph
    with gfile.FastGFile(input_graph, "r") as f:
        text_format.Merge(f.read().decode("utf-8"), input_graph_def)
    # Clear device placements so the graph can be loaded on any machine
    for node in input_graph_def.node:
        node.device = ""
    importer.import_graph_def(input_graph_def, name="")
    # Restore variable values from the checkpoint via the Saver's restore op
    sess.run([restore_op_name], {filename_tensor_name: checkpoint})
    # Fold restored variables into constants, keeping only the subgraph
    # reachable from output_node_names
    output_graph_def = graph_util.convert_variables_to_constants(
        sess,
        input_graph_def,
        output_node_names,
        variable_names_blacklist=""
    )
    with gfile.GFile(output_graph, "wb") as f:
        f.write(output_graph_def.SerializeToString())
| |
"""
In this module, the similarity classes of the item-based approach are defined.
The most important classes are the CombinedRecordSimilarity, the
CollaborativeRecordSimilarity and the ContentRecordSimilarity. The first one
combines the record similarity values of two underlying item based similarity
classes. The second similarity class calculates the similarity of two records
by using a similarity metric, e.g., cosine similarity, on their preference
vectors. Finally, ContentRecordSimilarity retrieves the imported content-based
similarity from the database and stores it in local memory.
"""
from similarity_metrics import jaccard_sim
from similarity_metrics import cosine_sim
from .. import queries
from ..refreshable import Refreshable
from ..refreshable import RefreshHelper
from search_rex.util.date_util import utcnow
import math
from datetime import timedelta
from collections import defaultdict
class AbstractRecordSimilarity(Refreshable):
    """Interface for directed record-to-record similarity computations."""

    def get_similarity(self, from_record_id, to_record_id):
        """Return the similarity directed from one record to another.

        Subclasses must override this.
        """
        raise NotImplementedError()
class RecordSimilarity(AbstractRecordSimilarity):
    """Similarity of two records obtained by comparing their preference
    vectors with a pluggable similarity metric."""

    def __init__(self, data_model, similarity_metric):
        """
        :param data_model: the data model where the preferences are stored
        :param similarity_metric: the metric that computes the similarity
        between the preference vectors
        """
        self.data_model = data_model
        self.similarity_metric = similarity_metric
        self.refresh_helper = RefreshHelper()
        self.refresh_helper.add_dependency(data_model)

    def get_similarity(self, from_record_id, to_record_id):
        """Look up both records' preference vectors and delegate to the
        configured metric.

        :param from_record_id: id of the record the similarity is directed from
        :param to_record_id: id of the record the similarity is directed to
        """
        source_prefs = self.data_model.get_preferences_for_record(
            from_record_id)
        target_prefs = self.data_model.get_preferences_for_record(
            to_record_id)
        return self.similarity_metric.get_similarity(
            source_prefs, target_prefs)

    def refresh(self, refreshed_components):
        self.refresh_helper.refresh(refreshed_components)
        refreshed_components.add(self)
class CombinedRecordSimilarity(AbstractRecordSimilarity):
    """Weighted blend of two record similarities:

    sim = w * sim1 + (1 - w) * sim2

    When one underlying similarity is NaN, only the other one (scaled by its
    weight) is returned.
    """

    def __init__(self, similarity_metric1, similarity_metric2, weight):
        """
        :param similarity_metric1: the first similarity metric
        :param similarity_metric2: the second similarity metric
        :param weight: the weight between 0 and 1 of metric 1
        """
        assert 0 <= weight <= 1
        self.similarity_metric1 = similarity_metric1
        self.similarity_metric2 = similarity_metric2
        self.weight = weight
        self.refresh_helper = RefreshHelper()
        self.refresh_helper.add_dependency(similarity_metric1)
        self.refresh_helper.add_dependency(similarity_metric2)

    def get_similarity(self, from_record_id, to_record_id):
        """Return the weighted combination of both metrics' similarities
        for the directed record pair."""
        first = self.similarity_metric1.get_similarity(
            from_record_id, to_record_id)
        second = self.similarity_metric2.get_similarity(
            from_record_id, to_record_id)
        complement = 1 - self.weight
        if math.isnan(first):
            return second * complement
        if math.isnan(second):
            return first * self.weight
        return first * self.weight + second * complement

    def refresh(self, refreshed_components):
        self.refresh_helper.refresh(refreshed_components)
        refreshed_components.add(self)
class InMemoryRecordSimilarity(AbstractRecordSimilarity):
    """
    Loads similarities from the database and stores them in the memory
    """
    def __init__(self, include_internal_records, max_sims_per_record=100):
        # Whether similarities involving internal records are loaded
        self.include_internal_records = include_internal_records
        # Cap on the number of neighbours kept per record (highest values win)
        self.max_sims_per_record = max_sims_per_record
        # Nested mapping: from_record -> {to_record: similarity}
        self.similarities = {}
        self.refresh_helper = RefreshHelper(
            target_refresh_function=self.init_similarities)
        self.init_similarities()
    def init_similarities(self):
        """(Re)load the similarity cache from the database, keeping only the
        max_sims_per_record highest-valued neighbours per record."""
        similarities = defaultdict(dict)
        for from_record, rec_sims in queries.get_similarities(
                self.include_internal_records):
            # Sort neighbours by descending similarity (Python 2
            # tuple-unpacking lambda)
            sorted_sims = sorted(
                rec_sims.iteritems(), key=lambda(_, s): s, reverse=True)
            for i, (to_record, sim) in enumerate(sorted_sims):
                if i >= self.max_sims_per_record:
                    break
                similarities[from_record][to_record] = sim
        # Swap in the freshly built cache in one assignment
        self.similarities = similarities
    def get_similarity(self, from_record_id, to_record_id):
        """
        Look up the cached similarity directed from one record to the other;
        returns NaN when the pair is not in the cache
        :param from_record_id: the id of the record from which the similarity
        is directed
        :param to_record_id: the id of the record to which the similarity
        is directed
        """
        if from_record_id in self.similarities:
            if to_record_id in self.similarities[from_record_id]:
                return self.similarities[from_record_id][to_record_id]
        return float('nan')
    def refresh(self, refreshed_components):
        self.refresh_helper.refresh(refreshed_components)
        refreshed_components.add(self)
class AbstractPreferenceSimilarity(object):
    """Interface for similarity between two preference vectors."""

    def get_similarity(self, from_preferences, to_preferences):
        """Return the similarity of the two preference vectors.

        :param from_preferences: preference vector of the record the
        similarity is directed from
        :param to_preferences: preference vector of the record the
        similarity is directed to
        """
        raise NotImplementedError()
class JaccardSimilarity(AbstractPreferenceSimilarity):
    """Jaccard similarity over the keys of the two preference vectors."""

    def get_similarity(self, from_preferences, to_preferences):
        """Compare only which keys were interacted with, ignoring the
        strength of the preferences.

        :param from_preferences: preference vector of the origin record
        :param to_preferences: preference vector of the target record
        """
        return jaccard_sim(from_preferences.keys(), to_preferences.keys())
class CosineSimilarity(AbstractPreferenceSimilarity):
    """Cosine similarity over the numeric values of the preference vectors."""

    def get_similarity(self, from_preferences, to_preferences):
        """Project each preference to its numeric value and compare the
        resulting vectors with cosine similarity.

        :param from_preferences: preference vector of the origin record
        :param to_preferences: preference vector of the target record
        """
        from_values = {
            key: pref.value for key, pref in from_preferences.iteritems()}
        to_values = {
            key: pref.value for key, pref in to_preferences.iteritems()}
        return cosine_sim(from_values, to_values)
class SignificanceWeighting(AbstractPreferenceSimilarity):
    """Scales down similarities that rest on few shared preferences."""

    def __init__(self, similarity_metric, min_overlap):
        self.similarity_metric = similarity_metric
        self.min_overlap = min_overlap

    def get_similarity(self, from_preferences, to_preferences):
        """Compute the wrapped similarity, then damp it linearly until the
        vectors share at least min_overlap keys.

        :param from_preferences: preference vector of the origin record
        :param to_preferences: preference vector of the target record
        """
        raw_similarity = self.similarity_metric.get_similarity(
            from_preferences, to_preferences)
        shared_keys = (
            set(from_preferences.iterkeys()) & set(to_preferences.iterkeys()))
        penalty = (
            min(len(shared_keys), self.min_overlap) / float(self.min_overlap))
        return raw_similarity * penalty
def partition_preferences_by_time(preferences, time_bounds):
    """Split a preference vector into recency buckets.

    Each preference lands in the first bucket whose lower time bound it
    exceeds; time_bounds is expected ordered from newest to oldest (as built
    by TimeDecaySimilarity). Preferences older than every bound are dropped.
    """
    buckets = [{} for _ in xrange(len(time_bounds))]
    for key, pref in preferences.iteritems():
        for index, lower_bound in enumerate(time_bounds):
            if pref.preference_time > lower_bound:
                buckets[index][key] = pref
                break
    return buckets
class TimeDecaySimilarity(AbstractPreferenceSimilarity):
    """
    Implements a decreasing weight that penalises older interactions
    """
    def __init__(
            self, similarity_metric,
            time_interval=timedelta(weeks=8), half_life=2,
            max_age=12):
        """
        :param similarity_metric: The underlying similarity metric that is
        called with every partition of preference values
        :param time_interval: The interval with which the preferences are
        partitioned
        :param half_life: The number of intervals until the weight is half of
        its initial value
        :param max_age: The max number of intervals to consider
        """
        self.similarity_metric = similarity_metric
        self.time_interval = time_interval
        self.half_life = half_life
        self.max_age = max_age
    def get_similarity(self, from_preferences, to_preferences):
        """
        Implements a decreasing weight that penalises older interactions
        :param from_preferences: the preference vector of the record from which
        the similarity is directed
        :param to_preferences: the preference vector of the record to which
        the similarity is directed
        """
        if len(from_preferences) == 0 and len(to_preferences) == 0:
            return float('NaN')
        time_bounds = []
        time_weights = []
        weight_sum = 0.0
        curr = utcnow()
        # Build max_age boundaries going back in time; the weight of interval
        # t halves every half_life intervals: 2 ** (-t / half_life).
        for t in xrange(self.max_age):
            curr -= self.time_interval
            time_bounds.append(curr)
            weight = 2**(-(t)/float(self.half_life))
            weight_sum += weight
            time_weights.append(weight)
        from_parts = partition_preferences_by_time(
            from_preferences, time_bounds)
        to_parts = partition_preferences_by_time(
            to_preferences, time_bounds)
        sim_sum = 0.0
        # Weighted average of per-interval similarities; NaN intervals add
        # nothing to the numerator but their weight stays in the denominator.
        for t, w in enumerate(time_weights):
            sim = self.similarity_metric.get_similarity(
                from_parts[t], to_parts[t])
            if not math.isnan(sim):
                sim_sum += w*sim
        return sim_sum / weight_sum
| |
import sys
import os
import gzip
import zipfile
from optparse import make_option
import traceback
from django.conf import settings
from django.core import serializers
from django.core.management.base import BaseCommand
from django.core.management.color import no_style
from django.db import (connections, router, transaction, DEFAULT_DB_ALIAS,
IntegrityError, DatabaseError)
from django.db.models import get_apps
from itertools import product
try:
import bz2
has_bz2 = True
except ImportError:
has_bz2 = False
class Command(BaseCommand):
    """Django 1.4-era `loaddata`: locates fixture files (optionally gz/zip/bz2
    compressed), deserializes them inside a single transaction, then resets
    database sequences. Note: uses Python 2 syntax (three-argument raise)."""
    help = 'Installs the named fixture(s) in the database.'
    args = "fixture [fixture ...]"
    option_list = BaseCommand.option_list + (
        make_option('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS, help='Nominates a specific database to load '
            'fixtures into. Defaults to the "default" database.'),
    )
    def handle(self, *fixture_labels, **options):
        """Load every fixture label into the selected database.

        Each label may name a serialization format and/or compression
        extension (e.g. `data.json.gz`); otherwise all known formats and
        compressions are tried in every fixture directory.
        """
        using = options.get('database')
        connection = connections[using]
        self.style = no_style()
        if not len(fixture_labels):
            self.stderr.write(
                self.style.ERROR("No database fixture specified. Please provide the path of at least one fixture in the command line.\n")
            )
            return
        verbosity = int(options.get('verbosity'))
        show_traceback = options.get('traceback')
        # commit is a stealth option - it isn't really useful as
        # a command line option, but it can be useful when invoking
        # loaddata from within another script.
        # If commit=True, loaddata will use its own transaction;
        # if commit=False, the data load SQL will become part of
        # the transaction in place when loaddata was invoked.
        commit = options.get('commit', True)
        # Keep a count of the installed objects and fixtures
        fixture_count = 0
        loaded_object_count = 0
        fixture_object_count = 0
        models = set()
        humanize = lambda dirname: "'%s'" % dirname if dirname else 'absolute path'
        # Get a cursor (even though we don't need one yet). This has
        # the side effect of initializing the test database (if
        # it isn't already initialized).
        cursor = connection.cursor()
        # Start transaction management. All fixtures are installed in a
        # single transaction to ensure that all references are resolved.
        if commit:
            transaction.commit_unless_managed(using=using)
            transaction.enter_transaction_management(using=using)
            transaction.managed(True, using=using)
        # Adapter that makes a one-file zip archive look like a plain file
        class SingleZipReader(zipfile.ZipFile):
            def __init__(self, *args, **kwargs):
                zipfile.ZipFile.__init__(self, *args, **kwargs)
                if settings.DEBUG:
                    assert len(self.namelist()) == 1, "Zip-compressed fixtures must contain only one file."
            def read(self):
                return zipfile.ZipFile.read(self, self.namelist()[0])
        # Map compression extension -> callable that opens the fixture file
        compression_types = {
            None: open,
            'gz': gzip.GzipFile,
            'zip': SingleZipReader
        }
        if has_bz2:
            compression_types['bz2'] = bz2.BZ2File
        app_module_paths = []
        for app in get_apps():
            if hasattr(app, '__path__'):
                # It's a 'models/' subpackage
                for path in app.__path__:
                    app_module_paths.append(path)
            else:
                # It's a models.py module
                app_module_paths.append(app.__file__)
        app_fixtures = [os.path.join(os.path.dirname(path), 'fixtures') for path in app_module_paths]
        try:
            with connection.constraint_checks_disabled():
                for fixture_label in fixture_labels:
                    parts = fixture_label.split('.')
                    if len(parts) > 1 and parts[-1] in compression_types:
                        compression_formats = [parts[-1]]
                        parts = parts[:-1]
                    else:
                        compression_formats = compression_types.keys()
                    if len(parts) == 1:
                        fixture_name = parts[0]
                        formats = serializers.get_public_serializer_formats()
                    else:
                        fixture_name, format = '.'.join(parts[:-1]), parts[-1]
                        if format in serializers.get_public_serializer_formats():
                            formats = [format]
                        else:
                            formats = []
                    if formats:
                        if verbosity >= 2:
                            self.stdout.write("Loading '%s' fixtures...\n" % fixture_name)
                    else:
                        self.stderr.write(
                            self.style.ERROR("Problem installing fixture '%s': %s is not a known serialization format.\n" %
                                (fixture_name, format)))
                        if commit:
                            transaction.rollback(using=using)
                            transaction.leave_transaction_management(using=using)
                        return
                    if os.path.isabs(fixture_name):
                        fixture_dirs = [fixture_name]
                    else:
                        fixture_dirs = app_fixtures + list(settings.FIXTURE_DIRS) + ['']
                    for fixture_dir in fixture_dirs:
                        if verbosity >= 2:
                            self.stdout.write("Checking %s for fixtures...\n" % humanize(fixture_dir))
                        label_found = False
                        # Try every combination of database suffix, format
                        # and compression until a matching file opens
                        for combo in product([using, None], formats, compression_formats):
                            database, format, compression_format = combo
                            file_name = '.'.join(
                                p for p in [
                                    fixture_name, database, format, compression_format
                                ]
                                if p
                            )
                            if verbosity >= 3:
                                self.stdout.write("Trying %s for %s fixture '%s'...\n" % \
                                    (humanize(fixture_dir), file_name, fixture_name))
                            full_path = os.path.join(fixture_dir, file_name)
                            open_method = compression_types[compression_format]
                            try:
                                fixture = open_method(full_path, 'r')
                            except IOError:
                                if verbosity >= 2:
                                    self.stdout.write("No %s fixture '%s' in %s.\n" % \
                                        (format, fixture_name, humanize(fixture_dir)))
                            else:
                                try:
                                    if label_found:
                                        self.stderr.write(self.style.ERROR("Multiple fixtures named '%s' in %s. Aborting.\n" %
                                            (fixture_name, humanize(fixture_dir))))
                                        if commit:
                                            transaction.rollback(using=using)
                                            transaction.leave_transaction_management(using=using)
                                        return
                                    fixture_count += 1
                                    objects_in_fixture = 0
                                    loaded_objects_in_fixture = 0
                                    if verbosity >= 2:
                                        self.stdout.write("Installing %s fixture '%s' from %s.\n" % \
                                            (format, fixture_name, humanize(fixture_dir)))
                                    objects = serializers.deserialize(format, fixture, using=using)
                                    for obj in objects:
                                        objects_in_fixture += 1
                                        if router.allow_syncdb(using, obj.object.__class__):
                                            loaded_objects_in_fixture += 1
                                            models.add(obj.object.__class__)
                                            try:
                                                obj.save(using=using)
                                            except (DatabaseError, IntegrityError) as e:
                                                msg = "Could not load %(app_label)s.%(object_name)s(pk=%(pk)s): %(error_msg)s" % {
                                                    'app_label': obj.object._meta.app_label,
                                                    'object_name': obj.object._meta.object_name,
                                                    'pk': obj.object.pk,
                                                    'error_msg': e
                                                }
                                                # Python 2 three-argument raise: same exception
                                                # type, enriched message, original traceback
                                                raise e.__class__, e.__class__(msg), sys.exc_info()[2]
                                    loaded_object_count += loaded_objects_in_fixture
                                    fixture_object_count += objects_in_fixture
                                    label_found = True
                                finally:
                                    fixture.close()
                                # If the fixture we loaded contains 0 objects, assume that an
                                # error was encountered during fixture loading.
                                if objects_in_fixture == 0:
                                    self.stderr.write(
                                        self.style.ERROR("No fixture data found for '%s'. (File format may be invalid.)\n" %
                                            (fixture_name)))
                                    if commit:
                                        transaction.rollback(using=using)
                                        transaction.leave_transaction_management(using=using)
                                    return
            # Since we disabled constraint checks, we must manually check for
            # any invalid keys that might have been added
            table_names = [model._meta.db_table for model in models]
            connection.check_constraints(table_names=table_names)
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception:
            if commit:
                transaction.rollback(using=using)
                transaction.leave_transaction_management(using=using)
            if show_traceback:
                traceback.print_exc()
            else:
                self.stderr.write(
                    self.style.ERROR("Problem installing fixture '%s': %s\n" %
                        (full_path, ''.join(traceback.format_exception(sys.exc_type,
                            sys.exc_value, sys.exc_traceback)))))
            return
        # If we found even one object in a fixture, we need to reset the
        # database sequences.
        if loaded_object_count > 0:
            sequence_sql = connection.ops.sequence_reset_sql(self.style, models)
            if sequence_sql:
                if verbosity >= 2:
                    self.stdout.write("Resetting sequences\n")
                for line in sequence_sql:
                    cursor.execute(line)
        if commit:
            transaction.commit(using=using)
            transaction.leave_transaction_management(using=using)
        if verbosity >= 1:
            if fixture_object_count == loaded_object_count:
                self.stdout.write("Installed %d object(s) from %d fixture(s)\n" % (
                    loaded_object_count, fixture_count))
            else:
                self.stdout.write("Installed %d object(s) (of %d) from %d fixture(s)\n" % (
                    loaded_object_count, fixture_object_count, fixture_count))
        # Close the DB connection. This is required as a workaround for an
        # edge case in MySQL: if the same connection is used to
        # create tables, load data, and query, the query can return
        # incorrect results. See Django #7572, MySQL #37735.
        if commit:
            connection.close()
| |
import re
import string
import math
from operator import itemgetter
from itertools import groupby
import itertools
import operator
import foofah_utils
# Unit costs of the individual edit operations used when computing the edit
# distance between two tables.
COST_DELETE_EXISTING_CELL = 1
COST_DELETE_CELL = 1
COST_DELETE_EMPTY = 1
COST_ADD_EMPTY = 1
COST_MOVE_EMPTY = 1
COST_MOVE_CELL = 1
COST_SPLIT = 1
COST_MERGE = 1
COST_COPY = 1
COST_MOVE_CELL_HORIZONTAL_1 = 1
# Feature flags for optional C++ implementations of the cost routines.
cost_data_transform_cpp = False
cost_move_cpp = False
cost_edit_op_cpp = False
# When True, extra debugging output is printed.
debug_print = False
# Sentinel cost for transformations that must never be chosen.
COST_IMPOSSIBLE = 100000
class TableNode:
    """A single table cell: its contents plus its (row, col) position."""
    def __init__(self, data, row=None, col=None):
        self.data = data
        # Identity-based id, unique per live object
        self.id = id(self)
        self.row = row
        self.col = col
    def __str__(self):
        return "'%s' (%d,%d)" % (self.data, self.row, self.col)
class TableGraph:
    """Graph view of a 2D table: wraps every cell in a TableNode and exposes
    edit-distance computations against another TableGraph."""
    def __init__(self, table):
        self.cells = []
        self.data_set = set()
        for rid, row in enumerate(table):
            for cid, cell in enumerate(row):
                cell_node = TableNode(cell, rid, cid)
                self.cells.append(cell_node)
        self.cell_set = set(self.cells)
        self.cells = tuple(self.cells)
        self.row_num = len(table)
        self.col_num = len(table[0])
    def __str__(self):
        # NOTE(review): self.graph is never assigned anywhere in this class,
        # so calling __str__ raises AttributeError — confirm intended source.
        return str(list(self.graph.edges()))
    def nodes(self):
        # All cell nodes, in row-major order
        return self.cells
    def nodes_set(self):
        return self.cell_set
    def graph_edit_distance(self, other):
        return graph_edit_distance(self, other)
    def graph_edit_distance_greedy(self, other, batch=False):
        if batch:
            return clustered_maps(graph_edit_distance_greedy(self, other)[0], self, other)
        return graph_edit_distance_greedy(self, other)
    def batch_graph_edit_distance_greedy(self, other):
        return clustered_maps(graph_edit_distance_greedy(self, other)[0], self, other)
    # `a - b`: exact edit distance
    def __sub__(self, other):
        return self.graph_edit_distance(other)
    # `a >> b`: greedy edit distance
    def __rshift__(self, other):
        return self.graph_edit_distance_greedy(other)
# Print a single node mapping, e.g. "a (0,1) -> b (2,3)"; a missing side
# (None) is printed as "empty". Python 2 print statements.
def print_map(edge):
    if edge[0] and edge[1]:
        print edge[0].data, "(%d,%d)" % (edge[0].row, edge[0].col), "->", edge[1].data, "(%d,%d)" % (
            edge[1].row, edge[1].col)
    elif edge[0]:
        print edge[0].data, "(%d,%d)" % (edge[0].row, edge[0].col), "->", "empty"
    else:
        print "empty", "->", edge[1].data, "(%d,%d)" % (edge[1].row, edge[1].col)
# Print an entire edit path (sequence of (from, to, op) triples) followed by
# its total cost, or a message when no transformation exists. Python 2 print.
def print_path(path):
    if path:
        for edge in path:
            if edge[0] and edge[1]:
                print str(edge[0]), "->", str(edge[1]), "%", edge[2]
            elif edge[0]:
                print str(edge[0]), "->", "empty", "%", edge[2]
            else:
                print "empty", "->", str(edge[1]), "%", edge[2]
        print "Actual Cost:", cost_edit_path(path)
    else:
        print "No Transformation Available"
# Transformation pattern tags of the form <source>_2_<target>; R, C and T
# presumably stand for row, column and table — TODO confirm against callers.
PATTERN_R_2_C = "PATTERN_R_2_C"
PATTERN_R_2_R = "PATTERN_R_2_R"
PATTERN_R_2_T = "PATTERN_R_2_T"
PATTERN_C_2_C = "PATTERN_C_2_C"
PATTERN_C_2_R = "PATTERN_C_2_R"
PATTERN_C_2_T = "PATTERN_C_2_T"
PATTERN_T_2_C = "PATTERN_T_2_C"
PATTERN_T_2_R = "PATTERN_T_2_R"
PATTERN_T_2_T = "PATTERN_T_2_T"
def divide_if_identical_col(path, id=0):
    """Split `path` into groups of pairs that share the same column (column
    of the cell at tuple index `id`). Sorts `path` in place by that column
    first, so each group is a maximal equal-column run.

    NOTE(review): the `id` parameter shadows the builtin; kept for
    interface compatibility.
    """
    groups = []
    path.sort(key=lambda x: x[id].col)
    # Python 2 tuple-unpacking lambda; the groupby key is the column value
    for k, g in groupby(enumerate(path), lambda (i, x): x[id].col):
        groups.append(map(itemgetter(1), g))
    return groups
def divide_if_identical_row(path, id=0):
    """Split `path` into groups of pairs that share the same row (row of the
    cell at tuple index `id`). Sorts `path` in place by that row first.
    """
    groups = []
    path.sort(key=lambda x: x[id].row)
    # Python 2 tuple-unpacking lambda; the groupby key is the row value
    for k, g in groupby(enumerate(path), lambda (i, x): x[id].row):
        groups.append(map(itemgetter(1), g))
    return groups
def divide_if_discontinuous_col(path, id=0):
    """Split *path* into runs whose node columns are consecutive integers.

    Only sorts (in place, by column) when grouping on the source side
    (id == 0), matching the original behavior.  The grouping key
    ``index - col`` is constant exactly along a consecutive column run.
    Rewritten without Python-2-only tuple-parameter lambdas.
    """
    groups = []
    if id == 0:
        path.sort(key=lambda x: x[id].col)
    for k, g in groupby(enumerate(path), lambda pair: pair[0] - pair[1][id].col):
        groups.append([item for _, item in g])
    return groups
def divide_if_discontinuous_row(path, c_id=0):
    """Split *path* into runs whose node rows are consecutive integers.

    Only sorts (in place, by row) when grouping on the source side
    (c_id == 0), matching the original behavior.  The grouping key
    ``index - row`` is constant exactly along a consecutive row run.
    Rewritten without Python-2-only tuple-parameter lambdas.
    """
    groups = []
    if c_id == 0:
        path.sort(key=lambda x: x[c_id].row)
    for k, g in groupby(enumerate(path), lambda pair: pair[0] - pair[1][c_id].row):
        groups.append([item for _, item in g])
    return groups
def func_1(table_graph):
    """Column accessor used as a sort/group key: the node's column, or -1
    for a missing (falsy) node."""
    return table_graph.col if table_graph else -1
def func_2(table_graph):
    """Row accessor used as a sort/group key: the node's row, or -1 for a
    missing (falsy) node."""
    return table_graph.row if table_graph else -1
def cluster_by_columns(path, i=0, continuous=False, identical_row=False):
    """Group the mappings in *path* by the column of the node at tuple index
    *i* (mappings with a falsy node are dropped).

    With continuous=True each column group is further split into row-
    consecutive runs; with identical_row=True into identical-row runs;
    otherwise the raw column buckets are returned.
    """
    by_col = {}
    for mapping in path:
        node = mapping[i]
        if node:
            by_col.setdefault(node.col, []).append(mapping)
    if continuous:
        subdivided = []
        for bucket in by_col.values():
            subdivided += divide_if_discontinuous_row(bucket, i)
        return subdivided
    if identical_row:
        subdivided = []
        for bucket in by_col.values():
            subdivided += divide_if_identical_row(bucket, i)
        return subdivided
    return by_col.values()
def cluster_by_rows(path, i=0, continuous=False, identical_row=False):
    """Group the mappings in *path* by the row of the node at tuple index *i*
    (mappings with a falsy node are dropped).

    With continuous=True each row group is further split into column-
    consecutive runs; with identical_row=True into identical-column runs;
    otherwise the raw row buckets are returned.
    """
    by_row = {}
    for mapping in path:
        node = mapping[i]
        if node:
            by_row.setdefault(node.row, []).append(mapping)
    if continuous:
        subdivided = []
        for bucket in by_row.values():
            subdivided += divide_if_discontinuous_col(bucket, i)
        return subdivided
    if identical_row:
        subdivided = []
        for bucket in by_row.values():
            subdivided += divide_if_identical_col(bucket, i)
        return subdivided
    return by_row.values()
def cluster_by_types(path):
    """Partition an edit path into groups of mappings sharing the same
    MAP_TYPE_* tag (tuple element 2).  Returns a list of lists, ordered by
    ascending tag."""
    type_key = lambda mapping: mapping[2]
    clusters = []
    for _, members in groupby(sorted(path, key=type_key), type_key):
        clusters.append(list(members))
    return clusters
def clustered_maps(path, orig_table, target_table):
    """Cluster a greedy edit path into batch patterns and re-price it.

    For each mapping type, scans the (src_row, src_col, dst_row, dst_col)
    tuples under several sort orders, collecting maximal runs where both
    source and destination advance in lockstep (horizontal->horizontal,
    one->horizontal, horizontal->vertical, vertical->vertical, one->vertical,
    vertical->horizontal); removals are additionally grouped by column.
    Runs longer than one mapping become candidate patterns; non-overlapping
    patterns are then selected greedily (largest first) and each selected
    pattern contributes its mean mapping cost, so clustered operations are
    cheaper than pricing every mapping individually.

    Returns (path, cost) — note the ORIGINAL path is returned, not the
    selected final_group (kept as-is; callers appear to use only the cost).
    orig_table / target_table are currently unused.  Python 2 print
    statements; `debug_print` is a module-level flag.
    """
    patterns = []
    # Index every mapping by its coordinate 4-tuple for O(1) run extension.
    mv_dict = {}
    for pair in path:
        if pair[0] and pair[1]:
            mv_dict[(pair[0].row, pair[0].col, pair[1].row, pair[1].col)] = pair
        elif pair[0]:
            mv_dict[(pair[0].row, pair[0].col, None, None)] = pair
        elif pair[1]:
            mv_dict[(None, None, pair[1].row, pair[1].col)] = pair
    # Separate by types
    for group in cluster_by_types(path):
        input_output_set = []
        for pair in group:
            if pair[0] and pair[1]:
                input_output_set.append((pair[0].row, pair[0].col, pair[1].row, pair[1].col))
            elif pair[0]:
                input_output_set.append((pair[0].row, pair[0].col, None, None))
            elif pair[1]:
                input_output_set.append((None, None, pair[1].row, pair[1].col))
        # Moves / merges / splits / unknowns can form geometric runs.
        if group[0][2] == MAP_TYPE_MV or group[0][2] == MAP_TYPE_MER or group[0][2] == MAP_TYPE_SPL or group[0][
                2] == MAP_TYPE_UNKNOWN:
            # Row major input table
            i_row_o_row = sorted(input_output_set, key=lambda x: (x[0], x[1], x[2], x[3]))
            temp_path = [mv_dict[i_row_o_row[0]]]
            base = i_row_o_row[0]
            i = 1
            while i < len(i_row_o_row):
                # H to H
                if i_row_o_row[i] == (base[0], base[1] + len(temp_path), base[2], base[3] + len(temp_path)):
                    temp_path.append(mv_dict[(base[0], base[1] + len(temp_path), base[2], base[3] + len(temp_path))])
                else:
                    if len(temp_path) > 1:
                        patterns.append(list(temp_path))
                    base = i_row_o_row[i]
                    temp_path = [mv_dict[i_row_o_row[i]]]
                i += 1
            if len(temp_path) > 1:
                patterns.append(list(temp_path))
            # One-source fan-outs make no sense for merges/splits.
            if group[0][2] != MAP_TYPE_MER and group[0][2] != MAP_TYPE_SPL:
                temp_path = [mv_dict[i_row_o_row[0]]]
                base = i_row_o_row[0]
                i = 1
                while i < len(i_row_o_row):
                    # One to H
                    if i_row_o_row[i] == (base[0], base[1], base[2], base[3] + len(temp_path)):
                        temp_path.append(mv_dict[(base[0], base[1], base[2], base[3] + len(temp_path))])
                    else:
                        if len(temp_path) > 1:
                            patterns.append(list(temp_path))
                        base = i_row_o_row[i]
                        temp_path = [mv_dict[i_row_o_row[i]]]
                    i += 1
                if len(temp_path) > 1:
                    patterns.append(list(temp_path))
            i_row_o_col = sorted(input_output_set, key=lambda x: (x[0], x[1], x[3], x[2]))
            temp_path = [mv_dict[i_row_o_col[0]]]
            base = i_row_o_col[0]
            i = 1
            while i < len(i_row_o_col):
                # H to V
                if i_row_o_col[i] == (base[0], base[1] + len(temp_path), base[2] + len(temp_path), base[3]):
                    temp_path.append(mv_dict[(base[0], base[1] + len(temp_path), base[2] + len(temp_path), base[3])])
                else:
                    if len(temp_path) > 1:
                        patterns.append(list(temp_path))
                    base = i_row_o_col[i]
                    temp_path = [mv_dict[i_row_o_col[i]]]
                i += 1
            if len(temp_path) > 1:
                patterns.append(list(temp_path))
            # Sort column major of input table
            i_col_o_col = sorted(input_output_set, key=lambda x: (x[1], x[0], x[3], x[2]))
            temp_path = [mv_dict[i_col_o_col[0]]]
            base = i_col_o_col[0]
            i = 1
            while i < len(i_col_o_col):
                # V to V
                if i_col_o_col[i] == (base[0] + len(temp_path), base[1], base[2] + len(temp_path), base[3]):
                    temp_path.append(mv_dict[(base[0] + len(temp_path), base[1], base[2] + len(temp_path), base[3])])
                else:
                    if len(temp_path) > 1:
                        patterns.append(list(temp_path))
                    base = i_col_o_col[i]
                    temp_path = [mv_dict[i_col_o_col[i]]]
                i += 1
            if len(temp_path) > 1:
                patterns.append(list(temp_path))
            # Sort column major of output table
            i_col_o_col = sorted(input_output_set, key=lambda x: (x[3], x[2], x[1], x[0]))
            temp_path = [mv_dict[i_col_o_col[0]]]
            base = i_col_o_col[0]
            i = 1
            while i < len(i_col_o_col):
                # V to V
                if i_col_o_col[i] == (base[0] + len(temp_path), base[1], base[2] + len(temp_path), base[3]):
                    temp_path.append(mv_dict[(base[0] + len(temp_path), base[1], base[2] + len(temp_path), base[3])])
                else:
                    if len(temp_path) > 1:
                        patterns.append(list(temp_path))
                    base = i_col_o_col[i]
                    temp_path = [mv_dict[i_col_o_col[i]]]
                i += 1
            if len(temp_path) > 1:
                patterns.append(list(temp_path))
            if group[0][2] != MAP_TYPE_MER and group[0][2] != MAP_TYPE_SPL:
                temp_path = [mv_dict[i_col_o_col[0]]]
                base = i_col_o_col[0]
                i = 1
                while i < len(i_col_o_col):
                    # One to V
                    if i_col_o_col[i] == (base[0], base[1], base[2] + len(temp_path), base[3]):
                        temp_path.append(mv_dict[(base[0], base[1], base[2] + len(temp_path), base[3])])
                    else:
                        if len(temp_path) > 1:
                            patterns.append(list(temp_path))
                        base = i_col_o_col[i]
                        temp_path = [mv_dict[i_col_o_col[i]]]
                    i += 1
                if len(temp_path) > 1:
                    patterns.append(list(temp_path))
            i_col_o_row = sorted(input_output_set, key=lambda x: (x[1], x[0], x[2], x[3]))
            temp_path = [mv_dict[i_col_o_row[0]]]
            base = i_col_o_row[0]
            i = 1
            while i < len(i_col_o_row):
                # V to H
                if i_col_o_row[i] == (base[0] + len(temp_path), base[1], base[2], base[3] + len(temp_path)):
                    temp_path.append(mv_dict[(base[0] + len(temp_path), base[1], base[2], base[3] + len(temp_path))])
                else:
                    if len(temp_path) > 1:
                        patterns.append(list(temp_path))
                    base = i_col_o_row[i]
                    temp_path = [mv_dict[i_col_o_row[i]]]
                i += 1
            if len(temp_path) > 1:
                patterns.append(list(temp_path))
            i_col_o_row = sorted(input_output_set, key=lambda x: (x[2], x[3], x[1], x[0]))
            temp_path = [mv_dict[i_col_o_row[0]]]
            base = i_col_o_row[0]
            i = 1
            while i < len(i_col_o_row):
                # V to H
                if i_col_o_row[i] == (base[0] + len(temp_path), base[1], base[2], base[3] + len(temp_path)):
                    temp_path.append(mv_dict[(base[0] + len(temp_path), base[1], base[2], base[3] + len(temp_path))])
                else:
                    if len(temp_path) > 1:
                        patterns.append(list(temp_path))
                    base = i_col_o_row[i]
                    temp_path = [mv_dict[i_col_o_row[i]]]
                i += 1
            if len(temp_path) > 1:
                patterns.append(list(temp_path))
        if group[0][2] == MAP_TYPE_RM:
            temp = sorted(input_output_set, key=operator.itemgetter(1))
            # Group Removes by Column
            for key, g in itertools.groupby(temp, operator.itemgetter(1)):
                temp_path = []
                for t in list(g):
                    temp_path.append(mv_dict[t])
                if len(temp_path) > 1:
                    patterns.append(list(temp_path))
    # Determine the final groups
    # Greedy selection: largest patterns first, skipping any that share a
    # mapping with an already-selected pattern.
    patterns.sort(key=lambda t: len(t), reverse=True)
    final_group = []
    cost = 0
    overlaps = set()
    for group in patterns:
        if not (set(group) & overlaps):
            overlaps = overlaps.union(set(group))
            final_group.append(group)
            # A selected pattern costs the MEAN of its mappings' costs
            # (mapping[3]), rewarding long uniform runs.
            cost += sum([mapping[3] for mapping in group]) / float(len(group))
            if debug_print:
                print "*" * 20
                print_path(group)
                print
    if debug_print and set(path) - overlaps:
        print "*" * 20, "Remains"
        print print_path(set(path) - overlaps)
    # Unclustered mappings are charged at full individual cost.
    cost += sum([mapping[3] for mapping in (set(path) - overlaps)])
    return path, cost
def tokenize(a, first=False):
if not a:
return [""]
if first:
return re.split('[' + string.punctuation + string.whitespace + ']*', a, 1)
else:
return re.split('[' + string.punctuation + string.whitespace + ']*', a)
# Mapping type tags attached to each edit operation (element 2 of a path
# tuple): move, merge (src is substring of dst), split (dst is substring of
# src), unknown transform, removal, addition.
MAP_TYPE_MV = 1
MAP_TYPE_MER = 2
MAP_TYPE_SPL = 3
MAP_TYPE_UNKNOWN = 4
MAP_TYPE_RM = 5
MAP_TYPE_ADD = 6
# Cost of substitution
def cost_data_transform(str1, str2, use_cpp=cost_data_transform_cpp):
    """Return (cost, MAP_TYPE_*) for turning cell text str1 into str2.

    Identical strings cost 0 (move); a substring relation costs
    COST_MERGE/COST_SPLIT; if every token of either string occurs in the
    other, a combined merge+split cost is charged; anything else is
    COST_IMPOSSIBLE.  With use_cpp (module-level default
    cost_data_transform_cpp) the native foofah_utils helper is used instead.
    """
    if use_cpp:
        return foofah_utils.cost_data_transform(str1, str2)
    if str1 == str2:
        return 0, MAP_TYPE_MV
    elif not str1 or not str2:
        # One side empty (and they differ): no textual transform applies.
        return COST_IMPOSSIBLE, MAP_TYPE_UNKNOWN
    elif str1 in str2:
        return COST_MERGE, MAP_TYPE_MER
    elif str2 in str1:
        return COST_SPLIT, MAP_TYPE_SPL
    else:
        token_1 = tokenize(str1)
        token_2 = tokenize(str2)
        # not_found_1: some token of str1 is absent from str2 (or str1 has no
        # non-empty tokens at all).
        not_found_1 = False
        if_all_empty = True
        for token in token_1:
            if token:
                if_all_empty = False
                if token not in str2:
                    not_found_1 = True
                    break
        if if_all_empty:
            not_found_1 = True
        not_found_2 = False
        if_all_empty = True
        for token in token_2:
            if token:
                if_all_empty = False
                if token not in str1:
                    not_found_2 = True
                    break
        if if_all_empty:
            not_found_2 = True
        # If either string's tokens are fully covered by the other, treat it
        # as a (more expensive) merge-then-split rewrite.
        if not not_found_1 or not not_found_2:
            return COST_MERGE + COST_SPLIT, MAP_TYPE_UNKNOWN
        return COST_IMPOSSIBLE, MAP_TYPE_UNKNOWN
# Cost of relocating a node (positional component of a substitution)
def cost_move(node_1, node_2, use_cpp=cost_move_cpp):
    """Positional cost of mapping node_1 onto node_2.

    A one-column horizontal shift of a non-empty cell is cheap
    (COST_MOVE_CELL_HORIZONTAL_1); any other relocation of a non-empty cell
    costs COST_MOVE_CELL; relocating an empty cell costs COST_MOVE_EMPTY; an
    in-place mapping costs 0.  Delegates to the native helper when use_cpp
    (module-level default cost_move_cpp) is set.
    """
    if use_cpp:
        return foofah_utils.cost_move(node_1.row, node_1.col, node_2.row, node_2.col, node_1.data)
    cost = 0
    # Moving empty space shouldn't count
    if node_1.data:
        if math.fabs(node_1.col - node_2.col) == 1 and node_1.row == node_2.row:
            cost += COST_MOVE_CELL_HORIZONTAL_1
        elif node_1.row != node_2.row or node_1.col != node_2.col:
            cost += COST_MOVE_CELL
    else:
        if node_1.row != node_2.row or node_1.col != node_2.col:
            cost += COST_MOVE_EMPTY
    return cost
# Calculate the cost of path
def cost_edit_op(operation, target=None, use_cpp=cost_edit_op_cpp):
    """Return (cost, MAP_TYPE_*) for a single edit operation (src, dst).

    Substitution = data-transform cost + move cost; deleting a non-empty /
    empty cell costs COST_DELETE_CELL / COST_DELETE_EMPTY (MAP_TYPE_RM);
    materializing a non-empty cell from nothing is COST_IMPOSSIBLE and an
    empty one COST_ADD_EMPTY (MAP_TYPE_ADD).  `target` is unused here.
    Delegates to the native helper when use_cpp is set, encoding a missing
    side as (-1, -1, "").
    """
    cost = 0
    if use_cpp:
        if operation[0] and operation[1]:
            return foofah_utils.cost_edit_op(operation[0].row, operation[0].col, operation[0].data, operation[1].row,
                                             operation[1].col, operation[1].data)
        elif operation[0]:
            return foofah_utils.cost_edit_op(operation[0].row, operation[0].col, operation[0].data, -1, -1, "")
        elif operation[1]:
            return foofah_utils.cost_edit_op(-1, -1, "", operation[1].row, operation[1].col, operation[1].data)
        else:
            return foofah_utils.cost_edit_op(-1, -1, "", -1, -1, "")
    if operation[0] and operation[1]:
        new_cost, map_type = cost_data_transform(operation[0].data, operation[1].data)
        cost += new_cost
        # Short-circuit: no point pricing the move if the transform is
        # already impossible.
        if cost >= COST_IMPOSSIBLE:
            return cost, map_type
        cost += cost_move(operation[0], operation[1])
    elif operation[0] and operation[0].data:
        cost += COST_DELETE_CELL
        map_type = MAP_TYPE_RM
    elif operation[0] and not operation[0].data:
        cost += COST_DELETE_EMPTY
        map_type = MAP_TYPE_RM
    elif operation[1] and operation[1].data:
        cost += COST_IMPOSSIBLE
        map_type = MAP_TYPE_ADD
    else:
        cost += COST_ADD_EMPTY
        map_type = MAP_TYPE_ADD
    return cost, map_type
# Calculate the cost of path
def cost_edit_path(edit_path, target=None):
    """Total cost of an edit path: sum of per-operation costs using the same
    pricing as cost_edit_op (but without the type tag).

    Returns early as soon as the accumulated cost reaches COST_IMPOSSIBLE.
    `target` is unused.
    """
    cost = 0
    for operation in edit_path:
        if operation[0] and operation[1]:
            new_cost, sub_type = cost_data_transform(operation[0].data, operation[1].data)
            cost += new_cost
            if cost >= COST_IMPOSSIBLE:
                return cost
            cost += cost_move(operation[0], operation[1])
        elif operation[0] and operation[0].data:
            cost += COST_DELETE_CELL
        elif operation[0] and not operation[0].data:
            cost += COST_DELETE_EMPTY
        elif operation[1] and operation[1].data:
            cost += COST_IMPOSSIBLE
        else:
            cost += COST_ADD_EMPTY
    return cost
# Check unprocessed nodes in graph u and v
def check_unprocessed(u, v, path):
    """Given graphs *u*, *v* and a partial edit path of (u_node, v_node, ...)
    tuples (either side may be None), return the pair of lists
    (u nodes not yet mapped, v nodes not yet mapped)."""
    seen_u = set()
    seen_v = set()
    for operation in path:
        src, dst = operation[0], operation[1]
        if src:
            seen_u.add(src)
        if dst:
            seen_v.add(dst)
    return list(u.nodes_set() - seen_u), list(v.nodes_set() - seen_v)
# More greedy edit distance graph
def graph_edit_distance_greedy(u, v):
    """Greedy approximation of the table-graph edit distance from u to v.

    Consumes v's nodes in order; for each, picks the cheapest mapping from
    the still-unprocessed u nodes (or an insertion).  If an exact data match
    (MAP_TYPE_MV) exists among the candidates, cheaper non-move choices are
    discarded until a move is picked; if the best choice is still a
    transform (unknown/split/merge), ALL of u's nodes — including already
    used ones — are reconsidered, allowing copies/splits/merges from old
    sources.  Leftover v nodes are produced from old u nodes or insertions;
    leftover u nodes are deleted.  Returns (chosen_path, total_cost) where
    each path entry is (u_node_or_None, v_node_or_None, map_type, cost).
    """
    chosen_path = []
    chosen_path_cost = 0
    # For each node w in u, insert the substitution {w -> v1} into OPEN
    v1 = v.nodes()[0]
    possible_path = []
    possible_path_cost = []
    for w in u.nodes():
        edit_op = (w, v1)
        new_cost, map_type = cost_edit_op(edit_op, v)
        if map_type == MAP_TYPE_MV:
            # NOTE(review): set here but not consulted for the v1 choice
            # below — only the main loop enforces exact-match preference.
            if_exact_match_found = True
        new_path = (w, v1, map_type, new_cost)
        possible_path.append(new_path)
        possible_path_cost.append(new_cost)
    # Comes out of nowhere
    edit_op = (None, v1)
    new_cost, map_type = cost_edit_op(edit_op, v)
    edit_path = (None, v1, map_type, new_cost)
    possible_path.append(edit_path)
    possible_path_cost.append(new_cost)
    path_idx = possible_path_cost.index(min(possible_path_cost))
    # The cheapest operation is not a move when exact match exists, we keep finding the second cheapest until we find
    # the move
    chosen_path.append(possible_path[path_idx])
    chosen_path_cost += possible_path_cost[path_idx]
    unprocessed_u = list(u.nodes())
    unprocessed_v = list(v.nodes())
    if possible_path[path_idx][0] in unprocessed_u:
        unprocessed_u.remove(possible_path[path_idx][0])
    unprocessed_v.pop(0)
    while unprocessed_v and unprocessed_u:
        v_next = unprocessed_v.pop(0)
        possible_path = []
        possible_path_cost = []
        if_exact_match_found = False
        for u_next in unprocessed_u:
            edit_op = (u_next, v_next)
            new_cost, map_type = cost_edit_op(edit_op, v)
            if map_type == MAP_TYPE_MV:
                if_exact_match_found = True
            new_path = (u_next, v_next, map_type, new_cost)
            possible_path.append(new_path)
            possible_path_cost.append(new_cost)
            # Zero-cost mapping cannot be beaten; stop scanning.
            if new_cost <= 0:
                break
        edit_op = (None, v_next)
        new_cost, map_type = cost_edit_op(edit_op, v)
        new_path = (None, v_next, map_type, new_cost)
        possible_path.append(new_path)
        possible_path_cost.append(new_cost)
        path_idx = possible_path_cost.index(min(possible_path_cost))
        # The cheapest operation is not a move when exact match exists, we keep finding the second cheapest until we
        # find the move
        while if_exact_match_found and possible_path[path_idx][2] != MAP_TYPE_MV:
            if len(possible_path_cost) > 1:
                possible_path_cost.pop(path_idx)
                possible_path.pop(path_idx)
                path_idx = possible_path_cost.index(min(possible_path_cost))
            else:
                break
        # We already don't have a good choice in unprocessed v, let's pick one from the old choice
        if possible_path[path_idx][2] == MAP_TYPE_UNKNOWN or possible_path[path_idx][2] == MAP_TYPE_SPL or \
                possible_path[path_idx][2] == MAP_TYPE_MER:
            possible_path_new = []
            possible_path_cost_new = []
            for u_next in u.nodes():
                edit_op = (u_next, v_next)
                new_cost, map_type = cost_edit_op(edit_op, v)
                new_path = (u_next, v_next, map_type, new_cost)
                possible_path_new.append(new_path)
                possible_path_cost_new.append(new_cost)
                if new_cost <= 0:
                    break
            path_idx_new = possible_path_cost_new.index(min(possible_path_cost_new))
            if possible_path_cost_new[path_idx_new] < possible_path_cost[path_idx]:
                chosen_path.append(possible_path_new[path_idx_new])
                chosen_path_cost += possible_path_cost_new[path_idx_new]
                if possible_path_new[path_idx_new][0] in unprocessed_u:
                    unprocessed_u.remove(possible_path_new[path_idx_new][0])
            else:
                chosen_path.append(possible_path[path_idx])
                chosen_path_cost += possible_path_cost[path_idx]
                if possible_path[path_idx][0] in unprocessed_u:
                    unprocessed_u.remove(possible_path[path_idx][0])
        else:
            chosen_path.append(possible_path[path_idx])
            chosen_path_cost += possible_path_cost[path_idx]
            if possible_path[path_idx][0] in unprocessed_u:
                unprocessed_u.remove(possible_path[path_idx][0])
    # If unprocessed_u is empty, but unprocessed_v is not, we transform some of the old u nodes
    if not unprocessed_u and unprocessed_v:
        for v_next in unprocessed_v:
            possible_path = []
            possible_path_cost = []
            for u_old in u.nodes():
                edit_op = (u_old, v_next)
                new_cost, map_type = cost_edit_op(edit_op, v)
                new_path = (u_old, v_next, map_type, new_cost)
                possible_path.append(new_path)
                possible_path_cost.append(new_cost)
            edit_op = (None, v_next)
            new_cost, map_type = cost_edit_op(edit_op, v)
            new_path = (None, v_next, map_type, new_cost)
            possible_path.append(new_path)
            possible_path_cost.append(new_cost)
            path_idx = possible_path_cost.index(min(possible_path_cost))
            chosen_path.append(possible_path[path_idx])
            chosen_path_cost += possible_path_cost[path_idx]
    # If unprocessed_v is empty, but unprocessed_u is not, we kick the rest of unprocessed u out
    if unprocessed_u and not unprocessed_v:
        for u_next in unprocessed_u:
            edit_op = (u_next, None)
            new_cost, map_type = cost_edit_op(edit_op, v)
            new_path = (u_next, None, map_type, new_cost)
            chosen_path.append(new_path)
            chosen_path_cost += new_cost
    if debug_print:
        print_path(chosen_path)
    return chosen_path, chosen_path_cost
def graph_edit_distance(u, v):
    """Exact edit distance between table graphs u and v via best-first search
    over partial edit paths (A*-like, no heuristic).

    OPEN holds partial paths (sets of (u_node, v_node) pairs, either side
    possibly None) alongside their costs; paths reaching COST_IMPOSSIBLE are
    pruned.  The cheapest complete path — one covering every node of both
    graphs — is returned as (path, cost); (None, None) if OPEN is exhausted.
    """
    # Partial edit path
    open_set = []
    cost_open_set = []
    # For each node w in V2, insert the substitution {u1 -> w} into OPEN
    u1 = u.nodes()[0]
    for w in v.nodes():
        edit_path = set()
        edit_path.add((u1, w))
        new_cost = cost_edit_path(edit_path)
        if new_cost < COST_IMPOSSIBLE:
            open_set.append(edit_path)
            cost_open_set.append(new_cost)
    # Insert the deletion {u1 -> none} into OPEN
    edit_path = set()
    edit_path.add((u1, None))
    new_cost = cost_edit_path(edit_path)
    if new_cost < COST_IMPOSSIBLE:
        open_set.append(edit_path)
        cost_open_set.append(new_cost)
    while cost_open_set:
        # Retrieve minimum-cost partial edit path pmin from OPEN
        path_idx = cost_open_set.index(min(cost_open_set))
        min_path = open_set.pop(path_idx)
        cost = cost_open_set.pop(path_idx)
        # check p_min is a complete edit path
        unprocessed_u, unprocessed_v = check_unprocessed(u, v, min_path)
        if not unprocessed_u and not unprocessed_v:
            # print len(cost_open_set)
            return min_path, cost
        else:
            if unprocessed_u:
                # Expand by mapping one more u node to every remaining v node
                # or deleting it.
                u_next = unprocessed_u.pop()
                for v_next in unprocessed_v:
                    new_path = set(min_path)
                    new_path.add((u_next, v_next))
                    new_cost = cost_edit_path(new_path)
                    if new_cost < COST_IMPOSSIBLE:
                        open_set.append(new_path)
                        cost_open_set.append(new_cost)
                new_path = set(min_path)
                new_path.add((u_next, None))
                new_cost = cost_edit_path(new_path)
                if new_cost < COST_IMPOSSIBLE:
                    open_set.append(new_path)
                    cost_open_set.append(new_cost)
            else:
                # All nodes in u have been processed, but there are nodes in v not been processed
                # They are either copied, split or merged from u
                for v_next in unprocessed_v:
                    for u_old in u.nodes():
                        new_path = set(min_path)
                        new_path.add((u_old, v_next))
                        new_cost = cost_edit_path(new_path)
                        if new_cost < COST_IMPOSSIBLE:
                            open_set.append(new_path)
                            cost_open_set.append(new_cost)
    # OPEN exhausted without a complete path.
    return None, None
| |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import operator
from appengine_url_fetcher import AppEngineUrlFetcher
import url_constants
class ChannelInfo(object):
  '''Represents a Chrome channel with three pieces of information. |channel| is
  one of 'stable', 'beta', 'dev', or 'trunk'. |branch| and |version| correspond
  with each other, and represent different releases of Chrome. Note that
  |branch| and |version| can occasionally be the same for separate channels
  (i.e. 'beta' and 'dev'), so all three fields are required to uniquely
  identify a channel.
  '''

  def __init__(self, channel, branch, version):
    self.channel = channel
    self.branch = branch
    self.version = version

  def __eq__(self, other):
    # Comparing against an arbitrary object (e.g. None, int) must not raise
    # AttributeError on a missing __dict__; defer to the other operand.
    if not isinstance(other, ChannelInfo):
      return NotImplemented
    return self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class BranchUtility(object):
  '''Provides methods for working with Chrome channel, branch, and version
  data served from OmahaProxy.

  Python 2 code (uses dict.iteritems and the positional cmp argument of
  sorted).  Branch and version lookups are cached in object stores created
  per category; the OmahaProxy fetches are kicked off asynchronously in the
  constructor and resolved lazily.
  '''

  def __init__(self, fetch_url, history_url, fetcher, object_store_creator):
    # |fetch_url| serves current per-channel version data; |history_url|
    # serves the historical release event log.
    self._fetcher = fetcher
    def create_object_store(category):
      return object_store_creator.Create(BranchUtility, category=category)
    self._branch_object_store = create_object_store('branch')
    self._version_object_store = create_object_store('version')
    # Start both fetches immediately; .Get() on the futures blocks later.
    self._fetch_result = self._fetcher.FetchAsync(fetch_url)
    self._history_result = self._fetcher.FetchAsync(history_url)

  @staticmethod
  def Create(object_store_creator):
    '''Factory wiring the standard OmahaProxy URLs and AppEngine fetcher.'''
    return BranchUtility(url_constants.OMAHA_PROXY_URL,
                         url_constants.OMAHA_DEV_HISTORY,
                         AppEngineUrlFetcher(),
                         object_store_creator)

  @staticmethod
  def GetAllChannelNames():
    # Ordered oldest-to-newest release channel.
    return ('stable', 'beta', 'dev', 'trunk')

  @staticmethod
  def NewestChannel(channels):
    '''Returns the most recent channel name present in |channels|, or None.'''
    channels = set(channels)
    for channel in reversed(BranchUtility.GetAllChannelNames()):
      if channel in channels:
        return channel

  def Newer(self, channel_info):
    '''Given a ChannelInfo object, returns a new ChannelInfo object
    representing the next most recent Chrome version/branch combination.
    Returns None for 'trunk' (nothing is newer).
    '''
    if channel_info.channel == 'trunk':
      return None
    if channel_info.channel == 'stable':
      stable_info = self.GetChannelInfo('stable')
      # An old stable release advances to the next stable version rather
      # than jumping straight to beta.
      if channel_info.version < stable_info.version:
        return self.GetStableChannelInfo(channel_info.version + 1)
    names = self.GetAllChannelNames()
    return self.GetAllChannelInfo()[names.index(channel_info.channel) + 1]

  def Older(self, channel_info):
    '''Given a ChannelInfo object, returns a new ChannelInfo object
    representing the previous Chrome version/branch combination.
    Returns None before Chrome 5 (no branch data exists).
    '''
    if channel_info.channel == 'stable':
      if channel_info.version <= 5:
        # BranchUtility can't access branch data from before Chrome version 5.
        return None
      return self.GetStableChannelInfo(channel_info.version - 1)
    names = self.GetAllChannelNames()
    return self.GetAllChannelInfo()[names.index(channel_info.channel) - 1]

  @staticmethod
  def SplitChannelNameFromPath(path):
    '''Splits the channel name out of |path|, returning the tuple
    (channel_name, real_path). If the channel cannot be determined then returns
    (None, path).
    '''
    if '/' in path:
      first, second = path.split('/', 1)
    else:
      first, second = (path, '')
    if first in BranchUtility.GetAllChannelNames():
      return (first, second)
    return (None, path)

  def GetAllBranches(self):
    '''Returns ((channel, branch), ...) for every channel.'''
    return tuple((channel, self.GetChannelInfo(channel).branch)
                 for channel in BranchUtility.GetAllChannelNames())

  def GetAllVersions(self):
    '''Returns the version of every channel, in channel order.'''
    return tuple(self.GetChannelInfo(channel).version
                 for channel in BranchUtility.GetAllChannelNames())

  def GetAllChannelInfo(self):
    '''Returns a ChannelInfo for every channel, in channel order.'''
    return tuple(self.GetChannelInfo(channel)
                 for channel in BranchUtility.GetAllChannelNames())

  def GetChannelInfo(self, channel):
    '''Returns the current ChannelInfo for |channel|.'''
    return ChannelInfo(channel,
                       self._ExtractFromVersionJson(channel, 'branch'),
                       self._ExtractFromVersionJson(channel, 'version'))

  def GetStableChannelInfo(self, version):
    '''Given a |version| corresponding to a 'stable' version of Chrome, returns
    a ChannelInfo object representing that version.
    '''
    return ChannelInfo('stable', self.GetBranchForVersion(version), version)

  def _ExtractFromVersionJson(self, channel_name, data_type):
    '''Returns the branch or version number for a channel name.

    Tallies the value reported per OS from the OmahaProxy JSON and returns
    the most common one, caching it in the appropriate object store.  Falls
    back to 'trunk' if the fetch or parse fails.
    '''
    if channel_name == 'trunk':
      return 'trunk'
    if data_type == 'branch':
      object_store = self._branch_object_store
    elif data_type == 'version':
      object_store = self._version_object_store
    data = object_store.Get(channel_name).Get()
    if data is not None:
      return data
    try:
      version_json = json.loads(self._fetch_result.Get().content)
    except Exception as e:
      # This can happen if omahaproxy is misbehaving, which we've seen before.
      # Quick hack fix: just serve from trunk until it's fixed.
      logging.error('Failed to fetch or parse branch from omahaproxy: %s! '
                    'Falling back to "trunk".' % e)
      return 'trunk'
    numbers = {}
    for entry in version_json:
      if entry['os'] not in ['win', 'linux', 'mac', 'cros']:
        continue
      for version in entry['versions']:
        if version['channel'] != channel_name:
          continue
        # 'version' strings look like "A.B.branch.D"; major is field 0,
        # branch is field 2.
        if data_type == 'branch':
          number = version['version'].split('.')[2]
        elif data_type == 'version':
          number = version['version'].split('.')[0]
        # NOTE(review): first sighting counts 0, later ones increment, so
        # every tally is off by one uniformly — the argmax is unaffected.
        if number not in numbers:
          numbers[number] = 0
        else:
          numbers[number] += 1
    # Python 2 sorted(iterable, cmp, key, reverse): most common first.
    sorted_numbers = sorted(numbers.iteritems(),
                            None,
                            operator.itemgetter(1),
                            True)
    object_store.Set(channel_name, int(sorted_numbers[0][0]))
    return int(sorted_numbers[0][0])

  def GetBranchForVersion(self, version):
    '''Returns the most recent branch for a given chrome version number using
    data stored on omahaproxy (see url_constants).
    Raises ValueError when the version has no recorded release event.
    '''
    if version == 'trunk':
      return 'trunk'
    branch = self._branch_object_store.Get(str(version)).Get()
    if branch is not None:
      return branch
    version_json = json.loads(self._history_result.Get().content)
    for entry in version_json['events']:
      # Here, entry['title'] looks like: '<title> - <version>.##.<branch>.##'
      version_title = entry['title'].split(' - ')[1].split('.')
      if version_title[0] == str(version):
        self._branch_object_store.Set(str(version), version_title[2])
        return int(version_title[2])
    raise ValueError('The branch for %s could not be found.' % version)

  def GetChannelForVersion(self, version):
    '''Returns the name of the development channel corresponding to a given
    version number.  Implicitly returns None when no channel matches.
    '''
    for channel_info in self.GetAllChannelInfo():
      # Anything at or below current stable is considered stable.
      if channel_info.channel == 'stable' and version <= channel_info.version:
        return channel_info.channel
      if version == channel_info.version:
        return channel_info.channel

  def GetLatestVersionNumber(self):
    '''Returns the most recent version number found using data stored on
    omahaproxy.
    '''
    latest_version = self._version_object_store.Get('latest').Get()
    if latest_version is not None:
      return latest_version
    version_json = json.loads(self._history_result.Get().content)
    latest_version = 0
    for entry in version_json['events']:
      version_title = entry['title'].split(' - ')[1].split('.')
      version = int(version_title[0])
      if version > latest_version:
        latest_version = version
    self._version_object_store.Set('latest', latest_version)
    return latest_version
| |
"""
Python script to generate a zero-offset synthetic from a 3-layer wedge model.
Created by: Wes Hamlyn
Create Date: 19-Aug-2014
Last Mod: 1-Nov-2014
This script is provided without warranty of any kind.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
###########################################################
#
# DEFINE MODELING PARAMETERS HERE
#
# 3-Layer Model Parameters [Layer1, Layer2, Layer 3]
vp_mod = [2500.0, 2600.0, 2550.0]  # P-wave velocity (m/s)
vs_mod = [1200.0, 1300.0, 1200.0]  # S-wave velocity (m/s)
rho_mod= [1.95, 2.0, 1.98]         # Density (g/cc)
dz_min = 0.0    # Minimum thickness of Layer 2 (m)
dz_max = 60.0   # Maximum thickness of Layer 2 (m)
dz_step= 1.0    # Thickness step from trace-to-trace (normally 1.0 m)
# Ricker Wavelet Parameters
wvlt_length= 0.128  # Wavelet length (s)
wvlt_cfreq = 30.0   # Central frequency (Hz)
wvlt_phase = 0.0    # Phase rotation (degrees)
# Trace Parameters
tmin = 0.0          # Trace start time (s)
tmax = 0.5          # Trace end time (s)
dt = 0.0001 # changing this from 0.0001 can affect the display quality
# Plot Parameters
min_plot_time = 0.15  # Display window start (s)
max_plot_time = 0.3   # Display window end (s)
excursion = 2         # Wiggle amplitude scaling (trace spacings)
###########################################################
#
# FUNCTIONS DEFINITIONS
#
def plot_vawig(axhdl, data, t, excursion, highlight=None):
    """Variable-area wiggle plot of *data* (ntrc x nsamp) on axes *axhdl*.

    Each trace is normalized by the global max amplitude, scaled by
    *excursion* and offset to its trace index; positive lobes fill blue,
    negative lobes red.  The trace with index *highlight* is drawn thicker.
    """
    import numpy as np
    import matplotlib.pyplot as plt
    [ntrc, nsamp] = data.shape
    # Pad the time axis so each wiggle polygon closes back to its baseline.
    t = np.hstack([0, t, t.max()])
    for i in range(0, ntrc):
        tbuf = excursion * data[i] / np.max(np.abs(data)) + i
        tbuf = np.hstack([i, tbuf, i])
        if i==highlight:
            lw = 2
        else:
            lw = 0.5
        axhdl.plot(tbuf, t, color='black', linewidth=lw)
        plt.fill_betweenx(t, tbuf, i, where=tbuf>i, facecolor=[0.6,0.6,1.0], linewidth=0)
        plt.fill_betweenx(t, tbuf, i, where=tbuf<i, facecolor=[1.0,0.7,0.7], linewidth=0)
    axhdl.set_xlim((-excursion, ntrc+excursion))
    axhdl.xaxis.tick_top()
    axhdl.xaxis.set_label_position('top')
    axhdl.invert_yaxis()
def ricker(cfreq, phase, dt, wvlt_length):
    '''
    Calculate a Ricker wavelet, optionally rotated in phase.

    Usage:
    ------
    t, wvlt = ricker(cfreq, phase, dt, wvlt_length)

    cfreq: central frequency of wavelet in Hz
    phase: wavelet phase in degrees
    dt: sample rate in seconds
    wvlt_length: length of wavelet in seconds

    Returns the time axis and amplitude arrays (each wvlt_length/dt samples).
    '''
    import numpy as np
    import scipy.signal as signal
    # np.linspace requires an integer sample count; the original passed the
    # float wvlt_length/dt, which errors on modern numpy.
    nsamp = int(round(wvlt_length / dt))
    t = np.linspace(-wvlt_length / 2, (wvlt_length - dt) / 2, nsamp)
    wvlt = (1.0 - 2.0*(np.pi**2)*(cfreq**2)*(t**2)) * np.exp(-(np.pi**2)*(cfreq**2)*(t**2))
    if phase != 0:
        # Rotate the zero-phase wavelet using its Hilbert-transform quadrature.
        phase = phase*np.pi/180.0
        wvlth = signal.hilbert(wvlt)
        wvlth = np.imag(wvlth)
        wvlt = np.cos(phase)*wvlt - np.sin(phase)*wvlth
    return t, wvlt
def calc_rc(vp_mod, rho_mod):
    '''
    rc_int = calc_rc(vp_mod, rho_mod)

    Normal-incidence reflection coefficient at each of the len(vp_mod)-1
    layer interfaces, computed from the acoustic impedance contrast.
    '''
    impedance = [vp * rho for vp, rho in zip(vp_mod, rho_mod)]
    rc_int = []
    for upper, lower in zip(impedance[:-1], impedance[1:]):
        rc_int.append((lower - upper) / (lower + upper))
    return rc_int
def calc_times(z_int, vp_mod):
    '''
    t_int = calc_times(z_int, vp_mod)

    Convert interface depths to times through the layer stack.

    NOTE(review): the first interface uses one-way time (z / v) while deeper
    interfaces add two-way increments (2*dz / v) -- reproduced exactly as in
    the original.
    '''
    t_int = []
    for idx in range(len(vp_mod) - 1):
        if idx == 0:
            t_int.append(z_int[idx] / vp_mod[idx])
        else:
            thickness = z_int[idx] - z_int[idx - 1]
            t_int.append(2 * thickness / vp_mod[idx] + t_int[idx - 1])
    return t_int
def digitize_model(rc_int, t_int, t):
    '''
    rc = digitize_model(rc_int, t_int, t)

    rc_int = reflection coefficients corresponding to interface times
    t_int  = interface times
    t      = regularly sampled time series defining model sampling

    Places each reflection coefficient at the first sample whose time reaches
    its interface time; all other samples are zero.
    '''
    import numpy as np
    rc = list(np.zeros(len(t), dtype='float'))
    lyr = 0
    for idx, sample_time in enumerate(t):
        if sample_time >= t_int[lyr]:
            rc[idx] = rc_int[lyr]
            lyr += 1
            # Stop once every interface has been placed.
            if lyr > len(rc_int) - 1:
                break
    return rc
##########################################################
#
# COMPUTATIONS BELOW HERE...
#
# Some handy constants
# Derived constants: number of layers/interfaces and wedge models to build.
nlayers = len(vp_mod)
nint = nlayers - 1
nmodel = int((dz_max-dz_min)/dz_step+1)
# Generate ricker wavelet
wvlt_t, wvlt_amp = ricker(wvlt_cfreq, wvlt_phase, dt, wvlt_length)
# Calculate reflectivities from model parameters
rc_int = calc_rc(vp_mod, rho_mod)
syn_zo = []     # synthetic zero-offset traces, one per wedge thickness
rc_zo = []      # digitized reflectivity series per model
lyr_times = []  # [top, base] interface times per model
for model in range(0, nmodel):
    # Calculate interface depths
    # Top of the wedge is fixed at 500 m; base thickens by dz_step per model.
    z_int = [500.0]
    z_int.append(z_int[0]+dz_min+dz_step*model)
    # Calculate interface times
    t_int = calc_times(z_int, vp_mod)
    lyr_times.append(t_int)
    # Digitize 3-layer model
    nsamp = int((tmax-tmin)/dt) + 1
    t = []
    for i in range(0,nsamp):
        t.append(i*dt)
    rc = digitize_model(rc_int, t_int, t)
    rc_zo.append(rc)
    # Convolve wavelet with reflectivities
    syn_buf = np.convolve(rc, wvlt_amp, mode='same')
    syn_buf = list(syn_buf)
    syn_zo.append(syn_buf)
    print "finished step %i" % (model)
syn_zo = np.array(syn_zo)
t = np.array(t)
lyr_times = np.array(lyr_times)
# Interface times converted to sample indices for amplitude extraction.
lyr_indx = np.array(np.round(lyr_times/dt), dtype='int16')
# Use the transpose because rows are traces;
# columns are time samples.
tuning_trace = np.argmax(np.abs(syn_zo.T)) % syn_zo.T.shape[1]
tuning_thickness = tuning_trace * dz_step
# Plotting Code
[ntrc, nsamp] = syn_zo.shape
# Three stacked panels: layer geometry, wiggle traces, top-interface amplitude.
fig = plt.figure(figsize=(12, 14))
fig.set_facecolor('white')
gs = gridspec.GridSpec(3, 1, height_ratios=[1, 1, 1])
# Panel 1: interface times vs. wedge thickness.
ax0 = fig.add_subplot(gs[0])
ax0.plot(lyr_times[:,0], color='blue', lw=1.5)
ax0.plot(lyr_times[:,1], color='red', lw=1.5)
ax0.set_ylim((min_plot_time,max_plot_time))
ax0.invert_yaxis()
ax0.set_xlabel('Thickness (m)')
ax0.set_ylabel('Time (s)')
plt.text(2,
         min_plot_time + (lyr_times[0,0] - min_plot_time)/2.,
         'Layer 1',
         fontsize=16)
plt.text(dz_max/dz_step - 2,
         lyr_times[-1,0] + (lyr_times[-1,1] - lyr_times[-1,0])/2.,
         'Layer 2',
         fontsize=16,
         horizontalalignment='right')
plt.text(2,
         lyr_times[0,0] + (max_plot_time - lyr_times[0,0])/2.,
         'Layer 3',
         fontsize=16)
plt.gca().xaxis.tick_top()
plt.gca().xaxis.set_label_position('top')
ax0.set_xlim((-excursion, ntrc+excursion))
# Panel 2: synthetic wiggle display with the tuning trace highlighted.
ax1 = fig.add_subplot(gs[1])
plot_vawig(ax1, syn_zo, t, excursion, highlight=tuning_trace)
ax1.plot(lyr_times[:,0], color='blue', lw=1.5)
ax1.plot(lyr_times[:,1], color='red', lw=1.5)
ax1.set_ylim((min_plot_time,max_plot_time))
ax1.invert_yaxis()
ax1.set_xlabel('Thickness (m)')
ax1.set_ylabel('Time (s)')
# Panel 3: amplitude along the upper interface vs. thickness.
ax2 = fig.add_subplot(gs[2])
ax2.plot(syn_zo[:,lyr_indx[:,0]], color='blue')
ax2.set_xlim((-excursion, ntrc+excursion))
ax2.axvline(tuning_trace, color='k', lw=2)
ax2.grid()
ax2.set_title('Upper interface amplitude')
ax2.set_xlabel('Thickness (m)')
ax2.set_ylabel('Amplitude')
plt.text(tuning_trace + 2,
         plt.ylim()[0] * 1.1,
         'tuning thickness = {0} m'.format(str(tuning_thickness)),
         fontsize=16)
plt.savefig('figure_1.png')
plt.show()
| |
"""\
PyQuante2 contains a more structured interface to all of the functions
in PyQuante.
solver = SCF(molecule,**options)
Create a solver that can perform a HF calculation on *molecule*.
General options
Option Value Description
-------- ----- -----------
method HF Use the HF method for the calculation
UHF Use the UHF method for the calculation
DFT Use the DFT method for the calculation
MINDO3 Use the MINDO3 method for the calculation
UMINDO3 Use the UMINDO3 method for the calculation
bfs None The basis functions to use. List of CGBF's
basis_data None The basis data to use to construct bfs
basis None The name of a basis set, e.g. '6-31g**',
'sto-3g','cc-pVTZ'
integrals None The one- and two-electron integrals to use
If not None, S,h,Ints
orbs None If not none, the guess orbitals
Options passed into solver.iterate(**options):
Options: Value Description
-------- ----- -----------
etol 1e-5 Energy convergence criteria
max_iter 50 Maximum SCF iterations
(do_averaging True Use DIIS for accelerated convergence (default)
False No convergence acceleration)
etemp False Use etemp value for finite temperature DFT (default)
float Use (float) for the electron temperature
The test suite at the bottom of the file has examples of usage.
"""
import unittest,logging
class SCFIterator:
    """Drives an SCF calculation: repeatedly calls ham.update() until the
    energy stops changing (within etol) or max_iter cycles have run.

    Attributes:
        energy_history -- energies seen so far, one per iteration
        converged      -- True once two successive energies agree within etol
    """
    def __init__(self, **opts):
        self.energy_history = []
        self.converged = False

    def iterate(self, ham, **opts):
        """Iterate *ham* to self-consistency.

        Options:
            max_iter -- maximum number of SCF cycles (default 50)
        Remaining options are forwarded to ham.update() and is_converged().
        """
        self.max_iter = opts.get('max_iter', 50)
        for self.iter in range(1, self.max_iter + 1):
            ham.update(**opts)
            logging.debug("%d %f" % (self.iter, ham.energy))
            if self.is_converged(ham):
                break
        # Report based on the actual convergence flag: the previous check
        # (self.iter < self.max_iter) mislabeled a run that converged exactly
        # on the final iteration as a failure.
        if self.converged:
            logging.info("PyQuante converged in %d iterations" % self.iter)
        else:
            logging.warning("PyQuante failed to converge after %d iterations"
                            % self.max_iter)

    def is_converged(self, ham, **opts):
        """Record ham's current energy; True when it changed by < etol."""
        self.energy = ham.get_energy()
        etol = opts.get('etol', 1e-5)
        if not self.energy_history:
            # first cycle: nothing to compare against yet
            self.energy_history.append(self.energy)
            return False
        self.converged = abs(self.energy - self.energy_history[-1]) < etol
        self.energy_history.append(self.energy)
        return self.converged

    def __repr__(self):
        lstr = ["Iterator information:"]
        lstr.extend([str(en) for en in self.energy_history])
        if self.converged:
            lstr.append("The iterator is converged")
        else:
            lstr.append("The iterator is not converged")
        return "\n".join(lstr)
class Integrals:
    """Container for the integral matrices (S, h, ERI) of a calculation.

    If precomputed integrals are supplied via the 'integrals' option they are
    used as-is; otherwise they are computed with PyQuante.Ints.getints.
    """
    def __init__(self, molecule, basis_set, **opts):
        integrals = opts.get("integrals", None)
        nbf = len(basis_set)  # kept for interface parity with the original
        if integrals:
            self.S, self.h, self.ERI = integrals
        else:
            # Import lazily: callers that hand us precomputed integrals do
            # not need PyQuante importable (the original imported eagerly).
            from PyQuante.Ints import getints
            self.S, self.h, self.ERI = getints(basis_set.get(), molecule)

    def get(self): return self.S, self.h, self.ERI
    def get_S(self): return self.S
    def get_h(self): return self.h
    def get_ERI(self): return self.ERI
class BasisSet:
    """Wrapper around the list of basis functions for *molecule*.

    Resolution order: an explicit 'bfs' option wins; otherwise 'basis_data',
    or a named 'basis' (looked up via get_basis_data), is fed to getbasis.
    """
    def __init__(self, molecule, **opts):
        bfs = opts.get('bfs')
        if bfs:
            self.bfs = bfs
        else:
            # Import lazily: callers supplying ready-made basis functions do
            # not need PyQuante importable (the original imported eagerly).
            from PyQuante.Ints import getbasis
            from PyQuante.Basis.Tools import get_basis_data
            basis_data = opts.get('basis_data')
            if not basis_data:
                basis = opts.get('basis')
                if basis:
                    basis_data = get_basis_data(basis)
            self.bfs = getbasis(molecule, basis_data)
        logging.info("%d basis functions" % len(self.bfs))

    def __repr__(self): return 'Gaussian basis set with %d bfns' % len(self.bfs)
    def __len__(self): return len(self.bfs)
    def get(self): return self.bfs
########## Hamiltonian ##########
def HamiltonianFactory(molecule, **opts):
    """Build the Hamiltonian selected by opts['method'].

    Recognized methods: UHF, ROHF, DFT, MINDO3, UMINDO3; anything else
    (including the default 'HF') yields an HFHamiltonian.
    """
    dispatch = {
        'UHF': UHFHamiltonian,
        'ROHF': ROHFHamiltonian,
        'DFT': DFTHamiltonian,
        'MINDO3': MINDO3Hamiltonian,
        'UMINDO3': UMINDO3Hamiltonian,
    }
    constructor = dispatch.get(opts.get('method', 'HF'), HFHamiltonian)
    return constructor(molecule, **opts)
# Convenience function
# SCF() is an alias for HamiltonianFactory: it returns a Hamiltonian object
# chosen by the 'method' option (default HF); call .iterate() on the result.
def SCF(molecule,**opts): return HamiltonianFactory(molecule,**opts)
class AbstractHamiltonian:
    """Interface for Hamiltonian classes.

    Subclasses must implement __init__, update, iterate and get_energy.
    NotImplementedError (the idiomatic marker for abstract members, and still
    a subclass of Exception so existing broad handlers keep working) replaces
    the bare Exception the original raised.
    """
    def __init__(self, molecule, **opts):
        raise NotImplementedError("AbstractHamiltonian::__init__")
    def update(self, **opts):
        raise NotImplementedError("AbstractHamiltonian::update")
    def iterate(self, **opts):
        raise NotImplementedError("AbstractHamiltonian::iterate")
    def get_energy(self, **opts):
        raise NotImplementedError("AbstractHamiltonian::get_energy")
class HFHamiltonian(AbstractHamiltonian):
    """Hartree-Fock Hamiltonian.

    Builds the basis set and integrals for *molecule*, then lets an
    SCFIterator drive update() until the energy converges.
    """
    method='HF'
    def __init__(self,molecule,**opts):
        from PyQuante.Convergence import DIIS
        self.molecule = molecule
        logging.info("HF calculation on system %s" % self.molecule.name)
        self.basis_set = BasisSet(molecule,**opts)
        self.integrals = Integrals(molecule,self.basis_set,**opts)
        self.iterator = SCFIterator()
        self.h = self.integrals.get_h()      # one-electron (core) Hamiltonian
        self.S = self.integrals.get_S()      # overlap matrix
        self.ERI = self.integrals.get_ERI()  # two-electron integrals
        self.Enuke = molecule.get_enuke()
        # core Hamiltonian serves as the initial Fock-matrix guess
        self.F = self.h
        self.dmat = None
        self.entropy = None
        # DIIS averaging is on by default; the 'DoAveraging' opt disables it
        self.DoAveraging = opts.get('DoAveraging',True)
        if self.DoAveraging:
            self.Averager = DIIS(self.S)
        nel = molecule.get_nel()
        nclosed,nopen = molecule.get_closedopen()
        logging.info("Nclosed/open = %d, %d" % (nclosed,nopen))
        self.solver = SolverFactory(nel,nclosed,nopen,self.S,**opts)
        return
    def __repr__(self):
        lstr = ['Hamiltonian constructed for method %s' % self.method,
                repr(self.molecule),
                repr(self.basis_set),
                repr(self.iterator)]
        return '\n'.join(lstr)
    def get_energy(self): return self.energy
    def iterate(self,**opts): return self.iterator.iterate(self,**opts)
    def update(self,**opts):
        """One SCF cycle: (optionally DIIS-average F), solve for the density,
        rebuild J/K and F, and recompute the total energy."""
        from PyQuante.LA2 import trace2
        from PyQuante.Ints import getJ,getK
        if self.DoAveraging and self.dmat is not None:
            self.F = self.Averager.getF(self.F,self.dmat)
        self.dmat,self.entropy = self.solver.solve(self.F,**opts)
        D = self.dmat
        self.J = getJ(self.ERI,D)
        self.Ej = 2*trace2(D,self.J)
        self.K = getK(self.ERI,D)
        self.Exc = -trace2(D,self.K)
        self.Eone = 2*trace2(D,self.h)
        self.F = self.h + 2*self.J - self.K
        self.energy = self.Eone + self.Ej + self.Exc + self.Enuke + self.entropy
        return
class DFTHamiltonian(AbstractHamiltonian):
    """Density-functional-theory Hamiltonian.

    Like HFHamiltonian, but exchange is replaced by an exchange-correlation
    term evaluated on a molecular grid; the functional is chosen by the
    'functional' option (default 'SVWN').
    """
    method='DFT'
    def __init__(self,molecule,**opts):
        from PyQuante.DFunctionals import need_gradients
        self.molecule = molecule
        logging.info("DFT calculation on system %s" % self.molecule.name)
        self.basis_set = BasisSet(molecule,**opts)
        self.integrals = Integrals(molecule,self.basis_set,**opts)
        self.iterator = SCFIterator()
        self.h = self.integrals.get_h()      # one-electron (core) Hamiltonian
        self.S = self.integrals.get_S()      # overlap matrix
        self.ERI = self.integrals.get_ERI()  # two-electron integrals
        self.Enuke = molecule.get_enuke()
        self.nel = molecule.get_nel()
        # core Hamiltonian serves as the initial Fock-matrix guess
        self.F = self.h
        self.functional = opts.get('functional','SVWN')
        # tell the grid machinery whether this functional needs density gradients
        opts['do_grad_dens'] = need_gradients[self.functional]
        self.setup_grid(molecule,self.basis_set.get(),**opts)
        self.dmat = None
        self.entropy = None
        nel = molecule.get_nel()
        nclosed,nopen = molecule.get_closedopen()
        logging.info("Nclosed/open = %d, %d" % (nclosed,nopen))
        self.solver = SolverFactory(nel,nclosed,nopen,self.S,**opts)
        return
    def __repr__(self):
        lstr = ['Hamiltonian constructed for method %s' % self.method,
                repr(self.molecule),
                repr(self.basis_set),
                repr(self.iterator)]
        return '\n'.join(lstr)
    def get_energy(self): return self.energy
    def iterate(self,**opts): return self.iterator.iterate(self,**opts)
    def setup_grid(self,molecule,bfs,**opts):
        """Build the molecular integration grid and cache basis-function
        amplitudes on it (options: grid_nrad, grid_fineness)."""
        from PyQuante.MolecularGrid import MolecularGrid
        grid_nrad = opts.get('grid_nrad',32)
        grid_fineness = opts.get('grid_fineness',1)
        self.gr = MolecularGrid(molecule,grid_nrad,grid_fineness,**opts)
        self.gr.set_bf_amps(bfs)
        self.bfgrid = self.gr.allbfs() # bfs over all grid points
        return
    def update(self,**opts):
        """One SCF cycle: solve for the density, rebuild J and the XC term,
        and recompute the total energy."""
        from PyQuante.LA2 import trace2
        from PyQuante.Ints import getJ
        from PyQuante.dft import getXC
        self.dmat,self.entropy = self.solver.solve(self.F,**opts)
        D = self.dmat
        self.gr.setdens(D)  # grid needs the current density for the XC quadrature
        self.J = getJ(self.ERI,D)
        self.Ej = 2*trace2(D,self.J)
        self.Exc,self.XC = getXC(self.gr,self.nel,self.bfgrid,
                                 functional=self.functional)
        self.Eone = 2*trace2(D,self.h)
        self.F = self.h+2*self.J+self.XC
        self.energy = self.Eone + self.Ej + self.Exc + self.Enuke + self.entropy
        return
class UHFHamiltonian(AbstractHamiltonian):
    """Unrestricted Hartree-Fock: separate alpha and beta Fock matrices,
    density matrices and solvers."""
    method='UHF'
    def __init__(self,molecule,**opts):
        self.molecule = molecule
        logging.info("UHF calculation on system %s" % self.molecule.name)
        self.basis_set = BasisSet(molecule,**opts)
        self.integrals = Integrals(molecule,self.basis_set,**opts)
        self.iterator = SCFIterator()
        self.h = self.integrals.get_h()      # one-electron (core) Hamiltonian
        self.S = self.integrals.get_S()      # overlap matrix
        self.ERI = self.integrals.get_ERI()  # two-electron integrals
        self.Enuke = molecule.get_enuke()
        # core guesses for both spin Fock matrices
        self.Fa = self.h
        self.Fb = self.h
        self.amat = None  # alpha density matrix
        self.bmat = None  # beta density matrix
        self.entropy = None
        nalpha,nbeta = molecule.get_alphabeta()
        logging.info("Nalpha/beta = %d, %d" % (nalpha,nbeta))
        # one solver per spin channel, each treating its channel as closed
        self.solvera = SolverFactory(2*nalpha,nalpha,0,self.S,**opts)
        self.solverb = SolverFactory(2*nbeta,nbeta,0,self.S,**opts)
        return
    def __repr__(self):
        lstr = ['Hamiltonian constructed for method %s' % self.method,
                repr(self.molecule),
                repr(self.basis_set),
                repr(self.iterator)]
        return '\n'.join(lstr)
    def get_energy(self): return self.energy
    def iterate(self,**opts): return self.iterator.iterate(self,**opts)
    def update(self,**opts):
        """One SCF cycle for both spin channels: solve Fa/Fb, rebuild them
        from the new densities, and recompute the total energy."""
        from PyQuante.LA2 import trace2
        from PyQuante.Ints import getJ,getK
        self.amat,entropya = self.solvera.solve(self.Fa)
        self.bmat,entropyb = self.solverb.solve(self.Fb)
        Da = self.amat
        Db = self.bmat
        D = Da+Db  # total density
        self.entropy = 0.5*(entropya+entropyb)
        self.J = getJ(self.ERI,D)
        self.Ej = 0.5*trace2(D,self.J)
        self.Ka = getK(self.ERI,Da)
        self.Kb = getK(self.ERI,Db)
        self.Exc = -0.5*(trace2(Da,self.Ka)+trace2(Db,self.Kb))
        self.Eone = trace2(D,self.h)
        self.Fa = self.h + self.J - self.Ka
        self.Fb = self.h + self.J - self.Kb
        self.energy = self.Eone + self.Ej + self.Exc + self.Enuke + self.entropy
        return
class ROHFHamiltonian(AbstractHamiltonian):
    """Restricted open-shell Hartree-Fock Hamiltonian.

    update() builds alpha/beta Fock matrices in the AO basis, transforms them
    to the MO basis, combines them into one effective Fock matrix using the
    block couplings sketched in the comment below, and rediagonalizes.
    """
    method='ROHF'
    def __init__(self,molecule,**opts):
        self.molecule = molecule
        logging.info("ROHF calculation on system %s" % self.molecule.name)
        self.basis_set = BasisSet(molecule,**opts)
        self.integrals = Integrals(molecule,self.basis_set,**opts)
        self.iterator = SCFIterator()
        self.h = self.integrals.get_h()      # one-electron (core) Hamiltonian
        self.S = self.integrals.get_S()      # overlap matrix
        self.ERI = self.integrals.get_ERI()  # two-electron integrals
        self.Enuke = molecule.get_enuke()
        self.orbs = None  # populated on the first update()
        self.norbs = len(self.basis_set)
        self.nalpha,self.nbeta = molecule.get_alphabeta()
        logging.info("Nalpha/beta = %d, %d" % (self.nalpha,self.nbeta))
        return
    def __repr__(self):
        lstr = ['Hamiltonian constructed for method %s' % self.method,
                repr(self.molecule),
                repr(self.basis_set),
                repr(self.iterator)]
        return '\n'.join(lstr)
    def get_energy(self): return self.energy
    def iterate(self,**opts): return self.iterator.iterate(self,**opts)
    def update(self,**opts):
        """One ROHF cycle: spin Fock build, energy, effective-Fock assembly
        in the MO basis, diagonalization, orbital rotation."""
        from PyQuante.Ints import getJ,getK
        from PyQuante.LA2 import geigh,mkdens
        from PyQuante.rohf import ao2mo
        from PyQuante.hartree_fock import get_energy
        from PyQuante.NumWrap import eigh,matrixmultiply
        if self.orbs is None:
            # bootstrap orbitals from the core Hamiltonian
            self.orbe,self.orbs = geigh(self.h, self.S)
        Da = mkdens(self.orbs,0,self.nalpha)
        Db = mkdens(self.orbs,0,self.nbeta)
        Ja = getJ(self.ERI,Da)
        Jb = getJ(self.ERI,Db)
        Ka = getK(self.ERI,Da)
        Kb = getK(self.ERI,Db)
        Fa = self.h+Ja+Jb-Ka
        Fb = self.h+Ja+Jb-Kb
        energya = get_energy(self.h,Fa,Da)
        energyb = get_energy(self.h,Fb,Db)
        self.energy = (energya+energyb)/2 + self.Enuke
        Fa = ao2mo(Fa,self.orbs)
        Fb = ao2mo(Fb,self.orbs)
        # Building the approximate Fock matrices in the MO basis
        F = 0.5*(Fa+Fb)
        K = Fb-Fa
        # The Fock matrix now looks like
        #    F-K    |  F + K/2  |    F
        # ---------------------------------
        #  F + K/2  |     F     |  F - K/2
        # ---------------------------------
        #     F     |  F - K/2  |  F + K
        # Make explicit slice objects to simplify this
        do = slice(0,self.nbeta)            # doubly occupied block
        so = slice(self.nbeta,self.nalpha)  # singly occupied block
        uo = slice(self.nalpha,self.norbs)  # unoccupied (virtual) block
        F[do,do] -= K[do,do]
        F[uo,uo] += K[uo,uo]
        F[do,so] += 0.5*K[do,so]
        F[so,do] += 0.5*K[so,do]
        F[so,uo] -= 0.5*K[so,uo]
        F[uo,so] -= 0.5*K[uo,so]
        self.orbe,mo_orbs = eigh(F)
        # rotate the current orbitals by the MO-basis eigenvectors
        self.orbs = matrixmultiply(self.orbs,mo_orbs)
        return
class MINDO3Hamiltonian(AbstractHamiltonian):
    """Semi-empirical MINDO/3 Hamiltonian (spin-restricted).

    The reported energy is Etot*ev2kcal + eref (see calculate_energy), i.e.
    apparently a heat-of-formation-style value rather than a raw SCF energy.
    """
    method='MINDO3'
    def __init__(self,molecule,**opts):
        from PyQuante.MINDO3 import initialize, get_nbf, get_reference_energy,\
             get_F0, get_nel,get_open_closed,get_enuke,get_guess_D
        self.molecule = molecule
        logging.info("MINDO3 calculation on system %s" % self.molecule.name)
        self.iterator = SCFIterator()
        self.charge = self.molecule.charge
        self.multiplicity = self.molecule.multiplicity
        # This is an ugly-ish hack to deal with the brain-dead
        # way that MINDO3.get_open_closed works
        if self.multiplicity == 1: self.multiplicity=None
        # Ultimately I should subclass Atom for MINDO3Atom
        self.molecule = initialize(self.molecule)
        self.nel = get_nel(self.molecule,self.charge)
        self.nclosed,self.nopen = get_open_closed(self.nel,self.multiplicity)
        self.Enuke = get_enuke(self.molecule)
        self.energy = 0
        # redundant with the class attribute above; kept as-is
        self.method = "MINDO3"
        self.nbf = get_nbf(self.molecule)
        self.eref = get_reference_energy(self.molecule)
        self.F0 = get_F0(self.molecule)  # fixed (core) part of the Fock matrix
        self.F = self.F0
        self.D = get_guess_D(self.molecule)
        logging.info("Nel = %d Nclosed = %d Nopen = %d Enuke = %f Nbf = %d"
                     % (self.nel,self.nclosed,self.nopen,self.Enuke,self.nbf))
        return
    def __repr__(self):
        lstr = ['Hamiltonian constructed for method %s' % self.method,
                repr(self.molecule),
                'Implicit MINDO3 basis set with %d bfns' % self.nbf,
                repr(self.iterator)]
        return '\n'.join(lstr)
    def get_energy(self): return self.energy
    def iterate(self,**opts): return self.iterator.iterate(self,**opts)
    def update(self,**opts):
        """One SCF cycle: Fock build, energy, diagonalization, new density."""
        self.update_fock()
        self.calculate_energy()
        self.solve_fock()
        self.update_density()
    def update_fock(self):
        """Rebuild F = F0 + F1(D) + F2(D) from the current density."""
        from PyQuante.MINDO3 import get_F1, get_F2
        avg = 0.25  # only used by the disabled averaging line below
        Fold = self.F
        self.F1 = get_F1(self.molecule,self.D)
        self.F2 = get_F2(self.molecule,self.D)
        self.F = self.F0+self.F1+self.F2
        #self.F = avg*self.F + (1-avg)*Fold
    def solve_fock(self):
        """Diagonalize the current Fock matrix."""
        from PyQuante.NumWrap import eigh
        self.orbe,self.orbs = eigh(self.F)
    def update_density(self):
        """Closed-shell density from the lowest nclosed orbitals."""
        from PyQuante.LA2 import mkdens
        self.D = 2*mkdens(self.orbs,0,self.nclosed)
    def calculate_energy(self):
        """Electronic + nuclear energy, converted via ev2kcal and offset by eref."""
        from PyQuante.LA2 import trace2
        from PyQuante.MINDO3 import ev2kcal
        self.Eel = 0.5*trace2(self.D,self.F0+self.F)
        self.Etot = self.Eel+self.Enuke
        self.energy = self.Etot*ev2kcal+self.eref
class UMINDO3Hamiltonian(AbstractHamiltonian):
    """Unrestricted (spin-polarized) MINDO/3 Hamiltonian with separate
    alpha/beta Fock and density matrices."""
    def __init__(self,molecule,**opts):
        from PyQuante.MINDO3 import initialize, get_nbf, get_reference_energy,\
             get_F0, get_nel,get_open_closed,get_enuke,get_guess_D
        self.molecule = molecule
        logging.info("uMINDO3 calculation on system %s" % self.molecule.name)
        self.iterator = SCFIterator()
        self.charge = self.molecule.charge
        self.multiplicity = self.molecule.multiplicity
        # This is an ugly-ish hack to deal with the brain-dead
        # way that MINDO3.get_open_closed works
        if self.multiplicity == 1: self.multiplicity=None
        # Ultimately I should subclass Atom for MINDO3Atom
        self.molecule = initialize(self.molecule)
        self.nel = get_nel(self.molecule,self.charge)
        self.nclosed,self.nopen = get_open_closed(self.nel,self.multiplicity)
        logging.info("Nclosed/open = %d, %d" % (self.nclosed,self.nopen))
        self.Enuke = get_enuke(self.molecule)
        self.energy = 0
        # NOTE(review): the label reads "MINDO3" although this is the
        # unrestricted variant, so __repr__ reports MINDO3; confirm nothing
        # keys off this string before renaming it to "UMINDO3".
        self.method = "MINDO3"
        self.nbf = get_nbf(self.molecule)
        self.eref = get_reference_energy(self.molecule)
        self.F0 = get_F0(self.molecule)  # fixed (core) part of the Fock matrices
        self.Fa = self.Fb = self.F0
        self.nalpha = self.nclosed+self.nopen
        self.nbeta = self.nclosed
        # split the closed-shell guess density evenly between the two spins
        self.Da = self.Db = 0.5*get_guess_D(self.molecule)
        # the very first update_density() must keep this guess (see below)
        self.start = True
        return
    def __repr__(self):
        lstr = ['Hamiltonian constructed for method %s' % self.method,
                repr(self.molecule),
                'Implicit MINDO3 basis set with %d bfns' % self.nbf,
                repr(self.iterator)]
        return '\n'.join(lstr)
    def get_energy(self): return self.energy
    def iterate(self,**opts): return self.iterator.iterate(self,**opts)
    def update(self,**opts):
        """One SCF cycle; note the order differs from MINDO3Hamiltonian:
        solve first, then density, Fock, energy."""
        self.solve_fock()
        self.update_density()
        self.update_fock()
        self.calculate_energy()
    def update_fock(self):
        """Rebuild Fa/Fb from the current spin densities."""
        from PyQuante.MINDO3 import get_F1_open, get_F2_open
        F1a = get_F1_open(self.molecule,self.Da,self.Db)
        F1b = get_F1_open(self.molecule,self.Db,self.Da)
        F2a = get_F2_open(self.molecule,self.Da,self.Db)
        F2b = get_F2_open(self.molecule,self.Db,self.Da)
        self.Fa = self.F0+F1a+F2a
        self.Fb = self.F0+F1b+F2b
        return
    def solve_fock(self):
        """Diagonalize both spin Fock matrices."""
        from PyQuante.NumWrap import eigh
        from PyQuante.LA2 import mkdens  # NOTE(review): unused import
        self.orbea,self.orbsa = eigh(self.Fa)
        self.orbeb,self.orbsb = eigh(self.Fb)
    def update_density(self):
        """Spin densities from the current orbitals; the first cycle keeps
        the initial guess density instead."""
        from PyQuante.LA2 import mkdens
        if self.start:
            self.start = False
        else:
            self.Da = mkdens(self.orbsa,0,self.nalpha)
            self.Db = mkdens(self.orbsb,0,self.nbeta)
    def calculate_energy(self):
        """Electronic + nuclear energy, converted via ev2kcal and offset by eref."""
        from PyQuante.LA2 import trace2
        from PyQuante.MINDO3 import ev2kcal
        self.Eel = 0.5*trace2(self.Da,self.F0+self.Fa)+\
                   0.5*trace2(self.Db,self.F0+self.Fb)
        self.Etot = self.Eel+self.Enuke
        self.energy = self.Etot*ev2kcal+self.eref
########## Solver ##########
def SolverFactory(nel, nclosed, nopen, S, **opts):
    """Pick a density solver for the (nel, nclosed, nopen) system.

    Resolution order: an explicit 'SolverConstructor' option wins; a truthy
    'etemp' selects the finite-temperature solver; otherwise BasicSolver.
    """
    custom = opts.get("SolverConstructor")
    if custom:
        return custom(nel, nclosed, nopen, S, **opts)
    if opts.get('etemp', False):
        return FermiDiracSolver(nel, nclosed, nopen, S, **opts)
    return BasicSolver(nel, nclosed, nopen, S, **opts)
class AbstractSolver:
    """Interface for density-matrix solvers; subclasses implement solve().

    NotImplementedError (still a subclass of Exception, so broad handlers
    keep working) replaces the bare Exception the original raised.
    """
    def __init__(self, S, **opts):
        raise NotImplementedError("AbstractSolver::__init__")
    def solve(self, ham, **opts):
        raise NotImplementedError("AbstractSolver::solve")
class BasicSolver(AbstractSolver):
    """Zero-temperature solver: generalized eigenproblem against S followed
    by a spin-averaged density build."""

    def __init__(self, nel, nclosed, nopen, S, **opts):
        self.nel = nel
        self.nclosed = nclosed
        self.nopen = nopen
        self.S = S

    def solve(self, H, **opts):
        """Diagonalize H against S; return (density matrix, entropy=0)."""
        from PyQuante.LA2 import geigh, mkdens_spinavg
        self.orbe, self.orbs = geigh(H, self.S)
        self.D = mkdens_spinavg(self.orbs, self.nclosed, self.nopen)
        self.entropy = 0  # zero electronic temperature
        return self.D, self.entropy
class FermiDiracSolver(AbstractSolver):
    """Finite-temperature solver: Fermi-Dirac occupations at the electron
    temperature given by the 'etemp' option (default 0)."""

    def __init__(self, nel, nclosed, nopen, S, **opts):
        self.nel = nel
        self.nclosed = nclosed
        self.nopen = nopen
        self.S = S
        self.etemp = opts.get('etemp', 0)

    def solve(self, H, **opts):
        """Diagonalize H against S; return (density matrix, entropy)."""
        from PyQuante.LA2 import geigh
        from PyQuante.fermi_dirac import mkdens_fermi
        self.orbe, self.orbs = geigh(H, self.S)
        self.D, self.entropy = mkdens_fermi(self.nel, self.orbe,
                                            self.orbs, self.etemp)
        return self.D, self.entropy
class SubspaceSolver(AbstractSolver):
    """Solver that performs one full generalized diagonalization, then keeps
    working in the subspace spanned by the current orbitals."""

    def __init__(self, nel, nclosed, nopen, S, **opts):
        self.nel = nel
        self.nclosed = nclosed
        self.nopen = nopen
        self.S = S
        self.first_iteration = True
        # nroots is what engines such as Davidson need: all occupied
        # orbitals plus a few virtuals (1 by default).
        self.pass_nroots = opts.get("pass_nroots", False)
        self.nvirt = opts.get("nvirt", 1)
        self.nroots = self.nclosed + self.nopen + self.nvirt
        self.solver = opts.get("solver")
        if not self.solver:
            from PyQuante.NumWrap import eigh
            self.solver = eigh

    def solve(self, H, **opts):
        """Return (spin-averaged density matrix, entropy=0) for Fock matrix H."""
        from PyQuante.LA2 import mkdens_spinavg, simx, geigh
        from PyQuante.NumWrap import matrixmultiply, eigh
        if self.first_iteration:
            # bootstrap: one full generalized diagonalization against S
            self.first_iteration = False
            self.orbe, self.orbs = geigh(H, self.S)
        else:
            # transform H into the orbital subspace and solve there
            Ht = simx(H, self.orbs)
            if self.pass_nroots:
                self.orbe, subspace_orbs = self.solver(Ht, self.nroots)
            else:
                self.orbe, subspace_orbs = self.solver(Ht)
            # rotate the orbitals by the subspace eigenvectors
            self.orbs = matrixmultiply(self.orbs, subspace_orbs)
        self.D = mkdens_spinavg(self.orbs, self.nclosed, self.nopen)
        self.entropy = 0
        return self.D, self.entropy
class DmatSolver(AbstractSolver):
    """Density-matrix solver: delegates each solve to a purification engine
    (the 'solver' option; PyQuante's TCP by default)."""

    def __init__(self, nel, nclosed, nopen, S, **opts):
        self.nel = nel
        self.nclosed = nclosed
        self.nopen = nopen
        self.S = S
        self.solver = opts.get("solver")
        if not self.solver:
            from PyQuante.DMP import TCP
            self.solver = TCP

    def solve(self, H, **opts):
        """Iterate the engine on H; return (density matrix, entropy=0)."""
        engine = self.solver(H, self.nclosed, self.S)
        engine.iterate()
        self.D = engine.D
        self.entropy = 0
        return self.D, self.entropy
class UnitTests(unittest.TestCase):
    """Regression tests: known reference energies for small systems across
    methods, solvers and basis sets (requires PyQuante)."""
    def setUp(self):
        from PyQuante.Molecule import Molecule
        self.h2 = Molecule('H2',atomlist=[(1,(0.35,0,0)),(1,(-0.35,0,0))],
                           units='Angs')
        self.he = Molecule('He',atomlist = [(2,(0,0,0))])
        self.li = Molecule('Li',atomlist = [(3,(0,0,0))],multiplicity=2)
        self.li_p = Molecule('Li+',atomlist = [(3,(0,0,0))],charge=1)
        self.li_m = Molecule('Li-',atomlist = [(3,(0,0,0))],charge=-1)
        self.h2o = Molecule('h2o',[(8,(0,0,0)),(1,(1.,0,0)),(1,(0,1.,0))],
                            units="Angstrom")
        self.oh = Molecule('oh',[(8,(0,0,0)),(1,(1.,0,0))],
                           units="Angstrom")
    ########## Method tests ##########
    def testH2HF(self):
        h2_hf = SCF(self.h2,method='HF')
        h2_hf.iterate()
        self.assertAlmostEqual(h2_hf.energy,-1.130501,4)
    def testH2HFFT(self):
        h2_hf = SCF(self.h2,method='HF',etemp=1e4)
        h2_hf.iterate()
        self.assertAlmostEqual(h2_hf.energy,-1.130502,4)
    def testHeHF(self):
        he_hf = SCF(self.he,method='HF')
        he_hf.iterate()
        self.assertAlmostEqual(he_hf.energy,-2.855260,3)
    def testLiHF(self):
        # smoke test only: no reference energy asserted
        li_hf = SCF(self.li,method='HF')
        li_hf.iterate()
        return
    def testLipHF(self):
        li_p_hf = SCF(self.li_p,method='HF')
        li_p_hf.iterate()
        self.assertAlmostEqual(li_p_hf.energy,-7.235536,4)
    def testLimHF(self):
        li_m_hf = SCF(self.li_m,method='HF')
        li_m_hf.iterate()
        self.assertAlmostEqual(li_m_hf.energy,-7.407030,4)
    def testH2LDA(self):
        h2_lda = SCF(self.h2,method='DFT',functional="SVWN")
        h2_lda.iterate()
        self.assertAlmostEqual(h2_lda.energy,-1.132799,4)
    def testH2LDAFT(self):
        h2_lda = SCF(self.h2,method='DFT',functional="SVWN",etemp=1e4)
        h2_lda.iterate()
        self.assertAlmostEqual(h2_lda.energy,-1.132558,4)
    def testLiLDA(self):
        li_lda = SCF(self.li,method='DFT',functional="SVWN")
        li_lda.iterate()
        self.assertAlmostEqual(li_lda.energy,-7.332050,4)
    def testLiLDAFT(self):
        li_lda = SCF(self.li,method='DFT',functional="SVWN",etemp=1e4)
        li_lda.iterate()
        self.assertAlmostEqual(li_lda.energy,-7.349422,4)
    def testLiUHF(self):
        li_uhf = SCF(self.li,method='UHF')
        li_uhf.iterate()
        self.assertAlmostEqual(li_uhf.energy,-7.431364,4)
    def testLiUHFFT(self):
        li_uhf = SCF(self.li,method="UHF",etemp=1e4)
        li_uhf.iterate()
        # No test, since I don't really know what the energy should be:
        # finite temperature HF is kind of a hack. But this at least
        # tests that the program runs
        return
    ########## Solver tests ##########
    def testSubspaceSolver(self):
        h2_hf = SCF(self.h2,method='HF',SolverConstructor=SubspaceSolver)
        h2_hf.iterate()
        self.assertAlmostEqual(h2_hf.energy,-1.130501,4)
    def testDavidsonSolver(self):
        from PyQuante.Solvers import davidson
        h2_hf = SCF(self.h2,method='HF',SolverConstructor=SubspaceSolver,
                    solver=davidson,pass_nroots=True)
        h2_hf.iterate()
        self.assertAlmostEqual(h2_hf.energy,-1.130501,4)
    def testJacobiSolver(self):
        from PyQuante.Solvers import jacobi
        h2_hf = SCF(self.h2,method='HF',SolverConstructor=SubspaceSolver,
                    solver=jacobi)
        h2_hf.iterate()
        self.assertAlmostEqual(h2_hf.energy,-1.130501,4)
    def testTCPSolver(self):
        h2_hf = SCF(self.h2,method='HF',SolverConstructor=DmatSolver)
        h2_hf.iterate()
        self.assertAlmostEqual(h2_hf.energy,-1.130501,4)
    def testTRPSolver(self):
        from PyQuante.DMP import TRP
        h2_hf = SCF(self.h2,method='HF',SolverConstructor=DmatSolver,
                    solver=TRP)
        h2_hf.iterate()
        self.assertAlmostEqual(h2_hf.energy,-1.130501,4)
    def testCPSolver(self):
        from PyQuante.DMP import CP
        h2_hf = SCF(self.h2,method='HF',SolverConstructor=DmatSolver,
                    solver=CP)
        h2_hf.iterate()
        self.assertAlmostEqual(h2_hf.energy,-1.130501,4)
    def testMCWSolver(self):
        from PyQuante.DMP import McWeeny
        h2_hf = SCF(self.h2,method='HF',SolverConstructor=DmatSolver,
                    solver=McWeeny)
        h2_hf.iterate()
        self.assertAlmostEqual(h2_hf.energy,-1.130501,4)
    ########## Basis set tests ##########
    def testSTO3G(self):
        h2_hf = SCF(self.h2,method='HF',basis='sto-3g')
        h2_hf.iterate()
        self.assertAlmostEqual(h2_hf.energy,-1.117349,4)
    def testSTO6G(self):
        # NOTE(review): the name says STO-6G but the basis requested is
        # 'sto-3g' (and the expected energy equals testSTO3G's) -- looks like
        # a copy-paste slip; fixing it needs the true sto-6g reference energy.
        h2_hf = SCF(self.h2, method="HF",basis='sto-3g')
        h2_hf.iterate()
        self.assertAlmostEqual(h2_hf.energy,-1.117349,4)
    def test321G(self):
        h2_hf = SCF(self.h2, method="HF",basis='3-21g')
        h2_hf.iterate()
        self.assertAlmostEqual(h2_hf.energy,-1.121998,4)
    def test631Gss(self):
        h2_hf = SCF(self.h2, method="HF",basis='6-31g**')
        h2_hf.iterate()
        self.assertAlmostEqual(h2_hf.energy,-1.130501,4)
    def test631Gppss(self):
        h2_hf = SCF(self.h2, method="HF",basis='6-31g++**')
        h2_hf.iterate()
        self.assertAlmostEqual(h2_hf.energy,-1.130553,4)
    def test631Gdp(self):
        h2_hf = SCF(self.h2, method="HF",basis='6-31G(d,p)')
        h2_hf.iterate()
        self.assertAlmostEqual(h2_hf.energy,-1.130501,4)
    def testVDZ(self):
        h2_hf = SCF(self.h2, method="HF",basis='cc-pvdz')
        h2_hf.iterate()
        self.assertAlmostEqual(h2_hf.energy,-1.126923,4)
    def testVTZ(self):
        h2_hf = SCF(self.h2, method="HF",basis='cc-pvtz')
        h2_hf.iterate()
        self.assertAlmostEqual(h2_hf.energy,-1.132136,4)
    def testDZVP(self):
        h2_hf = SCF(self.h2, method="HF",basis='dzvp')
        h2_hf.iterate()
        self.assertAlmostEqual(h2_hf.energy,-1.126728,4)
    def test6311G(self):
        h2_hf = SCF(self.h2, method="HF",basis='6-311G**')
        h2_hf.iterate()
        self.assertAlmostEqual(h2_hf.energy,-1.131516,4)
    def test6311Gdp(self):
        h2_hf = SCF(self.h2, method="HF",basis='6-311G++(2d,2p)')
        h2_hf.iterate()
        self.assertAlmostEqual(h2_hf.energy,-1.132122,4)
    def test6311G3d3p(self):
        h2_hf = SCF(self.h2, method="HF",basis='6-311G++(3d,3p)')
        h2_hf.iterate()
        self.assertAlmostEqual(h2_hf.energy,-1.132166,4)
    ########## Misc Tests ##########
    def testH2OMINDO(self):
        h2o_mindo3 = SCF(self.h2o,method="MINDO3")
        h2o_mindo3.iterate()
        self.assertAlmostEqual(h2o_mindo3.energy,-48.826208,2)
    def testOHMINDO(self):
        oh_mindo = SCF(self.oh,method="UMINDO3")
        oh_mindo.iterate()
        self.assertAlmostEqual(oh_mindo.energy,18.1258,2)
def test():
    """Run the module's unit-test suite with DEBUG logging to the console."""
    logging.basicConfig(level=logging.DEBUG,format="%(message)s")
    suite = unittest.TestLoader().loadTestsFromTestCase(UnitTests)
    unittest.TextTestRunner(verbosity=2).run(suite)
    #unittest.main()
if __name__ == '__main__': test()
| |
# third party
import torch as th
# syft absolute
import syft as sy
from syft import serialize
from syft.core.common import UID
from syft.core.store.dataset import Dataset
from syft.core.store.storeable_object import StorableObject
def test_create_dataset_with_store_obj() -> None:
    """Constructing a Dataset from a list of StorableObjects must not raise."""

    def _store(payload: object, note: str) -> StorableObject:
        # every element in this test shares the same tag pair
        return StorableObject(
            id=UID(), data=payload, description=note, tags=["dummy", "test"]
        )

    elements = [
        _store(UID(), "This is a dummy id"),
        _store(th.Tensor([1, 2, 3, 4]), "This is a dummy tensor"),
    ]
    Dataset(
        id=UID(),
        data=elements,
        description="This is a dummy tensor",
        tags=["dummy", "dataset"],
    )
def test_dataset_search_id() -> None:
    """__contains__ should find the id of a stored element."""

    def _store(payload: object, note: str) -> StorableObject:
        return StorableObject(
            id=UID(), data=payload, description=note, tags=["dummy", "test"]
        )

    elements = [
        _store(UID(), "This is a dummy id"),
        _store(th.Tensor([1, 2, 3, 4]), "This is a dummy tensor n1"),
        _store(th.Tensor([10, 20, 30, 40]), "This is a dummy tensor n2"),
    ]
    dataset_obj = Dataset(
        id=UID(),
        data=elements,
        description="This is a dataset",
        tags=["dummy", "dataset"],
    )
    assert dataset_obj.__contains__(_id=elements[0].id)
def test_dataset_search_id_fail() -> None:
    """__contains__ should reject an id that was never stored."""

    def _store(payload: object, note: str) -> StorableObject:
        return StorableObject(
            id=UID(), data=payload, description=note, tags=["dummy", "test"]
        )

    elements = [
        _store(UID(), "This is a dummy id"),
        _store(th.Tensor([1, 2, 3, 4]), "This is a dummy tensor n1"),
        _store(th.Tensor([10, 20, 30, 40]), "This is a dummy tensor n2"),
    ]
    dataset_obj = Dataset(
        id=UID(),
        data=elements,
        description="This is a dataset",
        tags=["dummy", "dataset"],
    )
    # a freshly generated UID cannot be among the stored element ids
    assert not dataset_obj.__contains__(_id=UID())
def test_dataset_get_element() -> None:
    """__getitem__ by id should return exactly the matching element."""

    def _store(payload: object, note: str) -> StorableObject:
        return StorableObject(
            id=UID(), data=payload, description=note, tags=["dummy", "test"]
        )

    elements = [
        _store(UID(), "This is a dummy id"),
        _store(th.Tensor([1, 2, 3, 4]), "This is a dummy tensor n1"),
        _store(th.Tensor([10, 20, 30, 40]), "This is a dummy tensor n2"),
    ]
    dataset_obj = Dataset(
        id=UID(),
        data=elements,
        description="This is a dataset",
        tags=["dummy", "dataset"],
    )
    result = dataset_obj.__getitem__(_id=elements[0].id)
    assert len(result) == 1
    assert result[0] == elements[0]
def test_dataset_get_element_fail() -> None:
    """__getitem__ with an unknown id should return an empty list."""

    def _store(payload: object, note: str) -> StorableObject:
        return StorableObject(
            id=UID(), data=payload, description=note, tags=["dummy", "test"]
        )

    elements = [
        _store(UID(), "This is a dummy id"),
        _store(th.Tensor([1, 2, 3, 4]), "This is a dummy tensor n1"),
        _store(th.Tensor([10, 20, 30, 40]), "This is a dummy tensor n2"),
    ]
    dataset_obj = Dataset(
        id=UID(),
        data=elements,
        description="This is a dataset",
        tags=["dummy", "dataset"],
    )
    assert dataset_obj.__getitem__(UID()) == []
def test_dataset_get_keys() -> None:
    """keys() should list the stored element ids in insertion order."""

    def _store(payload: object, note: str) -> StorableObject:
        return StorableObject(
            id=UID(), data=payload, description=note, tags=["dummy", "test"]
        )

    elements = [
        _store(UID(), "This is a dummy id"),
        _store(th.Tensor([1, 2, 3, 4]), "This is a dummy tensor n1"),
        _store(th.Tensor([10, 20, 30, 40]), "This is a dummy tensor n2"),
    ]
    dataset_obj = Dataset(
        id=UID(),
        data=elements,
        description="This is a dataset",
        tags=["dummy", "dataset"],
    )
    assert dataset_obj.keys() == [element.id for element in elements]
def test_dataset_del() -> None:
    """__delitem__ should remove exactly the element with the given id."""

    def _store(payload: object, note: str) -> StorableObject:
        return StorableObject(
            id=UID(), data=payload, description=note, tags=["dummy", "test"]
        )

    elements = [
        _store(UID(), "This is a dummy id"),
        _store(th.Tensor([1, 2, 3, 4]), "This is a dummy tensor n1"),
        _store(th.Tensor([10, 20, 30, 40]), "This is a dummy tensor n2"),
    ]
    dataset_obj = Dataset(
        id=UID(),
        data=elements,
        description="This is a dataset",
        tags=["dummy", "dataset"],
    )
    dataset_obj.__delitem__(elements[1].id)
    assert dataset_obj.data == [elements[0], elements[2]]
def test_serde_storable_obj() -> None:
    """A Dataset of StorableObjects must serialize and deserialize cleanly."""

    def _store(payload: object, note: str) -> StorableObject:
        return StorableObject(
            id=UID(), data=payload, description=note, tags=["dummy", "test"]
        )

    elements = [
        _store(th.Tensor([-1, -2, -3, -4]), "This is a dummy id"),
        _store(th.Tensor([1, 2, 3, 4]), "This is a dummy tensor n1"),
        _store(th.Tensor([10, 20, 30, 40]), "This is a dummy tensor n2"),
    ]
    dataset_obj = Dataset(
        id=UID(),
        data=elements,
        description="This is a dataset",
        tags=["dummy", "dataset"],
    )
    blob = sy.serialize(obj=dataset_obj)
    sy.deserialize(blob=blob)
def test_serde_storable_obj_2() -> None:
    """Round-tripping a Dataset through serialize/deserialize preserves
    its id, metadata and every stored element."""

    def _store(payload: object, note: str) -> StorableObject:
        return StorableObject(
            id=UID(), data=payload, description=note, tags=["dummy", "test"]
        )

    elements = [
        _store(th.Tensor([-1, -2, -3, -4]), "This is a dummy id"),
        _store(th.Tensor([1, 2, 3, 4]), "This is a dummy tensor n1"),
        _store(th.Tensor([10, 20, 30, 40]), "This is a dummy tensor n2"),
    ]
    obj = Dataset(
        id=UID(),
        data=elements,
        description="This is a dataset",
        tags=["dummy", "dataset"],
    )
    blob = serialize(obj)
    ds_obj = sy.deserialize(blob=blob)
    assert obj.id == ds_obj.id
    assert obj.description == ds_obj.description
    assert obj.tags == ds_obj.tags
    assert len(obj.data) == len(ds_obj.data)
    for before, after in zip(obj.data, ds_obj.data):
        assert before.id == after.id
        assert th.all(th.eq(before.data, after.data))
        assert before.description == after.description
        assert before.tags == after.tags
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import json
from concurrent.futures import ThreadPoolExecutor, as_completed
from db import commit_db_item
from db.player import Player
from db.team import Team
from utils.player_finder import PlayerFinder
from utils.player_data_retriever import PlayerDataRetriever
from utils.eliteprospects_utils import retrieve_drafted_players_with_dobs
MAX_WORKERS = 8
def migrate_players(plr_src_file=None):
    """
    Migrates players from specified JSON file to currently connected database.

    Parameters:
        plr_src_file: path to the JSON source file; defaults to
            'nhl_players.json' located next to this module.
    """
    if not plr_src_file:
        plr_src_file = os.path.join(
            os.path.dirname(__file__), 'nhl_players.json')
    # use a context manager so the source file handle is closed promptly
    # (json.load(open(...)) previously leaked the handle)
    with open(plr_src_file) as src:
        migration_data = json.load(src)
    for player_id in sorted(migration_data.keys()):
        plr_data = migration_data[player_id]
        # populating mandatory attributes
        last_name = plr_data['last_name']
        first_name = plr_data['first_name']
        position = plr_data['position']
        # setting up and populating optional keyword attributes
        kw_args = {
            key: plr_data.get(key)
            for key in (
                'alternate_last_names', 'alternate_first_names',
                'alternate_positions', 'capfriendly_id')
        }
        plr = Player(
            player_id, last_name, first_name, position, **kw_args)
        print("Working on %s" % plr)
        commit_db_item(plr)
def search_players(src_type, teams=None, season=None):
    """
    Searches (and optionally creates) players that are listed either on
    each team's official roster page (source type 'roster') or on its *in-the-
    system* page (source type 'system'). Finally retrieves career regular
    season and playoff statistics for each player.

    Parameters:
        src_type: 'roster' or 'system' page to source players from
        teams: optional list of team abbreviations to restrict the search
        season: optional season; ignored when *teams* is given
    """
    plr_f = PlayerFinder()
    plr_r = PlayerDataRetriever()
    if teams is None:
        if season is None:
            # using current teams (if nothing else is specified)
            teams_of_interest = Team.find_teams_for_season()
        else:
            # using teams from specified season
            teams_of_interest = Team.find_teams_for_season(season)
    else:
        # explicitly using specified teams
        teams_of_interest = Team.find_teams_with_abbrs(teams)
    for team in sorted(teams_of_interest):
        team_players = plr_f.find_players_for_team(team, src_type, season)
        # using concurrent threads to speed up the retrieval of single player
        # season statistics
        with ThreadPoolExecutor(max_workers=MAX_WORKERS) as threads:
            future_tasks = {
                threads.submit(
                    plr_r.retrieve_player_seasons,
                    plr.player_id): plr for plr in team_players}
            for future in as_completed(future_tasks):
                try:
                    # calling result() surfaces any exception raised inside
                    # the worker (previously the future was never inspected,
                    # so worker errors were silently dropped)
                    # TODO: think of something to do with the result here
                    future.result()
                except Exception as e:
                    print("Concurrent task generated an exception: %s" % e)
def create_players_by_unused_ids(min_id, max_id):
    """
    Takes specified range of ids and checks for each instance whether a player
    already exists or otherwise tries to create one.
    """
    finder = PlayerFinder()
    for candidate_id in range(min_id, max_id):
        # search_player_by_id looks up an existing item or creates one
        player = finder.search_player_by_id(candidate_id)
        if player:
            print(player)
def create_players_for_draft_year(draft_year):
    """
    Uses specified draft year to create database items for each drafted player.
    """
    # retrieving suggestions from nhl.com for all retrieved drafted players
    for suggestion in get_suggestions_for_drafted_players(draft_year):
        # exploding tuple
        # TODO: use named tuple
        plr_id, position, last_name, first_name, _, alt_last_name = suggestion
        # checking if player already exists
        existing = Player.find_by_id(plr_id)
        if existing is not None:
            # if yes, announcing the player item's existence
            print("+ %s already existing in database" % existing)
            continue
        # otherwise creating a new player item
        new_plr = Player(
            plr_id, last_name, first_name,
            position, alternate_last_names=alt_last_name)
        commit_db_item(new_plr)
        print("+ %s created" % Player.find_by_id(plr_id))
def get_suggestions_for_drafted_players(draft_year):
    """
    Retrieves player id suggestions from nhl.com for all players drafted in
    specified year.

    Returns a list of suggestion tuples; each raw suggestion is extended by
    one trailing element carrying an alternate last name (or '' when none
    applies).
    """
    # retrieving players (with date of births and alternate last names) drafted
    # in specified year
    drafted_players = retrieve_drafted_players_with_dobs(draft_year)
    print(
        "+ Basic information retrieved for " +
        "%d players drafted in %d" % (len(drafted_players), draft_year))
    pfr = PlayerFinder()
    suggested_players = list()
    # TODO: more tidy, more structured output
    for drafted_plr in drafted_players:
        # 'found' only tracks whether the match was clean; it gates the
        # diagnostic print at the bottom, not inclusion in the result list
        found = True
        # trying to find suggestions by using both first and last name
        suggestions = pfr.get_suggested_players(
            drafted_plr.last_name, drafted_plr.first_name)
        # otherwise trying to find suggestions for last name only
        if not suggestions:
            found = False
            print(
                "+ No suggestion found " +
                "for %s %s. " % (
                    drafted_plr.first_name, drafted_plr.last_name) +
                "Trying last name only.")
            suggestions = pfr.get_suggested_players(drafted_plr.last_name)
        # otherwise trying to find suggestions for alternate
        # last name (if applicable)
        if not suggestions and drafted_plr.alt_last_name:
            print(
                "+ No suggestion found for %s. " % drafted_plr.last_name +
                "Trying alternate last names.")
            suggestions = pfr.get_suggested_players(drafted_plr.alt_last_name)
        if len(suggestions) > 1:
            found = False
            print(
                "+ %d suggestions found " % len(suggestions) +
                "for %s %s" % (drafted_plr.first_name, drafted_plr.last_name))
        # finding out the exact suggestion by comparing dates of birth;
        # the for/else runs the else branch only when no suggestion's date
        # of birth matched (i.e. the loop finished without break)
        # NOTE(review): if all lookups above came back empty this loop body
        # never runs, and 'suggestion'/'suggested_dob' below would raise
        # NameError (or reuse the previous iteration's values) — presumably
        # at least one suggestion is guaranteed here; confirm with callers
        for suggestion in suggestions:
            # date of birth appears to be the last element of a raw
            # suggestion tuple — TODO confirm against PlayerFinder
            suggested_dob = suggestion[-1]
            if suggested_dob == drafted_plr.date_of_birth:
                break
        else:
            found = False
            print("+ Date of births don't match for %s %s (%s vs. %s)" % (
                drafted_plr.first_name, drafted_plr.last_name,
                suggested_dob, drafted_plr.date_of_birth))
            # no usable match: skip this drafted player entirely
            continue
        # extend the matched suggestion by an alternate-last-name slot:
        # whichever of (last_name, alt_last_name) was NOT the matched one
        if drafted_plr.alt_last_name:
            if suggestion[2] == drafted_plr.last_name:
                suggestion = suggestion + (drafted_plr.alt_last_name,)
            else:
                suggestion = suggestion + (drafted_plr.last_name,)
        else:
            suggestion = suggestion + ('',)
        # echo suggestions that were not matched cleanly for manual review
        if not found:
            print(suggestion)
        suggested_players.append(suggestion)
    return suggested_players
| |
#!/usr/bin/env python
import json
import time
import xml.etree.ElementTree as ET
import argparse
import sys
import subprocess
import requests
from decimal import Decimal
from gomatic.gocd.pipelines import Pipeline, PipelineGroup, PipelineEnvironment
from gomatic.gocd.repositories import GenericArtifactoryRepository
from gomatic.gocd.agents import Agent
from gomatic.xml_operations import Ensurance, PossiblyMissingElement, move_all_to_end, prettify
class GoCdConfigurator(object):
    """Facade over a GoCD server's XML configuration.

    Fetches the current config XML (and its md5) on construction, exposes
    typed accessors for its parts (pipelines, templates, agents, server
    attributes, repositories) and can post a modified config back via
    ``save_updated_config``.
    """

    def __init__(self, host_rest_client):
        self.__host_rest_client = host_rest_client
        self.__set_initial_config_xml()

    def __set_initial_config_xml(self):
        # keep both the raw XML (for change detection) and its md5 (GoCD
        # requires the md5 of the config revision a POST is based on)
        self.__initial_config, self._initial_md5 = self.__current_config_response()
        self.__xml_root = ET.fromstring(self.__initial_config)

    def __repr__(self):
        return "GoCdConfigurator(%s)" % self.__host_rest_client

    def as_python(self, pipeline, with_save=True):
        """Reverse-engineer executable Python that recreates *pipeline*."""
        result = "#!/usr/bin/env python\nfrom gomatic import *\n\nconfigurator = " + str(self) + "\n"
        result += "pipeline = configurator"
        result += pipeline.as_python_commands_applied_to_server()
        save_part = ""
        if with_save:
            save_part = "\n\nconfigurator.save_updated_config(save_config_locally=True, dry_run=True)"
        return result + save_part

    @property
    def current_config(self):
        """Freshly fetched config XML text from the server."""
        return self.__current_config_response()[0]

    def __current_config_response(self):
        # returns (xml_text, md5) as reported by the GoCD admin API
        config_url = "/go/admin/restful/configuration/file/GET/xml"
        response = self.__host_rest_client.get(config_url)
        if response.status_code != 200:
            raise Exception("Failed to get {} status {}\n:{}".format(config_url, response.status_code, response.text))
        return response.text, response.headers['x-cruise-config-md5']

    def reorder_elements_to_please_go(self):
        """Move known sections into the element order GoCD validates."""
        move_all_to_end(self.__xml_root, 'repositories')
        move_all_to_end(self.__xml_root, 'pipelines')
        move_all_to_end(self.__xml_root, 'templates')
        move_all_to_end(self.__xml_root, 'environments')
        move_all_to_end(self.__xml_root, 'agents')
        for pipeline in self.pipelines:
            pipeline.reorder_elements_to_please_go()
        for template in self.templates:
            template.reorder_elements_to_please_go()

    @property
    def config(self):
        """Locally-modified config serialised to XML (utf-8 encoded)."""
        self.reorder_elements_to_please_go()
        return ET.tostring(self.__xml_root, 'utf-8')

    @property
    def artifacts_dir(self):
        return self.__possibly_missing_server_element().attribute('artifactsdir')

    @artifacts_dir.setter
    def artifacts_dir(self, artifacts_dir):
        self.__server_element_ensurance().set('artifactsdir', artifacts_dir)

    @property
    def site_url(self):
        return self.__possibly_missing_server_element().attribute('siteUrl')

    @site_url.setter
    def site_url(self, site_url):
        self.__server_element_ensurance().set('siteUrl', site_url)

    @property
    def agent_auto_register_key(self):
        return self.__possibly_missing_server_element().attribute('agentAutoRegisterKey')

    @agent_auto_register_key.setter
    def agent_auto_register_key(self, agent_auto_register_key):
        self.__server_element_ensurance().set('agentAutoRegisterKey', agent_auto_register_key)

    @property
    def purge_start(self):
        return self.__server_decimal_attribute('purgeStart')

    @purge_start.setter
    def purge_start(self, purge_start_decimal):
        assert isinstance(purge_start_decimal, Decimal)
        self.__server_element_ensurance().set('purgeStart', str(purge_start_decimal))

    @property
    def purge_upto(self):
        return self.__server_decimal_attribute('purgeUpto')

    @purge_upto.setter
    def purge_upto(self, purge_upto_decimal):
        assert isinstance(purge_upto_decimal, Decimal)
        self.__server_element_ensurance().set('purgeUpto', str(purge_upto_decimal))

    def __server_decimal_attribute(self, attribute_name):
        attribute = self.__possibly_missing_server_element().attribute(attribute_name)
        return Decimal(attribute) if attribute else None

    def __possibly_missing_server_element(self):
        return PossiblyMissingElement(self.__xml_root).possibly_missing_child('server')

    def __server_element_ensurance(self):
        return Ensurance(self.__xml_root).ensure_child('server')

    @property
    def generic_artifactory_repositories(self):
        return [GenericArtifactoryRepository(e) for e in self.__xml_root.findall('repositories')[0].findall('repository')]

    def ensure_generic_artifactory_repository(self, repository_name):
        repository_element = Ensurance(self.__xml_root).ensure_child('repositories').ensure_child_with_attribute("repository", "name", repository_name)
        repository = GenericArtifactoryRepository(repository_element.element)
        return repository

    def ensure_removal_generic_artifactory_repository(self, repository_name):
        matching = [r for r in self.generic_artifactory_repositories if r.name == repository_name]
        for repository in matching:
            self.__xml_root.findall('repositories')[0].remove(repository.element)
        return self

    def ensure_replacement_generic_artifactory_repository(self, repository_name):
        repository = self.ensure_generic_artifactory_repository(repository_name)
        repository.make_empty()
        return repository

    @property
    def pipeline_groups(self):
        return [PipelineGroup(e, self) for e in self.__xml_root.findall('pipelines')]

    def ensure_env(self, env_name):
        envs = Ensurance(self.__xml_root).ensure_child("environments")
        env = Ensurance(envs.element).ensure_child_with_attribute("environment", "name", env_name)
        return PipelineEnvironment(env.element, self)

    def ensure_pipeline_group(self, group_name):
        pipeline_group_element = Ensurance(self.__xml_root).ensure_child_with_attribute("pipelines", "group", group_name)
        return PipelineGroup(pipeline_group_element.element, self)

    def ensure_removal_of_pipeline_group(self, group_name):
        matching = [g for g in self.pipeline_groups if g.name == group_name]
        for group in matching:
            self.__xml_root.remove(group.element)
        return self

    def remove_all_pipeline_groups(self):
        for e in self.__xml_root.findall('pipelines'):
            self.__xml_root.remove(e)
        return self

    @property
    def agents(self):
        return [Agent(e) for e in PossiblyMissingElement(self.__xml_root).possibly_missing_child('agents').findall('agent')]

    def ensure_removal_of_agent(self, hostname):
        matching = [agent for agent in self.agents if agent.hostname == hostname]
        for agent in matching:
            Ensurance(self.__xml_root).ensure_child('agents').element.remove(agent._element)
        return self

    @property
    def pipelines(self):
        result = []
        groups = self.pipeline_groups
        for group in groups:
            result.extend(group.pipelines)
        return result

    @property
    def templates(self):
        return [Pipeline(e, 'templates') for e in PossiblyMissingElement(self.__xml_root).possibly_missing_child('templates').findall('pipeline')]

    def ensure_template(self, template_name):
        pipeline_element = Ensurance(self.__xml_root).ensure_child('templates').ensure_child_with_attribute('pipeline', 'name', template_name).element
        return Pipeline(pipeline_element, 'templates')

    def ensure_replacement_of_template(self, template_name):
        template = self.ensure_template(template_name)
        template.make_empty()
        return template

    def ensure_removal_of_template(self, template_name):
        matching = [template for template in self.templates if template.name == template_name]
        root = Ensurance(self.__xml_root)
        templates_element = root.ensure_child('templates').element
        for template in matching:
            templates_element.remove(template.element)
        # drop the now-empty <templates> container so the config stays tidy
        if len(self.templates) == 0:
            root.element.remove(templates_element)
        return self

    @property
    def git_urls(self):
        return [pipeline.git_url for pipeline in self.pipelines if pipeline.has_single_git_material]

    @property
    def has_changes(self):
        return prettify(self.__initial_config) != prettify(self.config)

    def save_updated_config(self, save_config_locally=False, dry_run=False):
        """Post local modifications back to the server.

        When *save_config_locally* is set, before/after snapshots are written
        next to the working directory; when *dry_run* is set and kdiff3 is
        available, the diff is shown instead of posting.
        """
        config_before = prettify(self.__initial_config)
        config_after = prettify(self.config)
        if save_config_locally:
            # write in binary mode with context managers: the original wrote
            # utf-8-encoded bytes into text-mode ('w') files — a TypeError on
            # Python 3 — and never closed the handles
            with open('config-before.xml', 'wb') as before_file:
                before_file.write(config_before.encode('utf-8'))
            with open('config-after.xml', 'wb') as after_file:
                after_file.write(config_after.encode('utf-8'))

        def has_kdiff3():
            # best-effort probe; any failure just means "no kdiff3"
            try:
                return subprocess.call(["kdiff3", "-version"]) == 0
            except Exception:
                return False

        if dry_run and config_before != config_after and has_kdiff3():
            subprocess.call(["kdiff3", "config-before.xml", "config-after.xml"])
        data = {
            'xmlFile': self.config,
            'md5': self._initial_md5
        }
        if not dry_run and config_before != config_after:
            self.__host_rest_client.post('/go/admin/restful/configuration/file/POST/xml', data)
            # re-fetch so subsequent edits are based on the saved revision
            self.__set_initial_config_xml()
class HostRestClient(object):
    """Thin wrapper around ``requests`` for talking to a GoCD server."""

    def __init__(self, host, ssl=False, ssl_verify=True):
        self.__host = host
        self.__ssl = ssl
        self.__ssl_verify = ssl_verify

    def __repr__(self):
        return 'HostRestClient("{0}", ssl={1})'.format(self.__host, self.__ssl)

    def __path(self, path):
        http_prefix = 'https://' if self.__ssl else 'http://'
        return '{0}{1}{2}'.format(http_prefix, self.__host, path)

    def get(self, path):
        """GET *path*, retrying up to five times on 503/504.

        Sleeps one second *before* each retry; the original slept after the
        retry request, which added no back-off before the first retry and
        delayed returning an already-successful response by a second.
        """
        result = requests.get(self.__path(path), verify=self.__ssl_verify)
        count = 0
        while result.status_code in (503, 504) and count < 5:
            time.sleep(1)
            result = requests.get(self.__path(path), verify=self.__ssl_verify)
            count += 1
        return result

    def post(self, path, data):
        """POST *data* to *path*; raise RuntimeError on any non-200 reply."""
        url = self.__path(path)
        result = requests.post(url, data, verify=self.__ssl_verify)
        if result.status_code != 200:
            try:
                result_json = json.loads(result.text.replace("\\'", "'"))
                message = result_json.get('result', result.text)
            except ValueError:
                raise RuntimeError("Could not post config to Go server (%s) [status code=%s] (and result was not json):\n%s" % (url, result.status_code, result))
            raise RuntimeError("Could not post config to Go server (%s) [status code=%s]:\n%s" % (url, result.status_code, message))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Gomatic is an API for configuring GoCD. '
                                                 'Run python -m gomatic.go_cd_configurator to reverse engineer code to configure an existing pipeline.')
    parser.add_argument('-s', '--server', help='the go server (e.g. "localhost:8153" or "my.gocd.com")')
    parser.add_argument('-p', '--pipeline', help='the name of the pipeline to reverse-engineer the config for')
    parser.add_argument('--ssl', help='use HTTPS for the connection to the gocd server', dest='ssl', action='store_true', default=False)
    # NOTE(review): with action='store_true' and default=True this flag can
    # never actually disable verification; left as-is to keep the CLI stable.
    parser.add_argument('--ssl-verify', help='if set the identity of the ssl certificate will be verified.', dest='verify_ssl', action='store_true', default=True)

    args = parser.parse_args()

    # no arguments at all: show usage and bail out
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    # bug fix: the parsed attribute is 'verify_ssl' (see dest= above); the
    # original read the non-existent 'args.ssl_verify' and crashed with
    # AttributeError on every run
    go_server = GoCdConfigurator(HostRestClient(args.server, ssl=args.ssl, ssl_verify=args.verify_ssl))

    matching_pipelines = [p for p in go_server.pipelines if p.name == args.pipeline]
    if len(matching_pipelines) != 1:
        raise RuntimeError("Should have found one matching pipeline but found %s" % matching_pipelines)
    pipeline = matching_pipelines[0]

    print(go_server.as_python(pipeline))
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import base64
import cloudpickle
import os
import re
import requests
import threading
import traceback
from configparser import ConfigParser
from concurrent.futures import ThreadPoolExecutor
from future.moves.urllib.parse import ParseResult, urlparse
from io import open, StringIO
from requests_kerberos import HTTPKerberosAuth, REQUIRED
from livy.job_handle import JobHandle
class HttpClient(object):
    """A http based client for submitting Spark-based jobs to a Livy backend.

    Parameters
    ----------
    url_str : string
        Livy server url to create a new session or the url of an existing
        session
    load_defaults : boolean, optional
        This parameter decides if the default config needs to be loaded
        Default is True
    conf_dict : dict, optional
        The key-value pairs in the conf_dict will be loaded to the config
        Default is None

    Examples
    --------
    Imports needed to create an instance of HttpClient
    >>> from livy.client import HttpClient

    1) Creates a client that is loaded with default config
    as 'load_defaults' is True by default
    >>> client = HttpClient("http://example:8998/")

    2) Creates a client that does not load default config, but loads
    config that are passed in 'config_dict'
    >>> config_dict = {'spark.app.name', 'Test App'}
    >>> client = HttpClient("http://example:8998/", load_defaults=False,
    >>>     config_dict=config_dict)
    """

    _CONFIG_SECTION = 'env'
    _LIVY_CLIENT_CONF_DIR = "LIVY_CLIENT_CONF_DIR"

    def __init__(self, url, load_defaults=True, conf_dict=None):
        uri = urlparse(url)
        self._config = ConfigParser()
        self._load_config(load_defaults, conf_dict)
        self._job_type = 'pyspark'
        match = re.match(r'(.*)/sessions/([0-9]+)', uri.path)
        if match:
            # url points at an existing session: strip the /sessions/<id>
            # suffix to get the server base, then reconnect to that session
            base = ParseResult(scheme=uri.scheme, netloc=uri.netloc,
                path=match.group(1), params=uri.params, query=uri.query,
                fragment=uri.fragment)
            self._set_uri(base)
            self._conn = _LivyConnection(base, self._config)
            self._session_id = int(match.group(2))
            self._reconnect_to_existing_session()
        else:
            # plain server url: create a brand new session
            self._set_uri(uri)
            session_conf_dict = dict(self._config.items(self._CONFIG_SECTION))
            self._conn = _LivyConnection(uri, self._config)
            self._session_id = self._create_new_session(
                session_conf_dict).json()['id']
        # single worker so queued jobs are submitted strictly in order
        self._executor = ThreadPoolExecutor(max_workers=1)
        self._stopped = False
        self.lock = threading.Lock()

    def submit(self, job):
        """
        Submits a job for execution to the spark cluster.

        Parameters
        ----------
        job : function
            The function must accept a single parameter, which is an instance
            of JobContext.

        Returns
        -------
        job_handle : an instance of the class JobHandle
            A handle that can be used to monitor the job

        Examples
        -------
        >>> def simple_spark_job(context):
        >>>     elements = [10, 20, 30, 40, 50]
        >>>     return context.sc.parallelize(elements, 2).count()

        >>> client.submit(simple_spark_job)
        """
        return self._send_job('submit-job', job)

    def run(self, job):
        """
        Asks the remote context to run a job immediately.

        Normally, the remote context will queue jobs and execute them based on
        how many worker threads have been configured. This method will run
        the submitted job in the same thread processing the RPC message,
        so that queueing does not apply.

        It's recommended that this method only be used to run code that
        finishes quickly. This avoids interfering with the normal operation
        of the context.

        Parameters
        ----------
        job : function
            The function must accept a single parameter, which is an instance
            of JobContext. Spark jobs can be created with the help of
            JobContext, which exposes the Spark libraries.

        Returns
        -------
        future : concurrent.futures.Future
            A future to monitor the status of the job

        Examples
        -------
        >>> def simple_job(context):
        >>>     return "hello"

        >>> client.run(simple_job)
        """
        return self._send_job("run-job", job)

    def add_file(self, file_uri):
        """
        Adds a file to the running remote context.

        Note that the URL should be reachable by the Spark driver process. If
        running the driver in cluster mode, it may reside on a different
        host, meaning "file:" URLs have to exist on that node (and not on
        the client machine).

        Parameters
        ----------
        file_uri : string
            String representation of the uri that points to the location
            of the file

        Returns
        -------
        future : concurrent.futures.Future
            A future to monitor the status of the job

        Examples
        -------
        >>> client.add_file("file:/test_add.txt")

        >>> # Example job using the file added using add_file function
        >>> def add_file_job(context):
        >>>     from pyspark import SparkFiles
        >>>     def func(iterator):
        >>>         with open(SparkFiles.get("test_add.txt")) as testFile:
        >>>             fileVal = int(testFile.readline())
        >>>         return [x * fileVal for x in iterator]
        >>>     return context.sc.parallelize([1, 2, 3, 4])
        >>>         .mapPartitions(func).collect()

        >>> client.submit(add_file_job)
        """
        return self._add_file_or_pyfile_job("add-file", file_uri)

    def add_jar(self, file_uri):
        """
        Adds a jar file to the running remote context.

        Note that the URL should be reachable by the Spark driver process. If
        running the driver in cluster mode, it may reside on a different host,
        meaning "file:" URLs have to exist on that node (and not on the
        client machine).

        Parameters
        ----------
        file_uri : string
            String representation of the uri that points to the location
            of the file

        Returns
        -------
        future : concurrent.futures.Future
            A future to monitor the status of the job

        Examples
        -------
        >>> client.add_jar("file:/test_package.jar")
        """
        return self._add_file_or_pyfile_job("add-jar", file_uri)

    def add_pyfile(self, file_uri):
        """
        Adds a .py or .zip to the running remote context.

        Note that the URL should be reachable by the Spark driver process. If
        running the driver in cluster mode, it may reside on a different host,
        meaning "file:" URLs have to exist on that node (and not on the
        client machine).

        Parameters
        ----------
        file_uri : string
            String representation of the uri that points to the location
            of the file

        Returns
        -------
        future : concurrent.futures.Future
            A future to monitor the status of the job

        Examples
        -------
        >>> client.add_pyfile("file:/test_package.egg")

        >>> # Example job using the file added using add_pyfile function
        >>> def add_pyfile_job(context):
        >>>     # Importing module from test_package.egg
        >>>     from test.pyfile_test import TestClass
        >>>     test_class = TestClass()
        >>>     return test_class.say_hello()

        >>> client.submit(add_pyfile_job)
        """
        return self._add_file_or_pyfile_job("add-pyfile", file_uri)

    def upload_file(self, file_path):
        """
        Upload a file to be passed to the Spark application.

        Parameters
        ----------
        file_path : string
            File path of the local file to be uploaded.

        Returns
        -------
        future : concurrent.futures.Future
            A future to monitor the status of the job

        Examples
        -------
        >>> client.upload_file("/test_upload.txt")

        >>> # Example job using the file uploaded using upload_file function
        >>> def upload_file_job(context):
        >>>     from pyspark import SparkFiles
        >>>     def func(iterator):
        >>>         with open(SparkFiles.get("test_upload.txt")) as testFile:
        >>>             fileVal = int(testFile.readline())
        >>>         return [x * fileVal for x in iterator]
        >>>     return context.sc.parallelize([1, 2, 3, 4])
        >>>         .mapPartitions(func).collect()

        >>> client.submit(add_file_job)
        """
        # the handle is closed by _LivyConnection.send_request once the
        # request has been sent
        return self._upload_file_or_pyfile("upload-file",
                                           open(file_path, 'rb'))

    def upload_pyfile(self, file_path):
        """
        Upload a .py or .zip dependency to be passed to the Spark application.

        Parameters
        ----------
        file_path : string
            File path of the local file to be uploaded.

        Returns
        -------
        future : concurrent.futures.Future
            A future to monitor the status of the job

        Examples
        -------
        >>> client.upload_pyfile("/test_package.egg")

        >>> # Example job using the file uploaded using upload_pyfile function
        >>> def upload_pyfile_job(context):
        >>>     # Importing module from test_package.egg
        >>>     from test.pyfile_test import TestClass
        >>>     test_class = TestClass()
        >>>     return test_class.say_hello()

        >>> client.submit(upload_pyfile_job)
        """
        # the handle is closed by _LivyConnection.send_request once the
        # request has been sent
        return self._upload_file_or_pyfile("upload-pyfile",
                                           open(file_path, 'rb'))

    def stop(self, shutdown_context):
        """
        Stops the remote context.

        The function will return immediately and will not wait for the pending
        jobs to get completed

        Parameters
        ----------
        shutdown_context : Boolean
            Whether to shutdown the underlying Spark context. If false, the
            context will keep running and it's still possible to send commands
            to it, if the backend being used supports it.
        """
        with self.lock:
            if not self._stopped:
                self._executor.shutdown(wait=False)
                try:
                    if shutdown_context:
                        session_uri = "/" + str(self._session_id)
                        headers = {'X-Requested-By': 'livy'}
                        self._conn.send_request("DELETE", session_uri,
                            headers=headers)
                except Exception:
                    raise Exception(traceback.format_exc())
                self._stopped = True

    def _set_uri(self, uri):
        # only http/https urls are supported; anything else is rejected
        if uri is not None and uri.scheme in ('http', 'https'):
            self._config.set(self._CONFIG_SECTION, 'livy.uri', uri.geturl())
        else:
            # bug fix: geturl() must be *called* — the original interpolated
            # the bound method object into the error message
            url_exception = uri.geturl() if uri is not None else None
            raise ValueError('Cannot create client - Uri not supported - ',
                url_exception)

    def _set_conf(self, key, value):
        # setting a key to None removes it from the config
        if value is not None:
            self._config.set(self._CONFIG_SECTION, key, value)
        else:
            self._delete_conf(key)

    def _delete_conf(self, key):
        self._config.remove_option(self._CONFIG_SECTION, key)

    def _set_multiple_conf(self, conf_dict):
        for key, value in conf_dict.items():
            self._set_conf(key, value)

    def _load_config(self, load_defaults, conf_dict):
        self._config.add_section(self._CONFIG_SECTION)
        if load_defaults:
            self._load_default_config()
        if conf_dict is not None and len(conf_dict) > 0:
            self._set_multiple_conf(conf_dict)

    def _load_default_config(self):
        # conf files are looked up in $LIVY_CLIENT_CONF_DIR, if set
        config_dir = os.environ.get(self._LIVY_CLIENT_CONF_DIR)
        if config_dir is not None:
            config_files = os.listdir(config_dir)
            default_conf_files = ['spark-defaults.conf', 'livy-client.conf']
            for default_conf_file in default_conf_files:
                if default_conf_file in config_files:
                    self._load_config_from_file(config_dir, default_conf_file)

    def _load_config_from_file(self, config_dir, config_file):
        path = os.path.join(config_dir, config_file)
        # prepend a section header so plain "key value" conf files can be
        # parsed by ConfigParser; use a context manager so the handle is
        # closed (the original open(...).read() leaked it)
        with open(path, encoding='utf-8') as conf:
            data = "[" + self._CONFIG_SECTION + "]\n" + conf.read()
        # read_file replaces readfp, which was deprecated since Python 3.2
        # and removed in Python 3.12
        self._config.read_file(StringIO(data))

    def _create_new_session(self, session_conf_dict):
        data = {'kind': 'pyspark', 'conf': session_conf_dict}
        response = self._conn.send_request('POST', "/",
            headers=self._conn._JSON_HEADERS, data=data)
        return response

    def _reconnect_to_existing_session(self):
        reconnect_uri = "/" + str(self._session_id) + "/connect"
        self._conn.send_request('POST', reconnect_uri,
            headers=self._conn._JSON_HEADERS)

    def _send_job(self, command, job):
        # jobs travel to the server as base64-encoded cloudpickle blobs
        pickled_job = cloudpickle.dumps(job)
        base64_pickled_job = base64.b64encode(pickled_job).decode('utf-8')
        base64_pickled_job_data = \
            {'job': base64_pickled_job, 'jobType': self._job_type}
        handle = JobHandle(self._conn, self._session_id,
            self._executor)
        handle._start(command, base64_pickled_job_data)
        return handle

    def _add_file_or_pyfile_job(self, command, file_uri):
        data = {'uri': file_uri}
        suffix_url = "/" + str(self._session_id) + "/" + command
        return self._executor.submit(self._add_or_upload_resource, suffix_url,
            data=data, headers=self._conn._JSON_HEADERS)

    def _upload_file_or_pyfile(self, command, open_file):
        files = {'file': open_file}
        suffix_url = "/" + str(self._session_id) + "/" + command
        return self._executor.submit(self._add_or_upload_resource, suffix_url,
            files=files)

    def _add_or_upload_resource(
        self,
        suffix_url,
        files=None,
        data=None,
        headers=None
    ):
        return self._conn.send_request('POST', suffix_url, files=files,
            data=data, headers=headers).content
class _LivyConnection(object):
    """Low-level HTTP transport for the Livy /sessions REST endpoints."""

    _SESSIONS_URI = '/sessions'
    # Timeout in seconds
    _TIMEOUT = 10
    _JSON_HEADERS = {
        'Content-Type': 'application/json',
        'Accept': 'application/json',
    }
    _SPNEGO_ENABLED_CONF = 'livy.client.http.spnego.enable'

    def __init__(self, uri, config):
        self._server_url_prefix = uri.geturl() + self._SESSIONS_URI
        self._requests = requests
        # serialises requests so at most one is in flight per connection
        self.lock = threading.Lock()
        self._spnego_enabled = \
            config.getboolean('env', self._SPNEGO_ENABLED_CONF) \
            if config.has_option('env', self._SPNEGO_ENABLED_CONF) else False

    def _spnego_auth(self):
        # Kerberos/SPNEGO auth object, or None when spnego is disabled
        if self._spnego_enabled:
            return HTTPKerberosAuth(mutual_authentication=REQUIRED,
                sanitize_mutual_error_response=False)
        else:
            return None

    def send_request(
        self,
        method,
        suffix_url,
        headers=None,
        files=None,
        data=None
    ):
        """
        Makes a HTTP request to the server for the given REST method and
        endpoint.

        This method takes care of closing the handles of the files that
        are to be sent as part of the http request

        Parameters
        ----------
        method : string
            REST verb
        suffix_url : string
            valid API endpoint
        headers : dict, optional
            Http headers for the request
            Default is None
        files : dict, optional
            Files to be sent with the http request
            Default is None
        data : dict, optional
            The payload to be sent with the http request
            Default is None

        Returns
        -------
        response : requests.Response
            The server's response to the request
        """
        try:
            with self.lock:
                local_headers = {'X-Requested-By': 'livy'}
                if headers:
                    local_headers.update(headers)
                request_url = self._server_url_prefix + suffix_url
                return self._requests.request(method, request_url,
                    timeout=self._TIMEOUT, headers=local_headers, files=files,
                    json=data, auth=self._spnego_auth())
        finally:
            if files is not None:
                # actually close the handles as the docstring promises:
                # the original only cleared the dict, which dropped the
                # references but left the descriptors open until GC
                for file_obj in files.values():
                    close = getattr(file_obj, 'close', None)
                    if close is not None:
                        close()
                files.clear()
| |
import csv
from django import http
from django.conf import settings
from django.contrib import admin, messages
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.shortcuts import redirect, render
from django.views import debug
import commonware.log
import elasticsearch
import jinja2
import mkt
from mkt.developers.models import ActivityLog
from mkt.prices.utils import update_from_csv
from mkt.site.decorators import permission_required
from mkt.site.mail import FakeEmailBackend
from mkt.site.utils import chunked
from mkt.users.models import UserProfile
from mkt.webapps.models import WebappUser, Webapp
from mkt.webapps.tasks import update_manifests
from . import tasks
from .decorators import admin_required
from .forms import DevMailerForm, GenerateErrorForm, PriceTiersForm, YesImSure
from .models import EmailPreviewTopic
log = commonware.log.getLogger('z.zadmin')
@admin_required
def show_settings(request):
    """Render the sanitised Django settings for admin inspection."""
    settings_dict = debug.get_safe_settings()
    for key in ['GOOGLE_ANALYTICS_CREDENTIALS']:
        settings_dict[key] = debug.cleanse_setting(
            key, getattr(settings, key, {}))
    # never expose the receipt signing key, even cleansed
    settings_dict['WEBAPPS_RECEIPT_KEY'] = '********************'
    return render(request, 'zadmin/settings.html',
                  {'settings_dict': settings_dict})
@admin_required
def env(request):
    """Echo the request object back, HTML-escaped, inside a <pre> block."""
    escaped = jinja2.escape(request)
    return http.HttpResponse(u'<pre>%s</pre>' % (escaped,))
@admin_required
def email_preview_csv(request, topic):
    """Stream the previewed emails for *topic* as a CSV attachment."""
    fields = ['from_email', 'recipient_list', 'subject', 'body']
    resp = http.HttpResponse()
    resp['Content-Type'] = 'text/csv; charset=utf-8'
    resp['Content-Disposition'] = "attachment; filename=%s.csv" % (topic)
    writer = csv.writer(resp)
    writer.writerow(fields)
    for row in EmailPreviewTopic(topic=topic).filter().values_list(*fields):
        writer.writerow([value.encode('utf8') for value in row])
    return resp
@admin.site.admin_view
def mail(request):
    """Show all fake-backend emails; a POST clears the stored mail."""
    backend = FakeEmailBackend()
    if request.method == 'POST':
        backend.clear()
        return redirect('zadmin.mail')
    context = dict(mail=backend.view_all())
    return render(request, 'zadmin/mail.html', context)
@admin.site.admin_view
def email_devs(request):
    """Compose and queue a bulk email to a selected group of developers.

    GET renders the form (plus a link to the CSV of the last preview batch,
    when one exists).  POST with a valid form selects the target developer
    accounts, queues the mails via tasks.admin_email and redirects back.
    """
    form = DevMailerForm(request.POST or None)
    preview = EmailPreviewTopic(topic='email-devs')
    if preview.filter().count():
        preview_csv = reverse('zadmin.email_preview_csv',
                              args=[preview.topic])
    else:
        preview_csv = None
    if request.method == 'POST' and form.is_valid():
        data = form.cleaned_data
        # Start from every dev/owner role that has an email address.
        qs = (WebappUser.objects.filter(role__in=(mkt.AUTHOR_ROLE_DEV,
                                                  mkt.AUTHOR_ROLE_OWNER))
              .exclude(user__email=None))
        # 'payments'/'desktop_apps' reach non-deleted apps of any status;
        # everything else is restricted to listed apps.
        if data['recipients'] in ('payments', 'desktop_apps'):
            qs = qs.exclude(webapp__status=mkt.STATUS_DELETED)
        else:
            qs = qs.filter(webapp__status__in=mkt.LISTED_STATUSES)
        if data['recipients'] in ('payments', 'payments_region_enabled',
                                  'payments_region_disabled'):
            # Paid apps only: drop free and other-inapp premium types.
            qs = qs.exclude(webapp__premium_type__in=(mkt.WEBAPP_FREE,
                                                      mkt.WEBAPP_OTHER_INAPP))
            if data['recipients'] == 'payments_region_enabled':
                qs = qs.filter(webapp__enable_new_regions=True)
            elif data['recipients'] == 'payments_region_disabled':
                qs = qs.filter(webapp__enable_new_regions=False)
        elif data['recipients'] in ('apps', 'free_apps_region_enabled',
                                    'free_apps_region_disabled'):
            if data['recipients'] == 'free_apps_region_enabled':
                qs = qs.filter(webapp__enable_new_regions=True)
            elif data['recipients'] == 'free_apps_region_disabled':
                qs = qs.filter(webapp__enable_new_regions=False)
        elif data['recipients'] == 'desktop_apps':
            qs = (qs.filter(
                webapp__webappdevicetype__device_type=mkt.DEVICE_DESKTOP.id))
        else:
            raise NotImplementedError('If you want to support emailing other '
                                      'types of developers, do it here!')
        if data['preview_only']:
            # Clear out the last batch of previewed emails.
            preview.filter().delete()
        total = 0
        # De-duplicate addresses, then queue the task in batches of 100.
        for emails in chunked(set(qs.values_list('user__email', flat=True)),
                              100):
            total += len(emails)
            tasks.admin_email.delay(emails, data['subject'], data['message'],
                                    preview_only=data['preview_only'],
                                    preview_topic=preview.topic)
        msg = 'Emails queued for delivery: %s' % total
        if data['preview_only']:
            msg = '%s (for preview only, emails not sent!)' % msg
        messages.success(request, msg)
        return redirect('zadmin.email_devs')
    return render(request, 'zadmin/email-devs.html',
                  dict(form=form, preview_csv=preview_csv))
@permission_required([('Admin', '%'),
                      ('AdminTools', 'View'),
                      ('ReviewerAdminTools', 'View')])
def index(request):
    """Admin landing page: show the five most recent admin events.

    The local name is `events` so it does not shadow the module-level
    commonware logger `log`; the template context key stays 'log'.
    """
    events = ActivityLog.objects.admin_events()[:5]
    return render(request, 'zadmin/index.html', {'log': events})
@admin.site.admin_view
def memcache(request):
    """Show memcache stats and let an admin flush the whole cache."""
    form = YesImSure(request.POST or None)
    if form.is_valid() and form.cleaned_data['yes']:
        cache.clear()
        form = YesImSure()
        messages.success(request, 'Cache cleared')
    # _cache is backend-private; only memcached-style backends expose
    # get_stats(), so fall back to an empty list otherwise.
    stats = []
    if cache._cache and hasattr(cache._cache, 'get_stats'):
        stats = cache._cache.get_stats()
    return render(request, 'zadmin/memcache.html',
                  {'form': form, 'stats': stats})
@admin_required
def generate_error(request):
    """Deliberately trigger an error (via the form) to test error reporting."""
    form = GenerateErrorForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        form.explode()
    return render(request, 'zadmin/generate-error.html', {'form': form})
@permission_required([('Admin', '%'),
                      ('MailingLists', 'View')])
def export_email_addresses(request):
    """Render the page that offers the email-addresses export button."""
    context = {}
    return render(request, 'zadmin/export_button.html', context)
@permission_required([('Admin', '%'),
                      ('MailingLists', 'View')])
def email_addresses_file(request):
    """Return a plain-text attachment of opted-in user email addresses."""
    response = http.HttpResponse()
    response['Content-Type'] = 'text/plain; charset=utf-8'
    response['Content-Disposition'] = ('attachment; '
                                       'filename=amo_optin_emails.txt')
    # notification_id=13 appears to be the mailing-list opt-in notification
    # — confirm against the notifications table if this ever changes.
    emails = (UserProfile.objects.filter(notifications__notification_id=13,
                                         notifications__enabled=1)
              .values_list('email', flat=True))
    for address in emails:
        if address is not None:
            response.write(address + '\n')
    return response
@admin_required
def price_tiers(request):
    """Upload a CSV of price tiers and apply it via update_from_csv."""
    results = []
    form = PriceTiersForm(request.POST or None, request.FILES)
    if request.method == 'POST' and form.is_valid():
        results = update_from_csv(form.cleaned_data['prices'])
    return render(request, 'zadmin/update-prices.html',
                  {'result': results, 'form': form})
@admin_required(reviewers=True)
def manifest_revalidation(request):
    """Queue manifest revalidation for all public, hosted, enabled apps."""
    if request.method == 'POST':
        # Collect the apps to revalidate.
        filters = Q(is_packaged=False, status=mkt.STATUS_PUBLIC,
                    disabled_by_user=False)
        pk_list = Webapp.objects.filter(filters).values_list('pk', flat=True)
        # Fan the work out in batches of 100 pks per task.
        for batch in chunked(pk_list, 100):
            update_manifests.delay(list(batch), check_hash=False)
        messages.success(request, 'Manifest revalidation queued')
    return render(request, 'zadmin/manifest.html')
@admin_required
def elastic(request):
    """Dump Elasticsearch alias/health/state/mapping info for debugging."""
    es = elasticsearch.Elasticsearch(hosts=settings.ES_HOSTS)
    indexes = set(settings.ES_INDEXES.values())
    context = {'aliases': es.indices.get_aliases(),
               'health': es.cluster.health(),
               'state': es.cluster.state()}
    context['mappings'] = [(index, es.indices.get_mapping(index=index))
                           for index in indexes]
    return render(request, 'zadmin/elastic.html', context)
| |
from math import *
import numpy as np
from PyQt5.QtCore import QPoint
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QColor, QOpenGLShaderProgram, QOpenGLShader, QMatrix4x4, QVector3D, QVector2D
from Data import read_text_from_disk
from Data.Edges import Edge, EdgeType
from Data.Feature import FeatureType
from GUI.init import is_dark_theme
try:
from OpenGL import GL
except:
pass
from PyQt5.QtGui import QOpenGLVersionProfile
from PyQt5.QtGui import QSurfaceFormat
from PyQt5.QtWidgets import QDialog
from PyQt5.QtWidgets import QInputDialog
from PyQt5.QtWidgets import QOpenGLWidget
from Business.PartAction import *
from Data.Events import ChangeEvent
from Data.Part import Part, Feature
from Data.Vertex import Vertex
from GUI.Widgets.GlDrawable import GlPlaneDrawable, GlPartDrawable
from GUI.Widgets.SimpleDialogs import SketchDialog, ExtrudeDialog, RevolveDialog
class PartViewWidget(QOpenGLWidget):
    """OpenGL viewport that renders a Part: its faces, edges and sketch planes.

    Geometry from GlPlaneDrawable / GlPartDrawable instances is flattened
    into one vertex/normal buffer; the *_index attributes record where each
    section ends so paintGL can issue one glDrawArrays call per section:
    [plane faces | plane edges | part faces | part edges].

    Fixes vs. previous revision:
    * set_part detached the change handler from the NEW part instead of the
      previously shown one (handler leak on the old part).
    * redraw_drawables is now a no-op while no part is set (the show_*
      property setters call it before set_part has ever run).
    * resizeGL guards against a zero-sized viewport (division by zero).
    """

    # Attribute locations bound in the shader program.
    PROGRAM_VERTEX_ATTRIBUTE = 0
    PROGRAM_NORMALS_ATTRIBUTE = 1

    def __init__(self, parent, document):
        super(PartViewWidget, self).__init__(parent)
        self._document = document
        self._part = None
        self._is_dark_theme = is_dark_theme()
        self._gen_lists_start = 0
        self._drawables = []
        # Rotation angles are stored in 1/16ths of a degree (Qt convention).
        self.xRot = 225 * 16
        self.yRot = 45 * 16
        self.zRot = 0
        self._scale = 0.5
        self.lastPos = QPoint()
        self._offset = Vertex()
        self._mouse_position = None
        self.part_color = QColor(100, 100, 190, 255)
        self.part_specular = 0.5
        self.part_color_edge = QColor(50, 50, 50, 255)
        self.plane_color = QColor(0, 150, 200, 25)
        self.plane_color_edge = QColor(0, 150, 200, 180)
        if self._is_dark_theme:
            self.background_color = QColor(50, 50, 60, 25)
        else:
            self.background_color = QColor(180, 180, 195, 25)
        self._gl = None
        self._show_surfaces = True
        self._show_lines = True
        self._show_planes = True
        self._program = None
        # Flat geometry buffers plus the section end markers (see class doc).
        self._vertices = [[0, 0, 0]]
        self._normals = [[0, 0, 0]]
        self._plane_faces_index = 0
        self._plane_edges_index = 0
        self._part_faces_index = 0
        self._part_edges_index = 0
        self._new_verts = False
        # Renamed from `format` to avoid shadowing the builtin.
        surface_format = QSurfaceFormat()
        surface_format.setSamples(4)  # 4x MSAA
        self.setFormat(surface_format)

    @property
    def show_surfaces(self):
        return self._show_surfaces

    @show_surfaces.setter
    def show_surfaces(self, value):
        self._show_surfaces = value
        self.redraw_drawables()
        self.update()

    @property
    def show_lines(self):
        return self._show_lines

    @show_lines.setter
    def show_lines(self, value):
        self._show_lines = value
        self.redraw_drawables()
        self.update()

    @property
    def show_planes(self):
        return self._show_planes

    @show_planes.setter
    def show_planes(self, value):
        self._show_planes = value
        self.update()

    @property
    def part(self):
        return self._part

    def set_part(self, part: Part):
        """Attach *part* to the viewport and start listening for its changes."""
        if self._part == part:
            return
        if self._part is not None:
            # Detach from the part we were previously showing (bug fix:
            # this used to detach from the new `part` instead).
            self._part.remove_change_handler(self.part_changed)
        self._part = part
        self.update_drawables()
        self.redraw_drawables()
        self.part_color = QColor(part.color[0], part.color[1], part.color[2], part.color[3])
        self.part_specular = part.specular
        part.add_change_handler(self.part_changed)
        self.scale_to_content()
        self.update()

    def update_drawables(self):
        """Rebuild the drawable list: one per plane feature plus one for the part."""
        self._drawables = []
        if self._part is not None:
            for plane_feature in self._part.get_plane_features():
                drawable = GlPlaneDrawable(len(self._drawables) + self._gen_lists_start, plane_feature)
                self._drawables.append(drawable)
            part_drawable = GlPartDrawable(len(self._drawables) + self._gen_lists_start, self._part)
            self._drawables.append(part_drawable)

    def redraw_drawables(self, show_messages=True):
        """Flatten all drawables into the vertex/normal buffers.

        Fills the four sections in order and records the end index of each.
        When show_messages is True, progress is reported on the document
        status bar.
        """
        if self._part is None:
            # Called from the show_* setters, possibly before any part is set.
            return
        if self._part.update_needed:
            self._part.update_geometry()
            self.update_drawables()
        count = len(self._drawables) * 4
        counter = 1
        self._vertices.clear()
        self._normals.clear()
        # Section 1: plane faces (triangles).
        for drawable in self._drawables:
            if type(drawable) == GlPlaneDrawable:
                drawable.on_plane_changed(None)
                self._vertices.extend(drawable.vertices)
                self._normals.extend(drawable.normals)
                if show_messages:
                    self._document.set_status("Drawing planes faces %d" % counter, 100 * counter / count)
                counter += 1
        self._plane_faces_index = len(self._vertices) - 1
        # Section 2: plane edges (lines; normals unused, lines reused).
        for drawable in self._drawables:
            if type(drawable) == GlPlaneDrawable:
                self._vertices.extend(drawable.lines)
                self._normals.extend(drawable.lines)
                if show_messages:
                    self._document.set_status("Drawing plane edges %d" % counter, 100 * counter / count)
                counter += 1
        self._plane_edges_index = len(self._vertices) - 1
        # Section 3: part faces (triangles).
        for drawable in self._drawables:
            if type(drawable) == GlPartDrawable:
                self._vertices.extend(drawable.vertices)
                self._normals.extend(drawable.normals)
                if show_messages:
                    self._document.set_status("Drawing part faces %d" % counter, 100 * counter / count)
                counter += 1
        self._part_faces_index = len(self._vertices) - 1
        # Section 4: part edges (lines).
        for drawable in self._drawables:
            if type(drawable) == GlPartDrawable:
                self._vertices.extend(drawable.lines)
                self._normals.extend(drawable.lines)
                if show_messages:
                    self._document.set_status("Drawing part edges %d" % counter, 100 * counter / count)
                counter += 1
        self._part_edges_index = len(self._vertices) - 1
        # Tell paintGL to re-upload the attribute arrays.
        self._new_verts = True

    def on_zoom_fit(self):
        """Rebuild geometry and rescale the view to fit the part."""
        self.redraw_drawables()
        self.scale_to_content()
        self.update()

    def scale_to_content(self):
        """Set scale/offset so the part's bounding box fills the viewport."""
        limits = self._part.get_limits()
        size = max(limits[1].x - limits[0].x, limits[1].y - limits[0].y)
        size = max(size, limits[1].z - limits[0].z) * 0.7
        self._scale = size
        self._offset.x = 0
        self._offset.y = 0
        self._offset.z = 0

    def part_changed(self, event):
        """React to change events from the attached part."""
        if event.type == ChangeEvent.ObjectAdded:
            self.redraw_drawables()
            self.update()
            self.scale_to_content()
        if event.type == ChangeEvent.ValueChanged:
            self.part_color = QColor(self._part.color[0], self._part.color[1], self._part.color[2], self._part.color[3])
            self.part_specular = self._part.specular

    def on_escape(self):
        pass

    def on_insert_sketch(self):
        """Ask the user for a sketch and plane, then insert the sketch into the part."""
        sketch_dialog = SketchDialog(self, self._document, self._part)
        value = sketch_dialog.exec_()
        if value == QDialog.Accepted:
            sketch_name = sketch_dialog.sketch()
            plane_name = sketch_dialog.plane()
            sketch = self._document.get_geometries().get_sketch_by_name(sketch_name)
            # `plane` keeps the last match after the loop (original behavior).
            for plane in self._part.get_plane_features():
                if plane.name == plane_name:
                    break
            insert_sketch_in_part(self._document, self._part, sketch, plane)
            self.redraw_drawables()
            self.scale_to_content()

    def on_insert_extrude(self):
        """Run the extrude dialog and add the resulting extrusion to the part."""
        if self._part is not None:
            extrude_dialog = ExtrudeDialog(self, self._document, self._part)
            result = extrude_dialog.exec_()
            if result == QDialog.Accepted:
                sketch_feature = extrude_dialog.sketch_feature
                area = extrude_dialog.area
                direction = extrude_dialog.direction
                # Two-element length: [forward, backward] extents.
                if direction == Feature.Forward:
                    length = [extrude_dialog.length, 0]
                elif direction == Feature.Backward:
                    length = [0, -extrude_dialog.length]
                else:
                    length = [extrude_dialog.length / 2, -extrude_dialog.length / 2]
                if area is not None:
                    add_extrude_in_part(self._document, self._part, sketch_feature, area, length)

    def on_revolve_area(self):
        """Run the revolve dialog and add the resulting revolve to the part."""
        if self._part is not None:
            revolve_dialog = RevolveDialog(self, self._document, self._part)
            result = revolve_dialog.exec_()
            if result == QDialog.Accepted:
                sketch_feature = revolve_dialog.sketch_feature
                area = revolve_dialog.area
                revolve_axis = revolve_dialog.get_axis()
                direction = revolve_dialog.direction
                if direction == Feature.Forward:
                    length = [revolve_dialog.length, 0]
                elif direction == Feature.Backward:
                    length = [0, -revolve_dialog.length]
                else:
                    length = [revolve_dialog.length / 2, -revolve_dialog.length / 2]
                if area is not None and revolve_axis is not None:
                    add_revolve_in_part(self._document, self._part, sketch_feature, area, length, revolve_axis)

    def on_create_nurbs_surface(self):
        """Create a nurbs surface from the nurbs edges of the last sketch feature."""
        sketch_feature = None
        nurbs_edges = []
        feats = self._part.get_features_list()
        for feat in feats:
            if feat.feature_type == FeatureType.SketchFeature:
                sketch_feature = feat
                sketch = sketch_feature.get_objects()[0]
                for edge in sketch.get_edges():
                    if edge.type == EdgeType.NurbsEdge:
                        nurbs_edges.append(edge)
        if sketch_feature is not None and len(nurbs_edges) > 2:
            add_nurbs_surface_in_part(self._document, self._part, sketch_feature, nurbs_edges)

    def setXRotation(self, angle):
        # X rotation is clamped to [90deg, 270deg] (angles in 1/16 deg).
        angle = max(90 * 16, angle)
        angle = min(270 * 16, angle)
        if angle != self.xRot:
            self.xRot = angle
            # self.xRotationChanged.emit(angle)
            self.update()

    def setYRotation(self, angle):
        angle = self.normalizeAngle(angle)
        if angle != self.yRot:
            self.yRot = angle
            # self.yRotationChanged.emit(angle)
            self.update()

    def setZRotation(self, angle):
        angle = self.normalizeAngle(angle)
        if angle != self.zRot:
            self.zRot = angle
            # self.zRotationChanged.emit(angle)
            self.update()

    def initializeGL(self):
        """Set up GL state and compile/link the shader program."""
        c = self.context()
        f = QSurfaceFormat()           # The default
        vp = QOpenGLVersionProfile(f)
        self._gl = c.versionFunctions(vp)
        if self._gl is None:
            return
        self._gl.initializeOpenGLFunctions()
        self._gl.glEnable(self._gl.GL_MULTISAMPLE)
        self._gl.glEnable(self._gl.GL_DEPTH_TEST)
        self._gl.glDisable(self._gl.GL_CULL_FACE)
        self._gl.glBlendFunc(self._gl.GL_SRC_ALPHA, self._gl.GL_ONE_MINUS_SRC_ALPHA)
        self._gl.glEnable(self._gl.GL_BLEND)
        vertex_shader_code = read_text_from_disk("./GUI/Shaders/vertex_shader.c")
        fragment_shader_code = read_text_from_disk("./GUI/Shaders/fragment_shader.c")
        self._program = QOpenGLShaderProgram()
        self._program.addShaderFromSourceCode(QOpenGLShader.Vertex, vertex_shader_code)
        self._program.addShaderFromSourceCode(QOpenGLShader.Fragment, fragment_shader_code)
        self._program.link()
        self._program.bind()
        self._program.enableAttributeArray(self.PROGRAM_VERTEX_ATTRIBUTE)
        self._program.enableAttributeArray(self.PROGRAM_NORMALS_ATTRIBUTE)
        self._program.setAttributeArray(self.PROGRAM_VERTEX_ATTRIBUTE, self._vertices)
        self._program.setAttributeArray(self.PROGRAM_NORMALS_ATTRIBUTE, self._normals)
        self._program.setUniformValue('gradient_color', self.background_color)

    def paintGL(self):
        """Draw the background gradient, then the four geometry sections."""
        gl = self._gl
        if gl is None:
            return
        if self._part is not None:
            if self._part.update_needed:
                self.redraw_drawables(False)
        if self._new_verts:
            if len(self._vertices) > 0:
                self._program.setAttributeArray(self.PROGRAM_VERTEX_ATTRIBUTE, self._vertices)
                self._program.setAttributeArray(self.PROGRAM_NORMALS_ATTRIBUTE, self._normals)
            self._new_verts = False
        c = self.background_color
        self._gl.glClearColor(c.redF(), c.greenF(), c.blueF(), c.alphaF())
        self._gl.glClear(self._gl.GL_COLOR_BUFFER_BIT | self._gl.GL_DEPTH_BUFFER_BIT)
        m = QMatrix4x4()
        v = QMatrix4x4()
        p = QMatrix4x4()
        # Gradient: full-screen quad drawn without depth test or lighting.
        v.lookAt(QVector3D(0, 0, -10 * self._scale), QVector3D(0, 0, 0), QVector3D(0, 1, 0))
        p.ortho(-0.5, 0.5, 0.5, -0.5, 0, 15 * self._scale)
        mvp = p * v * m
        self._program.setUniformValue('mvp', mvp)
        mv = v * m
        self._program.setUniformValue('model_view_matrix', mv)
        self._program.setUniformValue('normal_matrix', mv.normalMatrix())
        self._gl.glDisable(self._gl.GL_DEPTH_TEST)
        self._program.setUniformValue('resolution', QVector2D(self.width(), self.height()))
        self._program.setUniformValue('gradient', True)
        self._program.setUniformValue('lighting', False)
        gl.glBegin(gl.GL_QUADS)
        gl.glVertex2f(-0.5, 0.5)
        gl.glVertex2f(-0.5, -0.5)
        gl.glVertex2f(0.5, -0.5)
        gl.glVertex2f(0.5, 0.5)
        gl.glEnd()
        self._program.setUniformValue('gradient', False)
        # End Gradient
        v.rotate(self.xRot / 16.0, 1.0, 0.0, 0.0)
        v.rotate(self.yRot / 16.0, 0.0, 1.0, 0.0)
        v.rotate(self.zRot / 16.0, 0.0, 0.0, 1.0)
        v.translate(self._offset.x, self._offset.y, self._offset.z)
        scale = self._scale
        width = self.width()
        height = self.height()
        aspect_ratio = width / height
        p = QMatrix4x4()
        if width <= height:
            p.ortho(-scale, scale, scale / aspect_ratio, -scale / aspect_ratio, min(-10 * scale, -10), max(20 * scale, 15))
        else:
            p.ortho(-scale * aspect_ratio, scale * aspect_ratio, scale, -scale, min(-10 * scale, -10), max(20 * scale, 15))
        mvp = p * v * m
        self._program.setUniformValue('mvp', mvp)
        mv = v * m
        self._program.setUniformValue('model_view_matrix', mv)
        self._program.setUniformValue('normal_matrix', mv.normalMatrix())
        self._gl.glEnable(self._gl.GL_DEPTH_TEST)
        # Plane edges.
        if self._plane_faces_index + 1 < self._plane_edges_index and self._show_planes:
            self.set_color(self.plane_color_edge)
            self._gl.glLineWidth(2.0)
            count = self._plane_edges_index - self._plane_faces_index
            self._gl.glDrawArrays(self._gl.GL_LINES, self._plane_faces_index + 1, count)
        # Part edges.
        if self._part_faces_index + 1 < self._part_edges_index:
            if self._show_lines:
                self.set_color(self.part_color_edge)
                self._gl.glLineWidth(1.5)
                count = self._part_edges_index - self._part_faces_index
                self._gl.glDrawArrays(self._gl.GL_LINES, self._part_faces_index + 1, count)
        # Part faces, lit; polygon offset avoids z-fighting with the edges.
        if self._plane_edges_index + 1 < self._part_faces_index:
            if self._show_surfaces:
                if self._show_lines:
                    self._gl.glEnable(self._gl.GL_POLYGON_OFFSET_FILL)
                    self._gl.glPolygonOffset(1.0, 1.0)
                self._program.setUniformValue('lighting', True)
                self.set_color(self.part_color)
                self.set_specular(self.part_specular)
                count = self._part_faces_index - self._plane_edges_index
                self._gl.glDrawArrays(self._gl.GL_TRIANGLES, self._plane_edges_index + 1, count)
                self._program.setUniformValue('lighting', False)
                if self._show_lines:
                    self._gl.glDisable(self._gl.GL_POLYGON_OFFSET_FILL)
        # Plane faces last: translucent, so depth writes are disabled.
        if self._plane_faces_index > 0 and self._show_planes:
            self._gl.glDepthMask(self._gl.GL_FALSE)
            self.set_color(self.plane_color)
            self._gl.glDrawArrays(self._gl.GL_TRIANGLES, 0, self._plane_faces_index + 1)
            self._gl.glDepthMask(self._gl.GL_TRUE)

    def resizeGL(self, width, height):
        side = min(width, height)
        # side <= 0 also rejects a zero-height viewport, which would
        # otherwise divide by zero below (was: side < 0).
        if side <= 0 or self._gl is None:
            return
        self._gl.glViewport((width - side) // 2, (height - side) // 2, width, height)
        aspect_ratio = width / height
        self._gl.glMatrixMode(self._gl.GL_PROJECTION)
        self._gl.glLoadIdentity()
        if width <= height:
            self._gl.glOrtho(-0.5, +0.5, +0.5 / aspect_ratio, -0.5 / aspect_ratio, 4.0, 15.0)
        else:
            self._gl.glOrtho(-0.5 * aspect_ratio, +0.5 * aspect_ratio, +0.5, -0.5, 4.0, 15.0)
        self._gl.glMatrixMode(self._gl.GL_MODELVIEW)

    def mousePressEvent(self, event):
        self.lastPos = event.pos()

    def mouseMoveEvent(self, event):
        """Right drag rotates; middle drag pans in the rotated view plane."""
        dx = event.x() - self.lastPos.x()
        dy = event.y() - self.lastPos.y()
        if event.buttons() & Qt.RightButton:
            self.setXRotation(self.xRot + 8 * dy)
            self.setYRotation(self.yRot - 8 * dx)
        elif event.buttons() & Qt.MiddleButton:
            yangle = self.yRot * pi / (180 * 16)
            xangle = self.xRot * pi / (180 * 16)
            dx *= self._scale * -1
            dy *= self._scale
            self._offset.x += dx * 0.002 * cos(yangle) + dy * 0.002 * sin(xangle) * sin(yangle)
            self._offset.y += dy * 0.002 * cos(xangle)
            self._offset.z += dx * 0.002 * sin(yangle) - dy * 0.002 * sin(xangle) * cos(yangle)
            self.update()
        self.lastPos = event.pos()

    def wheelEvent(self, event):
        delta = event.angleDelta().y() * 0.01 / 8
        # Only zoom while the resulting scale stays positive.
        if self._scale + self._scale * (delta * 0.01) > 0:
            self._scale *= 1 - delta
            self.update()

    def normalizeAngle(self, angle):
        """Wrap an angle (in 1/16 deg) into [0, 360deg]."""
        while angle < 0:
            angle += 360 * 16
        while angle > 360 * 16:
            angle -= 360 * 16
        return angle

    def set_color(self, c):
        self._program.setUniformValue('color', c)

    def set_specular(self, spec):
        self._program.setUniformValue('specular', spec)
| |
# -*- Mode: Python; tab-width: 4 -*-
# Id: asynchat.py,v 2.26 2000/09/07 22:29:26 rushing Exp
# Author: Sam Rushing <rushing@nightmare.com>
# ======================================================================
# Copyright 1996 by Sam Rushing
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================
r"""A class supporting chat-style (command/response) protocols.
This class adds support for 'chat' style protocols - where one side
sends a 'command', and the other sends a response (examples would be
the common internet protocols - smtp, nntp, ftp, etc..).
The handle_read() method looks at the input stream for the current
'terminator' (usually '\r\n' for single-line responses, '\r\n.\r\n'
for multi-line output), calling self.found_terminator() on its
receipt.
for example:
Say you build an async nntp client using this class. At the start
of the connection, you'll have self.terminator set to '\r\n', in
order to process the single-line greeting. Just before issuing a
'LIST' command you'll set it to '\r\n.\r\n'. The output of the LIST
command will be accumulated (using your own 'collect_incoming_data'
method) up to the terminator, and then control will be returned to
you - by calling your self.found_terminator() method.
"""
import socket
import asyncore
from collections import deque
def buffer(obj, start=None, stop=None):
    """Return obj[start:stop], py3 stand-in for the removed buffer() builtin.

    Raises TypeError (via memoryview) when obj does not support the buffer
    protocol.  None bounds default to the start/end of obj.
    """
    # if memoryview objects gain slicing semantics,
    # this function will change for the better
    # memoryview used for the TypeError
    memoryview(obj)
    # `is None` instead of `== None` (identity test, PEP 8).
    if start is None:
        start = 0
    if stop is None:
        stop = len(obj)
    return obj[start:stop]
class async_chat (asyncore.dispatcher):
    """This is an abstract class.  You must derive from this class, and add
    the two methods collect_incoming_data() and found_terminator()"""

    # these are overridable defaults
    ac_in_buffer_size = 4096
    ac_out_buffer_size = 4096

    # we don't want to enable the use of encoding by default, because that is a
    # sign of an application bug that we don't want to pass silently
    use_encoding = 0
    encoding = 'latin1'

    def __init__ (self, sock=None, map=None):
        # for string terminator matching
        self.ac_in_buffer = b''

        # we use a list here rather than cStringIO for a few reasons...
        # del lst[:] is faster than sio.truncate(0)
        # lst = [] is faster than sio.truncate(0)
        # cStringIO will be gaining unicode support in py3k, which
        # will negatively affect the performance of bytes compared to
        # a ''.join() equivalent
        self.incoming = []

        # we toss the use of the "simple producer" and replace it with
        # a pure deque, which the original fifo was a wrapping of
        self.producer_fifo = deque()
        asyncore.dispatcher.__init__ (self, sock, map)

    def collect_incoming_data(self, data):
        raise NotImplementedError("must be implemented in subclass")

    def _collect_incoming_data(self, data):
        # Default collector: accumulate chunks for _get_data().
        self.incoming.append(data)

    def _get_data(self):
        # Join and clear everything collected so far.
        d = b''.join(self.incoming)
        del self.incoming[:]
        return d

    def found_terminator(self):
        raise NotImplementedError("must be implemented in subclass")

    def set_terminator (self, term):
        "Set the input delimiter. Can be a fixed string of any length, an integer, or None"
        if isinstance(term, str) and self.use_encoding:
            term = bytes(term, self.encoding)
        self.terminator = term

    def get_terminator (self):
        return self.terminator

    # grab some more data from the socket,
    # throw it to the collector method,
    # check for the terminator,
    # if found, transition to the next state.

    def handle_read (self):

        try:
            data = self.recv (self.ac_in_buffer_size)
        except socket.error:
            self.handle_error()
            return

        if isinstance(data, str) and self.use_encoding:
            # Bug fix: encode the received chunk (was bytes(str, ...),
            # which passed the str *builtin* and always raised TypeError).
            data = bytes(data, self.encoding)
        self.ac_in_buffer = self.ac_in_buffer + data

        # Continue to search for self.terminator in self.ac_in_buffer,
        # while calling self.collect_incoming_data.  The while loop
        # is necessary because we might read several data+terminator
        # combos with a single recv(4096).

        while self.ac_in_buffer:
            lb = len(self.ac_in_buffer)
            terminator = self.get_terminator()
            if not terminator:
                # no terminator, collect it all
                self.collect_incoming_data (self.ac_in_buffer)
                self.ac_in_buffer = b''
            elif isinstance(terminator, int):
                # numeric terminator
                n = terminator
                if lb < n:
                    self.collect_incoming_data (self.ac_in_buffer)
                    self.ac_in_buffer = b''
                    self.terminator = self.terminator - lb
                else:
                    self.collect_incoming_data (self.ac_in_buffer[:n])
                    self.ac_in_buffer = self.ac_in_buffer[n:]
                    self.terminator = 0
                    self.found_terminator()
            else:
                # 3 cases:
                # 1) end of buffer matches terminator exactly:
                #    collect data, transition
                # 2) end of buffer matches some prefix:
                #    collect data to the prefix
                # 3) end of buffer does not match any prefix:
                #    collect data
                terminator_len = len(terminator)
                index = self.ac_in_buffer.find(terminator)
                if index != -1:
                    # we found the terminator
                    if index > 0:
                        # don't bother reporting the empty string (source of subtle bugs)
                        self.collect_incoming_data (self.ac_in_buffer[:index])
                    self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:]
                    # This does the Right Thing if the terminator is changed here.
                    self.found_terminator()
                else:
                    # check for a prefix of the terminator
                    index = find_prefix_at_end (self.ac_in_buffer, terminator)
                    if index:
                        if index != lb:
                            # we found a prefix, collect up to the prefix
                            self.collect_incoming_data (self.ac_in_buffer[:-index])
                            self.ac_in_buffer = self.ac_in_buffer[-index:]
                        break
                    else:
                        # no prefix, collect it all
                        self.collect_incoming_data (self.ac_in_buffer)
                        self.ac_in_buffer = b''

    def handle_write (self):
        self.initiate_send()

    def handle_close (self):
        self.close()

    def push (self, data):
        # Chunk large payloads so initiate_send never slices huge strings.
        sabs = self.ac_out_buffer_size
        if len(data) > sabs:
            for i in range(0, len(data), sabs):
                self.producer_fifo.append(data[i:i+sabs])
        else:
            self.producer_fifo.append(data)
        self.initiate_send()

    def push_with_producer (self, producer):
        self.producer_fifo.append(producer)
        self.initiate_send()

    def readable (self):
        "predicate for inclusion in the readable for select()"
        # cannot use the old predicate, it violates the claim of the
        # set_terminator method.

        # return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)
        return 1

    def writable (self):
        "predicate for inclusion in the writable for select()"
        return self.producer_fifo or (not self.connected)

    def close_when_done (self):
        "automatically close this channel once the outgoing queue is empty"
        self.producer_fifo.append(None)

    def initiate_send(self):
        while self.producer_fifo and self.connected:
            first = self.producer_fifo[0]
            # handle empty string/buffer or None entry
            if not first:
                del self.producer_fifo[0]
                if first is None:
                    # None is the close_when_done() sentinel.
                    self.handle_close()
                    return

            # handle classic producer behavior
            obs = self.ac_out_buffer_size
            try:
                data = buffer(first, 0, obs)
            except TypeError:
                data = first.more()
                if data:
                    self.producer_fifo.appendleft(data)
                else:
                    del self.producer_fifo[0]
                continue

            if isinstance(data, str) and self.use_encoding:
                data = bytes(data, self.encoding)

            # send the data
            try:
                num_sent = self.send(data)
            except socket.error:
                self.handle_error()
                return

            if num_sent:
                if num_sent < len(data) or obs < len(first):
                    self.producer_fifo[0] = first[num_sent:]
                else:
                    del self.producer_fifo[0]
            # we tried to send some actual data
            return

    def discard_buffers (self):
        # Emergencies only!
        self.ac_in_buffer = b''
        del self.incoming[:]
        self.producer_fifo.clear()
class simple_producer:
    """Producer that hands out fixed-size chunks of a byte string."""

    def __init__ (self, data, buffer_size=512):
        self.data = data
        self.buffer_size = buffer_size

    def more (self):
        """Return the next chunk; the remainder (possibly b'') when done."""
        if len (self.data) <= self.buffer_size:
            remainder, self.data = self.data, b''
            return remainder
        chunk = self.data[:self.buffer_size]
        self.data = self.data[self.buffer_size:]
        return chunk
class fifo:
    """Minimal FIFO queue wrapping collections.deque.

    pop() returns (1, item) on success and (0, None) when empty.
    """

    def __init__ (self, list=None):
        # Keep the historical parameter name `list` for compatibility.
        self.list = deque(list) if list else deque()

    def __len__ (self):
        return len(self.list)

    def is_empty (self):
        return not self.list

    def first (self):
        return self.list[0]

    def push (self, data):
        self.list.append(data)

    def pop (self):
        if not self.list:
            return (0, None)
        return (1, self.list.popleft())
# Given 'haystack', see if any prefix of 'needle' is at its end. This
# assumes an exact match has already been checked. Return the number of
# characters matched.
# for example:
# f_p_a_e ("qwerty\r", "\r\n") => 1
# f_p_a_e ("qwertydkjf", "\r\n") => 0
# f_p_a_e ("qwerty\r\n", "\r\n") => <undefined>
# this could maybe be made faster with a computed regex?
# [answer: no; circa Python-2.0, Jan 2001]
# new python: 28961/s
# old python: 18307/s
# re: 12820/s
# regex: 14035/s
def find_prefix_at_end (haystack, needle):
    """Return the length of the longest proper prefix of *needle* that ends
    *haystack* (0 when none does).  Assumes a full match was already ruled out.
    """
    for size in range(len(needle) - 1, 0, -1):
        if haystack.endswith(needle[:size]):
            return size
    return 0
| |
from pandac.PandaModules import *
from MsgTypes import *
from direct.showbase import ShowBase # __builtin__.config
from direct.task.TaskManagerGlobal import * # taskMgr
from direct.directnotify import DirectNotifyGlobal
from ConnectionRepository import ConnectionRepository
from PyDatagram import PyDatagram
from PyDatagramIterator import PyDatagramIterator
from AstronDatabaseInterface import AstronDatabaseInterface
class AstronInternalRepository(ConnectionRepository):
"""
This class is part of Panda3D's new MMO networking framework.
It interfaces with an Astron (https://github.com/Astron/Astron) server in
order to manipulate objects in the Astron cluster. It does not require any
specific "gateway" into the Astron network. Rather, it just connects directly
to any Message Director. Hence, it is an "internal" repository.
This class is suitable for constructing your own AI Servers and UberDOG servers
using Panda3D. Objects with a "self.air" attribute are referring to an instance
of this class.
"""
notify = DirectNotifyGlobal.directNotify.newCategory("AstronInternalRepository")
def __init__(self, baseChannel, serverId=None, dcFileNames = None,
dcSuffix = 'AI', connectMethod = None, threadedNet = None):
if connectMethod is None:
connectMethod = self.CM_HTTP
ConnectionRepository.__init__(self, connectMethod, config, hasOwnerView = False, threadedNet = threadedNet)
self.setClientDatagram(False)
self.dcSuffix = dcSuffix
if hasattr(self, 'setVerbose'):
if self.config.GetBool('verbose-internalrepository'):
self.setVerbose(1)
# The State Server we are configured to use for creating objects.
#If this is None, generating objects is not possible.
self.serverId = self.config.GetInt('air-stateserver', 0) or None
if serverId is not None:
self.serverId = serverId
maxChannels = self.config.GetInt('air-channel-allocation', 1000000)
self.channelAllocator = UniqueIdAllocator(baseChannel, baseChannel+maxChannels-1)
self._registeredChannels = set()
self.__contextCounter = 0
self.dbInterface = AstronDatabaseInterface(self)
self.__callbacks = {}
self.ourChannel = self.allocateChannel()
self.eventLogId = self.config.GetString('eventlog-id', 'AIR:%d' % self.ourChannel)
self.eventSocket = None
eventLogHost = self.config.GetString('eventlog-host', '')
if eventLogHost:
if ':' in eventLogHost:
host, port = eventLogHost.split(':', 1)
self.setEventLogHost(host, int(port))
else:
self.setEventLogHost(eventLogHost)
self.readDCFile(dcFileNames)
def getContext(self):
self.__contextCounter = (self.__contextCounter + 1) & 0xFFFFFFFF
return self.__contextCounter
def allocateChannel(self):
"""
Allocate an unused channel out of this AIR's configured channel space.
This is also used to allocate IDs for DistributedObjects, since those
occupy a channel.
"""
return self.channelAllocator.allocate()
def deallocateChannel(self, channel):
"""
Return the previously-allocated channel back to the allocation pool.
"""
self.channelAllocator.free(channel)
def registerForChannel(self, channel):
"""
Register for messages on a specific Message Director channel.
If the channel is already open by this AIR, nothing will happen.
"""
if channel in self._registeredChannels:
return
self._registeredChannels.add(channel)
dg = PyDatagram()
dg.addServerControlHeader(CONTROL_ADD_CHANNEL)
dg.addChannel(channel)
self.send(dg)
def unregisterForChannel(self, channel):
"""
Unregister a channel subscription on the Message Director. The Message
Director will cease to relay messages to this AIR sent on the channel.
"""
if channel not in self._registeredChannels:
return
self._registeredChannels.remove(channel)
dg = PyDatagram()
dg.addServerControlHeader(CONTROL_REMOVE_CHANNEL)
dg.addChannel(channel)
self.send(dg)
def addPostRemove(self, dg):
"""
Register a datagram with the Message Director that gets sent out if the
connection is ever lost.
This is useful for registering cleanup messages: If the Panda3D process
ever crashes unexpectedly, the Message Director will detect the socket
close and automatically process any post-remove datagrams.
"""
dg2 = PyDatagram()
dg2.addServerControlHeader(CONTROL_ADD_POST_REMOVE)
dg2.addString(dg.getMessage())
self.send(dg2)
def clearPostRemove(self):
"""
Clear all datagrams registered with addPostRemove.
This is useful if the Panda3D process is performing a clean exit. It may
clear the "emergency clean-up" post-remove messages and perform a normal
exit-time clean-up instead, depending on the specific design of the game.
"""
dg = PyDatagram()
dg.addServerControlHeader(CONTROL_CLEAR_POST_REMOVE)
self.send(dg)
    def handleDatagram(self, di):
        """
        Dispatch one inbound datagram by its message type.
        Unrecognized message types are logged and dropped.
        """
        msgType = self.getMsgType()
        # Objects entering our AI purview (with or without OTHER fields):
        if msgType in (STATESERVER_OBJECT_ENTER_AI_WITH_REQUIRED,
                       STATESERVER_OBJECT_ENTER_AI_WITH_REQUIRED_OTHER):
            self.handleObjEntry(di, msgType == STATESERVER_OBJECT_ENTER_AI_WITH_REQUIRED_OTHER)
        # Objects leaving us (new AI, or deleted outright):
        elif msgType in (STATESERVER_OBJECT_CHANGING_AI,
                         STATESERVER_OBJECT_DELETE_RAM):
            self.handleObjExit(di)
        elif msgType == STATESERVER_OBJECT_CHANGING_LOCATION:
            self.handleObjLocation(di)
        # Database responses are delegated to the database interface:
        elif msgType in (DBSERVER_CREATE_OBJECT_RESP,
                        DBSERVER_OBJECT_GET_ALL_RESP,
                        DBSERVER_OBJECT_SET_FIELD_IF_EQUALS_RESP,
                        DBSERVER_OBJECT_SET_FIELDS_IF_EQUALS_RESP):
            self.dbInterface.handleDatagram(msgType, di)
        elif msgType == DBSS_OBJECT_GET_ACTIVATED_RESP:
            self.handleGetActivatedResp(di)
        else:
            self.notify.warning('Received message with unknown MsgType=%d' % msgType)
def handleObjLocation(self, di):
doId = di.getUint32()
parentId = di.getUint32()
zoneId = di.getUint32()
do = self.doId2do.get(doId)
if not do:
self.notify.warning('Received location for unknown doId=%d!' % (doId))
return
do.setLocation(parentId, zoneId)
    def handleObjEntry(self, di, other):
        """
        Handle an object entering our AI purview: instantiate its Python
        class, register it, generate it, then unpack its fields.
        `other` is True for the ...WITH_REQUIRED_OTHER message variant.
        """
        doId = di.getUint32()
        parentId = di.getUint32()
        zoneId = di.getUint32()
        classId = di.getUint16()
        if classId not in self.dclassesByNumber:
            self.notify.warning('Received entry for unknown dclass=%d! (Object %d)' % (classId, doId))
            return
        if doId in self.doId2do:
            return # We already know about this object; ignore the entry.
        dclass = self.dclassesByNumber[classId]
        do = dclass.getClassDef()(self)
        do.dclass = dclass
        do.doId = doId
        # The DO came in off the server, so we do not unregister the channel when
        # it dies:
        do.doNotDeallocateChannel = True
        # Must be registered before generate() so it is discoverable during
        # its own generation.
        self.addDOToTables(do, location=(parentId, zoneId))
        # Now for generation:
        do.generate()
        if other:
            do.updateAllRequiredOtherFields(dclass, di)
        else:
            do.updateAllRequiredFields(dclass, di)
def handleObjExit(self, di):
doId = di.getUint32()
if doId not in self.doId2do:
self.notify.warning('Received AI exit for unknown object %d' % (doId))
return
do = self.doId2do[doId]
self.removeDOFromTables(do)
do.delete()
do.sendDeleteEvent()
def handleGetActivatedResp(self, di):
ctx = di.getUint32()
doId = di.getUint32()
activated = di.getUint8()
if ctx not in self.__callbacks:
self.notify.warning('Received unexpected DBSS_OBJECT_GET_ACTIVATED_RESP (ctx: %d)' %ctx)
return
try:
self.__callbacks[ctx](doId, activated)
finally:
del self.__callbacks[ctx]
def getActivated(self, doId, callback):
ctx = self.getContext()
self.__callbacks[ctx] = callback
dg = PyDatagram()
dg.addServerHeader(doId, self.ourChannel, DBSS_OBJECT_GET_ACTIVATED)
dg.addUint32(ctx)
dg.addUint32(doId)
self.send(dg)
def sendUpdate(self, do, fieldName, args):
"""
Send a field update for the given object.
You should probably use do.sendUpdate(...) instead.
"""
self.sendUpdateToChannel(do, do.doId, fieldName, args)
def sendUpdateToChannel(self, do, channelId, fieldName, args):
"""
Send an object field update to a specific channel.
This is useful for directing the update to a specific client or node,
rather than at the State Server managing the object.
You should probably use do.sendUpdateToChannel(...) instead.
"""
dclass = do.dclass
field = dclass.getFieldByName(fieldName)
dg = field.aiFormatUpdate(do.doId, channelId, self.ourChannel, args)
self.send(dg)
    def sendActivate(self, doId, parentId, zoneId, dclass=None, fields=None):
        """
        Activate a DBSS object, given its doId, into the specified parentId/zoneId.
        If both dclass and fields are specified, an ACTIVATE_WITH_DEFAULTS_OTHER
        will be sent instead. In other words, the specified fields will be
        auto-applied during the activation.
        """
        fieldPacker = DCPacker()
        fieldCount = 0
        if dclass and fields:
            # Pre-pack the override fields so they can be applied immediately
            # after activation (see the SET_FIELDS workaround below).
            for k,v in fields.items():
                field = dclass.getFieldByName(k)
                if not field:
                    self.notify.error('Activation request for %s object contains '
                                      'invalid field named %s' % (dclass.getName(), k))
                fieldPacker.rawPackUint16(field.getNumber())
                fieldPacker.beginPack(field)
                field.packArgs(fieldPacker, v)
                fieldPacker.endPack()
                fieldCount += 1
            # Activate into location (0, 0) first; the real location is set
            # last so the object generates onto us with the fields in place.
            dg = PyDatagram()
            dg.addServerHeader(doId, self.ourChannel, DBSS_OBJECT_ACTIVATE_WITH_DEFAULTS)
            dg.addUint32(doId)
            dg.addUint32(0)
            dg.addUint32(0)
            self.send(dg)
            # DEFAULTS_OTHER isn't implemented yet, so we chase it with a SET_FIELDS
            dg = PyDatagram()
            dg.addServerHeader(doId, self.ourChannel, STATESERVER_OBJECT_SET_FIELDS)
            dg.addUint32(doId)
            dg.addUint16(fieldCount)
            dg.appendData(fieldPacker.getString())
            self.send(dg)
            # Now slide it into the zone we expect to see it in (so it
            # generates onto us with all of the fields in place)
            dg = PyDatagram()
            dg.addServerHeader(doId, self.ourChannel, STATESERVER_OBJECT_SET_LOCATION)
            dg.addUint32(parentId)
            dg.addUint32(zoneId)
            self.send(dg)
        else:
            # Simple case: activate directly into the requested location.
            dg = PyDatagram()
            dg.addServerHeader(doId, self.ourChannel, DBSS_OBJECT_ACTIVATE_WITH_DEFAULTS)
            dg.addUint32(doId)
            dg.addUint32(parentId)
            dg.addUint32(zoneId)
            self.send(dg)
def sendSetLocation(self, do, parentId, zoneId):
dg = PyDatagram()
dg.addServerHeader(do.doId, self.ourChannel, STATESERVER_OBJECT_SET_LOCATION)
dg.addUint32(parentId)
dg.addUint32(zoneId)
self.send(dg)
def generateWithRequired(self, do, parentId, zoneId, optionalFields=[]):
"""
Generate an object onto the State Server, choosing an ID from the pool.
You should probably use do.generateWithRequired(...) instead.
"""
doId = self.allocateChannel()
self.generateWithRequiredAndId(do, doId, parentId, zoneId, optionalFields)
def generateWithRequiredAndId(self, do, doId, parentId, zoneId, optionalFields=[]):
"""
Generate an object onto the State Server, specifying its ID and location.
You should probably use do.generateWithRequiredAndId(...) instead.
"""
do.doId = doId
self.addDOToTables(do, location=(parentId, zoneId))
do.sendGenerateWithRequired(self, parentId, zoneId, optionalFields)
def requestDelete(self, do):
"""
Request the deletion of an object that already exists on the State Server.
You should probably use do.requestDelete() instead.
"""
dg = PyDatagram()
dg.addServerHeader(do.doId, self.ourChannel, STATESERVER_OBJECT_DELETE_RAM)
dg.addUint32(do.doId)
self.send(dg)
def connect(self, host, port=7199):
"""
Connect to a Message Director. The airConnected message is sent upon
success.
N.B. This overrides the base class's connect(). You cannot use the
ConnectionRepository connect() parameters.
"""
url = URLSpec()
url.setServer(host)
url.setPort(port)
self.notify.info('Now connecting to %s:%s...' % (host, port))
ConnectionRepository.connect(self, [url],
successCallback=self.__connected,
failureCallback=self.__connectFailed,
failureArgs=[host, port])
    def __connected(self):
        """
        Success callback for connect(): subscribe to our control channel,
        register emergency cleanup, and notify the rest of the process.
        """
        self.notify.info('Connected successfully.')
        # Listen to our channel...
        self.registerForChannel(self.ourChannel)
        # If we're configured with a State Server, register a post-remove to
        # clean up whatever objects we own on this server should we unexpectedly
        # fall over and die.
        if self.serverId:
            dg = PyDatagram()
            dg.addServerHeader(self.serverId, self.ourChannel, STATESERVER_DELETE_AI_OBJECTS)
            dg.addChannel(self.ourChannel)
            self.addPostRemove(dg)
        # 'messenger' is presumably the Panda3D global event messenger.
        messenger.send('airConnected')
        self.handleConnected()
def __connectFailed(self, code, explanation, host, port):
self.notify.warning('Failed to connect! (code=%s; %r)' % (code, explanation))
# Try again...
retryInterval = config.GetFloat('air-reconnect-delay', 5.0)
taskMgr.doMethodLater(retryInterval, self.connect, 'Reconnect delay', extraArgs=[host, port])
    def handleConnected(self):
        """
        Hook invoked once the Message Director connection is established and
        our channel is registered. Subclasses should override this if they
        wish to handle the connection event; the default does nothing.
        """
    def lostConnection(self):
        # Called when the Message Director connection drops unexpectedly.
        # Override in a subclass if losing the connection is survivable;
        # the default treats it as fatal.
        self.notify.error('Lost connection to gameserver!')
def setEventLogHost(self, host, port=7197):
"""
Set the target host for Event Logger messaging. This should be pointed
at the UDP IP:port that hosts the cluster's running Event Logger.
Providing a value of None or an empty string for 'host' will disable
event logging.
"""
if not host:
self.eventSocket = None
return
address = SocketAddress()
if not address.setHost(host, port):
self.notify.warning('Invalid Event Log host specified: %s:%s' % (host, port))
self.eventSocket = None
else:
self.eventSocket = SocketUDPOutgoing()
self.eventSocket.InitToAddress(address)
def writeServerEvent(self, logtype, *args):
"""
Write an event to the central Event Logger, if one is configured.
The purpose of the Event Logger is to keep a game-wide record of all
interesting in-game events that take place. Therefore, this function
should be used whenever such an interesting in-game event occurs.
"""
if self.eventSocket is None:
return # No event logger configured!
dg = PyDatagram()
dg.addString(self.eventLogId)
dg.addString(logtype)
for arg in args:
dg.addString(str(arg))
self.eventSocket.Send(dg.getMessage())
| |
from collections import OrderedDict

# Scripture metadata: ordered mapping of volume slug -> volume info.
# Each volume later gains an ordered "books" table keyed by book slug, with
# each book carrying book_id / book_title / num_chapters and, for a handful
# of books, an lds_org slug.
volumesObjects = OrderedDict([
    ("ot", {"volume_id": 1, "volume_title": "Old Testament"}),
    ("nt", {"volume_id": 2, "volume_title": "New Testament"}),
    ("bm", {"volume_id": 3, "volume_title": "Book of Mormon"}),
    ("dc", {"volume_id": 4, "volume_title": "Doctrine and Covenants"}),
    ("pgp", {"volume_id": 5, "volume_title": "Pearl of Great Price"}),
])
volumesObjects["ot"]["books"] = OrderedDict([
    ("gen", {"book_id": 1, "book_title": "Genesis", "num_chapters": 50}),
    ("ex", {"book_id": 2, "book_title": "Exodus", "num_chapters": 40}),
    ("lev", {"book_id": 3, "book_title": "Leviticus", "num_chapters": 27}),
    ("num", {"book_id": 4, "book_title": "Numbers", "num_chapters": 36}),
    ("deut", {"book_id": 5, "book_title": "Deuteronomy", "num_chapters": 34}),
    ("josh", {"book_id": 6, "book_title": "Joshua", "num_chapters": 24}),
    ("judg", {"book_id": 7, "book_title": "Judges", "num_chapters": 21}),
    ("ruth", {"book_id": 8, "book_title": "Ruth", "num_chapters": 4}),
    ("1_sam", {"book_id": 9, "book_title": "1 Samuel", "num_chapters": 31}),
    ("2_sam", {"book_id": 10, "book_title": "2 Samuel", "num_chapters": 24}),
    ("1_kgs", {"book_id": 11, "book_title": "1 Kings", "num_chapters": 22}),
    ("2_kgs", {"book_id": 12, "book_title": "2 Kings", "num_chapters": 25}),
    ("1_chr", {"book_id": 13, "book_title": "1 Chronicles", "num_chapters": 29}),
    ("2_chr", {"book_id": 14, "book_title": "2 Chronicles", "num_chapters": 36}),
    ("ezra", {"book_id": 15, "book_title": "Ezra", "num_chapters": 10}),
    ("neh", {"book_id": 16, "book_title": "Nehemiah", "num_chapters": 13}),
    ("esth", {"book_id": 17, "book_title": "Esther", "num_chapters": 10}),
    ("job", {"book_id": 18, "book_title": "Job", "num_chapters": 42}),
    ("ps", {"book_id": 19, "book_title": "Psalms", "num_chapters": 150}),
    ("prov", {"book_id": 20, "book_title": "Proverbs", "num_chapters": 31}),
    ("eccl", {"book_id": 21, "book_title": "Ecclesiastes", "num_chapters": 12}),
    ("song", {"book_id": 22, "book_title": "Solomon's Song", "num_chapters": 8}),
    ("isa", {"book_id": 23, "book_title": "Isaiah", "num_chapters": 66, "lds_org": "isa"}),
    ("jer", {"book_id": 24, "book_title": "Jeremiah", "num_chapters": 52}),
    ("lam", {"book_id": 25, "book_title": "Lamentations", "num_chapters": 5, "lds_org": "lam"}),
    ("ezek", {"book_id": 26, "book_title": "Ezekiel", "num_chapters": 48}),
    ("dan", {"book_id": 27, "book_title": "Daniel", "num_chapters": 12}),
    ("hosea", {"book_id": 28, "book_title": "Hosea", "num_chapters": 14}),
    ("joel", {"book_id": 29, "book_title": "Joel", "num_chapters": 3}),
    ("amos", {"book_id": 30, "book_title": "Amos", "num_chapters": 9}),
    ("obad", {"book_id": 31, "book_title": "Obadiah", "num_chapters": 1}),
    ("jonah", {"book_id": 32, "book_title": "Jonah", "num_chapters": 4}),
    ("micah", {"book_id": 33, "book_title": "Micah", "num_chapters": 7}),
    ("nahum", {"book_id": 34, "book_title": "Nahum", "num_chapters": 3}),
    ("hab", {"book_id": 35, "book_title": "Habakkuk", "num_chapters": 3}),
    ("zeph", {"book_id": 36, "book_title": "Zephaniah", "num_chapters": 3}),
    ("hag", {"book_id": 37, "book_title": "Haggai", "num_chapters": 2}),
    ("zech", {"book_id": 38, "book_title": "Zechariah", "num_chapters": 14}),
    ("mal", {"book_id": 39, "book_title": "Malachi", "num_chapters": 4}),
])
volumesObjects["nt"]["books"] = OrderedDict([
    ("matt", {"book_id": 40, "book_title": "Matthew", "num_chapters": 28, "lds_org": "matt"}),
    ("mark", {"book_id": 41, "book_title": "Mark", "num_chapters": 16}),
    ("luke", {"book_id": 42, "book_title": "Luke", "num_chapters": 24}),
    ("john", {"book_id": 43, "book_title": "John", "num_chapters": 21}),
    ("acts", {"book_id": 44, "book_title": "Acts", "num_chapters": 28}),
    ("rom", {"book_id": 45, "book_title": "Romans", "num_chapters": 16}),
    ("1_cor", {"book_id": 46, "book_title": "1 Corinthians", "num_chapters": 16}),
    ("2_cor", {"book_id": 47, "book_title": "2 Corinthians", "num_chapters": 13}),
    ("gal", {"book_id": 48, "book_title": "Galatians", "num_chapters": 6}),
    ("eph", {"book_id": 49, "book_title": "Ephesians", "num_chapters": 6}),
    ("philip", {"book_id": 50, "book_title": "Philippians", "num_chapters": 4}),
    ("col", {"book_id": 51, "book_title": "Colossians", "num_chapters": 4}),
    ("1_thes", {"book_id": 52, "book_title": "1 Thessalonians", "num_chapters": 5, "lds_org": "1_thes"}),
    ("2_thes", {"book_id": 53, "book_title": "2 Thessalonians", "num_chapters": 3}),
    ("1_tim", {"book_id": 54, "book_title": "1 Timothy", "num_chapters": 6}),
    ("2_tim", {"book_id": 55, "book_title": "2 Timothy", "num_chapters": 4}),
    ("titus", {"book_id": 56, "book_title": "Titus", "num_chapters": 3}),
    ("philem", {"book_id": 57, "book_title": "Philemon", "num_chapters": 1}),
    ("heb", {"book_id": 58, "book_title": "Hebrews", "num_chapters": 13}),
    ("james", {"book_id": 59, "book_title": "James", "num_chapters": 5, "lds_org": "james"}),
    ("1_pet", {"book_id": 60, "book_title": "1 Peter", "num_chapters": 5}),
    ("2_pet", {"book_id": 61, "book_title": "2 Peter", "num_chapters": 3, "lds_org": "2_pet"}),
    ("1_jn", {"book_id": 62, "book_title": "1 John", "num_chapters": 5, "lds_org": "1_jn"}),
    ("2_jn", {"book_id": 63, "book_title": "2 John", "num_chapters": 1}),
    ("3_jn", {"book_id": 64, "book_title": "3 John", "num_chapters": 1}),
    ("jude", {"book_id": 65, "book_title": "Jude", "num_chapters": 1}),
    ("rev", {"book_id": 66, "book_title": "Revelation", "num_chapters": 22}),
])
volumesObjects["bm"]["books"] = OrderedDict([
    ("1_ne", {"book_id": 67, "book_title": "1 Nephi", "num_chapters": 22}),
    ("2_ne", {"book_id": 68, "book_title": "2 Nephi", "num_chapters": 33}),
    ("jacob", {"book_id": 69, "book_title": "Jacob", "num_chapters": 7}),
    ("enos", {"book_id": 70, "book_title": "Enos", "num_chapters": 1}),
    ("jarom", {"book_id": 71, "book_title": "Jarom", "num_chapters": 1}),
    ("omni", {"book_id": 72, "book_title": "Omni", "num_chapters": 1}),
    ("w_of_m", {"book_id": 73, "book_title": "Words of Mormon", "num_chapters": 1}),
    ("mosiah", {"book_id": 74, "book_title": "Mosiah", "num_chapters": 29}),
    ("alma", {"book_id": 75, "book_title": "Alma", "num_chapters": 63}),
    ("hel", {"book_id": 76, "book_title": "Helaman", "num_chapters": 16}),
    ("3_ne", {"book_id": 77, "book_title": "3 Nephi", "num_chapters": 30}),
    ("4_ne", {"book_id": 78, "book_title": "4 Nephi", "num_chapters": 1}),
    ("morm", {"book_id": 79, "book_title": "Mormon", "num_chapters": 9}),
    ("ether", {"book_id": 80, "book_title": "Ether", "num_chapters": 15}),
    ("moro", {"book_id": 81, "book_title": "Moroni", "num_chapters": 10}),
])
volumesObjects["dc"]["books"] = OrderedDict([
    ("dc", {"book_id": 82, "book_title": "Doctrine and Covenants", "num_chapters": 138}),
])
volumesObjects["pgp"]["books"] = OrderedDict([
    ("moses", {"book_id": 83, "book_title": "Moses", "num_chapters": 8}),
    ("abr", {"book_id": 84, "book_title": "Abraham", "num_chapters": 5}),
    ("js_m", {"book_id": 85, "book_title": "Joseph Smith--Matthew", "num_chapters": 1}),
    ("js_h", {"book_id": 86, "book_title": "Joseph Smith--History", "num_chapters": 1}),
    ("a_of_f", {"book_id": 87, "book_title": "Articles of Faith", "num_chapters": 1, "lds_org": "a_of_f"}),
])
| |
"""Unit tests for the memoryview
XXX We need more tests! Some tests are in test_bytes
"""
import unittest
import sys
import gc
import weakref
import array
from test import test_support
import io
class AbstractMemoryTests:
    """
    Shared memoryview assertions, parameterized by subclasses.
    Subclasses supply ro_type / rw_type / getitem_type / itemsize / format
    and a _view()/_check_contents() indirection strategy.
    NOTE(review): this is a Python 2-era suite (test_support, u"" literals);
    some expectations differ under Python 3.
    """
    # Raw bytes every view is built from.
    source_bytes = b"abcdef"
    @property
    def _source(self):
        return self.source_bytes
    @property
    def _types(self):
        # Drop a None ro_type/rw_type so subclasses can opt out of either.
        return filter(None, [self.ro_type, self.rw_type])
    def check_getitem_with_type(self, tp):
        item = self.getitem_type
        b = tp(self._source)
        # Refcount bookkeeping only where the interpreter supports it.
        if hasattr(sys, 'getrefcount'):
            oldrefcount = sys.getrefcount(b)
        m = self._view(b)
        self.assertEqual(m[0], item(b"a"))
        self.assertIsInstance(m[0], bytes)
        self.assertEqual(m[5], item(b"f"))
        self.assertEqual(m[-1], item(b"f"))
        self.assertEqual(m[-6], item(b"a"))
        # Bounds checking
        self.assertRaises(IndexError, lambda: m[6])
        self.assertRaises(IndexError, lambda: m[-7])
        self.assertRaises(IndexError, lambda: m[sys.maxsize])
        self.assertRaises(IndexError, lambda: m[-sys.maxsize])
        # Type checking
        self.assertRaises(TypeError, lambda: m[None])
        self.assertRaises(TypeError, lambda: m[0.0])
        self.assertRaises(TypeError, lambda: m["a"])
        # Dropping the view must release its reference on the source.
        m = None
        if hasattr(sys, 'getrefcount'):
            self.assertEqual(sys.getrefcount(b), oldrefcount)
    def test_getitem(self):
        for tp in self._types:
            self.check_getitem_with_type(tp)
    def test_iter(self):
        for tp in self._types:
            b = tp(self._source)
            m = self._view(b)
            self.assertEqual(list(m), [m[i] for i in range(len(m))])
    def test_repr(self):
        for tp in self._types:
            b = tp(self._source)
            m = self._view(b)
            self.assertIsInstance(m.__repr__(), str)
    def test_setitem_readonly(self):
        if not self.ro_type:
            self.skipTest("no read-only type to test")
        b = self.ro_type(self._source)
        if hasattr(sys, 'getrefcount'):
            oldrefcount = sys.getrefcount(b)
        m = self._view(b)
        def setitem(value):
            m[0] = value
        # Writing through a read-only view must fail for every value kind.
        self.assertRaises(TypeError, setitem, b"a")
        self.assertRaises(TypeError, setitem, 65)
        self.assertRaises(TypeError, setitem, memoryview(b"a"))
        m = None
        if hasattr(sys, 'getrefcount'):
            self.assertEqual(sys.getrefcount(b), oldrefcount)
    def test_setitem_writable(self):
        if not self.rw_type:
            self.skipTest("no writable type to test")
        tp = self.rw_type
        b = self.rw_type(self._source)
        if hasattr(sys, 'getrefcount'):
            oldrefcount = sys.getrefcount(b)
        m = self._view(b)
        m[0] = tp(b"0")
        self._check_contents(tp, b, b"0bcdef")
        m[1:3] = tp(b"12")
        self._check_contents(tp, b, b"012def")
        m[1:1] = tp(b"")
        self._check_contents(tp, b, b"012def")
        m[:] = tp(b"abcdef")
        self._check_contents(tp, b, b"abcdef")
        # Overlapping copies of a view into itself
        m[0:3] = m[2:5]
        self._check_contents(tp, b, b"cdedef")
        m[:] = tp(b"abcdef")
        m[2:5] = m[0:3]
        self._check_contents(tp, b, b"ababcf")
        def setitem(key, value):
            m[key] = tp(value)
        # Bounds checking
        self.assertRaises(IndexError, setitem, 6, b"a")
        self.assertRaises(IndexError, setitem, -7, b"a")
        self.assertRaises(IndexError, setitem, sys.maxsize, b"a")
        self.assertRaises(IndexError, setitem, -sys.maxsize, b"a")
        # Wrong index/slice types
        self.assertRaises(TypeError, setitem, 0.0, b"a")
        self.assertRaises(TypeError, setitem, (0,), b"a")
        self.assertRaises(TypeError, setitem, "a", b"a")
        # Trying to resize the memory object
        self.assertRaises(ValueError, setitem, 0, b"")
        self.assertRaises(ValueError, setitem, 0, b"ab")
        self.assertRaises(ValueError, setitem, slice(1,1), b"a")
        self.assertRaises(ValueError, setitem, slice(0,2), b"a")
        m = None
        if hasattr(sys, 'getrefcount'):
            self.assertEqual(sys.getrefcount(b), oldrefcount)
    def test_delitem(self):
        # Views are fixed-size: deletion is never allowed.
        for tp in self._types:
            b = tp(self._source)
            m = self._view(b)
            with self.assertRaises(TypeError):
                del m[1]
            with self.assertRaises(TypeError):
                del m[1:4]
    def test_tobytes(self):
        for tp in self._types:
            m = self._view(tp(self._source))
            b = m.tobytes()
            # This calls self.getitem_type() on each separate byte of b"abcdef"
            expected = b"".join(
                self.getitem_type(c) for c in b"abcdef")
            self.assertEqual(b, expected)
            self.assertIsInstance(b, bytes)
    def test_tolist(self):
        for tp in self._types:
            m = self._view(tp(self._source))
            l = m.tolist()
            # NOTE(review): comparing against a map object — Py2 semantics,
            # where map() returns a list.
            self.assertEqual(l, map(ord, b"abcdef"))
    def test_compare(self):
        # memoryviews can compare for equality with other objects
        # having the buffer interface.
        for tp in self._types:
            m = self._view(tp(self._source))
            for tp_comp in self._types:
                self.assertTrue(m == tp_comp(b"abcdef"))
                self.assertFalse(m != tp_comp(b"abcdef"))
                self.assertFalse(m == tp_comp(b"abcde"))
                self.assertTrue(m != tp_comp(b"abcde"))
                self.assertFalse(m == tp_comp(b"abcde1"))
                self.assertTrue(m != tp_comp(b"abcde1"))
            self.assertTrue(m == m)
            self.assertTrue(m == m[:])
            self.assertTrue(m[0:6] == m[:])
            self.assertFalse(m[0:5] == m)
            # Comparison with objects which don't support the buffer API
            self.assertFalse(m == u"abcdef")
            self.assertTrue(m != u"abcdef")
            self.assertFalse(u"abcdef" == m)
            self.assertTrue(u"abcdef" != m)
            # Unordered comparisons are unimplemented, and therefore give
            # arbitrary results (they raise a TypeError in py3k)
    def check_attributes_with_type(self, tp):
        # One-dimensional contiguous view: verify the metadata attributes.
        m = self._view(tp(self._source))
        self.assertEqual(m.format, self.format)
        self.assertIsInstance(m.format, str)
        self.assertEqual(m.itemsize, self.itemsize)
        self.assertEqual(m.ndim, 1)
        self.assertEqual(m.shape, (6,))
        self.assertEqual(len(m), 6)
        self.assertEqual(m.strides, (self.itemsize,))
        self.assertEqual(m.suboffsets, None)
        return m
    def test_attributes_readonly(self):
        if not self.ro_type:
            self.skipTest("no read-only type to test")
        m = self.check_attributes_with_type(self.ro_type)
        self.assertEqual(m.readonly, True)
    def test_attributes_writable(self):
        if not self.rw_type:
            self.skipTest("no writable type to test")
        m = self.check_attributes_with_type(self.rw_type)
        self.assertEqual(m.readonly, False)
    # Disabled: unicode uses the old buffer API in 2.x
    #def test_getbuffer(self):
        ## Test PyObject_GetBuffer() on a memoryview object.
        #for tp in self._types:
            #b = tp(self._source)
            #oldrefcount = sys.getrefcount(b)
            #m = self._view(b)
            #oldviewrefcount = sys.getrefcount(m)
            #s = unicode(m, "utf-8")
            #self._check_contents(tp, b, s.encode("utf-8"))
            #self.assertEqual(sys.getrefcount(m), oldviewrefcount)
            #m = None
            #self.assertEqual(sys.getrefcount(b), oldrefcount)
    def test_gc(self):
        for tp in self._types:
            if not isinstance(tp, type):
                # If tp is a factory rather than a plain type, skip
                continue
            class MySource(tp):
                pass
            class MyObject:
                pass
            # Create a reference cycle through a memoryview object
            b = MySource(tp(b'abc'))
            m = self._view(b)
            o = MyObject()
            b.m = m
            b.o = o
            wr = weakref.ref(o)
            b = m = o = None
            # The cycle must be broken
            gc.collect()
            self.assertTrue(wr() is None, wr())
    def test_writable_readonly(self):
        # Issue #10451: memoryview incorrectly exposes a readonly
        # buffer as writable causing a segfault if using mmap
        tp = self.ro_type
        if tp is None:
            self.skipTest("no read-only type to test")
        b = tp(self._source)
        m = self._view(b)
        i = io.BytesIO(b'ZZZZ')
        self.assertRaises(TypeError, i.readinto, m)
# Variations on source objects for the buffer: bytes-like objects, then arrays
# with itemsize > 1.
# NOTE: support for multi-dimensional objects is unimplemented.
class BaseBytesMemoryTests(AbstractMemoryTests):
    """Source parameterization: bytes-like buffers, one byte per item."""
    ro_type = bytes        # read-only source type
    rw_type = bytearray    # writable source type
    getitem_type = bytes   # type expected back from indexing the view
    itemsize = 1
    format = 'B'
# Disabled: array.array() does not support the new buffer API in 2.x
#class BaseArrayMemoryTests(AbstractMemoryTests):
#ro_type = None
#rw_type = lambda self, b: array.array('i', map(ord, b))
#getitem_type = lambda self, b: array.array('i', map(ord, b)).tostring()
#itemsize = array.array('i').itemsize
#format = 'i'
#def test_getbuffer(self):
## XXX Test should be adapted for non-byte buffers
#pass
#def test_tolist(self):
## XXX NotImplementedError: tolist() only supports byte views
#pass
# Variations on indirection levels: memoryview, slice of memoryview,
# slice of slice of memoryview.
# This is important to test allocation subtleties.
class BaseMemoryviewTests:
    """Indirection strategy: view the source object directly (no slicing)."""

    def _view(self, obj):
        # Zero-indirection view of the whole buffer.
        view = memoryview(obj)
        return view

    def _check_contents(self, tp, obj, contents):
        # The entire underlying object must equal the expected contents.
        expected = tp(contents)
        self.assertEqual(obj, expected)
class BaseMemorySliceTests:
    """Indirection strategy: view the source through one memoryview slice."""
    # Sentinel bytes X/Y bracket the real payload.
    source_bytes = b"XabcdefY"

    def _view(self, obj):
        # Slice off the sentinels at either end.
        return memoryview(obj)[1:7]

    def _check_contents(self, tp, obj, contents):
        self.assertEqual(obj[1:7], tp(contents))

    @unittest.skipUnless(hasattr(sys, 'getrefcount'), "Reference counting")
    def test_refs(self):
        # Slicing a view must not leak a reference on the parent view.
        for tp in self._types:
            view = memoryview(tp(self._source))
            before = sys.getrefcount(view)
            view[1:2]
            self.assertEqual(sys.getrefcount(view), before)
class BaseMemorySliceSliceTests:
    """Indirection strategy: view the source through two chained slices."""
    # Sentinel bytes X/Y bracket the real payload.
    source_bytes = b"XabcdefY"

    def _view(self, obj):
        # Drop the trailing sentinel first, then the leading one.
        trimmed = memoryview(obj)[:7]
        return trimmed[1:]

    def _check_contents(self, tp, obj, contents):
        self.assertEqual(obj[1:7], tp(contents))
# Concrete test classes
class BytesMemoryviewTest(unittest.TestCase,
                          BaseMemoryviewTests, BaseBytesMemoryTests):
    """Concrete suite: direct memoryview over bytes/bytearray sources."""

    def test_constructor(self):
        for tp in self._types:
            source = tp(self._source)
            # Both positional and keyword construction must succeed...
            self.assertTrue(memoryview(source))
            self.assertTrue(memoryview(object=source))
            # ...and malformed signatures must raise TypeError.
            self.assertRaises(TypeError, memoryview)
            self.assertRaises(TypeError, memoryview, source, source)
            self.assertRaises(TypeError, memoryview, argument=source)
            self.assertRaises(TypeError, memoryview, source, argument=True)
#class ArrayMemoryviewTest(unittest.TestCase,
#BaseMemoryviewTests, BaseArrayMemoryTests):
#def test_array_assign(self):
## Issue #4569: segfault when mutating a memoryview with itemsize != 1
#a = array.array('i', range(10))
#m = memoryview(a)
#new_a = array.array('i', range(9, -1, -1))
#m[:] = new_a
#self.assertEqual(a, new_a)
class BytesMemorySliceTest(unittest.TestCase,
                           BaseMemorySliceTests, BaseBytesMemoryTests):
    """Concrete suite: single-slice views over bytes/bytearray sources."""
#class ArrayMemorySliceTest(unittest.TestCase,
#BaseMemorySliceTests, BaseArrayMemoryTests):
#pass
class BytesMemorySliceSliceTest(unittest.TestCase,
                                BaseMemorySliceSliceTests, BaseBytesMemoryTests):
    """Concrete suite: slice-of-slice views over bytes/bytearray sources."""
#class ArrayMemorySliceSliceTest(unittest.TestCase,
#BaseMemorySliceSliceTests, BaseArrayMemoryTests):
#pass
def test_main():
    # Run every TestCase in this module through the Py2 test harness.
    test_support.run_unittest(__name__)
if __name__ == "__main__":
    test_main()
| |
import os
import types
from tempfile import TemporaryFile, NamedTemporaryFile
import unittest
import numpy
import scipy.io.wavfile as sp_wavfile
from .__init__ import A440_MONO_16B, A440_STEREO_16B, STEPS_MONO_16B
from pychedelic import stream_functions
from pychedelic import config
from pychedelic.core import errors
class ramp_test(unittest.TestCase):
    """Tests for stream_functions.ramp (block-wise linear ramp generator)."""
    def tearDown(self):
        # Restore the global config mutated by the tests.
        config.frame_rate = 44100
        config.block_size = 1024
    def simple_ramp_test(self):
        config.frame_rate = 4
        config.block_size = 2
        # Ramp starting at 1: presumably to 2 over 1s, then to 0 over 1s
        # (4 frames per leg, 2-frame blocks) — matches the expectations below.
        ramp_gen = stream_functions.ramp(1, (2, 1), (0, 1))
        numpy.testing.assert_array_equal(
            numpy.round(next(ramp_gen), 4),
            numpy.round([[1], [1.33333]], 4)
        )
        numpy.testing.assert_array_equal(
            numpy.round(next(ramp_gen), 4),
            numpy.round([[1.66666], [2]], 4)
        )
        numpy.testing.assert_array_equal(
            numpy.round(next(ramp_gen), 4),
            numpy.round([[2], [1.33333]], 4)
        )
        numpy.testing.assert_array_equal(
            numpy.round(next(ramp_gen), 4),
            numpy.round([[0.66666], [0]], 4)
        )
        # Both legs consumed: the generator must be exhausted.
        self.assertRaises(StopIteration, next, ramp_gen)
class resampler_test(unittest.TestCase):
def upsample1_test(self):
"""
Testing upsampling with the following configurations :
IN: | | |
OUT: | | | | | | |
"""
def gen():
"""
[[0], [2], [4]] [[6], [8], [10]] ...
"""
for i in range(0, 2):
yield numpy.arange(i * 2 * 3, (i + 1) * 2 * 3, 2).reshape(3, 1)
resampler = stream_functions.resample(gen())
resampler.set_ratio(3.0)
# IN: 0 1 2 3 4 5
# OUT: 0 1 2 3 4 5 6 7 8 9 a b c d e f
numpy.testing.assert_array_equal(
stream_functions.concatenate(resampler).round(8),
(numpy.array([
[0], [1], [2], [3], [4], [5], [6], [7], [8],
[9], [10], [11], [12], [13], [14], [15]
]) * 2.0 / 3).round(8)
)
def upsample2_test(self):
"""
Here testing upsampling with the following configurations (+ testing stereo):
IN: | | | |
OUT: | | | | |
"""
def gen():
"""
[[0, 0], [-2, 2], [-4, 4]] [[-6, 6], [-8, 8], [-10, 10]] ...
"""
for i in range(0, 2):
block_in = numpy.vstack([
numpy.arange(-i * 2 * 3, -(i + 1) * 2 * 3, -2),
numpy.arange(i * 2 * 3, (i + 1) * 2 * 3, 2)
]).transpose()
yield block_in
resampler = stream_functions.resample(gen())
ratio = 3.0 / 2
resampler.set_ratio(ratio)
# IN: 0 1 2 3 4 5
# OUT: 0 1 2 3 4 5 6 7
numpy.testing.assert_array_equal(
stream_functions.concatenate(resampler).round(8),
(numpy.array([
[0, 0], [-1, 1], [-2, 2], [-3, 3],
[-4, 4], [-5, 5], [-6, 6], [-7, 7]
]) * 2 / ratio).round(8)
)
def downsample1_test(self):
"""
Testing downsampling with the following configurations:
IN: | | | | | |
OUT: | | |
"""
def gen():
"""
[[0], [0.5], [1]] [[1.5], [2], [2.5]] ...
"""
for i in range(0, 5):
yield numpy.arange(i * 3 * 0.5, (i + 1) * 3 * 0.5, 0.5).reshape(3, 1)
resampler = stream_functions.resample(gen())
ratio = 3.0 / 7
resampler.set_ratio(ratio)
# IN: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14
# OUT: 0 1 2 3 4 5 6
numpy.testing.assert_array_equal(
stream_functions.concatenate(resampler).round(8),
(numpy.array([[0], [1], [2], [3], [4], [5]]) * (0.5 / ratio)).round(8)
)
def downsample2_test(self):
"""
Testing downsampling with the following configurations:
# IN: | | | | | | | | |
# OUT: | | |
"""
def gen():
"""
[[0], [0.5], [1]] [[1.5], [2], [2.5]] ...
"""
for i in range(0, 3):
yield numpy.arange(i * 3 * 0.5, (i + 1) * 3 * 0.5, 0.5).reshape(3, 1)
resampler = stream_functions.resample(gen())
ratio = 1.0 / 4
resampler.set_ratio(ratio)
# IN: 0 1 2 3 4 5 6 7 8
# OUT: 0 1 2
numpy.testing.assert_array_equal(
stream_functions.concatenate(resampler).round(8),
(numpy.array([[0], [1], [2]]) * 0.5 / ratio).round(8)
)
def downsample3_test(self):
"""
Testing high downsampling, several blocks of incoming data fetched for one frame out.
"""
def gen():
"""
[[0], [0.5], [1]] [[1.5], [2], [2.5]] ...
"""
for i in range(0, 6):
yield numpy.arange(i * 3 * 0.5, (i + 1) * 3 * 0.5, 0.5).reshape(3, 1)
resampler = stream_functions.resample(gen())
ratio = 1.0 / 8
resampler.set_ratio(ratio)
# IN: 0 1 2 3 4 5 6 7 8 9 a b c d e f g h
# OUT: 0 1 2
numpy.testing.assert_array_equal(
stream_functions.concatenate(resampler).round(8),
(numpy.array([[0], [1], [2]]) * (0.5 / ratio)).round(8)
)
def ratio1_test(self):
"""
Ratio 1 test.
"""
def gen():
"""
[[0], [0.5], [1]] [[1.5], [2], [2.5]] ...
"""
for i in range(0, 2):
yield numpy.arange(i * 3 * 0.5, (i + 1) * 3 * 0.5, 0.5).reshape(3, 1)
resampler = stream_functions.resample(gen())
numpy.testing.assert_array_equal(
stream_functions.concatenate(resampler).round(8),
(numpy.array([[0], [1], [2], [3], [4], [5]]) * 0.5).round(8)
)
    def sanity_check_test(self):
        """
        Test that something's not fundamentally wrong.
        """
        def zcr_f0(samples):
            """
            Calculate frequency using zero-crossings method.
            """
            frame_rate = config.frame_rate
            frame_count = len(samples)
            # Boolean mask: True where the signal changes sign between frames.
            crossings = (numpy.diff(numpy.sign(samples)) != 0)
            # Time stamp of each frame.
            time = (numpy.ones(frame_count) / frame_rate).cumsum() - 1 / frame_rate
            # Consecutive zero-crossings of a sinusoid are half a period apart.
            half_oscillation_times = numpy.diff(time[crossings])
            # Intervals must be near-constant, or the estimate is meaningless.
            self.assertTrue(half_oscillation_times.std() < 0.00005)
            return 0.5 / half_oscillation_times.mean()
        frame_count = 44100 * 20
        # One single block holds the whole signal.
        config.block_size = frame_count
        f0 = 440
        # Ratio just above 1, so the resampled pitch shifts only slightly.
        ratio = 1/0.99999
        time = numpy.arange(0, frame_count) / float(config.frame_rate)
        samples = numpy.cos(2 * numpy.pi * f0 * time)
        def gen():
            yield samples.reshape(len(samples), 1)
        resampler = stream_functions.resample(gen())
        resampler.set_ratio(ratio)
        # The estimator itself must recover f0 from the raw signal ...
        self.assertEqual(round(zcr_f0(samples), 3), round(f0, 3))
        samples2 = next(resampler)[:,0]
        # ... and the resampled signal's pitch must be divided by the ratio.
        self.assertEqual(round(zcr_f0(samples2), 3), round(f0 / ratio, 3))
class mixer_test(unittest.TestCase):
    """Tests for `stream_functions.mixer`: summing several block streams."""

    def tearDown(self):
        # Restore the global config mutated by the tests.
        config.frame_rate = 44100
        config.block_size = 1024

    def dynamic_plug_test(self):
        """Sources plugged while the mixer runs join the mix on the next
        pull; exhausted sources drop out and absent channels read as 0."""
        config.frame_rate = 4
        config.block_size = 2
        def source_stereo1():
            for i in range(0, 3):
                yield numpy.ones((1, 2)) * 1 * (i + 1)
        def source_stereo2():
            for i in range(0, 2):
                yield numpy.ones((2, 2)) * 0.1 * (i + 1)
        def source_mono1():
            for i in range(0, 3):
                yield numpy.ones((3, 1)) * 0.01 * (i + 1)
        mixer = stream_functions.mixer(2)
        mixer.plug(source_stereo1())
        mixer.plug(source_mono1())
        # The mono source contributes to channel 0 only.
        numpy.testing.assert_array_equal(next(mixer), [
            [1 + 0.01, 1],
            [2 + 0.01, 2]
        ])
        numpy.testing.assert_array_equal(next(mixer), [
            [3 + 0.01, 3],
            [0.02, 0]
        ])
        numpy.testing.assert_array_equal(next(mixer), [
            [0.02, 0],
            [0.02, 0]
        ])
        mixer.plug(source_stereo2())
        numpy.testing.assert_array_equal(next(mixer), [
            [0.1 + 0.03, 0.1],
            [0.1 + 0.03, 0.1]
        ])
        numpy.testing.assert_array_equal(next(mixer), [
            [0.2 + 0.03, 0.2],
            [0.2, 0.2]
        ])
        # Every source is exhausted -> the mixer stops.
        self.assertRaises(StopIteration, next, mixer)

    def schedule_plug_test(self):
        """A plug scheduled through the mixer's clock takes effect
        mid-stream, cutting the current block at the scheduled time."""
        config.frame_rate = 4
        config.block_size = 4
        def source_stereo():
            for i in range(0, 2):
                yield numpy.ones((2, 2)) * 0.1 * (i + 1)
        def source_mono():
            for i in range(0, 3):
                yield numpy.ones((3, 1)) * 0.01 * (i + 1)
        mixer = stream_functions.mixer(2)
        mixer.plug(source_mono())
        # Plug the stereo source after 1.5 seconds (6 frames at rate 4).
        mixer.clock.run_after(1.5, mixer.plug, args=[source_stereo()])
        numpy.testing.assert_array_equal(next(mixer), [
            [0.01, 0], [0.01, 0], [0.01, 0], [0.02, 0]
        ])
        # Block cut short (2 frames) at the scheduled plug time.
        numpy.testing.assert_array_equal(next(mixer), [
            [0.02, 0], [0.02, 0]
        ])
        numpy.testing.assert_array_equal(next(mixer), [
            [0.1 + 0.03, 0.1],
            [0.1 + 0.03, 0.1],
            [0.2 + 0.03, 0.2],
            [0.2, 0.2]
        ])
        self.assertRaises(StopIteration, next, mixer)

    def unplug_test(self):
        """An unplugged source stops contributing from the next pull on."""
        config.frame_rate = 4
        config.block_size = 2
        def source_stereo():
            for i in range(0, 3):
                yield numpy.ones((1, 2)) * 1 * (i + 1)
        def source_mono():
            for i in range(0, 3):
                yield numpy.ones((3, 1)) * 0.01 * (i + 1)
        mixer = stream_functions.mixer(2)
        src1 = source_mono()
        src2 = source_stereo()
        mixer.plug(src2)
        mixer.plug(src1)
        numpy.testing.assert_array_equal(next(mixer), [
            [1 + 0.01, 1], [2 + 0.01, 2]
        ])
        mixer.unplug(src2)
        numpy.testing.assert_array_equal(next(mixer), [
            [0.01, 0], [0.02, 0]
        ])
        numpy.testing.assert_array_equal(next(mixer), [
            [0.02, 0], [0.02, 0]
        ])
        mixer.unplug(src1)
        # Nothing plugged anymore -> the mixer stops.
        self.assertRaises(StopIteration, next, mixer)

    def stop_when_empty_test(self):
        """With stop_when_empty=False an empty mixer yields silence instead
        of raising StopIteration."""
        config.frame_rate = 4
        config.block_size = 2
        def source_mono():
            for i in range(0, 3):
                yield numpy.ones((1, 1)) * 1 * (i + 1)
        mixer = stream_functions.mixer(1, stop_when_empty=False)
        # No source plugged yet: silence.
        numpy.testing.assert_array_equal(next(mixer), [
            [0],
            [0]
        ])
        mixer.plug(source_mono())
        numpy.testing.assert_array_equal(next(mixer), [
            [1],
            [2]
        ])
        numpy.testing.assert_array_equal(next(mixer), [
            [3],
            [0]
        ])
        # Source exhausted: back to silence, still no StopIteration.
        numpy.testing.assert_array_equal(next(mixer), [
            [0],
            [0]
        ])
class window_test(unittest.TestCase):
    """Tests for `stream_functions.window`: sliding windows over a stream."""

    def no_pad_test(self):
        """Without padding, iteration stops once a full window can't be built."""
        def gen():
            yield numpy.array([[0, 0]])
            yield numpy.array([[1, 1]])
            yield numpy.array([[2, 2]])
        window = stream_functions.window(gen(), 2, 1, pad=False)
        numpy.testing.assert_array_equal(next(window), [[0, 0], [1, 1]])
        numpy.testing.assert_array_equal(next(window), [[1, 1], [2, 2]])
        self.assertRaises(StopIteration, next, window)

    def no_pad_decimal_hop_size_test(self):
        """A fractional hop advances the read position by fractions; the
        same window is returned until the integer part changes."""
        def gen():
            yield numpy.array([[0, 0]])
            yield numpy.array([[1, 1]])
            yield numpy.array([[2, 2]])
        window = stream_functions.window(gen(), 2, 0.5, pad=False)
        # read position = 0
        numpy.testing.assert_array_equal(next(window), [[0, 0], [1, 1]])
        # read position = 0.5
        numpy.testing.assert_array_equal(next(window), [[0, 0], [1, 1]])
        # read position = 1.0
        numpy.testing.assert_array_equal(next(window), [[1, 1], [2, 2]])
        # read position = 1.5
        numpy.testing.assert_array_equal(next(window), [[1, 1], [2, 2]])
        # read position = 2
        self.assertRaises(StopIteration, next, window)

    def pad_test(self):
        """With padding (the default), trailing windows are zero-filled."""
        def gen():
            yield numpy.array([[0, 0]])
            yield numpy.array([[1, 1]])
            yield numpy.array([[2, 2]])
        window = stream_functions.window(gen(), 3, 1)
        numpy.testing.assert_array_equal(next(window), [[0, 0], [1, 1], [2, 2]])
        numpy.testing.assert_array_equal(next(window), [[1, 1], [2, 2], [0, 0]])
        numpy.testing.assert_array_equal(next(window), [[2, 2], [0, 0], [0, 0]])
        self.assertRaises(StopIteration, next, window)

    def win_size_exact_and_pad_test(self):
        """
        Test when padding is True, and last window falls exactly, without actual need for padding.
        """
        def gen():
            for i in range(2):
                yield numpy.array([[i * 11, i * 11]])
        window = stream_functions.window(gen(), 1, 1, pad=True)
        numpy.testing.assert_array_equal(
            stream_functions.concatenate(window),
            [[0, 0], [11, 11]]
        )

    def overlap_cut_test(self):
        """
        Test overlap with pulled blocks smaller than source blocks.
        """
        def gen():
            # One single (6, 2) block: both channels count 0..5.
            yield numpy.tile(numpy.arange(0, 6), (2, 1)).transpose()
        window = stream_functions.window(gen(), 3, 1)
        blocks = [next(window) for i in range(0, 4)]
        numpy.testing.assert_array_equal(blocks, [
            [[0, 0], [1, 1], [2, 2]], [[1, 1], [2, 2], [3, 3]],
            [[2, 2], [3, 3], [4, 4]], [[3, 3], [4, 4], [5, 5]],
        ])

    def overlap_concatenate_test(self):
        """
        Test overlap with pulled blocks bigger than source blocks.
        """
        def gen():
            for i in range(6):
                yield numpy.array([[i * 11, i * 11]])
        window = stream_functions.window(gen(), 2, 1)
        blocks = [next(window) for i in range(0, 5)]
        numpy.testing.assert_array_equal(blocks, [
            [[0, 0], [11, 11]], [[11, 11], [22, 22]],
            [[22, 22], [33, 33]], [[33, 33], [44, 44]],
            [[44, 44], [55, 55]]
        ])

    def overlap_almost_static_test(self):
        """
        Test with such a big overlap that same block is returned several times
        """
        def gen():
            for i in range(6):
                yield numpy.array([[i * 11]])
        window = stream_functions.window(gen(), 3, 0.5)
        numpy.testing.assert_array_equal(next(window), [[0], [11], [22]])
        numpy.testing.assert_array_equal(next(window), [[0], [11], [22]])
        numpy.testing.assert_array_equal(next(window), [[11], [22], [33]])

    def hop_size_bigger_than_win_size_test(self):
        """A hop bigger than the window size skips frames between windows."""
        def gen():
            for i in range(6):
                yield numpy.array([[i * 11, i * 11]])
        window = stream_functions.window(gen(), 2, 3, pad=True)
        numpy.testing.assert_array_equal(
            stream_functions.concatenate(window),
            [[0, 0], [11, 11], [33, 33], [44, 44]]
        )

    def get_archive_test(self):
        """get_archive(n) returns the last n consumed frames, bounded by
        the archive_size given at construction."""
        def gen():
            for i in range(6):
                yield numpy.array([[i * 11, i * 11]])
        window = stream_functions.window(gen(), 2, 2, archive_size=3)
        block = next(window)
        numpy.testing.assert_array_equal(block, [[0, 0], [11, 11]])
        block = next(window)
        numpy.testing.assert_array_equal(block, [[22, 22], [33, 33]])
        # Internal buffer keeps only the last `archive_size` frames.
        numpy.testing.assert_array_equal(
            window._buffer._blocks,
            [[[11, 11]], [[22, 22]], [[33, 33]]]
        )
        numpy.testing.assert_array_equal(window.get_archive(2), [[22, 22], [33, 33]])
        block = next(window)
        numpy.testing.assert_array_equal(block, [[44, 44], [55, 55]])
        numpy.testing.assert_array_equal(
            window._buffer._blocks,
            [[[33, 33]], [[44, 44]], [[55, 55]]]
        )
        numpy.testing.assert_array_equal(window.get_archive(1), [[55, 55]])
        numpy.testing.assert_array_equal(window.get_archive(3), [[33, 33], [44, 44], [55, 55]])
class iter_Test(unittest.TestCase):
    """Tests for `stream_functions.iter`: blockwise iteration over an array."""

    def tearDown(self):
        # Restore the global config mutated by the tests.
        config.frame_rate = 44100
        config.block_size = 1024

    def _two_channel_ramp(self):
        # (10, 2) array: channel 0 counts 0..9, channel 1 is its double.
        ramp = numpy.arange(0, 10)
        return numpy.vstack([ramp, ramp * 2]).transpose()

    def simple_test(self):
        config.block_size = 2
        blocks = stream_functions.iter(self._two_channel_ramp())
        expected_blocks = [
            [[0, 0], [1, 2]], [[2, 4], [3, 6]], [[4, 8], [5, 10]],
            [[6, 12], [7, 14]], [[8, 16], [9, 18]],
        ]
        for expected in expected_blocks:
            numpy.testing.assert_array_equal(next(blocks), expected)
        self.assertRaises(StopIteration, next, blocks)

    def start_test(self):
        config.block_size = 2
        blocks = stream_functions.iter(self._two_channel_ramp(),
                                       start=4.0 / config.frame_rate)
        for expected in ([[4, 8], [5, 10]], [[6, 12], [7, 14]], [[8, 16], [9, 18]]):
            numpy.testing.assert_array_equal(next(blocks), expected)
        self.assertRaises(StopIteration, next, blocks)

    def end_test(self):
        config.block_size = 2
        blocks = stream_functions.iter(self._two_channel_ramp(),
                                       start=4.0 / config.frame_rate,
                                       end=5.0 / config.frame_rate)
        numpy.testing.assert_array_equal(next(blocks), [[4, 8]])
        self.assertRaises(StopIteration, next, blocks)

    def pad_test(self):
        config.block_size = 2
        blocks = stream_functions.iter(self._two_channel_ramp(),
                                       start=4.0 / config.frame_rate,
                                       end=5.0 / config.frame_rate, pad=True)
        numpy.testing.assert_array_equal(next(blocks), [[4, 8], [0, 0]])
        self.assertRaises(StopIteration, next, blocks)
class concatenate_Test(unittest.TestCase):
    """Tests for `stream_functions.concatenate`."""

    def simple_test(self):
        def constant_blocks():
            # Three (3, 1) blocks filled with 0, 1 and 2 respectively.
            for value in range(0, 3):
                yield numpy.ones([3, 1]) * value

        result = stream_functions.concatenate(constant_blocks())
        numpy.testing.assert_array_equal(result, numpy.array([
            [0], [0], [0], [1], [1], [1], [2], [2], [2]
        ]))
class read_wav_Test(unittest.TestCase):
    """Tests for `stream_functions.read_wav`."""

    def tearDown(self):
        # Restore the global config mutated by the tests.
        config.frame_rate = 44100
        config.block_size = 1024

    def blocks_size_test(self):
        """Whole file is delivered as blocks of `config.block_size` frames."""
        config.block_size = 50
        blocks = stream_functions.read_wav(A440_STEREO_16B)
        self.assertEqual(blocks.infos['frame_rate'], 44100)
        self.assertEqual(blocks.infos['channel_count'], 2)
        blocks = list(blocks)
        self.assertEqual([len(b) for b in blocks], [50, 50, 50, 50, 50, 50, 50, 50, 41])
        self.assertEqual(blocks[0].shape, (50, 2))
        actual = numpy.concatenate(blocks)
        frame_rate, expected = sp_wavfile.read(A440_STEREO_16B)
        expected = expected / float(2**15)
        numpy.testing.assert_array_equal(expected.round(4), actual.round(4))

    def block_size_bigger_than_slice_to_read_test(self):
        """
        Read only a segment of the file, block_size bigger than segment to read.
        """
        config.block_size = 1000
        blocks = stream_functions.read_wav(A440_MONO_16B, start=0.002, end=0.004)
        self.assertEqual(blocks.infos['frame_rate'], 44100)
        self.assertEqual(blocks.infos['channel_count'], 1)
        blocks = list(blocks)
        self.assertEqual(len(blocks), 1)
        self.assertEqual(blocks[0].shape, (88, 1))
        actual = numpy.concatenate(blocks)
        frame_rate, expected = sp_wavfile.read(A440_MONO_16B)
        # BUGFIX: slice bounds must be ints — float indices (0.002 * 44100
        # == 88.2) raise TypeError on modern Python/numpy.
        expected = numpy.array([expected[int(0.002*44100):int(0.004*44100)] / float(2**15)]).transpose()
        numpy.testing.assert_array_equal(expected.round(4), actual.round(4))

    def last_block_too_small_test(self):
        """
        Ommit end, not an exact count of block_size.
        """
        config.block_size = 20
        blocks = stream_functions.read_wav(A440_MONO_16B, start=0.002)
        self.assertEqual(blocks.infos['frame_rate'], 44100)
        self.assertEqual(blocks.infos['channel_count'], 1)
        blocks = list(blocks)
        self.assertEqual([len(b) for b in blocks], [20] * 17 + [13])
        self.assertEqual(blocks[0].shape, (20, 1))
        actual = numpy.concatenate(blocks)
        frame_rate, expected = sp_wavfile.read(A440_MONO_16B)
        # BUGFIX: int slice bound (see block_size_bigger_than_slice_to_read_test).
        expected = numpy.array([expected[int(0.002*44100):] / float(2**15)]).transpose()
        numpy.testing.assert_array_equal(expected.round(4), actual.round(4))

    def seek_test(self):
        """seek() repositions the stream, forwards, repeated, or back to 0."""
        config.block_size = 441
        blocks = stream_functions.read_wav(STEPS_MONO_16B, start=1.1, end=1.4)
        self.assertEqual(blocks.infos['frame_rate'], 44100)
        self.assertEqual(blocks.infos['channel_count'], 1)
        expected = numpy.ones([441, 1]) * 0.1
        samples = next(blocks)
        numpy.testing.assert_array_equal(expected.round(3), samples.round(3))
        blocks.seek(1.3)
        expected = numpy.ones([441, 1]) * 0.3
        samples = next(blocks)
        numpy.testing.assert_array_equal(expected.round(3), samples.round(3))
        # Seeking twice to the same position yields the same data again.
        blocks.seek(1.3)
        expected = numpy.ones([441, 1]) * 0.3
        samples = next(blocks)
        numpy.testing.assert_array_equal(expected.round(3), samples.round(3))
        blocks.seek(0)
        expected = numpy.ones([441, 1]) * -1
        samples = next(blocks)
        numpy.testing.assert_array_equal(expected.round(3), samples.round(3))

    def read_invalid_wav_test(self):
        """A non-wav file raises WavFormatError."""
        # __file__ is obviously not a wav file ...
        self.assertRaises(errors.WavFormatError, stream_functions.read_wav, __file__)
class write_wav_Test(unittest.TestCase):
    """Tests for `stream_functions.write_wav`."""

    def simple_write_test(self):
        """Blocks pulled from the source end up verbatim in the wav file."""
        temp_file = NamedTemporaryFile()
        blocks = []
        def source():
            for i in range(0, 5):
                block = numpy.ones((44100, 1)) * i * 0.1
                blocks.append(block)
                yield block
        sink = stream_functions.write_wav(source(), temp_file)
        self.assertEqual(sink.infos['frame_rate'], 44100)
        self.assertEqual(sink.infos['channel_count'], 1)
        expected = numpy.concatenate(blocks)
        frame_rate, actual = sp_wavfile.read(temp_file.name)
        actual = numpy.array([actual / float(2**15)]).transpose()
        numpy.testing.assert_array_equal(expected.round(4), actual.round(4))

    def chain_test(self):
        """
        Test that if one generator raises StopIteration up the chain, the sink catches it.
        """
        temp_file = NamedTemporaryFile()
        blocks = []
        def source():
            for i in range(0, 5):
                block = numpy.ones((44100, 2)) * i * 0.1
                blocks.append(block)
                yield block
        def double(source):
            while True:
                yield next(source) * 2
        sink = stream_functions.write_wav(double(source()), temp_file)
        self.assertEqual(sink.infos['frame_rate'], 44100)
        self.assertEqual(sink.infos['channel_count'], 2)
        expected = numpy.concatenate(blocks) * 2
        frame_rate, actual = sp_wavfile.read(temp_file.name)
        actual = actual / float(2**15)
        self.assertEqual(actual.shape, (44100 * 5, 2))
        numpy.testing.assert_array_equal(expected.round(4), actual.round(4))

    def write_incorrect_channel_count_test(self):
        """A block with a different channel count aborts with ValueError,
        while the frames written before the error are preserved."""
        temp_file = NamedTemporaryFile()
        got_error = False
        def source():
            yield numpy.ones((44100, 2)) * 0.1
            yield numpy.ones((44100, 2)) * 0.1
            yield numpy.ones((44100, 1)) * 0.1
        try:
            stream_functions.write_wav(source(), temp_file)
        except ValueError:
            got_error = True
        self.assertTrue(got_error)
        expected = numpy.ones((44100 * 2, 2)) * 0.1
        frame_rate, actual = sp_wavfile.read(temp_file.name)
        actual = actual / float(2**15)
        self.assertEqual(actual.shape, (44100 * 2, 2))
        numpy.testing.assert_array_equal(expected.round(4), actual.round(4))

    @unittest.skip('temporarily disabled cause too slow')
    def reach_wav_size_limit_test(self):
        """Writing past the wav size limit raises WavSizeLimitError."""
        # BUGFIX: wav data is binary; open the temp file in binary mode
        # ('w+b', the NamedTemporaryFile default used above), not text 'w'.
        temp_file = TemporaryFile('w+b')
        def source():
            while True:
                yield numpy.zeros((2**20, 1))
        got_error = False
        try:
            stream_functions.write_wav(source(), temp_file)
        except errors.WavSizeLimitError:
            got_error = True
        self.assertTrue(got_error)
| |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import unicodedata
from openerp import tools
import openerp.modules
from openerp.osv import fields, osv
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
TRANSLATION_TYPE = [
('field', 'Field'),
('model', 'Object'),
('rml', 'RML (deprecated - use Report)'), # Pending deprecation - to be replaced by report!
('report', 'Report/Template'),
('selection', 'Selection'),
('view', 'View'),
('wizard_button', 'Wizard Button'),
('wizard_field', 'Wizard Field'),
('wizard_view', 'Wizard View'),
('xsl', 'XSL'),
('help', 'Help'),
('code', 'Code'),
('constraint', 'Constraint'),
('sql_constraint', 'SQL Constraint')
]
class ir_translation_import_cursor(object):
    """Temporary cursor for optimizing mass insert into ir.translation
    Open it (attached to a sql cursor), feed it with translation data and
    finish() it in order to insert multiple translations in a batch.
    """
    # Name of the temporary table the rows are staged in before the merge.
    _table_name = 'tmp_ir_translation_import'

    def __init__(self, cr, uid, parent, context):
        """ Initializer

        Store some values, and also create a temporary SQL table to accept
        the data.
        @param parent an instance of ir.translation ORM model
        """
        self._cr = cr
        self._uid = uid
        self._context = context
        # When True, finish() overwrites existing matching translations.
        self._overwrite = context.get('overwrite', False)
        self._debug = False
        self._parent_table = parent._table
        # Note that Postgres will NOT inherit the constraints or indexes
        # of ir_translation, so this copy will be much faster.
        # Table names are trusted constants, hence the direct interpolation.
        cr.execute('''CREATE TEMP TABLE %s(
            imd_model VARCHAR(64),
            imd_name VARCHAR(128)
            ) INHERITS (%s) ''' % (self._table_name, self._parent_table))

    def push(self, trans_dict):
        """Feed a translation, as a dictionary, into the cursor
        """
        # A non-empty value means the term is already translated.
        params = dict(trans_dict, state="translated" if trans_dict['value'] else "to_translate")
        if params['type'] == 'view':
            # ugly hack for QWeb views - pending refactoring of translations in master
            if params['imd_model'] == 'website':
                params['imd_model'] = "ir.ui.view"
            # non-QWeb views do not need a matching res_id -> force to 0 to avoid dropping them
            elif params['res_id'] is None:
                params['res_id'] = 0
        # Only the table name is interpolated (trusted constant); values go
        # through psycopg2 placeholders (%% escapes the literal %s).
        self._cr.execute("""INSERT INTO %s (name, lang, res_id, src, type, imd_model, module, imd_name, value, state, comments)
                            VALUES (%%(name)s, %%(lang)s, %%(res_id)s, %%(src)s, %%(type)s, %%(imd_model)s, %%(module)s,
                                    %%(imd_name)s, %%(value)s, %%(state)s, %%(comments)s)""" % self._table_name,
                         params)

    def finish(self):
        """ Transfer the data from the temp table to ir.translation
        """
        cr = self._cr
        if self._debug:
            cr.execute("SELECT count(*) FROM %s" % self._table_name)
            c = cr.fetchone()[0]
            _logger.debug("ir.translation.cursor: We have %d entries to process", c)

        # Step 1: resolve ir.model.data references to res_ids
        cr.execute("""UPDATE %s AS ti
            SET res_id = imd.res_id
            FROM ir_model_data AS imd
            WHERE ti.res_id IS NULL
                AND ti.module IS NOT NULL AND ti.imd_name IS NOT NULL
                AND ti.module = imd.module AND ti.imd_name = imd.name
                AND ti.imd_model = imd.model; """ % self._table_name)

        if self._debug:
            cr.execute("SELECT module, imd_name, imd_model FROM %s " \
                "WHERE res_id IS NULL AND module IS NOT NULL" % self._table_name)
            for row in cr.fetchall():
                _logger.info("ir.translation.cursor: missing res_id for %s.%s <%s> ", *row)

        # Records w/o res_id must _not_ be inserted into our db, because they are
        # referencing non-existent data.
        cr.execute("DELETE FROM %s WHERE res_id IS NULL AND module IS NOT NULL" % \
            self._table_name)

        # Match condition shared by the UPDATE and INSERT below.
        find_expr = "irt.lang = ti.lang AND irt.type = ti.type " \
                    " AND irt.name = ti.name AND irt.src = ti.src " \
                    " AND (ti.type != 'model' OR ti.res_id = irt.res_id) "

        # Step 2: update existing (matching) translations
        if self._overwrite:
            cr.execute("""UPDATE ONLY %s AS irt
                SET value = ti.value,
                    state = 'translated'
                FROM %s AS ti
                WHERE %s AND ti.value IS NOT NULL AND ti.value != ''
                """ % (self._parent_table, self._table_name, find_expr))

        # Step 3: insert new translations
        cr.execute("""INSERT INTO %s(name, lang, res_id, src, type, value, module, state, comments)
            SELECT name, lang, res_id, src, type, value, module, state, comments
            FROM %s AS ti
            WHERE NOT EXISTS(SELECT 1 FROM ONLY %s AS irt WHERE %s);
            """ % (self._parent_table, self._table_name, self._parent_table, find_expr))

        if self._debug:
            cr.execute('SELECT COUNT(*) FROM ONLY %s' % self._parent_table)
            c1 = cr.fetchone()[0]
            cr.execute('SELECT COUNT(*) FROM ONLY %s AS irt, %s AS ti WHERE %s' % \
                (self._parent_table, self._table_name, find_expr))
            c = cr.fetchone()[0]
            _logger.debug("ir.translation.cursor: %d entries now in ir.translation, %d common entries with tmp", c1, c)

        # Step 4: cleanup
        cr.execute("DROP TABLE %s" % self._table_name)
        return True
class ir_translation(osv.osv):
    """Translations of model fields, views, reports and source-code terms."""
    _name = "ir.translation"
    # Skip the create/write audit columns — presumably because this table is
    # high-volume and written in bulk during module loading (TODO confirm).
    _log_access = False
def _get_language(self, cr, uid, context):
lang_model = self.pool.get('res.lang')
lang_ids = lang_model.search(cr, uid, [('translatable', '=', True)], context=context)
lang_data = lang_model.read(cr, uid, lang_ids, ['code', 'name'], context=context)
return [(d['code'], d['name']) for d in lang_data]
def _get_src(self, cr, uid, ids, name, arg, context=None):
''' Get source name for the translation. If object type is model then
return the value store in db. Otherwise return value store in src field
'''
if context is None:
context = {}
res = dict.fromkeys(ids, False)
for record in self.browse(cr, uid, ids, context=context):
if record.type != 'model':
res[record.id] = record.src
else:
model_name, field = record.name.split(',')
model = self.pool.get(model_name)
if model is not None:
# Pass context without lang, need to read real stored field, not translation
context_no_lang = dict(context, lang=None)
result = model.read(cr, uid, [record.res_id], [field], context=context_no_lang)
res[record.id] = result[0][field] if result else False
return res
def _set_src(self, cr, uid, id, name, value, args, context=None):
''' When changing source term of a translation, change its value in db for
the associated object, and the src field
'''
if context is None:
context = {}
record = self.browse(cr, uid, id, context=context)
if record.type == 'model':
model_name, field = record.name.split(',')
model = self.pool.get(model_name)
#We need to take the context without the language information, because we want to write on the
#value store in db and not on the one associate with current language.
#Also not removing lang from context trigger an error when lang is different
context_wo_lang = context.copy()
context_wo_lang.pop('lang', None)
model.write(cr, uid, record.res_id, {field: value}, context=context_wo_lang)
return self.write(cr, uid, id, {'src': value}, context=context)
_columns = {
'name': fields.char('Translated field', required=True),
'res_id': fields.integer('Record ID', select=True),
'lang': fields.selection(_get_language, string='Language'),
'type': fields.selection(TRANSLATION_TYPE, string='Type', select=True),
'src': fields.text('Old source'),
'source': fields.function(_get_src, fnct_inv=_set_src, type='text', string='Source'),
'value': fields.text('Translation Value'),
'module': fields.char('Module', help="Module this term belongs to", select=True),
'state': fields.selection(
[('to_translate','To Translate'),
('inprogress','Translation in Progress'),
('translated','Translated')],
string="Status",
help="Automatically set to let administators find new terms that might need to be translated"),
# aka gettext extracted-comments - we use them to flag openerp-web translation
# cfr: http://www.gnu.org/savannah-checkouts/gnu/gettext/manual/html_node/PO-Files.html
'comments': fields.text('Translation comments', select=True),
}
_defaults = {
'state': 'to_translate',
}
_sql_constraints = [ ('lang_fkey_res_lang', 'FOREIGN KEY(lang) REFERENCES res_lang(code)',
'Language code of translation item must be among known languages' ), ]
    def _auto_init(self, cr, context=None):
        """Create/upgrade the table, then hand-manage its indexes.

        Drops two legacy btree indexes that covered `src` and replaces them
        with a hash index on `src` alone plus a btree on (name, lang, type).
        """
        super(ir_translation, self)._auto_init(cr, context)

        # FIXME: there is a size limit on btree indexed values so we can't index src column with normal btree.
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('ir_translation_ltns',))
        if cr.fetchone():
            #temporarily removed: cr.execute('CREATE INDEX ir_translation_ltns ON ir_translation (name, lang, type, src)')
            cr.execute('DROP INDEX ir_translation_ltns')
            cr.commit()
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('ir_translation_lts',))
        if cr.fetchone():
            #temporarily removed: cr.execute('CREATE INDEX ir_translation_lts ON ir_translation (lang, type, src)')
            cr.execute('DROP INDEX ir_translation_lts')
            cr.commit()

        # add separate hash index on src (no size limit on values), as postgres 8.1+ is able to combine separate indexes
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('ir_translation_src_hash_idx',))
        if not cr.fetchone():
            cr.execute('CREATE INDEX ir_translation_src_hash_idx ON ir_translation using hash (src)')

        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('ir_translation_ltn',))
        if not cr.fetchone():
            cr.execute('CREATE INDEX ir_translation_ltn ON ir_translation (name, lang, type)')
            cr.commit()
def _check_selection_field_value(self, cr, uid, field, value, context=None):
if field == 'lang':
return
return super(ir_translation, self)._check_selection_field_value(cr, uid, field, value, context=context)
@tools.ormcache_multi(skiparg=3, multi=6)
def _get_ids(self, cr, uid, name, tt, lang, ids):
translations = dict.fromkeys(ids, False)
if ids:
cr.execute('select res_id,value '
'from ir_translation '
'where lang=%s '
'and type=%s '
'and name=%s '
'and res_id IN %s',
(lang,tt,name,tuple(ids)))
for res_id, value in cr.fetchall():
translations[res_id] = value
return translations
def _set_ids(self, cr, uid, name, tt, lang, ids, value, src=None):
self._get_ids.clear_cache(self)
self._get_source.clear_cache(self)
cr.execute('delete from ir_translation '
'where lang=%s '
'and type=%s '
'and name=%s '
'and res_id IN %s',
(lang,tt,name,tuple(ids),))
for id in ids:
self.create(cr, uid, {
'lang':lang,
'type':tt,
'name':name,
'res_id':id,
'value':value,
'src':src,
})
return len(ids)
def _get_source_query(self, cr, uid, name, types, lang, source, res_id):
if source:
query = """SELECT value
FROM ir_translation
WHERE lang=%s
AND type in %s
AND src=%s"""
params = (lang or '', types, tools.ustr(source))
if res_id:
query += " AND res_id=%s"
params += (res_id,)
if name:
query += " AND name=%s"
params += (tools.ustr(name),)
else:
query = """SELECT value
FROM ir_translation
WHERE lang=%s
AND type in %s
AND name=%s"""
params = (lang or '', types, tools.ustr(name))
return (query, params)
    @tools.ormcache(skiparg=3)
    def _get_source(self, cr, uid, name, types, lang, source=None, res_id=None):
        """
        Returns the translation for the given combination of name, type, language
        and source. All values passed to this method should be unicode (not byte strings),
        especially ``source``.

        :param name: identification of the term to translate, such as field name (optional if source is passed)
        :param types: single string defining type of term to translate (see ``type`` field on ir.translation), or sequence of allowed types (strings)
        :param lang: language code of the desired translation
        :param source: optional source term to translate (should be unicode)
        :param res_id: optional resource id to translate (if used, ``source`` should be set)
        :rtype: unicode
        :return: the request translation, or an empty unicode string if no translation was
                 found and `source` was not passed
        """
        # FIXME: should assert that `source` is unicode and fix all callers to always pass unicode
        # so we can remove the string encoding/decoding.
        if not lang:
            # No target language: fall back to the source term (or '').
            return tools.ustr(source or '')
        if isinstance(types, basestring):
            # Normalize a single type to the sequence form used by the query.
            types = (types,)
        query, params = self._get_source_query(cr, uid, name, types, lang, source, res_id)
        cr.execute(query, params)
        res = cr.fetchone()
        trad = res and res[0] or u''
        if source and not trad:
            # Untranslated: echo the source term back.
            return tools.ustr(source)
        # Remove control characters (Unicode category 'Cc'); on a unicode
        # string, Python 2's filter() returns a unicode string again.
        return filter(lambda c: unicodedata.category(c) != 'Cc', tools.ustr(trad))
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
ids = super(ir_translation, self).create(cr, uid, vals, context=context)
self._get_source.clear_cache(self)
self._get_ids.clear_cache(self)
self.pool['ir.ui.view'].clear_cache()
return ids
def write(self, cursor, user, ids, vals, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if vals.get('src') or ('value' in vals and not(vals.get('value'))):
vals.update({'state':'to_translate'})
if vals.get('value'):
vals.update({'state':'translated'})
result = super(ir_translation, self).write(cursor, user, ids, vals, context=context)
self._get_source.clear_cache(self)
self._get_ids.clear_cache(self)
self.pool['ir.ui.view'].clear_cache()
return result
def unlink(self, cursor, user, ids, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
self._get_source.clear_cache(self)
self._get_ids.clear_cache(self)
result = super(ir_translation, self).unlink(cursor, user, ids, context=context)
return result
def translate_fields(self, cr, uid, model, id, field=None, context=None):
trans_model = self.pool[model]
domain = ['&', ('res_id', '=', id), ('name', '=like', model + ',%')]
langs_ids = self.pool.get('res.lang').search(cr, uid, [('code', '!=', 'en_US')], context=context)
if not langs_ids:
raise osv.except_osv(_('Error'), _("Translation features are unavailable until you install an extra OpenERP translation."))
langs = [lg.code for lg in self.pool.get('res.lang').browse(cr, uid, langs_ids, context=context)]
main_lang = 'en_US'
translatable_fields = []
for f, info in trans_model._all_columns.items():
if info.column.translate:
if info.parent_model:
parent_id = trans_model.read(cr, uid, [id], [info.parent_column], context=context)[0][info.parent_column][0]
translatable_fields.append({ 'name': f, 'id': parent_id, 'model': info.parent_model })
domain.insert(0, '|')
domain.extend(['&', ('res_id', '=', parent_id), ('name', '=', "%s,%s" % (info.parent_model, f))])
else:
translatable_fields.append({ 'name': f, 'id': id, 'model': model })
if len(langs):
fields = [f.get('name') for f in translatable_fields]
record = trans_model.read(cr, uid, [id], fields, context={ 'lang': main_lang })[0]
for lg in langs:
for f in translatable_fields:
# Check if record exists, else create it (at once)
sql = """INSERT INTO ir_translation (lang, src, name, type, res_id, value)
SELECT %s, %s, %s, 'model', %s, %s WHERE NOT EXISTS
(SELECT 1 FROM ir_translation WHERE lang=%s AND name=%s AND res_id=%s AND type='model');
UPDATE ir_translation SET src = %s WHERE lang=%s AND name=%s AND res_id=%s AND type='model';
"""
src = record[f['name']] or None
name = "%s,%s" % (f['model'], f['name'])
cr.execute(sql, (lg, src , name, f['id'], src, lg, name, f['id'], src, lg, name, id))
action = {
'name': 'Translate',
'res_model': 'ir.translation',
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'tree,form',
'domain': domain,
}
if field:
info = trans_model._all_columns[field]
action['context'] = {
'search_default_name': "%s,%s" % (info.parent_model or model, field)
}
return action
def _get_import_cursor(self, cr, uid, context=None):
""" Return a cursor-like object for fast inserting translations
"""
return ir_translation_import_cursor(cr, uid, self, context=context)
def load_module_terms(self, cr, modules, langs, context=None):
    """Load the translation (.po) files of the given modules for the given
    languages into the database.

    For a sub-language such as ``es_CL``, the base language file (``es.po``)
    is loaded first, then the specific file is applied on top with
    ``context['overwrite'] = True`` so the more specific terms win.

    :param cr: database cursor
    :param modules: iterable of module names to load translations for
    :param langs: iterable of language codes (e.g. ``['es_CL']``)
    :param context: optional context dict; copied locally so the caller's
        dict is never mutated
    :return: True
    """
    context = dict(context or {})  # local copy
    for module_name in modules:
        modpath = openerp.modules.get_module_path(module_name)
        if not modpath:
            # module not present on disk: nothing to load
            continue
        for lang in langs:
            lang_code = tools.get_iso_codes(lang)
            base_lang_code = None
            if '_' in lang_code:
                base_lang_code = lang_code.split('_')[0]

            # Step 1: for sub-languages, load base language first (e.g. es_CL.po is loaded over es.po)
            if base_lang_code:
                base_trans_file = openerp.modules.get_module_resource(module_name, 'i18n', base_lang_code + '.po')
                if base_trans_file:
                    _logger.info('module %s: loading base translation file %s for language %s', module_name, base_lang_code, lang)
                    tools.trans_load(cr, base_trans_file, lang, verbose=False, module_name=module_name, context=context)
                    context['overwrite'] = True  # make sure the requested translation will override the base terms later

                # i18n_extra folder is for additional translations handled manually (eg: for l10n_be)
                base_trans_extra_file = openerp.modules.get_module_resource(module_name, 'i18n_extra', base_lang_code + '.po')
                if base_trans_extra_file:
                    _logger.info('module %s: loading extra base translation file %s for language %s', module_name, base_lang_code, lang)
                    tools.trans_load(cr, base_trans_extra_file, lang, verbose=False, module_name=module_name, context=context)
                    context['overwrite'] = True  # make sure the requested translation will override the base terms later

            # Step 2: then load the main translation file, possibly overriding the terms coming from the base language
            trans_file = openerp.modules.get_module_resource(module_name, 'i18n', lang_code + '.po')
            if trans_file:
                _logger.info('module %s: loading translation file (%s) for language %s', module_name, lang_code, lang)
                tools.trans_load(cr, trans_file, lang, verbose=False, module_name=module_name, context=context)
            elif lang_code != 'en_US':
                _logger.warning('module %s: no translation for language %s', module_name, lang_code)

            trans_extra_file = openerp.modules.get_module_resource(module_name, 'i18n_extra', lang_code + '.po')
            if trans_extra_file:
                _logger.info('module %s: loading extra translation file (%s) for language %s', module_name, lang_code, lang)
                tools.trans_load(cr, trans_extra_file, lang, verbose=False, module_name=module_name, context=context)
    return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| |
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Backup swift code.
"""
import bz2
import hashlib
import os
import tempfile
import zlib
from swiftclient import client as swift
from cinder.backup.drivers.swift import SwiftBackupDriver
from cinder import context
from cinder import db
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.tests.backup.fake_swift_client import FakeSwiftClient
LOG = logging.getLogger(__name__)
def fake_md5(arg):
    """Stand-in for hashlib.md5: ignores its input and returns an object
    whose hexdigest() is always the fixed string 'fake-md5-sum'."""
    class _FixedDigest(object):
        def hexdigest(self):
            return 'fake-md5-sum'
    return _FixedDigest()
class BackupSwiftTestCase(test.TestCase):
    """Test Case for swift.

    Exercises SwiftBackupDriver backup/restore/delete paths against
    FakeSwiftClient; hashlib.md5 is stubbed so object checksums are
    deterministic. Error behaviour is driven by magic container names
    (e.g. 'socket_error_on_put') understood by the fake client.
    """

    def _create_volume_db_entry(self):
        """Insert the volume row that the backups under test reference."""
        vol = {'id': '1234-5678-1234-8888',
               'size': 1,
               'status': 'available'}
        return db.volume_create(self.ctxt, vol)['id']

    def _create_backup_db_entry(self, container='test-container'):
        """Insert a backup row (id 123) for the test volume.

        :param container: swift container name; the fake client keys its
            error behaviour off this value
        """
        backup = {'id': 123,
                  'size': 1,
                  'container': container,
                  'volume_id': '1234-5678-1234-8888'}
        return db.backup_create(self.ctxt, backup)['id']

    def setUp(self):
        super(BackupSwiftTestCase, self).setUp()
        self.ctxt = context.get_admin_context()

        # replace the real swift client and md5 with deterministic fakes
        self.stubs.Set(swift, 'Connection', FakeSwiftClient.Connection)
        self.stubs.Set(hashlib, 'md5', fake_md5)

        self._create_volume_db_entry()
        # 128 KiB of random data standing in for the volume contents
        self.volume_file = tempfile.NamedTemporaryFile()
        for i in xrange(0, 128):
            self.volume_file.write(os.urandom(1024))

    def tearDown(self):
        self.volume_file.close()
        super(BackupSwiftTestCase, self).tearDown()

    def test_backup_uncompressed(self):
        self._create_backup_db_entry()
        self.flags(backup_compression_algorithm='none')
        service = SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        service.backup(backup, self.volume_file)

    def test_backup_bz2(self):
        self._create_backup_db_entry()
        self.flags(backup_compression_algorithm='bz2')
        service = SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        service.backup(backup, self.volume_file)

    def test_backup_zlib(self):
        self._create_backup_db_entry()
        self.flags(backup_compression_algorithm='zlib')
        service = SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        service.backup(backup, self.volume_file)

    def test_backup_default_container(self):
        # a NULL container must fall back to the driver default
        self._create_backup_db_entry(container=None)
        service = SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        service.backup(backup, self.volume_file)
        backup = db.backup_get(self.ctxt, 123)
        # assertEqual: assertEquals is a deprecated unittest alias
        self.assertEqual(backup['container'], 'volumebackups')

    def test_backup_custom_container(self):
        container_name = 'fake99'
        self._create_backup_db_entry(container=container_name)
        service = SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        service.backup(backup, self.volume_file)
        backup = db.backup_get(self.ctxt, 123)
        self.assertEqual(backup['container'], container_name)

    def test_create_backup_container_check_wraps_socket_error(self):
        container_name = 'socket_error_on_head'
        self._create_backup_db_entry(container=container_name)
        service = SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        self.assertRaises(exception.SwiftConnectionFailed,
                          service.backup,
                          backup, self.volume_file)

    def test_create_backup_put_object_wraps_socket_error(self):
        container_name = 'socket_error_on_put'
        self._create_backup_db_entry(container=container_name)
        service = SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        self.assertRaises(exception.SwiftConnectionFailed,
                          service.backup,
                          backup, self.volume_file)

    def test_restore(self):
        self._create_backup_db_entry()
        service = SwiftBackupDriver(self.ctxt)

        with tempfile.NamedTemporaryFile() as volume_file:
            backup = db.backup_get(self.ctxt, 123)
            service.restore(backup, '1234-5678-1234-8888', volume_file)

    def test_restore_wraps_socket_error(self):
        container_name = 'socket_error_on_get'
        self._create_backup_db_entry(container=container_name)
        service = SwiftBackupDriver(self.ctxt)

        with tempfile.NamedTemporaryFile() as volume_file:
            backup = db.backup_get(self.ctxt, 123)
            self.assertRaises(exception.SwiftConnectionFailed,
                              service.restore,
                              backup, '1234-5678-1234-8888', volume_file)

    def test_restore_unsupported_version(self):
        container_name = 'unsupported_version'
        self._create_backup_db_entry(container=container_name)
        service = SwiftBackupDriver(self.ctxt)

        with tempfile.NamedTemporaryFile() as volume_file:
            backup = db.backup_get(self.ctxt, 123)
            self.assertRaises(exception.InvalidBackup,
                              service.restore,
                              backup, '1234-5678-1234-8888', volume_file)

    def test_delete(self):
        self._create_backup_db_entry()
        service = SwiftBackupDriver(self.ctxt)
        backup = db.backup_get(self.ctxt, 123)
        service.delete(backup)

    def test_delete_wraps_socket_error(self):
        container_name = 'socket_error_on_delete'
        self._create_backup_db_entry(container=container_name)
        service = SwiftBackupDriver(self.ctxt)
        backup = db.backup_get(self.ctxt, 123)
        self.assertRaises(exception.SwiftConnectionFailed,
                          service.delete,
                          backup)

    def test_get_compressor(self):
        service = SwiftBackupDriver(self.ctxt)
        compressor = service._get_compressor('None')
        self.assertEqual(compressor, None)
        compressor = service._get_compressor('zlib')
        self.assertEqual(compressor, zlib)
        compressor = service._get_compressor('bz2')
        self.assertEqual(compressor, bz2)
        self.assertRaises(ValueError, service._get_compressor, 'fake')

    def test_check_container_exists(self):
        service = SwiftBackupDriver(self.ctxt)
        exists = service._check_container_exists('fake_container')
        self.assertEqual(exists, True)
        exists = service._check_container_exists('missing_container')
        self.assertEqual(exists, False)
        self.assertRaises(swift.ClientException,
                          service._check_container_exists,
                          'unauthorized_container')
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Rico Sennrich
"""Use byte pair encoding (BPE) to learn a variable-length encoding of the vocabulary in a text.
Unlike the original BPE, it does not compress the plain text, but can be used to reduce the vocabulary
of a text to a configurable number of symbols, with only a small increase in the number of tokens.
Reference:
Rico Sennrich, Barry Haddow and Alexandra Birch (2016). Neural Machine Translation of Rare Words with Subword Units.
Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (ACL 2016). Berlin, Germany.
"""
from __future__ import unicode_literals
import os
import sys
import inspect
import codecs
import re
import copy
import warnings
from collections import defaultdict, Counter
def update_vocabulary(vocab, file_name, is_dict=False):
    """Read text and return dictionary that encodes vocabulary.

    :param vocab: Counter (or dict of ints) mapping word -> count;
        updated in place and also returned
    :param file_name: path of a UTF-8 text file
    :param is_dict: if True, each line is "word count"; otherwise lines are
        space-separated words, each occurrence counted once
    :return: the updated vocab
    """
    with codecs.open(file_name, encoding='utf-8') as fobj:
        for i, line in enumerate(fobj):
            if is_dict:
                try:
                    word, count = line.strip('\r\n ').split(' ')
                except ValueError:
                    # narrowed from a bare `except:` which also swallowed
                    # SystemExit/KeyboardInterrupt; only a malformed line
                    # (not exactly two fields) should trigger this path
                    print('Failed reading vocabulary file at line {0}: {1}'.format(i, line))
                    sys.exit(1)
                vocab[word] += int(count)
            else:
                for word in line.strip('\r\n ').split(' '):
                    if word:
                        vocab[word] += 1
    return vocab
def update_pair_statistics(pair, changed, stats, indices):
    """Minimally update the indices and frequency of symbol pairs

    if we merge a pair of symbols, only pairs that overlap with occurrences
    of this pair are affected, and need to be updated.

    :param pair: the (first, second) symbol pair that was just merged
    :param changed: list of (word_index, new_word, old_word, freq) tuples
        for every word where the merge took place (as returned by
        replace_pair)
    :param stats: pair -> frequency mapping, updated in place
    :param indices: pair -> {word_index: count} mapping, updated in place
    """
    # the merged pair no longer exists as two separate symbols anywhere
    stats[pair] = 0
    indices[pair] = defaultdict(int)
    first, second = pair
    new_pair = first+second
    for j, word, old_word, freq in changed:

        # find all instances of pair, and update frequency/indices around it
        i = 0
        while True:
            # find first symbol
            try:
                i = old_word.index(first, i)
            except ValueError:
                break
            # if first symbol is followed by second symbol, we've found an occurrence of pair (old_word[i:i+2])
            if i < len(old_word)-1 and old_word[i+1] == second:
                # assuming a symbol sequence "A B C", if "B C" is merged, reduce the frequency of "A B"
                if i:
                    prev = old_word[i-1:i+1]
                    stats[prev] -= freq
                    indices[prev][j] -= 1
                if i < len(old_word)-2:
                    # assuming a symbol sequence "A B C B", if "B C" is merged, reduce the frequency of "C B".
                    # however, skip this if the sequence is A B C B C, because the frequency of "C B" will be reduced by the previous code block
                    if old_word[i+2] != first or i >= len(old_word)-3 or old_word[i+3] != second:
                        nex = old_word[i+1:i+3]
                        stats[nex] -= freq
                        indices[nex][j] -= 1
                # skip past both symbols of the matched occurrence
                i += 2
            else:
                i += 1

        # second pass: scan the merged word for the new combined symbol
        i = 0
        while True:
            try:
                # find new pair
                i = word.index(new_pair, i)
            except ValueError:
                break
            # assuming a symbol sequence "A BC D", if "B C" is merged, increase the frequency of "A BC"
            if i:
                prev = word[i-1:i+1]
                stats[prev] += freq
                indices[prev][j] += 1
            # assuming a symbol sequence "A BC B", if "B C" is merged, increase the frequency of "BC B"
            # however, if the sequence is A BC BC, skip this step because the count of "BC BC" will be incremented by the previous code block
            if i < len(word)-1 and word[i+1] != new_pair:
                nex = word[i:i+2]
                stats[nex] += freq
                indices[nex][j] += 1
            i += 1
def get_pair_statistics(vocab):
    """Count frequency of all adjacent symbol pairs and build an index
    from each pair to the words (by position in vocab) containing it."""
    # pair -> total frequency across the corpus
    pair_freqs = defaultdict(int)
    # pair -> {word position: number of occurrences in that word}
    pair_index = defaultdict(lambda: defaultdict(int))
    for word_idx, (symbols, freq) in enumerate(vocab):
        previous = symbols[0]
        for current in symbols[1:]:
            key = (previous, current)
            pair_freqs[key] += freq
            pair_index[key][word_idx] += 1
            previous = current
    return pair_freqs, pair_index
def replace_pair(pair, vocab, indices):
    """Replace all occurrences of a symbol pair ('A', 'B') with a new symbol 'AB'"""
    left, right = pair
    # replacement string for re.sub: backslashes must be doubled there
    merged = ''.join(pair).replace('\\', '\\\\')
    # match "left right" only when bounded by whitespace or string edges
    pattern = re.compile(r'(?<!\S)' + re.escape(left + ' ' + right) + r'(?!\S)')
    if sys.version_info < (3, 0):
        items = indices[pair].iteritems()
    else:
        items = indices[pair].items()
    changes = []
    for word_idx, count in items:
        # stale index entries may have dropped to zero or below
        if count < 1:
            continue
        old_symbols, freq = vocab[word_idx]
        joined = pattern.sub(merged, ' '.join(old_symbols))
        new_symbols = tuple(joined.split(' '))
        vocab[word_idx] = (new_symbols, freq)
        changes.append((word_idx, new_symbols, old_symbols, freq))
    return changes
def prune_stats(stats, big_stats, threshold):
    """Prune statistics dict for efficiency of max()

    The frequency of a symbol pair never increases, so pruning is generally
    safe (until the most frequent pair is less frequent than a pair we
    previously pruned). big_stats keeps full statistics for when we need to
    access pruned items.
    """
    # snapshot first: we delete from stats while walking it
    below_threshold = [(pair, freq) for pair, freq in stats.items()
                       if freq < threshold]
    for pair, freq in below_threshold:
        del stats[pair]
        if freq < 0:
            # negative values are deltas, accumulated onto the full stats
            big_stats[pair] += freq
        else:
            big_stats[pair] = freq
def learn_bpe(infile_names, outfile_name, num_symbols, min_frequency=2, verbose=False, is_dict=False, total_symbols=False):
    """Learn num_symbols BPE operations from vocabulary, and write to outfile.

    :param infile_names: list of input text (or vocabulary) file paths
    :param outfile_name: path the learned merge operations are written to
    :param num_symbols: number of merge operations to learn
    :param min_frequency: stop once the best pair is rarer than this
    :param verbose: log each chosen merge to stderr
    :param is_dict: treat input files as "word count" dictionaries
    :param total_symbols: subtract the number of distinct characters from
        num_symbols, so the final symbol inventory is about num_symbols
    """
    # NOTE(review): rebinds the process-wide standard streams to UTF-8
    # wrappers; relies on sys.std*.buffer, i.e. Python 3 only.
    sys.stderr = codecs.getwriter('UTF-8')(sys.stderr.buffer)
    sys.stdout = codecs.getwriter('UTF-8')(sys.stdout.buffer)
    sys.stdin = codecs.getreader('UTF-8')(sys.stdin.buffer)

    #vocab = get_vocabulary(infile, is_dict)
    vocab = Counter()
    for f in infile_names:
        sys.stderr.write(f'Collecting vocab from {f}\n')
        vocab = update_vocabulary(vocab, f, is_dict)

    # split words into symbol tuples, appending '</w>' to the final character
    vocab = dict([(tuple(x[:-1])+(x[-1]+'</w>',) ,y) for (x,y) in vocab.items()])
    sorted_vocab = sorted(vocab.items(), key=lambda x: x[1], reverse=True)

    stats, indices = get_pair_statistics(sorted_vocab)
    # full statistics, kept intact while `stats` gets pruned for speed
    big_stats = copy.deepcopy(stats)

    if total_symbols:
        uniq_char_internal = set()
        uniq_char_final = set()
        for word in vocab:
            for char in word[:-1]:
                uniq_char_internal.add(char)
            uniq_char_final.add(word[-1])
        sys.stderr.write('Number of word-internal characters: {0}\n'.format(len(uniq_char_internal)))
        sys.stderr.write('Number of word-final characters: {0}\n'.format(len(uniq_char_final)))
        sys.stderr.write('Reducing number of merge operations by {0}\n'.format(len(uniq_char_internal) + len(uniq_char_final)))
        num_symbols -= len(uniq_char_internal) + len(uniq_char_final)

    sys.stderr.write(f'Write vocab file to {outfile_name}')
    with codecs.open(outfile_name, 'w', encoding='utf-8') as outfile:
        # version 0.2 changes the handling of the end-of-word token ('</w>');
        # version numbering allows backward compatibility
        outfile.write('#version: 0.2\n')

        # threshold is inspired by Zipfian assumption, but should only affect speed
        threshold = max(stats.values()) / 10
        for i in range(num_symbols):
            if stats:
                # ties broken lexicographically for deterministic output
                most_frequent = max(stats, key=lambda x: (stats[x], x))

            # we probably missed the best pair because of pruning; go back to full statistics
            # NOTE(review): if stats is empty on the very first iteration,
            # most_frequent is unbound below — assumes non-empty input; confirm.
            if not stats or (i and stats[most_frequent] < threshold):
                prune_stats(stats, big_stats, threshold)
                stats = copy.deepcopy(big_stats)
                most_frequent = max(stats, key=lambda x: (stats[x], x))
                # threshold is inspired by Zipfian assumption, but should only affect speed
                threshold = stats[most_frequent] * i/(i+10000.0)
                prune_stats(stats, big_stats, threshold)

            if stats[most_frequent] < min_frequency:
                sys.stderr.write(f'no pair has frequency >= {min_frequency}. Stopping\n')
                break

            if verbose:
                sys.stderr.write('pair {0}: {1} {2} -> {1}{2} (frequency {3})\n'.format(
                    i, most_frequent[0], most_frequent[1], stats[most_frequent]))

            outfile.write('{0} {1}\n'.format(*most_frequent))
            changes = replace_pair(most_frequent, sorted_vocab, indices)
            update_pair_statistics(most_frequent, changes, stats, indices)
            stats[most_frequent] = 0
            # periodic pruning keeps the max() scan above fast
            if not i % 100:
                prune_stats(stats, big_stats, threshold)
| |
import theano.tensor as T
from .. import init
from .. import nonlinearities
from ..utils import as_tuple
from ..theano_extensions import conv, padding
from .base import Layer
__all__ = [
"Conv1DLayer",
"Conv2DLayer",
]
def conv_output_length(input_length, filter_size, stride, pad=0):
    """Helper function to compute the output size of a convolution operation

    This function computes the length along a single axis, which corresponds
    to a 1D convolution. It can also be used for convolutions with higher
    dimensionalities by using it individually for each axis.

    Parameters
    ----------
    input_length : int or None
        The size of the input. If ``None``, ``None`` is returned (the output
        size is unknown).

    filter_size : int
        The size of the filter.

    stride : int
        The stride of the convolution operation.

    pad : int, 'valid', 'full' or 'same' (default: 0)
        By default, the convolution is only computed where the input and the
        filter fully overlap (a valid convolution). When ``stride=1``, this
        yields an output that is smaller than the input by ``filter_size - 1``.

        The `pad` argument allows you to implicitly pad the input with zeros,
        extending the output size.

        A single integer results in symmetric zero-padding of the given size on
        both borders.

        ``'valid'`` is an alias for ``0`` (no padding).

        ``'full'`` pads with one less than the filter size on both sides. This
        is equivalent to computing the convolution wherever the input and the
        filter overlap by at least one position.

        ``'same'`` pads with half the filter size on both sides (one less on
        the second side for an even filter size). When ``stride=1``, this
        results in an output size equal to the input size.

    Returns
    -------
    int or None
        The output size corresponding to the given convolution parameters, or
        ``None`` if `input_length` is unknown.

    Raises
    ------
    ValueError
        When an invalid padding is specified, a `ValueError` is raised.
        (The previous docstring claimed `RuntimeError`; the code has always
        raised `ValueError`.)
    """
    if input_length is None:
        return None
    if pad == 'valid':
        output_length = input_length - filter_size + 1
    elif pad == 'full':
        output_length = input_length + filter_size - 1
    elif pad == 'same':
        output_length = input_length
    elif isinstance(pad, int):
        output_length = input_length + 2 * pad - filter_size + 1
    else:
        raise ValueError('Invalid pad: {0}'.format(pad))

    # This is the integer arithmetic equivalent to
    # np.ceil(output_length / stride)
    output_length = (output_length + stride - 1) // stride

    return output_length
class Conv1DLayer(Layer):
    """
    lasagne.layers.Conv1DLayer(incoming, num_filters, filter_size, stride=1,
    pad=0, untie_biases=False, W=lasagne.init.GlorotUniform(),
    b=lasagne.init.Constant(0.), nonlinearity=lasagne.nonlinearities.rectify,
    convolution=lasagne.theano_extensions.conv.conv1d_mc0, **kwargs)

    1D convolutional layer

    Performs a 1D convolution on its input and optionally adds a bias and
    applies an elementwise nonlinearity.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape. The
        output of this layer should be a 3D tensor, with shape
        ``(batch_size, num_input_channels, input_length)``.

    num_filters : int
        The number of learnable convolutional filters this layer has.

    filter_size : int or iterable of int
        An integer or a 1-element tuple specifying the size of the filters.

    stride : int or iterable of int
        An integer or a 1-element tuple specifying the stride of the
        convolution operation.

    pad : int, iterable of int, 'full', 'same' or 'valid' (default: 0)
        By default, the convolution is only computed where the input and the
        filter fully overlap (a valid convolution). When ``stride=1``, this
        yields an output that is smaller than the input by ``filter_size - 1``.
        The `pad` argument allows you to implicitly pad the input with zeros,
        extending the output size.

        An integer or a 1-element tuple results in symmetric zero-padding of
        the given size on both borders.

        ``'full'`` pads with one less than the filter size on both sides. This
        is equivalent to computing the convolution wherever the input and the
        filter overlap by at least one position.

        ``'same'`` pads with half the filter size (rounded down) on both sides.
        When ``stride=1`` this results in an output size equal to the input
        size. Even filter size is not supported.

        ``'valid'`` is an alias for ``0`` (no padding / a valid convolution).

    untie_biases : bool (default: False)
        If ``False``, the layer will have a bias parameter for each channel,
        which is shared across all positions in this channel. As a result, the
        `b` attribute will be a vector (1D).

        If True, the layer will have separate bias parameters for each
        position in each channel. As a result, the `b` attribute will be a
        matrix (2D).

    W : Theano shared variable, expression, numpy array or callable
        Initial value, expression or initializer for the weights.
        These should be a 3D tensor with shape
        ``(num_filters, num_input_channels, filter_length)``.
        See :func:`lasagne.utils.create_param` for more information.

    b : Theano shared variable, expression, numpy array, callable or ``None``
        Initial value, expression or initializer for the biases. If set to
        ``None``, the layer will have no biases. Otherwise, biases should be
        a 1D array with shape ``(num_filters,)`` if `untied_biases` is set to
        ``False``. If it is set to ``True``, its shape should be
        ``(num_filters, input_length)`` instead.
        See :func:`lasagne.utils.create_param` for more information.

    nonlinearity : callable or None
        The nonlinearity that is applied to the layer activations. If None
        is provided, the layer will be linear.

    convolution : callable
        The convolution implementation to use. The
        `lasagne.theano_extensions.conv` module provides some alternative
        implementations for 1D convolutions, because the Theano API only
        features a 2D convolution implementation. Usually it should be fine
        to leave this at the default value.

    **kwargs
        Any additional keyword arguments are passed to the `Layer` superclass.

    Attributes
    ----------
    W : Theano shared variable or expression
        Variable or expression representing the filter weights.

    b : Theano shared variable or expression
        Variable or expression representing the biases.

    Notes
    -----
    Theano's underlying convolution (:func:`theano.tensor.nnet.conv.conv2d`)
    only supports ``pad=0`` and ``pad='full'``. This layer emulates other modes
    by cropping a full convolution or explicitly padding the input with zeros.
    """
    def __init__(self, incoming, num_filters, filter_size, stride=1,
                 pad=0, untie_biases=False,
                 W=init.GlorotUniform(), b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify,
                 convolution=conv.conv1d_mc0, **kwargs):
        super(Conv1DLayer, self).__init__(incoming, **kwargs)
        # None means a linear layer: substitute the identity nonlinearity
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.num_filters = num_filters
        self.filter_size = as_tuple(filter_size, 1)
        self.stride = as_tuple(stride, 1)
        self.untie_biases = untie_biases
        self.convolution = convolution

        # 'same' is emulated by symmetric cropping/padding, which only
        # works out exactly for odd filter sizes
        if pad == 'same':
            if self.filter_size[0] % 2 == 0:
                raise NotImplementedError(
                    '`same` padding requires odd filter size.')

        # normalize pad: 'valid' -> (0,); 'full'/'same' kept as strings;
        # anything else coerced to a 1-tuple of int
        if pad == 'valid':
            self.pad = (0,)
        elif pad in ('full', 'same'):
            self.pad = pad
        else:
            self.pad = as_tuple(pad, 1, int)

        self.W = self.add_param(W, self.get_W_shape(), name="W")
        if b is None:
            self.b = None
        else:
            if self.untie_biases:
                # one bias per filter and output position
                biases_shape = (num_filters, self.output_shape[2])
            else:
                biases_shape = (num_filters,)
            self.b = self.add_param(b, biases_shape, name="b",
                                    regularizable=False)

    def get_W_shape(self):
        """Get the shape of the weight matrix `W`.

        Returns
        -------
        tuple of int
            The shape of the weight matrix,
            ``(num_filters, num_input_channels, filter_length)``.
        """
        num_input_channels = self.input_shape[1]
        return (self.num_filters, num_input_channels, self.filter_size[0])

    def get_output_shape_for(self, input_shape):
        # string pad modes ('full'/'same') are understood by
        # conv_output_length directly; integer pads come as a 1-tuple
        pad = self.pad if isinstance(self.pad, tuple) else (self.pad,)
        output_length = conv_output_length(input_shape[2],
                                           self.filter_size[0],
                                           self.stride[0],
                                           pad[0])
        return (input_shape[0], self.num_filters, output_length)

    def get_output_for(self, input, input_shape=None, **kwargs):
        # the optional input_shape argument is for when get_output_for is
        # called directly with a different shape than self.input_shape.
        if input_shape is None:
            input_shape = self.input_shape

        if self.stride == (1,) and self.pad == 'same':
            # simulate same convolution by cropping a full convolution
            conved = self.convolution(input, self.W, subsample=self.stride,
                                      image_shape=input_shape,
                                      filter_shape=self.get_W_shape(),
                                      border_mode='full')
            # `or None` keeps the full slice when crop == 0
            crop = self.filter_size[0] // 2
            conved = conved[:, :, crop:-crop or None]
        else:
            # no padding needed, or explicit padding of input needed
            if self.pad == 'full':
                border_mode = 'full'
                pad = (0, 0)
            elif self.pad == 'same':
                border_mode = 'valid'
                # asymmetric padding (one less on the right) for 'same'
                pad = self.filter_size[0] // 2, (self.filter_size[0] - 1) // 2
            else:
                border_mode = 'valid'
                pad = (self.pad[0], self.pad[0])
            if pad != (0, 0):
                input = padding.pad(input, [pad], batch_ndim=2)
                input_shape = (input_shape[0], input_shape[1],
                               None if input_shape[2] is None else
                               input_shape[2] + pad[0] + pad[1])
            conved = self.convolution(input, self.W, subsample=self.stride,
                                      image_shape=input_shape,
                                      filter_shape=self.get_W_shape(),
                                      border_mode=border_mode)

        if self.b is None:
            activation = conved
        elif self.untie_biases:
            # b is (num_filters, length): broadcast over batch only
            activation = conved + self.b.dimshuffle('x', 0, 1)
        else:
            # b is (num_filters,): broadcast over batch and length
            activation = conved + self.b.dimshuffle('x', 0, 'x')

        return self.nonlinearity(activation)
class Conv2DLayer(Layer):
    """
    lasagne.layers.Conv2DLayer(incoming, num_filters, filter_size,
    stride=(1, 1), pad=0, untie_biases=False,
    W=lasagne.init.GlorotUniform(), b=lasagne.init.Constant(0.),
    nonlinearity=lasagne.nonlinearities.rectify,
    convolution=theano.tensor.nnet.conv2d, **kwargs)

    2D convolutional layer

    Performs a 2D convolution on its input and optionally adds a bias and
    applies an elementwise nonlinearity.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape. The
        output of this layer should be a 4D tensor, with shape
        ``(batch_size, num_input_channels, input_rows, input_columns)``.

    num_filters : int
        The number of learnable convolutional filters this layer has.

    filter_size : int or iterable of int
        An integer or a 2-element tuple specifying the size of the filters.

    stride : int or iterable of int
        An integer or a 2-element tuple specifying the stride of the
        convolution operation.

    pad : int, iterable of int, 'full', 'same' or 'valid' (default: 0)
        By default, the convolution is only computed where the input and the
        filter fully overlap (a valid convolution). When ``stride=1``, this
        yields an output that is smaller than the input by ``filter_size - 1``.
        The `pad` argument allows you to implicitly pad the input with zeros,
        extending the output size.

        A single integer results in symmetric zero-padding of the given size on
        all borders, a tuple of two integers allows different symmetric padding
        per dimension.

        ``'full'`` pads with one less than the filter size on both sides. This
        is equivalent to computing the convolution wherever the input and the
        filter overlap by at least one position.

        ``'same'`` pads with half the filter size (rounded down) on both sides.
        When ``stride=1`` this results in an output size equal to the input
        size. Even filter size is not supported.

        ``'valid'`` is an alias for ``0`` (no padding / a valid convolution).

        Note that ``'full'`` and ``'same'`` can be faster than equivalent
        integer values due to optimizations by Theano.

    untie_biases : bool (default: False)
        If ``False``, the layer will have a bias parameter for each channel,
        which is shared across all positions in this channel. As a result, the
        `b` attribute will be a vector (1D).

        If True, the layer will have separate bias parameters for each
        position in each channel. As a result, the `b` attribute will be a
        3D tensor.

    W : Theano shared variable, expression, numpy array or callable
        Initial value, expression or initializer for the weights.
        These should be a 4D tensor with shape
        ``(num_filters, num_input_channels, filter_rows, filter_columns)``.
        See :func:`lasagne.utils.create_param` for more information.

    b : Theano shared variable, expression, numpy array, callable or ``None``
        Initial value, expression or initializer for the biases. If set to
        ``None``, the layer will have no biases. Otherwise, biases should be
        a 1D array with shape ``(num_filters,)`` if `untied_biases` is set to
        ``False``. If it is set to ``True``, its shape should be
        ``(num_filters, output_rows, output_columns)`` instead.
        See :func:`lasagne.utils.create_param` for more information.

    nonlinearity : callable or None
        The nonlinearity that is applied to the layer activations. If None
        is provided, the layer will be linear.

    convolution : callable
        The convolution implementation to use. Usually it should be fine to
        leave this at the default value.

    **kwargs
        Any additional keyword arguments are passed to the `Layer` superclass.

    Attributes
    ----------
    W : Theano shared variable or expression
        Variable or expression representing the filter weights.

    b : Theano shared variable or expression
        Variable or expression representing the biases.

    Notes
    -----
    Theano's underlying convolution (:func:`theano.tensor.nnet.conv.conv2d`)
    only supports ``pad=0`` and ``pad='full'``. This layer emulates other modes
    by cropping a full convolution or explicitly padding the input with zeros.
    """
    def __init__(self, incoming, num_filters, filter_size, stride=(1, 1),
                 pad=0, untie_biases=False,
                 W=init.GlorotUniform(), b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify,
                 convolution=T.nnet.conv2d, **kwargs):
        super(Conv2DLayer, self).__init__(incoming, **kwargs)
        # None means a linear layer: substitute the identity nonlinearity
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.num_filters = num_filters
        self.filter_size = as_tuple(filter_size, 2)
        self.stride = as_tuple(stride, 2)
        self.untie_biases = untie_biases
        self.convolution = convolution

        # 'same' is emulated by symmetric cropping/padding, which only
        # works out exactly for odd filter sizes (checked per dimension)
        if pad == 'same':
            if any(s % 2 == 0 for s in self.filter_size):
                raise NotImplementedError(
                    '`same` padding requires odd filter size.')

        # normalize pad: 'valid' -> (0, 0); 'full'/'same' kept as strings;
        # anything else coerced to a 2-tuple of int
        if pad == 'valid':
            self.pad = (0, 0)
        elif pad in ('full', 'same'):
            self.pad = pad
        else:
            self.pad = as_tuple(pad, 2, int)

        self.W = self.add_param(W, self.get_W_shape(), name="W")
        if b is None:
            self.b = None
        else:
            if self.untie_biases:
                # one bias per filter and output position
                biases_shape = (num_filters, self.output_shape[2],
                                self.output_shape[3])
            else:
                biases_shape = (num_filters,)
            self.b = self.add_param(b, biases_shape, name="b",
                                    regularizable=False)

    def get_W_shape(self):
        """Get the shape of the weight matrix `W`.

        Returns
        -------
        tuple of int
            The shape of the weight matrix,
            ``(num_filters, num_input_channels, filter_rows, filter_columns)``.
        """
        num_input_channels = self.input_shape[1]
        return (self.num_filters, num_input_channels, self.filter_size[0],
                self.filter_size[1])

    def get_output_shape_for(self, input_shape):
        # string pad modes ('full'/'same') are understood by
        # conv_output_length directly; replicate them for both dimensions
        pad = self.pad if isinstance(self.pad, tuple) else (self.pad,) * 2
        output_rows = conv_output_length(input_shape[2],
                                         self.filter_size[0],
                                         self.stride[0],
                                         pad[0])
        output_columns = conv_output_length(input_shape[3],
                                            self.filter_size[1],
                                            self.stride[1],
                                            pad[1])
        return (input_shape[0], self.num_filters, output_rows, output_columns)

    def get_output_for(self, input, input_shape=None, **kwargs):
        # The optional input_shape argument is for when get_output_for is
        # called directly with a different shape than self.input_shape.
        if input_shape is None:
            input_shape = self.input_shape

        if self.stride == (1, 1) and self.pad == 'same':
            # simulate same convolution by cropping a full convolution
            conved = self.convolution(input, self.W, subsample=self.stride,
                                      image_shape=input_shape,
                                      filter_shape=self.get_W_shape(),
                                      border_mode='full')
            # `or None` keeps the full slice when the crop amount is 0
            crop_x = self.filter_size[0] // 2
            crop_y = self.filter_size[1] // 2
            conved = conved[:, :, crop_x:-crop_x or None,
                            crop_y:-crop_y or None]
        else:
            # no padding needed, or explicit padding of input needed
            if self.pad == 'full':
                border_mode = 'full'
                pad = [(0, 0), (0, 0)]
            elif self.pad == 'same':
                border_mode = 'valid'
                pad = [(self.filter_size[0] // 2,
                        self.filter_size[0] // 2),
                       (self.filter_size[1] // 2,
                        self.filter_size[1] // 2)]
            else:
                border_mode = 'valid'
                pad = [(self.pad[0], self.pad[0]), (self.pad[1], self.pad[1])]
            if pad != [(0, 0), (0, 0)]:
                input = padding.pad(input, pad, batch_ndim=2)
                input_shape = (input_shape[0], input_shape[1],
                               None if input_shape[2] is None else
                               input_shape[2] + pad[0][0] + pad[0][1],
                               None if input_shape[3] is None else
                               input_shape[3] + pad[1][0] + pad[1][1])
            conved = self.convolution(input, self.W, subsample=self.stride,
                                      image_shape=input_shape,
                                      filter_shape=self.get_W_shape(),
                                      border_mode=border_mode)

        if self.b is None:
            activation = conved
        elif self.untie_biases:
            # b is (num_filters, rows, cols): broadcast over batch only
            activation = conved + self.b.dimshuffle('x', 0, 1, 2)
        else:
            # b is (num_filters,): broadcast over batch and both spatial dims
            activation = conved + self.b.dimshuffle('x', 0, 'x', 'x')

        return self.nonlinearity(activation)
# TODO: add Conv3DLayer
| |
#!/usr/bin/env python

# NOTE(review): this is an OpenImageIO testsuite driver script. The names
# `command`, `oiiotool`, `info_command`, and OIIO_TESTSUITE_IMAGEDIR are
# presumably pre-defined by the testsuite harness (runtest.py) before this
# file is executed -- confirm against the harness.

# Create some test images we need
command += oiiotool ("--create 320x240 3 -d uint8 -o black.tif")
command += oiiotool ("--pattern constant:color=0.5,0.5,0.5 128x128 3 -d half -o grey128.exr")
command += oiiotool ("--pattern constant:color=0.5,0.5,0.5 64x64 3 -d half -o grey64.exr")
command += oiiotool ("--create 256x256 3 --fill:color=1,.5,.5 256x256 --fill:color=0,1,0 80x80+100+100 -d uint8 -o filled.tif")

# test --autotrim
command += oiiotool ("black.tif --fill:color=0,1,0 80x80+100+100 --autotrim -d uint8 -o autotrim.tif")

# test --colorcount (using the results of the --fill test)
command += oiiotool ("filled.tif --colorcount:eps=.1,.1,.1 0,0,0:1,.5,.5:0,1,0")

# test --rangecheck (using the results of the --fill test)
command += oiiotool ("filled.tif --rangecheck 0,0,0 1,0.9,1")

# test --rangecompress & --rangeexpand
command += oiiotool ("../common/tahoe-small.tif --rangecompress -d uint8 -o rangecompress.tif")
command += oiiotool ("rangecompress.tif --rangeexpand -d uint8 -o rangeexpand.tif")
command += oiiotool ("../common/tahoe-small.tif --rangecompress:luma=1 -d uint8 -o rangecompress-luma.tif")
command += oiiotool ("rangecompress-luma.tif --rangeexpand:luma=1 -d uint8 -o rangeexpand-luma.tif")

# Test --add
command += oiiotool ("--pattern constant:color=.1,.2,.3 64x64+0+0 3 "
                     + " --pattern constant:color=.1,.1,.1 64x64+20+20 3 "
                     + " --add -d half -o add.exr")
# Test --addc val (add to all channels the same scalar)
command += oiiotool ("grey128.exr --addc 0.25 -o cadd1.exr")
# Test --addc val,val,val... (add per-channel scalars)
command += oiiotool ("grey128.exr --addc 0,0.25,-0.25 -o cadd2.exr")

# Test --sub, subc
command += oiiotool ("--pattern constant:color=.1,.2,.3 64x64+0+0 3 "
                     + " --pattern constant:color=.1,.1,.1 64x64+20+20 3 "
                     + " --sub -d half -o sub.exr")
command += oiiotool ("--pattern constant:color=.1,.2,.3 64x64+0+0 3 "
                     + " --subc 0.1,0.1,0.1 -d half -o subc.exr")

# test --mul of images
command += oiiotool ("grey64.exr -pattern constant:color=1.5,1,0.5 64x64 3 --mul -o mul.exr")
# Test --mulc val (multiply all channels by the same scalar)
command += oiiotool ("grey128.exr --mulc 1.5 -o cmul1.exr")
# Test --mulc val,val,val... (multiply per-channel scalars)
command += oiiotool ("grey128.exr --mulc 1.5,1,0.5 -o cmul2.exr")

# Test --divc val (divide all channels by the same scalar)
command += oiiotool ("grey64.exr --divc 2.0 -d half -o divc1.exr")
# Test --divc val,val,val... (divide per-channel scalars)
command += oiiotool ("grey64.exr --divc 2.0,1,0.5 -d half -o divc2.exr")
# Test --div of images
command += oiiotool ("grey64.exr --pattern constant:color=2.0,1,0.5 64x64 3 "
                     + "--div -d half -o div.exr")

# test --mad of images (multiply-add: first * second + third)
command += oiiotool ("grey64.exr -pattern constant:color=1.5,1,0.5 64x64 3 "
                     + "-pattern constant:color=.1,.1,.1 64x64 3 --mad -o mad.exr")

# test --invert
command += oiiotool ("../common/tahoe-small.tif --invert -o invert.tif")

# Test --powc val (raise all channels by the same power)
command += oiiotool ("grey128.exr --powc 2 -o cpow1.exr")
# Test --powc val,val,val... (per-channel powers)
command += oiiotool ("grey128.exr --powc 2,2,1 -o cpow2.exr")

# Test --abs, --absdiff, --absdiffc
# First, make a test image that's 0.5 on the left, -0.5 on the right
command += oiiotool ("-pattern constant:color=-0.25,-0.25,-0.25 64x128 3 "
                     + "-pattern constant:color=0.5,0.5,0.5 64x128 3 "
                     + "-mosaic 2x1 -d half -o negpos.exr")
command += oiiotool ("negpos.exr -abs -o abs.exr")
command += oiiotool ("negpos.exr -pattern constant:color=0.2,0.2,0.2 128x128 3 "
                     + "-absdiff -d half -o absdiff.exr")
command += oiiotool ("negpos.exr -absdiffc 0.2,0.2,0.2 -d half -o absdiffc.exr")

# test --chsum (weighted channel sum; weights here are Rec.709 luma)
command += oiiotool ("../common/tahoe-small.tif --chsum:weight=.2126,.7152,.0722 "
                     + "-d uint8 -o chsum.tif")

# test --trim
command += oiiotool ("--create 320x240 3 -fill:color=.1,.5,.1 120x80+50+70 "
                     + " -rotate 30 -trim -origin +0+0 -fullpixels -d uint8 -o trim.tif")

# test --trim, tricky case of multiple subimages
command += oiiotool ( "-a --create 320x240 3 -fill:color=.1,.5,.1 120x80+50+70 -rotate 30 "
                     + "--create 320x240 3 -fill:color=.5,.5,.1 100x10+70+70 -rotate 140 "
                     + "--siappend -trim -origin +0+0 -fullpixels -d uint8 -o trimsubimages.tif")

# test channel shuffling
command += oiiotool (OIIO_TESTSUITE_IMAGEDIR + "/grid.tif"
                     + " --ch =0.25,B,G -o chanshuffle.tif")

# test --ch to separate RGBA from an RGBAZ file
command += oiiotool ("src/rgbaz.exr --ch R,G,B,A -o ch-rgba.exr")
command += oiiotool ("src/rgbaz.exr --ch Z -o ch-z.exr")

# test --chappend to merge RGBA and Z
command += oiiotool ("ch-rgba.exr ch-z.exr --chappend -o chappend-rgbaz.exr")

# test --chnames to rename channels
command += oiiotool ("src/rgbaz.exr --chnames Red,,,,Depth -o chname.exr")
command += info_command ("chname.exr", safematch=1)

# test -d to change data formats
command += oiiotool ("src/rgbaz.exr -d half -o allhalf.exr")
command += info_command ("allhalf.exr", safematch=1)

# test -d NAME=fmt to change data format of one channel, and to make
# sure oiiotool will output per-channel formats.
command += oiiotool ("src/rgbaz.exr -d half -d Z=float -o rgbahalf-zfloat.exr")
command += info_command ("rgbahalf-zfloat.exr", safematch=1)

# test hole filling
command += oiiotool ("ref/hole.tif --fillholes -o tahoe-filled.tif")

# test hole filling for a cropped image
command += oiiotool ("-pattern checker 64x64+32+32 3 -ch R,G,B,A=1.0 -fullsize 128x128+0+0 --croptofull -fillholes -d uint8 -o growholes.tif")

# Test --min/--max (pixel-wise min/max of two images)
command += oiiotool ("--pattern fill:top=0,0,0:bottom=1,1,1 64x64 3 "
                     + "--pattern fill:left=0,0,0:right=1,1,1 64x64 3 "
                     + "--min -d uint8 -o min.exr")
command += oiiotool ("--pattern fill:top=0,0,0:bottom=1,1,1 64x64 3 "
                     + "--pattern fill:left=0,0,0:right=1,1,1 64x64 3 "
                     + "--max -d uint8 -o max.exr")

# Test --minc/maxc val (min to all channels the same scalar)
command += oiiotool ("--pattern fill:top=0,0,0:bottom=1,1,1 64x64 3 "
                     + "--minc 0.25 -o cmin1.exr")
command += oiiotool ("--pattern fill:top=0,0,0:bottom=1,1,1 64x64 3 "
                     + "--maxc 0.75 -o cmax1.exr")
# Test --minc/maxc val,val,val... (min per-channel scalars)
command += oiiotool ("--pattern fill:top=0,0,0:bottom=1,1,1 64x64 3 "
                     + "--minc 0.75,0.5,0.25 -o cmin2.exr")
command += oiiotool ("--pattern fill:top=0,0,0:bottom=1,1,1 64x64 3 "
                     + "--maxc 0.75,0.5,0.25 -o cmax2.exr")

# test --maxchan, --minchan (collapse to the max/min across channels)
command += oiiotool ("--pattern fill:topleft=0,0,0.2:topright=1,0,0.2:bottomleft=0,1,0.2:bottomright=1,1,0.2 100x100 3 " +
                     " --maxchan -d uint8 -o maxchan.tif")
command += oiiotool ("--pattern fill:topleft=0,0,0.8:topright=1,0,0.8:bottomleft=0,1,0.8:bottomright=1,1,0.8 100x100 3 " +
                     " --minchan -d uint8 -o minchan.tif")

# test clamping
command += oiiotool (OIIO_TESTSUITE_IMAGEDIR + "/grid.tif --resize 50%"
                     + " --clamp:min=0.2:max=,,0.5,1 -o grid-clamped.tif")

# test kernel
command += oiiotool ("--kernel bspline 15x15 -o bsplinekernel.exr")

# test convolve (against the bspline kernel generated above)
command += oiiotool ("../common/tahoe-small.tif --kernel bspline 15x15 --convolve "
                     + "-d uint8 -o bspline-blur.tif")

# test blur
command += oiiotool ("../common/tahoe-small.tif --blur 5x5 -d uint8 -o gauss5x5-blur.tif")

# test median filter
command += oiiotool ("../common/tahoe-small.tif --median 5x5 -d uint8 -o tahoe-median.tif")

# test dilate and erode
# NOTE(review): the commented-out lines below build morphsource.tif and the
# open/close/tophat variants; they are kept for reference but the checked-in
# src/morphsource.tif is used instead.
# command += oiiotool ("--pattern constant:color=0.1,0.1,0.1 80x64 3 --text:x=8:y=54:size=40:font=DroidSerif Aai -o morphsource.tif")
command += oiiotool ("src/morphsource.tif --dilate 3x3 -d uint8 -o dilate.tif")
command += oiiotool ("src/morphsource.tif --erode 3x3 -d uint8 -o erode.tif")
# command += oiiotool ("morphsource.tif --erode 3x3 --dilate 3x3 -d uint8 -o morphopen.tif")
# command += oiiotool ("morphsource.tif --dilate 3x3 --erode 3x3 -d uint8 -o morphclose.tif")
# command += oiiotool ("morphsource.tif morphopen.tif -sub -d uint8 -o tophat.tif")
# command += oiiotool ("morphclose.tif morphsource.tif -sub -d uint8 -o bottomhat.tif")

# test unsharp mask
command += oiiotool ("../common/tahoe-small.tif --unsharp -d uint8 -o unsharp.tif")

# test unsharp mask with median filter
command += oiiotool ("../common/tahoe-small.tif --unsharp:kernel=median -d uint8 -o unsharp-median.tif")

# test laplacian
command += oiiotool ("../common/tahoe-tiny.tif --laplacian -d uint8 -o tahoe-laplacian.tif")

# test fft, ifft
command += oiiotool ("../common/tahoe-tiny.tif --ch 2 --fft -d float -o fft.exr")
command += oiiotool ("fft.exr --ifft --ch 0 -d float -o ifft.exr")

# test --polar, --unpolar
# note that fft.exr that we built above is in complex form
command += oiiotool ("fft.exr --polar -d float -o polar.exr")
command += oiiotool ("polar.exr --unpolar -d float -o unpolar.exr")

# test labels (naming stack entries and recalling them by label)
command += oiiotool (
    " --pattern constant:color=0.5,0.0,0.0 128x128 3 --label R " +
    " --pattern constant:color=0.0,0.5,0.0 128x128 3 --label G " +
    " --pattern constant:color=0.5,0.0,0.0 128x128 3 --label B " +
    " --pop --pop --pop " +
    " R G --add -d half -o labeladd.exr")

# test subimages
command += oiiotool ("--pattern constant:color=0.5,0.0,0.0 64x64 3 " +
                     "--pattern constant:color=0.0,0.5,0.0 64x64 3 " +
                     "--siappend -d half -o subimages-2.exr")
command += oiiotool ("--pattern constant:color=0.5,0.0,0.0 64x64 3 --text A -attrib oiio:subimagename layerA " +
                     "--pattern constant:color=0.0,0.5,0.0 64x64 3 --text B -attrib oiio:subimagename layerB " +
                     "--pattern constant:color=0.0,0.0,0.5 64x64 3 --text C -attrib oiio:subimagename layerC " +
                     "--pattern constant:color=0.5,0.5,0.0 64x64 3 --text D -attrib oiio:subimagename layerD " +
                     "--siappendall -d half -o subimages-4.exr")
command += oiiotool ("subimages-4.exr --subimage 3 -o subimageD3.exr")
command += oiiotool ("subimages-4.exr --subimage layerB -o subimageB1.exr")
command += oiiotool ("subimages-4.exr --subimage:delete=1 layerB -o subimage-noB.exr")
command += oiiotool ("subimages-2.exr --sisplit -o subimage2.exr " +
                     "--pop -o subimage1.exr")
command += oiiotool ("subimages-4.exr -cmul:subimages=0,2 0.5 -o subimage-individual.exr")

# Test --statsnow
command += oiiotool ("../common/tahoe-tiny.tif --echo \"--printstats:\" --printstats")
command += oiiotool ("../common/tahoe-tiny.tif --printstats:window=10x10+50+50 --echo \" \"")

# test --iconfig
command += oiiotool ("--info -v -metamatch Debug --iconfig oiio:DebugOpenConfig! 1 black.tif")

# test -i:ch=... (channel subset straight from the reader)
command += oiiotool ("--pattern fill:color=.6,.5,.4,.3,.2 64x64 5 -d uint8 -o const5.tif")
command += oiiotool ("-i:ch=R,G,B const5.tif -o const5-rgb.tif")

# Test that combining two images, if the first has no alpha but the second
# does, gets the right channel names instead of just copying from the first.
command += oiiotool ("-pattern constant:color=1,0,0 64x64 3 -pattern constant:color=0,1,0,1 64x64 4 -add -o add_rgb_rgba.exr")
command += info_command ("add_rgb_rgba.exr", safematch=True)

# Test --missingfile
# ("|| true" keeps the harness going even though the missing file errors.)
command += oiiotool ("--create 320x240 4 --box:color=1,0,0,1:fill=1 10,10,200,100 -d uint8 -o box.tif")
# Test again using --missingfile black
command += oiiotool ("--missingfile black box.tif missing.tif --over -o box_over_missing2.tif || true")
# Test again using --missingfile checker
command += oiiotool ("--missingfile checker box.tif missing.tif --over -o box_over_missing3.tif || true")

# Test --dumpdata
command += oiiotool ("--pattern fill:left=0,0,0:right=1,1,0 2x2 3 -d half -o dump.exr")
command += oiiotool ("-echo dumpdata: --dumpdata dump.exr")
command += oiiotool ("-echo dumpdata:C --dumpdata:C=data dump.exr")

# To add more tests, just append more lines like the above and also add
# the new 'feature.tif' (or whatever you call it) to the outputs list,
# below.
# Outputs to check against references
# Each file listed here is diffed against the checked-in reference copy by
# the testsuite harness; "out.txt" is the captured command output.
outputs = [
    "filled.tif",
    "autotrim.tif",
    "trim.tif", "trimsubimages.tif",
    "chanshuffle.tif", "ch-rgba.exr", "ch-z.exr",
    "chappend-rgbaz.exr", "chname.exr",
    "add.exr", "cadd1.exr", "cadd2.exr",
    "sub.exr", "subc.exr",
    "mul.exr", "cmul1.exr", "cmul2.exr",
    "div.exr", "divc1.exr", "divc2.exr",
    "mad.exr", "invert.tif",
    "cpow1.exr", "cpow2.exr",
    "abs.exr", "absdiff.exr", "absdiffc.exr",
    "chsum.tif",
    "rgbahalf-zfloat.exr",
    "tahoe-filled.tif", "growholes.tif",
    "rangecompress.tif", "rangeexpand.tif",
    "rangecompress-luma.tif", "rangeexpand-luma.tif",
    "min.exr", "cmin1.exr", "cmin2.exr",
    "max.exr", "cmax1.exr", "cmax2.exr",
    "maxchan.tif", "minchan.tif",
    "grid-clamped.tif",
    "bsplinekernel.exr", "bspline-blur.tif",
    "gauss5x5-blur.tif", "tahoe-median.tif",
    "dilate.tif", "erode.tif",
    "unsharp.tif", "unsharp-median.tif", "tahoe-laplacian.tif",
    "fft.exr", "ifft.exr",
    "polar.exr", "unpolar.exr",
    "subimages-2.exr",
    "subimages-4.exr",
    "subimageD3.exr",
    "subimageB1.exr",
    "subimage-noB.exr",
    "subimage-individual.exr",
    "subimage1.exr",
    "labeladd.exr",
    "const5-rgb.tif",
    "box_over_missing2.tif",
    "box_over_missing3.tif",
    "out.txt" ]
#print "Running this command:\n" + command + "\n"
| |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test cases for the firebase_admin.auth module."""
import json
import os
import time
from google.auth import crypt
from google.auth import exceptions
from google.auth import jwt
import google.oauth2.id_token
import pytest
import six
import firebase_admin
from firebase_admin import auth
from firebase_admin import credentials
from firebase_admin import _user_mgt
from tests import testutils
# Audience claim every Firebase custom token must carry (the Identity
# Toolkit service name).
FIREBASE_AUDIENCE = ('https://identitytoolkit.googleapis.com/'
                     'google.identity.identitytoolkit.v1.IdentityToolkit')

# Canonical uid used throughout the custom-token tests.
MOCK_UID = 'user1'
# Service-account certificate credential backing the test apps.
MOCK_CREDENTIAL = credentials.Certificate(
    testutils.resource_filename('service_account.json'))
# Mock public certs / private RSA key used to verify and sign test tokens.
MOCK_PUBLIC_CERTS = testutils.resource('public_certs.json')
MOCK_PRIVATE_KEY = testutils.resource('private_key.pem')
MOCK_SERVICE_ACCOUNT_EMAIL = MOCK_CREDENTIAL.service_account_email

# Values that are not valid strings / booleans / dicts, for negative tests.
INVALID_STRINGS = [None, '', 0, 1, True, False, list(), tuple(), dict()]
INVALID_BOOLS = [None, '', 'foo', 0, 1, list(), tuple(), dict()]
INVALID_DICTS = [None, 'foo', 0, 1, True, False, list(), tuple()]

# Canned backend response used by the get_user tests.
MOCK_GET_USER_RESPONSE = testutils.resource('get_user.json')
class AuthFixture(object):
    """Thin wrapper that directs auth calls at either the default app or a
    named app, so each test can run against both configurations."""

    def __init__(self, name=None):
        # No name means "use the implicit default app" (app stays None).
        self.app = firebase_admin.get_app(name) if name else None

    def create_custom_token(self, *args):
        if self.app is None:
            return auth.create_custom_token(*args)
        return auth.create_custom_token(*args, app=self.app)

    def verify_id_token(self, *args):
        if self.app is None:
            return auth.verify_id_token(*args)
        return auth.verify_id_token(*args, app=self.app)
def setup_module():
    """Initialize the default app and a second app named 'testApp'."""
    firebase_admin.initialize_app(MOCK_CREDENTIAL)
    firebase_admin.initialize_app(MOCK_CREDENTIAL, name='testApp')
def teardown_module():
    """Delete both apps created in setup_module()."""
    firebase_admin.delete_app(firebase_admin.get_app())
    firebase_admin.delete_app(firebase_admin.get_app('testApp'))
@pytest.fixture(params=[None, 'testApp'], ids=['DefaultApp', 'CustomApp'])
def authtest(request):
    """Returns an AuthFixture instance.

    Instances returned by this fixture are parameterized to use either the
    default App instance, or a custom App instance named 'testApp'. Due to
    this parameterization, each test case that depends on this fixture will
    get executed twice (as two test cases); once with the default App, and
    once with the custom App.
    """
    return AuthFixture(request.param)
@pytest.fixture
def non_cert_app():
    """Returns an App instance initialized with a mock non-cert credential.

    The lines of code following the yield statement are guaranteed to run
    after each test case that depends on this fixture. This ensures the
    proper cleanup of the App instance after tests.
    """
    app = firebase_admin.initialize_app(testutils.MockCredential(), name='non-cert-app')
    yield app
    firebase_admin.delete_app(app)
def verify_custom_token(custom_token, expected_claims):
    """Decode *custom_token* against the mock certs and assert its header,
    standard claims, and any expected developer claims."""
    assert isinstance(custom_token, six.binary_type)
    token = google.oauth2.id_token.verify_token(
        custom_token,
        testutils.MockRequest(200, MOCK_PUBLIC_CERTS),
        FIREBASE_AUDIENCE)
    # Standard claims: uid plus issuer/subject set to the service account.
    for field, expected in (('uid', MOCK_UID),
                            ('iss', MOCK_SERVICE_ACCOUNT_EMAIL),
                            ('sub', MOCK_SERVICE_ACCOUNT_EMAIL)):
        assert token[field] == expected
    header = jwt.decode_header(custom_token)
    assert header.get('typ') == 'JWT'
    assert header.get('alg') == 'RS256'
    if expected_claims:
        claims = token['claims']
        for key, value in expected_claims.items():
            assert claims[key] == value
def _merge_jwt_claims(defaults, overrides):
    """Merge *overrides* into *defaults* in place and return it.

    An override whose value is None removes that claim from the result
    entirely rather than storing None.
    """
    defaults.update(overrides)
    dropped = [key for key, value in overrides.items() if value is None]
    for key in dropped:
        del defaults[key]
    return defaults
def get_id_token(payload_overrides=None, header_overrides=None):
    """Build a signed mock Firebase ID token.

    Starts from a valid header/payload pair, applies the given overrides
    (an override value of None deletes that claim), and signs the result
    with the mock RSA private key.
    """
    signer = crypt.RSASigner.from_string(MOCK_PRIVATE_KEY)
    # 'kid' presumably matches a key id in MOCK_PUBLIC_CERTS -- verify
    # against tests/data/public_certs.json.
    headers = {
        'kid': 'mock-key-id-1'
    }
    # Valid-by-default claims: issued slightly in the past, expires in 1h.
    payload = {
        'aud': MOCK_CREDENTIAL.project_id,
        'iss': 'https://securetoken.google.com/' + MOCK_CREDENTIAL.project_id,
        'iat': int(time.time()) - 100,
        'exp': int(time.time()) + 3600,
        'sub': '1234567890',
        'admin': True,
    }
    if header_overrides:
        headers = _merge_jwt_claims(headers, header_overrides)
    if payload_overrides:
        payload = _merge_jwt_claims(payload, payload_overrides)
    return jwt.encode(signer, payload, header=headers)
# A fully valid ID token shared by the tests below (binary/bytes form; see
# TestVerifyIdToken.valid_tokens which also decodes it to text).
TEST_ID_TOKEN = get_id_token()
class TestCreateCustomToken(object):
    """Tests for auth.create_custom_token()."""

    # name -> (uid, developer_claims) pairs that must mint a valid token
    valid_args = {
        'Basic': (MOCK_UID, {'one': 2, 'three': 'four'}),
        'NoDevClaims': (MOCK_UID, None),
        'EmptyDevClaims': (MOCK_UID, {}),
    }

    # name -> (uid, developer_claims, expected exception type)
    invalid_args = {
        'NoUid': (None, None, ValueError),
        'EmptyUid': ('', None, ValueError),
        'LongUid': ('x'*129, None, ValueError),
        'BoolUid': (True, None, ValueError),
        'IntUid': (1, None, ValueError),
        'ListUid': ([], None, ValueError),
        'EmptyDictUid': ({}, None, ValueError),
        'NonEmptyDictUid': ({'a':1}, None, ValueError),
        'BoolClaims': (MOCK_UID, True, ValueError),
        'IntClaims': (MOCK_UID, 1, ValueError),
        'StrClaims': (MOCK_UID, 'foo', ValueError),
        'ListClaims': (MOCK_UID, [], ValueError),
        'TupleClaims': (MOCK_UID, (1, 2), ValueError),
        # 'sub' is reserved by the JWT spec and may not appear in dev claims.
        'ReservedClaims': (MOCK_UID, {'sub':'1234'}, ValueError),
    }

    @pytest.mark.parametrize('user,claims', valid_args.values(),
                             ids=list(valid_args))
    def test_valid_params(self, authtest, user, claims):
        verify_custom_token(authtest.create_custom_token(user, claims), claims)

    @pytest.mark.parametrize('user,claims,error', invalid_args.values(),
                             ids=list(invalid_args))
    def test_invalid_params(self, authtest, user, claims, error):
        with pytest.raises(error):
            authtest.create_custom_token(user, claims)

    def test_noncert_credential(self, non_cert_app):
        # Signing requires a certificate credential; a mock credential
        # cannot mint custom tokens.
        with pytest.raises(ValueError):
            auth.create_custom_token(MOCK_UID, app=non_cert_app)
class TestVerifyIdToken(object):
    """Tests for auth.verify_id_token()."""

    # Tokens that must verify successfully (bytes and text forms).
    valid_tokens = {
        'BinaryToken': TEST_ID_TOKEN,
        'TextToken': TEST_ID_TOKEN.decode('utf-8'),
    }

    # Tampered/expired tokens and non-string junk; all must raise ValueError.
    invalid_tokens = {
        'NoKid': get_id_token(header_overrides={'kid': None}),
        'WrongKid': get_id_token(header_overrides={'kid': 'foo'}),
        'BadAudience': get_id_token({'aud': 'bad-audience'}),
        'BadIssuer': get_id_token({
            'iss': 'https://securetoken.google.com/wrong-issuer'
        }),
        'EmptySubject': get_id_token({'sub': ''}),
        'IntSubject': get_id_token({'sub': 10}),
        'LongStrSubject': get_id_token({'sub': 'a' * 129}),
        'FutureToken': get_id_token({'iat': int(time.time()) + 1000}),
        'ExpiredToken': get_id_token({
            'iat': int(time.time()) - 10000,
            'exp': int(time.time()) - 3600
        }),
        'NoneToken': None,
        'EmptyToken': '',
        'BoolToken': True,
        'IntToken': 1,
        'ListToken': [],
        'EmptyDictToken': {},
        'NonEmptyDictToken': {'a': 1},
        'BadFormatToken': 'foobar'
    }

    def setup_method(self):
        # Route certificate fetches to a mock transport serving the test certs.
        auth._request = testutils.MockRequest(200, MOCK_PUBLIC_CERTS)

    @pytest.mark.parametrize('id_token', valid_tokens.values(), ids=list(valid_tokens))
    def test_valid_token(self, authtest, id_token):
        claims = authtest.verify_id_token(id_token)
        assert claims['admin'] is True
        assert claims['uid'] == claims['sub']

    @pytest.mark.parametrize('id_token', invalid_tokens.values(),
                             ids=list(invalid_tokens))
    def test_invalid_token(self, authtest, id_token):
        with pytest.raises(ValueError):
            authtest.verify_id_token(id_token)

    def test_project_id_env_var(self, non_cert_app):
        # Without a cert credential the project id is read from the
        # environment; set it for the call and restore the original value.
        gcloud_project = os.environ.get(auth.GCLOUD_PROJECT_ENV_VAR)
        try:
            os.environ[auth.GCLOUD_PROJECT_ENV_VAR] = MOCK_CREDENTIAL.project_id
            claims = auth.verify_id_token(TEST_ID_TOKEN, non_cert_app)
            assert claims['admin'] is True
        finally:
            if gcloud_project:
                os.environ[auth.GCLOUD_PROJECT_ENV_VAR] = gcloud_project
            else:
                del os.environ[auth.GCLOUD_PROJECT_ENV_VAR]

    def test_no_project_id(self, non_cert_app):
        # With neither a cert credential nor the env var, verification
        # cannot determine the expected audience and must fail.
        gcloud_project = os.environ.get(auth.GCLOUD_PROJECT_ENV_VAR)
        if gcloud_project:
            del os.environ[auth.GCLOUD_PROJECT_ENV_VAR]
        try:
            with pytest.raises(ValueError):
                auth.verify_id_token(TEST_ID_TOKEN, non_cert_app)
        finally:
            if gcloud_project:
                os.environ[auth.GCLOUD_PROJECT_ENV_VAR] = gcloud_project

    def test_custom_token(self, authtest):
        # A custom token is not an ID token and must be rejected.
        id_token = authtest.create_custom_token(MOCK_UID)
        with pytest.raises(ValueError):
            authtest.verify_id_token(id_token)

    def test_certificate_request_failure(self, authtest):
        auth._request = testutils.MockRequest(404, 'not found')
        with pytest.raises(exceptions.TransportError):
            authtest.verify_id_token(TEST_ID_TOKEN)
@pytest.fixture(scope='module')
def user_mgt_app():
    """Module-scoped App with a mock (non-cert) credential for the user
    management tests; deleted once the module finishes."""
    app = firebase_admin.initialize_app(testutils.MockCredential(), name='userMgt')
    yield app
    firebase_admin.delete_app(app)
def _instrument_user_manager(app, status, payload):
    """Patch *app*'s user manager so Identity Toolkit calls hit a mock
    transport returning (*status*, *payload*).

    Returns the patched manager together with the recorder list that
    captures every outgoing request.
    """
    recorder = []
    adapter = testutils.MockAdapter(payload, status, recorder)
    manager = auth._get_auth_service(app).user_manager
    manager._session.mount(_user_mgt.ID_TOOLKIT_URL, adapter)
    return manager, recorder
def _check_user_record(user):
    """Assert that *user* matches the canonical mock user in get_user.json."""
    scalar_expectations = (
        ('uid', 'testuser'),
        ('email', 'testuser@example.com'),
        ('phone_number', '+1234567890'),
        ('display_name', 'Test User'),
        ('photo_url', 'http://www.example.com/testuser/photo.png'),
        ('provider_id', 'firebase'),
    )
    for attr, expected in scalar_expectations:
        assert getattr(user, attr) == expected
    # Identity checks deliberately use `is` to reject 0/1 stand-ins.
    assert user.disabled is False
    assert user.email_verified is True
    assert user.user_metadata.creation_timestamp == 1234567890
    assert user.user_metadata.last_sign_in_timestamp is None
    # Two linked providers: password first, then phone.
    assert len(user.provider_data) == 2
    provider_expectations = (
        {'uid': 'testuser@example.com', 'email': 'testuser@example.com',
         'phone_number': None, 'display_name': 'Test User',
         'photo_url': 'http://www.example.com/testuser/photo.png',
         'provider_id': 'password'},
        {'uid': '+1234567890', 'email': None, 'phone_number': '+1234567890',
         'display_name': None, 'photo_url': None, 'provider_id': 'phone'},
    )
    for provider, expected in zip(user.provider_data, provider_expectations):
        for attr, value in expected.items():
            if value is None:
                assert getattr(provider, attr) is None
            else:
                assert getattr(provider, attr) == value
class TestUserRecord(object):
    """Tests for the UserRecord / UserMetadata / _ProviderUserInfo wrappers."""

    # Input dict must be non-empty, and must not contain unsupported keys.
    @pytest.mark.parametrize('data', INVALID_DICTS + [{}, {'foo':'bar'}])
    def test_invalid_record(self, data):
        with pytest.raises(ValueError):
            auth.UserRecord(data)

    @pytest.mark.parametrize('data', INVALID_DICTS)
    def test_invalid_metadata(self, data):
        with pytest.raises(ValueError):
            auth.UserMetadata(data)

    def test_metadata(self):
        # Present keys are surfaced; missing keys map to None.
        metadata = auth.UserMetadata({'createdAt' : 10, 'lastLoginAt' : 20})
        assert metadata.creation_timestamp == 10
        assert metadata.last_sign_in_timestamp == 20
        metadata = auth.UserMetadata({})
        assert metadata.creation_timestamp is None
        assert metadata.last_sign_in_timestamp is None

    @pytest.mark.parametrize('data', INVALID_DICTS + [{}, {'foo':'bar'}])
    def test_invalid_provider(self, data):
        with pytest.raises(ValueError):
            auth._ProviderUserInfo(data)
class TestGetUser(object):
    """Tests for auth.get_user(), get_user_by_email() and
    get_user_by_phone_number()."""

    # Identifiers matching the canned MOCK_GET_USER_RESPONSE record.
    VALID_UID = 'testuser'
    VALID_EMAIL = 'testuser@example.com'
    VALID_PHONE = '+1234567890'

    @pytest.mark.parametrize('arg', INVALID_STRINGS + ['a'*129])
    def test_invalid_get_user(self, arg):
        with pytest.raises(ValueError):
            auth.get_user(arg)

    def test_get_user(self, user_mgt_app):
        _instrument_user_manager(user_mgt_app, 200, MOCK_GET_USER_RESPONSE)
        _check_user_record(auth.get_user(self.VALID_UID, user_mgt_app))

    @pytest.mark.parametrize('arg', INVALID_STRINGS + ['not-an-email'])
    def test_invalid_get_user_by_email(self, arg):
        with pytest.raises(ValueError):
            auth.get_user_by_email(arg)

    def test_get_user_by_email(self, user_mgt_app):
        _instrument_user_manager(user_mgt_app, 200, MOCK_GET_USER_RESPONSE)
        _check_user_record(auth.get_user_by_email(self.VALID_EMAIL, user_mgt_app))

    @pytest.mark.parametrize('arg', INVALID_STRINGS + ['not-a-phone'])
    def test_invalid_get_user_by_phone(self, arg):
        with pytest.raises(ValueError):
            auth.get_user_by_phone_number(arg)

    def test_get_user_by_phone(self, user_mgt_app):
        _instrument_user_manager(user_mgt_app, 200, MOCK_GET_USER_RESPONSE)
        _check_user_record(auth.get_user_by_phone_number(self.VALID_PHONE, user_mgt_app))

    def test_get_user_non_existing(self, user_mgt_app):
        # An empty user list from the backend maps to USER_NOT_FOUND_ERROR.
        _instrument_user_manager(user_mgt_app, 200, '{"users":[]}')
        with pytest.raises(auth.AuthError) as excinfo:
            auth.get_user('nonexistentuser', user_mgt_app)
        assert excinfo.value.code == _user_mgt.USER_NOT_FOUND_ERROR

    def test_get_user_http_error(self, user_mgt_app):
        # Backend 5xx responses surface as INTERNAL_ERROR with the raw body.
        _instrument_user_manager(user_mgt_app, 500, '{"error":"test"}')
        with pytest.raises(auth.AuthError) as excinfo:
            auth.get_user('testuser', user_mgt_app)
        assert excinfo.value.code == _user_mgt.INTERNAL_ERROR
        assert '{"error":"test"}' in str(excinfo.value)

    def test_get_user_by_email_http_error(self, user_mgt_app):
        _instrument_user_manager(user_mgt_app, 500, '{"error":"test"}')
        with pytest.raises(auth.AuthError) as excinfo:
            auth.get_user_by_email('non.existent.user@example.com', user_mgt_app)
        assert excinfo.value.code == _user_mgt.INTERNAL_ERROR
        assert '{"error":"test"}' in str(excinfo.value)

    def test_get_user_by_phone_http_error(self, user_mgt_app):
        _instrument_user_manager(user_mgt_app, 500, '{"error":"test"}')
        with pytest.raises(auth.AuthError) as excinfo:
            auth.get_user_by_phone_number(self.VALID_PHONE, user_mgt_app)
        assert excinfo.value.code == _user_mgt.INTERNAL_ERROR
        assert '{"error":"test"}' in str(excinfo.value)
class TestCreateUser(object):
    """Tests for auth.create_user()."""

    @pytest.mark.parametrize('arg', INVALID_STRINGS + ['a'*129])
    def test_invalid_uid(self, arg):
        with pytest.raises(ValueError):
            auth.create_user(uid=arg)

    @pytest.mark.parametrize('arg', INVALID_STRINGS + ['not-an-email'])
    def test_invalid_email(self, arg):
        with pytest.raises(ValueError):
            auth.create_user(email=arg)

    @pytest.mark.parametrize('arg', INVALID_STRINGS + ['not-a-phone', '+'])
    def test_invalid_phone(self, arg):
        with pytest.raises(ValueError):
            auth.create_user(phone_number=arg)

    @pytest.mark.parametrize('arg', INVALID_STRINGS)
    def test_invalid_display_name(self, arg):
        with pytest.raises(ValueError):
            auth.create_user(display_name=arg)

    @pytest.mark.parametrize('arg', INVALID_STRINGS + ['not-a-url'])
    def test_invalid_photo_url(self, arg):
        with pytest.raises(ValueError):
            auth.create_user(photo_url=arg)

    @pytest.mark.parametrize('arg', INVALID_STRINGS + ['short'])
    def test_invalid_password(self, arg):
        with pytest.raises(ValueError):
            auth.create_user(password=arg)

    @pytest.mark.parametrize('arg', INVALID_BOOLS)
    def test_invalid_email_verified(self, arg):
        with pytest.raises(ValueError):
            auth.create_user(email_verified=arg)

    @pytest.mark.parametrize('arg', INVALID_BOOLS)
    def test_invalid_disabled(self, arg):
        with pytest.raises(ValueError):
            auth.create_user(disabled=arg)

    def test_invalid_property(self):
        # Unknown keyword arguments are rejected outright.
        with pytest.raises(ValueError):
            auth.create_user(unsupported='value')

    def test_create_user(self, user_mgt_app):
        # With no arguments, the request body sent to the backend is empty.
        user_mgt, recorder = _instrument_user_manager(user_mgt_app, 200, '{"localId":"testuser"}')
        assert user_mgt.create_user() == 'testuser'
        request = json.loads(recorder[0].body.decode())
        assert request == {}

    @pytest.mark.parametrize('phone', [
        '+11234567890', '+1 123 456 7890', '+1 (123) 456-7890',
    ])
    def test_create_user_with_phone(self, user_mgt_app, phone):
        user_mgt, recorder = _instrument_user_manager(user_mgt_app, 200, '{"localId":"testuser"}')
        assert user_mgt.create_user(phone_number=phone) == 'testuser'
        request = json.loads(recorder[0].body.decode())
        assert request == {'phoneNumber' : phone}

    def test_create_user_with_email(self, user_mgt_app):
        user_mgt, recorder = _instrument_user_manager(user_mgt_app, 200, '{"localId":"testuser"}')
        assert user_mgt.create_user(email='test@example.com') == 'testuser'
        request = json.loads(recorder[0].body.decode())
        assert request == {'email' : 'test@example.com'}

    def test_create_user_with_id(self, user_mgt_app):
        user_mgt, recorder = _instrument_user_manager(user_mgt_app, 200, '{"localId":"testuser"}')
        assert user_mgt.create_user(uid='testuser') == 'testuser'
        request = json.loads(recorder[0].body.decode())
        assert request == {'localId' : 'testuser'}

    def test_create_user_error(self, user_mgt_app):
        _instrument_user_manager(user_mgt_app, 500, '{"error":"test"}')
        with pytest.raises(auth.AuthError) as excinfo:
            auth.create_user(app=user_mgt_app)
        assert excinfo.value.code == _user_mgt.USER_CREATE_ERROR
        assert '{"error":"test"}' in str(excinfo.value)
class TestUpdateUser(object):
    """Tests for auth.update_user()."""

    @pytest.mark.parametrize('arg', INVALID_STRINGS + ['a'*129])
    def test_invalid_uid(self, arg):
        with pytest.raises(ValueError):
            auth.update_user(arg)

    @pytest.mark.parametrize('arg', INVALID_STRINGS + ['not-an-email'])
    def test_invalid_email(self, arg):
        with pytest.raises(ValueError):
            auth.update_user('user', email=arg)

    # INVALID_STRINGS[1:] skips None: passing None to update_user means
    # "delete this attribute" and is therefore valid for these fields.
    @pytest.mark.parametrize('arg', INVALID_STRINGS[1:] + ['not-a-phone', '+'])
    def test_invalid_phone(self, arg):
        with pytest.raises(ValueError):
            auth.update_user('user', phone_number=arg)

    @pytest.mark.parametrize('arg', INVALID_STRINGS[1:])
    def test_invalid_display_name(self, arg):
        with pytest.raises(ValueError):
            auth.update_user('user', display_name=arg)

    @pytest.mark.parametrize('arg', INVALID_STRINGS[1:] + ['not-a-url'])
    def test_invalid_photo_url(self, arg):
        with pytest.raises(ValueError):
            auth.update_user('user', photo_url=arg)

    @pytest.mark.parametrize('arg', INVALID_STRINGS + ['short'])
    def test_invalid_password(self, arg):
        with pytest.raises(ValueError):
            auth.update_user('user', password=arg)

    @pytest.mark.parametrize('arg', INVALID_BOOLS)
    def test_invalid_email_verified(self, arg):
        with pytest.raises(ValueError):
            auth.update_user('user', email_verified=arg)

    @pytest.mark.parametrize('arg', INVALID_BOOLS)
    def test_invalid_disabled(self, arg):
        with pytest.raises(ValueError):
            auth.update_user('user', disabled=arg)

    def test_invalid_property(self):
        with pytest.raises(ValueError):
            auth.update_user('user', unsupported='arg')

    def test_update_user(self, user_mgt_app):
        user_mgt, recorder = _instrument_user_manager(user_mgt_app, 200, '{"localId":"testuser"}')
        user_mgt.update_user('testuser')
        request = json.loads(recorder[0].body.decode())
        assert request == {'localId' : 'testuser'}

    def test_update_user_delete_fields(self, user_mgt_app):
        # None values translate to explicit delete directives in the request.
        user_mgt, recorder = _instrument_user_manager(user_mgt_app, 200, '{"localId":"testuser"}')
        user_mgt.update_user('testuser', display_name=None, photo_url=None, phone_number=None)
        request = json.loads(recorder[0].body.decode())
        assert request == {
            'localId' : 'testuser',
            'deleteAttribute' : ['DISPLAY_NAME', 'PHOTO_URL'],
            'deleteProvider' : ['phone'],
        }

    def test_update_user_error(self, user_mgt_app):
        _instrument_user_manager(user_mgt_app, 500, '{"error":"test"}')
        with pytest.raises(auth.AuthError) as excinfo:
            auth.update_user('user', app=user_mgt_app)
        assert excinfo.value.code == _user_mgt.USER_UPDATE_ERROR
        assert '{"error":"test"}' in str(excinfo.value)
class TestDeleteUser(object):
    """Tests for auth.delete_user()."""

    @pytest.mark.parametrize('arg', INVALID_STRINGS + ['a'*129])
    def test_invalid_delete_user(self, arg):
        # uids must be non-empty strings of at most 128 characters.
        # Bug fix: this previously called auth.get_user(arg) (copy/paste from
        # the get-user tests), so delete_user's validation was never tested.
        with pytest.raises(ValueError):
            auth.delete_user(arg)

    def test_delete_user(self, user_mgt_app):
        _instrument_user_manager(user_mgt_app, 200, '{"kind":"deleteresponse"}')
        # should not raise
        auth.delete_user('testuser', user_mgt_app)

    def test_delete_user_error(self, user_mgt_app):
        # Backend failures surface as AuthError with the raw response attached.
        _instrument_user_manager(user_mgt_app, 500, '{"error":"test"}')
        with pytest.raises(auth.AuthError) as excinfo:
            auth.delete_user('user', app=user_mgt_app)
        assert excinfo.value.code == _user_mgt.USER_DELETE_ERROR
        assert '{"error":"test"}' in str(excinfo.value)
| |
import unittest
from .. import dispatchonvalue as dv
class TestExamples(unittest.TestCase):
    """Worked examples for DispatchOnValue: registering handlers with
    ``add(pattern)``, dispatching by value, wildcards, lambdas as
    predicates, extra positional/keyword arguments, and strict vs.
    partial dictionary matching.

    Each test records handler invocation through a one-element ``called``
    list (a mutable cell the closures can write to).
    """

    def test_example1(self):
        called = [0]
        dispatch_on_value = dv.DispatchOnValue()

        @dispatch_on_value.add([1, 2, 3])
        def _(a):
            called[0] = 1
            return 2

        @dispatch_on_value.add([4, 5, 6])
        def _(a):
            called[0] = 2
            return 3

        p = [4, 5, 6]
        # Should call second function above
        assert dispatch_on_value.dispatch(p) == 3
        assert called[0] == 2

    def test_1_multi_dispatch_on_value(self):
        called = [0]
        dispatch_on_value = dv.DispatchOnValue()

        @dispatch_on_value.add([1, 2, 3])
        def fn_1(a):
            assert a == [1, 2, 3]
            called[0] = 1

        @dispatch_on_value.add([4, 5, 6])
        def fn_2(a):
            assert a == [4, 5, 6]
            called[0] = 2

        p = [1, 2, 3]
        dispatch_on_value.dispatch(p)  # Call fn_1 and return True
        assert called[0] == 1

        p = [4, 5, 6]
        dispatch_on_value.dispatch(p)  # Call fn_2 and return True
        assert called[0] == 2

        # A value matching no pattern raises DispatchFailed and calls nothing.
        raised_exception = [0]
        called = [0]
        try:
            p = [1, 2, 6]
            dispatch_on_value.dispatch(p)  # Not call anything and return False
        except dv.DispatchFailed:
            raised_exception[0] = 1
        assert raised_exception[0] == 1
        assert called[0] == 0

        exception_raised = [0]
        p = [7, 8, 9]
        try:
            dispatch_on_value.dispatch(p)
        except dv.DispatchFailed:
            exception_raised[0] = 1
        assert exception_raised[0] == 1

    def test_2_arbitrary_nested(self):
        # Patterns may be arbitrarily nested containers.
        called = [0]
        dispatch_on_value = dv.DispatchOnValue()

        @dispatch_on_value.add({'one': 3, 'animals': ['frog', 'mouse']})
        def fn_1(a):
            assert a == {'one': 3, 'animals': ['frog', 'mouse']}
            called[0] = 1

        dispatch_on_value.dispatch({'one': 3, 'animals': ['frog', 'mouse']})
        assert called[0] == 1

    def test_3_wildcard(self):
        # dv.any_a matches any single value, but repeated uses inside one
        # pattern must all match the same value.
        called = [0]
        dispatch_on_value = dv.DispatchOnValue()

        @dispatch_on_value.add([dv.any_a, 'b', 3, [3, 'd', dv.any_a]])
        def _(a):
            called[0] = 1

        # Will match
        dispatch_on_value.dispatch(['c', 'b', 3, [3, 'd', 'c']])
        assert called[0] == 1

        called[0] = 0
        # Will match
        dispatch_on_value.dispatch(['f', 'b', 3, [3, 'd', 'f']])
        assert called[0] == 1

        raised_exception = [0]
        called[0] = 0
        try:
            # Will not match: the two any_a positions hold different values
            dispatch_on_value.dispatch(['c', 'b', 3, [3, 'd', 'f']])
        except dv.DispatchFailed:
            raised_exception[0] = 1
        assert raised_exception[0] == 1
        assert called[0] == 0

    def test_4_pass_parameters(self):
        # Extra positional arguments to dispatch() are forwarded to the handler.
        called = [0]
        dispatch_on_value = dv.DispatchOnValue()

        @dispatch_on_value.add([1, 2])
        def _(a, my_abc, my_def):
            assert a == [1, 2]
            assert my_abc == 'abc'
            assert my_def == 'def'
            called[0] = 1

        # Do something
        dispatch_on_value.dispatch([1, 2], 'abc', 'def')
        assert called[0] == 1

    def test_4b_pass_keywords(self):
        # Keyword arguments to dispatch() are forwarded to the handler too.
        called = [0]
        dispatch_on_value = dv.DispatchOnValue()

        @dispatch_on_value.add([3, 4])
        def _(a, my_abc, **kwargs):
            assert my_abc == 'abc'
            assert 'para1' in kwargs
            called[0] = 1

        # Do something
        dispatch_on_value.dispatch([3, 4], 'abc', para1=3)
        # Bug fix: the handler's side effect was never verified, so this test
        # passed even if the handler was never invoked.
        assert called[0] == 1

    def test_5_use_lambdas1(self):
        # A callable inside a pattern acts as a match predicate.
        called = [0]
        dispatch_on_value = dv.DispatchOnValue()

        @dispatch_on_value.add([1, 2, lambda x: 3 < x < 7, 'hello'])
        def _(a):
            called[0] = 1

        dispatch_on_value.dispatch([1, 2, 4, 'hello'])  # This will match
        assert called[0] == 1

        raised_exception = [0]
        called[0] = 0
        try:
            dispatch_on_value.dispatch([1, 2, 2, 'hello'])  # This will not match
        except dv.DispatchFailed:
            raised_exception[0] = 1
        assert raised_exception[0] == 1
        assert called[0] == 0

    def test_5_use_lambdas2(self):
        called = [0]
        dispatch_on_value = dv.DispatchOnValue()

        @dispatch_on_value.add(['a', 2, lambda x: x == 'b' or x == 'c'])
        def _(a):
            called[0] = 1

        dispatch_on_value.dispatch(['a', 2, 'c'])  # This will match
        assert called[0] == 1

        raised_exception = [0]
        called[0] = 0
        try:
            dispatch_on_value.dispatch(['a', 2, 's'])  # This will not match
        except dv.DispatchFailed:
            raised_exception[0] = 1
        assert raised_exception[0] == 1
        assert called[0] == 0

    def test_partial_or_strict(self):
        called = [0]
        dispatch_on_value = dv.DispatchOnValue()

        @dispatch_on_value.add({'name': 'john', 'age': 32})
        def _(a):
            called[0] = 1

        # These will match because they contain the minimal dictionary items
        dispatch_on_value.dispatch({'name': 'john', 'age': 32})
        assert called[0] == 1

        called[0] = 0
        dispatch_on_value.dispatch({'name': 'john', 'age': 32, 'sex': 'male'})
        assert called[0] == 1

        # This will match because it's strict and the pattern is exactly the
        # same
        called[0] = 0
        dispatch_on_value.dispatch_strict({'name': 'john', 'age': 32})
        assert called[0] == 1

        raised_exception = [0]
        called[0] = 0
        try:
            # This will not match because the dictionary doesn't match exactly
            dispatch_on_value.dispatch_strict(
                {'name': 'john', 'age': 32, 'sex': 'male'}
            )
        except dv.DispatchFailed:
            raised_exception[0] = 1
        assert raised_exception[0] == 1
        assert called[0] == 0
| |
# Copyright (c) 2015 Faculty of Engineering of the University of Porto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
""" Module for the Git Extractor. """
# Standard library
import multiprocessing
import os
import re

# Third party
import git

# Local
from .abstract_extractor import *
from schwa.repository import *
from schwa.parsing import JavaParser, ParsingError
# Module-level handle to the repository wrapper being extracted.  Set by
# GitExtractor.extract() so that pool workers (which inherit this module's
# state on fork) can reach the current extractor instance.
current_repo = None  # Current repository wrapper


def extract_commit_wrapper(hexsha):
    """ Multiprocessing wrapper for extracting a commit.

    Args:
        hexsha: A string with the commit SHA to extract.

    Returns:
        The result of current_repo.extract_commit(hexsha).
    """
    return current_repo.extract_commit(hexsha)
class GitExtractor(AbstractExtractor):
    """ A Git Extractor.

    This class relies on GitPython library to extract data from a local repository.
    """

    def __init__(self, path):
        """Open the Git repository at *path* (GitCmdObjectDB reads objects
        through the git binary, keeping memory usage low for big repos)."""
        super().__init__(path)
        self.repo = git.Repo(path, odbt=git.GitCmdObjectDB)

    def extract(self, ignore_regex="^$", max_commits=None, method_granularity=False, parallel=True):
        """ Extract a repository.

        It extracts commits from a repository that are important to the analysis. Therefore, only commits
        related to code are important. For the sake of supporting big repositories, it is possible to set
        the maximum number of commits.

        Args:
            ignore_regex: An optional string that is a regex pattern to ignore unnecessary files.
            max_commits: An optional int that is the maximum number of commits to extract since the last one.
            method_granularity: An optional boolean that enables extraction until the method granularity.
            parallel: An optional boolean that enables multiprocessing extraction.

        Returns:
            A Repository instance.
        """
        # Multiprocessing setup: workers reach this instance through the
        # module-global current_repo (inherited on fork by the pool workers).
        global current_repo
        current_repo = self
        try:
            cpus = multiprocessing.cpu_count()
        except NotImplementedError:  # pragma: no cover
            cpus = 2  # pragma: no cover
        # Stored on self because is_good_blob/extract_commit consult them.
        self.ignore_regex = ignore_regex
        self.method_granularity = method_granularity
        # Extract commits
        iter_commits = self.repo.iter_commits(max_count=max_commits) if max_commits else self.repo.iter_commits()
        commits = [commit.hexsha for commit in iter_commits]
        if parallel and os.name != "nt":  # fork-based pools are not usable on Windows
            # Bug fix: the pool used to be created unconditionally (even on
            # the sequential path) and was never closed, leaking worker
            # processes.  Create it only when needed and let the context
            # manager dispose of it once map() has returned.
            with multiprocessing.Pool(processes=cpus) as pool:
                commits = pool.map(extract_commit_wrapper, commits)
        else:
            commits = map(extract_commit_wrapper, commits)
        # Drop commits that yielded no diffs (extract_commit returned None)
        # and restore chronological (oldest-first) order.
        commits = list(reversed([commit for commit in commits if commit]))
        # Timestamps
        try:
            begin_ts = list(self.repo.iter_commits())[-1].committed_date
            last_ts = list(self.repo.iter_commits(max_count=1))[0].committed_date
        except TypeError:
            raise RepositoryExtractionException("Error extracting repository: cannot parse begin or last timestamps!")
        # Repository
        repo = Repository(commits, begin_ts, last_ts)
        return repo

    def extract_commit(self, hexsha):
        """ Extract a commit.

        Iterates over commits diffs to extract important information such as changed files, classes and methods.

        Args:
            hexsha: A string representing the commit ID

        Returns:
            A Commit instance, or None when the commit carries no relevant
            diffs (or its message cannot be decoded).
        """
        commit = self.repo.commit(hexsha)
        _id = hexsha
        try:
            message = commit.message
        except (UnicodeDecodeError, TypeError):  # pragma: no cover
            return None  # pragma: no cover
        author = commit.author.email
        timestamp = commit.committed_date
        diffs_list = []
        # First commit: no parents, so every good blob counts as an addition.
        if not commit.parents:
            for blob in commit.tree.traverse():
                if self.is_good_blob(blob):
                    diffs_list.extend(self.get_new_file_diffs(blob))
        else:
            for parent in commit.parents:
                for diff in parent.diff(commit):
                    # Shortcut: skip diffs where neither side is relevant.
                    if not self.is_good_blob(diff.a_blob) and not self.is_good_blob(diff.b_blob):
                        continue
                    # New file
                    if diff.new_file and self.is_good_blob(diff.b_blob):
                        diffs_list.extend(self.get_new_file_diffs(diff.b_blob))
                    # Renamed file
                    elif diff.renamed and self.is_good_blob(diff.a_blob) and self.is_good_blob(diff.b_blob):
                        diffs_list.extend(self.get_renamed_file_diffs(diff.a_blob, diff.b_blob))
                    # Deleted file
                    elif diff.deleted_file:
                        diffs_list.append(DiffFile(file_a=diff.a_blob.path, removed=True))
                    # Modified file
                    else:
                        diffs_list.extend(self.get_modified_file_diffs(diff.a_blob, diff.b_blob))
        return Commit(_id, message, author, timestamp, diffs_list) if len(diffs_list) > 0 else None

    def get_new_file_diffs(self, blob):
        """Diffs for an added file; with method granularity enabled, also one
        "added" diff per class and per method parsed from the new source."""
        diffs_list = [DiffFile(file_b=blob.path, added=True)]
        if can_parse_file(blob.path) and self.method_granularity:
            source = GitExtractor.get_source(blob)
            file_parsed = GitExtractor.parse(blob.path, source)
            if file_parsed:
                classes_set = file_parsed.get_classes_set()
                methods_set = file_parsed.get_functions_set()
                for c in classes_set:
                    diffs_list.append(DiffClass(file_name=blob.path, class_b=c, added=True))
                for c, m in methods_set:
                    diffs_list.append(DiffMethod(file_name=blob.path, class_name=c, method_b=m, added=True))
        return diffs_list

    def get_modified_file_diffs(self, blob_a, blob_b):
        """Diffs for a modified file; parsing failures degrade gracefully to
        the file-level diff only."""
        diffs_list = [DiffFile(file_a=blob_a.path, file_b=blob_b.path, modified=True)]
        try:
            if can_parse_file(blob_a.path) and can_parse_file(blob_b.path) and self.method_granularity:
                source_a = GitExtractor.get_source(blob_a)
                source_b = GitExtractor.get_source(blob_b)
                diffs_list.extend(GitExtractor.diff((blob_a.path, source_a), (blob_b.path, source_b)))
        except ParsingError:
            pass
        return diffs_list

    def get_renamed_file_diffs(self, blob_a, blob_b):
        """Diffs for a renamed file; same parsing strategy as modifications."""
        diffs_list = [DiffFile(file_a=blob_a.path, file_b=blob_b.path, renamed=True)]
        try:
            if can_parse_file(blob_a.path) and can_parse_file(blob_b.path) and self.method_granularity:
                source_a = GitExtractor.get_source(blob_a)
                source_b = GitExtractor.get_source(blob_b)
                diffs_list.extend(GitExtractor.diff((blob_a.path, source_a), (blob_b.path, source_b)))
        except ParsingError:
            pass
        return diffs_list

    def is_good_blob(self, blob):
        """A blob is relevant when it exists, is a code file, and its path is
        not excluded by the ignore_regex set in extract()."""
        return blob and is_code_file(blob.path) and not re.search(self.ignore_regex, blob.path)

    @staticmethod
    def get_source(blob):
        """Decode a blob's content as UTF-8; raises ParsingError when the
        blob has no readable data stream."""
        try:
            stream = blob.data_stream.read()
            source = stream.decode("UTF-8")
        except AttributeError:
            raise ParsingError
        return source

    @staticmethod
    def parse(path, source):
        """Parse *source* with the parser matching *path*; False on failure.

        NOTE(review): the '"java" in path' substring test also matches e.g.
        ".javascript" paths — confirm whether an extension check is intended.
        """
        try:
            if "java" in path:
                components = JavaParser.parse(source)
                return components
        except ParsingError:
            pass
        return False

    @staticmethod
    def diff(file_a, file_b):
        """Language-level diff of two (path, source) pairs; [] on failure."""
        try:
            if "java" in file_a[0]:
                components_diff = JavaParser.diff(file_a, file_b)
                return components_diff
        except ParsingError:
            pass
        return []
| |
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
"""Python support for CodeIntel"""
import os
from os.path import (isfile, isdir, exists, dirname, splitext,
join, basename, normcase)
import sys
import logging
import random
import parser
from glob import glob
import weakref
import re
import imp
from pprint import pprint, pformat
import itertools
import SilverCity
from SilverCity.Lexer import Lexer
from SilverCity import ScintillaConstants
from SilverCity.Keywords import python_keywords
from codeintel2.common import *
from codeintel2.citadel import (CitadelBuffer, CitadelEvaluator, ImportHandler,
CitadelLangIntel)
from codeintel2.indexer import PreloadLibRequest
from codeintel2 import pythoncile
from codeintel2.util import (banner, indent, markup_text, isident, isdigit,
makePerformantLogger)
from codeintel2 import tree
from codeintel2.tree_python import PythonTreeEvaluator, PythonImportLibGenerator
from codeintel2.langintel import (ParenStyleCalltipIntelMixin,
ProgLangTriggerIntelMixin,
PythonCITDLExtractorMixin)
from codeintel2.tree import tree_from_cix
if _xpcom_:
    # Running inside Komodo/XPCOM: triggers and controllers may arrive as
    # XPCOM proxies and need unwrapping (see async_eval_at_trg below).
    from xpcom.server import UnwrapObject


#---- globals

# Presumably gates symbol scanning of binary modules — not used in this
# section of the file; TODO confirm against the rest of the module.
_SCAN_BINARY_FILES = False

lang = "Python"
log = logging.getLogger("codeintel.python")
# log.setLevel(logging.DEBUG)
makePerformantLogger(log)

CACHING = True  # DEPRECATED: kill it

# Tags offered for pythondoc-comment completion.
# See http://effbot.org/zone/pythondoc.htm
_g_pythondoc_tags = list(sorted("param keyparam return exception def "
                                "defreturn see link linkplain".split()))

# Magic method names offered for "def __<...>" completion (see the
# "magic-symbols" branch of PythonLangIntel.async_eval_at_trg).  This is a
# Python 2-era list: it includes __cmp__, __nonzero__, __getslice__, etc.
_g_python_magic_method_names = sorted([
    '__init__',
    '__new__',
    '__del__',
    '__repr__',
    '__str__',
    '__lt__',
    '__le__',
    '__eq__',
    '__ne__',
    '__gt__',
    '__ge__',
    '__cmp__',
    '__rcmp__',
    '__hash__',
    '__nonzero__',
    '__unicode__',
    # Attribute access
    '__getattr__',
    '__setattr__',
    '__delattr__',
    # New style classes
    '__getattribute__',
    '__call__',
    # Sequence classes
    '__len__',
    '__getitem__',
    '__setitem__',
    '__delitem__',
    '__iter__',
    '__reversed__',
    '__contains__',
    '__getslice__',
    '__setslice__',
    '__delslice__',
    # Integer like operators
    '__add__',
    '__sub__',
    '__mul__',
    '__floordiv__',
    '__mod__',
    '__divmod__',
    '__pow__',
    '__lshift__',
    '__rshift__',
    '__and__',
    '__xor__',
    '__or__',
    '__div__',
    '__truediv__',
    '__radd__',
    '__rsub__',
    '__rmul__',
    '__rdiv__',
    '__rtruediv__',
    '__rfloordiv__',
    '__rmod__',
    '__rdivmod__',
    '__rpow__',
    '__rlshift__',
    '__rrshift__',
    '__rand__',
    '__rxor__',
    '__ror__',
    '__iadd__',
    '__isub__',
    '__imul__',
    '__idiv__',
    '__itruediv__',
    '__ifloordiv__',
    '__imod__',
    '__ipow__',
    '__ilshift__',
    '__irshift__',
    '__iand__',
    '__ixor__',
    '__ior__',
    '__neg__',
    '__pos__',
    '__abs__',
    '__invert__',
    '__complex__',
    '__int__',
    '__long__',
    '__float__',
    '__oct__',
    '__hex__',
    '__index__',
    '__coerce__',
    # Context managers
    '__enter__',
    '__exit__',
])
#---- language support
class PythonLexer(Lexer):
    """SilverCity/Scintilla lexer configuration for Python source."""
    lang = lang

    def __init__(self):
        # Scintilla keyword-list order matters: language keywords first,
        # then the (initially empty) highlighted-identifier list.
        keyword_lists = [
            SilverCity.WordList(python_keywords),
            SilverCity.WordList(""),  # hilighted identifiers
        ]
        self._properties = SilverCity.PropertySet()
        self._lexer = SilverCity.find_lexer_module_by_id(
            ScintillaConstants.SCLEX_PYTHON)
        self._keyword_lists = keyword_lists
class PythonImportsEvaluator(Evaluator):
    """Evaluator for "available-imports" and "module-members" triggers:
    produces completion lists of importable modules/packages, and of the
    top-level members of an already-named module."""
    lang = lang

    def __str__(self):
        return "Python imports"

    def eval(self, mgr):
        # trg.extra["imp_prefix"] is the dotted import path typed so far,
        # e.g. ("os", "path") for "import os.path.<|>"; empty means a bare
        # "import <|>".
        try:
            imp_prefix = tuple(self.trg.extra["imp_prefix"])
            if imp_prefix:
                libs = self.buf.libs
                if not imp_prefix[0]:
                    # Relative import ("from .", "from ..", ...): walk up one
                    # directory per leading empty component and search only
                    # that directory.
                    if not imp_prefix[-1]:
                        # Deal with last item being empty, i.e. "from ."
                        imp_prefix = imp_prefix[:-1]
                    lookuppath = self.buf.path
                    while imp_prefix and not imp_prefix[0]:
                        lookuppath = dirname(lookuppath)
                        imp_prefix = imp_prefix[1:]
                    libs = [mgr.db.get_lang_lib(self.lang, "curdirlib",
                                                [lookuppath])]
                else:
                    # We use a special lib generator - that will lazily load
                    # additional directory libs when there are no matches found.
                    # This is a smart import facility - to detect imports from
                    # a parent directory when they are not explicitly on the
                    # included path list, quite common for Django and other
                    # Python frameworks that mangle the sys.path at runtime.
                    libs = PythonImportLibGenerator(mgr, self.lang,
                                                    self.buf.path, imp_prefix,
                                                    libs)
                self.ctlr.set_desc("subimports of '%s'" % '.'.join(imp_prefix))
                cplns = []
                # Probe libs in order; the first lib yielding any completions
                # wins (see the "break" below).
                for lib in libs:
                    imports = lib.get_blob_imports(imp_prefix)
                    if imports:
                        cplns.extend(
                            ((is_dir_import and "directory" or "module"), name)
                            for name, is_dir_import in imports
                        )
                    if self.trg.type == "module-members":
                        # Also add top-level members of the specified module.
                        dotted_prefix = '.'.join(imp_prefix)
                        if lib.has_blob(dotted_prefix):
                            blob = lib.get_blob(dotted_prefix)
                            for name in blob.names:
                                elem = blob.names[name]
                                cplns.append((elem.get(
                                    "ilk") or elem.tag, name))
                            # TODO: Consider using the value of __all__
                            #       if defined.
                            for e in blob:
                                attrs = e.get("attributes", "").split()
                                if "__hidden__" not in attrs:
                                    try:
                                        cplns += self._members_from_elem(
                                            e, mgr)
                                    except CodeIntelError as ex:
                                        log.warn(
                                            "%s (skipping members for %s)",
                                            ex, e)
                    if cplns:
                        break
                if cplns:
                    cplns = list(set(cplns))  # remove duplicates
            else:
                # No prefix: offer everything importable from the buffer's libs.
                self.ctlr.set_desc("available imports")
                all_imports = set()
                for lib in self.buf.libs:
                    all_imports.update(lib.get_blob_imports(imp_prefix))
                cplns = [((is_dir_import and "directory" or "module"), name)
                         for name, is_dir_import in all_imports]
            if cplns:
                # Case-insensitive sort on the completion name.
                cplns.sort(key=lambda i: i[1].upper())
                self.ctlr.set_cplns(cplns)
        finally:
            # NOTE(review): the controller is always released with "success",
            # even when an exception escaped the body — presumably deliberate
            # so the UI never hangs; confirm before changing.
            self.ctlr.done("success")

    # XXX: This function is shamelessly copy/pasted from
    #      tree_python.py:PythonTreeEvaluator because there was no clear
    #      way to reuse this shared functionality. See another XXX below,
    #      though.
    def _members_from_elem(self, elem, mgr):
        """Return the appropriate set of autocomplete completions for
        the given element. Typically this is just one, but can be more for
        '*'-imports
        """
        members = set()
        if elem.tag == "import":
            alias = elem.get("alias")
            symbol_name = elem.get("symbol")
            module_name = elem.get("module")
            if symbol_name:
                import_handler = mgr.citadel.import_handler_from_lang(
                    self.trg.lang)
                try:
                    blob = import_handler.import_blob_name(
                        module_name, self.buf.libs, self.ctlr)
                except:
                    # Logged, then re-raised for the caller to handle.
                    log.warn(
                        "limitation in handling imports in imported modules")
                    raise
                if symbol_name == "*":  # can it be so?
                    for m_name, m_elem in list(blob.names.items()):
                        m_type = m_elem.get("ilk") or m_elem.tag
                        members.add((m_type, m_name))
                elif symbol_name in blob.names:
                    symbol = blob.names[symbol_name]
                    member_type = (symbol.get("ilk") or symbol.tag)
                    members.add((member_type, alias or symbol_name))
                else:
                    # To correctly determine the type, we'd need to
                    # examine all the imports of this blob, and then see
                    # if any of those imports match the name... which is
                    # better left to the tree evaluator (tree_python).
                    #
                    # For now, we just add it as an unknown type.
                    members.add(('unknown', alias or symbol_name))
                    log.info(
                        "could not resolve symbol %r on %r, added as 'unknown'",
                        symbol_name, module_name)
            else:
                cpln_name = alias or module_name.split('.', 1)[0]
                members.add(("module", cpln_name))
        else:
            members.add((elem.get("ilk") or elem.tag, elem.get("name")))
        return members
class PythonLangIntel(CitadelLangIntel, ParenStyleCalltipIntelMixin,
                      ProgLangTriggerIntelMixin,
                      PythonCITDLExtractorMixin):
    """Python language intelligence: dispatches trigger evaluation, discovers
    the configured Python interpreter, and assembles the buffer-independent
    chain of import libraries (extra dirs, env, site, catalog, stdlib)."""
    lang = lang
    # Preference names consulted on the environment.
    interpreterPrefName = "python"
    extraPathsPrefName = "pythonExtraPaths"

    # Used by ProgLangTriggerIntelMixin.preceding_trg_from_pos().
    trg_chars = tuple(" (.")
    citdl_from_literal_type = {"string": "str"}

    @LazyClassAttribute
    def keywords(self):
        # Computed once per class on first access; local import keeps module
        # load cheap when keywords are never requested.
        from SilverCity.Keywords import python_keywords
        return python_keywords.split(" ")

    def async_eval_at_trg(self, buf, trg, ctlr):
        """Kick off asynchronous evaluation of *trg* for *buf*.

        Tree-based triggers (member/calltip/defn/local-symbol lookups) are
        queued on the buffer manager; simple list-based triggers
        (magic-symbols, pythondoc-tags) are answered synchronously.
        Raises NotImplementedError for unknown trigger types.
        """
        if _xpcom_:
            # Unwrap XPCOM proxies so the pure-Python evaluators can use them.
            trg = UnwrapObject(trg)
            ctlr = UnwrapObject(ctlr)
        ctlr.start(buf, trg)

        if trg.type in ("object-members", "call-signature",
                        "literal-members") or \
           trg.form == TRG_FORM_DEFN:
            line = buf.accessor.line_from_pos(trg.pos)
            if trg.type == "literal-members":
                # We could leave this to citdl_expr_from_trg, but this is a
                # little bit faster, since we already know the citdl expr.
                citdl_expr = trg.extra.get("citdl_expr")
            else:
                try:
                    citdl_expr = self.citdl_expr_from_trg(buf, trg)
                except CodeIntelError as ex:
                    ctlr.error(str(ex))
                    ctlr.done("error")
                    return
            evalr = PythonTreeEvaluator(ctlr, buf, trg, citdl_expr, line)
            buf.mgr.request_eval(evalr)
        elif trg.id == (self.lang, TRG_FORM_CPLN, "local-symbols"):
            line = buf.accessor.line_from_pos(trg.pos)
            citdl_expr = trg.extra.get("citdl_expr")
            evalr = PythonTreeEvaluator(ctlr, buf, trg, citdl_expr, line)
            buf.mgr.request_eval(evalr)
        elif trg.id == (self.lang, TRG_FORM_CPLN, "magic-symbols"):
            # Completions for __...__ names; answered synchronously from the
            # static lists above.
            symbolstype = trg.extra.get("symbolstype")
            cplns = []
            if symbolstype == "string":
                cplns = [("variable", "__main__")]
            elif symbolstype == "def":
                posttext = trg.extra.get("posttext", "")
                posttext = posttext.split("\n", 1)[0]
                if posttext and "(" in posttext:
                    # An argument list already follows: complete just the name.
                    cplns = [(
                        "function", t) for t in _g_python_magic_method_names]
                else:
                    # No paren yet: also open the argument list with "self".
                    cplns = [(
                        "function", t + "(self") for t in _g_python_magic_method_names]
            elif symbolstype == "global":
                text = trg.extra.get("text")
                if text.endswith("if"):
                    # Add the extended name version.
                    cplns = [("variable", t) for t in (
                        "__file__", "__loader__", "__name__ == '__main__':", "__package__")]
                else:
                    cplns = [("variable", t) for t in (
                        "__file__", "__loader__", "__name__", "__package__")]
            ctlr.set_cplns(cplns)
            ctlr.done("success")
        elif trg.id == (self.lang, TRG_FORM_CPLN, "pythondoc-tags"):
            # TODO: Would like a "tag" completion image name.
            cplns = [("variable", t) for t in _g_pythondoc_tags]
            ctlr.set_cplns(cplns)
            ctlr.done("success")
        elif trg.type == "available-exceptions":
            evalr = PythonTreeEvaluator(ctlr, buf, trg, None, -1)
            buf.mgr.request_eval(evalr)
        elif trg.type in ("available-imports", "module-members"):
            evalr = PythonImportsEvaluator(ctlr, buf, trg)
            buf.mgr.request_eval(evalr)
        else:
            raise NotImplementedError("not yet implemented: completion for "
                                      "Python '%s' trigger" % trg.name)

    # One-liner run with "python -c" to report version, prefix and sys.path.
    # Note: Python 1.5.2 does not support sys.version_info.
    info_cmd = (
        r"import sys;"
        r"sys.stdout.write('.'.join(map(str, sys.version_info))+'\n');"
        r"sys.stdout.write(sys.prefix+'\n');"
        r"sys.stdout.write('\n'.join(sys.path));")

    def _python_info_from_python(self, python, env):
        """Call the given Python and return:
            (<version>, <sys.prefix>, <lib-dir>, <site-lib-dir>, <sys.path>)

        TODO: Unicode path issues?
        """
        import process
        argv = [python, "-c", self.info_cmd]
        log.debug("run `%s -c ...'", python)
        p = process.ProcessOpen(argv, env=env.get_all_envvars(), stdin=None)
        stdout, stderr = p.communicate()
        stdout_lines = stdout.splitlines(0)
        retval = p.returncode
        if retval:
            # NOTE(review): execution continues after a failed run; if stdout
            # is empty the stdout_lines[0] access below raises IndexError —
            # confirm whether callers rely on that exception.
            log.warn("failed to determine Python info:\n"
                     " path: %s\n"
                     " retval: %s\n"
                     " stdout:\n%s\n"
                     " stderr:\n%s\n",
                     python, retval, indent('\n'.join(stdout_lines)),
                     indent(stderr))
        # We are only to rely on the first 2 digits being in the form x.y.
        # NOTE(review): the '.' in this pattern is unescaped, so it matches
        # any character — harmless here but worth confirming.
        ver_match = re.search("([0-9]+.[0-9]+)", stdout_lines[0])
        if ver_match:
            ver = ver_match.group(1)
        else:
            ver = None
        prefix = stdout_lines[1]
        if sys.platform == "win32":
            libdir = join(prefix, "Lib")
        else:
            # NOTE(review): "python"+ver raises TypeError when ver is None.
            libdir = join(prefix, "lib", "python"+ver)
        sitelibdir = join(libdir, "site-packages")
        sys_path = stdout_lines[2:]
        return ver, prefix, libdir, sitelibdir, sys_path

    def _gen_python_import_paths_from_dirs(self, dirs):
        """Generate all Python import paths from a given list of dirs.

        This involves handling .pth files on the given dirs. It generates
        import "paths" rather than "dirs" because Python .egg files can be
        returned.

        Dev Notes:
        - Python's .pth files can have *executable* Python code. This
          currently is not handled (those kinds of lines are skipped).
        """
        for dir in dirs:
            if not exists(dir):
                continue
            yield dir
            try:
                for pth_path in glob(join(dir, "*.pth")):
                    for p in self._gen_python_import_paths_from_pth_path(pth_path):
                        yield p
            except EnvironmentError as ex:
                log.warn("error analyzing .pth files in '%s': %s", dir, ex)

    def _gen_python_import_paths_from_pth_path(self, pth_path):
        # Yield each existing path named in a .pth file (relative entries are
        # resolved against the .pth file's directory).  Executable lines are
        # skipped implicitly: join() of code text won't exist on disk.
        pth_dir = dirname(pth_path)
        for line in open(pth_path, 'r'):
            line = line.strip()
            if line.startswith("#"):  # comment line
                continue
            path = join(pth_dir, line)
            if exists(path):
                yield path

    def _extra_dirs_from_env(self, env):
        """Collect the user's extra import dirs (pythonExtraPaths pref),
        expanded through .pth handling; returns a tuple (possibly empty)."""
        extra_dirs = set()
        for pref in env.get_all_prefs(self.extraPathsPrefName):
            if not pref:
                continue
            extra_dirs.update(d.strip() for d in pref.split(os.pathsep)
                              if exists(d.strip()))
        if extra_dirs:
            extra_dirs = set(
                self._gen_python_import_paths_from_dirs(extra_dirs)
            )
            log.debug("Python extra lib dirs: %r", extra_dirs)
        return tuple(extra_dirs)

    def interpreter_from_env(self, env):
        """Returns:
        - absolute path to either the preferred or
          default system interpreter
        - None if none of the above exists
        """
        # Gather information about the current python.
        python = None
        if env.has_pref(self.interpreterPrefName):
            python = env.get_pref(self.interpreterPrefName).strip() or None

        if not python or not exists(python):
            # Fall back to the first "python" on the environment's PATH.
            import which
            syspath = env.get_envvar("PATH", "")
            path = [d.strip() for d in syspath.split(os.pathsep)
                    if d.strip()]
            try:
                python = which.which("python", path=path)
            except which.WhichError:
                pass  # intentionally supressed

        if python:
            python = os.path.abspath(python)

        return python

    def python_info_from_env(self, env):
        # Cached per-environment; invalidated via the pref observers
        # registered in _buf_indep_libs_from_env.
        cache_key = self.lang + "-info"
        info = env.cache.get(cache_key)
        if info is None:
            python = self.interpreter_from_env(env)
            if not python:
                log.warn("no Python was found from which to determine the "
                         "codeintel information")
                info = None, None, None, None, []
            else:
                info = self._python_info_from_python(python, env)
            env.cache[cache_key] = info
        return info

    def _buf_indep_libs_from_env(self, env):
        """Create the buffer-independent list of libs."""
        cache_key = self.lang + "-libs"
        libs = env.cache.get(cache_key)
        if libs is None:
            # Invalidate the cache whenever a relevant pref changes.
            env.add_pref_observer(
                self.interpreterPrefName, self._invalidate_cache)
            env.add_pref_observer(self.extraPathsPrefName,
                                  self._invalidate_cache_and_rescan_extra_dirs)
            env.add_pref_observer("codeintel_selected_catalogs",
                                  self._invalidate_cache)
            db = self.mgr.db

            ver, prefix, libdir, sitelibdir, sys_path \
                = self.python_info_from_env(env)
            libs = []

            # - extradirslib
            extra_dirs = self._extra_dirs_from_env(env)
            if extra_dirs:
                libs.append(db.get_lang_lib(self.lang, "extradirslib",
                                            extra_dirs))

            # Figure out which sys.path dirs belong to which lib.
            # Classification is by path prefix against the interpreter's
            # known site/stdlib locations; everything else is "envlib".
            paths_from_libname = {"sitelib": [], "envlib": [], "stdlib": []}
            canon_sitelibdir = sitelibdir and normcase(sitelibdir) or None
            canon_prefix = prefix and normcase(prefix) or None
            canon_libdir = libdir and normcase(libdir) or None
            canon_libdir_plat_prefix = libdir and normcase(
                join(libdir, "plat-")) or None
            canon_libdir_lib_prefix = libdir and normcase(
                join(libdir, "lib-")) or None
            for dir in sys_path:
                STATE = "envlib"
                canon_dir = normcase(dir)
                if dir == "":  # -> curdirlib (already handled)
                    continue
                elif canon_dir.endswith(".zip") and isfile(dir):
                    log.warn("`%s': not handling .zip file on Python sys.path",
                             dir)
                    continue
                elif canon_dir.endswith(".egg") and isfile(dir):
                    # log.warn("`%s': not handling .egg file on Python sys.path",
                    #         dir)
                    continue
                elif canon_dir.startswith(canon_sitelibdir):
                    STATE = "sitelib"
                # Check against the known list of standard library locations.
                elif canon_dir == canon_libdir or \
                        canon_dir.startswith(canon_libdir_plat_prefix) or \
                        canon_dir.startswith(canon_libdir_lib_prefix):
                    STATE = "stdlib"
                if not exists(dir):
                    continue
                paths_from_libname[STATE].append(dir)
            log.debug("Python %s paths for each lib:\n%s",
                      ver, indent(pformat(paths_from_libname)))

            # - envlib, sitelib, cataloglib, stdlib
            if paths_from_libname["envlib"]:
                libs.append(db.get_lang_lib(self.lang, "envlib",
                                            paths_from_libname["envlib"]))
            if paths_from_libname["sitelib"]:
                libs.append(db.get_lang_lib(self.lang, "sitelib",
                                            paths_from_libname["sitelib"]))
            catalog_selections = env.get_pref("codeintel_selected_catalogs")
            libs += [
                db.get_catalog_lib(self.lang, catalog_selections),
                db.get_stdlib(self.lang, ver)
            ]
            env.cache[cache_key] = libs

        return libs

    def libs_from_buf(self, buf):
        env = buf.env

        # A buffer's libs depend on its env and the buf itself so
        # we cache it on the env and key off the buffer.
        cache_key = self.lang + "-buf-libs"
        cache = env.cache.get(cache_key)  # <buf-weak-ref> -> <libs>
        if cache is None:
            # WeakKeyDictionary so closed buffers can be collected.
            cache = weakref.WeakKeyDictionary()
            env.cache[cache_key] = cache

        if buf not in cache:
            # - curdirlib
            # Using the dirname of this buffer isn't always right, but
            # hopefully is a good first approximation.
            libs = []
            if buf.path:
                cwd = dirname(buf.path)
                if cwd != "<Unsaved>":
                    libs = [self.mgr.db.get_lang_lib(
                        self.lang, "curdirlib", [cwd])]
            libs += self._buf_indep_libs_from_env(env)
            cache[buf] = libs
        return cache[buf]

    def _invalidate_cache(self, env, pref_name):
        # Drop both the per-buffer and buffer-independent lib caches.
        for key in (self.lang + "-buf-libs", self.lang + "-libs"):
            if key in env.cache:
                log.debug("invalidate '%s' cache on %r", key, env)
                del env.cache[key]

    def _invalidate_cache_and_rescan_extra_dirs(self, env, pref_name):
        self._invalidate_cache(env, pref_name)
        extra_dirs = self._extra_dirs_from_env(env)
        if extra_dirs:
            # Queue a low-priority background scan of the new extra dirs.
            extradirslib = self.mgr.db.get_lang_lib(
                self.lang, "extradirslib", extra_dirs)
            request = PreloadLibRequest(extradirslib)
            self.mgr.idxr.stage_request(request, 1.0)
# NOTE(review): dead code kept for reference -- candidate for deletion.
# class PythonCitadelEvaluator(CitadelEvaluator):
#     def post_process_cplns(self, cplns):
#         """Drop special __FOO__ methods.
#
#         Note: Eventually for some Python completions we might want to leave
#         these in. For example:
#
#             class Bar(Foo):
#                 def __init__(self):
#                     Foo.<|>    # completions should include "__init__" here
#         """
#         for i in range(len(cplns)-1, -1, -1):
#             value = cplns[i][1]
#             if value.startswith("__") and value.endswith("__"):
#                 del cplns[i]
#         return CitadelEvaluator.post_process_cplns(self, cplns)

# Matches the leading "from" of an import statement: "from", "from .",
# "from .." -- the group captures any relative-import dots (or end of
# string for a bare "from").
_dotted_from_rx = re.compile(r'from($|\s+\.+)')
class PythonBuffer(CitadelBuffer):
    """Python-specific completion buffer.

    Decides, from a cursor position and the surrounding text/styling,
    which completion or calltip trigger (if any) should fire.
    """
    lang = lang
    # Fillup chars for Python: basically, any non-identifier char.
    # - remove '*' from fillup chars because: "from foo import <|>*"
    cpln_fillup_chars = "~`!@#$%^&()-=+{}[]|\\;:'\",.<>?/ "
    cpln_stop_chars = "~`!@#$%^&*()-=+{}[]|\\;:'\",.<>?/ "
    sce_prefixes = ["SCE_P_"]
    cb_show_if_empty = True
    keyword_style = ScintillaConstants.SCE_P_WORD
    identifier_style = ScintillaConstants.SCE_P_IDENTIFIER

    @property
    def libs(self):
        # Completion libs are computed (and cached) by the langintel.
        return self.langintel.libs_from_buf(self)

    def trg_from_pos(self, pos, implicit=True):
        """Python trigger types:
        python-complete-object-members
        python-calltip-call-signature
        python-complete-pythondoc-tags
        complete-available-imports
        complete-module-members
        Not yet implemented:
        complete-available-classes
        calltip-base-signature
        """
        DEBUG = False  # not using 'logging' system, because want to be fast
        if DEBUG:
            print("\n----- Python trg_from_pos(pos=%r, implicit=%r) -----"\
                  % (pos, implicit))
        if pos == 0:
            return None
        accessor = self.accessor
        last_pos = pos - 1
        last_char = accessor.char_at_pos(last_pos)
        if DEBUG:
            print(" last_pos: %s" % last_pos)
            print(" last_char: %r" % last_char)
        # Quick out if the preceding char isn't a trigger char.
        # Note: Cannot use this now that we have a 2-char locals trigger.
        # if last_char not in " .(@_,":
        #    if DEBUG:
        #        print "trg_from_pos: no: %r is not in ' .(@'_" % last_char
        #    return None
        style = accessor.style_at_pos(last_pos)
        if DEBUG:
            style_names = self.style_names_from_style_num(style)
            print(" style: %s (%s)" % (style, ", ".join(style_names)))
        if last_char == "@":
            # Possibly python-complete-pythondoc-tags (the only trigger
            # on '@').
            #
            # Notes:
            # - PythonDoc 2.1b6 started allowing pythondoc tags in doc
            #   strings which we are yet supporting here.
            # - Trigger in comments should only happen if the comment
            #   begins with the "##" pythondoc signifier. We don't
            #   bother checking that (PERF).
            if style in self.comment_styles():
                # Only trigger at start of comment line: walk back over
                # whitespace looking for the opening '#'.
                WHITESPACE = tuple(" \t")
                SENTINEL = 20
                i = last_pos-1
                while i >= max(0, last_pos-SENTINEL):
                    ch = accessor.char_at_pos(i)
                    if ch == "#":
                        return Trigger(self.lang, TRG_FORM_CPLN,
                                       "pythondoc-tags", pos, implicit)
                    elif ch in WHITESPACE:
                        pass
                    else:
                        return None
                    i -= 1
            return None
        # Remaining triggers should never trigger in some styles.
        if (implicit and style in self.implicit_completion_skip_styles and last_char != '_'
                or style in self.completion_skip_styles):
            if DEBUG:
                print("trg_from_pos: no: completion is suppressed "\
                      "in style at %s: %s (%s)"\
                      % (last_pos, style, ", ".join(style_names)))
            return None
        if last_char == " ":
            # used for:
            #    * complete-available-imports
            #    * complete-module-members
            #    * complete-available-exceptions
            # Triggering examples ('_' means a space here):
            #    import_            from_
            # Non-triggering examples:
            #    from FOO import_   Ximport_
            # Not bothering to support:
            #    if FOO:import_     FOO;import_
            # Typing a space is very common so lets have a quick out before
            # doing the more correct processing:
            # ('e','t','m' are the last letters of except/raise, import/from;
            # ',' is for "from FOO import BAR, <|>")
            if last_pos-1 < 0 or accessor.char_at_pos(last_pos-1) not in "etm,":
                return None
            working_text = accessor.text_range(max(0, last_pos-200),
                                               last_pos)
            line = self._last_logical_line(working_text).strip()
            if not line:
                return None
            ch = line[-1]
            line = line.replace('\t', ' ')
            # from <|>
            # import <|>
            if line == "from" or line == "import":
                return Trigger(self.lang, TRG_FORM_CPLN,
                               "available-imports", pos, implicit,
                               imp_prefix=())
            # is it "from FOO import <|>" ?
            if line.endswith(" import"):
                if line.startswith('from '):
                    imp_prefix = tuple(line[len('from '):-len(
                        ' import')].strip().split('.'))
                    return Trigger(self.lang, TRG_FORM_CPLN,
                                   "module-members", pos, implicit,
                                   imp_prefix=imp_prefix)
            if line == "except" or line == "raise" or line.endswith((" except", " raise")):
                return Trigger(self.lang, TRG_FORM_CPLN,
                               "available-exceptions", pos, implicit)
            if ch == ',':
                # is it "from FOO import BAR, <|>" ?
                if line.startswith('from ') and ' import ' in line:
                    imp_prefix = tuple(line[len('from '):line.index(
                        ' import')].strip().split('.'))
                    # Need better checks
                    return Trigger(self.lang, TRG_FORM_CPLN,
                                   "module-members", pos, implicit,
                                   imp_prefix=imp_prefix)
        elif last_char == '.':  # must be "complete-object-members" or None
            # If the first non-whitespace character preceding the '.' in the
            # same statement is an identifier character then trigger, if it
            # is a ')', then _maybe_ we should trigger (yes if this is
            # function call paren).
            #
            # Triggering examples:
            #    FOO.            FOO .           FOO; BAR.
            #    FOO().          FOO.BAR.        FOO(BAR, BAZ.
            #    FOO().BAR.      FOO("blah();", "blam").   FOO = {BAR.
            #    FOO(BAR.        FOO[BAR.
            #    ...more cases showing possible delineation of expression
            # Non-triggering examples:
            #    FOO..
            #    FOO[1].         too hard to determine sequence element types
            #    from FOO import (BAR.
            # Not sure if want to support:
            #    "foo".          do we want to support literals? what about
            #                    lists? tuples? dicts?
            working_text = accessor.text_range(max(0, last_pos-200),
                                               last_pos)
            line = self._last_logical_line(working_text).strip()
            if line:
                ch = line[-1]
                if (isident(ch) or isdigit(ch) or ch in '.)'):
                    line = line.replace('\t', ' ')
                    m = _dotted_from_rx.match(line)
                    if m:
                        dots = len(m.group(1).strip())
                        # magic value for imp_prefix, means "from .<|>"
                        imp_prefix = tuple('' for i in range(dots+2))
                        return Trigger(self.lang, TRG_FORM_CPLN,
                                       "available-imports", pos, implicit,
                                       imp_prefix=imp_prefix)
                    elif line.startswith('from '):
                        if ' import ' in line:
                            # we're in "from FOO import BAR." territory,
                            # which is not a trigger
                            return None
                        # from FOO.
                        imp_prefix = tuple(line[len(
                            'from '):].strip().split('.'))
                        return Trigger(self.lang, TRG_FORM_CPLN,
                                       "available-imports", pos, implicit,
                                       imp_prefix=imp_prefix)
                    elif line.startswith('import '):
                        # import FOO.
                        # figure out the dotted parts of "FOO" above
                        imp_prefix = tuple(line[len(
                            'import '):].strip().split('.'))
                        return Trigger(self.lang, TRG_FORM_CPLN,
                                       "available-imports", pos, implicit,
                                       imp_prefix=imp_prefix)
                    else:
                        return Trigger(self.lang, TRG_FORM_CPLN,
                                       "object-members", pos, implicit)
                elif ch in ("\"'"):
                    # String literal member completion, e.g. "foo".<|>
                    return Trigger(self.lang, TRG_FORM_CPLN,
                                   "literal-members", pos, implicit,
                                   citdl_expr="str")
            else:
                ch = None
            if DEBUG:
                print("trg_from_pos: no: non-ws char preceding '.' is not "\
                      "an identifier char or ')': %r" % ch)
            return None
        elif last_char == "_":
            # used for:
            #    * complete-magic-symbols
            # Triggering examples:
            #    def __<|>init__
            #    if __<|>name__ == '__main__':
            #    __<|>file__
            # Ensure double "__".
            if last_pos-1 < 0 or accessor.char_at_pos(last_pos-1) != "_":
                return None
            beforeChar = None
            beforeStyle = None
            if last_pos-2 >= 0:
                beforeChar = accessor.char_at_pos(last_pos-2)
                beforeStyle = accessor.style_at_pos(last_pos-2)
            if DEBUG:
                print("trg_from_pos:: checking magic symbol, beforeChar: %r" % (beforeChar))
            if beforeChar and beforeChar in "\"'" and beforeStyle in self.string_styles():
                # Inside a string literal, e.g. getattr(x, "__<|>
                if DEBUG:
                    print("trg_from_pos:: magic-symbols - string")
                return Trigger(self.lang, TRG_FORM_CPLN,
                               "magic-symbols", last_pos-1, implicit,
                               symbolstype="string")
            elif beforeChar == "." and beforeStyle != style:
                # Turned this off, as it interferes with regular "xxx." object
                # completions.
                return None
            if beforeStyle == style:
                # No change in styles between the characters -- abort.
                return None
            text = accessor.text_range(max(0, last_pos-20), last_pos-1).strip()
            if beforeChar and beforeChar in " \t":
                if text.endswith("def"):
                    # "def __<|>" -- completing a magic method definition.
                    posttext = accessor.text_range(pos,
                                                   min(accessor.length, pos+20)
                                                   ).replace(" ", "")
                    if DEBUG:
                        print("trg_from_pos:: magic-symbols - def")
                    return Trigger(self.lang, TRG_FORM_CPLN,
                                   "magic-symbols", last_pos-1, implicit,
                                   symbolstype="def",
                                   posttext=posttext)
            if DEBUG:
                print("trg_from_pos:: magic-symbols - global")
            return Trigger(self.lang, TRG_FORM_CPLN,
                           "magic-symbols", last_pos-1, implicit,
                           symbolstype="global", text=text)
        elif last_char == '(':
            # If the first non-whitespace character preceding the '(' in the
            # same statement is an identifier character then trigger calltip,
            #
            # Triggering examples:
            #    FOO.            FOO (           FOO; BAR(
            #    FOO.BAR(        FOO(BAR, BAZ(   FOO = {BAR(
            #    FOO(BAR(        FOO[BAR(
            # Non-triggering examples:
            #    FOO()(      a function call returning a callable that is
            #                immediately called again is too rare to bother
            #                with
            #    def foo(    might be a "calltip-base-signature", but this
            #                trigger is not yet implemented
            #    import (    will be handled by complete_members
            #    class Foo(  is an "complete-available-classes" trigger,
            #                but this is not yet implemented
            working_text = accessor.text_range(max(0, last_pos-200), last_pos)
            line = self._last_logical_line(working_text).rstrip()
            if line:
                ch = line[-1]
                if isident(ch) or isdigit(ch):
                    # If this is:
                    #    def foo(
                    # then this might be the (as yet unimplemented)
                    # "calltip-base-signature" trigger or it should not be a
                    # trigger point.
                    #
                    # If this is:
                    #    class Foo(
                    # then this should be the (as yet unimplemented)
                    # "complete-available-classes" trigger.
                    line = line.replace('\t', ' ')
                    lstripped = line.lstrip()
                    if lstripped.startswith("def"):
                        if DEBUG:
                            print("trg_from_pos: no: point is function declaration")
                    elif lstripped.startswith("class") and '(' not in lstripped:
                        # Second test is necessary to not exclude:
                        #    class Foo(bar(<|>
                        if DEBUG:
                            print("trg_from_pos: no: point is class declaration")
                    elif lstripped.startswith('from ') and ' import' in lstripped:
                        # Need better checks
                        # is it "from FOO import (<|>" ?
                        imp_prefix = tuple(lstripped[len(
                            'from '):lstripped.index(' import')].split('.'))
                        if DEBUG:
                            print("trg_from_pos: from FOO import (")
                        return Trigger(self.lang, TRG_FORM_CPLN,
                                       "module-members", pos, implicit,
                                       imp_prefix=imp_prefix)
                    else:
                        return Trigger(self.lang, TRG_FORM_CALLTIP,
                                       "call-signature", pos, implicit)
                else:
                    if DEBUG:
                        print("trg_from_pos: no: non-ws char preceding "\
                              "'(' is not an identifier char: %r" % ch)
            else:
                if DEBUG:
                    print("trg_from_pos: no: no chars preceding '('")
            return None
        elif last_char == ',':
            # "FOO(BAR, <|>" -- re-show the calltip for the open call; the
            # trigger pos is rewound to just after the opening paren.
            working_text = accessor.text_range(max(0, last_pos - 200), last_pos)
            line = self._last_logical_line(working_text)
            if line:
                last_bracket = line.rfind("(")
                if last_bracket >= 0:
                    pos = (pos - (len(line) - last_bracket))
                    return Trigger(self.lang, TRG_FORM_CALLTIP,
                                   "call-signature", pos, implicit)
                return None
            else:
                return None
        elif pos >= 2 and style in (self.identifier_style, self.keyword_style):
            # 2 character trigger for local symbols
            if DEBUG:
                if style == self.identifier_style:
                    print("Identifier style")
                else:
                    print("Identifier keyword style")
            # Previous char also needs to be an identifier/word, then the one
            # before that needs to be something different (operator/space).
            if (accessor.style_at_pos(last_pos-1) != style or
                    (pos > 2 and accessor.style_at_pos(last_pos-2) == style)):
                if DEBUG:
                    print("Not a block of two ident/word chars")
                return None
            if pos > 2 and accessor.char_at_pos(last_pos-2) == ".":
                if DEBUG:
                    print(" preceeded by '.' operator - not a trigger")
                return None
            # Check if it makes sense to show the completions here. If defining
            # a class name, or function name, you don't want to see completions.
            # Also, do not override another completion type (e.g. imports).
            start = accessor.line_start_pos_from_pos(pos)
            preceeding_text = accessor.text_range(start, last_pos-2).strip()
            if preceeding_text:
                first_word = preceeding_text.split(" ")[0]
                if first_word in ("class", "def", "import", "from", "except"):
                    if DEBUG:
                        print(" no trigger, as starts with %r" % (first_word, ))
                    # Don't trigger over the top of another trigger, i.e.
                    #    complete-available-imports
                    #    complete-module-members
                    #    complete-available-exceptions
                    return None
            citdl_expr = accessor.text_range(last_pos-1, last_pos+1)
            if DEBUG:
                print(" triggered 2 char symbol trigger: %r" % (citdl_expr, ))
            return Trigger(self.lang, TRG_FORM_CPLN, "local-symbols",
                           last_pos-1, implicit,
                           citdl_expr=citdl_expr,
                           preceeding_text=preceeding_text)

    def _last_logical_line(self, text):
        """Return the last logical line of `text`, joining any
        backslash-continued physical lines into one."""
        lines = text.splitlines(0) or ['']
        logicalline = lines.pop()
        while lines and lines[-1].endswith('\\'):
            logicalline = lines.pop()[:-1] + ' ' + logicalline
        return logicalline
class PythonImportHandler(ImportHandler):
    """Find and scan importable Python modules for the codeintel db.

    Knows how to determine an interpreter's sys.path, which files in a
    directory tree are scannable, and what names a directory makes
    importable.
    """
    lang = lang  # XXX do this for other langs as well
    PATH_ENV_VAR = "PYTHONPATH"
    sep = '.'

    def __init__(self, mgr):
        ImportHandler.__init__(self, mgr)
        self.__stdCIXScanId = None

    # TODO: may not be used. If so, drop it.
    def _shellOutForPath(self, compiler):
        """Run `compiler` and return its sys.path as a list of dirs.

        The cwd entry (empty string or os.getcwd()) is dropped because
        the current directory is handled separately by callers.
        """
        import process
        argv = [compiler, "-c", "import sys; print('\\n'.join(sys.path))"]
        # Can't use -E to ignore PYTHONPATH because older versions of
        # Python don't have it (e.g. v1.5.2).  Instead, strip the
        # Python-specific env vars from the subprocess environment.
        env = dict(os.environ)
        for var in ("PYTHONPATH", "PYTHONHOME", "PYTHONSTARTUP"):
            env.pop(var, None)
        p = process.ProcessOpen(argv, env=env, stdin=None)
        stdout, stderr = p.communicate()
        path = stdout.splitlines(0)
        if path and (path[0] == "" or path[0] == os.getcwd()):
            del path[0]  # cwd handled separately
        return path

    def setCorePath(self, compiler=None, extra=None):
        """Set self.corePath from `compiler` (found on PATH if None)."""
        if compiler is None:
            import which
            compiler = which.which("python")
        self.corePath = self._shellOutForPath(compiler)

    def _findScannableFiles(self, xxx_todo_changeme,
                            dirname, names):
        """Directory-walk callback: append scannable file paths to `files`.

        `xxx_todo_changeme` is the walk "arg" tuple:
        (files, searchedDirs, skipRareImports, importableOnly).
        Prunes the walk by deleting entries from `names` in place.
        """
        (files, searchedDirs, skipRareImports,
         importableOnly) = xxx_todo_changeme
        # Compare dirs case-insensitively on Windows.
        if sys.platform.startswith("win"):
            cpath = dirname.lower()
        else:
            cpath = dirname
        if cpath in searchedDirs:
            # Already searched this dir: empty `names` in place so the
            # walk does not descend further.
            while names:
                del names[0]
            return
        else:
            searchedDirs[cpath] = 1
        if skipRareImports:
            if (basename(dirname) == "encodings"
                    and "undefined.py" in names):
                # Skip most of the specific encoding definitions (saves
                # about 50 files).
                names = [n for n in names if n == "__init__.py"
                         or os.path.splitext(n)[0].endswith("_codec")]
        for i in range(len(names)-1, -1, -1):  # backward so can del from list
            path = os.path.join(dirname, names[i])
            if os.path.isdir(path):
                if skipRareImports:
                    # Skip Python's test package (saves over 200 files)
                    # and other likely test dirs.
                    if names[i] in ("test", "tests"):
                        del names[i]
                        continue
                if importableOnly:
                    possibles = [os.path.join(path, "__init__.py"),
                                 os.path.join(path, "__init__.pyc"),
                                 os.path.join(path, "__init__.pyo")]
                    for possible in possibles:
                        if os.path.isfile(possible):
                            break
                    else:
                        del names[i]  # don't traverse non-package dirs
                        continue
                if path.endswith(os.path.join("win32com", "gen_py")):
                    del names[i]
                    continue
            elif os.path.splitext(names[i])[1] in self._gen_suffixes():
                # XXX The list of Python extensions should be settable on
                #     the ImportHandler and Komodo should set whatever is
                #     set in prefs.
                # XXX This check for "Python" files should probably include
                #     python scripts, which might likely not have the
                #     extension: need to grow filetype-from-content smarts.
                files.append(path)

    def _gen_suffixes(self):
        """Generate a sequence of scannable file suffixes in the
        preferred order of scanning.
        """
        yield ".py"
        yield ".pyw"
        if _SCAN_BINARY_FILES:
            yield ".pyc"
            yield ".pyo"
            # C-extension suffixes for this interpreter (e.g. ".pyd", ".so").
            for suffix, mode, mod_type in imp.get_suffixes():
                if suffix[0] == '.' and mod_type == imp.C_EXTENSION:
                    yield suffix

    def find_importables_in_dir(self, imp_dir):
        """See citadel.py::ImportHandler.find_importables_in_dir() for
        details.

        Importables for Python look like this:
            {"foo": ("foo.py", None, False),
             "foolib": ("foolib/__init__.py", "__init__", False),
             "bar": ("bar.pyc", None, False),
             "baz": ("baz.pyo", None, False),
             "qoox": ("qoox.pyd", None, False),
             "qooz": ("qooz.so", None, False)}

        Note: .pyd and .so handling depends on the platform.

        If several files happen to have the same name but different
        suffixes, the one with the preferred suffix wins. The suffix
        preference is defined by the order of elements in the sequence
        generated by _gen_suffixes().
        This particularly means that sources always win over binaries.
        """
        if imp_dir == "<Unsaved>":
            # TODO: stop these getting in here.
            return {}
        importables = {}
        if os.path.isdir(imp_dir):
            # Rank each suffix by its position in _gen_suffixes() (1-based;
            # package dirs get rank 0 so they always win the sort below).
            suffixes = dict((s, i) for i, s
                            in enumerate(self._gen_suffixes(), 1))
            modules = []
            for name in os.listdir(imp_dir):
                mod, suffix = os.path.splitext(name)
                if mod != '__init__':
                    init = os.path.join(name, '__init__.py')
                    if os.path.exists(os.path.join(imp_dir, init)):
                        # A package directory.
                        modules.append((0, name, (
                            init, '__init__', False)))
                    elif suffix in suffixes:
                        modules.append((suffixes[suffix], mod,
                                        (name, None, False)))
            # Stable sort by rank, then keep the first (best) entry per name.
            modules.sort(key=lambda entry: entry[0])
            for _, mod, importable in modules:
                if mod not in importables:
                    importables[mod] = importable
        return importables
class PythonCILEDriver(CILEDriver):
    """Drives CIX generation for Python buffers (source and binary)."""
    lang = lang

    def scan_purelang(self, buf):
        """Scan Python source text in `buf`; return its CIX element tree."""
        log.info("scan_purelang: path: %r lang: %s", buf.path, buf.lang)
        return pythoncile.scan_et(buf.accessor.text, buf.path, lang=self.lang)

    def scan_binary(self, buf):
        """Scan a binary module by shelling out to a Python interpreter."""
        log.info("scan_binary: path: %r lang: %s", buf.path, buf.lang)
        from codeintel2 import pybinary
        interpreter = buf.langintel.interpreter_from_env(buf.env)
        if not interpreter:
            raise CodeIntelError("cannot find a usable Python interpreter")
        return tree_from_cix(pybinary.safe_scan(buf.path, interpreter))
#---- internal support stuff
#---- registration
def register(mgr):
    """Register Python language support with the codeintel Manager."""
    mgr.set_lang_info(lang,
                      silvercity_lexer=PythonLexer(),
                      buf_class=PythonBuffer,
                      import_handler_class=PythonImportHandler,
                      langintel_class=PythonLangIntel,
                      cile_driver_class=PythonCILEDriver,
                      is_cpln_lang=True)
| |
import py, pytest
from _pytest.mark import MarkGenerator as Mark
class TestMark:
    """Tests for the pytest.mark attribute-based marker API."""
    def test_markinfo_repr(self):
        # repr() of a MarkInfo must not raise.
        from _pytest.mark import MarkInfo
        m = MarkInfo("hello", (1,2), {})
        repr(m)

    def test_pytest_exists_in_namespace_all(self):
        # 'mark' is part of the public py.test/pytest namespace.
        assert 'mark' in py.test.__all__
        assert 'mark' in pytest.__all__

    def test_pytest_mark_notcallable(self):
        # The bare mark generator itself is not callable.
        mark = Mark()
        pytest.raises((AttributeError, TypeError), mark)

    def test_pytest_mark_name_starts_with_underscore(self):
        # Underscore-prefixed mark names are rejected.
        mark = Mark()
        pytest.raises(AttributeError, getattr, mark, '_some_name')

    def test_pytest_mark_bare(self):
        # Applying a bare mark decorator sets the attribute on the function.
        mark = Mark()
        def f():
            pass
        mark.hello(f)
        assert f.hello

    def test_pytest_mark_keywords(self):
        # Keyword arguments are recorded in the mark's kwargs.
        mark = Mark()
        def f():
            pass
        mark.world(x=3, y=4)(f)
        assert f.world
        assert f.world.kwargs['x'] == 3
        assert f.world.kwargs['y'] == 4

    def test_apply_multiple_and_merge(self):
        # Re-applying the same mark merges kwargs (later values win) and
        # leaves positional args untouched.
        mark = Mark()
        def f():
            pass
        mark.world
        mark.world(x=3)(f)
        assert f.world.kwargs['x'] == 3
        mark.world(y=4)(f)
        assert f.world.kwargs['x'] == 3
        assert f.world.kwargs['y'] == 4
        mark.world(y=1)(f)
        assert f.world.kwargs['y'] == 1
        assert len(f.world.args) == 0

    def test_pytest_mark_positional(self):
        # Positional args are stored on the applied mark.
        mark = Mark()
        def f():
            pass
        mark.world("hello")(f)
        assert f.world.args[0] == "hello"
        mark.world("world")(f)

    def test_pytest_mark_positional_func_and_keyword(self):
        # A function passed positionally together with kwargs is stored as
        # a mark argument, not treated as the decorated function.
        mark = Mark()
        def f():
            raise Exception
        m = mark.world(f, omega="hello")
        def g():
            pass
        assert m(g) == g
        assert g.world.args[0] is f
        assert g.world.kwargs["omega"] == "hello"

    def test_pytest_mark_reuse(self):
        # A mark decorator object can be reused on several functions
        # without leaking args/kwargs between them.
        mark = Mark()
        def f():
            pass
        w = mark.some
        w("hello", reason="123")(f)
        assert f.some.args[0] == "hello"
        assert f.some.kwargs['reason'] == "123"
        def g():
            pass
        w("world", reason2="456")(g)
        assert g.some.args[0] == "world"
        assert 'reason' not in g.some.kwargs
        assert g.some.kwargs['reason2'] == "456"
def test_ini_markers(testdir):
    # Markers declared in the ini file are exposed via the "markers" ini value.
    testdir.makeini("""
        [pytest]
        markers =
            a1: this is a webtest marker
            a2: this is a smoke marker
    """)
    testdir.makepyfile("""
        def test_markers(pytestconfig):
            markers = pytestconfig.getini("markers")
            print (markers)
            assert len(markers) >= 2
            assert markers[0].startswith("a1:")
            assert markers[1].startswith("a2:")
    """)
    rec = testdir.inline_run()
    rec.assertoutcome(passed=1)
def test_markers_option(testdir):
    # "--markers" lists markers registered in the ini file.
    testdir.makeini("""
        [pytest]
        markers =
            a1: this is a webtest marker
            a1some: another marker
    """)
    result = testdir.runpytest("--markers", )
    result.stdout.fnmatch_lines([
        "*a1*this is a webtest*",
        "*a1some*another marker",
    ])
def test_markers_option_with_plugin_in_current_dir(testdir):
    # A plugin loaded from the test dir can register markers that the
    # "--markers" option must then list.
    testdir.makeconftest('pytest_plugins = "flip_flop"')
    testdir.makepyfile(flip_flop="""\
        def pytest_configure(config):
            config.addinivalue_line("markers", "flip:flop")
        def pytest_generate_tests(metafunc):
            try:
                mark = metafunc.function.flipper
            except AttributeError:
                return
            metafunc.parametrize("x", (10, 20))""")
    testdir.makepyfile("""\
        import pytest
        @pytest.mark.flipper
        def test_example(x):
            assert x""")
    result = testdir.runpytest("--markers")
    result.stdout.fnmatch_lines(["*flip*flop*"])
def test_mark_on_pseudo_function(testdir):
    # Marking with an argument that would raise if called (lambda: 0/0)
    # must not break collection or the test run.
    testdir.makepyfile("""
        import pytest
        @pytest.mark.r(lambda x: 0/0)
        def test_hello():
            pass
    """)
    reprec = testdir.inline_run()
    reprec.assertoutcome(passed=1)
def test_strict_prohibits_unregistered_markers(testdir):
    # With --strict, using a marker that was never registered fails the run.
    testdir.makepyfile("""
        import pytest
        @pytest.mark.unregisteredmark
        def test_hello():
            pass
    """)
    result = testdir.runpytest("--strict")
    assert result.ret != 0
    result.stdout.fnmatch_lines([
        "*unregisteredmark*not*registered*",
    ])
# Each spec is (mark expression for "-m", expected passing test names).
@pytest.mark.parametrize("spec", [
    ("xyz", ("test_one",)),
    ("xyz and xyz2", ()),
    ("xyz2", ("test_two",)),
    ("xyz or xyz2", ("test_one", "test_two"),)
])
def test_mark_option(spec, testdir):
    # "-m EXPR" selects tests by marker expression.
    testdir.makepyfile("""
        import pytest
        @pytest.mark.xyz
        def test_one():
            pass
        @pytest.mark.xyz2
        def test_two():
            pass
    """)
    opt, passed_result = spec
    rec = testdir.inline_run("-m", opt)
    passed, skipped, fail = rec.listoutcomes()
    passed = [x.nodeid.split("::")[-1] for x in passed]
    assert len(passed) == len(passed_result)
    assert list(passed) == list(passed_result)
# Each spec is (mark expression for "-m", expected passing test names).
@pytest.mark.parametrize("spec", [
    ("interface", ("test_interface",)),
    ("not interface", ("test_nointer",)),
])
def test_mark_option_custom(spec, testdir):
    # "-m" also matches keywords added dynamically by a
    # pytest_collection_modifyitems hook.
    testdir.makeconftest("""
        import pytest
        def pytest_collection_modifyitems(items):
            for item in items:
                if "interface" in item.nodeid:
                    item.keywords["interface"] = pytest.mark.interface
    """)
    testdir.makepyfile("""
        def test_interface():
            pass
        def test_nointer():
            pass
    """)
    opt, passed_result = spec
    rec = testdir.inline_run("-m", opt)
    passed, skipped, fail = rec.listoutcomes()
    passed = [x.nodeid.split("::")[-1] for x in passed]
    assert len(passed) == len(passed_result)
    assert list(passed) == list(passed_result)
# Each spec is (keyword expression for "-k", expected passing test names).
@pytest.mark.parametrize("spec", [
    ("interface", ("test_interface",)),
    ("not interface", ("test_nointer", "test_pass")),
    ("pass", ("test_pass",)),
    ("not pass", ("test_interface", "test_nointer")),
])
def test_keyword_option_custom(spec, testdir):
    # "-k EXPR" matches on substrings of test names.
    testdir.makepyfile("""
        def test_interface():
            pass
        def test_nointer():
            pass
        def test_pass():
            pass
    """)
    opt, passed_result = spec
    rec = testdir.inline_run("-k", opt)
    passed, skipped, fail = rec.listoutcomes()
    passed = [x.nodeid.split("::")[-1] for x in passed]
    assert len(passed) == len(passed_result)
    assert list(passed) == list(passed_result)
# Each spec is (keyword expression for "-k", expected passing test ids) --
# checks that "-k" can select individual parametrized variants.
@pytest.mark.parametrize("spec", [
    ("None", ("test_func[None]",)),
    ("1.3", ("test_func[1.3]",)),
    ("2-3", ("test_func[2-3]",))
])
def test_keyword_option_parametrize(spec, testdir):
    testdir.makepyfile("""
        import pytest
        @pytest.mark.parametrize("arg", [None, 1.3, "2-3"])
        def test_func(arg):
            pass
    """)
    opt, passed_result = spec
    rec = testdir.inline_run("-k", opt)
    passed, skipped, fail = rec.listoutcomes()
    passed = [x.nodeid.split("::")[-1] for x in passed]
    assert len(passed) == len(passed_result)
    assert list(passed) == list(passed_result)
class TestFunctional:
    """End-to-end marker behavior: propagation from module/class/function
    level, merging of same-named marks, and dynamic marking."""

    def test_mark_per_function(self, testdir):
        # A mark applied to a function shows up as an attribute on it.
        p = testdir.makepyfile("""
            import pytest
            @pytest.mark.hello
            def test_hello():
                assert hasattr(test_hello, 'hello')
        """)
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines(["*1 passed*"])

    def test_mark_per_module(self, testdir):
        # A module-level `pytestmark` lands in item.keywords.
        item = testdir.getitem("""
            import pytest
            pytestmark = pytest.mark.hello
            def test_func():
                pass
        """)
        keywords = item.keywords
        assert 'hello' in keywords

    def test_marklist_per_class(self, testdir):
        # A list of marks in a class-level `pytestmark` is applied.
        item = testdir.getitem("""
            import pytest
            class TestClass:
                pytestmark = [pytest.mark.hello, pytest.mark.world]
                def test_func(self):
                    assert TestClass.test_func.hello
                    assert TestClass.test_func.world
        """)
        keywords = item.keywords
        assert 'hello' in keywords

    def test_marklist_per_module(self, testdir):
        # A list of marks in a module-level `pytestmark` is applied.
        item = testdir.getitem("""
            import pytest
            pytestmark = [pytest.mark.hello, pytest.mark.world]
            class TestClass:
                def test_func(self):
                    assert TestClass.test_func.hello
                    assert TestClass.test_func.world
        """)
        keywords = item.keywords
        assert 'hello' in keywords
        assert 'world' in keywords

    @pytest.mark.skipif("sys.version_info < (2,6)")
    def test_mark_per_class_decorator(self, testdir):
        # @pytest.mark.X as a class decorator (class decorators need 2.6+).
        item = testdir.getitem("""
            import pytest
            @pytest.mark.hello
            class TestClass:
                def test_func(self):
                    assert TestClass.test_func.hello
        """)
        keywords = item.keywords
        assert 'hello' in keywords

    @pytest.mark.skipif("sys.version_info < (2,6)")
    def test_mark_per_class_decorator_plus_existing_dec(self, testdir):
        # A class decorator mark and a class-level pytestmark combine.
        item = testdir.getitem("""
            import pytest
            @pytest.mark.hello
            class TestClass:
                pytestmark = pytest.mark.world
                def test_func(self):
                    assert TestClass.test_func.hello
                    assert TestClass.test_func.world
        """)
        keywords = item.keywords
        assert 'hello' in keywords
        assert 'world' in keywords

    def test_merging_markers(self, testdir):
        # Same-named marks from module/class/function level merge: args
        # accumulate and kwargs merge across the levels.
        p = testdir.makepyfile("""
            import pytest
            pytestmark = pytest.mark.hello("pos1", x=1, y=2)
            class TestClass:
                # classlevel overrides module level
                pytestmark = pytest.mark.hello(x=3)
                @pytest.mark.hello("pos0", z=4)
                def test_func(self):
                    pass
        """)
        items, rec = testdir.inline_genitems(p)
        item, = items
        keywords = item.keywords
        marker = keywords['hello']
        assert marker.args == ("pos0", "pos1")
        assert marker.kwargs == {'x': 1, 'y': 2, 'z': 4}
        # test the new __iter__ interface
        l = list(marker)
        assert len(l) == 3
        assert l[0].args == ("pos0",)
        assert l[1].args == ()
        assert l[2].args == ("pos1", )

    @pytest.mark.xfail(reason='unfixed')
    def test_merging_markers_deep(self, testdir):
        # issue 199 - propagate markers into nested classes
        p = testdir.makepyfile("""
            import pytest
            class TestA:
                pytestmark = pytest.mark.a
                def test_b(self):
                    assert True
                class TestC:
                    # this one didnt get marked
                    def test_d(self):
                        assert True
        """)
        items, rec = testdir.inline_genitems(p)
        for item in items:
            print (item, item.keywords)
            assert 'a' in item.keywords

    def test_mark_with_wrong_marker(self, testdir):
        # A non-mark `pytestmark` object makes collection fail with TypeError.
        reprec = testdir.inline_runsource("""
                import pytest
                class pytestmark:
                    pass
                def test_func():
                    pass
        """)
        l = reprec.getfailedcollections()
        assert len(l) == 1
        assert "TypeError" in str(l[0].longrepr)

    def test_mark_dynamically_in_funcarg(self, testdir):
        # request.applymarker() from a fixture adds the mark to the item.
        testdir.makeconftest("""
            import pytest
            def pytest_funcarg__arg(request):
                request.applymarker(pytest.mark.hello)
            def pytest_terminal_summary(terminalreporter):
                l = terminalreporter.stats['passed']
                terminalreporter.writer.line("keyword: %s" % l[0].keywords)
        """)
        testdir.makepyfile("""
            def test_func(arg):
                pass
        """)
        result = testdir.runpytest()
        result.stdout.fnmatch_lines([
            "keyword: *hello*"
        ])

    def test_merging_markers_two_functions(self, testdir):
        # Stacked decorators with the same mark name iterate inner-first.
        p = testdir.makepyfile("""
            import pytest
            @pytest.mark.hello("pos1", z=4)
            @pytest.mark.hello("pos0", z=3)
            def test_func():
                pass
        """)
        items, rec = testdir.inline_genitems(p)
        item, = items
        keywords = item.keywords
        marker = keywords['hello']
        l = list(marker)
        assert len(l) == 2
        assert l[0].args == ("pos0",)
        assert l[1].args == ("pos1",)

    def test_no_marker_match_on_unmarked_names(self, testdir):
        # "-m" matching on a plain test name must select nothing.
        p = testdir.makepyfile("""
            import pytest
            @pytest.mark.shouldmatch
            def test_marked():
                assert 1
            def test_unmarked():
                assert 1
        """)
        reprec = testdir.inline_run("-m", "test_unmarked", p)
        passed, skipped, failed = reprec.listoutcomes()
        assert len(passed) + len(skipped) + len(failed) == 0
        dlist = reprec.getcalls("pytest_deselected")
        deselected_tests = dlist[0].items
        assert len(deselected_tests) == 2

    def test_keywords_at_node_level(self, testdir):
        # Keywords set on request.keywords at session scope are visible to
        # function-scoped fixtures, alongside regular marks.
        testdir.makepyfile("""
            import pytest
            @pytest.fixture(scope="session", autouse=True)
            def some(request):
                request.keywords["hello"] = 42
                assert "world" not in request.keywords
            @pytest.fixture(scope="function", autouse=True)
            def funcsetup(request):
                assert "world" in request.keywords
                assert "hello" in  request.keywords
            @pytest.mark.world
            def test_function():
                pass
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=1)

    def test_keyword_added_for_session(self, testdir):
        # session.add_marker() accepts a name or a mark (but not other
        # objects) and the marks are visible on every item.
        testdir.makeconftest("""
            import pytest
            def pytest_collection_modifyitems(session):
                session.add_marker("mark1")
                session.add_marker(pytest.mark.mark2)
                session.add_marker(pytest.mark.mark3)
                pytest.raises(ValueError, lambda:
                        session.add_marker(10))
        """)
        testdir.makepyfile("""
            def test_some(request):
                assert "mark1" in request.keywords
                assert "mark2" in request.keywords
                assert "mark3" in request.keywords
                assert 10 not in request.keywords
                marker = request.node.get_marker("mark1")
                assert marker.name == "mark1"
                assert marker.args == ()
                assert marker.kwargs == {}
        """)
        reprec = testdir.inline_run("-m", "mark1")
        reprec.assertoutcome(passed=1)
class TestKeywordSelection:
def test_select_simple(self, testdir):
file_test = testdir.makepyfile("""
def test_one():
assert 0
class TestClass(object):
def test_method_one(self):
assert 42 == 43
""")
def check(keyword, name):
reprec = testdir.inline_run("-s", "-k", keyword, file_test)
passed, skipped, failed = reprec.listoutcomes()
assert len(failed) == 1
assert failed[0].nodeid.split("::")[-1] == name
assert len(reprec.getcalls('pytest_deselected')) == 1
for keyword in ['test_one', 'est_on']:
check(keyword, 'test_one')
check('TestClass and test', 'test_method_one')
@pytest.mark.parametrize("keyword", [
'xxx', 'xxx and test_2', 'TestClass', 'xxx and not test_1',
'TestClass and test_2', 'xxx and TestClass and test_2'])
def test_select_extra_keywords(self, testdir, keyword):
p = testdir.makepyfile(test_select="""
def test_1():
pass
class TestClass:
def test_2(self):
pass
""")
testdir.makepyfile(conftest="""
import pytest
@pytest.hookimpl(hookwrapper=True)
def pytest_pycollect_makeitem(name):
outcome = yield
if name == "TestClass":
item = outcome.get_result()
item.extra_keyword_matches.add("xxx")
""")
reprec = testdir.inline_run(p.dirpath(), '-s', '-k', keyword)
py.builtin.print_("keyword", repr(keyword))
passed, skipped, failed = reprec.listoutcomes()
assert len(passed) == 1
assert passed[0].nodeid.endswith("test_2")
dlist = reprec.getcalls("pytest_deselected")
assert len(dlist) == 1
assert dlist[0].items[0].name == 'test_1'
def test_select_starton(self, testdir):
threepass = testdir.makepyfile(test_threepass="""
def test_one(): assert 1
def test_two(): assert 1
def test_three(): assert 1
""")
reprec = testdir.inline_run("-k", "test_two:", threepass)
passed, skipped, failed = reprec.listoutcomes()
assert len(passed) == 2
assert not failed
dlist = reprec.getcalls("pytest_deselected")
assert len(dlist) == 1
item = dlist[0].items[0]
assert item.name == "test_one"
def test_keyword_extra(self, testdir):
p = testdir.makepyfile("""
def test_one():
assert 0
test_one.mykeyword = True
""")
reprec = testdir.inline_run("-k", "mykeyword", p)
passed, skipped, failed = reprec.countoutcomes()
assert failed == 1
@pytest.mark.xfail
def test_keyword_extra_dash(self, testdir):
p = testdir.makepyfile("""
def test_one():
assert 0
test_one.mykeyword = True
""")
# with argparse the argument to an option cannot
# start with '-'
reprec = testdir.inline_run("-k", "-mykeyword", p)
passed, skipped, failed = reprec.countoutcomes()
assert passed + skipped + failed == 0
    def test_no_magic_values(self, testdir):
        """Make sure the tests do not match on magic values,
        no double underscored values, like '__dict__',
        and no instance values, like '()'.
        """
        p = testdir.makepyfile("""
            def test_one(): assert 1
        """)
        def assert_test_is_not_selected(keyword):
            # Run with the given -k expression and require full deselection.
            reprec = testdir.inline_run("-k", keyword, p)
            passed, skipped, failed = reprec.countoutcomes()
            dlist = reprec.getcalls("pytest_deselected")
            assert passed + skipped + failed == 0
            deselected_tests = dlist[0].items
            assert len(deselected_tests) == 1
        assert_test_is_not_selected("__")
        assert_test_is_not_selected("()")
| |
"""
Backrefs Re parser.
Licensed under MIT
Copyright (c) 2011 - 2018 Isaac Muse <isaacmuse@gmail.com>
"""
from __future__ import unicode_literals
import re as _re
from . import util as _util
import sre_parse as _sre_parse
import unicodedata as _unicodedata
from . import uniprops as _uniprops
__all__ = ("ReplaceTemplate",)
# Scoped inline flags ((?flags:...)) require Python 3.6+.
_SCOPED_FLAG_SUPPORT = _util.PY36
# Character classes used while tokenizing patterns and templates.
_ASCII_LETTERS = frozenset(
    (
        'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
        'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
        'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
        'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'
    )
)
_DIGIT = frozenset(('0', '1', '2', '3', '4', '5', '6', '7', '8', '9'))
_OCTAL = frozenset(('0', '1', '2', '3', '4', '5', '6', '7'))
_HEX = frozenset(('a', 'b', 'c', 'd', 'e', 'f', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9'))
_LETTERS_UNDERSCORE = _ASCII_LETTERS | frozenset(('_',))
_WORD = _LETTERS_UNDERSCORE | _DIGIT
_STANDARD_ESCAPES = frozenset(('a', 'b', 'f', 'n', 'r', 't', 'v'))
_CURLY_BRACKETS = frozenset(('{', '}'))
# Characters ignored inside Unicode property names ("Some Name" == "somename").
_PROPERTY_STRIP = frozenset((' ', '-', '_'))
_PROPERTY = _WORD | _DIGIT | _PROPERTY_STRIP
# Global regex flags: 'a' (ASCII) exists only on Python 3.
if _util.PY3:
    _GLOBAL_FLAGS = frozenset(('a', 'u', 'L'))
else:
    _GLOBAL_FLAGS = frozenset(('u', 'L'))
_SCOPED_FLAGS = frozenset(('i', 'm', 's', 'u', 'x'))
# Ordinals of '{' and '}' (escapes resolving to these interact with format mode).
_CURLY_BRACKETS_ORD = frozenset((0x7b, 0x7d))
# Case upper or lower
_UPPER = 1
_LOWER = 2
# Format Constants
_BACK_SLASH_TRANSLATION = {
    "\\a": '\a',
    "\\b": '\b',
    "\\f": '\f',
    "\\r": '\r',
    "\\t": '\t',
    "\\n": '\n',
    "\\v": '\v',
    "\\\\": '\\'
}
# Valid format conversion types ({name!r} etc.); '!a' only exists on Python 3.
_FMT_CONV_TYPE = ('a', 'r', 's') if _util.PY3 else ('r', 's')
class LoopException(Exception):
    """Raised when a global-flag retry would otherwise recurse forever."""
class GlobalRetryException(Exception):
    """Raised to restart parsing from scratch when a global flag changes mid-pattern."""
class _SearchParser(object):
    """Search Template."""
    # Escape references introduced by this module (must not leak through verbatim).
    _new_refs = ("e", "l", "L", "c", "C", "p", "P", "N", "Q", "E", "m", "M", "R", "X")
    _re_escape = r"\x1b"
    # \m / \M word-boundary equivalents.
    _re_start_wb = r"\b(?=\w)"
    _re_end_wb = r"\b(?<=\w)"
    # \R line-break equivalents; byte patterns cannot use \u2028/\u2029.
    _line_break = r'(?:\r\n|(?!\r\n)[\n\v\f\r\x85\u2028\u2029])'
    _binary_line_break = r'(?:\r\n|(?!\r\n)[\n\v\f\r\x85])'
    # (?:\PM\pM*(?!\pM)) ~= (?>\PM\pM*)
    _grapheme_cluster = r'(?:%s%s*(?!%s))'
def __init__(self, search, re_verbose=False, re_unicode=None):
"""Initialize."""
if isinstance(search, _util.binary_type):
self.binary = True
else:
self.binary = False
if self.binary:
self._re_line_break = self._binary_line_break
else:
self._re_line_break = self._line_break
self.search = search
self.re_verbose = re_verbose
self.re_unicode = re_unicode
    def process_quotes(self, text):
        """Process \\Q...\\E quoted regions, regex-escaping their content."""
        escaped = False
        in_quotes = False
        current = []
        quoted = []
        i = _util.StringIter(text)
        iter(i)
        for t in i:
            if not escaped and t == "\\":
                escaped = True
            elif escaped:
                escaped = False
                if t == "E":
                    # \E closes a quoted region; escape what was gathered.
                    if in_quotes:
                        current.append(_re.escape("".join(quoted)))
                        quoted = []
                        in_quotes = False
                elif t == "Q" and not in_quotes:
                    in_quotes = True
                elif in_quotes:
                    quoted.extend(["\\", t])
                else:
                    current.extend(["\\", t])
            elif in_quotes:
                quoted.extend(t)
            else:
                current.append(t)
        # A trailing lone backslash is kept literally.
        if in_quotes and escaped:
            quoted.append("\\")
        elif escaped:
            current.append("\\")
        # An unterminated \Q quotes through to the end of the pattern.
        if quoted:
            current.append(_re.escape("".join(quoted)))
        return "".join(current)
    def verbose_comment(self, t, i):
        """Handle verbose-mode `#` comments, keeping our custom escapes inert inside them."""
        current = []
        escaped = False
        try:
            while t != "\n":
                if not escaped and t == "\\":
                    escaped = True
                    current.append(t)
                elif escaped:
                    escaped = False
                    if t in self._new_refs:
                        # Double the backslash so our reference is not expanded later.
                        current.append("\\")
                        current.append(t)
                    else:
                        current.append(t)
                else:
                    current.append(t)
                t = next(i)
        except StopIteration:
            pass
        if t == "\n":
            current.append(t)
        return current
    def flags(self, text, scoped=False):
        """Analyze flags.

        Updates `self.unicode`/`self.verbose`; raises `GlobalRetryException`
        when a global flag changed after parsing already began.
        """
        global_retry = False
        if _util.PY3 and ('a' in text or 'L' in text) and self.unicode:
            # ASCII/Locale mode switches Unicode handling off.
            self.unicode = False
            if not _SCOPED_FLAG_SUPPORT or not scoped:
                self.temp_global_flag_swap["unicode"] = True
                global_retry = True
        elif 'u' in text and not self.unicode and not self.binary:
            self.unicode = True
            if not _SCOPED_FLAG_SUPPORT or not scoped:
                self.temp_global_flag_swap["unicode"] = True
                global_retry = True
        if _SCOPED_FLAG_SUPPORT and '-x' in text and self.verbose:
            self.verbose = False
        elif 'x' in text and not self.verbose:
            self.verbose = True
            if not _SCOPED_FLAG_SUPPORT or not scoped:
                self.temp_global_flag_swap["verbose"] = True
                global_retry = True
        if global_retry:
            raise GlobalRetryException('Global Retry')
    def get_unicode_property(self, i):
        """Get Unicode property.

        Accepts `\\pL` single-letter form or `\\p{name}` / `\\p{name=value}`;
        returns a lowercased `(property, value)` tuple.
        """
        index = i.index
        prop = []
        value = []
        try:
            c = next(i)
            if c.upper() in _ASCII_LETTERS:
                # Single-letter property, e.g. \pL.
                prop.append(c)
            elif c != '{':
                raise SyntaxError("Unicode property missing '{' at %d!" % (i.index - 1))
            else:
                c = next(i)
                if c == '^':
                    # Negation caret is carried along with the property name.
                    prop.append(c)
                    c = next(i)
                while c not in (':', '=', '}'):
                    if c not in _PROPERTY:
                        raise SyntaxError('Invalid Unicode property character at %d!' % (i.index - 1))
                    if c not in _PROPERTY_STRIP:
                        prop.append(c)
                    c = next(i)
                if c in (':', '='):
                    # 'name=value' (or 'name:value') form.
                    c = next(i)
                    while c != '}':
                        if c not in _PROPERTY:
                            raise SyntaxError('Invalid Unicode property character at %d!' % (i.index - 1))
                        if c not in _PROPERTY_STRIP:
                            value.append(c)
                        c = next(i)
                    if not value:
                        raise SyntaxError('Invalid Unicode property!')
        except StopIteration:
            raise SyntaxError("Missing or unmatched '{' at %d!" % index)
        return ''.join(prop).lower(), ''.join(value).lower()
    def get_named_unicode(self, i):
        """Get the Unicode character name from a `\\N{...}` escape (without braces)."""
        index = i.index
        value = []
        try:
            if next(i) != '{':
                raise ValueError("Named Unicode missing '{' %d!" % (i.index - 1))
            c = next(i)
            while c != '}':
                value.append(c)
                c = next(i)
        except Exception:
            # Both the ValueError above and StopIteration land here.
            raise SyntaxError("Unmatched '{' at %d!" % index)
        return ''.join(value)
    def get_wide_unicode(self, i):
        """Get wide Unicode (`\\UXXXXXXXX`): eight hex digits, constrained to `000[01]xxxx`."""
        value = []
        # First three digits must be zero (code points cap at 0x10FFFF).
        for x in range(3):
            c = next(i)
            if c == '0':
                value.append(c)
            else:  # pragma: no cover
                raise SyntaxError('Invalid wide Unicode character at %d!' % (i.index - 1))
        c = next(i)
        if c in ('0', '1'):
            value.append(c)
        else:  # pragma: no cover
            raise SyntaxError('Invalid wide Unicode character at %d!' % (i.index - 1))
        for x in range(4):
            c = next(i)
            if c.lower() in _HEX:
                value.append(c)
            else:  # pragma: no cover
                raise SyntaxError('Invalid wide Unicode character at %d!' % (i.index - 1))
        return ''.join(value)
    def get_narrow_unicode(self, i):
        """Get narrow Unicode (`\\uXXXX`): four hex digits."""
        value = []
        for x in range(4):
            c = next(i)
            if c.lower() in _HEX:
                value.append(c)
            else:  # pragma: no cover
                raise SyntaxError('Invalid Unicode character at %d!' % (i.index - 1))
        return ''.join(value)
    def get_unicode(self, i, wide=False):
        """Get Unicode character.

        Values <= 0xFF come back as octal escapes so they remain valid in
        byte patterns; larger values are returned as the actual character.
        """
        value = int(self.get_wide_unicode(i) if wide else self.get_narrow_unicode(i), 16)
        return ('\\%03o' % value) if value <= 0xFF else _util.uchr(value)
    def reference(self, t, i, in_group=False):
        """Handle a backslash reference `t`; returns the expanded pattern fragments.

        `in_group` is True inside a character class, where \\m/\\M/\\R/\\X
        are not special.
        """
        current = []
        if not in_group and t == "m":
            current.append(self._re_start_wb)
        elif not in_group and t == "M":
            current.append(self._re_end_wb)
        elif not in_group and t == "R":
            current.append(self._re_line_break)
        elif not in_group and t == "X":
            # Grapheme cluster: a non-mark char followed by any combining marks.
            no_mark = self.unicode_props("^m", None, in_group=False)[0]
            mark = self.unicode_props("m", None, in_group=False)[0]
            current.extend(self._grapheme_cluster % (no_mark, mark, mark))
        elif t == "e":
            current.append(self._re_escape)
        elif t == "l":
            current.extend(self.letter_case_props(_LOWER, in_group))
        elif t == "L":
            current.extend(self.letter_case_props(_LOWER, in_group, negate=True))
        elif t == "c":
            current.extend(self.letter_case_props(_UPPER, in_group))
        elif t == "C":
            current.extend(self.letter_case_props(_UPPER, in_group, negate=True))
        elif _util.PY2 and not self.binary and t == "U":
            # Python 2 re has no native \U/\u escapes; expand them here.
            current.append(self.get_unicode(i, True))
        elif _util.PY2 and not self.binary and t == "u":
            current.append(self.get_unicode(i))
        elif t == 'p':
            prop = self.get_unicode_property(i)
            current.extend(self.unicode_props(prop[0], prop[1], in_group=in_group))
            if in_group:
                self.found_property = True
        elif t == 'P':
            prop = self.get_unicode_property(i)
            current.extend(self.unicode_props(prop[0], prop[1], in_group=in_group, negate=True))
            if in_group:
                self.found_property = True
        elif t == "N":
            text = self.get_named_unicode(i)
            current.extend(self.unicode_name(text, in_group))
            if in_group:
                self.found_property = True
        else:
            # Unknown escape: pass through untouched for `re` to interpret.
            current.extend(["\\", t])
        return current
    def get_comments(self, i):
        """Consume a `(?#...)` comment group; returns it verbatim or None if not a comment."""
        index = i.index
        value = ['(']
        escaped = False
        try:
            c = next(i)
            if c != '?':
                # Not an extension group; back out.
                i.rewind(1)
                return None
            value.append(c)
            c = next(i)
            if c != '#':
                i.rewind(2)
                return None
            value.append(c)
            c = next(i)
            while c != ')' or escaped is True:
                if escaped:
                    escaped = False
                elif c == '\\':
                    escaped = True
                value.append(c)
                c = next(i)
            value.append(c)
        except StopIteration:
            raise SyntaxError("Unmatched '(' at %d!" % (index - 1))
        return ''.join(value)
    def get_flags(self, i, scoped=False):
        """Consume a `(?flags)` (or scoped `(?flags:`) group; returns it or None."""
        if scoped and not _SCOPED_FLAG_SUPPORT:
            return None
        index = i.index
        value = ['(']
        toggle = False
        end = ':' if scoped else ')'
        try:
            c = next(i)
            if c != '?':
                i.rewind(1)
                return None
            value.append(c)
            c = next(i)
            while c != end:
                if toggle:
                    # A '-' must be followed by a scoped flag to turn off.
                    if c not in _SCOPED_FLAGS:
                        raise ValueError('Bad scope')
                    toggle = False
                elif scoped and c == '-':
                    toggle = True
                elif not _util.PY37 and scoped and c in _GLOBAL_FLAGS:
                    raise ValueError("Bad flag")
                elif c not in _GLOBAL_FLAGS and c not in _SCOPED_FLAGS:
                    raise ValueError("Bad flag")
                value.append(c)
                c = next(i)
            value.append(c)
        except Exception:
            # Not a flag group after all; restore the iterator position.
            i.rewind(i.index - index)
            value = []
        return ''.join(value) if value else None
    def subgroup(self, t, i):
        """Handle a parenthesized group, including flag and comment groups."""
        current = []
        # (?flags)
        flags = self.get_flags(i)
        if flags:
            self.flags(flags[2:-1])
            return [flags]
        # (?#comment)
        comments = self.get_comments(i)
        if comments:
            return [comments]
        verbose = self.verbose
        unicode_flag = self.unicode
        # (?flags:pattern)
        flags = self.get_flags(i, True)
        if flags:  # pragma: no cover
            t = flags
            self.flags(flags[2:-1], scoped=True)
        current = []
        try:
            while t != ')':
                if not current:
                    current.append(t)
                else:
                    current.extend(self.normal(t, i))
                t = next(i)
        except StopIteration:
            pass
        # Restore flags after group
        self.verbose = verbose
        self.unicode = unicode_flag
        if t == ")":
            current.append(t)
        return current
    def get_posix(self, i):
        """Consume a POSIX class `[:name:]` body; returns the lowercased name or None."""
        index = i.index
        value = []
        try:
            c = next(i)
            if c != ':':
                raise ValueError('Not a valid property!')
            else:
                c = next(i)
                if c == '^':
                    # Negated POSIX class, e.g. [:^alpha:].
                    value.append(c)
                    c = next(i)
                while c != ':':
                    if c not in _PROPERTY:
                        raise ValueError('Not a valid property!')
                    if c not in _PROPERTY_STRIP:
                        value.append(c)
                    c = next(i)
                if next(i) != ']' or not value:
                    raise ValueError('Not a valid property!')
        except Exception:
            # Not a POSIX class; restore the iterator position.
            i.rewind(i.index - index)
            value = []
        return ''.join(value).lower() if value else None
    def char_groups(self, t, i):
        """Handle a `[...]` character group, expanding references and POSIX classes."""
        current = []
        pos = i.index - 1
        found = False
        escaped = False
        first = None
        self.found_property = False
        try:
            while True:
                if not escaped and t == "\\":
                    escaped = True
                elif escaped:
                    escaped = False
                    current.extend(self.reference(t, i, True))
                elif t == "[" and not found:
                    # Opening bracket of the class.
                    found = True
                    first = pos
                    current.append(t)
                elif t == "[":
                    # Possible POSIX class like [:alpha:].
                    posix = self.get_posix(i)
                    if posix:
                        current.extend(self.posix_props(posix, in_group=True))
                        self.found_property = True
                        pos = i.index - 2
                    else:
                        current.append(t)
                elif t == "^" and found and (pos == first + 1):
                    # Negation caret immediately after '['.
                    first = pos
                    current.append(t)
                elif t == "]" and found and (pos != first + 1):
                    # Closing bracket (']' directly after '[' or '[^' is literal).
                    found = False
                    current.append(t)
                    break
                else:
                    current.append(t)
                pos += 1
                t = next(i)
        except StopIteration:
            pass
        if escaped:
            current.append(t)
        # Handle properties that return an empty string.
        # This will occur when a property's values exceed
        # either the Unicode char limit on a narrow system,
        # or the ASCII limit in a byte string pattern.
        if self.found_property:
            value = "".join(current)
            if value == '[]':
                # We specified some properties, but they are all
                # out of reach. Therefore we can match nothing.
                current = ['[^%s]' % ('\x00-\xff' if self.binary else _uniprops.UNICODE_RANGE)]
            elif value == '[^]':
                current = ['[%s]' % ('\x00-\xff' if self.binary else _uniprops.UNICODE_RANGE)]
            else:
                current = [value]
        return current
def normal(self, t, i):
"""Handle normal chars."""
current = []
if t == "\\":
try:
t = next(i)
current.extend(self.reference(t, i))
except StopIteration:
current.append(t)
elif t == "(":
current.extend(self.subgroup(t, i))
elif self.verbose and t == "#":
current.extend(self.verbose_comment(t, i))
elif t == "[":
current.extend(self.char_groups(t, i))
else:
current.append(t)
return current
    def posix_props(self, prop, in_group=False):
        """
        Insert POSIX properties.
        Posix style properties are not as forgiving
        as Unicode properties. Case does matter,
        and whitespace and '-' and '_' will not be tolerated.
        """
        try:
            if self.binary or not self.unicode:
                pattern = _uniprops.get_posix_property(
                    prop, (_uniprops.POSIX_BINARY if self.binary else _uniprops.POSIX)
                )
            else:
                pattern = _uniprops.get_posix_property(prop, _uniprops.POSIX_UNICODE)
        except Exception:
            raise ValueError('Invalid POSIX property!')
        if not in_group and not pattern:  # pragma: no cover
            # Property matches nothing in this mode: negate the full range.
            pattern = '^%s' % ('\x00-\xff' if self.binary else _uniprops.UNICODE_RANGE)
        return [pattern]
    def unicode_name(self, name, in_group=False):
        """Insert Unicode value by its name.

        NOTE(review): returns a plain string in the first two branches and a
        list in the last; callers use extend(), where a string of chars
        behaves like a list of chars, so this is intentional-looking but
        inconsistent.
        """
        value = _util.uord(_unicodedata.lookup(name))
        if (self.binary and value > 0xFF):
            # Out of range for a byte pattern; treat as unmatchable.
            value = ""
        if not in_group and value == "":
            return '[^%s]' % ('\x00-\xff' if self.binary else _uniprops.UNICODE_RANGE)
        elif value == "":
            return value
        else:
            return ['\\%03o' % value if value <= 0xFF else _util.uchr(value)]
def unicode_props(self, props, value, in_group=False, negate=False):
"""
Insert Unicode properties.
Unicode properties are very forgiving.
Case doesn't matter and `[ -_]` will be stripped out.
"""
# 'GC = Some_Unpredictable-Category Name' -> 'gc=someunpredictablecategoryname'
category = None
# \p{^negated} Strip off the caret after evaluation.
if props.startswith("^"):
negate = not negate
if props.startswith("^"):
props = props[1:]
# Get the property and value.
# If a property is present and not block,
# we can assume GC as that is all we support.
# If we are wrong it will fail.
if value:
if _uniprops.is_enum(props):
category = props
props = value
elif value in ('y', 'yes', 't', 'true'):
category = 'binary'
elif value in ('n', 'no', 'f', 'false'):
category = 'binary'
negate = not negate
else:
raise ValueError('Invalid Unicode property!')
v = _uniprops.get_unicode_property(("^" if negate else "") + props, category, self.binary)
if not in_group:
if not v:
v = '^%s' % ('\x00-\xff' if self.binary else _uniprops.UNICODE_RANGE)
v = "[%s]" % v
properties = [v]
return properties
def letter_case_props(self, case, in_group, negate=False):
"""Insert letter (ASCII or Unicode) case properties."""
# Use traditional ASCII upper/lower case unless:
# 1. The strings fed in are not binary
# 2. And the the unicode flag was used
if not in_group:
v = self.posix_props(("^" if negate else "") + ("upper" if case == _UPPER else "lower"), in_group=in_group)
v[0] = "[%s]" % v[0]
else:
v = self.posix_props(("^" if negate else "") + ("upper" if case == _UPPER else "lower"), in_group=in_group)
return v
def main_group(self, i):
"""The main group: group 0."""
current = []
while True:
try:
t = next(i)
current.extend(self.normal(t, i))
except StopIteration:
break
return current
    def parse(self):
        """Apply search template.

        Preprocesses the pattern, retrying from the start whenever a global
        flag change is discovered mid-pattern; returns text or bytes to
        match the input type.
        """
        self.verbose = bool(self.re_verbose)
        self.unicode = bool(self.re_unicode)
        # Track which global flags already triggered a retry so a pattern
        # like ((?u)(?a)) cannot loop forever.
        self.global_flag_swap = {
            "unicode": ((self.re_unicode is not None) if not _util.PY37 else False),
            "verbose": False
        }
        self.temp_global_flag_swap = {
            "unicode": False,
            "verbose": False
        }
        if _util.PY3:
            self.ascii = self.re_unicode is not None and not self.re_unicode
        else:
            self.ascii = False
        if _util.PY3 and not self.unicode and not self.ascii:
            # Python 3 defaults to Unicode when neither mode was requested.
            self.unicode = True
        new_pattern = []
        # Byte patterns are processed as latin-1 text and re-encoded at the end.
        text = self.process_quotes(self.search.decode('latin-1') if self.binary else self.search)
        i = _util.StringIter(text)
        iter(i)
        retry = True
        while retry:
            retry = False
            try:
                new_pattern = self.main_group(i)
            except GlobalRetryException:
                # Prevent a loop of retry over and over for a pattern like ((?u)(?a))
                # or (?-x:(?x))
                if self.temp_global_flag_swap['unicode']:
                    if self.global_flag_swap['unicode']:
                        raise LoopException('Global unicode flag recursion.')
                    else:
                        self.global_flag_swap["unicode"] = True
                if self.temp_global_flag_swap['verbose']:
                    if self.global_flag_swap['verbose']:
                        raise LoopException('Global verbose flag recursion.')
                    else:
                        self.global_flag_swap['verbose'] = True
                self.temp_global_flag_swap = {
                    "unicode": False,
                    "verbose": False
                }
                i.rewind(i.index)
                retry = True
        return "".join(new_pattern).encode('latin-1') if self.binary else "".join(new_pattern)
class _ReplaceParser(object):
    """Pre-replace template."""
    def __init__(self):
        """Initialize."""
        # True once a \E end marker has been consumed (see span_case).
        self.end_found = False
        # (slot, (span_case, single_case, capture)) records per group reference.
        self.group_slots = []
        # Literal text chunks interleaved with group references.
        self.literal_slots = []
        # Accumulator for the literal currently being built.
        self.result = []
        # Active \L/\C case spans and pending \l/\c single-char markers.
        self.span_stack = []
        self.single_stack = []
        self.slot = 0
        # Format-string group indexing mode: manual ({1}) vs auto ({}).
        self.manual = False
        self.auto = False
        self.auto_index = 0
def parse_format_index(self, text):
"""Parse format index."""
base = 10
prefix = text[1:3] if text[0] == "-" else text[:2]
if prefix[0:1] == "0":
char = prefix[-1]
if char == "b":
base = 2
elif char == "o":
base = 8
elif char == "x":
base = 16
try:
text = int(text, base)
except Exception:
pass
return text
def get_format(self, c, i):
"""Get format group."""
index = i.index
field = ''
value = []
try:
if c == '}':
value.append((_util.FMT_FIELD, ''))
else:
# Field
if c in _LETTERS_UNDERSCORE:
# Handle name
value.append(c)
c = self.format_next(i)
while c in _WORD:
value.append(c)
c = self.format_next(i)
elif c in _DIGIT:
# Handle group number
value.append(c)
c = self.format_next(i)
while c in _DIGIT:
value.append(c)
c = self.format_next(i)
# Try and covert to integer index
field = ''.join(value).strip()
try:
value = [(_util.FMT_FIELD, _util.string_type(int(field, 10)))]
except ValueError:
value = [(_util.FMT_FIELD, field)]
pass
# Attributes and indexes
while c in ('[', '.'):
if c == '[':
findex = []
sindex = i.index - 1
c = self.format_next(i)
try:
while c != ']':
findex.append(c)
c = self.format_next(i)
except StopIteration:
raise SyntaxError("Unmatched '[' at %d" % (sindex - 1))
idx = self.parse_format_index(''.join(findex))
if isinstance(idx, int):
value.append((_util.FMT_INDEX, idx))
else:
value.append((_util.FMT_INDEX, idx))
c = self.format_next(i)
else:
findex = []
c = self.format_next(i)
while c in _WORD:
findex.append(c)
c = self.format_next(i)
value.append((_util.FMT_ATTR, ''.join(findex)))
# Conversion
if c == '!':
c = self.format_next(i)
if c not in _FMT_CONV_TYPE:
raise SyntaxError("Invalid conversion type at %d!" % (i.index - 1))
value.append((_util.FMT_CONV, c))
c = self.format_next(i)
# Format spec
if c == ':':
fill = None
width = []
align = None
convert = None
c = self.format_next(i)
if c in ('<', '>', '^'):
# Get fill and alignment
align = c
c = self.format_next(i)
if c in ('<', '>', '^'):
fill = align
align = c
c = self.format_next(i)
elif c in _DIGIT:
# Get Width
fill = c
c = self.format_next(i)
if c in ('<', '>', '^'):
align = c
c = self.format_next(i)
else:
width.append(fill)
fill = None
else:
fill = c
c = self.format_next(i)
if fill == 's' and c == '}':
convert = fill
fill = None
if fill is not None:
if c not in ('<', '>', '^'):
raise SyntaxError('Invalid format spec char at %d!' % (i.index - 1))
align = c
c = self.format_next(i)
while c in _DIGIT:
width.append(c)
c = self.format_next(i)
if not align and len(width) and width[0] == '0':
raise ValueError("'=' alignment is not supported!")
if align and not fill and len(width) and width[0] == '0':
fill = '0'
if c == 's':
convert = c
c = self.format_next(i)
if fill and self.binary:
fill = fill.encode('latin-1')
elif not fill:
fill = b' ' if self.binary else ' '
value.append((_util.FMT_SPEC, (fill, align, (int(''.join(width)) if width else 0), convert)))
if c != '}':
raise SyntaxError("Unmatched '{' at %d" % (index - 1))
except StopIteration:
raise SyntaxError("Unmatched '{' at %d!" % (index - 1))
return field, value
    def handle_format(self, t, i):
        """Handle a '{' or '}' in format mode ('{{'/'}}' are literal braces)."""
        if t == '{':
            t = self.format_next(i)
            if t == '{':
                # '{{' -> literal '{'.
                self.get_single_stack()
                self.result.append(t)
            else:
                field, text = self.get_format(t, i)
                self.handle_format_group(field, text)
        else:
            t = self.format_next(i)
            if t == '}':
                # '}}' -> literal '}'.
                self.get_single_stack()
                self.result.append(t)
            else:
                raise SyntaxError("Unmatched '}' at %d!" % (i.index - 2))
    def get_octal(self, c, i):
        """Consume an octal escape starting at `c`; returns its digits or None.

        Accepts leading zeros plus up to three octal digits; rewinds the
        iterator when the sequence is not a valid octal escape.
        """
        index = i.index
        value = []
        zero_count = 0
        try:
            if c == '0':
                # Count leading zeros (at most three).
                for x in range(3):
                    if c != '0':
                        break
                    value.append(c)
                    c = next(i)
                zero_count = len(value)
            if zero_count < 3:
                for x in range(3 - zero_count):
                    if c not in _OCTAL:
                        break
                    value.append(c)
                    c = next(i)
            # The last char read was not part of the escape; push it back.
            i.rewind(1)
        except StopIteration:
            pass
        octal_count = len(value)
        if not (self.use_format and octal_count) and not (zero_count and octal_count < 3) and octal_count != 3:
            # Not a complete octal escape; restore everything.
            i.rewind(i.index - index)
            value = []
        return ''.join(value) if value else None
    def parse_octal(self, text, i):
        """Parse octal value, applying any pending case conversions before emitting."""
        value = int(text, 8)
        if value > 0xFF and self.binary:
            # Re fails on octal greater than 0o377 or 0xFF
            raise ValueError("octal escape value outside of range 0-0o377!")
        else:
            single = self.get_single_stack()
            if self.span_stack:
                # Apply the active \L/\C span first, then any \l/\c single marker.
                text = self.convert_case(_util.uchr(value), self.span_stack[-1])
                value = _util.uord(self.convert_case(text, single)) if single is not None else _util.uord(text)
            elif single:
                value = _util.uord(self.convert_case(_util.uchr(value), single))
            if self.use_format and value in _CURLY_BRACKETS_ORD:
                # An escape resolving to '{'/'}' still participates in format parsing.
                self.handle_format(_util.uchr(value), i)
            elif value <= 0xFF:
                self.result.append('\\%03o' % value)
            else:
                self.result.append(_util.uchr(value))
def get_named_unicode(self, i):
"""Get named Unicode."""
index = i.index
value = []
try:
if next(i) != '{':
raise SyntaxError("Named Unicode missing '{'' at %d!" % (i.index - 1))
c = next(i)
while c != '}':
value.append(c)
c = next(i)
except StopIteration:
raise SyntaxError("Unmatched '}' at %d!" % index)
return ''.join(value)
    def parse_named_unicode(self, i):
        """Parse named Unicode (`\\N{...}`), applying pending case conversions."""
        value = _util.uord(_unicodedata.lookup(self.get_named_unicode(i)))
        single = self.get_single_stack()
        if self.span_stack:
            # Apply the active \L/\C span first, then any \l/\c single marker.
            text = self.convert_case(_util.uchr(value), self.span_stack[-1])
            value = _util.uord(self.convert_case(text, single)) if single is not None else _util.uord(text)
        elif single:
            value = _util.uord(self.convert_case(_util.uchr(value), single))
        if self.use_format and value in _CURLY_BRACKETS_ORD:
            # An escape resolving to '{'/'}' still participates in format parsing.
            self.handle_format(_util.uchr(value), i)
        elif value <= 0xFF:
            self.result.append('\\%03o' % value)
        else:
            self.result.append(_util.uchr(value))
    def get_wide_unicode(self, i):
        """Get wide Unicode (`\\UXXXXXXXX`): eight hex digits, constrained to `000[01]xxxx`."""
        value = []
        # First three digits must be zero (code points cap at 0x10FFFF).
        for x in range(3):
            c = next(i)
            if c == '0':
                value.append(c)
            else:  # pragma: no cover
                raise SyntaxError('Invalid wide Unicode character at %d!' % (i.index - 1))
        c = next(i)
        if c in ('0', '1'):
            value.append(c)
        else:  # pragma: no cover
            raise SyntaxError('Invalid wide Unicode character at %d!' % (i.index - 1))
        for x in range(4):
            c = next(i)
            if c.lower() in _HEX:
                value.append(c)
            else:  # pragma: no cover
                raise SyntaxError('Invalid wide Unicode character at %d!' % (i.index - 1))
        return ''.join(value)
def get_narrow_unicode(self, i):
"""Get narrow Unicode."""
value = []
for x in range(4):
c = next(i)
if c.lower() in _HEX:
value.append(c)
else: # pragma: no cover
raise SyntaxError('Invalid Unicode character at %d!' % (i.index - 1))
return ''.join(value)
    def parse_unicode(self, i, wide=False):
        """Parse a `\\u`/`\\U` Unicode escape, applying pending case conversions."""
        text = self.get_wide_unicode(i) if wide else self.get_narrow_unicode(i)
        value = int(text, 16)
        single = self.get_single_stack()
        if self.span_stack:
            # Apply the active \L/\C span first, then any \l/\c single marker.
            text = self.convert_case(_util.uchr(value), self.span_stack[-1])
            value = _util.uord(self.convert_case(text, single)) if single is not None else _util.uord(text)
        elif single:
            value = _util.uord(self.convert_case(_util.uchr(value), single))
        if self.use_format and value in _CURLY_BRACKETS_ORD:
            # An escape resolving to '{'/'}' still participates in format parsing.
            self.handle_format(_util.uchr(value), i)
        elif value <= 0xFF:
            self.result.append('\\%03o' % value)
        else:
            self.result.append(_util.uchr(value))
def get_byte(self, i):
"""Get byte."""
value = []
for x in range(2):
c = next(i)
if c.lower() in _HEX:
value.append(c)
else: # pragma: no cover
raise SyntaxError('Invalid byte character at %d!' % (i.index - 1))
return ''.join(value)
    def parse_bytes(self, i):
        """Parse a `\\xXX` byte escape, applying pending case conversions."""
        value = int(self.get_byte(i), 16)
        single = self.get_single_stack()
        if self.span_stack:
            # Apply the active \L/\C span first, then any \l/\c single marker.
            text = self.convert_case(chr(value), self.span_stack[-1])
            value = _util.uord(self.convert_case(text, single)) if single is not None else _util.uord(text)
        elif single:
            value = _util.uord(self.convert_case(chr(value), single))
        if self.use_format and value in _CURLY_BRACKETS_ORD:
            # An escape resolving to '{'/'}' still participates in format parsing.
            self.handle_format(_util.uchr(value), i)
        else:
            self.result.append('\\%03o' % value)
    def get_named_group(self, t, i):
        """Consume a `g<name>`/`g<number>` group reference; returns it verbatim."""
        index = i.index
        value = [t]
        try:
            c = next(i)
            if c != "<":
                raise SyntaxError("Group missing '<' at %d!" % (i.index - 1))
            value.append(c)
            c = next(i)
            if c in _DIGIT:
                # Numeric group reference.
                value.append(c)
                c = next(i)
                while c != '>':
                    if c in _DIGIT:
                        value.append(c)
                    c = next(i)
                value.append(c)
            elif c in _LETTERS_UNDERSCORE:
                # Named group reference.
                value.append(c)
                c = next(i)
                while c != '>':
                    if c in _WORD:
                        value.append(c)
                    c = next(i)
                value.append(c)
            else:
                raise SyntaxError("Invalid group character at %d!" % (i.index - 1))
        except StopIteration:
            raise SyntaxError("Unmatched '<' at %d!" % index)
        return ''.join(value)
def get_group(self, t, i):
"""Get group number."""
try:
value = []
if t in _DIGIT and t != '0':
value.append(t)
t = next(i)
if t in _DIGIT:
value.append(t)
else:
i.rewind(1)
except StopIteration:
pass
return ''.join(value) if value else None
def format_next(self, i):
"""Get next format char."""
c = next(i)
return self.format_references(next(i), i) if c == '\\' else c
    def format_references(self, t, i):
        """Translate a backslash escape inside a format group to its character value."""
        octal = self.get_octal(t, i)
        if octal:
            value = int(octal, 8)
            if value > 0xFF and self.binary:
                # Re fails on octal greater than 0o377 or 0xFF
                raise ValueError("octal escape value outside of range 0-0o377!")
            value = _util.uchr(value)
        elif t in _STANDARD_ESCAPES or t == '\\':
            value = _BACK_SLASH_TRANSLATION['\\' + t]
        elif not self.binary and t == "U":
            value = _util.uchr(int(self.get_wide_unicode(i), 16))
        elif not self.binary and t == "u":
            value = _util.uchr(int(self.get_narrow_unicode(i), 16))
        elif not self.binary and t == "N":
            value = _unicodedata.lookup(self.get_named_unicode(i))
        elif t == "x":
            value = _util.uchr(int(self.get_byte(i), 16))
        else:
            # Unknown escape: leave the backslash in place and rewind.
            i.rewind(1)
            value = '\\'
        return value
    def reference(self, t, i):
        """Handle a backslash reference `t` in the replace template."""
        octal = self.get_octal(t, i)
        if t in _OCTAL and octal:
            self.parse_octal(octal, i)
        elif (t in _DIGIT or t == 'g') and not self.use_format:
            # \1..\99 or \g<name> style group references (non-format mode only).
            group = self.get_group(t, i)
            if not group:
                group = self.get_named_group(t, i)
            self.handle_group('\\' + group)
        elif t in _STANDARD_ESCAPES:
            self.get_single_stack()
            self.result.append('\\' + t)
        elif t == "l":
            self.single_case(i, _LOWER)
        elif t == "L":
            self.span_case(i, _LOWER)
        elif t == "c":
            self.single_case(i, _UPPER)
        elif t == "C":
            self.span_case(i, _UPPER)
        elif t == "E":
            # End of a case span; span_case watches this flag.
            self.end_found = True
        elif not self.binary and t == "U":
            self.parse_unicode(i, True)
        elif not self.binary and t == "u":
            self.parse_unicode(i)
        elif not self.binary and t == "N":
            self.parse_named_unicode(i)
        elif t == "x":
            self.parse_bytes(i)
        elif self.use_format and t in _CURLY_BRACKETS:
            # Escaped brace in format mode: emit literal backslash, then format-handle it.
            self.result.append('\\\\')
            self.handle_format(t, i)
        elif self.use_format and t == 'g':
            self.result.append('\\\\')
            self.result.append(t)
        else:
            value = '\\' + t
            self.get_single_stack()
            if self.span_stack:
                value = self.convert_case(value, self.span_stack[-1])
            self.result.append(value)
    def parse_template(self, pattern):
        """Parse the stored template into literal slots and group slots.

        Populates `self._template`, then delegates to `sre_parse` to split
        it into `self.groups` and `self.literals`.
        """
        # Byte templates are processed as latin-1 text and re-encoded at the end.
        i = _util.StringIter((self._original.decode('latin-1') if self.binary else self._original))
        iter(i)
        self.result = [""]
        while True:
            try:
                t = next(i)
                if self.use_format and t in _CURLY_BRACKETS:
                    self.handle_format(t, i)
                elif t == '\\':
                    try:
                        t = next(i)
                        self.reference(t, i)
                    except StopIteration:
                        # Trailing lone backslash is kept literally.
                        self.result.append(t)
                        raise
                else:
                    self.result.append(t)
            except StopIteration:
                break
        if len(self.result) > 1:
            # Flush the trailing literal chunk.
            self.literal_slots.append("".join(self.result))
            del self.result[:]
            self.result.append("")
            self.slot += 1
        if self.binary:
            self._template = "".join(self.literal_slots).encode('latin-1')
        else:
            self._template = "".join(self.literal_slots)
        self.groups, self.literals = _sre_parse.parse_template(self._template, pattern)
    def span_case(self, i, case):
        """Uppercase or lowercase the next range of characters until end marker is found."""
        self.span_stack.append(case)
        self.end_found = False
        try:
            while not self.end_found:
                t = next(i)
                if self.use_format and t in _CURLY_BRACKETS:
                    self.handle_format(t, i)
                elif t == '\\':
                    try:
                        t = next(i)
                        self.reference(t, i)
                    except StopIteration:
                        # Trailing lone backslash is kept literally.
                        self.result.append(t)
                        raise
                elif self.single_stack:
                    # A pending \l/\c applies to just the first char of the text.
                    single = self.get_single_stack()
                    text = self.convert_case(t, case)
                    if single:
                        text = self.convert_case(text[0], single) + text[1:]
                    self.result.append(text)
                else:
                    self.result.append(self.convert_case(t, case))
                if self.end_found:
                    self.end_found = False
                    break
        except StopIteration:
            pass
        self.span_stack.pop()
def convert_case(self, value, case):
"""Convert case."""
if self.binary:
cased = []
for c in value:
if c in _ASCII_LETTERS:
cased.append(c.lower() if case == _LOWER else c.upper())
else:
cased.append(c)
return "".join(cased)
else:
return value.lower() if case == _LOWER else value.upper()
    def single_case(self, i, case):
        """Uppercase or lowercase the next character."""
        self.single_stack.append(case)
        try:
            t = next(i)
            if self.use_format and t in _CURLY_BRACKETS:
                self.handle_format(t, i)
            elif t == '\\':
                try:
                    t = next(i)
                    self.reference(t, i)
                except StopIteration:
                    # Trailing lone backslash is kept literally.
                    self.result.append(t)
                    raise
            else:
                self.result.append(self.convert_case(t, self.get_single_stack()))
        except StopIteration:
            pass
def get_single_stack(self):
"""Get the correct single stack item to use."""
single = None
while self.single_stack:
single = self.single_stack.pop()
return single
    def handle_format_group(self, field, text):
        """Register a parsed format group, resolving auto vs manual numbering."""
        # Handle auto incrementing group indexes
        if field == '':
            if self.auto:
                field = _util.string_type(self.auto_index)
                text[0] = (_util.FMT_FIELD, field)
                self.auto_index += 1
            elif not self.manual and not self.auto:
                # First empty field switches us into auto-numbering mode.
                self.auto = True
                field = _util.string_type(self.auto_index)
                text[0] = (_util.FMT_FIELD, field)
                self.auto_index += 1
            else:
                raise ValueError("Cannot switch to auto format during manual format!")
        elif not self.manual and not self.auto:
            self.manual = True
        elif not self.manual:
            raise ValueError("Cannot switch to manual format during auto format!")
        self.handle_group(field, tuple(text), True)
    def handle_group(self, text, capture=None, is_format=False):
        """Record a group reference, flushing any literal text accumulated before it."""
        if capture is None:
            capture = tuple() if self.binary else ''
        if len(self.result) > 1:
            # Flush pending literal text into its own slot first.
            self.literal_slots.append("".join(self.result))
            if is_format:
                self.literal_slots.extend(["\\g<", text, ">"])
            else:
                self.literal_slots.append(text)
            del self.result[:]
            self.result.append("")
            self.slot += 1
        elif is_format:
            self.literal_slots.extend(["\\g<", text, ">"])
        else:
            self.literal_slots.append(text)
        # Remember the case state that applies when this group is expanded.
        self.group_slots.append(
            (
                self.slot,
                (
                    (self.span_stack[-1] if self.span_stack else None),
                    self.get_single_stack(),
                    capture
                )
            )
        )
        self.slot += 1
def get_base_template(self):
"""Return the unmodified template before expansion."""
return self._original
def parse(self, pattern, template, use_format=False):
    """Parse ``template`` against ``pattern`` and return a ``ReplaceTemplate``."""
    # Track whether we are working with bytes; pattern and template
    # string types must agree.
    self.binary = isinstance(template, _util.binary_type)
    if isinstance(pattern.pattern, _util.binary_type) != self.binary:
        raise TypeError('Pattern string type must match replace template string type!')
    self._original = template
    self.use_format = use_format
    self.parse_template(pattern)
    compiled = ReplaceTemplate(
        tuple(self.groups),
        tuple(self.group_slots),
        tuple(self.literals),
        hash(pattern),
        self.use_format,
        self.binary
    )
    return compiled
class ReplaceTemplate(_util.Immutable):
    """Replacement template expander.

    Immutable, hashable object produced by the template parser.  ``expand``
    applies the parsed literal slots and group slots to a match object to
    produce the final replacement string.
    """

    __slots__ = ("groups", "group_slots", "literals", "pattern_hash", "use_format", "_hash", "_binary")

    def __init__(self, groups, group_slots, literals, pattern_hash, use_format, binary):
        """Initialize.

        All state is set through the immutable base class; the hash is
        precomputed from every field so equal templates hash equally.
        """
        super(ReplaceTemplate, self).__init__(
            use_format=use_format,
            groups=groups,
            group_slots=group_slots,
            literals=literals,
            pattern_hash=pattern_hash,
            _binary=binary,
            _hash=hash(
                (
                    type(self),
                    groups, group_slots, literals,
                    pattern_hash, use_format, binary
                )
            )
        )

    def __call__(self, m):
        """Allow the template to be used directly as a replace callable."""
        return self.expand(m)

    def __hash__(self):
        """Hash."""
        return self._hash

    def __eq__(self, other):
        """Equal."""
        return (
            isinstance(other, ReplaceTemplate) and
            self.groups == other.groups and
            self.group_slots == other.group_slots and
            self.literals == other.literals and
            self.pattern_hash == other.pattern_hash and
            self.use_format == other.use_format and
            self._binary == other._binary
        )

    def __ne__(self, other):
        """Not equal."""
        return (
            not isinstance(other, ReplaceTemplate) or
            self.groups != other.groups or
            self.group_slots != other.group_slots or
            self.literals != other.literals or
            self.pattern_hash != other.pattern_hash or
            self.use_format != other.use_format or
            # BUG FIX: previously compared self._binary to itself (always
            # False), so binary-ness never contributed to inequality.
            self._binary != other._binary
        )

    def __repr__(self):  # pragma: no cover
        """Representation."""
        return "%s.%s(%r, %r, %r, %r, %r)" % (
            self.__module__, self.__class__.__name__,
            self.groups, self.group_slots, self.literals,
            self.pattern_hash, self.use_format
        )

    def _get_group_index(self, index):
        """Find and return the appropriate group index, or ``None``."""
        g_index = None
        for group in self.groups:
            if group[0] == index:
                g_index = group[1]
                break
        return g_index

    def _get_group_attributes(self, index):
        """Find and return the group's (span case, single case, capture)."""
        g_case = (None, None, -1)
        for group in self.group_slots:
            if group[0] == index:
                g_case = group[1]
                break
        return g_case

    def expand(self, m):
        """Using the template, expand the string against match ``m``.

        Raises ValueError when ``m`` is None and TypeError when the match
        string type (bytes vs. str) disagrees with the template's.
        """
        if m is None:
            raise ValueError("Match is None!")
        # Empty slice of the match string: a '' or b'' joiner of the
        # correct type.
        sep = m.string[:0]
        if isinstance(sep, _util.binary_type) != self._binary:
            raise TypeError('Match string type does not match expander string type!')
        text = []
        # Expand string: a None literal marks a group slot to substitute.
        for x in range(0, len(self.literals)):
            index = x
            l = self.literals[x]
            if l is None:
                g_index = self._get_group_index(index)
                span_case, single_case, capture = self._get_group_attributes(index)
                if not self.use_format:
                    # Non format replace
                    try:
                        l = m.group(g_index)
                    except IndexError:  # pragma: no cover
                        raise IndexError("'%d' is out of range!" % g_index)
                else:
                    # String format replace
                    try:
                        obj = m.group(g_index)
                    except IndexError:  # pragma: no cover
                        raise IndexError("'%d' is out of range!" % g_index)
                    l = _util.format(m, obj, capture, self._binary)
                # Apply spanning case first, then the single-char case on
                # top so \c/\l can override the first character.
                if span_case is not None:
                    if span_case == _LOWER:
                        l = l.lower()
                    else:
                        l = l.upper()
                if single_case is not None:
                    if single_case == _LOWER:
                        l = l[0:1].lower() + l[1:]
                    else:
                        l = l[0:1].upper() + l[1:]
            text.append(l)
        return sep.join(text)
def _pickle(r):
    """Reduce a ``ReplaceTemplate`` to its constructor arguments for pickling."""
    state = (r.groups, r.group_slots, r.literals, r.pattern_hash, r.use_format, r._binary)
    return ReplaceTemplate, state


# Register the custom reducer so ReplaceTemplate instances pickle cleanly
# despite __slots__ and the immutable base class.
_util.copyreg.pickle(ReplaceTemplate, _pickle)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Ftrl operations."""
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
from tensorflow.python.training import ftrl
from tensorflow.python.training import gradient_descent
class FtrlOptimizerTest(test.TestCase):
  """Functional tests for the v1 FTRL optimizer."""

  def doTestFtrlwithoutRegularization(self, use_resource=False):
    """Runs FTRL without L1/L2 and checks weights against golden values."""
    # The v1 optimizers do not support eager execution
    with ops.Graph().as_default():
      for dtype in [dtypes.half, dtypes.float32]:
        with self.cached_session():
          if use_resource:
            var0 = resource_variable_ops.ResourceVariable([0.0, 0.0],
                                                          dtype=dtype)
            var1 = resource_variable_ops.ResourceVariable([0.0, 0.0],
                                                          dtype=dtype)
          else:
            var0 = variables.Variable([0.0, 0.0], dtype=dtype)
            var1 = variables.Variable([0.0, 0.0], dtype=dtype)
          grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
          grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
          opt = ftrl.FtrlOptimizer(
              3.0,
              initial_accumulator_value=0.1,
              l1_regularization_strength=0.0,
              l2_regularization_strength=0.0)
          update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
          self.evaluate(variables.global_variables_initializer())
          v0_val, v1_val = self.evaluate([var0, var1])
          self.assertAllClose([0.0, 0.0], v0_val)
          self.assertAllClose([0.0, 0.0], v1_val)
          # Run 3 steps FTRL
          for _ in range(3):
            update.run()
          v0_val, v1_val = self.evaluate([var0, var1])
          self.assertAllCloseAccordingToType(
              np.array([-2.60260963, -4.29698515]), v0_val, half_rtol=1e-2)
          self.assertAllCloseAccordingToType(
              np.array([-0.28432083, -0.56694895]), v1_val)

  def testFtrlWithoutRegularization(self):
    """FTRL without regularization on plain variables."""
    self.doTestFtrlwithoutRegularization(use_resource=False)

  def testResourceFtrlWithoutRegularization(self):
    """FTRL without regularization on resource variables."""
    self.doTestFtrlwithoutRegularization(use_resource=True)

  def testFtrlwithoutRegularization2(self):
    """FTRL without regularization, starting from non-zero weights."""
    # The v1 optimizers do not support eager execution
    with ops.Graph().as_default():
      for dtype in [dtypes.half, dtypes.float32]:
        with self.cached_session():
          var0 = variables.Variable([1.0, 2.0], dtype=dtype)
          var1 = variables.Variable([4.0, 3.0], dtype=dtype)
          grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
          grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
          opt = ftrl.FtrlOptimizer(
              3.0,
              initial_accumulator_value=0.1,
              l1_regularization_strength=0.0,
              l2_regularization_strength=0.0)
          update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
          self.evaluate(variables.global_variables_initializer())
          v0_val, v1_val = self.evaluate([var0, var1])
          self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
          self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
          # Run 3 steps FTRL
          for _ in range(3):
            update.run()
          v0_val, v1_val = self.evaluate([var0, var1])
          self.assertAllCloseAccordingToType(
              np.array([-2.55607247, -3.98729396]), v0_val)
          self.assertAllCloseAccordingToType(
              np.array([-0.28232238, -0.56096673]), v1_val)

  def testMinimizeSparseResourceVariable(self):
    """Minimizing through an embedding lookup produces sparse updates."""
    # The v1 optimizers do not support eager execution
    with ops.Graph().as_default():
      for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
        with self.cached_session():
          var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]],
                                                        dtype=dtype)
          x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
          pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
          loss = pred * pred
          sgd_op = ftrl.FtrlOptimizer(1.0).minimize(loss)
          self.evaluate(variables.global_variables_initializer())
          # Fetch params to validate initial values
          self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
          # Run 1 step of sgd
          sgd_op.run()
          # Validate updated params
          self.assertAllCloseAccordingToType([[0, 1]],
                                             self.evaluate(var0),
                                             atol=0.01)

  def testFtrlWithL1(self):
    """FTRL with L1 regularization only."""
    # The v1 optimizers do not support eager execution
    with ops.Graph().as_default():
      for dtype in [dtypes.half, dtypes.float32]:
        with self.cached_session():
          var0 = variables.Variable([1.0, 2.0], dtype=dtype)
          var1 = variables.Variable([4.0, 3.0], dtype=dtype)
          grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
          grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
          opt = ftrl.FtrlOptimizer(
              3.0,
              initial_accumulator_value=0.1,
              l1_regularization_strength=0.001,
              l2_regularization_strength=0.0)
          update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
          self.evaluate(variables.global_variables_initializer())
          v0_val, v1_val = self.evaluate([var0, var1])
          self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
          self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
          # Run 10 steps FTRL
          for _ in range(10):
            update.run()
          v0_val, v1_val = self.evaluate([var0, var1])
          self.assertAllCloseAccordingToType(
              np.array([-7.66718769, -10.91273689]), v0_val)
          self.assertAllCloseAccordingToType(
              np.array([-0.93460727, -1.86147261]), v1_val)

  def testFtrlWithBeta(self):
    """FTRL with a non-zero beta parameter."""
    # The v1 optimizers do not support eager execution
    with ops.Graph().as_default():
      for dtype in [dtypes.half, dtypes.float32]:
        with self.cached_session():
          var0 = variables.Variable([1.0, 2.0], dtype=dtype)
          var1 = variables.Variable([4.0, 3.0], dtype=dtype)
          grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
          grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
          opt = ftrl.FtrlOptimizer(3.0, initial_accumulator_value=0.1, beta=0.1)
          update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
          self.evaluate(variables.global_variables_initializer())
          v0_val, v1_val = self.evaluate([var0, var1])
          self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
          self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
          # Run 10 steps FTRL
          for _ in range(10):
            update.run()
          v0_val, v1_val = self.evaluate([var0, var1])
          self.assertAllCloseAccordingToType(
              np.array([-6.096838, -9.162214]), v0_val)
          self.assertAllCloseAccordingToType(
              np.array([-0.717741, -1.425132]), v1_val)

  def testFtrlWithL2_Beta(self):
    """FTRL with L2 regularization combined with beta."""
    # The v1 optimizers do not support eager execution
    with ops.Graph().as_default():
      for dtype in [dtypes.half, dtypes.float32]:
        with self.cached_session():
          var0 = variables.Variable([1.0, 2.0], dtype=dtype)
          var1 = variables.Variable([4.0, 3.0], dtype=dtype)
          grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
          grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
          opt = ftrl.FtrlOptimizer(
              3.0,
              initial_accumulator_value=0.1,
              l1_regularization_strength=0.0,
              l2_regularization_strength=0.1,
              beta=0.1)
          update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
          self.evaluate(variables.global_variables_initializer())
          v0_val, v1_val = self.evaluate([var0, var1])
          self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
          self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
          # Run 10 steps FTRL
          for _ in range(10):
            update.run()
          v0_val, v1_val = self.evaluate([var0, var1])
          self.assertAllCloseAccordingToType(
              np.array([-2.735487, -4.704625]), v0_val)
          self.assertAllCloseAccordingToType(
              np.array([-0.294335, -0.586556]), v1_val)

  def testFtrlWithL1_L2(self):
    """FTRL with both L1 and L2 regularization."""
    # The v1 optimizers do not support eager execution
    with ops.Graph().as_default():
      for dtype in [dtypes.half, dtypes.float32]:
        with self.cached_session():
          var0 = variables.Variable([1.0, 2.0], dtype=dtype)
          var1 = variables.Variable([4.0, 3.0], dtype=dtype)
          grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
          grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
          opt = ftrl.FtrlOptimizer(
              3.0,
              initial_accumulator_value=0.1,
              l1_regularization_strength=0.001,
              l2_regularization_strength=2.0)
          update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
          self.evaluate(variables.global_variables_initializer())
          v0_val, v1_val = self.evaluate([var0, var1])
          self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
          self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
          # Run 10 steps FTRL
          for _ in range(10):
            update.run()
          v0_val, v1_val = self.evaluate([var0, var1])
          self.assertAllCloseAccordingToType(
              np.array([-0.24059935, -0.46829352]), v0_val)
          self.assertAllCloseAccordingToType(
              np.array([-0.02406147, -0.04830509]), v1_val)

  def testFtrlWithL1_L2_L2Shrinkage(self):
    """Test the new FTRL op with support for l2 shrinkage.

    The addition of this parameter which places a constant pressure on weights
    towards the origin causes the gradient descent trajectory to differ. The
    weights will tend to have smaller magnitudes with this parameter set.
    """
    # The v1 optimizers do not support eager execution
    with ops.Graph().as_default():
      for dtype in [dtypes.half, dtypes.float32]:
        with self.cached_session():
          var0 = variables.Variable([1.0, 2.0], dtype=dtype)
          var1 = variables.Variable([4.0, 3.0], dtype=dtype)
          grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
          grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
          opt = ftrl.FtrlOptimizer(
              3.0,
              initial_accumulator_value=0.1,
              l1_regularization_strength=0.001,
              l2_regularization_strength=2.0,
              l2_shrinkage_regularization_strength=0.1)
          update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
          self.evaluate(variables.global_variables_initializer())
          v0_val, v1_val = self.evaluate([var0, var1])
          self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
          self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
          # Run 10 steps FTRL
          for _ in range(10):
            update.run()
          v0_val, v1_val = self.evaluate([var0, var1])
          self.assertAllCloseAccordingToType(
              np.array([-0.22578995, -0.44345796]), v0_val)
          self.assertAllCloseAccordingToType(
              np.array([-0.14378493, -0.13229476]), v1_val)

  def testFtrlWithL1_L2_L2ShrinkageSparse(self):
    """Tests the new FTRL op with support for l2 shrinkage on sparse grads."""
    # The v1 optimizers do not support eager execution
    with ops.Graph().as_default():
      for dtype in [dtypes.half, dtypes.float32]:
        with self.cached_session():
          var0 = variables.Variable([[1.0], [2.0]], dtype=dtype)
          var1 = variables.Variable([[4.0], [3.0]], dtype=dtype)
          grads0 = indexed_slices.IndexedSlices(
              constant_op.constant([0.1], shape=[1, 1], dtype=dtype),
              constant_op.constant([0]), constant_op.constant([2, 1]))
          grads1 = indexed_slices.IndexedSlices(
              constant_op.constant([0.02], shape=[1, 1], dtype=dtype),
              constant_op.constant([1]), constant_op.constant([2, 1]))
          opt = ftrl.FtrlOptimizer(
              3.0,
              initial_accumulator_value=0.1,
              l1_regularization_strength=0.001,
              l2_regularization_strength=2.0,
              l2_shrinkage_regularization_strength=0.1)
          update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
          self.evaluate(variables.global_variables_initializer())
          v0_val, v1_val = self.evaluate([var0, var1])
          self.assertAllCloseAccordingToType([[1.0], [2.0]], v0_val)
          self.assertAllCloseAccordingToType([[4.0], [3.0]], v1_val)
          # Run 10 steps FTRL
          for _ in range(10):
            update.run()
          v0_val, v1_val = self.evaluate([var0, var1])
          # Rows not touched by the sparse gradients keep their initial value.
          self.assertAllCloseAccordingToType([[-0.22578995], [2.]], v0_val)
          self.assertAllCloseAccordingToType([[4.], [-0.13229476]], v1_val)

  def testFtrlWithL2ShrinkageDoesNotChangeLrSchedule(self):
    """Verifies that l2 shrinkage in FTRL does not change lr schedule."""
    # The v1 optimizers do not support eager execution
    with ops.Graph().as_default():
      for dtype in [dtypes.half, dtypes.float32]:
        with self.cached_session():
          var0 = variables.Variable([1.0, 2.0], dtype=dtype)
          var1 = variables.Variable([1.0, 2.0], dtype=dtype)
          grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
          grads1 = constant_op.constant([0.1, 0.2], dtype=dtype)
          opt0 = ftrl.FtrlOptimizer(
              3.0,
              initial_accumulator_value=0.1,
              l1_regularization_strength=0.001,
              l2_regularization_strength=2.0,
              l2_shrinkage_regularization_strength=0.1)
          opt1 = ftrl.FtrlOptimizer(
              3.0,
              initial_accumulator_value=0.1,
              l1_regularization_strength=0.001,
              l2_regularization_strength=2.0)
          update0 = opt0.apply_gradients([(grads0, var0)])
          update1 = opt1.apply_gradients([(grads1, var1)])
          self.evaluate(variables.global_variables_initializer())
          v0_val, v1_val = self.evaluate([var0, var1])
          self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
          self.assertAllCloseAccordingToType([1.0, 2.0], v1_val)
          # Run 10 steps FTRL
          for _ in range(10):
            update0.run()
            update1.run()
          v0_val, v1_val = self.evaluate([var0, var1])
          # var0 is experiencing L2 shrinkage so it should be smaller than var1
          # in magnitude.
          self.assertTrue((v0_val**2 < v1_val**2).all())
          accum0 = list(self.evaluate(opt0._slots)["accum"].values())[0]
          accum1 = list(self.evaluate(opt1._slots)["accum"].values())[0]
          # L2 shrinkage should not change how we update grad accumulator.
          self.assertAllCloseAccordingToType(accum0, accum1)

  def applyOptimizer(self, opt, dtype, steps=5, is_sparse=False):
    """Runs `opt` for `steps` iterations on fixed gradients.

    Returns the final (var0, var1) values so callers can compare different
    optimizers on identical inputs.
    """
    if is_sparse:
      var0 = variables.Variable([[0.0], [0.0]], dtype=dtype)
      var1 = variables.Variable([[0.0], [0.0]], dtype=dtype)
      grads0 = indexed_slices.IndexedSlices(
          constant_op.constant([0.1], shape=[1, 1], dtype=dtype),
          constant_op.constant([0]), constant_op.constant([2, 1]))
      grads1 = indexed_slices.IndexedSlices(
          constant_op.constant([0.02], shape=[1, 1], dtype=dtype),
          constant_op.constant([1]), constant_op.constant([2, 1]))
    else:
      var0 = variables.Variable([0.0, 0.0], dtype=dtype)
      var1 = variables.Variable([0.0, 0.0], dtype=dtype)
      grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
      grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
    update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    self.evaluate(variables.global_variables_initializer())
    # NOTE: a dead `sess = ops.get_default_session()` local was removed here;
    # the session is implicit via the enclosing cached_session().
    v0_val, v1_val = self.evaluate([var0, var1])
    if is_sparse:
      self.assertAllCloseAccordingToType([[0.0], [0.0]], v0_val)
      self.assertAllCloseAccordingToType([[0.0], [0.0]], v1_val)
    else:
      self.assertAllCloseAccordingToType([0.0, 0.0], v0_val)
      self.assertAllCloseAccordingToType([0.0, 0.0], v1_val)
    # Run Ftrl for a few steps
    for _ in range(steps):
      update.run()
    v0_val, v1_val = self.evaluate([var0, var1])
    return v0_val, v1_val

  # When variables are initialized with Zero, FTRL-Proximal has two properties:
  # 1. Without L1&L2 but with fixed learning rate, FTRL-Proximal is identical
  # with GradientDescent.
  # 2. Without L1&L2 but with adaptive learning rate, FTRL-Proximal is identical
  # with Adagrad.
  # So, basing on these two properties, we test if our implementation of
  # FTRL-Proximal performs same updates as Adagrad or GradientDescent.
  def testEquivAdagradwithoutRegularization(self):
    """FTRL with power -0.5 and no regularization matches Adagrad."""
    # The v1 optimizers do not support eager execution
    with ops.Graph().as_default():
      for dtype in [dtypes.half, dtypes.float32]:
        with self.cached_session():
          val0, val1 = self.applyOptimizer(
              ftrl.FtrlOptimizer(
                  3.0,
                  # Adagrad learning rate
                  learning_rate_power=-0.5,
                  initial_accumulator_value=0.1,
                  l1_regularization_strength=0.0,
                  l2_regularization_strength=0.0),
              dtype)
        with self.cached_session():
          val2, val3 = self.applyOptimizer(
              adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1),
              dtype)
        self.assertAllCloseAccordingToType(val0, val2, half_rtol=2e-3)
        self.assertAllCloseAccordingToType(val1, val3, half_rtol=2e-3)

  def testEquivSparseAdagradwithoutRegularization(self):
    """Sparse FTRL with power -0.5 and no regularization matches Adagrad."""
    # The v1 optimizers do not support eager execution
    with ops.Graph().as_default():
      for dtype in [dtypes.half, dtypes.float32]:
        with self.cached_session():
          val0, val1 = self.applyOptimizer(
              ftrl.FtrlOptimizer(
                  3.0,
                  # Adagrad learning rate
                  learning_rate_power=-0.5,
                  initial_accumulator_value=0.1,
                  l1_regularization_strength=0.0,
                  l2_regularization_strength=0.0),
              dtype,
              is_sparse=True)
        with self.cached_session():
          val2, val3 = self.applyOptimizer(
              adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1),
              dtype,
              is_sparse=True)
        self.assertAllCloseAccordingToType(val0, val2)
        self.assertAllCloseAccordingToType(val1, val3)

  def testEquivSparseGradientDescentwithoutRegularization(self):
    """Sparse FTRL with fixed lr and no regularization matches SGD."""
    # The v1 optimizers do not support eager execution
    with ops.Graph().as_default():
      for dtype in [dtypes.half, dtypes.float32]:
        with self.cached_session():
          val0, val1 = self.applyOptimizer(
              ftrl.FtrlOptimizer(
                  3.0,
                  # Fixed learning rate
                  learning_rate_power=-0.0,
                  initial_accumulator_value=0.1,
                  l1_regularization_strength=0.0,
                  l2_regularization_strength=0.0),
              dtype,
              is_sparse=True)
        with self.cached_session():
          val2, val3 = self.applyOptimizer(
              gradient_descent.GradientDescentOptimizer(3.0),
              dtype,
              is_sparse=True)
        self.assertAllCloseAccordingToType(val0, val2)
        self.assertAllCloseAccordingToType(val1, val3)

  def testEquivGradientDescentwithoutRegularization(self):
    """FTRL with fixed lr and no regularization matches SGD."""
    # The v1 optimizers do not support eager execution
    with ops.Graph().as_default():
      for dtype in [dtypes.half, dtypes.float32]:
        with self.cached_session():
          val0, val1 = self.applyOptimizer(
              ftrl.FtrlOptimizer(
                  3.0,
                  # Fixed learning rate
                  learning_rate_power=-0.0,
                  initial_accumulator_value=0.1,
                  l1_regularization_strength=0.0,
                  l2_regularization_strength=0.0),
              dtype)
        with self.cached_session():
          val2, val3 = self.applyOptimizer(
              gradient_descent.GradientDescentOptimizer(3.0), dtype)
        self.assertAllCloseAccordingToType(val0, val2)
        self.assertAllCloseAccordingToType(val1, val3)
if __name__ == "__main__":
  # Run the TensorFlow test harness when executed directly.
  test.main()
| |
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 17 13:47:34 2014
@author: Slawomir Figiel
"""
import click
import scipy.io
from os.path import isfile
from numpy import average as avr, std as nstd, load
from matrixmultiplication import multiply_cpu, multiply_ellpack, \
multiply_sliced, multiply_sertilp, \
multiply_ertilp, multiply_csr
from matrixutilites import string_vector, result_equals, dict_vector_paths
from filesutilites import path_reduction, sort_paths
# Severity label -> click color name, used for console status messages.
COLORS = {'success' : 'green',
          'info' : 'cyan',
          'danger' : 'red',
          'warning' : 'yellow'
          }
@click.command()
@click.option('-b', '--block', default=128,
              help='Block size for CUDA. Default: 128')
@click.option('-ss', '--slice-size', 'ss', default=64,
              help='Slice size for ...Ellpack. Default: 64')
@click.option('-tpr', '--thread-per-row', 'tpr', default=2,
              help='Thread per row. Default: 2')
@click.option('-a', '--align', default=32,
              help='Align const for Ellpack. Default: 32')
@click.option('-p', '--prefetch', default=2,
              help='PreFetch for SlicedEllpack. Default: 2')
@click.option('-r', '--repeat', default=1,
              help='Count of repetitions calculations. Deafult: 1')
@click.option('-csr', '--csr', 'csr', is_flag=True, help='Use CSR format')
@click.option('-ell', '--ellpack', 'ell', is_flag=True,
              help='Use Ellpack format')
@click.option('-sle', '--sliced', 'sle', is_flag=True,
              help='Use Sliced Ellpack format')
@click.option('-see', '--sertilp', 'see', is_flag=True,
              help='Use Sertilp Ellpack format')
@click.option('-ert', '--ertilp', 'ert', is_flag=True,
              help='Use Ertilp format')
@click.option('-cpu', '--cpu', 'cpu', is_flag=True,
              help='Use CPU method multiplication (without GPU) on Numpy')
@click.option('-rst', '--result', is_flag=True,
              help='Print result multiplication.')
@click.option('-t', '--time', is_flag=True,
              help='Print list of time multiplication')
@click.option('-avr', '--avrtime', is_flag=True,
              help='Print average time multiplication')
@click.option('-std', '--standard-deviation', 'std', is_flag=True,
              help='Print standard deviation of time multiplication')
@click.option('--test', type=click.FLOAT,
              help='Testing result multiplication. Print bad row. Value is '
                   'confidence factor.')
@click.option('-com', '--compensate', 'com', type=click.INT,
              help='N first time are remove (returned times decremented '
                   'by n). Recommended in testing the speed, because the n '
                   'first times (e. g. one) are a long delay.')
@click.option('-o', '--output', type=click.File(mode='a', lazy=True),
              help='File to save raport. Format CSV. If exist append new '
                   'data. Added to the file it info if it is created.')
@click.option('-param', '--parameters', is_flag=True,
              help='Print value of parameters.')
@click.option('-sep', '--separator', 'sep', default='; ',
              help='Separator for data in report. Default: "; "')
@click.argument('vector-path', nargs=1, required=True,
                type=click.Path(exists=True))
@click.argument('matrix-path', nargs=1, required=True,
                type=click.Path(exists=True))
def cli(block, ss, tpr, align, prefetch, csr, ell, sle, see, ert, cpu,
        repeat, result, time, avrtime, std, test, com, output, parameters,
        vector_path, sep, matrix_path):
    '''
    Command line interface for test execute matrix multiplication.

    Multiplies every matrix found under MATRIX-PATH by a matching-length
    vector found under VECTOR-PATH, in each of the selected formats, and
    optionally reports/records timings.
    '''
    eol = '\n'
    param = {'Block' : str(block),
             'Slice size' : str(ss),
             'Threads per row' : str(tpr),
             'Align' : str(align),
             'Prefetch' : str(prefetch),
             'Repeat' : str(repeat),
             'Compensate' : str(com) if com else '0'
             }
    # Map vector length -> vector file path, so each matrix can be paired
    # with a vector whose length equals its column count.
    vectors_dict = dict_vector_paths(
        sort_paths(path_reduction([str(vector_path)]), '.npy')['.npy'])
    matrices_paths = sort_paths(
        path_reduction([str(matrix_path)]), '.mtx')['.mtx']
    if com:
        # Run `com` extra warm-up repetitions; _resume_result strips them
        # from the reported times again.
        repeat += com
    #Print parameters
    if parameters:
        click.secho(_get_msg('paramInfo'), fg=COLORS['info'])
        param_rows = []
        for key, value in param.items():
            param_rows.append('    {0:<20}{1}'.format(key, value))
        click.echo('\n'.join(param_rows))
    #Create file and add headers
    if output and not isfile(output.name):
        output.write(sep.join(param.keys()) + eol)
        output.write(sep.join(param.values()) + eol)
        output.write(eol)
        headers = ['matrix', 'format', 'average time',
                   'standard deviation time', 'times']
        output.write(sep.join(headers) + eol)
    for matrix_path in matrices_paths:
        try:
            matrix = scipy.io.mmread(matrix_path)
        # BUG FIX: was a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; keep the best-effort skip but only
        # for ordinary errors.
        except Exception:
            click.secho(_get_msg('open_failed') % matrix_path,
                        fg=COLORS['danger'])
            continue
        #Find vector to matrix
        cols = matrix.shape[1]
        if cols not in vectors_dict:
            click.secho(_get_msg('bad_length') % matrix_path,
                        fg=COLORS['danger'])
            continue
        vector_path = vectors_dict[cols]
        click.secho(_get_msg('multiply') % (matrix_path, vector_path),
                    fg=COLORS['success'])
        vector = load(vector_path)
        result_numpy = ''
        #Multiplication
        if cpu:
            click.secho(_get_msg('multiplyCpu'), fg=COLORS['warning'])
            result_multiply = multiply_cpu(matrix, repeat=repeat,
                                           vector=vector)
            if test:
                result_numpy = result_multiply[0]
            _resume_result(result_multiply=result_multiply,
                           result_print=result, time_print=time,
                           avr_time_print=avrtime, std_time_print=std,
                           output=output, format_name='cpu', compensate=com,
                           matrix_name=matrix_path, sep=sep, eol=eol)
        elif test:
            # Reference result for --test even when --cpu was not requested.
            result_numpy = multiply_cpu(matrix, vector=vector,
                                        repeat=repeat)[0]
        if csr:
            click.secho(_get_msg('multiply_csr'), fg=COLORS['warning'])
            result_multiply = multiply_csr(matrix, vector=vector,
                                           repeat=repeat, block_size=block)
            _resume_result(result_multiply=result_multiply,
                           result_print=result, time_print=time,
                           avr_time_print=avrtime, std_time_print=std,
                           output=output, format_name='csr', compensate=com,
                           matrix_name=matrix_path, sep=sep, eol=eol)
            if test:
                _test_result(result_numpy, result_multiply[0], test)
        if ell:
            click.secho(_get_msg('multiplyEll'), fg=COLORS['warning'])
            result_multiply = multiply_ellpack(matrix, vector=vector,
                                               repeat=repeat, block_size=block)
            _resume_result(result_multiply=result_multiply,
                           result_print=result, time_print=time,
                           avr_time_print=avrtime, std_time_print=std,
                           output=output, format_name='ellpack',
                           compensate=com, matrix_name=matrix_path,
                           sep=sep, eol=eol)
            if test:
                _test_result(result_numpy, result_multiply[0], test)
        if sle:
            click.secho(_get_msg('multiplySliced'), fg=COLORS['warning'])
            result_multiply = multiply_sliced(matrix, vector=vector,
                                              align=align, slice_size=ss,
                                              threads_per_row=tpr,
                                              repeat=repeat)
            _resume_result(result_multiply=result_multiply,
                           result_print=result, time_print=time,
                           avr_time_print=avrtime, std_time_print=std,
                           output=output, format_name='sliced',
                           compensate=com, matrix_name=matrix_path, sep=sep,
                           eol=eol)
            if test:
                _test_result(result_numpy, result_multiply[0], test)
        if see:
            click.secho(_get_msg('multiply_sertilp'), fg=COLORS['warning'])
            result_multiply = multiply_sertilp(matrix, vector=vector,
                                               align=align, slice_size=ss,
                                               threads_per_row=tpr,
                                               prefetch=prefetch,
                                               repeat=repeat)
            _resume_result(result_multiply=result_multiply,
                           result_print=result, time_print=time,
                           avr_time_print=avrtime, std_time_print=std,
                           output=output, format_name='sertilp',
                           compensate=com, matrix_name=matrix_path,
                           sep=sep, eol=eol)
            if test:
                _test_result(result_numpy, result_multiply[0], test)
        if ert:
            click.secho(_get_msg('multiply_ertilp'), fg=COLORS['warning'])
            result_multiply = multiply_ertilp(matrix, vector=vector,
                                              block_size=block,
                                              threads_per_row=tpr,
                                              prefetch=prefetch, repeat=repeat)
            _resume_result(result_multiply=result_multiply,
                           result_print=result, time_print=time,
                           avr_time_print=avrtime, std_time_print=std,
                           output=output, format_name='ertilp',
                           compensate=com, matrix_name=matrix_path,
                           sep=sep, eol=eol)
            if test:
                _test_result(result_numpy, result_multiply[0], test)
def _resume_result(result_multiply, result_print, time_print, avr_time_print,
                   std_time_print, output, format_name, compensate,
                   matrix_name, sep, eol):
    '''
    Report one multiplication run: optional console output and CSV row.

    ``result_multiply`` is a (result_vector, times) pair; the boolean
    flags select which pieces are echoed to the console.
    '''
    times = result_multiply[1]
    if compensate:
        # Drop the first `compensate` warm-up timings.
        times = times[compensate:]
    if result_print:
        click.echo(_get_msg('result') + string_vector(result_multiply[0]))
    if time_print:
        click.echo(_get_msg('timeList') + str(times))
    if avr_time_print:
        click.echo(_get_msg('avr_time') + str(avr(times)))
    if std_time_print:
        click.echo(_get_msg('std_time') + str(nstd(times)))
    if output:
        # CSV row: matrix, format, avg, stddev, then the raw times.
        row = [matrix_name, format_name, str(avr(times)), str(nstd(times))]
        row.extend(str(t) for t in times)
        output.write(sep.join(row) + eol)
def _test_result(model, check, confidence_factor):
    '''Compare result vectors and print the rows that differ, if any.'''
    lines = [_get_msg('test')]
    differences = result_equals(model, check, confidence_factor)
    rendered = string_vector(
        [str(d) for d in differences],
        value_format='%s',
        width=100,
        row_format=' {0:<7}{1:<}'
    )
    # Only append the body when there is something beyond whitespace.
    if rendered.lstrip():
        lines.append(rendered)
    click.echo('\n'.join(lines))
def _get_msg(id_message):
''' Method return message to be printed on console. '''
return {'error' : 'error',
'title' : 'Process matrix: ',
'multiply' : 'Multiply matrix %s by the vector %s',
'multiplyCpu' : 'Multiplication with Numpy (only CPU)',
'multiplyEll' : 'Multiplication with ELLPACK',
'multiply_sertilp' : 'Multiplication with SERTILP',
'multiplySliced' : 'Multiplication with SLICED',
'multiply_ertilp' : 'Multiplication with ERTILP',
'multiply_csr' : 'Multiplication with CSR',
'result' : 'Result: ',
'timeList' : 'List of times multiplication [ms]: ',
'avr_time' : 'Average time [ms]: ',
'test': 'Errors (position, different, relative error): ',
'vec': 'Representation of data vector: ',
'paramInfo': 'Parameters for multiplication: ',
'open_failed': "File %s open failed.",
'bad_length': 'Does not exist a vector with length equal to '
'number of columns of matrix: %s.',
'std_time': 'Standard deviation: '
}.get(id_message, 'error')
if __name__ == '__main__':
    # Entry point: hand control to the click command group.
    cli()
| |
#!/usr/bin/env python3
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
from collections import defaultdict
import logging
import os
import re
import shutil
import sys
import tempfile
import zipfile
import dex
import dex_jdk_libs
from pylib.dex import dex_parser
from util import build_utils
from util import diff_utils
# (Android API level, version code-name suffix) pairs, ordered by API level.
_API_LEVEL_VERSION_CODE = [
    (21, 'L'),
    (22, 'LollipopMR1'),
    (23, 'M'),
    (24, 'N'),
    (25, 'NMR1'),
    (26, 'O'),
    (27, 'OMR1'),
    (28, 'P'),
    (29, 'Q'),
    (30, 'R'),
    (31, 'S'),
]
def _ParseOptions():
  """Parses and validates command-line arguments.

  Returns:
    argparse.Namespace with GN lists expanded into Python lists and
    --uses-split converted into a child->parent dict.
  """
  args = build_utils.ExpandFileArgs(sys.argv[1:])
  parser = argparse.ArgumentParser()
  build_utils.AddDepfileOption(parser)
  parser.add_argument('--r8-path',
                      required=True,
                      help='Path to the R8.jar to use.')
  parser.add_argument(
      '--desugar-jdk-libs-json', help='Path to desugar_jdk_libs.json.')
  parser.add_argument('--input-paths',
                      action='append',
                      required=True,
                      help='GN-list of .jar files to optimize.')
  parser.add_argument('--desugar-jdk-libs-jar',
                      help='Path to desugar_jdk_libs.jar.')
  parser.add_argument('--desugar-jdk-libs-configuration-jar',
                      help='Path to desugar_jdk_libs_configuration.jar.')
  parser.add_argument('--output-path', help='Path to the generated .jar file.')
  parser.add_argument(
      '--proguard-configs',
      action='append',
      required=True,
      help='GN-list of configuration files.')
  parser.add_argument(
      '--apply-mapping', help='Path to ProGuard mapping to apply.')
  parser.add_argument(
      '--mapping-output',
      required=True,
      help='Path for ProGuard to output mapping file to.')
  parser.add_argument(
      '--extra-mapping-output-paths',
      help='GN-list of additional paths to copy output mapping file to.')
  parser.add_argument(
      '--classpath',
      action='append',
      help='GN-list of .jar files to include as libraries.')
  parser.add_argument('--main-dex-rules-path',
                      action='append',
                      help='Path to main dex rules for multidex.')
  parser.add_argument(
      '--min-api', help='Minimum Android API level compatibility.')
  parser.add_argument('--enable-obfuscation',
                      action='store_true',
                      help='Minify symbol names')
  parser.add_argument(
      '--verbose', '-v', action='store_true', help='Print all ProGuard output')
  parser.add_argument(
      '--repackage-classes', help='Package all optimized classes are put in.')
  parser.add_argument(
      '--disable-outlining',
      action='store_true',
      help='Disable the outlining optimization provided by R8.')
  parser.add_argument(
      '--disable-checks',
      action='store_true',
      help='Disable -checkdiscard directives and missing symbols check')
  parser.add_argument('--sourcefile', help='Value for source file attribute')
  parser.add_argument(
      '--force-enable-assertions',
      action='store_true',
      help='Forcefully enable javac generated assertion code.')
  parser.add_argument(
      '--feature-jars',
      action='append',
      help='GN list of path to jars which comprise the corresponding feature.')
  parser.add_argument(
      '--dex-dest',
      action='append',
      dest='dex_dests',
      help='Destination for dex file of the corresponding feature.')
  parser.add_argument(
      '--feature-name',
      action='append',
      dest='feature_names',
      help='The name of the feature module.')
  parser.add_argument(
      '--uses-split',
      action='append',
      help='List of name pairs separated by : mapping a feature module to a '
      'dependent feature module.')
  parser.add_argument(
      '--keep-rules-targets-regex',
      metavar='KEEP_RULES_REGEX',
      help='If passed outputs keep rules for references from all other inputs '
      'to the subset of inputs that satisfy the KEEP_RULES_REGEX.')
  parser.add_argument(
      '--keep-rules-output-path',
      help='Output path to the keep rules for references to the '
      '--keep-rules-targets-regex inputs from the rest of the inputs.')
  parser.add_argument('--warnings-as-errors',
                      action='store_true',
                      help='Treat all warnings as errors.')
  parser.add_argument('--show-desugar-default-interface-warnings',
                      action='store_true',
                      help='Enable desugaring warnings.')
  parser.add_argument('--dump-inputs',
                      action='store_true',
                      help='Use when filing R8 bugs to capture inputs.'
                      ' Stores inputs to r8inputs.zip')
  parser.add_argument(
      '--stamp',
      help='File to touch upon success. Mutually exclusive with --output-path')
  parser.add_argument('--desugared-library-keep-rule-output',
                      help='Path to desugared library keep rule output file.')
  diff_utils.AddCommandLineFlags(parser)
  options = parser.parse_args(args)
  if options.feature_names:
    if options.output_path:
      parser.error('Feature splits cannot specify an output in GN.')
    if not options.actual_file and not options.stamp:
      parser.error('Feature splits require a stamp file as output.')
  elif not options.output_path:
    parser.error('Output path required when feature splits aren\'t used')
  if bool(options.keep_rules_targets_regex) != bool(
      options.keep_rules_output_path):
    # Fixed error-message typo: was "You must path both".
    raise Exception('You must pass both --keep-rules-targets-regex and '
                    '--keep-rules-output-path')
  # Expand GN-list syntax into plain Python lists.
  options.classpath = build_utils.ParseGnList(options.classpath)
  options.proguard_configs = build_utils.ParseGnList(options.proguard_configs)
  options.input_paths = build_utils.ParseGnList(options.input_paths)
  options.extra_mapping_output_paths = build_utils.ParseGnList(
      options.extra_mapping_output_paths)
  if options.feature_names:
    if 'base' not in options.feature_names:
      parser.error('"base" feature required when feature arguments are used.')
    if len(options.feature_names) != len(options.feature_jars) or len(
        options.feature_names) != len(options.dex_dests):
      parser.error('Invalid feature argument lengths.')
    options.feature_jars = [
        build_utils.ParseGnList(x) for x in options.feature_jars
    ]
  # Convert --uses-split "child:parent" pairs into a dict, validating names.
  split_map = {}
  if options.uses_split:
    for split_pair in options.uses_split:
      child, parent = split_pair.split(':')
      for name in (child, parent):
        if name not in options.feature_names:
          parser.error('"%s" referenced in --uses-split not present.' % name)
      split_map[child] = parent
  options.uses_split = split_map
  return options
class _SplitContext(object):
  """Tracks the input jars and dex output destination for one feature split.

  The 'base' split always exists; other instances correspond to feature
  modules arranged in a tree via |parent_name|.
  """
  def __init__(self, name, output_path, input_jars, work_dir, parent_name=None):
    # Split name ('base' or a feature module name).
    self.name = name
    # Name of the parent split (None for the root/base split).
    self.parent_name = parent_name
    self.input_jars = set(input_jars)
    # Final .dex or .jar destination for this split's output.
    self.final_output_path = output_path
    # Per-split scratch directory that R8 writes dex files into.
    self.staging_dir = os.path.join(work_dir, name)
    os.mkdir(self.staging_dir)
  def CreateOutput(self, has_imported_lib=False, keep_rule_output=None):
    """Moves staged R8 output into |final_output_path|.

    Raises Exception when the staging dir is empty, or when a single .dex
    output was requested but desugaring produced an extra file.
    """
    found_files = build_utils.FindInDirectory(self.staging_dir)
    if not found_files:
      raise Exception('Missing dex outputs in {}'.format(self.staging_dir))
    if self.final_output_path.endswith('.dex'):
      if has_imported_lib:
        raise Exception(
            'Trying to create a single .dex file, but a dependency requires '
            'JDK Library Desugaring (which necessitates a second file).'
            'Refer to %s to see what desugaring was required' %
            keep_rule_output)
      if len(found_files) != 1:
        raise Exception('Expected exactly 1 dex file output, found: {}'.format(
            '\t'.join(found_files)))
      shutil.move(found_files[0], self.final_output_path)
      return
    # Add to .jar using Python rather than having R8 output to a .zip directly
    # in order to disable compression of the .jar, saving ~500ms.
    tmp_jar_output = self.staging_dir + '.jar'
    build_utils.DoZip(found_files, tmp_jar_output, base_dir=self.staging_dir)
    shutil.move(tmp_jar_output, self.final_output_path)
def _DeDupeInputJars(split_contexts_by_name):
"""Moves jars used by multiple splits into common ancestors.
Updates |input_jars| for each _SplitContext.
"""
def count_ancestors(split_context):
ret = 0
if split_context.parent_name:
ret += 1
ret += count_ancestors(split_contexts_by_name[split_context.parent_name])
return ret
base_context = split_contexts_by_name['base']
# Sort by tree depth so that ensure children are visited before their parents.
sorted_contexts = list(split_contexts_by_name.values())
sorted_contexts.remove(base_context)
sorted_contexts.sort(key=count_ancestors, reverse=True)
# If a jar is present in multiple siblings, promote it to their parent.
seen_jars_by_parent = defaultdict(set)
for split_context in sorted_contexts:
seen_jars = seen_jars_by_parent[split_context.parent_name]
new_dupes = seen_jars.intersection(split_context.input_jars)
parent_context = split_contexts_by_name[split_context.parent_name]
parent_context.input_jars.update(new_dupes)
seen_jars.update(split_context.input_jars)
def ancestor_jars(parent_name, dest=None):
dest = dest or set()
if not parent_name:
return dest
parent_context = split_contexts_by_name[parent_name]
dest.update(parent_context.input_jars)
return ancestor_jars(parent_context.parent_name, dest)
# Now that jars have been moved up the tree, remove those that appear in
# ancestors.
for split_context in sorted_contexts:
split_context.input_jars -= ancestor_jars(split_context.parent_name)
def _OptimizeWithR8(options,
                    config_paths,
                    libraries,
                    dynamic_config_data,
                    print_stdout=False):
  """Runs R8 (and L8 when JDK library desugaring is enabled) over the inputs.

  Returns the 'base' _SplitContext after all split outputs have been moved
  into their final locations and the de-commented mapping file written.
  Raises build_utils.CalledProcessError (with a debugging link appended) if
  R8 fails.
  """
  with build_utils.TempDir() as tmp_dir:
    if dynamic_config_data:
      dynamic_config_path = os.path.join(tmp_dir, 'dynamic_config.flags')
      with open(dynamic_config_path, 'w') as f:
        f.write(dynamic_config_data)
      config_paths = config_paths + [dynamic_config_path]
    tmp_mapping_path = os.path.join(tmp_dir, 'mapping.txt')
    # If there is no output (no classes are kept), this prevents this script
    # from failing.
    build_utils.Touch(tmp_mapping_path)
    tmp_output = os.path.join(tmp_dir, 'r8out')
    os.mkdir(tmp_output)
    # Build one _SplitContext per feature; features without an explicit
    # --uses-split parent hang off of 'base'.
    split_contexts_by_name = {}
    if options.feature_names:
      for name, dest_dex, input_jars in zip(options.feature_names,
                                            options.dex_dests,
                                            options.feature_jars):
        parent_name = options.uses_split.get(name)
        if parent_name is None and name != 'base':
          parent_name = 'base'
        split_context = _SplitContext(name,
                                      dest_dex,
                                      input_jars,
                                      tmp_output,
                                      parent_name=parent_name)
        split_contexts_by_name[name] = split_context
    else:
      # Base context will get populated via "extra_jars" below.
      split_contexts_by_name['base'] = _SplitContext('base',
                                                     options.output_path, [],
                                                     tmp_output)
    base_context = split_contexts_by_name['base']
    # R8 OOMs with the default xmx=1G.
    cmd = build_utils.JavaCmd(options.warnings_as_errors, xmx='2G') + [
        '-Dcom.android.tools.r8.allowTestProguardOptions=1',
        '-Dcom.android.tools.r8.verticalClassMerging=1',
        '-Dcom.android.tools.r8.disableHorizontalClassMerging=1',
    ]
    if options.disable_outlining:
      cmd += ['-Dcom.android.tools.r8.disableOutlining=1']
    if options.dump_inputs:
      cmd += ['-Dcom.android.tools.r8.dumpinputtofile=r8inputs.zip']
    cmd += [
        '-cp',
        options.r8_path,
        'com.android.tools.r8.R8',
        '--no-data-resources',
        '--output',
        base_context.staging_dir,
        '--pg-map-output',
        tmp_mapping_path,
    ]
    if options.disable_checks:
      # Info level priority logs are not printed by default.
      cmd += ['--map-diagnostics:CheckDiscardDiagnostic', 'error', 'info']
    if options.desugar_jdk_libs_json:
      cmd += [
          '--desugared-lib',
          options.desugar_jdk_libs_json,
          '--desugared-lib-pg-conf-output',
          options.desugared_library_keep_rule_output,
      ]
    if options.min_api:
      cmd += ['--min-api', options.min_api]
    if options.force_enable_assertions:
      cmd += ['--force-enable-assertions']
    for lib in libraries:
      cmd += ['--lib', lib]
    for config_file in config_paths:
      cmd += ['--pg-conf', config_file]
    if options.main_dex_rules_path:
      for main_dex_rule in options.main_dex_rules_path:
        cmd += ['--main-dex-rules', main_dex_rule]
    _DeDupeInputJars(split_contexts_by_name)
    # Add any extra inputs to the base context (e.g. desugar runtime).
    extra_jars = set(options.input_paths)
    for split_context in split_contexts_by_name.values():
      extra_jars -= split_context.input_jars
    base_context.input_jars.update(extra_jars)
    for split_context in split_contexts_by_name.values():
      if split_context is base_context:
        continue
      for in_jar in sorted(split_context.input_jars):
        cmd += ['--feature', in_jar, split_context.staging_dir]
    cmd += sorted(base_context.input_jars)
    try:
      stderr_filter = dex.CreateStderrFilter(
          options.show_desugar_default_interface_warnings)
      logging.debug('Running R8')
      build_utils.CheckOutput(cmd,
                              print_stdout=print_stdout,
                              stderr_filter=stderr_filter,
                              fail_on_output=options.warnings_as_errors)
    except build_utils.CalledProcessError as err:
      debugging_link = ('\n\nR8 failed. Please see {}.'.format(
          'https://chromium.googlesource.com/chromium/src/+/HEAD/build/'
          'android/docs/java_optimization.md#Debugging-common-failures\n'))
      raise build_utils.CalledProcessError(err.cwd, err.args,
                                           err.output + debugging_link)
    base_has_imported_lib = False
    if options.desugar_jdk_libs_json:
      logging.debug('Running L8')
      existing_files = build_utils.FindInDirectory(base_context.staging_dir)
      jdk_dex_output = os.path.join(base_context.staging_dir,
                                    'classes%d.dex' % (len(existing_files) + 1))
      # Use -applymapping to avoid name collisions.
      l8_dynamic_config_path = os.path.join(tmp_dir, 'l8_dynamic_config.flags')
      with open(l8_dynamic_config_path, 'w') as f:
        f.write("-applymapping '{}'\n".format(tmp_mapping_path))
      # Pass the dynamic config so that obfuscation options are picked up.
      l8_config_paths = [dynamic_config_path, l8_dynamic_config_path]
      if os.path.exists(options.desugared_library_keep_rule_output):
        l8_config_paths.append(options.desugared_library_keep_rule_output)
      base_has_imported_lib = dex_jdk_libs.DexJdkLibJar(
          options.r8_path, options.min_api, options.desugar_jdk_libs_json,
          options.desugar_jdk_libs_jar,
          options.desugar_jdk_libs_configuration_jar, jdk_dex_output,
          options.warnings_as_errors, l8_config_paths)
      if int(options.min_api) >= 24 and base_has_imported_lib:
        with open(jdk_dex_output, 'rb') as f:
          dexfile = dex_parser.DexFile(bytearray(f.read()))
          for m in dexfile.IterMethodSignatureParts():
            print('{}#{}'.format(m[0], m[2]))
        assert False, (
            'Desugared JDK libs are disabled on Monochrome and newer - see '
            'crbug.com/1159984 for details, and see above list for desugared '
            'classes and methods.')
    # Fixed log-message typo: was 'Collecting ouputs'.
    logging.debug('Collecting outputs')
    base_context.CreateOutput(base_has_imported_lib,
                              options.desugared_library_keep_rule_output)
    for split_context in split_contexts_by_name.values():
      if split_context is not base_context:
        split_context.CreateOutput()
    with open(options.mapping_output, 'w') as out_file, \
        open(tmp_mapping_path) as in_file:
      # Mapping files generated by R8 include comments that may break
      # some of our tooling so remove those (specifically: apkanalyzer).
      out_file.writelines(l for l in in_file if not l.startswith('#'))
    return base_context
def _OutputKeepRules(r8_path, input_paths, classpath, targets_re_string,
                     keep_rules_output):
  """Writes keep rules covering references from non-matching inputs to the
  inputs whose paths match |targets_re_string|."""
  cmd = build_utils.JavaCmd(False) + [
      '-cp', r8_path, 'com.android.tools.r8.tracereferences.TraceReferences',
      '--map-diagnostics:MissingDefinitionsDiagnostic', 'error', 'warning',
      '--keep-rules', '--output', keep_rules_output
  ]
  is_target = re.compile(targets_re_string).search
  # Inputs matching the regex are traced as targets; everything else as source.
  for input_path in input_paths:
    role = '--target' if is_target(input_path) else '--source'
    cmd += [role, input_path]
  for lib_path in classpath:
    cmd += ['--lib', lib_path]
  build_utils.CheckOutput(cmd, print_stderr=False, fail_on_output=False)
def _CheckForMissingSymbols(r8_path, dex_files, classpath, warnings_as_errors,
                            error_title):
  """Runs R8's TraceReferences over |dex_files| to find unresolved symbols.

  Known-benign missing references are filtered out of the tool's stderr; any
  remaining output is prefixed with |error_title| plus debugging tips and,
  when |warnings_as_errors| is set, fails the build.
  """
  cmd = build_utils.JavaCmd(warnings_as_errors) + [
      '-cp', r8_path, 'com.android.tools.r8.tracereferences.TraceReferences',
      '--map-diagnostics:MissingDefinitionsDiagnostic', 'error', 'warning',
      '--check'
  ]
  for path in classpath:
    cmd += ['--lib', path]
  for path in dex_files:
    cmd += ['--source', path]
  def stderr_filter(stderr):
    # Substrings of benign missing-reference lines to drop from the output.
    ignored_lines = [
        # Summary contains warning count, which our filtering makes wrong.
        'Warning: Tracereferences found',
        # TODO(agrieve): Create interface jars for these missing classes rather
        # than allowlisting here.
        'dalvik/system',
        'libcore/io',
        'sun/misc/Unsafe',
        # Found in: com/facebook/fbui/textlayoutbuilder/StaticLayoutHelper
        ('android/text/StaticLayout;<init>(Ljava/lang/CharSequence;IILandroid'
         '/text/TextPaint;ILandroid/text/Layout$Alignment;Landroid/text/'
         'TextDirectionHeuristic;FFZLandroid/text/TextUtils$TruncateAt;II)V'),
        # Found in
        # com/google/android/gms/cast/framework/media/internal/ResourceProvider
        # Missing due to setting "strip_resources = true".
        'com/google/android/gms/cast/framework/R',
        # Found in com/google/android/gms/common/GoogleApiAvailability
        # Missing due to setting "strip_drawables = true".
        'com/google/android/gms/base/R$drawable',
        # Explicitly guarded by try (NoClassDefFoundError) in Flogger's
        # PlatformProvider.
        'com/google/common/flogger/backend/google/GooglePlatform',
        'com/google/common/flogger/backend/system/DefaultPlatform',
        # trichrome_webview_google_bundle contains this missing reference.
        # TODO(crbug.com/1142530): Fix this missing reference properly.
        'org/chromium/build/NativeLibraries',
        # TODO(agrieve): Exclude these only when use_jacoco_coverage=true.
        'Ljava/lang/instrument/ClassFileTransformer',
        'Ljava/lang/instrument/IllegalClassFormatException',
        'Ljava/lang/instrument/Instrumentation',
        'Ljava/lang/management/ManagementFactory',
        'Ljavax/management/MBeanServer',
        'Ljavax/management/ObjectInstance',
        'Ljavax/management/ObjectName',
        'Ljavax/management/StandardMBean',
        # Explicitly guarded by try (NoClassDefFoundError) in Firebase's
        # KotlinDetector: com.google.firebase.platforminfo.KotlinDetector.
        'Lkotlin/KotlinVersion',
    ]
    had_unfiltered_items = ' ' in stderr
    stderr = build_utils.FilterLines(
        stderr, '|'.join(re.escape(x) for x in ignored_lines))
    if stderr:
      if ' ' in stderr:
        stderr = error_title + """
Tip: Build with:
    is_java_debug=false
    treat_warnings_as_errors=false
    enable_proguard_obfuscation=false
and then use dexdump to see which class(s) reference them.
E.g.:
    third_party/android_sdk/public/build-tools/*/dexdump -d \
out/Release/apks/YourApk.apk > dex.txt
""" + stderr
        if 'FragmentActivity' in stderr:
          stderr += """
You may need to update build configs to run FragmentActivityReplacer for
additional targets. See
https://chromium.googlesource.com/chromium/src.git/+/master/docs/ui/android/bytecode_rewriting.md.
"""
      elif had_unfiltered_items:
        # Left only with empty headings. All indented items filtered out.
        stderr = ''
    return stderr
  logging.debug('cmd: %s', ' '.join(cmd))
  build_utils.CheckOutput(cmd,
                          print_stdout=True,
                          stderr_filter=stderr_filter,
                          fail_on_output=warnings_as_errors)
def _CombineConfigs(configs, dynamic_config_data, exclude_generated=False):
ret = []
# Sort in this way so //clank versions of the same libraries will sort
# to the same spot in the file.
def sort_key(path):
return tuple(reversed(path.split(os.path.sep)))
for config in sorted(configs, key=sort_key):
if exclude_generated and config.endswith('.resources.proguard.txt'):
continue
with open(config) as config_file:
contents = config_file.read().rstrip()
if not contents.strip():
# Ignore empty files.
continue
# Fix up line endings (third_party configs can have windows endings).
contents = contents.replace('\r', '')
# Remove numbers from generated rule comments to make file more
# diff'able.
contents = re.sub(r' #generated:\d+', '', contents)
ret.append('# File: ' + config)
ret.append(contents)
ret.append('')
if dynamic_config_data:
ret.append('# File: //build/android/gyp/proguard.py (generated rules)')
ret.append(dynamic_config_data)
ret.append('')
return '\n'.join(ret)
def _CreateDynamicConfig(options):
  """Returns ProGuard config text derived from command-line flags."""
  # Our scripts already fail on output. Adding -ignorewarnings makes R8 output
  # warnings rather than throw exceptions so we can selectively ignore them via
  # dex.py's ignore list. Context: https://crbug.com/1180222
  lines = ["-ignorewarnings"]
  if options.sourcefile:
    lines.append("-renamesourcefileattribute '%s' # OMIT FROM EXPECTATIONS" %
                 options.sourcefile)
  lines.append("-repackageclasses ''" if options.enable_obfuscation else
               "-dontobfuscate")
  if options.apply_mapping:
    lines.append("-applymapping '%s'" % options.apply_mapping)

  min_api = int(options.min_api) if options.min_api else 0
  # Keep VerifiesOn* annotated members only for releases above min-api.
  for api_level, version_code in _API_LEVEL_VERSION_CODE:
    if api_level <= min_api:
      continue
    annotation_name = 'org.chromium.base.annotations.VerifiesOn' + version_code
    lines.append('-keep @interface %s' % annotation_name)
    lines.append("""\
-if @%s class * {
    *** *(...);
}
-keep,allowobfuscation class <1> {
    *** <2>(...);
}""" % annotation_name)
    lines.append("""\
-keepclassmembers,allowobfuscation class ** {
  @%s <methods>;
}""" % annotation_name)
  return '\n'.join(lines)
def _VerifyNoEmbeddedConfigs(jar_paths):
failed = False
for jar_path in jar_paths:
with zipfile.ZipFile(jar_path) as z:
for name in z.namelist():
if name.startswith('META-INF/proguard/'):
failed = True
sys.stderr.write("""\
Found embedded proguard config within {}.
Embedded configs are not permitted (https://crbug.com/989505)
""".format(jar_path))
break
if failed:
sys.exit(1)
def _ContainsDebuggingConfig(config_str):
debugging_configs = ('-whyareyoukeeping', '-whyareyounotinlining')
return any(config in config_str for config in debugging_configs)
def _MaybeWriteStampAndDepFile(options, inputs):
  """Touches the stamp file (when given) and writes the depfile (when asked).

  The depfile's first output is the stamp when one exists, otherwise the
  --output-path jar.
  """
  first_output = options.output_path
  if options.stamp:
    build_utils.Touch(options.stamp)
    first_output = options.stamp
  if options.depfile:
    build_utils.WriteDepfile(options.depfile, first_output, inputs=inputs)
def main():
  """Top-level driver: parse args, build configs, run R8, post-process."""
  build_utils.InitLogging('PROGUARD_DEBUG')
  options = _ParseOptions()
  logging.debug('Preparing configs')
  proguard_configs = options.proguard_configs
  # ProGuard configs that are derived from flags.
  dynamic_config_data = _CreateDynamicConfig(options)
  # Combine the config files with the generated rules for expectation
  # checking and for deciding whether to surface R8's stdout.
  merged_configs = _CombineConfigs(
      proguard_configs, dynamic_config_data, exclude_generated=True)
  print_stdout = _ContainsDebuggingConfig(merged_configs) or options.verbose
  if options.expected_file:
    diff_utils.CheckExpectations(merged_configs, options)
    if options.only_verify_expectations:
      build_utils.WriteDepfile(options.depfile,
                               options.actual_file,
                               inputs=options.proguard_configs)
      return
  logging.debug('Looking for embedded configs')
  libraries = []
  for p in options.classpath:
    # TODO(bjoyce): Remove filter once old android support libraries are gone.
    # Fix for having Library class extend program class dependency problem.
    if 'com_android_support' in p or 'android_support_test' in p:
      continue
    # If a jar is part of input no need to include it as library jar.
    if p not in libraries and p not in options.input_paths:
      libraries.append(p)
  _VerifyNoEmbeddedConfigs(options.input_paths + libraries)
  if options.keep_rules_output_path:
    # Keep-rule generation mode: no optimization is performed.
    _OutputKeepRules(options.r8_path, options.input_paths, options.classpath,
                     options.keep_rules_targets_regex,
                     options.keep_rules_output_path)
    return
  base_context = _OptimizeWithR8(options, proguard_configs, libraries,
                                 dynamic_config_data, print_stdout)
  if not options.disable_checks:
    logging.debug('Running tracereferences')
    all_dex_files = []
    if options.output_path:
      all_dex_files.append(options.output_path)
    if options.dex_dests:
      all_dex_files.extend(options.dex_dests)
    error_title = 'DEX contains references to non-existent symbols after R8.'
    _CheckForMissingSymbols(options.r8_path, all_dex_files, options.classpath,
                            options.warnings_as_errors, error_title)
    # Also ensure that base module doesn't have any references to child dex
    # symbols.
    # TODO(agrieve): Remove this check once r8 desugaring is fixed to not put
    # synthesized classes in the base module.
    error_title = 'Base module DEX contains references symbols within DFMs.'
    _CheckForMissingSymbols(options.r8_path, [base_context.final_output_path],
                            options.classpath, options.warnings_as_errors,
                            error_title)
  for output in options.extra_mapping_output_paths:
    shutil.copy(options.mapping_output, output)
  inputs = options.proguard_configs + options.input_paths + libraries
  if options.apply_mapping:
    inputs.append(options.apply_mapping)
  _MaybeWriteStampAndDepFile(options, inputs)
if __name__ == '__main__':
  # Script entry point.
  main()
| |
"""
This bootstrap module contains code for ensuring that the astropy_helpers
package will be importable by the time the setup.py script runs. It also
includes some workarounds to ensure that a recent-enough version of setuptools
is being used for the installation.
This module should be the first thing imported in the setup.py of distributions
that make use of the utilities in astropy_helpers. If the distribution ships
with its own copy of astropy_helpers, this module will first attempt to import
from the shipped copy. However, it will also check PyPI to see if there are
any bug-fix releases on top of the current version that may be useful to get
past platform-specific bugs that have been fixed. When running setup.py, use
the ``--offline`` command-line option to disable the auto-upgrade checks.
When this module is imported or otherwise executed it automatically calls a
main function that attempts to read the project's setup.cfg file, which it
checks for a configuration section called ``[ah_bootstrap]``. The presence of
that section, and options therein, determines the next step taken: If it
contains an option called ``auto_use`` with a value of ``True``, it will
automatically call the main function of this module called
`use_astropy_helpers` (see that function's docstring for full details).
Otherwise no further action is taken (however,
``ah_bootstrap.use_astropy_helpers`` may be called manually from within the
setup.py script).
Additional options in the ``[ah_bootstrap]`` section of setup.cfg have the same
names as the arguments to `use_astropy_helpers`, and can be used to configure
the bootstrap script when ``auto_use = True``.
See https://github.com/astropy/astropy-helpers for more details, and for the
latest version of this module.
"""
import contextlib
import errno
import imp
import io
import locale
import os
import re
import subprocess as sp
import sys
try:
from ConfigParser import ConfigParser, RawConfigParser
except ImportError:
from configparser import ConfigParser, RawConfigParser
_str_types = (str, bytes)
# What follows are several import statements meant to deal with install-time
# issues with either missing or misbehaving packages (including making sure
# setuptools itself is installed):
# Some pre-setuptools checks to ensure that either distribute or setuptools >=
# 0.7 is used (over pre-distribute setuptools) if it is available on the path;
# otherwise the latest setuptools will be downloaded and bootstrapped with
# ``ez_setup.py``. This used to be included in a separate file called
# setuptools_bootstrap.py; but it was combined into ah_bootstrap.py
# Prefer an installed setuptools >= 0.7 (or distribute); otherwise bootstrap
# setuptools via ez_setup. NOTE(review): the bare ``except`` below is
# deliberate -- any failure (DistributionNotFound, import errors, ...) falls
# through to the ez_setup bootstrap path.
try:
    import pkg_resources
    _setuptools_req = pkg_resources.Requirement.parse('setuptools>=0.7')
    # This may raise a DistributionNotFound in which case no version of
    # setuptools or distribute is properly installed
    _setuptools = pkg_resources.get_distribution('setuptools')
    if _setuptools not in _setuptools_req:
        # Older version of setuptools; check if we have distribute; again if
        # this results in DistributionNotFound we want to give up
        _distribute = pkg_resources.get_distribution('distribute')
        if _setuptools != _distribute:
            # It's possible on some pathological systems to have an old version
            # of setuptools and distribute on sys.path simultaneously; make
            # sure distribute is the one that's used
            sys.path.insert(1, _distribute.location)
            _distribute.activate()
            imp.reload(pkg_resources)
except:
    # There are several types of exceptions that can occur here; if all else
    # fails bootstrap and use the bootstrapped version
    from ez_setup import use_setuptools
    use_setuptools()
# typing as a dependency for 1.6.1+ Sphinx causes issues when imported after
# initializing submodule with ah_bootstrap.py
# See discussion and references in
# https://github.com/astropy/astropy-helpers/issues/302
try:
import typing # noqa
except ImportError:
pass
# Note: The following import is required as a workaround to
# https://github.com/astropy/astropy-helpers/issues/89; if we don't import this
# module now, it will get cleaned up after `run_setup` is called, but that will
# later cause the TemporaryDirectory class defined in it to stop working when
# used later on by setuptools
try:
import setuptools.py31compat # noqa
except ImportError:
pass
# matplotlib can cause problems if it is imported from within a call of
# run_setup(), because in some circumstances it will try to write to the user's
# home directory, resulting in a SandboxViolation. See
# https://github.com/matplotlib/matplotlib/pull/4165
# Making sure matplotlib, if it is available, is imported early in the setup
# process can mitigate this (note importing matplotlib.pyplot has the same
# issue)
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot
except:
# Ignore if this fails for *any* reason*
pass
# End compatibility imports...
# In case it didn't successfully import before the ez_setup checks
import pkg_resources
from setuptools import Distribution
from setuptools.package_index import PackageIndex
from setuptools.sandbox import run_setup
from distutils import log
from distutils.debug import DEBUG
# TODO: Maybe enable checking for a specific version of astropy_helpers?
DIST_NAME = 'astropy-helpers'
PACKAGE_NAME = 'astropy_helpers'
UPPER_VERSION_EXCLUSIVE = None
# Defaults for other options
DOWNLOAD_IF_NEEDED = True
INDEX_URL = 'https://pypi.python.org/simple'
USE_GIT = True
OFFLINE = False
AUTO_UPGRADE = True
# A list of all the configuration options and their required types
CFG_OPTIONS = [
('auto_use', bool), ('path', str), ('download_if_needed', bool),
('index_url', str), ('use_git', bool), ('offline', bool),
('auto_upgrade', bool)
]
class _Bootstrapper(object):
"""
Bootstrapper implementation. See ``use_astropy_helpers`` for parameter
documentation.
"""
    def __init__(self, path=None, index_url=None, use_git=None, offline=None,
                 download_if_needed=None, auto_upgrade=None):
        """See ``use_astropy_helpers`` for full parameter documentation."""
        # Default to looking for the astropy_helpers package/submodule dir.
        if path is None:
            path = PACKAGE_NAME
        if not (isinstance(path, _str_types) or path is False):
            raise TypeError('path must be a string or False')
        if not isinstance(path, str):
            fs_encoding = sys.getfilesystemencoding()
            path = path.decode(fs_encoding) # path to unicode
        self.path = path
        # Set other option attributes, using defaults where necessary
        self.index_url = index_url if index_url is not None else INDEX_URL
        self.offline = offline if offline is not None else OFFLINE
        # If offline=True, override download and auto-upgrade
        if self.offline:
            download_if_needed = False
            auto_upgrade = False
        self.download = (download_if_needed
                         if download_if_needed is not None
                         else DOWNLOAD_IF_NEEDED)
        self.auto_upgrade = (auto_upgrade
                             if auto_upgrade is not None else AUTO_UPGRADE)
        # If this is a release then the .git directory will not exist so we
        # should not use git.
        git_dir_exists = os.path.exists(os.path.join(os.path.dirname(__file__), '.git'))
        if use_git is None and not git_dir_exists:
            use_git = False
        self.use_git = use_git if use_git is not None else USE_GIT
        # Declared as False by default--later we check if astropy-helpers can be
        # upgraded from PyPI, but only if not using a source distribution (as in
        # the case of import from a git submodule)
        self.is_submodule = False
@classmethod
def main(cls, argv=None):
if argv is None:
argv = sys.argv
config = cls.parse_config()
config.update(cls.parse_command_line(argv))
auto_use = config.pop('auto_use', False)
bootstrapper = cls(**config)
if auto_use:
# Run the bootstrapper, otherwise the setup.py is using the old
# use_astropy_helpers() interface, in which case it will run the
# bootstrapper manually after reconfiguring it.
bootstrapper.run()
return bootstrapper
@classmethod
def parse_config(cls):
if not os.path.exists('setup.cfg'):
return {}
cfg = ConfigParser()
try:
cfg.read('setup.cfg')
except Exception as e:
if DEBUG:
raise
log.error(
"Error reading setup.cfg: {0!r}\n{1} will not be "
"automatically bootstrapped and package installation may fail."
"\n{2}".format(e, PACKAGE_NAME, _err_help_msg))
return {}
if not cfg.has_section('ah_bootstrap'):
return {}
config = {}
for option, type_ in CFG_OPTIONS:
if not cfg.has_option('ah_bootstrap', option):
continue
if type_ is bool:
value = cfg.getboolean('ah_bootstrap', option)
else:
value = cfg.get('ah_bootstrap', option)
config[option] = value
return config
@classmethod
def parse_command_line(cls, argv=None):
if argv is None:
argv = sys.argv
config = {}
# For now we just pop recognized ah_bootstrap options out of the
# arg list. This is imperfect; in the unlikely case that a setup.py
# custom command or even custom Distribution class defines an argument
# of the same name then we will break that. However there's a catch22
# here that we can't just do full argument parsing right here, because
# we don't yet know *how* to parse all possible command-line arguments.
if '--no-git' in argv:
config['use_git'] = False
argv.remove('--no-git')
if '--offline' in argv:
config['offline'] = True
argv.remove('--offline')
return config
    def run(self):
        """
        Locate an astropy_helpers distribution and activate it on ``sys.path``.

        Tries, in order: a local directory/submodule, a local source archive,
        and finally the package index.  Raises `_AHBootstrapSystemExit` when
        no strategy yields a distribution.
        """
        strategies = ['local_directory', 'local_file', 'index']
        dist = None
        # First, remove any previously imported versions of astropy_helpers;
        # this is necessary for nested installs where one package's installer
        # is installing another package via setuptools.sandbox.run_setup, as in
        # the case of setup_requires
        for key in list(sys.modules):
            try:
                if key == PACKAGE_NAME or key.startswith(PACKAGE_NAME + '.'):
                    del sys.modules[key]
            except AttributeError:
                # Sometimes mysterious non-string things can turn up in
                # sys.modules
                continue
        # Check to see if the path is a submodule
        self.is_submodule = self._check_submodule()
        for strategy in strategies:
            # Dispatch to get_local_directory_dist / get_local_file_dist /
            # get_index_dist; the first non-None result wins.
            method = getattr(self, 'get_{0}_dist'.format(strategy))
            dist = method()
            if dist is not None:
                break
        else:
            raise _AHBootstrapSystemExit(
                "No source found for the {0!r} package; {0} must be "
                "available and importable as a prerequisite to building "
                "or installing this package.".format(PACKAGE_NAME))
        # This is a bit hacky, but if astropy_helpers was loaded from a
        # directory/submodule its Distribution object gets a "precedence" of
        # "DEVELOP_DIST". However, in other cases it gets a precedence of
        # "EGG_DIST". However, when activating the distribution it will only be
        # placed early on sys.path if it is treated as an EGG_DIST, so always
        # do that
        dist = dist.clone(precedence=pkg_resources.EGG_DIST)
        # Otherwise we found a version of astropy-helpers, so we're done
        # Just activate the found distribution on sys.path--if we did a
        # download this usually happens automatically but it doesn't hurt to
        # do it again
        # Note: Adding the dist to the global working set also activates it
        # (makes it importable on sys.path) by default.
        try:
            pkg_resources.working_set.add(dist, replace=True)
        except TypeError:
            # Some (much) older versions of setuptools do not have the
            # replace=True option here. These versions are old enough that all
            # bets may be off anyways, but it's easy enough to work around just
            # in case...
            if dist.key in pkg_resources.working_set.by_key:
                del pkg_resources.working_set.by_key[dist.key]
            pkg_resources.working_set.add(dist)
@property
def config(self):
"""
A `dict` containing the options this `_Bootstrapper` was configured
with.
"""
return dict((optname, getattr(self, optname))
for optname, _ in CFG_OPTIONS if hasattr(self, optname))
    def get_local_directory_dist(self):
        """
        Handle importing a vendored package from a subdirectory of the source
        distribution.

        Returns the `pkg_resources.Distribution` found at ``self.path`` (or an
        upgraded one from PyPI), or `None` when the path is not a directory or
        contains no usable copy of the package.
        """
        if not os.path.isdir(self.path):
            return
        log.info('Attempting to import astropy_helpers from {0} {1!r}'.format(
                 'submodule' if self.is_submodule else 'directory',
                 self.path))
        dist = self._directory_import()
        if dist is None:
            log.warn(
                'The requested path {0!r} for importing {1} does not '
                'exist, or does not contain a copy of the {1} '
                'package.'.format(self.path, PACKAGE_NAME))
        elif self.auto_upgrade and not self.is_submodule:
            # A version of astropy-helpers was found on the available path, but
            # check to see if a bugfix release is available on PyPI
            upgrade = self._do_upgrade(dist)
            if upgrade is not None:
                dist = upgrade
        return dist
    def get_local_file_dist(self):
        """
        Handle importing from a source archive; this also uses setup_requires
        but points easy_install directly to the source archive.

        Returns the unpacked `pkg_resources.Distribution` (or an upgraded one
        from PyPI), or `None` when the path is not a file or unpacking fails.
        """
        if not os.path.isfile(self.path):
            return
        log.info('Attempting to unpack and import astropy_helpers from '
                 '{0!r}'.format(self.path))
        try:
            dist = self._do_download(find_links=[self.path])
        except Exception as e:
            if DEBUG:
                raise
            # Failure here is non-fatal: the next strategy (index download)
            # may still succeed.
            log.warn(
                'Failed to import {0} from the specified archive {1!r}: '
                '{2}'.format(PACKAGE_NAME, self.path, str(e)))
            dist = None
        if dist is not None and self.auto_upgrade:
            # A version of astropy-helpers was found on the available path, but
            # check to see if a bugfix release is available on PyPI
            upgrade = self._do_upgrade(dist)
            if upgrade is not None:
                dist = upgrade
        return dist
    def get_index_dist(self):
        """
        Download and install the package from the package index
        (``self.index_url``).

        Returns the downloaded `pkg_resources.Distribution`, or `None` when
        downloading is disabled or fails.
        """
        if not self.download:
            log.warn('Downloading {0!r} disabled.'.format(DIST_NAME))
            return None
        log.warn(
            "Downloading {0!r}; run setup.py with the --offline option to "
            "force offline installation.".format(DIST_NAME))
        try:
            dist = self._do_download()
        except Exception as e:
            if DEBUG:
                raise
            log.warn(
                'Failed to download and/or install {0!r} from {1!r}:\n'
                '{2}'.format(DIST_NAME, self.index_url, str(e)))
            dist = None
        # No need to run auto-upgrade here since we've already presumably
        # gotten the most up-to-date version from the package index
        return dist
    def _directory_import(self):
        """
        Import astropy_helpers from the given path, which will be added to
        sys.path.

        Returns the `pkg_resources.Distribution` found at the path, or `None`
        when neither package metadata nor a buildable ``setup.py`` is present.
        (The original docstring claimed a True/False return; the code returns
        a Distribution or None.)
        """
        path = os.path.abspath(self.path)
        # Use an empty WorkingSet rather than the main
        # pkg_resources.working_set, since on older versions of setuptools this
        # will invoke a VersionConflict when trying to install an upgrade
        ws = pkg_resources.WorkingSet([])
        ws.add_entry(path)
        dist = ws.by_key.get(DIST_NAME)
        if dist is None:
            # We didn't find an egg-info/dist-info in the given path, but if a
            # setup.py exists we can generate it
            setup_py = os.path.join(path, 'setup.py')
            if os.path.isfile(setup_py):
                # Build the metadata silently, then re-scan for the fresh dist.
                with _silence():
                    run_setup(os.path.join(path, 'setup.py'),
                              ['egg_info'])
                for dist in pkg_resources.find_distributions(path, True):
                    # There should be only one...
                    return dist
        return dist
    def _do_download(self, version='', find_links=None):
        """
        Install astropy-helpers via setuptools' ``setup_requires`` machinery.

        Parameters
        ----------
        version : str, optional
            Exact version to request; when empty, any version below
            ``UPPER_VERSION_EXCLUSIVE`` (if set) is acceptable.
        find_links : list of str, optional
            When given, restrict easy_install to these local sources and
            disable all remote hosts; otherwise ``self.index_url`` is used.

        Returns the installed `pkg_resources.Distribution`, or `None`.
        """
        if find_links:
            allow_hosts = ''
            index_url = None
        else:
            allow_hosts = None
            index_url = self.index_url
        # Annoyingly, setuptools will not handle other arguments to
        # Distribution (such as options) before handling setup_requires, so it
        # is not straightforward to programmatically augment the arguments which
        # are passed to easy_install
        class _Distribution(Distribution):
            # Inject our easy_install options as if they came from the setup
            # script itself.
            def get_option_dict(self, command_name):
                opts = Distribution.get_option_dict(self, command_name)
                if command_name == 'easy_install':
                    if find_links is not None:
                        opts['find_links'] = ('setup script', find_links)
                    if index_url is not None:
                        opts['index_url'] = ('setup script', index_url)
                    if allow_hosts is not None:
                        opts['allow_hosts'] = ('setup script', allow_hosts)
                return opts
        if version:
            req = '{0}=={1}'.format(DIST_NAME, version)
        else:
            if UPPER_VERSION_EXCLUSIVE is None:
                req = DIST_NAME
            else:
                req = '{0}<{1}'.format(DIST_NAME, UPPER_VERSION_EXCLUSIVE)
        attrs = {'setup_requires': [req]}
        try:
            if DEBUG:
                _Distribution(attrs=attrs)
            else:
                with _silence():
                    _Distribution(attrs=attrs)
            # If the setup_requires succeeded it will have added the new dist to
            # the main working_set
            return pkg_resources.working_set.by_key.get(DIST_NAME)
        except Exception as e:
            if DEBUG:
                raise
            msg = 'Error retrieving {0} from {1}:\n{2}'
            if find_links:
                source = find_links[0]
            elif index_url != INDEX_URL:
                source = index_url
            else:
                source = 'PyPI'
            raise Exception(msg.format(DIST_NAME, source, repr(e)))
    def _do_upgrade(self, dist):
        """
        Check the package index for a newer bugfix release of *dist* and
        install it if found; returns the upgraded distribution or `None`.
        """
        # Build up a requirement for a higher bugfix release but a lower minor
        # release (so API compatibility is guaranteed)
        next_version = _next_version(dist.parsed_version)
        req = pkg_resources.Requirement.parse(
            '{0}>{1},<{2}'.format(DIST_NAME, dist.version, next_version))
        package_index = PackageIndex(index_url=self.index_url)
        upgrade = package_index.obtain(req)
        if upgrade is not None:
            return self._do_download(version=upgrade.version)
def _check_submodule(self):
"""
Check if the given path is a git submodule.
See the docstrings for ``_check_submodule_using_git`` and
``_check_submodule_no_git`` for further details.
"""
if (self.path is None or
(os.path.exists(self.path) and not os.path.isdir(self.path))):
return False
if self.use_git:
return self._check_submodule_using_git()
else:
return self._check_submodule_no_git()
def _check_submodule_using_git(self):
"""
Check if the given path is a git submodule. If so, attempt to initialize
and/or update the submodule if needed.
This function makes calls to the ``git`` command in subprocesses. The
``_check_submodule_no_git`` option uses pure Python to check if the given
path looks like a git submodule, but it cannot perform updates.
"""
cmd = ['git', 'submodule', 'status', '--', self.path]
try:
log.info('Running `{0}`; use the --no-git option to disable git '
'commands'.format(' '.join(cmd)))
returncode, stdout, stderr = run_cmd(cmd)
except _CommandNotFound:
# The git command simply wasn't found; this is most likely the
# case on user systems that don't have git and are simply
# trying to install the package from PyPI or a source
# distribution. Silently ignore this case and simply don't try
# to use submodules
return False
stderr = stderr.strip()
if returncode != 0 and stderr:
# Unfortunately the return code alone cannot be relied on, as
# earlier versions of git returned 0 even if the requested submodule
# does not exist
# This is a warning that occurs in perl (from running git submodule)
# which only occurs with a malformatted locale setting which can
# happen sometimes on OSX. See again
# https://github.com/astropy/astropy/issues/2749
perl_warning = ('perl: warning: Falling back to the standard locale '
'("C").')
if not stderr.strip().endswith(perl_warning):
# Some other unknown error condition occurred
log.warn('git submodule command failed '
'unexpectedly:\n{0}'.format(stderr))
return False
# Output of `git submodule status` is as follows:
#
# 1: Status indicator: '-' for submodule is uninitialized, '+' if
# submodule is initialized but is not at the commit currently indicated
# in .gitmodules (and thus needs to be updated), or 'U' if the
# submodule is in an unstable state (i.e. has merge conflicts)
#
# 2. SHA-1 hash of the current commit of the submodule (we don't really
# need this information but it's useful for checking that the output is
# correct)
#
# 3. The output of `git describe` for the submodule's current commit
# hash (this includes for example what branches the commit is on) but
# only if the submodule is initialized. We ignore this information for
# now
_git_submodule_status_re = re.compile(
'^(?P<status>[+-U ])(?P<commit>[0-9a-f]{40}) '
'(?P<submodule>\S+)( .*)?$')
# The stdout should only contain one line--the status of the
# requested submodule
m = _git_submodule_status_re.match(stdout)
if m:
# Yes, the path *is* a git submodule
self._update_submodule(m.group('submodule'), m.group('status'))
return True
else:
log.warn(
'Unexpected output from `git submodule status`:\n{0}\n'
'Will attempt import from {1!r} regardless.'.format(
stdout, self.path))
return False
def _check_submodule_no_git(self):
"""
Like ``_check_submodule_using_git``, but simply parses the .gitmodules file
to determine if the supplied path is a git submodule, and does not exec any
subprocesses.
This can only determine if a path is a submodule--it does not perform
updates, etc. This function may need to be updated if the format of the
.gitmodules file is changed between git versions.
"""
gitmodules_path = os.path.abspath('.gitmodules')
if not os.path.isfile(gitmodules_path):
return False
# This is a minimal reader for gitconfig-style files. It handles a few of
# the quirks that make gitconfig files incompatible with ConfigParser-style
# files, but does not support the full gitconfig syntax (just enough
# needed to read a .gitmodules file).
gitmodules_fileobj = io.StringIO()
# Must use io.open for cross-Python-compatible behavior wrt unicode
with io.open(gitmodules_path) as f:
for line in f:
# gitconfig files are more flexible with leading whitespace; just
# go ahead and remove it
line = line.lstrip()
# comments can start with either # or ;
if line and line[0] in (':', ';'):
continue
gitmodules_fileobj.write(line)
gitmodules_fileobj.seek(0)
cfg = RawConfigParser()
try:
cfg.readfp(gitmodules_fileobj)
except Exception as exc:
log.warn('Malformatted .gitmodules file: {0}\n'
'{1} cannot be assumed to be a git submodule.'.format(
exc, self.path))
return False
for section in cfg.sections():
if not cfg.has_option(section, 'path'):
continue
submodule_path = cfg.get(section, 'path').rstrip(os.sep)
if submodule_path == self.path.rstrip(os.sep):
return True
return False
    def _update_submodule(self, submodule, status):
        """
        Initialize or update *submodule* according to the one-character
        *status* from ``git submodule status``.

        ' ' = up to date (no-op); '-' = uninitialized (clone required, fails
        in --offline mode); '+' = needs update; 'U' = merge conflicts (fatal).
        Unknown statuses are logged and the submodule is used as-is.
        """
        if status == ' ':
            # The submodule is up to date; no action necessary
            return
        elif status == '-':
            if self.offline:
                raise _AHBootstrapSystemExit(
                    "Cannot initialize the {0} submodule in --offline mode; "
                    "this requires being able to clone the submodule from an "
                    "online repository.".format(submodule))
            cmd = ['update', '--init']
            action = 'Initializing'
        elif status == '+':
            cmd = ['update']
            action = 'Updating'
            if self.offline:
                # Updating to the recorded commit can work without network
                # access as long as fetching is disabled.
                cmd.append('--no-fetch')
        elif status == 'U':
            raise _AHBootstrapSystemExit(
                'Error: Submodule {0} contains unresolved merge conflicts. '
                'Please complete or abandon any changes in the submodule so that '
                'it is in a usable state, then try again.'.format(submodule))
        else:
            log.warn('Unknown status {0!r} for git submodule {1!r}. Will '
                     'attempt to use the submodule as-is, but try to ensure '
                     'that the submodule is in a clean state and contains no '
                     'conflicts or errors.\n{2}'.format(status, submodule,
                                                        _err_help_msg))
            return
        err_msg = None
        cmd = ['git', 'submodule'] + cmd + ['--', submodule]
        log.warn('{0} {1} submodule with: `{2}`'.format(
            action, submodule, ' '.join(cmd)))
        try:
            log.info('Running `{0}`; use the --no-git option to disable git '
                     'commands'.format(' '.join(cmd)))
            returncode, stdout, stderr = run_cmd(cmd)
        except OSError as e:
            err_msg = str(e)
        else:
            if returncode != 0:
                err_msg = stderr
        if err_msg is not None:
            # Non-fatal: warn and let the build proceed with whatever state
            # the submodule is in.
            log.warn('An unexpected error occurred updating the git submodule '
                     '{0!r}:\n{1}\n{2}'.format(submodule, err_msg,
                                               _err_help_msg))
class _CommandNotFound(OSError):
    """
    An exception raised when a command run with run_cmd is not found on the
    system.

    Raised by ``run_cmd`` when the underlying OSError has errno ENOENT;
    callers use it to distinguish "git is not installed" from other failures.
    """
def run_cmd(cmd):
    """
    Run a command in a subprocess, given as a list of command-line
    arguments.
    Returns a ``(returncode, stdout, stderr)`` tuple.

    Raises `_CommandNotFound` if the executable does not exist, or
    `_AHBootstrapSystemExit` for any other OS-level failure.  Output is
    decoded to text using the default locale encoding (falling back to
    latin1).
    """
    try:
        p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
        # XXX: May block if either stdout or stderr fill their buffers;
        # however for the commands this is currently used for that is
        # unlikely (they should have very brief output)
        stdout, stderr = p.communicate()
    except OSError as e:
        if DEBUG:
            raise
        if e.errno == errno.ENOENT:
            msg = 'Command not found: `{0}`'.format(' '.join(cmd))
            raise _CommandNotFound(msg, cmd)
        else:
            raise _AHBootstrapSystemExit(
                'An unexpected error occurred when running the '
                '`{0}` command:\n{1}'.format(' '.join(cmd), str(e)))
    # Can fail if the default locale is not configured properly. See
    # https://github.com/astropy/astropy/issues/2749. For the purposes under
    # consideration 'latin1' is an acceptable fallback.
    try:
        stdio_encoding = locale.getdefaultlocale()[1] or 'latin1'
    except ValueError:
        # Due to an OSX oddity locale.getdefaultlocale() can also crash
        # depending on the user's locale/language settings. See:
        # http://bugs.python.org/issue18378
        stdio_encoding = 'latin1'
    # Unlikely to fail at this point but even then let's be flexible
    if not isinstance(stdout, str):
        stdout = stdout.decode(stdio_encoding, 'replace')
    if not isinstance(stderr, str):
        stderr = stderr.decode(stdio_encoding, 'replace')
    return (p.returncode, stdout, stderr)
def _next_version(version):
"""
Given a parsed version from pkg_resources.parse_version, returns a new
version string with the next minor version.
Examples
========
>>> _next_version(pkg_resources.parse_version('1.2.3'))
'1.3.0'
"""
if hasattr(version, 'base_version'):
# New version parsing from setuptools >= 8.0
if version.base_version:
parts = version.base_version.split('.')
else:
parts = []
else:
parts = []
for part in version:
if part.startswith('*'):
break
parts.append(part)
parts = [int(p) for p in parts]
if len(parts) < 3:
parts += [0] * (3 - len(parts))
major, minor, micro = parts[:3]
return '{0}.{1}.{2}'.format(major, minor + 1, 0)
class _DummyFile(object):
"""A noop writeable object."""
errors = '' # Required for Python 3.x
encoding = 'utf-8'
def write(self, s):
pass
def flush(self):
pass
@contextlib.contextmanager
def _silence():
    """A context manager that silences sys.stdout and sys.stderr.

    The streams are swapped for no-op `_DummyFile` objects for the duration
    of the ``with`` block and always restored afterwards, whether or not the
    block raised.
    """
    old_stdout = sys.stdout
    old_stderr = sys.stderr
    sys.stdout = _DummyFile()
    sys.stderr = _DummyFile()
    # try/finally replaces the original duplicated restore-on-exception /
    # restore-on-success logic (which used a bare ``except:`` and a flag);
    # the behavior is identical but the cleanup is stated once.
    try:
        yield
    finally:
        sys.stdout = old_stdout
        sys.stderr = old_stderr
# Guidance appended to most bootstrap error/warning messages (used by
# _AHBootstrapSystemExit and several log.warn calls in this module).
_err_help_msg = """
If the problem persists consider installing astropy_helpers manually using pip
(`pip install astropy_helpers`) or by manually downloading the source archive,
extracting it, and installing by running `python setup.py install` from the
root of the extracted source code.
"""
class _AHBootstrapSystemExit(SystemExit):
    """SystemExit subclass that appends ``_err_help_msg`` to its message."""

    def __init__(self, *args):
        if args:
            msg = args[0]
        else:
            msg = 'An unknown problem occurred bootstrapping astropy_helpers.'
        # Always append the generic troubleshooting advice.
        msg += '\n' + _err_help_msg
        super(_AHBootstrapSystemExit, self).__init__(msg, *args[1:])
# Module-import side effect: parse setup.cfg and sys.argv and, when
# ``auto_use`` is configured, run the bootstrapper immediately.
BOOTSTRAPPER = _Bootstrapper.main()
def use_astropy_helpers(**kwargs):
    """
    Ensure that the `astropy_helpers` module is available and is importable.
    This supports automatic submodule initialization if astropy_helpers is
    included in a project as a git submodule, or will download it from PyPI if
    necessary.
    Parameters
    ----------
    path : str or None, optional
        A filesystem path relative to the root of the project's source code
        that should be added to `sys.path` so that `astropy_helpers` can be
        imported from that path.
        If the path is a git submodule it will automatically be initialized
        and/or updated.
        The path may also be to a ``.tar.gz`` archive of the astropy_helpers
        source distribution.  In this case the archive is automatically
        unpacked and made temporarily available on `sys.path` as a ``.egg``
        archive.
        If `None` skip straight to downloading.
    download_if_needed : bool, optional
        If the provided filesystem path is not found an attempt will be made to
        download astropy_helpers from PyPI.  It will then be made temporarily
        available on `sys.path` as a ``.egg`` archive (using the
        ``setup_requires`` feature of setuptools).  If the ``--offline`` option
        is given at the command line the value of this argument is overridden
        to `False`.
    index_url : str, optional
        If provided, use a different URL for the Python package index than the
        main PyPI server.
    use_git : bool, optional
        If `False` no git commands will be used--this effectively disables
        support for git submodules. If the ``--no-git`` option is given at the
        command line the value of this argument is overridden to `False`.
    auto_upgrade : bool, optional
        By default, when installing a package from a non-development source
        distribution ah_bootstrap will try to automatically check for patch
        releases to astropy-helpers on PyPI and use the patched version over
        any bundled versions.  Setting this to `False` will disable that
        functionality. If the ``--offline`` option is given at the command line
        the value of this argument is overridden to `False`.
    offline : bool, optional
        If `True` disable all actions that require an internet connection,
        including downloading packages from the package index and fetching
        updates to any git submodule.  Defaults to `False`.
    """
    global BOOTSTRAPPER
    # Start from the module-level bootstrapper's configuration (already
    # derived from setup.cfg and the command line) and apply the caller's
    # overrides on top.
    config = BOOTSTRAPPER.config
    config.update(**kwargs)
    # Create a new bootstrapper with the updated configuration and run it
    BOOTSTRAPPER = _Bootstrapper(**config)
    BOOTSTRAPPER.run()
| |
from __future__ import unicode_literals
from django.db.models import CharField, TextField, Value as V
from django.db.models.functions import (
Coalesce, Concat, Length, Lower, Substr, Upper,
)
from django.test import TestCase
from django.utils import six, timezone
from .models import Article, Author
# Multi-line text fixture shared by the TextField-related tests below.
lorem_ipsum = """
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod
tempor incididunt ut labore et dolore magna aliqua."""
class FunctionTests(TestCase):
    """Tests for the database functions Coalesce, Concat, Length, Lower,
    Substr and Upper: annotation, ordering, UPDATE expressions, nesting,
    and argument validation."""

    def test_coalesce(self):
        # Coalesce returns the first non-null argument per row.
        Author.objects.create(name='John Smith', alias='smithj')
        Author.objects.create(name='Rhonda')
        authors = Author.objects.annotate(display_name=Coalesce('alias', 'name'))
        self.assertQuerysetEqual(
            authors.order_by('name'), [
                'smithj',
                'Rhonda',
            ],
            lambda a: a.display_name
        )
        # Fewer than two expressions is rejected at construction time.
        with self.assertRaisesMessage(ValueError, 'Coalesce must take at least two expressions'):
            Author.objects.annotate(display_name=Coalesce('alias'))

    def test_coalesce_mixed_values(self):
        # Coalescing a TextField with a CharField requires an explicit
        # output_field.
        a1 = Author.objects.create(name='John Smith', alias='smithj')
        a2 = Author.objects.create(name='Rhonda')
        ar1 = Article.objects.create(
            title="How to Django",
            text=lorem_ipsum,
            written=timezone.now(),
        )
        ar1.authors.add(a1)
        ar1.authors.add(a2)
        # mixed Text and Char
        article = Article.objects.annotate(
            headline=Coalesce('summary', 'text', output_field=TextField()),
        )
        self.assertQuerysetEqual(
            article.order_by('title'), [
                lorem_ipsum,
            ],
            lambda a: a.headline
        )
        # mixed Text and Char wrapped
        article = Article.objects.annotate(
            headline=Coalesce(Lower('summary'), Lower('text'), output_field=TextField()),
        )
        self.assertQuerysetEqual(
            article.order_by('title'), [
                lorem_ipsum.lower(),
            ],
            lambda a: a.headline
        )

    def test_coalesce_ordering(self):
        # Coalesce expressions can be used directly in order_by(), including
        # with explicit asc()/desc().
        Author.objects.create(name='John Smith', alias='smithj')
        Author.objects.create(name='Rhonda')
        authors = Author.objects.order_by(Coalesce('alias', 'name'))
        self.assertQuerysetEqual(
            authors, [
                'Rhonda',
                'John Smith',
            ],
            lambda a: a.name
        )
        authors = Author.objects.order_by(Coalesce('alias', 'name').asc())
        self.assertQuerysetEqual(
            authors, [
                'Rhonda',
                'John Smith',
            ],
            lambda a: a.name
        )
        authors = Author.objects.order_by(Coalesce('alias', 'name').desc())
        self.assertQuerysetEqual(
            authors, [
                'John Smith',
                'Rhonda',
            ],
            lambda a: a.name
        )

    def test_concat(self):
        # Concat treats null components as empty strings.
        Author.objects.create(name='Jayden')
        Author.objects.create(name='John Smith', alias='smithj', goes_by='John')
        Author.objects.create(name='Margaret', goes_by='Maggie')
        Author.objects.create(name='Rhonda', alias='adnohR')
        authors = Author.objects.annotate(joined=Concat('alias', 'goes_by'))
        self.assertQuerysetEqual(
            authors.order_by('name'), [
                '',
                'smithjJohn',
                'Maggie',
                'adnohR',
            ],
            lambda a: a.joined
        )
        # Fewer than two expressions is rejected at construction time.
        with self.assertRaisesMessage(ValueError, 'Concat must take at least two expressions'):
            Author.objects.annotate(joined=Concat('alias'))

    def test_concat_many(self):
        # Concat accepts any number of field references and Value() literals.
        Author.objects.create(name='Jayden')
        Author.objects.create(name='John Smith', alias='smithj', goes_by='John')
        Author.objects.create(name='Margaret', goes_by='Maggie')
        Author.objects.create(name='Rhonda', alias='adnohR')
        authors = Author.objects.annotate(
            joined=Concat('name', V(' ('), 'goes_by', V(')'), output_field=CharField()),
        )
        self.assertQuerysetEqual(
            authors.order_by('name'), [
                'Jayden ()',
                'John Smith (John)',
                'Margaret (Maggie)',
                'Rhonda ()',
            ],
            lambda a: a.joined
        )

    def test_concat_mixed_char_text(self):
        # Mixed Char/Text concatenation needs an explicit output_field, and
        # must still return text when wrapped in another function.
        Article.objects.create(title='The Title', text=lorem_ipsum, written=timezone.now())
        article = Article.objects.annotate(
            title_text=Concat('title', V(' - '), 'text', output_field=TextField()),
        ).get(title='The Title')
        self.assertEqual(article.title + ' - ' + article.text, article.title_text)
        # wrap the concat in something else to ensure that we're still
        # getting text rather than bytes
        article = Article.objects.annotate(
            title_text=Upper(Concat('title', V(' - '), 'text', output_field=TextField())),
        ).get(title='The Title')
        expected = article.title + ' - ' + article.text
        self.assertEqual(expected.upper(), article.title_text)

    def test_lower(self):
        # Lower works both as an annotation and inside an UPDATE.
        Author.objects.create(name='John Smith', alias='smithj')
        Author.objects.create(name='Rhonda')
        authors = Author.objects.annotate(lower_name=Lower('name'))
        self.assertQuerysetEqual(
            authors.order_by('name'), [
                'john smith',
                'rhonda',
            ],
            lambda a: a.lower_name
        )
        Author.objects.update(name=Lower('name'))
        self.assertQuerysetEqual(
            authors.order_by('name'), [
                ('john smith', 'john smith'),
                ('rhonda', 'rhonda'),
            ],
            lambda a: (a.lower_name, a.name)
        )

    def test_upper(self):
        # Upper works both as an annotation and inside an UPDATE.
        Author.objects.create(name='John Smith', alias='smithj')
        Author.objects.create(name='Rhonda')
        authors = Author.objects.annotate(upper_name=Upper('name'))
        self.assertQuerysetEqual(
            authors.order_by('name'), [
                'JOHN SMITH',
                'RHONDA',
            ],
            lambda a: a.upper_name
        )
        Author.objects.update(name=Upper('name'))
        self.assertQuerysetEqual(
            authors.order_by('name'), [
                ('JOHN SMITH', 'JOHN SMITH'),
                ('RHONDA', 'RHONDA'),
            ],
            lambda a: (a.upper_name, a.name)
        )

    def test_length(self):
        # Length of a null column is null; Length can also be used in filters.
        Author.objects.create(name='John Smith', alias='smithj')
        Author.objects.create(name='Rhonda')
        authors = Author.objects.annotate(
            name_length=Length('name'),
            alias_length=Length('alias'))
        self.assertQuerysetEqual(
            authors.order_by('name'), [
                (10, 6),
                (6, None),
            ],
            lambda a: (a.name_length, a.alias_length)
        )
        self.assertEqual(authors.filter(alias_length__lte=Length('name')).count(), 1)

    def test_length_ordering(self):
        # Multiple Length expressions can be combined in order_by().
        Author.objects.create(name='John Smith', alias='smithj')
        Author.objects.create(name='John Smith', alias='smithj1')
        Author.objects.create(name='Rhonda', alias='ronny')
        authors = Author.objects.order_by(Length('name'), Length('alias'))
        self.assertQuerysetEqual(
            authors, [
                ('Rhonda', 'ronny'),
                ('John Smith', 'smithj'),
                ('John Smith', 'smithj1'),
            ],
            lambda a: (a.name, a.alias)
        )

    def test_substr(self):
        # Substr uses 1-based positions; the length argument is optional.
        Author.objects.create(name='John Smith', alias='smithj')
        Author.objects.create(name='Rhonda')
        authors = Author.objects.annotate(name_part=Substr('name', 5, 3))
        self.assertQuerysetEqual(
            authors.order_by('name'), [
                ' Sm',
                'da',
            ],
            lambda a: a.name_part
        )
        authors = Author.objects.annotate(name_part=Substr('name', 2))
        self.assertQuerysetEqual(
            authors.order_by('name'), [
                'ohn Smith',
                'honda',
            ],
            lambda a: a.name_part
        )
        # if alias is null, set to first 5 lower characters of the name
        Author.objects.filter(alias__isnull=True).update(
            alias=Lower(Substr('name', 1, 5)),
        )
        self.assertQuerysetEqual(
            authors.order_by('name'), [
                'smithj',
                'rhond',
            ],
            lambda a: a.alias
        )

    def test_substr_start(self):
        # Position 1 returns the whole string; position 0 is invalid.
        Author.objects.create(name='John Smith', alias='smithj')
        a = Author.objects.annotate(
            name_part_1=Substr('name', 1),
            name_part_2=Substr('name', 2),
        ).get(alias='smithj')
        self.assertEqual(a.name_part_1[1:], a.name_part_2)
        with six.assertRaisesRegex(self, ValueError, "'pos' must be greater than 0"):
            Author.objects.annotate(raises=Substr('name', 0))

    def test_substr_with_expressions(self):
        # Position and length may be given as Value() expressions.
        Author.objects.create(name='John Smith', alias='smithj')
        Author.objects.create(name='Rhonda')
        authors = Author.objects.annotate(name_part=Substr('name', V(5), V(3)))
        self.assertQuerysetEqual(
            authors.order_by('name'), [
                ' Sm',
                'da',
            ],
            lambda a: a.name_part
        )

    def test_nested_function_ordering(self):
        # Functions compose: ordering by the length of a coalesced value.
        Author.objects.create(name='John Smith')
        Author.objects.create(name='Rhonda Simpson', alias='ronny')
        authors = Author.objects.order_by(Length(Coalesce('alias', 'name')))
        self.assertQuerysetEqual(
            authors, [
                'Rhonda Simpson',
                'John Smith',
            ],
            lambda a: a.name
        )
        authors = Author.objects.order_by(Length(Coalesce('alias', 'name')).desc())
        self.assertQuerysetEqual(
            authors, [
                'John Smith',
                'Rhonda Simpson',
            ],
            lambda a: a.name
        )
| |
import unittest
class SolrEscapeTests(unittest.TestCase):
    """Tests for ``alm.solrindex.handlers.solr_escape``."""

    def _callFUT(self, s):
        from alm.solrindex.handlers import solr_escape
        return solr_escape(s)

    def test_empty(self):
        # Escaping the empty string yields the empty string.
        self.assertEqual(self._callFUT(''), '')

    def test_simple(self):
        # Plain alphanumerics pass through untouched.
        self.assertEqual(self._callFUT('abc'), 'abc')

    def test_unicode(self):
        # Non-ASCII characters are preserved, not escaped.
        self.assertEqual(self._callFUT(u'smile \u30b7'), u'smile \u30b7')

    def test_quotes(self):
        # Double quotes are backslash-escaped.
        self.assertEqual(self._callFUT(u'I am "quoted"'),
                         u'I am \\"quoted\\"')

    def test_all_escaped_characters(self):
        # Every character Solr treats as special gets a backslash prefix.
        special = '\\:?*~"^][}{)(!|&+-'
        expected = ''.join('\\' + ch for ch in special)
        self.assertEqual(self._callFUT(special), expected)
class DefaultFieldHandlerTests(unittest.TestCase):
    """Tests for ``alm.solrindex.handlers.DefaultFieldHandler``:
    query quoting/escaping and value conversion to unicode.

    Uses ``assertTrue`` instead of the deprecated ``assert_`` alias.
    """

    def _getTargetClass(self):
        from alm.solrindex.handlers import DefaultFieldHandler
        return DefaultFieldHandler

    def _makeOne(self):
        return self._getTargetClass()()

    def test_verifyImplements(self):
        from zope.interface.verify import verifyClass
        from alm.solrindex.interfaces import ISolrFieldHandler
        verifyClass(ISolrFieldHandler, self._getTargetClass())

    def test_verifyProvides(self):
        from zope.interface.verify import verifyObject
        from alm.solrindex.interfaces import ISolrFieldHandler
        verifyObject(ISolrFieldHandler, self._makeOne())

    def test_simple_query(self):
        field = DummyField()
        field_query = 'hello'
        handler = self._makeOne()
        param = handler.parse_query(field, field_query)
        self.assertEqual(param, {'fq': u'dummyfield:"hello"'})

    def test_escaped_query(self):
        # Special characters in the query value are backslash-escaped.
        field = DummyField()
        field_query = u'Hello "Solr"! \u30b7'
        handler = self._makeOne()
        param = handler.parse_query(field, field_query)
        self.assertEqual(param,
            {'fq': u'dummyfield:"Hello \\"Solr\\"\\! \u30b7"'})

    def test_query_multiple_default_operator(self):
        # Multiple values default to an OR group.
        field = DummyField()
        field_query = ['news', 'sports', '"local"']
        handler = self._makeOne()
        param = handler.parse_query(field, field_query)
        self.assertEqual(param,
            {'fq': u'dummyfield:("news" OR "sports" OR "\\"local\\"")'})

    def test_query_multiple_and_operator(self):
        # An explicit operator key switches the group to AND.
        field = DummyField()
        field_query = {
            'query': ['news', 'sports', '"local"'],
            'operator': 'and',
        }
        handler = self._makeOne()
        param = handler.parse_query(field, field_query)
        self.assertEqual(param,
            {'fq': u'dummyfield:("news" AND "sports" AND "\\"local\\"")'})

    def test_convert_none(self):
        handler = self._makeOne()
        self.assertEqual(handler.convert(None), ())

    def test_convert_string(self):
        handler = self._makeOne()
        actual = handler.convert('abc')
        self.assertEqual(len(actual), 1)
        # assertTrue replaces the deprecated unittest assert_ alias.
        self.assertTrue(isinstance(actual[0], unicode))
        self.assertEqual(actual, [u'abc'])

    def test_convert_multiple(self):
        handler = self._makeOne()
        actual = handler.convert(('abc', 'def'))
        self.assertEqual(len(actual), 2)
        self.assertTrue(isinstance(actual[0], unicode))
        self.assertEqual(actual, [u'abc', u'def'])

    def test_convert_invalid_xml(self):
        # Control characters illegal in XML are stripped during conversion.
        handler = self._makeOne()
        actual = handler.convert('A backspace\x08 escaped\x1b!')
        self.assertEqual(len(actual), 1)
        self.assertTrue(isinstance(actual[0], unicode))
        self.assertEqual(actual, [u'A backspace escaped!'])
class BoolFieldHandlerTests(unittest.TestCase):
    """Tests for ``alm.solrindex.handlers.BoolFieldHandler``."""

    def _getTargetClass(self):
        from alm.solrindex.handlers import BoolFieldHandler
        return BoolFieldHandler

    def _makeOne(self):
        return self._getTargetClass()()

    def test_verifyImplements(self):
        from zope.interface.verify import verifyClass
        from alm.solrindex.interfaces import ISolrFieldHandler
        verifyClass(ISolrFieldHandler, self._getTargetClass())

    def test_verifyProvides(self):
        from zope.interface.verify import verifyObject
        from alm.solrindex.interfaces import ISolrFieldHandler
        verifyObject(ISolrFieldHandler, self._makeOne())

    def test_convert_none(self):
        # None converts to an empty tuple (field omitted).
        self.assertEqual(self._makeOne().convert(None), ())

    def test_convert_true(self):
        # Booleans become Solr's lowercase string literals.
        self.assertEqual(self._makeOne().convert(True), ['true'])

    def test_convert_false(self):
        self.assertEqual(self._makeOne().convert(False), ['false'])

    def test_convert_list(self):
        # Sequences convert element-wise, preserving order.
        self.assertEqual(self._makeOne().convert([False, True, True]),
                         ['false', 'true', 'true'])
class DateFieldHandlerTests(unittest.TestCase):
    """Tests for ``alm.solrindex.handlers.DateFieldHandler``: conversion of
    assorted date/time representations to Solr's ISO-8601 UTC format."""

    def _getTargetClass(self):
        from alm.solrindex.handlers import DateFieldHandler
        return DateFieldHandler

    def _makeOne(self):
        return self._getTargetClass()()

    def test_verifyImplements(self):
        from zope.interface.verify import verifyClass
        from alm.solrindex.interfaces import ISolrFieldHandler
        verifyClass(ISolrFieldHandler, self._getTargetClass())

    def test_verifyProvides(self):
        from zope.interface.verify import verifyObject
        from alm.solrindex.interfaces import ISolrFieldHandler
        verifyObject(ISolrFieldHandler, self._makeOne())

    def test_convert_none(self):
        # None converts to an empty tuple (field omitted).
        self.assertEqual(self._makeOne().convert(None), ())

    def test_convert_datetime(self):
        import datetime
        value = datetime.datetime(2009, 9, 8, 11, 51, 30)
        self.assertEqual(self._makeOne().convert(value),
                         ['2009-09-08T11:51:30.000Z'])

    def test_convert_date(self):
        import datetime
        # Bare dates are widened to midnight.
        value = datetime.date(2009, 9, 8)
        self.assertEqual(self._makeOne().convert(value),
                         ['2009-09-08T00:00:00.000Z'])

    def test_convert_multiple_dates(self):
        import datetime
        values = [datetime.date(2009, 9, 8), datetime.date(2009, 9, 9)]
        self.assertEqual(
            self._makeOne().convert(values),
            ['2009-09-08T00:00:00.000Z', '2009-09-09T00:00:00.000Z'])

    def test_convert_string(self):
        # Free-form date strings are parsed; millisecond precision is kept.
        self.assertEqual(
            self._makeOne().convert('September 8, 2009 11:51:31.512 AM UTC'),
            ['2009-09-08T11:51:31.512Z'])

    def test_convert_float(self):
        import calendar
        # Numeric timestamps are interpreted as seconds since the epoch (UTC).
        stamp = calendar.timegm((2009, 9, 8, 11, 51, 32))
        self.assertEqual(self._makeOne().convert(stamp),
                         ['2009-09-08T11:51:32.000Z'])

    def test_convert_DateTime(self):
        from DateTime.DateTime import DateTime
        zope_dt = DateTime('2009-09-08 11:51:34.000 UTC')
        self.assertEqual(self._makeOne().convert(zope_dt),
                         ['2009-09-08T11:51:34.000Z'])

    def test_convert_other(self):
        # Unsupported types are rejected outright.
        self.assertRaises(TypeError, self._makeOne().convert, object())
class TextFieldHandlerTests(unittest.TestCase):
    """Tests for alm.solrindex.handlers.TextFieldHandler."""

    def _getTargetClass(self):
        from alm.solrindex.handlers import TextFieldHandler
        return TextFieldHandler

    def _makeOne(self):
        klass = self._getTargetClass()
        return klass()

    def test_verifyImplements(self):
        from zope.interface.verify import verifyClass
        from alm.solrindex.interfaces import ISolrFieldHandler
        verifyClass(ISolrFieldHandler, self._getTargetClass())

    def test_verifyProvides(self):
        from zope.interface.verify import verifyObject
        from alm.solrindex.interfaces import ISolrFieldHandler
        verifyObject(ISolrFieldHandler, self._makeOne())

    def test_simple(self):
        # A plain query is wrapped in a required field clause.
        param = self._makeOne().parse_query(DummyField(), u'alpha beta')
        self.assertEqual(param, {'q': u'+dummyfield:(alpha beta)'})

    def test_complex(self):
        # Solr operators inside the query text pass through verbatim.
        param = self._makeOne().parse_query(
            DummyField(), u'(fun OR play) +with Solr^4')
        self.assertEqual(
            param, {'q': u'+dummyfield:((fun OR play) +with Solr^4)'})
class DummyField:
    # Minimal stand-in for a Solr field object: the handlers under test
    # only read its ``name`` attribute.
    name = 'dummyfield'
def test_suite():
    """Assemble the suite of all handler test cases.

    Uses ``TestLoader.loadTestsFromTestCase`` instead of the legacy
    ``unittest.makeSuite``, which was deprecated in Python 3.11 and
    removed in Python 3.13. Returns a ``unittest.TestSuite``.
    """
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    for case in (SolrEscapeTests,
                 DefaultFieldHandlerTests,
                 BoolFieldHandlerTests,
                 DateFieldHandlerTests,
                 TextFieldHandlerTests):
        suite.addTest(loader.loadTestsFromTestCase(case))
    return suite
| |
# -*- coding: utf-8 -*-
"""Unit tests for checking the InfluxDB server.
The good/expected interaction between:
+ the python client.. (obviously)
+ and a *_real_* server instance running.
This basically duplicates what's in client_test.py
but without mocking around every call.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from functools import partial
import os
import time
import unittest
import warnings
from influxdb import InfluxDBClient
from influxdb.exceptions import InfluxDBClientError
from influxdb.tests import skip_if_pypy, using_pypy, skip_server_tests
from influxdb.tests.server_tests.base import ManyTestCasesWithServerMixin
from influxdb.tests.server_tests.base import SingleTestCaseWithServerMixin
from influxdb.tests.server_tests.base import ManyTestCasesWithServerGzipMixin
from influxdb.tests.server_tests.base import SingleTestCaseWithServerGzipMixin
# By default, raise exceptions on warnings
warnings.simplefilter('error', FutureWarning)
if not using_pypy:
import pandas as pd
from pandas.util.testing import assert_frame_equal
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
def point(series_name, timestamp=None, tags=None, **fields):
    """Build an InfluxDB point dict for *series_name*.

    ``timestamp`` and ``tags`` are included only when truthy; every
    extra keyword argument becomes one of the point's fields.
    """
    body = {'measurement': series_name, 'fields': fields}
    if timestamp:
        body['time'] = timestamp
    if tags:
        body['tags'] = tags
    return body
# One point in the "cpu_load_short" series; reused throughout the tests.
dummy_point = [ # some dummy points
    {
        "measurement": "cpu_load_short",
        "tags": {
            "host": "server01",
            "region": "us-west"
        },
        "time": "2009-11-10T23:00:00Z",
        "fields": {
            "value": 0.64
        }
    }
]
# Two points spanning two measurements (cpu_load_short + memory).
dummy_points = [ # some dummy points
    dummy_point[0],
    {
        "measurement": "memory",
        "tags": {
            "host": "server01",
            "region": "us-west"
        },
        "time": "2009-11-10T23:01:35Z",
        "fields": {
            "value": 33.0
        }
    }
]
# pandas is unavailable under PyPy, so the DataFrame fixtures are optional.
if not using_pypy:
    # DataFrame equivalent of dummy_point.
    dummy_point_df = {
        "measurement": "cpu_load_short",
        "tags": {"host": "server01",
                 "region": "us-west"},
        "dataframe": pd.DataFrame(
            [[0.64]], columns=['value'],
            index=pd.to_datetime(["2009-11-10T23:00:00Z"]))
    }
    # DataFrame equivalent of dummy_points.
    dummy_points_df = [{
        "measurement": "cpu_load_short",
        "tags": {"host": "server01", "region": "us-west"},
        "dataframe": pd.DataFrame(
            [[0.64]], columns=['value'],
            index=pd.to_datetime(["2009-11-10T23:00:00Z"])),
    }, {
        "measurement": "memory",
        "tags": {"host": "server01", "region": "us-west"},
        "dataframe": pd.DataFrame(
            [[33]], columns=['value'],
            index=pd.to_datetime(["2009-11-10T23:01:35Z"])
        )
    }]
# Point without an explicit timestamp: the server assigns one on write.
dummy_point_without_timestamp = [
    {
        "measurement": "cpu_load_short",
        "tags": {
            "host": "server02",
            "region": "us-west"
        },
        "fields": {
            "value": 0.64
        }
    }
]
@skip_server_tests
class SimpleTests(SingleTestCaseWithServerMixin, unittest.TestCase):
    """Define the class of simple tests.

    Each test method gets a fresh influxd instance (via the mixin), so the
    tests below can assume an empty server. ``self.cli`` is the client
    bound to that instance.
    """
    # Config template used to spawn the per-test influxd instance.
    influxdb_template_conf = os.path.join(THIS_DIR, 'influxdb.conf.template')
    def test_fresh_server_no_db(self):
        """Test a fresh server without database."""
        self.assertEqual([], self.cli.get_list_database())
    def test_create_database(self):
        """Test create a database."""
        self.assertIsNone(self.cli.create_database('new_db_1'))
        self.assertIsNone(self.cli.create_database('new_db_2'))
        self.assertEqual(
            self.cli.get_list_database(),
            [{'name': 'new_db_1'}, {'name': 'new_db_2'}]
        )
    def test_drop_database(self):
        """Test drop a database."""
        # reuse test_create_database to seed two databases first
        self.test_create_database()
        self.assertIsNone(self.cli.drop_database('new_db_1'))
        self.assertEqual([{'name': 'new_db_2'}], self.cli.get_list_database())
    def test_query_fail(self):
        """Test that a query failed."""
        with self.assertRaises(InfluxDBClientError) as ctx:
            self.cli.query('select column_one from foo')
        self.assertIn('database not found: db',
                      ctx.exception.content)
    def test_query_fail_ignore_errors(self):
        """Test query failed but ignore errors."""
        result = self.cli.query('select column_one from foo',
                                raise_errors=False)
        self.assertEqual(result.error, 'database not found: db')
    def test_create_user(self):
        """Test create user."""
        self.cli.create_user('test_user', 'secret_password')
        rsp = list(self.cli.query("SHOW USERS")['results'])
        self.assertIn({'user': 'test_user', 'admin': False},
                      rsp)
    def test_create_user_admin(self):
        """Test create admin user."""
        self.cli.create_user('test_user', 'secret_password', True)
        rsp = list(self.cli.query("SHOW USERS")['results'])
        self.assertIn({'user': 'test_user', 'admin': True},
                      rsp)
    def test_create_user_blank_password(self):
        """Test create user with a blank pass."""
        self.cli.create_user('test_user', '')
        rsp = list(self.cli.query("SHOW USERS")['results'])
        self.assertIn({'user': 'test_user', 'admin': False},
                      rsp)
    def test_get_list_users_empty(self):
        """Test get list of users, but empty."""
        rsp = self.cli.get_list_users()
        self.assertEqual([], rsp)
    def test_get_list_users(self):
        """Test get list of users."""
        self.cli.query("CREATE USER test WITH PASSWORD 'test'")
        rsp = self.cli.get_list_users()
        self.assertEqual(
            [{'user': 'test', 'admin': False}],
            rsp
        )
    def test_create_user_blank_username(self):
        """Test create blank username."""
        # server must reject the request and leave the user list untouched
        with self.assertRaises(InfluxDBClientError) as ctx:
            self.cli.create_user('', 'secret_password')
        self.assertIn('username required',
                      ctx.exception.content)
        rsp = list(self.cli.query("SHOW USERS")['results'])
        self.assertEqual(rsp, [])
    def test_drop_user(self):
        """Test drop a user."""
        self.cli.query("CREATE USER test WITH PASSWORD 'test'")
        self.cli.drop_user('test')
        users = list(self.cli.query("SHOW USERS")['results'])
        self.assertEqual(users, [])
    def test_drop_user_nonexisting(self):
        """Test dropping a nonexistent user."""
        with self.assertRaises(InfluxDBClientError) as ctx:
            self.cli.drop_user('test')
        self.assertIn('user not found',
                      ctx.exception.content)
    @unittest.skip("Broken as of 0.9.0")
    def test_revoke_admin_privileges(self):
        """Test revoking admin privs, deprecated as of v0.9.0."""
        self.cli.create_user('test', 'test', admin=True)
        self.assertEqual([{'user': 'test', 'admin': True}],
                         self.cli.get_list_users())
        self.cli.revoke_admin_privileges('test')
        self.assertEqual([{'user': 'test', 'admin': False}],
                         self.cli.get_list_users())
    def test_grant_privilege(self):
        """Test grant privs to user."""
        self.cli.create_user('test', 'test')
        self.cli.create_database('testdb')
        self.cli.grant_privilege('all', 'testdb', 'test')
        # TODO: when supported by InfluxDB, check if privileges are granted
    def test_grant_privilege_invalid(self):
        """Test grant invalid privs to user."""
        self.cli.create_user('test', 'test')
        self.cli.create_database('testdb')
        with self.assertRaises(InfluxDBClientError) as ctx:
            self.cli.grant_privilege('', 'testdb', 'test')
        self.assertEqual(400, ctx.exception.code)
        self.assertIn('{"error":"error parsing query: ',
                      ctx.exception.content)
    def test_revoke_privilege(self):
        """Test revoke privs from user."""
        self.cli.create_user('test', 'test')
        self.cli.create_database('testdb')
        self.cli.revoke_privilege('all', 'testdb', 'test')
        # TODO: when supported by InfluxDB, check if privileges are revoked
    def test_revoke_privilege_invalid(self):
        """Test revoke invalid privs from user."""
        self.cli.create_user('test', 'test')
        self.cli.create_database('testdb')
        with self.assertRaises(InfluxDBClientError) as ctx:
            self.cli.revoke_privilege('', 'testdb', 'test')
        self.assertEqual(400, ctx.exception.code)
        self.assertIn('{"error":"error parsing query: ',
                      ctx.exception.content)
    def test_invalid_port_fails(self):
        """Test invalid port access fails."""
        # port must be an integer-convertible value
        with self.assertRaises(ValueError):
            InfluxDBClient('host', '80/redir', 'username', 'password')
@skip_server_tests
class CommonTests(ManyTestCasesWithServerMixin, unittest.TestCase):
    """Define a class to handle common tests for the server.

    All tests share one influxd instance (via the mixin); ``self.cli`` is
    the client bound to the pre-created ``db`` database.
    """
    influxdb_template_conf = os.path.join(THIS_DIR, 'influxdb.conf.template')
    def test_write(self):
        """Test write to the server."""
        self.assertIs(True, self.cli.write(
            {'points': dummy_point},
            params={'db': 'db'},
        ))
    def test_write_check_read(self):
        """Test write and check read of data to server."""
        self.test_write()
        time.sleep(1)
        rsp = self.cli.query('SELECT * FROM cpu_load_short', database='db')
        self.assertListEqual([{'value': 0.64, 'time': '2009-11-10T23:00:00Z',
                               "host": "server01", "region": "us-west"}],
                             list(rsp.get_points()))
    def test_write_points(self):
        """Test writing points to the server."""
        self.assertIs(True, self.cli.write_points(dummy_point))
    @skip_if_pypy
    def test_write_points_DF(self):
        """Test writing points with dataframe."""
        self.assertIs(
            True,
            self.cliDF.write_points(
                dummy_point_df['dataframe'],
                dummy_point_df['measurement'],
                dummy_point_df['tags']
            )
        )
    def test_write_points_check_read(self):
        """Test writing points and check read back."""
        self.test_write_points()
        time.sleep(1)  # same as test_write_check_read()
        rsp = self.cli.query('SELECT * FROM cpu_load_short')
        self.assertEqual(
            list(rsp),
            [[
                {'value': 0.64,
                 'time': '2009-11-10T23:00:00Z',
                 "host": "server01",
                 "region": "us-west"}
            ]]
        )
        rsp2 = list(rsp.get_points())
        self.assertEqual(len(rsp2), 1)
        pt = rsp2[0]
        self.assertEqual(
            pt,
            {'time': '2009-11-10T23:00:00Z',
             'value': 0.64,
             "host": "server01",
             "region": "us-west"}
        )
    @unittest.skip("Broken as of 0.9.0")
    def test_write_points_check_read_DF(self):
        """Test write points and check back with dataframe."""
        self.test_write_points_DF()
        time.sleep(1)  # same as test_write_check_read()
        rsp = self.cliDF.query('SELECT * FROM cpu_load_short')
        assert_frame_equal(
            rsp['cpu_load_short'],
            dummy_point_df['dataframe']
        )
        # Query with Tags
        rsp = self.cliDF.query(
            "SELECT * FROM cpu_load_short GROUP BY *")
        assert_frame_equal(
            rsp[('cpu_load_short',
                 (('host', 'server01'), ('region', 'us-west')))],
            dummy_point_df['dataframe']
        )
    def test_write_multiple_points_different_series(self):
        """Test write multiple points to different series."""
        self.assertIs(True, self.cli.write_points(dummy_points))
        time.sleep(1)
        rsp = self.cli.query('SELECT * FROM cpu_load_short')
        lrsp = list(rsp)
        self.assertEqual(
            [[
                {'value': 0.64,
                 'time': '2009-11-10T23:00:00Z',
                 "host": "server01",
                 "region": "us-west"}
            ]],
            lrsp
        )
        rsp = list(self.cli.query('SELECT * FROM memory'))
        self.assertEqual(
            rsp,
            [[
                {'value': 33,
                 'time': '2009-11-10T23:01:35Z',
                 "host": "server01",
                 "region": "us-west"}
            ]]
        )
    def test_select_into_as_post(self):
        """Test SELECT INTO is POSTed."""
        self.assertIs(True, self.cli.write_points(dummy_points))
        time.sleep(1)
        rsp = self.cli.query('SELECT * INTO "newmeas" FROM "memory"')
        rsp = self.cli.query('SELECT * FROM "newmeas"')
        lrsp = list(rsp)
        self.assertEqual(
            lrsp,
            [[
                {'value': 33,
                 'time': '2009-11-10T23:01:35Z',
                 "host": "server01",
                 "region": "us-west"}
            ]]
        )
    @unittest.skip("Broken as of 0.9.0")
    def test_write_multiple_points_different_series_DF(self):
        """Test write multiple points using dataframe to different series."""
        for i in range(2):
            self.assertIs(
                True, self.cliDF.write_points(
                    dummy_points_df[i]['dataframe'],
                    dummy_points_df[i]['measurement'],
                    dummy_points_df[i]['tags']))
        time.sleep(1)
        rsp = self.cliDF.query('SELECT * FROM cpu_load_short')
        assert_frame_equal(
            rsp['cpu_load_short'],
            dummy_points_df[0]['dataframe']
        )
        rsp = self.cliDF.query('SELECT * FROM memory')
        assert_frame_equal(
            rsp['memory'],
            dummy_points_df[1]['dataframe']
        )
    def test_write_points_batch(self):
        """Test writing points in a batch."""
        dummy_points = [
            {"measurement": "cpu_usage", "tags": {"unit": "percent"},
             "time": "2009-11-10T23:00:00Z", "fields": {"value": 12.34}},
            {"measurement": "network", "tags": {"direction": "in"},
             "time": "2009-11-10T23:00:00Z", "fields": {"value": 123.00}},
            {"measurement": "network", "tags": {"direction": "out"},
             "time": "2009-11-10T23:00:00Z", "fields": {"value": 12.00}}
        ]
        self.cli.write_points(points=dummy_points,
                              tags={"host": "server01",
                                    "region": "us-west"},
                              batch_size=2)
        time.sleep(5)
        net_in = self.cli.query("SELECT value FROM network "
                                "WHERE direction=$dir",
                                bind_params={'dir': 'in'}
                                ).raw
        net_out = self.cli.query("SELECT value FROM network "
                                 "WHERE direction='out'").raw
        cpu = self.cli.query("SELECT value FROM cpu_usage").raw
        self.assertIn(123, net_in['series'][0]['values'][0])
        self.assertIn(12, net_out['series'][0]['values'][0])
        self.assertIn(12.34, cpu['series'][0]['values'][0])
    def test_write_points_batch_generator(self):
        """Test writing points in a batch from a generator."""
        dummy_points = [
            {"measurement": "cpu_usage", "tags": {"unit": "percent"},
             "time": "2009-11-10T23:00:00Z", "fields": {"value": 12.34}},
            {"measurement": "network", "tags": {"direction": "in"},
             "time": "2009-11-10T23:00:00Z", "fields": {"value": 123.00}},
            {"measurement": "network", "tags": {"direction": "out"},
             "time": "2009-11-10T23:00:00Z", "fields": {"value": 12.00}}
        ]
        dummy_points_generator = (point for point in dummy_points)
        self.cli.write_points(points=dummy_points_generator,
                              tags={"host": "server01",
                                    "region": "us-west"},
                              batch_size=2)
        time.sleep(5)
        net_in = self.cli.query("SELECT value FROM network "
                                "WHERE direction=$dir",
                                bind_params={'dir': 'in'}
                                ).raw
        net_out = self.cli.query("SELECT value FROM network "
                                 "WHERE direction='out'").raw
        cpu = self.cli.query("SELECT value FROM cpu_usage").raw
        self.assertIn(123, net_in['series'][0]['values'][0])
        self.assertIn(12, net_out['series'][0]['values'][0])
        self.assertIn(12.34, cpu['series'][0]['values'][0])
    def test_query(self):
        """Test querying data back from server."""
        self.assertIs(True, self.cli.write_points(dummy_point))
    @unittest.skip('Not implemented for 0.9')
    def test_query_chunked(self):
        """Test query for chunked response from server."""
        cli = InfluxDBClient(database='db')
        example_object = {
            'points': [
                [1415206250119, 40001, 667],
                [1415206244555, 30001, 7],
                [1415206228241, 20001, 788],
                [1415206212980, 10001, 555],
                [1415197271586, 10001, 23]
            ],
            'name': 'foo',
            'columns': [
                'time',
                'sequence_number',
                'val'
            ]
        }
        del cli
        del example_object
        # TODO ?
    def test_delete_series_invalid(self):
        """Test delete invalid series."""
        with self.assertRaises(InfluxDBClientError):
            self.cli.delete_series()
    def test_default_retention_policy(self):
        """Test add default retention policy."""
        rsp = self.cli.get_list_retention_policies()
        self.assertEqual(
            [
                {'name': 'autogen',
                 'duration': '0s',
                 'replicaN': 1,
                 'shardGroupDuration': u'168h0m0s',
                 'default': True}
            ],
            rsp
        )
    def test_create_retention_policy_default(self):
        """Test create a new default retention policy."""
        self.cli.create_retention_policy('somename', '1d', 1, default=True)
        self.cli.create_retention_policy('another', '2d', 1, default=False)
        rsp = self.cli.get_list_retention_policies()
        self.assertEqual(
            [
                {'duration': '0s',
                 'default': False,
                 'replicaN': 1,
                 'shardGroupDuration': u'168h0m0s',
                 'name': 'autogen'},
                {'duration': '24h0m0s',
                 'default': True,
                 'replicaN': 1,
                 'shardGroupDuration': u'1h0m0s',
                 'name': 'somename'},
                {'duration': '48h0m0s',
                 'default': False,
                 'replicaN': 1,
                 'shardGroupDuration': u'24h0m0s',
                 'name': 'another'}
            ],
            rsp
        )
    def test_create_retention_policy(self):
        """Test creating a new retention policy, not default."""
        self.cli.create_retention_policy('somename', '1d', 1)
        # NB: creating a retention policy without specifying
        # shard group duration
        # leads to a shard group duration of 1 hour
        rsp = self.cli.get_list_retention_policies()
        self.assertEqual(
            [
                {'duration': '0s',
                 'default': True,
                 'replicaN': 1,
                 'shardGroupDuration': u'168h0m0s',
                 'name': 'autogen'},
                {'duration': '24h0m0s',
                 'default': False,
                 'replicaN': 1,
                 'shardGroupDuration': u'1h0m0s',
                 'name': 'somename'}
            ],
            rsp
        )
        self.cli.drop_retention_policy('somename', 'db')
        # recreate the RP
        self.cli.create_retention_policy('somename', '1w', 1,
                                         shard_duration='1h')
        rsp = self.cli.get_list_retention_policies()
        self.assertEqual(
            [
                {'duration': '0s',
                 'default': True,
                 'replicaN': 1,
                 'shardGroupDuration': u'168h0m0s',
                 'name': 'autogen'},
                {'duration': '168h0m0s',
                 'default': False,
                 'replicaN': 1,
                 'shardGroupDuration': u'1h0m0s',
                 'name': 'somename'}
            ],
            rsp
        )
        self.cli.drop_retention_policy('somename', 'db')
        # recreate the RP
        self.cli.create_retention_policy('somename', '1w', 1)
        rsp = self.cli.get_list_retention_policies()
        self.assertEqual(
            [
                {'duration': '0s',
                 'default': True,
                 'replicaN': 1,
                 'shardGroupDuration': u'168h0m0s',
                 'name': 'autogen'},
                {'duration': '168h0m0s',
                 'default': False,
                 'replicaN': 1,
                 'shardGroupDuration': u'24h0m0s',
                 'name': 'somename'}
            ],
            rsp
        )
    def test_alter_retention_policy(self):
        """Test alter a retention policy, not default."""
        self.cli.create_retention_policy('somename', '1d', 1)
        # Test alter duration
        self.cli.alter_retention_policy('somename', 'db',
                                        duration='4d',
                                        shard_duration='2h')
        # NB: altering retention policy doesn't change shard group duration
        rsp = self.cli.get_list_retention_policies()
        self.assertEqual(
            [
                {'duration': '0s',
                 'default': True,
                 'replicaN': 1,
                 'shardGroupDuration': u'168h0m0s',
                 'name': 'autogen'},
                {'duration': '96h0m0s',
                 'default': False,
                 'replicaN': 1,
                 'shardGroupDuration': u'2h0m0s',
                 'name': 'somename'}
            ],
            rsp
        )
        # Test alter replication
        self.cli.alter_retention_policy('somename', 'db',
                                        replication=4)
        # NB: altering retention policy doesn't change shard group duration
        rsp = self.cli.get_list_retention_policies()
        self.assertEqual(
            [
                {'duration': '0s',
                 'default': True,
                 'replicaN': 1,
                 'shardGroupDuration': u'168h0m0s',
                 'name': 'autogen'},
                {'duration': '96h0m0s',
                 'default': False,
                 'replicaN': 4,
                 'shardGroupDuration': u'2h0m0s',
                 'name': 'somename'}
            ],
            rsp
        )
        # Test alter default
        self.cli.alter_retention_policy('somename', 'db',
                                        default=True)
        # NB: altering retention policy doesn't change shard group duration
        rsp = self.cli.get_list_retention_policies()
        self.assertEqual(
            [
                {'duration': '0s',
                 'default': False,
                 'replicaN': 1,
                 'shardGroupDuration': u'168h0m0s',
                 'name': 'autogen'},
                {'duration': '96h0m0s',
                 'default': True,
                 'replicaN': 4,
                 'shardGroupDuration': u'2h0m0s',
                 'name': 'somename'}
            ],
            rsp
        )
        # Test alter shard_duration
        self.cli.alter_retention_policy('somename', 'db',
                                        shard_duration='4h')
        rsp = self.cli.get_list_retention_policies()
        self.assertEqual(
            [
                {'duration': '0s',
                 'default': False,
                 'replicaN': 1,
                 'shardGroupDuration': u'168h0m0s',
                 'name': 'autogen'},
                {'duration': '96h0m0s',
                 'default': True,
                 'replicaN': 4,
                 'shardGroupDuration': u'4h0m0s',
                 'name': 'somename'}
            ],
            rsp
        )
    def test_alter_retention_policy_invalid(self):
        """Test invalid alter retention policy."""
        self.cli.create_retention_policy('somename', '1d', 1)
        with self.assertRaises(InfluxDBClientError) as ctx:
            self.cli.alter_retention_policy('somename', 'db')
        self.assertEqual(400, ctx.exception.code)
        self.assertIn('{"error":"error parsing query: ',
                      ctx.exception.content)
        rsp = self.cli.get_list_retention_policies()
        self.assertEqual(
            [
                {'duration': '0s',
                 'default': True,
                 'replicaN': 1,
                 'shardGroupDuration': u'168h0m0s',
                 'name': 'autogen'},
                {'duration': '24h0m0s',
                 'default': False,
                 'replicaN': 1,
                 'shardGroupDuration': u'1h0m0s',
                 'name': 'somename'}
            ],
            rsp
        )
    def test_drop_retention_policy(self):
        """Test drop a retention policy."""
        self.cli.create_retention_policy('somename', '1d', 1)
        # Test drop retention
        self.cli.drop_retention_policy('somename', 'db')
        rsp = self.cli.get_list_retention_policies()
        self.assertEqual(
            [
                {'duration': '0s',
                 'default': True,
                 'replicaN': 1,
                 'shardGroupDuration': u'168h0m0s',
                 'name': 'autogen'}
            ],
            rsp
        )
    def test_create_continuous_query(self):
        """Test continuous query creation."""
        self.cli.create_retention_policy('some_rp', '1d', 1)
        query = 'select count("value") into "some_rp"."events" from ' \
                '"events" group by time(10m)'
        self.cli.create_continuous_query('test_cq', query, 'db')
        cqs = self.cli.get_list_continuous_queries()
        expected_cqs = [
            {
                'db': [
                    {
                        'name': 'test_cq',
                        'query': 'CREATE CONTINUOUS QUERY test_cq ON db '
                                 'BEGIN SELECT count(value) INTO '
                                 'db.some_rp.events FROM db.autogen.events '
                                 'GROUP BY time(10m) END'
                    }
                ]
            }
        ]
        self.assertEqual(cqs, expected_cqs)
    def test_drop_continuous_query(self):
        """Test continuous query drop."""
        self.test_create_continuous_query()
        self.cli.drop_continuous_query('test_cq', 'db')
        cqs = self.cli.get_list_continuous_queries()
        expected_cqs = [{'db': []}]
        self.assertEqual(cqs, expected_cqs)
    def test_issue_143(self):
        """Test for PR#143 from repo."""
        pt = partial(point, 'a_series_name', timestamp='2015-03-30T16:16:37Z')
        pts = [
            pt(value=15),
            pt(tags={'tag_1': 'value1'}, value=5),
            pt(tags={'tag_1': 'value2'}, value=10),
        ]
        self.cli.write_points(pts)
        time.sleep(1)
        rsp = list(self.cli.query('SELECT * FROM a_series_name \
GROUP BY tag_1').get_points())
        self.assertEqual(
            [
                {'time': '2015-03-30T16:16:37Z', 'value': 15},
                {'time': '2015-03-30T16:16:37Z', 'value': 5},
                {'time': '2015-03-30T16:16:37Z', 'value': 10}
            ],
            rsp
        )
        # a slightly more complex one with 2 tags values:
        pt = partial(point, 'series2', timestamp='2015-03-30T16:16:37Z')
        pts = [
            pt(tags={'tag1': 'value1', 'tag2': 'v1'}, value=0),
            pt(tags={'tag1': 'value1', 'tag2': 'v2'}, value=5),
            pt(tags={'tag1': 'value2', 'tag2': 'v1'}, value=10),
        ]
        self.cli.write_points(pts)
        time.sleep(1)
        rsp = self.cli.query('SELECT * FROM series2 GROUP BY tag1,tag2')
        self.assertEqual(
            [
                {'value': 0, 'time': '2015-03-30T16:16:37Z'},
                {'value': 5, 'time': '2015-03-30T16:16:37Z'},
                {'value': 10, 'time': '2015-03-30T16:16:37Z'}
            ],
            list(rsp['series2'])
        )
        all_tag2_equal_v1 = list(rsp.get_points(tags={'tag2': 'v1'}))
        self.assertEqual(
            [{'value': 0, 'time': '2015-03-30T16:16:37Z'},
             {'value': 10, 'time': '2015-03-30T16:16:37Z'}],
            all_tag2_equal_v1,
        )
    def test_query_multiple_series(self):
        """Test query for multiple series."""
        pt = partial(point, 'series1', timestamp='2015-03-30T16:16:37Z')
        pts = [
            pt(tags={'tag1': 'value1', 'tag2': 'v1'}, value=0),
        ]
        self.cli.write_points(pts)
        pt = partial(point, 'series2', timestamp='1970-03-30T16:16:37Z')
        pts = [
            pt(tags={'tag1': 'value1', 'tag2': 'v1'},
               value=0, data1=33, data2="bla"),
        ]
        self.cli.write_points(pts)
    def test_get_list_series(self):
        """Test get a list of series from the database."""
        dummy_points = [
            {
                "measurement": "cpu_load_short",
                "tags": {
                    "host": "server01",
                    "region": "us-west"
                },
                "time": "2009-11-10T23:00:00.123456Z",
                "fields": {
                    "value": 0.64
                }
            }
        ]
        dummy_points_2 = [
            {
                "measurement": "memory_usage",
                "tags": {
                    "host": "server02",
                    "region": "us-east"
                },
                "time": "2009-11-10T23:00:00.123456Z",
                "fields": {
                    "value": 80
                }
            }
        ]
        self.cli.write_points(dummy_points)
        self.cli.write_points(dummy_points_2)
        # ``assertEquals`` is a deprecated alias (removed in Python 3.12);
        # use ``assertEqual``. A verbatim duplicate of the
        # measurement='memory_usage' check was also removed here.
        self.assertEqual(
            self.cli.get_list_series(),
            ['cpu_load_short,host=server01,region=us-west',
             'memory_usage,host=server02,region=us-east']
        )
        self.assertEqual(
            self.cli.get_list_series(measurement='memory_usage'),
            ['memory_usage,host=server02,region=us-east']
        )
        self.assertEqual(
            self.cli.get_list_series(tags={'host': 'server02'}),
            ['memory_usage,host=server02,region=us-east'])
        self.assertEqual(
            self.cli.get_list_series(
                measurement='cpu_load_short', tags={'host': 'server02'}),
            [])
@skip_server_tests
class UdpTests(ManyTestCasesWithServerMixin, unittest.TestCase):
    """Define a class to test UDP series."""
    # Enable the UDP listener in the generated server config.
    influxdb_udp_enabled = True
    influxdb_template_conf = os.path.join(THIS_DIR,
                                          'influxdb.conf.template')
    def test_write_points_udp(self):
        """Test write points UDP."""
        # Build a dedicated client that writes over UDP but still queries
        # over HTTP (same server instance as self.cli).
        cli = InfluxDBClient(
            'localhost',
            self.influxd_inst.http_port,
            'root',
            '',
            database='db',
            use_udp=True,
            udp_port=self.influxd_inst.udp_port
        )
        cli.write_points(dummy_point)
        # The points are not immediately available after write_points.
        # This is to be expected because we are using udp (no response !).
        # So we have to wait some time,
        time.sleep(3) # 3 sec seems to be a good choice.
        rsp = self.cli.query('SELECT * FROM cpu_load_short')
        self.assertEqual(
            # this is dummy_points :
            [
                {'value': 0.64,
                 'time': '2009-11-10T23:00:00Z',
                 "host": "server01",
                 "region": "us-west"}
            ],
            list(rsp['cpu_load_short'])
        )
# Run the tests again, but with gzip enabled this time
@skip_server_tests
class GzipSimpleTests(SimpleTests, SingleTestCaseWithServerGzipMixin):
    """Repeat the simple tests with InfluxDBClient where gzip=True."""
    # Inherits every test from SimpleTests; only the client mixin changes.
    pass
@skip_server_tests
class GzipCommonTests(CommonTests, ManyTestCasesWithServerGzipMixin):
    """Repeat the common tests with InfluxDBClient where gzip=True."""
    # Inherits every test from CommonTests; only the client mixin changes.
    pass
@skip_server_tests
class GzipUdpTests(UdpTests, ManyTestCasesWithServerGzipMixin):
    """Repeat the UDP tests with InfluxDBClient where gzip=True."""
    # Inherits every test from UdpTests; only the client mixin changes.
    pass
| |
# =======================================================================
# 2019 Changes by Natalia Clementi @ncclementi
# Add documentation and comments.
# Add translation center to allow creation of meshes not centered on (0,0,0)
# Modify script to save faces and vertices in different files
# Add argument parser to accept input variables chosen by user For example:
# to create an ellipsoid of 180 triangles (faces = 20*N^2), with principal
# axes (a1, a2, a3) = (2, 4, 5.5), centered on (1.5, -2, 4.2) and filename:
# ellipsoid_example the user will run
#
# $ python mesh_ellipsoid.py -n 3 -a1 2 -a2 4 -a3 5.5 -xyzc 1.5,-2,4.2 -fn ellipsoid_example
#
# This will create two files ellipsoid_example.vert (which contains the
# coordinates of the vertices) and ellipsoid_example.face (which contains the
# connectivity).
#
# Warning!! In the resulting mesh files, for our needs, the counting of the
# indices for the triangles starts on 1 (check line 311)
# =======================================================================
# 2016 Changes by ARM (abhilashreddy.com)
# - made to work with Python 3+
# - made to work with recent versions of matplotlib
# =======================================================================
# Author: William G.K. Martin (wgm2111@cu where cu=columbia.edu)
# copyright (c) 2010
# licence: BSD style
# ======================================================================
import numpy
import matplotlib.tri as Triang
from argparse import ArgumentParser, ArgumentTypeError
def coords_center(s):
    '''Adapted from https://stackoverflow.com/questions/9978880
    Splitter of string x,y,z into variables to use in parser
    Arguments
    ---------
    s: string, has to have form x,y,z
    Returns
    -------
    x, y, z : floats, coordinates x, y, z.
    Raises
    ------
    argparse.ArgumentTypeError : if s is not three comma-separated numbers.
    '''
    try:
        x, y, z = map(float, s.split(','))
    except ValueError as err:
        # Only catch parse failures (wrong element count or non-numeric
        # text). The original bare ``except`` also hid unrelated bugs.
        raise ArgumentTypeError("Coordinates must be x,y,z") from err
    return x, y, z
def read_inputs():
    """
    Parse the command-line arguments for mesh_ellipsoid.

    Recognized options:
    -n    : int, number of points in a triangle edge after refinement.
    -a1   : float, a1 principal semi-axis (a1, 0, 0).
    -a2   : float, a2 principal semi-axis (0, a2, 0).
    -a3   : float, a3 principal semi-axis (0, 0, a3).
    -xyzc : "x,y,z" center coordinates (defaults to the origin).
    -fn   : output file name (without extension).
    """
    parser = ArgumentParser(description='Manage mesh_ellipsoid command line arguments')
    parser.add_argument('-n', '--num_points', dest='n', type=int,
                        default=None,
                        help="number of points in a triangle edge to refine into")
    # The three semi-axis options only differ by name and help text.
    axis_specs = (
        ('-a1', '--a1_semi_ax', 'a1', "a1 principal semi-axis (a1, 0, 0)"),
        ('-a2', '--a2_semi_ax', 'a2', "a2 principal semi-axis (0, a2, 0)"),
        ('-a3', '--a3_semi_ax', 'a3', "a3 principal semi-axis (0, 0, a3)"),
    )
    for short_flag, long_flag, dest, help_text in axis_specs:
        parser.add_argument(short_flag, long_flag, dest=dest, type=float,
                            default=None, help=help_text)
    parser.add_argument('-xyzc', '--xyz_center', dest='xyzc',
                        type=coords_center, default='0,0,0',
                        help="xc,yc,zc center coordinates")
    parser.add_argument('-fn', '--filename', dest='filename', type=str,
                        default=None, help="output file name")
    return parser.parse_args()
def get_points():
    '''
    Build the 12 vertices of the base icosahedron from the golden ratio
    (https://en.wikipedia.org/wiki/Regular_icosahedron).
    Returns
    -------
    array (12, 3): icosahedron vertices on the unit sphere, reordered in
    a downward spiral starting from the vertex on the +z axis.
    '''
    phi = (1. + numpy.sqrt(5.)) / 2.0  # golden ratio
    verts = numpy.array(
        [[phi, -phi, -phi, phi, 1, 1, -1, -1, 0, 0, 0, 0],
         [0, 0, 0, 0, phi, -phi, -phi, phi, 1, 1, -1, -1],
         [1, 1, -1, -1, 0, 0, 0, 0, phi, -phi, -phi, phi]]).T
    # scale onto the unit sphere (every row has the same norm)
    verts = verts / numpy.sqrt((verts**2).sum(1))[0]
    # rotate about the y-axis so the first vertex lands on +z
    angle = numpy.arctan(verts[0, 0] / verts[0, 2])
    cos_a, sin_a = numpy.cos(angle), numpy.sin(angle)
    rot = numpy.array([[cos_a, 0, -sin_a],
                       [0, 1.0, 0],
                       [sin_a, 0, cos_a]])
    verts = numpy.inner(rot, verts).T
    # reorder the vertices in a downward spiral
    spiral = [0, 3, 4, 8, -1, 5, -2, -3, 7, 1, 6, 2]
    return verts[spiral, :]
def get_barymat(n):
    '''
    Build the barycentric refinement matrix for a triangle.
    Arguments
    ---------
    n : integer, number of points in a triangle edge after refinement.
    Returns
    -------
    bcmat : array (n*(n+1)/2, 3); row k holds the barycentric
    coordinates of the k-th refined point.
    '''
    # evenly spaced barycentric fractions 0, 1/(n-1), ..., 1
    frac = numpy.arange(n) / float(n - 1)
    rows = []
    # walk the triangle row by row; row j contributes n-j points
    for j in range(n):
        width = n - j
        for i in range(width):
            rows.append((frac[width - 1 - i], frac[i], frac[j]))
    return numpy.array(rows)
class icosahedron(object):
    """
    The vertices of an icosahedron, together with triangles, edges,
    triangle midpoints and edge midpoints.

    All attributes are computed once at class-definition time and shared
    by every instance.
    """
    # define points (vertices)
    p = get_points()
    # define triangles (faces): each column below is one face given as
    # three indices into ``p``; transposed to shape (20, 3)
    tri = numpy.array([
        [1, 2, 3, 4, 5, 6, 2, 7, 2, 8, 3, 9, 10, 10, 6, 6, 7, 8, 9, 10],
        [0, 0, 0, 0, 0, 1, 7, 2, 3, 3, 4, 4, 4, 5, 5, 7, 8, 9, 10, 6],
        [2, 3, 4, 5, 1, 7, 1, 8, 8, 9, 9, 10, 5, 6, 1, 11, 11, 11, 11, 11]
    ]).transpose()
    # centroid of each face
    trimids = (p[tri[:, 0]] + p[tri[:, 1]] + p[tri[:, 2]]) / 3.0
    # define bars (edges): each undirected edge kept once via j > i
    bar = list()
    for t in tri:
        bar += [numpy.array([i, j]) for i, j
                in [t[0:2], t[1:], t[[2, 0]]] if j > i]
    bar = numpy.array(bar)
    # midpoint of each edge
    barmids = (p[bar[:, 0]] + p[bar[:, 1]]) / 2.0
def triangulate_bary(bary):
    """
    Triangulate a single barycentric triangle using matplotlib.

    Argument
    --------
    bary: barycentric matrix obtained using get_barymat.

    Return
    ------
    dely.edges: array (nedges, 2) that contains the indices of the two vertices
        that form each edge after the triangulation.
    dely.triangles: array (ntriangles, 3) that contains the indices of the
        three vertices that form each triangle after the triangulation.
    """
    # Project the barycentric points onto a 2-D plane (a -45 degree rotation
    # of the first two coordinates) so a planar Delaunay triangulation
    # can be applied.
    x = numpy.cos(-numpy.pi/4.)*bary[:, 0] + numpy.sin(-numpy.pi/4.)*bary[:, 1]
    y = bary[:, 2]
    # NOTE(review): `Triang` is not imported in this section of the file;
    # presumably matplotlib.tri — confirm against the module's imports.
    dely = Triang.Triangulation(x, y)
    return dely.edges, dely.triangles
def get_triangulation(n, ico=icosahedron()):
    """
    Compute the triangulation of the sphere by refining each face of the
    icosahedron to an nth order barycentric triangle. There are two key issues
    that this routine addresses.

    1) calculate the triangles (unique by construction)
    2) remove non-unique nodes and edges

    Arguments
    ---------
    n : integer, number of points in a triangle edge after refinement.
    ico : object exposing icosahedron vertices ``p`` and faces ``tri``.
        NOTE: the default instance is created once at function-definition
        time and shared between calls; it is never mutated here.

    Returns
    -------
    univerts : array, new vertices coordinates after removing repeated nodes
    unitri : array, new triangles indices.
    unibar : array, new edges indices.
    """
    # Stack the three corner vertices of every face: shape (3, 20, 3).
    verts = numpy.array([ico.p[ico.tri[:, 0]],
                         ico.p[ico.tri[:, 1]],
                         ico.p[ico.tri[:, 2]]])
    bary = get_barymat(n)
    # Interpolate the barycentric points onto every face.
    newverts = numpy.tensordot(verts,
                               bary, axes=[(0,), (-1,)]).transpose(0, 2, 1)
    numverts = newverts.shape[1]
    if newverts.size // 3 > 1e6:
        print("newverts.size/3 is high: {0}".format(newverts.size // 3))
    # BUG FIX: use floor division so the flat index array has an integer
    # dtype; `numpy.arange(newverts.size / 3)` produced a float array under
    # Python 3 true division (the rest of this function already uses `//`).
    flat_coordinates = numpy.arange(newverts.size // 3).reshape(20, numverts)
    barbary, tribary = triangulate_bary(bary)
    # Map the per-face triangle/edge indices into the flat vertex numbering.
    newtri = numpy.zeros((20, tribary.shape[0], 3), dtype=int)
    newbar = numpy.zeros((20, barbary.shape[0], 2), dtype=int)
    for i in range(20):
        for j in range(3):
            newtri[i, :, j] = flat_coordinates[i, tribary[:, j]]
            if j < 2:
                newbar[i, :, j] = flat_coordinates[i, barbary[:, j]]
    newverts = newverts.reshape(newverts.size//3, 3)
    newtri = newtri.reshape(newtri.size//3, 3)
    newbar = newbar.reshape(newbar.size//2, 2)
    # Normalize vertices onto the unit sphere.
    scalars = numpy.sqrt((newverts**2).sum(-1))
    newverts = (newverts.T / scalars).T
    # Remove repeated vertices: quantize coordinates to a ~1e-8 grid and
    # hash each vertex to a scalar so numpy.unique can find duplicates.
    aux, iunique, irepeat = numpy.unique(numpy.dot(newverts//1e-8,
                                                   100*numpy.arange(1, 4, 1)),
                                         return_index=True,
                                         return_inverse=True)
    univerts = newverts[iunique]
    unitri = irepeat[newtri]
    unibar = irepeat[newbar]
    # Remove repeated edges by deduplicating their midpoints the same way.
    mid = .5 * (univerts[unibar[:, 0]] + univerts[unibar[:, 1]])
    aux, iu = numpy.unique(numpy.dot(mid//1e-8, 100*numpy.arange(1, 4, 1)),
                           return_index=True)
    unibar = unibar[iu, :]
    return univerts, unitri, unibar
class icosphere(icosahedron):
    """
    Define an icosahedron based discretization of the sphere.

    n is the order of barycentric triangles used to refine each
    face of the icosahedral base mesh.
    """
    def __init__(self, n):
        # NOTE(review): the class object `icosahedron` (not an instance) is
        # passed here; this works because `p` and `tri` are class attributes,
        # but an instance would be the more conventional argument — confirm.
        self.p, self.tri, self.bar = get_triangulation(n+1, icosahedron)
def cart2sph(xyz):
    """
    Convert Cartesian coordinates to spherical coordinates.
    https://stackoverflow.com/q/4116658

    Arguments
    ---------
    xyz : array (m, 3) that contains xyz coordinates. Where m is the amount of
        points.

    Returns
    -------
    array (m, 3) whose columns are: radius, polar angle measured from the
    +z axis down, and azimuth in the xy-plane.
    """
    x, y, z = xyz[:, 0], xyz[:, 1], xyz[:, 2]
    rho_sq = x**2 + y**2
    out = numpy.zeros_like(xyz)
    out[:, 0] = numpy.sqrt(rho_sq + z**2)
    # Elevation angle defined from the z-axis down.
    out[:, 1] = numpy.arctan2(numpy.sqrt(rho_sq), z)
    # Azimuth measured in the xy-plane from the x-axis.
    out[:, 2] = numpy.arctan2(y, x)
    return out
if __name__ == "__main__":
    # Parse command-line arguments.
    # NOTE(review): read_inputs is not defined in this section of the file;
    # presumably defined or imported earlier in the module — confirm.
    args = read_inputs()
    n = args.n
    # Semi-major axes of the ellipsoid along x, y and z.
    a1 = args.a1
    a2 = args.a2
    a3 = args.a3
    # Center of the ellipsoid.
    xc, yc, zc = args.xyzc
    filename = args.filename
    # Get a unit sphere triangulation with a specified level of refinement.
    # A refinement level of N will have (20*N^2) faces and (10*N^2 + 2)
    # vertices
    isph = icosphere(n)
    vertices = isph.p
    faces = isph.tri + 1  # Agrees with msms format (1-based indices)
    # get spherical coordinates for each point and project it to the
    # corresponding point on the ellipsoid. a1,a2,a3 are the semi-major axes
    # of the ellipsoid
    spvert = cart2sph(vertices)
    vertices[:, 0] = a1*numpy.cos(spvert[:, 2])*numpy.sin(spvert[:, 1])
    vertices[:, 1] = a2*numpy.sin(spvert[:, 2])*numpy.sin(spvert[:, 1])
    vertices[:, 2] = a3*numpy.cos(spvert[:, 1])
    # Write the vertices (shifted to the requested center) and faces.
    numpy.savetxt(filename+'.vert', vertices+numpy.array([xc,yc,zc]), fmt='%.4f')
    numpy.savetxt(filename+'.face', faces, fmt='%i')
    # plotting (kept for reference; requires matplotlib)
    #fig = pyplot.figure(figsize=(10,10))
    #ax = fig.gca(projection='3d')
    #ax.plot_trisurf(vertices[:,0],vertices[:,1],vertices[:,2], color='white',
    #                triangles=faces, linewidth=0.20,edgecolor='black',alpha=1.0)
    #arrayOfTicks = np.linspace(-70,70, 5)
    #ax.xaxis.pane.fill = False
    #ax.yaxis.pane.fill = False
    #ax.zaxis.pane.fill = False
    #ax.grid(True)
    #ax.w_xaxis.set_ticks(arrayOfTicks)
    #ax.w_yaxis.set_ticks(arrayOfTicks)
    #ax.w_zaxis.set_ticks(arrayOfTicks)
    #ilim = arrayOfTicks.min()
    #slim = arrayOfTicks.max()
    #ax.set_xlim3d(ilim, slim)
    #ax.set_ylim3d(ilim, slim)
    #ax.set_zlim3d(ilim, slim)
    #ax.view_init(0, -75)
| |
import unittest
from unittest.mock import Mock
from constants import TEMPERATURE, PRECIPITATION_CHANCE, PRECIPITATION_AMOUNT, WIND
from chief_lunch_officer import ChiefLunchOfficer, WeatherOpinion, FoodTaste
class FoodTasteTest(unittest.TestCase):
    """Tests for FoodTaste menu rating based on configured preferences."""

    def setUp(self):
        # Ratings keyed by menu item; higher means tastier.
        self.food_taste = FoodTaste()
        self.food_taste.preferences({
            'item1': 1,
            'item2': 2,
            'item3': 3,
            'item4': 4,
            'item5': 5,
            'item6_part1 item6_part2': 6
        })

    def test_unknown_food_one_item_menu(self):
        self.assertEqual(0, self.food_taste.rate('unknown_item'))

    def test_known_food_one_item_menu(self):
        self.assertEqual(3, self.food_taste.rate('item3'))

    def test_composite_menu_some_parts_unknown_highest_rating_is_chosen(self):
        self.assertEqual(4, self.food_taste.rate('item3 item4 then also something else'))

    def test_composite_menu_all_parts_unknown(self):
        # NOTE(review): the name says "all parts unknown" but every item here
        # is a known preference — verify the intended scenario.
        self.assertEqual(4, self.food_taste.rate('item3 item4 item2'))

    def test_empty_menu_rating_is_zero(self):
        self.assertEqual(0, self.food_taste.rate(''))

    def test_repeating_item_on_menu_does_not_change_rating(self):
        self.assertEqual(1, self.food_taste.rate('item1 item1 item1 item1 item1'))

    def test_no_preferences_configured(self):
        # Fresh instance with no preferences() call at all.
        self.food_taste = FoodTaste()
        self.assertEqual(0, self.food_taste.rate('item1'))

    def test_spaces_in_preferences(self):
        self.assertEqual(6, self.food_taste.rate('item6_part1 item6_part2 something else'))

    def test_case_in_menu_is_ignored(self):
        self.assertEqual(1, self.food_taste.rate('iTeM1'))
class WeatherOpinionTest(unittest.TestCase):
    """Tests for WeatherOpinion's positive/negative weather verdict."""

    def setUp(self):
        self.opinion = WeatherOpinion()
        # Baseline "pleasant" weather; each test overrides one factor.
        self.weather = {
            TEMPERATURE: 10,
            PRECIPITATION_CHANCE: 10,
            PRECIPITATION_AMOUNT: 0.2,
            WIND: 5
        }
        self.opinion.weather(self.weather)

    def test_if_too_strong_wind_weather_is_bad(self):
        self.weather[WIND] = 15
        self.assertFalse(self.opinion.is_positive())

    def test_if_too_cold_weather_is_bad(self):
        self.weather[TEMPERATURE] = -30
        self.assertFalse(self.opinion.is_positive())

    def test_if_too_hot_weather_is_bad(self):
        self.weather[TEMPERATURE] = 30
        self.assertFalse(self.opinion.is_positive())

    def test_if_a_lot_of_precipitation_with_high_chance_weather_is_bad(self):
        self.weather[PRECIPITATION_CHANCE] = 80
        self.weather[PRECIPITATION_AMOUNT] = 2.0
        self.assertFalse(self.opinion.is_positive())

    def test_if_temperature_is_mild_minor_chance_of_minor_precipitation_and_wind_mild(self):
        self.weather[WIND] = 5
        self.weather[TEMPERATURE] = -2
        self.weather[PRECIPITATION_CHANCE] = 10
        self.weather[PRECIPITATION_AMOUNT] = 0.5
        self.assertTrue(self.opinion.is_positive())

    def test_if_no_weather_information_then_rating_is_None(self):
        # Fresh instance with no weather() call at all.
        self.opinion = WeatherOpinion()
        self.assertIsNone(self.opinion.is_positive())
class ChiefLunchOfficerTest(unittest.TestCase):
    """Tests for ChiefLunchOfficer's cafe-ranking decisions.

    Taste and weather collaborators are mocked so each test controls
    exactly one ranking factor.
    """

    def setUp(self):
        self.taste = FoodTaste().preferences({})
        self.weather = WeatherOpinion()
        self.weather.is_positive = Mock(return_value=True)
        self.clo = ChiefLunchOfficer(food_taste=self.taste, weather_opinion=self.weather)

    def test_if_only_one_cafe_to_choose_from_it_is_chosen(self):
        self.clo.cafes({
            'cafe1': {
                'menu': 'food'
            }
        })
        self.assertEqual('cafe1', self.clo.decide_one())

    def test_if_all_same_but_one_cafe_has_better_rating_it_is_chosen(self):
        def rate(menu):
            ratings = {
                'good_food': 2,
                'excellent_food': 3,
                'some_food': 1
            }
            return ratings[menu]
        self.taste.rate = Mock(side_effect=rate)
        self.clo.cafes({
            'cafe1': {
                'menu': 'good_food',
                'distance': 1
            },
            'cafe2': {
                'menu': 'excellent_food',
                'distance': 1
            },
            'cafe3': {
                'menu': 'some_food',
                'distance': 1
            }
        })
        self.assertEqual(['cafe2', 'cafe1', 'cafe3'], self.clo.decide())

    def test_if_all_same_and_bad_weather_then_cafe_with_shortest_distance_is_chosen(self):
        self.taste.rate = Mock(return_value=0)
        self.weather.is_positive = Mock(return_value=False)
        self.clo.cafes({
            'cafe1': {
                'menu': 'food1',
                'distance': 2
            },
            'cafe2': {
                'menu': 'food2',
                'distance': 3
            },
            'cafe3': {
                'menu': 'food3',
                'distance': 1
            }
        })
        self.assertEqual(['cafe3', 'cafe1', 'cafe2'], self.clo.decide())

    def test_if_all_same_but_history_not_visited_cafe_is_preferred(self):
        self.clo.cafes({
            'cafe1': {
                'menu': 'food1',
                'distance': 1
            },
            'cafe2': {
                'menu': 'food2',
                'distance': 1
            },
            'cafe3': {
                'menu': 'food3',
                'distance': 1
            }
        })
        self.clo.lunched(['cafe2', 'cafe3', 'cafe3', 'cafe1', 'cafe1', 'cafe1'])
        self.assertEqual(['cafe2', 'cafe3', 'cafe1'], self.clo.decide())

    def test_if_all_same_choose_cafe_with_higher_rating(self):
        self.clo.cafes({
            'cafe1': {
                'menu': 'food1',
                'distance': 1,
                'rating': 2
            },
            'cafe2': {
                'menu': 'food2',
                'distance': 1,
                'rating': 3
            },
            'cafe3': {
                'menu': 'food3',
                'distance': 1,
                'rating': 1
            }
        })
        self.clo.lunched([])
        self.assertEqual(['cafe2', 'cafe1', 'cafe3'], self.clo.decide())

    def test_if_all_same_and_some_cafe_preferred_on_this_weekday_choose_this_cafe(self):
        self.clo.weekday(4)
        self.clo.cafes({
            'cafe1': {
                'menu': 'food1',
                'distance': 1,
                'preferred_weekdays': [3]
            },
            'cafe2': {
                'menu': 'food2',
                'distance': 1,
                'preferred_weekdays': [4]
            }
        })
        self.assertEqual(['cafe2', 'cafe1'], self.clo.decide())
        self.clo.cafes({
            'cafe2': {
                'menu': 'food2',
                'distance': 1
            },
            'cafe3': {
                'menu': 'food3',
                'distance': 1,
                'preferred_weekdays': [4]
            }
        })
        self.assertEqual(['cafe3', 'cafe2'], self.clo.decide())

    def test_if_some_cafe_is_closed_then_do_not_choose_this_cafe(self):
        self.clo.weekday(5)
        self.clo.cafes({
            'cafe1': {
                'menu': 'food1',
                'distance': 1,
                'closed_weekdays': [5, 6]
            },
            'cafe2': {
                'menu': 'food2',
                'distance': 1
            }
        })
        self.assertEqual(['cafe2'], self.clo.decide())
        self.clo.weekday(4)
        self.assertEqual({'cafe1', 'cafe2'}, set(self.clo.decide()))

    def test_if_some_cafe_configured_as_once_per_week_and_already_went_there_then_skip_it(self):
        self.clo.cafes({
            'cafe1': {
                'menu': 'food1',
                'distance': 1,
                'once_per_week': True
            },
            'cafe2': {
                'menu': 'food2',
                'distance': 1
            }
        })
        self.clo.lunched([])
        self.assertEqual({'cafe1', 'cafe2'}, set(self.clo.decide()))
        self.clo.lunched(['cafe1', 'cafe2'])
        self.assertEqual({'cafe2'}, set(self.clo.decide()))

    def test_if_no_weather_opinion_then_still_decide(self):
        self.weather.is_positive = Mock(return_value=None)
        self.clo.cafes({
            'cafe1': {
                'menu': 'food1',
                'distance': 1
            }
        })
        self.assertEqual(['cafe1'], self.clo.decide())

    def test_if_no_cafes_provided_then_empty_list(self):
        self.clo.cafes({})
        self.assertEqual([], self.clo.decide())
        self.assertEqual('No idea', self.clo.decide_one())

    def test_if_no_history_provided_then_decision_still_made(self):
        # Renamed from "..._then_empty_list": the expected result is a
        # one-element decision, not an empty list.
        self.clo.lunched([])
        self.clo.cafes({
            'cafe1': {
                'menu': 'food1',
                'distance': 1
            }
        })
        self.assertEqual(['cafe1'], self.clo.decide())

    def test_fluent_configuration_with_no_data_then_empty_list(self):
        # BUG FIX: this method originally duplicated the name
        # test_if_no_cafes_provided_then_empty_list, silently shadowing the
        # earlier test of that name so it never ran. Renamed so both run.
        self.clo.cafes({}).lunched([]).weather(None).weekday(None)
        self.assertEqual([], self.clo.decide())
        self.assertEqual('No idea', self.clo.decide_one())
| |
"""Implementation of the WebSocket protocol.
`WebSockets <http://dev.w3.org/html5/websockets/>`_ allow for bidirectional
communication between the browser and server.
WebSockets are supported in the current versions of all major browsers,
although older versions that do not support WebSockets are still in use
(refer to http://caniuse.com/websockets for details).
This module implements the final version of the WebSocket protocol as
defined in `RFC 6455 <http://tools.ietf.org/html/rfc6455>`_. Certain
browser versions (notably Safari 5.x) implemented an earlier draft of
the protocol (known as "draft 76") and are not compatible with this module.
.. versionchanged:: 4.0
Removed support for the draft 76 protocol version.
"""
# pylint: skip-file
from __future__ import absolute_import, division, print_function
# Author: Jacob Kristhammar, 2010
import base64
import collections
import hashlib
import os
import struct
import salt.ext.tornado.escape as tornado_escape
import salt.ext.tornado.web as tornado_web
import zlib
from salt.ext.tornado.concurrent import TracebackFuture
from salt.ext.tornado.escape import utf8, native_str, to_unicode
from salt.ext.tornado import gen, httpclient, httputil
from salt.ext.tornado.ioloop import IOLoop, PeriodicCallback
from salt.ext.tornado.iostream import StreamClosedError
from salt.ext.tornado.log import gen_log, app_log
from salt.ext.tornado import simple_httpclient
from salt.ext.tornado.tcpclient import TCPClient
from salt.ext.tornado.util import _websocket_mask, PY3
if PY3:
    from urllib.parse import urlparse  # py3
    xrange = range
else:
    from urlparse import urlparse  # py2
class WebSocketError(Exception):
    """Base class for errors raised by this websocket module."""
    pass
class WebSocketClosedError(WebSocketError):
    """Raised by operations on a closed connection.

    .. versionadded:: 3.2
    """
    pass
class WebSocketHandler(tornado_web.RequestHandler):
    """Subclass this class to create a basic WebSocket handler.

    Override `on_message` to handle incoming messages, and use
    `write_message` to send messages to the client. You can also
    override `open` and `on_close` to handle opened and closed
    connections.

    Custom upgrade response headers can be sent by overriding
    `~tornado.web.RequestHandler.set_default_headers` or
    `~tornado.web.RequestHandler.prepare`.

    See http://dev.w3.org/html5/websockets/ for details on the
    JavaScript interface. The protocol is specified at
    http://tools.ietf.org/html/rfc6455.

    Here is an example WebSocket handler that echos back all received messages
    back to the client:

    .. testcode::

      class EchoWebSocket(tornado.websocket.WebSocketHandler):
          def open(self):
              print("WebSocket opened")

          def on_message(self, message):
              self.write_message(u"You said: " + message)

          def on_close(self):
              print("WebSocket closed")

    .. testoutput::
       :hide:

    WebSockets are not standard HTTP connections. The "handshake" is
    HTTP, but after the handshake, the protocol is
    message-based. Consequently, most of the Tornado HTTP facilities
    are not available in handlers of this type. The only communication
    methods available to you are `write_message()`, `ping()`, and
    `close()`. Likewise, your request handler class should implement
    `open()` method rather than ``get()`` or ``post()``.

    If you map the handler above to ``/websocket`` in your application, you can
    invoke it in JavaScript with::

      var ws = new WebSocket("ws://localhost:8888/websocket");
      ws.onopen = function() {
         ws.send("Hello, world");
      };
      ws.onmessage = function (evt) {
         alert(evt.data);
      };

    This script pops up an alert box that says "You said: Hello, world".

    Web browsers allow any site to open a websocket connection to any other,
    instead of using the same-origin policy that governs other network
    access from javascript. This can be surprising and is a potential
    security hole, so since Tornado 4.0 `WebSocketHandler` requires
    applications that wish to receive cross-origin websockets to opt in
    by overriding the `~WebSocketHandler.check_origin` method (see that
    method's docs for details). Failure to do so is the most likely
    cause of 403 errors when making a websocket connection.

    When using a secure websocket connection (``wss://``) with a self-signed
    certificate, the connection from a browser may fail because it wants
    to show the "accept this certificate" dialog but has nowhere to show it.
    You must first visit a regular HTML page using the same certificate
    to accept it before the websocket connection will succeed.

    If the application setting ``websocket_ping_interval`` has a non-zero
    value, a ping will be sent periodically, and the connection will be
    closed if a response is not received before the ``websocket_ping_timeout``.

    Messages larger than the ``websocket_max_message_size`` application setting
    (default 10MiB) will not be accepted.

    .. versionchanged:: 4.5
       Added ``websocket_ping_interval``, ``websocket_ping_timeout``, and
       ``websocket_max_message_size``.
    """
    def __init__(self, application, request, **kwargs):
        super(WebSocketHandler, self).__init__(application, request, **kwargs)
        # Active protocol instance once the handshake succeeds; None before
        # the upgrade and after close().
        self.ws_connection = None
        # Close code/reason supplied by the peer, if any (see on_close).
        self.close_code = None
        self.close_reason = None
        # The detached IOStream; set by _attach_stream after the handshake.
        self.stream = None
        self._on_close_called = False

    @tornado_web.asynchronous
    def get(self, *args, **kwargs):
        # Performs the HTTP -> websocket upgrade handshake: validates the
        # Upgrade/Connection/Origin headers, then hands the request to the
        # protocol implementation.
        self.open_args = args
        self.open_kwargs = kwargs
        # Upgrade header should be present and should be equal to WebSocket
        if self.request.headers.get("Upgrade", "").lower() != 'websocket':
            self.set_status(400)
            log_msg = "Can \"Upgrade\" only to \"WebSocket\"."
            self.finish(log_msg)
            gen_log.debug(log_msg)
            return
        # Connection header should be upgrade.
        # Some proxy servers/load balancers
        # might mess with it.
        headers = self.request.headers
        # NOTE: on py3 `map` is lazy; the single membership test below
        # consumes it, which is all that is needed here.
        connection = map(lambda s: s.strip().lower(),
                         headers.get("Connection", "").split(","))
        if 'upgrade' not in connection:
            self.set_status(400)
            log_msg = "\"Connection\" must be \"Upgrade\"."
            self.finish(log_msg)
            gen_log.debug(log_msg)
            return
        # Handle WebSocket Origin naming convention differences
        # The difference between version 8 and 13 is that in 8 the
        # client sends a "Sec-Websocket-Origin" header and in 13 it's
        # simply "Origin".
        if "Origin" in self.request.headers:
            origin = self.request.headers.get("Origin")
        else:
            origin = self.request.headers.get("Sec-Websocket-Origin", None)
        # If there was an origin header, check to make sure it matches
        # according to check_origin. When the origin is None, we assume it
        # did not come from a browser and that it can be passed on.
        if origin is not None and not self.check_origin(origin):
            self.set_status(403)
            log_msg = "Cross origin websockets not allowed"
            self.finish(log_msg)
            gen_log.debug(log_msg)
            return
        self.ws_connection = self.get_websocket_protocol()
        if self.ws_connection:
            self.ws_connection.accept_connection()
        else:
            self.set_status(426, "Upgrade Required")
            self.set_header("Sec-WebSocket-Version", "7, 8, 13")
            self.finish()

    # Class-level default so the attribute exists even before __init__ runs.
    stream = None

    @property
    def ping_interval(self):
        """The interval for websocket keep-alive pings.

        Set websocket_ping_interval = 0 to disable pings.
        """
        return self.settings.get('websocket_ping_interval', None)

    @property
    def ping_timeout(self):
        """If no ping is received in this many seconds,
        close the websocket connection (VPNs, etc. can fail to cleanly close ws connections).
        Default is max of 3 pings or 30 seconds.
        """
        return self.settings.get('websocket_ping_timeout', None)

    @property
    def max_message_size(self):
        """Maximum allowed message size.

        If the remote peer sends a message larger than this, the connection
        will be closed.

        Default is 10MiB.
        """
        return self.settings.get('websocket_max_message_size', None)

    def write_message(self, message, binary=False):
        """Sends the given message to the client of this Web Socket.

        The message may be either a string or a dict (which will be
        encoded as json). If the ``binary`` argument is false, the
        message will be sent as utf8; in binary mode any byte string
        is allowed.

        If the connection is already closed, raises `WebSocketClosedError`.

        .. versionchanged:: 3.2
           `WebSocketClosedError` was added (previously a closed connection
           would raise an `AttributeError`)

        .. versionchanged:: 4.3
           Returns a `.Future` which can be used for flow control.
        """
        if self.ws_connection is None:
            raise WebSocketClosedError()
        if isinstance(message, dict):
            message = tornado_escape.json_encode(message)
        return self.ws_connection.write_message(message, binary=binary)

    def select_subprotocol(self, subprotocols):
        """Invoked when a new WebSocket requests specific subprotocols.

        ``subprotocols`` is a list of strings identifying the
        subprotocols proposed by the client. This method may be
        overridden to return one of those strings to select it, or
        ``None`` to not select a subprotocol. Failure to select a
        subprotocol does not automatically abort the connection,
        although clients may close the connection if none of their
        proposed subprotocols was selected.
        """
        return None

    def get_compression_options(self):
        """Override to return compression options for the connection.

        If this method returns None (the default), compression will
        be disabled. If it returns a dict (even an empty one), it
        will be enabled. The contents of the dict may be used to
        control the following compression options:

        ``compression_level`` specifies the compression level.

        ``mem_level`` specifies the amount of memory used for the internal compression state.

        These parameters are documented in details here:
        https://docs.python.org/3.6/library/zlib.html#zlib.compressobj

        .. versionadded:: 4.1

        .. versionchanged:: 4.5
           Added ``compression_level`` and ``mem_level``.
        """
        # TODO: Add wbits option.
        return None

    def open(self, *args, **kwargs):
        """Invoked when a new WebSocket is opened.

        The arguments to `open` are extracted from the `tornado.web.URLSpec`
        regular expression, just like the arguments to
        `tornado.web.RequestHandler.get`.
        """
        pass

    def on_message(self, message):
        """Handle incoming messages on the WebSocket

        This method must be overridden.

        .. versionchanged:: 4.5
           ``on_message`` can be a coroutine.
        """
        raise NotImplementedError

    def ping(self, data):
        """Send ping frame to the remote end."""
        if self.ws_connection is None:
            raise WebSocketClosedError()
        self.ws_connection.write_ping(data)

    def on_pong(self, data):
        """Invoked when the response to a ping frame is received."""
        pass

    def on_ping(self, data):
        """Invoked when the a ping frame is received."""
        pass

    def on_close(self):
        """Invoked when the WebSocket is closed.

        If the connection was closed cleanly and a status code or reason
        phrase was supplied, these values will be available as the attributes
        ``self.close_code`` and ``self.close_reason``.

        .. versionchanged:: 4.0
           Added ``close_code`` and ``close_reason`` attributes.
        """
        pass

    def close(self, code=None, reason=None):
        """Closes this Web Socket.

        Once the close handshake is successful the socket will be closed.

        ``code`` may be a numeric status code, taken from the values
        defined in `RFC 6455 section 7.4.1
        <https://tools.ietf.org/html/rfc6455#section-7.4.1>`_.
        ``reason`` may be a textual message about why the connection is
        closing. These values are made available to the client, but are
        not otherwise interpreted by the websocket protocol.

        .. versionchanged:: 4.0
           Added the ``code`` and ``reason`` arguments.
        """
        if self.ws_connection:
            self.ws_connection.close(code, reason)
            self.ws_connection = None

    def check_origin(self, origin):
        """Override to enable support for allowing alternate origins.

        The ``origin`` argument is the value of the ``Origin`` HTTP
        header, the url responsible for initiating this request. This
        method is not called for clients that do not send this header;
        such requests are always allowed (because all browsers that
        implement WebSockets support this header, and non-browser
        clients do not have the same cross-site security concerns).

        Should return True to accept the request or False to reject it.
        By default, rejects all requests with an origin on a host other
        than this one.

        This is a security protection against cross site scripting attacks on
        browsers, since WebSockets are allowed to bypass the usual same-origin
        policies and don't use CORS headers.

        .. warning::

           This is an important security measure; don't disable it
           without understanding the security implications. In
           particular, if your authentication is cookie-based, you
           must either restrict the origins allowed by
           ``check_origin()`` or implement your own XSRF-like
           protection for websocket connections. See `these
           <https://www.christian-schneider.net/CrossSiteWebSocketHijacking.html>`_
           `articles
           <https://devcenter.heroku.com/articles/websocket-security>`_
           for more.

        To accept all cross-origin traffic (which was the default prior to
        Tornado 4.0), simply override this method to always return true::

            def check_origin(self, origin):
                return True

        To allow connections from any subdomain of your site, you might
        do something like::

            def check_origin(self, origin):
                parsed_origin = urllib.parse.urlparse(origin)
                return parsed_origin.netloc.endswith(".mydomain.com")

        .. versionadded:: 4.0
        """
        parsed_origin = urlparse(origin)
        origin = parsed_origin.netloc
        origin = origin.lower()
        host = self.request.headers.get("Host")
        # Check to see that origin matches host directly, including ports
        return origin == host

    def set_nodelay(self, value):
        """Set the no-delay flag for this stream.

        By default, small messages may be delayed and/or combined to minimize
        the number of packets sent. This can sometimes cause 200-500ms delays
        due to the interaction between Nagle's algorithm and TCP delayed
        ACKs. To reduce this delay (at the expense of possibly increasing
        bandwidth usage), call ``self.set_nodelay(True)`` once the websocket
        connection is established.

        See `.BaseIOStream.set_nodelay` for additional details.

        .. versionadded:: 3.1
        """
        self.stream.set_nodelay(value)

    def on_connection_close(self):
        # Tear down the protocol object and make sure on_close() runs
        # exactly once, even if the connection dies unexpectedly.
        if self.ws_connection:
            self.ws_connection.on_connection_close()
            self.ws_connection = None
        if not self._on_close_called:
            self._on_close_called = True
            self.on_close()
            self._break_cycles()

    def _break_cycles(self):
        # WebSocketHandlers call finish() early, but we don't want to
        # break up reference cycles (which makes it impossible to call
        # self.render_string) until after we've really closed the
        # connection (if it was established in the first place,
        # indicated by status code 101).
        if self.get_status() != 101 or self._on_close_called:
            super(WebSocketHandler, self)._break_cycles()

    def send_error(self, *args, **kwargs):
        if self.stream is None:
            super(WebSocketHandler, self).send_error(*args, **kwargs)
        else:
            # If we get an uncaught exception during the handshake,
            # we have no choice but to abruptly close the connection.
            # TODO: for uncaught exceptions after the handshake,
            # we can close the connection more gracefully.
            self.stream.close()

    def get_websocket_protocol(self):
        # Returns a protocol object for supported versions, or None (which
        # get() turns into a 426 Upgrade Required response).
        websocket_version = self.request.headers.get("Sec-WebSocket-Version")
        if websocket_version in ("7", "8", "13"):
            return WebSocketProtocol13(
                self, compression_options=self.get_compression_options())

    def _attach_stream(self):
        # Take ownership of the connection's stream after the 101 response
        # and disable the HTTP-only RequestHandler methods.
        self.stream = self.request.connection.detach()
        self.stream.set_close_callback(self.on_connection_close)
        # disable non-WS methods
        for method in ["write", "redirect", "set_header", "set_cookie",
                       "set_status", "flush", "finish"]:
            setattr(self, method, _raise_not_supported_for_websockets)
def _raise_not_supported_for_websockets(*args, **kwargs):
raise RuntimeError("Method not supported for Web Sockets")
class WebSocketProtocol(object):
    """Base class for WebSocket protocol versions.
    """
    def __init__(self, handler):
        self.handler = handler
        self.request = handler.request
        self.stream = handler.stream
        # Per-side termination flags; both are set once the connection dies.
        self.client_terminated = False
        self.server_terminated = False

    def _run_callback(self, callback, *args, **kwargs):
        """Runs the given callback with exception handling.

        If the callback is a coroutine, returns its Future. On error, aborts the
        websocket connection and returns None.
        """
        try:
            result = callback(*args, **kwargs)
        except Exception:
            app_log.error("Uncaught exception in %s",
                          getattr(self.request, 'path', None), exc_info=True)
            self._abort()
        else:
            if result is not None:
                result = gen.convert_yielded(result)
                # Re-raise any exception from the coroutine on the IOLoop.
                self.stream.io_loop.add_future(result, lambda f: f.result())
            return result

    def on_connection_close(self):
        self._abort()

    def _abort(self):
        """Instantly aborts the WebSocket connection by closing the socket"""
        self.client_terminated = True
        self.server_terminated = True
        self.stream.close()  # forcibly tear down the connection
        self.close()  # let the subclass cleanup
class _PerMessageDeflateCompressor(object):
def __init__(self, persistent, max_wbits, compression_options=None):
if max_wbits is None:
max_wbits = zlib.MAX_WBITS
# There is no symbolic constant for the minimum wbits value.
if not (8 <= max_wbits <= zlib.MAX_WBITS):
raise ValueError("Invalid max_wbits value %r; allowed range 8-%d",
max_wbits, zlib.MAX_WBITS)
self._max_wbits = max_wbits
if compression_options is None or 'compression_level' not in compression_options:
self._compression_level = tornado_web.GZipContentEncoding.GZIP_LEVEL
else:
self._compression_level = compression_options['compression_level']
if compression_options is None or 'mem_level' not in compression_options:
self._mem_level = 8
else:
self._mem_level = compression_options['mem_level']
if persistent:
self._compressor = self._create_compressor()
else:
self._compressor = None
def _create_compressor(self):
return zlib.compressobj(self._compression_level, zlib.DEFLATED, -self._max_wbits, self._mem_level)
def compress(self, data):
compressor = self._compressor or self._create_compressor()
data = (compressor.compress(data) +
compressor.flush(zlib.Z_SYNC_FLUSH))
assert data.endswith(b'\x00\x00\xff\xff')
return data[:-4]
class _PerMessageDeflateDecompressor(object):
def __init__(self, persistent, max_wbits, compression_options=None):
if max_wbits is None:
max_wbits = zlib.MAX_WBITS
if not (8 <= max_wbits <= zlib.MAX_WBITS):
raise ValueError("Invalid max_wbits value %r; allowed range 8-%d",
max_wbits, zlib.MAX_WBITS)
self._max_wbits = max_wbits
if persistent:
self._decompressor = self._create_decompressor()
else:
self._decompressor = None
def _create_decompressor(self):
return zlib.decompressobj(-self._max_wbits)
def decompress(self, data):
decompressor = self._decompressor or self._create_decompressor()
return decompressor.decompress(data + b'\x00\x00\xff\xff')
class WebSocketProtocol13(WebSocketProtocol):
"""Implementation of the WebSocket protocol from RFC 6455.
This class supports versions 7 and 8 of the protocol in addition to the
final version 13.
"""
# Bit masks for the first byte of a frame.
FIN = 0x80
RSV1 = 0x40
RSV2 = 0x20
RSV3 = 0x10
RSV_MASK = RSV1 | RSV2 | RSV3
OPCODE_MASK = 0x0f
    def __init__(self, handler, mask_outgoing=False,
                 compression_options=None):
        WebSocketProtocol.__init__(self, handler)
        # Whether outgoing frames are masked.
        self.mask_outgoing = mask_outgoing
        # Per-frame parsing state.
        self._final_frame = False
        self._frame_opcode = None
        self._masked_frame = None
        self._frame_mask = None
        self._frame_length = None
        # Accumulates payloads of a fragmented message until the final frame.
        self._fragmented_message_buffer = None
        self._fragmented_message_opcode = None
        self._waiting = None
        self._compression_options = compression_options
        self._decompressor = None
        self._compressor = None
        self._frame_compressed = None
        # The total uncompressed size of all messages received or sent.
        # Unicode messages are encoded to utf8.
        # Only for testing; subject to change.
        self._message_bytes_in = 0
        self._message_bytes_out = 0
        # The total size of all packets received or sent. Includes
        # the effect of compression, frame overhead, and control frames.
        self._wire_bytes_in = 0
        self._wire_bytes_out = 0
        # Keep-alive ping bookkeeping (see start_pinging).
        self.ping_callback = None
        self.last_ping = 0
        self.last_pong = 0
    def accept_connection(self):
        # Validate the handshake headers, then complete the upgrade; a
        # missing header yields a 400 response, any other malformed input
        # aborts the connection.
        try:
            self._handle_websocket_headers()
        except ValueError:
            self.handler.set_status(400)
            log_msg = "Missing/Invalid WebSocket headers"
            self.handler.finish(log_msg)
            gen_log.debug(log_msg)
            return
        try:
            self._accept_connection()
        except ValueError:
            gen_log.debug("Malformed WebSocket request received",
                          exc_info=True)
            self._abort()
            return
    def _handle_websocket_headers(self):
        """Verifies all invariant- and required headers.

        If a header is missing or has an incorrect value, ValueError will be
        raised.
        """
        fields = ("Host", "Sec-Websocket-Key", "Sec-Websocket-Version")
        if not all(map(lambda f: self.request.headers.get(f), fields)):
            raise ValueError("Missing/Invalid WebSocket headers")
@staticmethod
def compute_accept_value(key):
"""Computes the value for the Sec-WebSocket-Accept header,
given the value for Sec-WebSocket-Key.
"""
sha1 = hashlib.sha1()
sha1.update(utf8(key))
sha1.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11") # Magic value
return native_str(base64.b64encode(sha1.digest()))
    def _challenge_response(self):
        """Return the Sec-WebSocket-Accept value for this request's key."""
        return WebSocketProtocol13.compute_accept_value(
            self.request.headers.get("Sec-Websocket-Key"))
    def _accept_connection(self):
        """Send the 101 handshake response and start the protocol.

        Negotiates the subprotocol and the permessage-deflate extension,
        finishes the HTTP response, takes over the stream, starts
        pinging, and begins reading frames.
        """
        subprotocols = self.request.headers.get("Sec-WebSocket-Protocol", '')
        subprotocols = [s.strip() for s in subprotocols.split(',')]
        if subprotocols:
            # The handler may pick at most one of the offered subprotocols.
            selected = self.handler.select_subprotocol(subprotocols)
            if selected:
                assert selected in subprotocols
                self.handler.set_header("Sec-WebSocket-Protocol", selected)
        extensions = self._parse_extensions_header(self.request.headers)
        for ext in extensions:
            if (ext[0] == 'permessage-deflate' and
                    self._compression_options is not None):
                # TODO: negotiate parameters if compression_options
                # specifies limits.
                self._create_compressors('server', ext[1], self._compression_options)
                if ('client_max_window_bits' in ext[1] and
                        ext[1]['client_max_window_bits'] is None):
                    # Don't echo an offered client_max_window_bits
                    # parameter with no value.
                    del ext[1]['client_max_window_bits']
                self.handler.set_header("Sec-WebSocket-Extensions",
                                        httputil._encode_header(
                                            'permessage-deflate', ext[1]))
                break
        # Switch from HTTP to the websocket protocol: 101 response, then
        # detach the stream from the HTTP machinery.
        self.handler.clear_header("Content-Type")
        self.handler.set_status(101)
        self.handler.set_header("Upgrade", "websocket")
        self.handler.set_header("Connection", "Upgrade")
        self.handler.set_header("Sec-WebSocket-Accept", self._challenge_response())
        self.handler.finish()
        self.handler._attach_stream()
        self.stream = self.handler.stream
        self.start_pinging()
        self._run_callback(self.handler.open, *self.handler.open_args,
                           **self.handler.open_kwargs)
        self._receive_frame()
def _parse_extensions_header(self, headers):
extensions = headers.get("Sec-WebSocket-Extensions", '')
if extensions:
return [httputil._parse_header(e.strip())
for e in extensions.split(',')]
return []
    def _process_server_headers(self, key, headers):
        """Process the headers sent by the server to this client connection.

        'key' is the websocket handshake challenge/response key.  Raises
        (via assert/ValueError) if the server's response is not a valid
        websocket upgrade or advertises an unsupported extension.
        """
        assert headers['Upgrade'].lower() == 'websocket'
        assert headers['Connection'].lower() == 'upgrade'
        accept = self.compute_accept_value(key)
        assert headers['Sec-Websocket-Accept'] == accept
        extensions = self._parse_extensions_header(headers)
        for ext in extensions:
            if (ext[0] == 'permessage-deflate' and
                    self._compression_options is not None):
                self._create_compressors('client', ext[1])
            else:
                raise ValueError("unsupported extension %r", ext)
def _get_compressor_options(self, side, agreed_parameters, compression_options=None):
"""Converts a websocket agreed_parameters set to keyword arguments
for our compressor objects.
"""
options = dict(
persistent=(side + '_no_context_takeover') not in agreed_parameters)
wbits_header = agreed_parameters.get(side + '_max_window_bits', None)
if wbits_header is None:
options['max_wbits'] = zlib.MAX_WBITS
else:
options['max_wbits'] = int(wbits_header)
options['compression_options'] = compression_options
return options
    def _create_compressors(self, side, agreed_parameters, compression_options=None):
        """Instantiate the permessage-deflate compressor/decompressor pair.

        Our compressor uses this ``side``'s parameters; the decompressor
        uses the peer's.  Raises ValueError on unknown parameter names.
        """
        # TODO: handle invalid parameters gracefully
        allowed_keys = set(['server_no_context_takeover',
                            'client_no_context_takeover',
                            'server_max_window_bits',
                            'client_max_window_bits'])
        for key in agreed_parameters:
            if key not in allowed_keys:
                raise ValueError("unsupported compression parameter %r" % key)
        other_side = 'client' if (side == 'server') else 'server'
        self._compressor = _PerMessageDeflateCompressor(
            **self._get_compressor_options(side, agreed_parameters, compression_options))
        self._decompressor = _PerMessageDeflateDecompressor(
            **self._get_compressor_options(other_side, agreed_parameters, compression_options))
    def _write_frame(self, fin, opcode, data, flags=0):
        """Serialize and send a single frame.

        ``fin`` marks the final frame of a message; ``flags`` carries
        RSV bits (e.g. RSV1 for compressed payloads).
        """
        if fin:
            finbit = self.FIN
        else:
            finbit = 0
        # First header byte: FIN + RSV flags + opcode.
        frame = struct.pack("B", finbit | opcode | flags)
        l = len(data)
        if self.mask_outgoing:
            mask_bit = 0x80
        else:
            mask_bit = 0
        # Payload length uses a 7-bit, 16-bit or 64-bit encoding depending
        # on its size (RFC 6455 section 5.2).
        if l < 126:
            frame += struct.pack("B", l | mask_bit)
        elif l <= 0xFFFF:
            frame += struct.pack("!BH", 126 | mask_bit, l)
        else:
            frame += struct.pack("!BQ", 127 | mask_bit, l)
        if self.mask_outgoing:
            # Clients mask every frame with a fresh random 4-byte key.
            mask = os.urandom(4)
            data = mask + _websocket_mask(mask, data)
        frame += data
        self._wire_bytes_out += len(frame)
        try:
            return self.stream.write(frame)
        except StreamClosedError:
            self._abort()
    def write_message(self, message, binary=False):
        """Sends the given message to the client of this Web Socket.

        Text messages are utf8-encoded; the payload is compressed when
        permessage-deflate was negotiated.
        """
        if binary:
            opcode = 0x2
        else:
            opcode = 0x1
        message = tornado_escape.utf8(message)
        assert isinstance(message, bytes)
        self._message_bytes_out += len(message)
        flags = 0
        if self._compressor:
            message = self._compressor.compress(message)
            # RSV1 marks the payload as compressed.
            flags |= self.RSV1
        return self._write_frame(True, opcode, message, flags=flags)
    def write_ping(self, data):
        """Send ping frame (opcode 0x9)."""
        assert isinstance(data, bytes)
        self._write_frame(True, 0x9, data)
    def _receive_frame(self):
        """Begin reading the next frame: the two fixed header bytes."""
        try:
            self.stream.read_bytes(2, self._on_frame_start)
        except StreamClosedError:
            self._abort()
    def _on_frame_start(self, data):
        """Parse the first two header bytes and read the rest of the frame.

        Aborts on protocol violations: unknown RSV bits or oversized
        control frames.
        """
        self._wire_bytes_in += len(data)
        header, payloadlen = struct.unpack("BB", data)
        self._final_frame = header & self.FIN
        reserved_bits = header & self.RSV_MASK
        self._frame_opcode = header & self.OPCODE_MASK
        # Opcodes 0x8-0xF (high bit set) are control frames.
        self._frame_opcode_is_control = self._frame_opcode & 0x8
        if self._decompressor is not None and self._frame_opcode != 0:
            # Compression applies per-message; RSV1 is only meaningful on
            # the first (non-continuation) frame of a message.
            self._frame_compressed = bool(reserved_bits & self.RSV1)
            reserved_bits &= ~self.RSV1
        if reserved_bits:
            # client is using as-yet-undefined extensions; abort
            self._abort()
            return
        self._masked_frame = bool(payloadlen & 0x80)
        payloadlen = payloadlen & 0x7f
        if self._frame_opcode_is_control and payloadlen >= 126:
            # control frames must have payload < 126
            self._abort()
            return
        try:
            if payloadlen < 126:
                self._frame_length = payloadlen
                if self._masked_frame:
                    self.stream.read_bytes(4, self._on_masking_key)
                else:
                    self._read_frame_data(False)
            elif payloadlen == 126:
                # Extended 16-bit payload length follows.
                self.stream.read_bytes(2, self._on_frame_length_16)
            elif payloadlen == 127:
                # Extended 64-bit payload length follows.
                self.stream.read_bytes(8, self._on_frame_length_64)
        except StreamClosedError:
            self._abort()
    def _read_frame_data(self, masked):
        """Read the frame payload, enforcing the maximum message size.

        The limit counts any already-buffered fragments of the current
        message; oversized messages are refused with close code 1009.
        """
        new_len = self._frame_length
        if self._fragmented_message_buffer is not None:
            new_len += len(self._fragmented_message_buffer)
        # Default cap is 10 MiB when the handler sets no max_message_size.
        if new_len > (self.handler.max_message_size or 10 * 1024 * 1024):
            self.close(1009, "message too big")
            return
        self.stream.read_bytes(
            self._frame_length,
            self._on_masked_frame_data if masked else self._on_frame_data)
    def _on_frame_length_16(self, data):
        """Handle an extended 16-bit payload length, then read the payload."""
        self._wire_bytes_in += len(data)
        self._frame_length = struct.unpack("!H", data)[0]
        try:
            if self._masked_frame:
                self.stream.read_bytes(4, self._on_masking_key)
            else:
                self._read_frame_data(False)
        except StreamClosedError:
            self._abort()
    def _on_frame_length_64(self, data):
        """Handle an extended 64-bit payload length, then read the payload."""
        self._wire_bytes_in += len(data)
        self._frame_length = struct.unpack("!Q", data)[0]
        try:
            if self._masked_frame:
                self.stream.read_bytes(4, self._on_masking_key)
            else:
                self._read_frame_data(False)
        except StreamClosedError:
            self._abort()
    def _on_masking_key(self, data):
        """Store the 4-byte masking key, then read the masked payload."""
        self._wire_bytes_in += len(data)
        self._frame_mask = data
        try:
            self._read_frame_data(True)
        except StreamClosedError:
            self._abort()
    def _on_masked_frame_data(self, data):
        """Unmask the payload and continue as for an unmasked frame."""
        # Don't touch _wire_bytes_in; we'll do it in _on_frame_data.
        self._on_frame_data(_websocket_mask(self._frame_mask, data))
    def _on_frame_data(self, data):
        """Handle a complete frame payload.

        Reassembles fragmented messages, dispatches complete messages to
        _handle_message, and schedules the next frame read.
        """
        handled_future = None
        self._wire_bytes_in += len(data)
        if self._frame_opcode_is_control:
            # control frames may be interleaved with a series of fragmented
            # data frames, so control frames must not interact with
            # self._fragmented_*
            if not self._final_frame:
                # control frames must not be fragmented
                self._abort()
                return
            opcode = self._frame_opcode
        elif self._frame_opcode == 0:  # continuation frame
            if self._fragmented_message_buffer is None:
                # nothing to continue
                self._abort()
                return
            self._fragmented_message_buffer += data
            if self._final_frame:
                # Deliver the reassembled message under its original opcode.
                opcode = self._fragmented_message_opcode
                data = self._fragmented_message_buffer
                self._fragmented_message_buffer = None
        else:  # start of new data message
            if self._fragmented_message_buffer is not None:
                # can't start new message until the old one is finished
                self._abort()
                return
            if self._final_frame:
                opcode = self._frame_opcode
            else:
                self._fragmented_message_opcode = self._frame_opcode
                self._fragmented_message_buffer = data
        if self._final_frame:
            handled_future = self._handle_message(opcode, data)
        if not self.client_terminated:
            if handled_future:
                # on_message is a coroutine, process more frames once it's done.
                handled_future.add_done_callback(
                    lambda future: self._receive_frame())
            else:
                self._receive_frame()
    def _handle_message(self, opcode, data):
        """Execute on_message, returning its Future if it is a coroutine."""
        if self.client_terminated:
            return
        if self._frame_compressed:
            data = self._decompressor.decompress(data)
        if opcode == 0x1:
            # UTF-8 data
            self._message_bytes_in += len(data)
            try:
                decoded = data.decode("utf-8")
            except UnicodeDecodeError:
                # Invalid utf8 in a text frame is a protocol error.
                self._abort()
                return
            return self._run_callback(self.handler.on_message, decoded)
        elif opcode == 0x2:
            # Binary data
            self._message_bytes_in += len(data)
            return self._run_callback(self.handler.on_message, data)
        elif opcode == 0x8:
            # Close
            self.client_terminated = True
            if len(data) >= 2:
                self.handler.close_code = struct.unpack('>H', data[:2])[0]
            if len(data) > 2:
                self.handler.close_reason = to_unicode(data[2:])
            # Echo the received close code, if any (RFC 6455 section 5.5.1).
            self.close(self.handler.close_code)
        elif opcode == 0x9:
            # Ping: reply with a pong carrying the same payload.
            self._write_frame(True, 0xA, data)
            self._run_callback(self.handler.on_ping, data)
        elif opcode == 0xA:
            # Pong: record the time for the ping-timeout bookkeeping.
            self.last_pong = IOLoop.current().time()
            return self._run_callback(self.handler.on_pong, data)
        else:
            # Unknown opcode.
            self._abort()
    def close(self, code=None, reason=None):
        """Closes the WebSocket connection.

        Sends a close frame (with optional status ``code`` and textual
        ``reason``) if one has not been sent yet, then waits briefly for
        the peer's close frame before dropping the stream.
        """
        if not self.server_terminated:
            if not self.stream.closed():
                if code is None and reason is not None:
                    code = 1000  # "normal closure" status code
                if code is None:
                    close_data = b''
                else:
                    close_data = struct.pack('>H', code)
                if reason is not None:
                    close_data += utf8(reason)
                self._write_frame(True, 0x8, close_data)
            self.server_terminated = True
        if self.client_terminated:
            # Both sides have closed; tear down immediately.
            if self._waiting is not None:
                self.stream.io_loop.remove_timeout(self._waiting)
                self._waiting = None
            self.stream.close()
        elif self._waiting is None:
            # Give the client a few seconds to complete a clean shutdown,
            # otherwise just close the connection.
            self._waiting = self.stream.io_loop.add_timeout(
                self.stream.io_loop.time() + 5, self._abort)
@property
def ping_interval(self):
interval = self.handler.ping_interval
if interval is not None:
return interval
return 0
@property
def ping_timeout(self):
timeout = self.handler.ping_timeout
if timeout is not None:
return timeout
return max(3 * self.ping_interval, 30)
    def start_pinging(self):
        """Start sending periodic pings to keep the connection alive"""
        if self.ping_interval > 0:
            # Initialize both timestamps so the first timeout check passes.
            self.last_ping = self.last_pong = IOLoop.current().time()
            self.ping_callback = PeriodicCallback(
                self.periodic_ping, self.ping_interval * 1000)
            self.ping_callback.start()
    def periodic_ping(self):
        """Send a ping to keep the websocket alive

        Called periodically if the websocket_ping_interval is set and non-zero.
        Closes the connection if no pong has arrived within ping_timeout.
        """
        if self.stream.closed() and self.ping_callback is not None:
            # Connection already gone; stop the periodic timer.
            self.ping_callback.stop()
            return
        # Check for timeout on pong.  Make sure that we really have
        # sent a recent ping in case the machine with both server and
        # client has been suspended since the last ping.
        now = IOLoop.current().time()
        since_last_pong = now - self.last_pong
        since_last_ping = now - self.last_ping
        if (since_last_ping < 2 * self.ping_interval and
                since_last_pong > self.ping_timeout):
            self.close()
            return
        self.write_ping(b'')
        self.last_ping = now
class WebSocketClientConnection(simple_httpclient._HTTPConnection):
    """WebSocket client connection.

    This class should not be instantiated directly; use the
    `websocket_connect` function instead.
    """
    def __init__(self, io_loop, request, on_message_callback=None,
                 compression_options=None, ping_interval=None, ping_timeout=None,
                 max_message_size=None):
        self.compression_options = compression_options
        # Resolves to self once the handshake completes (or to an error).
        self.connect_future = TracebackFuture()
        self.protocol = None
        # Pending read_message() future, plus messages received before any
        # reader asked for them.
        self.read_future = None
        self.read_queue = collections.deque()
        # Random handshake key (Sec-WebSocket-Key).
        self.key = base64.b64encode(os.urandom(16))
        self._on_message_callback = on_message_callback
        self.close_code = self.close_reason = None
        self.ping_interval = ping_interval
        self.ping_timeout = ping_timeout
        self.max_message_size = max_message_size
        # Translate ws/wss to the http/https URL used for the handshake.
        scheme, sep, rest = request.url.partition(':')
        scheme = {'ws': 'http', 'wss': 'https'}[scheme]
        request.url = scheme + sep + rest
        request.headers.update({
            'Upgrade': 'websocket',
            'Connection': 'Upgrade',
            'Sec-WebSocket-Key': self.key,
            'Sec-WebSocket-Version': '13',
        })
        if self.compression_options is not None:
            # Always offer to let the server set our max_wbits (and even though
            # we don't offer it, we will accept a client_no_context_takeover
            # from the server).
            # TODO: set server parameters for deflate extension
            # if requested in self.compression_options.
            request.headers['Sec-WebSocket-Extensions'] = (
                'permessage-deflate; client_max_window_bits')
        self.tcp_client = TCPClient(io_loop=io_loop)
        super(WebSocketClientConnection, self).__init__(
            io_loop, None, request, lambda: None, self._on_http_response,
            104857600, self.tcp_client, 65536, 104857600)
    def close(self, code=None, reason=None):
        """Closes the websocket connection.

        ``code`` and ``reason`` are documented under
        `WebSocketHandler.close`.

        .. versionadded:: 3.2

        .. versionchanged:: 4.0
           Added the ``code`` and ``reason`` arguments.
        """
        if self.protocol is not None:
            self.protocol.close(code, reason)
            self.protocol = None
    def on_connection_close(self):
        # A dropped connection fails a pending connect attempt and signals
        # end-of-messages (None) to readers.
        if not self.connect_future.done():
            self.connect_future.set_exception(StreamClosedError())
        self.on_message(None)
        self.tcp_client.close()
        super(WebSocketClientConnection, self).on_connection_close()
    def _on_http_response(self, response):
        # Reached only when the server never upgraded the connection.
        if not self.connect_future.done():
            if response.error:
                self.connect_future.set_exception(response.error)
            else:
                self.connect_future.set_exception(WebSocketError(
                    "Non-websocket response"))
    def headers_received(self, start_line, headers):
        if start_line.code != 101:
            # Not "101 Switching Protocols": treat as a plain HTTP
            # response (eventually reaching _on_http_response).
            return super(WebSocketClientConnection, self).headers_received(
                start_line, headers)
        self.headers = headers
        self.protocol = self.get_websocket_protocol()
        self.protocol._process_server_headers(self.key, self.headers)
        self.protocol.start_pinging()
        self.protocol._receive_frame()
        if self._timeout is not None:
            self.io_loop.remove_timeout(self._timeout)
            self._timeout = None
        # Take ownership of the stream from the HTTP client machinery.
        self.stream = self.connection.detach()
        self.stream.set_close_callback(self.on_connection_close)
        # Once we've taken over the connection, clear the final callback
        # we set on the http request.  This deactivates the error handling
        # in simple_httpclient that would otherwise interfere with our
        # ability to see exceptions.
        self.final_callback = None
        self.connect_future.set_result(self)
    def write_message(self, message, binary=False):
        """Sends a message to the WebSocket server."""
        return self.protocol.write_message(message, binary)
    def read_message(self, callback=None):
        """Reads a message from the WebSocket server.

        If on_message_callback was specified at WebSocket
        initialization, this function will never return messages

        Returns a future whose result is the message, or None
        if the connection is closed. If a callback argument
        is given it will be called with the future when it is
        ready.
        """
        assert self.read_future is None
        future = TracebackFuture()
        if self.read_queue:
            # A message arrived before anyone asked; deliver immediately.
            future.set_result(self.read_queue.popleft())
        else:
            self.read_future = future
        if callback is not None:
            self.io_loop.add_future(future, callback)
        return future
    def on_message(self, message):
        # Deliver to the callback, a pending reader, or buffer for later.
        if self._on_message_callback:
            self._on_message_callback(message)
        elif self.read_future is not None:
            self.read_future.set_result(message)
            self.read_future = None
        else:
            self.read_queue.append(message)
    def on_pong(self, data):
        """Called when a pong frame is received; no-op by default."""
        pass
    def on_ping(self, data):
        """Called when a ping frame is received; no-op by default."""
        pass
    def get_websocket_protocol(self):
        # Clients must mask outgoing frames (RFC 6455).
        return WebSocketProtocol13(self, mask_outgoing=True,
                                   compression_options=self.compression_options)
def websocket_connect(url, io_loop=None, callback=None, connect_timeout=None,
                      on_message_callback=None, compression_options=None,
                      ping_interval=None, ping_timeout=None,
                      max_message_size=None):
    """Client-side websocket support.

    Takes a url and returns a Future whose result is a
    `WebSocketClientConnection`.

    ``compression_options`` is interpreted in the same way as the
    return value of `.WebSocketHandler.get_compression_options`.

    The connection supports two styles of operation. In the coroutine
    style, the application typically calls
    `~.WebSocketClientConnection.read_message` in a loop::

        conn = yield websocket_connect(url)
        while True:
            msg = yield conn.read_message()
            if msg is None: break
            # Do something with msg

    In the callback style, pass an ``on_message_callback`` to
    ``websocket_connect``. In both styles, a message of ``None``
    indicates that the connection has been closed.

    .. versionchanged:: 3.2
       Also accepts ``HTTPRequest`` objects in place of urls.

    .. versionchanged:: 4.1
       Added ``compression_options`` and ``on_message_callback``.
       The ``io_loop`` argument is deprecated.

    .. versionchanged:: 4.5
       Added the ``ping_interval``, ``ping_timeout``, and ``max_message_size``
       arguments, which have the same meaning as in `WebSocketHandler`.
    """
    if io_loop is None:
        io_loop = IOLoop.current()
    if isinstance(url, httpclient.HTTPRequest):
        assert connect_timeout is None
        request = url
        # Copy and convert the headers dict/object (see comments in
        # AsyncHTTPClient.fetch)
        request.headers = httputil.HTTPHeaders(request.headers)
    else:
        request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout)
    # Fill in defaults for any request options not explicitly set.
    request = httpclient._RequestProxy(
        request, httpclient.HTTPRequest._DEFAULTS)
    conn = WebSocketClientConnection(io_loop, request,
                                     on_message_callback=on_message_callback,
                                     compression_options=compression_options,
                                     ping_interval=ping_interval,
                                     ping_timeout=ping_timeout,
                                     max_message_size=max_message_size)
    if callback is not None:
        io_loop.add_future(conn.connect_future, callback)
    return conn.connect_future
| |
"""
A directive for including a matplotlib plot in a Sphinx document.
By default, in HTML output, `plot` will include a .png file with a
link to a high-res .png and .pdf. In LaTeX output, it will include a
.pdf.
The source code for the plot may be included in one of three ways:
1. **A path to a source file** as the argument to the directive::
.. plot:: path/to/plot.py
When a path to a source file is given, the content of the
directive may optionally contain a caption for the plot::
.. plot:: path/to/plot.py
This is the caption for the plot
Additionally, one may specify the name of a function to call (with
no arguments) immediately after importing the module::
.. plot:: path/to/plot.py plot_function1
2. Included as **inline content** to the directive::
.. plot::
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
img = mpimg.imread('_static/stinkbug.png')
imgplot = plt.imshow(img)
3. Using **doctest** syntax::
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
Options
-------
The ``plot`` directive supports the following options:
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. The default can be changed
using the `plot_include_source` variable in conf.py
encoding : str
If this source file is in a non-UTF8 or non-ASCII encoding,
the encoding must be specified using the `:encoding:` option.
The encoding will not be inferred using the ``-*- coding -*-``
metacomment.
context : bool or str
If provided, the code will be run in the context of all
previous plot directives for which the `:context:` option was
specified. This only applies to inline code plot directives,
not those run from files. If the ``:context: reset`` option is
specified, the context is reset for this and future plots, and
previous figures are closed prior to running the code.
``:context: close-figs`` keeps the context but closes previous figures
before running the code.
nofigs : bool
If specified, the code block will be run, but no figures will
be inserted. This is usually useful with the ``:context:``
option.
Additionally, this directive supports all of the options of the
`image` directive, except for `target` (since plot will add its own
target). These include `alt`, `height`, `width`, `scale`, `align` and
`class`.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_html_show_source_link
Whether to show a link to the source in HTML.
plot_pre_code
Code that should be executed before each plot.
plot_basedir
Base directory, to which ``plot::`` file names are relative
to. (If None or empty, file names are relative to the
directory where the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen. When passing from
the command line through sphinx_build the list should be passed as
suffix:dpi,suffix:dpi, ....
plot_html_show_formats
Whether to show links to the files in HTML.
plot_rcparams
A dictionary containing any non-standard rcParams that should
be applied before each plot.
plot_apply_rcparams
By default, rcParams are applied when `context` option is not used in
a plot directive. This configuration option overrides this behavior
and applies rcParams before each plot.
plot_working_directory
By default, the working directory will be changed to the directory of
the example, so the code can get at its data files, if any. Also its
path will be added to `sys.path` so it can import any helper modules
sitting beside it. This configuration option can be used to specify
a central directory (also added to `sys.path`) where data files and
helper modules for all code are located.
plot_template
Provide a customized template for preparing restructured text.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange
import sys, os, shutil, io, re, textwrap
from os.path import relpath
import traceback
import warnings
if not six.PY3:
import cStringIO
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.images import Image
align = Image.align
import sphinx
sphinx_version = sphinx.__version__.split(".")
# The split is necessary for sphinx beta versions where the string is
# '6b1'
sphinx_version = tuple([int(re.split('[^0-9]', x)[0])
                        for x in sphinx_version[:2]])
try:
    # Sphinx depends on either Jinja or Jinja2
    import jinja2
    def format_template(template, **kw):
        # Jinja2 API (modern Sphinx).
        return jinja2.Template(template).render(**kw)
except ImportError:
    import jinja
    def format_template(template, **kw):
        # Legacy Jinja 1 API fallback.
        return jinja.from_string(template, **kw)
import matplotlib
import matplotlib.cbook as cbook
try:
    # Select the non-interactive Agg backend.  Promote the "backend
    # already chosen" warning to an error so we can switch explicitly
    # via pyplot instead.
    with warnings.catch_warnings(record=True):
        warnings.simplefilter("error", UserWarning)
        matplotlib.use('Agg')
except UserWarning:
    import matplotlib.pyplot as plt
    plt.switch_backend("Agg")
else:
    import matplotlib.pyplot as plt
from matplotlib import _pylab_helpers
# Format version of this extension's output.
__version__ = 2
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def plot_directive(name, arguments, options, content, lineno,
                   content_offset, block_text, state, state_machine):
    # Directive entry point registered with docutils; all work happens in run().
    return run(arguments, content, options, state_machine, state, lineno)
# Expose the module docstring (the full option reference) as the
# directive's documentation.
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_context(arg):
if arg in [None, 'reset', 'close-figs']:
return arg
raise ValueError("argument should be None or 'reset' or 'close-figs'")
def _option_format(arg):
    """Validate the :format: option ('python' or 'doctest')."""
    return directives.choice(arg, ('python', 'doctest'))
def _option_align(arg):
    """Validate the :align: option against the image alignment choices."""
    return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
                                   "right"))
def mark_plot_labels(app, document):
    """
    To make plots referenceable, we need to move the reference from
    the "htmlonly" (or "latexonly") node to the actual figure node
    itself.
    """
    for name, explicit in six.iteritems(document.nametypes):
        if not explicit:
            # Skip implicitly generated names.
            continue
        labelid = document.nameids[name]
        if labelid is None:
            continue
        node = document.ids[labelid]
        if node.tagname in ('html_only', 'latex_only'):
            for n in node:
                if n.tagname == 'figure':
                    sectname = name
                    for c in n:
                        if c.tagname == 'caption':
                            # Prefer the figure caption as the link text.
                            sectname = c.astext()
                            break
                    # Move the label from the wrapper node to the figure.
                    node['ids'].remove(labelid)
                    node['names'].remove(name)
                    n['ids'].append(labelid)
                    n['names'].append(name)
                    document.settings.env.labels[name] = \
                        document.settings.env.docname, labelid, sectname
                    break
def setup(app):
    """Sphinx extension entry point: register the ``plot`` directive and
    its configuration values."""
    # Stash the app/config/confdir for later use by run_code and friends.
    setup.app = app
    setup.config = app.config
    setup.confdir = app.confdir
    # Directive options: the standard image options plus plot-specific ones.
    options = {'alt': directives.unchanged,
               'height': directives.length_or_unitless,
               'width': directives.length_or_percentage_or_unitless,
               'scale': directives.nonnegative_int,
               'align': _option_align,
               'class': directives.class_option,
               'include-source': _option_boolean,
               'format': _option_format,
               'context': _option_context,
               'nofigs': directives.flag,
               'encoding': directives.encoding
               }
    app.add_directive('plot', plot_directive, True, (0, 2, False), **options)
    app.add_config_value('plot_pre_code', None, True)
    app.add_config_value('plot_include_source', False, True)
    app.add_config_value('plot_html_show_source_link', True, True)
    app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
    app.add_config_value('plot_basedir', None, True)
    app.add_config_value('plot_html_show_formats', True, True)
    app.add_config_value('plot_rcparams', {}, True)
    app.add_config_value('plot_apply_rcparams', False, True)
    app.add_config_value('plot_working_directory', None, True)
    app.add_config_value('plot_template', None, True)
    app.connect(str('doctree-read'), mark_plot_labels)
    metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
    return metadata
#------------------------------------------------------------------------------
# Doctest handling
#------------------------------------------------------------------------------
def contains_doctest(text):
    """Return True if *text* looks like a doctest transcript rather than
    plain Python source."""
    try:
        compile(text, '<string>', 'exec')
    except SyntaxError:
        pass
    else:
        # Compiles as-is, so it is plain Python, not doctest-formatted.
        return False
    return re.search(r'^\s*>>>', text, re.M) is not None
def unescape_doctest(text):
    """
    Extract code from a piece of text, which contains either Python code
    or doctests.

    Doctest prompt lines (``>>>`` / ``...``) are stripped down to their
    code; other non-blank lines are turned into comments.
    """
    if not contains_doctest(text):
        return text
    prompt = re.compile(r'^\s*(>>>|\.\.\.) (.*)$')
    pieces = []
    for line in text.split("\n"):
        m = prompt.match(line)
        if m:
            pieces.append(m.group(2))
        elif line.strip():
            pieces.append("# " + line.strip())
        else:
            pieces.append("")
    return "\n".join(pieces) + "\n"
def split_code_at_show(text):
    """
    Split code into pieces at each ``plt.show()`` call (inclusive).
    """
    is_doctest = contains_doctest(text)
    show_marker = '>>> plt.show()' if is_doctest else 'plt.show()'
    parts = []
    current = []
    for line in text.split("\n"):
        current.append(line)
        if line.strip() == show_marker:
            parts.append("\n".join(current))
            current = []
    trailing = "\n".join(current)
    if trailing.strip():
        parts.append(trailing)
    return parts
def remove_coding(text):
    """
    Remove any PEP 263 coding comment, which six.exec_ doesn't like.
    """
    # Use a raw string for the pattern: the original non-raw literal relied
    # on "\s"/"\*" passing through unrecognized, which raises invalid
    # escape-sequence warnings (and eventually errors) on modern Python.
    sub_re = re.compile(r"^#\s*-\*-\s*coding:\s*.*-\*-$", flags=re.MULTILINE)
    return sub_re.sub("", text)
#------------------------------------------------------------------------------
# Template
#------------------------------------------------------------------------------
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.{{ default_fmt }}
{% for option in options -%}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{{ caption }}
{% endfor %}
{{ only_latex }}
{% for img in images %}
{% if 'pdf' in img.formats -%}
.. figure:: {{ build_dir }}/{{ img.basename }}.pdf
{% for option in options -%}
{{ option }}
{% endfor %}
{{ caption }}
{% endif -%}
{% endfor %}
{{ only_texinfo }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.png
{% for option in options -%}
{{ option }}
{% endfor %}
{% endfor %}
"""
exception_template = """
.. htmlonly::
[`source code <%(linkdir)s/%(basename)s.py>`__]
Exception occurred rendering plot.
"""
# the context of the plot for all directives specified with the
# :context: option
plot_context = dict()
class ImageFile(object):
    """A generated plot image, tracked as basename + directory plus the
    list of format suffixes that have been written for it."""

    def __init__(self, basename, dirname):
        self.basename = basename
        self.dirname = dirname
        self.formats = []

    def filename(self, format):
        """Full path of this image with the given *format* suffix."""
        return os.path.join(self.dirname, "%s.%s" % (self.basename, format))

    def filenames(self):
        """Full paths for every format recorded so far."""
        return [self.filename(extension) for extension in self.formats]
def out_of_date(original, derived):
    """
    Return True if *derived* needs to be rebuilt from *original*; both
    are full file paths.

    A missing derived file is always out of date; a missing original
    never forces a rebuild.
    """
    if not os.path.exists(derived):
        return True
    return (os.path.exists(original) and
            os.stat(derived).st_mtime < os.stat(original).st_mtime)
class PlotError(RuntimeError):
    """Raised when executing the plot code or rendering its figures fails."""
    pass
def run_code(code, code_path, ns=None, function_name=None):
    """
    Import a Python module from a path, and run the function given by
    name, if function_name is not None.

    Returns the namespace dict the code ran in.  Any failure (including
    SystemExit) is re-raised as PlotError with the full traceback.
    """
    # Change the working directory to the directory of the example, so
    # it can get at its data files, if any.  Add its path to sys.path
    # so it can import any helper modules sitting beside it.
    if six.PY2:
        pwd = os.getcwdu()
    else:
        pwd = os.getcwd()
    old_sys_path = list(sys.path)
    if setup.config.plot_working_directory is not None:
        try:
            os.chdir(setup.config.plot_working_directory)
        except OSError as err:
            raise OSError(str(err) + '\n`plot_working_directory` option in'
                          'Sphinx configuration file must be a valid '
                          'directory path')
        except TypeError as err:
            raise TypeError(str(err) + '\n`plot_working_directory` option in '
                            'Sphinx configuration file must be a string or '
                            'None')
        sys.path.insert(0, setup.config.plot_working_directory)
    elif code_path is not None:
        dirname = os.path.abspath(os.path.dirname(code_path))
        os.chdir(dirname)
        sys.path.insert(0, dirname)
    # Reset sys.argv
    old_sys_argv = sys.argv
    sys.argv = [code_path]
    # Redirect stdout
    stdout = sys.stdout
    if six.PY3:
        sys.stdout = io.StringIO()
    else:
        sys.stdout = cStringIO.StringIO()
    # Assign a do-nothing print function to the namespace.  There
    # doesn't seem to be any other way to provide a way to (not) print
    # that works correctly across Python 2 and 3.
    def _dummy_print(*arg, **kwarg):
        pass
    try:
        try:
            code = unescape_doctest(code)
            if ns is None:
                ns = {}
            if not ns:
                # Fresh namespace: run the configured (or default) pre-code.
                if setup.config.plot_pre_code is None:
                    six.exec_(six.text_type("import numpy as np\n" +
                              "from matplotlib import pyplot as plt\n"), ns)
                else:
                    six.exec_(six.text_type(setup.config.plot_pre_code), ns)
            ns['print'] = _dummy_print
            if "__main__" in code:
                six.exec_("__name__ = '__main__'", ns)
            code = remove_coding(code)
            six.exec_(code, ns)
            if function_name is not None:
                six.exec_(function_name + "()", ns)
        except (Exception, SystemExit) as err:
            # Surface any failure (including SystemExit) as a PlotError
            # carrying the full traceback text.
            raise PlotError(traceback.format_exc())
    finally:
        # Restore interpreter state even if the code raised.
        os.chdir(pwd)
        sys.argv = old_sys_argv
        sys.path[:] = old_sys_path
        sys.stdout = stdout
    return ns
def clear_state(plot_rcparams, close=True):
    """Reset matplotlib state between plots: optionally close all open
    figures, restore rc defaults, then apply *plot_rcparams*."""
    if close:
        plt.close('all')
    matplotlib.rc_file_defaults()
    matplotlib.rcParams.update(plot_rcparams)
def get_plot_formats(config):
    """Normalise ``config.plot_formats`` into a list of ``(format, dpi)`` pairs.

    Accepts either a comma-separated string (Sphinx < 1.3), ``"fmt"`` /
    ``"fmt:dpi"`` strings, or two-element tuples/lists.  Raises ``PlotError``
    for anything else.
    """
    default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 200}
    plot_formats = config.plot_formats
    if isinstance(plot_formats, six.string_types):
        # Sphinx < 1.3 delivered a comma-separated string; split it so the
        # rest of the loop always sees the list Sphinx 1.3+ provides.
        plot_formats = plot_formats.split(',')
    formats = []
    for fmt in plot_formats:
        if isinstance(fmt, six.string_types):
            if ':' in fmt:
                name, dpi = fmt.split(':')
                formats.append((str(name), int(dpi)))
            else:
                formats.append((fmt, default_dpi.get(fmt, 80)))
        elif type(fmt) in (tuple, list) and len(fmt) == 2:
            formats.append((str(fmt[0]), int(fmt[1])))
        else:
            raise PlotError('invalid image format "%r" in plot_formats' % fmt)
    return formats
def render_figures(code, code_path, output_dir, output_base, context,
                   function_name, config, context_reset=False,
                   close_figs=False):
    """
    Run a pyplot script and save the images in *output_dir*.

    Save the images under *output_dir* with file names derived from
    *output_base*.

    Returns a list of ``(code_piece, [ImageFile, ...])`` pairs.  If every
    expected output image is already up to date with respect to
    *code_path*, the cached images are returned without re-executing the
    code.
    """
    formats = get_plot_formats(config)

    # -- Try to determine if all images already exist

    code_pieces = split_code_at_show(code)

    # Look for single-figure output files first
    all_exists = True
    img = ImageFile(output_base, output_dir)
    for format, dpi in formats:
        if out_of_date(code_path, img.filename(format)):
            all_exists = False
            break
        img.formats.append(format)

    if all_exists:
        return [(code, [img])]

    # Then look for multi-figure output files
    results = []
    all_exists = True
    for i, code_piece in enumerate(code_pieces):
        images = []
        # 1000 is an arbitrary upper bound on figures per code piece.
        # NOTE: was `xrange`, which only exists on Python 2; `range`
        # iterates identically on both Python 2 and 3.
        for j in range(1000):
            if len(code_pieces) > 1:
                img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
            else:
                img = ImageFile('%s_%02d' % (output_base, j), output_dir)
            for format, dpi in formats:
                if out_of_date(code_path, img.filename(format)):
                    all_exists = False
                    break
                img.formats.append(format)

            # assume that if we have one, we have them all
            if not all_exists:
                all_exists = (j > 0)
                break
            images.append(img)
        if not all_exists:
            break
        results.append((code_piece, images))

    if all_exists:
        return results

    # We didn't find the files, so build them
    results = []
    if context:
        # A shared namespace lets later plot directives see earlier state.
        ns = plot_context
    else:
        ns = {}

    if context_reset:
        clear_state(config.plot_rcparams)
        plot_context.clear()

    close_figs = not context or close_figs

    for i, code_piece in enumerate(code_pieces):

        if not context or config.plot_apply_rcparams:
            clear_state(config.plot_rcparams, close_figs)
        elif close_figs:
            plt.close('all')

        run_code(code_piece, code_path, ns, function_name)

        images = []
        fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
        for j, figman in enumerate(fig_managers):
            # Only suffix the file name when there is any ambiguity.
            if len(fig_managers) == 1 and len(code_pieces) == 1:
                img = ImageFile(output_base, output_dir)
            elif len(code_pieces) == 1:
                img = ImageFile("%s_%02d" % (output_base, j), output_dir)
            else:
                img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
                                output_dir)
            images.append(img)
            for format, dpi in formats:
                try:
                    figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
                except Exception as err:
                    # Surface the full traceback to the Sphinx reporter.
                    raise PlotError(traceback.format_exc())
                img.formats.append(format)

        results.append((code_piece, images))

    if not context or config.plot_apply_rcparams:
        clear_state(config.plot_rcparams, close=not context)

    return results
def run(arguments, content, options, state_machine, state, lineno):
    """
    Body of the ``.. plot::`` directive: locate the plot source (external
    file argument or inline content), render the figures via
    ``render_figures``, splice generated reST back into *state_machine*,
    and copy images and the script into the builder's output tree.

    Returns a list of docutils system messages (empty on success).
    """
    document = state_machine.document
    config = document.settings.env.config
    nofigs = 'nofigs' in options
    formats = get_plot_formats(config)
    default_fmt = formats[0][0]
    options.setdefault('include-source', config.plot_include_source)
    keep_context = 'context' in options
    context_opt = None if not keep_context else options['context']
    rst_file = document.attributes['source']
    rst_dir = os.path.dirname(rst_file)
    if len(arguments):
        # Directive argument given: the plot source lives in its own file.
        if not config.plot_basedir:
            source_file_name = os.path.join(setup.app.builder.srcdir,
                                            directives.uri(arguments[0]))
        else:
            source_file_name = os.path.join(setup.confdir, config.plot_basedir,
                                            directives.uri(arguments[0]))
        # If there is content, it will be passed as a caption.
        caption = '\n'.join(content)
        # If the optional function name is provided, use it
        if len(arguments) == 2:
            function_name = arguments[1]
        else:
            function_name = None
        with io.open(source_file_name, 'r', encoding='utf-8') as fd:
            code = fd.read()
        output_base = os.path.basename(source_file_name)
    else:
        # No argument: the directive content itself is the plot code.
        source_file_name = rst_file
        code = textwrap.dedent("\n".join(map(str, content)))
        # Per-document counter keeps inline plots' output names unique.
        counter = document.attributes.get('_plot_counter', 0) + 1
        document.attributes['_plot_counter'] = counter
        base, ext = os.path.splitext(os.path.basename(source_file_name))
        output_base = '%s-%d.py' % (base, counter)
        function_name = None
        caption = ''
    base, source_ext = os.path.splitext(output_base)
    if source_ext in ('.py', '.rst', '.txt'):
        output_base = base
    else:
        source_ext = ''
    # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
    output_base = output_base.replace('.', '-')
    # is it in doctest format?
    is_doctest = contains_doctest(code)
    if 'format' in options:
        # Explicit :format: option overrides the doctest auto-detection.
        if options['format'] == 'python':
            is_doctest = False
        else:
            is_doctest = True
    # determine output directory name fragment
    source_rel_name = relpath(source_file_name, setup.confdir)
    source_rel_dir = os.path.dirname(source_rel_name)
    while source_rel_dir.startswith(os.path.sep):
        source_rel_dir = source_rel_dir[1:]
    # build_dir: where to place output files (temporarily)
    build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
                             'plot_directive',
                             source_rel_dir)
    # get rid of .. in paths, also changes pathsep
    # see note in Python docs for warning about symbolic links on Windows.
    # need to compare source and dest paths at end
    build_dir = os.path.normpath(build_dir)
    if not os.path.exists(build_dir):
        os.makedirs(build_dir)
    # output_dir: final location in the builder's directory
    dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
                                            source_rel_dir))
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)  # no problem here for me, but just use built-ins
    # how to link to files from the RST file
    dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
                                 source_rel_dir).replace(os.path.sep, '/')
    try:
        build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
    except ValueError:
        # on Windows, relpath raises ValueError when path and start are on
        # different mounts/drives
        build_dir_link = build_dir
    source_link = dest_dir_link + '/' + output_base + source_ext
    # make figures
    try:
        results = render_figures(code,
                                 source_file_name,
                                 build_dir,
                                 output_base,
                                 keep_context,
                                 function_name,
                                 config,
                                 context_reset=context_opt == 'reset',
                                 close_figs=context_opt == 'close-figs')
        errors = []
    except PlotError as err:
        # A failed plot becomes a docutils warning, not a build abort.
        reporter = state.memo.reporter
        sm = reporter.system_message(
            2, "Exception occurred in plotting %s\n from %s:\n%s" % (output_base,
                                                                     source_file_name, err),
            line=lineno)
        results = [(code, [])]
        errors = [sm]
    # Properly indent the caption
    caption = '\n'.join(' ' + line.strip()
                        for line in caption.split('\n'))
    # generate output restructuredtext
    total_lines = []
    for j, (code_piece, images) in enumerate(results):
        if options['include-source']:
            if is_doctest:
                lines = ['']
                lines += [row.rstrip() for row in code_piece.split('\n')]
            else:
                lines = ['.. code-block:: python', '']
                lines += [' %s' % row.rstrip()
                          for row in code_piece.split('\n')]
            source_code = "\n".join(lines)
        else:
            source_code = ""
        if nofigs:
            images = []
        # Only pass image-related directive options through to the template.
        opts = [':%s: %s' % (key, val) for key, val in six.iteritems(options)
                if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
        only_html = ".. only:: html"
        only_latex = ".. only:: latex"
        only_texinfo = ".. only:: texinfo"
        # Not-None src_link signals the need for a source link in the generated
        # html
        if j == 0 and config.plot_html_show_source_link:
            src_link = source_link
        else:
            src_link = None
        result = format_template(
            config.plot_template or TEMPLATE,
            default_fmt=default_fmt,
            dest_dir=dest_dir_link,
            build_dir=build_dir_link,
            source_link=src_link,
            multi_image=len(images) > 1,
            only_html=only_html,
            only_latex=only_latex,
            only_texinfo=only_texinfo,
            options=opts,
            images=images,
            source_code=source_code,
            html_show_formats=config.plot_html_show_formats and len(images),
            caption=caption)
        total_lines.extend(result.split("\n"))
        total_lines.extend("\n")
    if total_lines:
        state_machine.insert_input(total_lines, source=source_file_name)
    # copy image files to builder's output directory, if necessary
    if not os.path.exists(dest_dir):
        cbook.mkdirs(dest_dir)
    for code_piece, images in results:
        for img in images:
            for fn in img.filenames():
                destimg = os.path.join(dest_dir, os.path.basename(fn))
                if fn != destimg:
                    shutil.copyfile(fn, destimg)
    # copy script (if necessary)
    target_name = os.path.join(dest_dir, output_base + source_ext)
    with io.open(target_name, 'w', encoding="utf-8") as f:
        if source_file_name == rst_file:
            code_escaped = unescape_doctest(code)
        else:
            code_escaped = code
        f.write(code_escaped)
    return errors
| |
from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from moto.ec2.utils import filters_from_querystring
class ElasticBlockStore(BaseResponse):
    """Query-API handlers for the EBS portion of EC2: volumes, snapshots,
    attachments, and snapshot permissions.

    Each handler reads its parameters from the request, delegates to
    ``self.ec2_backend``, and renders one of the module-level XML templates.
    Handlers guarded by ``is_not_dryrun`` return ``None`` on a dry run.
    """

    def attach_volume(self):
        vol_id = self._get_param("VolumeId")
        inst_id = self._get_param("InstanceId")
        device = self._get_param("Device")
        if self.is_not_dryrun("AttachVolume"):
            attachment = self.ec2_backend.attach_volume(vol_id, inst_id, device)
            return self.response_template(ATTACHED_VOLUME_RESPONSE).render(
                attachment=attachment
            )

    def copy_snapshot(self):
        source_snapshot_id = self._get_param("SourceSnapshotId")
        source_region = self._get_param("SourceRegion")
        description = self._get_param("Description")
        snapshot_tags = self._parse_tag_specification("TagSpecification").get(
            "snapshot", {}
        )
        if self.is_not_dryrun("CopySnapshot"):
            snapshot = self.ec2_backend.copy_snapshot(
                source_snapshot_id, source_region, description
            )
            snapshot.add_tags(snapshot_tags)
            return self.response_template(COPY_SNAPSHOT_RESPONSE).render(
                snapshot=snapshot
            )

    def create_snapshot(self):
        vol_id = self._get_param("VolumeId")
        description = self._get_param("Description")
        snapshot_tags = self._parse_tag_specification("TagSpecification").get(
            "snapshot", {}
        )
        if self.is_not_dryrun("CreateSnapshot"):
            snapshot = self.ec2_backend.create_snapshot(vol_id, description)
            snapshot.add_tags(snapshot_tags)
            return self.response_template(CREATE_SNAPSHOT_RESPONSE).render(
                snapshot=snapshot
            )

    def create_volume(self):
        size = self._get_param("Size")
        zone = self._get_param("AvailabilityZone")
        snapshot_id = self._get_param("SnapshotId")
        volume_tags = self._parse_tag_specification("TagSpecification").get(
            "volume", {}
        )
        encrypted = self._get_bool_param("Encrypted", if_none=False)
        kms_key_id = self._get_param("KmsKeyId")
        if self.is_not_dryrun("CreateVolume"):
            volume = self.ec2_backend.create_volume(
                size, zone, snapshot_id, encrypted, kms_key_id
            )
            volume.add_tags(volume_tags)
            return self.response_template(CREATE_VOLUME_RESPONSE).render(
                volume=volume
            )

    def delete_snapshot(self):
        snap_id = self._get_param("SnapshotId")
        if self.is_not_dryrun("DeleteSnapshot"):
            self.ec2_backend.delete_snapshot(snap_id)
            return DELETE_SNAPSHOT_RESPONSE

    def delete_volume(self):
        vol_id = self._get_param("VolumeId")
        if self.is_not_dryrun("DeleteVolume"):
            self.ec2_backend.delete_volume(vol_id)
            return DELETE_VOLUME_RESPONSE

    def describe_snapshots(self):
        filters = filters_from_querystring(self.querystring)
        snapshot_ids = self._get_multi_param("SnapshotId")
        snapshots = self.ec2_backend.describe_snapshots(
            snapshot_ids=snapshot_ids, filters=filters
        )
        return self.response_template(DESCRIBE_SNAPSHOTS_RESPONSE).render(
            snapshots=snapshots
        )

    def describe_volumes(self):
        filters = filters_from_querystring(self.querystring)
        volume_ids = self._get_multi_param("VolumeId")
        volumes = self.ec2_backend.describe_volumes(
            volume_ids=volume_ids, filters=filters
        )
        return self.response_template(DESCRIBE_VOLUMES_RESPONSE).render(
            volumes=volumes
        )

    def describe_volume_attribute(self):
        raise NotImplementedError(
            "ElasticBlockStore.describe_volume_attribute is not yet implemented"
        )

    def describe_volume_status(self):
        raise NotImplementedError(
            "ElasticBlockStore.describe_volume_status is not yet implemented"
        )

    def detach_volume(self):
        vol_id = self._get_param("VolumeId")
        inst_id = self._get_param("InstanceId")
        device = self._get_param("Device")
        if self.is_not_dryrun("DetachVolume"):
            attachment = self.ec2_backend.detach_volume(vol_id, inst_id, device)
            return self.response_template(DETATCH_VOLUME_RESPONSE).render(
                attachment=attachment
            )

    def enable_volume_io(self):
        if self.is_not_dryrun("EnableVolumeIO"):
            raise NotImplementedError(
                "ElasticBlockStore.enable_volume_io is not yet implemented"
            )

    def import_volume(self):
        if self.is_not_dryrun("ImportVolume"):
            raise NotImplementedError(
                "ElasticBlockStore.import_volume is not yet implemented"
            )

    def describe_snapshot_attribute(self):
        snap_id = self._get_param("SnapshotId")
        groups = self.ec2_backend.get_create_volume_permission_groups(snap_id)
        user_ids = self.ec2_backend.get_create_volume_permission_userids(snap_id)
        return self.response_template(DESCRIBE_SNAPSHOT_ATTRIBUTES_RESPONSE).render(
            snapshot_id=snap_id, groups=groups, userIds=user_ids
        )

    def modify_snapshot_attribute(self):
        snap_id = self._get_param("SnapshotId")
        operation = self._get_param("OperationType")
        groups = self._get_multi_param("UserGroup")
        user_ids = self._get_multi_param("UserId")
        if self.is_not_dryrun("ModifySnapshotAttribute"):
            # "add" grants createVolumePermission; "remove" revokes it.
            if operation == "add":
                self.ec2_backend.add_create_volume_permission(
                    snap_id, user_ids=user_ids, groups=groups
                )
            elif operation == "remove":
                self.ec2_backend.remove_create_volume_permission(
                    snap_id, user_ids=user_ids, groups=groups
                )
            return MODIFY_SNAPSHOT_ATTRIBUTE_RESPONSE

    def modify_volume_attribute(self):
        if self.is_not_dryrun("ModifyVolumeAttribute"):
            raise NotImplementedError(
                "ElasticBlockStore.modify_volume_attribute is not yet implemented"
            )

    def reset_snapshot_attribute(self):
        if self.is_not_dryrun("ResetSnapshotAttribute"):
            raise NotImplementedError(
                "ElasticBlockStore.reset_snapshot_attribute is not yet implemented"
            )
# Jinja2/XML response templates for the EBS handlers above.  Each is rendered
# with the context named in the corresponding handler (e.g. ``volume=...``).

# CreateVolume: rendered with a volume; tags and the KMS key appear only
# when present on the volume.
CREATE_VOLUME_RESPONSE = """<CreateVolumeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<volumeId>{{ volume.id }}</volumeId>
<size>{{ volume.size }}</size>
{% if volume.snapshot_id %}
<snapshotId>{{ volume.snapshot_id }}</snapshotId>
{% else %}
<snapshotId/>
{% endif %}
<encrypted>{{ 'true' if volume.encrypted else 'false' }}</encrypted>
{% if volume.encrypted %}
<kmsKeyId>{{ volume.kms_key_id }}</kmsKeyId>
{% endif %}
<availabilityZone>{{ volume.zone.name }}</availabilityZone>
<status>creating</status>
<createTime>{{ volume.create_time}}</createTime>
{% if volume.get_tags() %}
<tagSet>
{% for tag in volume.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
{% endif %}
<volumeType>standard</volumeType>
</CreateVolumeResponse>"""

# DescribeVolumes: rendered with an iterable of volumes; each item includes
# its attachment (if any) and tags.
DESCRIBE_VOLUMES_RESPONSE = """<DescribeVolumesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<volumeSet>
{% for volume in volumes %}
<item>
<volumeId>{{ volume.id }}</volumeId>
<size>{{ volume.size }}</size>
{% if volume.snapshot_id %}
<snapshotId>{{ volume.snapshot_id }}</snapshotId>
{% else %}
<snapshotId/>
{% endif %}
<encrypted>{{ 'true' if volume.encrypted else 'false' }}</encrypted>
{% if volume.encrypted %}
<kmsKeyId>{{ volume.kms_key_id }}</kmsKeyId>
{% endif %}
<availabilityZone>{{ volume.zone.name }}</availabilityZone>
<status>{{ volume.status }}</status>
<createTime>{{ volume.create_time}}</createTime>
<attachmentSet>
{% if volume.attachment %}
<item>
<volumeId>{{ volume.id }}</volumeId>
<instanceId>{{ volume.attachment.instance.id }}</instanceId>
<device>{{ volume.attachment.device }}</device>
<status>attached</status>
<attachTime>{{volume.attachment.attach_time}}</attachTime>
<deleteOnTermination>false</deleteOnTermination>
</item>
{% endif %}
</attachmentSet>
{% if volume.get_tags() %}
<tagSet>
{% for tag in volume.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
{% endif %}
<volumeType>standard</volumeType>
</item>
{% endfor %}
</volumeSet>
</DescribeVolumesResponse>"""

# DeleteVolume: static acknowledgement; no template variables.
DELETE_VOLUME_RESPONSE = """<DeleteVolumeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DeleteVolumeResponse>"""

# AttachVolume: rendered with an attachment (volume, instance, device).
ATTACHED_VOLUME_RESPONSE = """<AttachVolumeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<volumeId>{{ attachment.volume.id }}</volumeId>
<instanceId>{{ attachment.instance.id }}</instanceId>
<device>{{ attachment.device }}</device>
<status>attaching</status>
<attachTime>{{attachment.attach_time}}</attachTime>
</AttachVolumeResponse>"""

# DetachVolume: rendered with the removed attachment.  NOTE: the constant
# name is historically misspelled ("DETATCH"); kept for compatibility with
# existing references.
DETATCH_VOLUME_RESPONSE = """<DetachVolumeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<volumeId>{{ attachment.volume.id }}</volumeId>
<instanceId>{{ attachment.instance.id }}</instanceId>
<device>{{ attachment.device }}</device>
<status>detaching</status>
<attachTime>2013-10-04T17:38:53.000Z</attachTime>
</DetachVolumeResponse>"""

# CreateSnapshot: rendered with a snapshot; progress is a fixed 60%.
CREATE_SNAPSHOT_RESPONSE = """<CreateSnapshotResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<snapshotId>{{ snapshot.id }}</snapshotId>
<volumeId>{{ snapshot.volume.id }}</volumeId>
<status>pending</status>
<startTime>{{ snapshot.start_time}}</startTime>
<progress>60%</progress>
<ownerId>{{ snapshot.owner_id }}</ownerId>
<volumeSize>{{ snapshot.volume.size }}</volumeSize>
<description>{{ snapshot.description }}</description>
<encrypted>{{ snapshot.encrypted }}</encrypted>
<tagSet>
{% for tag in snapshot.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
</CreateSnapshotResponse>"""

# CopySnapshot: rendered with the newly copied snapshot (note the newer
# 2016-11-15 API namespace).
COPY_SNAPSHOT_RESPONSE = """<CopySnapshotResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<snapshotId>{{ snapshot.id }}</snapshotId>
<tagSet>
{% for tag in snapshot.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
</CopySnapshotResponse>"""

# DescribeSnapshots: rendered with an iterable of snapshots.
DESCRIBE_SNAPSHOTS_RESPONSE = """<DescribeSnapshotsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<snapshotSet>
{% for snapshot in snapshots %}
<item>
<snapshotId>{{ snapshot.id }}</snapshotId>
<volumeId>{{ snapshot.volume.id }}</volumeId>
<status>{{ snapshot.status }}</status>
<startTime>{{ snapshot.start_time}}</startTime>
<progress>100%</progress>
<ownerId>{{ snapshot.owner_id }}</ownerId>
<volumeSize>{{ snapshot.volume.size }}</volumeSize>
<description>{{ snapshot.description }}</description>
<encrypted>{{ snapshot.encrypted }}</encrypted>
<tagSet>
{% for tag in snapshot.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
</item>
{% endfor %}
</snapshotSet>
</DescribeSnapshotsResponse>"""

# DeleteSnapshot: static acknowledgement; no template variables.
DELETE_SNAPSHOT_RESPONSE = """<DeleteSnapshotResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DeleteSnapshotResponse>"""
# DescribeSnapshotAttribute: rendered with ``snapshot_id``, ``groups`` and
# ``userIds``.  The snapshot id now comes from the render context (the
# handler already passes ``snapshot_id=...``) instead of the previously
# hard-coded sample value "snap-a0332ee0", so the response reflects the
# snapshot that was actually queried.
DESCRIBE_SNAPSHOT_ATTRIBUTES_RESPONSE = """
<DescribeSnapshotAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>a9540c9f-161a-45d8-9cc1-1182b89ad69f</requestId>
<snapshotId>{{ snapshot_id }}</snapshotId>
<createVolumePermission>
{% for group in groups %}
<item>
<group>{{ group }}</group>
</item>
{% endfor %}
{% for userId in userIds %}
<item>
<userId>{{ userId }}</userId>
</item>
{% endfor %}
</createVolumePermission>
</DescribeSnapshotAttributeResponse>
"""
# ModifySnapshotAttribute: static acknowledgement; no template variables.
MODIFY_SNAPSHOT_ATTRIBUTE_RESPONSE = """
<ModifySnapshotAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>666d2944-9276-4d6a-be12-1f4ada972fd8</requestId>
<return>true</return>
</ModifySnapshotAttributeResponse>
"""
| |
from datetime import timedelta
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.core import mail
from django.core.paginator import Paginator
from django.utils import timezone
from wagtail.tests.models import SimplePage, EventPage, EventPageCarouselItem, StandardIndex, BusinessIndex, BusinessChild, BusinessSubIndex, TaggedPage
from wagtail.tests.utils import unittest, WagtailTestUtils
from wagtail.wagtailcore.models import Page, PageRevision
from wagtail.wagtailcore.signals import page_published, page_unpublished
from wagtail.wagtailusers.models import UserProfile
def submittable_timestamp(timestamp):
    """
    Render *timestamp* the way the go_live_at / expire_at form fields expect:
    "YYYY-MM-DD hh:mm:ss" with no timezone suffix.

    The admin interprets submitted values in the server's timezone
    (settings.TIME_ZONE), so the value is first converted with
    ``timezone.localtime`` to keep client and server in agreement about
    what the timestamp means.
    """
    localized = timezone.localtime(timestamp)
    # Drop everything from the microseconds onward (which also removes the
    # UTC-offset suffix of an aware datetime's string form).
    return str(localized).partition('.')[0]
class TestPageExplorer(TestCase, WagtailTestUtils):
    """Exercises the admin page-explorer listing views."""

    def setUp(self):
        # Explorer views are exercised against the site root (pk 2)
        self.root_page = Page.objects.get(id=2)

        # One child page so listings have something to show
        self.child_page = SimplePage(title="Hello world!", slug="hello-world")
        self.root_page.add_child(instance=self.child_page)

        self.login()

    def test_explore(self):
        resp = self.client.get(reverse('wagtailadmin_explore', args=(self.root_page.id, )))
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'wagtailadmin/pages/index.html')
        self.assertEqual(self.root_page, resp.context['parent_page'])
        self.assertTrue(resp.context['pages'].paginator.object_list.filter(id=self.child_page.id).exists())

    def test_explore_root(self):
        resp = self.client.get(reverse('wagtailadmin_explore_root'))
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'wagtailadmin/pages/index.html')
        self.assertEqual(Page.objects.get(id=1), resp.context['parent_page'])
        self.assertTrue(resp.context['pages'].paginator.object_list.filter(id=self.root_page.id).exists())

    def test_ordering(self):
        resp = self.client.get(reverse('wagtailadmin_explore_root'), {'ordering': 'content_type'})
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'wagtailadmin/pages/index.html')
        self.assertEqual(resp.context['ordering'], 'content_type')

    def test_invalid_ordering(self):
        # An unknown ordering silently falls back to the default ('title')
        resp = self.client.get(reverse('wagtailadmin_explore_root'), {'ordering': 'invalid_order'})
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'wagtailadmin/pages/index.html')
        self.assertEqual(resp.context['ordering'], 'title')

    def test_reordering(self):
        resp = self.client.get(reverse('wagtailadmin_explore_root'), {'ordering': 'ord'})
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'wagtailadmin/pages/index.html')
        self.assertEqual(resp.context['ordering'], 'ord')

        # Manual ('ord') ordering must disable pagination entirely
        self.assertNotIsInstance(resp.context['pages'], Paginator)

    def make_pages(self):
        # Enough children to guarantee several listing pages
        for n in range(150):
            self.root_page.add_child(instance=SimplePage(
                title="Page " + str(n),
                slug="page-" + str(n),
            ))

    def test_pagination(self):
        self.make_pages()

        resp = self.client.get(reverse('wagtailadmin_explore', args=(self.root_page.id, )), {'p': 2})
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'wagtailadmin/pages/index.html')

        # The requested listing page is the one served
        self.assertEqual(resp.context['pages'].number, 2)

    def test_pagination_invalid(self):
        self.make_pages()

        resp = self.client.get(reverse('wagtailadmin_explore', args=(self.root_page.id, )), {'p': 'Hello World!'})
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'wagtailadmin/pages/index.html')

        # A non-numeric page number falls back to page one
        self.assertEqual(resp.context['pages'].number, 1)

    def test_pagination_out_of_range(self):
        self.make_pages()

        resp = self.client.get(reverse('wagtailadmin_explore', args=(self.root_page.id, )), {'p': 99999})
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'wagtailadmin/pages/index.html')

        # An out-of-range page number clamps to the final page
        self.assertEqual(resp.context['pages'].number, resp.context['pages'].paginator.num_pages)
class TestPageCreation(TestCase, WagtailTestUtils):
def setUp(self):
# Find root page
self.root_page = Page.objects.get(id=2)
# Login
self.user = self.login()
def test_add_subpage(self):
response = self.client.get(reverse('wagtailadmin_pages_add_subpage', args=(self.root_page.id, )))
self.assertEqual(response.status_code, 200)
def test_add_subpage_bad_permissions(self):
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
# Get add subpage page
response = self.client.get(reverse('wagtailadmin_pages_add_subpage', args=(self.root_page.id, )))
# Check that the user recieved a 403 response
self.assertEqual(response.status_code, 403)
def test_add_subpage_nonexistantparent(self):
response = self.client.get(reverse('wagtailadmin_pages_add_subpage', args=(100000, )))
self.assertEqual(response.status_code, 404)
def test_create_simplepage(self):
response = self.client.get(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id)))
self.assertEqual(response.status_code, 200)
def test_create_simplepage_bad_permissions(self):
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
# Get page
response = self.client.get(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id, )))
# Check that the user recieved a 403 response
self.assertEqual(response.status_code, 403)
def test_create_simplepage_post(self):
post_data = {
'title': "New page!",
'content': "Some content",
'slug': 'hello-world',
}
response = self.client.post(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id)), post_data)
# Should be redirected to explorer page
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Find the page and check it
page = Page.objects.get(path__startswith=self.root_page.path, slug='hello-world').specific
self.assertEqual(page.title, post_data['title'])
self.assertIsInstance(page, SimplePage)
self.assertFalse(page.live)
def test_create_simplepage_scheduled(self):
go_live_at = timezone.now() + timedelta(days=1)
expire_at = timezone.now() + timedelta(days=2)
post_data = {
'title': "New page!",
'content': "Some content",
'slug': 'hello-world',
'go_live_at': submittable_timestamp(go_live_at),
'expire_at': submittable_timestamp(expire_at),
}
response = self.client.post(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id)), post_data)
# Should be redirected to explorer page
self.assertEqual(response.status_code, 302)
# Find the page and check the scheduled times
page = Page.objects.get(path__startswith=self.root_page.path, slug='hello-world').specific
self.assertEqual(page.go_live_at.date(), go_live_at.date())
self.assertEqual(page.expire_at.date(), expire_at.date())
self.assertEqual(page.expired, False)
self.assertTrue(page.status_string, "draft")
# No revisions with approved_go_live_at
self.assertFalse(PageRevision.objects.filter(page=page).exclude(approved_go_live_at__isnull=True).exists())
def test_create_simplepage_scheduled_go_live_before_expiry(self):
post_data = {
'title': "New page!",
'content': "Some content",
'slug': 'hello-world',
'go_live_at': submittable_timestamp(timezone.now() + timedelta(days=2)),
'expire_at': submittable_timestamp(timezone.now() + timedelta(days=1)),
}
response = self.client.post(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id)), post_data)
self.assertEqual(response.status_code, 200)
# Check that a form error was raised
self.assertFormError(response, 'form', 'go_live_at', "Go live date/time must be before expiry date/time")
self.assertFormError(response, 'form', 'expire_at', "Go live date/time must be before expiry date/time")
def test_create_simplepage_scheduled_expire_in_the_past(self):
post_data = {
'title': "New page!",
'content': "Some content",
'slug': 'hello-world',
'expire_at': submittable_timestamp(timezone.now() + timedelta(days=-1)),
}
response = self.client.post(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id)), post_data)
self.assertEqual(response.status_code, 200)
# Check that a form error was raised
self.assertFormError(response, 'form', 'expire_at', "Expiry date/time must be in the future")
def test_create_simplepage_post_publish(self):
# Connect a mock signal handler to page_published signal
signal_fired = [False]
signal_page = [None]
def page_published_handler(sender, instance, **kwargs):
signal_fired[0] = True
signal_page[0] = instance
page_published.connect(page_published_handler)
# Post
post_data = {
'title': "New page!",
'content': "Some content",
'slug': 'hello-world',
'action-publish': "Publish",
}
response = self.client.post(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id)), post_data)
# Should be redirected to explorer page
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Find the page and check it
page = Page.objects.get(path__startswith=self.root_page.path, slug='hello-world').specific
self.assertEqual(page.title, post_data['title'])
self.assertIsInstance(page, SimplePage)
self.assertTrue(page.live)
# Check that the page_published signal was fired
self.assertTrue(signal_fired[0])
self.assertEqual(signal_page[0], page)
self.assertEqual(signal_page[0], signal_page[0].specific)
    def test_create_simplepage_post_publish_scheduled(self):
        # Publishing with a future go_live_at must defer the actual go-live:
        # a revision is approved for go-live, but the page is not yet live.
        go_live_at = timezone.now() + timedelta(days=1)
        expire_at = timezone.now() + timedelta(days=2)
        post_data = {
            'title': "New page!",
            'content': "Some content",
            'slug': 'hello-world',
            'action-publish': "Publish",
            'go_live_at': submittable_timestamp(go_live_at),
            'expire_at': submittable_timestamp(expire_at),
        }
        response = self.client.post(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id)), post_data)

        # Should be redirected to explorer page
        self.assertEqual(response.status_code, 302)

        # Find the page and check it
        page = Page.objects.get(path__startswith=self.root_page.path, slug='hello-world').specific
        self.assertEqual(page.go_live_at.date(), go_live_at.date())
        self.assertEqual(page.expire_at.date(), expire_at.date())
        self.assertEqual(page.expired, False)

        # A revision with approved_go_live_at should exist now
        self.assertTrue(PageRevision.objects.filter(page=page).exclude(approved_go_live_at__isnull=True).exists())

        # But Page won't be live
        self.assertFalse(page.live)
        self.assertTrue(page.status_string, "scheduled")
def test_create_simplepage_post_submit(self):
    """Submitting a new page for moderation saves a draft and emails the moderator."""
    # A moderator must exist so the notification email has a recipient
    get_user_model().objects.create_superuser('moderator', 'moderator@email.com', 'password')

    response = self.client.post(
        reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id)),
        {
            'title': "New page!",
            'content': "Some content",
            'slug': 'hello-world',
            'action-submit': "Submit",
        })

    # Back to the explorer on success
    self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))

    # The new page exists as a draft of the right type
    page = Page.objects.get(path__startswith=self.root_page.path, slug='hello-world').specific
    self.assertEqual(page.title, "New page!")
    self.assertIsInstance(page, SimplePage)
    self.assertFalse(page.live)

    # Its latest revision is awaiting moderation...
    self.assertTrue(page.get_latest_revision().submitted_for_moderation)

    # ...and the moderator was notified by email
    self.assertEqual(len(mail.outbox), 1)
    self.assertEqual(mail.outbox[0].to, ['moderator@email.com'])
    self.assertEqual(mail.outbox[0].subject, 'The page "New page!" has been submitted for moderation')
def test_create_simplepage_post_existing_slug(self):
    """Creating a page whose slug clashes with an existing sibling fails validation."""
    # An existing sibling page claims the slug first
    self.child_page = SimplePage()
    self.child_page.title = "Hello world!"
    self.child_page.slug = "hello-world"
    self.root_page.add_child(instance=self.child_page)

    # Try to publish a second page under the same slug
    response = self.client.post(
        reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id)),
        {
            'title': "New page!",
            'content': "Some content",
            'slug': 'hello-world',
            'action-publish': "Publish",
        })

    # The save fails, so no redirect happens...
    self.assertEqual(response.status_code, 200)

    # ...and the slug field carries a validation error
    self.assertFormError(response, 'form', 'slug', "This slug is already in use")
def test_create_nonexistantparent(self):
    """Requesting the create view under a non-existent parent returns 404."""
    missing_parent_id = 100000
    create_url = reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', missing_parent_id))
    self.assertEqual(self.client.get(create_url).status_code, 404)
def test_create_nonpagetype(self):
    """Requesting the create view for a content type that is not a page returns 404."""
    create_url = reverse('wagtailadmin_pages_create', args=('wagtailimages', 'image', self.root_page.id))
    self.assertEqual(self.client.get(create_url).status_code, 404)
def test_preview_on_create(self):
    """Previewing an unsaved page renders it with its treebeard fields filled in."""
    preview_url = reverse('wagtailadmin_pages_preview_on_create',
                          args=('tests', 'simplepage', self.root_page.id))
    response = self.client.post(preview_url, {
        'title': "New page!",
        'content': "Some content",
        'slug': 'hello-world',
        'action-submit': "Submit",
    })

    # The page renders through its own template with the posted content
    self.assertEqual(response.status_code, 200)
    self.assertTemplateUsed(response, 'tests/simple_page.html')
    self.assertContains(response, "New page!")

    # The (unsaved) page object is positioned under root_page in the tree
    previewed = response.context['self']
    self.assertEqual(previewed.depth, self.root_page.depth + 1)
    self.assertTrue(previewed.path.startswith(self.root_page.path))
    self.assertEqual(previewed.get_parent(), self.root_page)
class TestPageEdit(TestCase, WagtailTestUtils):
    """Tests for the page edit view: plain edits, scheduled publishing,
    publish/submit actions, slug validation and previewing."""

    def setUp(self):
        # Find root page
        self.root_page = Page.objects.get(id=2)

        # Add a live child page with an initial revision
        self.child_page = SimplePage()
        self.child_page.title = "Hello world!"
        self.child_page.slug = "hello-world"
        self.child_page.live = True
        self.root_page.add_child(instance=self.child_page)
        self.child_page.save_revision()

        # Add event page (to test edit handlers)
        self.event_page = EventPage()
        self.event_page.title = "Event page"
        self.event_page.slug = "event-page"
        self.root_page.add_child(instance=self.event_page)

        # Login
        self.user = self.login()

    def test_page_edit(self):
        """The edit view loads for an editable page."""
        response = self.client.get(reverse('wagtailadmin_pages_edit', args=(self.event_page.id, )))
        self.assertEqual(response.status_code, 200)

    def test_page_edit_bad_permissions(self):
        """A user with admin access but no edit permission gets a 403."""
        # Remove privileges from user
        self.user.is_superuser = False
        self.user.user_permissions.add(
            Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
        )
        self.user.save()

        # Get edit page
        response = self.client.get(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )))

        # Check that the user received a 403 response
        self.assertEqual(response.status_code, 403)

    def test_page_edit_post(self):
        """A plain save marks the page as having unpublished changes."""
        post_data = {
            'title': "I've been edited!",
            'content': "Some content",
            'slug': 'hello-world',
        }
        response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)

        # Should be redirected to explorer page
        self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))

        # The page should have "has_unpublished_changes" flag set
        child_page_new = SimplePage.objects.get(id=self.child_page.id)
        self.assertTrue(child_page_new.has_unpublished_changes)

    def test_edit_post_scheduled(self):
        """Saving go_live_at/expire_at stores them in the revision, not the live page."""
        # put go_live_at and expire_at several days away from the current date, to avoid
        # false matches in content_json__contains tests
        go_live_at = timezone.now() + timedelta(days=10)
        expire_at = timezone.now() + timedelta(days=20)
        post_data = {
            'title': "I've been edited!",
            'content': "Some content",
            'slug': 'hello-world',
            'go_live_at': submittable_timestamp(go_live_at),
            'expire_at': submittable_timestamp(expire_at),
        }
        response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)

        # Should be redirected to explorer page
        self.assertEqual(response.status_code, 302)

        child_page_new = SimplePage.objects.get(id=self.child_page.id)

        # The page will still be live
        self.assertTrue(child_page_new.live)

        # A revision with approved_go_live_at should not exist
        self.assertFalse(PageRevision.objects.filter(page=child_page_new).exclude(approved_go_live_at__isnull=True).exists())

        # But a revision with go_live_at and expire_at in their content json *should* exist
        self.assertTrue(PageRevision.objects.filter(page=child_page_new, content_json__contains=str(go_live_at.date())).exists())
        self.assertTrue(PageRevision.objects.filter(page=child_page_new, content_json__contains=str(expire_at.date())).exists())

    def test_edit_scheduled_go_live_before_expiry(self):
        """go_live_at after expire_at is rejected with errors on both fields."""
        post_data = {
            'title': "I've been edited!",
            'content': "Some content",
            'slug': 'hello-world',
            'go_live_at': submittable_timestamp(timezone.now() + timedelta(days=2)),
            'expire_at': submittable_timestamp(timezone.now() + timedelta(days=1)),
        }
        response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)

        self.assertEqual(response.status_code, 200)

        # Check that a form error was raised
        self.assertFormError(response, 'form', 'go_live_at', "Go live date/time must be before expiry date/time")
        self.assertFormError(response, 'form', 'expire_at', "Go live date/time must be before expiry date/time")

    def test_edit_scheduled_expire_in_the_past(self):
        """An expire_at in the past is rejected."""
        post_data = {
            'title': "I've been edited!",
            'content': "Some content",
            'slug': 'hello-world',
            'expire_at': submittable_timestamp(timezone.now() + timedelta(days=-1)),
        }
        response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)

        self.assertEqual(response.status_code, 200)

        # Check that a form error was raised
        self.assertFormError(response, 'form', 'expire_at', "Expiry date/time must be in the future")

    def test_page_edit_post_publish(self):
        """Publishing from the edit view updates the page and fires page_published."""
        # Connect a mock signal handler to page_published signal
        signal_fired = [False]
        signal_page = [None]

        def page_published_handler(sender, instance, **kwargs):
            signal_fired[0] = True
            signal_page[0] = instance
        page_published.connect(page_published_handler)

        # Tests publish from edit page
        post_data = {
            'title': "I've been edited!",
            'content': "Some content",
            'slug': 'hello-world',
            'action-publish': "Publish",
        }
        response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)

        # Should be redirected to explorer page
        self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))

        # Check that the page was edited
        child_page_new = SimplePage.objects.get(id=self.child_page.id)
        self.assertEqual(child_page_new.title, post_data['title'])

        # Check that the page_published signal was fired
        self.assertTrue(signal_fired[0])
        self.assertEqual(signal_page[0], child_page_new)
        self.assertEqual(signal_page[0], signal_page[0].specific)

        # The page shouldn't have "has_unpublished_changes" flag set
        self.assertFalse(child_page_new.has_unpublished_changes)

    def test_edit_post_publish_scheduled(self):
        """Publishing with a future go_live_at unpublishes the page and schedules it."""
        go_live_at = timezone.now() + timedelta(days=1)
        expire_at = timezone.now() + timedelta(days=2)
        post_data = {
            'title': "I've been edited!",
            'content': "Some content",
            'slug': 'hello-world',
            'action-publish': "Publish",
            'go_live_at': submittable_timestamp(go_live_at),
            'expire_at': submittable_timestamp(expire_at),
        }
        response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)

        # Should be redirected to explorer page
        self.assertEqual(response.status_code, 302)

        child_page_new = SimplePage.objects.get(id=self.child_page.id)

        # The page should not be live anymore
        self.assertFalse(child_page_new.live)

        # Instead a revision with approved_go_live_at should now exist
        self.assertTrue(PageRevision.objects.filter(page=child_page_new).exclude(approved_go_live_at__isnull=True).exists())

    def test_edit_post_publish_now_an_already_scheduled(self):
        """Publishing immediately replaces a previously scheduled publication."""
        # First let's publish a page with a go_live_at in the future
        go_live_at = timezone.now() + timedelta(days=1)
        expire_at = timezone.now() + timedelta(days=2)
        post_data = {
            'title': "I've been edited!",
            'content': "Some content",
            'slug': 'hello-world',
            'action-publish': "Publish",
            'go_live_at': submittable_timestamp(go_live_at),
            'expire_at': submittable_timestamp(expire_at),
        }
        response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)

        # Should be redirected to explorer page
        self.assertEqual(response.status_code, 302)

        child_page_new = SimplePage.objects.get(id=self.child_page.id)

        # The page should not be live anymore
        self.assertFalse(child_page_new.live)

        # Instead a revision with approved_go_live_at should now exist
        self.assertTrue(PageRevision.objects.filter(page=child_page_new).exclude(approved_go_live_at__isnull=True).exists())

        # Now, let's edit it and publish it right now (an empty go_live_at
        # means "publish immediately"; the previous dead assignment of
        # timezone.now() to go_live_at was removed as it was never used)
        post_data = {
            'title': "I've been edited!",
            'content': "Some content",
            'slug': 'hello-world',
            'action-publish': "Publish",
            'go_live_at': "",
        }
        response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)

        # Should be redirected to explorer page
        self.assertEqual(response.status_code, 302)

        child_page_new = SimplePage.objects.get(id=self.child_page.id)

        # The page should be live now
        self.assertTrue(child_page_new.live)

        # And a revision with approved_go_live_at should not exist
        self.assertFalse(PageRevision.objects.filter(page=child_page_new).exclude(approved_go_live_at__isnull=True).exists())

    def test_page_edit_post_submit(self):
        """Submitting an edit for moderation flags the revision and emails the moderator."""
        # Create a moderator user for testing email
        moderator = get_user_model().objects.create_superuser('moderator', 'moderator@email.com', 'password')

        # Tests submitting from edit page
        post_data = {
            'title': "I've been edited!",
            'content': "Some content",
            'slug': 'hello-world',
            'action-submit': "Submit",
        }
        response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)

        # Should be redirected to explorer page
        self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))

        # The page should have "has_unpublished_changes" flag set
        child_page_new = SimplePage.objects.get(id=self.child_page.id)
        self.assertTrue(child_page_new.has_unpublished_changes)

        # The latest revision for the page should now be in moderation
        self.assertTrue(child_page_new.get_latest_revision().submitted_for_moderation)

        # Check that the moderator got an email
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].to, ['moderator@email.com'])
        self.assertEqual(mail.outbox[0].subject, 'The page "Hello world!" has been submitted for moderation') # Note: should this be "I've been edited!"?

    def test_page_edit_post_existing_slug(self):
        """Changing a slug to one already used by a sibling fails validation."""
        # This tests the existing slug checking on page edit

        # Create a page
        self.child_page = SimplePage()
        self.child_page.title = "Hello world 2"
        self.child_page.slug = "hello-world2"
        self.root_page.add_child(instance=self.child_page)

        # Attempt to change the slug to one thats already in use
        post_data = {
            'title': "Hello world 2",
            'slug': 'hello-world',
            'action-submit': "Submit",
        }
        response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)

        # Should not be redirected (as the save should fail)
        self.assertEqual(response.status_code, 200)

        # Check that a form error was raised
        self.assertFormError(response, 'form', 'slug', "This slug is already in use")

    def test_preview_on_edit(self):
        """Previewing an edit renders the edited content without saving."""
        post_data = {
            'title': "I've been edited!",
            'content': "Some content",
            'slug': 'hello-world',
            'action-submit': "Submit",
        }
        response = self.client.post(reverse('wagtailadmin_pages_preview_on_edit', args=(self.child_page.id, )), post_data)

        # Check the response
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'tests/simple_page.html')
        self.assertContains(response, "I've been edited!")
class TestPageEditReordering(TestCase, WagtailTestUtils):
    """Tests that InlinePanel child ordering survives saves and validation errors."""

    def setUp(self):
        # Find root page
        self.root_page = Page.objects.get(id=2)

        # Add event page with three carousel items in a known initial order
        self.event_page = EventPage()
        self.event_page.title = "Event page"
        self.event_page.slug = "event-page"
        self.event_page.carousel_items = [
            EventPageCarouselItem(caption='1234567', sort_order=1),
            EventPageCarouselItem(caption='7654321', sort_order=2),
            EventPageCarouselItem(caption='abcdefg', sort_order=3),
        ]
        self.root_page.add_child(instance=self.event_page)

        # Login
        self.user = self.login()

    def _reorder_post_data(self, title):
        """Build the edit-form POST payload that moves the third carousel item first.

        The two reorder tests previously duplicated this 24-key payload verbatim;
        ``title`` is parameterised so callers can trigger a validation error by
        passing an empty string.
        """
        data = {
            'title': title,
            'slug': 'event-page',

            'date_from': '01/01/2014',
            'cost': '$10',
            'audience': 'public',
            'location': 'somewhere',

            'related_links-INITIAL_FORMS': 0,
            'related_links-MAX_NUM_FORMS': 1000,
            'related_links-TOTAL_FORMS': 0,

            'speakers-INITIAL_FORMS': 0,
            'speakers-MAX_NUM_FORMS': 1000,
            'speakers-TOTAL_FORMS': 0,

            'carousel_items-INITIAL_FORMS': 3,
            'carousel_items-MAX_NUM_FORMS': 1000,
            'carousel_items-TOTAL_FORMS': 3,
        }

        # New ORDER values: item 0 -> position 2, item 1 -> 3, item 2 -> 1
        items = self.event_page.carousel_items.all()
        for index, order in enumerate([2, 3, 1]):
            data['carousel_items-%d-id' % index] = items[index].id
            data['carousel_items-%d-caption' % index] = items[index].caption
            data['carousel_items-%d-ORDER' % index] = order
        return data

    def check_order(self, response, expected_order):
        """Assert that the carousel InlinePanel children appear in expected_order."""
        # NOTE(review): children[0].children[9] hard-codes the panel's position
        # in the edit handler tree — fragile if the EventPage panels change.
        inline_panel = response.context['edit_handler'].children[0].children[9]
        order = [child.form.instance.caption for child in inline_panel.children]
        self.assertEqual(order, expected_order)

    def test_order(self):
        """The edit form initially shows the carousel items in sort order."""
        response = self.client.get(reverse('wagtailadmin_pages_edit', args=(self.event_page.id, )))

        self.assertEqual(response.status_code, 200)
        self.check_order(response, ['1234567', '7654321', 'abcdefg'])

    def test_reorder(self):
        """Posting new ORDER values persists the reordering."""
        response = self.client.post(
            reverse('wagtailadmin_pages_edit', args=(self.event_page.id, )),
            self._reorder_post_data("Event page"))

        # Should be redirected to explorer page
        self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))

        # Check order
        response = self.client.get(reverse('wagtailadmin_pages_edit', args=(self.event_page.id, )))
        self.assertEqual(response.status_code, 200)
        self.check_order(response, ['abcdefg', '1234567', '7654321'])

    def test_reorder_with_validation_error(self):
        """A failed save (empty title) must still re-render items in the new order."""
        response = self.client.post(
            reverse('wagtailadmin_pages_edit', args=(self.event_page.id, )),
            self._reorder_post_data(""))

        self.assertEqual(response.status_code, 200)
        self.check_order(response, ['abcdefg', '1234567', '7654321'])
class TestPageDelete(TestCase, WagtailTestUtils):
    """Tests for the page delete view, its permission check and its signals."""

    def setUp(self):
        # Create a child page under the site root, then log in
        self.root_page = Page.objects.get(id=2)

        self.child_page = SimplePage()
        self.child_page.title = "Hello world!"
        self.child_page.slug = "hello-world"
        self.root_page.add_child(instance=self.child_page)

        self.user = self.login()

    def test_page_delete(self):
        """The delete-confirmation view loads for a deletable page."""
        delete_url = reverse('wagtailadmin_pages_delete', args=(self.child_page.id, ))
        self.assertEqual(self.client.get(delete_url).status_code, 200)

    def test_page_delete_bad_permissions(self):
        """A user with admin access but no delete permission gets a 403."""
        self.user.is_superuser = False
        admin_permission = Permission.objects.get(
            content_type__app_label='wagtailadmin', codename='access_admin')
        self.user.user_permissions.add(admin_permission)
        self.user.save()

        response = self.client.get(reverse('wagtailadmin_pages_delete', args=(self.child_page.id, )))
        self.assertEqual(response.status_code, 403)

    def test_page_delete_post(self):
        """Deleting a live page removes it and fires page_unpublished."""
        received = {'fired': False, 'page': None}

        def on_unpublished(sender, instance, **kwargs):
            received['fired'] = True
            received['page'] = instance
        page_unpublished.connect(on_unpublished)

        # A bit of POST data is required or the view won't treat this as a POST
        response = self.client.post(
            reverse('wagtailadmin_pages_delete', args=(self.child_page.id, )),
            {'hello': 'world'})

        # Redirected back to the explorer, and the page is gone
        self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
        self.assertEqual(
            Page.objects.filter(path__startswith=self.root_page.path, slug='hello-world').count(), 0)

        # The unpublish signal fired with the specific page instance
        self.assertTrue(received['fired'])
        self.assertEqual(received['page'], self.child_page)
        self.assertEqual(received['page'], received['page'].specific)

    def test_page_delete_notlive_post(self):
        """Deleting a page that is not live must not fire page_unpublished."""
        # Unpublish the page first
        self.child_page.live = False
        self.child_page.save()

        received = {'fired': False}

        def on_unpublished(sender, instance, **kwargs):
            received['fired'] = True
        page_unpublished.connect(on_unpublished)

        # A bit of POST data is required or the view won't treat this as a POST
        response = self.client.post(
            reverse('wagtailadmin_pages_delete', args=(self.child_page.id, )),
            {'hello': 'world'})

        # Redirected back to the explorer, and the page is gone
        self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
        self.assertEqual(
            Page.objects.filter(path__startswith=self.root_page.path, slug='hello-world').count(), 0)

        # The page was never live, so no unpublish signal
        self.assertFalse(received['fired'])
class TestPageSearch(TestCase, WagtailTestUtils):
    """Tests for the admin page-search view: rendering, AJAX, pagination."""

    def setUp(self):
        self.login()

    def get(self, params=None, **extra):
        """Shortcut for GETting the page-search view with query parameters."""
        return self.client.get(reverse('wagtailadmin_pages_search'), params or {}, **extra)

    def test_view(self):
        """The search view renders with no query."""
        response = self.get()
        self.assertTemplateUsed(response, 'wagtailadmin/pages/search.html')
        self.assertEqual(response.status_code, 200)

    def test_search(self):
        """A query string is passed through to the template context."""
        response = self.get({'q': "Hello"})
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/pages/search.html')
        self.assertEqual(response.context['query_string'], "Hello")

    def test_ajax(self):
        """An AJAX request gets the results partial, not the full page."""
        response = self.get({'q': "Hello"}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)
        self.assertTemplateNotUsed(response, 'wagtailadmin/pages/search.html')
        self.assertTemplateUsed(response, 'wagtailadmin/pages/search_results.html')
        self.assertEqual(response.context['query_string'], "Hello")

    def test_pagination(self):
        """Out-of-range or garbage page numbers never break the view."""
        for page_param in ('0', '1', '-1', '9999', 'Not a page'):
            response = self.get({'q': "Hello", 'p': page_param})
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'wagtailadmin/pages/search.html')

    def test_root_can_appear_in_search_results(self):
        """The root page itself is searchable."""
        response = self.get({'q': "roo"})
        self.assertEqual(response.status_code, 200)
        # 'pages' list in the response should contain root
        results = response.context['pages']
        self.assertTrue(any([r.slug == 'root' for r in results]))
class TestPageMove(TestCase, WagtailTestUtils):
    """Tests for the page move views and their permission checks."""

    def setUp(self):
        # Two sibling sections with a test page inside section A, then log in
        self.root_page = Page.objects.get(id=2)

        def make_page(parent, title, slug):
            # Local helper: create and attach a SimplePage under ``parent``
            page = SimplePage()
            page.title = title
            page.slug = slug
            parent.add_child(instance=page)
            return page

        self.section_a = make_page(self.root_page, "Section A", "section-a")
        self.section_b = make_page(self.root_page, "Section B", "section-b")
        self.test_page = make_page(self.section_a, "Hello world!", "hello-world")

        self.user = self.login()

    def test_page_move(self):
        """The choose-destination view loads."""
        move_url = reverse('wagtailadmin_pages_move', args=(self.test_page.id, ))
        self.assertEqual(self.client.get(move_url).status_code, 200)

    def test_page_move_bad_permissions(self):
        """A user with admin access but no move permission gets a 403."""
        self.user.is_superuser = False
        admin_permission = Permission.objects.get(
            content_type__app_label='wagtailadmin', codename='access_admin')
        self.user.user_permissions.add(admin_permission)
        self.user.save()

        response = self.client.get(reverse('wagtailadmin_pages_move', args=(self.test_page.id, )))
        self.assertEqual(response.status_code, 403)

    def test_page_move_confirm(self):
        """The move-confirmation view loads for a valid destination."""
        confirm_url = reverse('wagtailadmin_pages_move_confirm',
                              args=(self.test_page.id, self.section_b.id))
        self.assertEqual(self.client.get(confirm_url).status_code, 200)

    def test_page_set_page_position(self):
        """The set-page-position view loads."""
        position_url = reverse('wagtailadmin_pages_set_page_position', args=(self.test_page.id, ))
        self.assertEqual(self.client.get(position_url).status_code, 200)
class TestPageUnpublish(TestCase, WagtailTestUtils):
    """Tests for the unpublish view: confirmation page, errors, permissions, signal."""

    def setUp(self):
        self.user = self.login()

        # A live page to unpublish
        self.root_page = Page.objects.get(id=2)
        self.page = SimplePage(
            title="Hello world!",
            slug='hello-world',
            live=True,
        )
        self.root_page.add_child(instance=self.page)

    def test_unpublish_view(self):
        """GETting the unpublish view shows the confirmation page."""
        response = self.client.get(reverse('wagtailadmin_pages_unpublish', args=(self.page.id, )))

        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/pages/confirm_unpublish.html')

    def test_unpublish_view_invalid_page_id(self):
        """An unknown page id yields a 404."""
        response = self.client.get(reverse('wagtailadmin_pages_unpublish', args=(12345, )))
        self.assertEqual(response.status_code, 404)

    def test_unpublish_view_bad_permissions(self):
        """A user with admin access but no unpublish permission gets a 403."""
        self.user.is_superuser = False
        admin_permission = Permission.objects.get(
            content_type__app_label='wagtailadmin', codename='access_admin')
        self.user.user_permissions.add(admin_permission)
        self.user.save()

        response = self.client.get(reverse('wagtailadmin_pages_unpublish', args=(self.page.id, )))
        self.assertEqual(response.status_code, 403)

    def test_unpublish_view_post(self):
        """POSTing to the unpublish view takes the page offline and fires page_unpublished."""
        received = {'fired': False, 'page': None}

        def on_unpublished(sender, instance, **kwargs):
            received['fired'] = True
            received['page'] = instance
        page_unpublished.connect(on_unpublished)

        response = self.client.post(reverse('wagtailadmin_pages_unpublish', args=(self.page.id, )), {
            'foo': "Must post something or the view won't see this as a POST request",
        })

        # Redirected back to the explorer; the page is no longer live
        self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
        self.assertFalse(SimplePage.objects.get(id=self.page.id).live)

        # The unpublish signal fired with the specific page instance
        self.assertTrue(received['fired'])
        self.assertEqual(received['page'], self.page)
        self.assertEqual(received['page'], received['page'].specific)
class TestApproveRejectModeration(TestCase, WagtailTestUtils):
    """Tests for approving/rejecting a revision in moderation and previewing it."""

    def setUp(self):
        self.submitter = get_user_model().objects.create_superuser(
            username='submitter',
            email='submitter@email.com',
            password='password',
        )

        self.user = self.login()

        # A draft page whose latest revision is awaiting moderation
        root_page = Page.objects.get(id=2)
        self.page = SimplePage(
            title="Hello world!",
            slug='hello-world',
            live=False,
        )
        root_page.add_child(instance=self.page)

        self.page.save_revision(user=self.submitter, submitted_for_moderation=True)
        self.revision = self.page.get_latest_revision()

    def _submit_post(self, url_name, revision_id):
        """POST (with a dummy payload so the view sees a POST) to a moderation view."""
        return self.client.post(reverse(url_name, args=(revision_id, )), {
            'foo': "Must post something or the view won't see this as a POST request",
        })

    def test_approve_moderation_view(self):
        """Approving a revision publishes the page and fires page_published."""
        received = {'fired': False, 'page': None}

        def on_published(sender, instance, **kwargs):
            received['fired'] = True
            received['page'] = instance
        page_published.connect(on_published)

        response = self._submit_post('wagtailadmin_pages_approve_moderation', self.revision.id)

        # Redirected to the dashboard; the page is now live
        self.assertRedirects(response, reverse('wagtailadmin_home'))
        self.assertTrue(Page.objects.get(id=self.page.id).live)

        # The publish signal fired with the specific page instance
        self.assertTrue(received['fired'])
        self.assertEqual(received['page'], self.page)
        self.assertEqual(received['page'], received['page'].specific)

    def test_approve_moderation_view_bad_revision_id(self):
        """An unknown revision id yields a 404."""
        response = self._submit_post('wagtailadmin_pages_approve_moderation', 12345)
        self.assertEqual(response.status_code, 404)

    def test_approve_moderation_view_bad_permissions(self):
        """A user without moderation permission gets a 403."""
        self.user.is_superuser = False
        admin_permission = Permission.objects.get(
            content_type__app_label='wagtailadmin', codename='access_admin')
        self.user.user_permissions.add(admin_permission)
        self.user.save()

        response = self._submit_post('wagtailadmin_pages_approve_moderation', self.revision.id)
        self.assertEqual(response.status_code, 403)

    def test_reject_moderation_view(self):
        """Rejecting a revision leaves the page offline and clears the moderation flag."""
        response = self._submit_post('wagtailadmin_pages_reject_moderation', self.revision.id)

        # Redirected to the dashboard; the page stays offline
        self.assertRedirects(response, reverse('wagtailadmin_home'))
        self.assertFalse(Page.objects.get(id=self.page.id).live)

        # The revision is no longer submitted for moderation
        self.assertFalse(PageRevision.objects.get(id=self.revision.id).submitted_for_moderation)

    def test_reject_moderation_view_bad_revision_id(self):
        """An unknown revision id yields a 404."""
        response = self._submit_post('wagtailadmin_pages_reject_moderation', 12345)
        self.assertEqual(response.status_code, 404)

    def test_reject_moderation_view_bad_permissions(self):
        """A user without moderation permission gets a 403."""
        self.user.is_superuser = False
        admin_permission = Permission.objects.get(
            content_type__app_label='wagtailadmin', codename='access_admin')
        self.user.user_permissions.add(admin_permission)
        self.user.save()

        response = self._submit_post('wagtailadmin_pages_reject_moderation', self.revision.id)
        self.assertEqual(response.status_code, 403)

    def test_preview_for_moderation(self):
        """The moderation preview renders the revision's content."""
        response = self.client.get(
            reverse('wagtailadmin_pages_preview_for_moderation', args=(self.revision.id, )))

        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'tests/simple_page.html')
        self.assertContains(response, "Hello world!")
class TestContentTypeUse(TestCase, WagtailTestUtils):
    """Tests the content-type usage report view."""

    fixtures = ['test.json']

    def setUp(self):
        self.user = self.login()

    def test_content_type_use(self):
        """The usage report for event pages renders and lists a fixture event."""
        use_url = reverse('wagtailadmin_pages_type_use', args=('tests', 'eventpage'))
        response = self.client.get(use_url)

        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/pages/content_type_use.html')
        self.assertContains(response, "Christmas")
class TestSubpageBusinessRules(TestCase, WagtailTestUtils):
def setUp(self):
    """Create a page hierarchy that exercises subpage business rules, then log in."""
    self.root_page = Page.objects.get(id=2)

    def add(parent, page_class, title, slug):
        # Local helper: create a page of ``page_class`` and attach it to ``parent``
        page = page_class()
        page.title = title
        page.slug = slug
        parent.add_child(instance=page)
        return page

    # Standard page: allows subpages of any type
    self.standard_index = add(self.root_page, StandardIndex, "Standard Index", "standard-index")

    # Business page: allows BusinessChild and BusinessSubIndex as subpages
    self.business_index = add(self.root_page, BusinessIndex, "Business Index", "business-index")

    # Business child: allows no subpages
    self.business_child = add(self.business_index, BusinessChild, "Business Child", "business-child")

    # Business subindex: allows only BusinessChild as subpages
    self.business_subindex = add(self.business_index, BusinessSubIndex, "Business Subindex", "business-subindex")

    self.login()
def test_standard_subpage(self):
add_subpage_url = reverse('wagtailadmin_pages_add_subpage', args=(self.standard_index.id, ))
# explorer should contain a link to 'add child page'
response = self.client.get(reverse('wagtailadmin_explore', args=(self.standard_index.id, )))
self.assertEqual(response.status_code, 200)
self.assertContains(response, add_subpage_url)
# add_subpage should give us the full set of page types to choose
response = self.client.get(add_subpage_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Standard Child')
self.assertContains(response, 'Business Child')
def test_business_subpage(self):
add_subpage_url = reverse('wagtailadmin_pages_add_subpage', args=(self.business_index.id, ))
# explorer should contain a link to 'add child page'
response = self.client.get(reverse('wagtailadmin_explore', args=(self.business_index.id, )))
self.assertEqual(response.status_code, 200)
self.assertContains(response, add_subpage_url)
# add_subpage should give us a cut-down set of page types to choose
response = self.client.get(add_subpage_url)
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, 'Standard Child')
self.assertContains(response, 'Business Child')
def test_business_child_subpage(self):
add_subpage_url = reverse('wagtailadmin_pages_add_subpage', args=(self.business_child.id, ))
# explorer should not contain a link to 'add child page', as this page doesn't accept subpages
response = self.client.get(reverse('wagtailadmin_explore', args=(self.business_child.id, )))
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, add_subpage_url)
# this also means that fetching add_subpage is blocked at the permission-check level
response = self.client.get(reverse('wagtailadmin_pages_add_subpage', args=(self.business_child.id, )))
self.assertEqual(response.status_code, 403)
def test_cannot_add_invalid_subpage_type(self):
# cannot add SimplePage as a child of BusinessIndex, as SimplePage is not present in subpage_types
response = self.client.get(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.business_index.id)))
self.assertEqual(response.status_code, 403)
# likewise for BusinessChild which has an empty subpage_types list
response = self.client.get(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.business_child.id)))
self.assertEqual(response.status_code, 403)
# but we can add a BusinessChild to BusinessIndex
response = self.client.get(reverse('wagtailadmin_pages_create', args=('tests', 'businesschild', self.business_index.id)))
self.assertEqual(response.status_code, 200)
def test_not_prompted_for_page_type_when_only_one_choice(self):
response = self.client.get(reverse('wagtailadmin_pages_add_subpage', args=(self.business_subindex.id, )))
# BusinessChild is the only valid subpage type of BusinessSubIndex, so redirect straight there
self.assertRedirects(response, reverse('wagtailadmin_pages_create', args=('tests', 'businesschild', self.business_subindex.id)))
class TestNotificationPreferences(TestCase, WagtailTestUtils):
    """Moderation workflow emails: who gets notified, and that per-user
    UserProfile notification preferences are respected."""

    def setUp(self):
        # Find root page
        self.root_page = Page.objects.get(id=2)

        # Login
        self.user = self.login()

        # Create two moderator users for testing 'submitted' email
        User = get_user_model()
        self.moderator = User.objects.create_superuser('moderator', 'moderator@email.com', 'password')
        self.moderator2 = User.objects.create_superuser('moderator2', 'moderator2@email.com', 'password')

        # Create a submitter for testing 'rejected' and 'approved' emails
        self.submitter = User.objects.create_user('submitter', 'submitter@email.com', 'password')

        # User profiles for moderator2 and the submitter
        self.moderator2_profile = UserProfile.get_for_user(self.moderator2)
        self.submitter_profile = UserProfile.get_for_user(self.submitter)

        # Create a page and submit it for moderation
        self.child_page = SimplePage(
            title="Hello world!",
            slug='hello-world',
            live=False,
        )
        self.root_page.add_child(instance=self.child_page)

        # POST data to edit the page; 'action-submit' submits it for moderation
        self.post_data = {
            'title': "I've been edited!",
            'content': "Some content",
            'slug': 'hello-world',
            'action-submit': "Submit",
        }

    def submit(self):
        """Submit the page for moderation through the edit view."""
        return self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), self.post_data)

    def silent_submit(self):
        """
        Sets up the child_page as needing moderation, without making a request
        """
        self.child_page.save_revision(user=self.submitter, submitted_for_moderation=True)
        self.revision = self.child_page.get_latest_revision()

    def approve(self):
        """Approve the pending revision via the moderation view."""
        return self.client.post(reverse('wagtailadmin_pages_approve_moderation', args=(self.revision.id, )), {
            'foo': "Must post something or the view won't see this as a POST request",
        })

    def reject(self):
        """Reject the pending revision via the moderation view."""
        return self.client.post(reverse('wagtailadmin_pages_reject_moderation', args=(self.revision.id, )), {
            'foo': "Must post something or the view won't see this as a POST request",
        })

    def test_vanilla_profile(self):
        """A freshly created profile defaults to all notifications enabled."""
        # Check that the vanilla profile has rejected notifications on
        self.assertEqual(self.submitter_profile.rejected_notifications, True)

        # Check that the vanilla profile has approved notifications on
        self.assertEqual(self.submitter_profile.approved_notifications, True)

    def test_submit_notifications_sent(self):
        # Submit
        self.submit()

        # Check that both the moderators got an email, and no others
        self.assertEqual(len(mail.outbox), 1)
        self.assertIn(self.moderator.email, mail.outbox[0].to)
        self.assertIn(self.moderator2.email, mail.outbox[0].to)
        self.assertEqual(len(mail.outbox[0].to), 2)

    def test_submit_notification_preferences_respected(self):
        # moderator2 doesn't want emails
        self.moderator2_profile.submitted_notifications = False
        self.moderator2_profile.save()

        # Submit
        self.submit()

        # Check that only one moderator got an email
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual([self.moderator.email], mail.outbox[0].to)

    def test_approved_notifications(self):
        # Set up the page version
        self.silent_submit()
        # Approve
        self.approve()

        # Submitter must receive an approved email
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].to, ['submitter@email.com'])
        self.assertEqual(mail.outbox[0].subject, 'The page "Hello world!" has been approved')

    def test_approved_notifications_preferences_respected(self):
        # Submitter doesn't want 'approved' emails
        self.submitter_profile.approved_notifications = False
        self.submitter_profile.save()

        # Set up the page version
        self.silent_submit()
        # Approve
        self.approve()

        # No email to send
        self.assertEqual(len(mail.outbox), 0)

    def test_rejected_notifications(self):
        # Set up the page version
        self.silent_submit()
        # Reject
        self.reject()

        # Submitter must receive a rejected email
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].to, ['submitter@email.com'])
        self.assertEqual(mail.outbox[0].subject, 'The page "Hello world!" has been rejected')

    def test_rejected_notification_preferences_respected(self):
        # Submitter doesn't want 'rejected' emails
        self.submitter_profile.rejected_notifications = False
        self.submitter_profile.save()

        # Set up the page version
        self.silent_submit()
        # Reject
        self.reject()

        # No email to send
        self.assertEqual(len(mail.outbox), 0)
class TestIssue197(TestCase, WagtailTestUtils):
    """Regression test for issue #197: tags submitted on publish must be saved."""

    def test_issue_197(self):
        root_page = Page.objects.get(id=2)
        self.root_page = root_page

        # Start with a tagged page that has no tags yet
        self.tagged_page = root_page.add_child(instance=TaggedPage(
            title="Tagged page",
            slug='tagged-page',
            live=False,
        ))

        self.user = self.login()

        # Publish through the edit view, supplying two tags
        edit_url = reverse('wagtailadmin_pages_edit', args=(self.tagged_page.id, ))
        response = self.client.post(edit_url, {
            'title': "Tagged page",
            'slug': 'tagged-page',
            'tags': "hello, world",
            'action-publish': "Publish",
        })

        # A successful publish redirects back to the explorer
        self.assertRedirects(response, reverse('wagtailadmin_explore', args=(root_page.id, )))

        # Both tags must now be attached to the page
        page = TaggedPage.objects.get(id=self.tagged_page.id)
        for slug in ('hello', 'world'):
            self.assertIn(slug, page.tags.slugs())
| |
"""Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from ..utils.fixes import signature
from .. import get_config as _get_config
from ..exceptions import NonBLASDotWarning
from ..exceptions import NotFittedError
from ..exceptions import DataConversionWarning
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
    """Like assert_all_finite, but only for ndarray."""
    if _get_config()['assume_finite']:
        return
    X = np.asanyarray(X)
    # Fast path: summing is O(n) time / O(1) space, and the sum becomes
    # non-finite whenever any element is NaN/inf.  Only when the sum is
    # non-finite do we pay for the O(n)-space np.isfinite pass, which
    # rules out false positives caused by overflow of the sum itself.
    is_float = X.dtype.char in np.typecodes['AllFloat']
    if is_float and not np.isfinite(X.sum()) and not np.isfinite(X).all():
        raise ValueError("Input contains NaN, infinity"
                         " or a value too large for %r." % X.dtype)
def assert_all_finite(X):
    """Throw a ValueError if X contains NaN or infinity.

    Parameters
    ----------
    X : array or sparse matrix
    """
    # For sparse input only the explicitly stored entries need checking.
    values = X.data if sp.issparse(X) else X
    _assert_all_finite(values)
def as_float_array(X, copy=True, force_all_finite=True):
    """Convert an array-like to an array of floats.

    The resulting dtype is np.float32 or np.float64, depending on the
    original dtype.  Depending on ``copy``, the argument may be modified
    in place or a copy returned.

    Parameters
    ----------
    X : {array-like, sparse matrix}

    copy : bool, optional
        If True, a copy of X will be created. If False, a copy may still be
        returned if X's dtype is not a floating point type.

    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X.

    Returns
    -------
    XT : {array, sparse matrix}
        An array of type np.float
    """
    # np.matrix and non-array/non-sparse inputs go through full validation.
    needs_validation = isinstance(X, np.matrix) or (
        not isinstance(X, np.ndarray) and not sp.issparse(X))
    if needs_validation:
        return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
                           copy=copy, force_all_finite=force_all_finite,
                           ensure_2d=False)

    if X.dtype in [np.float32, np.float64]:
        # Already floating point: copy only if asked to.
        if not copy:
            return X
        if sp.issparse(X):
            return X.copy()
        return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C')

    # Integer/boolean input: small integers fit losslessly in float32,
    # everything else is widened to float64.
    if X.dtype.kind in 'uib' and X.dtype.itemsize <= 4:
        return X.astype(np.float32)
    return X.astype(np.float64)
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit') and callable(x.fit):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent representation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
    """Check that all arrays have consistent first dimensions.

    Checks whether all objects in arrays have the same shape or length.
    None entries are ignored.

    Parameters
    ----------
    *arrays : list or tuple of input objects.
        Objects that will be checked for consistent length.
    """
    lengths = [_num_samples(X) for X in arrays if X is not None]
    if len(np.unique(lengths)) > 1:
        raise ValueError("Found input variables with inconsistent numbers of"
                         " samples: %r" % [int(l) for l in lengths])
def indexable(*iterables):
    """Make arrays indexable for cross-validation.

    Checks consistent length, passes through None, and ensures that
    everything can be indexed by converting sparse matrices to csr and
    converting non-indexable objects to arrays.

    Parameters
    ----------
    *iterables : lists, dataframes, arrays, sparse matrices
        List of objects to ensure sliceability.
    """
    result = []
    for X in iterables:
        if sp.issparse(X):
            result.append(X.tocsr())
        elif X is None or hasattr(X, "__getitem__") or hasattr(X, "iloc"):
            # Already indexable (or a None placeholder): pass through.
            result.append(X)
        else:
            result.append(np.array(X))
    check_consistent_length(*result)
    return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
                          force_all_finite):
    """Convert a sparse matrix to a given format.

    Checks the sparse format of spmatrix and converts if necessary.

    Parameters
    ----------
    spmatrix : scipy sparse matrix
        Input to validate and convert.

    accept_sparse : string, boolean or list/tuple of strings
        String[s] representing allowed sparse matrix formats ('csc',
        'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). If the input is sparse but
        not in the allowed format, it will be converted to the first listed
        format. True allows the input to be any format. False means
        that a sparse matrix input will raise an error.

    dtype : string, type or None
        Data type of result. If None, the dtype of the input is preserved.

    copy : boolean
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.

    force_all_finite : boolean
        Whether to raise an error on np.inf and np.nan in X.

    Returns
    -------
    spmatrix_converted : scipy sparse matrix.
        Matrix that is ensured to have an allowed type.
    """
    if dtype is None:
        # Preserve the input dtype when none was requested.
        dtype = spmatrix.dtype

    changed_format = False

    # A single format string is treated like a one-element whitelist.
    if isinstance(accept_sparse, six.string_types):
        accept_sparse = [accept_sparse]

    if accept_sparse is False:
        raise TypeError('A sparse matrix was passed, but dense '
                        'data is required. Use X.toarray() to '
                        'convert to a dense numpy array.')
    elif isinstance(accept_sparse, (list, tuple)):
        if len(accept_sparse) == 0:
            raise ValueError("When providing 'accept_sparse' "
                             "as a tuple or list, it must contain at "
                             "least one string value.")
        # ensure correct sparse format
        if spmatrix.format not in accept_sparse:
            # create new with correct sparse
            spmatrix = spmatrix.asformat(accept_sparse[0])
            changed_format = True
    elif accept_sparse is not True:
        # any other type
        raise ValueError("Parameter 'accept_sparse' should be a string, "
                         "boolean or list of strings. You provided "
                         "'accept_sparse={}'.".format(accept_sparse))

    if dtype != spmatrix.dtype:
        # convert dtype (astype always copies, so no extra copy needed)
        spmatrix = spmatrix.astype(dtype)
    elif copy and not changed_format:
        # force copy; a format conversion above already produced a new object
        spmatrix = spmatrix.copy()

    if force_all_finite:
        if not hasattr(spmatrix, "data"):
            # e.g. dia/dok formats: no flat data array to scan
            warnings.warn("Can't check %s sparse matrix for nan or inf."
                          % spmatrix.format)
        else:
            _assert_all_finite(spmatrix.data)
    return spmatrix
def check_array(array, accept_sparse=False, dtype="numeric", order=None,
                copy=False, force_all_finite=True, ensure_2d=True,
                allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
                warn_on_dtype=False, estimator=None):
    """Input validation on an array, list, sparse matrix or similar.

    By default, the input is converted to an at least 2D numpy array.
    If the dtype of the array is object, attempt converting to float,
    raising on failure.

    Parameters
    ----------
    array : object
        Input object to check / convert.

    accept_sparse : string, boolean or list/tuple of strings (default=False)
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc. If the input is sparse but not in the allowed format,
        it will be converted to the first listed format. True allows the input
        to be any format. False means that a sparse matrix input will
        raise an error.

        .. deprecated:: 0.19
           Passing 'None' to parameter ``accept_sparse`` in methods is
           deprecated in version 0.19 and will be removed in 0.21. Use
           ``accept_sparse=False`` instead.

    dtype : string, type, list of types or None (default="numeric")
        Data type of result. If None, the dtype of the input is preserved.
        If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion on the first type is only
        performed if the dtype of the input is not in the list.

    order : 'F', 'C' or None (default=None)
        Whether an array will be forced to be fortran or c-style.
        When order is None (default), then if copy=False, nothing is ensured
        about the memory layout of the output array; otherwise (copy=True)
        the memory layout of the returned array is kept as close as possible
        to the original array.

    copy : boolean (default=False)
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.

    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X.

    ensure_2d : boolean (default=True)
        Whether to raise a value error if X is not 2d.

    allow_nd : boolean (default=False)
        Whether to allow X.ndim > 2.

    ensure_min_samples : int (default=1)
        Make sure that the array has a minimum number of samples in its first
        axis (rows for a 2D array). Setting to 0 disables this check.

    ensure_min_features : int (default=1)
        Make sure that the 2D array has some minimum number of features
        (columns). The default value of 1 rejects empty datasets.
        This check is only enforced when the input data has effectively 2
        dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
        disables this check.

    warn_on_dtype : boolean (default=False)
        Raise DataConversionWarning if the dtype of the input data structure
        does not match the requested dtype, causing a memory copy.

    estimator : str or estimator instance (default=None)
        If passed, include the name of the estimator in warning messages.

    Returns
    -------
    X_converted : object
        The converted and validated X.
    """
    # accept_sparse 'None' deprecation check
    if accept_sparse is None:
        warnings.warn(
            "Passing 'None' to parameter 'accept_sparse' in methods "
            "check_array and check_X_y is deprecated in version 0.19 "
            "and will be removed in 0.21. Use 'accept_sparse=False' "
            " instead.", DeprecationWarning)
        accept_sparse = False

    # store whether originally we wanted numeric dtype
    dtype_numeric = isinstance(dtype, six.string_types) and dtype == "numeric"

    dtype_orig = getattr(array, "dtype", None)
    if not hasattr(dtype_orig, 'kind'):
        # not a data type (e.g. a column named dtype in a pandas DataFrame)
        dtype_orig = None

    if dtype_numeric:
        if dtype_orig is not None and dtype_orig.kind == "O":
            # if input is object, convert to float.
            dtype = np.float64
        else:
            dtype = None

    if isinstance(dtype, (list, tuple)):
        if dtype_orig is not None and dtype_orig in dtype:
            # no dtype conversion required
            dtype = None
        else:
            # dtype conversion required. Let's select the first element of the
            # list of accepted types.
            dtype = dtype[0]

    # estimator_name is used in error/warning messages only
    if estimator is not None:
        if isinstance(estimator, six.string_types):
            estimator_name = estimator
        else:
            estimator_name = estimator.__class__.__name__
    else:
        estimator_name = "Estimator"
    context = " by %s" % estimator_name if estimator is not None else ""

    if sp.issparse(array):
        array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
                                      force_all_finite)
    else:
        array = np.array(array, dtype=dtype, order=order, copy=copy)

        if ensure_2d:
            if array.ndim == 1:
                raise ValueError(
                    "Expected 2D array, got 1D array instead:\narray={}.\n"
                    "Reshape your data either using array.reshape(-1, 1) if "
                    "your data has a single feature or array.reshape(1, -1) "
                    "if it contains a single sample.".format(array))
            array = np.atleast_2d(array)
            # To ensure that array flags are maintained
            array = np.array(array, dtype=dtype, order=order, copy=copy)

        # make sure we actually converted to numeric:
        if dtype_numeric and array.dtype.kind == "O":
            array = array.astype(np.float64)
        if not allow_nd and array.ndim >= 3:
            raise ValueError("Found array with dim %d. %s expected <= 2."
                             % (array.ndim, estimator_name))
        if force_all_finite:
            _assert_all_finite(array)

    shape_repr = _shape_repr(array.shape)
    if ensure_min_samples > 0:
        n_samples = _num_samples(array)
        if n_samples < ensure_min_samples:
            raise ValueError("Found array with %d sample(s) (shape=%s) while a"
                             " minimum of %d is required%s."
                             % (n_samples, shape_repr, ensure_min_samples,
                                context))

    if ensure_min_features > 0 and array.ndim == 2:
        n_features = array.shape[1]
        if n_features < ensure_min_features:
            raise ValueError("Found array with %d feature(s) (shape=%s) while"
                             " a minimum of %d is required%s."
                             % (n_features, shape_repr, ensure_min_features,
                                context))

    if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
        msg = ("Data with input dtype %s was converted to %s%s."
               % (dtype_orig, array.dtype, context))
        warnings.warn(msg, DataConversionWarning)
    return array
def check_X_y(X, y, accept_sparse=False, dtype="numeric", order=None,
              copy=False, force_all_finite=True, ensure_2d=True,
              allow_nd=False, multi_output=False, ensure_min_samples=1,
              ensure_min_features=1, y_numeric=False,
              warn_on_dtype=False, estimator=None):
    """Input validation for standard estimators.

    Checks X and y for consistent length, enforces X 2d and y 1d.
    Standard input checks are only applied to y, such as checking that y
    does not have np.nan or np.inf targets. For multi-label y, set
    multi_output=True to allow 2d and sparse y. If the dtype of X is
    object, attempt converting to float, raising on failure.

    Parameters
    ----------
    X : nd-array, list or sparse matrix
        Input data.

    y : nd-array, list or sparse matrix
        Labels.

    accept_sparse : string, boolean or list of string (default=False)
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc. If the input is sparse but not in the allowed format,
        it will be converted to the first listed format. True allows the input
        to be any format. False means that a sparse matrix input will
        raise an error.

        .. deprecated:: 0.19
           Passing 'None' to parameter ``accept_sparse`` in methods is
           deprecated in version 0.19 and will be removed in 0.21. Use
           ``accept_sparse=False`` instead.

    dtype : string, type, list of types or None (default="numeric")
        Data type of result. If None, the dtype of the input is preserved.
        If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion on the first type is only
        performed if the dtype of the input is not in the list.

    order : 'F', 'C' or None (default=None)
        Whether an array will be forced to be fortran or c-style.

    copy : boolean (default=False)
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.

    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X. This parameter
        does not influence whether y can have np.inf or np.nan values.

    ensure_2d : boolean (default=True)
        Whether to make X at least 2d.

    allow_nd : boolean (default=False)
        Whether to allow X.ndim > 2.

    multi_output : boolean (default=False)
        Whether to allow 2-d y (array or sparse matrix). If false, y will be
        validated as a vector. y cannot have np.nan or np.inf values if
        multi_output=True.

    ensure_min_samples : int (default=1)
        Make sure that X has a minimum number of samples in its first
        axis (rows for a 2D array).

    ensure_min_features : int (default=1)
        Make sure that the 2D array has some minimum number of features
        (columns). The default value of 1 rejects empty datasets.
        This check is only enforced when X has effectively 2 dimensions or
        is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
        this check.

    y_numeric : boolean (default=False)
        Whether to ensure that y has a numeric type. If dtype of y is object,
        it is converted to float64. Should only be used for regression
        algorithms.

    warn_on_dtype : boolean (default=False)
        Raise DataConversionWarning if the dtype of the input data structure
        does not match the requested dtype, causing a memory copy.

    estimator : str or estimator instance (default=None)
        If passed, include the name of the estimator in warning messages.

    Returns
    -------
    X_converted : object
        The converted and validated X.

    y_converted : object
        The converted and validated y.
    """
    X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
                    ensure_2d, allow_nd, ensure_min_samples,
                    ensure_min_features, warn_on_dtype, estimator)
    if multi_output:
        # 2-d / sparse y allowed; still checked for NaN/inf
        y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
                        dtype=None)
    else:
        y = column_or_1d(y, warn=True)
        _assert_all_finite(y)
    if y_numeric and y.dtype.kind == 'O':
        # object-dtype targets are coerced to float for regression use
        y = y.astype(np.float64)

    check_consistent_length(X, y)

    return X, y
def column_or_1d(y, warn=False):
    """Ravel column or 1d numpy array, else raise an error.

    Parameters
    ----------
    y : array-like

    warn : boolean, default False
       To control display of warnings.

    Returns
    -------
    y : array
    """
    shape = np.shape(y)
    is_1d = len(shape) == 1
    is_column = len(shape) == 2 and shape[1] == 1
    if is_column and warn:
        warnings.warn("A column-vector y was passed when a 1d array was"
                      " expected. Please change the shape of y to "
                      "(n_samples, ), for example using ravel().",
                      DataConversionWarning, stacklevel=2)
    if is_1d or is_column:
        return np.ravel(y)
    raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
    """Turn seed into a np.random.RandomState instance.

    Parameters
    ----------
    seed : None | int | instance of RandomState
        If seed is None, return the RandomState singleton used by np.random.
        If seed is an int, return a new RandomState instance seeded with seed.
        If seed is already a RandomState instance, return it.
        Otherwise raise ValueError.
    """
    if seed is None or seed is np.random:
        # Global singleton shared with the module-level np.random functions.
        return np.random.mtrand._rand
    if isinstance(seed, np.random.RandomState):
        return seed
    if isinstance(seed, (numbers.Integral, np.integer)):
        return np.random.RandomState(seed)
    raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
                     ' instance' % seed)
def has_fit_parameter(estimator, parameter):
    """Checks whether the estimator's fit method supports the given parameter.

    Parameters
    ----------
    estimator : object
        An estimator to inspect.

    parameter: str
        The searched parameter.

    Returns
    -------
    is_parameter: bool
        Whether the parameter was found to be a named parameter of the
        estimator's fit method.

    Examples
    --------
    >>> from sklearn.svm import SVC
    >>> has_fit_parameter(SVC(), "sample_weight")
    True
    """
    fit_parameters = signature(estimator.fit).parameters
    return parameter in fit_parameters
def check_symmetric(array, tol=1E-10, raise_warning=True,
                    raise_exception=False):
    """Make sure that array is 2D, square and symmetric.

    If the array is not symmetric, then a symmetrized version is returned.
    Optionally, a warning or exception is raised if the matrix is not
    symmetric.

    Parameters
    ----------
    array : nd-array or sparse matrix
        Input object to check / convert. Must be two-dimensional and square,
        otherwise a ValueError will be raised.
    tol : float
        Absolute tolerance for equivalence of arrays. Default = 1E-10.
    raise_warning : boolean (default=True)
        If True then raise a warning if conversion is required.
    raise_exception : boolean (default=False)
        If True then raise an exception if array is not symmetric.

    Returns
    -------
    array_sym : ndarray or sparse matrix
        Symmetrized version of the input array, i.e. the average of array
        and array.transpose(). If sparse, then duplicate entries are first
        summed and zeros are eliminated.
    """
    if array.ndim != 2 or array.shape[0] != array.shape[1]:
        raise ValueError("array must be 2-dimensional and square. "
                         "shape = {0}".format(array.shape))

    if sp.issparse(array):
        diff = array - array.T
        # only csr, csc, and coo have `data` attribute
        if diff.format not in ['csr', 'csc', 'coo']:
            diff = diff.tocsr()
        symmetric = np.all(abs(diff.data) < tol)
    else:
        symmetric = np.allclose(array, array.T, atol=tol)

    if symmetric:
        return array

    if raise_exception:
        raise ValueError("Array must be symmetric")
    if raise_warning:
        warnings.warn("Array is not symmetric, and will be converted "
                      "to symmetric by average with its transpose.")

    if sp.issparse(array):
        # Average with the transpose, then convert back to the input format.
        conversion = 'to' + array.format
        return getattr(0.5 * (array + array.T), conversion)()
    return 0.5 * (array + array.T)
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
    """Perform is_fitted validation for estimator.

    Checks if the estimator is fitted by verifying the presence of
    "all_or_any" of the passed attributes and raises a NotFittedError with the
    given message.

    Parameters
    ----------
    estimator : estimator instance.
        estimator instance for which the check is performed.

    attributes : attribute name(s) given as string or a list/tuple of strings
        Eg.:
            ``["coef_", "estimator_", ...], "coef_"``

    msg : string
        The default error message is, "This %(name)s instance is not fitted
        yet. Call 'fit' with appropriate arguments before using this method."

        For custom messages if "%(name)s" is present in the message string,
        it is substituted for the estimator name.

        Eg. : "Estimator, %(name)s, must be fitted before sparsifying".

    all_or_any : callable, {all, any}, default all
        Specify whether all or any of the given attributes must exist.

    Returns
    -------
    None

    Raises
    ------
    NotFittedError
        If the attributes are not found.
    """
    if msg is None:
        msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
               "appropriate arguments before using this method.")

    if not hasattr(estimator, 'fit'):
        raise TypeError("%s is not an estimator instance." % (estimator))

    if not isinstance(attributes, (list, tuple)):
        attributes = [attributes]

    present = [hasattr(estimator, attr) for attr in attributes]
    if not all_or_any(present):
        raise NotFittedError(msg % {'name': type(estimator).__name__})
def check_non_negative(X, whom):
    """
    Check if there is any negative value in an array.

    Parameters
    ----------
    X : array-like or sparse matrix
        Input data.

    whom : string
        Who passed X to this function.
    """
    # For sparse input only the stored entries can be negative.
    values = X.data if sp.issparse(X) else X
    if (values < 0).any():
        raise ValueError("Negative values in data passed to %s" % whom)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# simple library for storing python dictionaries in sqlite database
#
__author__ = 'Andrey Usov <https://github.com/ownport/scrapy-dblite>'
__version__ = '0.2.7'
import os
import re
import inspect
import sqlite3
from .query import SQLBuilder
from urlparse import urlparse
from .settings import SUPPORTED_BACKENDS
from .settings import ITEMS_PER_REQUEST
class DuplicateItem(Exception):
    """Raised when an item being stored already exists in the table."""
    pass
class SQLError(Exception):
    """Raised when an underlying SQL operation fails."""
    pass
def open(item, uri, autocommit=False):
    ''' open sqlite database by uri and Item class

    NOTE(review): this shadows the builtin open() within this module.

    item       - Scrapy Item class stored in the table
    uri        - 'sqlite://<database-file>:<table>'
    autocommit - False/True, or an int N to commit every N put() calls
    '''
    return Storage(item, uri, autocommit)
def copy(src, trg, transform=None):
    ''' copy items with optional fields transformation

    src       - (ItemClass, uri) pair describing the source storage
    trg       - (ItemClass, uri) pair describing the target storage
    transform - optional callable(dict) -> dict applied to every item

    Both storages are always closed, even if the copy fails part-way.
    '''
    source = open(src[0], src[1])
    try:
        # batch-commit every 1000 puts to keep the copy fast
        target = open(trg[0], trg[1], autocommit=1000)
        try:
            for item in source.get():
                item = dict(item)
                # drop the source rowid so the target assigns its own
                if '_id' in item:
                    del item['_id']
                if transform:
                    item = transform(item)
                target.put(trg[0](item))
            target.commit()
        finally:
            target.close()
    finally:
        source.close()
def _regexp(expr, item):
''' REGEXP function for Sqlite
'''
reg = re.compile(expr)
return reg.search(item) is not None
class Storage(object):
    ''' Storage

    Store simple dictionaries (Scrapy-style items) in an sqlite database.
    '''
    def __init__(self, item, uri, autocommit=False):
        ''' __init__

        item        - Scrapy item class (must expose a `fields` dict)
        uri         - URI to sqlite database, sqlite://<sqlite-database>:<table>
        autocommit  - boolean or integer:
                        True      - autocommit after each put()
                        False     - no autocommit, commit() only manual
                        <integer> - autocommit after N put() calls
        '''
        self._item_class = item
        self._fields = dict()
        database, table = self.parse_uri(uri)
        # database file
        if database:
            self._db = database
        else:
            raise RuntimeError('Empty database name, "%s"' % database)
        # database table; anything after the first space is ignored
        if table:
            self._table = table.split(' ')[0]
        else:
            raise RuntimeError('Empty table name, "%s"' % table)
        # sqlite connection
        try:
            self._conn = sqlite3.connect(database)
        except sqlite3.OperationalError as err:
            raise RuntimeError("%s, database: %s" % (err, database))
        # return rows as dicts and register REGEXP support for queries
        self._conn.row_factory = self._dict_factory
        self._conn.create_function("REGEXP", 2, _regexp)
        # sqlite cursor
        self._cursor = self._conn.cursor()
        # autocommit data after put()
        self._autocommit = autocommit
        # counter increased on every put() without an intervening commit()
        self._commit_counter = 0
        self._create_table(self._table)

    def __enter__(self):
        return self

    def __exit__(self, type, value, tb):
        self.close()

    @staticmethod
    def _dict_factory(cursor, row):
        ''' factory for sqlite3 to return results as dict;
        the `rowid` column is exposed as `_id`
        '''
        d = {}
        for idx, col in enumerate(cursor.description):
            if col[0] == 'rowid':
                d['_id'] = row[idx]
            else:
                d[col[0]] = row[idx]
        return d

    @staticmethod
    def parse_uri(uri):
        ''' parse URI of the form <backend>://<database>:<table>
        '''
        if not uri or uri.find('://') <= 0:
            raise RuntimeError('Incorrect URI definition: {}'.format(uri))
        backend, rest_uri = uri.split('://')
        if backend not in SUPPORTED_BACKENDS:
            raise RuntimeError('Unknown backend: {}'.format(backend))
        database, table = rest_uri.rsplit(':', 1)
        return database, table

    @property
    def fieldnames(self):
        ''' return field names declared on the item class
        '''
        if not self._fields:
            if self._item_class is not None:
                # pick up the `fields` dict declared on the Item class
                for m in inspect.getmembers(self._item_class):
                    if m[0] == 'fields' and isinstance(m[1], dict):
                        self._fields = m[1]
                if not self._fields:
                    raise RuntimeError('Unknown item type, no fields: %s' % self._item_class)
            else:
                raise RuntimeError('Item class is not defined, %s' % self._item_class)
        return self._fields.keys()

    def _create_table(self, table_name):
        ''' create sqlite table for storing simple dictionaries
        '''
        if self.fieldnames:
            sql_fields = []
            for field in self._fields:
                if field != '_id':
                    # a field may carry its SQL column definition in its
                    # 'dblite' metadata entry
                    if 'dblite' in self._fields[field]:
                        sql_fields.append(' '.join([field, self._fields[field]['dblite']]))
                    else:
                        sql_fields.append(field)
            sql_fields = ','.join(sql_fields)
            SQL = 'CREATE TABLE IF NOT EXISTS %s (%s);' % (table_name, sql_fields)
            try:
                self._cursor.execute(SQL)
            except sqlite3.OperationalError as err:
                raise RuntimeError('Create table error, %s, SQL: %s' % (err, SQL))

    def _make_item(self, item):
        ''' wrap a plain dict into the Item class, deserializing fields
        that declare a 'dblite_serializer'
        '''
        for field in self._item_class.fields:
            if (field in item) and ('dblite_serializer' in self._item_class.fields[field]):
                serializer = self._item_class.fields[field]['dblite_serializer']
                item[field] = serializer.loads(item[field])
        return self._item_class(item)

    def get(self, criteria=None, offset=None, limit=None):
        ''' returns items selected by criteria

        If the criteria is not defined, get() returns all items.
        '''
        if criteria is None and limit is None:
            return self._get_all()
        elif limit is not None and limit == 1:
            return self.get_one(criteria)
        else:
            return self._get_with_criteria(criteria, offset=offset, limit=limit)

    def _get_all(self):
        ''' return all items, paging through the table in chunks of
        ITEMS_PER_REQUEST rows
        '''
        # the statement is loop-invariant; build it once
        SQL_SELECT_MANY = 'SELECT rowid, * FROM %s WHERE rowid > ? LIMIT ?;' % self._table
        rowid = 0
        while True:
            self._cursor.execute(SQL_SELECT_MANY, (rowid, ITEMS_PER_REQUEST))
            items = self._cursor.fetchall()
            if len(items) == 0:
                break
            for item in items:
                rowid = item['_id']
                yield self._make_item(item)

    def _get_with_criteria(self, criteria, offset=None, limit=None):
        ''' returns items selected by criteria
        '''
        SQL = SQLBuilder(self._table, criteria).select(offset=offset, limit=limit)
        self._cursor.execute(SQL)
        for item in self._cursor.fetchall():
            yield self._make_item(item)

    def get_one(self, criteria):
        ''' return a single item matching criteria, or None
        '''
        try:
            items = [item for item in self._get_with_criteria(criteria, limit=1)]
            return items[0]
        except Exception:
            # no match (IndexError) or a bad criteria; keep the documented
            # best-effort contract of returning None
            return None

    def _do_autocommit(self):
        ''' perform autocommit according to the policy set in __init__
        '''
        self._commit_counter += 1
        # autocommit as boolean
        if isinstance(self._autocommit, bool) and self._autocommit:
            self.commit()
            self._commit_counter = 0
        # autocommit as counter: commit every N put() calls
        elif isinstance(self._autocommit, int) and self._autocommit > 0:
            if (self._commit_counter % self._autocommit) == 0:
                self.commit()
                self._commit_counter = 0

    def put(self, item):
        ''' store item (or a list/tuple of items) in sqlite database
        '''
        if isinstance(item, self._item_class):
            self._put_one(item)
        elif isinstance(item, (list, tuple)):
            self._put_many(item)
        else:
            raise RuntimeError('Unknown item(s) type, %s' % type(item))

    def _put_one(self, item):
        ''' store one item in database: UPDATE when the item carries
        an `_id`, INSERT otherwise
        '''
        # prepare values
        # NOTE(review): relies on item.items() and iter(item) yielding
        # fields in the same order -- holds for dict-backed items
        values = []
        for k, v in item.items():
            if k == '_id':
                continue
            if 'dblite_serializer' in item.fields[k]:
                serializer = item.fields[k]['dblite_serializer']
                v = serializer.dumps(v)
                if v is not None:
                    # buffer() is the Python 2 wrapper for BLOB values
                    v = sqlite3.Binary(buffer(v))
            values.append(v)
        # item already stored => update it
        if '_id' in item:
            fieldnames = ','.join(['%s=?' % f for f in item if f != '_id'])
            values.append(item['_id'])
            SQL = 'UPDATE %s SET %s WHERE rowid=?;' % (self._table, fieldnames)
        # new item
        else:
            fieldnames = ','.join([f for f in item if f != '_id'])
            fieldnames_template = ','.join(['?' for f in item if f != '_id'])
            SQL = 'INSERT INTO %s (%s) VALUES (%s);' % (self._table, fieldnames, fieldnames_template)
        try:
            self._cursor.execute(SQL, values)
        except sqlite3.OperationalError as err:
            raise RuntimeError('Item put() error, %s, SQL: %s, values: %s' % (err, SQL, values))
        except sqlite3.IntegrityError:
            raise DuplicateItem('Duplicate item, %s' % item)
        self._do_autocommit()

    def _put_many(self, items):
        ''' store a sequence of items in sqlite database
        '''
        for item in items:
            if not isinstance(item, self._item_class):
                raise RuntimeError('Items mismatch for %s and %s' % (self._item_class, type(item)))
            self._put_one(item)

    def sql(self, sql, params=()):
        ''' execute a raw sql request; returns an item generator for
        SELECT statements, None otherwise
        '''
        def _items(items):
            for item in items:
                yield self._item_class(item)

        sql = sql.strip()
        try:
            self._cursor.execute(sql, params)
        except sqlite3.OperationalError as err:
            raise SQLError('%s, SQL: %s, params: %s' % (err, sql, params))
        except sqlite3.IntegrityError:
            # BUGFIX: the previous message referenced an undefined name
            # `item`, turning the IntegrityError into a NameError
            raise DuplicateItem('Duplicate item, SQL: %s, params: %s' % (sql, params))
        if sql.upper().startswith('SELECT'):
            return _items(self._cursor.fetchall())
        else:
            return None

    def delete(self, criteria=None, _all=False):
        ''' delete item(s) in sqlite database

        _all = True - delete all items
        '''
        if isinstance(criteria, self._item_class):
            criteria = {'_id': criteria['_id']}
        if criteria is None and not _all:
            raise RuntimeError('Criteria is not defined')
        SQL = SQLBuilder(self._table, criteria).delete()
        self._cursor.execute(SQL)

    def __len__(self):
        ''' return number of rows in the storage table
        '''
        SQL = 'SELECT count(*) as count FROM %s;' % self._table
        self._cursor.execute(SQL)
        return int(self._cursor.fetchone()['count'])

    def commit(self):
        ''' commit changes
        '''
        try:
            self._conn.commit()
        except sqlite3.ProgrammingError:
            # connection already closed; nothing left to commit
            pass

    def close(self):
        ''' close database
        '''
        self._conn.close()
| |
"""File wrangling."""
from coverage.backward import to_string
from coverage.misc import CoverageException
import fnmatch, os, re, sys
class FileLocator(object):
    """Understand how filenames work."""

    def __init__(self):
        # The absolute path to our current directory.
        self.relative_dir = self.abs_file(os.curdir) + os.sep
        # Memoizes canonical_filename() so repeated lookups are cheap.
        self.canonical_filename_cache = {}

    def abs_file(self, filename):
        """Return the absolute normalized form of `filename`."""
        return os.path.normcase(os.path.abspath(os.path.realpath(filename)))

    def relative_filename(self, filename):
        """Return the relative form of `filename`.

        The filename will be relative to the current directory when the
        `FileLocator` was constructed.
        """
        prefix = self.relative_dir
        if filename.startswith(prefix):
            filename = filename.replace(prefix, "")
        return filename

    def canonical_filename(self, filename):
        """Return a canonical filename for `filename`.

        An absolute path with no redundant components and normalized case.
        """
        if filename in self.canonical_filename_cache:
            return self.canonical_filename_cache[filename]
        f = filename
        # An absolute path that doesn't exist may be a zip-internal path;
        # otherwise fall back to just its basename and search for it.
        if os.path.isabs(f) and not os.path.exists(f):
            if self.get_zip_data(f) is None:
                f = os.path.basename(f)
        if not os.path.isabs(f):
            for path in [os.curdir] + sys.path:
                if path is None:
                    continue
                candidate = os.path.join(path, f)
                if os.path.exists(candidate):
                    f = candidate
                    break
        canonical = self.abs_file(f)
        self.canonical_filename_cache[filename] = canonical
        return canonical

    def get_zip_data(self, filename):
        """Get data from `filename` if it is a zip file path.

        Returns the string data read from the zip file, or None if no zip file
        could be found or `filename` isn't in it.  The data returned will be
        an empty string if the file is empty.
        """
        import zipimport
        for marker in ['.zip' + os.sep, '.egg' + os.sep]:
            if marker not in filename:
                continue
            parts = filename.split(marker)
            try:
                importer = zipimport.zipimporter(parts[0] + marker[:-1])
            except zipimport.ZipImportError:
                continue
            try:
                data = importer.get_data(parts[1])
            except IOError:
                continue
            return to_string(data)
        return None
class TreeMatcher(object):
    """A matcher for files in a tree."""

    def __init__(self, directories):
        self.dirs = directories[:]

    def __repr__(self):
        return "<TreeMatcher %r>" % self.dirs

    def add(self, directory):
        """Add another directory to the list we match for."""
        self.dirs.append(directory)

    def match(self, fpath):
        """Does `fpath` indicate a file in one of our trees?"""
        for directory in self.dirs:
            if not fpath.startswith(directory):
                continue
            if fpath == directory:
                # This is the same file!
                return True
            if fpath[len(directory)] == os.sep:
                # This is a file somewhere inside the directory.
                return True
        return False
class FnmatchMatcher(object):
    """A matcher for files by filename pattern."""

    def __init__(self, pats):
        self.pats = pats[:]

    def __repr__(self):
        return "<FnmatchMatcher %r>" % self.pats

    def match(self, fpath):
        """Does `fpath` match one of our filename patterns?"""
        return any(fnmatch.fnmatch(fpath, pat) for pat in self.pats)
def sep(s):
    """Find the path separator used in this string, or os.sep if none."""
    found = re.search(r"[\\/]", s)
    return found.group(0) if found else os.sep
class PathAliases(object):
    """A collection of aliases for paths.

    When combining data files from remote machines, often the paths to source
    code are different, for example, due to OS differences, or because of
    serialized checkouts on continuous integration machines.

    A `PathAliases` object tracks a list of pattern/result pairs, and can
    map a path through those aliases to produce a unified path.

    `locator` is a FileLocator that is used to canonicalize the results.
    """
    def __init__(self, locator=None):
        # Each entry is (regex, result, pattern_sep, result_sep).
        self.aliases = []
        self.locator = locator

    @staticmethod
    def _separator_of(s):
        """Return the path separator used in `s`, or os.sep if none."""
        found = re.search(r"[\\/]", s)
        return found.group(0) if found else os.sep

    def add(self, pattern, result):
        """Add the `pattern`/`result` pair to the list of aliases.

        `pattern` is an `fnmatch`-style pattern.  `result` is a simple
        string.  When mapping paths, if a path starts with a match against
        `pattern`, then that match is replaced with `result`.  This models
        isomorphic source trees being rooted at different places on two
        different machines.

        `pattern` can't end with a wildcard component, since that would
        match an entire tree, and not just its root.
        """
        # The pattern can't end with a wildcard component.
        pattern = pattern.rstrip(r"\/")
        if pattern.endswith("*"):
            raise CoverageException("Pattern must not end with wildcards.")
        pattern_sep = self._separator_of(pattern)
        pattern += pattern_sep
        # fnmatch anchors the whole string with \Z or $, which we don't
        # want here; strip the anchor off the translated regex.
        regex_text = fnmatch.translate(pattern).replace(r'\Z(', '(')
        if regex_text.endswith("$"):
            regex_text = regex_text[:-1]
        # Let "/" in the pattern match either separator style, so
        # */a/b.py also matches on Windows.
        regex_text = regex_text.replace(r"\/", r"[\\/]")
        # Case-insensitive matching.
        regex = re.compile("(?i)" + regex_text)
        # Normalize the result: it must end with a path separator.
        result_sep = self._separator_of(result)
        result = result.rstrip(r"\/") + result_sep
        self.aliases.append((regex, result, pattern_sep, result_sep))

    def map(self, path):
        """Map `path` through the aliases.

        `path` is checked against all of the patterns.  The first pattern to
        match is used to replace the root of the path with the result root.
        Only one pattern is ever used.  If no patterns match, `path` is
        returned unchanged.

        The separator style in the result is made to match that of the result
        in the alias.
        """
        for regex, result, pattern_sep, result_sep in self.aliases:
            matched = regex.match(path)
            if not matched:
                continue
            mapped = path.replace(matched.group(0), result)
            if pattern_sep != result_sep:
                mapped = mapped.replace(pattern_sep, result_sep)
            if self.locator:
                mapped = self.locator.canonical_filename(mapped)
            return mapped
        return path
def find_python_files(dirname):
    """Yield all of the importable Python files in `dirname`, recursively."""
    for dirpath, dirnames, filenames in os.walk(dirname, topdown=True):
        if '__init__.py' not in filenames:
            # A directory without __init__.py isn't an importable package,
            # so prune the entire subtree from the walk.
            dirnames[:] = []
            continue
        for fname in filenames:
            if fnmatch.fnmatch(fname, "*.py"):
                yield os.path.join(dirpath, fname)
| |
#!/usr/bin/env python
"""
AUTHOR: Gabriel Bassett
DATE: 12-17-2013
DEPENDENCIES: ipwhois
Copyright 2014 Gabriel Bassett
LICENSE:
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
DESCRIPTION:
Functions necessary to enrich the context graph
"""
# PRE-USER SETUP
pass
########### NOT USER EDITABLE ABOVE THIS POINT #################
# USER VARIABLES
# Name of the yapsy plugin config file; read from the directory holding
# this module (see the SETUP section below).
WHOIS_CONFIG_FILE = "ipwhois.yapsy-plugin"
# Two-letter US state / territory / armed-forces codes mapped to their
# lower-case full names; used by enrich_record() to expand state
# abbreviations found in whois records.
STATES = {'AA': 'armed forces americas', 'AE': 'armed forces middle east', 'AK': 'alaska', 'AL': 'alabama',
          'AP': 'armed forces pacific', 'AR': 'arkansas', 'AS': 'american samoa', 'AZ': 'arizona', 'CA': 'california',
          'CO': 'colorado', 'CT': 'connecticut', 'DC': 'district of columbia', 'DE': 'delaware', 'FL': 'florida',
          'FM': 'federated states of micronesia', 'GA': 'georgia', 'GU': 'guam', 'HI': 'hawaii', 'IA': 'iowa',
          'ID': 'idaho', 'IL': 'illinois', 'IN': 'indiana', 'KS': 'kansas', 'KY': 'kentucky', 'LA': 'louisiana',
          'MA': 'massachusetts', 'MD': 'maryland', 'ME': 'maine', 'MH': 'marshall islands', 'MI': 'michigan',
          'MN': 'minnesota', 'MO': 'missouri', 'MP': 'northern mariana islands', 'MS': 'mississippi', 'MT': 'montana',
          'NC': 'north carolina', 'ND': 'north dakota', 'NE': 'nebraska', 'NH': 'new hampshire', 'NJ': 'new jersey',
          'NM': 'new mexico', 'NV': 'nevada', 'NY': 'new york', 'OH': 'ohio', 'OK': 'oklahoma', 'OR': 'oregon',
          'PA': 'pennsylvania', 'PR': 'puerto rico', 'PW': 'palau', 'RI': 'rhode island', 'SC': 'south carolina',
          'SD': 'south dakota', 'TN': 'tennessee', 'TX': 'texas', 'UT': 'utah', 'VA': 'virginia',
          'VI': 'virgin islands', 'VT': 'vermont', 'WA': 'washington', 'WI': 'wisconsin', 'WV': 'west virginia',
          'WY': 'wyoming'}
# Default plugin name; may be overridden by the [Core] section of the
# plugin config file (see SETUP below).
NAME = "IP Whois Enrichment"
########### NOT USER EDITABLE BELOW THIS POINT #################
## IMPORTS
from yapsy.IPlugin import IPlugin
import logging
import ConfigParser
import networkx as nx
from datetime import datetime # timedelta imported above
import dateutil # to parse variable time strings
import uuid
import inspect
import socket
import tldextract
try:
from ipwhois import IPWhois
module_import_success = True
except:
module_import_success = False
logging.error("Module import failed. Please install the following module: ipwhois.")
raise
## SETUP
__author__ = "Gabriel Bassett"
# Locate the directory containing this module so the plugin config file
# can be read from alongside it.
loc = inspect.getfile(inspect.currentframe())
ind = loc.rfind("/")  # NOTE(review): assumes "/" separators; breaks on Windows paths -- confirm
loc = loc[:ind+1]
# Read the plugin configuration (Python 2 ConfigParser API).
config = ConfigParser.SafeConfigParser()
config.readfp(open(loc + WHOIS_CONFIG_FILE))
# Allow the config file's [Core] section to override the plugin name.
if config.has_section('Core'):
    if 'name' in config.options('Core'):
        NAME = config.get('Core', 'name')
## EXECUTION
class PluginOne(IPlugin):
    """Yapsy plugin that enriches a domain with its IP whois record.

    The whois data is returned as a networkx MultiDiGraph of attribute
    nodes, each linked to the domain node with a 'describedBy' edge.
    """
    def __init__(self):
        pass

    def configure(self):
        """Read the plugin's Configuration section and report capabilities.

        :return: list of [plugin_type, configure success (bool), name,
                 description, list of acceptable inputs, resource cost
                 (1-10, 1=low), speed (1-10, 1=fast)]
        """
        config_options = config.options("Configuration")
        # cost/speed fall back to a very large sentinel when unspecified
        if 'cost' in config_options:
            cost = config.get('Configuration', 'cost')
        else:
            cost = 9999
        if 'speed' in config_options:
            speed = config.get('Configuration', 'speed')
        else:
            speed = 9999
        # the description is identical on every return path; build it once
        description = "Takes a whois record as a list of strings in a specific format and returns a networkx graph of the information."
        if 'type' in config_options:
            plugin_type = config.get('Configuration', 'type')
        else:
            logging.error("'Type' not specified in config file.")
            return [None, False, NAME, description, None, cost, speed]
        if 'inputs' in config_options:
            inputs = config.get('Configuration', 'Inputs')
            inputs = [l.strip().lower() for l in inputs.split(",")]
        else:
            logging.error("No input types specified in config file.")
            return [plugin_type, False, NAME, description, None, cost, speed]
        if not module_import_success:
            logging.error("Module import failure caused configuration failure.")
            return [plugin_type, False, NAME, description, inputs, cost, speed]
        else:
            return [plugin_type, True, NAME, description, inputs, cost, speed]

    def run(self, domain, start_time=""):
        """ str, str -> networkx multiDiGraph

        :param domain: a string containing a domain to look up
        :param start_time: string in ISO 8601 combined date and time format
               (e.g. 2014-11-01T10:34Z) or datetime object.
        :return: a networkx graph representing the whois information about
                 the domain
        """
        # NOTE: raises socket.gaierror when the domain does not resolve
        ip = socket.gethostbyname(domain)
        # flat record layout:
        # [row, date, domain, name, org, addr, city, state, country, email]
        record = [None] * 10
        results = IPWhois(ip).lookup()
        nets = results.pop("nets")
        # NOTE(review): values from later nets overwrite earlier ones, so the
        # record effectively reflects the last net -- confirm this is intended
        for i, net in enumerate(nets):
            record[0] = i
            if "updated" in net:
                record[1] = net['updated'][:10]
            elif "created" in net:
                record[1] = net['created'][:10]
            record[2] = domain
            if "name" in net:
                record[3] = net['name']
            if "organization" in net:
                record[4] = net['organization']
            if 'address' in net:
                record[5] = net['address']
            if 'city' in net:
                record[6] = net['city']
            if 'state' in net:
                record[7] = net['state']
            if 'country' in net:
                record[8] = net['country']
            if 'misc_emails' in net and net['misc_emails'] is not None:
                emails = net['misc_emails'].split("\n")
                record[9] = emails[0]
        return self.enrich_record(record, start_time)

    @staticmethod
    def _parse_time(start_time):
        """Normalize `start_time` (string or datetime) to an ISO 8601 string.

        Falls back to the current UTC time when parsing fails or the type
        is unrecognized.
        """
        if type(start_time) is str:
            try:
                return dateutil.parser.parse(start_time).strftime("%Y-%m-%dT%H:%M:%SZ")
            except Exception:
                return datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
        elif type(start_time) is datetime:
            return start_time.strftime("%Y-%m-%dT%H:%M:%SZ")
        return datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")

    @staticmethod
    def _validate_record(record):
        """Validate a whois record list and return its parsed record time.

        :param record: [row, Date, Domain, Reg_name, Reg_org, Reg_addr,
                       Reg_city, Reg_state, Reg_country, Reg_email]
        :return: the record date formatted as an ISO 8601 string
        :raises ValueError: on any malformed field
        """
        if not (type(record) == list and len(record) == 10):
            raise ValueError("Record not in correct format.")
        try:
            record_time = dateutil.parser.parse(record[1]).strftime("%Y-%m-%dT%H:%M:%SZ")
        except Exception:
            raise ValueError("Record date {0} in wrong format.".format(record[1]))
        try:
            _ = tldextract.extract(record[2])
        except Exception:
            raise ValueError("Record domain {0} is not valid.".format(record[2]))
        # record[3:] covers exactly the seven free-text fields
        for value in record[3:]:
            if type(value) not in (int, str, type(None)):
                raise ValueError("Record contains incompatible types.")
        return record_time

    @staticmethod
    def _link(g, src_uri, dst_uri, edge_attr):
        """Build the canonical edge URI from `edge_attr` and add the edge.

        The URI chains relationship values, then appends the origin.
        """
        source_hash = uuid.uuid3(uuid.NAMESPACE_URL, src_uri)
        dest_hash = uuid.uuid3(uuid.NAMESPACE_URL, dst_uri)
        # NOTE(review): 'destionation' typo kept deliberately -- it is part
        # of the existing URI scheme and changing it would change edge keys
        edge_uri = "source={0}&destionation={1}".format(str(source_hash), str(dest_hash))
        rel_chain = "relationship"
        while rel_chain in edge_attr:
            edge_uri = edge_uri + "&{0}={1}".format(rel_chain, edge_attr[rel_chain])
            rel_chain = edge_attr[rel_chain]
        if "origin" in edge_attr:
            edge_uri += "&{0}={1}".format("origin", edge_attr["origin"])
        edge_attr["uri"] = edge_uri
        g.add_edge(src_uri, dst_uri, edge_uri, edge_attr)

    def _add_attribute(self, g, domain_uri, key, uri_value, node_value, describe_by, record_time):
        """Add one attribute node and its describedBy edge from the domain.

        `uri_value` is the (possibly encoded/lowered) value used in the node
        URI; `node_value` is the value stored on the node itself.
        """
        node_uri = "class=attribute&key={0}&value={1}".format(key, uri_value)
        g.add_node(node_uri, {
            'class': 'attribute',
            'key': key,
            "value": node_value,
            "start_time": record_time,
            "uri": node_uri
        })
        self._link(g, domain_uri, node_uri, {
            "relationship": "describedBy",
            "start_time": record_time,
            "describeBy": describe_by,
            "origin": "ipwhois_record_enrichment"
        })

    def enrich_record(self, record, start_time=""):
        """Build the whois enrichment graph for one record.

        :param record: Takes a domain name as a list: [row, Date, Domain,
               Reg_name, Reg_org, Reg_addr, Reg_city, Reg_state,
               Reg_country, Reg_email]
        :param start_time: A default start time
        :return: a networkx graph representing the response. (All fields
                 captured.)
        """
        time = self._parse_time(start_time)
        record_time = self._validate_record(record)
        g = nx.MultiDiGraph()
        # Domain node
        domain_uri = "class=attribute&key={0}&value={1}".format("domain", record[2])
        g.add_node(domain_uri, {
            'class': 'attribute',
            'key': "domain",
            "value": record[2],
            "start_time": record_time,
            "uri": domain_uri
        })
        # If 'no parser', there's no data; return just the domain node
        if 'No Parser' in record:
            return g
        # Enrichment marker node
        whois_record_uri = "class=attribute&key={0}&value={1}".format("enrichment", "whois_record")
        g.add_node(whois_record_uri, {
            'class': 'attribute',
            'key': "enrichment",
            "value": "whois_record",
            "start_time": time,
            "uri": whois_record_uri
        })
        # One attribute node + edge per populated field; a field is skipped
        # when empty or the literal string 'none' (any case)
        if record[3] and record[3].lower() != 'none':
            self._add_attribute(g, domain_uri, "name",
                                record[3].encode("ascii", "ignore"), record[3],
                                "registrant_name", record_time)
        if record[4] and record[4].lower() != 'none':
            self._add_attribute(g, domain_uri, "organization",
                                record[4].encode("ascii", "ignore"), record[4],
                                "registrant_organization", record_time)
        if record[5] and record[5].lower() != 'none':
            self._add_attribute(g, domain_uri, "address",
                                record[5].encode("ascii", "ignore"), record[5],
                                "registrant_organization_address", record_time)
        if record[6] and record[6].lower() != 'none':
            self._add_attribute(g, domain_uri, "city",
                                record[6].encode("ascii", "ignore").lower(), record[6].lower(),
                                "registrant_organization_city", record_time)
        if record[7] and record[7].lower() != 'none':
            # Expand two-letter state abbreviations to full names
            if len(record[7]) == 2 and record[7] in STATES:
                state = STATES[record[7]]
            else:
                state = record[7]
            self._add_attribute(g, domain_uri, "state",
                                state.encode("ascii", "ignore").lower(), state.lower(),
                                "registrant_organization_state", record_time)
        if record[8] and record[8].lower() != 'none':
            self._add_attribute(g, domain_uri, "country",
                                record[8].encode("ascii", "ignore").lower(), record[8].lower(),
                                "registrant_organization_country", record_time)
        if record[9] and record[9].lower() != 'none':
            self._add_attribute(g, domain_uri, "email_address",
                                record[9].encode("ascii", "ignore"), record[9],
                                "registrant_email", record_time)
        # Finally link the domain to the whois_record enrichment node
        self._link(g, domain_uri, whois_record_uri, {
            "relationship": "describedBy",
            "start_time": time,
            "origin": "ipwhois_record_enrichment"
        })
        return g
| |
""" Test usage of communication.authenticate and communication.gen_request
"""
from unittest import TestCase
from pythonzimbra.exceptions.communication import UnknownRequestType
from pythonzimbra.request_json import RequestJson
from pythonzimbra.request_xml import RequestXml
from pythonzimbra.tools.auth import authenticate
from pythonzimbra.communication import Communication
from tests import get_config
class TestGenrequest(TestCase):
def test_genrequest_default(self):
    """ Create a request only using the Communication-object

    Integration test: talks to the live Zimbra server configured in the
    [genrequest_test] section of the test config; skipped (silently
    passes) when that section's `enabled` flag is off.
    """
    config = get_config()
    if config.getboolean("genrequest_test", "enabled"):
        # Run only if enabled
        comm = Communication(config.get("genrequest_test", "url"))
        # Obtain an auth token via preauth-key authentication
        token = authenticate(
            config.get("genrequest_test", "url"),
            config.get("genrequest_test", "account"),
            config.get("genrequest_test", "preauthkey")
        )
        self.assertNotEqual(
            token,
            None,
            "Cannot authenticate."
        )
        # gen_request() without request_type should build the default
        # (JSON) request
        request = comm.gen_request(token=token)
        request.add_request(
            "NoOpRequest",
            {
            },
            "urn:zimbraMail"
        )
        response = comm.send_request(request)
        if response.is_fault():
            # NOTE(review): "Reponse" typo is a runtime string, left as-is
            self.fail(
                "Reponse failed: (%s) %s" % (
                    response.get_fault_code(),
                    response.get_fault_message()
                )
            )
def test_genrequest_fail(self):
    """ Check that invalid request types raise UnknownRequestType

    Integration test: requires the live server configured in the
    [genrequest_test] config section (skipped when disabled).
    """
    config = get_config()
    if config.getboolean("genrequest_test", "enabled"):
        # Run only if enabled
        comm = Communication(config.get("genrequest_test", "url"))
        token = authenticate(
            config.get("genrequest_test", "url"),
            config.get("genrequest_test", "account"),
            config.get("genrequest_test", "preauthkey")
        )
        self.assertNotEqual(
            token,
            None,
            "Cannot authenticate."
        )
        # gen_request() must reject an unknown request_type outright
        self.assertRaises(
            UnknownRequestType,
            comm.gen_request,
            request_type="INVALID",
            token=token
        )
        request = comm.gen_request(token=token)
        request.add_request(
            "NoOpRequest",
            {
            },
            "urn:zimbraMail"
        )
        # Deliberately break the request
        request.request_type = "INVALID"
        # send_request() must also detect the broken type
        self.assertRaises(
            UnknownRequestType,
            comm.send_request,
            request
        )
def test_genrequest_xml(self):
""" Create a request only using the Communication-object
"""
config = get_config()
if config.getboolean("genrequest_test", "enabled"):
# Run only if enabled
comm = Communication(config.get("genrequest_test", "url"))
token = authenticate(
config.get("genrequest_test", "url"),
config.get("genrequest_test", "account"),
config.get("genrequest_test", "preauthkey")
)
self.assertNotEqual(
token,
None,
"Cannot authenticate."
)
request = comm.gen_request(request_type="xml", token=token)
request.add_request(
"NoOpRequest",
{
},
"urn:zimbraMail"
)
response = comm.send_request(request)
if response.is_fault():
self.fail(
"Reponse failed: (%s) %s" % (
response.get_fault_code(),
response.get_fault_message()
)
)
self.assertEqual(
response.response_type,
"xml",
"Invalid response type %s" % response.response_type
)
def test_genrequest_batch(self):
""" Create a batch-request only using the Communication-object
"""
config = get_config()
if config.getboolean("genrequest_test", "enabled"):
# Run only if enabled
comm = Communication(config.get("genrequest_test", "url"))
token = authenticate(
config.get("genrequest_test", "url"),
config.get("genrequest_test", "account"),
config.get("genrequest_test", "preauthkey")
)
self.assertNotEqual(
token,
None,
"Cannot authenticate."
)
request = comm.gen_request(token=token, set_batch=True)
self.assertEqual(
type(request),
RequestJson,
"Generated request wasn't an json-request, which should be "
"the default."
)
request.add_request(
"NoOpRequest",
{
},
"urn:zimbraMail"
)
request.add_request(
"NoOpRequest",
{
},
"urn:zimbraMail"
)
response = comm.send_request(request)
if response.is_fault():
self.fail(
"Reponse failed: (%s) %s" % (
response.get_fault_code(),
response.get_fault_message()
)
)
self.assertEqual(
response.is_batch(),
True,
"Batch-request didn't return a Batch response."
)
expected_batch = {
'nameToId': {
'NoOpResponse': [
'1',
'2'
]
},
'hasFault': False,
'idToName': {
'1': 'NoOpResponse',
'2': 'NoOpResponse'
}
}
self.assertEqual(
response.get_batch(),
expected_batch,
"Batch-dictionary wasn't expected"
)
def test_genrequest_batch_xml(self):
""" Create a batch-request only using the Communication-object (
xml-version)
"""
config = get_config()
if config.getboolean("genrequest_test", "enabled"):
# Run only if enabled
comm = Communication(config.get("genrequest_test", "url"))
token = authenticate(
config.get("genrequest_test", "url"),
config.get("genrequest_test", "account"),
config.get("genrequest_test", "preauthkey")
)
self.assertNotEqual(
token,
None,
"Cannot authenticate."
)
request = comm.gen_request(
request_type="xml",
token=token,
set_batch=True
)
self.assertEqual(
type(request),
RequestXml,
"Generated request wasn't an xml-request"
)
request.add_request(
"NoOpRequest",
{
},
"urn:zimbraMail"
)
request.add_request(
"NoOpRequest",
{
},
"urn:zimbraMail"
)
response = comm.send_request(request)
if response.is_fault():
self.fail(
"Reponse failed: (%s) %s" % (
response.get_fault_code(),
response.get_fault_message()
)
)
self.assertEqual(
response.is_batch(),
True,
"Batch-request didn't return a Batch response."
)
expected_batch = {
'nameToId': {
'NoOpResponse': [
'1',
'2'
]
},
'hasFault': False,
'idToName': {
'1': 'NoOpResponse',
'2': 'NoOpResponse'
}
}
self.assertEqual(
response.get_batch(),
expected_batch,
"Batch-dictionary wasn't expected"
)
def test_genrequest_check_response(self):
""" Create a request only using the Communication-object, send it and
check the response
"""
config = get_config()
if config.getboolean("genrequest_test", "enabled"):
# Run only if enabled
comm = Communication(config.get("genrequest_test", "url"))
token = authenticate(
config.get("genrequest_test", "url"),
config.get("genrequest_test", "account"),
config.get("genrequest_test", "preauthkey")
)
self.assertNotEqual(
token,
None,
"Cannot authenticate."
)
request = comm.gen_request(token=token)
request.add_request(
"GetInfoRequest",
{
},
"urn:zimbraAccount"
)
response = comm.send_request(request)
if response.is_fault():
self.fail(
"Reponse failed: (%s) %s" % (
response.get_fault_code(),
response.get_fault_message()
)
)
self.assertEqual(
response.get_response()["GetInfoResponse"]["name"],
config.get("genrequest_test", "account"),
"Request returned unexpected response"
)
def test_genrequest_check_response_batch(self):
""" Create a batch-request only using the Communication-object
"""
config = get_config()
if config.getboolean("genrequest_test", "enabled"):
# Run only if enabled
comm = Communication(config.get("genrequest_test", "url"))
token = authenticate(
config.get("genrequest_test", "url"),
config.get("genrequest_test", "account"),
config.get("genrequest_test", "preauthkey")
)
self.assertNotEqual(
token,
None,
"Cannot authenticate."
)
request = comm.gen_request(token=token, set_batch=True)
self.assertEqual(
type(request),
RequestJson,
"Generated request wasn't an json-request, which should be "
"the default."
)
request.add_request(
"NoOpRequest",
{
},
"urn:zimbraMail"
)
request.add_request(
"GetInfoRequest",
{
},
"urn:zimbraAccount"
)
response = comm.send_request(request)
if response.is_fault():
self.fail(
"Reponse failed: (%s) %s" % (
response.get_fault_code(),
response.get_fault_message()
)
)
self.assertEqual(
response.get_response(2)["GetInfoResponse"]["name"],
config.get("genrequest_test", "account"),
"Request returned unexpected response"
)
def test_genrequest_check_response_xml(self):
""" Create a request only using the Communication-object, send it and
check the response
"""
config = get_config()
if config.getboolean("genrequest_test", "enabled"):
# Run only if enabled
comm = Communication(config.get("genrequest_test", "url"))
token = authenticate(
config.get("genrequest_test", "url"),
config.get("genrequest_test", "account"),
config.get("genrequest_test", "preauthkey")
)
self.assertNotEqual(
token,
None,
"Cannot authenticate."
)
request = comm.gen_request(request_type="xml", token=token)
request.add_request(
"GetInfoRequest",
{
},
"urn:zimbraAccount"
)
response = comm.send_request(request)
if response.is_fault():
self.fail(
"Reponse failed: (%s) %s" % (
response.get_fault_code(),
response.get_fault_message()
)
)
self.assertEqual(
response.get_response()["GetInfoResponse"]["name"],
config.get("genrequest_test", "account"),
"Request returned unexpected response"
)
def test_genrequest_check_response_batch_xml(self):
""" Create a batch-request only using the Communication-object
"""
config = get_config()
if config.getboolean("genrequest_test", "enabled"):
# Run only if enabled
comm = Communication(config.get("genrequest_test", "url"))
token = authenticate(
config.get("genrequest_test", "url"),
config.get("genrequest_test", "account"),
config.get("genrequest_test", "preauthkey")
)
self.assertNotEqual(
token,
None,
"Cannot authenticate."
)
request = comm.gen_request(
request_type="xml",
token=token,
set_batch=True
)
self.assertEqual(
type(request),
RequestXml,
"Generated request wasn't an json-request, which should be "
"the default."
)
request.add_request(
"NoOpRequest",
{
},
"urn:zimbraMail"
)
request.add_request(
"GetInfoRequest",
{
},
"urn:zimbraAccount"
)
response = comm.send_request(request)
if response.is_fault():
self.fail(
"Reponse failed: (%s) %s" % (
response.get_fault_code(),
response.get_fault_message()
)
)
self.assertEqual(
response.get_response(2)["GetInfoResponse"]["name"],
config.get("genrequest_test", "account"),
"Request returned unexpected response"
)
def test_genrequest_batch_invalid(self):
""" Create a batchrequest only using the Communication-object,
send it and request an invalid request id
"""
config = get_config()
if config.getboolean("genrequest_test", "enabled"):
# Run only if enabled
comm = Communication(config.get("genrequest_test", "url"))
token = authenticate(
config.get("genrequest_test", "url"),
config.get("genrequest_test", "account"),
config.get("genrequest_test", "preauthkey")
)
self.assertNotEqual(
token,
None,
"Cannot authenticate."
)
request = comm.gen_request(token=token, set_batch=True)
self.assertEqual(
type(request),
RequestJson,
"Generated request wasn't an json-request, which should be "
"the default."
)
request.add_request(
"NoOpRequest",
{
},
"urn:zimbraMail"
)
request.add_request(
"NoOpRequest",
{
},
"urn:zimbraMail"
)
response = comm.send_request(request)
if response.is_fault():
self.fail(
"Reponse failed: (%s) %s" % (
response.get_fault_code(),
response.get_fault_message()
)
)
self.assertIsNone(
response.get_response(3),
"Querying an invalid requestId didn't return None"
)
def test_genrequest_batch_invalid_xml(self):
""" Create a batchrequest only using the Communication-object,
send it and request an invalid request id (xml)
"""
config = get_config()
if config.getboolean("genrequest_test", "enabled"):
# Run only if enabled
comm = Communication(config.get("genrequest_test", "url"))
token = authenticate(
config.get("genrequest_test", "url"),
config.get("genrequest_test", "account"),
config.get("genrequest_test", "preauthkey")
)
self.assertNotEqual(
token,
None,
"Cannot authenticate."
)
request = comm.gen_request(
request_type="xml",
token=token,
set_batch=True
)
self.assertEqual(
type(request),
RequestXml,
"Generated request wasn't an json-request, which should be "
"the default."
)
request.add_request(
"NoOpRequest",
{
},
"urn:zimbraMail"
)
request.add_request(
"NoOpRequest",
{
},
"urn:zimbraMail"
)
response = comm.send_request(request)
if response.is_fault():
self.fail(
"Reponse failed: (%s) %s" % (
response.get_fault_code(),
response.get_fault_message()
)
)
self.assertIsNone(
response.get_response(3),
"Querying an invalid requestId didn't return None"
)
| |
# -*- coding: utf-8 -*-
from tensor_analysis.arraypy import Arraypy, TensorArray, list2arraypy, \
list2tensor, matrix2arraypy, matrix2tensor
from tensor_analysis.tensor_methods import tensor_product
from sympy import Symbol, symbols
from sympy.matrices import Matrix, MatrixSymbol
import sys
def test_arraypy_initiation():
    """Construction of Arraypy objects from every accepted argument form:
    no arguments, a symbol name, a length, a shape tuple, the 'sparse'
    flag, a ranged-index string, and a combined [dim, size, start] list.
    """
    # no arguments: a single zero element of rank 1
    arr_with_one_element = Arraypy()
    assert len(arr_with_one_element) == 1
    assert arr_with_one_element[0] == 0
    assert arr_with_one_element.rank == 1
    # a bare string names the symbolic elements
    arr_with_symbol_element = Arraypy('Py')
    assert len(arr_with_symbol_element) == 1
    assert arr_with_symbol_element[0] == Symbol('Py[0]')
    assert arr_with_symbol_element.rank == 1
    # an integer argument creates a vector of that length
    vector_length = 5
    vector = Arraypy(vector_length)
    assert len(vector) == vector_length
    assert vector.shape == (vector_length,)
    assert vector.start_index == (0,)
    assert vector.end_index == (vector_length - 1,)
    assert vector.rank == 1
    # a tuple argument is used directly as the shape
    array_shape = (3, 3, 3, 3)
    n_dim_array = Arraypy(array_shape)
    assert len(n_dim_array) == 3 * 3 * 3 * 3
    assert n_dim_array.shape == array_shape
    assert n_dim_array.start_index == (0, 0, 0, 0)
    assert n_dim_array.end_index == (2, 2, 2, 2)
    assert n_dim_array.rank == 4
    # 'sparse' flag: same geometry, but no dense backing storage
    sparse_array = Arraypy(array_shape, 'sparse')
    assert sparse_array._sparse is True
    assert len(sparse_array._output) == 0
    assert len(sparse_array) == 3 * 3 * 3 * 3
    # BUG FIX: these four assertions previously re-checked n_dim_array
    # (copy-paste error); they are meant to verify the sparse array.
    assert sparse_array.shape == array_shape
    assert sparse_array.start_index == (0, 0, 0, 0)
    assert sparse_array.end_index == (2, 2, 2, 2)
    assert sparse_array.rank == 4
    # a range string gives per-dimension start/end indices
    arr_with_ranged_index = Arraypy('1..3, 2 .. 4, 3..5')
    assert arr_with_ranged_index.shape == (3, 3, 3)
    assert len(arr_with_ranged_index) == 3 * 3 * 3
    assert arr_with_ranged_index.start_index == (1, 2, 3)
    assert arr_with_ranged_index.end_index == (3, 4, 5)
    assert arr_with_ranged_index.rank == 3
    # list argument: [number of dims, size of each dim, start index]
    combined_arg = [2, 3, 1]
    array_with_combined_arg = Arraypy(combined_arg)
    assert len(array_with_combined_arg) == 3 * 3
    assert array_with_combined_arg.shape == (3, 3)
    assert array_with_combined_arg.start_index == (1, 1)
    assert array_with_combined_arg.end_index == (3, 3)
    assert array_with_combined_arg.rank == 2
    # several arguments may be combined: shape + symbol name + 'sparse'
    shape = (3, 3)
    array_with_many_arg = Arraypy(shape, 'X', 'sparse')
    assert len(array_with_many_arg) == 3 * 3
    assert array_with_many_arg.shape == shape
    assert array_with_many_arg._sparse is True
    assert array_with_many_arg[0, 0] == Symbol('X[0, 0]')
    assert array_with_many_arg.rank == 2
def test_reshape():
    """Reshaping keeps the element count while changing shape and rank."""
    flat = Arraypy(50)
    assert flat.shape == (50,)
    assert flat.rank == 1
    cube = flat.reshape((5, 5, 2))
    assert cube.shape == (5, 5, 2)
    assert cube.rank == 3
    assert len(cube) == 50
def test_next_index():
    """next_index walks indices in row-major order and wraps around."""
    grid = Arraypy((2, 2), 'Py')
    transitions = [
        ((0, 0), (0, 1)),
        ((0, 1), (1, 0)),
        ((1, 0), (1, 1)),
        ((1, 1), (0, 0)),  # the last index wraps back to the first
    ]
    for current, following in transitions:
        assert grid.next_index(current) == following
def test_index_list():
    """index_list enumerates all indices, first and last matching
    start_index/end_index."""
    grid = Arraypy((2, 2))
    indices = grid.index_list
    assert indices[0] == (0, 0) == grid.start_index
    assert indices[1] == (0, 1)
    assert indices[2] == (1, 0)
    assert indices[3] == (1, 1) == grid.end_index
def test_iterator():
    """Iteration yields elements in storage order, before and after a
    reshape (only exercised on Python 3)."""
    if sys.version_info[0] >= 3:
        arr = list2arraypy([0, 1, 2, 3], (2, 2))
        for expected, value in enumerate(arr):
            assert value == expected
        arr = arr.reshape(4)
        for expected, value in enumerate(arr):
            assert value == expected
def test_sparse():
    """Sparse arrays keep only non-zero elements in the ``_output`` dict
    while still reporting the full dense length and zero defaults."""
    sparse_array = Arraypy((2, 2), 'sparse')
    assert len(sparse_array) == 2 * 2
    # dictionary where all data is
    assert len(sparse_array._output) == 0
    # it's empty, even though Arraypy knows that 'empty' data is zero
    if sys.version_info[0] >= 3:
        for i in sparse_array:
            assert i == 0
    else:
        idx = sparse_array.start_index
        # BUG FIX: iterate over every element (len(sparse_array) == 4),
        # not over the rank of start_index (== 2), so all entries are
        # verified to be zero - matching the Python 3 branch above.
        for i in range(len(sparse_array)):
            assert sparse_array[idx] == 0
            idx = sparse_array.next_index(idx)
    # assigning a non-zero value materialises exactly one dict entry
    sparse_array[0, 0] = 123
    assert len(sparse_array._output) == 1
    assert sparse_array[0, 0] == 123
    # when element in sparse array become zero it will disappear from
    # dictionary
    sparse_array[0, 0] = 0
    assert len(sparse_array._output) == 0
    assert sparse_array[0, 0] == 0
def test_calculation():
    """Element-wise addition/subtraction for Arraypy, and the tensor
    (outer) product for TensorArray."""
    # Arraypy
    list_of_ones = [1 for i in range(9)]
    list_of_nines = [9 for i in range(9)]
    shape = (3, 3)
    a = list2arraypy(list_of_ones, shape)
    b = list2arraypy(list_of_nines, shape)
    if sys.version_info[0] >= 3:
        # Python 3: Arraypy supports direct iteration over its values.
        c = a + b
        for i in c:
            assert i == 10
        c = b - a
        for i in c:
            assert i == 8
    else:
        # Python 2: walk the indices manually via start_index/next_index.
        c = a + b
        idx = c.start_index
        for i in range(len(c)):
            assert c[idx] == 10
            idx = c.next_index(idx)
        idx = c.start_index
        # NOTE(review): idx is reset from the old c before c is rebound;
        # both results share the same shape, so start_index is identical.
        c = b - a
        for i in range(len(c)):
            assert c[idx] == 8
            idx = c.next_index(idx)
    # TensorArray
    x0, x1, y0, y1 = symbols('X[0], X[1], Y[0], Y[1]')
    tensor1 = TensorArray(Arraypy(2, 'X'), 1)
    tensor2 = TensorArray(Arraypy(2, 'Y'), -1)
    assert tensor1.rank == tensor2.rank == 1
    # outer product of two rank-1 tensors yields a rank-2 tensor whose
    # entries are the pairwise symbol products
    res_tensor = tensor_product(tensor1, tensor2)
    assert len(res_tensor) == 4
    assert res_tensor.rank == 2
    assert res_tensor[0, 0] == x0 * y0
    assert res_tensor[0, 1] == x0 * y1
    assert res_tensor[1, 0] == x1 * y0
    assert res_tensor[1, 1] == x1 * y1
def test_tensor_contract():
    """Contracting the index pair (1, 2) of a (1, -1) tensor equals the
    matrix trace."""
    tensor = list2tensor(list(range(9)), (3, 3), (1, -1))
    # layout:
    # 0 1 2
    # 3 4 5
    # 6 7 8
    assert tensor.rank == 2
    contracted = tensor.contract(1, 2)
    # the trace 0 + 4 + 8 == 12 is stored as a single rank-1 element
    assert contracted.rank == 1
    assert len(contracted) == 1
    assert contracted[0] == 12
def test_arraypy_converting():
    """Arraypy converts to list, TensorArray and Matrix with values
    preserved at every index."""
    source = list2arraypy([1, 2, 3, 4], (2, 2))
    as_list = source.to_list()
    assert (isinstance(as_list, list))
    as_tensor = source.to_tensor((1, -1))
    assert (isinstance(as_tensor, TensorArray))
    as_matrix = source.to_matrix()
    assert (isinstance(as_matrix, Matrix))
    # every position must agree across all three representations
    idx = (0, 0)
    for _ in range(len(source)):
        assert source[idx] == as_tensor[idx] == as_matrix[idx]
        idx = source.next_index(idx)
def test_tensor_initiation():
    """TensorArray exposes index character, shape, (p, q) type and rank."""
    shape = (2, 2, 2)
    base = Arraypy(shape)
    ind_char = (1, -1, 1)
    tensor = TensorArray(base, ind_char)
    assert tensor.ind_char == ind_char
    assert tensor.shape == shape
    # two contravariant (+1) indices and one covariant (-1) index
    assert tensor.type_pq == (2, 1)
    assert tensor.rank == 3
def test_tensor_converting():
    """TensorArray converts to list, Arraypy and Matrix."""
    tensor = TensorArray(Arraypy((2, 2)), (1, 1))
    as_list = tensor.to_list()
    assert (isinstance(as_list, list))
    as_arraypy = tensor.to_arraypy()
    assert (isinstance(as_arraypy, Arraypy))
    as_matrix = tensor.to_matrix()
    assert (isinstance(as_matrix, Matrix))
def test_converting_functions():
    """Module-level converters build Arraypy/TensorArray objects from
    plain lists and from sympy Matrix objects."""
    source_list = [1, 2, 3, 4]
    source_matrix = Matrix(((1, 2), (3, 4)))
    # list
    from_list_arraypy = list2arraypy(source_list, (2, 2))
    assert (isinstance(from_list_arraypy, Arraypy))
    from_list_tensor = list2tensor(source_list, (2, 2), (-1, -1))
    assert (isinstance(from_list_tensor, TensorArray))
    # Matrix
    from_matrix_arraypy = matrix2arraypy(source_matrix)
    assert (isinstance(from_matrix_arraypy, Arraypy))
    from_matrix_tensor = matrix2tensor(source_matrix, (-1, -1))
    assert (isinstance(from_matrix_tensor, TensorArray))
def test_equality():
    """Equality compares contents, shape and - for tensors - the index
    character."""
    data_a = [1, 2, 3, 4]
    data_b = [1, 2, 3, 4]
    data_c = [4, 3, 2, 1]
    assert data_a == data_b
    assert data_a != data_c
    arr_a = list2arraypy(data_a, (2, 2))
    arr_b = list2arraypy(data_b, (2, 2))
    arr_c = list2arraypy(data_c, (2, 2))
    arr_flat = list2arraypy(data_a, 4)  # same data, different shape
    assert arr_a == arr_b
    # mutating one element breaks equality
    arr_b[0, 0] = 0
    assert arr_a != arr_b
    assert arr_a != arr_c
    assert arr_a != arr_flat
    tensor_a = list2tensor(data_a, (2, 2), (1, 1))
    tensor_b = list2tensor(data_b, (2, 2), (1, 1))
    # same data, but a different index character
    tensor_c = list2tensor(data_b, (2, 2), (-1, 1))
    assert tensor_a == tensor_b
    assert tensor_a != tensor_c
| |
"""praw constants."""
import sys
# Package version; also embedded in USER_AGENT_FORMAT below.
__version__ = '6.0.0'

# Mapping of symbolic endpoint names to Reddit API URL path templates.
# ``{placeholders}`` (e.g. ``{subreddit}``) are substituted via
# ``str.format`` by the calling code.
API_PATH = {
    'about_edited': 'r/{subreddit}/about/edited/',
    'about_log': 'r/{subreddit}/about/log/',
    'about_modqueue': 'r/{subreddit}/about/modqueue/',
    'about_reports': 'r/{subreddit}/about/reports/',
    'about_spam': 'r/{subreddit}/about/spam/',
    'about_sticky': 'r/{subreddit}/about/sticky/',
    'about_stylesheet': 'r/{subreddit}/about/stylesheet/',
    'about_traffic': 'r/{subreddit}/about/traffic/',
    'about_unmoderated': 'r/{subreddit}/about/unmoderated/',
    'accept_mod_invite': 'r/{subreddit}/api/accept_moderator_invite',
    'approve': 'api/approve/',
    'block': 'api/block',
    'block_user': '/api/block_user/',
    'blocked': 'prefs/blocked/',
    'collapse': 'api/collapse_message/',
    'comment': 'api/comment/',
    'comment_replies': 'message/comments/',
    'compose': 'api/compose/',
    'contest_mode': 'api/set_contest_mode/',
    'del': 'api/del/',
    'delete_message': 'api/del_msg',
    'delete_sr_banner': 'r/{subreddit}/api/delete_sr_banner',
    'delete_sr_header': 'r/{subreddit}/api/delete_sr_header',
    'delete_sr_icon': 'r/{subreddit}/api/delete_sr_icon',
    'delete_sr_image': 'r/{subreddit}/api/delete_sr_img',
    'deleteflair': 'r/{subreddit}/api/deleteflair',
    'distinguish': 'api/distinguish/',
    'domain': 'domain/{domain}/',
    'duplicates': 'duplicates/{submission_id}/',
    'edit': 'api/editusertext/',
    'emoji_delete': 'api/v1/{subreddit}/emoji/{emoji_name}',
    'emoji_lease': 'api/v1/{subreddit}/emoji_asset_upload_s3.json',
    'emoji_list': 'api/v1/{subreddit}/emojis/all',
    'emoji_upload': 'api/v1/{subreddit}/emoji.json',
    'flair': 'r/{subreddit}/api/flair/',
    'flairconfig': 'r/{subreddit}/api/flairconfig/',
    'flaircsv': 'r/{subreddit}/api/flaircsv/',
    'flairlist': 'r/{subreddit}/api/flairlist/',
    'flairselector': 'r/{subreddit}/api/flairselector/',
    'flairtemplate': 'r/{subreddit}/api/flairtemplate/',
    'flairtemplate_v2': 'r/{subreddit}/api/flairtemplate_v2',
    'flairtemplateclear': 'r/{subreddit}/api/clearflairtemplates/',
    'flairtemplatedelete': 'r/{subreddit}/api/deleteflairtemplate/',
    'friend': 'r/{subreddit}/api/friend/',
    'friend_v1': 'api/v1/me/friends/{user}',
    'friends': 'api/v1/me/friends/',
    'gild_thing': 'api/v1/gold/gild/{fullname}/',
    'gild_user': 'api/v1/gold/give/{username}/',
    'hide': 'api/hide/',
    'ignore_reports': 'api/ignore_reports/',
    'inbox': 'message/inbox/',
    'info': 'api/info/',
    'karma': 'api/v1/me/karma',
    'leavecontributor': 'api/leavecontributor',
    'link_flair': 'r/{subreddit}/api/link_flair_v2',
    'list_banned': 'r/{subreddit}/about/banned/',
    'list_contributor': 'r/{subreddit}/about/contributors/',
    'list_moderator': 'r/{subreddit}/about/moderators/',
    'list_muted': 'r/{subreddit}/about/muted/',
    'list_wikibanned': 'r/{subreddit}/about/wikibanned/',
    'list_wikicontributor': 'r/{subreddit}/about/wikicontributors/',
    'live_accept_invite': 'api/live/{id}/accept_contributor_invite',
    'live_add_update': 'api/live/{id}/update',
    'live_close': 'api/live/{id}/close_thread',
    'live_contributors': 'live/{id}/contributors',
    'live_discussions': 'live/{id}/discussions',
    'live_focus': 'live/{thread_id}/updates/{update_id}',
    'live_info': 'api/live/by_id/{ids}',
    'live_invite': 'api/live/{id}/invite_contributor',
    'live_leave': 'api/live/{id}/leave_contributor',
    'live_now': 'api/live/happening_now',
    'live_remove_contrib': 'api/live/{id}/rm_contributor',
    'live_remove_invite': 'api/live/{id}/rm_contributor_invite',
    'live_remove_update': 'api/live/{id}/delete_update',
    'live_report': 'api/live/{id}/report',
    'live_strike': 'api/live/{id}/strike_update',
    'live_update_perms': 'api/live/{id}/set_contributor_permissions',
    'live_update_thread': 'api/live/{id}/edit',
    'live_updates': 'live/{id}',
    'liveabout': 'api/live/{id}/about/',
    'livecreate': 'api/live/create',
    'lock': 'api/lock/',
    'marknsfw': 'api/marknsfw/',
    'me': 'api/v1/me',
    'mentions': 'message/mentions',
    'message': 'message/messages/{id}/',
    'messages': 'message/messages/',
    'moderator_messages': 'r/{subreddit}/message/moderator/',
    'moderator_unread': 'r/{subreddit}/message/moderator/unread/',
    'modmail_archive': 'api/mod/conversations/{id}/archive',
    'modmail_bulk_read': 'api/mod/conversations/bulk/read',
    'modmail_conversation': 'api/mod/conversations/{id}',
    'modmail_conversations': 'api/mod/conversations/',
    'modmail_highlight': 'api/mod/conversations/{id}/highlight',
    'modmail_mute': 'api/mod/conversations/{id}/mute',
    'modmail_read': 'api/mod/conversations/read',
    'modmail_subreddits': 'api/mod/conversations/subreddits',
    'modmail_unarchive': 'api/mod/conversations/{id}/unarchive',
    'modmail_unmute': 'api/mod/conversations/{id}/unmute',
    'modmail_unread': 'api/mod/conversations/unread',
    'modmail_unread_count': 'api/mod/conversations/unread/count',
    'morechildren': 'api/morechildren/',
    'multireddit': 'user/{user}/m/{multi}/',
    'multireddit_api': 'api/multi/user/{user}/m/{multi}/',
    'multireddit_base': 'api/multi/',
    'multireddit_copy': 'api/multi/copy/',
    'multireddit_rename': 'api/multi/rename/',
    'multireddit_update': 'api/multi/user/{user}/m/{multi}/r/{subreddit}',
    'multireddit_user': 'api/multi/user/{user}/',
    'mute_sender': 'api/mute_message_author/',
    'my_contributor': 'subreddits/mine/contributor/',
    'my_moderator': 'subreddits/mine/moderator/',
    'my_multireddits': 'api/multi/mine/',
    'my_subreddits': 'subreddits/mine/subscriber/',
    'preferences': 'api/v1/me/prefs',
    'quarantine_opt_in': 'api/quarantine_optin',
    'quarantine_opt_out': 'api/quarantine_optout',
    'read_message': 'api/read_message/',
    ('removal_comment_'
     'message'): 'api/v1/modactions/removal_comment_message',
    'removal_link_message': 'api/v1/modactions/removal_link_message',
    'remove': 'api/remove/',
    'report': 'api/report/',
    'rules': 'r/{subreddit}/about/rules',
    'save': 'api/save/',
    'search': 'r/{subreddit}/search/',
    'select_flair': 'r/{subreddit}/api/selectflair/',
    'sendreplies': 'api/sendreplies',
    'sent': 'message/sent/',
    'setpermissions': 'r/{subreddit}/api/setpermissions/',
    'site_admin': 'api/site_admin/',
    'spoiler': 'api/spoiler/',
    'sticky_submission': 'api/set_subreddit_sticky/',
    'store_visits': 'api/store_visits',
    'sub_recommended': 'api/recommend/sr/{subreddits}',
    'submission': 'comments/{id}/',
    'submission_replies': 'message/selfreply/',
    'submit': 'api/submit/',
    'subreddit': 'r/{subreddit}/',
    'subreddit_about': 'r/{subreddit}/about/',
    'subreddit_filter': ('api/filter/user/{user}/f/{special}/'
                         'r/{subreddit}'),
    'subreddit_filter_list': 'api/filter/user/{user}/f/{special}',
    'subreddit_random': 'r/{subreddit}/random/',
    'subreddit_settings': 'r/{subreddit}/about/edit/',
    'subreddit_stylesheet': 'r/{subreddit}/api/subreddit_stylesheet/',
    'subreddits_by_topic': 'api/subreddits_by_topic',
    'subreddits_default': 'subreddits/default/',
    'subreddits_gold': 'subreddits/gold/',
    'subreddits_name_search': 'api/search_reddit_names/',
    'subreddits_new': 'subreddits/new/',
    'subreddits_popular': 'subreddits/popular/',
    'subreddits_search': 'subreddits/search/',
    'subscribe': 'api/subscribe/',
    'suggested_sort': 'api/set_suggested_sort/',
    'trophies': 'api/v1/user/{user}/trophies',
    'uncollapse': 'api/uncollapse_message/',
    'unfriend': 'r/{subreddit}/api/unfriend/',
    'unhide': 'api/unhide/',
    'unignore_reports': 'api/unignore_reports/',
    'unlock': 'api/unlock/',
    'unmarknsfw': 'api/unmarknsfw/',
    'unmute_sender': 'api/unmute_message_author/',
    'unread': 'message/unread/',
    'unread_message': 'api/unread_message/',
    'unsave': 'api/unsave/',
    'unspoiler': 'api/unspoiler/',
    'upload_image': 'r/{subreddit}/api/upload_sr_img',
    'user': 'user/{user}/',
    'user_about': 'user/{user}/about/',
    'user_flair': 'r/{subreddit}/api/user_flair_v2',
    'vote': 'api/vote/',
    'widget_create': 'r/{subreddit}/api/widget',
    'widget_lease': 'r/{subreddit}/api/widget_image_upload_s3',
    'widget_modify': 'r/{subreddit}/api/widget/{widget_id}',
    'widget_order': 'r/{subreddit}/api/widget_order/{section}',
    'widgets': 'r/{subreddit}/api/widgets',
    'wiki_edit': 'r/{subreddit}/api/wiki/edit/',
    'wiki_page': 'r/{subreddit}/wiki/{page}',
    'wiki_page_editor': 'r/{subreddit}/api/wiki/alloweditor/{method}',
    'wiki_page_revisions': 'r/{subreddit}/wiki/revisions/{page}',
    'wiki_page_settings': 'r/{subreddit}/wiki/settings/{page}',
    'wiki_pages': 'r/{subreddit}/wiki/pages/',
    'wiki_revisions': 'r/{subreddit}/wiki/revisions/'}

# Image-upload validation constants: file magic headers and size limits
# (sizes are in bytes).
JPEG_HEADER = b'\xff\xd8\xff'
MAX_IMAGE_SIZE = 512000
MIN_PNG_SIZE = 67
MIN_JPEG_SIZE = 128
PNG_HEADER = b'\x89\x50\x4e\x47\x0d\x0a\x1a\x0a'
# '{{}}' leaves a literal '{}' placeholder for the client's own
# user-agent fragment; the PRAW version is baked in here.
USER_AGENT_FORMAT = '{{}} PRAW/{}'.format(__version__)
# pylint: disable=import-error,no-name-in-module,unused-import
# Compatibility shims: expose configparser/urljoin/urlparse under the
# same names on both Python 2 and Python 3.
if sys.version_info.major == 2:
    import ConfigParser as configparser  # NOQA
    from urlparse import urljoin, urlparse  # NOQA
else:
    import configparser  # NOQA
    from urllib.parse import urljoin, urlparse  # NOQA
| |
# Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""OpenStack logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through conf.
"""
import copy
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import socket
import sys
import traceback
from oslo.config import cfg
from oslo.serialization import jsonutils
from oslo.utils import importutils
import six
from six import moves
_PY26 = sys.version_info[0:2] == (2, 6)
from networking-odl.openstack.common._i18n import _
from networking-odl.openstack.common import local
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
common_cli_opts = [
cfg.BoolOpt('debug',
short='d',
default=False,
help='Print debugging output (set logging level to '
'DEBUG instead of default WARNING level).'),
cfg.BoolOpt('verbose',
short='v',
default=False,
help='Print more verbose output (set logging level to '
'INFO instead of default WARNING level).'),
]
logging_cli_opts = [
cfg.StrOpt('log-config-append',
metavar='PATH',
deprecated_name='log-config',
help='The name of a logging configuration file. This file '
'is appended to any existing logging configuration '
'files. For details about logging configuration files, '
'see the Python logging module documentation.'),
cfg.StrOpt('log-format',
metavar='FORMAT',
help='DEPRECATED. '
'A logging.Formatter log message format string which may '
'use any of the available logging.LogRecord attributes. '
'This option is deprecated. Please use '
'logging_context_format_string and '
'logging_default_format_string instead.'),
cfg.StrOpt('log-date-format',
default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Format string for %%(asctime)s in log records. '
'Default: %(default)s .'),
cfg.StrOpt('log-file',
metavar='PATH',
deprecated_name='logfile',
help='(Optional) Name of log file to output to. '
'If no default is set, logging will go to stdout.'),
cfg.StrOpt('log-dir',
deprecated_name='logdir',
help='(Optional) The base directory used for relative '
'--log-file paths.'),
cfg.BoolOpt('use-syslog',
default=False,
help='Use syslog for logging. '
'Existing syslog format is DEPRECATED during I, '
'and will change in J to honor RFC5424.'),
cfg.BoolOpt('use-syslog-rfc-format',
# TODO(bogdando) remove or use True after existing
# syslog format deprecation in J
default=False,
help='(Optional) Enables or disables syslog rfc5424 format '
'for logging. If enabled, prefixes the MSG part of the '
'syslog message with APP-NAME (RFC5424). The '
'format without the APP-NAME is deprecated in I, '
'and will be removed in J.'),
cfg.StrOpt('syslog-log-facility',
default='LOG_USER',
help='Syslog facility to receive log lines.')
]
generic_log_opts = [
cfg.BoolOpt('use_stderr',
default=True,
help='Log output to standard error.')
]
DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN',
'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO',
'oslo.messaging=INFO', 'iso8601=WARN',
'requests.packages.urllib3.connectionpool=WARN',
'urllib3.connectionpool=WARN', 'websocket=WARN',
"keystonemiddleware=WARN", "routes.middleware=WARN",
"stevedore=WARN"]
log_opts = [
cfg.StrOpt('logging_context_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [%(request_id)s %(user_identity)s] '
'%(instance)s%(message)s',
help='Format string to use for log messages with context.'),
cfg.StrOpt('logging_default_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s',
help='Format string to use for log messages without context.'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='Data to append to log format when level is DEBUG.'),
cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
'%(instance)s',
help='Prefix each line of exception output with this format.'),
cfg.ListOpt('default_log_levels',
default=DEFAULT_LOG_LEVELS,
help='List of logger=LEVEL pairs.'),
cfg.BoolOpt('publish_errors',
default=False,
help='Enables or disables publication of error events.'),
cfg.BoolOpt('fatal_deprecations',
default=False,
help='Enables or disables fatal status of deprecations.'),
# NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we
# are just handed a UUID for the instance.
cfg.StrOpt('instance_format',
default='[instance: %(uuid)s] ',
help='The format for an instance that is passed with the log '
'message.'),
cfg.StrOpt('instance_uuid_format',
default='[instance: %(uuid)s] ',
help='The format for an instance UUID that is passed with the '
'log message.'),
]
CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
def list_opts():
    """Entry point for oslo.config-generator.

    Returns deep copies so the generator cannot mutate the registered
    option objects.
    """
    opt_groups = (common_cli_opts, logging_cli_opts,
                  generic_log_opts, log_opts)
    return [(None, copy.deepcopy(opts)) for opts in opt_groups]
# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
#                module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')

# Fallback NullHandler implementation for old Python versions.
try:
    NullHandler = logging.NullHandler
except AttributeError:  # NOTE(jkoelker) NullHandler added in Python 2.7
    class NullHandler(logging.Handler):
        # Discard every record without touching any stream.
        def handle(self, record):
            pass

        def emit(self, record):
            pass

        def createLock(self):
            # No I/O happens, so no lock is needed.
            self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
    """Resolve the log file destination from CONF.log_file / CONF.log_dir.

    Returns the configured file path (joined under log_dir when both are
    set), a per-binary '<log_dir>/<binary>.log' default when only log_dir
    is set, or None when file logging is not configured.
    """
    logfile = CONF.log_file
    logdir = CONF.log_dir
    if logfile:
        return os.path.join(logdir, logfile) if logdir else logfile
    if logdir:
        name = binary or _get_binary_name()
        return '%s.log' % (os.path.join(logdir, name),)
    return None
class BaseLoggerAdapter(logging.LoggerAdapter):
    """LoggerAdapter base that adds audit() and a Python 2.6 shim."""

    def audit(self, msg, *args, **kwargs):
        """Log *msg* at the synthesized AUDIT level (INFO + 1)."""
        self.log(logging.AUDIT, msg, *args, **kwargs)

    def isEnabledFor(self, level):
        if not _PY26:
            return super(BaseLoggerAdapter, self).isEnabledFor(level)
        # LoggerAdapter.isEnabledFor() only exists from Python 2.7 on;
        # delegate to the wrapped logger, which performs the same check,
        # so 2.6 behaves identically.
        return self.logger.isEnabledFor(level)
class LazyAdapter(BaseLoggerAdapter):
    """Adapter that defers creation of the real logger until first use."""
    def __init__(self, name='unknown', version='unknown'):
        # Real logger is built lazily in the `logger` property.
        self._logger = None
        self.extra = {}
        self.name = name
        self.version = version
    @property
    def logger(self):
        """Create (once) and return the real adapter via getLogger()."""
        if not self._logger:
            self._logger = getLogger(self.name, self.version)
            if six.PY3:
                # In Python 3, the code fails because the 'manager' attribute
                # cannot be found when using a LoggerAdapter as the
                # underlying logger. Work around this issue.
                self._logger.manager = self._logger.logger.manager
        return self._logger
class ContextAdapter(BaseLoggerAdapter):
    """Adapter that injects project/version/request-context data into
    each record's 'extra' dict via process().
    """
    # Alias: warn() behaves exactly like warning() for older callers.
    warn = logging.LoggerAdapter.warning
    def __init__(self, logger, project_name, version_string):
        self.logger = logger
        self.project = project_name
        self.version = version_string
        # Maps deprecation message -> list of args already logged, so each
        # unique (msg, args) pair is only warned about once.
        self._deprecated_messages_sent = dict()
    @property
    def handlers(self):
        """Expose the wrapped logger's handler list."""
        return self.logger.handlers
    def deprecated(self, msg, *args, **kwargs):
        """Call this method when a deprecated feature is used.
        If the system is configured for fatal deprecations then the message
        is logged at the 'critical' level and :class:`DeprecatedConfig` will
        be raised.
        Otherwise, the message will be logged (once) at the 'warn' level.
        :raises: :class:`DeprecatedConfig` if the system is configured for
        fatal deprecations.
        """
        stdmsg = _("Deprecated: %s") % msg
        if CONF.fatal_deprecations:
            self.critical(stdmsg, *args, **kwargs)
            raise DeprecatedConfig(msg=stdmsg)
        # Using a list because a tuple with dict can't be stored in a set.
        sent_args = self._deprecated_messages_sent.setdefault(msg, list())
        if args in sent_args:
            # Already logged this message, so don't log it again.
            return
        sent_args.append(args)
        self.warn(stdmsg, *args, **kwargs)
    def process(self, msg, kwargs):
        """Build the 'extra' dict (context, instance, project, version)
        consumed by the formatters; returns the (msg, kwargs) pair.
        """
        # NOTE(jecarey): If msg is not unicode, coerce it into unicode
        #                before it can get to the python logging and
        #                possibly cause string encoding trouble
        if not isinstance(msg, six.text_type):
            msg = six.text_type(msg)
        if 'extra' not in kwargs:
            kwargs['extra'] = {}
        extra = kwargs['extra']
        context = kwargs.pop('context', None)
        if not context:
            # Fall back to any context stashed on local.store.
            context = getattr(local.store, 'context', None)
        if context:
            extra.update(_dictify_context(context))
        instance = kwargs.pop('instance', None)
        # An explicit instance object wins over a bare UUID.
        instance_uuid = (extra.get('instance_uuid') or
                         kwargs.pop('instance_uuid', None))
        instance_extra = ''
        if instance:
            instance_extra = CONF.instance_format % instance
        elif instance_uuid:
            instance_extra = (CONF.instance_uuid_format
                              % {'uuid': instance_uuid})
        extra['instance'] = instance_extra
        extra.setdefault('user_identity', kwargs.pop('user_identity', None))
        extra['project'] = self.project
        extra['version'] = self.version
        # Nest a snapshot under 'extra' so record.extra exists (read by
        # JSONFormatter.format()).
        extra['extra'] = extra.copy()
        return msg, kwargs
class JSONFormatter(logging.Formatter):
    """Formatter that serializes each LogRecord to a JSON document."""
    def __init__(self, fmt=None, datefmt=None):
        # NOTE(jkoelker) we ignore the fmt argument, but its still there
        # since logging.config.fileConfig passes it.
        self.datefmt = datefmt
    def formatException(self, ei, strip_newlines=True):
        """Return the formatted traceback as a list of strings."""
        lines = traceback.format_exception(*ei)
        if strip_newlines:
            # Each traceback entry may span several lines; split them up
            # and drop the empty strings left by trailing newlines.
            lines = [moves.filter(
                lambda x: x,
                line.rstrip().splitlines()) for line in lines]
            lines = list(itertools.chain(*lines))
        return lines
    def format(self, record):
        """Serialize the record's standard attributes to a JSON string."""
        message = {'message': record.getMessage(),
                   'asctime': self.formatTime(record, self.datefmt),
                   'name': record.name,
                   'msg': record.msg,
                   'args': record.args,
                   'levelname': record.levelname,
                   'levelno': record.levelno,
                   'pathname': record.pathname,
                   'filename': record.filename,
                   'module': record.module,
                   'lineno': record.lineno,
                   'funcname': record.funcName,
                   'created': record.created,
                   'msecs': record.msecs,
                   'relative_created': record.relativeCreated,
                   'thread': record.thread,
                   'thread_name': record.threadName,
                   'process_name': record.processName,
                   'process': record.process,
                   'traceback': None}
        # ContextAdapter.process() stores its snapshot under record.extra.
        if hasattr(record, 'extra'):
            message['extra'] = record.extra
        if record.exc_info:
            message['traceback'] = self.formatException(record.exc_info)
        return jsonutils.dumps(message)
def _create_logging_excepthook(product_name):
    """Build a sys.excepthook that logs uncaught exceptions as CRITICAL."""
    def logging_excepthook(exc_type, value, tb):
        # The message is the one-line summary; the full traceback travels
        # through 'exc_info' so formatters can render it.
        summary = "".join(traceback.format_exception_only(exc_type, value))
        getLogger(product_name).critical(summary,
                                         exc_info=(exc_type, value, tb))
    return logging_excepthook
class LogConfigError(Exception):
    """Raised when a logging configuration file cannot be loaded."""

    message = _('Error loading logging config %(log_config)s: %(err_msg)s')

    def __init__(self, log_config, err_msg):
        self.log_config = log_config
        self.err_msg = err_msg

    def __str__(self):
        details = dict(log_config=self.log_config, err_msg=self.err_msg)
        return self.message % details
def _load_log_config(log_config_append):
    """Load a logging config file, wrapping failures in LogConfigError."""
    parse_errors = (moves.configparser.Error, KeyError)
    try:
        logging.config.fileConfig(log_config_append,
                                  disable_existing_loggers=False)
    except parse_errors as exc:
        # Normalize parser and lookup failures into a single error type.
        raise LogConfigError(log_config_append, six.text_type(exc))
def setup(product_name, version='unknown'):
    """Configure logging and install the product-wide excepthook."""
    config_file = CONF.log_config_append
    if config_file:
        _load_log_config(config_file)
    else:
        _setup_logging_from_conf(product_name, version)
    # Uncaught exceptions are logged under the product's logger.
    sys.excepthook = _create_logging_excepthook(product_name)
def set_defaults(logging_context_format_string=None,
                 default_log_levels=None):
    """Override packaged defaults for selected log options.

    Both parameters are optional; only the ones given are applied.  The
    ``default_log_levels`` parameter was introduced after the original
    signature in a backwards-incompatible change, hence the None guards.
    """
    if default_log_levels is not None:
        cfg.set_defaults(log_opts, default_log_levels=default_log_levels)
    if logging_context_format_string is not None:
        cfg.set_defaults(
            log_opts,
            logging_context_format_string=logging_context_format_string)
def _find_facility_from_conf():
    """Map CONF.syslog_log_facility to a SysLogHandler facility code.

    Accepts either an attribute name on SysLogHandler (e.g. 'LOG_USER')
    or a key of SysLogHandler.facility_names (e.g. 'user').

    :returns: the numeric syslog facility.
    :raises TypeError: if the configured facility is not recognized.
    """
    facility_names = logging.handlers.SysLogHandler.facility_names
    facility = getattr(logging.handlers.SysLogHandler,
                       CONF.syslog_log_facility,
                       None)
    if facility is None and CONF.syslog_log_facility in facility_names:
        facility = facility_names.get(CONF.syslog_log_facility)
    if facility is None:
        # BUG FIX: dict.keys() returns a view object in Python 3 which has
        # no .extend(), so the error path itself raised AttributeError.
        # Build a real list before appending the LOG_* constant names.
        valid_facilities = list(facility_names)
        consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
                  'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
                  'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
                  'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
                  'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
        valid_facilities.extend(consts)
        raise TypeError(_('syslog facility must be one of: %s') %
                        ', '.join("'%s'" % fac
                                  for fac in valid_facilities))
    return facility
class RFCSysLogHandler(logging.handlers.SysLogHandler):
    """SysLogHandler that prefixes each message with the program's binary
    name (RFC-style syslog APP-NAME field).
    """
    def __init__(self, *args, **kwargs):
        # Capture the binary name once at construction time.
        self.binary_name = _get_binary_name()
        # Do not use super() unless type(logging.handlers.SysLogHandler)
        # is 'type' (Python 2.7).
        # Use old style calls, if the type is 'classobj' (Python 2.6)
        logging.handlers.SysLogHandler.__init__(self, *args, **kwargs)
    def format(self, record):
        """Format via the base class, then prepend the binary name."""
        # Do not use super() unless type(logging.handlers.SysLogHandler)
        # is 'type' (Python 2.7).
        # Use old style calls, if the type is 'classobj' (Python 2.6)
        msg = logging.handlers.SysLogHandler.format(self, record)
        msg = self.binary_name + ' ' + msg
        return msg
def _setup_logging_from_conf(project, version):
    """Wire the root logger's handlers, formatters and levels from CONF."""
    log_root = getLogger(None).logger
    # Start from a clean slate: drop handlers a previous setup installed.
    # NOTE(review): removing while iterating the live handler list can skip
    # entries; iterating a copy would be safer — confirm before changing.
    for handler in log_root.handlers:
        log_root.removeHandler(handler)
    logpath = _get_log_file_path()
    if logpath:
        filelog = logging.handlers.WatchedFileHandler(logpath)
        log_root.addHandler(filelog)
    if CONF.use_stderr:
        streamlog = ColorHandler()
        log_root.addHandler(streamlog)
    elif not logpath:
        # pass sys.stdout as a positional argument
        # python2.6 calls the argument strm, in 2.7 it's stream
        streamlog = logging.StreamHandler(sys.stdout)
        log_root.addHandler(streamlog)
    if CONF.publish_errors:
        handler = importutils.import_object(
            "oslo.messaging.notify.log_handler.PublishErrorsHandler",
            logging.ERROR)
        log_root.addHandler(handler)
    datefmt = CONF.log_date_format
    for handler in log_root.handlers:
        # NOTE(alaski): CONF.log_format overrides everything currently. This
        # should be deprecated in favor of context aware formatting.
        if CONF.log_format:
            handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                   datefmt=datefmt))
            log_root.info('Deprecated: log_format is now deprecated and will '
                          'be removed in the next release')
        else:
            handler.setFormatter(ContextFormatter(project=project,
                                                  version=version,
                                                  datefmt=datefmt))
    # Root level: debug wins over verbose; default is WARNING.
    if CONF.debug:
        log_root.setLevel(logging.DEBUG)
    elif CONF.verbose:
        log_root.setLevel(logging.INFO)
    else:
        log_root.setLevel(logging.WARNING)
    # Apply per-module overrides from 'logger=LEVEL' pairs.
    for pair in CONF.default_log_levels:
        mod, _sep, level_name = pair.partition('=')
        logger = logging.getLogger(mod)
        # NOTE(AAzza) in python2.6 Logger.setLevel doesn't convert string name
        # to integer code.
        if sys.version_info < (2, 7):
            level = logging.getLevelName(level_name)
            logger.setLevel(level)
        else:
            logger.setLevel(level_name)
    if CONF.use_syslog:
        try:
            facility = _find_facility_from_conf()
            # TODO(bogdando) use the format provided by RFCSysLogHandler
            # after existing syslog format deprecation in J
            if CONF.use_syslog_rfc_format:
                syslog = RFCSysLogHandler(address='/dev/log',
                                          facility=facility)
            else:
                syslog = logging.handlers.SysLogHandler(address='/dev/log',
                                                        facility=facility)
            log_root.addHandler(syslog)
        except socket.error:
            log_root.error('Unable to add syslog handler. Verify that syslog '
                           'is running.')
# Cache of ContextAdapter instances, keyed by logger name.
_loggers = {}


def getLogger(name='unknown', version='unknown'):
    """Return the cached ContextAdapter for *name*, creating on demand."""
    try:
        return _loggers[name]
    except KeyError:
        adapter = ContextAdapter(logging.getLogger(name), name, version)
        _loggers[name] = adapter
        return adapter
def getLazyLogger(name='unknown', version='unknown'):
    """Return a LazyAdapter that defers real logger creation.

    The pass-through adapter delegates every call to the real logger,
    which is only instantiated on first use.
    """
    return LazyAdapter(name, version)
class WritableLogger(object):
    """File-like adapter: each write() becomes one log record."""

    def __init__(self, logger, level=logging.INFO):
        self.logger = logger
        self.level = level

    def write(self, msg):
        # Drop the trailing newline that file protocols append; the logging
        # machinery supplies its own line separators.
        self.logger.log(self.level, msg.rstrip())
class ContextFormatter(logging.Formatter):
    """A context.RequestContext aware formatter configured through flags.
    The flags used to set format strings are: logging_context_format_string
    and logging_default_format_string. You can also specify
    logging_debug_format_suffix to append extra formatting if the log level is
    debug.
    For information about what variables are available for the formatter see:
    http://docs.python.org/library/logging.html#formatter
    If available, uses the context value stored in TLS - local.store.context
    """
    def __init__(self, *args, **kwargs):
        """Initialize ContextFormatter instance
        Takes additional keyword arguments which can be used in the message
        format string.
        :keyword project: project name
        :type project: string
        :keyword version: project version
        :type version: string
        """
        self.project = kwargs.pop('project', 'unknown')
        self.version = kwargs.pop('version', 'unknown')
        logging.Formatter.__init__(self, *args, **kwargs)
    def format(self, record):
        """Uses contextstring if request_id is set, otherwise default."""
        # NOTE(jecarey): If msg is not unicode, coerce it into unicode
        #                before it can get to the python logging and
        #                possibly cause string encoding trouble
        if not isinstance(record.msg, six.text_type):
            record.msg = six.text_type(record.msg)
        # store project info
        record.project = self.project
        record.version = self.version
        # store request info
        context = getattr(local.store, 'context', None)
        if context:
            # Flatten the context dict onto the record so %(key)s works.
            d = _dictify_context(context)
            for k, v in d.items():
                setattr(record, k, v)
        # NOTE(sdague): default the fancier formatting params
        # to an empty string so we don't throw an exception if
        # they get used
        for key in ('instance', 'color', 'user_identity'):
            if key not in record.__dict__:
                record.__dict__[key] = ''
        if record.__dict__.get('request_id'):
            fmt = CONF.logging_context_format_string
        else:
            fmt = CONF.logging_default_format_string
        if (record.levelno == logging.DEBUG and
                CONF.logging_debug_format_suffix):
            fmt += " " + CONF.logging_debug_format_suffix
        # Python >= 3.2 formats through a style object; keep _fmt in sync
        # with it so both code paths render the chosen format string.
        if sys.version_info < (3, 2):
            self._fmt = fmt
        else:
            self._style = logging.PercentStyle(fmt)
            self._fmt = self._style._fmt
        # Cache this on the record, Logger will respect our formatted copy
        if record.exc_info:
            record.exc_text = self.formatException(record.exc_info, record)
        return logging.Formatter.format(self, record)
    def formatException(self, exc_info, record=None):
        """Format exception output with CONF.logging_exception_prefix."""
        if not record:
            return logging.Formatter.formatException(self, exc_info)
        stringbuffer = moves.StringIO()
        traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
                                  None, stringbuffer)
        lines = stringbuffer.getvalue().split('\n')
        stringbuffer.close()
        # Only compute asctime when the prefix actually interpolates it.
        if CONF.logging_exception_prefix.find('%(asctime)') != -1:
            record.asctime = self.formatTime(record, self.datefmt)
        formatted_lines = []
        # Prefix every traceback line with the configured TRACE prefix.
        for line in lines:
            pl = CONF.logging_exception_prefix % record.__dict__
            fl = '%s%s' % (pl, line)
            formatted_lines.append(fl)
        return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
    """StreamHandler that attaches an ANSI escape code per log level.

    The code is exposed as record.color, which ContextFormatter defaults
    to '' when this handler is not installed.
    """
    LEVEL_COLORS = {
        logging.DEBUG: '\033[00;32m', # GREEN
        logging.INFO: '\033[00;36m', # CYAN
        logging.AUDIT: '\033[01;36m', # BOLD CYAN
        logging.WARN: '\033[01;33m', # BOLD YELLOW
        logging.ERROR: '\033[01;31m', # BOLD RED
        logging.CRITICAL: '\033[01;31m', # BOLD RED
    }
    def format(self, record):
        """Attach the level's color code, then delegate to the base class."""
        # NOTE(review): levels without a LEVEL_COLORS entry raise KeyError.
        record.color = self.LEVEL_COLORS[record.levelno]
        return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
    """Raised by ContextAdapter.deprecated() when CONF.fatal_deprecations
    is enabled and a deprecated feature is used.
    """
    message = _("Fatal call to deprecated config: %(msg)s")

    def __init__(self, msg):
        # BUG FIX: super(Exception, self) skipped Exception in the MRO and
        # invoked BaseException.__init__ directly; cooperative super()
        # should name the class it is defined in.
        super(DeprecatedConfig, self).__init__(self.message % dict(msg=msg))
| |
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_vpn_certificate_crl
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    """Patch the module's Connection class for every test (autouse)."""
    connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_vpn_certificate_crl.Connection')
    return connection_class_mock
# NOTE(review): the fixture *function* object (not a mocked connection) is
# passed here; the handler only stores it and the tests patch set/delete,
# so this still works — confirm this is intentional.
fos_instance = FortiOSHandler(connection_mock)
def test_vpn_certificate_crl_creation(mocker):
    """Successful POST reports changed and maps underscored keys to dashes."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    input_data = {
        'username': 'admin',
        'state': 'present',
        'vpn_certificate_crl': {
            'crl': 'test_value_3',
            'http_url': 'test_value_4',
            'last_updated': '5',
            'ldap_password': 'test_value_6',
            'ldap_server': 'test_value_7',
            'ldap_username': 'test_value_8',
            'name': 'default_name_9',
            'range': 'global',
            'scep_cert': 'test_value_11',
            'scep_url': 'test_value_12',
            'source': 'factory',
            'source_ip': '84.230.14.14',
            'update_interval': '15',
            'update_vdom': 'test_value_16'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_vpn_certificate_crl.fortios_vpn_certificate(input_data, fos_instance)
    # The module converts Ansible's underscored option names to the dashed
    # keys the FortiOS API expects.
    expected_data = {
        'crl': 'test_value_3',
        'http-url': 'test_value_4',
        'last-updated': '5',
        'ldap-password': 'test_value_6',
        'ldap-server': 'test_value_7',
        'ldap-username': 'test_value_8',
        'name': 'default_name_9',
        'range': 'global',
        'scep-cert': 'test_value_11',
        'scep-url': 'test_value_12',
        'source': 'factory',
        'source-ip': '84.230.14.14',
        'update-interval': '15',
        'update-vdom': 'test_value_16'
    }
    set_method_mock.assert_called_with('vpn.certificate', 'crl', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_vpn_certificate_crl_creation_fails(mocker):
    """An HTTP 500 from set() surfaces as is_error with changed False."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    input_data = {
        'username': 'admin',
        'state': 'present',
        'vpn_certificate_crl': {
            'crl': 'test_value_3',
            'http_url': 'test_value_4',
            'last_updated': '5',
            'ldap_password': 'test_value_6',
            'ldap_server': 'test_value_7',
            'ldap_username': 'test_value_8',
            'name': 'default_name_9',
            'range': 'global',
            'scep_cert': 'test_value_11',
            'scep_url': 'test_value_12',
            'source': 'factory',
            'source_ip': '84.230.14.14',
            'update_interval': '15',
            'update_vdom': 'test_value_16'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_vpn_certificate_crl.fortios_vpn_certificate(input_data, fos_instance)
    # Underscored option names are mapped to the API's dashed keys.
    expected_data = {
        'crl': 'test_value_3',
        'http-url': 'test_value_4',
        'last-updated': '5',
        'ldap-password': 'test_value_6',
        'ldap-server': 'test_value_7',
        'ldap-username': 'test_value_8',
        'name': 'default_name_9',
        'range': 'global',
        'scep-cert': 'test_value_11',
        'scep-url': 'test_value_12',
        'source': 'factory',
        'source-ip': '84.230.14.14',
        'update-interval': '15',
        'update-vdom': 'test_value_16'
    }
    set_method_mock.assert_called_with('vpn.certificate', 'crl', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_vpn_certificate_crl_removal(mocker):
    """state=absent triggers delete() on the mkey and reports changed."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
    input_data = {
        'username': 'admin',
        'state': 'absent',
        'vpn_certificate_crl': {
            'crl': 'test_value_3',
            'http_url': 'test_value_4',
            'last_updated': '5',
            'ldap_password': 'test_value_6',
            'ldap_server': 'test_value_7',
            'ldap_username': 'test_value_8',
            'name': 'default_name_9',
            'range': 'global',
            'scep_cert': 'test_value_11',
            'scep_url': 'test_value_12',
            'source': 'factory',
            'source_ip': '84.230.14.14',
            'update_interval': '15',
            'update_vdom': 'test_value_16'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_vpn_certificate_crl.fortios_vpn_certificate(input_data, fos_instance)
    delete_method_mock.assert_called_with('vpn.certificate', 'crl', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_vpn_certificate_crl_deletion_fails(mocker):
    """An HTTP 500 from delete() surfaces as is_error with changed False."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
    input_data = {
        'username': 'admin',
        'state': 'absent',
        'vpn_certificate_crl': {
            'crl': 'test_value_3',
            'http_url': 'test_value_4',
            'last_updated': '5',
            'ldap_password': 'test_value_6',
            'ldap_server': 'test_value_7',
            'ldap_username': 'test_value_8',
            'name': 'default_name_9',
            'range': 'global',
            'scep_cert': 'test_value_11',
            'scep_url': 'test_value_12',
            'source': 'factory',
            'source_ip': '84.230.14.14',
            'update_interval': '15',
            'update_vdom': 'test_value_16'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_vpn_certificate_crl.fortios_vpn_certificate(input_data, fos_instance)
    delete_method_mock.assert_called_with('vpn.certificate', 'crl', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_vpn_certificate_crl_idempotent(mocker):
    """A 404 from set() means the object is unchanged: no error, not changed."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    input_data = {
        'username': 'admin',
        'state': 'present',
        'vpn_certificate_crl': {
            'crl': 'test_value_3',
            'http_url': 'test_value_4',
            'last_updated': '5',
            'ldap_password': 'test_value_6',
            'ldap_server': 'test_value_7',
            'ldap_username': 'test_value_8',
            'name': 'default_name_9',
            'range': 'global',
            'scep_cert': 'test_value_11',
            'scep_url': 'test_value_12',
            'source': 'factory',
            'source_ip': '84.230.14.14',
            'update_interval': '15',
            'update_vdom': 'test_value_16'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_vpn_certificate_crl.fortios_vpn_certificate(input_data, fos_instance)
    # Underscored option names are mapped to the API's dashed keys.
    expected_data = {
        'crl': 'test_value_3',
        'http-url': 'test_value_4',
        'last-updated': '5',
        'ldap-password': 'test_value_6',
        'ldap-server': 'test_value_7',
        'ldap-username': 'test_value_8',
        'name': 'default_name_9',
        'range': 'global',
        'scep-cert': 'test_value_11',
        'scep-url': 'test_value_12',
        'source': 'factory',
        'source-ip': '84.230.14.14',
        'update-interval': '15',
        'update-vdom': 'test_value_16'
    }
    set_method_mock.assert_called_with('vpn.certificate', 'crl', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_vpn_certificate_crl_filter_foreign_attributes(mocker):
    """Attributes outside the module schema are dropped before calling set()."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    input_data = {
        'username': 'admin',
        'state': 'present',
        'vpn_certificate_crl': {
            'random_attribute_not_valid': 'tag',
            'crl': 'test_value_3',
            'http_url': 'test_value_4',
            'last_updated': '5',
            'ldap_password': 'test_value_6',
            'ldap_server': 'test_value_7',
            'ldap_username': 'test_value_8',
            'name': 'default_name_9',
            'range': 'global',
            'scep_cert': 'test_value_11',
            'scep_url': 'test_value_12',
            'source': 'factory',
            'source_ip': '84.230.14.14',
            'update_interval': '15',
            'update_vdom': 'test_value_16'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_vpn_certificate_crl.fortios_vpn_certificate(input_data, fos_instance)
    # 'random_attribute_not_valid' must not appear in the payload.
    expected_data = {
        'crl': 'test_value_3',
        'http-url': 'test_value_4',
        'last-updated': '5',
        'ldap-password': 'test_value_6',
        'ldap-server': 'test_value_7',
        'ldap-username': 'test_value_8',
        'name': 'default_name_9',
        'range': 'global',
        'scep-cert': 'test_value_11',
        'scep-url': 'test_value_12',
        'source': 'factory',
        'source-ip': '84.230.14.14',
        'update-interval': '15',
        'update-vdom': 'test_value_16'
    }
    set_method_mock.assert_called_with('vpn.certificate', 'crl', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
| |
# Lint as: python2, python3
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper for providing semantic segmentaion data.
The SegmentationDataset class provides both images and annotations (semantic
segmentation and/or instance segmentation) for TensorFlow. Currently, we
support the following datasets:
1. PASCAL VOC 2012 (http://host.robots.ox.ac.uk/pascal/VOC/voc2012/).
PASCAL VOC 2012 semantic segmentation dataset annotates 20 foreground objects
(e.g., bike, person, and so on) and leaves all the other semantic classes as
one background class. The dataset contains 1464, 1449, and 1456 annotated
images for the training, validation and test respectively.
2. Cityscapes dataset (https://www.cityscapes-dataset.com)
The Cityscapes dataset contains 19 semantic labels (such as road, person, car,
and so on) for urban street scenes.
3. ADE20K dataset (http://groups.csail.mit.edu/vision/datasets/ADE20K)
The ADE20K dataset contains 150 semantic labels both urban street scenes and
indoor scenes.
References:
M. Everingham, S. M. A. Eslami, L. V. Gool, C. K. I. Williams, J. Winn,
and A. Zisserman, The pascal visual object classes challenge a retrospective.
IJCV, 2014.
M. Cordts, M. Omran, S. Ramos, T. Rehfeld, M. Enzweiler, R. Benenson,
U. Franke, S. Roth, and B. Schiele, "The cityscapes dataset for semantic urban
scene understanding," In Proc. of CVPR, 2016.
B. Zhou, H. Zhao, X. Puig, S. Fidler, A. Barriuso, A. Torralba, "Scene Parsing
through ADE20K dataset", In Proc. of CVPR, 2017.
"""
import collections
import os
import tensorflow.compat.v1 as tf
from third_party.deeplab import common
from core import input_preprocess
# Named tuple to describe the dataset properties.
# One immutable record per supported dataset; instances are registered in
# _DATASETS_INFORMATION below.
DatasetDescriptor = collections.namedtuple(
    'DatasetDescriptor',
    [
        'splits_to_sizes',  # Splits of the dataset into training, val and test.
        'num_classes',  # Number of semantic classes, including the
                        # background class (if exists). For example, there
                        # are 20 foreground classes + 1 background class in
                        # the PASCAL VOC 2012 dataset. Thus, we set
                        # num_classes=21.
        'ignore_label',  # Ignore label value.
    ])
# Cityscapes: fine- and coarse-annotation splits; 19 classes, 255 ignored.
_CITYSCAPES_INFORMATION = DatasetDescriptor(
    splits_to_sizes={'train_fine': 2975,
                     'train_coarse': 22973,
                     'trainval_fine': 3475,
                     'trainval_coarse': 23473,
                     'val_fine': 500,
                     'test_fine': 1525},
    num_classes=19,
    ignore_label=255,
)
# To generate the tfrecord, please refer to
# https://github.com/tensorflow/models/blob/master/research/deeplab/datasets/download_and_convert_voc2012.sh
# PASCAL VOC 2012: standard splits plus fractional splits for
# semi-supervised experiments and COCO-converted extra images.
_PASCAL_VOC_SEG_INFORMATION = DatasetDescriptor(
    splits_to_sizes={
        'train': 1464,
        'train_aug': 10582,
        'trainval': 2913,
        'val': 1449,
        # Splits for semi-supervised
        '4_clean': 366,
        '8_clean': 183,
        # Balanced 1/16 split
        # Sample with rejection, suffix represents the sample index
        # e.g., 16_clean_3 represents the 3rd random shuffle to sample 1/16
        # split, given a fixed random seed 8888
        '16_clean_3': 92,
        '16_clean_14': 92,
        '16_clean_20': 92,
        # More images
        'coco2voc_labeled': 91937,
        'coco2voc_unlabeled': 215340,
    },
    num_classes=21,
    ignore_label=255,
)
# ADE20K: 151 classes with label 0 as the ignore label.
_ADE20K_INFORMATION = DatasetDescriptor(
    splits_to_sizes={
        'train': 20210,  # num of samples in images/training
        'val': 2000,  # num of samples in images/validation
    },
    num_classes=151,
    ignore_label=0,
)
# COCO: full splits plus fractional '*_all' splits for semi-supervised
# training and an 'unlabeled' pool.
_COCO_INFORMATION = DatasetDescriptor(
    splits_to_sizes={
        'train': 118287,
        'val': 5000,
        # Splits for semi-supervised
        # e.g., 32_all represents 1/32 split
        '32_all': 3697,
        '64_all': 1849,
        '128_all': 925,
        '256_all': 463,
        '512_all': 232,
        'unlabeled': 123403,
    },
    num_classes=81,
    ignore_label=255,
)
# Registry mapping dataset name -> its DatasetDescriptor; Dataset.__init__
# validates its dataset_name argument against these keys.
_DATASETS_INFORMATION = {
    'cityscapes': _CITYSCAPES_INFORMATION,
    'pascal_voc_seg': _PASCAL_VOC_SEG_INFORMATION,
    'ade20k': _ADE20K_INFORMATION,
    'coco': _COCO_INFORMATION,
}
# Default file pattern of TFRecord of TensorFlow Example.
_FILE_PATTERN = '%s-*'
def get_cityscapes_dataset_name():
  """Returns the registry key used for the Cityscapes dataset."""
  return 'cityscapes'
class Dataset(object):
"""Represents input dataset for deeplab model."""
  def __init__(self,
               dataset_name,
               split_name,
               dataset_dir,
               batch_size,
               crop_size,
               min_resize_value=None,
               max_resize_value=None,
               resize_factor=None,
               min_scale_factor=1.,
               max_scale_factor=1.,
               scale_factor_step_size=0,
               model_variant=None,
               num_readers=1,
               is_training=False,
               should_shuffle=False,
               should_repeat=False,
               with_cls=False,
               cls_only=False,
               strong_weak=False,
               output_valid=False,
               output_original=True):
    """Initializes the dataset.
    Args:
      dataset_name: Dataset name.
      split_name: A train/val Split name.
      dataset_dir: The directory of the dataset sources.
      batch_size: Batch size.
      crop_size: The size used to crop the image and label.
      min_resize_value: Desired size of the smaller image side.
      max_resize_value: Maximum allowed size of the larger image side.
      resize_factor: Resized dimensions are multiple of factor plus one.
      min_scale_factor: Minimum scale factor value.
      max_scale_factor: Maximum scale factor value.
      scale_factor_step_size: The step size from min scale factor to max scale
        factor. The input is randomly scaled based on the value of
        (min_scale_factor, max_scale_factor, scale_factor_step_size).
      model_variant: Model variant (string) for choosing how to mean-subtract
        the images. See feature_extractor.network_map for supported model
        variants.
      num_readers: Number of readers for data provider.
      is_training: Boolean, if dataset is for training or not.
      should_shuffle: Boolean, if should shuffle the input data.
      should_repeat: Boolean, if should repeat the input data.
      with_cls: Boolean, True if we use it for CAM (classification) training
      cls_only: Boolean, True if we only want to keep image-level label
      strong_weak: Output both weak augmented and strong augmented images or not
      output_valid: Output the valid mask to exclude padding region or not
      output_original: Boolean; stored on the instance — presumably controls
        whether the original (un-augmented) sample is also output; confirm
        against the pipeline code that consumes it.
    Raises:
      ValueError: Dataset name and split name are not supported.
    """
    if dataset_name not in _DATASETS_INFORMATION:
      raise ValueError('The specified dataset is not supported yet.')
    self.dataset_name = dataset_name
    splits_to_sizes = _DATASETS_INFORMATION[dataset_name].splits_to_sizes
    if split_name not in splits_to_sizes:
      raise ValueError('data split name %s not recognized' % split_name)
    if model_variant is None:
      tf.logging.warning('Please specify a model_variant. See '
                         'feature_extractor.network_map for supported model '
                         'variants.')
    self.split_name = split_name
    self.dataset_dir = dataset_dir
    self.batch_size = batch_size
    self.crop_size = crop_size
    self.min_resize_value = min_resize_value
    self.max_resize_value = max_resize_value
    self.resize_factor = resize_factor
    self.min_scale_factor = min_scale_factor
    self.max_scale_factor = max_scale_factor
    self.scale_factor_step_size = scale_factor_step_size
    self.model_variant = model_variant
    self.num_readers = num_readers
    self.is_training = is_training
    self.should_shuffle = should_shuffle
    self.should_repeat = should_repeat
    self.cls_only = cls_only
    # cls_only implies with_cls: keeping only image-level labels requires
    # the classification pipeline.
    if cls_only:
      self.with_cls = True
    else:
      self.with_cls = with_cls
    self.strong_weak = strong_weak
    self.output_valid = output_valid
    self.output_original = output_original
    # Class count and ignore label come from the dataset's descriptor.
    self.num_of_classes = _DATASETS_INFORMATION[self.dataset_name].num_classes
    self.ignore_label = _DATASETS_INFORMATION[self.dataset_name].ignore_label
  def _parse_function(self, example_proto):
    """Function to parse the example proto.

    Args:
      example_proto: Proto in the format of tf.Example.

    Returns:
      A dictionary with parsed image, label, height, width and image name.

    Raises:
      ValueError: Label is of wrong shape.
    """
    # Currently only supports jpeg and png.
    # Need to use this logic because the shape is not known for
    # tf.image.decode_image and we rely on this info to
    # extend label if necessary.
    def _decode_image(content, channels):
      return tf.cond(
          tf.image.is_jpeg(content),
          lambda: tf.image.decode_jpeg(content, channels),
          lambda: tf.image.decode_png(content, channels))
    # Feature spec for tf.parse_single_example; absent fields fall back to
    # the listed default values.
    features = {
        'image/encoded':
            tf.FixedLenFeature((), tf.string, default_value=''),
        'image/filename':
            tf.FixedLenFeature((), tf.string, default_value=''),
        'image/format':
            tf.FixedLenFeature((), tf.string, default_value='jpeg'),
        'image/height':
            tf.FixedLenFeature((), tf.int64, default_value=0),
        'image/width':
            tf.FixedLenFeature((), tf.int64, default_value=0),
        'image/segmentation/class/encoded':
            tf.FixedLenFeature((), tf.string, default_value=''),
        'image/segmentation/class/format':
            tf.FixedLenFeature((), tf.string, default_value='png'),
    }
    parsed_features = tf.parse_single_example(example_proto, features)
    image = _decode_image(parsed_features['image/encoded'], channels=3)
    # The segmentation label is only decoded outside the test split.
    label = None
    if self.split_name != common.TEST_SET:
      label = _decode_image(
          parsed_features['image/segmentation/class/encoded'], channels=1)
    image_name = parsed_features['image/filename']
    # NOTE(review): tf.parse_single_example always yields a tensor, so this
    # None check looks like dead defensive code -- confirm before removing.
    if image_name is None:
      image_name = tf.constant('')
    sample = {
        common.IMAGE: image,
        common.IMAGE_NAME: image_name,
        common.HEIGHT: parsed_features['image/height'],
        common.WIDTH: parsed_features['image/width'],
    }
    if label is not None:
      # Normalize the label to rank 3 with one channel: [height, width, 1].
      if label.get_shape().ndims == 2:
        label = tf.expand_dims(label, 2)
      elif label.get_shape().ndims == 3 and label.shape.dims[2] == 1:
        pass
      else:
        raise ValueError('Input label shape must be [height, width], or '
                         '[height, width, 1].')
      label.set_shape([None, None, 1])
      sample[common.LABELS_CLASS] = label
    return sample
  def _preprocess_image(self, sample):
    """Preprocesses the image and label.

    Args:
      sample: A sample containing image and label.

    Returns:
      sample: Sample with preprocessed image and label.

    Raises:
      ValueError: Ground truth label not provided during training.
    """
    image = sample[common.IMAGE]
    label = sample[common.LABELS_CLASS]
    # Three call variants of preprocess_image_and_label, selected by which
    # extra outputs (strong augmentation, valid mask) were requested.
    if not self.strong_weak:
      if not self.output_valid:
        original_image, image, label = input_preprocess.preprocess_image_and_label(
            image=image,
            label=label,
            crop_height=self.crop_size[0],
            crop_width=self.crop_size[1],
            min_resize_value=self.min_resize_value,
            max_resize_value=self.max_resize_value,
            resize_factor=self.resize_factor,
            min_scale_factor=self.min_scale_factor,
            max_scale_factor=self.max_scale_factor,
            scale_factor_step_size=self.scale_factor_step_size,
            ignore_label=self.ignore_label,
            is_training=self.is_training,
            model_variant=self.model_variant)
      else:
        # Additionally returns a mask marking non-padding pixels.
        original_image, image, label, valid = input_preprocess.preprocess_image_and_label(
            image=image,
            label=label,
            crop_height=self.crop_size[0],
            crop_width=self.crop_size[1],
            min_resize_value=self.min_resize_value,
            max_resize_value=self.max_resize_value,
            resize_factor=self.resize_factor,
            min_scale_factor=self.min_scale_factor,
            max_scale_factor=self.max_scale_factor,
            scale_factor_step_size=self.scale_factor_step_size,
            ignore_label=self.ignore_label,
            is_training=self.is_training,
            model_variant=self.model_variant,
            output_valid=self.output_valid)
        sample['valid'] = valid
    else:
      # Returns a strongly augmented view alongside the (weak) image, plus
      # the valid mask.
      original_image, image, label, strong, valid = input_preprocess.preprocess_image_and_label(
          image=image,
          label=label,
          crop_height=self.crop_size[0],
          crop_width=self.crop_size[1],
          min_resize_value=self.min_resize_value,
          max_resize_value=self.max_resize_value,
          resize_factor=self.resize_factor,
          min_scale_factor=self.min_scale_factor,
          max_scale_factor=self.max_scale_factor,
          scale_factor_step_size=self.scale_factor_step_size,
          ignore_label=self.ignore_label,
          is_training=self.is_training,
          model_variant=self.model_variant,
          strong_weak=self.strong_weak)
      sample['strong'] = strong
      sample['valid'] = valid
    sample[common.IMAGE] = image
    if not self.is_training and self.output_original:
      # Original image is only used during visualization.
      sample[common.ORIGINAL_IMAGE] = original_image
    if label is not None:
      sample[common.LABEL] = label
    # Remove common.LABEL_CLASS key in the sample since it is only used to
    # derive label and not used in training and evaluation.
    sample.pop(common.LABELS_CLASS, None)
    # Convert segmentation map to multi-class label
    if self.with_cls and label is not None:
      # Build a (num_classes x num_classes-1) lookup table: row 0 is all
      # zeros (background contributes nothing), row c >= 1 is the one-hot
      # vector for class c.
      base = tf.linalg.LinearOperatorIdentity(
          num_rows=self.num_of_classes - 1, dtype=tf.float32)
      base = base.to_dense()
      zero_filler = tf.zeros([1, self.num_of_classes-1], tf.float32)
      base = tf.concat([zero_filler, base], axis=0)
      # Unique class ids present in the label map, keeping only ids strictly
      # below ignore_label.
      cls = tf.unique(tf.reshape(label, shape=[-1]))[0]
      select = tf.less(cls, self.ignore_label)
      cls = tf.boolean_mask(cls, select)
      # Multi-hot image-level label; gradients do not flow through it.
      cls_label = tf.reduce_sum(tf.gather(base, cls, axis=0), axis=0)
      sample['cls_label'] = tf.stop_gradient(cls_label)
      if self.cls_only:
        del sample[common.LABEL]
    return sample
  def get_one_shot_iterator(self):
    """Gets an iterator that iterates across the dataset once.

    Returns:
      An iterator of type tf.data.Iterator.
    """
    files = self._get_all_files()
    dataset = (
        tf.data.TFRecordDataset(files, num_parallel_reads=self.num_readers)
        .map(self._parse_function, num_parallel_calls=self.num_readers)
        .map(self._preprocess_image, num_parallel_calls=self.num_readers))
    if self.should_shuffle:
      dataset = dataset.shuffle(buffer_size=100)
    if self.should_repeat:
      dataset = dataset.repeat()  # Repeat forever for training.
    else:
      dataset = dataset.repeat(1)
    # When evaluating without original images, only full batches are kept
    # (drop_remainder=True) so the batch dimension is static; otherwise a
    # smaller final batch is allowed.
    if not self.output_original and not self.is_training:
      dataset = dataset.batch(self.batch_size, drop_remainder=True).prefetch(self.batch_size)
    else:
      dataset = dataset.batch(self.batch_size).prefetch(self.batch_size)
    return dataset.make_one_shot_iterator()
def _get_all_files(self):
"""Gets all the files to read data from.
Returns:
A list of input files.
"""
file_pattern = _FILE_PATTERN
file_pattern = os.path.join(self.dataset_dir,
file_pattern % self.split_name)
return tf.gfile.Glob(file_pattern)
| |
"""pblite message schemas and related enums."""
# Stop pylint from complaining about enums:
# pylint: disable=too-few-public-methods
import enum
from hangups.pblite import Message, Field, RepeatedField, EnumField
##############################################################################
# Enums
##############################################################################
class TypingStatus(enum.Enum):
    """Typing statuses."""
    TYPING = 1  # The user started typing
    PAUSED = 2  # The user stopped typing with inputted text
    STOPPED = 3  # The user stopped typing with no inputted text
class FocusStatus(enum.Enum):
    """Focus statuses."""
    FOCUSED = 1
    UNFOCUSED = 2
class FocusDevice(enum.Enum):
    """Focus devices."""
    DESKTOP = 20
    MOBILE = 300
    # Some focus events carry no device value at all.
    UNSPECIFIED = None
class ConversationType(enum.Enum):
    """Conversation type."""
    STICKY_ONE_TO_ONE = 1
    GROUP = 2
class ClientConversationView(enum.Enum):
    """Conversation view."""
    UNKNOWN_CONVERSATION_VIEW = 0
    INBOX_VIEW = 1
    ARCHIVED_VIEW = 2
class ClientNotificationLevel(enum.Enum):
    """Notification level."""
    # Some conversations carry no notification level value.
    UNKNOWN = None
    QUIET = 10
    RING = 30
class ClientConversationStatus(enum.Enum):
    """Conversation status."""
    UNKNOWN_CONVERSATION_STATUS = 0
    INVITED = 1
    ACTIVE = 2
    LEFT = 3
class SegmentType(enum.Enum):
    """Message content segment type."""
    TEXT = 0
    LINE_BREAK = 1
    LINK = 2
class MembershipChangeType(enum.Enum):
    """Conversation membership change type."""
    JOIN = 1
    LEAVE = 2
class ClientHangoutEventType(enum.Enum):
    """Hangout event type."""
    # Not sure all of these are correct
    START_HANGOUT = 1
    END_HANGOUT = 2
    JOIN_HANGOUT = 3
    LEAVE_HANGOUT = 4
    HANGOUT_COMING_SOON = 5
    ONGOING_HANGOUT = 6
class OffTheRecordStatus(enum.Enum):
    """Off-the-record status."""
    OFF_THE_RECORD = 1
    ON_THE_RECORD = 2
class ClientOffTheRecordToggle(enum.Enum):
    """Off-the-record toggle status."""
    # Note the inverted sense: ENABLED is 0 and DISABLED is 1.
    ENABLED = 0
    DISABLED = 1
class ActiveClientState(enum.Enum):
    """Active client state."""
    NO_ACTIVE_CLIENT = 0
    IS_ACTIVE_CLIENT = 1
    OTHER_CLIENT_IS_ACTIVE = 2
##############################################################################
# pblite Messages
##############################################################################
# Wrapper message holding a conversation's unique ID.
CONVERSATION_ID = Message(
    ('id_', Field()),
)
# Identifies a user by gaia_id and chat_id.
USER_ID = Message(
    ('gaia_id', Field()),
    ('chat_id', Field()),
)
# Same fields as USER_ID, but the whole message may be absent.
OPTIONAL_USER_ID = Message(
    ('gaia_id', Field()),
    ('chat_id', Field()),
    is_optional=True,
)
# Notification that a user's typing status changed in a conversation.
CLIENT_SET_TYPING_NOTIFICATION = Message(
    ('conversation_id', CONVERSATION_ID),
    ('user_id', USER_ID),
    ('timestamp', Field()),
    ('status', EnumField(TypingStatus)),
    is_optional=True,
)
# Notification that a user focused or unfocused a conversation.
CLIENT_SET_FOCUS_NOTIFICATION = Message(
    ('conversation_id', CONVERSATION_ID),
    ('user_id', USER_ID),
    ('timestamp', Field()),
    ('status', EnumField(FocusStatus)),
    ('device', EnumField(FocusDevice)),
    is_optional=True,
)
# Full conversation state: type, name, per-user read state, OTR status and
# participant list. Entries named None are positions whose meaning has not
# been identified.
CLIENT_CONVERSATION = Message(
    ('conversation_id', CONVERSATION_ID),
    ('type_', EnumField(ConversationType)),
    ('name', Field(is_optional=True)),
    ('self_conversation_state', Message(
        (None, Field(is_optional=True)),
        (None, Field(is_optional=True)),
        (None, Field(is_optional=True)),
        (None, Field(is_optional=True)),
        (None, Field(is_optional=True)),
        (None, Field(is_optional=True)),
        ('self_read_state', Message(
            ('participant_id', USER_ID),
            ('latest_read_timestamp', Field()),
        )),
        ('status', EnumField(ClientConversationStatus)),
        ('notification_level', EnumField(ClientNotificationLevel)),
        ('view', RepeatedField(
            EnumField(ClientConversationView)
        )),
        ('inviter_id', USER_ID),
        ('invite_timestamp', Field()),
        ('sort_timestamp', Field(is_optional=True)),
        ('active_timestamp', Field(is_optional=True)),
        (None, Field(is_optional=True)),
        (None, Field(is_optional=True)),
        (None, Field()),
        (None, Field()),
    )),
    (None, Field()),
    (None, Field()),
    (None, Field(is_optional=True)),
    ('read_state', RepeatedField(
        Message(
            ('participant_id', USER_ID),
            ('last_read_timestamp', Field()),
        )
    )),
    (None, Field()),
    ('otr_status', EnumField(OffTheRecordStatus)),
    (None, Field()),
    (None, Field()),
    ('current_participant', RepeatedField(USER_ID)),
    ('participant_data', RepeatedField(
        Message(
            ('id_', USER_ID),
            ('fallback_name', Field(is_optional=True)),
            (None, Field(is_optional=True)),
        )
    )),
    (None, Field(is_optional=True)),
    (None, Field(is_optional=True)),
    (None, Field(is_optional=True)),
    (None, Field()),
    (None, Field()),
    is_optional=True,
)
# One segment of a chat message: text, line break or link, with optional
# formatting.
MESSAGE_SEGMENT = Message(
    ('type_', EnumField(SegmentType)),
    ('text', Field(is_optional=True)),  # Can be None for linebreaks
    ('formatting', Message(
        ('bold', Field(is_optional=True)),
        ('italic', Field(is_optional=True)),
        ('strikethrough', Field(is_optional=True)),
        ('underline', Field(is_optional=True)),
        is_optional=True,
    )),
    ('link_data', Message(
        ('link_target', Field(is_optional=True)),
        is_optional=True,
    )),
)
# Embedded attachment (e.g. a photo) within a chat message.
MESSAGE_ATTACHMENT = Message(
    ('embed_item', Message(
        # 249 (PLUS_PHOTO), 340, 335, 0
        ('type_', RepeatedField(Field())),
        ('data', Field()),  # can be a dict
    )),
)
# A chat message: its segments plus any attachments.
CLIENT_CHAT_MESSAGE = Message(
    (None, Field(is_optional=True)),  # always None?
    ('annotation', RepeatedField(Field(), is_optional=True)),
    ('message_content', Message(
        ('segment', RepeatedField(MESSAGE_SEGMENT, is_optional=True)),
        ('attachment', RepeatedField(MESSAGE_ATTACHMENT, is_optional=True)),
    )),
    is_optional=True,
)
# Event payload for a conversation rename.
CLIENT_CONVERSATION_RENAME = Message(
    ('new_name', Field()),
    ('old_name', Field()),
    is_optional=True,
)
# Event payload describing a video hangout state change.
CLIENT_HANGOUT_EVENT = Message(
    ('event_type', EnumField(ClientHangoutEventType)),
    ('participant_id', RepeatedField(USER_ID)),
    ('hangout_duration_secs', Field(is_optional=True)),
    ('transferred_conversation_id', Field(is_optional=True)),  # always None?
    ('refresh_timeout_secs', Field(is_optional=True)),
    ('is_periodic_refresh', Field(is_optional=True)),
    (None, Field(is_optional=True)),  # always 1?
    is_optional=True,
)
# Event payload for an off-the-record status change.
CLIENT_OTR_MODIFICATION = Message(
    ('old_otr_status', EnumField(OffTheRecordStatus)),
    ('new_otr_status', EnumField(OffTheRecordStatus)),
    ('old_otr_toggle', EnumField(ClientOffTheRecordToggle)),
    ('new_otr_toggle', EnumField(ClientOffTheRecordToggle)),
    is_optional=True,
)
# Event payload for users joining or leaving a conversation.
CLIENT_MEMBERSHIP_CHANGE = Message(
    ('type_', EnumField(MembershipChangeType)),
    (None, RepeatedField(Field())),
    ('participant_ids', RepeatedField(USER_ID)),
    (None, Field()),
    is_optional=True,
)
# A conversation event; exactly which payload field is populated depends on
# the event (chat message, membership change, rename, hangout, OTR change).
CLIENT_EVENT = Message(
    ('conversation_id', CONVERSATION_ID),
    ('sender_id', OPTIONAL_USER_ID),
    ('timestamp', Field()),
    ('self_event_state', Message(
        ('user_id', USER_ID),
        ('client_generated_id', Field(is_optional=True)),
        ('notification_level', EnumField(ClientNotificationLevel)),
        is_optional=True,
    )),
    (None, Field(is_optional=True)),  # always None?
    (None, Field(is_optional=True)),  # always 0? (expiration_timestamp?)
    ('chat_message', CLIENT_CHAT_MESSAGE),
    (None, Field(is_optional=True)),  # always None?
    ('membership_change', CLIENT_MEMBERSHIP_CHANGE),
    ('conversation_rename', CLIENT_CONVERSATION_RENAME),
    ('hangout_event', CLIENT_HANGOUT_EVENT),
    ('event_id', Field(is_optional=True)),
    ('advances_sort_timestamp', Field(is_optional=True)),
    ('otr_modification', CLIENT_OTR_MODIFICATION),
    (None, Field(is_optional=True)),  # 0, 1 or None? related to notifications?
    ('event_otr', EnumField(OffTheRecordStatus)),
    (None, Field()),  # always 1? (advances_sort_timestamp?)
)
# Wrapper carrying a single CLIENT_EVENT in a state update.
CLIENT_EVENT_NOTIFICATION = Message(
    ('event', CLIENT_EVENT),
    is_optional=True,
)
# Notification that a participant's read watermark advanced.
CLIENT_WATERMARK_NOTIFICATION = Message(
    ('participant_id', USER_ID),
    ('conversation_id', CONVERSATION_ID),
    ('latest_read_timestamp', Field()),
    is_optional=True,
)
# Header present on every state update.
CLIENT_STATE_UPDATE_HEADER = Message(
    ('active_client_state', EnumField(ActiveClientState)),
    (None, Field(is_optional=True)),
    ('request_trace_id', Field()),
    (None, Field(is_optional=True)),
    ('current_server_time', Field()),
    (None, Field(is_optional=True)),
    (None, Field(is_optional=True)),
    # optional ID of the client causing the update?
    (None, Field(is_optional=True)),
)
# Top-level push update; only one of the *_notification fields is expected
# to be populated per update.
CLIENT_STATE_UPDATE = Message(
    ('state_update_header', CLIENT_STATE_UPDATE_HEADER),
    ('conversation_notification', Field(is_optional=True)),  # always None?
    ('event_notification', CLIENT_EVENT_NOTIFICATION),
    ('focus_notification', CLIENT_SET_FOCUS_NOTIFICATION),
    ('typing_notification', CLIENT_SET_TYPING_NOTIFICATION),
    ('notification_level_notification', Field(is_optional=True)),
    ('reply_to_invite_notification', Field(is_optional=True)),
    ('watermark_notification', CLIENT_WATERMARK_NOTIFICATION),
    (None, Field(is_optional=True)),
    ('settings_notification', Field(is_optional=True)),
    ('view_modification', Field(is_optional=True)),
    ('easter_egg_notification', Field(is_optional=True)),
    ('client_conversation', CLIENT_CONVERSATION),
    ('self_presence_notification', Field(is_optional=True)),
    ('delete_notification', Field(is_optional=True)),
    ('presence_notification', Field(is_optional=True)),
    ('block_notification', Field(is_optional=True)),
    ('invitation_watermark_notification', Field(is_optional=True)),
)
# Pagination token for fetching more events of a conversation.
CLIENT_EVENT_CONTINUATION_TOKEN = Message(
    ('event_id', Field(is_optional=True)),
    ('storage_continuation_token', Field()),
    ('event_timestamp', Field()),
    is_optional=True,
)
# A conversation together with its recent events and continuation token.
CLIENT_CONVERSATION_STATE = Message(
    ('conversation_id', CONVERSATION_ID),
    ('conversation', CLIENT_CONVERSATION),
    ('event', RepeatedField(CLIENT_EVENT)),
    (None, Field(is_optional=True)),
    ('event_continuation_token', CLIENT_EVENT_CONTINUATION_TOKEN),
    (None, Field(is_optional=True)),
    (None, RepeatedField(Field())),
)
CLIENT_CONVERSATION_STATE_LIST = RepeatedField(CLIENT_CONVERSATION_STATE)
# A contact/user entity with display properties.
CLIENT_ENTITY = Message(
    (None, Field(is_optional=True)),
    (None, Field(is_optional=True)),
    (None, Field(is_optional=True)),
    (None, Field(is_optional=True)),
    (None, Field(is_optional=True)),
    (None, Field(is_optional=True)),
    (None, Field(is_optional=True)),
    (None, Field(is_optional=True)),
    ('id_', USER_ID),
    ('properties', Message(
        ('type_', Field(is_optional=True)),  # 0, 1, or None
        ('display_name', Field(is_optional=True)),
        ('first_name', Field(is_optional=True)),
        ('photo_url', Field(is_optional=True)),
        ('emails', RepeatedField(Field())),
    )),
)
# A named group of entities returned by the initial-entities request.
ENTITY_GROUP = Message(
    (None, Field()),  # always 0?
    (None, Field()),  # some sort of ID
    ('entity', RepeatedField(Message(
        ('entity', CLIENT_ENTITY),
        (None, Field()),  # always 0?
    ))),
)
# Response carrying the initial set of known entities and entity groups.
INITIAL_CLIENT_ENTITIES = Message(
    (None, Field()),  # 'cgserp'
    (None, Field()),  # a header
    ('entities', RepeatedField(CLIENT_ENTITY)),
    (None, Field(is_optional=True)),  # always None?
    ('group1', ENTITY_GROUP),
    ('group2', ENTITY_GROUP),
    ('group3', ENTITY_GROUP),
    ('group4', ENTITY_GROUP),
    ('group5', ENTITY_GROUP),
)
# Response to the get-self-info request.
CLIENT_GET_SELF_INFO_RESPONSE = Message(
    (None, Field()),  # 'cgsirp'
    (None, Field()),  # response header
    ('self_entity', CLIENT_ENTITY),
)
# Common header present on API responses.
CLIENT_RESPONSE_HEADER = Message(
    ('status', Field()),  # 1 => success
    (None, Field(is_optional=True)),
    (None, Field(is_optional=True)),
    ('request_trace_id', Field()),
    ('current_server_time', Field()),
)
# Response to the sync-all-new-events request.
CLIENT_SYNC_ALL_NEW_EVENTS_RESPONSE = Message(
    (None, Field()),  # 'csanerp'
    ('response_header', CLIENT_RESPONSE_HEADER),
    ('sync_timestamp', Field()),
    ('conversation_state', RepeatedField(CLIENT_CONVERSATION_STATE)),
)
# Response to the get-conversation request.
CLIENT_GET_CONVERSATION_RESPONSE = Message(
    (None, Field()),  # 'cgcrp'
    ('response_header', CLIENT_RESPONSE_HEADER),
    ('conversation_state', CLIENT_CONVERSATION_STATE),
)
# Response to the get-entity-by-id request.
CLIENT_GET_ENTITY_BY_ID_RESPONSE = Message(
    (None, Field()),  # 'cgebirp'
    ('response_header', CLIENT_RESPONSE_HEADER),
    ('entities', RepeatedField(CLIENT_ENTITY)),
)
| |
# -*- coding: utf-8 -*-
from lettuce import step, world
from nose.tools import assert_equals, assert_true, assert_in
import fudge
from editor.models import Dataset, Format
from editor.tests.factories import SourceFactory
def _get_formset_block(name):
if name == 'Data files':
fieldset_index = 0
elif name == 'Documents':
fieldset_index = 1
else:
raise Exception('Do not now the location of the %s fieldset' % name)
fieldset = world.elems('fieldset')[fieldset_index]
assert_equals(
fieldset.find_element_by_css_selector('legend').text,
name)
return fieldset
@step(u'(Given|and) source edit page is opened')
def and_source_edit_page_is_opened(step, unused):
    # Create a Source via the factory and open its edit page in the browser.
    source = SourceFactory()
    world.browser.get('%s%s' % (world.live_server_url, source.get_absolute_url()))
@step(u'(Then|and) I see dataset edit form')
def then_i_see_dataset_edit_form(step, unused):
    # The title input is the marker that the edit form rendered.
    assert_true(world.elem('#id_title').is_displayed())
@step(u'and I see Data files and Documents blocks')
def and_i_see_data_files_and_documents_blocks(step):
    # Exactly two fieldsets are expected on the form.
    legends = [x.text for x in world.elems('fieldset legend')]
    assert_equals(len(legends), 2)
    assert_in('Data files', legends)
    assert_in('Documents', legends)
@step(u'and I see "([^"]*)" button in the "([^"]*)" block')
def and_i_see_button_with_given_text_in_the_given_block(step, button_text, fieldset_legend):
    # Buttons are matched by their visible text within the fieldset.
    formset = _get_formset_block(fieldset_legend)
    buttons = [x.text for x in formset.find_elements_by_css_selector('button')]
    assert_in(button_text, buttons)
@step(u'(Given|and) dataset create form is opened')
def and_dataset_create_form_is_opened(step, unused):
    source = SourceFactory()
    world.browser.get(
        '%s%s' % (world.live_server_url, source.get_dataset_create_url()))
@step(u'When I populate dataset form title with "([^"]*)"')
def when_i_populate_dataset_form_title_with_given(step, title):
    world.elem('#id_title').send_keys(title)
@step(u'and I populate other fields of the dataset form with random values')
def and_i_populate_other_fields_of_the_dataset_form_with_random_values(step):
    # populate required minimum
    def _fill(selector, keys):
        """Clear the input identified by selector, then type keys into it."""
        field = world.elem(selector)
        field.clear()
        field.send_keys(keys)

    _fill('#id_start_year', 1976)
    _fill('#id_end_year', 1976)
    _fill('#id_coverage', '?')
    # Region is a <select>: pick the first real option.
    world.elems('#id_region option')[1].click()
    for selector, value in (
            ('#id_page', 'http://ya.ru'),
            ('#id_download_page', 'http://ya.ru'),
            ('#id_entry_time_minutes', '15'),
            ('#id_vid', 'd0001')):
        _fill(selector, value)
@step(u'Then new dataset with "([^"]*)" creates')
def then_new_dataset_with_given_title_creates(step, title):
    # Poll until exactly one Dataset with the given title exists in the DB.
    def created(browser):
        return Dataset.objects.filter(title=title).count() == 1
    world.wait(created, msg='Timeout waiting Dataset creation')
@step(u'(Then|and) I see dataset edit form')
def and_i_see_dataset_edit_form(step, unused):
    # NOTE(review): this step pattern duplicates then_i_see_dataset_edit_form
    # above, and unlike that one it does not assert the is_displayed() result
    # -- confirm which registration lettuce actually uses.
    world.elem('#id_title').is_displayed()
@step(u'and I populate "([^"]*)" first form with "([^"]*)", "([^"]*)" and "([^"]*)"')
def and_i_populate_formset_first_form(step, formset_name, name, format, url):
    # NOTE: the `format` argument (third capture group) shadows the builtin
    # and is unused here -- the format is chosen by clicking option[1] below.
    formset = _get_formset_block(formset_name)
    formset.find_element_by_css_selector('.formset .name').send_keys(name)
    formset.find_elements_by_css_selector('.formset select option')[1].click()
    formset.find_element_by_css_selector('.formset .url').send_keys(url)
@step(u'and "([^"]*)" dataset contains "([^"]*)" data file')
def and_dataset_contains_tiven_data_file_name(step, title, datafile_name):
    # (Function name typo "tiven" is harmless: lookup is via the step regex.)
    dataset = Dataset.objects.get(title=title)
    assert_in(datafile_name, [x.name for x in dataset.datafile_set.all()])
@step(u'and "([^"]*)" dataset contains "([^"]*)" document file')
def and_dataset_contains_given_document_file_name(step, title, doc_filename):
    dataset = Dataset.objects.get(title=title)
    assert_in(doc_filename, [x.name for x in dataset.documentfile_set.all()])
@step(u'and dataset form contains "([^"]*)" data file')
def and_dataset_form_contains_given_data_file(step, datafile_name):
    formset = _get_formset_block('Data files')
    name_elems = formset.find_elements_by_css_selector('.formset .name')
    form_names = [x.get_attribute('value') for x in name_elems]
    assert_in(datafile_name, form_names)
@step(u'and dataset form contains "([^"]*)" document file')
def and_dataset_form_contains_given_document_file(step, doc_name):
    formset = _get_formset_block('Documents')
    # Only the first name input is checked; the documents formset is assumed
    # to hold a single filled-in form here.
    form_name = formset\
        .find_element_by_css_selector('.formset .name')\
        .get_attribute('value')
    assert_equals(form_name, doc_name)
@step(u'(When|and) I check input next to the "([^"]*)" url')
def when_i_check_input_next_to_the_given_url(step, unused, url_text):
    # Click the checkbox in the table row whose link text matches url_text.
    for tr in world.elems('table.links tbody.content tr'):
        if tr.find_element_by_css_selector('a').text == url_text:
            tr.find_element_by_css_selector('input').click()
            return
    raise Exception('Table row with link with %s text was not found' % url_text)
@step(u'When I click on "([^"]*)" button inside "([^"]*)" block')
def when_i_click_on_given_button_inside_given_block(step, button_text, fieldset_legend):
    # Click the first button in the fieldset whose visible text matches.
    formset = _get_formset_block(fieldset_legend)
    for btn in formset.find_elements_by_css_selector('button'):
        if btn.text == button_text:
            btn.click()
            return
    raise Exception('Button with %s text was not found inside %s block' % (button_text, fieldset_legend))
@step(u'and I populate download page with "([^"]*)" url')
def and_i_populate_download_page_with_group1_page(step, url):
    world.elem('#id_download_page').clear()
    world.elem('#id_download_page').send_keys(url)
@step(u'Then I see popup with urls scrapped from http server')
def then_i_see_popup_with_urls_scrapped_from_http_server(step):
    popup = world.elem('#remoteLinksModal')
    assert_true(popup.is_displayed())
    # Poll until the scraped file names show up in the modal (filled in
    # asynchronously after the popup opens).
    def contains_file1(browser):
        popup = world.elem('#remoteLinksModal')
        return 'file1.csv' in popup.text
    world.wait(contains_file1, msg='Timeout while waiting file1.csv')
    def contains_file2(browser):
        popup = world.elem('#remoteLinksModal')
        return 'file2.csv' in popup.text
    world.wait(contains_file2, msg='Timeout while waiting file2.csv')
@step(u'Then I see "([^"]*)" urls added to "([^"]*)" formset')
def then_i_see_urls_added_to_given_formset(step, urls, fieldset_legend):
    # `urls` is a comma-separated list captured from the step text.
    urls = urls.split(',')
    formset = _get_formset_block(fieldset_legend)
    rendered_urls = [
        x.get_attribute('value') for x in formset.find_elements_by_css_selector('.formset .name')]
    for url in urls:
        assert_in(url.strip(), rendered_urls)
@step(u'When I select "([^"]*)" file format in both urls')
def when_i_select_given_file_format_in_both_urls(step, fieldset_legend):
    # NOTE(review): the captured group is a format name but the parameter is
    # named fieldset_legend and is unused; the block is hard-coded to
    # 'Data files' -- confirm this is intended.
    formset = _get_formset_block('Data files')
    for i, select in enumerate(formset.find_elements_by_css_selector('.formset select')):
        select.find_elements_by_css_selector('option')[1].click()
        if i == 1:
            # do not change last empty form
            return
@step(u'and GET response to example.com returns links: "([^"]*)"')
def and_get_response_to_example_com_returns_links(step, links):
    links = links.split(',')
    # Minimal stand-in for a requests.Response object.
    class FakeResponse(object):
        status_code = 200
        def __init__(self, content):
            self.content = content
    links_html = []
    for link in links:
        links_html.append('<a href="{link}">{link}</a>'.format(link=link.strip()))
    fake_resp = FakeResponse('<html><body>%s</body></html>' % '\n'.join(links_html))
    # replace requests.get method with fake method returning our html.
    from editor.utils import requests
    fudge.patch_object(requests, 'get', fudge.Fake().is_callable().returns(fake_resp))
@step(u'and I see "([^"]*)" format name near each data file')
def and_i_see_format_name_near_each_data_file(step, format_name):
    # `found` guards against an empty table, which would otherwise pass the
    # loop vacuously.
    found = False
    for tr in world.elems('table.links tbody.content tr'):
        assert_equals(
            tr.find_element_by_css_selector('.format-name').text,
            format_name)
        found = True
    assert_true(found)
@step(u'and I see "([^"]*)" file format is selected')
def and_i_see_file_format_is_selected(step, format_name):
    format = Format.objects.get(name=format_name)
    select = world.elems('#datafiles .dynamic-formset-form select')[1]
    # NOTE: `unicode` is Python 2 only; this suite predates Python 3.
    assert_equals(select.get_attribute('value'), unicode(format.id))
| |
# python3
# coding=utf-8
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom GCS Hook for generating blobs from GCS."""
import enum
import io
import json
from typing import Generator, Text, List, Dict, Any
from airflow.contrib.hooks import gcs_hook as base_hook
from googleapiclient import errors as googleapiclient_errors
from googleapiclient import http
from gps_building_blocks.airflow.hooks import input_hook_interface
from gps_building_blocks.airflow.utils import blob
from gps_building_blocks.airflow.utils import errors
_PLATFORM = 'GCS'  # Source-platform label attached to generated Blob objects.
_START_POSITION_IN_BLOB = 0  # Blobs are always read from the beginning.
class BlobContentTypes(enum.Enum):
  """Blob content formats supported by GoogleCloudStorageHook."""
  JSON = enum.auto()
  CSV = enum.auto()
class GoogleCloudStorageHook(base_hook.GoogleCloudStorageHook,
input_hook_interface.InputHookInterface):
"""Extends the Google Cloud Storage hook.
Used for chunked download of blobs, and blob generation.
The Blobs must satisfy the following conditions:
- Content is formatted as newline-delimited events.
- Content is formatted as UTF-8.
- Content is validly formatted as one of the types in BlobContentTypes.
- The first line in a CSV blob is the fields labels
Attributes:
bucket: Unique name of the bucket holding the target blob.
prefix: The path to a location within the bucket.
content_type: Blob's content type described by BlobContentTypes.
"""
def __init__(self, bucket: Text, content_type: Text, prefix: Text) -> None:
"""Initiates GoogleCloudStorageHook.
Args:
bucket: Unique name of the bucket holding the target blob.
content_type: Blob's content type described by BlobContentTypes.
prefix: The path to a location within the bucket.
"""
self._verify_content_type(content_type)
self.bucket = bucket
self.content_type = content_type
self.prefix = prefix
super(GoogleCloudStorageHook, self).__init__()
def _verify_content_type(self, content_type: Text) -> None:
"""Validates content_type matches one of the supported formats.
The content type must be one of the formats listed in BlobContentTypes.
Args:
content_type: GCS content type to verify.
Raises:
DataInConnectorValueError: If the content type format is invalid.
"""
if content_type not in BlobContentTypes.__members__:
raise errors.DataInConnectorValueError(
'Invalid GCS blob content type. The supported types are: %s.' %
', '.join([name for name, item in BlobContentTypes.__members__.items(
)]))
def _gcs_blob_chunk_generator(self, blob_name: Text
) -> Generator[bytes, None, None]:
"""Downloads and generates chunks from given blob.
The base GoogleCloudStorageHook only allows downloading an entire file.
To enable handling large files this class provides a chunk-wise download of
bytes within the blob.
Args:
blob_name: Unique location within the bucket for the target blob.
Yields:
Chunks of the given blob, formatted as bytes.
Raises:
DataInConnectorError: When download failed.
"""
done = False
outio = io.BytesIO()
try:
request = self.get_conn().objects().get_media(bucket=self.bucket, # pytype: disable=attribute-error
object=blob_name)
downloader = http.MediaIoBaseDownload(outio, request)
except googleapiclient_errors.HttpError as error:
raise errors.DataInConnectorError(
error=error, msg='Failed to download the blob.')
while not done:
outio.truncate(0)
outio.seek(0)
try:
status, done = downloader.next_chunk()
except googleapiclient_errors.HttpError as error:
raise errors.DataInConnectorError(
error=error, msg='Failed to download the blob.')
self.log.debug('Blob loading: {}%'.format(int(status.progress() * 100)))
yield outio.getvalue()
def _parse_events_as_json(self, parsable_events: List[bytes]
) -> List[Dict[Any, Any]]:
"""Parses a list of events as JSON.
Args:
parsable_events: Bytes events to parse.
Returns:
A list of events formatted as JSON.
Raises:
DataInConnectorBlobParseError: When parsing the blob was unsuccessful.
"""
try:
return [json.loads(event.decode('utf-8')) for event in parsable_events]
except (json.JSONDecodeError, UnicodeDecodeError) as error:
raise errors.DataInConnectorBlobParseError(
error=error, msg='Failed to parse the blob as JSON.')
def _parse_events_as_csv(self, parsable_events: List[bytes]
) -> List[Dict[Any, Any]]:
"""Parses a list of events as CSV.
Args:
parsable_events: Bytes events to parse.
Returns:
A list of events formatted as CSV.
Raises:
DataInConnectorBlobParseError: When parsing the blob was unsuccessful.
"""
try:
fields = parsable_events[0].decode('utf-8').split(',')
events = [dict(zip(fields, event.decode('utf-8').split(',')))
for event in parsable_events[1:]]
except (ValueError, UnicodeDecodeError) as error:
raise errors.DataInConnectorBlobParseError(
error=error, msg='Failed to parse the blob as CSV')
if not all(len(event) == len(fields) for event in events):
raise errors.DataInConnectorBlobParseError(
msg='Failed to parse CSV, not all lines have same length.')
return events
def _parse_events_by_content_type(self, parsable_events: List[bytes]
) -> List[Dict[Any, Any]]:
"""Parses a list of events as content_type.
Args:
parsable_events: Bytes events to parse.
Returns:
A list of events formatted as content_type.
"""
if not parsable_events:
return []
if self.content_type == BlobContentTypes.CSV.name:
return self._parse_events_as_csv(parsable_events)
else:
return self._parse_events_as_json(parsable_events)
def get_blob_events(self, blob_name: Text) -> List[Dict[Any, Any]]:
"""Gets blob's contents.
Args:
blob_name: The location and file name of the blob in the bucket.
Returns:
A list of events formatted as content_type.
"""
events: List[bytes] = []
buffer: bytes = b''
blob_chunks_generator = self._gcs_blob_chunk_generator(blob_name=blob_name)
for chunk in blob_chunks_generator:
buffer += chunk
if buffer.startswith(b'\n'):
buffer = buffer[1:]
events.extend(buffer.splitlines())
# Last event might be incomplete. In this case we save the last line back
# into the buffer
buffer = events.pop() if not buffer.endswith(b'\n') and events else b''
if buffer:
events.append(buffer)
return self._parse_events_by_content_type(events)
def events_blobs_generator(self) -> Generator[blob.Blob, None, None]:
    """Generates all blobs from the bucket's prefix location.

    Blobs whose contents fail to parse are still yielded, but with an
    ERROR status and an empty event list, so callers can record the
    failure instead of losing the blob silently.

    Yields:
      A generator that generates Blob objects from blob contents within a
      prefix location in the bucket.

    Raises:
      DataInConnectorError: When listing blob in bucket returns a HttpError.
    """
    try:
        blob_names = self.list(bucket=self.bucket, prefix=self.prefix)
    except googleapiclient_errors.HttpError as error:
        raise errors.DataInConnectorError(
            error=error, msg='Failed to get list of blobs from bucket.')
    for blob_name in blob_names:
        url = 'gs://{}/{}'.format(self.bucket, blob_name)
        # Exclude folders from uploading to Datastore.
        if not blob_name.endswith('/'):
            try:
                events = self.get_blob_events(blob_name)
                yield blob.Blob(events=events, blob_id=url, platform=_PLATFORM,
                                source=self.bucket, location=blob_name,
                                position=_START_POSITION_IN_BLOB)
            except (errors.DataInConnectorBlobParseError,
                    errors.DataInConnectorError) as error:
                # Surface the parse failure as an ERROR blob rather than
                # aborting the whole generator.
                yield blob.Blob(events=[], blob_id=url, platform=_PLATFORM,
                                source=self.bucket, location=blob_name,
                                position=_START_POSITION_IN_BLOB,
                                status=blob.BlobStatus.ERROR, status_desc=str(error))
| |
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Time related utilities and helper functions.
"""
import calendar
import datetime
import time
import iso8601
import six
# ISO 8601 extended time format with microseconds
_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND
def isotime(at=None, subsecond=False):
    """Stringify time in ISO 8601 format."""
    at = at or utcnow()
    fmt = _ISO8601_TIME_FORMAT_SUBSECOND if subsecond else _ISO8601_TIME_FORMAT
    stamp = at.strftime(fmt)
    # Naive datetimes are assumed to be UTC.
    tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
    suffix = 'Z' if tz == 'UTC' else tz
    return stamp + suffix
def parse_isotime(timestr):
    """Parse time from ISO 8601 format."""
    try:
        return iso8601.parse_date(timestr)
    except (iso8601.ParseError, TypeError) as e:
        # Normalize library errors to ValueError for callers.
        raise ValueError(six.text_type(e))
def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
    """Returns formatted utcnow."""
    return (at or utcnow()).strftime(fmt)
def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
    """Turn a formatted time back into a datetime."""
    parsed = datetime.datetime.strptime(timestr, fmt)
    return parsed
def normalize_time(timestamp):
    """Normalize time in arbitrary timezone to UTC naive object."""
    offset = timestamp.utcoffset()
    if offset is None:
        # Already naive -- nothing to normalize.
        return timestamp
    naive = timestamp.replace(tzinfo=None)
    return naive - offset
def is_older_than(before, seconds):
    """Return True if before is older than seconds."""
    if isinstance(before, six.string_types):
        before = parse_strtime(before)
    # Compare naive datetimes against utcnow().
    before = before.replace(tzinfo=None)
    return utcnow() - before > datetime.timedelta(seconds=seconds)
def is_newer_than(after, seconds):
    """Return True if after is newer than seconds."""
    if isinstance(after, six.string_types):
        after = parse_strtime(after)
    # Compare naive datetimes against utcnow().
    after = after.replace(tzinfo=None)
    return after - utcnow() > datetime.timedelta(seconds=seconds)
def utcnow_ts():
    """Timestamp version of our utcnow function."""
    if utcnow.override_time is not None:
        return calendar.timegm(utcnow().timetuple())
    # NOTE(kgriffs): This is several times faster
    # than going through calendar.timegm(...)
    return int(time.time())
def utcnow():
    """Overridable version of utils.utcnow."""
    override = utcnow.override_time
    if override:
        try:
            # A list override yields its entries one call at a time.
            return override.pop(0)
        except AttributeError:
            # Not a list: a single fixed datetime.
            return override
    return datetime.datetime.utcnow()
def iso8601_from_timestamp(timestamp):
    """Returns a iso8601 formatted date from timestamp."""
    dt = datetime.datetime.utcfromtimestamp(timestamp)
    return isotime(dt)
utcnow.override_time = None
def set_time_override(override_time=None):
    """Overrides utils.utcnow.

    Make it return a constant time or a list thereof, one at a time.

    :param override_time: datetime instance or list thereof. If not
                          given, defaults to the current UTC time.
    """
    default = datetime.datetime.utcnow()
    utcnow.override_time = override_time or default
def advance_time_delta(timedelta):
    """Advance overridden time using a datetime.timedelta.

    Works for both a single overridden datetime and a list of them.

    :param timedelta: the datetime.timedelta to add to the override(s).
    """
    assert utcnow.override_time is not None
    try:
        # BUG FIX: the previous ``for dt in ...: dt += timedelta`` only
        # rebound the loop variable, leaving the list unchanged; rebuild
        # the list so each entry is actually advanced.
        utcnow.override_time = [dt + timedelta
                                for dt in utcnow.override_time]
    except TypeError:
        # Not iterable: a single datetime override.
        utcnow.override_time += timedelta
def advance_time_seconds(seconds):
    """Advance overridden time by seconds."""
    delta = datetime.timedelta(seconds=seconds)
    advance_time_delta(delta)
def clear_time_override():
    """Remove the overridden time.

    After this call utcnow() returns the real current time again.
    """
    utcnow.override_time = None
def marshall_now(now=None):
    """Make an rpc-safe datetime with microseconds.

    Note: tzinfo is stripped, but not required for relative times.
    """
    now = now or utcnow()
    return {'day': now.day, 'month': now.month, 'year': now.year,
            'hour': now.hour, 'minute': now.minute, 'second': now.second,
            'microsecond': now.microsecond}
def unmarshall_time(tyme):
    """Unmarshall a datetime dict (as produced by marshall_now)."""
    return datetime.datetime(
        tyme['year'], tyme['month'], tyme['day'],
        tyme['hour'], tyme['minute'], tyme['second'],
        tyme['microsecond'])
def delta_seconds(before, after):
    """Return the difference between two timing objects.

    Compute the difference in seconds between two date, time, or
    datetime objects (as a float, to microsecond resolution).
    """
    return total_seconds(after - before)
def total_seconds(delta):
    """Return the total seconds of datetime.timedelta object.

    datetime.timedelta lacks total_seconds() on Python 2.6, so fall
    back to computing the value manually.
    """
    try:
        return delta.total_seconds()
    except AttributeError:
        whole_seconds = delta.days * 24 * 3600 + delta.seconds
        return whole_seconds + float(delta.microseconds) / (10 ** 6)
def is_soon(dt, window):
    """Determines if time is going to happen in the next window seconds.

    :param dt: the time
    :param window: minimum seconds to remain to consider the time not soon
    :return: True if expiration is within the given duration
    """
    deadline = utcnow() + datetime.timedelta(seconds=window)
    return normalize_time(dt) <= deadline
| |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""build query for doclistview and return results"""
import frappe, json
from six.moves import range
import frappe.permissions
from frappe.model.db_query import DatabaseQuery
from frappe import _
from six import text_type, string_types, StringIO
# imports - third-party imports
import pymysql
@frappe.whitelist()
@frappe.read_only()
def get():
    """Return compressed list-view results for the current request."""
    params = get_form_params()
    return compress(execute(**params), args=params)
def execute(doctype, *args, **kwargs):
    # Thin wrapper: delegate straight to DatabaseQuery for the doctype.
    return DatabaseQuery(doctype).execute(*args, **kwargs)
def get_form_params():
    """Stringify GET request parameters.

    Decodes JSON-encoded request parameters and strips out any field
    whose docfield has ``report_hide`` set.
    """
    data = frappe._dict(frappe.local.form_dict)

    del data["cmd"]
    if "csrf_token" in data:
        del data["csrf_token"]

    # Parameters arrive as JSON strings over HTTP; decode them once.
    if isinstance(data.get("filters"), string_types):
        data["filters"] = json.loads(data["filters"])
    if isinstance(data.get("fields"), string_types):
        data["fields"] = json.loads(data["fields"])
    if isinstance(data.get("docstatus"), string_types):
        data["docstatus"] = json.loads(data["docstatus"])
    if isinstance(data.get("save_user_settings"), string_types):
        data["save_user_settings"] = json.loads(data["save_user_settings"])
    else:
        data["save_user_settings"] = True

    fields = data["fields"]

    # BUG FIX: iterate over a copy -- removing from ``fields`` while
    # iterating it silently skipped the element after each removal.
    for field in list(fields):
        key = field.split(" as ")[0]

        if key.startswith('count('):
            continue

        if "." in key:
            parenttype, fieldname = key.split(".")[0][4:-1], key.split(".")[1].strip("`")
        else:
            parenttype = data.doctype
            fieldname = field.strip("`")
        df = frappe.get_meta(parenttype).get_field(fieldname)
        report_hide = df.report_hide if df else None

        # remove the field from the query if the report hide flag is set
        if report_hide:
            fields.remove(field)

    # queries must always be server side
    data.query = None

    return data
def compress(data, args=None):
    """Separate keys and values.

    Converts a list of row dicts into ``{"keys": [...], "values": [...]}``,
    optionally appending a totals row.

    :param data: list of dicts, each sharing the keys of the first row.
    :param args: optional dict; ``add_total_row``/``doctype`` control the
        totals row. (Was a mutable default argument ``{}``.)
    """
    if not data:
        return data
    if args is None:
        args = {}

    keys = list(data[0])
    values = [[row[key] for key in keys] for row in data]

    if args.get("add_total_row"):
        # Imported lazily: only needed when a totals row is requested.
        from frappe.desk.query_report import add_total_row
        meta = frappe.get_meta(args.doctype)
        values = add_total_row(values, keys, meta)

    return {
        "keys": keys,
        "values": values
    }
@frappe.whitelist()
def save_report():
    """save report"""
    data = frappe.local.form_dict

    if frappe.db.exists('Report', data['name']):
        doc = frappe.get_doc('Report', data['name'])
    else:
        # New report: seed the builder-specific defaults.
        doc = frappe.new_doc('Report')
        doc.report_name = data['name']
        doc.ref_doctype = data['doctype']
        doc.report_type = "Report Builder"

    doc.json = data['json']
    frappe.get_doc(doc).save()
    frappe.msgprint(_("{0} is saved").format(doc.name))
    return doc.name
@frappe.whitelist()
def export_query():
    """export from report builder"""
    # Pull the standard list-view parameters and lift out the ones that
    # control the export itself (they must not reach DatabaseQuery).
    form_params = get_form_params()
    form_params["limit_page_length"] = None
    form_params["as_list"] = True
    doctype = form_params.doctype
    add_totals_row = None
    file_format_type = form_params["file_format_type"]

    del form_params["doctype"]
    del form_params["file_format_type"]

    if 'add_totals_row' in form_params and form_params['add_totals_row']=='1':
        add_totals_row = 1
        del form_params["add_totals_row"]

    # Permission gate: raises when the user may not export this doctype.
    frappe.permissions.can_export(doctype, raise_exception=True)

    # Restrict to explicitly selected documents, when provided.
    if 'selected_items' in form_params:
        si = json.loads(frappe.form_dict.get('selected_items'))
        form_params["filters"] = {"name": ("in", si)}
        del form_params["selected_items"]

    db_query = DatabaseQuery(doctype)
    ret = db_query.execute(**form_params)

    if add_totals_row:
        ret = append_totals_row(ret)

    # First row is the header: a serial-number column plus field labels.
    data = [['Sr'] + get_labels(db_query.fields, doctype)]
    for i, row in enumerate(ret):
        data.append([i+1] + list(row))

    if file_format_type == "CSV":
        # convert to csv
        import csv
        from frappe.utils.xlsxutils import handle_html

        f = StringIO()
        writer = csv.writer(f)
        for r in data:
            # encode only unicode type strings and not int, floats etc.
            # NOTE(review): .encode('utf-8') plus text_type(..., 'utf-8')
            # is Python-2 style round-tripping -- confirm target runtime.
            writer.writerow([handle_html(frappe.as_unicode(v)).encode('utf-8') \
                if isinstance(v, string_types) else v for v in r])

        f.seek(0)
        frappe.response['result'] = text_type(f.read(), 'utf-8')
        frappe.response['type'] = 'csv'
        frappe.response['doctype'] = doctype

    elif file_format_type == "Excel":
        from frappe.utils.xlsxutils import make_xlsx
        xlsx_file = make_xlsx(data, doctype)

        frappe.response['filename'] = doctype + '.xlsx'
        frappe.response['filecontent'] = xlsx_file.getvalue()
        frappe.response['type'] = 'binary'
def append_totals_row(data):
    """Append a row that sums every numeric column of *data*."""
    if not data:
        return data
    rows = list(data)
    # Non-numeric columns keep "" in the totals row.
    totals = [""] * len(rows[0])
    for row in rows:
        for idx, cell in enumerate(row):
            if isinstance(cell, (float, int)):
                totals[idx] = (totals[idx] or 0) + cell
    rows.append(totals)
    return rows
def get_labels(fields, doctype):
    """get column labels based on column names"""
    labels = []
    for key in fields:
        key = key.split(" as ")[0]

        if "." in key:
            # Dotted keys look like `tabParent`.`fieldname`.
            parenttype, fieldname = key.split(".")[0][4:-1], key.split(".")[1].strip("`")
        else:
            parenttype = doctype
            # BUG FIX: was ``fieldname = fieldname.strip("`")``, which
            # read ``fieldname`` before assignment (NameError); derive
            # the fieldname from the key instead.
            fieldname = key.strip("`")
        df = frappe.get_meta(parenttype).get_field(fieldname)
        label = df.label if df else fieldname.title()
        if label in labels:
            # Disambiguate duplicate labels with the doctype prefix.
            label = doctype + ": " + label
        labels.append(label)

    return labels
@frappe.whitelist()
def delete_items():
    """delete selected items"""
    import json

    # Delete in reverse-sorted order; collect names that fail.
    items = sorted(json.loads(frappe.form_dict.get('items')), reverse=True)
    doctype = frappe.form_dict.get('doctype')
    failed = []

    for idx, name in enumerate(items):
        try:
            frappe.delete_doc(doctype, name)
            if len(items) >= 5:
                frappe.publish_realtime("progress",
                    dict(progress=[idx + 1, len(items)], title=_('Deleting {0}').format(doctype), description=name),
                    user=frappe.session.user)
        except Exception:
            failed.append(name)

    return failed
@frappe.whitelist()
@frappe.read_only()
def get_sidebar_stats(stats, doctype, filters=None):
    """Return tag categories defined for *doctype* plus its tag stats.

    ``filters`` previously used a mutable default argument (``[]``);
    ``None`` now stands in for "no filters".
    """
    if filters is None:
        filters = []

    # Tags explicitly assigned to this doctype via tag categories.
    cat_tags = frappe.db.sql("""select tag.parent as category, tag.tag_name as tag
from `tabTag Doc Category` as docCat
INNER JOIN tabTag as tag on tag.parent = docCat.parent
where docCat.tagdoc=%s
ORDER BY tag.parent asc,tag.idx""", doctype, as_dict=1)

    return {"defined_cat": cat_tags, "stats": get_stats(stats, doctype, filters)}
@frappe.whitelist()
def get_stats(stats, doctype, filters=None):
    """get tag info

    ``filters`` previously used a mutable default argument (``[]``).
    """
    import json

    if filters is None:
        filters = []
    tags = json.loads(stats)
    if filters:
        filters = json.loads(filters)
    stats = {}

    try:
        columns = frappe.db.get_table_columns(doctype)
    except pymysql.InternalError:
        # raised when _user_tags column is added on the fly
        columns = []

    for tag in tags:
        if tag not in columns:
            continue
        try:
            tagcount = frappe.get_list(doctype, fields=[tag, "count(*)"],
                filters=filters + ["ifnull(`%s`,'')!=''" % tag], group_by=tag, as_list=True)

            if tag == '_user_tags':
                stats[tag] = scrub_user_tags(tagcount)
                # Add a "No Tags" bucket counting untagged documents.
                stats[tag].append([_("No Tags"), frappe.get_list(doctype,
                    fields=[tag, "count(*)"],
                    filters=filters + ["({0} = ',' or {0} = '' or {0} is null)".format(tag)], as_list=True)[0][1]])
            else:
                stats[tag] = tagcount

        except frappe.SQLError:
            # does not work for child tables
            pass
        except pymysql.InternalError:
            # raised when _user_tags column is added on the fly
            pass

    return stats
@frappe.whitelist()
def get_filter_dashboard_data(stats, doctype, filters=None):
    """get tags info

    ``filters`` previously used a mutable default argument (``[]``).
    """
    import json

    if filters is None:
        filters = []
    tags = json.loads(stats)
    if filters:
        filters = json.loads(filters)
    stats = {}

    columns = frappe.db.get_table_columns(doctype)
    for tag in tags:
        if tag["name"] not in columns:
            continue
        tagcount = []
        # Date-type fields are not aggregated per distinct value.
        if tag["type"] not in ['Date', 'Datetime']:
            tagcount = frappe.get_list(doctype,
                fields=[tag["name"], "count(*)"],
                filters=filters + ["ifnull(`%s`,'')!=''" % tag["name"]],
                group_by=tag["name"],
                as_list=True)

        if tag["type"] not in ['Check', 'Select', 'Date', 'Datetime', 'Int',
            'Float', 'Currency', 'Percent'] and tag['name'] not in ['docstatus']:
            stats[tag["name"]] = list(tagcount)
            if stats[tag["name"]]:
                # Add a "No Data" bucket counting empty/null values.
                data = ["No Data", frappe.get_list(doctype,
                    fields=[tag["name"], "count(*)"],
                    filters=filters + ["({0} = '' or {0} is null)".format(tag["name"])],
                    as_list=True)[0][1]]
                if data and data[1] != 0:
                    stats[tag["name"]].append(data)
        else:
            stats[tag["name"]] = tagcount

    return stats
def scrub_user_tags(tagcount):
    """rebuild tag list for tags"""
    counts = {}
    # Each key may hold several comma-separated tags; split and sum.
    for raw_tags, count in dict(tagcount).items():
        if not raw_tags:
            continue
        for tag in raw_tags.split(','):
            if tag:
                counts[tag] = counts.get(tag, 0) + count
    return [[tag, total] for tag, total in counts.items()]
def get_match_cond(doctype):
    """Return the escaped match-conditions clause; used when building
    queries in queries.py."""
    cond = DatabaseQuery(doctype).build_match_conditions()
    if not cond:
        return ""
    # Escape literal % so the clause survives later string interpolation.
    return (' and ' + cond).replace("%", "%%")
def build_match_conditions(doctype, user=None, as_condition=True):
    """Build match conditions for *doctype*, SQL-escaped when returned
    as a condition string."""
    conditions = DatabaseQuery(doctype, user=user).build_match_conditions(as_condition=as_condition)
    return conditions.replace("%", "%%") if as_condition else conditions
def get_filters_cond(doctype, filters, conditions, ignore_permissions=None, with_match_conditions=False):
    """Build an SQL condition string (" and ...") from list/dict filters.

    Returns '' when there are no filters.
    """
    if isinstance(filters, string_types):
        filters = json.loads(filters)

    if filters:
        flt = filters
        if isinstance(filters, dict):
            # Normalize a dict of filters into the [doctype, field, op,
            # value] lists that DatabaseQuery understands.
            filters = filters.items()
            flt = []
            for f in filters:
                if isinstance(f[1], string_types) and f[1][0] == '!':
                    # '!value' shorthand means "not equal".
                    flt.append([doctype, f[0], '!=', f[1][1:]])
                elif isinstance(f[1], (list, tuple)) and \
                    f[1][0] in (">", "<", ">=", "<=", "like", "not like", "in", "not in", "between"):
                    # (operator, operand) tuple/list form.
                    flt.append([doctype, f[0], f[1][0], f[1][1]])
                else:
                    flt.append([doctype, f[0], '=', f[1]])

        query = DatabaseQuery(doctype)
        query.filters = flt
        query.conditions = conditions

        if with_match_conditions:
            query.build_match_conditions()

        query.build_filter_conditions(flt, conditions, ignore_permissions)

        cond = ' and ' + ' and '.join(query.conditions)
    else:
        cond = ''
    return cond
| |
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.test.test_actor
~~~~~~~~~~~~~~~~~~~~~
Tests of the Actor framework.
"""
import logging
import itertools
import gc
from contextlib import nested
from gevent.event import AsyncResult
import mock
from calico.felix.actor import actor_message, ResultOrExc, SplitBatchAndRetry
from calico.felix.test.base import BaseTestCase
from calico.felix import actor
# Logger
log = logging.getLogger(__name__)
# noinspection PyUnresolvedReferences
class TestActor(BaseTestCase):
    """Tests of the Actor batching/message machinery.

    Messages are queued with async=True and then pumped synchronously
    via _step(), so no real greenlet is needed for most tests.
    (Python 2 only: ``async`` is a reserved word on Python 3.7+.)
    """

    def setUp(self):
        super(TestActor, self).setUp()
        self._actor = ActorForTesting()
        # Wrap (not replace) the batch hooks so the real behaviour runs
        # but call arguments can still be asserted on.
        mock.patch.object(self._actor, "_start_msg_batch",
                          wraps=self._actor._start_msg_batch).start()
        mock.patch.object(self._actor, "_finish_msg_batch",
                          wraps=self._actor._finish_msg_batch).start()

    def run_actor_loop(self):
        # Synchronously drain the actor's queue (one batch).
        self._actor._step()

    @mock.patch("gevent.Greenlet.start", autospec=True)
    def test_start(self, m_start):
        """
        Tests starting the actor starts its greenlet.
        """
        actor = self._actor.start()
        m_start.assert_called_once_with(self._actor.greenlet)
        self.assertEqual(actor, self._actor)

    def test_single_msg(self):
        """
        Tests a batch with one message in it is correctly processed
        on the queue with start/finish batch wrapped around it.
        """
        self._actor.do_a(async=True)
        # Nothing should happen since it should be queued.
        self.assertEqual(self._actor.actions, [])
        self.run_actor_loop()
        # Then we should get a start, batch of only a and a finish.
        self.assertEqual(self._actor.actions, ["sb", "a", "fb"])

    def test_batch(self):
        """
        Tests a batch with multiple messages in it is correctly processed
        on the queue with start/finish batch wrapped around it.
        """
        self._actor.do_a(async=True)
        self._actor.do_a(async=True)
        self._actor.do_b(async=True)
        self._actor.do_a(async=True)
        # Nothing should happen since it should be queued.
        self.assertEqual(self._actor.actions, [])
        self.run_actor_loop()
        # Then we should get a start, batch of only a and a finish.
        self.assertEqual(self._actor.actions, ["sb", "a", "a", "b", "a", "fb"])

    def test_exception(self):
        """
        Tests an exception raised by an event method is returned to the
        correct AsyncResult.
        """
        f_a = self._actor.do_a(async=True)
        f_exc = self._actor.do_exc(async=True)
        f_b = self._actor.do_b(async=True)
        self.run_actor_loop()
        self.assertTrue(f_a.ready())
        self.assertTrue(f_exc.ready())
        self.assertTrue(f_b.ready())
        self.assertEqual("a", f_a.get())
        self.assertEqual("b", f_b.get())
        self.assertRaises(ExpectedException, f_exc.get)
        # wait_and_check surfaces the failure from the list of futures.
        self.assertRaises(ExpectedException, actor.wait_and_check,
                          [f_a, f_b, f_exc])
        # The failing message must not abort the rest of the batch.
        self.assertEqual(self._actor.actions, ["sb", "a", "exc", "b", "fb"])
        self._actor._finish_msg_batch.assert_called_once_with(mock.ANY, [
            ResultOrExc(result='a', exception=None),
            ResultOrExc(result=None, exception=EXPECTED_EXCEPTION),
            ResultOrExc(result='b', exception=None),
        ])

    def test_split_batch(self):
        """
        Tests an exception raised by an event method is returned to the
        correct AsyncResult.
        """
        f_a1 = self._actor.do_a(async=True)
        f_b1 = self._actor.do_b(async=True)
        f_a2 = self._actor.do_a(async=True)
        f_b2 = self._actor.do_b(async=True)
        f_a3 = self._actor.do_a(async=True)
        # Should see these batches:
        # Odd number:
        # [a, b, a, b, a] -> Split
        # [a, b] PENDING: [a, b, a] -> Split
        #   Optimization: [b] gets pushed on front of pending batch.
        # [a] PENDING: [b, a, b, a] -> OK
        # Even number:
        # [b, a, b, a] -> Split
        # [b, a] PENDING: [b, a] -> OK
        # [b, a] -> OK
        self._actor._finish_side_effects = iter([
            SplitBatchAndRetry(),
            SplitBatchAndRetry(),
            None,
            SplitBatchAndRetry(),
            None,
            None,
        ])
        self.run_actor_loop()
        self.assertEqual(self._actor.batches, [
            ["sb", "a", "b", "a" ,"b", "a", "fb"],
            ["sb", "a", "b", "fb"],
            ["sb", "a", "fb"],
            ["sb", "b", "a", "b", "a", "fb"],
            ["sb", "b", "a", "fb"],
            ["sb", "b", "a", "fb"],
        ])

    def test_split_batch_exc(self):
        # An exception from _finish_msg_batch itself (not from a handler).
        f_a = self._actor.do_a(async=True)
        f_exc = self._actor.do_exc(async=True)
        self._actor._finish_side_effects = iter([
            FinishException()
        ])
        self.run_actor_loop()
        # Gets reported to all callers, which is a bit ugly but something has
        # gone very wrong if we're not dealing with failures in _finish.
        self.assertTrue(f_a.ready())
        self.assertTrue(f_exc.ready())
        self.assertRaises(FinishException, f_a.get)
        self.assertRaises(FinishException, f_exc.get)

    def test_own_batch(self):
        # do_own_batch is declared needs_own_batch, so it must split the
        # surrounding messages into separate batches.
        f_a = self._actor.do_a(async=True)
        f_b = self._actor.do_b(async=True)
        f_own = self._actor.do_own_batch(async=True)
        f_a2 = self._actor.do_a(async=True)
        f_b2 = self._actor.do_b(async=True)
        self.run_actor_loop()
        self.assertTrue(f_a.ready())
        self.assertTrue(f_b.ready())
        self.assertTrue(f_own.ready())
        self.assertTrue(f_a2.ready())
        self.assertTrue(f_b2.ready())
        self.assertEqual(self._actor.batches, [
            ["sb", "a", "b", "fb"],
            ["sb", "own", "fb"],
            ["sb", "a", "b", "fb"],
        ])

    def test_blocking_call(self):
        self._actor.start()  # Really start it.
        self._actor.do_a(async=False)
        self.assertRaises(ExpectedException, self._actor.do_exc, async=False)

    def test_same_actor_call(self):
        """
        Test events can call each other as normal methods, bypassing the
        queue.
        """
        self._actor.start()  # really start it.
        self.assertEqual("c1c2", self._actor.do_c(async=False))

    def test_full_queue(self):
        # When the queue is full, the put must not block (deadlock risk).
        eq = self._actor._event_queue
        with nested(mock.patch.object(eq, "full", autospec=True),
                    mock.patch.object(eq, "put", autospec=True)) as \
                (m_full, m_put):
            m_full.return_value = True
            self._actor.do_a(async=True)
            self.assertFalse(m_put.call_args[1]["block"])

    def test_loop_coverage(self):
        # _loop must propagate exceptions raised by _step.
        with mock.patch.object(self._actor, "_step", autospec=True) as m_step:
            m_step.side_effect = ExpectedException()
            self.assertRaises(ExpectedException, self._actor._loop)

    @mock.patch("gevent.sleep", autospec=True)
    def test_yield(self, m_sleep):
        # After max_ops_before_yield operations the actor must yield once.
        self._actor.max_ops_before_yield = 2
        self._actor.start()  # Really start it.
        self._actor.do_a(async=False)
        self._actor.do_a(async=False)
        self._actor.do_a(async=False)
        m_sleep.assert_called_once_with()

    def test_wait_and_check_no_input(self):
        # Must be a no-op, not an error, for an empty list of futures.
        actor.wait_and_check([])
class TestExceptionTracking(BaseTestCase):
    """Tests leak detection for TrackedAsyncResults.

    A TrackedAsyncResult holding an exception that is garbage-collected
    without ever being read should be reported and trigger an exit(1).
    NOTE(review): self._m_exit is presumably a mock of the exit hook
    installed by BaseTestCase -- confirm in base.py.
    """

    @mock.patch("calico.felix.actor._print_to_stderr", autospec=True)
    def test_exception(self, _print):
        """
        Test a simulated exception leak.
        """
        # Since the weak refs are cleaned up lazily, grab strong references to
        # any that are currently alive to prevent our baseline from changing
        # under us.
        refs_at_start = set([ref() for ref in
                             actor._tracked_refs_by_idx.values()])
        num_refs_at_start = len(refs_at_start)

        # Now do our test: leak a result with an exception attached.
        ar = actor.TrackedAsyncResult("foo")
        ar.set_exception(Exception())
        self.assertEqual(num_refs_at_start + 1, len(actor._tracked_refs_by_idx))
        del ar  # Enough to trigger cleanup in CPython, with exact ref counts.
        gc.collect()  # For PyPy, we have to force a cleanup
        self._m_exit.assert_called_once_with(1)
        self.assertTrue(_print.called)
        # The leaked result's tag ("foo") should appear in the report.
        self.assertTrue("foo" in _print.call_args[0][0])
        self._m_exit.reset_mock()

        # Re-grab the set of references for comparison
        refs_at_end = set([ref() for ref in
                           actor._tracked_refs_by_idx.values()])
        num_refs_at_end = len(refs_at_end)
        self.assertEqual(refs_at_start, refs_at_end,
                         "%s exceptions may have been leaked: %s" %
                         (num_refs_at_end - num_refs_at_start,
                          actor._tracked_refs_by_idx))

    @mock.patch("calico.felix.actor._print_to_stderr", autospec=True)
    def test_no_exception(self, m_print):
        # A result WITHOUT an exception must not trigger the leak report.
        gc.collect()  # Make sure that all leaked refs are cleaned up
        num_refs_at_start = len(actor._tracked_refs_by_idx)
        ar = actor.TrackedAsyncResult("foo")
        ar.set("foo")
        del ar  # Enough to trigger cleanup in CPython, with exact ref counts.
        gc.collect()  # For PyPy, we have to force a cleanup
        self.assertFalse(self._m_exit.called)
        self.assertFalse(m_print.called)
        num_refs_at_end = len(actor._tracked_refs_by_idx)
        self.assertEqual(num_refs_at_start, num_refs_at_end)

    @mock.patch("calico.felix.actor._print_to_stderr", autospec=True)
    def test_real_actor_leaked_exc(self, m_print):
        """
        Really leak an exception-containing result returned via
        actor_message and check we exit.
        """
        self.assertFalse(self._m_exit.called)
        a = ActorForTesting()
        a.start()
        result = a.do_exc(async=True)
        del result  # We abandon the result so only the message has a ref.
        # Now block so that we know that the do_exc() must have been completed.
        a.do_a(async=False)
        gc.collect()  # For PyPy, we have to force a cleanup
        self._m_exit.assert_called_once_with(1)
        self._m_exit.reset_mock()
class ActorForTesting(actor.Actor):
    """Concrete Actor that records every message and batch boundary.

    actions is a flat record of everything processed; batches groups the
    same records per batch, with "sb"/"fb" marking batch start/finish.
    _finish_side_effects yields one value per finished batch; if it is an
    Exception (e.g. SplitBatchAndRetry) it is raised from
    _finish_msg_batch.
    """

    def __init__(self, qualifier=None):
        super(ActorForTesting, self).__init__(qualifier=qualifier)
        self.actions = []         # flattened record across all batches
        self._batch_actions = []  # record for the in-flight batch
        self.batches = []         # one list per completed batch
        # Default: no side effect after any batch (next() yields a
        # lambda, which is not an Exception, so nothing is raised).
        self._finish_side_effects = (lambda _: None for _ in itertools.count())
        self.unreferenced = False
        self.on_unref_result = mock.Mock(autospec=AsyncResult)
        self.started = False

    def start(self):
        self.started = True
        return super(ActorForTesting, self).start()

    @actor_message()
    def do_a(self):
        """Records "a"; also exercises _maybe_yield."""
        self._batch_actions.append("a")
        assert self._current_msg.name == "do_a"
        self._maybe_yield()
        return "a"

    @actor_message()
    def do_b(self):
        """Records "b"."""
        self._batch_actions.append("b")
        assert self._current_msg.name == "do_b"
        return "b"

    @actor_message()
    def do_c(self):
        """Calls two other messages on the same actor (skips the queue)."""
        return self.do_c1() + self.do_c2()  # Same-actor calls skip queue.

    @actor_message()
    def do_c1(self):
        return "c1"

    @actor_message()
    def do_c2(self):
        return "c2"

    @actor_message(needs_own_batch=True)
    def do_own_batch(self):
        """Message that must be processed in a batch of its own."""
        self._batch_actions.append("own")
        return "own"

    @actor_message()
    def do_exc(self):
        """Records "exc" then raises the shared ExpectedException."""
        self._batch_actions.append("exc")
        raise EXPECTED_EXCEPTION

    def _start_msg_batch(self, batch):
        batch = super(ActorForTesting, self)._start_msg_batch(batch)
        self._batch_actions = []
        self._batch_actions.append("sb")
        return batch

    def _finish_msg_batch(self, batch, results):
        super(ActorForTesting, self)._finish_msg_batch(batch, results)
        assert self._current_msg is None
        self._batch_actions.append("fb")
        self.actions.extend(self._batch_actions)
        self.batches.append(list(self._batch_actions))
        self._batch_actions = []
        # Raise the next scripted side effect if it is an exception, to
        # simulate batch failures (e.g. SplitBatchAndRetry).
        result = next(self._finish_side_effects)
        if isinstance(result, Exception):
            raise result

    # Note: this would normally be an actor_message but we bypass that and
    # return our own future.
    def on_unreferenced(self, async=None):
        assert not self.unreferenced
        self.unreferenced = True
        return self.on_unref_result
class ExpectedException(Exception):
    """Exception deliberately raised by the test actors."""
class FinishException(Exception):
    """Exception deliberately raised from _finish_msg_batch in tests."""
EXPECTED_EXCEPTION = ExpectedException()
| |
# pylint: disable=invalid-name
# pylint: disable=missing-docstring
# pylint: disable=too-few-public-methods
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-locals
import json
import os
import re
import subprocess
import sys
import warnings
import yaml
from pkg_resources import parse_version
# Global registry of TestCase objects, filled in during test discovery.
TESTCASES = list()
INVENTORY = 'test/fixtures/hosts'

HERE = os.path.abspath(os.path.dirname(__file__))
# Derive the role name from the ansible-eos-<role>/test/... layout.
ROLE = re.match(
    r'^.*\/ansible-eos-([^/\s]+)\/test/arista-ansible-role-test$', HERE).group(1)

# Backup names for the device's running/startup config during tests.
RUN_CONFIG_BACKUP = '_eos_role_test_{}_running'.format(ROLE)
START_CONFIG_BACKUP = '_eos_role_test_{}_startup'.format(ROLE)

EOS_ROLE_PLAYBOOK = 'test/arista-ansible-role-test/eos_role.yml'
EOS_MODULE_PLAYBOOK = 'test/arista-ansible-role-test/eos_module.yml'

LOG_FILE = '{}/roletest.log'.format(HERE)
# Start each run with a fresh log file; ignore "file not found".
try:
    os.remove(LOG_FILE)
except OSError:
    pass
LOG = open(LOG_FILE, 'w')

SEPARATOR = ' ' + '*' * 50

# Because of changes between Ansible 2.1 and 2.2, let's
# keep track of what version we are working with.
# ANSIBLE_NEW is True if the version is 2.2 or later (assume true,
# will update during test setup)
# ANSIBLE_VERSION is the exact version string
# NOTE(review): the comment above says "assume true" but the initial
# value below is False -- verify which is intended.
ANSIBLE_NEW = False
ANSIBLE_VERSION = None
class TestCase(object):
    """Holds the declarative definition of a single role/module test."""

    def __init__(self, **kwargs):
        get = kwargs.get
        # Mandatory fields -- raise KeyError when missing, as before.
        self.name = kwargs['name']
        self.module = kwargs['module']
        self.host = None
        self.inventory = get('inventory')
        self.negative = get('negative', False)
        self.idempotent = get('idempotent', True)
        self.changed = get('changed', True)
        self.present = get('present')
        self.absent = get('absent')
        self.arguments = get('arguments', [])
        self.variables = {}
        # Optional setup/teardown command lists.
        self.setup = get('setup', [])
        self.teardown = get('teardown', [])

    def __str__(self):
        return self.name
class TestModule(object):
def __init__(self, testcase):
    """Bind a TestCase and build the human-readable description."""
    self.testcase = testcase
    self.description = 'Test [%s]: %s' % (testcase.module, testcase.name)
def __call__(self):
    """Execute the test case: run the role, optionally re-run to check
    idempotency, then verify 'present' and 'absent' configuration on
    each responding device.
    """
    self.output('Run first pass')
    response = self.run_module()
    for device in response:
        # NOTE(review): device.keys()[0] is Python-2-only (dict views are
        # not indexable on Python 3) -- confirm the target interpreter.
        hostname = device.keys()[0]
        reported = int(device[hostname]['changed'])
        expected = int(self.testcase.changed)
        msg = ("First pass role execution reported {} task change(s), "
               "expected {}".format(reported, expected))
        self.output(msg)
        assert reported == expected, msg

    if self.testcase.idempotent:
        # A second run should report zero changes for an idempotent role.
        self.output('Run second pass')
        response = self.run_module()
        for device in response:
            hostname = device.keys()[0]
            reported = int(device[hostname]['changed'])
            msg = (
                "Second pass role execution reported {} task change(s), "
                "expected 0".format(reported))
            self.output(msg)
            assert not reported, msg

    if self.testcase.present:
        desc = 'Validate present configuration'
        self.output(desc)
        # (A large commented-out pre-Ansible-2.2 implementation was
        # removed here; format_config_list now handles both formats.)
        values = self.format_config_list(self.testcase.present)
        for value in values:
            # run_validation takes the config block itself for
            # Ansible 2.1 and earlier, and 'lines' and
            # optional 'parents' keys for 2.2 and later
            response = self.run_validation(value, desc=desc)
            for device in response:
                hostname = device.keys()[0]
                # Result should contain an empty list of updates
                # XXX This appears to be broken in Ansible 2.2 --
                # -- the task output returns some items in the
                # -- updates dictionary, but the play recap
                # -- indicates no changes. Skip updates check for 2.2
                ansible_2_2 = (
                    parse_version(ANSIBLE_VERSION) >= parse_version('2.2') and
                    parse_version(ANSIBLE_VERSION) < parse_version('2.3')
                )
                if not ansible_2_2:
                    delim = " ---\n"
                    updates = device[hostname].get('updates', [])
                    msg = ("{} - Expected configuration\n{}{}\n{}"
                           "not found on device '{}'".
                           format(desc, delim, '\n'.join(updates),
                                  delim, hostname))
                    assert updates == [], msg
                # Result should show no changes
                msg = ("{} - Device '{}' reported no updates, but "
                       "returned 'changed'".format(desc, hostname))
                assert not int(device[hostname]['changed']), msg

    if self.testcase.absent:
        desc = 'Validate absent configuration'
        self.output(desc)
        values = self.format_config_list(self.testcase.absent)
        for value in values:
            response = self.run_validation(value, desc=desc)
            for device in response:
                hostname = device.keys()[0]
                # Result should show change has taken place
                msg = (
                    "{} - Entire absent configuration found "
                    "on device '{}'".
                    format(desc, hostname)
                )
                assert int(device[hostname]['changed']), msg
                # Join the list of updates and remove trailing newline
                updates = '\n'.join(device[hostname]['updates'])
                updates = updates.rstrip('\n')
                # The output from the playbook is sanitized - the phrase
                # network-admin in username entries is changed to
                # network-********. Replace the asterisks with admin again
                # for matching the results.
                updates = re.sub("username ([^\n]*) role network-\*{8}",
                                 r'username \1 role network-admin',
                                 updates)
                # Format the absent list for comparison to the updates
                # list. Make two lists: a list with the standard config
                # indentation, and a second list with the indentation
                # stripped. Later versions of Ansible began stripping
                # the indentation from the updates list, so we need to
                # compare the updates to both a standard format config
                # and an indent-stripped format of the config.
                if ANSIBLE_NEW:
                    # Ansible 2.2 uses parents and lines to construct
                    # the configuration. We need to join those back
                    # together to create a standard format config block.
                    absent = list(value.get('parents', []))
                    absent.extend(value['lines'])
                    absent = '\n'.join(absent)
                else:
                    # Strip any trailing whitespace from the absent string
                    # This will be the standard format configuration
                    # of what should be absent on the switch
                    absent = value.rstrip()
                absent_stripped = '\n'.join(map(str.lstrip, absent.split('\n'))).rstrip('\n')
                msg = ("{} - Some part of absent configuration found "
                       "on device '{}'".format(desc, hostname))
                assert (updates == absent) or (updates == absent_stripped), msg
def setUp(self):
    """Per-test setup: write a banner to the console and the run log,
    then apply the test case's optional device setup commands via an
    Ansible playbook.

    Raises RuntimeError if the setup playbook exits nonzero.
    """
    sep_line = "\n{}\n".format(SEPARATOR)
    print(sep_line +
          " See run log for complete output:\n {}".format(LOG_FILE) +
          sep_line)
    LOG.write("\n\n" + sep_line +
              " Begin log for {}".format(self.description) +
              sep_line + "\n")
    if not self.testcase.setup:
        return
    self.output('Running test case setup commands')
    setup_cmds = self.testcase.setup
    if not isinstance(setup_cmds, list):
        # Accept a newline-separated string as well as a list
        setup_cmds = setup_cmds.splitlines()
    self.output("{}".format(setup_cmds))
    if ANSIBLE_NEW:
        # Ansible 2.2 and later:
        # Run setup_cmds regardless of current state.
        # The commands are passed in the 'after' block because eos_config
        # de-duplicates 'lines', which would break setups that repeat a
        # command under different parents, e.g.:
        #     interface Loopback 1
        #       shutdown
        #     interface Loopback 2
        #       shutdown
        # If passed as 'lines', only the first 'shutdown' would be sent.
        #
        # The two 'service configuration session max pending' lines serve
        # two purposes: they clear any EOS configuration sessions that
        # Ansible may have left behind (all but one, opening up nine more
        # for use), and they provide the 'lines' value that is required
        # before the 'after' commands can be sent.
        args = {
            'module': 'eos_config',
            'description': 'Run test case setup commands',
            'lines': [
                'service configuration session max pending 1',
                'service configuration session max pending 10'
            ],
            'after': setup_cmds,
            'match': 'none',
        }
    else:
        # Ansible 2.1: wrap the commands in a configure/exit session
        args = {
            'module': 'eos_command',
            'description': 'Run test case setup commands',
            'cmds': ['configure terminal'] + setup_cmds + ['exit'],
        }
    ret_code, out, err = ansible_playbook(EOS_MODULE_PLAYBOOK,
                                          arguments=[json.dumps(args)])
    if ret_code != 0:
        LOG.write("Playbook stdout:\n\n{}".format(out))
        LOG.write("Playbook stderr:\n\n{}".format(err))
        raise RuntimeError("Error in test case setup")
def tearDown(self):
    """Per-test teardown: run the test case's optional cleanup commands.

    A failing teardown playbook emits a warning rather than failing the
    test run.
    """
    if not self.testcase.teardown:
        return
    self.output('Running test case teardown commands')
    teardown_cmds = self.testcase.teardown
    if not isinstance(teardown_cmds, list):
        # Accept a newline-separated string as well as a list
        teardown_cmds = teardown_cmds.splitlines()
    self.output("{}\n".format(teardown_cmds))
    if ANSIBLE_NEW:
        # Ansible 2.2+: apply the commands regardless of current state
        args = {
            'module': 'eos_config',
            'description': 'Run test case teardown_cmds commands',
            'lines': teardown_cmds,
            'match': 'none',
        }
    else:
        # Ansible 2.1: wrap the commands in a configure session
        args = {
            'module': 'eos_command',
            'description': 'Run test case teardown_cmds commands',
            'cmds': ['configure terminal'] + teardown_cmds,
        }
    ret_code, out, err = ansible_playbook(EOS_MODULE_PLAYBOOK,
                                          arguments=[json.dumps(args)])
    if ret_code != 0:
        self.output("Playbook stdout:\n\n{}".format(out))
        self.output("Playbook stderr:\n\n{}".format(err))
        warnings.warn("\nError in test case teardown\n\n{}".format(
            out))
@classmethod
def output(cls, text):
    # Echo a progress message to stdout and append it ('++'-prefixed)
    # to the run log file.
    print '>>', str(text)
    LOG.write('++ {}'.format(text) + '\n')
def format_config_list(self, config):
    """Format a configuration sample for the Ansible version in use.

    Ansible 2.2+ uses eos_config, which takes dicts with 'lines' (and
    'parents' for non-top-level commands); earlier versions use
    eos_template, which takes the raw EOS-format string (three-space
    indent).  Returns a list of values suitable for run_validation.

    Raises ValueError if the sample cannot be parsed into top-level
    commands with optional indented children.
    """
    if not ANSIBLE_NEW:
        # Ansible 2.1 and earlier: eos_template consumes the raw
        # string, so just wrap it in a list.
        return [config]
    # Each match is (top-level line, block of indented sub-lines,
    # last sub-line) -- the third group is a regex artifact.
    blocks = re.findall(r'^(\S.*\n)((\s+.*\n)*)', config, re.M)
    if not blocks:
        self.output("format_config_list:\n\n{}".format(config))
        raise ValueError('Improperly formatted configuration sample '
                         'could not be formatted for use')
    values = []
    for top, children, _ in blocks:
        if children:
            # A top-level entry with sub-level lines: strip the
            # leading indent from each child.
            values.append({
                'lines': [child.lstrip() for child in children.splitlines()],
                'parents': [top.rstrip('\n')]
            })
        else:
            # A single top-level line
            values.append({'lines': [top.rstrip('\n')]})
    return values
def run_module(self):
    """Execute the role playbook for this test case and check its exit
    status (nonzero expected for negative test cases).

    Returns the parsed per-device recap from parse_response().
    """
    (retcode, out, _) = self.execute_module()
    # The full device config dump in the output is huge; collapse it
    # before writing to the log.
    out_stripped = re.sub(r'\"config\": \"! Command:.*\\nend\"',
                          '\"config\": \"--- stripped for space ---\"',
                          out)
    # Fix: log label previously read "PLaybook"
    LOG.write("Playbook stdout:\n\n{}".format(out_stripped))
    if (self.testcase.negative):
        # This is a negative testcase, look for a return code
        # other than 0
        msg = "Expected failure, return code: {}".format(retcode)
        self.output(msg)
        assert retcode != 0, msg
    else:
        # This is a positive testcase, expect return code 0
        msg = "Return code: {}, Expected code: 0".format(retcode)
        self.output(msg)
        assert retcode == 0, msg
    return self.parse_response(out)
def execute_module(self):
    """Run the role playbook with this test case's arguments.

    Returns the (returncode, stdout, stderr) triple from
    ansible_playbook().
    """
    extra_vars = [
        json.dumps(self.testcase.arguments),
        json.dumps({'rolename': "ansible-eos-{}".format(ROLE)}),
    ]
    return ansible_playbook(EOS_ROLE_PLAYBOOK, arguments=extra_vars)
def parse_response(self, output, validate=False):
    """Parse ansible-playbook stdout into per-device results.

    Returns a list of {hostname: recap-dict} entries from the PLAY
    RECAP section.  With validate=True, instead returns a list of
    {hostname: result-dict} entries parsed from each device's JSON
    result blob.

    Raises ValueError if the output cannot be parsed.
    """
    # Get all the lines after the 'PLAY RECAP ****...' header
    lines = re.sub(r'^.*PLAY RECAP \*+', '', output, 0, re.S).split('\n')
    # Remove any empty lines from the list
    lines = [x for x in lines if x]
    recap = []
    for line in lines:
        match = re.search(r'^(\S+)\s+\:\s+'
                          r'ok=(\d+)\s+'
                          r'changed=(\d+)\s+'
                          r'unreachable=(\d+)\s+'
                          r'failed=(\d+)', line)
        if not match:
            self.output("Playbook stdout:\n\n{}".format(output))
            raise ValueError("Unable to parse Ansible output for "
                             "recap information")
        (name, okcount, changed, unreach, failed) = match.groups()
        recap.append({name: {'ok': okcount,
                             'changed': changed,
                             'unreachable': unreach,
                             'failed': failed}})
    if not validate:
        return recap
    updates = []
    for device in recap:
        # Fix: dict.keys()[0] is Python-2-only (dict_keys is not
        # subscriptable on Python 3); next(iter(...)) is equivalent on
        # both and still grabs the single hostname key.
        hostname = next(iter(device))
        match = re.search(
            r'(?<!skipping: )\[%s\] => '
            r'((?:\{(?:(?!TASK \[).*\n)*\})|'
            r'(?:\{(?:(?!TASK \[).*)\}))' % hostname, output, re.M)
        if not match:
            self.output("Playbook stdout:\n\n{}".format(output))
            raise ValueError("Unable to parse Ansible output for "
                             "result validation")
        result = json.loads(match.group(1))
        updates.append({hostname: result})
    return updates
def run_validation(self, src, desc='Validate configuration'):
    """Run the configuration *src* against the devices in --check mode
    and return the parsed per-device results.

    src: for Ansible >= 2.2, a dict with 'lines' (and optionally
    'parents') merged into the eos_config arguments; for earlier
    versions, the eos_template 'src' value.
    """
    if ANSIBLE_NEW:
        # eos_config for Ansible 2.2+; keys from src override these
        args = {
            'module': 'eos_config',
            'description': desc,
            'match': 'line'
        }
        args.update(src)
    else:
        # eos_template for Ansible 2.1 and earlier
        args = {'module': 'eos_template', 'description': desc, 'src': src}
    ret_code, out, _ = ansible_playbook(EOS_MODULE_PLAYBOOK,
                                        arguments=[json.dumps(args)],
                                        options=['--check'])
    LOG.write(out)
    assert ret_code == 0, "Validation playbook failed execution"
    return self.parse_response(out, validate=True)
def filter_modules(modules, filenames):
    """Restrict *filenames* to the modules selected on the command line.

    modules: comma-separated module names (without '.yml'), or a falsy
    value to select everything.
    filenames: available testcase file names.

    Returns the (unordered) intersection as a list, or *filenames*
    unchanged when no selection was given.
    """
    if not modules:
        return filenames
    wanted = {'{0}.yml'.format(name) for name in modules.split(',')}
    return list(wanted.intersection(filenames))
def setup():
    """Suite-level setup: detect the Ansible version, back up each
    device's running-config and startup-config, and collect test cases
    from the testcases directory.

    Sets the module globals ANSIBLE_NEW and ANSIBLE_VERSION and fills
    TESTCASES.  Raises RuntimeError (after attempting teardown) if any
    step fails.
    """
    print >> sys.stderr, "Test Suite Setup:"
    get_version = "  Determining Ansible version in use ..."
    print >> sys.stderr, get_version
    LOG.write('++ {}\n'.format(get_version.strip()))
    # Call ansible-playbook with the --version flag and parse
    # the output for the version string
    _, out, err = ansible_playbook(None, None, ['--version'])
    match = re.match(r'ansible-playbook\s+((\d+\.)+\d+)', out, re.M)
    if match:
        version = match.group(1)
    else:
        LOG.write(">> ansible-playbook stdout:\n{}".format(out))
        LOG.write(">> ansible-playbook stderr:\n{}".format(err))
        raise RuntimeError('Could not determine Ansible version')
    show_version = "  Ansible version is {}".format(version)
    print >> sys.stderr, show_version
    LOG.write('-- {}\n'.format(show_version.strip()))
    # Set global value of ANSIBLE_NEW to True if
    # version string is 2.2.0.0 or greater
    global ANSIBLE_NEW
    ANSIBLE_NEW = parse_version(version) >= parse_version('2.2.0.0')
    # Save the Ansible version to be used globally
    global ANSIBLE_VERSION
    ANSIBLE_VERSION = version

    run_backup = "  Backing up running-config on nodes ..."
    print >> sys.stderr, run_backup
    LOG.write('++ {}\n'.format(run_backup.strip()))
    if ANSIBLE_NEW:
        # Ansible >= 2.2
        # Don't need to check running-config, it will always fail
        # (match = none)
        args = {
            'module': 'eos_config',
            'description': 'Back up running-config on node',
            'lines': [
                'copy running-config {}'.format(RUN_CONFIG_BACKUP)
            ],
            'match': 'none',
        }
    else:
        # Ansible 2.1
        args = {
            'module': 'eos_command',
            'description': 'Back up running-config on node',
            'cmds': [
                'configure terminal',
                'copy running-config {}'.format(RUN_CONFIG_BACKUP)
            ],
        }
    arguments = [json.dumps(args)]
    ret_code, out, err = ansible_playbook(EOS_MODULE_PLAYBOOK,
                                          arguments=arguments)
    if ret_code != 0:
        LOG.write(">> ansible-playbook "
                  "{} stdout:\n{}".format(EOS_MODULE_PLAYBOOK, out))
        # Fix: label previously read "stddrr"
        LOG.write(">> ansible-playbook "
                  "{} stderr:\n{}".format(EOS_MODULE_PLAYBOOK, err))
        teardown()
        raise RuntimeError("Error in Test Suite Setup")

    run_backup = "  Backing up startup-config on nodes ..."
    print >> sys.stderr, run_backup
    LOG.write('++ {}\n'.format(run_backup.strip()))
    if ANSIBLE_NEW:
        # Ansible 2.2
        # Don't need to check running-config, it will always fail
        # (match = none)
        args = {
            'module': 'eos_config',
            'description': 'Back up startup-config on node',
            'lines': [
                'copy startup-config {}'.format(START_CONFIG_BACKUP)
            ],
            'match': 'none',
        }
    else:
        # Ansible 2.1
        args = {
            'module': 'eos_command',
            'description': 'Back up startup-config on node',
            'cmds': [
                'configure terminal',
                'copy startup-config {}'.format(START_CONFIG_BACKUP)
            ],
        }
    arguments = [json.dumps(args)]
    ret_code, out, err = ansible_playbook(EOS_MODULE_PLAYBOOK,
                                          arguments=arguments)
    if ret_code != 0:
        LOG.write(">> ansible-playbook "
                  "{} stdout:\n{}".format(EOS_MODULE_PLAYBOOK, out))
        # Fix: label previously read "stddrr"
        LOG.write(">> ansible-playbook "
                  "{} stderr:\n{}".format(EOS_MODULE_PLAYBOOK, err))
        teardown()
        raise RuntimeError("Error in Test Suite Setup")

    print >> sys.stderr, "  Gathering test cases ..."
    modules = os.environ.get('ANSIBLE_ROLE_TEST_CASES')
    testcases_home = os.path.join(HERE, 'testcases')
    if not os.path.exists(testcases_home):
        print >> sys.stderr, "\n  ***** Testcase directory not found!! *****"
        teardown()
        raise RuntimeError(
            "Testcase path '{}' does not exist".format(testcases_home)
        )
    filenames = os.listdir(testcases_home)
    for module in filter_modules(modules, filenames):
        path = os.path.join(testcases_home, module)
        # Fix: close the YAML file promptly instead of leaking the
        # handle returned by a bare open()
        with open(path) as fh:
            definition = yaml.load(fh)
        defaults = definition.get('defaults', {})
        testcases = definition.get('testcases', [])
        if not testcases:
            print >> sys.stderr, ("\n  ***** No testcases defined in "
                                  "module {} *****\n".format(module))
        else:
            # Reuse the already-extracted list instead of re-reading
            # 'testcases' from the definition
            for testcase in testcases:
                kwargs = defaults.copy()
                kwargs.update(testcase)
                TESTCASES.append(TestCase(**kwargs))
    print >> sys.stderr, "  Setup complete\n"
def teardown():
    # Suite-level teardown: restore each device's running-config and
    # startup-config from the backups taken in setup(), unless the
    # NO_ANSIBLE_ROLE_TEST_TEARDOWN environment variable is set.
    # Restore failures only warn; they do not raise.
    print >> sys.stderr, "\nTest Suite Teardown:"
    no_teardown = os.environ.get('NO_ANSIBLE_ROLE_TEST_TEARDOWN')
    if no_teardown:
        # Skip restoration, but tell the operator exactly how to restore
        # each device manually from the backups left on it.
        print >> sys.stderr, ("{}\n"
                              "  Skipping test suite teardown due to "
                              "NO_ANSIBLE_ROLE_TEST_TEARDOWN\n"
                              "  To restore each device to pre-test state "
                              "execute the following commands\n"
                              "    - configure terminal\n"
                              "    - configure replace {}\n"
                              "    - delete {}\n"
                              "    - copy {} startup-config\n"
                              "    - delete {}\n"
                              "{}".format(SEPARATOR, RUN_CONFIG_BACKUP,
                                          RUN_CONFIG_BACKUP,
                                          START_CONFIG_BACKUP,
                                          START_CONFIG_BACKUP, SEPARATOR))
    else:
        # Restore the running-config on the nodes
        # ---------------------------------------
        restore_backup = "  Restoring running-config on nodes ..."
        print >> sys.stderr, restore_backup
        LOG.write('++ {}\n'.format(restore_backup.strip()))
        if ANSIBLE_NEW:
            # Ansible 2.2
            # Don't need to check running-config, it will always fail
            # (match = none)
            args = {
                'module': 'eos_config',
                'description': 'Restore running-config from backup',
                'lines': [
                    'configure replace {}'.format(RUN_CONFIG_BACKUP),
                    'delete {}'.format(RUN_CONFIG_BACKUP),
                ],
                'match': 'none',
            }
        else:
            # Ansible 2.1
            args = {
                'module': 'eos_command',
                'description': 'Restore running-config from backup',
                'cmds': [
                    'configure terminal',
                    'configure replace {}'.format(RUN_CONFIG_BACKUP),
                    'delete {}'.format(RUN_CONFIG_BACKUP),
                ],
            }
        arguments = [json.dumps(args)]
        # ret_code, out, err = ansible_playbook(CMD_PLAY, arguments=arguments)
        ret_code, out, err = ansible_playbook(EOS_MODULE_PLAYBOOK,
                                              arguments=arguments)
        if ret_code != 0:
            # Warn (do not raise) so the startup-config restore below
            # still gets a chance to run.
            msg = "Error restoring running-config on nodes\n" \
                  "Running ansible-playbook {} -e {}\n" \
                  ">> stdout: {}\n" \
                  ">> stderr: {}\n".format(EOS_MODULE_PLAYBOOK,
                                           arguments, out, err)
            warnings.warn(msg)

        # Restore the startup-config on the nodes
        # ---------------------------------------
        restore_backup = "  Restoring startup-config on nodes ..."
        print >> sys.stderr, restore_backup
        LOG.write('++ {}\n'.format(restore_backup.strip()))
        if ANSIBLE_NEW:
            # Ansible 2.2
            # Don't need to check running-config, it will always fail
            # (match = none)
            args = {
                'module': 'eos_config',
                'description': 'Restore startup-config from backup',
                'lines': [
                    'copy {} startup-config'.format(START_CONFIG_BACKUP),
                    'delete {}'.format(START_CONFIG_BACKUP),
                ],
                'match': 'none',
            }
        else:
            # Ansible 2.1
            args = {
                'module': 'eos_command',
                'description': 'Restore startup-config from backup',
                'cmds': [
                    'configure terminal',
                    'copy {} startup-config'.format(START_CONFIG_BACKUP),
                    'delete {}'.format(START_CONFIG_BACKUP),
                ],
            }
        arguments = [json.dumps(args)]
        # ret_code, out, err = ansible_playbook(CMD_PLAY, arguments=arguments)
        ret_code, out, err = ansible_playbook(EOS_MODULE_PLAYBOOK,
                                              arguments=arguments)
        if ret_code != 0:
            msg = "Error restoring startup-config on nodes\n" \
                  "Running ansible-playbook {} -e {}\n" \
                  ">> stdout: {}\n" \
                  ">> stderr: {}\n".format(EOS_MODULE_PLAYBOOK,
                                           arguments, out, err)
            warnings.warn(msg)
    print >> sys.stderr, "  Teardown complete"
def test_module():
    # Nose-style test generator: yield one TestModule runner per test
    # case collected by setup().
    for testcase in TESTCASES:
        yield TestModule(testcase)
def ansible_playbook(playbook, arguments=None, options=None):
    """Invoke ansible-playbook and capture its output.

    playbook: playbook path, or a falsy value to run ansible-playbook
    without one (e.g. with --version in *options*).
    arguments: extra-vars strings, each passed with -e.
    options: additional command-line flags.

    Returns (returncode, stdout, stderr).
    """
    if arguments is None:
        arguments = []
    if options is None:
        options = []
    command = ['ansible-playbook']
    if playbook:
        command.append(playbook)
        command.extend(['-i', INVENTORY])
    for arg in arguments:
        command.extend(['-e', arg])
    command.extend(options)
    command.append('-vvv')
    # Log a copy/paste-friendly command line: single-quote JSON
    # segments (those starting with '{') so they survive a shell.
    pieces = []
    for segment in command:
        if segment[0] == '{':
            pieces.append("\'{}\' ".format(segment))
        else:
            pieces.append("{} ".format(segment))
    LOG.write("-- Ansible playbook command:\n-- {}\n".format(''.join(pieces)))
    proc = subprocess.Popen(command,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, err = proc.communicate()
    return (proc.returncode, out, err)
| |
#!/usr/bin/env python
# pragma: no testimport
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
"""Runs all tests available in VisTrails modules by importing all of
them, stealing the classes that look like unit tests, and running
all of them.
runtestsuite.py also reports all VisTrails modules that don't export
any unit tests, as a crude measure of code coverage.
"""
import atexit
from distutils.version import LooseVersion
#import doctest
import locale
import os
import sys
import traceback
from optparse import OptionParser
import platform
import re
import shutil
import tempfile
# This makes sure we use unittest2 everywhere
# If we are running 2.6, since our tests are in the same files as our code,
# VisTrails might choke up because of missing unittest features
try:
import unittest2
except ImportError:
pass
else:
sys.modules['unittest'] = unittest2
import unittest
if 'vistrails' not in sys.modules:
    # Makes sure we can import modules as if we were running VisTrails
    # from the root directory
    _this_dir = os.path.dirname(os.path.realpath(__file__))
    _root_directory = os.path.realpath(os.path.join(_this_dir, '..'))
    sys.path.insert(0, os.path.realpath(os.path.join(_root_directory, '..')))

# Use a different temporary directory
test_temp_dir = tempfile.mkdtemp(prefix='vt_testsuite_')
tempfile.tempdir = test_temp_dir

# Instantiated immediately via @apply (Python 2 builtin): registers an
# atexit hook that reports and removes anything left in the test
# tempdir.  The needed functions are bound to the instance up front so
# they are still reachable at interpreter shutdown.
@apply
class clean_tempdir(object):
    def __init__(self):
        atexit.register(self.clean)
        self.listdir = os.listdir
        self.isdir = os.path.isdir
        self.join = os.path.join
        self.test_temp_dir = test_temp_dir
        self.rmtree = shutil.rmtree
        self.out = sys.stdout.write

    def clean(self):
        # Count leftovers, warn if any, then remove the whole tempdir.
        nb_dirs = 0
        nb_files = 0
        for f in self.listdir(self.test_temp_dir):
            if self.isdir(self.join(self.test_temp_dir,f)):
                nb_dirs += 1
            else:
                nb_files += 1
        if nb_dirs > 0 or nb_files > 0:
            self.out("Warning: %d dirs and %d files were left behind in "
                     "tempdir, cleaning up\n" % (nb_dirs, nb_files))
        self.rmtree(self.test_temp_dir, ignore_errors=True)
# Parse the command-line
usage = "Usage: %prog [options] [module1 module2 ...]"
parser = OptionParser(usage=usage)
parser.add_option("-V", "--verbose", action="store", type="int",
                  default=0, dest="verbose",
                  help="set verboseness level(0--2, default=0, "
                  "higher means more verbose)")
parser.add_option("-v", "--vistrails-verbose", action="store", type="int",
                  default=0, dest="debugLevel",
                  help="set the debugLevel in VisTrails (0--2, default=0)")
parser.add_option("-e", "--examples", action="store_true",
                  default=False,
                  help="run vistrails examples")
parser.add_option("-i", "--images", action="store_true",
                  default=False,
                  help="perform image comparisons")
parser.add_option("--installbundles", action='store_true',
                  default=False,
                  help=("Attempt to install missing Python packages "
                        "automatically"))
parser.add_option("-S", "--startup", action="store", type="str", default=None,
                  dest="dotVistrails",
                  help="Set startup file (default is temporary directory)")
parser.add_option('-L', '--locale', action='store', type='str', default='',
                  dest='locale',
                  help="set locale to this string")
parser.add_option('-D', '--debug', action='store_true',
                  default=False,
                  help="start interactive debugger on unexpected error")
parser.add_option('--no-unbuffered', action='store_false', dest='unbuffered',
                  default=True,
                  help="Don't make output stream unbuffered")

(options, test_modules) = parser.parse_args()
# remove empty strings
test_modules = filter(len, test_modules)
verbose = options.verbose
# An empty --locale string falls back to the environment's default locale
locale.setlocale(locale.LC_ALL, options.locale or '')
test_examples = options.examples
test_images = options.images
installbundles = options.installbundles
dotVistrails = options.dotVistrails
debug_mode = options.debug
vistrails_verbose = options.debugLevel
# Makes stdout unbuffered, so python -u is not needed
class Unbuffered(object):
    # Thin proxy around a file object that flushes after every write so
    # progress output appears immediately, even when piped.
    def __init__(self, stream):
        self.stream = stream

    def write(self, data):
        self.stream.write(data)
        # Flush right away so nothing sits in the buffer
        self.stream.flush()

    def __getattr__(self, attr):
        # Delegate every other attribute to the wrapped stream
        return getattr(self.stream, attr)

if options.unbuffered:
    sys.stdout = Unbuffered(sys.stdout)
    sys.stderr = Unbuffered(sys.stderr)
# Use PyQt API v2
def setNewPyQtAPI():
    # Switch sip to API v2 for QString/QVariant before PyQt4 is
    # imported anywhere; failure is reported but not fatal.
    try:
        import sip
        # We now use the new PyQt API - IPython needs it
        sip.setapi('QString', 2)
        sip.setapi('QVariant', 2)
    except Exception:
        print "Could not set PyQt API, is PyQt4 installed?"

setNewPyQtAPI()
# Start debugger on test failure
if debug_mode:
    # Swap in a TestCase base that drops into the debugger on error
    from vistrails.tests.utils import DebugTestCaseMetaBase
    unittest.TestCase = DebugTestCaseMetaBase

# Log to the console
import vistrails.core.debug
vistrails.core.debug.DebugPrint.getInstance().log_to_console()

# Disable usage reporting
os.environ['VISTRAILS_USAGE_STATS'] = 'off'
from vistrails.core import reportusage
reportusage.setup_usage_report()

import vistrails.tests
import vistrails.core
import vistrails.core.db.io
import vistrails.core.db.locator
from vistrails.core import debug
import vistrails.gui.application
from vistrails.core.system import vistrails_root_directory, \
    vistrails_examples_directory
from vistrails.core.packagemanager import get_package_manager

# reinitializing arguments and options so VisTrails does not try parsing them
sys.argv = sys.argv[:1]
vistrails.gui.application.VistrailsApplicationSingleton.use_event_filter = \
    False
root_directory = os.path.realpath(vistrails_root_directory())
###############################################################################
# Testing Examples

EXAMPLES_PATH = vistrails_examples_directory()
#dictionary of examples that will be run with the workflows that will be ignored
# Maps example .vt filename -> list of workflow tags to SKIP in that file.
# Tag strings must match the vistrail's tags exactly (including the
# "SmapleFunction" spelling, sic, which matches the example file).
VT_EXAMPLES = { 'EMBOSS_webservices.vt': ["ProphetOutput"],
                'KEGGPathway.vt': [],
                'KEGG_SearchEntities_webservice.vt': [],
                'KEGG_webservices.vt': [],
                'brain_vistrail.vt': [],
                'chebi_webservice.vt': [],
                'head.vt': [],
                'infovis.vt': [],
                'noaa_webservices.vt': [],
                'offscreen.vt': [],
                'plot.vt': [],
                'spx.vt': [],
                'structure_or_id_webservice.vt': [],
                'terminator.vt': ["Isosurface Script"],
                'triangle_area.vt': [],
                'vtk.vt': [],
                'vtk_book_3rd_p189.vt': ["quadric", "SmapleFunction",
                                         "Almost there"],
                'vtk_book_3rd_p193.vt': ["modules", "connections",
                                         "lookup table"],
                'vtk_http.vt': [],
              }
###############################################################################
# Utility

def sub_print(s, overline=False):
    """Prints line with underline (and optionally overline) ASCII dashes."""
    if overline:
        print "-" * len(s)
    print s
    print "-" * len(s)
###############################################################################

# Normalize the positional module list: None means "run everything"
if len(test_modules) > 0:
    test_modules = test_modules
else:
    test_modules = None

# NOTE(review): this force-enables image comparison whenever the
# examples directory exists, overriding the -i flag -- confirm intended.
if os.path.exists(EXAMPLES_PATH):
    test_images = True
def module_filter(name):
    """Return True when *name* is selected by the command-line module
    list (prefix match), or when no selection was given."""
    if test_modules is None:
        return True
    return any(name.startswith(mod) for mod in test_modules)
###############################################################################
# creates the app so that testing can happen
# We need the windows so we can test events, etc.
optionsDict = {
    'batch': False,
    'executionLog': False,
    'singleInstance': False,
    'installBundles': installbundles,
    'enablePackagesSilently': True,
    'handlerDontAsk': True,
    'developerDebugger': debug_mode,
    'debugLevel': vistrails_verbose,
    'dontUnloadModules': True,
    'showVistrailsNews': False,
}
if dotVistrails:
    optionsDict['dotVistrails'] = dotVistrails
else:
    # No startup dir given: run as a spawned (throwaway) instance
    optionsDict['spawned'] = True
v = vistrails.gui.application.start_application(optionsDict)
if v != 0:
    # Application failed to start; shut down cleanly and propagate code
    app = vistrails.gui.application.get_vistrails_application()
    if app:
        app.finishSession()
    sys.exit(v)

# make sure that fixedCellSize is turned on
spreadsheet_conf = get_package_manager().get_package_configuration("spreadsheet")
spreadsheet_conf.fixedCellSize = True

# disable first vistrail
app = vistrails.gui.application.get_vistrails_application()
app.builderWindow.auto_view = False
app.builderWindow.close_all_vistrails(True)
# Report the test environment (platform, Python, optional libraries)
print "Test Suite for VisTrails"
print "Locale settings: %s" % ', '.join('%s: %s' % (s, locale.setlocale(getattr(locale, s), None)) for s in ('LC_ALL', 'LC_TIME'))
print "Running on %s" % ', '.join(platform.uname())
print "Python is %s" % sys.version
try:
    from PyQt4 import QtCore
    print "Using PyQt4 %s with Qt %s" % (QtCore.PYQT_VERSION_STR, QtCore.qVersion())
except ImportError:
    print "PyQt4 not available"
for pkg in ('numpy', 'scipy', 'matplotlib'):
    try:
        ipkg = __import__(pkg, globals(), locals(), [], -1)
        print "Using %s %s" % (pkg, ipkg.__version__)
    except ImportError:
        print "%s not available" % pkg
try:
    import vtk
    print "Using vtk %s" % vtk.vtkVersion().GetVTKVersion()
except ImportError:
    print "vtk not available"
print ""

tests_passed = True
main_test_suite = unittest.TestSuite()
test_loader = unittest.TestLoader()
# Matches the "# pragma: no testimport" marker near the top of a file
import_skip_regex = re.compile(r'(?i)# *pragma[: ]*no *testimport')

if test_modules:
    sub_print("Trying to import some of the modules")
else:
    sub_print("Trying to import all modules")
# Walk the source tree, import every eligible module, and collect its
# unittest TestCases into main_test_suite.
for (p, subdirs, files) in os.walk(root_directory):
    # skip subversion subdirectories
    if p.find('.svn') != -1 or p.find('.git') != -1 :
        continue
    for filename in files:
        # skip files that don't look like VisTrails python modules
        if not filename.endswith('.py'):
            continue
        module_file = os.path.join(p, filename)
        module = os.path.join("vistrails", p[len(root_directory)+1:],
                              filename[:-3])
        if (module.startswith(os.sep) or
                ('#' in module)):
            continue

        # use qualified import names with periods instead of
        # slashes to avoid duplicates in sys.modules
        module = module.replace('/','.')
        module = module.replace('\\','.')
        if module.endswith('__init__'):
            module = module[:-9]

        if not module_filter(module):
            continue
        if module.startswith('vistrails.tests.resources'):
            continue
        if ('.system.' in module and not
                module.endswith('__init__')):
            continue

        # Honor the "no testimport" pragma on the first (non-shebang) line
        with open(module_file) as fp:
            l = fp.readline()
            if l.startswith('#!'): # shebang
                l = fp.readline()
            if import_skip_regex.match(l):
                if verbose >= 1:
                    print >>sys.stderr, ("Skipping %s, not an importable "
                                         "module" % module)
                continue

        m = None
        try:
            if '.' in module:
                # 'foo' fromlist forces __import__ to return the leaf module
                m = __import__(module, globals(), locals(), ['foo'])
            else:
                m = __import__(module)
        except BaseException:
            print >>sys.stderr, "ERROR: Could not import module: %s" % module
            if verbose >= 1:
                traceback.print_exc(file=sys.stderr)
            continue

        # Load the unittest TestCases
        suite = test_loader.loadTestsFromModule(m)

        # Load the doctests
        #try:
        #    suite.addTests(doctest.DocTestSuite(m))
        #except ValueError:
        #    pass # No doctest is fine, we check that some tests exist later
        # The doctests are currently opt-in; a load_tests method can be
        # defined to build a DocTestSuite
        # This is because some modules have interpreter-formatted examples that
        # are NOT doctests, and because mining the codebase for doctests is
        # painfully slow
        main_test_suite.addTests(suite)

        if suite.countTestCases() == 0 and verbose >= 1:
            print >>sys.stderr, "WARNING: module has no tests: %s" % module
        elif verbose >= 2:
            print >>sys.stderr, "OK: module has %d test cases: %s" % (
                suite.countTestCases(),
                module)

sub_print("Imported modules. Running %d tests%s..." % (
          main_test_suite.countTestCases(),
          ", and thumbnails comparison" if test_images else ''),
          overline=True)
############## TEST VISTRAIL IMAGES ####################
# Compares thumbnails with the generated images to detect broken visualizations

# (vt filename, [(test name, workflow tag), ...]) pairs to image-test
image_tests = [("terminator.vt", [("terminator_isosurface", "Isosurface"),
                                  ("terminator_VRSW", "Volume Rendering SW"),
                                  ("terminator_CPSW", "Clipping Plane SW"),
                                  ("terminator_CRSW", "Combined Rendering SW"),
                                  ("terminator_ISSW", "Image Slices SW")])
               ]

# Pick a thumbnail-comparison backend: VTK >= 5.8 if available, else
# scipy's imread (shape check only), else bare file-existence check.
compare_use_vtk = False
try:
    import vtk
    if LooseVersion(vtk.vtkVersion().GetVTKVersion()) >= LooseVersion('5.8.0'):
        compare_use_vtk = True
except ImportError:
    pass

if compare_use_vtk:
    def compare_thumbnails(prev, next):
        # Returns vtkImageDifference's thresholded error between the
        # two PNG files.
        #vtkImageDifference assumes RGB, so strip alpha
        def removeAlpha(file):
            freader = vtk.vtkPNGReader()
            freader.SetFileName(file)
            removealpha = vtk.vtkImageExtractComponents()
            removealpha.SetComponents(0,1,2)
            removealpha.SetInputConnection(freader.GetOutputPort())
            removealpha.Update()
            return removealpha.GetOutput()
        #do the image comparison
        a = removeAlpha(prev)
        b = removeAlpha(next)
        idiff = vtk.vtkImageDifference()
        # VTK 6 renamed the input setters
        if LooseVersion(vtk.vtkVersion().GetVTKVersion()) >= \
                LooseVersion('6.0.0'):
            idiff.SetInputData(a)
            idiff.SetImageData(b)
        else:
            idiff.SetInput(a)
            idiff.SetImage(b)
        idiff.Update()
        return idiff.GetThresholdedError()
else:
    try:
        from scipy.misc import imread
    except ImportError:
        imread = None
    if test_images:
        print "Warning: old VTK version detected, NOT comparing thumbnails"
    if imread is not None:
        def compare_thumbnails(prev, next):
            # Shape check only: 0 when both are color images of the
            # same width/height, infinity otherwise.
            prev_img = imread(prev)
            next_img = imread(next)
            assert len(prev_img.shape) == 3
            assert len(next_img.shape) == 3
            if prev_img.shape[:2] == next_img.shape[:2]:
                return 0
            else:
                return float('Inf')
    else:
        def compare_thumbnails(prev, next):
            # Weakest fallback: only verify both files exist.
            if os.path.isfile(prev) and os.path.isfile(next):
                return 0
            else:
                return float('Inf')
def image_test_generator(vtfile, version):
    # Build and return a TestCase method that executes the given
    # workflow version of *vtfile* with thumbnail comparison enabled
    # and fails on any reported error.
    from vistrails.core.db.locator import FileLocator
    from vistrails.core.db.io import load_vistrail
    import vistrails.core.console_mode
    def test(self):
        try:
            errs = []
            filename = os.path.join(EXAMPLES_PATH, vtfile)
            locator = FileLocator(os.path.abspath(filename))
            (v, abstractions, thumbnails, mashups) = load_vistrail(locator)
            errs = vistrails.core.console_mode.run(
                [(locator, version)],
                update_vistrail=False,
                extra_info={'compare_thumbnails': compare_thumbnails})
            if len(errs) > 0:
                for err in errs:
                    print(" *** Error in %s:%s:%s -- %s" % err)
                    self.fail(str(err))
        except Exception, e:
            self.fail(debug.format_exception(e))
    return test
# Empty container class; test methods are attached dynamically below.
class TestVistrailImages(unittest.TestCase):
    pass

if test_images:
    # Generate one test method per (vistrail, workflow tag) pair and
    # register it with the main suite.
    for vt, t in image_tests:
        for name, version in t:
            test_name = 'test_%s' % name
            test = image_test_generator(vt, version)
            setattr(TestVistrailImages, test_name, test)
            main_test_suite.addTest(TestVistrailImages(test_name))
############## RUN TEST SUITE ####################

class TestResult(unittest.TextTestResult):
    # Like TextTestResult, but also prints the reason for each skip.
    def addSkip(self, test, reason):
        self.stream.writeln("skipped '{0}': {1}".format(str(test), reason))
        super(TestResult, self).addSkip(test, reason)

runner = unittest.TextTestRunner(
    verbosity=max(verbose, 1),
    resultclass=TestResult)
result = runner.run(main_test_suite)

if not result.wasSuccessful():
    tests_passed = False

sub_print("Tests finished.", overline=True)
if test_examples:
    # Execute every non-ignored workflow of each example vistrail and
    # summarize per-file errors.
    import vistrails.core.console_mode
    sub_print("Testing examples:")
    summary = {}
    nworkflows = 0
    nvtfiles = 0
    for vtfile in VT_EXAMPLES.keys():
        try:
            errs = []
            filename = os.path.join(EXAMPLES_PATH,
                                    vtfile)
            print filename
            locator = vistrails.core.db.locator.FileLocator(os.path.abspath(filename))
            (v, abstractions, thumbnails, mashups) = vistrails.core.db.io.load_vistrail(locator)
            w_list = []
            # Collect every tagged workflow not in the ignore list
            for version,tag in v.get_tagMap().iteritems():
                if tag not in VT_EXAMPLES[vtfile]:
                    w_list.append((locator,version))
                    nworkflows += 1
            if len(w_list) > 0:
                errs = vistrails.core.console_mode.run(w_list, update_vistrail=False)
                summary[vtfile] = errs
        except Exception, e:
            # Record a load/run failure as a pseudo-error entry
            errs.append((vtfile,"None", "None", debug.format_exception(e)))
            summary[vtfile] = errs
        nvtfiles += 1

    print "-" * 79
    print "Summary of Examples: %s workflows in %s vistrail files" % (
        nworkflows, nvtfiles)
    print ""
    errors = False
    for vtfile, errs in summary.iteritems():
        print vtfile
        if len(errs) > 0:
            for err in errs:
                print(" *** Error in %s:%s:%s -- %s" % err)
            errors = True
        else:
            print " Ok."
    print "-" * 79
    if errors:
        tests_passed = False
        sub_print("There were errors. See summary for more information")
    else:
        sub_print("Examples ran successfully.")

vistrails.gui.application.get_vistrails_application().finishSession()
vistrails.gui.application.stop_application()

# Test Runners can use the return value to know if the tests passed
sys.exit(0 if tests_passed else 1)
| |
# -*- coding: utf-8 -*-
#
# SelfTest/Cipher/DES.py: Self-test for the (Single) DES cipher
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Crypto.Cipher.DES"""
__revision__ = "$Id$"
from common import dict # For compatibility with Python 2.1 and 2.2
from Crypto.Util.py3compat import *
import unittest
# This is a list of (plaintext, ciphertext, key, description) tuples.
# Hex-string constants reused by the NIST SP 800-17 vectors below:
SP800_17_B1_KEY = '01' * 8  # key 0101...01 shared by every Table B.1 entry
SP800_17_B2_PT = '00' * 8   # all-zero plaintext shared by every Table B.2 entry
test_data = [
# Test vectors from Appendix A of NIST SP 800-17
# "Modes of Operation Validation System (MOVS): Requirements and Procedures"
# http://csrc.nist.gov/publications/nistpubs/800-17/800-17.pdf
# Appendix A - "Sample Round Outputs for the DES"
('0000000000000000', '82dcbafbdeab6602', '10316e028c8f3b4a',
"NIST SP800-17 A"),
# Table B.1 - Variable Plaintext Known Answer Test
('8000000000000000', '95f8a5e5dd31d900', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #0'),
('4000000000000000', 'dd7f121ca5015619', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #1'),
('2000000000000000', '2e8653104f3834ea', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #2'),
('1000000000000000', '4bd388ff6cd81d4f', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #3'),
('0800000000000000', '20b9e767b2fb1456', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #4'),
('0400000000000000', '55579380d77138ef', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #5'),
('0200000000000000', '6cc5defaaf04512f', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #6'),
('0100000000000000', '0d9f279ba5d87260', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #7'),
('0080000000000000', 'd9031b0271bd5a0a', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #8'),
('0040000000000000', '424250b37c3dd951', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #9'),
('0020000000000000', 'b8061b7ecd9a21e5', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #10'),
('0010000000000000', 'f15d0f286b65bd28', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #11'),
('0008000000000000', 'add0cc8d6e5deba1', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #12'),
('0004000000000000', 'e6d5f82752ad63d1', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #13'),
('0002000000000000', 'ecbfe3bd3f591a5e', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #14'),
('0001000000000000', 'f356834379d165cd', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #15'),
('0000800000000000', '2b9f982f20037fa9', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #16'),
('0000400000000000', '889de068a16f0be6', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #17'),
('0000200000000000', 'e19e275d846a1298', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #18'),
('0000100000000000', '329a8ed523d71aec', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #19'),
('0000080000000000', 'e7fce22557d23c97', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #20'),
('0000040000000000', '12a9f5817ff2d65d', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #21'),
('0000020000000000', 'a484c3ad38dc9c19', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #22'),
('0000010000000000', 'fbe00a8a1ef8ad72', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #23'),
('0000008000000000', '750d079407521363', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #24'),
('0000004000000000', '64feed9c724c2faf', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #25'),
('0000002000000000', 'f02b263b328e2b60', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #26'),
('0000001000000000', '9d64555a9a10b852', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #27'),
('0000000800000000', 'd106ff0bed5255d7', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #28'),
('0000000400000000', 'e1652c6b138c64a5', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #29'),
('0000000200000000', 'e428581186ec8f46', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #30'),
('0000000100000000', 'aeb5f5ede22d1a36', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #31'),
('0000000080000000', 'e943d7568aec0c5c', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #32'),
('0000000040000000', 'df98c8276f54b04b', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #33'),
('0000000020000000', 'b160e4680f6c696f', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #34'),
('0000000010000000', 'fa0752b07d9c4ab8', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #35'),
('0000000008000000', 'ca3a2b036dbc8502', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #36'),
('0000000004000000', '5e0905517bb59bcf', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #37'),
('0000000002000000', '814eeb3b91d90726', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #38'),
('0000000001000000', '4d49db1532919c9f', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #39'),
('0000000000800000', '25eb5fc3f8cf0621', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #40'),
('0000000000400000', 'ab6a20c0620d1c6f', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #41'),
('0000000000200000', '79e90dbc98f92cca', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #42'),
('0000000000100000', '866ecedd8072bb0e', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #43'),
('0000000000080000', '8b54536f2f3e64a8', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #44'),
('0000000000040000', 'ea51d3975595b86b', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #45'),
('0000000000020000', 'caffc6ac4542de31', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #46'),
('0000000000010000', '8dd45a2ddf90796c', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #47'),
('0000000000008000', '1029d55e880ec2d0', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #48'),
('0000000000004000', '5d86cb23639dbea9', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #49'),
('0000000000002000', '1d1ca853ae7c0c5f', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #50'),
('0000000000001000', 'ce332329248f3228', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #51'),
('0000000000000800', '8405d1abe24fb942', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #52'),
('0000000000000400', 'e643d78090ca4207', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #53'),
('0000000000000200', '48221b9937748a23', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #54'),
('0000000000000100', 'dd7c0bbd61fafd54', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #55'),
('0000000000000080', '2fbc291a570db5c4', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #56'),
('0000000000000040', 'e07c30d7e4e26e12', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #57'),
('0000000000000020', '0953e2258e8e90a1', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #58'),
('0000000000000010', '5b711bc4ceebf2ee', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #59'),
('0000000000000008', 'cc083f1e6d9e85f6', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #60'),
('0000000000000004', 'd2fd8867d50d2dfe', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #61'),
('0000000000000002', '06e7ea22ce92708f', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #62'),
('0000000000000001', '166b40b44aba4bd6', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #63'),
# Table B.2 - Variable Key Known Answer Test
(SP800_17_B2_PT, '95a8d72813daa94d', '8001010101010101',
'NIST SP800-17 B.2 #0'),
(SP800_17_B2_PT, '0eec1487dd8c26d5', '4001010101010101',
'NIST SP800-17 B.2 #1'),
(SP800_17_B2_PT, '7ad16ffb79c45926', '2001010101010101',
'NIST SP800-17 B.2 #2'),
(SP800_17_B2_PT, 'd3746294ca6a6cf3', '1001010101010101',
'NIST SP800-17 B.2 #3'),
(SP800_17_B2_PT, '809f5f873c1fd761', '0801010101010101',
'NIST SP800-17 B.2 #4'),
(SP800_17_B2_PT, 'c02faffec989d1fc', '0401010101010101',
'NIST SP800-17 B.2 #5'),
(SP800_17_B2_PT, '4615aa1d33e72f10', '0201010101010101',
'NIST SP800-17 B.2 #6'),
(SP800_17_B2_PT, '2055123350c00858', '0180010101010101',
'NIST SP800-17 B.2 #7'),
(SP800_17_B2_PT, 'df3b99d6577397c8', '0140010101010101',
'NIST SP800-17 B.2 #8'),
(SP800_17_B2_PT, '31fe17369b5288c9', '0120010101010101',
'NIST SP800-17 B.2 #9'),
(SP800_17_B2_PT, 'dfdd3cc64dae1642', '0110010101010101',
'NIST SP800-17 B.2 #10'),
(SP800_17_B2_PT, '178c83ce2b399d94', '0108010101010101',
'NIST SP800-17 B.2 #11'),
(SP800_17_B2_PT, '50f636324a9b7f80', '0104010101010101',
'NIST SP800-17 B.2 #12'),
(SP800_17_B2_PT, 'a8468ee3bc18f06d', '0102010101010101',
'NIST SP800-17 B.2 #13'),
(SP800_17_B2_PT, 'a2dc9e92fd3cde92', '0101800101010101',
'NIST SP800-17 B.2 #14'),
(SP800_17_B2_PT, 'cac09f797d031287', '0101400101010101',
'NIST SP800-17 B.2 #15'),
(SP800_17_B2_PT, '90ba680b22aeb525', '0101200101010101',
'NIST SP800-17 B.2 #16'),
(SP800_17_B2_PT, 'ce7a24f350e280b6', '0101100101010101',
'NIST SP800-17 B.2 #17'),
(SP800_17_B2_PT, '882bff0aa01a0b87', '0101080101010101',
'NIST SP800-17 B.2 #18'),
(SP800_17_B2_PT, '25610288924511c2', '0101040101010101',
'NIST SP800-17 B.2 #19'),
(SP800_17_B2_PT, 'c71516c29c75d170', '0101020101010101',
'NIST SP800-17 B.2 #20'),
(SP800_17_B2_PT, '5199c29a52c9f059', '0101018001010101',
'NIST SP800-17 B.2 #21'),
(SP800_17_B2_PT, 'c22f0a294a71f29f', '0101014001010101',
'NIST SP800-17 B.2 #22'),
(SP800_17_B2_PT, 'ee371483714c02ea', '0101012001010101',
'NIST SP800-17 B.2 #23'),
(SP800_17_B2_PT, 'a81fbd448f9e522f', '0101011001010101',
'NIST SP800-17 B.2 #24'),
(SP800_17_B2_PT, '4f644c92e192dfed', '0101010801010101',
'NIST SP800-17 B.2 #25'),
(SP800_17_B2_PT, '1afa9a66a6df92ae', '0101010401010101',
'NIST SP800-17 B.2 #26'),
(SP800_17_B2_PT, 'b3c1cc715cb879d8', '0101010201010101',
'NIST SP800-17 B.2 #27'),
(SP800_17_B2_PT, '19d032e64ab0bd8b', '0101010180010101',
'NIST SP800-17 B.2 #28'),
(SP800_17_B2_PT, '3cfaa7a7dc8720dc', '0101010140010101',
'NIST SP800-17 B.2 #29'),
(SP800_17_B2_PT, 'b7265f7f447ac6f3', '0101010120010101',
'NIST SP800-17 B.2 #30'),
(SP800_17_B2_PT, '9db73b3c0d163f54', '0101010110010101',
'NIST SP800-17 B.2 #31'),
(SP800_17_B2_PT, '8181b65babf4a975', '0101010108010101',
'NIST SP800-17 B.2 #32'),
(SP800_17_B2_PT, '93c9b64042eaa240', '0101010104010101',
'NIST SP800-17 B.2 #33'),
(SP800_17_B2_PT, '5570530829705592', '0101010102010101',
'NIST SP800-17 B.2 #34'),
(SP800_17_B2_PT, '8638809e878787a0', '0101010101800101',
'NIST SP800-17 B.2 #35'),
(SP800_17_B2_PT, '41b9a79af79ac208', '0101010101400101',
'NIST SP800-17 B.2 #36'),
(SP800_17_B2_PT, '7a9be42f2009a892', '0101010101200101',
'NIST SP800-17 B.2 #37'),
(SP800_17_B2_PT, '29038d56ba6d2745', '0101010101100101',
'NIST SP800-17 B.2 #38'),
(SP800_17_B2_PT, '5495c6abf1e5df51', '0101010101080101',
'NIST SP800-17 B.2 #39'),
(SP800_17_B2_PT, 'ae13dbd561488933', '0101010101040101',
'NIST SP800-17 B.2 #40'),
(SP800_17_B2_PT, '024d1ffa8904e389', '0101010101020101',
'NIST SP800-17 B.2 #41'),
(SP800_17_B2_PT, 'd1399712f99bf02e', '0101010101018001',
'NIST SP800-17 B.2 #42'),
(SP800_17_B2_PT, '14c1d7c1cffec79e', '0101010101014001',
'NIST SP800-17 B.2 #43'),
(SP800_17_B2_PT, '1de5279dae3bed6f', '0101010101012001',
'NIST SP800-17 B.2 #44'),
(SP800_17_B2_PT, 'e941a33f85501303', '0101010101011001',
'NIST SP800-17 B.2 #45'),
(SP800_17_B2_PT, 'da99dbbc9a03f379', '0101010101010801',
'NIST SP800-17 B.2 #46'),
(SP800_17_B2_PT, 'b7fc92f91d8e92e9', '0101010101010401',
'NIST SP800-17 B.2 #47'),
(SP800_17_B2_PT, 'ae8e5caa3ca04e85', '0101010101010201',
'NIST SP800-17 B.2 #48'),
(SP800_17_B2_PT, '9cc62df43b6eed74', '0101010101010180',
'NIST SP800-17 B.2 #49'),
(SP800_17_B2_PT, 'd863dbb5c59a91a0', '0101010101010140',
'NIST SP800-17 B.2 #50'),
(SP800_17_B2_PT, 'a1ab2190545b91d7', '0101010101010120',
'NIST SP800-17 B.2 #51'),
(SP800_17_B2_PT, '0875041e64c570f7', '0101010101010110',
'NIST SP800-17 B.2 #52'),
(SP800_17_B2_PT, '5a594528bebef1cc', '0101010101010108',
'NIST SP800-17 B.2 #53'),
(SP800_17_B2_PT, 'fcdb3291de21f0c0', '0101010101010104',
'NIST SP800-17 B.2 #54'),
(SP800_17_B2_PT, '869efd7f9f265a09', '0101010101010102',
'NIST SP800-17 B.2 #55'),
]
class RonRivestTest(unittest.TestCase):
    """Ronald L. Rivest's DES implementation test.

    See http://people.csail.mit.edu/rivest/Destest.txt

    Starting from X0 = 9474B8E8C73BCA7D (hex), iterate
        X(i+1) = E(Xi, Xi) when i is even, D(Xi, Xi) when i is odd,
    where E/D are DES encryption/decryption of X under key K.  If the
    implementation is correct, X16 = 1B1A2DDB4C642438; a match rules out
    all 36,568 single-fault errors described in the paper.
    """

    def runTest(self):
        from Crypto.Cipher import DES
        from binascii import b2a_hex

        value = b('\x94\x74\xB8\xE8\xC7\x3B\xCA\x7D')  # X0
        for step in range(16):
            cipher = DES.new(value, DES.MODE_ECB)
            if step % 2 == 0:
                value = cipher.encrypt(value)   # even step -> E(Xi, Xi)
            else:
                value = cipher.decrypt(value)   # odd step  -> D(Xi, Xi)
        # value now holds X16
        self.assertEqual(b2a_hex(value),
                         b2a_hex(b('\x1B\x1A\x2D\xDB\x4C\x64\x24\x38')))
def get_tests(config={}):
    """Return the DES self-test list: generated known-answer block-cipher
    tests for every vector in test_data, plus Rivest's iterated test."""
    from Crypto.Cipher import DES
    from common import make_block_tests
    kat_tests = make_block_tests(DES, "DES", test_data)
    return kat_tests + [RonRivestTest()]
if __name__ == '__main__':
    import unittest
    # unittest.main resolves defaultTest by name on this module, so the
    # suite factory must be exposed as a module-level callable.
    suite = lambda: unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
| |
#################################PUNKEMON YO!!!!!###############################
import pygame
from pygame.locals import *
import random
from random import *
import math
from math import *
import os
import string
# Load the generated menu-font sprite code (defines menuSpriteDict and
# friends) into this module's namespace.
# BUGFIX: the file handle was never closed; use a with-block.
# NOTE(review): exec-ing a source file is fragile and unsafe if that file can
# be modified -- consider importing it as a module instead.
with open("menu sprite code.py") as fontLoaderCode:
    exec(fontLoaderCode.read())
################### Menu stuff ############################
#####startDict: for menus originating with the start menu
#startDict={"Start": start, "Wikidex": wikidex, "Team": team, "World": world,
#"Tokens": tokens, "Stats": stats, "Stuff": stuff, "Save": save,
#"Options": options}
#####fightDict: for menus originating with the battle menu
#fightDict={"Battle": battle, "Attack": attack, "Item": item, "Switch": switch, "Leg it": legIt}
class menu:
    """A scrollable text menu rendered as a grid of single characters.

    oplist       -- list of option-name strings; on select, the chosen string
                    is looked up in the module-level menuDict
    nextMenuMeta -- "previous" (default) means a selected submenu should
                    return here on B; can be "menu" for a specific
                    non-previous menu, or False to always go back to world
    nextMenu     -- the menu to return to when B is pressed
    rollable     -- pressing "down" on the last option wraps to the first
                    (and "up" on the first wraps to the last)
    """

    def __init__(self, oplist, nextMenuMeta="previous", nextMenu=False, rollable=False):
        self.oplist = oplist
        self.curPos = 1  # current cursor position, ONE-indexed
        self.rollable = rollable
        self.nextMenuMeta = nextMenuMeta
        self.nextMenu = nextMenu

    def setNext(self, target):
        """Set the menu to return to upon pressing B."""
        self.nextMenu = target

    def getNext(self):
        """Return the menu to go back to (may be False for the world)."""
        return self.nextMenu

    def getArray(self):
        """Return the menu as a 2-D character grid for sprite generation:
        '*'/'=' borders, '|' sides, '>' marking the cursor row."""
        # width of the widest option (never below 2)
        maxLength = 2
        for op in self.oplist:  # op needs to be a string
            if len(op) > maxLength:
                maxLength = len(op)
        # top border line (+1 column reserved for the cursor)
        opAr = [["*"]]
        for i in range(0, maxLength + 1):
            opAr[0].append("=")
        opAr[0].append("*")
        # one row per option: pipe, cursor slot, padded text, pipe
        for op in self.oplist:
            tmp = ["|", " "]
            tmpStr = op.ljust(maxLength)
            for char in tmpStr:
                tmp.append(char)
            tmp.append("|")
            opAr.append(tmp)
        opAr.append(opAr[0])  # bottom border reuses the top border row
        # draw cursor (curPos is 1-indexed, which matches the border at row 0)
        opAr[self.curPos][1] = ">"
        return opAr

    def setOptions(self, newList):
        """Replace the option list."""
        self.oplist = newList

    def moveCursor(self, direction):
        """Move the cursor "up" or "down"; wrap around only if rollable."""
        if direction == "up":
            if self.curPos > 1:  # curPos == 1 means cursor on top option
                self.curPos -= 1
            elif self.rollable:
                self.curPos = len(self.oplist)
        elif direction == "down":
            if self.curPos < len(self.oplist):
                self.curPos += 1
            elif self.rollable:
                self.curPos = 1

    def processInput(self, event, screen):
        """Handle a pygame KEYDOWN: UP/DOWN move, A selects, S goes back."""
        if event.type == KEYDOWN:
            if event.key == K_UP:
                self.moveCursor("up")
            elif event.key == K_DOWN:
                self.moveCursor("down")
            elif event.key == K_a:
                oldMenu = self
                newMenu = menuDict[self.oplist[self.curPos - 1]]
                screen.curMenu = newMenu
                if newMenu.nextMenuMeta == "previous":
                    # BUGFIX: was newMenu.setnextMenu(oldMenu) -- no such
                    # method exists; the setter is setNext().
                    newMenu.setNext(oldMenu)
            elif event.key == K_s:
                screen.curMenu = self.getNext()
class dialogMenu:
    """A dialog box that displays one "slide" of text at a time.

    dialog       -- list of strings, one per slide; "\n" forces a line break
    nextMenuMeta -- "menu" (default): go to the specific nextMenu when done;
                    "previous" or False are also possible values
    nextMenu     -- where control goes after the final slide
    curSlide     -- 1-indexed slide currently shown
    """
    def __init__(self,dialog,nextMenuMeta="menu",nextMenu=False,curSlide=1): #nextMenu can be world or menu, dialog is a list of strings
        self.dialog=dialog
        self.nextMenuMeta=nextMenuMeta
        #default of specific menu, can be set to "previous" for the previous menu (overwrites any initial set value) or False if it always goes back to world
        self.nextMenu=nextMenu
        self.curSlide=curSlide
        # layout limits for the rendered text box
        self.maxChars=20
        self.maxLines=3
        self.maxSlides=5
        # (disabled sanity check: warn when dialog has more than maxSlides slides)
        #if len(dialog)>self.maxSlides:
        #print "This is too many slides!"
    def setNext(self,target): #sets menu to which to return upon pressing B
        self.nextMenu=target
    def getNext(self): #returns nextMenu
        return self.nextMenu
    def processInput(self, event, screen):
        # Any key advances the dialog; after the last slide, hand control
        # to nextMenu and rewind to slide 1 for the next showing.
        if event.type==KEYDOWN:
            if self.curSlide<len(self.dialog):
                self.curSlide+=1
            else:
                oldMenu=self
                newMenu=oldMenu.getNext()
                screen.curMenu=newMenu
                if newMenu.nextMenuMeta=="previous":
                    newMenu.setNext(oldMenu)
                self.curSlide=1
    def getArray(self): #generates array with dialog characters in a box
        #get raw string of dialog and break it up into lines
        diastring=self.dialog[self.curSlide-1] #-1 bc curSlide is 1-indexed
        sentences=diastring.split("\n")
        finalLines=[] #will contain the final dialog, with each item being a line
        for sentence in sentences:
            if len(sentence)<=self.maxChars:
                finalLines.append(sentence)
            else:
                # greedy word-wrap: pack words until the next would overflow
                words=sentence.split()
                newLine=""
                for word in words:
                    #if you can fit one more word on the line,
                    if len(newLine)+len(word)<self.maxChars:
                        newLine=newLine+word+" "
                    #if you can't, finalize the line and start a new one
                    else:
                        finalLines.append(newLine[0:-1])
                        newLine=word+" "
                finalLines.append(newLine[0:-1])
        # (disabled sanity checks for too many lines / over-long lines)
        #if len(finalLines)>self.maxLines:
        #print "This is too many lines on one slide!"
        for i in range(0,len(finalLines)):
            # pad every line out to the full box width
            finalLines[i]=finalLines[i].ljust(self.maxChars)
        ##Now to characterize and print the array
        #top border line
        diAr=[["*"]]
        #make the menu box the same size (just big enough to accomodate the longest allowable line) every time
        for i in range(0,self.maxChars):
            diAr[0].append("=")
        diAr[0].append("*")
        #assemble menu line for a given entry
        for line in finalLines:
            tmp=["|"]
            for char in line: #break line into individual characters
                tmp.append(char)
            tmp.append("|")
            diAr.append(tmp)
        diAr.append(diAr[0]) #bottom border is same as top
        return(diAr)
class forcedChoiceMenu:
    """Like `menu`, but with no "back" handling: the player must pick an
    option (the S/back key branch is deliberately absent from processInput).

    oplist       -- list of option-name strings, looked up in menuDict on select
    nextMenuMeta -- "previous" (default) means a selected submenu should
                    return here on B; "menu"/False also possible
    nextMenu     -- the menu to return to when B is pressed
    rollable     -- cursor wraps past either end when True
    """

    def __init__(self, oplist, nextMenuMeta="previous", nextMenu=False, rollable=False):
        self.oplist = oplist
        self.curPos = 1  # current cursor position, ONE-indexed
        self.rollable = rollable
        self.nextMenuMeta = nextMenuMeta
        self.nextMenu = nextMenu

    def setNext(self, target):
        """Set the menu to return to upon pressing B."""
        self.nextMenu = target

    def getNext(self):
        """Return the menu to go back to (may be False for the world)."""
        return self.nextMenu

    def getArray(self):
        """Return the menu as a 2-D character grid for sprite generation:
        '*'/'=' borders, '|' sides, '>' marking the cursor row."""
        # width of the widest option (never below 2)
        maxLength = 2
        for op in self.oplist:  # op needs to be a string
            if len(op) > maxLength:
                maxLength = len(op)
        # top border line (+1 column reserved for the cursor)
        opAr = [["*"]]
        for i in range(0, maxLength + 1):
            opAr[0].append("=")
        opAr[0].append("*")
        # one row per option: pipe, cursor slot, padded text, pipe
        for op in self.oplist:
            tmp = ["|", " "]
            tmpStr = op.ljust(maxLength)
            for char in tmpStr:
                tmp.append(char)
            tmp.append("|")
            opAr.append(tmp)
        opAr.append(opAr[0])  # bottom border reuses the top border row
        # draw cursor
        opAr[self.curPos][1] = ">"
        return opAr

    def setOptions(self, newList):
        """Replace the option list."""
        self.oplist = newList

    def moveCursor(self, direction):
        """Move the cursor "up" or "down"; wrap around only if rollable."""
        if direction == "up":
            if self.curPos > 1:  # curPos == 1 means cursor on top option
                self.curPos -= 1
            elif self.rollable:
                self.curPos = len(self.oplist)
        elif direction == "down":
            if self.curPos < len(self.oplist):
                self.curPos += 1
            elif self.rollable:
                self.curPos = 1

    def processInput(self, event, screen):
        """Handle a pygame KEYDOWN: UP/DOWN move, A selects.  No back key."""
        if event.type == KEYDOWN:
            if event.key == K_UP:
                self.moveCursor("up")
            elif event.key == K_DOWN:
                self.moveCursor("down")
            elif event.key == K_a:
                oldMenu = self
                newMenu = menuDict[self.oplist[self.curPos - 1]]
                screen.curMenu = newMenu
                if newMenu.nextMenuMeta == "previous":
                    # BUGFIX: was newMenu.setnextMenu(oldMenu) -- no such
                    # method exists; the setter is setNext().
                    newMenu.setNext(oldMenu)
########### Punkemon, because there need to be some mons in this mon-battling game ############
class punkemon:
    """Base class for an individual punkemon instance.

    Parameters are the species-wide data (base stats, learnable moves, XP
    yield) plus this individual's level; per-individual IVs are rolled at
    construction and EVs accumulate through battles.
    """

    def __init__(self, species, nation, specNum, level, learnSpeed,
                 baseStats, baseXPworth, allMoves=None):
        # BUGFIX: mutable default argument (allMoves=[]) replaced with None.
        if allMoves is None:
            allMoves = []
        self.species = species
        self.specNum = specNum
        self.nation = nation
        global IDnum
        self.ID = IDnum  # unique per-instance id, from the module counter
        IDnum += 1
        self.level = level
        self.learnSpeed = learnSpeed  # rate of leveling with respect to XP
        self.XP = self.getXP()
        self.baseStats = baseStats
        # BUGFIX: baseXPworth was accepted but never stored.
        self.baseXPworth = baseXPworth
        self.IVs = self.getIVs()
        self.EVs = [0, 0, 0, 0, 0]  # accumulates with battles
        self.permStats = self.getPermStats()  # stats right after healing
        # stats after effects of damage and stat-altering moves
        self.tempStats = self.permStats
        self.accuracy = 100
        self.evasion = 100
        # poison/burn/freeze/sleep/paralysis/flinch/confused, or False
        self.status = False
        self.canMove = True
        self.allMoves = allMoves  # learnable moves
        self.curMoves = self.getMoves(level)
        self.curPP = self.getPP()

    def getIVs(self):
        """Roll five IVs in 0-15, biased towards the mean (d8 + d9, 0-indexed)."""
        IVs = []
        for i in range(0, 5):
            IVs.append(randint(0, 7) + randint(0, 8))
        return IVs

    def getPermStats(self):
        """Compute [Attack, Defense, HP, Speed, Special] from base stats,
        IVs, EVs and level.  HP gets a +10 offset, all others +5.

        WARNING: never divide ints in Py2 -- it auto-floors!  The EV term is
        float()ed so the division stays true division.
        """
        # BUGFIX: the original used self.EVs[0] for every stat; each stat
        # uses its own EV slot, mirroring IVs and baseStats.
        stats = []
        for i in range(5):
            raw = (self.IVs[i] + self.baseStats[i]
                   + float(self.EVs[i] ** .5) / float(8) + 50) * self.level
            offset = 10 if i == 2 else 5  # HP (index 2) gets +10
            stats.append(floor(raw / 50 + offset))
        return stats

    def getXP(self):
        """Return the XP total implied by the current level and learn speed."""
        if self.learnSpeed == "slow":
            return 5 * self.level ** 3 / 4
        elif self.learnSpeed == "medium":
            return self.level ** 3
        elif self.learnSpeed == "fast":
            return 4 * self.level ** 3 / 5

    def getPP(self):
        """Return a per-move PP list, initialized to each move's maximum."""
        curPP = []
        for m in self.curMoves:
            curPP.append(m.maxPP)
        return curPP

    def getMoves(self, level):
        """Return the active move set: the last four learnable moves.
        (level is currently unused -- learnset-by-level not implemented.)"""
        return self.allMoves[-4:]

    def setCanMove(self):
        # BUGFIX: was missing `self`.  ############ WRITE THIS ############
        pass

    def levelUp(self, number=1):
        """Raise the level by `number` and recompute permanent stats."""
        self.level += number
        # BUGFIX: was self.stats = self.getStats() -- neither the attribute
        # nor the method exists; recompute the real stat block instead.
        # TODO(review): should tempStats/XP also be refreshed here?
        self.permStats = self.getPermStats()
#################### Puttin' on the MOVES ###################
class move:
    """A battle move: power/accuracy/PP data plus an optional side effect.

    sideEffect is either False or a string: "self <stat> <delta>",
    "enemy <stat> <delta>", or "<percent> <status>".
    """

    def __init__(self, name, basePwr, baseAcc, maxPP, nation, sideEffect,
                 fastMove=False):
        self.name = name
        self.basePwr = basePwr
        self.baseAcc = baseAcc
        self.maxPP = maxPP
        self.nation = nation
        self.sideEffect = sideEffect
        # fast moves pre-empt the Speed duel (quick attack, swift)
        self.fastMove = fastMove

    def getCurPP(self, attacker):
        """Return this move's remaining PP in the attacker's move slots."""
        for slot in range(numMoves):
            if attacker.curMoves[slot].name == self.name:
                return attacker.curPP[slot]

    def getHit(self, attacker, defender):
        """Roll to-hit: base accuracy scaled by attacker accuracy over
        defender evasion, against a d100 (0-99)."""
        threshold = float(self.baseAcc) * attacker.accuracy / defender.evasion
        return randint(0, 99) < threshold

    def getDamage(self, attacker, defender):
        # damage formula not implemented yet
        pass

    def getEffect(self, attacker, defender):
        """Apply the side effect: a stat change to self/enemy, or a
        percent-chance status infliction."""
        if not self.sideEffect:
            return
        words = self.sideEffect.split()
        if words[0] == "self":
            attacker.tempStats[statOrder[words[1]]] += int(words[2])
        elif words[0] == "enemy":
            defender.tempStats[statOrder[words[1]]] += int(words[2])
        elif randint(0, 99) < int(words[0]):
            defender.status = words[1]
############Screen and high-level "running the game" stuff##############
class screen:
    """Top-level game object: owns the pygame window, clock, and the
    currently-active menu, and drives the main loop."""

    # runs at start of screen, conducts background setup before first loop
    def __init__(self):
        pygame.init()
        self.background = pygame.image.load("jedipunzel.jpg")
        self.screenSize = self.background.get_size()
        self.gameScreen = pygame.display.set_mode(self.screenSize, 0, 32)
        self.backgroundColor = pygame.Color(255, 255, 255)
        self.clock = pygame.time.Clock()
        self.fps = 36
        # these rebind per game state ("menu" vs future states)
        self.processInput = self.menuInput
        self.drawScreen = self.drawMenu
        self.curMenu = Intro
        # BUGFIX: self.mode must be assigned *before* mainloop() runs -- the
        # loop reads self.mode immediately, and the original set it only
        # after the (non-returning) mainloop() call.
        self.mode = "menu"
        self.mainloop()

    def mainloop(self):
        """Run the game loop appropriate for the current mode."""
        if self.mode == "menu":
            while True:
                # find out what the user has done
                event = self.getInput()
                # deal with it, updating gamestate accordingly
                self.processInput(event)
                # update broader game state
                self.update()
                # draw
                self.drawScreen()
                self.clock.tick(self.fps)
        if self.mode == "battle":
            # Decide turn order, then resolve both moves.
            firstMon = ownMon; secondMon = enemyMon
            firstMove = ownMove; secondMove = enemyMove
            # NOTE(review): fastMove belongs to move objects and Speed lives
            # in the stat lists -- ownMon.fastMove / ownMon.Speed look wrong;
            # confirm against the punkemon class.
            if ownMon.fastMove == enemyMon.fastMove:
                # Speed duel; player wins ties as currently implemented.
                if ownMon.Speed < enemyMon.Speed:
                    firstMon = enemyMon; secondMon = ownMon
                    # BUGFIX: the moves must swap along with the mons (the
                    # original re-assigned firstMove=ownMove here).
                    firstMove = enemyMove; secondMove = ownMove
            else:
                # exactly one fast move in play: its owner goes first
                if enemyMove.fastMove:
                    firstMon = enemyMon; secondMon = ownMon
                    # BUGFIX: same move-swap fix as above.
                    firstMove = enemyMove; secondMove = ownMove
            # NOTE(review): canMove is a bool attribute (not callable) and
            # getHit/getDamage are defined on moves with arguments -- these
            # calls cannot work as written; confirm intended API.
            if firstMon.canMove() and firstMon.getHit():
                firstMove.getDamage()
            if secondMon.canMove() and secondMon.getHit():
                secondMove.getDamage()

    def getInput(self):
        """Return the first relevant pygame event, or False if none.

        QUIT tears down the display; only keys in goodKeys are reported.
        """
        goodKeys = [K_a, K_s, K_SPACE, K_UP, K_DOWN, K_RIGHT, K_LEFT]
        # add > and < later for time warp and p for pause
        events = pygame.event.get()
        for event in events:
            if event.type == QUIT:
                pygame.display.quit()
                break
            elif event.type == KEYDOWN:
                if event.key in goodKeys:
                    return event
        return False

    def menuInput(self, event):
        """Forward an event (possibly False) to the active menu."""
        if not event:
            return  # nothing worth noting happened; do nothing
        else:
            self.curMenu.processInput(event, self)

    def update(self):
        # broader game-state update hook; nothing to do yet
        pass

    def drawASCII(self):
        # debug text renderer; currently disabled
        pass

    def drawMenu(self):
        """Blit the current menu's character grid onto the window, one
        sprite cell at a time, then flip the display."""
        self.gameScreen.fill(self.backgroundColor)
        drawPos = [0, 0]
        pixel = 15  # side length of a sprite grid unit, in pixels
        drawArray = self.curMenu.getArray()
        for row in drawArray:
            drawPos[0] = 0
            for cell in row:
                self.gameScreen.blit(menuSpriteDict[cell],
                                     dest=[drawPos[0] * pixel, drawPos[1] * pixel])
                drawPos[0] += 1
            drawPos[1] += 1
        pygame.display.flip()
#################Generating individual things
###### Global variables (semi-permanent)
IDnum=0 #increment this when a new punkemon is generated
numMoves=4 # how many move slots each punkemon has (see move.getCurPP)
statOrder={"attack":0,"defense":1,"HP":2,"Speed":3,"special":4}
#your handy guide to the stats list: stat name-->index in stats list
###### Menu instances
# NOTE(review): the positional True below binds to nextMenuMeta, not to
# rollable -- given the rollable comment in menu.__init__ this was probably
# meant to be rollable=True; confirm.
top=menu(["left","right"],True)
left=menu(["What's new?","botom","botom"],True)
left.setNext(top)
right=menu(["botom","botom"])
right.setNext(top)
botom=dialogMenu(["This is a test. \n This is a test with a very long line that needs to be broken up.","And it just keeps going! When will it end?!"])
falseChoice=forcedChoiceMenu(["Boy","Girl"])
nickChoice=forcedChoiceMenu(["ASSHAT","ASSFACE","BUTTHAT","BUTTFACE","FACEHAT","ASSBUTT",'"GARY"'])
noDice=dialogMenu(["Since it seems I can't talk either of you two out of it~","Your adventure in the world of PUNKEMON fighting starts NOW. Grab a mon and get going!"],"menu",top)
doItAnyway=forcedChoiceMenu(["You can't scare me.","I'm gonna be the best!"],"menu",noDice)
talkOut=dialogMenu(["I'll tell you what I told him:\nThe fighting circuit ain't no nursery school.","You've got a better chance of ending up in jail or a body bag than as a PUNKEMON CHAMPION."],"menu",doItAnyway)
Intro=dialogMenu(["Yo Brainbin!\nWelcome to the world of Punkemon~","My name is TYPHA.\nPeople in this hood, they call me the PUNKEMON PROFESSA.",
"There are creatures called PUNKEMON all up in dis world.","Some people think PUNKEMON are monsters.\nAin't totally wrong~","Some people keep 'em as pets.\nOthers use them in fights.",
"Me, I used to do that.\nNow I'm goin' straight.","I'm gonna study PUNKEMON as a profession.\nLab coat and everything.","When you're hiding behind that picture of Archie and Edith Bunker, it's hard to tell who you are.",
"Are you a boy, or a girl?"],"menu",falseChoice)
boy=dialogMenu(["You remember my little bro.\nYou've been at each other's throats ever since you were kids.","What was your charming nickname for him again?"],"menu",nickChoice)
asshat=dialogMenu(['Oh, yeah. "Asshat." Ha! You have such a way with words~'],"menu",talkOut)
assface=dialogMenu(['Oh, yeah. "Assface."Ha! You have such a way with words~'],"menu",talkOut)
butthat=dialogMenu(['Oh, yeah. "Butthat." Ha! You have such a way with words~'],"menu",talkOut)
buttface=dialogMenu(['Oh, yeah. "Buttface." Ha! You have such a way with words~'],"menu",talkOut)
facehat=dialogMenu(['Oh, yeah. "Facehat." Ha! You have such a way with words~'],"menu",talkOut)
assbutt=dialogMenu(['Oh, yeah. "Assbutt." Ha! You have such a way with words~'],"menu",talkOut)
Gary=dialogMenu(['Oh, yeah. "Gary". Ha! You have such a way with words~'],"menu",talkOut)
# menuDict maps option strings (and some menu names) to menu objects; used by
# menu/forcedChoiceMenu.processInput on selection.
# NOTE(review): falseChoice offers "Girl" but menuDict has no "Girl" key, so
# selecting it raises KeyError -- a girl-path dialog appears to be missing.
menuDict={"top":top,"left":left,"right":right,"botom":botom, "Boy": boy,"FalseChoice":falseChoice,
"nickChoice":nickChoice,"ASSHAT":asshat,"ASSFACE":assface,"BUTTHAT":butthat,"BUTTFACE":buttface,"FACEHAT":facehat,"ASSBUTT":assbutt,'"GARY"':Gary,
"talkOut":talkOut,"doItAnyway":doItAnyway,"noDice":noDice, "You can't scare me.":noDice,"I'm gonna be the best!":noDice}
######Move instances
##Initialize moves with: name,basePwr, baseAcc, maxPP, nation, sideEffect, fastMove=False
tackle=move("Tackle",100,90,20,"normal",False)
thundershock=move("Thundershock",100,90,15,"electric","30 paralysis") # 30% chance to paralyze
sandAttack=move("Sand Attack",100,90,20,"normal","enemy accuracy -5")##IMPLEMENT ACCURACY then pick a better number
splash=move("Splash",0,100,20,"water",False) # does nothing, true to form
###Mon species creation
##Initialize all species with:
##species,nation,specNum,level,learnSpeed,baseStats,baseXPworth,allMoves=[]
class bulbasaur(punkemon):
    """Species wrapper: fixed Bulbasaur base data; only name and level vary."""
    def __init__(self,name,level):
        punkemon.__init__(self,"Bulbasaur","grass",1,level,"medium",[49,49,45,45,65],64,[tackle])
        self.name=name
class charmander(punkemon):
    """Species wrapper: fixed Charmander base data; only name and level vary."""
    def __init__(self,name,level):
        punkemon.__init__(self,"Charmander","fire",4,level,"medium",[52,43,39,65,50],62,[tackle])
        self.name=name
class squirtle(punkemon):
    # Water starter, species number 7 (same argument layout as bulbasaur).
    def __init__(self,name,level):
        punkemon.__init__(self,"Squirtle","water",7,level,"medium",[48,65,44,43,50],63,[tackle])
        # Nickname; set after the base __init__ so it wins over any default.
        self.name=name
class derp(punkemon):
    # Joke/test species: all-zero stats, only knows Splash.
    # NOTE(review): reuses species number 1 (same as Bulbasaur) and the made-up
    # "fail" nation -- presumably fine for testing, but verify species-number
    # lookups elsewhere.
    def __init__(self,name,level):
        punkemon.__init__(self,"Derp","fail",1,level,"slow",[0,0,0,0,0],64,[splash])
        # Nickname; set after the base __init__ so it wins over any default.
        self.name=name
######Pokemon instance creation
##Initialize all pokemon with: species, level
# The three classic level-5 starters plus a high-level joke mon for testing.
starterBulbasaur=bulbasaur("bulbasaur",5)
starterCharmander=charmander("charmander",5)
starterSquirtle=squirtle("squirtle",5)
derpy=derp("derpy",30)
######Hard sets of things that should be dynamically generated (Yeah testing!)
# Temporary battle fixtures: which mon/move each side uses.
ownMon = starterBulbasaur
enemyMon = starterCharmander
ownMove = tackle
# Bug fix: this line previously read `enemyMon=thundershock`, which clobbered
# the enemy punkemon set above with a *move* and left enemyMove undefined.
# By symmetry with ownMove, the move belongs in enemyMove.
enemyMove = thundershock
| |
import datetime
from django.conf import settings
from django.db import models
from django.utils.translation import (
activate, to_locale, get_language, ugettext)
import bleach
import caching.base
from babel import Locale, numbers
from jinja2.filters import do_dictsort
from olympia import amo
from olympia.amo.models import SearchMixin
from olympia.amo.utils import get_locale_from_lang, send_mail_jinja
from .db import LargeStatsDictField, StatsDictField
def update_inc(initial, key, count):
    """Update or create a dict of `int` counters, for StatsDictFields.

    Adds `count` to `initial[key]` (treating a missing key as 0) and returns
    the dict. A falsy `initial` (None, {}) is replaced by a fresh dict;
    otherwise `initial` itself is mutated and returned.
    """
    counters = initial or {}
    counters[key] = counters.get(key, 0) + count
    return counters
class AddonCollectionCount(models.Model):
    """Per-date count for an (add-on, collection) pair."""
    addon = models.ForeignKey('addons.Addon')
    collection = models.ForeignKey('bandwagon.Collection')
    count = models.PositiveIntegerField()
    date = models.DateField()

    class Meta:
        db_table = 'stats_addons_collections_counts'
class StatsSearchMixin(SearchMixin):
    """SearchMixin pointing all stats models at the shared 'stats' ES alias."""
    ES_ALIAS_KEY = 'stats'
class CollectionCount(StatsSearchMixin, models.Model):
    """Per-date count for a collection (indexed into the stats ES alias)."""
    collection = models.ForeignKey('bandwagon.Collection')
    # index name in our dev/stage/prod database: `count`
    count = models.PositiveIntegerField(db_index=True)
    # index name in our dev/stage/prod database: `date`
    date = models.DateField(db_index=True)

    class Meta:
        db_table = 'stats_collections_counts'
class CollectionStats(models.Model):
    """In the running for worst-named model ever.

    Per-date, named counter attached to a collection; what `name`
    distinguishes is not visible here -- see the writers of this table.
    """
    collection = models.ForeignKey('bandwagon.Collection')
    name = models.CharField(max_length=255, null=True)
    count = models.PositiveIntegerField()
    date = models.DateField()

    class Meta:
        db_table = 'stats_collections'
class DownloadCount(StatsSearchMixin, models.Model):
    """Per-date download count for an add-on, with a per-source breakdown."""
    # has an index `addon_id` on this column...
    addon = models.ForeignKey('addons.Addon')
    # has an index named `count` in dev, stage and prod
    count = models.PositiveIntegerField(db_index=True)
    date = models.DateField()
    # Counter dict keyed by download source (db column `src`).
    sources = StatsDictField(db_column='src', null=True)

    class Meta:
        db_table = 'download_counts'
        # additional indices on this table (in dev, stage and prod):
        # * KEY `addon_and_count` (`addon_id`,`count`)
        # * KEY `addon_date_idx` (`addon_id`,`date`)

        # in our (dev, stage and prod) database:
        # UNIQUE KEY `date_2` (`date`,`addon_id`)
        unique_together = ('date', 'addon')
class UpdateCount(StatsSearchMixin, models.Model):
    """Per-date update count for an add-on, with breakdown dicts."""
    # Has an index `addon_id` in our dev, stage and prod database
    addon = models.ForeignKey('addons.Addon')
    # Has an index named `count` in our dev, stage and prod database
    count = models.PositiveIntegerField(db_index=True)
    # Has an index named `date` in our dev, stage and prod database
    date = models.DateField(db_index=True)
    # Counter dicts (see StatsDictField) keyed by version, status,
    # application, OS and locale respectively.
    versions = StatsDictField(db_column='version', null=True)
    statuses = StatsDictField(db_column='status', null=True)
    applications = LargeStatsDictField(db_column='application', null=True)
    oses = StatsDictField(db_column='os', null=True)
    locales = StatsDictField(db_column='locale', null=True)

    class Meta:
        db_table = 'update_counts'
        # Additional indices on this table (on dev, stage and prod):
        # * KEY `addon_and_count` (`addon_id`,`count`)
        # * KEY `addon_date_idx` (`addon_id`,`date`)
class ThemeUpdateCountManager(models.Manager):
    """Manager adding aggregate helpers for ThemeUpdateCount."""

    def get_range_days_avg(self, start, end, *extra_fields):
        """Return a ValuesListQuerySet containing the addon_id and popularity
        for each theme, where popularity is the average number of users
        (count) over the given range of days passed as start / end arguments.

        If extra_fields are passed, then the list of fields is returned in the
        queryset, inserted after addon_id but before popularity."""
        # `avg` is appended last by annotate(), giving rows shaped
        # (addon_id, *extra_fields, avg).
        return (self.values_list('addon_id', *extra_fields)
                    .filter(date__range=[start, end])
                    .annotate(avg=models.Avg('count')))
class ThemeUpdateCount(StatsSearchMixin, models.Model):
    """Daily users taken from the ADI data (coming from Hive)."""
    addon = models.ForeignKey('addons.Addon')
    count = models.PositiveIntegerField()
    date = models.DateField()

    # Custom manager providing range-average aggregation helpers.
    objects = ThemeUpdateCountManager()

    class Meta:
        db_table = 'theme_update_counts'
class ThemeUpdateCountBulk(models.Model):
    """Used by the update_theme_popularity_movers command for perf reasons.

    First bulk inserting all the averages over the last week and last three
    weeks in this table allows us to bulk update (instead of running an update
    per Persona).
    """
    persona_id = models.PositiveIntegerField()
    popularity = models.PositiveIntegerField()
    # Relative change in popularity ("movers"); computed by the command above.
    movers = models.FloatField()

    class Meta:
        db_table = 'theme_update_counts_bulk'
class ContributionError(Exception):
    """Raised for invalid or incomplete contributions.

    Carries the offending value; the string form is its repr().
    """

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return '%r' % (self.value,)
class Contribution(amo.models.ModelBase):
    """A payment made toward an add-on (voluntary contribution or purchase)."""
    # TODO(addon): figure out what to do when we delete the add-on.
    addon = models.ForeignKey('addons.Addon')
    amount = models.DecimalField(max_digits=9, decimal_places=2, null=True)
    currency = models.CharField(max_length=3,
                                choices=do_dictsort(amo.PAYPAL_CURRENCIES),
                                default=amo.CURRENCY_DEFAULT)
    source = models.CharField(max_length=255, null=True)
    source_locale = models.CharField(max_length=10, null=True)
    # This is the external id that you can communicate to the world.
    uuid = models.CharField(max_length=255, null=True, db_index=True)
    comment = models.CharField(max_length=255)
    # This is the internal transaction id between us and a provider,
    # for example paypal or solitude.
    transaction_id = models.CharField(max_length=255, null=True, db_index=True)
    paykey = models.CharField(max_length=255, null=True)
    # Raw provider callback payload (e.g. PayPal IPN fields); see the
    # `contributor`/`email` properties below for the keys that are read.
    post_data = StatsDictField(null=True)

    # Voluntary Contribution specific.
    charity = models.ForeignKey('addons.Charity', null=True)
    annoying = models.PositiveIntegerField(default=0,
                                           choices=amo.CONTRIB_CHOICES,)
    is_suggested = models.BooleanField(default=False)
    suggested_amount = models.DecimalField(max_digits=9, decimal_places=2,
                                           null=True)

    class Meta:
        db_table = 'stats_contributions'

    def __unicode__(self):
        return u'%s: %s' % (self.addon.name, self.amount)

    @property
    def date(self):
        """Creation date as a `datetime.date`, or None if not yet saved."""
        try:
            return datetime.date(self.created.year,
                                 self.created.month, self.created.day)
        except AttributeError:
            # created may be None
            return None

    @property
    def contributor(self):
        """Contributor's full name from the provider post data, or None."""
        try:
            return u'%s %s' % (self.post_data['first_name'],
                               self.post_data['last_name'])
        except (TypeError, KeyError):
            # post_data may be None or missing a key
            return None

    @property
    def email(self):
        """Payer's email address from the provider post data, or None."""
        try:
            return self.post_data['payer_email']
        except (TypeError, KeyError):
            # post_data may be None or missing a key
            return None

    def _switch_locale(self):
        # Activate the contribution's source locale (falling back to the
        # add-on's default) for translations, and return the babel Locale.
        if self.source_locale:
            lang = self.source_locale
        else:
            lang = self.addon.default_locale
        activate(lang)
        return Locale(to_locale(lang))

    def mail_thankyou(self, request=None):
        """
        Mail a thankyou note for a completed contribution.

        Raises a ``ContributionError`` exception when the contribution
        is not complete or email addresses are not found.
        """
        locale = self._switch_locale()

        # Thankyous must be enabled.
        if not self.addon.enable_thankyou:
            # Not an error condition, just return.
            return

        # Contribution must be complete.
        if not self.transaction_id:
            raise ContributionError('Transaction not complete')

        # Send from support_email, developer's email, or default.
        from_email = settings.DEFAULT_FROM_EMAIL
        if self.addon.support_email:
            from_email = str(self.addon.support_email)

        # We need the contributor's email.
        # NOTE(review): raises TypeError/KeyError (not ContributionError) if
        # post_data is None or lacks the key -- only the empty-string case is
        # wrapped; confirm this is intended.
        to_email = self.post_data['payer_email']
        if not to_email:
            raise ContributionError('Empty payer email')

        # Make sure the url uses the right language.
        # Setting a prefixer would be nicer, but that requires a request.
        url_parts = self.addon.meet_the_dev_url().split('/')
        url_parts[1] = locale.language

        subject = ugettext('Thanks for contributing to {addon_name}').format(
            addon_name=self.addon.name)

        # Send the email.
        send_mail_jinja(
            subject, 'stats/contribution-thankyou-email.ltxt',
            {'thankyou_note': bleach.clean(unicode(self.addon.thankyou_note),
                                           strip=True),
             'addon_name': self.addon.name,
             'learn_url': '%s%s?src=emailinfo' % (settings.SITE_URL,
                                                  '/'.join(url_parts)),
             'domain': settings.DOMAIN},
            from_email, [to_email], fail_silently=True,
            perm_setting='dev_thanks')

    def get_amount_locale(self, locale=None):
        """Localise the amount paid into the current locale."""
        if not locale:
            lang = get_language()
            locale = get_locale_from_lang(lang)
        # Fall back to 0 / USD so formatting never fails on blank rows.
        return numbers.format_currency(self.amount or 0,
                                       self.currency or 'USD',
                                       locale=locale)
class GlobalStat(caching.base.CachingMixin, models.Model):
    """Site-wide named counter, one row per (name, date)."""
    name = models.CharField(max_length=255)
    count = models.IntegerField()
    date = models.DateField()

    # Cached queryset manager (django-cache-machine).
    objects = caching.base.CachingManager()

    class Meta:
        db_table = 'global_stats'
        unique_together = ('name', 'date')
        get_latest_by = 'date'
class ThemeUserCount(StatsSearchMixin, models.Model):
    """Theme popularity (weekly average of users).

    This is filled in by a cron job reading the popularity from the theme
    (Persona).
    """
    addon = models.ForeignKey('addons.Addon')
    count = models.PositiveIntegerField()
    date = models.DateField()

    class Meta:
        db_table = 'theme_user_counts'
        index_together = ('date', 'addon')
| |
import nltk
import nltk.translate.chrf_score # This is necessary to avoid an AttributeError in NLTK
import sacrebleu
import numpy as np
import math
import re
import subprocess
import tempfile
from collections import Counter
from itertools import chain
from compare_mt import corpus_utils
from compare_mt import align_utils
from compare_mt import ngram_utils
from compare_mt.rouge import rouge_scorer
# Global variable controlling scorer scale.
# Scores that are naturally in [0, 1] are multiplied by this, so e.g. BLEU
# and chrF are reported on a 0-100 scale.
global_scorer_scale = 100.0
class Scorer(object):
    """Abstract base class for evaluation metrics.

    Subclasses implement corpus- and/or sentence-level scoring; both return
    a (score, aux_info_string_or_None) tuple.
    """

    @property
    def scale(self):
        # Default scale factor; metric subclasses usually override this
        # with the module-level global scorer scale.
        return 1.0

    def score_corpus(self, ref, out, src=None):
        """Score a whole corpus; implemented by subclasses."""
        pass

    def score_sentence(self, ref, out, src=None):
        """Score a single sentence; implemented by subclasses."""
        pass

    def cache_stats(self, ref, out, src=None):
        """Return cached sufficient statistics, or None when unsupported."""
        return None

    def name(self):
        """
        A name that can have spaces that describes the scorer.
        """
        return None

    def idstr(self):
        """
        An ID string that contains no spaces but identifies the scorer.
        """
        return None
class SentenceFactoredScorer(Scorer):
    """Base for scorers whose corpus score is the mean of sentence scores."""

    def score_corpus(self, ref, out, src=None):
        """
        Score a corpus using the average of the score

        Args:
          ref: A reference corpus
          out: An output corpus
          src: A source corpus. Might be ignored or required
            depending on the metric

        Returns:
          A tuple containing a single value for the average score, and None
        """
        # Empty corpus: return 0 rather than dividing by zero.
        if len(ref) == 0:
            return 0.0, None
        score_sum = 0
        # Pad src with Nones so the three-way zip below works.
        src = [None for _ in ref] if src is None else src
        for r, o, s in zip(ref, out, src):
            score_sum += self.score_sentence(r, o, s)[0]
        return score_sum/len(ref), None

    def cache_stats(self, ref, out, src=None):
        """
        Cache sufficient statistics for caculating scores

        Args:
          ref: A reference corpus
          out: An output corpus
          src: A source corpus. Might be ignored or required
            depending on the metric

        Returns:
          A tuple of cached statistics
        """
        # Only subclasses that define case_insensitive get the lowercasing;
        # the base class itself has no such attribute.
        if hasattr(self, 'case_insensitive') and self.case_insensitive:
            ref = corpus_utils.lower(ref)
            out = corpus_utils.lower(out)

        # Per-sentence scores are the sufficient statistic: any subset can
        # later be averaged in score_cached_corpus.
        cached_scores = []
        src = [None for _ in ref] if src is None else src
        for r, o, s in zip(ref, out, src):
            cached_scores.append(self.score_sentence(r, o, s)[0])
        return cached_scores

    def score_cached_corpus(self, sent_ids, cached_stats):
        """
        Score a corpus with cache

        Args:
          sent_ids: The sentence ids for reference and output corpora
          cached_stats: A tuple of cached statistics

        Returns:
          A tuple containing a single value for the score and a string summarizing auxiliary information
        """
        cached_stats = np.array(cached_stats)
        return np.mean(cached_stats[sent_ids]), None
class BleuScorer(Scorer):
    """
    A scorer that calculates BLEU score.
    """
    def __init__(self, weights=(0.25, 0.25, 0.25, 0.25), case_insensitive=False):
        # weights: per-order n-gram weights; their length also fixes the
        # maximum n-gram order (default: uniform 4-gram BLEU).
        self.weights = weights
        self.case_insensitive = case_insensitive

    @property
    def scale(self):
        return global_scorer_scale

    def score_corpus(self, ref, out, src=None):
        """
        Score a corpus using BLEU score

        Args:
          ref: A reference corpus
          out: An output corpus
          src: A source courpus. Ignored if passed

        Returns:
          A tuple containing a single value for the BLEU score and a string summarizing auxiliary information
        """
        cached_stats = self.cache_stats(ref, out)
        return self.score_cached_corpus(range(len(ref)), cached_stats)

    def score_sentence(self, ref, out, src=None):
        raise NotImplementedError("Sentence-level calculation is not implemented in BleuScorer as it is usually 0."
                                  "Consider using SentenceBleuScorer (string sentbleu) instead.")

    def _precision(self, ref, out, n):
        """
        Caculate n-gram precision

        Args:
          ref: A reference sentence
          out: An output sentence
          n: The n-gram order

        Returns:
          Numerator and denominator of the precision
        """
        out_ngram = ngram_utils.sent_ngrams_list(out, n)
        ref_ngram = ngram_utils.sent_ngrams_list(ref, n)
        out_cnt = Counter(out_ngram)
        ref_cnt = Counter(ref_ngram)

        num = 0
        denom = 0
        # Clipped counts: each output n-gram is credited at most as many
        # times as it occurs in the reference.
        for ngram, o_cnt in out_cnt.items():
            num += min(o_cnt, ref_cnt[ngram])
            denom += o_cnt
        # Avoid a zero denominator for sentences shorter than n tokens.
        denom = max(1, denom)

        return num, denom

    def cache_stats(self, ref, out, src=None):
        """
        Cache sufficient statistics for caculating BLEU score

        Args:
          ref: A reference corpus
          out: An output corpus
          src: A source courpus. Ignored if passed

        Returns:
          A list of cached statistics
        """
        if self.case_insensitive:
            ref = corpus_utils.lower(ref)
            out = corpus_utils.lower(out)

        # Per-sentence (ref_len, out_len, [(num, denom) per order]) tuples;
        # these are sufficient to rescore any subset of sentences.
        cached_stats = []

        for r, o in zip(ref, out):
            prec = []
            for n in range(1, len(self.weights) + 1):
                prec.append(self._precision(r, o, n))
            cached_stats.append( (len(r), len(o), prec) )

        return cached_stats

    def score_cached_corpus(self, sent_ids, cached_stats):
        """
        Score a corpus using BLEU score with cache

        Args:
          sent_ids: The sentence ids for reference and output corpora
          cached_stats: A list of cached statistics

        Returns:
          A tuple containing a single value for the BLEU score and a string summarizing auxiliary information
        """
        if len(cached_stats) == 0:
            return 0.0, None

        cached_ref_len, cached_out_len, cached_prec = zip(*cached_stats)

        num_prec = Counter()
        denom_prec = Counter()

        # Accumulate lengths and clipped counts over the selected sentences.
        ref_len = 0
        out_len = 0
        for sent_id in sent_ids:
            ref_len += cached_ref_len[sent_id]
            out_len += cached_out_len[sent_id]
            for n in range(1, len(self.weights) + 1):
                num, denom = cached_prec[sent_id][n-1]
                num_prec[n] += num
                denom_prec[n] += denom

        # No matching unigrams at all means BLEU is exactly 0.
        if num_prec[1] == 0:
            return 0, None

        # Weighted log-precision sum.
        # NOTE(review): a higher order with zero precision contributes 0
        # (not -inf) to the sum here -- strict BLEU would make the whole
        # score 0; confirm this smoothing is intended.
        prec = 0
        for i, w in enumerate(self.weights, start=1):
            p = num_prec[i] / denom_prec[i] if denom_prec[i] != 0 else 0
            p = math.log(p) if p > 0 else 0
            prec += p * w

        # Brevity penalty, capped at 1 for outputs longer than the reference.
        bp = min(1, math.exp(1 - ref_len/out_len)) if out_len != 0 else 0

        return self.scale * bp * math.exp(prec), None

    def name(self):
        return "BLEU"

    def idstr(self):
        return "bleu"
class SentBleuScorer(SentenceFactoredScorer):
    """
    A scorer that calculates sentence-level smoothed BLEU score.
    """
    def __init__(self, case_insensitive=False):
        self.case_insensitive = case_insensitive

    @property
    def scale(self):
        return global_scorer_scale

    def score_sentence(self, ref, out, src=None):
        """
        Score a single sentence with sentence-level smoothed BLEU score

        Args:
          ref: A reference sentence
          out: An output sentence
          src: A source sentence. Ignored if passed

        Returns:
          The sentence-level BLEU score, and None
        """
        # method2 is NLTK's Chen & Cherry smoothing #2 (add-1 on the counts
        # of orders > 1), which keeps short sentences from scoring 0.
        chencherry = nltk.translate.bleu_score.SmoothingFunction()
        if self.case_insensitive:
            bleu_score = nltk.translate.bleu_score.sentence_bleu([corpus_utils.lower(ref)], corpus_utils.lower(out), smoothing_function=chencherry.method2)
        else:
            bleu_score = nltk.translate.bleu_score.sentence_bleu([ref], out, smoothing_function=chencherry.method2)
        return self.scale * bleu_score, None

    def name(self):
        return "sentence-level BLEU"

    def idstr(self):
        return "sentbleu"
class LengthScorer(Scorer):
    """A scorer reporting the output/reference length ratio."""

    def score_corpus(self, ref, out, src=None):
        """
        Compute the corpus-level length ratio.

        Args:
          ref: A reference corpus
          out: An output corpus
          src: A source corpus. Ignored if passed

        Returns:
          A tuple of the length ratio and a string with both word totals
        """
        ref_words = sum(len(sent) for sent in ref)
        out_words = sum(len(sent) for sent in out)
        detail = f'ref={ref_words}, out={out_words}'
        if ref_words == 0:
            return 0.0, detail
        return self.scale * out_words / ref_words, detail

    def score_sentence(self, ref, out, src=None):
        """
        Compute a single sentence's length ratio.

        Args:
          ref: A reference sentence
          out: An output sentence
          src: A source sentence. Ignored if passed

        Returns:
          A tuple of the ratio and a string with both sentence lengths
        """
        detail = f"ref={len(ref)}, out={len(out)}"
        if not ref:
            return 0.0, detail
        return len(out) / len(ref), detail

    def name(self):
        return "length ratio"

    def idstr(self):
        return "lengthrat"
class ExactMatchScorer(Scorer):
    """
    A scorer that calculates exact matches
    """
    def score_corpus(self, ref, out, src=None):
        """
        Calculate the percentage of exact matches in a corpus

        Args:
          ref: A reference corpus
          out: An output corpus
          src: A source corpus. Ignored if passed

        Returns:
          A tuple containing a single value for the exact match percentage and None
        """
        # Bug fix: an empty corpus previously raised ZeroDivisionError;
        # return 0.0 like the other scorers do.
        if len(ref) == 0:
            return 0.0, None
        matches = 0
        for r, o in zip(ref, out):
            if r == o:
                matches += 1
        return float(matches) / len(ref), None

    def score_sentence(self, ref, out, src=None):
        """
        Score a single sentence by exact match

        Args:
          ref: A reference sentence
          out: An output sentence
          src: A source sentence. Ignored if passed

        Returns:
          1.0 if exact match else 0.0, and None
        """
        # Return a float in both branches (previously the miss case was int 0).
        return (1.0 if ref == out else 0.0), None

    def name(self):
        return "exact match"

    def idstr(self):
        return "exact"
class RibesScorer(SentenceFactoredScorer):
    """
    A scorer that calculates RIBES score.
    """
    def __init__(self, order=-1, alpha=0.25, beta=0.1, case_insensitive=False):
        # order: n-gram order used by the aligner (-1 presumably means
        # unlimited -- defined by align_utils.ngram_context_align).
        # alpha/beta: exponents on precision and the brevity penalty.
        self.order = order
        self.alpha = alpha
        self.beta = beta
        self.case_insensitive = case_insensitive

    @property
    def scale(self):
        return global_scorer_scale

    def _kendall_tau_distance(self, alignment):
        """
        Caculate the Kendall's tau distance for RIBES

        Args:
          alignment: an alignment represented as a list of integers

        Returns:
          The Kendall's tau distance
        """
        # NOTE(review): despite the name, `dis` counts *ascending* (correctly
        # ordered) pairs, so this returns the normalized count of correctly
        # ordered pairs (RIBES's NKT), not a distance.
        dis = 0
        n = len(alignment)
        if n <= 1:
            return 0
        for i in range(n):
            for j in range(i+1, n):
                if alignment[j] > alignment[i]:
                    dis += 1
        # Normalize by the number of pairs, n*(n-1)/2.
        return 2*dis/(n*n-n)

    def score_sentence(self, ref, out, src=None):
        """
        Score a single sentence with RIBES score

        Args:
          ref: A reference sentence
          out: An output sentence
          src: A source sentence. Ignored if passed

        Returns:
          The RIBES score, and None
        """
        alignment = align_utils.ngram_context_align(ref, out, order=self.order, case_insensitive=self.case_insensitive)
        kt_dis = self._kendall_tau_distance(alignment)
        # Precision: fraction of output words that were aligned.
        prec = len(alignment)/ len(out) if len(out) != 0 else 0
        # Brevity penalty, capped at 1, as in BLEU.
        bp = min(1, math.exp(1-len(ref)/len(out))) if len(out) != 0 else 0
        return self.scale * kt_dis * (prec**self.alpha) * (bp**self.beta), None

    def name(self):
        return "RIBES"

    def idstr(self):
        return "ribes"
class SacreBleuScorer(Scorer):
    """
    A scorer that computes BLEU on detokenized text.
    """
    def __init__(self, smooth_method='exp', smooth_value=0, use_effective_order=False, case_insensitive=False):
        # Smoothing settings forwarded verbatim to sacrebleu.compute_bleu.
        self.smooth_method = smooth_method
        self.smooth_value = smooth_value
        self.use_effective_order = use_effective_order
        self.case_insensitive = case_insensitive

    @property
    def scale(self):
        return global_scorer_scale

    def score_sentence(self, ref, out, src=None):
        raise NotImplementedError("Sentence-level calculation is not implemented in SacreBleuScorer as it is usually 0."
                                  "Consider using SentenceBleuScorer (string sentbleu) instead.")

    def score_corpus(self, ref, out, src=None):
        """Score a corpus; delegates to cache_stats/score_cached_corpus."""
        cached_stats = self.cache_stats(ref, out)
        return self.score_cached_corpus(range(len(ref)), cached_stats)

    def cache_stats(self, ref, out, src=None):
        """
        Cache sufficient statistics for caculating SacreBLEU score

        Args:
          ref: A reference corpus
          out: An output corpus
          src: A source corpus. Ignored if passed

        Returns:
          A list of cached statistics
        """
        if self.case_insensitive:
            ref = corpus_utils.lower(ref)
            out = corpus_utils.lower(out)

        # Per-sentence sacrebleu sufficient statistics (clipped n-gram
        # counts, totals, and lengths).
        cached_stats = []
        for r, o in zip(ref, out):
            # Renamed the local from `re`, which shadowed the `re` module
            # imported at the top of this file.
            bleu = sacrebleu.corpus_bleu(" ".join(o), " ".join(r))
            cached_stats.append( (bleu.counts, bleu.totals, bleu.sys_len, bleu.ref_len) )
        return cached_stats

    def score_cached_corpus(self, sent_ids, cached_stats):
        """
        Score a corpus using SacreBLEU score with cache

        Args:
          sent_ids: The sentence ids for reference and output corpora
          cached_stats: A list of cached statistics

        Returns:
          A tuple containing a single value for the SacreBLEU score and a string summarizing auxiliary information
        """
        if len(cached_stats) == 0:
            return 0.0, None

        # Sum each statistic over the selected sentences, then let
        # sacrebleu combine them into a corpus score.
        counts, totals, sys_len, ref_len = zip(*cached_stats)
        counts, totals, sys_len, ref_len = [np.sum(np.array(x)[sent_ids], 0) for x in [counts, totals, sys_len, ref_len]]
        return sacrebleu.compute_bleu(counts, totals, sys_len, ref_len, smooth_method=self.smooth_method, smooth_value=self.smooth_value, use_effective_order=self.use_effective_order).score, None

    def name(self):
        return "SacreBleuScorer"

    def idstr(self):
        return "sacrebleu"
class ChrFScorer(Scorer):
    """
    A scorer that calculates chrF (character n-gram F-score) score.

    This computes F2 score (beta=2.0 as per http://www.aclweb.org/anthology/W16-2341).
    """
    def __init__(self, case_insensitive=False):
        self.case_insensitive = case_insensitive

    @property
    def scale(self):
        return global_scorer_scale

    def chrf_score(self, refs, out):
        # Tokens are joined back into strings because NLTK's chrF operates
        # on character n-grams of whole sentences.
        return self.scale * nltk.translate.chrf_score.corpus_chrf(
            [[" ".join(x) for x in ref] for ref in refs],
            [" ".join(x) for x in out],
            max_len=6,  # Order 6 n-grams
            beta=2.0,  # F2 score
            ignore_whitespace=True  # No whitespaces
        )

    def score_corpus(self, ref, out, src=None):
        """
        Score a corpus using ChrF score

        Args:
          ref: A reference corpus
          out: An output corpus
          src: A source courpus. Ignored if passed

        Returns:
          A tuple containing a single value for the ChrF score and a string summarizing auxiliary information
        """
        if self.case_insensitive:
            chrf = self.chrf_score([[corpus_utils.lower(x)] for x in ref], corpus_utils.lower(out))
        else:
            chrf = self.chrf_score([[x] for x in ref], out)
        return chrf, None

    def score_sentence(self, ref, out, src=None):
        # NOTE(review): unlike score_corpus, this path does not apply
        # case_insensitive lowering -- confirm whether that is intended.
        return self.chrf_score([ref], [out]), None

    def name(self):
        return "ChrF"

    def idstr(self):
        return "chrf"
class RougeScorer(SentenceFactoredScorer):
    """
    A scorer that calculates ROUGE score.
    """
    def __init__(self, rouge_type, score_type='fmeasure', use_stemmer=False, case_insensitive=False):
        # rouge_type: 'rougeL', 'rougeLsum', or 'rougeN' for a digit N.
        # score_type: which component of the ROUGE result to report
        # ('fmeasure', 'precision' or 'recall').
        self.rouge_type = rouge_type
        self.score_type = score_type
        self._stemmer = nltk.stem.porter.PorterStemmer() if use_stemmer else None
        self.case_insensitive = case_insensitive

    @property
    def scale(self):
        return global_scorer_scale

    def score_sentence(self, ref, out, src=None):
        if self.case_insensitive:
            ref = corpus_utils.lower(ref)
            out = corpus_utils.lower(out)

        if self._stemmer:
            # Only stem words longer than 3 characters -- presumably to
            # mirror the official ROUGE implementation; confirm upstream.
            ref = [self._stemmer.stem(x) if len(x) > 3 else x for x in ref]
            out = [self._stemmer.stem(x) if len(x) > 3 else x for x in out]

        if self.rouge_type == 'rougeL':
            # Longest-common-subsequence F-score over the whole sentence.
            ref, out = self.tokenize(" ".join(ref)), self.tokenize(" ".join(out))
            scores = rouge_scorer._score_lcs(ref, out)
        elif self.rouge_type == 'rougeLsum':
            # Summary-level LCS: split into sentences first.
            refs = [self.tokenize(s) for s in self.get_sents(ref)]
            outs = [self.tokenize(s) for s in self.get_sents(out)]
            scores = rouge_scorer._summary_level_lcs(refs, outs)
        elif re.match(r"rouge[0-9]$", self.rouge_type):
            # ROUGE-N: n-gram overlap for n parsed from the type string.
            ref, out = self.tokenize(" ".join(ref)), self.tokenize(" ".join(out))
            n = int(self.rouge_type[5:])
            if n <= 0:
                raise ValueError(f"rougen requires positive n: {self.rouge_type}")
            ref_ngrams = rouge_scorer._create_ngrams(ref, n)
            out_ngrams = rouge_scorer._create_ngrams(out, n)
            scores = rouge_scorer._score_ngrams(ref_ngrams, out_ngrams)
        else:
            raise ValueError(f"Invalid rouge type: {self.rouge_type}")

        if self.score_type == 'fmeasure':
            score_value = scores.fmeasure
        elif self.score_type == 'precision':
            score_value = scores.precision
        elif self.score_type == 'recall':
            score_value = scores.recall
        else:
            raise ValueError(f"Invalid score type: {self.score_type}")

        return self.scale * score_value, None

    def get_sents(self, tokens):
        # assume sentences are separated by "."
        sents = " ".join(tokens).split(".")
        sents = [x for x in sents if len(x)]
        return sents

    def tokenize(self, tokens):
        # Strip non-alphanumerics and re-split on whitespace, dropping
        # empty tokens.
        text = re.sub(r"[^a-zA-Z0-9]+", " ", tokens)
        tokens = re.split(r"\s+", text)
        tokens = [x for x in tokens if len(x)]
        return tokens

    def name(self):
        return self.rouge_type

    def idstr(self):
        return self.rouge_type.lower()
class WERScorer(Scorer):
    """
    A scorer that calculates Word Error Rate (WER).
    """
    def __init__(self, sub_pen=1.0, ins_pen=1.0, del_pen=1.0, case_insensitive=False):
        # Bug fix: these three were previously hard-coded to 1.0, silently
        # ignoring the caller-supplied penalty arguments.
        self.sub_pen = sub_pen
        self.ins_pen = ins_pen
        self.del_pen = del_pen
        self.case_insensitive = case_insensitive

    @property
    def scale(self):
        return global_scorer_scale

    def score_corpus(self, ref, out, src=None):
        """
        Score a corpus using WER

        Args:
          ref: A reference corpus
          out: An output corpus
          src: A source corpus. Ignored if passed

        Returns:
          A tuple containing a single value for the WER and None
        """
        cached_stats = self.cache_stats(ref, out)
        return self.score_cached_corpus(np.arange(len(ref)), cached_stats)

    def score_sentence(self, ref, out, src=None):
        return self.score_corpus([ref], [out])

    def cache_stats(self, ref, out, src=None):
        """
        Cache sufficient statistics for caculating WER

        Args:
          ref: A reference corpus
          out: An output corpus

        Returns:
          A list of cached statistics
        """
        # Per-sentence (reference length, edit distance) pairs.
        cached_stats = []
        for r, o in zip(ref, out):
            cached_stats.append( (len(r), self._edit_distance(r, o)) )
        return cached_stats

    def score_cached_corpus(self, sent_ids, cached_stats):
        """
        Score a corpus with cache

        Args:
          sent_ids: The sentence ids for reference and output corpora
          cached_stats: A list of cached statistics

        Returns:
          A tuple containing a single value for the score and a string summarizing auxiliary information
        """
        if len(cached_stats) == 0:
            return 0.0, None
        cached_ref_len, cached_edit_distance = zip(*cached_stats)
        cached_ref_len, cached_edit_distance = np.array(cached_ref_len), np.array(cached_edit_distance)
        # WER = total edits / total reference length over the selection.
        denom = np.sum(cached_ref_len[sent_ids])
        wer = np.sum(cached_edit_distance[sent_ids])/denom if denom != 0 else 0
        return self.scale * wer, None

    def _edit_distance(self, ref, out, src=None):
        """Weighted Levenshtein distance between ref and out token lists."""
        if self.case_insensitive:
            ref = corpus_utils.lower(ref)
            out = corpus_utils.lower(out)

        sp1 = len(ref)+1
        tp1 = len(out)+1
        # Dynamic-programming table; first row/column are pure ins/del costs.
        scores = np.zeros((sp1, tp1))
        # Precomputed token-equality matrix (ref x out).
        equals = (np.expand_dims(np.array(ref), axis=1) == np.array(out))

        scores[:,0] = range(sp1)
        scores[0,:] = range(tp1)

        # Forward edit distance
        for i in range(0, len(ref)):
            for j in range(0, len(out)):
                my_action = 0 if equals[i,j] else 1
                my_score = scores[i,j] + my_action * self.sub_pen
                del_score = scores[i,j+1] + self.del_pen
                if del_score < my_score:
                    my_score = del_score
                ins_score = scores[i+1,j] + self.ins_pen
                if ins_score < my_score:
                    my_score = ins_score
                scores[i+1,j+1] = my_score

        return scores[-1,-1]

    def name(self):
        return "Word Error Rate"

    def idstr(self):
        return "wer"
class METEORScorer(Scorer):
    """
    A scorer that calculates METEOR score by shelling out to the METEOR jar.
    """
    def __init__(self, meteor_directory, options=None):
        # meteor_directory: directory containing meteor-*.jar.
        # options: extra command-line options passed to the jar verbatim.
        self.meteor_directory = meteor_directory
        self.options = options
        self.weights, self.parameters = self._get_weights_and_parameters(options)

    @property
    def scale(self):
        return global_scorer_scale

    def score_corpus(self, ref, out, src=None):
        """
        Score a corpus using METEOR score

        Args:
          ref: A reference corpus
          out: An output corpus
          src: A source corpus. Ignored if passed

        Returns:
          A tuple containing a single value for the METEOR score and a string summarizing auxiliary information
        """
        cached_stats = self.cache_stats(ref, out)
        return self.score_cached_corpus(np.arange(len(ref)), cached_stats)

    def score_sentence(self, ref, out, src=None):
        # Interface fix: accept the `src` argument like every other scorer
        # (it is ignored, as in score_corpus).
        return self.score_corpus([ref], [out])

    def cache_stats(self, ref, out, src=None):
        """
        Cache sufficient statistics for caculating METEOR score

        Args:
          ref: A reference corpus
          out: An output corpus
          src: A source corpus. Ignored if passed

        Returns:
          A list of cached statistics
        """
        # Write the corpora to temp files and run METEOR's -ssOut mode,
        # which prints one line of sufficient statistics per sentence.
        with tempfile.TemporaryDirectory() as directory:
            ref_name = directory + '/ref'
            out_name = directory + '/out'
            corpus_utils.write_tokens(ref_name, ref)
            corpus_utils.write_tokens(out_name, out)
            cached_stats = []
            # NOTE(review): shell=True is required for the meteor-*.jar glob
            # expansion; meteor_directory must be trusted.
            command = f'java -Xmx2G -jar {self.meteor_directory}/meteor-*.jar {out_name} {ref_name} '
            if self.options:
                command += self.options
            command += ' -ssOut'
            p = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
            stats = p.communicate()[0].decode("utf-8").split('\n')[:-1]
            for stat_str in stats:
                stat = tuple(float(x) for x in stat_str.split())
                cached_stats.append(stat)
            return cached_stats

    def score_cached_corpus(self, sent_ids, cached_stats):
        """
        Score a corpus using METEOR score with cache

        Args:
          sent_ids: The sentence ids for reference and output corpora
          cached_stats: A list of cached statistics

        Returns:
          A tuple containing a single value for the METEOR score and a string summarizing auxiliary information
        """
        if len(cached_stats) == 0:
            return 0.0, None

        cached_stats = np.array(cached_stats)

        # compute sufficient statistics
        sent_stats = cached_stats[sent_ids]
        # num_total_chunks = sum(num_sent_chunks) - minus_chunk
        # A fully-matched sentence forming one chunk contributes no
        # fragmentation, so its chunk is subtracted from the total.
        minus_chunk = 0
        for stat in sent_stats:
            out_len = stat[0]
            ref_len = stat[1]
            out_total_match = stat[4] + stat[6] + stat[8] + stat[10] + stat[12] + stat[14] + stat[16] + stat[18]
            ref_total_match = stat[5] + stat[7] + stat[9] + stat[11] + stat[13] + stat[15] + stat[17] + stat[19]
            if out_len == out_total_match and ref_len == ref_total_match and stat[-3] == 1:
                minus_chunk += 1
        cal_stats = np.sum(sent_stats, 0)
        cal_stats[20] -= minus_chunk

        # rename -- column layout of METEOR's -ssOut statistics.
        alpha, beta, gamma, delta = self.parameters
        out_len, ref_len = cal_stats[0], cal_stats[1]
        out_func_words, ref_func_words = cal_stats[2], cal_stats[3]
        out_content_match_stage = np.array([cal_stats[4], cal_stats[8], cal_stats[12], cal_stats[16]])
        ref_content_match_stage = np.array([cal_stats[5], cal_stats[9], cal_stats[13], cal_stats[17]])
        out_func_match_stage = np.array([cal_stats[6], cal_stats[10], cal_stats[14], cal_stats[18]])
        ref_func_match_stage = np.array([cal_stats[7], cal_stats[11], cal_stats[15], cal_stats[19]])
        chunks = cal_stats[20]
        out_word_match, ref_word_match = cal_stats[21], cal_stats[22]

        # compute the METEOR score: weighted precision/recall combined into
        # an F-mean, discounted by a fragmentation penalty.
        out_weighted_len = delta * (out_len-out_func_words) + (1.0-delta) * out_func_words
        ref_weighted_len = delta * (ref_len-ref_func_words) + (1.0-delta) * ref_func_words
        out_weighted_match = np.sum(self.weights * (out_content_match_stage*delta + out_func_match_stage*(1-delta)))
        ref_weighted_match = np.sum(self.weights * (ref_content_match_stage*delta + ref_func_match_stage*(1-delta)))

        prec = out_weighted_match / out_weighted_len if out_weighted_len != 0 else 0
        recall = ref_weighted_match / ref_weighted_len if ref_weighted_len != 0 else 0
        fmean = 1.0 / ( (1.0-alpha)/prec + alpha/recall ) if prec != 0 and recall != 0 else 0

        out_total_match = np.sum(out_content_match_stage) + np.sum(out_func_match_stage)
        ref_total_match = np.sum(ref_content_match_stage) + np.sum(ref_func_match_stage)

        frag = float(chunks) / (float(out_word_match+ref_word_match)/2)
        frag = 0 if out_total_match == out_len and ref_total_match == ref_len and chunks == 1 else frag

        frag_penalty = gamma * math.pow(frag, beta)
        score = fmean * (1.0-frag_penalty)

        return self.scale * score, None

    def _get_weights_and_parameters(self, options):
        """Extract the stage weights and (alpha, beta, gamma, delta) that
        METEOR will use for the given options, by running it on a tiny probe
        input and parsing its header output."""
        # Consistency fix: test the `options` parameter (previously this
        # checked self.options, which only worked because the sole call site
        # passes self.options).
        if options is None:
            # METEOR's documented defaults for English.
            return (np.array([1.0, 0.6, 0.8, 0.6]), np.array([0.85, 0.2, 0.6, 0.75]))

        weights, parameters = np.zeros(4), np.zeros(4)
        # a simple and (maybe) slow way to obtain weights and parameters
        with tempfile.TemporaryDirectory() as directory:
            ref_name = directory + '/ref'
            out_name = directory + '/out'
            corpus_utils.write_tokens(ref_name, [["test"]])
            corpus_utils.write_tokens(out_name, [["test"]])
            command = f'java -Xmx2G -jar {self.meteor_directory}/meteor-*.jar {out_name} {ref_name} {options}'
            p = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
            stats = p.communicate()[0].decode("utf-8").split()
            weights_index = stats.index('Weights:') + 1
            params_index = stats.index('Parameters:') + 1
            for i in range(4):
                weights[i] = float(stats[weights_index+i])
                parameters[i] = float(stats[params_index+i])

        return weights, parameters

    def name(self):
        return "METEOR"

    def idstr(self):
        return "meteor"
class COMETScorer(SentenceFactoredScorer):
  """
  A scorer that calculates sentence-level COMET score.
  """
  def __init__(self, model_name="wmt-large-da-estimator-1719"):
    # Imported lazily so torch/comet are only required when this
    # scorer is actually instantiated.
    import torch
    from comet.models import download_model
    self.cuda = torch.cuda.is_available()
    self.model = download_model(model_name)

  @property
  def scale(self):
    return global_scorer_scale

  def score_sentence(self, ref, out, src=None):
    """
    Score a single sentence with sentence-level COMET score
    Args:
      ref: A reference sentence
      out: An output sentence
      src: A source sentence
    Returns:
      The sentence-level COMET score, and None
    """
    assert src is not None, "COMET requires source"
    example = {"src": " ".join(src), "mt": " ".join(out), "ref": " ".join(ref)}
    # predict() returns (segment info, scores); take the first score.
    score = self.model.predict([example], cuda=self.cuda)[1][0]
    return self.scale * score, None

  def name(self):
    """Return the human-readable name of this scorer."""
    return "sentence-level COMET"

  def idstr(self):
    """Return the short identifier string for this scorer."""
    return "comet"
class GleuScorer(Scorer):
  """
  A scorer that calculates GLEU score.
  References:
    "Ground Truth for Grammatical Error Correction Metrics", Napoles et al.
    "GLEU Without Tuning", Napoles et al.
  """
  def __init__(self, weights=(0.25, 0.25, 0.25, 0.25), case_insensitive=False):
    self.weights = weights
    self.case_insensitive = case_insensitive

  @property
  def scale(self):
    return global_scorer_scale

  def score_corpus(self, ref, out, src=None):
    """
    Score a corpus using GLEU score
    Args:
      ref: A reference corpus
      out: An output corpus
      src: A source corpus. Required
    Returns:
      A tuple containing a single value for the GLEU score and a string summarizing auxiliary information
    """
    stats = self.cache_stats(ref, out, src)
    return self.score_cached_corpus(range(len(ref)), stats)

  def score_sentence(self, ref, out, src=None):
    """
    Score a sentence using GLEU score
    Args:
      ref: A reference sentence
      out: An output sentence
      src: A source sentence. Required
    Returns:
      A tuple containing a single value for the GLEU score and a string summarizing auxiliary information
    """
    stats = self.cache_stats([ref], [out], [src])
    # Smooth according to https://github.com/cnap/gec-ranking/blob/master/scripts/gleu.py
    ref_len, out_len, prec = stats[0]
    stats[0] = (ref_len, out_len,
                [(max(num, 1), max(denom, 1)) for num, denom in prec])
    return self.score_cached_corpus(range(1), stats)

  def _precision(self, ref, out, src, n):
    """
    Calculate GLEU-specific n-gram precision
    Args:
      ref: A reference sentence
      out: An output sentence
      src: A source sentence
      n: The n-gram order
    Returns:
      Numerator and denominator of the precision
    """
    ref_cnt = Counter(ngram_utils.sent_ngrams_list(ref, n))
    out_cnt = Counter(ngram_utils.sent_ngrams_list(out, n))
    src_cnt = Counter(ngram_utils.sent_ngrams_list(src, n))
    out_join_ref = out_cnt & ref_cnt
    out_join_src = out_cnt & src_cnt
    # Matches with the reference, penalized by output n-grams that match
    # the source but not the reference.
    num = sum(out_join_ref.values()) - sum((out_join_src - out_join_ref).values())
    # According to https://github.com/cnap/gec-ranking/blob/master/scripts/gleu.py
    num = max(num, 0)
    return num, sum(out_cnt.values())

  def cache_stats(self, ref, out, src=None):
    """
    Cache sufficient statistics for calculating BLEU score
    Args:
      ref: A reference corpus
      out: An output corpus
      src: A source corpus. Required.
    Returns:
      A list of cached statistics
    """
    if self.case_insensitive:
      ref = corpus_utils.lower(ref)
      out = corpus_utils.lower(out)
      src = corpus_utils.lower(src)
    orders = range(1, len(self.weights) + 1)
    stats = []
    for r, o, s in zip(ref, out, src):
      prec = [self._precision(r, o, s, n) for n in orders]
      stats.append((len(r), len(o), prec))
    return stats

  def score_cached_corpus(self, sent_ids, cached_stats):
    """
    Score a corpus using GLEU score with cache
    Args:
      sent_ids: The sentence ids for reference and output corpora
      cached_stats: A list of cached statistics
    Returns:
      A tuple containing a single value for the GLEU score and a string summarizing auxiliary information
    """
    if len(cached_stats) == 0:
      return 0.0, None
    cached_ref_len, cached_out_len, cached_prec = zip(*cached_stats)
    num_prec = Counter()
    denom_prec = Counter()
    ref_len = 0
    out_len = 0
    # One pass over sent_ids accumulates lengths and per-order counts.
    for sent_id in sent_ids:
      ref_len += cached_ref_len[sent_id]
      out_len += cached_out_len[sent_id]
      for n in range(1, len(self.weights) + 1):
        num, denom = cached_prec[sent_id][n - 1]
        num_prec[n] += num
        denom_prec[n] += denom
    # According to https://github.com/cnap/gec-ranking/blob/master/scripts/gleu.py
    if any(c == 0 for c in chain(num_prec.values(), denom_prec.values())):
      return 0, None
    log_prec = sum(w * math.log(num_prec[i] / denom_prec[i])
                   for i, w in enumerate(self.weights, start=1))
    bp = min(1, math.exp(1 - ref_len / out_len)) if out_len != 0 else 0
    return self.scale * bp * math.exp(log_prec), None

  def name(self):
    """Return the human-readable name of this scorer."""
    return "GLEU"

  def idstr(self):
    """Return the short identifier string for this scorer."""
    return "gleu"
def create_scorer_from_profile(profile, case_insensitive=False, meteor_directory=None, options=None):
  """
  Create a scorer from a profile string
  Args:
    profile: a profile string of "bleu" for BLEU or "length" for length ratio
    case_insensitive: A boolean specifying whether to turn on the case insensitive option
    meteor_directory: Directory holding the METEOR jar; required for the
      "meteor" profile.
    options: Extra options forwarded to scorers that accept them (METEOR).
  Returns:
    A scorer to perform the appropriate scoring
  Raises:
    ValueError: if the profile is unknown, or "meteor" is requested without
      a meteor_directory.
  """
  # Fixed: first branch was `if` in an otherwise-`elif` chain, and the
  # final error used an f-string followed by a redundant no-op .format().
  if profile == 'bleu':
    return BleuScorer(case_insensitive=case_insensitive)
  elif profile == 'sacrebleu':
    return SacreBleuScorer(case_insensitive=case_insensitive)
  elif profile == 'sentbleu':
    return SentBleuScorer(case_insensitive=case_insensitive)
  elif profile == 'length':
    return LengthScorer()
  elif profile == 'ribes':
    return RibesScorer(case_insensitive=case_insensitive)
  elif profile == 'chrf':
    return ChrFScorer(case_insensitive=case_insensitive)
  elif re.match(r"rouge[0-9L](sum)?$", profile):
    return RougeScorer(rouge_type=profile, case_insensitive=case_insensitive)
  elif profile == 'wer':
    return WERScorer(case_insensitive=case_insensitive)
  elif profile == 'meteor':
    if meteor_directory is None:
      raise ValueError("Must specify the directory of the METEOR source code.")
    return METEORScorer(meteor_directory=meteor_directory, options=options)
  elif profile == 'exact':
    return ExactMatchScorer()
  elif profile == 'comet':
    return COMETScorer()
  elif profile == 'gleu':
    return GleuScorer()
  else:
    raise ValueError(f'Invalid profile for scorer {profile}')
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for constructing RNN Cells.
## Base interface for all RNN Cells
@@RNNCell
## RNN Cells for use with TensorFlow's core RNN methods
@@BasicRNNCell
@@BasicLSTMCell
@@GRUCell
@@LSTMCell
## Classes storing split `RNNCell` state
@@LSTMStateTuple
## RNN Cell wrappers (RNNCells that wrap other RNNCells)
@@MultiRNNCell
@@DropoutWrapper
@@EmbeddingWrapper
@@InputProjectionWrapper
@@OutputProjectionWrapper
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops.math_ops import sigmoid
from tensorflow.python.ops.math_ops import tanh
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
def _state_size_with_prefix(state_size, prefix=None):
  """Helper function that enables int or TensorShape shape specification.
  This function takes a size specification, which can be an integer or a
  TensorShape, and converts it into a list of integers. One may specify any
  additional dimensions that precede the final state size specification.
  Args:
    state_size: TensorShape or int that specifies the size of a tensor.
    prefix: optional additional list of dimensions to prepend.
  Returns:
    result_state_size: list of dimensions the resulting tensor size.
  """
  dims = tensor_shape.as_shape(state_size).as_list()
  if prefix is None:
    return dims
  if not isinstance(prefix, list):
    raise TypeError("prefix of _state_size_with_prefix should be a list.")
  return prefix + dims
class RNNCell(object):
  """Abstract object representing an RNN cell.
  The definition of cell in this package differs from the definition used in the
  literature. In the literature, cell refers to an object with a single scalar
  output. The definition in this package refers to a horizontal array of such
  units.
  An RNN cell, in the most abstract setting, is anything that has
  a state and performs some operation that takes a matrix of inputs.
  This operation results in an output matrix with `self.output_size` columns.
  If `self.state_size` is an integer, this operation also results in a new
  state matrix with `self.state_size` columns. If `self.state_size` is a
  tuple of integers, then it results in a tuple of `len(state_size)` state
  matrices, each with a column size corresponding to values in `state_size`.
  This module provides a number of basic commonly used RNN cells, such as
  LSTM (Long Short Term Memory) or GRU (Gated Recurrent Unit), and a number
  of operators that allow adding dropouts, projections, or embeddings for
  inputs. Constructing multi-layer cells is supported by the class
  `MultiRNNCell`, or by calling the `rnn` ops several times. Every `RNNCell`
  must have the properties below and implement `__call__` with the following
  signature.
  """

  def __call__(self, inputs, state, scope=None):
    """Run this RNN cell on inputs, starting from the given state.
    Args:
      inputs: `2-D` tensor with shape `[batch_size x input_size]`.
      state: if `self.state_size` is an integer, this should be a `2-D Tensor`
        with shape `[batch_size x self.state_size]`. Otherwise, if
        `self.state_size` is a tuple of integers, this should be a tuple
        with shapes `[batch_size x s] for s in self.state_size`.
      scope: VariableScope for the created subgraph; defaults to class name.
    Returns:
      A pair containing:
      - Output: A `2-D` tensor with shape `[batch_size x self.output_size]`.
      - New state: Either a single `2-D` tensor, or a tuple of tensors matching
        the arity and shapes of `state`.
    """
    raise NotImplementedError("Abstract method")

  @property
  def state_size(self):
    """size(s) of state(s) used by this cell.
    It can be represented by an Integer, a TensorShape or a tuple of Integers
    or TensorShapes.
    """
    raise NotImplementedError("Abstract method")

  @property
  def output_size(self):
    """Integer or TensorShape: size of outputs produced by this cell."""
    raise NotImplementedError("Abstract method")

  def zero_state(self, batch_size, dtype):
    """Return zero-filled state tensor(s).
    Args:
      batch_size: int, float, or unit Tensor representing the batch size.
      dtype: the data type to use for the state.
    Returns:
      If `state_size` is an int or TensorShape, then the return value is a
      `N-D` tensor of shape `[batch_size x state_size]` filled with zeros.
      If `state_size` is a nested list or tuple, then the return value is
      a nested list or tuple (of the same structure) of `2-D` tensors with
      the shapes `[batch_size x s]` for each s in `state_size`.
    """
    state_size = self.state_size
    if not nest.is_sequence(state_size):
      # Single state: one zero tensor with as much static shape as we know.
      size = _state_size_with_prefix(state_size, prefix=[batch_size])
      zeros = array_ops.zeros(array_ops.pack(size), dtype=dtype)
      zeros.set_shape(_state_size_with_prefix(state_size, prefix=[None]))
      return zeros
    # Structured state: build one zero tensor per flattened component and
    # repack them into the original structure.
    flat_sizes = nest.flatten(state_size)
    flat_zeros = []
    for s in flat_sizes:
      z = array_ops.zeros(
          array_ops.pack(_state_size_with_prefix(s, prefix=[batch_size])),
          dtype=dtype)
      z.set_shape(_state_size_with_prefix(s, prefix=[None]))
      flat_zeros.append(z)
    return nest.pack_sequence_as(structure=state_size,
                                 flat_sequence=flat_zeros)
class BasicRNNCell(RNNCell):
  """The most basic RNN cell."""

  def __init__(self, num_units, input_size=None, activation=tanh):
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    self._num_units = num_units
    self._activation = activation

  @property
  def state_size(self):
    return self._num_units

  @property
  def output_size(self):
    return self._num_units

  def __call__(self, inputs, state, scope=None):
    """Most basic RNN: output = new_state = activation(W * input + U * state + B)."""
    # Scope defaults to the class name, "BasicRNNCell".
    with vs.variable_scope(scope or type(self).__name__):
      new_state = self._activation(
          _linear([inputs, state], self._num_units, True))
    # Output and new state are the same tensor for a vanilla RNN.
    return new_state, new_state
class GRUCell(RNNCell):
  """Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078)."""

  def __init__(self, num_units, input_size=None, activation=tanh):
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    self._num_units = num_units
    self._activation = activation

  @property
  def state_size(self):
    return self._num_units

  @property
  def output_size(self):
    return self._num_units

  def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) with nunits cells."""
    with vs.variable_scope(scope or type(self).__name__):  # "GRUCell"
      with vs.variable_scope("Gates"):  # Reset gate and update gate.
        # We start with bias of 1.0 to not reset and not update.
        gates = _linear([inputs, state], 2 * self._num_units, True, 1.0)
        r, u = array_ops.split(1, 2, gates)
        r = sigmoid(r)
        u = sigmoid(u)
      with vs.variable_scope("Candidate"):
        c = self._activation(
            _linear([inputs, r * state], self._num_units, True))
      # Blend old state and candidate according to the update gate.
      new_h = u * state + (1 - u) * c
    return new_h, new_h
_LSTMStateTuple = collections.namedtuple("LSTMStateTuple", ("c", "h"))


class LSTMStateTuple(_LSTMStateTuple):
  """Tuple used by LSTM Cells for `state_size`, `zero_state`, and output state.
  Stores two elements: `(c, h)`, in that order.
  Only used when `state_is_tuple=True`.
  """
  __slots__ = ()

  @property
  def dtype(self):
    """The dtype shared by `c` and `h`; raises TypeError if they differ."""
    c, h = self
    if c.dtype != h.dtype:
      raise TypeError("Inconsistent internal state: %s vs %s" %
                      (str(c.dtype), str(h.dtype)))
    return c.dtype
class BasicLSTMCell(RNNCell):
  """Basic LSTM recurrent network cell.
  The implementation is based on: http://arxiv.org/abs/1409.2329.
  We add forget_bias (default: 1) to the biases of the forget gate in order to
  reduce the scale of forgetting in the beginning of the training.
  It does not allow cell clipping, a projection layer, and does not
  use peep-hole connections: it is the basic baseline.
  For advanced models, please use the full LSTMCell that follows.
  """

  def __init__(self, num_units, forget_bias=1.0, input_size=None,
               state_is_tuple=False, activation=tanh):
    """Initialize the basic LSTM cell.
    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above).
      input_size: Deprecated and unused.
      state_is_tuple: If True, accepted and returned states are 2-tuples of
        the `c_state` and `m_state`. By default (False), they are concatenated
        along the column axis. This default behavior will soon be deprecated.
      activation: Activation function of the inner states.
    """
    if not state_is_tuple:
      logging.warn("%s: Using a concatenated state is slower and will soon be "
                   "deprecated. Use state_is_tuple=True.", self)
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    self._num_units = num_units
    self._forget_bias = forget_bias
    self._state_is_tuple = state_is_tuple
    self._activation = activation

  @property
  def state_size(self):
    if self._state_is_tuple:
      return LSTMStateTuple(self._num_units, self._num_units)
    return 2 * self._num_units

  @property
  def output_size(self):
    return self._num_units

  def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell (LSTM)."""
    with vs.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
      if self._state_is_tuple:
        c, h = state
      else:
        c, h = array_ops.split(1, 2, state)
      # Parameters of gates are concatenated into one multiply for efficiency.
      concat = _linear([inputs, h], 4 * self._num_units, True)
      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f, o = array_ops.split(1, 4, concat)
      new_c = (sigmoid(f + self._forget_bias) * c +
               sigmoid(i) * self._activation(j))
      new_h = self._activation(new_c) * sigmoid(o)
      if self._state_is_tuple:
        new_state = LSTMStateTuple(new_c, new_h)
      else:
        new_state = array_ops.concat(1, [new_c, new_h])
      return new_h, new_state
def _get_concat_variable(name, shape, dtype, num_shards):
  """Get a sharded variable concatenated into one tensor."""
  shards = _get_sharded_variable(name, shape, dtype, num_shards)
  if len(shards) == 1:
    return shards[0]
  concat_name = name + "/concat"
  concat_full_name = vs.get_variable_scope().name + "/" + concat_name + ":0"
  # Reuse a concat op previously created for this scope, if any.
  for value in ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES):
    if value.name == concat_full_name:
      return value
  concat_variable = array_ops.concat(0, shards, name=concat_name)
  ops.add_to_collection(ops.GraphKeys.CONCATENATED_VARIABLES,
                        concat_variable)
  return concat_variable
def _get_sharded_variable(name, shape, dtype, num_shards):
  """Get a list of sharded variables with the given dtype."""
  if num_shards > shape[0]:
    raise ValueError("Too many shards: shape=%s, num_shards=%d" %
                     (shape, num_shards))
  # Split rows as evenly as possible; the first `extra_rows` shards each
  # take one additional row.
  base_rows = int(math.floor(shape[0] / num_shards))
  extra_rows = shape[0] - base_rows * num_shards
  shards = []
  for i in range(num_shards):
    rows = base_rows + 1 if i < extra_rows else base_rows
    shards.append(vs.get_variable(name + "_%d" % i, [rows] + shape[1:],
                                  dtype=dtype))
  return shards
class LSTMCell(RNNCell):
  """Long short-term memory unit (LSTM) recurrent network cell.
  The default non-peephole implementation is based on:
  http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
  S. Hochreiter and J. Schmidhuber.
  "Long Short-Term Memory". Neural Computation, 9(8):1735-1780, 1997.
  The peephole implementation is based on:
  https://research.google.com/pubs/archive/43905.pdf
  Hasim Sak, Andrew Senior, and Francoise Beaufays.
  "Long short-term memory recurrent neural network architectures for
  large scale acoustic modeling." INTERSPEECH, 2014.
  The class uses optional peep-hole connections, optional cell clipping, and
  an optional projection layer.
  """

  def __init__(self, num_units, input_size=None,
               use_peepholes=False, cell_clip=None,
               initializer=None, num_proj=None, proj_clip=None,
               num_unit_shards=1, num_proj_shards=1,
               forget_bias=1.0, state_is_tuple=False,
               activation=tanh):
    """Initialize the parameters for an LSTM cell.
    Args:
      num_units: int, The number of units in the LSTM cell
      input_size: Deprecated and unused.
      use_peepholes: bool, set True to enable diagonal/peephole connections.
      cell_clip: (optional) A float value, if provided the cell state is clipped
        by this value prior to the cell output activation.
      initializer: (optional) The initializer to use for the weight and
        projection matrices.
      num_proj: (optional) int, The output dimensionality for the projection
        matrices. If None, no projection is performed.
      proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
        provided, then the projected values are clipped elementwise to within
        `[-proj_clip, proj_clip]`.
      num_unit_shards: How to split the weight matrix. If >1, the weight
        matrix is stored across num_unit_shards.
      num_proj_shards: How to split the projection matrix. If >1, the
        projection matrix is stored across num_proj_shards.
      forget_bias: Biases of the forget gate are initialized by default to 1
        in order to reduce the scale of forgetting at the beginning of
        the training.
      state_is_tuple: If True, accepted and returned states are 2-tuples of
        the `c_state` and `m_state`. By default (False), they are concatenated
        along the column axis. This default behavior will soon be deprecated.
      activation: Activation function of the inner states.
    """
    if not state_is_tuple:
      logging.warn("%s: Using a concatenated state is slower and will soon be "
                   "deprecated. Use state_is_tuple=True.", self)
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    self._num_units = num_units
    self._use_peepholes = use_peepholes
    self._cell_clip = cell_clip
    self._initializer = initializer
    self._num_proj = num_proj
    self._proj_clip = proj_clip
    self._num_unit_shards = num_unit_shards
    self._num_proj_shards = num_proj_shards
    self._forget_bias = forget_bias
    self._state_is_tuple = state_is_tuple
    self._activation = activation
    # With a projection layer, the emitted state is (c of size num_units,
    # m of size num_proj); without one, m has size num_units as well.
    if num_proj:
      self._state_size = (
          LSTMStateTuple(num_units, num_proj)
          if state_is_tuple else num_units + num_proj)
      self._output_size = num_proj
    else:
      self._state_size = (
          LSTMStateTuple(num_units, num_units)
          if state_is_tuple else 2 * num_units)
      self._output_size = num_units

  @property
  def state_size(self):
    return self._state_size

  @property
  def output_size(self):
    return self._output_size

  def __call__(self, inputs, state, scope=None):
    """Run one step of LSTM.
    Args:
      inputs: input Tensor, 2D, batch x num_units.
      state: if `state_is_tuple` is False, this must be a state Tensor,
        `2-D, batch x state_size`. If `state_is_tuple` is True, this must be a
        tuple of state Tensors, both `2-D`, with column sizes `c_state` and
        `m_state`.
      scope: VariableScope for the created subgraph; defaults to "LSTMCell".
    Returns:
      A tuple containing:
      - A `2-D, [batch x output_dim]`, Tensor representing the output of the
        LSTM after reading `inputs` when previous state was `state`.
        Here output_dim is:
           num_proj if num_proj was set,
           num_units otherwise.
      - Tensor(s) representing the new state of LSTM after reading `inputs` when
        the previous state was `state`. Same type and shape(s) as `state`.
    Raises:
      ValueError: If input size cannot be inferred from inputs via
        static shape inference.
    """
    # m ("memory"/output) columns equal the projection size when projecting.
    num_proj = self._num_units if self._num_proj is None else self._num_proj
    if self._state_is_tuple:
      (c_prev, m_prev) = state
    else:
      # Concatenated state: first num_units columns are c, the rest are m.
      c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
      m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])
    dtype = inputs.dtype
    input_size = inputs.get_shape().with_rank(2)[1]
    if input_size.value is None:
      raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
    with vs.variable_scope(scope or type(self).__name__,
                           initializer=self._initializer):  # "LSTMCell"
      # All four gates share one (possibly sharded) weight matrix so a
      # single matmul computes them together.
      concat_w = _get_concat_variable(
          "W", [input_size.value + num_proj, 4 * self._num_units],
          dtype, self._num_unit_shards)
      b = vs.get_variable(
          "B", shape=[4 * self._num_units],
          initializer=array_ops.zeros_initializer, dtype=dtype)
      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      cell_inputs = array_ops.concat(1, [inputs, m_prev])
      lstm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b)
      i, j, f, o = array_ops.split(1, 4, lstm_matrix)
      # Diagonal connections
      if self._use_peepholes:
        w_f_diag = vs.get_variable(
            "W_F_diag", shape=[self._num_units], dtype=dtype)
        w_i_diag = vs.get_variable(
            "W_I_diag", shape=[self._num_units], dtype=dtype)
        w_o_diag = vs.get_variable(
            "W_O_diag", shape=[self._num_units], dtype=dtype)
      # New cell state: forget old c, add gated new input (with optional
      # peephole terms on the gates).
      if self._use_peepholes:
        c = (sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev +
             sigmoid(i + w_i_diag * c_prev) * self._activation(j))
      else:
        c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) *
             self._activation(j))
      if self._cell_clip is not None:
        # pylint: disable=invalid-unary-operand-type
        c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
        # pylint: enable=invalid-unary-operand-type
      if self._use_peepholes:
        m = sigmoid(o + w_o_diag * c) * self._activation(c)
      else:
        m = sigmoid(o) * self._activation(c)
      # Optional output projection (also sharded), with optional clipping.
      if self._num_proj is not None:
        concat_w_proj = _get_concat_variable(
            "W_P", [self._num_units, self._num_proj],
            dtype, self._num_proj_shards)
        m = math_ops.matmul(m, concat_w_proj)
        if self._proj_clip is not None:
          # pylint: disable=invalid-unary-operand-type
          m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
          # pylint: enable=invalid-unary-operand-type
    new_state = (LSTMStateTuple(c, m) if self._state_is_tuple
                 else array_ops.concat(1, [c, m]))
    return m, new_state
class OutputProjectionWrapper(RNNCell):
  """Operator adding an output projection to the given cell.
  Note: in many cases it may be more efficient to not use this wrapper,
  but instead concatenate the whole sequence of your outputs in time,
  do the projection on this batch-concatenated sequence, then split it
  if needed or directly feed into a softmax.
  """

  def __init__(self, cell, output_size):
    """Create a cell with output projection.
    Args:
      cell: an RNNCell, a projection to output_size is added to it.
      output_size: integer, the size of the output after projection.
    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if output_size is not positive.
    """
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not RNNCell.")
    if output_size < 1:
      raise ValueError("Parameter output_size must be > 0: %d." % output_size)
    self._cell = cell
    self._output_size = output_size

  @property
  def state_size(self):
    return self._cell.state_size

  @property
  def output_size(self):
    return self._output_size

  def __call__(self, inputs, state, scope=None):
    """Run the cell and output projection on inputs, starting from state."""
    cell_output, cell_state = self._cell(inputs, state)
    # Default scope: "OutputProjectionWrapper"
    with vs.variable_scope(scope or type(self).__name__):
      projected = _linear(cell_output, self._output_size, True)
    return projected, cell_state
class InputProjectionWrapper(RNNCell):
  """Operator adding an input projection to the given cell.
  Note: in many cases it may be more efficient to not use this wrapper,
  but instead concatenate the whole sequence of your inputs in time,
  do the projection on this batch-concatenated sequence, then split it.
  """

  def __init__(self, cell, num_proj, input_size=None):
    """Create a cell with input projection.
    Args:
      cell: an RNNCell, a projection of inputs is added before it.
      num_proj: Python integer. The dimension to project to.
      input_size: Deprecated and unused.
    Raises:
      TypeError: if cell is not an RNNCell.
    """
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not RNNCell.")
    self._cell = cell
    self._num_proj = num_proj

  @property
  def state_size(self):
    return self._cell.state_size

  @property
  def output_size(self):
    return self._cell.output_size

  def __call__(self, inputs, state, scope=None):
    """Run the input projection and then the cell."""
    # Default scope: "InputProjectionWrapper"
    with vs.variable_scope(scope or type(self).__name__):
      projected_inputs = _linear(inputs, self._num_proj, True)
    return self._cell(projected_inputs, state)
class DropoutWrapper(RNNCell):
  """Operator adding dropout to inputs and outputs of the given cell."""

  def __init__(self, cell, input_keep_prob=1.0, output_keep_prob=1.0,
               seed=None):
    """Create a cell with added input and/or output dropout.
    Dropout is never used on the state.
    Args:
      cell: an RNNCell, a projection to output_size is added to it.
      input_keep_prob: unit Tensor or float between 0 and 1, input keep
        probability; if it is float and 1, no input dropout will be added.
      output_keep_prob: unit Tensor or float between 0 and 1, output keep
        probability; if it is float and 1, no output dropout will be added.
      seed: (optional) integer, the randomness seed.
    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if keep_prob is not between 0 and 1.
    """
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not a RNNCell.")
    # Float probabilities are validated eagerly; Tensors are trusted.
    if (isinstance(input_keep_prob, float)
        and not 0.0 <= input_keep_prob <= 1.0):
      raise ValueError("Parameter input_keep_prob must be between 0 and 1: %d"
                       % input_keep_prob)
    if (isinstance(output_keep_prob, float)
        and not 0.0 <= output_keep_prob <= 1.0):
      raise ValueError("Parameter output_keep_prob must be between 0 and 1: %d"
                       % output_keep_prob)
    self._cell = cell
    self._input_keep_prob = input_keep_prob
    self._output_keep_prob = output_keep_prob
    self._seed = seed

  @property
  def state_size(self):
    return self._cell.state_size

  @property
  def output_size(self):
    return self._cell.output_size

  def __call__(self, inputs, state, scope=None):
    """Run the cell with the declared dropouts."""
    # A float keep-prob of exactly 1 disables dropout; Tensors always apply.
    in_keep = self._input_keep_prob
    if not isinstance(in_keep, float) or in_keep < 1:
      inputs = nn_ops.dropout(inputs, in_keep, seed=self._seed)
    output, new_state = self._cell(inputs, state, scope)
    out_keep = self._output_keep_prob
    if not isinstance(out_keep, float) or out_keep < 1:
      output = nn_ops.dropout(output, out_keep, seed=self._seed)
    return output, new_state
class EmbeddingWrapper(RNNCell):
  """Operator adding input embedding to the given cell.
  Note: in many cases it may be more efficient to not use this wrapper,
  but instead concatenate the whole sequence of your inputs in time,
  do the embedding on this batch-concatenated sequence, then split it and
  feed into your RNN.
  """

  def __init__(self, cell, embedding_classes, embedding_size, initializer=None):
    """Create a cell with an added input embedding.
    Args:
      cell: an RNNCell, an embedding will be put before its inputs.
      embedding_classes: integer, how many symbols will be embedded.
      embedding_size: integer, the size of the vectors we embed into.
      initializer: an initializer to use when creating the embedding;
        if None, the initializer from variable scope or a default one is used.
    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if embedding_classes is not positive.
    """
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not RNNCell.")
    if embedding_classes <= 0 or embedding_size <= 0:
      raise ValueError("Both embedding_classes and embedding_size must be > 0: "
                       "%d, %d." % (embedding_classes, embedding_size))
    self._cell = cell
    self._embedding_classes = embedding_classes
    self._embedding_size = embedding_size
    self._initializer = initializer

  @property
  def state_size(self):
    return self._cell.state_size

  @property
  def output_size(self):
    return self._cell.output_size

  def __call__(self, inputs, state, scope=None):
    """Run the cell on embedded inputs."""
    with vs.variable_scope(scope or type(self).__name__):  # "EmbeddingWrapper"
      with ops.device("/cpu:0"):
        # Pick the initializer: explicit > scope-provided > default.
        if self._initializer:
          initializer = self._initializer
        elif vs.get_variable_scope().initializer:
          initializer = vs.get_variable_scope().initializer
        else:
          # Default initializer for embeddings should have variance=1.
          sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
          initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)
        # The embedding matches the dtype of the (possibly tuple) state.
        if type(state) is tuple:
          data_type = state[0].dtype
        else:
          data_type = state.dtype
        embedding = vs.get_variable(
            "embedding", [self._embedding_classes, self._embedding_size],
            initializer=initializer,
            dtype=data_type)
        embedded = embedding_ops.embedding_lookup(
            embedding, array_ops.reshape(inputs, [-1]))
    return self._cell(embedded, state)
class MultiRNNCell(RNNCell):
  """RNN cell composed sequentially of multiple simple cells."""

  def __init__(self, cells, state_is_tuple=False):
    """Create a RNN cell composed sequentially of a number of RNNCells.
    Args:
      cells: list of RNNCells that will be composed in this order.
      state_is_tuple: If True, accepted and returned states are n-tuples, where
        `n = len(cells)`. By default (False), the states are all
        concatenated along the column axis.
    Raises:
      ValueError: if cells is empty (not allowed), or at least one of the cells
        returns a state tuple but the flag `state_is_tuple` is `False`.
    """
    if not cells:
      raise ValueError("Must specify at least one cell for MultiRNNCell.")
    self._cells = cells
    self._state_is_tuple = state_is_tuple
    if not state_is_tuple:
      # Concatenated states cannot represent tuple-state sub-cells.
      if any(nest.is_sequence(c.state_size) for c in self._cells):
        raise ValueError("Some cells return tuples of states, but the flag "
                         "state_is_tuple is not set. State sizes are: %s"
                         % str([c.state_size for c in self._cells]))

  @property
  def state_size(self):
    if self._state_is_tuple:
      return tuple(cell.state_size for cell in self._cells)
    return sum(cell.state_size for cell in self._cells)

  @property
  def output_size(self):
    return self._cells[-1].output_size

  def __call__(self, inputs, state, scope=None):
    """Run this multi-layer cell on inputs, starting from state."""
    with vs.variable_scope(scope or type(self).__name__):  # "MultiRNNCell"
      layer_input = inputs
      new_states = []
      state_pos = 0
      for i, cell in enumerate(self._cells):
        with vs.variable_scope("Cell%d" % i):
          if self._state_is_tuple:
            if not nest.is_sequence(state):
              raise ValueError(
                  "Expected state to be a tuple of length %d, but received: %s"
                  % (len(self.state_size), state))
            cur_state = state[i]
          else:
            # Carve this cell's slice out of the concatenated state.
            cur_state = array_ops.slice(
                state, [0, state_pos], [-1, cell.state_size])
            state_pos += cell.state_size
          layer_input, layer_state = cell(layer_input, cur_state)
          new_states.append(layer_state)
      if self._state_is_tuple:
        final_state = tuple(new_states)
      else:
        final_state = array_ops.concat(1, new_states)
    return layer_input, final_state
class _SlimRNNCell(RNNCell):
  """A simple wrapper for slim.rnn_cells."""

  def __init__(self, cell_fn):
    """Create a SlimRNNCell from a cell_fn.

    Args:
      cell_fn: a function which takes (inputs, state, scope) and produces the
        outputs and the new_state. Additionally when called with inputs=None
        and state=None it should return (initial_outputs, initial_state).

    Raises:
      TypeError: if cell_fn is not callable
      ValueError: if cell_fn cannot produce a valid initial state.
    """
    if not callable(cell_fn):
      # Bug fix: the message was previously passed as a second positional
      # argument to TypeError ("%s" was never interpolated); format it.
      raise TypeError("cell_fn %s needs to be callable" % cell_fn)
    self._cell_fn = cell_fn
    # cell_fn is expected to be a functools.partial wrapping the real cell
    # function, hence the .func attribute -- TODO confirm for plain callables.
    self._cell_name = cell_fn.func.__name__
    # Probe the cell once to discover the static output and state sizes.
    init_output, init_state = self._cell_fn(None, None)
    output_shape = init_output.get_shape()
    state_shape = init_state.get_shape()
    self._output_size = output_shape.with_rank(2)[1].value
    self._state_size = state_shape.with_rank(2)[1].value
    if self._output_size is None:
      raise ValueError("Initial output created by %s has invalid shape %s" %
                       (self._cell_name, output_shape))
    if self._state_size is None:
      raise ValueError("Initial state created by %s has invalid shape %s" %
                       (self._cell_name, state_shape))

  @property
  def state_size(self):
    return self._state_size

  @property
  def output_size(self):
    return self._output_size

  def __call__(self, inputs, state, scope=None):
    """Run cell_fn under a scope defaulting to the wrapped cell's name."""
    scope = scope or self._cell_name
    output, state = self._cell_fn(inputs, state, scope=scope)
    return output, state
def _linear(args, output_size, bias, bias_start=0.0, scope=None):
  """Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.

  Args:
    args: a 2D Tensor or a list of 2D, batch x n, Tensors.
    output_size: int, second dimension of W[i].
    bias: boolean, whether to add a bias term or not.
    bias_start: starting value to initialize the bias; 0 by default.
    scope: VariableScope for the created subgraph; defaults to "Linear".

  Returns:
    A 2D Tensor with shape [batch x output_size] equal to
    sum_i(args[i] * W[i]), where W[i]s are newly created matrices.

  Raises:
    ValueError: if some of the arguments has unspecified or wrong shape.
  """
  if args is None or (nest.is_sequence(args) and not args):
    raise ValueError("`args` must be specified")
  if not nest.is_sequence(args):
    args = [args]
  # Calculate the total size of arguments on dimension 1.
  total_arg_size = 0
  shapes = [a.get_shape().as_list() for a in args]
  for shape in shapes:
    if len(shape) != 2:
      raise ValueError("Linear is expecting 2D arguments: %s" % str(shapes))
    if not shape[1]:
      raise ValueError("Linear expects shape[1] of arguments: %s" % str(shapes))
    else:
      total_arg_size += shape[1]
  # All args are assumed to share a dtype; the weights use the same one.
  dtype = [a.dtype for a in args][0]
  # Now the computation.
  with vs.variable_scope(scope or "Linear"):
    matrix = vs.get_variable(
        "Matrix", [total_arg_size, output_size], dtype=dtype)
    if len(args) == 1:
      res = math_ops.matmul(args[0], matrix)
    else:
      # Concatenate along dim 1 so a single matmul covers all inputs.
      res = math_ops.matmul(array_ops.concat(1, args), matrix)
    if not bias:
      return res
    bias_term = vs.get_variable(
        "Bias", [output_size],
        dtype=dtype,
        initializer=init_ops.constant_initializer(
            bias_start, dtype=dtype))
  return res + bias_term
| |
# -*- coding: utf-8 -*-
"""Tests for test plugin."""
import unittest
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers.sqlite_plugins import test
from tests import test_lib as shared_test_lib
from tests.parsers.sqlite_plugins import test_lib
class TestTest(test_lib.SQLitePluginTestCase):
  """Tests for test database plugin.

  Bug fixes relative to the generated original: several string literals
  containing apostrophes (b'{}', b'{...}') were emitted without escaping,
  producing SyntaxErrors; the apostrophes are now escaped with \'.  The
  event position lookup also called index() on the storage writer object
  instead of its events list.
  """

  @shared_test_lib.skipUnlessHasTestFile([u'test.db'])
  def testProcess(self):
    """Test the Process function on a Test file."""
    plugin_object = test.TestPlugin()
    storage_writer = self._ParseDatabaseFileWithPlugin(
        [u'test.db'], plugin_object)

    # We should have 50 events in total.
    # - 25 TheUser createdDate events.
    # - 25 TheUser updatedAt events.
    self.assertEqual(50, len(storage_writer.events))

    # Test the first theuser updatedAt event: find the event whose attribute
    # values match the expected BBC Breaking News record.
    guessed_event = [
        e for e in storage_writer.events
        if e.advertiser_account_type == 0 and e.analytics_type == 0 and
        e.bio_entities is None and e.business_profile_state == 0 and
        e.could_be_stale == 0 and e.description ==
        u'Breaking news alerts and updates from the BBC. For news,'
        u'features, analysis follow @BBCWorld (international) or @BBCNews'
        u'(UK). Latest sport news @BBCSport.' and e.device_following == 0 and
        e.extended_profile_fields is None and e.favorites_count == 0 and
        e.followers_count == 19466932 and e.followers_count_fast == 0 and
        e.followers_count_normal == 0 and e.following == 0 and e.following_count
        == 3 and e.has_collections == 0 and e.has_extended_profile_fields == 0
        and e.id == 5402612 and e.is_lifeline_institution == 0 and
        e.is_translator == 0 and e.location == u'London, UK' and
        e.media_count is None and e.name == u'BBC Breaking News' and
        e.pinned_tweet_id is None and e.profile_banner_url ==
        u'https://pbs.twimg.com/profile_banners/5402612/1398336837' and
        e.profile_image_url ==
        u'https://pbs.twimg.com/profile_images/460740982498013184/wIPwMwru'
        u'_normal.png' and e.profile_link_color_hex_triplet == 2052731 and
        e.protected == 0 and e.screen_name == u'BBCBreaking' and
        e.statuses_count == 26697 and e.structured_location is None and
        e.url == u'http://www.bbc.co.uk/news' and e.url_entities is None and
        e.verified == 1][0]

    # Bug fix: index() must be called on the events list, not the writer.
    position = storage_writer.events.index(guessed_event)
    test_event = storage_writer.events[position]

    # TODO add expected formatted timestamp for timestamp in database:
    # 1449070544.333328
    expected_timestamp = timelib.Timestamp.CopyFromString(u'TODO')
    self.assertEqual(test_event.timestamp, expected_timestamp)
    self.assertEqual(
        test_event.timestamp_desc, eventdata.EventTimestamp.CREATION_TIME)

    self.assertEqual(test_event.advertiser_account_type, 0)
    self.assertEqual(test_event.analytics_type, 0)
    self.assertIsNone(test_event.bio_entities)
    self.assertEqual(test_event.business_profile_state, 0)
    self.assertEqual(test_event.could_be_stale, 0)
    expected_description = (
        u'Breaking news alerts and updates from the BBC. For news,'
        u'features, analysis follow @BBCWorld (international) or @BBCNews'
        u'(UK). Latest sport news @BBCSport.')
    self.assertEqual(test_event.description, expected_description)
    self.assertEqual(test_event.device_following, 0)
    self.assertIsNone(test_event.extended_profile_fields)
    self.assertEqual(test_event.favorites_count, 0)
    self.assertEqual(test_event.followers_count, 19466932)
    self.assertEqual(test_event.followers_count_fast, 0)
    self.assertEqual(test_event.followers_count_normal, 0)
    self.assertEqual(test_event.following, 0)
    self.assertEqual(test_event.following_count, 3)
    self.assertEqual(test_event.has_collections, 0)
    self.assertEqual(test_event.has_extended_profile_fields, 0)
    self.assertEqual(test_event.id, 5402612)
    self.assertEqual(test_event.is_lifeline_institution, 0)
    self.assertEqual(test_event.is_translator, 0)
    self.assertEqual(test_event.location, u'London, UK')
    self.assertIsNone(test_event.media_count)
    self.assertEqual(test_event.name, u'BBC Breaking News')
    self.assertIsNone(test_event.pinned_tweet_id)
    expected_profile_banner_url = (
        u'https://pbs.twimg.com/profile_banners/5402612/1398336837')
    self.assertEqual(test_event.profile_banner_url, expected_profile_banner_url)
    expected_profile_image_url = (
        u'https://pbs.twimg.com/profile_images/460740982498013184/wIPwMwru'
        u'_normal.png')
    self.assertEqual(test_event.profile_image_url, expected_profile_image_url)
    self.assertEqual(test_event.profile_link_color_hex_triplet, 2052731)
    self.assertEqual(test_event.protected, 0)
    self.assertEqual(test_event.screen_name, u'BBCBreaking')
    self.assertEqual(test_event.statuses_count, 26697)
    self.assertIsNone(test_event.structured_location)
    self.assertEqual(test_event.url, u'http://www.bbc.co.uk/news')
    self.assertIsNone(test_event.url_entities)
    self.assertEqual(test_event.verified, 1)

    expected_message = (
        u'Id: 5402612 Screen Name: BBCBreaking Profile Image Url: https://'
        u'pbs.twimg.com/profile_images/460740982498013184/wIPwMwru_normal.'
        u'png Profile Banner Url:'
        u'https://pbs.twimg.com/profile_banners/5402612/1398336837 Profile'
        u'Link Color Hex Triplet: 2052731 Name: BBC Breaking News'
        u'Location: London, UK Structured Location: None Description:'
        u'Breaking news alerts and updates from the BBC. For news,'
        u'features, analysis follow @BBCWorld (international) or @BBCNews'
        u'(UK). Latest sport news @BBCSport. Url:'
        u'http://www.bbc.co.uk/news Url Entities: None Bio Entities: None'
        u'Protected: 0 Verified: 1 Following: 0 Device Following: 0'
        u'Advertiser Account Type: 0 Statuses Count: 26697 Media Count:'
        u'None Favorites Count: 0 Following Count: 3 Followers Count:'
        u'19466932 Followers Count Fast: 0 Followers Count Normal: 0 Could'
        u'Be Stale: 0 Is Lifeline Institution: 0 Has Collections: 0 Is'
        u'Translator: 0 Has Extended Profile Fields: 0 Extended Profile'
        u'Fields: None Pinned Tweet Id: None Business Profile State: 0'
        u'Analytics Type: 0')
    expected_message_short = (
        u'Id: 5402612 Screen Name: BBCBreaking Profile Image Url:'
        u'https://pbs.twimg.com...')
    self._TestGetMessageStrings(
        test_event, expected_message, expected_message_short)

    # Test the first theuser createdDate event.
    guessed_event = [
        e for e in storage_writer.events
        if e.advertiser_account_type == 0 and e.analytics_type == 0 and
        e.bio_entities == u'b\'{}\'' and e.business_profile_state == 0 and
        e.could_be_stale == 0 and e.description == u'How people build software'
        and e.device_following == 0 and e.extended_profile_fields is None and
        e.favorites_count == 155 and e.followers_count == 742086 and
        e.followers_count_fast == 0 and e.followers_count_normal == 742086 and
        e.following == 0 and e.following_count == 172 and e.has_collections == 0
        and e.has_extended_profile_fields == 0 and e.id == 13334762 and
        e.is_lifeline_institution == 0 and e.is_translator == 0 and
        e.location == u'San Francisco, CA' and e.media_count == 33 and e.name ==
        u'GitHub' and e.pinned_tweet_id is None and e.profile_banner_url ==
        u'https://pbs.twimg.com/profile_banners/13334762/1415719104' and
        e.profile_image_url ==
        u'https://pbs.twimg.com/profile_images/616309728688238592/pBeeJQDQ'
        u'_normal.png' and e.profile_link_color_hex_triplet == 255 and
        e.protected == 0 and e.screen_name == u'github' and
        e.statuses_count == 3120 and e.structured_location is None and
        e.url == u'https://t.co/FoKGHcCyJJ' and e.url_entities ==
        u'b\'{&#34;urls&#34;:[{&#34;url&#34;:&#34;https:\\/\\/t.co\\/Fo'
        u'KGHcCyJJ&#34;,&#34;rangeInDisplay.length&#34;:0,&#34;displayURL&'
        u'#34;:&#34;github.com&#34;,&#34;rangeInDisplay.location&#34;:0,&#'
        u'34;expandedURL&#34;:&#34;https:\\/\\/github.com&#34;,&#34;range.'
        u'location&#34;:0,&#34;range.length&#34;:23}]}\'' and e.verified == 1
        ][0]

    # Bug fix: index() must be called on the events list, not the writer.
    position = storage_writer.events.index(guessed_event)
    test_event = storage_writer.events[position]

    # TODO add expected formatted timestamp for timestamp in database:
    # 1202704910.0
    expected_timestamp = timelib.Timestamp.CopyFromString(u'TODO')
    self.assertEqual(test_event.timestamp, expected_timestamp)
    self.assertEqual(
        test_event.timestamp_desc, eventdata.EventTimestamp.CREATION_TIME)

    self.assertEqual(test_event.advertiser_account_type, 0)
    self.assertEqual(test_event.analytics_type, 0)
    self.assertEqual(test_event.bio_entities, u'b\'{}\'')
    self.assertEqual(test_event.business_profile_state, 0)
    self.assertEqual(test_event.could_be_stale, 0)
    self.assertEqual(test_event.description, u'How people build software')
    self.assertEqual(test_event.device_following, 0)
    self.assertIsNone(test_event.extended_profile_fields)
    self.assertEqual(test_event.favorites_count, 155)
    self.assertEqual(test_event.followers_count, 742086)
    self.assertEqual(test_event.followers_count_fast, 0)
    self.assertEqual(test_event.followers_count_normal, 742086)
    self.assertEqual(test_event.following, 0)
    self.assertEqual(test_event.following_count, 172)
    self.assertEqual(test_event.has_collections, 0)
    self.assertEqual(test_event.has_extended_profile_fields, 0)
    self.assertEqual(test_event.id, 13334762)
    self.assertEqual(test_event.is_lifeline_institution, 0)
    self.assertEqual(test_event.is_translator, 0)
    self.assertEqual(test_event.location, u'San Francisco, CA')
    self.assertEqual(test_event.media_count, 33)
    self.assertEqual(test_event.name, u'GitHub')
    self.assertIsNone(test_event.pinned_tweet_id)
    expected_profile_banner_url = (
        u'https://pbs.twimg.com/profile_banners/13334762/1415719104')
    self.assertEqual(test_event.profile_banner_url, expected_profile_banner_url)
    expected_profile_image_url = (
        u'https://pbs.twimg.com/profile_images/616309728688238592/pBeeJQDQ'
        u'_normal.png')
    self.assertEqual(test_event.profile_image_url, expected_profile_image_url)
    self.assertEqual(test_event.profile_link_color_hex_triplet, 255)
    self.assertEqual(test_event.protected, 0)
    self.assertEqual(test_event.screen_name, u'github')
    self.assertEqual(test_event.statuses_count, 3120)
    self.assertIsNone(test_event.structured_location)
    self.assertEqual(test_event.url, u'https://t.co/FoKGHcCyJJ')
    expected_url_entities = (
        u'b\'{&#34;urls&#34;:[{&#34;url&#34;:&#34;https:\\/\\/t.co\\/Fo'
        u'KGHcCyJJ&#34;,&#34;rangeInDisplay.length&#34;:0,&#34;displayURL&'
        u'#34;:&#34;github.com&#34;,&#34;rangeInDisplay.location&#34;:0,&#'
        u'34;expandedURL&#34;:&#34;https:\\/\\/github.com&#34;,&#34;range.'
        u'location&#34;:0,&#34;range.length&#34;:23}]}\'')
    self.assertEqual(test_event.url_entities, expected_url_entities)
    self.assertEqual(test_event.verified, 1)

    expected_message = (
        u'Id: 13334762 Screen Name: github Profile Image Url: https://pbs.'
        u'twimg.com/profile_images/616309728688238592/pBeeJQDQ_normal.png'
        u'Profile Banner Url:'
        u'https://pbs.twimg.com/profile_banners/13334762/1415719104'
        u'Profile Link Color Hex Triplet: 255 Name: GitHub Location: San'
        u'Francisco, CA Structured Location: None Description: How people'
        u'build software Url: https://t.co/FoKGHcCyJJ Url Entities: b\''
        u'{&#34;urls&#34;:[{&#34;url&#34;:&#34;https:\\/\\/t.co\\/FoKGHcCy'
        u'JJ&#34;,&#34;rangeInDisplay.length&#34;:0,&#34;displayURL&#34;:&'
        u'#34;github.com&#34;,&#34;rangeInDisplay.location&#34;:0,&#34;exp'
        u'andedURL&#34;:&#34;https:\\/\\/github.com&#34;,&#34;range.locati'
        u'on&#34;:0,&#34;range.length&#34;:23}]}\' Bio Entities:'
        u'b\'{}\' Protected: 0 Verified: 1 Following: 0 Device'
        u'Following: 0 Advertiser Account Type: 0 Statuses Count: 3120'
        u'Media Count: 33 Favorites Count: 155 Following Count: 172'
        u'Followers Count: 742086 Followers Count Fast: 0 Followers Count'
        u'Normal: 742086 Could Be Stale: 0 Is Lifeline Institution: 0 Has'
        u'Collections: 0 Is Translator: 0 Has Extended Profile Fields: 0'
        u'Extended Profile Fields: None Pinned Tweet Id: None Business'
        u'Profile State: 0 Analytics Type: 0')
    expected_message_short = (
        u'Id: 13334762 Screen Name: github Profile Image Url:'
        u'https://pbs.twimg.com/pro...')
    self._TestGetMessageStrings(
        test_event, expected_message, expected_message_short)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| |
"""Utilities related archives.
"""
# The following comment should be removed at some point in the future.
# mypy: strict-optional=False
# mypy: disallow-untyped-defs=False
from __future__ import absolute_import
import logging
import os
import shutil
import stat
import tarfile
import zipfile
from pip._internal.exceptions import InstallationError
from pip._internal.utils.filetypes import (
BZ2_EXTENSIONS,
TAR_EXTENSIONS,
XZ_EXTENSIONS,
ZIP_EXTENSIONS,
)
from pip._internal.utils.misc import ensure_dir
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Iterable, List, Optional, Text, Union
logger = logging.getLogger(__name__)
# Archive extensions this module can unpack; extended below if the optional
# compression modules are importable in this interpreter build.
SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS

try:
    import bz2  # noqa
    SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS
except ImportError:
    logger.debug('bz2 module is not available')

try:
    # Only for Python 3.3+
    import lzma  # noqa
    SUPPORTED_EXTENSIONS += XZ_EXTENSIONS
except ImportError:
    logger.debug('lzma module is not available')
def current_umask():
    """Return the process umask.

    There is no read-only accessor, so the umask is set to 0 and then
    immediately restored to its previous value.
    """
    previous = os.umask(0)
    os.umask(previous)
    return previous
def split_leading_dir(path):
    # type: (Union[str, Text]) -> List[Union[str, Text]]
    """Split off the first path component: returns [head, remainder].

    The remainder is '' when the path has no separator.  Whichever of
    '/' or '\\' appears first is used as the separator.
    """
    path = path.lstrip('/').lstrip('\\')
    slash_pos = path.find('/')
    backslash_pos = path.find('\\')
    if slash_pos != -1 and (backslash_pos == -1 or slash_pos < backslash_pos):
        return path.split('/', 1)
    if backslash_pos != -1:
        return path.split('\\', 1)
    return [path, '']
def has_leading_dir(paths):
    # type: (Iterable[Union[str, Text]]) -> bool
    """Returns true if all the paths have the same leading path name
    (i.e., everything is in one subdirectory in an archive)"""
    first_prefix = None
    for entry in paths:
        head, _ = split_leading_dir(entry)
        if not head:
            # An entry with no leading component means no common subdirectory.
            return False
        if first_prefix is None:
            first_prefix = head
        elif head != first_prefix:
            return False
    return True
def is_within_directory(directory, target):
    # type: ((Union[str, Text]), (Union[str, Text])) -> bool
    """
    Return true if the absolute path of target is within the directory.

    Bug fix: the previous implementation used os.path.commonprefix(), which
    compares strings character by character, so a sibling such as
    '/base-evil' was wrongly reported as being within '/base'.  Compare on
    a path-component boundary instead.
    """
    abs_directory = os.path.abspath(directory)
    abs_target = os.path.abspath(target)
    if abs_target == abs_directory:
        return True
    return abs_target.startswith(abs_directory.rstrip(os.sep) + os.sep)
def unzip_file(filename, location, flatten=True):
    # type: (str, str, bool) -> None
    """
    Unzip the file (with path `filename`) to the destination `location`. All
    files are written based on system defaults and umask (i.e. permissions are
    not preserved), except that regular file members with any execute
    permissions (user, group, or world) have "chmod +x" applied after being
    written. Note that for windows, any execute changes using os.chmod are
    no-ops per the python docs.

    Improvements: file handles are managed with context managers (the old
    code leaked the member handle on copy errors), and the archive variable
    no longer shadows the builtin zip().
    """
    ensure_dir(location)
    with open(filename, 'rb') as zipfp:
        zip_archive = zipfile.ZipFile(zipfp, allowZip64=True)
        leading = has_leading_dir(zip_archive.namelist()) and flatten
        for info in zip_archive.infolist():
            name = info.filename
            fn = name
            if leading:
                fn = split_leading_dir(name)[1]
            fn = os.path.join(location, fn)
            dir = os.path.dirname(fn)
            # Guard against zip entries escaping the target directory.
            if not is_within_directory(location, fn):
                message = (
                    'The zip file ({}) has a file ({}) trying to install '
                    'outside target directory ({})'
                )
                raise InstallationError(message.format(filename, fn, location))
            if fn.endswith('/') or fn.endswith('\\'):
                # A directory
                ensure_dir(fn)
            else:
                ensure_dir(dir)
                # Don't use read() to avoid allocating an arbitrarily large
                # chunk of memory for the file's content
                with zip_archive.open(name) as fp:
                    with open(fn, 'wb') as destfp:
                        shutil.copyfileobj(fp, destfp)
                mode = info.external_attr >> 16
                # if mode and regular file and any execute permissions for
                # user/group/world?
                if mode and stat.S_ISREG(mode) and mode & 0o111:
                    # make dest file have execute for user/group/world
                    # (chmod +x) no-op on windows per python docs
                    os.chmod(fn, (0o777 - current_umask() | 0o111))
def untar_file(filename, location):
    # type: (str, str) -> None
    """
    Untar the file (with path `filename`) to the destination `location`.
    All files are written based on system defaults and umask (i.e. permissions
    are not preserved), except that regular file members with any execute
    permissions (user, group, or world) have "chmod +x" applied after being
    written. Note that for windows, any execute changes using os.chmod are
    no-ops per the python docs.
    """
    ensure_dir(location)
    # Choose the tarfile mode from the extension; fall back to transparent
    # compression detection ('r:*') with a warning.
    if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):
        mode = 'r:gz'
    elif filename.lower().endswith(BZ2_EXTENSIONS):
        mode = 'r:bz2'
    elif filename.lower().endswith(XZ_EXTENSIONS):
        mode = 'r:xz'
    elif filename.lower().endswith('.tar'):
        mode = 'r'
    else:
        logger.warning(
            'Cannot determine compression type for file %s', filename,
        )
        mode = 'r:*'
    tar = tarfile.open(filename, mode)
    try:
        # If every member lives under one common directory, strip it off.
        leading = has_leading_dir([
            member.name for member in tar.getmembers()
        ])
        for member in tar.getmembers():
            fn = member.name
            if leading:
                # https://github.com/python/mypy/issues/1174
                fn = split_leading_dir(fn)[1]  # type: ignore
            path = os.path.join(location, fn)
            # Guard against tar entries escaping the target directory.
            if not is_within_directory(location, path):
                message = (
                    'The tar file ({}) has a file ({}) trying to install '
                    'outside target directory ({})'
                )
                raise InstallationError(
                    message.format(filename, path, location)
                )
            if member.isdir():
                ensure_dir(path)
            elif member.issym():
                try:
                    # https://github.com/python/typeshed/issues/2673
                    tar._extract_member(member, path)  # type: ignore
                except Exception as exc:
                    # Some corrupt tar files seem to produce this
                    # (specifically bad symlinks)
                    logger.warning(
                        'In the tar file %s the member %s is invalid: %s',
                        filename, member.name, exc,
                    )
                    continue
            else:
                try:
                    fp = tar.extractfile(member)
                except (KeyError, AttributeError) as exc:
                    # Some corrupt tar files seem to produce this
                    # (specifically bad symlinks)
                    logger.warning(
                        'In the tar file %s the member %s is invalid: %s',
                        filename, member.name, exc,
                    )
                    continue
                ensure_dir(os.path.dirname(path))
                with open(path, 'wb') as destfp:
                    shutil.copyfileobj(fp, destfp)
                fp.close()
                # Update the timestamp (useful for cython compiled files)
                # https://github.com/python/typeshed/issues/2673
                tar.utime(member, path)  # type: ignore
                # member have any execute permissions for user/group/world?
                if member.mode & 0o111:
                    # make dest file have execute for user/group/world
                    # no-op on windows per python docs
                    os.chmod(path, (0o777 - current_umask() | 0o111))
    finally:
        tar.close()
def unpack_file(
    filename,  # type: str
    location,  # type: str
    content_type=None,  # type: Optional[str]
):
    # type: (...) -> None
    """Unpack an archive to `location`, dispatching on content type and
    file extension; raises InstallationError for unknown formats."""
    filename = os.path.realpath(filename)
    lower_name = filename.lower()

    looks_like_zip = (
        content_type == 'application/zip' or
        lower_name.endswith(ZIP_EXTENSIONS) or
        zipfile.is_zipfile(filename)
    )
    if looks_like_zip:
        # Wheels must keep their internal directory layout intact.
        unzip_file(filename, location, flatten=not filename.endswith('.whl'))
        return

    looks_like_tar = (
        content_type == 'application/x-gzip' or
        tarfile.is_tarfile(filename) or
        lower_name.endswith(TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS)
    )
    if looks_like_tar:
        untar_file(filename, location)
        return

    # FIXME: handle?
    # FIXME: magic signatures?
    logger.critical(
        'Cannot unpack file %s (downloaded from %s, content-type: %s); '
        'cannot detect archive format',
        filename, location, content_type,
    )
    raise InstallationError(
        'Cannot determine archive format of {}'.format(location)
    )
| |
"""Class for managing, downloading and extracting features from genbank files."""
# Standard Library
import os
import shutil
from pathlib import Path
# BioPython
from BioSQL import BioSeqDatabase
from Bio import SeqIO
# OrthoEvol
from OrthoEvol.Tools.logit import LogIt
from OrthoEvol.Orthologs.Blast import OrthoBlastN
from OrthoEvol.Orthologs.Blast import BaseComparativeGenetics
from OrthoEvol.utilities import FullUtilities
class GenBank(object):
"""This class will handle GenBank files in various ways."""
    def __init__(self, project, project_path=None, solo=False, multi=True,
                 archive=False, min_fasta=True, blast=OrthoBlastN, **kwargs):
        """Handle GenBank files in various ways.

        It allows for refseq-release .gbff files to be downloaded from NCBI
        and uploaded to a BioSQL database (biopython). Single .gbk files can be
        downloaded from the .gbff, and uploaded to a custom BopSQL database for
        faster acquisition of GenBank data.

        :param project: The name of the project.
        :param project_path: The relative path to the project.
        :param solo: A flag for adding single fasta files.
        :param multi: A flag for adding multi-fasta files.
        :param archive: A flag for archiving current GenBank Data.  # TODO
        :param min_fasta: A flag for minimizing FASTA file headers.
        :param blast: The blast parameter is used for composing various
                      Orthologs.Blast classes.  Can be a class, a dict,
                      or none.
        :returns: .gbff files/databases, .gbk files/databases, & FASTA files.
        """
        # TODO-ROB: Change the way the file systems work.
        self.genbank_utils = FullUtilities()
        self.project = project
        self.project_path = project_path
        self.solo = solo
        self.multi = multi
        self.min_fasta = min_fasta
        self.genbanklog = LogIt().default(logname="GenBank", logfile=None)

        # Configuration of class attributes: attribute_config copies the
        # composer's attributes (paths, dataframes, ...) onto this instance.
        add_self = self.genbank_utils.attribute_config(self, composer=blast, checker=OrthoBlastN,
                                                       checker2=BaseComparativeGenetics,
                                                       project=project, project_path=project_path)
        for var, attr in add_self.__dict__.items():
            setattr(self, var, attr)

        # Configuration
        # FIXME AttributeError: 'GenBank' object has no attribute 'user_db'
        # NOTE(review): user_db and ncbi_db_repo appear to be supplied by
        # attribute_config above -- confirm against the composer classes.
        self.target_gbk_db_path = self.user_db / Path(self.project)
        Path.mkdir(self.target_gbk_db_path, parents=True, exist_ok=True)

        # Make a list of BioSQL database(.db) files that contain GenBank info
        self.db_files_list = []
        for FILE in os.listdir(str(self.ncbi_db_repo)):
            if FILE.endswith('.db'):
                self.db_files_list.append(str(FILE))
@staticmethod
def name_fasta_file(self, path, gene, org, feat_type,
feat_type_rank, extension, mode):
"""
Provide a uniquely named FASTA file:
* Coding sequence:
* Single - "<path>/<gene>_<organism><feat_type_rank>.<extension>"
* Multi - "<path>/<gene><feat_type_rank>.<extension>"
* Other:
* Single - "<path>/<gene>_<organism>_<feat_type_rank>.<extension>"
* Multi - "<path>/<gene>_<feat_type_rank>.<extension>"
:param path: The path where the file will be made.
:param gene: The gene name.
:param org: The organism name.
:param feat_type: The type of feature from the GenBank record.
(CDS, UTR, misc_feature, variation, etc.)
:param feat_type_rank: The feature type + the rank.
(There can be multiple misc_features and
variations)
:param extension: The file extension.
(".ffn", ".faa", ".fna", ".fasta")
:param mode: The mode ("w" or "a") for writing the file. Write to a
solo-FASTA file. Append a multi-FASTA file.
:return: The uniquely named FASTA file.
"""
# Create path variables. (typically raw_data/<gene>/GENBANK
feat_path = path
# Create a format-able string for file names
if feat_type_rank == "CDS":
single = '%s_%s%s%s'
multi = '%s%s%s'
else:
single = '%s_%s_%s%s'
multi = '%s_%s%s'
# Create different names based on fasta file type
if mode == 'w':
file_path = feat_path / Path(single % (gene, org, feat_type_rank, extension))
elif mode == 'a':
file_path = feat_path / Path(multi % (gene, feat_type_rank, extension))
# Make the base directory and return an open file.
os.makedirs(str(feat_path), exist_ok=True)
file = open(file_path, mode)
return file
@staticmethod
def protein_gi_fetch(feature):
"""Retrieve the protein gi number.
:param feature: Search the protein feature for the GI number.
:return: The protein GI number as a string.
"""
# Find the protein gi number under the features qualifiers.
for x in feature.qualifiers:
if 'GI' in x:
_, _, p_gi = x.partition(':')
return p_gi
    def create_post_blast_gbk_records(self, org_list, gene_dict):
        """Create a single GenBank file for each ortholog.

        After a blast has completed and the accession numbers have been compiled
        into an accession file, this class searches a local NCBI refseq release
        database composed of GenBank records.  This method will create a single
        GenBank file (.gbk) for each ortholog with an accession number.

        The create_post_blast_gbk_records is only callable if the
        the instance is composed by one of the Blast classes.  This method also
        requires an NCBI refseq release database to be set up with the proper
        GenBank Flat Files (.gbff) files.

        :param org_list: List of organisms
        :param gene_dict: A nested dictionary for accessing accession numbers.
                          (e.g. gene_dict[GENE][ORGANISM} yields an accession
                          number)
        :return: Does not return an object, but creates genbank files.
        """
        # Parse the tier_frame_dict to get the tier
        # NOTE(review): `tier` is rebound on every outer iteration, so the
        # inner loops run once per tier -- confirm this is the intent.
        for G_KEY, _ in self.tier_frame_dict.items():
            tier = G_KEY
            # Parse the tier based transformed dataframe to get the gene
            for GENE in self.tier_frame_dict[tier].T:
                # Parse the organism list to get the desired accession number
                for ORGANISM in org_list:
                    accession = str(gene_dict[GENE][ORGANISM])
                    accession, _, version = accession.partition('.')
                    # When parsing a GenBank database, the version needs to be removed.
                    accession = accession.upper()
                    server_flag = False
                    # Search the databases and create a GenBank file.
                    self.get_gbk_file(accession, GENE, ORGANISM, server_flag=server_flag)
    def get_gbk_file(self, accession, gene, organism, server_flag=None):
        """Search a GenBank database for a target accession number.

        This function searches through the given NCBI databases (created by
        uploading NCBI refseq .gbff files to a BioPython BioSQL database) and
        creates single GenBank files.  This function can be used after a
        blast or on its own.  If used on it's own then the NCBI .db files must
        be manually moved to the proper directories.

        :param accession: Accession number of interest without the version.
        :param gene: Target gene of the accession number parameter.
        :param organism: Target organism of the accession number parameter.
        :param server_flag: (Default value = None)
        :return: None; writes "<gene>_<organism>.gbk" under raw_data/<gene>/GENBANK.
        :raises FileNotFoundError: if no database contains the accession.
        """
        gene_path = self.raw_data / Path(gene) / Path('GENBANK')
        Path.mkdir(gene_path, parents=True, exist_ok=True)

        # Parse each database to find the proper GenBank record
        for FILE in self.db_files_list:
            db_file_path = self.ncbi_db_repo / Path(FILE)
            # Stop searching if the GenBank record has been created.
            if server_flag is True:
                break
            server = BioSeqDatabase.open_database(driver='sqlite3',
                                                  db=str(db_file_path))
            # Parse the sub-databases
            for SUB_DB_NAME in server.keys():
                db = server[SUB_DB_NAME]
                try:
                    record = db.lookup(accession=accession)
                    gbk_file = '%s_%s.gbk' % (gene, organism)
                    gbk_file_path = gene_path / Path(gbk_file)
                    with open(gbk_file_path, 'w') as GB_file:
                        GB_file.write(record.format('genbank'))
                        self.genbanklog.info(GB_file.name, 'created')
                    # Make sure we have the correct GenBank file.
                    self.gbk_quality_control(gbk_file_path, gene, organism)
                    # Stop searching if the GenBank record has been created.
                    server_flag = True
                    break
                except IndexError:
                    # A missing accession in this sub-database; keep looking.
                    self.genbanklog.critical(
                        'Index Error in %s. Moving to the next database...' %
                        SUB_DB_NAME)
                    continue

        # If the file has not been created after searching, then raise an error
        if server_flag is not True:
            self.genbanklog.critical(
                "The GenBank file was not created for %s (%s, %s)." %
                (accession, gene, organism))
            raise FileNotFoundError
def gbk_quality_control(self, gbk_file, gene, organism):
"""Ensures the quality or validity of the retrieved genbank record.
It takes the GenBank record and check to make sure the Gene and Organism
from the GenBank record match the Gene and Organism from the accession
file. If not, then the Blast has returned the wrong accession number.
:param gbk_file: The path to a GenBank file.
:param gene: A gene name from the Accession file.
:param organism: A gene name from the Accession file.
:return:
"""
# TODO-ROB: Check the bad data here against the misssing/duplicate files
record = SeqIO.read(gbk_file, 'genbank')
gene_flag = False
organism_flag = False
accession = record.id
self.gbk_gene_synonym = {}
self.duplicated_dict["validated"] = {}
# Get the organism name from the GenBank file
gbk_organism = record.features[0].qualifiers["organism"] # A list with one entry
if len(gbk_organism) == 1:
gbk_organism = gbk_organism[0]
gbk_organism = gbk_organism.replace(" ", "_")
else:
self.genbanklog.critical(
"Two organisms exist in the GenBank file. Is this normal?")
raise BrokenPipeError
# Check to make sure the organism in the GenBank file matches the
# organism from the accession file
if gbk_organism == organism:
self.genbanklog.info(
"The GenBank organism, %s, has been verified for %s." %
(organism, gene))
else:
organism_flag = True
# Get the gene from the GenBank files
gbk_genes = record.features[1].qualifiers["gene"]
# Get the synonyms from the GenBank file if they exist and add them to
# the list.
if "gene_synonym" in str(record.features[1].qualifiers.keys()):
base_gene_name = gbk_genes
gbk_genes.extend(record.features[1].qualifiers["gene_synonym"])
# Create a dictionary from the synonyms
self.gbk_gene_synonym[base_gene_name] = []
self.gbk_gene_synonym[base_gene_name].extend(gbk_genes)
# Check to make sure the gene in the GenBank file matches the gene from
# the accession file
for gbk_gene in gbk_genes:
if gbk_gene == gene:
gene_flag = False
self.genbanklog.info(
"The GenBank gene, %s, has been verified for %s." %
(gene, organism))
break
else:
gene_flag = True
# TODO-ROB: Add a verified key to the duplicates dictionary.
# Raise errors.
if organism_flag is True and gene_flag is True:
self.genbanklog.critical("The organisms don't match.\n\tGenBank: %s \n\tAccession File: %s" %
(gbk_organism, organism))
self.genbanklog.critical("The genes don't match. \n\tGenBank: %s \n\tAccession File: %s" %
(gbk_genes, gene))
raise BrokenPipeError
elif organism_flag is True:
self.genbanklog.critical("The organisms don't match.\n\tGenBank: %s \n\tAccession File: %s" %
(gbk_organism, organism))
raise BrokenPipeError
elif gene_flag is True:
self.genbanklog.critical("The genes don't match. \n\tGenBank: %s \n\tAccession File: %s" %
(gbk_genes, gene))
raise BrokenPipeError
self.duplicated_dict["validated"][accession] = [gene, organism]
def gbk_upload(self):
"""Upload a BioSQL database with target GenBank data (.gbk files).
This method is only usable after creating GenBank records with this
class. It uploads a BioSQL databases with target GenBank data (.gbk
files). This creates a compact set of data for each project.
:return: Does not return an object.
"""
t_count = 0
# Parse the tier dictionary
for TIER in self.tier_frame_dict.keys():
db_name = str(TIER) + '.db'
db_file_path = self.target_gbk_db_path / Path(db_name)
# Create the db file if it exists
if os.path.isfile(str(db_file_path)) is False:
self.genbanklog.warn(
'Copying Template BioSQL Database... This may take a few minutes...')
shutil.copy2('Template_BioSQL_DB.db', str(db_file_path))
# If it already exists then the database is bad, or needs to be update.
# Delete it.
else:
# TODO-ROB: This part is broken until the template db creation and
# management is added
os.remove(str(db_file_path))
self.genbanklog.warn(
'Copying Template BioSQL Database... This may take a few minutes...')
shutil.copy2('Template_BioSQL_DB.db', str(db_file_path))
server = BioSeqDatabase.open_database(driver='sqlite3', db=str(db_file_path))
gene_path = self.raw_data
# Parse the raw_data folder to get the name of each gene.
for GENE in os.listdir(str(gene_path)):
sub_db_name = GENE
genbank_path = gene_path / Path(GENE) / Path('GENBANK')
# Parse the GenBank file names for each gene in order to upload them to a
# custom BioSQL database
for FILE in os.listdir(str(genbank_path)):
# Try to load the database.
try:
if sub_db_name not in server.keys():
server.new_database(sub_db_name)
db = server[sub_db_name]
count = db.load(SeqIO.parse(FILE, 'genbank'))
server.commit()
self.genbanklog.info('Server Commited %s' % sub_db_name)
self.genbanklog.info(
'%s database loaded with %s.' %
(db.dbid, FILE))
self.genbanklog.info(
"That file contains %s genbank records." %
str(count))
t_count = t_count + count
self.genbanklog.info(
'The total number of files loaded so far is %i.' %
t_count)
# If the database cannot be loaded then rollback the server and raise
# an error.
except BaseException:
server.rollback()
# Try to delete the sub database and commit
try:
del server[sub_db_name]
server.commit()
# If it cannot be deleted then raise an error.
except BaseException:
raise
raise
def get_fasta_files(self, acc_dict, db=True):
"""Create FASTA files for each GenBank record in the accession dictionary.
It can search through a BioSQL database or it can crawl a directory
for .gbk files.
:param acc_dict: An accession dictionary like the one created by
CompGenObjects.
:param db: A flag that determines whether or not to use the custom
BioSQL database or to use .gbk files.
(Default value = True)
:return: Returns FASTA files for each GenBank record.
"""
# Get FASTA files from the BioSQL GenBank databases.
if db is True:
# Parse the directory that contains the databases for the project of interest.
for database in os.listdir(str(self.target_gbk_db_path)):
server = BioSeqDatabase.open_database(driver="sqlite3", db=database)
try:
for db_name in server.keys():
db = server[db_name]
# For each GenBank record in the database write a set of FASTA
# files.
for item in db.keys():
record = db.lookup(item)
self.write_fasta_files(record, acc_dict)
self.genbanklog.info(
"FASTA files for %s created from BioSQL database." % item)
except BaseException:
raise()
# Get FASTA files from the GenBank files.
# TODO-ROB change this. Broken by new directory structure
# TODO-ROB directory looks like /raw_data/Gene_1/GENBANK/*.gbk
elif db is False:
# Parse the directory that contain the GenBank records for the project of
# interest.
for _, _, gbk_files in os.walk(str(self.target_gbk_files_path)):
# For each genbank record write a set of FASTA files.
for gbk_file in gbk_files:
if Path(gbk_file).suffix == '.gbk':
record = SeqIO.read(gbk_file, 'genbank')
self.write_fasta_files(record, acc_dict)
self.genbanklog.info("FASTA files for %s created." % gbk_file)
def write_fasta_files(self, record, acc_dict):
"""Create a dictionary for formatting the FASTA header & sequence.
:param record: A GenBank record created by BioPython.
:param acc_dict: Accession dictionary from the CompGenObjects class.
:return:
"""
feat_type_list = []
for feature in record.features:
# XXX Set up variables to use for dictionary values !!!
# Basic variables.
accession = record.id
gene = acc_dict[accession][0]
organism = acc_dict[accession][1]
# Variable for minimalistic FASTA files.
genus, sep, species = organism.partition('_')
min_org = str(''.join([genus[0], sep, species[0:28]]))
# Keep a list of feature types to identify duplicates (for naming the FASTA files).
# The first iteration of the feature type contains no number.
# The following iterations are concatenated with a number.
feat_type = str(feature.type)
feat_type_list.append(feat_type)
duplicate_num = feat_type_list.count(feat_type)
if duplicate_num == 1:
feat_type_rank = feat_type
else:
feat_type_rank = feat_type + str(duplicate_num)
# XXX END !!!
# TODO-ROB: Remove the GI number stuff here or at least prepare for
# file with no GI.
# Create a dictionary and format FASTA file entries.
fmt = {
'na_gi': str(record.annotations['gi']),
'aa_gi': str(self.protein_gi_fetch(feature)),
'na_acc_n': str(accession),
'aa_acc_n': str(feature.qualifiers['protein_id'][0]),
'na_description': str(record.description),
'aa_description': str(feature.qualifiers['product'][0]),
'na_seq': str(feature.extract(record.seq)),
'aa_seq': str(feature.qualifiers['translation'][0]),
'na_misc_feat': str(feature.qualifiers['note'][0]),
'org': str(organism),
'gene': str(gene),
'min_org': str(min_org),
'feat_type': str(feat_type),
'feat_type_rank': str(feat_type_rank),
'path': str(self.raw_data / Path(gene) / Path('GENBANK'))
}
# Set up minimalistic FASTA headers and sequence entries for Nucleic Acid
# and Amino Acid sequences.
na_entry = ">{min_org}\n{na_seq}\n".format(**fmt)
aa_entry = ">{min_org}\n{aa_seq}\n".format(**fmt)
# For full FASTA headers/sequences set min_fasta to False
if self.min_fasta is False:
na_entry = ">gi|{na_gi}|ref|{na_acc_n}| {na_description}\n{na_seq}\n".format(
**fmt)
aa_entry = ">gi|{aa_gi}|reg|{aa_acc_n}| {aa_description} {org}\n{aa_seq}\n".format(
**fmt)
# ######### End ######### #
# ############ Write desired FASTA files ############ #
if self.solo is True:
self.solo_fasta(na_entry, aa_entry, fmt)
if self.multi is True:
self.multi_fasta(na_entry, aa_entry, fmt)
def solo_fasta(self, na_entry, aa_entry, fmt):
"""This method writes a sequence of a feature to a uniquely named file using a dictionary for formatting.
:param na_entry: A string representing the Nucleic Acid sequence data in FASTA format.
:param aa_entry: A string representing the Amino Acid sequence data in FASTA format.
:param fmt: A dictionary for formatting the FASTA entries and the file names.
:return: Does not return an object, but creates single entry FASTA files.
"""
mode = 'w'
# Create the desired variables from the formatter dictionary.
feat_type = fmt['feat_type']
feat_type_rank = fmt['feat_type_rank']
path = fmt['path']
gene = fmt['gene']
org = fmt['org']
if feat_type == "CDS":
# Create a .ffn file (FASTA for Coding Nucleic Acids)
extension = '.ffn'
file = self.name_fasta_file(
path, gene, org, feat_type, feat_type_rank, extension, mode)
file.write(na_entry)
file.close()
# Create a .faa file (FASTA for Amino Acids)
extension = '.faa'
file = self.name_fasta_file(
path, gene, org, 'Protein', feat_type_rank, extension, mode)
file.write(aa_entry)
file.close()
elif feat_type == "misc_feature":
# Create a custom entry for miscellaneous features.
na_entry = ">gi|{na_gi}|ref|{na_acc_n}| {na_description} Feature: {na_misc_feat}\n{na_seq}\n".format(
**fmt)
# Creates .fna files (generic FASTA file for Nucleic Acids)
extension = '.fna'
file = self.name_fasta_file(
path, gene, org, feat_type, feat_type_rank, extension, mode)
file.write(na_entry)
file.close()
elif feat_type != "variation":
# Creates .fasta files (generic FASTA file)
extension = '.fasta'
file = self.name_fasta_file(
path, gene, org, 'Other', feat_type_rank, extension, mode)
file.write(na_entry)
file.close()
def multi_fasta(self, na_entry, aa_entry, fmt):
"""Append an othologous sequence of a feature to a uniquely named file.
Usese a dictionary for formatting.
:param na_entry: A string representing the Nucleic Acid sequence data in FASTA format.
:param aa_entry: A string representing the Amino Acid sequence data in FASTA format.
:param fmt: A dictionary for formatting the FASTA entries and the file names.
:return: Does not return an object, but creates or appends to a multi entry FASTA file.
"""
mode = 'a'
# Create the desired variables from the formatter dictionary.
feat_type = fmt['feat_type']
feat_type_rank = fmt['feat_type_rank']
path = fmt['path']
gene = fmt['gene']
org = fmt['org']
if feat_type == "CDS":
# Create a MASTER .ffn file (multi-FASTA file for Coding Nucleic Acids)
extension = '.ffn'
file = self.name_fasta_file(path, gene, org, feat_type,
feat_type_rank, extension, mode)
file.write(na_entry)
file.close()
# Create a MASTER .faa file (multi-FASTA file for Amino Acids)
extension = '.faa'
file = self.name_fasta_file(path, gene, org, feat_type,
feat_type_rank, extension, mode)
file.write(aa_entry)
file.close()
elif feat_type == "misc_feature":
na_entry = ">gi|{na_gi}|ref|{na_acc_n}| {na_description} Feature: {na_misc_feat}\n{na_seq}\n".format(
**fmt)
# Creates .fna files (generic FASTA file for Nucleic Acids)
extension = '.fna'
file = self.name_fasta_file(path, gene, org, feat_type,
feat_type_rank, extension, mode)
file.write(na_entry)
file.close()
| |
# -*- coding: utf-8 -*-
#
# Cilium documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 12 18:34:43 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import re
import subprocess
import semver
sys.path.insert(0, os.path.abspath('_exts'))
import cilium_spellfilters
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
html_logo = "images/logo.svg"
extensions = ['sphinx.ext.ifconfig',
              'sphinx.ext.githubpages',
              'sphinx.ext.extlinks',
              'sphinxcontrib.openapi',
              'sphinx_tabs.tabs',
              'sphinxcontrib.spelling',
              'versionwarning.extension']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ['.rst', '.md']
source_parsers = {
    '.md': 'recommonmark.parser.CommonMarkParser',
}
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Cilium'
copyright = u'2017-2021, Cilium Authors'
author = u'Cilium Authors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
release = open("../VERSION", "r").read().strip()
# Used by version warning
versionwarning_body_selector = "div.document"
# The version of Go used to compile Cilium
go_release = open("../GO_VERSION", "r").read().strip()
# The image tag for Cilium docker images
image_tag = 'v' + release
# Fetch the docs version from an environment variable.
# Map latest -> master.
# Map stable -> current version number.
branch = os.environ.get('READTHEDOCS_VERSION')
if not branch or branch == 'latest':
    # Local builds and the "latest" Read the Docs build track master.
    branch = 'HEAD'
    archive_name = 'master'
    chart_release = './cilium'
    image_tag = 'latest'
elif branch == 'stable':
    # "stable" resolves to the released version number.
    branch = release
    archive_name = release
    chart_release = 'cilium/cilium --version ' + release
    # `tags` is injected into conf.py's namespace by Sphinx itself.
    tags.add('stable')
else:
    # Any other branch name (e.g. a versioned build) is used verbatim.
    archive_name = branch
    chart_release = 'cilium/cilium --version ' + release
    tags.add('stable')
relinfo = semver.parse_version_info(release)
current_release = '%d.%d' % (relinfo.major, relinfo.minor)
# NOTE(review): patch level 90 appears to mark a development snapshot of the
# upcoming minor release -- confirm against the release process docs.
if relinfo.patch == 90:
    next_release = '%d.%d' % (relinfo.major, relinfo.minor + 1)
else:
    next_release = current_release
githubusercontent = 'https://raw.githubusercontent.com/cilium/cilium/'
scm_web = githubusercontent + branch
jenkins_branch = 'https://jenkins.cilium.io/view/Cilium-v' + current_release
github_repo = 'https://github.com/cilium/cilium/'
archive_filename = archive_name + '.tar.gz'
archive_link = github_repo + 'archive/' + archive_filename
archive_name = 'cilium-' + archive_name.strip('v')
project_link = github_repo + 'projects?query=is:open+' + next_release
backport_format = github_repo + 'pulls?q=is:open+is:pr+label:%s/' + current_release
# Store variables in the epilogue so they are globally available.
rst_epilog = """
.. |SCM_WEB| replace:: \{s}
.. |SCM_BRANCH| replace:: \{b}
.. |SCM_ARCHIVE_NAME| replace:: \{a}
.. |SCM_ARCHIVE_FILENAME| replace:: \{f}
.. |SCM_ARCHIVE_LINK| replace:: \{l}
.. |CURRENT_RELEASE| replace:: \{c}
.. |NEXT_RELEASE| replace:: \{n}
.. |CHART_RELEASE| replace:: \{h}
.. |GO_RELEASE| replace:: \{g}
.. |IMAGE_TAG| replace:: \{i}
""".format(s=scm_web, b=branch, a=archive_name, f=archive_filename, l=archive_link, c=current_release, n=next_release, h=chart_release, g=go_release, i=image_tag)
extlinks = {
    'git-tree': (scm_web + "/%s", ''),
    'jenkins-branch': (jenkins_branch + "/%s", ''),
    'github-project': (project_link + '%s', ''),
    'github-backport': (backport_format, ''),
    'gh-issue': (github_repo + 'issues/%s', 'GitHub issue '),
}
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# The default language to highlight source code in.
highlight_language = 'none'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# Add custom filters for spell checks.
spelling_filters = [cilium_spellfilters.WireGuardFilter]
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# The custom Cilium theme is only selected off aarch64; on aarch64 the stock
# Read the Docs theme is used instead.
if os.uname()[4] == "aarch64":
    html_theme = "sphinx_rtd_theme"
else:
    html_theme = "sphinx_rtd_theme_cilium"
html_context = {
    'release': release
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
    'logo_only': True
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images', '_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Ciliumdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
    'extraclassoptions': 'openany',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'Cilium.tex', u'Cilium Documentation',
     u'Cilium Authors', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'cilium', u'Cilium Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'Cilium', u'Cilium Documentation',
     author, 'Cilium', 'One line description of project.',
     'Miscellaneous'),
]
# NOTE(review): presumably relaxes sphinxcontrib-openapi/httpdomain strict
# reference checking -- confirm which extension consumes this option.
http_strict_mode = False
# Try as hard as possible to find references
default_role = 'any'
def setup(app):
app.add_stylesheet('parsed-literal.css')
app.add_stylesheet('copybutton.css')
app.add_stylesheet('editbutton.css')
app.add_javascript('clipboardjs.min.js')
app.add_javascript("copybutton.js")
app.add_stylesheet('helm-reference.css')
| |
'''
Created on Mar 30, 2014
Requires PySerial and PyGTK
***************WorkLog******************
5/7/14: initial install of pyserial and pygtk, loading up of code I worked on previously
5/8/14: testing methods to make scales change when new scale selected
5/8/14: debugging
5/12/14: Completely redid arduino code, changed python to reflect changes
5/13/14: debugged problems with values changing. Increased baud rate
5/14/14: added in new line '\n' after each command sent to arduino to fix problems with colors mixing
5/15/14: Clear now sets scales to zero, switching between strands makes scales move to stored previous values
by making scales global variables; added brightness slider and blink functionality
5/16/14: Final debugging, added bright and speed scales to clear function
2/16/15: Added a COM Port selector and Jump button
2/16/15: work on adding text box entry for nums
2/17/15: changed text boxes to spin boxes
****************************************
'''
import gtk
import sys
import serial
import time
import glob
# NOTE(review): `global` statements at module scope are no-ops in Python;
# the declarations below are kept untouched but have no effect.  The widgets
# are created once here and shared by the PyApp methods below.
global rgb1
global rgb2
global rgb3
global speed
# Effect speed (0-1500), shared with the blink/fade/jump commands.
speed=0
global brightness
# Brightness (0-255), used by the fade command.
brightness=0
# Sliders for the red/green/blue channels, effect speed and brightness.
global rScale
rScale = gtk.HScale()
global gScale
gScale = gtk.HScale()
global bScale
bScale = gtk.HScale()
global sScale
sScale = gtk.HScale()
global brightScale
brightScale = gtk.HScale()
# Drop-down listing the available serial ports.
global portList
portList = gtk.combo_box_new_text()
# Spin buttons mirroring the sliders (0-255 for colors/brightness,
# 0-1500 for speed).
global rSpin
rSpin =gtk.SpinButton(gtk.Adjustment(value=0, lower=0, upper=255, step_incr=1, page_incr=5, page_size=0),0,0)
global gSpin
gSpin =gtk.SpinButton(gtk.Adjustment(value=0, lower=0, upper=255, step_incr=1, page_incr=5, page_size=0),0,0)
global bSpin
bSpin =gtk.SpinButton(gtk.Adjustment(value=0, lower=0, upper=255, step_incr=1, page_incr=5, page_size=0),0,0)
global brightSpin
brightSpin =gtk.SpinButton(gtk.Adjustment(value=0, lower=0, upper=255, step_incr=1, page_incr=5, page_size=0),0,0)
global sSpin
sSpin =gtk.SpinButton(gtk.Adjustment(value=0, lower=0, upper=1500, step_incr=1, page_incr=5, page_size=0),0,0)
# Stored RGB values for strand 1, strand 2 and "both strands" (strand 3).
rgb1 = [0,0,0]
rgb2 = [0,0,0]
rgb3 = [0,0,0]
class PyApp(gtk.Window):
    """Main window of the RGB control app.

    Presents a serial-port selector, strand radio buttons, RGB/speed/
    brightness controls and effect buttons, and sends comma-separated
    commands over a serial link to an Arduino.  Python 2 / PyGTK code.
    """
    def __init__(self):
        """Build the whole UI and show the window.

        NOTE(review): activating the "Both" radio button below initializes
        the global `strand` via radio_buttons() before any slider callback
        can fire; `self.ser` only exists after a successful Connect.
        """
        super(PyApp, self).__init__()
        self.set_title("RGB Control")
        self.set_size_request(300, 275)
        self.set_position(gtk.WIN_POS_CENTER)
        headerVbox = gtk.VBox(True,0)
        headerLabel1 = gtk.Label("RGB Control App for Arduino")
        headerVbox.pack_start(headerLabel1)
        #Serial Selector
        ports = self.serial_ports()
        serialTable = gtk.Table(1,2,False)
        for port in ports:
            portList.append_text(port)
        connectButton = gtk.Button("Connect")
        connectButton.set_name("connect");
        connectButton.connect("clicked", self.on_button)
        serialTable.attach(portList, 0,1,0,1)
        serialTable.attach(connectButton,1,2,0,1)
        # Radio Buttons
        buttonTable = gtk.Table(1,3, False)
        button1 = gtk.RadioButton(None, "Strand 1")
        button1.connect("toggled", self.radio_buttons, "1")
        button2 = gtk.RadioButton(button1, "Strand 2")
        button2.connect("toggled", self.radio_buttons, "2")
        button3 = gtk.RadioButton(button1, "Both")
        button3.connect("toggled", self.radio_buttons, "3")
        # Fires radio_buttons("3") immediately, setting strand = 3.
        button3.set_active(True)
        buttonTable.attach(button1, 0,1,0,1)
        buttonTable.attach(button2, 1,2,0,1)
        buttonTable.attach(button3, 2,3,0,1)
        #Red slider
        rHbox = gtk.HBox(True,0)
        rLabel = gtk.Label("Red: ")
        rHbox.pack_start(rLabel)
        #spin box
        rSpin.set_name("red")
        rSpin.connect("value-changed",self.spin_changed)
        rHbox.pack_start(rSpin)
        rScale.set_name("red")
        rScale.set_range(0, 255)
        rScale.set_increments(1, 10)
        rScale.set_digits(0)
        rScale.set_size_request(130, 35)
        rScale.set_draw_value(False)
        rScale.connect("value-changed", self.on_changed)
        rHbox.pack_end(rScale)
        #green slider
        gHbox = gtk.HBox(True,0)
        gLabel = gtk.Label("Green: ")
        gHbox.pack_start(gLabel)
        gSpin.set_name("green")
        gSpin.connect("value-changed",self.spin_changed)
        gHbox.pack_start(gSpin)
        gScale.set_name("green")
        gScale.set_range(0, 255)
        gScale.set_increments(1, 10)
        gScale.set_digits(0)
        gScale.set_size_request(130, 35)
        gScale.set_draw_value(False)
        gScale.connect("value-changed", self.on_changed)
        gHbox.pack_end(gScale)
        #blue slider
        bHbox = gtk.HBox(True,0)
        bLabel = gtk.Label("Blue: ")
        bHbox.pack_start(bLabel)
        bSpin.set_name("blue")
        bSpin.connect("value-changed",self.spin_changed)
        bHbox.pack_start(bSpin)
        bScale.set_name("blue")
        bScale.set_range(0, 255)
        bScale.set_increments(1, 10)
        bScale.set_digits(0)
        bScale.set_size_request(130, 35)
        bScale.set_draw_value(False)
        bScale.connect("value-changed", self.on_changed)
        bHbox.pack_end(bScale)
        #speed slider
        sHbox = gtk.HBox(True,0)
        sLabel = gtk.Label("Speed: ")
        sHbox.pack_start(sLabel)
        sSpin.set_name("speed")
        sSpin.connect("value-changed",self.spin_changed)
        sHbox.pack_start(sSpin)
        sScale.set_name("speed")
        sScale.set_range(0,1500)
        sScale.set_increments(1, 5)
        sScale.set_digits(0)
        sScale.set_size_request(130, 35)
        sScale.set_draw_value(False)
        sScale.connect("value-changed", self.on_changed)
        sHbox.pack_end(sScale)
        #brightness slider
        brightHbox = gtk.HBox(True,0)
        brightLabel = gtk.Label("Brightness: ")
        brightHbox.pack_start(brightLabel)
        brightSpin.set_name("bright")
        brightSpin.connect("value-changed",self.spin_changed)
        brightHbox.pack_start(brightSpin)
        brightScale.set_name("bright")
        brightScale.set_range(0,255)
        brightScale.set_increments(1, 10)
        brightScale.set_digits(0)
        brightScale.set_size_request(130, 35)
        brightScale.set_draw_value(False)
        brightScale.connect("value-changed", self.on_changed)
        brightHbox.pack_end(brightScale)
        #function buttons
        boxTable = gtk.Table(1,4,False)
        fadeButton = gtk.Button("Fade")
        fadeButton.set_name("fade")
        fadeButton.connect("clicked", self.on_button)
        clearButton = gtk.Button("Clear")
        clearButton.set_name("clear")
        clearButton.connect("clicked", self.on_button)
        blinkButton = gtk.Button("Blink")
        blinkButton.set_name("blink")
        blinkButton.connect("clicked", self.on_button)
        jumpButton = gtk.Button("Jump")
        jumpButton.set_name("jump")
        jumpButton.connect("clicked", self.on_button)
        boxTable.attach(fadeButton, 0,1,0,1)
        boxTable.attach(blinkButton, 1,2,0,1)
        boxTable.attach(jumpButton, 2,3,0,1)
        boxTable.attach(clearButton, 3,4,0,1)
        #main app building
        vbox = gtk.VBox(True,0)
        vbox.pack_start(headerVbox)
        vbox.pack_start(serialTable)
        vbox.pack_start(buttonTable)
        vbox.pack_start(rHbox)
        vbox.pack_start(gHbox)
        vbox.pack_start(bHbox)
        vbox.pack_start(sHbox)
        #vbox.pack_start(brightHbox)
        vbox.pack_end(boxTable)
        self.add(vbox)
        self.connect("destroy", lambda w: gtk.main_quit())
        self.show_all()
    def on_changed(self, widget):
        """Slider callback: sync the matching spin box, store the channel
        value for the active strand, and send the new RGB triple over serial.

        NOTE(review): uses `self.ser`, which only exists after Connect --
        moving a color slider before connecting raises AttributeError.
        """
        val = widget.get_value()
        name = widget.get_name()
        if name == "speed":
            sSpin.set_value(int(val))
        #elif name == "bright":
            #brightSpin.set_value(int(val))
        elif strand == 1:
            if name == "red":
                rSpin.set_value(int(val))
                rgb1[0] = int(val)
            elif name == "green":
                gSpin.set_value(int(val))
                rgb1[1] = int(val)
            elif name == "blue":
                bSpin.set_value(int(val))
                rgb1[2] = int(val)
            self.ser.write(str(strand) + ',' + str(rgb1[0]) + ',' + str(rgb1[1]) + ',' + str(rgb1[2])+'\n')
        elif strand == 2:
            if name == "red":
                rSpin.set_value(int(val))
                rgb2[0] = int(val)
            elif name == "green":
                gSpin.set_value(int(val))
                rgb2[1] = int(val)
            elif name == "blue":
                bSpin.set_value(int(val))
                rgb2[2] = int(val)
            self.ser.write(str(strand) + ',' + str(rgb2[0]) + ',' + str(rgb2[1]) + ',' + str(rgb2[2])+'\n')
        elif strand == 3:
            if name == "red":
                rSpin.set_value(int(val))
                rgb3[0] = int(val)
            elif name == "green":
                gSpin.set_value(int(val))
                rgb3[1] = int(val)
            elif name == "blue":
                bSpin.set_value(int(val))
                rgb3[2] = int(val)
            self.ser.write(str(strand) + ',' + str(rgb3[0]) + ',' + str(rgb3[1]) + ',' + str(rgb3[2])+'\n')
    def spin_changed(self,widget):
        """Spin-box callback: push the typed value to the matching slider
        (which then triggers on_changed) and record speed/brightness."""
        val = widget.get_value_as_int()
        name = widget.get_name()
        global speed
        global brightness
        if name == "red":
            rScale.set_value(val)
        elif name == "green":
            gScale.set_value(val)
        elif name == "blue":
            bScale.set_value(val)
        elif name == "speed":
            speed = val
            sScale.set_value(val)
        elif name == "bright":
            brightness = val
            brightScale.set_value(val)
    def radio_buttons(self, button, name):
        """Strand selector callback: set the global `strand` and move the
        sliders to the stored values of the newly selected strand."""
        global rScale
        global gScale
        global bScale
        if button.get_active():
            global strand
            strand = int(name)
            if strand == 1:
                rScale.set_value(rgb1[0])
                gScale.set_value(rgb1[1])
                bScale.set_value(rgb1[2])
            elif strand == 2:
                rScale.set_value(rgb2[0])
                gScale.set_value(rgb2[1])
                bScale.set_value(rgb2[2])
            elif strand == 3:
                rScale.set_value(rgb3[0])
                gScale.set_value(rgb3[1])
                bScale.set_value(rgb3[2])
    def on_button(self, button):
        """Button callback dispatching on the widget name: connect to the
        selected port, or send the clear/fade/blink/jump command."""
        global rScale
        global gScale
        global bScale
        if button.get_name() == "connect":
            self.serialPort = portList.get_active_text()
            self.setup_serial()
        elif button.get_name() == "clear":
            # Clear resets the sliders too (see 5/16/14 worklog entry).
            self.ser.write(str(strand)+"c")
            rScale.set_value(0)
            gScale.set_value(0)
            bScale.set_value(0)
            sScale.set_value(0)
            #brightScale.set_value(0)
        elif button.get_name() == "fade":
            self.ser.write(str(strand)+"f,"+str(speed)+','+str(brightness)+'\n')
        elif button.get_name() == "blink":
            # Blink needs the current color of the selected strand.
            if strand == 1:
                self.ser.write(str(strand)+"b,"+str(speed)+','+str(rgb1[0])+','+str(rgb1[1])+','+str(rgb1[2])+'\n')
            elif strand == 2:
                self.ser.write(str(strand)+"b,"+str(speed)+','+str(rgb2[0])+','+str(rgb2[1])+','+str(rgb2[2])+'\n')
            elif strand == 3:
                self.ser.write(str(strand)+"b,"+str(speed)+','+str(rgb3[0])+','+str(rgb3[1])+','+str(rgb3[2])+'\n')
        elif button.get_name() == "jump":
            self.ser.write(str(strand)+"j,"+str(speed)+'\n')
    def setup_serial(self):
        """Open the selected serial port at 115200 baud and show a dialog
        confirming the connection."""
        self.ser = serial.Serial()
        self.ser.setPort(self.serialPort)
        self.ser.baudrate = 115200
        self.ser.open()
        if (self.ser.isOpen()):
            print "Serial Open"
            message = gtk.MessageDialog(parent=None,
                                        flags=gtk.DIALOG_MODAL,
                                        type=gtk.MESSAGE_INFO,
                                        buttons=gtk.BUTTONS_OK,
                                        message_format="Connected")
            message.set_position(gtk.WIN_POS_CENTER)
            message.run()
            message.destroy()
        else:
            print "Serial Closed"
    def serial_ports(self):
        """Lists serial ports

        :raises EnvironmentError:
            On unsupported or unknown platforms
        :returns:
            A list of available serial ports
        """
        if sys.platform.startswith('win'):
            ports = ['COM' + str(i + 1) for i in range(256)]
        elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
            # this is to exclude your current terminal "/dev/tty"
            ports = glob.glob('/dev/tty[A-Za-z]*')
        elif sys.platform.startswith('darwin'):
            ports = glob.glob('/dev/tty.*')
        else:
            raise EnvironmentError('Unsupported platform')
        result = []
        # Probe each candidate: only ports that open successfully are listed.
        for port in ports:
            try:
                s = serial.Serial(port)
                s.close()
                result.append(port)
            except (OSError, serial.SerialException):
                pass
        return result
# Build the window and hand control to the GTK main loop.
# NOTE(review): this runs on import as well; a __main__ guard may be preferable.
PyApp()
gtk.main()
| |
from __future__ import (print_function, unicode_literals, division,
absolute_import)
import argparse
import copy
import io
import json
import os
import pprint
import re
import sys
from dfs_sdk import get_api as _get_api
# Regexes used to scrape connection settings out of cinder.conf's
# [datera] section.
IPRE_STR = r'(\d{1,3}\.){3}\d{1,3}'
IPRE = re.compile(IPRE_STR)
SIP = re.compile(r'san_ip\s+?=\s+?(?P<san_ip>%s)' % IPRE_STR)
SLG = re.compile(r'san_login\s+?=\s+?(?P<san_login>.*)')
SPW = re.compile(r'san_password\s+?=\s+?(?P<san_password>.*)')
TNT = re.compile(r'datera_tenant_id\s+?=\s+?(?P<tenant_id>.*)')
LDP = re.compile(r'datera_ldap_server\s+?=\s+?(?P<ldap>.*)')
# API versions: the preferred version plus older fallbacks.
LATEST = "2.3"
FALLBACK = ["2", "2.1", "2.2"]
# Config-file search locations, in priority order (first match wins).
UNIX_HOME = os.path.join(os.path.expanduser('~'))
UNIX_CONFIG_HOME = os.path.join(UNIX_HOME, 'datera')
UNIX_SITE_CONFIG_HOME = '/etc/datera'
CONFIG_SEARCH_PATH = [os.getcwd(), UNIX_HOME, UNIX_CONFIG_HOME,
                      UNIX_SITE_CONFIG_HOME]
# Accepted config-file names; the last entry is the one written by
# --gen-config json.
CONFIGS = [".datera-config", "datera-config", ".datera-config.json",
           "datera-config.json"]
CINDER_ETC = "/etc/cinder/cinder.conf"
# Template JSON config written by _gen_config().
REPLACE_IP = "REPLACE_ME_WITH_REAL_IP_OR_HOSTNAME"
EXAMPLE_CONFIG = {"mgmt_ip": REPLACE_IP,
                  "username": "admin",
                  "password": "password",
                  "tenant": "/root",
                  "api_version": "2.3",
                  "ldap": ""}
# Shell RC file name and the environment variables it defines.
DATERA_RC = "datrc"
ENV_MGMT = "DAT_MGMT"
ENV_USER = "DAT_USER"
ENV_PASS = "DAT_PASS"
ENV_TENANT = "DAT_TENANT"
ENV_API = "DAT_API"
ENV_LDAP = "DAT_LDAP"
EXAMPLE_RC = """\
# DATERA ENVIRONMENT VARIABLES
{}=1.1.1.1
{}=admin
{}=password
{}=/root
{}=2.3
{}=
""".format(ENV_MGMT, ENV_USER, ENV_PASS, ENV_TENANT, ENV_API, ENV_LDAP)
ENV_HELP = {ENV_MGMT: "Datera management IP address or hostname",
            ENV_USER: "Datera account username",
            ENV_PASS: "Datera account password",
            ENV_TENANT: "Datera tenant ID. eg: SE-OpenStack",
            ENV_API: "Datera API version. eg: 2.3",
            ENV_LDAP: "Datera account LDAP server"}
# Module-level state: merged configuration, parsed CLI args, verbosity flag.
_CONFIG = {}
_ARGS = None
VERBOSE = False
def _print_envs():
    """Print the supported Datera environment variables with descriptions."""
    print()
    print("DATERA ENVIRONMENT VARIABLES")
    print("============================")
    # Right-align the variable names on the longest one.
    longest = max([0] + [len(key) for key in ENV_HELP])
    for name, description in sorted(ENV_HELP.items()):
        padding = " " * (longest - len(name))
        print("{}{} -- {}".format(padding, name, description))
    print()
def _gen_config():
    """Write an example configuration file into the current directory.

    ``_ARGS.gen_config`` selects the flavor: ``"json"`` writes a JSON config
    template, ``"shell"`` writes an RC file of environment variables.
    Exits with status 1 if a config/RC file already exists here.
    """
    flavor = _ARGS.gen_config
    if flavor == "json":
        for candidate in CONFIGS:
            if os.path.exists(candidate):
                print("Config file already exists in current directory. Please "
                      "move or remove it before generating a new one")
                sys.exit(1)
        with io.open(CONFIGS[-1], 'w+', encoding='utf-8') as f:
            # Python2 Compatibility: io.open wants unicode text there.
            try:
                f.write(unicode(json.dumps(
                    EXAMPLE_CONFIG, ensure_ascii=False, indent=4)))
            except NameError:
                # Python3: no `unicode` builtin, plain dump works.
                json.dump(EXAMPLE_CONFIG, f, indent=4)
    elif flavor == "shell":
        if os.path.exists(DATERA_RC):
            print("RC file already exists in current directory. Please move "
                  "or remove it before generating a new one")
            sys.exit(1)
        with io.open(DATERA_RC, 'w+') as f:
            f.write(EXAMPLE_RC)
def _search_config():
    """Return the first config file found on the search path, else None."""
    candidates = (os.path.join(directory, name)
                  for directory in CONFIG_SEARCH_PATH
                  for name in CONFIGS)
    for fpath in candidates:
        if os.path.exists(fpath):
            return fpath
def _check_config(config_file):
    """Raise EnvironmentError if any required configuration key is unset.

    :param config_file: Path of the config file used (for the error message).
    """
    missing = [key for key in EXAMPLE_CONFIG if key not in _CONFIG]
    if missing:
        raise EnvironmentError(
            "All config options must be specified by config file, environment "
            "variable or CLI argument. Missing config keys: {}, config_file: "
            "{}".format(missing, config_file))
def _defaults():
    """Fill in fallback values for optional settings left empty or unset."""
    fallbacks = (("tenant", "/root"),
                 ("api_version", LATEST),
                 ("ldap", ""))
    for key, value in fallbacks:
        # Falsy covers both "missing" and "present but empty".
        if not _CONFIG.get(key):
            _CONFIG[key] = value
def _read_config(config_file):
    """Populate the module-level _CONFIG dict and return the file used.

    Resolution order for the file: explicit ``config_file`` argument, the
    ``--config`` CLI flag, the config search path, and finally the
    ``[datera]`` section of cinder.conf.  Environment variables, then CLI
    arguments, then defaults are layered on top, and the result is validated.

    :param config_file: Optional explicit path to a JSON config file.
    :return: The path of the config file actually read, or the falsy value
             left when the Cinder fallback / pure env+CLI path was taken.
    """
    global _CONFIG
    if not config_file:
        if _ARGS and _ARGS.config:
            config_file = _ARGS.config
        else:
            config_file = _search_config()
            # Nothing on the search path: fall back to parsing cinder.conf
            # (may yield None when that file does not exist).
            if not config_file:
                _CONFIG = _read_cinder_conf()
    if config_file:
        with io.open(config_file) as f:
            _CONFIG = json.loads(f.read())
    if _CONFIG is None:
        _CONFIG = {}
    # Override precedence: env vars first, then CLI flags, then defaults
    # for anything still unset; finally validate completeness.
    _env_override()
    _cli_override()
    _defaults()
    _check_config(config_file)
    return config_file
def _read_cinder_conf():
    """Extract Datera credentials from the [datera] section of cinder.conf.

    Returns a config dict (mgmt_ip/username/password/tenant/api_version/
    ldap) or ``None`` when ``CINDER_ETC`` does not exist.
    """
    if not os.path.exists(CINDER_ETC):
        return None
    data = None
    found_index = 0
    # None == "slice to end of file" when no later section header exists.
    found_last_index = None
    with io.open(CINDER_ETC) as f:
        for index, line in enumerate(f):
            if '[datera]' == line.strip().lower():
                found_index = index
                break
        # This loop continues on the same file iterator, but enumerate
        # restarts at 0 on the line *after* '[datera]'; the real file
        # index of a match is therefore index + found_index + 1.
        # (The previous code omitted the +1, silently dropping the last
        # option line of the [datera] section.)
        for index, line in enumerate(f):
            if '[' in line and ']' in line:
                found_last_index = index + found_index + 1
                break
    with io.open(CINDER_ETC) as f:
        # Slice runs from '[datera]' up to (not including) the next
        # '[section]' header, or to end of file if there is none.
        data = "".join(f.readlines()[
            found_index:found_last_index])
    san_ip = SIP.search(data).group('san_ip')
    san_login = SLG.search(data).group('san_login')
    san_password = SPW.search(data).group('san_password')
    tenant = TNT.search(data)
    ldap = LDP.search(data)
    if tenant:
        tenant = tenant.group('tenant_id')
    else:
        tenant = "/root"
    if ldap:
        ldap = ldap.group('ldap')
    else:
        ldap = ""
    return {"mgmt_ip": san_ip,
            "username": san_login,
            "password": san_password,
            "tenant": tenant,
            "api_version": LATEST,
            "ldap": ldap}
def _cli_override():
    """Copy any truthy CLI argument values over the current config."""
    if _ARGS is None:
        return
    overrides = (
        ("mgmt_ip", _ARGS.hostname),
        ("username", _ARGS.username),
        ("password", _ARGS.password),
        ("tenant", _ARGS.tenant),
        ("api_version", _ARGS.api_version),
        ("ldap", _ARGS.ldap),
    )
    for key, value in overrides:
        if value:
            _CONFIG[key] = value
def _env_override():
    """Copy any set environment variables over the current config."""
    env_to_key = (
        (ENV_MGMT, "mgmt_ip"),
        (ENV_USER, "username"),
        (ENV_PASS, "password"),
        (ENV_TENANT, "tenant"),
        (ENV_API, "api_version"),
        (ENV_LDAP, "ldap"),
    )
    for env, key in env_to_key:
        if env in os.environ:
            _CONFIG[key] = os.environ[env]
def vprint(*args, **kwargs):
    """print() wrapper that is silent unless the module VERBOSE flag is set.

    ``global`` is only required for assignment; reading the module-level
    VERBOSE flag needs no declaration, so the redundant one was removed.
    """
    if VERBOSE:
        print(*args, **kwargs)
def get_api(**kwargs):
    """Build and return a configured Datera API client.

    Loads (and caches) the UDC configuration, normalizes the tenant and
    API version strings, and delegates to ``_get_api``.  Pass
    ``reset_config=True`` to discard any previously loaded config and
    ``config=<path>`` to load from an explicit file; remaining kwargs
    are forwarded to ``_get_api``.

    Raises:
        ValueError: if mgmt_ip still equals the placeholder value from a
            generated config file.
    """
    global _CONFIG
    if kwargs.pop('reset_config', False):
        _CONFIG = None
    udc_file = _read_config(kwargs.pop('config', None))
    tenant = _CONFIG["tenant"]
    # Bare tenant names get rooted under /root; 'all' and names already
    # containing 'root' pass through unchanged.
    if tenant and "root" not in tenant and tenant != "all":
        tenant = "/root/{}".format(tenant)
    if not tenant:
        tenant = "/root"
    if not _CONFIG["api_version"]:
        version = "v{}".format(LATEST)
    else:
        # Accept either '2.2' or 'v2.2' style in the config.
        version = "v{}".format(_CONFIG["api_version"].strip("v"))
    # Check that mgmt_ip isn't the default
    if _CONFIG["mgmt_ip"] == REPLACE_IP:
        if not udc_file:
            udc_file = "none found"
        raise ValueError("You must edit your UDC config file [{}] and provide "
                         "at least the mgmt_ip of the Datera box you want to "
                         "connect to".format(udc_file))
    return _get_api(_CONFIG["mgmt_ip"],
                    username=_CONFIG["username"],
                    password=_CONFIG["password"],
                    version=version,
                    tenant=tenant,
                    remote_server=_CONFIG["ldap"],
                    **kwargs)
def print_config():
    """Pretty-print the active configuration with the password masked."""
    masked = copy.deepcopy(_CONFIG)
    masked["password"] = "******"
    pprint.pprint(masked)
def get_config(config_file=None, **kwargs):
    """Return a deep copy of the (lazily loaded) configuration dict.

    Loads the configuration on first use, or again when
    ``reset_config`` is truthy in ``kwargs``.  The deep copy keeps
    callers from mutating module state.
    """
    global _CONFIG
    if kwargs.get('reset_config'):
        _CONFIG = None
    if not _CONFIG:
        _read_config(config_file)
    return copy.deepcopy(_CONFIG)
def get_argparser(add_help=True):
    """Build the shared Datera CLI argument parser.

    Parses the *known* arguments immediately (so ``--print-envs`` and
    ``--gen-config`` can act and exit before the caller's own parsing),
    records the result in the module globals ``_ARGS``/``VERBOSE``, and
    returns a fresh parser inheriting these options so callers can add
    their own on top.

    :param add_help: forwarded to the returned parser; the internal
        parent parser always disables help so -h is handled by callers.
    """
    global _ARGS, VERBOSE
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument("--api-version",
                        help="Datera API version to use (default={})".format(
                            LATEST))
    parser.add_argument("--hostname", help="Hostname or IP Address of Datera "
                                           "backend")
    parser.add_argument("--username", help="Username for Datera account")
    parser.add_argument("--password", help="Password for Datera account")
    parser.add_argument("--tenant",
                        help="Tenant Name/ID to search under,"
                             " use 'all' for all tenants")
    parser.add_argument("--ldap", help="Datera LDAP authentication server")
    parser.add_argument("--config", help="Config file location")
    parser.add_argument("--print-envs", action="store_true",
                        help="Print supported environment variables")
    parser.add_argument("--gen-config", choices=["json", "shell"],
                        help="Generate example config")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Enable verbose output")
    # parse_known_args: ignore options the caller's parser will handle.
    args, _ = parser.parse_known_args()
    _ARGS = args
    VERBOSE = args.verbose
    # Both of these actions are terminal: print/generate, then exit.
    if args.print_envs:
        _print_envs()
        sys.exit(0)
    if args.gen_config:
        _gen_config()
        sys.exit(0)
    return argparse.ArgumentParser(add_help=add_help, parents=[parser])
| |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.names.client}.
"""
import sys
from zope.interface.verify import verifyObject
from twisted.python.compat import set
from twisted.python import failure
from twisted.python.runtime import platform
from twisted.internet import defer
from twisted.internet.error import CannotListenError
from twisted.internet.interfaces import IResolver
from twisted.internet.test.modulehelpers import NoReactor
from twisted.internet.task import Clock
from twisted.names import error, client, dns, hosts, cache
from twisted.names.error import DNSQueryTimeoutError
from twisted.names.common import ResolverBase
from twisted.names.test.test_hosts import GoodTempPathMixin
from twisted.trial import unittest
# Skip marker applied to test classes that cannot yet run on Windows;
# None on other platforms (trial treats a falsy skip as "do not skip").
windowsSkip = (
    "These tests need more work before they'll work on Windows."
    if platform.isWindows() else None)
class AlternateReactor(NoReactor):
    """
    A context manager which temporarily installs a different object as the global reactor.
    """
    def __init__(self, reactor):
        """
        @param reactor: Any object to install as the global reactor.
        """
        NoReactor.__init__(self)
        self.alternate = reactor
    def __enter__(self):
        # NoReactor first removes any installed reactor and remembers it
        # for restoration on exit; then we install the replacement both
        # as the module attribute and in sys.modules, since code may
        # reach the reactor through either path.
        NoReactor.__enter__(self)
        import twisted.internet
        twisted.internet.reactor = self.alternate
        sys.modules['twisted.internet.reactor'] = self.alternate
class FakeResolver(ResolverBase):

    def _lookup(self, name, cls, qtype, timeout):
        """
        Return a canned response for any query.

        The getHostByName test issues an ALL_RECORDS lookup but expects
        an A record back, so that particular name is special-cased here
        to accommodate it.
        """
        if name == b'getHostByNameTest':
            header = dns.RRHeader(
                name=name, type=dns.A, cls=cls, ttl=60,
                payload=dns.Record_A(address='127.0.0.1', ttl=60))
        else:
            header = dns.RRHeader(name=name, type=qtype, cls=cls, ttl=60)
        # (answers, authority, additional) with empty auth/additional.
        return defer.succeed(([header], [], []))
class StubPort(object):
    """
    A partial implementation of L{IListeningPort} which only keeps track of
    whether it has been stopped.

    @ivar disconnected: C{False} until C{stopListening} is called,
        C{True} afterwards.
    """
    # Class-level default; flipped to an instance attribute on stop.
    disconnected = False

    def stopListening(self):
        # A real port would tear down the socket; we just record the call.
        self.disconnected = True
class StubDNSDatagramProtocol(object):
    """
    L{dns.DNSDatagramProtocol}-alike.

    @ivar queries: A C{list} of tuples of the arguments passed to C{query}
        together with the L{defer.Deferred} returned from each call.
    """

    def __init__(self):
        self.queries = []
        self.transport = StubPort()

    def query(self, address, queries, timeout=10, id=None):
        """
        Record the given arguments and return a Deferred which this code
        will never fire; the test drives it by hand.
        """
        pending = defer.Deferred()
        self.queries.append((address, queries, timeout, id, pending))
        return pending
class GetResolverTests(unittest.TestCase):
    """
    Tests for L{client.getResolver}.
    """
    if windowsSkip:
        skip = windowsSkip

    def test_interface(self):
        """
        L{client.getResolver} returns an object providing L{IResolver}.
        """
        with AlternateReactor(Clock()):
            resolver = client.getResolver()
        self.assertTrue(verifyObject(IResolver, resolver))

    def test_idempotent(self):
        """
        Multiple calls to L{client.getResolver} return the same L{IResolver}
        implementation.
        """
        with AlternateReactor(Clock()):
            first = client.getResolver()
            second = client.getResolver()
        self.assertIdentical(first, second)
class CreateResolverTests(unittest.TestCase, GoodTempPathMixin):
    """
    Tests for L{client.createResolver}.
    """
    if windowsSkip:
        skip = windowsSkip
    def _hostsTest(self, resolver, filename):
        """
        Verify that C{resolver} contains exactly one L{hosts.Resolver}
        configured with C{filename}.
        """
        res = [r for r in resolver.resolvers if isinstance(r, hosts.Resolver)]
        self.assertEqual(1, len(res))
        self.assertEqual(res[0].file, filename)
    def test_defaultHosts(self):
        """
        L{client.createResolver} returns a L{resolve.ResolverChain} including a
        L{hosts.Resolver} using I{/etc/hosts} if no alternate hosts file is
        specified.
        """
        with AlternateReactor(Clock()):
            # AlternateReactor already installs the Clock as the global
            # reactor; the previous manual sys.modules assignment here
            # installed a *second*, different Clock and has been removed.
            resolver = client.createResolver()
        self._hostsTest(resolver, b"/etc/hosts")
    def test_overrideHosts(self):
        """
        The I{hosts} parameter to L{client.createResolver} overrides the hosts
        file used by the L{hosts.Resolver} in the L{resolve.ResolverChain} it
        returns.
        """
        with AlternateReactor(Clock()):
            resolver = client.createResolver(hosts=b"/foo/bar")
        self._hostsTest(resolver, b"/foo/bar")
    def _resolvConfTest(self, resolver, filename):
        """
        Verify that C{resolver} has a L{client.Resolver} with a configuration
        filename set to C{filename}.
        """
        res = [r for r in resolver.resolvers if isinstance(r, client.Resolver)]
        self.assertEqual(1, len(res))
        self.assertEqual(res[0].resolv, filename)
    def test_reactor(self):
        """
        The L{client.Resolver} included in the L{resolve.ResolverChain} returned
        by L{client.createResolver} uses the global reactor.
        """
        reactor = Clock()
        with AlternateReactor(reactor):
            resolver = client.createResolver()
        res = [r for r in resolver.resolvers if isinstance(r, client.Resolver)]
        self.assertEqual(1, len(res))
        self.assertIdentical(reactor, res[0]._reactor)
    def test_defaultResolvConf(self):
        """
        L{client.createResolver} returns a L{resolve.ResolverChain} including a
        L{client.Resolver} using I{/etc/resolv.conf} if no alternate resolver
        configuration file is specified.
        """
        with AlternateReactor(Clock()):
            resolver = client.createResolver()
        self._resolvConfTest(resolver, b"/etc/resolv.conf")
    def test_overrideResolvConf(self):
        """
        The I{resolvconf} parameter to L{client.createResolver} overrides the
        resolver configuration file used by the L{client.Resolver} in the
        L{resolve.ResolverChain} it returns.
        """
        with AlternateReactor(Clock()):
            resolver = client.createResolver(resolvconf=b"/foo/bar")
        self._resolvConfTest(resolver, b"/foo/bar")
    def test_defaultServers(self):
        """
        If no servers are given, addresses are taken from the file given by the
        I{resolvconf} parameter to L{client.createResolver}.
        """
        resolvconf = self.path()
        resolvconf.setContent(b"nameserver 127.1.2.3\n")
        with AlternateReactor(Clock()):
            resolver = client.createResolver(resolvconf=resolvconf.path)
        res = [r for r in resolver.resolvers if isinstance(r, client.Resolver)]
        self.assertEqual(1, len(res))
        # Static server list stays empty; the parsed address is dynamic.
        self.assertEqual([], res[0].servers)
        self.assertEqual([("127.1.2.3", 53)], res[0].dynServers)
    def test_overrideServers(self):
        """
        Servers passed to L{client.createResolver} are used in addition to any
        found in the file given by the I{resolvconf} parameter.
        """
        resolvconf = self.path()
        resolvconf.setContent(b"nameserver 127.1.2.3\n")
        with AlternateReactor(Clock()):
            resolver = client.createResolver(
                servers=[("127.3.2.1", 53)], resolvconf=resolvconf.path)
        res = [r for r in resolver.resolvers if isinstance(r, client.Resolver)]
        self.assertEqual(1, len(res))
        self.assertEqual([("127.3.2.1", 53)], res[0].servers)
        self.assertEqual([("127.1.2.3", 53)], res[0].dynServers)
    def test_cache(self):
        """
        L{client.createResolver} returns a L{resolve.ResolverChain} including a
        L{cache.CacheResolver}.
        """
        with AlternateReactor(Clock()):
            resolver = client.createResolver()
        res = [r for r in resolver.resolvers if isinstance(r, cache.CacheResolver)]
        self.assertEqual(1, len(res))
class ResolverTests(unittest.TestCase):
    """
    Tests for L{client.Resolver}.
    """
    def test_noServers(self):
        """
        L{client.Resolver} raises L{ValueError} if constructed with neither
        servers nor a nameserver configuration file.
        """
        self.assertRaises(ValueError, client.Resolver)
    def test_missingConfiguration(self):
        """
        A missing nameserver configuration file results in no server information
        being loaded from it (ie, not an exception) and a default server being
        provided.
        """
        # self.mktemp() names a path that does not exist yet.
        resolver = client.Resolver(resolv=self.mktemp(), reactor=Clock())
        self.assertEqual([("127.0.0.1", 53)], resolver.dynServers)
    def test_domainEmptyArgument(self):
        """
        L{client.Resolver.parseConfig} treats a I{domain} line without an
        argument as indicating a domain of C{b""}.
        """
        resolver = client.Resolver(servers=[("127.0.0.1", 53)])
        resolver.parseConfig([b"domain\n"])
        self.assertEqual(b"", resolver.domain)
    def test_searchEmptyArgument(self):
        """
        L{client.Resolver.parseConfig} treats a I{search} line without an
        argument as indicating an empty search suffix.
        """
        resolver = client.Resolver(servers=[("127.0.0.1", 53)])
        resolver.parseConfig([b"search\n"])
        self.assertEqual([], resolver.search)
    def test_datagramQueryServerOrder(self):
        """
        L{client.Resolver.queryUDP} should issue queries to its
        L{dns.DNSDatagramProtocol} with server addresses taken from its own
        C{servers} and C{dynServers} lists, proceeding through them in order
        as L{DNSQueryTimeoutError}s occur.
        """
        protocol = StubDNSDatagramProtocol()
        # Sentinels stand in for addresses; only identity matters here.
        servers = [object(), object()]
        dynServers = [object(), object()]
        resolver = client.Resolver(servers=servers)
        resolver.dynServers = dynServers
        resolver._connectedProtocol = lambda: protocol
        expectedResult = object()
        queryResult = resolver.queryUDP(None)
        queryResult.addCallback(self.assertEqual, expectedResult)
        # Time out each attempt in turn and watch the next server be tried:
        # static servers first, then the dynamic ones.
        self.assertEqual(len(protocol.queries), 1)
        self.assertIdentical(protocol.queries[0][0], servers[0])
        protocol.queries[0][-1].errback(DNSQueryTimeoutError(0))
        self.assertEqual(len(protocol.queries), 2)
        self.assertIdentical(protocol.queries[1][0], servers[1])
        protocol.queries[1][-1].errback(DNSQueryTimeoutError(1))
        self.assertEqual(len(protocol.queries), 3)
        self.assertIdentical(protocol.queries[2][0], dynServers[0])
        protocol.queries[2][-1].errback(DNSQueryTimeoutError(2))
        self.assertEqual(len(protocol.queries), 4)
        self.assertIdentical(protocol.queries[3][0], dynServers[1])
        protocol.queries[3][-1].callback(expectedResult)
        return queryResult
    def test_singleConcurrentRequest(self):
        """
        L{client.Resolver.query} only issues one request at a time per query.
        Subsequent requests made before responses to prior ones are received
        are queued and given the same response as is given to the first one.
        """
        protocol = StubDNSDatagramProtocol()
        resolver = client.Resolver(servers=[('example.com', 53)])
        resolver._connectedProtocol = lambda: protocol
        queries = protocol.queries
        query = dns.Query(b'foo.example.com', dns.A, dns.IN)
        # The first query should be passed to the underlying protocol.
        firstResult = resolver.query(query)
        self.assertEqual(len(queries), 1)
        # The same query again should not be passed to the underlying protocol.
        secondResult = resolver.query(query)
        self.assertEqual(len(queries), 1)
        # The response to the first query should be sent in response to both
        # queries.
        answer = object()
        response = dns.Message()
        response.answers.append(answer)
        queries.pop()[-1].callback(response)
        d = defer.gatherResults([firstResult, secondResult])
        def cbFinished(responses):
            firstResponse, secondResponse = responses
            self.assertEqual(firstResponse, ([answer], [], []))
            self.assertEqual(secondResponse, ([answer], [], []))
        d.addCallback(cbFinished)
        return d
    def test_multipleConcurrentRequests(self):
        """
        L{client.Resolver.query} issues a request for each different concurrent
        query.
        """
        protocol = StubDNSDatagramProtocol()
        resolver = client.Resolver(servers=[('example.com', 53)])
        resolver._connectedProtocol = lambda: protocol
        queries = protocol.queries
        # The first query should be passed to the underlying protocol.
        firstQuery = dns.Query(b'foo.example.com', dns.A)
        resolver.query(firstQuery)
        self.assertEqual(len(queries), 1)
        # A query for a different name is also passed to the underlying
        # protocol.
        secondQuery = dns.Query(b'bar.example.com', dns.A)
        resolver.query(secondQuery)
        self.assertEqual(len(queries), 2)
        # A query for a different type is also passed to the underlying
        # protocol.
        thirdQuery = dns.Query(b'foo.example.com', dns.A6)
        resolver.query(thirdQuery)
        self.assertEqual(len(queries), 3)
    def test_multipleSequentialRequests(self):
        """
        After a response is received to a query issued with
        L{client.Resolver.query}, another query with the same parameters
        results in a new network request.
        """
        protocol = StubDNSDatagramProtocol()
        resolver = client.Resolver(servers=[('example.com', 53)])
        resolver._connectedProtocol = lambda: protocol
        queries = protocol.queries
        query = dns.Query(b'foo.example.com', dns.A)
        # The first query should be passed to the underlying protocol.
        resolver.query(query)
        self.assertEqual(len(queries), 1)
        # Deliver the response.  (pop empties the list, so the count below
        # reflects only the repeated query.)
        queries.pop()[-1].callback(dns.Message())
        # Repeating the first query should touch the protocol again.
        resolver.query(query)
        self.assertEqual(len(queries), 1)
    def test_multipleConcurrentFailure(self):
        """
        If the result of a request is an error response, the Deferreds for all
        concurrently issued requests associated with that result fire with the
        L{Failure}.
        """
        protocol = StubDNSDatagramProtocol()
        resolver = client.Resolver(servers=[('example.com', 53)])
        resolver._connectedProtocol = lambda: protocol
        queries = protocol.queries
        query = dns.Query(b'foo.example.com', dns.A)
        firstResult = resolver.query(query)
        secondResult = resolver.query(query)
        class ExpectedException(Exception):
            pass
        queries.pop()[-1].errback(failure.Failure(ExpectedException()))
        return defer.gatherResults([
            self.assertFailure(firstResult, ExpectedException),
            self.assertFailure(secondResult, ExpectedException)])
    def test_connectedProtocol(self):
        """
        L{client.Resolver._connectedProtocol} returns a new
        L{DNSDatagramProtocol} connected to a new address with a
        cryptographically secure random port number.
        """
        resolver = client.Resolver(servers=[('example.com', 53)])
        firstProto = resolver._connectedProtocol()
        secondProto = resolver._connectedProtocol()
        self.assertNotIdentical(firstProto.transport, None)
        self.assertNotIdentical(secondProto.transport, None)
        # Distinct ports imply each call bound its own fresh socket.
        self.assertNotEqual(
            firstProto.transport.getHost().port,
            secondProto.transport.getHost().port)
        return defer.gatherResults([
            defer.maybeDeferred(firstProto.transport.stopListening),
            defer.maybeDeferred(secondProto.transport.stopListening)])
    def test_differentProtocol(self):
        """
        L{client.Resolver._connectedProtocol} is called once each time a UDP
        request needs to be issued and the resulting protocol instance is used
        for that request.
        """
        resolver = client.Resolver(servers=[('example.com', 53)])
        protocols = []
        class FakeProtocol(object):
            def __init__(self):
                self.transport = StubPort()
            def query(self, address, query, timeout=10, id=None):
                protocols.append(self)
                return defer.succeed(dns.Message())
        resolver._connectedProtocol = FakeProtocol
        resolver.query(dns.Query(b'foo.example.com'))
        resolver.query(dns.Query(b'bar.example.com'))
        # Two queries, two distinct protocol instances.
        self.assertEqual(len(set(protocols)), 2)
    def test_disallowedPort(self):
        """
        If a port number is initially selected which cannot be bound, the
        L{CannotListenError} is handled and another port number is attempted.
        """
        ports = []
        class FakeReactor(object):
            # Fail the first bind attempt to force a retry on a new port.
            def listenUDP(self, port, *args):
                ports.append(port)
                if len(ports) == 1:
                    raise CannotListenError(None, port, None)
        resolver = client.Resolver(servers=[('example.com', 53)])
        resolver._reactor = FakeReactor()
        resolver._connectedProtocol()
        self.assertEqual(len(set(ports)), 2)
    def test_differentProtocolAfterTimeout(self):
        """
        When a query issued by L{client.Resolver.query} times out, the retry
        uses a new protocol instance.
        """
        resolver = client.Resolver(servers=[('example.com', 53)])
        protocols = []
        # First attempt times out immediately; the retry succeeds.
        results = [defer.fail(failure.Failure(DNSQueryTimeoutError(None))),
                   defer.succeed(dns.Message())]
        class FakeProtocol(object):
            def __init__(self):
                self.transport = StubPort()
            def query(self, address, query, timeout=10, id=None):
                protocols.append(self)
                return results.pop(0)
        resolver._connectedProtocol = FakeProtocol
        resolver.query(dns.Query(b'foo.example.com'))
        self.assertEqual(len(set(protocols)), 2)
    def test_protocolShutDown(self):
        """
        After the L{Deferred} returned by L{DNSDatagramProtocol.query} is
        called back, the L{DNSDatagramProtocol} is disconnected from its
        transport.
        """
        resolver = client.Resolver(servers=[('example.com', 53)])
        protocols = []
        result = defer.Deferred()
        class FakeProtocol(object):
            def __init__(self):
                self.transport = StubPort()
            def query(self, address, query, timeout=10, id=None):
                protocols.append(self)
                return result
        resolver._connectedProtocol = FakeProtocol
        resolver.query(dns.Query(b'foo.example.com'))
        self.assertFalse(protocols[0].transport.disconnected)
        result.callback(dns.Message())
        self.assertTrue(protocols[0].transport.disconnected)
    def test_protocolShutDownAfterTimeout(self):
        """
        The L{DNSDatagramProtocol} created when an interim timeout occurs is
        also disconnected from its transport after the Deferred returned by its
        query method completes.
        """
        resolver = client.Resolver(servers=[('example.com', 53)])
        protocols = []
        result = defer.Deferred()
        # The first attempt times out; the second (protocols[1]) is the
        # one whose shutdown is verified.
        results = [defer.fail(failure.Failure(DNSQueryTimeoutError(None))),
                   result]
        class FakeProtocol(object):
            def __init__(self):
                self.transport = StubPort()
            def query(self, address, query, timeout=10, id=None):
                protocols.append(self)
                return results.pop(0)
        resolver._connectedProtocol = FakeProtocol
        resolver.query(dns.Query(b'foo.example.com'))
        self.assertFalse(protocols[1].transport.disconnected)
        result.callback(dns.Message())
        self.assertTrue(protocols[1].transport.disconnected)
    def test_protocolShutDownAfterFailure(self):
        """
        If the L{Deferred} returned by L{DNSDatagramProtocol.query} fires with
        a failure, the L{DNSDatagramProtocol} is still disconnected from its
        transport.
        """
        class ExpectedException(Exception):
            pass
        resolver = client.Resolver(servers=[('example.com', 53)])
        protocols = []
        result = defer.Deferred()
        class FakeProtocol(object):
            def __init__(self):
                self.transport = StubPort()
            def query(self, address, query, timeout=10, id=None):
                protocols.append(self)
                return result
        resolver._connectedProtocol = FakeProtocol
        queryResult = resolver.query(dns.Query(b'foo.example.com'))
        self.assertFalse(protocols[0].transport.disconnected)
        result.errback(failure.Failure(ExpectedException()))
        self.assertTrue(protocols[0].transport.disconnected)
        return self.assertFailure(queryResult, ExpectedException)
    def test_tcpDisconnectRemovesFromConnections(self):
        """
        When a TCP DNS protocol associated with a Resolver disconnects, it is
        removed from the Resolver's connection list.
        """
        resolver = client.Resolver(servers=[('example.com', 53)])
        protocol = resolver.factory.buildProtocol(None)
        protocol.makeConnection(None)
        self.assertIn(protocol, resolver.connections)
        # Disconnecting should remove the protocol from the connection list:
        protocol.connectionLost(None)
        self.assertNotIn(protocol, resolver.connections)
class ClientTestCase(unittest.TestCase):
    """
    Tests for the module-level lookup helpers in L{twisted.names.client},
    driven against a L{FakeResolver} that echoes the requested query type.
    """

    def setUp(self):
        """
        Replace the resolver with a FakeResolver
        """
        client.theResolver = FakeResolver()
        self.hostname = b'example.com'
        self.hostnameForGetHostByName = b'getHostByNameTest'

    def tearDown(self):
        """
        By setting the resolver to None, it will be recreated next time a name
        lookup is done.
        """
        client.theResolver = None

    def checkResult(self, results, qtype):
        """
        Verify that the first answer matches the queried hostname and the
        expected query type.
        """
        answers, authority, additional = results
        record = answers[0]
        self.assertEqual(record.name.name, self.hostname)
        self.assertEqual(record.type, qtype)

    def checkGetHostByName(self, result):
        """
        Test that the getHostByName query returns the 127.0.0.1 address.
        """
        self.assertEqual(result, '127.0.0.1')

    def test_getHostByName(self):
        """
        do a getHostByName of a value that should return 127.0.0.1.
        """
        d = client.getHostByName(self.hostnameForGetHostByName)
        d.addCallback(self.checkGetHostByName)
        return d

    def _lookupTest(self, lookup, qtype):
        """
        Issue C{lookup} for C{self.hostname} and verify via C{checkResult}
        that the answer record carries the expected query type.
        """
        d = lookup(self.hostname)
        d.addCallback(self.checkResult, qtype)
        return d

    def test_lookupAddress(self):
        """
        Do a lookup and test that the resolver will issue the correct type of
        query type. We do this by checking that FakeResolver returns a result
        record with the same query type as what we issued.
        """
        return self._lookupTest(client.lookupAddress, dns.A)

    def test_lookupIPV6Address(self):
        """
        See L{test_lookupAddress}
        """
        return self._lookupTest(client.lookupIPV6Address, dns.AAAA)

    def test_lookupAddress6(self):
        """
        See L{test_lookupAddress}
        """
        return self._lookupTest(client.lookupAddress6, dns.A6)

    def test_lookupNameservers(self):
        """
        See L{test_lookupAddress}
        """
        return self._lookupTest(client.lookupNameservers, dns.NS)

    def test_lookupCanonicalName(self):
        """
        See L{test_lookupAddress}
        """
        return self._lookupTest(client.lookupCanonicalName, dns.CNAME)

    def test_lookupAuthority(self):
        """
        See L{test_lookupAddress}
        """
        return self._lookupTest(client.lookupAuthority, dns.SOA)

    def test_lookupMailBox(self):
        """
        See L{test_lookupAddress}
        """
        return self._lookupTest(client.lookupMailBox, dns.MB)

    def test_lookupMailGroup(self):
        """
        See L{test_lookupAddress}
        """
        return self._lookupTest(client.lookupMailGroup, dns.MG)

    def test_lookupMailRename(self):
        """
        See L{test_lookupAddress}
        """
        return self._lookupTest(client.lookupMailRename, dns.MR)

    def test_lookupNull(self):
        """
        See L{test_lookupAddress}
        """
        return self._lookupTest(client.lookupNull, dns.NULL)

    def test_lookupWellKnownServices(self):
        """
        See L{test_lookupAddress}
        """
        return self._lookupTest(client.lookupWellKnownServices, dns.WKS)

    def test_lookupPointer(self):
        """
        See L{test_lookupAddress}
        """
        return self._lookupTest(client.lookupPointer, dns.PTR)

    def test_lookupHostInfo(self):
        """
        See L{test_lookupAddress}
        """
        return self._lookupTest(client.lookupHostInfo, dns.HINFO)

    def test_lookupMailboxInfo(self):
        """
        See L{test_lookupAddress}
        """
        return self._lookupTest(client.lookupMailboxInfo, dns.MINFO)

    def test_lookupMailExchange(self):
        """
        See L{test_lookupAddress}
        """
        return self._lookupTest(client.lookupMailExchange, dns.MX)

    def test_lookupText(self):
        """
        See L{test_lookupAddress}
        """
        return self._lookupTest(client.lookupText, dns.TXT)

    def test_lookupSenderPolicy(self):
        """
        See L{test_lookupAddress}
        """
        return self._lookupTest(client.lookupSenderPolicy, dns.SPF)

    def test_lookupResponsibility(self):
        """
        See L{test_lookupAddress}
        """
        return self._lookupTest(client.lookupResponsibility, dns.RP)

    def test_lookupAFSDatabase(self):
        """
        See L{test_lookupAddress}
        """
        return self._lookupTest(client.lookupAFSDatabase, dns.AFSDB)

    def test_lookupService(self):
        """
        See L{test_lookupAddress}
        """
        return self._lookupTest(client.lookupService, dns.SRV)

    def test_lookupZone(self):
        """
        See L{test_lookupAddress}
        """
        return self._lookupTest(client.lookupZone, dns.AXFR)

    def test_lookupAllRecords(self):
        """
        See L{test_lookupAddress}
        """
        return self._lookupTest(client.lookupAllRecords, dns.ALL_RECORDS)

    def test_lookupNamingAuthorityPointer(self):
        """
        See L{test_lookupAddress}
        """
        return self._lookupTest(client.lookupNamingAuthorityPointer, dns.NAPTR)
class FilterAnswersTests(unittest.TestCase):
    """
    Test L{twisted.names.client.Resolver.filterAnswers}'s handling of various
    error conditions it might encounter.
    """

    def setUp(self):
        # A resolver pointed at an invalid server - none of these tests
        # ever touch the network.
        self.resolver = client.Resolver(servers=[('0.0.0.0', 0)])

    def test_truncatedMessage(self):
        """
        Test that a truncated message results in an equivalent request made via
        TCP.
        """
        message = dns.Message(trunc=True)
        message.addQuery(b'example.com')

        def queryTCP(queries):
            self.assertEqual(queries, message.queries)
            response = dns.Message()
            response.answers = ['answer']
            response.authority = ['authority']
            response.additional = ['additional']
            return defer.succeed(response)

        self.resolver.queryTCP = queryTCP
        d = self.resolver.filterAnswers(message)
        d.addCallback(
            self.assertEqual, (['answer'], ['authority'], ['additional']))
        return d

    def _rcodeTest(self, rcode, exc):
        # For error response codes filterAnswers returns a Failure
        # synchronously, so trap() is available directly on the result.
        message = dns.Message(rCode=rcode)
        err = self.resolver.filterAnswers(message)
        err.trap(exc)

    def test_formatError(self):
        """
        Test that a message with a result code of C{EFORMAT} results in a
        failure wrapped around L{DNSFormatError}.
        """
        return self._rcodeTest(dns.EFORMAT, error.DNSFormatError)

    def test_serverError(self):
        """
        Like L{test_formatError} but for C{ESERVER}/L{DNSServerError}.
        """
        return self._rcodeTest(dns.ESERVER, error.DNSServerError)

    def test_nameError(self):
        """
        Like L{test_formatError} but for C{ENAME}/L{DNSNameError}.
        """
        return self._rcodeTest(dns.ENAME, error.DNSNameError)

    def test_notImplementedError(self):
        """
        Like L{test_formatError} but for C{ENOTIMP}/L{DNSNotImplementedError}.
        """
        return self._rcodeTest(dns.ENOTIMP, error.DNSNotImplementedError)

    def test_refusedError(self):
        """
        Like L{test_formatError} but for C{EREFUSED}/L{DNSQueryRefusedError}.
        """
        return self._rcodeTest(dns.EREFUSED, error.DNSQueryRefusedError)

    def test_refusedErrorUnknown(self):
        """
        Like L{test_formatError} but for an unrecognized error code and
        L{DNSUnknownError}.
        """
        return self._rcodeTest(dns.EREFUSED + 1, error.DNSUnknownError)
class FakeDNSDatagramProtocol(object):
    """
    A datagram protocol stand-in for which every query fails immediately
    with L{error.DNSQueryTimeoutError}, recording the call arguments.
    """

    def __init__(self):
        self.queries = []
        self.transport = StubPort()

    def query(self, address, queries, timeout=10, id=None):
        self.queries.append((address, queries, timeout, id))
        return defer.fail(error.DNSQueryTimeoutError(queries))

    def removeResend(self, id):
        # Resend bookkeeping is irrelevant for these tests.
        pass
class RetryLogic(unittest.TestCase):
    """
    Tests for query retrying implemented by L{client.Resolver}.
    """
    # Four distinct addresses so the round-robin ordering is observable.
    testServers = [
        '1.2.3.4',
        '4.3.2.1',
        'a.b.c.d',
        'z.y.x.w']
    def test_roundRobinBackoff(self):
        """
        When timeouts occur waiting for responses to queries, the next
        configured server is issued the query. When the query has been issued
        to all configured servers, the timeout is increased and the process
        begins again at the beginning.
        """
        addrs = [(x, 53) for x in self.testServers]
        r = client.Resolver(resolv=None, servers=addrs)
        # Every query against this protocol times out immediately.
        proto = FakeDNSDatagramProtocol()
        r._connectedProtocol = lambda: proto
        return r.lookupAddress(b"foo.example.com"
            ).addCallback(self._cbRoundRobinBackoff
            ).addErrback(self._ebRoundRobinBackoff, proto
            )
    def _cbRoundRobinBackoff(self, result):
        # Success is impossible here; every attempt times out.
        self.fail("Lookup address succeeded, should have timed out")
    def _ebRoundRobinBackoff(self, failure, fakeProto):
        failure.trap(defer.TimeoutError)
        # Assert that each server is tried with a particular timeout
        # before the timeout is increased and the attempts are repeated.
        for t in (1, 3, 11, 45):
            # Consume one full round (one attempt per server) per timeout.
            tries = fakeProto.queries[:len(self.testServers)]
            del fakeProto.queries[:len(self.testServers)]
            tries.sort()
            expected = list(self.testServers)
            expected.sort()
            for ((addr, query, timeout, id), expectedAddr) in zip(tries, expected):
                self.assertEqual(addr, (expectedAddr, 53))
                self.assertEqual(timeout, t)
        # All recorded queries must be accounted for by the rounds above.
        self.assertFalse(fakeProto.queries)
class ThreadedResolverTests(unittest.TestCase):
    """
    Tests for L{client.ThreadedResolver}.
    """

    def test_deprecated(self):
        """
        L{client.ThreadedResolver} is deprecated.  Instantiating it emits a
        deprecation warning pointing at the code that does the instantiation.
        """
        client.ThreadedResolver()
        warnings = self.flushWarnings(
            offendingFunctions=[self.test_deprecated])
        self.assertEqual(len(warnings), 1)
        self.assertEqual(warnings[0]['category'], DeprecationWarning)
        self.assertEqual(
            warnings[0]['message'],
            "twisted.names.client.ThreadedResolver is deprecated since "
            "Twisted 9.0, use twisted.internet.base.ThreadedResolver "
            "instead.")
| |
# -*- encoding: utf-8 -*-
import collections
import re
from abjad.tools import abctools
from abjad.tools import datastructuretools
class CreditRole(abctools.AbjadValueObject):
### CLASS VARIABLES ###
    class Category(datastructuretools.Enumeration):
        """
        Enumeration of top-level credit-role categories.

        The integer values are stable identifiers. ``INSTRUMENTS`` credits
        are further refined with a ``Subcategory`` member (see the pairs in
        ``all_credit_roles``).
        """
        ACTING_LITERARY_AND_SPOKEN = 1
        COMPANIES = 2
        CONDUCTING_AND_LEADING = 3
        DJ_MIX = 4
        FEATURING_AND_PRESENTING = 5
        INSTRUMENTS = 6
        MANAGEMENT = 7
        PRODUCTION = 8
        RELATION = 9
        REMIX = 10
        TECHNICAL = 11
        VISUAL = 12
        VOCAL = 13
        WRITING_AND_ARRANGEMENT = 14
    class Subcategory(datastructuretools.Enumeration):
        """
        Enumeration of instrument-family subcategories.

        Used together with ``Category.INSTRUMENTS`` to classify individual
        instrument credits; the integer values are stable identifiers.
        """
        DRUMS_AND_PERCUSSION = 1
        KEYBOARD = 2
        OTHER_MUSICAL = 3
        STRINGED_INSTRUMENTS = 4
        TECHNICAL_MUSICAL = 5
        TUNED_PERCUSSION = 6
        WIND_INSTRUMENTS = 7
_bracket_pattern = re.compile('\[(.+?)\]')
category_names = {
Category.ACTING_LITERARY_AND_SPOKEN: 'Acting, Literary & Spoken',
Category.COMPANIES: 'Companies',
Category.CONDUCTING_AND_LEADING: 'Conducting & Leading',
Category.DJ_MIX: 'DJ Mix',
Category.FEATURING_AND_PRESENTING: 'Featuring & Presenting',
Category.MANAGEMENT: 'Management',
Category.PRODUCTION: 'Production',
Category.RELATION: 'Structural Relationships',
Category.REMIX: 'Remix',
Category.TECHNICAL: 'Technical',
Category.VISUAL: 'Visual',
Category.VOCAL: 'Vocal',
Category.WRITING_AND_ARRANGEMENT: 'Writing & Arrangement',
}
subcategory_names = {
Subcategory.DRUMS_AND_PERCUSSION: 'Drums & Percussion',
Subcategory.KEYBOARD: 'Keyboard',
Subcategory.OTHER_MUSICAL: 'Other Musical',
Subcategory.STRINGED_INSTRUMENTS: 'String Instruments',
Subcategory.TECHNICAL_MUSICAL: 'Technical Musical',
Subcategory.TUNED_PERCUSSION: 'Tuned Percussion',
Subcategory.WIND_INSTRUMENTS: 'Wind Instruments',
}
all_credit_roles = collections.OrderedDict([
("Alias", (Category.RELATION,)),
("Member Of", (Category.RELATION,)),
("Compiled On", (Category.RELATION,)),
("Released On", (Category.RELATION,)),
("Sublabel Of", (Category.RELATION,)),
#"Split With": (Category.RELATION,)),
#"Collaborated With": (Category.RELATION,)),
("Artwork By", None),
("Executive Producer", None),
("Other", None),
("Photography", None),
("Written By", None),
("Adapted By", (Category.WRITING_AND_ARRANGEMENT,)),
("Arranged By", (Category.WRITING_AND_ARRANGEMENT,)),
("Cadenza", (Category.WRITING_AND_ARRANGEMENT,)),
("Composed By", (Category.WRITING_AND_ARRANGEMENT,)),
("Concept By", (Category.WRITING_AND_ARRANGEMENT,)),
("Copyist", (Category.WRITING_AND_ARRANGEMENT,)),
("Instrumentation By", (Category.WRITING_AND_ARRANGEMENT,)),
("Libretto By", (Category.WRITING_AND_ARRANGEMENT,)),
("Lyrics By", (Category.WRITING_AND_ARRANGEMENT,)),
("Music By", (Category.WRITING_AND_ARRANGEMENT,)),
("Music Consultant", (Category.WRITING_AND_ARRANGEMENT,)),
("Musical Assistance", (Category.WRITING_AND_ARRANGEMENT,)),
("Orchestrated By", (Category.WRITING_AND_ARRANGEMENT,)),
("Programmed By", (Category.WRITING_AND_ARRANGEMENT,)),
("Score Editor", (Category.WRITING_AND_ARRANGEMENT,)),
("Score", (Category.WRITING_AND_ARRANGEMENT,)),
("Sequenced By", (Category.WRITING_AND_ARRANGEMENT,)),
("Songwriter", (Category.WRITING_AND_ARRANGEMENT,)),
("Sound Designer", (Category.WRITING_AND_ARRANGEMENT,)),
("Transcription By", (Category.WRITING_AND_ARRANGEMENT,)),
("Translated By", (Category.WRITING_AND_ARRANGEMENT,)),
("Words By", (Category.WRITING_AND_ARRANGEMENT,)),
("Written-By", (Category.WRITING_AND_ARRANGEMENT,)),
("Featuring", (Category.FEATURING_AND_PRESENTING,)),
("Hosted By", (Category.FEATURING_AND_PRESENTING,)),
("Presenter", (Category.FEATURING_AND_PRESENTING,)),
("Chorus Master", (Category.CONDUCTING_AND_LEADING,)),
("Concertmaster", (Category.CONDUCTING_AND_LEADING,)),
("Concertmistress", (Category.CONDUCTING_AND_LEADING,)),
("Conductor", (Category.CONDUCTING_AND_LEADING,)),
("Contractor", (Category.CONDUCTING_AND_LEADING,)),
("Directed By", (Category.CONDUCTING_AND_LEADING,)),
("Leader", (Category.CONDUCTING_AND_LEADING,)),
("Repetiteur", (Category.CONDUCTING_AND_LEADING,)),
("Co-producer", (Category.PRODUCTION,)),
("Commissioned By", (Category.PRODUCTION,)),
("Compilation Producer", (Category.PRODUCTION,)),
("Compiled By", (Category.PRODUCTION,)),
("Curated By", (Category.PRODUCTION,)),
("Executive-Producer", (Category.PRODUCTION,)),
("Producer", (Category.PRODUCTION,)),
("Recording Supervisor", (Category.PRODUCTION,)),
("Reissue Producer", (Category.PRODUCTION,)),
("Research", (Category.PRODUCTION,)),
("Supervised By", (Category.PRODUCTION,)),
("Remix", (Category.REMIX,)),
("DJ Mix", (Category.DJ_MIX,)),
("Animation", (Category.VISUAL,)),
("Art Direction", (Category.VISUAL,)),
("Artwork", (Category.VISUAL,)),
("Assemblage", (Category.VISUAL,)),
("CGI Artist", (Category.VISUAL,)),
("Cameraman", (Category.VISUAL,)),
("Cinematographer", (Category.VISUAL,)),
("Cover", (Category.VISUAL,)),
("Creative Director", (Category.VISUAL,)),
("Design Concept", (Category.VISUAL,)),
("Design", (Category.VISUAL,)),
("Director Of Photography", (Category.VISUAL,)),
("Film Director", (Category.VISUAL,)),
("Film Editor", (Category.VISUAL,)),
("Film Producer", (Category.VISUAL,)),
("Film Technician", (Category.VISUAL,)),
("Gaffer", (Category.VISUAL,)),
("Graphics", (Category.VISUAL,)),
("Grip", (Category.VISUAL,)),
("Illustration", (Category.VISUAL,)),
("Layout", (Category.VISUAL,)),
("Lighting Director", (Category.VISUAL,)),
("Lighting", (Category.VISUAL,)),
("Painting", (Category.VISUAL,)),
("Photography By", (Category.VISUAL,)),
("Production Manager", (Category.VISUAL,)),
("Realization", (Category.VISUAL,)),
("Set Designer", (Category.VISUAL,)),
("Sleeve", (Category.VISUAL,)),
("Stage Manager", (Category.VISUAL,)),
("Typography", (Category.VISUAL,)),
("VJ", (Category.VISUAL,)),
("Video Editor", (Category.VISUAL,)),
("Abridged By", (Category.ACTING_LITERARY_AND_SPOKEN,)),
("Adapted By (Text)", (Category.ACTING_LITERARY_AND_SPOKEN,)),
("Choreography", (Category.ACTING_LITERARY_AND_SPOKEN,)),
("Interviewee", (Category.ACTING_LITERARY_AND_SPOKEN,)),
("Interviewer", (Category.ACTING_LITERARY_AND_SPOKEN,)),
("Liner Notes", (Category.ACTING_LITERARY_AND_SPOKEN,)),
("Music Librarian", (Category.ACTING_LITERARY_AND_SPOKEN,)),
("Narrator", (Category.ACTING_LITERARY_AND_SPOKEN,)),
("Read By", (Category.ACTING_LITERARY_AND_SPOKEN,)),
("Screenwriter", (Category.ACTING_LITERARY_AND_SPOKEN,)),
("Script By", (Category.ACTING_LITERARY_AND_SPOKEN,)),
("Sleeve Notes", (Category.ACTING_LITERARY_AND_SPOKEN,)),
("Text By", (Category.ACTING_LITERARY_AND_SPOKEN,)),
("Voice Actor", (Category.ACTING_LITERARY_AND_SPOKEN,)),
("A\u0026R", (Category.MANAGEMENT,)),
("Administrator", (Category.MANAGEMENT,)),
("Advisor", (Category.MANAGEMENT,)),
("Booking", (Category.MANAGEMENT,)),
("Coordinator", (Category.MANAGEMENT,)),
("Legal", (Category.MANAGEMENT,)),
("Management", (Category.MANAGEMENT,)),
("Product Manager", (Category.MANAGEMENT,)),
("Crew", (Category.TECHNICAL,)),
("DAW", (Category.TECHNICAL,)),
("Edited By", (Category.TECHNICAL,)),
("Engineer", (Category.TECHNICAL,)),
("Lacquer Cut By", (Category.TECHNICAL,)),
("Mastered By", (Category.TECHNICAL,)),
("Mixed By", (Category.TECHNICAL,)),
("Recorded By", (Category.TECHNICAL,)),
("Remastered By", (Category.TECHNICAL,)),
("Tape Op", (Category.TECHNICAL,)),
("Technician", (Category.TECHNICAL,)),
("Tracking By", (Category.TECHNICAL,)),
("Transferred By", (Category.TECHNICAL,)),
("Alto Vocals", (Category.VOCAL,)),
("Backing Vocals", (Category.VOCAL,)),
("Baritone Vocals", (Category.VOCAL,)),
("Bass Vocals", (Category.VOCAL,)),
("Caller", (Category.VOCAL,)),
("Choir", (Category.VOCAL,)),
("Chorus", (Category.VOCAL,)),
("Contralto Vocals", (Category.VOCAL,)),
("Coro", (Category.VOCAL,)),
("Countertenor Vocals", (Category.VOCAL,)),
("Harmony Vocals", (Category.VOCAL,)),
("Human Beatbox", (Category.VOCAL,)),
("Humming", (Category.VOCAL,)),
("Kakegoe", (Category.VOCAL,)),
("Lead Vocals", (Category.VOCAL,)),
("MC", (Category.VOCAL,)),
("Mezzo-soprano Vocals", (Category.VOCAL,)),
("Overtone Voice", (Category.VOCAL,)),
("Rap", (Category.VOCAL,)),
("Scat", (Category.VOCAL,)),
("Solo Vocal", (Category.VOCAL,)),
("Soprano Vocals", (Category.VOCAL,)),
("Speech", (Category.VOCAL,)),
("Tenor Vocals", (Category.VOCAL,)),
("Toasting", (Category.VOCAL,)),
("Treble Vocals", (Category.VOCAL,)),
("Vocalese", (Category.VOCAL,)),
("Vocals", (Category.VOCAL,)),
("Voice", (Category.VOCAL,)),
("Whistling", (Category.VOCAL,)),
("Yodeling", (Category.VOCAL,)),
("Afox\u00e9", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Agog\u00f4", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Ashiko", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Bapang", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Bass Drum", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Bata", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Bell Tree", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Bells", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Bendir", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Bodhr\u00e1n", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Body Percussion", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Bombo", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Bones", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Bongos", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Buhay", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Buk", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Cabasa", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Caixa", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Caj\u00f3n", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Calabash", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Castanets", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Caxixi", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Chak\u0027chas", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Ching", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Claves", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Congas", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Cowbell", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Cuica", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Cymbal", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Daf", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Davul", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Dhol", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Dholak", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Djembe", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Drum Programming", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Drum", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Drums", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Dunun", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Electronic Drums", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Finger Cymbals", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Finger Snaps", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Frame Drum", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Friction Drum", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Ganz\u00e1", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Ghatam", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Ghungroo", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Goblet Drum", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Gong", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Guiro", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Handclaps", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Hihat", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Idiophone", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Janggu", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("K\u0027kwaengwari", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Kanjira", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Karkabas", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Khartal", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Khurdak", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Lion\u0027s Roar", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Maracas", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Monkey stick", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Mridangam", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Pandeiro", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Percussion", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Rainstick", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Ratchet", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Rattle", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Reco-reco", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Repinique", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Rototoms", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Scraper", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Shaker", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Shekere", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Singing Bowls", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Skratjie", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Slapstick", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Slit Drum", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Snare", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Spoons", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Surdo", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("T\u00fcng\u00fcr", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Tabla", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Taiko", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Talking Drum", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Tam-tam", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Tambora", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Tamborim", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Tambourine", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Tan-Tan", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Tap Dance", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Tar (Drum)", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Temple Bells", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Temple Block", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Timbales", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Timpani", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Tom Tom", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Triangle", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Udu", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Vibraslap", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Washboard", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Waterphone", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Wood Block", (Category.INSTRUMENTS, Subcategory.DRUMS_AND_PERCUSSION)),
("Amadinda", (Category.INSTRUMENTS, Subcategory.TUNED_PERCUSSION)),
("Angklung", (Category.INSTRUMENTS, Subcategory.TUNED_PERCUSSION)),
("Angklung", (Category.INSTRUMENTS, Subcategory.TUNED_PERCUSSION)),
("Balafon", (Category.INSTRUMENTS, Subcategory.TUNED_PERCUSSION)),
("Boomwhacker", (Category.INSTRUMENTS, Subcategory.TUNED_PERCUSSION)),
("Carillon", (Category.INSTRUMENTS, Subcategory.TUNED_PERCUSSION)),
("Celesta", (Category.INSTRUMENTS, Subcategory.TUNED_PERCUSSION)),
("Chimes", (Category.INSTRUMENTS, Subcategory.TUNED_PERCUSSION)),
("Crotales", (Category.INSTRUMENTS, Subcategory.TUNED_PERCUSSION)),
("Glockenspiel", (Category.INSTRUMENTS, Subcategory.TUNED_PERCUSSION)),
("Kalimba", (Category.INSTRUMENTS, Subcategory.TUNED_PERCUSSION)),
("Marimba", (Category.INSTRUMENTS, Subcategory.TUNED_PERCUSSION)),
("Marimbula", (Category.INSTRUMENTS, Subcategory.TUNED_PERCUSSION)),
("Metallophone", (Category.INSTRUMENTS, Subcategory.TUNED_PERCUSSION)),
("Musical Box", (Category.INSTRUMENTS, Subcategory.TUNED_PERCUSSION)),
("Steel Drums", (Category.INSTRUMENTS, Subcategory.TUNED_PERCUSSION)),
("Thumb Piano", (Category.INSTRUMENTS, Subcategory.TUNED_PERCUSSION)),
("Vibraphone", (Category.INSTRUMENTS, Subcategory.TUNED_PERCUSSION)),
("Xylophone", (Category.INSTRUMENTS, Subcategory.TUNED_PERCUSSION)),
("Baby Grand Piano", (Category.INSTRUMENTS, Subcategory.KEYBOARD)),
("Chamberlin", (Category.INSTRUMENTS, Subcategory.KEYBOARD)),
("Concert Grand Piano", (Category.INSTRUMENTS, Subcategory.KEYBOARD)),
("Dulcitone", (Category.INSTRUMENTS, Subcategory.KEYBOARD)),
("Electric Piano", (Category.INSTRUMENTS, Subcategory.KEYBOARD)),
("Fortepiano", (Category.INSTRUMENTS, Subcategory.KEYBOARD)),
("Grand Piano", (Category.INSTRUMENTS, Subcategory.KEYBOARD)),
("Harmonium", (Category.INSTRUMENTS, Subcategory.KEYBOARD)),
("Harpsichord", (Category.INSTRUMENTS, Subcategory.KEYBOARD)),
("Keyboards", (Category.INSTRUMENTS, Subcategory.KEYBOARD)),
("Mellotron", (Category.INSTRUMENTS, Subcategory.KEYBOARD)),
("Melodica", (Category.INSTRUMENTS, Subcategory.KEYBOARD)),
("Omnichord", (Category.INSTRUMENTS, Subcategory.KEYBOARD)),
("Ondes Martenot", (Category.INSTRUMENTS, Subcategory.KEYBOARD)),
("Organ", (Category.INSTRUMENTS, Subcategory.KEYBOARD)),
("Parlour Grand Piano", (Category.INSTRUMENTS, Subcategory.KEYBOARD)),
("Pedalboard", (Category.INSTRUMENTS, Subcategory.KEYBOARD)),
("Piano", (Category.INSTRUMENTS, Subcategory.KEYBOARD)),
("Player Piano", (Category.INSTRUMENTS, Subcategory.KEYBOARD)),
("Regal", (Category.INSTRUMENTS, Subcategory.KEYBOARD)),
("Stylophone", (Category.INSTRUMENTS, Subcategory.KEYBOARD)),
("Synth", (Category.INSTRUMENTS, Subcategory.KEYBOARD)),
("Synthesizer", (Category.INSTRUMENTS, Subcategory.KEYBOARD)),
("Toy Piano", (Category.INSTRUMENTS, Subcategory.KEYBOARD)),
("Upright Piano", (Category.INSTRUMENTS, Subcategory.KEYBOARD)),
("Virginal", (Category.INSTRUMENTS, Subcategory.KEYBOARD)),
("Acoustic Bass", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Acoustic Guitar", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Arco Bass", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Arpa", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Autoharp", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Baglama", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Bajo Quinto", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Bajo Sexto", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Balalaika", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Bandura", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Bandurria", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Banhu", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Banjo", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Baritone Guitar", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Bass Guitar", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Berimbau", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Bhapang", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Biwa", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Blaster Beam", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Bouzouki", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Bulbul Tarang", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Byzaanchi", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("C\u00fcmb\u00fc\u015f", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Cavaquinho", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Cello", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Chanzy", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Chapman Stick", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Charango", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Chitarrone", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Cimbalom", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Cittern", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Classical Guitar", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Clavichord", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Clavinet", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Cobza", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Contrabass", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Cuatro", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Dilruba", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Domra", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Doshpuluur", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Double Bass", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Dulcimer", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Dutar", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Ehru", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Ektare", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Electric Bass", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Electric Guitar", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Electric Upright Bass", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Erhu", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Esraj", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Fiddle", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Flamenco Guitar", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Gadulka", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Gaohu", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Gayageum", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Geomungo", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Gottuv\u00e2dyam", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Guimbri", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Guitalele", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Guitar Synthesizer", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Guitar", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("GuitarViol", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Guitarr\u00f3n", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Guqin", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Gusli", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Guzheng", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Halldorophone", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Hardingfele", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Harp Guitar", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Harp", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Hummel", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Huqin", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Hurdy Gurdy", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Igil", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Jarana", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Jinghu", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Jouhikko", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Kabosy", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Kamancha", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Kantele", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Kanun", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Kemenche", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Kobyz", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Kokyu", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Kora", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Koto", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("La\u00fad", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Lap Steel Guitar", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Lead Guitar", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Liuqin", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Lute", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Lyre", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Mandocello", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Mandoguitar", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Mandola", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Mandolin", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Mandolincello", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Monochord", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Morinhoor", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Musical bow", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Nyckelharpa", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Oud", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Outi", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Pedal Steel Guitar", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Pipa", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Portuguese Guitar", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Psalmodicon", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Psaltery", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Rabab", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Rebab", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Rebec", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Requinto Guitar", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Resonator Guitar", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Rhythm Guitar", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Roncoco", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Ruan", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Santoor", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Sanxian", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Sarangi", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Sarod", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Semi-Acoustic Guitar", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Shahi Baaja", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Shamisen", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Sintir", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Sitar", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Slide Guitar", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Spinet", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Steel Guitar", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Strings", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Stroh Violin", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Strumstick", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Surbahar", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Svara Mandala", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Swarmandel", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Sympitar", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("SynthAxe", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Tambura", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Tamburitza", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Tapboard", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Tar (lute)", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Theorbo", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Tiple", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Tipple", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Tonkori", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Tres", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Twelve-String Guitar", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Ukulele", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Utogardon", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Valiha", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Veena", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Vielle", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Vihuela", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Viol", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Viola d\u0027Amore", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Viola", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Violin", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Violone", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Xalam", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Yang T\u0027Chin", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Yangqin", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Zither", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Zongora", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("\u0110\u00e0n b\u1ea7u", (Category.INSTRUMENTS, Subcategory.STRINGED_INSTRUMENTS)),
("Accordion", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Algoza", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Alphorn", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Alto Clarinet", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Alto Horn", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Alto Recorder", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Alto Saxophone", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Apito", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Bagpipes", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Bandoneon", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Bansuri", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Baritone Saxophone", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Barrel Organ", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Bass Clarinet", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Bass Harmonica", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Bass Saxophone", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Bass Tuba", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Basset Horn", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Bassoon", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Bayan", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Bellowphone", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Beresta", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Blues Harp", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Bombarde", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Brass Bass", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Brass", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Bucium", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Bugle", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Chalumeau", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Chanter", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Chirimia", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Clarinet", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Clarion", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Claviola", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Concert Flute", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Concertina", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Conch", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Contra-Alto Clarinet", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Contrabass Clarinet", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Contrabass Saxophone", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Contrabassoon", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Cor Anglais", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Cornet", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Cornett", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Crumhorn", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Daegeum", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Didgeridoo", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Dili Tuiduk", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Dizi", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Drone", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Duduk", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Dulcian", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Dulzaina", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Electronic Valve Instrument", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Electronic Wind Instrument", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("English Horn", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Euphonium", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Fife", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Flageolet", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Flugabone", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Flugelhorn", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Fluier", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Flute", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("French Horn", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Gemshorn", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Harmonica", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Heckelphone", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Helicon", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Horagai", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Horn", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Horns", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Hunting Horn", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Jug", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Kaval", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Kazoo", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Khene", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Launeddas", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Low Whistle", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Low Whistle", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Lur", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Lur", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Lyricon", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Lyricon", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Mellophone", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Melodeon", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Mizmar", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Mizwad", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Moce\u00f1o", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Murli", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Musette", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Nadaswaram", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Ney", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Nose Flute", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Oboe d\u0027Amore", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Oboe", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Ocarina", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Ophicleide", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Panpipes", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Piano Accordion", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Piccolo Flute", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Pipe", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Pito", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Pixiephone", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Quena", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Quenacho", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Rauschpfeife", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Recorder", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Reeds", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Rhaita", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Rondador", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Rozhok", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Ryuteki", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Sackbut", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Sampona", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Sarrusophone", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Saxello", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Saxophone", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Serpent", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Shakuhachi", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Shanai", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Shawm", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Shenai", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Sheng", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Shinobue", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Sho", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Slide Whistle", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Sopilka", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Sopranino Saxophone", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Soprano Clarinet", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Soprano Saxophone", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Souna", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Sousaphone", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Sruti Box", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Subcontrabass Saxophone", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Suling", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Suona", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("T\u00e1rogat\u00f3", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Tenor Saxophone", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Ti-tse", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Tin Whistle", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Trombone", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Trumpet", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Tuba", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Valve Trombone", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Whistle", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Whistling Water Jar", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Wind", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Woodwind", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Yorgaphone", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Zhaleika", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Zukra", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Zurna", (Category.INSTRUMENTS, Subcategory.WIND_INSTRUMENTS)),
("Computer", (Category.INSTRUMENTS, Subcategory.TECHNICAL_MUSICAL)),
("Drum Machine", (Category.INSTRUMENTS, Subcategory.TECHNICAL_MUSICAL)),
("Effects", (Category.INSTRUMENTS, Subcategory.TECHNICAL_MUSICAL)),
("Electronics", (Category.INSTRUMENTS, Subcategory.TECHNICAL_MUSICAL)),
("Groovebox", (Category.INSTRUMENTS, Subcategory.TECHNICAL_MUSICAL)),
("Loops", (Category.INSTRUMENTS, Subcategory.TECHNICAL_MUSICAL)),
("MIDI Controller", (Category.INSTRUMENTS, Subcategory.TECHNICAL_MUSICAL)),
("Noises", (Category.INSTRUMENTS, Subcategory.TECHNICAL_MUSICAL)),
("Sampler", (Category.INSTRUMENTS, Subcategory.TECHNICAL_MUSICAL)),
("Scratches", (Category.INSTRUMENTS, Subcategory.TECHNICAL_MUSICAL)),
("Talkbox", (Category.INSTRUMENTS, Subcategory.TECHNICAL_MUSICAL)),
("Tape", (Category.INSTRUMENTS, Subcategory.TECHNICAL_MUSICAL)),
("Theremin", (Category.INSTRUMENTS, Subcategory.TECHNICAL_MUSICAL)),
("Turntables", (Category.INSTRUMENTS, Subcategory.TECHNICAL_MUSICAL)),
("Vocoder", (Category.INSTRUMENTS, Subcategory.TECHNICAL_MUSICAL)),
("Accompanied By", (Category.INSTRUMENTS, Subcategory.OTHER_MUSICAL)),
("Audio Generator", (Category.INSTRUMENTS, Subcategory.OTHER_MUSICAL)),
("Backing Band", (Category.INSTRUMENTS, Subcategory.OTHER_MUSICAL)),
("Band", (Category.INSTRUMENTS, Subcategory.OTHER_MUSICAL)),
("Bass", (Category.INSTRUMENTS, Subcategory.OTHER_MUSICAL)),
("Brass Band", (Category.INSTRUMENTS, Subcategory.OTHER_MUSICAL)),
("Bullroarer", (Category.INSTRUMENTS, Subcategory.OTHER_MUSICAL)),
("Concert Band", (Category.INSTRUMENTS, Subcategory.OTHER_MUSICAL)),
("E-Bow", (Category.INSTRUMENTS, Subcategory.OTHER_MUSICAL)),
("Ensemble", (Category.INSTRUMENTS, Subcategory.OTHER_MUSICAL)),
("Gamelan", (Category.INSTRUMENTS, Subcategory.OTHER_MUSICAL)),
("Glass Harmonica", (Category.INSTRUMENTS, Subcategory.OTHER_MUSICAL)),
("Guest", (Category.INSTRUMENTS, Subcategory.OTHER_MUSICAL)),
("Homus", (Category.INSTRUMENTS, Subcategory.OTHER_MUSICAL)),
("Instruments", (Category.INSTRUMENTS, Subcategory.OTHER_MUSICAL)),
("Jew\u0027s Harp", (Category.INSTRUMENTS, Subcategory.OTHER_MUSICAL)),
("Mbira", (Category.INSTRUMENTS, Subcategory.OTHER_MUSICAL)),
("Morchang", (Category.INSTRUMENTS, Subcategory.OTHER_MUSICAL)),
("Musician", (Category.INSTRUMENTS, Subcategory.OTHER_MUSICAL)),
("Orchestra", (Category.INSTRUMENTS, Subcategory.OTHER_MUSICAL)),
("Performer", (Category.INSTRUMENTS, Subcategory.OTHER_MUSICAL)),
("Saw", (Category.INSTRUMENTS, Subcategory.OTHER_MUSICAL)),
("Siren", (Category.INSTRUMENTS, Subcategory.OTHER_MUSICAL)),
("Soloist", (Category.INSTRUMENTS, Subcategory.OTHER_MUSICAL)),
("Sounds", (Category.INSTRUMENTS, Subcategory.OTHER_MUSICAL)),
("Toy", (Category.INSTRUMENTS, Subcategory.OTHER_MUSICAL)),
("Trautonium", (Category.INSTRUMENTS, Subcategory.OTHER_MUSICAL)),
("Wind Chimes", (Category.INSTRUMENTS, Subcategory.OTHER_MUSICAL)),
("Wobble Board", (Category.INSTRUMENTS, Subcategory.OTHER_MUSICAL)),
("Copyright (c)", (Category.COMPANIES,)),
("Designed At", (Category.COMPANIES,)),
("Distributed By", (Category.COMPANIES,)),
("Duplicated By", (Category.COMPANIES,)),
("Edited At", (Category.COMPANIES,)),
("Engineered At", (Category.COMPANIES,)),
("Exclusive Retailer", (Category.COMPANIES,)),
("Exported By", (Category.COMPANIES,)),
("Filmed At", (Category.COMPANIES,)),
("Glass Mastered At", (Category.COMPANIES,)),
("Lacquer Cut At", (Category.COMPANIES,)),
("Licensed From", (Category.COMPANIES,)),
("Licensed Through", (Category.COMPANIES,)),
("Licensed To", (Category.COMPANIES,)),
("Made By", (Category.COMPANIES,)),
("Manufactured By", (Category.COMPANIES,)),
("Manufactured For", (Category.COMPANIES,)),
("Marketed By", (Category.COMPANIES,)),
("Mastered At", (Category.COMPANIES,)),
("Mixed At", (Category.COMPANIES,)),
("Overdubbed At", (Category.COMPANIES,)),
("Phonographic Copyright (p)", (Category.COMPANIES,)),
("Pressed By", (Category.COMPANIES,)),
("Printed By", (Category.COMPANIES,)),
("Produced At", (Category.COMPANIES,)),
("Produced For", (Category.COMPANIES,)),
("Published By", (Category.COMPANIES,)),
("Record Company", (Category.COMPANIES,)),
("Recorded At", (Category.COMPANIES,)),
("Recorded By", (Category.COMPANIES,)),
("Remastered At", (Category.COMPANIES,)),
("Remixed At", (Category.COMPANIES,)),
])
### INITIALIZER ###
def __init__(self, name=None, detail=None):
self._name = name
self._detail = detail
### PUBLIC METHODS ###
@classmethod
def from_element(cls, element):
credit_roles = []
if element is None or not element.text:
return credit_roles
current_text = ''
bracket_depth = 0
for character in element.text:
if character == '[':
bracket_depth += 1
elif character == ']':
bracket_depth -= 1
elif not bracket_depth and character == ',':
current_text = current_text.strip()
if current_text:
credit_roles.append(cls.from_text(current_text))
current_text = ''
continue
current_text += character
current_text = current_text.strip()
if current_text:
credit_roles.append(cls.from_text(current_text))
return credit_roles
    @classmethod
    def from_text(cls, text):
        """Parse a single credit-role string such as ``"Violin [solo]"``.

        Text before the first top-level ``[`` becomes the role name; the
        contents of each top-level ``[...]`` group are collected and joined
        with ', ' into the detail string (detail is None when the text has
        no bracketed groups).
        """
        name = ''
        current_buffer = ''
        details = []
        had_detail = False
        bracket_depth = 0
        for character in text:
            if character == '[':
                bracket_depth += 1
                # The first top-level '[' terminates the name portion.
                if bracket_depth == 1 and not had_detail:
                    name = current_buffer
                    current_buffer = ''
                    had_detail = True
                elif 1 < bracket_depth:
                    # Nested '[' is kept verbatim inside the detail text.
                    current_buffer += character
            elif character == ']':
                bracket_depth -= 1
                if not bracket_depth:
                    # Closing a top-level group completes one detail entry.
                    details.append(current_buffer)
                    current_buffer = ''
                else:
                    current_buffer += character
            else:
                current_buffer += character
        # No brackets were seen: the whole text is the name.
        if current_buffer and not had_detail:
            name = current_buffer
        name = name.strip()
        detail = ', '.join(_.strip() for _ in details)
        detail = detail or None
        return cls(name=name, detail=detail)
@classmethod
def get_multiselect_mapping(cls):
#excluded_roles = [
# 'Alias',
# 'Member Of',
# ]
mapping = collections.OrderedDict()
for role, categories in sorted(cls.all_credit_roles.items()):
if categories is None:
continue
#if categories is None or role in excluded_roles:
# continue
if len(categories) == 1:
category_name = cls.category_names[categories[0]]
else:
category_name = cls.subcategory_names[categories[1]]
if category_name not in mapping:
mapping[category_name] = []
mapping[category_name].append(role)
return mapping
    ### PUBLIC PROPERTIES ###
    @property
    def detail(self):
        """Detail text captured from the bracketed groups, or None."""
        return self._detail
    @property
    def name(self):
        """The role's name (the text outside any square brackets)."""
        return self._name
| |
###
# Copyright (c) 2014, Kristian Berg
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.utils as utils
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
import supybot.conf as conf
import psycopg2
import psycopg2.extras
import psycopg2.pool
import eveapi
import datetime
class UnknownLocation(Exception):
    """Raised when a locationID/solarSystemID has no match in the SDE.

    Derives from Exception (not BaseException) so that ordinary
    ``except Exception`` boundary handlers can catch it; only
    system-exiting exceptions should subclass BaseException directly.
    """
    pass
class EVESpai(callbacks.Plugin):
"""
EVESpai commands:
'pos [<system>]' Lists all POSes.
'evetime' Get current time on Tranquility.
'whereis <character>' List the location and currently boarded ship of <character>.
'cache <calltype>' List the cache time of given call type.
'whoat <system>' List characters and their ships in <system>. If --all is given, ignore the max lines limitation.
'ship <shiptype>' List characters in <shiptype>.
'chars <user>' List all characters belonging to <user>
    'price [--location=(<solarsystem>|<region>)] <typeName>' List buy/sell/volume of <type> in <location>, defaults to Jita.
"""
threaded = True
    def __init__(self, irc):
        # Supybot plugin convention: keep a (name-mangled) handle on the
        # plugin base class, initialize it, then open the DB pools.
        self.__parent = super(EVESpai, self)
        self.__parent.__init__(irc)
        self._connect(irc)
def _connect(self, irc):
try:
self.stationspinner = psycopg2.pool.ThreadedConnectionPool(2, 20,
host=self.registryValue('stationspinner_host'),
port=self.registryValue('stationspinner_port'),
dbname=self.registryValue('stationspinner_database'),
user=self.registryValue('stationspinner_user'),
password=self.registryValue('stationspinner_password'))
except Exception, e:
irc.error('Could not connect to stationspinner database. "{0}"'.format(e))
try:
self.sde = psycopg2.pool.ThreadedConnectionPool(2, 20,
host=self.registryValue('sde_host'),
port=self.registryValue('sde_port'),
dbname=self.registryValue('sde_database'),
user=self.registryValue('sde_user'),
password=self.registryValue('sde_password'))
except Exception, e:
irc.error('Could not connect to sde database. "{0}"'.format(e))
if self.registryValue('corporation') == '':
irc.error('EVESpai requires that you set a corporation')
try:
cur = self.stationspinner.getconn().cursor()
cur.execute("""
SELECT "corporationID"
FROM corporation_corporationsheet
WHERE "corporationName"=%s and "enabled"=true
""", [self.registryValue('corporation')])
self.corporationID = cur.fetchone()[0]
cur.close()
except Exception, e:
irc.error('Could not find corporation "{0}" in stationspinner database'.format(self.corporation))
def _sql(self, sql, argslist, single=True, db='stationspinner'):
conn = getattr(self, db).getconn()
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
cur.execute(sql, argslist)
if single:
data = cur.fetchone()
else:
data = cur.fetchall()
cur.close()
getattr(self, db).putconn(conn)
return data
def _get_SolarSystemID(self, system_name):
row = self._sql("""SELECT "solarSystemID" FROM "mapSolarSystems"
WHERE "solarSystemName" ILIKE %s """, [system_name], db='sde')
return row['solarSystemID']
def _get_SolarSystem(self, solarSystemID):
row = self._sql("""SELECT * FROM "mapSolarSystems"
WHERE "solarSystemID" = %s""", [solarSystemID], db='sde')
if not row:
raise UnknownLocation(solarSystemID)
return row
def _get_locationID(self, location_name):
row = self._sql("""SELECT "itemID" FROM "mapDenormalize"
WHERE "itemName" ILIKE %s""", [location_name], db='sde')
return row['itemID']
def _get_location(self, locationID):
row = self._sql("""SELECT * FROM "mapDenormalize"
WHERE "itemID"=%s""", [locationID], db='sde')
if not row:
raise UnknownLocation(locationID)
return row
def _get_location_by_name(self, locationName):
row = self._sql("""SELECT * FROM "mapDenormalize"
WHERE "itemName" ILIKE %s""", [locationName], db='sde')
if not row:
station = self._sql(""" SELECT * FROM universe_conquerablestation
WHERE "stationName" ILIKE %s""", [locationName])
row = {'itemName': station['stationName']}
solarsystem = self._sql("""SELECT * FROM "mapDenormalize"
WHERE "itemID"=%s""", [station['solarSystemID']], db='sde')
if not solarsystem:
row['security'] = 0.0
else:
row['security'] = solarsystem['security']
return row
def _get_typeID(self, type_name):
row = self._sql("""SELECT "typeID" FROM "invTypes"
WHERE "typeName" ILIKE %s AND published=true""", [type_name], db='sde')
return row['typeID']
def _get_type(self, typeID):
row = self._sql("""SELECT * FROM "invTypes"
WHERE "typeID" = %s AND published=true""", [typeID], db='sde')
return row
def _colorize_system(self, location):
try:
security = location['security']
except:
security = 0.0
if 'solarSystemName' in location:
name = location['solarSystemName']
else:
name = location['itemName']
if security >= 0.8:
return ircutils.mircColor(name, fg='teal')
elif security < 0.8 and security >= 0.6:
return ircutils.mircColor(name, fg='light green')
elif security < 0.6 and security >= 0.5:
return ircutils.mircColor(name, fg='yellow')
elif security < 0.5 and security >= 0.1:
return ircutils.mircColor(name, fg='orange')
elif security < 0.1:
return ircutils.mircColor(name, fg='red')
def locationid(self, irc, msg, args, locationName):
"""[<location>]
Get locationID for a location
"""
try:
locationID = self._get_locationID(locationName)
irc.reply(locationID, prefixNick=False)
except:
irc.error('Unknown location')
locationid = wrap(locationid, ['text'])
def locationname(self, irc, msg, args, locationID):
"""[<location>]
Get locationName for a location
"""
try:
name = self._get_location(locationID)['itemName']
irc.reply(name, prefixNick=False)
except:
irc.error('Unknown location')
locationname = wrap(locationname, ['text'])
def typename(self, irc, msg, args, typeID):
"""[<typeID>]
Get typeName of a typeID
"""
try:
name = self._get_type(typeID)['typeName']
irc.reply(name, prefixNick=False)
except:
irc.error('Unknown type')
typename = wrap(typename, ['text'])
def typeid(self, irc, msg, args, typeName):
"""[<typeName>]
Get typeID of a typeName
"""
try:
typeID = self._get_typeID(typeName)
irc.reply(typeID, prefixNick=False)
except:
irc.error('Unknown type')
typeid = wrap(typeid, ['text'])
def evetime(self, irc, msg, args):
"""
Get current time on Tranquility
"""
api = eveapi.EVEAPIConnection()
status = api.server.ServerStatus()
tq_time = datetime.datetime.utcfromtimestamp(status._meta.currentTime)
SERVER_STATUS = {
'True': ircutils.mircColor('online', fg='green'),
'False': ircutils.mircColor('offline', fg='red'),
}
irc.reply('{0}, Tranquility is {1} with {2:,d} players logged in'.format(
ircutils.bold(tq_time.time()),
SERVER_STATUS[status.serverOpen],
status.onlinePlayers
), prefixNick=False)
evetime = wrap(evetime, [])
status = wrap(evetime, [])
    def pos(self, irc, msg, args, channel, system):
        """[<channel>] [<system>]
        List all POSes or all POSes in given system.
        """
        if not self.registryValue('full_access', channel):
            irc.reply('Concord denies you access on this channel!')
            return
        if system:
            try:
                locationID = self._get_locationID(system)
                solar_system = self._get_SolarSystem(locationID)
            except:
                irc.error('Unknown location')
                return
            rows = self._sql("""
            SELECT *
            FROM corporation_starbase
            WHERE owner_id = %s AND "locationID" = %s""", [self.corporationID,
                                                           locationID], single=False)
        else:
            rows = self._sql("""
            SELECT *
            FROM corporation_starbase
            WHERE owner_id = %s
            ORDER BY "locationID", "moonID" """, [self.corporationID], single=False)
        count = len(rows)
        # Map of API starbase state codes to colored labels.
        STATES = {
            0: ircutils.mircColor('Unanchored', fg='teal'), # Also unanchoring? Has valid stateTimestamp.
                                                            # Note that moonID is zero for unanchored Towers, but
                                                            # locationID will still yield the solar system ID.
            1: ircutils.mircColor('Anchored/Offline', fg='orange'), # No time information stored.
            2: ircutils.mircColor('Onlining', fg='light green'), # Will be online at time = onlineTimestamp.
            3: ircutils.mircColor('Reinforced', fg='red'), # Until time = stateTimestamp.
            4: ircutils.mircColor('Online', fg='green') # Continuously since time = onlineTimestamp.
        }
        # Cache solarsystem/region rows by ID so each one is fetched from
        # the SDE at most once per invocation.
        locations = {}
        if system:
            locations[solar_system['solarSystemID']] = solar_system
            irc.reply('Found {0} starbases in {1}'.format(
                ircutils.bold(count),
                self._colorize_system(solar_system)),
                prefixNick=False)
        else:
            irc.reply('Found {0} starbases'.format(count), prefixNick=False)
        for row in rows:
            locationID = int(row['locationID'])
            try:
                state = STATES[int(row['state'])]
            except:
                state = 'Unknown'
            if not locationID in locations:
                try:
                    solar_system = self._get_SolarSystem(locationID)
                    locations[locationID] = solar_system
                except UnknownLocation:
                    # Tower anchored in a system missing from the SDE dump:
                    # report what we can and move on to the next row.
                    irc.reply('{0} :: {1} :: {2} :: {3} :: {4}'.format(
                        'Unknown region',
                        'Unknown solarsystem {0}'.format(locationID), #solarsystem
                        'n/a', #moon
                        self._get_type(int(row['typeID']))['typeName'], #pos type
                        state #offline/online
                    ), prefixNick=False)
                    continue
            else:
                solar_system = locations[locationID]
            if not solar_system['regionID'] in locations:
                region = self._get_location(solar_system['regionID'])
                locations[solar_system['regionID']] = region
            else:
                region = locations[solar_system['regionID']]
            irc.reply('{0} :: {1} :: {2} :: {3} :: {4}'.format(
                region['itemName'],
                self._colorize_system(solar_system), #solarsystem
                self._get_location(row['moonID'])['itemName'], #moon
                self._get_type(int(row['typeID']))['typeName'], #pos type
                state #offline/online
            ), prefixNick=False)
    pos = wrap(pos, [optional('channel'), optional('text')])
def whereis(self, irc, msg, args, channel, character):
"""[<channel>] <character>
List the location and currently boarded ship of <character>
"""
if not self.registryValue('full_access', channel):
irc.reply('Concord denies you access on this channel!')
return
rows = self._sql("""
SELECT * FROM corporation_membertracking
WHERE name ILIKE %s AND owner_id=%s""", [character, self.corporationID], single=False)
if len(rows) > 0:
for row in rows:
if row['shipType'] == 'Unknown Type':
ship = 'Pod'
else:
ship = row['shipType']
irc.reply('{0} :: {1} :: {2}'.format(
ircutils.bold(row['name']),
self._colorize_system(self._get_location_by_name(row['location'])),
ship
), prefixNick=False)
else:
irc.reply('Found 0 characters with a name like "{0}"'.format(character))
whereis = wrap(whereis, [optional('channel'), 'text'])
def cache(self, irc, msg, args, channel, apicall):
"""[<channel>] <APICall>
List the cache time of given endpoint
"""
if not self.registryValue('full_access', channel):
irc.reply('Concord denies you access on this channel!')
return
call = self._sql("""SELECT * FROM universe_apicall
WHERE name ILIKE %s AND type='Corporation'""", [apicall])
if not call:
irc.error('Unknown APICall')
return
else:
update = self._sql("""
SELECT * FROM accounting_apiupdate
WHERE apicall_id=%s AND owner = %s""", [call['id'], self.corporationID])
if not update['last_update']:
updated = 'never'
else:
updated = update['last_update']
irc.reply('{0} last updated: {1}'.format(
call['name'],
updated
), prefixNick=False)
cache = wrap(cache, [optional('channel'), 'text'])
def whoat(self, irc, msg, args, channel, optlist, system):
"""[<channel>] [--all] <system>
List characters and their ships in <system>. If --all is given, ignore the max lines
limitation.
"""
if not self.registryValue('full_access', channel):
irc.reply('Concord denies you access on this channel!')
return
rows = self._sql("""
SELECT * FROM corporation_membertracking
WHERE location ILIKE %s AND owner_id=%s""", ['%%{0}%%'.format(system),
self.corporationID], single=False)
if len(rows) == 0:
irc.reply('Found 0 characters in "{0}"'.format(
system
), prefixNick=False)
return
if len(rows) <= self.registryValue('max_lines', channel) or ('all', True) in optlist \
and len(rows) > 0:
for row in rows:
if row['shipType'] == 'Unknown Type':
ship = 'Pod'
else:
ship = row['shipType']
irc.reply('{0} :: {1} :: {2}'.format(
ircutils.bold(row['name']),
self._colorize_system(self._get_location_by_name(row['location'])),
ship
), prefixNick=False)
elif len(rows) > self.registryValue('max_lines', channel):
irc.reply('Found {0} characters in "{1}", but will not name them all'.format(
len(rows), system
), prefixNick=False)
whoat = wrap(whoat, [optional('channel'),
getopts({'all': ''}),
'text'])
def ship(self, irc, msg, args, channel, optlist, shiptype):
"""[<channel>] [--all] <shiptype>
List characters in <shiptype>. If --all is given, ignore the max lines
limitation.
"""
if not self.registryValue('full_access', channel):
irc.reply('Concord denies you access on this channel!')
return
rows = self._sql("""
SELECT "groupID", "groupName" FROM "invGroups"
WHERE "categoryID"=6 and "groupName" ILIKE %s AND published=true""", ['%%{0}%%'.format(shiptype)], db='sde', single=False)
if len(rows) > 1:
irc.reply('Found more than one shiptype: "{0}". Be more specific'.format(
[r['groupName'] for r in rows]
), prefixNick=False)
return
if len(rows) == 1:
invGroup = rows[0]
#find the ships which match the groupID of the ship type
ships = self._sql("""
SELECT "typeID", "typeName" FROM "invTypes"
WHERE "groupID"=%s AND published=true""", [invGroup['groupID']], db='sde', single=False)
typeIDs = [s['typeID'] for s in ships]
else:
# There was no group matching that name, but it could be a specific ship
invGroup = None
row = self._get_typeID('%%{0}%%'.format(shiptype))
if row:
typeIDs = [row,]
shiptype = self._get_type(row)['typeName']
else:
irc.reply('Unknown shiptype', prefixNick=False)
return
rows = self._sql("""
SELECT * FROM corporation_membertracking
WHERE owner_id=%s AND "shipTypeID" IN %s""",
[self.corporationID, tuple(typeIDs)], single=False)
if (len(rows) <= self.registryValue('max_lines', channel) or ('all', True) in optlist) \
and len(rows) > 0:
irc.reply('Found {0} characters in {1}'.format(
len(rows),
invGroup['groupName']
), prefixNick=False)
for row in rows:
if row['shipType'] == 'Unknown Type':
ship = 'Pod'
else:
ship = row['shipType']
irc.reply('{0} :: {1} :: {2}'.format(
ircutils.bold(row['name']),
self._colorize_system(self._get_location_by_name(row['location'])),
ship
), prefixNick=False)
elif len(rows) > self.registryValue('max_lines', channel):
irc.reply('Found {0} characters in {1}, but will not name them all'.format(
len(rows),
invGroup['groupName']
), prefixNick=False)
else:
if invGroup:
shiptype = invGroup['groupName']
irc.reply('Found {0} characters in {1}'.format(
len(rows),
shiptype
), prefixNick=False)
ship = wrap(ship, [optional('channel'),
getopts({'all': ''}),
'text'])
def chars(self, irc, msg, args, channel, username):
"""[<channel>] <user>
List all characters belonging to <user>
"""
if not self.registryValue('full_access', channel):
irc.reply('Concord denies you access on this channel!')
return
user = self._sql("""
SELECT * FROM accounting_capsuler
WHERE username=%s""", [username])
if not user:
irc.error('Could not find user "{0}"'.format(username))
return
chars = self._sql("""
SELECT * FROM character_charactersheet
WHERE owner_id=%s""", [user['id']], single=False)
if len(chars) == 0:
irc.reply('User "{0}" has 0 characters registered'.format(user['username']),
prefixNick=False)
else:
output = []
for char in chars:
output.append('{0} [{1}]'.format(
char['name'],
char['corporationName']
))
irc.reply('Found {0} characters: {1}'.format(
len(chars),
", ".join(output)
), prefixNick=False)
chars = wrap(chars, [optional('channel'), 'text'])
def player(self, irc, msg, args, channel, optlist, character):
"""[<channel>] <character>
List username of those who own *<character>*"""
if not self.registryValue('full_access', channel):
irc.reply('Concord denies you access on this channel!')
return
chars = self._sql("""
SELECT c.username, s.name AS character FROM accounting_capsuler c, character_charactersheet s
WHERE s.owner_id=c.id and s.name ILIKE %s;""", ['%%{0}%%'.format(character)], single=False)
if len(chars) == 0:
irc.reply('Found 0 characters like "{0}"'.format(character), prefixNick=False)
return
if (len(chars) <= self.registryValue('max_lines', channel) or ('all', True) in optlist) \
and len(chars) > 0:
for char in chars:
irc.reply('{0} :: {1}'.format(
ircutils.bold(char['username']),
ircutils.bold(char['character'])
), prefixNick=False)
elif len(chars) > self.registryValue('max_lines', channel):
irc.reply('Found {0} characters matching "{1}", but will list them all unless you use "owner --all {1}".'.format(
len(chars),
character,
), prefixNick=False)
player = wrap(player, [optional('channel'),
getopts({'all': ''}),
'text'])
    def price(self, irc, msg, args, optlist, typeName):
        """[--location=(<solarsystem>|<region>)] <typeName>
        Get price of an item at Jita or at a specific solar system/region.
        """
        # Resolve the item name to a typeID and its full row; any lookup
        # failure is reported to the user as an unknown type.
        try:
            typeID = self._get_typeID(typeName)
            itemType = self._get_type(typeID)
        except:
            irc.error('Unknown type')
            return
        # --location is the only supported option; default market hub is Jita.
        if len(optlist) == 1:
            location = optlist[0][1]
        else:
            location = 'Jita'
        # `location` starts as a name string and is replaced by the full
        # location record once the ID has been resolved.
        try:
            locationID = self._get_locationID(location)
            location = self._get_location(locationID)
        except:
            irc.error('Unknown location')
            return
        # Only locations that have been indexed have an evecentral_market row.
        market = self._sql("""
        SELECT * FROM evecentral_market
        WHERE "locationID"=%s""", [locationID])
        if not market:
            irc.reply('No data for that market location')
            return
        marketitem = self._sql("""
        SELECT * FROM evecentral_marketitem
        WHERE "locationID"=%s AND "typeID"=%s""", [locationID, typeID])
        if marketitem:
            # Buy/sell extremes with thousands separators; volumes as ints.
            irc.reply('{0} in {1}: buy max: {2} (volume: {3:,d}). sell min: {4} (volume: {5:,d}).'.format(
                ircutils.bold(itemType['typeName']),
                self._colorize_system(location),
                ircutils.mircColor(
                    '{:,.2f}'.format(marketitem['buy_max']),
                    fg='green'),
                int(marketitem['buy_volume']),
                ircutils.mircColor(
                    '{:,.2f}'.format(marketitem['sell_min']),
                    fg='green'),
                int(marketitem['sell_volume']),
            ), prefixNick=False)
        else:
            irc.reply("Prices for {0} in {1} isn't updated yet.".format(
                itemType['typeName'],
                location['itemName']
            ))
    price = wrap(price, [getopts({'location': 'text'}),
                         'text'])
def markets(self, irc, msg, args):
"""
List all price indexed markets.
"""
locationIDs = self._sql("""
SELECT "locationID" FROM evecentral_market""", None, single=False)
if len(locationIDs) == 0:
irc.reply('No prices have been indexed yet.', prefixNick=False)
return
output = []
for locationID in locationIDs:
locationID = locationID[0]
location = self._get_location(locationID)
if locationID < 30000000:
# This would be a region
output.append(ircutils.bold(location['itemName']))
else:
output.append(self._colorize_system(location))
irc.reply(', '.join(output), prefixNick=False)
markets = wrap(markets)
def meinshekels(self, irc, msg, args):
"""
List top krabs
"""
rows = self._sql("""
SELECT t.username, t.sum FROM (SELECT DISTINCT ON (u.username)
u.username, SUM(j.amount) from corporation_walletjournal j
RIGHT OUTER JOIN character_charactersheet c ON
c."characterID"=j."ownerID2"
RIGHT OUTER JOIN accounting_capsuler u ON u.id=c.owner_id
WHERE j."refTypeID" IN (85, 99) AND j.date > CURRENT_DATE -
INTERVAL '1 month' AND j.owner_id=%s
GROUP BY u.username, j.amount) t ORDER BY t.sum DESC LIMIT
5;
""", [self.corporationID], single=False)
print rows
if len(rows) == 0:
irc.reply('No bounties registered for last 30 days.')
else:
irc.reply('Top krabs:', prefixNick=False)
for row in rows:
irc.reply('{0}{1:>20}'.format(
ircutils.bold('{:<20}'.format(row['username'])),
'{:,}'.format(row['sum'])), prefixNick=False)
meinshekels = wrap(meinshekels)
def howmany(self, irc, msg, args, channel, typeName, locationName):
"""[<channel>] <typeName> <locationName>
List how many items matching <typeName> at location matching <locationName>.
"""
if not self.registryValue('full_access', channel):
irc.reply('Concord denies you access on this channel!')
return
rows = self._sql("""
SELECT
"typeName",
"locationName",
SUM("quantity") as amount
FROM
corporation_asset
WHERE
"typeName" ILIKE %s AND
"locationName" ILIKE %s AND
owner_id = %s
GROUP BY
"typeName",
"locationName"
""", [
'%%{0}%%'.format(typeName),
'%%{0}%%'.format(locationName),
self.corporationID
], single=False)
if len(rows) == 0:
irc.reply('Found 0 items at that location')
return
for row in rows:
location = self._get_location_by_name(row['locationName'])
irc.reply('{0} :: {1} :: {2}'.format(
row['typeName'],
self._colorize_system(location),
ircutils.bold('{:,f}'.format(row['amount']))
), prefixNick=False)
howmany = wrap(howmany, [
optional('channel'),
'something',
'something'
])
def evecommands(self, irc, msg, args):
"""
Prints an overview of available commands
"""
desc = "\n".join(("EVESpai commands:",
"{0} {1}".format(ircutils.bold("'evecommands'"), "List available commands."),
"{0} {1}".format(ircutils.bold("'pos [<system>]'"), "Lists all POSes."),
"{0} {1}".format(ircutils.bold("'evetime'"), "Get current time on Tranquility."),
"{0} {1}".format(ircutils.bold("'whereis <character>'"), "List the location and currently boarded ship of <character>."),
"{0} {1}".format(ircutils.bold("'cache <calltype>'"), "List the cache time of given call type."),
"{0} {1}".format(ircutils.bold("'whoat <system>'"), "List characters and their ships in <system>. If --all is given, ignore the max lines limitation."),
"{0} {1}".format(ircutils.bold("'ship <shiptype>'"), "List characters in <shiptype>."),
"{0} {1}".format(ircutils.bold("'chars <user>'"), "List all cha)racters belonging to <user>"),
"{0} {1}".format(ircutils.bold("'price [--location=(<solarsystem>|<region>)] <typeName>'"), "List buy/sell/volume of <type> in <location>, defaults to Jita."),
"{0} {1}".format(ircutils.bold("'markets'"), "List all price indexed markets."),
"{0} {1}".format(ircutils.bold("'player <character>'"), "List username of those who own *<character>*")))
for line in desc.splitlines():
irc.reply(line.strip(), prefixNick=False)
evecommands = wrap(evecommands)
Class = EVESpai
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| |
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name,too-many-locals
import pytest
from raiden.transfer.architecture import TransitionResult
from raiden.transfer.state_change import Block
from raiden.transfer.mediated_transfer import target
from raiden.transfer.mediated_transfer.state import TargetState
from raiden.transfer.mediated_transfer.state_change import (
ActionInitTarget,
ReceiveSecretReveal,
ReceiveBalanceProof,
)
from raiden.transfer.mediated_transfer.events import (
ContractSendChannelClose,
ContractSendWithdraw,
SendRevealSecret,
SendSecretRequest,
)
from raiden.transfer.state import CHANNEL_STATE_CLOSED
from . import factories
def make_init_state_change(our_address, amount, block_number, initiator, expire=None):
    """Build an ActionInitTarget for a freshly received mediated transfer."""
    # Default expiration leaves exactly one reveal timeout of headroom.
    expiration = block_number + factories.UNIT_REVEAL_TIMEOUT if expire is None else expire
    route, transfer = factories.make_from(amount, our_address, expiration, initiator)
    return ActionInitTarget(our_address, route, transfer, block_number)
def make_target_state(our_address, amount, block_number, initiator, expire=None):
    """Build a TargetState around a freshly received mediated transfer."""
    # Default expiration leaves exactly one reveal timeout of headroom.
    expiration = block_number + factories.UNIT_REVEAL_TIMEOUT if expire is None else expire
    route, transfer = factories.make_from(amount, our_address, expiration, initiator)
    return TargetState(our_address, route, transfer, block_number)
def test_events_for_close():
    """ Channel must be closed when the unsafe region is reached and the secret is known. """
    expire = 10
    initiator = factories.HOP1
    transfer = factories.make_transfer(
        3,
        initiator,
        factories.ADDR,
        expire,
        secret=factories.UNIT_SECRET,
    )
    route = factories.make_route(initiator, 3)
    # One block before the unsafe region nothing should happen.
    assert not target.events_for_close(transfer, route, expire - route.reveal_timeout - 1)
    # Inside the unsafe region the channel must be closed on-chain.
    events = target.events_for_close(transfer, route, expire - route.reveal_timeout)
    assert isinstance(events[0], ContractSendChannelClose)
    assert transfer.secret is not None
    assert events[0].channel_address == route.channel_address
def test_events_for_close_secret_unknown():
    """ Channel must not be closed when the unsafe region is reached and the
    secret is not known.
    """
    expire = 10
    initiator = factories.HOP1
    # No secret= argument: the lock's secret stays unknown.
    transfer = factories.make_transfer(
        3,
        initiator,
        factories.ADDR,
        expire,
    )
    route = factories.make_route(initiator, 3)
    # Without the secret no close event may be produced, safe region or not.
    assert not target.events_for_close(transfer, route, expire - route.reveal_timeout - 1)
    assert not target.events_for_close(transfer, route, expire - route.reveal_timeout)
    assert transfer.secret is None
def test_events_for_withdraw():
    """ On-chain withdraw must be done if the channel is closed, regardless of
    the unsafe region.
    """
    initiator = factories.HOP1
    transfer = factories.make_transfer(
        3,
        initiator,
        factories.ADDR,
        10,
        secret=factories.UNIT_SECRET,
    )
    route = factories.make_route(initiator, 3)
    # An open channel never triggers an on-chain withdraw.
    assert not target.events_for_withdraw(transfer, route)
    route.state = CHANNEL_STATE_CLOSED
    withdraw_events = target.events_for_withdraw(transfer, route)
    assert isinstance(withdraw_events[0], ContractSendWithdraw)
    assert withdraw_events[0].channel_address == route.channel_address
def test_handle_inittarget():
    """ Init transfer must send a secret request if the expiration is valid. """
    block_number = 1
    initiator = factories.HOP1
    # Expiration one block beyond the minimum keeps the transfer acceptable.
    route, transfer = factories.make_from(
        3,
        factories.ADDR,
        factories.UNIT_REVEAL_TIMEOUT + block_number + 1,
        initiator,
    )
    iteration = target.handle_inittarget(
        ActionInitTarget(factories.ADDR, route, transfer, block_number),
    )
    request = iteration.events[0]
    assert isinstance(request, SendSecretRequest)
    assert request.identifier == transfer.identifier
    assert request.amount == transfer.amount
    assert request.hashlock == transfer.hashlock
    assert request.receiver == initiator
def test_handle_inittarget_bad_expiration():
    """ Init transfer must do nothing if the expiration is bad. """
    block_number = 1
    # An expiration of exactly block_number + reveal timeout is too tight.
    route, transfer = factories.make_from(
        3,
        factories.ADDR,
        block_number + factories.UNIT_REVEAL_TIMEOUT,
        factories.HOP1,
    )
    state_change = ActionInitTarget(factories.ADDR, route, transfer, block_number)
    assert not target.handle_inittarget(state_change).events
def test_handle_secretreveal():
    """ The target node needs to inform the secret to the previous node to
    receive an updated balance proof.
    """
    initiator = factories.HOP1
    our_address = factories.ADDR
    secret = factories.UNIT_SECRET
    state = make_target_state(our_address, 3, 1, initiator)
    iteration = target.handle_secretreveal(state, ReceiveSecretReveal(secret, initiator))
    reveal_events = [
        event
        for event in iteration.events
        if isinstance(event, SendRevealSecret)
    ]
    assert iteration.new_state.state == 'reveal_secret'
    first = reveal_events[0]
    assert first.identifier == state.from_transfer.identifier
    assert first.secret == secret
    assert first.receiver == state.from_route.node_address
    assert first.sender == our_address
def test_handle_block():
    """ Increase the block number. """
    block_number = 1
    state = make_target_state(factories.ADDR, 3, block_number, factories.HOP6)
    iteration = target.state_transition(state, Block(block_number + 1))
    assert iteration.new_state.block_number == block_number + 1
def test_handle_block_equal_block_number():
    """ Nothing changes. """
    state = make_target_state(factories.ADDR, 3, 1, factories.HOP6)
    iteration = target.state_transition(state, Block(1))
    assert iteration.new_state.block_number == 1
def test_handle_block_lower_block_number():
    """ Nothing changes. """
    block_number = 1
    state = make_target_state(factories.ADDR, 3, block_number, factories.HOP6)
    iteration = target.state_transition(state, Block(block_number - 1))
    assert iteration.new_state.block_number == block_number
def test_clear_if_finalized_payed():
    """ Clear if the transfer is paid with a proof. """
    state = make_target_state(factories.ADDR, 3, 1, factories.HOP6)
    state.state = 'balance_proof'
    # A task that has received its balance proof is removed entirely.
    iteration = target.clear_if_finalized(TransitionResult(state, list()))
    assert iteration.new_state is None
def test_clear_if_finalized_expired():
    """ Clear expired locks that we don't know the secret for. """
    expire = 10 + factories.UNIT_REVEAL_TIMEOUT
    route, transfer = factories.make_from(
        3,
        factories.ADDR,
        expire,
        factories.HOP6,
    )
    # At the expiration block the task is still kept alive ...
    alive = target.clear_if_finalized(TransitionResult(
        TargetState(factories.ADDR, route, transfer, block_number=expire),
        list(),
    ))
    assert alive.new_state.from_transfer.secret is None
    assert alive.new_state is not None
    # ... one block past expiration it is dropped.
    dropped = target.clear_if_finalized(TransitionResult(
        TargetState(factories.ADDR, route, transfer, block_number=expire + 1),
        list(),
    ))
    assert dropped.new_state is None
def test_state_transition():
    """ Happy case testing. """
    amount = 7
    block_number = 1
    initiator = factories.HOP6
    expire = block_number + factories.UNIT_REVEAL_TIMEOUT
    from_route, from_transfer = factories.make_from(
        amount,
        factories.ADDR,
        expire,
        initiator,
    )
    init = ActionInitTarget(
        factories.ADDR,
        from_route,
        from_transfer,
        block_number,
    )
    # Initial transition from no state must create the target task.
    init_transition = target.state_transition(None, init)
    assert init_transition.new_state is not None
    assert init_transition.new_state.from_route == from_route
    assert init_transition.new_state.from_transfer == from_transfer
    first_new_block = Block(block_number + 1)
    first_block_iteration = target.state_transition(init_transition.new_state, first_new_block)
    assert first_block_iteration.new_state.block_number == block_number + 1
    secret_reveal = ReceiveSecretReveal(factories.UNIT_SECRET, initiator)
    reveal_iteration = target.state_transition(first_block_iteration.new_state, secret_reveal)
    assert reveal_iteration.new_state.from_transfer.secret == factories.UNIT_SECRET
    second_new_block = Block(block_number + 2)
    # NOTE(review): the remaining transitions reuse init_transition.new_state
    # instead of the latest iteration's state — this only works if
    # state_transition mutates the state object in place; confirm before
    # refactoring this test.
    second_block_iteration = target.state_transition(init_transition.new_state, second_new_block)
    assert second_block_iteration.new_state.block_number == block_number + 2
    balance_proof = ReceiveBalanceProof(
        from_transfer.identifier,
        from_route.channel_address,
        from_route.node_address,
    )
    # Receiving the balance proof finalizes and clears the task.
    proof_iteration = target.state_transition(init_transition.new_state, balance_proof)
    assert proof_iteration.new_state is None
@pytest.mark.xfail(reason='Not implemented #522')
def test_transfer_succesful_after_secret_learned():
    # NOTE(review): function name has a typo ("succesful"); kept as-is since
    # renaming a collected test changes reporting/selection.
    #
    # TransferCompleted event must be used only after the secret is learned and
    # there is enough time to unlock the lock on chain.
    #
    # A mediated transfer might be received during the settlement period of the
    # current channel, the secret request is sent to the initiator and at time
    # the secret is revealed there might not be enough time to safely unlock
    # the token on-chain.
    raise NotImplementedError()
| |
"""
Sparse matrix functions
"""
#
# Authors: Travis Oliphant, March 2002
# Anthony Scopatz, August 2012 (Sparse Updates)
# Jake Vanderplas, August 2012 (Sparse Updates)
#
from __future__ import division, print_function, absolute_import
__all__ = ['expm', 'inv']
import math
import numpy as np
import scipy.special
from scipy.linalg.basic import solve, solve_triangular
from scipy.sparse.base import isspmatrix
from scipy.sparse.construct import eye as speye
from scipy.sparse.linalg import spsolve
from scipy.sparse.sputils import is_pydata_spmatrix
import scipy.sparse
import scipy.sparse.linalg
from scipy.sparse.linalg.interface import LinearOperator
from ._expm_multiply import _ident_like, _exact_1_norm as _onenorm
UPPER_TRIANGULAR = 'upper_triangular'
def inv(A):
    """
    Compute the inverse of a sparse matrix

    Parameters
    ----------
    A : (M,M) ndarray or sparse matrix
        square matrix to be inverted

    Returns
    -------
    Ainv : (M,M) ndarray or sparse matrix
        inverse of `A`

    Notes
    -----
    This computes the sparse inverse of `A`. If the inverse of `A` is expected
    to be non-sparse, it will likely be faster to convert `A` to dense and use
    scipy.linalg.inv.

    Examples
    --------
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import inv
    >>> A = csc_matrix([[1., 0.], [1., 2.]])
    >>> Ainv = inv(A)
    >>> Ainv
    <2x2 sparse matrix of type '<class 'numpy.float64'>'
        with 3 stored elements in Compressed Sparse Column format>
    >>> A.dot(Ainv)
    <2x2 sparse matrix of type '<class 'numpy.float64'>'
        with 2 stored elements in Compressed Sparse Column format>
    >>> A.dot(Ainv).todense()
    matrix([[ 1.,  0.],
            [ 0.,  1.]])

    .. versionadded:: 0.12.0

    """
    # Dense input is rejected up front; solving A X = I then yields A^-1
    # while keeping the result in a sparse format.
    if not (scipy.sparse.isspmatrix(A) or is_pydata_spmatrix(A)):
        raise TypeError('Input must be a sparse matrix')
    return spsolve(A, _ident_like(A))
def _onenorm_matrix_power_nnm(A, p):
"""
Compute the 1-norm of a non-negative integer power of a non-negative matrix.
Parameters
----------
A : a square ndarray or matrix or sparse matrix
Input matrix with non-negative entries.
p : non-negative integer
The power to which the matrix is to be raised.
Returns
-------
out : float
The 1-norm of the matrix power p of A.
"""
# check input
if int(p) != p or p < 0:
raise ValueError('expected non-negative integer p')
p = int(p)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
# Explicitly make a column vector so that this works when A is a
# numpy matrix (in addition to ndarray and sparse matrix).
v = np.ones((A.shape[0], 1), dtype=float)
M = A.T
for i in range(p):
v = M.dot(v)
return np.max(v)
def _is_upper_triangular(A):
    # This function could possibly be of wider interest.
    #
    # Returns True when every entry strictly below the main diagonal is zero,
    # with a cheap structural check first for sparse inputs.
    if isspmatrix(A):
        lower_part = scipy.sparse.tril(A, -1)
        # Check structural upper triangularity,
        # then coincidental upper triangularity if needed.
        return lower_part.nnz == 0 or lower_part.count_nonzero() == 0
    elif is_pydata_spmatrix(A):
        # pydata/sparse arrays: nnz counts explicit nonzeros directly.
        import sparse
        lower_part = sparse.tril(A, -1)
        return lower_part.nnz == 0
    else:
        # Dense path: any nonzero strictly below the diagonal disqualifies.
        return not np.tril(A, -1).any()
def _smart_matrix_product(A, B, alpha=None, structure=None):
    """
    A matrix product that knows about sparse and structured matrices.

    Parameters
    ----------
    A : 2d ndarray
        First matrix.
    B : 2d ndarray
        Second matrix.
    alpha : float
        The matrix product will be scaled by this constant.
    structure : str, optional
        A string describing the structure of both matrices `A` and `B`.
        Only `upper_triangular` is currently supported.

    Returns
    -------
    M : 2d ndarray
        Matrix product of A and B.

    """
    if len(A.shape) != 2:
        raise ValueError('expected A to be a rectangular matrix')
    if len(B.shape) != 2:
        raise ValueError('expected B to be a rectangular matrix')
    f = None
    if structure == UPPER_TRIANGULAR:
        # When both operands are dense and upper triangular, the BLAS
        # trmm routine exploits the triangular structure of the multiply.
        if (not isspmatrix(A) and not isspmatrix(B)
                and not is_pydata_spmatrix(A) and not is_pydata_spmatrix(B)):
            f, = scipy.linalg.get_blas_funcs(('trmm',), (A, B))
    if f is not None:
        # trmm computes alpha * A @ B in one fused call.
        if alpha is None:
            alpha = 1.
        out = f(alpha, A, B)
    else:
        # Generic fallback: plain dot product, optionally scaled by alpha.
        if alpha is None:
            out = A.dot(B)
        else:
            out = alpha * A.dot(B)
    return out
class MatrixPowerOperator(LinearOperator):
    """Linear operator representing A**p without forming the power explicitly."""

    def __init__(self, A, p, structure=None):
        if A.ndim != 2 or A.shape[0] != A.shape[1]:
            raise ValueError('expected A to be like a square matrix')
        if p < 0:
            raise ValueError('expected p to be a non-negative integer')
        self._A = A
        self._p = p
        # Optional structure hint forwarded to _smart_matrix_product.
        self._structure = structure
        self.dtype = A.dtype
        self.ndim = A.ndim
        self.shape = A.shape

    def _matvec(self, x):
        # Apply A repeatedly: A**p @ x.
        for i in range(self._p):
            x = self._A.dot(x)
        return x

    def _rmatvec(self, x):
        # (A**p)^T @ x == (A^T)**p @ x.
        A_T = self._A.T
        x = x.ravel()
        for i in range(self._p):
            x = A_T.dot(x)
        return x

    def _matmat(self, X):
        for i in range(self._p):
            X = _smart_matrix_product(self._A, X, structure=self._structure)
        return X

    @property
    def T(self):
        # NOTE(review): the structure hint is not propagated here — the
        # transpose of an upper-triangular matrix is not upper triangular,
        # but confirm whether dropping it entirely is intentional.
        return MatrixPowerOperator(self._A.T, self._p)
class ProductOperator(LinearOperator):
    """
    For now, this is limited to products of multiple square matrices.
    """

    def __init__(self, *args, **kwargs):
        # Optional structure hint forwarded to _smart_matrix_product.
        self._structure = kwargs.get('structure', None)
        for A in args:
            if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
                raise ValueError(
                        'For now, the ProductOperator implementation is '
                        'limited to the product of multiple square matrices.')
        if args:
            n = args[0].shape[0]
            for A in args:
                for d in A.shape:
                    if d != n:
                        raise ValueError(
                                'The square matrices of the ProductOperator '
                                'must all have the same shape.')
        self.shape = (n, n)
        self.ndim = len(self.shape)
        # BUGFIX: np.find_common_type was removed in NumPy 2.0;
        # np.result_type performs the same dtype promotion here.
        self.dtype = np.result_type(*[x.dtype for x in args])
        self._operator_sequence = args

    def _matvec(self, x):
        # (A1 A2 ... Ak) @ x, applied right-to-left.
        for A in reversed(self._operator_sequence):
            x = A.dot(x)
        return x

    def _rmatvec(self, x):
        # (A1 A2 ... Ak)^T @ x == Ak^T ... A1^T @ x, applied left-to-right.
        x = x.ravel()
        for A in self._operator_sequence:
            x = A.T.dot(x)
        return x

    def _matmat(self, X):
        for A in reversed(self._operator_sequence):
            X = _smart_matrix_product(A, X, structure=self._structure)
        return X

    @property
    def T(self):
        # Transpose reverses the factor order and transposes each factor.
        T_args = [A.T for A in reversed(self._operator_sequence)]
        return ProductOperator(*T_args)
def _onenormest_matrix_power(A, p,
        t=2, itmax=5, compute_v=False, compute_w=False, structure=None):
    """
    Efficiently estimate the 1-norm of A^p.

    Parameters
    ----------
    A : ndarray
        Matrix whose 1-norm of a power is to be computed.
    p : int
        Non-negative integer power.
    t : int, optional
        A positive parameter controlling the tradeoff between
        accuracy versus time and memory usage.
        Larger values take longer and use more memory
        but give more accurate output.
    itmax : int, optional
        Use at most this many iterations.
    compute_v : bool, optional
        Request a norm-maximizing linear operator input vector if True.
    compute_w : bool, optional
        Request a norm-maximizing linear operator output vector if True.
    structure : str, optional
        A string describing the structure of matrix `A`.
        Only `upper_triangular` is currently supported.

    Returns
    -------
    est : float
        An underestimate of the 1-norm of the sparse matrix.
    v : ndarray, optional
        The vector such that ||Av||_1 == est*||v||_1.
        It can be thought of as an input to the linear operator
        that gives an output with particularly large norm.
    w : ndarray, optional
        The vector Av which has relatively large 1-norm.
        It can be thought of as an output of the linear operator
        that is relatively large in norm compared to the input.

    """
    # NOTE(review): t, itmax, compute_v and compute_w are accepted but not
    # forwarded to onenormest — confirm whether this is intentional.
    return scipy.sparse.linalg.onenormest(
            MatrixPowerOperator(A, p, structure=structure))
def _onenormest_product(operator_seq,
        t=2, itmax=5, compute_v=False, compute_w=False, structure=None):
    """
    Efficiently estimate the 1-norm of the matrix product of the args.

    Parameters
    ----------
    operator_seq : linear operator sequence
        Matrices whose 1-norm of product is to be computed.
    t : int, optional
        A positive parameter controlling the tradeoff between
        accuracy versus time and memory usage.
        Larger values take longer and use more memory
        but give more accurate output.
    itmax : int, optional
        Use at most this many iterations.
    compute_v : bool, optional
        Request a norm-maximizing linear operator input vector if True.
    compute_w : bool, optional
        Request a norm-maximizing linear operator output vector if True.
    structure : str, optional
        A string describing the structure of all operators.
        Only `upper_triangular` is currently supported.

    Returns
    -------
    est : float
        An underestimate of the 1-norm of the sparse matrix.
    v : ndarray, optional
        The vector such that ||Av||_1 == est*||v||_1.
        It can be thought of as an input to the linear operator
        that gives an output with particularly large norm.
    w : ndarray, optional
        The vector Av which has relatively large 1-norm.
        It can be thought of as an output of the linear operator
        that is relatively large in norm compared to the input.

    """
    # NOTE(review): t, itmax, compute_v and compute_w are accepted but not
    # forwarded to onenormest — confirm whether this is intentional.
    return scipy.sparse.linalg.onenormest(
            ProductOperator(*operator_seq, structure=structure))
class _ExpmPadeHelper(object):
    """
    Help lazily evaluate a matrix exponential.

    The idea is to not do more work than we need for high expm precision,
    so we lazily compute matrix powers and store or precompute
    other properties of the matrix.
    """

    def __init__(self, A, structure=None, use_exact_onenorm=False):
        """
        Initialize the object.

        Parameters
        ----------
        A : a dense or sparse square numpy matrix or ndarray
            The matrix to be exponentiated.
        structure : str, optional
            A string describing the structure of matrix `A`.
            Only `upper_triangular` is currently supported.
        use_exact_onenorm : bool, optional
            If True then only the exact one-norm of matrix powers and products
            will be used. Otherwise, the one-norm of powers and products
            may initially be estimated.
        """
        self.A = A
        # Lazily computed even powers of A; None until first access.
        self._A2 = None
        self._A4 = None
        self._A6 = None
        self._A8 = None
        self._A10 = None
        # Exact d-values d_p = ||A^p||_1 ** (1/p), cached once computed.
        self._d4_exact = None
        self._d6_exact = None
        self._d8_exact = None
        self._d10_exact = None
        # Estimated d-values (cheaper; used when exactness is not required).
        self._d4_approx = None
        self._d6_approx = None
        self._d8_approx = None
        self._d10_approx = None
        self.ident = _ident_like(A)
        self.structure = structure
        self.use_exact_onenorm = use_exact_onenorm

    @property
    def A2(self):
        # A**2, computed once and cached.
        if self._A2 is None:
            self._A2 = _smart_matrix_product(
                    self.A, self.A, structure=self.structure)
        return self._A2

    @property
    def A4(self):
        # A**4 = (A**2) @ (A**2), cached.
        if self._A4 is None:
            self._A4 = _smart_matrix_product(
                    self.A2, self.A2, structure=self.structure)
        return self._A4

    @property
    def A6(self):
        # A**6 = (A**4) @ (A**2), cached.
        if self._A6 is None:
            self._A6 = _smart_matrix_product(
                    self.A4, self.A2, structure=self.structure)
        return self._A6

    @property
    def A8(self):
        # A**8 = (A**6) @ (A**2), cached.
        if self._A8 is None:
            self._A8 = _smart_matrix_product(
                    self.A6, self.A2, structure=self.structure)
        return self._A8

    @property
    def A10(self):
        # A**10 = (A**4) @ (A**6), cached.
        if self._A10 is None:
            self._A10 = _smart_matrix_product(
                    self.A4, self.A6, structure=self.structure)
        return self._A10

    @property
    def d4_tight(self):
        # Exact d_4 = ||A^4||_1 ** (1/4).
        if self._d4_exact is None:
            self._d4_exact = _onenorm(self.A4)**(1/4.)
        return self._d4_exact

    @property
    def d6_tight(self):
        # Exact d_6 = ||A^6||_1 ** (1/6).
        if self._d6_exact is None:
            self._d6_exact = _onenorm(self.A6)**(1/6.)
        return self._d6_exact

    @property
    def d8_tight(self):
        # Exact d_8 = ||A^8||_1 ** (1/8).
        if self._d8_exact is None:
            self._d8_exact = _onenorm(self.A8)**(1/8.)
        return self._d8_exact

    @property
    def d10_tight(self):
        # Exact d_10 = ||A^10||_1 ** (1/10).
        if self._d10_exact is None:
            self._d10_exact = _onenorm(self.A10)**(1/10.)
        return self._d10_exact

    @property
    def d4_loose(self):
        # Prefer an already-known exact value; otherwise estimate cheaply.
        if self.use_exact_onenorm:
            return self.d4_tight
        if self._d4_exact is not None:
            return self._d4_exact
        else:
            if self._d4_approx is None:
                self._d4_approx = _onenormest_matrix_power(self.A2, 2,
                        structure=self.structure)**(1/4.)
            return self._d4_approx

    @property
    def d6_loose(self):
        # Prefer an already-known exact value; otherwise estimate cheaply.
        if self.use_exact_onenorm:
            return self.d6_tight
        if self._d6_exact is not None:
            return self._d6_exact
        else:
            if self._d6_approx is None:
                self._d6_approx = _onenormest_matrix_power(self.A2, 3,
                        structure=self.structure)**(1/6.)
            return self._d6_approx

    @property
    def d8_loose(self):
        # Prefer an already-known exact value; otherwise estimate cheaply.
        if self.use_exact_onenorm:
            return self.d8_tight
        if self._d8_exact is not None:
            return self._d8_exact
        else:
            if self._d8_approx is None:
                self._d8_approx = _onenormest_matrix_power(self.A4, 2,
                        structure=self.structure)**(1/8.)
            return self._d8_approx

    @property
    def d10_loose(self):
        # Prefer an already-known exact value; otherwise estimate cheaply.
        if self.use_exact_onenorm:
            return self.d10_tight
        if self._d10_exact is not None:
            return self._d10_exact
        else:
            if self._d10_approx is None:
                self._d10_approx = _onenormest_product((self.A4, self.A6),
                        structure=self.structure)**(1/10.)
            return self._d10_approx

    def pade3(self):
        # Degree-3 Pade approximant numerator/denominator pieces (U, V).
        b = (120., 60., 12., 1.)
        U = _smart_matrix_product(self.A,
                b[3]*self.A2 + b[1]*self.ident,
                structure=self.structure)
        V = b[2]*self.A2 + b[0]*self.ident
        return U, V

    def pade5(self):
        # Degree-5 Pade approximant numerator/denominator pieces (U, V).
        b = (30240., 15120., 3360., 420., 30., 1.)
        U = _smart_matrix_product(self.A,
                b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident,
                structure=self.structure)
        V = b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident
        return U, V

    def pade7(self):
        # Degree-7 Pade approximant numerator/denominator pieces (U, V).
        b = (17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.)
        U = _smart_matrix_product(self.A,
                b[7]*self.A6 + b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident,
                structure=self.structure)
        V = b[6]*self.A6 + b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident
        return U, V

    def pade9(self):
        # Degree-9 Pade approximant numerator/denominator pieces (U, V).
        b = (17643225600., 8821612800., 2075673600., 302702400., 30270240.,
                2162160., 110880., 3960., 90., 1.)
        U = _smart_matrix_product(self.A,
                (b[9]*self.A8 + b[7]*self.A6 + b[5]*self.A4 +
                    b[3]*self.A2 + b[1]*self.ident),
                structure=self.structure)
        V = (b[8]*self.A8 + b[6]*self.A6 + b[4]*self.A4 +
                b[2]*self.A2 + b[0]*self.ident)
        return U, V

    def pade13_scaled(self, s):
        # Degree-13 Pade approximant of exp(A / 2**s); the caller squares
        # the result s times afterwards (scaling-and-squaring).
        b = (64764752532480000., 32382376266240000., 7771770303897600.,
                1187353796428800., 129060195264000., 10559470521600.,
                670442572800., 33522128640., 1323241920., 40840800., 960960.,
                16380., 182., 1.)
        B = self.A * 2**-s
        B2 = self.A2 * 2**(-2*s)
        B4 = self.A4 * 2**(-4*s)
        B6 = self.A6 * 2**(-6*s)
        U2 = _smart_matrix_product(B6,
                b[13]*B6 + b[11]*B4 + b[9]*B2,
                structure=self.structure)
        U = _smart_matrix_product(B,
                (U2 + b[7]*B6 + b[5]*B4 +
                    b[3]*B2 + b[1]*self.ident),
                structure=self.structure)
        V2 = _smart_matrix_product(B6,
                b[12]*B6 + b[10]*B4 + b[8]*B2,
                structure=self.structure)
        V = V2 + b[6]*B6 + b[4]*B4 + b[2]*B2 + b[0]*self.ident
        return U, V
def expm(A):
    """
    Compute the matrix exponential using Pade approximation.

    Parameters
    ----------
    A : (M,M) array_like or sparse matrix
        2D Array or Matrix (sparse or dense) to be exponentiated

    Returns
    -------
    expA : (M,M) ndarray
        Matrix exponential of `A`

    Notes
    -----
    This is algorithm (6.1) which is a simplification of algorithm (5.1).

    .. versionadded:: 0.12.0

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009)
           "A New Scaling and Squaring Algorithm for the Matrix Exponential."
           SIAM Journal on Matrix Analysis and Applications.
           31 (3). pp. 970-989. ISSN 1095-7162

    Examples
    --------
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import expm
    >>> A = csc_matrix([[1, 0, 0], [0, 2, 0], [0, 0, 3]])
    >>> A.todense()
    matrix([[1, 0, 0],
            [0, 2, 0],
            [0, 0, 3]], dtype=int64)
    >>> Aexp = expm(A)
    >>> Aexp
    <3x3 sparse matrix of type '<class 'numpy.float64'>'
        with 3 stored elements in Compressed Sparse Column format>
    >>> Aexp.todense()
    matrix([[  2.71828183,   0.        ,   0.        ],
            [  0.        ,   7.3890561 ,   0.        ],
            [  0.        ,   0.        ,  20.08553692]])
    """
    # 'auto' lets _expm choose between exact and estimated one-norms
    # based on the matrix order.
    return _expm(A, use_exact_onenorm='auto')
def _expm(A, use_exact_onenorm):
    """Core of expm: scaling-and-squaring with Pade approximants.

    Tries Pade orders 3, 5, 7 and 9 against the theta thresholds of
    Al-Mohy and Higham (2009), falling back to a scaled order-13
    approximant followed by repeated squaring.

    Parameters
    ----------
    A : array_like or sparse matrix
        Square matrix to exponentiate.
    use_exact_onenorm : bool or 'auto'
        Whether one-norms of matrix powers are computed exactly or
        estimated; 'auto' picks based on the matrix order.
    """
    # Core of expm, separated to allow testing exact and approximate
    # algorithms.
    # Avoid indiscriminate asarray() to allow sparse or other strange arrays.
    if isinstance(A, (list, tuple, np.matrix)):
        A = np.asarray(A)
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected a square matrix')
    # gracefully handle size-0 input,
    # carefully handling sparse scenario
    if A.shape == (0, 0):
        out = np.zeros([0, 0], dtype=A.dtype)
        if isspmatrix(A) or is_pydata_spmatrix(A):
            return A.__class__(out)
        return out
    # Trivial case: exp of a 1x1 matrix is scalar exp.
    if A.shape == (1, 1):
        out = [[np.exp(A[0, 0])]]
        # Avoid indiscriminate casting to ndarray to
        # allow for sparse or other strange arrays
        if isspmatrix(A) or is_pydata_spmatrix(A):
            return A.__class__(out)
        return np.array(out)
    # Ensure input is of float type, to avoid integer overflows etc.
    if ((isinstance(A, np.ndarray) or isspmatrix(A) or is_pydata_spmatrix(A))
            and not np.issubdtype(A.dtype, np.inexact)):
        A = A.astype(float)
    # Detect upper triangularity (enables the exact-diagonal repair of
    # Code Fragment 2.1 after each squaring).
    structure = UPPER_TRIANGULAR if _is_upper_triangular(A) else None
    if use_exact_onenorm == "auto":
        # Hardcode a matrix order threshold for exact vs. estimated one-norms.
        use_exact_onenorm = A.shape[0] < 200
    # Track functions of A (cached powers, norm estimates) to help compute
    # the matrix exponential.
    h = _ExpmPadeHelper(
            A, structure=structure, use_exact_onenorm=use_exact_onenorm)
    # Try Pade order 3.  The numeric thresholds below are the theta_m
    # constants of Al-Mohy and Higham (2009), Table 3.1.
    eta_1 = max(h.d4_loose, h.d6_loose)
    if eta_1 < 1.495585217958292e-002 and _ell(h.A, 3) == 0:
        U, V = h.pade3()
        return _solve_P_Q(U, V, structure=structure)
    # Try Pade order 5.
    eta_2 = max(h.d4_tight, h.d6_loose)
    if eta_2 < 2.539398330063230e-001 and _ell(h.A, 5) == 0:
        U, V = h.pade5()
        return _solve_P_Q(U, V, structure=structure)
    # Try Pade orders 7 and 9.
    eta_3 = max(h.d6_tight, h.d8_loose)
    if eta_3 < 9.504178996162932e-001 and _ell(h.A, 7) == 0:
        U, V = h.pade7()
        return _solve_P_Q(U, V, structure=structure)
    if eta_3 < 2.097847961257068e+000 and _ell(h.A, 9) == 0:
        U, V = h.pade9()
        return _solve_P_Q(U, V, structure=structure)
    # Use Pade order 13.
    eta_4 = max(h.d8_loose, h.d10_loose)
    eta_5 = min(eta_3, eta_4)
    theta_13 = 4.25
    # Choose smallest s>=0 such that 2**(-s) eta_5 <= theta_13
    if eta_5 == 0:
        # Nilpotent special case
        s = 0
    else:
        s = max(int(np.ceil(np.log2(eta_5 / theta_13))), 0)
    # _ell may bump s to control the backward error of the approximant.
    s = s + _ell(2**-s * h.A, 13)
    U, V = h.pade13_scaled(s)
    X = _solve_P_Q(U, V, structure=structure)
    if structure == UPPER_TRIANGULAR:
        # Invoke Code Fragment 2.1.
        X = _fragment_2_1(X, h.A, s)
    else:
        # X = r_13(A)^(2^s) by repeated squaring.
        for i in range(s):
            X = X.dot(X)
    return X
def _solve_P_Q(U, V, structure=None):
    """Solve (-U + V) X = (U + V) to evaluate the Pade approximant.

    Parameters
    ----------
    U : ndarray
        Pade numerator (odd part).
    V : ndarray
        Pade denominator (even part).
    structure : str, optional
        Structure hint for both `U` and `V`; only `upper_triangular`
        is currently supported.

    Notes
    -----
    The `structure` argument is inspired by similar args
    for theano and cvxopt functions.
    """
    numerator = U + V
    denominator = -U + V
    # Sparse matrices must be routed through the sparse solver.
    if isspmatrix(U) or is_pydata_spmatrix(U):
        return spsolve(denominator, numerator)
    if structure is None:
        return solve(denominator, numerator)
    if structure == UPPER_TRIANGULAR:
        # Triangular solve is cheaper and preserves the structure.
        return solve_triangular(denominator, numerator)
    raise ValueError('unsupported matrix structure: ' + str(structure))
def _sinch(x):
"""
Stably evaluate sinch.
Notes
-----
The strategy of falling back to a sixth order Taylor expansion
was suggested by the Spallation Neutron Source docs
which was found on the internet by google search.
http://www.ornl.gov/~t6p/resources/xal/javadoc/gov/sns/tools/math/ElementaryFunction.html
The details of the cutoff point and the Horner-like evaluation
was picked without reference to anything in particular.
Note that sinch is not currently implemented in scipy.special,
whereas the "engineer's" definition of sinc is implemented.
The implementation of sinc involves a scaling factor of pi
that distinguishes it from the "mathematician's" version of sinc.
"""
# If x is small then use sixth order Taylor expansion.
# How small is small? I am using the point where the relative error
# of the approximation is less than 1e-14.
# If x is large then directly evaluate sinh(x) / x.
x2 = x*x
if abs(x) < 0.0135:
return 1 + (x2/6.)*(1 + (x2/20.)*(1 + (x2/42.)))
else:
return np.sinh(x) / x
def _eq_10_42(lam_1, lam_2, t_12):
    """
    Equation (10.42) of Functions of Matrices: Theory and Computation.
    Notes
    -----
    This is a helper function for _fragment_2_1 of expm_2009.
    Equation (10.42) is on page 251 in the section on Schur algorithms.
    In particular, section 10.4.3 explains the Schur-Parlett algorithm.
    expm([[lam_1, t_12], [0, lam_2]])
    =
    [[exp(lam_1), t_12*exp((lam_1 + lam_2)/2)*sinch((lam_1 - lam_2)/2)],
    [0, exp(lam_2)]]
    """
    # The plain formula t_12 * (exp(lam_2) - exp(lam_1)) / (lam_2 - lam_1)
    # apparently suffers from cancellation, according to Higham's textbook.
    # A nice implementation of sinch, defined as sinh(x)/x,
    # will apparently work around the cancellation.
    a = 0.5 * (lam_1 + lam_2)  # mean of the two eigenvalues
    b = 0.5 * (lam_1 - lam_2)  # half their difference
    return t_12 * np.exp(a) * _sinch(b)
def _fragment_2_1(X, T, s):
    """
    A helper function for expm_2009.

    Implements Code Fragment 2.1 of Al-Mohy and Higham (2009): during the
    repeated squaring of the upper triangular case, the diagonal and first
    superdiagonal of X are replaced by exact values computed from T.

    Notes
    -----
    The argument X is modified in-place, but this modification is not the same
    as the returned value of the function.
    This function also takes pains to do things in ways that are compatible
    with sparse matrices, for example by avoiding fancy indexing
    and by using methods of the matrices whenever possible instead of
    using functions of the numpy or scipy libraries themselves.
    """
    # Form X = r_m(2^-s T)
    # Replace diag(X) by exp(2^-s diag(T)).
    n = X.shape[0]
    diag_T = np.ravel(T.diagonal().copy())
    # Replace diag(X) by exp(2^-s diag(T)).
    scale = 2 ** -s
    exp_diag = np.exp(scale * diag_T)
    for k in range(n):
        X[k, k] = exp_diag[k]
    # Square s times; after each squaring, repair the diagonal and the
    # first superdiagonal with exact formulas.
    for i in range(s-1, -1, -1):
        X = X.dot(X)
        # Replace diag(X) by exp(2^-i diag(T)).
        scale = 2 ** -i
        exp_diag = np.exp(scale * diag_T)
        for k in range(n):
            X[k, k] = exp_diag[k]
        # Replace (first) superdiagonal of X by explicit formula
        # for superdiagonal of exp(2^-i T) from Eq (10.42) of
        # the author's 2008 textbook
        # Functions of Matrices: Theory and Computation.
        for k in range(n-1):
            lam_1 = scale * diag_T[k]
            lam_2 = scale * diag_T[k+1]
            t_12 = scale * T[k, k+1]
            value = _eq_10_42(lam_1, lam_2, t_12)
            X[k, k+1] = value
    # Return the updated X matrix.
    return X
def _ell(A, m):
    """Return the extra scaling needed to bound the Pade backward error.

    A helper function for expm_2009.

    Parameters
    ----------
    A : linear operator
        A linear operator whose norm of power we care about.
    m : int
        The power of the linear operator

    Returns
    -------
    value : int
        A non-negative integer related to the backward error bound.
    """
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be like a square matrix')
    # 1 / |c_{2m+1}|, where the c_i are the coefficients explained in
    # (2.2) and (2.6) of the 2005 expm paper (generating function series).
    inv_abs_c = float(scipy.special.comb(2*m, m, exact=True)
                      * math.factorial(2*m + 1))
    # One-norm of |A|**(2m+1).
    norm_abs_power = _onenorm_matrix_power_nnm(abs(A), 2*m + 1)
    if not norm_abs_power:
        # Zero norm: no additional scaling is required.
        return 0
    # Unit roundoff of IEEE double precision arithmetic, as explained
    # after Eq. (1.2) of the 2009 expm paper.
    unit_roundoff = 2**-53
    alpha = norm_abs_power / (_onenorm(A) * inv_abs_c)
    shift = int(np.ceil(np.log2(alpha / unit_roundoff) / (2 * m)))
    return max(shift, 0)
| |
from datetime import datetime
import hashlib
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from markdown import markdown
import bleach
from flask import current_app, request
from flask_login import UserMixin, AnonymousUserMixin
from . import db, login_manager
class Permission:
    # Bit-flag permission constants.  A role's ``permissions`` value is the
    # bitwise OR of the flags granted to that role (0xff grants everything).
    FOLLOW = 0x01              # follow other users
    COMMENT = 0x02             # comment on posts
    WRITE_ARTICLES = 0x04      # write original posts
    MODERATE_COMMENTS = 0x08   # moderate other users' comments
    ADMINISTER = 0x80          # full administrative access
class Role(db.Model):
    """A named set of permissions that users can be assigned to."""
    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)
    # True for the role given to newly registered users; exactly one role
    # should carry default=True.
    default = db.Column(db.Boolean, default=False, index=True)
    # Bitwise OR of Permission flags granted to this role.
    permissions = db.Column(db.Integer)
    users = db.relationship('User', backref='role', lazy='dynamic')
    @staticmethod
    def insert_roles():
        """Create or update the built-in roles.

        Idempotent: existing roles are updated in place, so this can be
        re-run whenever the permission sets change.
        """
        roles = {
            'User': (Permission.FOLLOW |
                     Permission.COMMENT |
                     Permission.WRITE_ARTICLES, True),
            'Moderator': (Permission.FOLLOW |
                          Permission.COMMENT |
                          Permission.WRITE_ARTICLES |
                          Permission.MODERATE_COMMENTS, False),
            'Administrator': (0xff, False)
        }
        for r in roles:
            role = Role.query.filter_by(name=r).first()
            if role is None:
                role = Role(name=r)
            role.permissions = roles[r][0]
            role.default = roles[r][1]
            db.session.add(role)
        db.session.commit()
    def __repr__(self):
        return '<Role %r>' % self.name
class Follow(db.Model):
    """Association object linking a follower to a followed user."""
    __tablename__ = 'follows'
    # The user doing the following.
    follower_id = db.Column(db.Integer, db.ForeignKey('users.id'),
                            primary_key=True)
    # The user being followed.
    followed_id = db.Column(db.Integer, db.ForeignKey('users.id'),
                            primary_key=True)
    timestamp = db.Column(db.DateTime, default=datetime.utcnow)
class User(UserMixin, db.Model):
    """A registered account: credentials, profile data, the follower graph,
    and token-based account management (confirmation, password reset and
    email change)."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(64), unique=True, index=True)
    username = db.Column(db.String(64), unique=True, index=True)
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
    # Werkzeug password hash; plaintext passwords are never stored.
    password_hash = db.Column(db.String(128))
    confirmed = db.Column(db.Boolean, default=False)
    name = db.Column(db.String(64))
    location = db.Column(db.String(64))
    about_me = db.Column(db.Text())
    member_since = db.Column(db.DateTime(), default=datetime.utcnow)
    last_seen = db.Column(db.DateTime(), default=datetime.utcnow)
    # Cached MD5 digest of the email, used to build avatar URLs.
    avatar_hash = db.Column(db.String(32))
    posts = db.relationship('Post', backref='author', lazy='dynamic')
    # Follow rows where this user is the follower (users we follow).
    followed = db.relationship('Follow',
                               foreign_keys=[Follow.follower_id],
                               backref=db.backref('follower', lazy='joined'),
                               lazy='dynamic',
                               cascade='all, delete-orphan')
    # Follow rows where this user is the followed one (our followers).
    followers = db.relationship('Follow',
                                foreign_keys=[Follow.followed_id],
                                backref=db.backref('followed', lazy='joined'),
                                lazy='dynamic',
                                cascade='all, delete-orphan')
    comments = db.relationship('Comment', backref='author', lazy='dynamic')
    @staticmethod
    def generate_fake(count=100):
        """Insert up to ``count`` random users (development/test helper).

        Commits once per user; users that collide on a unique column are
        silently skipped via rollback on IntegrityError.
        """
        from sqlalchemy.exc import IntegrityError
        from random import seed
        import forgery_py
        seed()
        for i in range(count):
            u = User(email=forgery_py.internet.email_address(),
                     username=forgery_py.internet.user_name(True),
                     password=forgery_py.lorem_ipsum.word(),
                     confirmed=True,
                     name=forgery_py.name.full_name(),
                     location=forgery_py.address.city(),
                     about_me=forgery_py.lorem_ipsum.sentence(),
                     member_since=forgery_py.date.date(True))
            db.session.add(u)
            try:
                db.session.commit()
            except IntegrityError:
                db.session.rollback()
    @staticmethod
    def add_self_follows():
        """Backfill self-follows so every user's feed shows their own posts."""
        for user in User.query.all():
            if not user.is_following(user):
                user.follow(user)
                db.session.add(user)
                db.session.commit()
    def __init__(self, **kwargs):
        """Assign the default role (Administrator for FLASKY_ADMIN's email),
        pre-compute the avatar hash, and make the user follow itself."""
        super(User, self).__init__(**kwargs)
        if self.role is None:
            if self.email == current_app.config['FLASKY_ADMIN']:
                self.role = Role.query.filter_by(permissions=0xff).first()
            if self.role is None:
                self.role = Role.query.filter_by(default=True).first()
        if self.email is not None and self.avatar_hash is None:
            self.avatar_hash = hashlib.md5(
                self.email.encode('utf-8')).hexdigest()
        # Self-follow so followed_posts includes the user's own posts.
        self.followed.append(Follow(followed=self))
    @property
    def password(self):
        """Write-only attribute: reading raises, assignment stores a hash."""
        raise AttributeError('password is not a readable attribute')
    @password.setter
    def password(self, password):
        self.password_hash = generate_password_hash(password)
    def verify_password(self, password):
        """Return True if ``password`` matches the stored hash."""
        return check_password_hash(self.password_hash, password)
    def generate_confirmation_token(self, expiration=3600):
        """Return a signed account-confirmation token valid for
        ``expiration`` seconds."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'confirm': self.id})
    def confirm(self, token):
        """Validate a confirmation token and mark the account confirmed.

        Returns False for a bad/expired token or one issued for another
        user; returns True (and stages the update) on success.
        """
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        # Fixed: was a bare ``except:``, which also swallowed SystemExit
        # and KeyboardInterrupt.
        except Exception:
            return False
        if data.get('confirm') != self.id:
            return False
        self.confirmed = True
        db.session.add(self)
        return True
    def generate_reset_token(self, expiration=3600):
        """Return a signed password-reset token valid for ``expiration``
        seconds."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'reset': self.id})
    def reset_password(self, token, new_password):
        """Validate a reset token and set ``new_password``.

        Returns False for a bad/expired token or one issued for another
        user; returns True on success.
        """
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        # Fixed: narrowed from a bare ``except:``.
        except Exception:
            return False
        if data.get('reset') != self.id:
            return False
        self.password = new_password
        db.session.add(self)
        return True
    def generate_email_change_token(self, new_email, expiration=3600):
        """Return a signed token authorizing a change to ``new_email``."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'change_email': self.id, 'new_email': new_email})
    def change_email(self, token):
        """Validate an email-change token and apply the new address.

        Fails (returns False) on a bad token, a token for another user, a
        missing new address, or an address already in use.  The avatar
        hash is recomputed on success.
        """
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        # Fixed: narrowed from a bare ``except:``.
        except Exception:
            return False
        if data.get('change_email') != self.id:
            return False
        new_email = data.get('new_email')
        if new_email is None:
            return False
        if self.query.filter_by(email=new_email).first() is not None:
            return False
        self.email = new_email
        self.avatar_hash = hashlib.md5(
            self.email.encode('utf-8')).hexdigest()
        db.session.add(self)
        return True
    def can(self, permissions):
        """Return True if this user's role grants all bits in
        ``permissions``."""
        return self.role is not None and \
            (self.role.permissions & permissions) == permissions
    def is_administrator(self):
        return self.can(Permission.ADMINISTER)
    def ping(self):
        """Refresh ``last_seen``; called on every authenticated request."""
        self.last_seen = datetime.utcnow()
        db.session.add(self)
    def gravatar(self, size=100, default='identicon', rating='g'):
        """Return an avatar URL for this user.

        NOTE(review): hard-coded plain-http mirror host — presumably a
        deliberate regional mirror choice; confirm before changing.
        """
        url = 'http://gravatar.duoshuo.com/avatar'
        # Renamed local from ``hash`` to avoid shadowing the builtin.
        digest = self.avatar_hash or hashlib.md5(
            self.email.encode('utf-8')).hexdigest()
        return '{url}/{hash}?s={size}&d={default}&r={rating}'.format(
            url=url, hash=digest, size=size, default=default, rating=rating)
    def follow(self, user):
        """Start following ``user`` (no-op if already following)."""
        if not self.is_following(user):
            f = Follow(follower=self, followed=user)
            db.session.add(f)
    def unfollow(self, user):
        """Stop following ``user`` (no-op if not following)."""
        f = self.followed.filter_by(followed_id=user.id).first()
        if f:
            db.session.delete(f)
    def is_following(self, user):
        return self.followed.filter_by(
            followed_id=user.id).first() is not None
    def is_followed_by(self, user):
        return self.followers.filter_by(
            follower_id=user.id).first() is not None
    @property
    def followed_posts(self):
        """Query for posts authored by users this user follows (including
        self, thanks to the self-follow)."""
        return Post.query.join(Follow, Follow.followed_id == Post.author_id)\
            .filter(Follow.follower_id == self.id)
    def __repr__(self):
        return '<User %r>' % self.username
class AnonymousUser(AnonymousUserMixin):
    """Stand-in for unauthenticated visitors: grants no permissions, so
    templates and views can call can()/is_administrator() uniformly."""
    def can(self, permissions):
        return False
    def is_administrator(self):
        return False
# Register our permission-aware anonymous user with Flask-Login.
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: load a user from the session's unicode ID."""
    return User.query.get(int(user_id))
class Post(db.Model):
    """A blog post authored by a user."""
    __tablename__ = 'posts'
    id = db.Column(db.Integer, primary_key=True)
    # Raw Markdown source.
    body = db.Column(db.Text)
    # Sanitized HTML rendering of ``body``, kept in sync by on_changed_body.
    body_html = db.Column(db.Text)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    comments = db.relationship('Comment', backref='post', lazy='dynamic')
    @staticmethod
    def generate_fake(count=100):
        """Insert ``count`` random posts by random existing users
        (development/test helper)."""
        from random import seed, randint
        import forgery_py
        seed()
        user_count = User.query.count()
        for i in range(count):
            # offset() with a random index picks a random existing user.
            u = User.query.offset(randint(0, user_count - 1)).first()
            p = Post(body=forgery_py.lorem_ipsum.sentences(randint(1, 5)),
                     timestamp=forgery_py.date.date(True),
                     author=u)
            db.session.add(p)
            db.session.commit()
    @staticmethod
    def on_changed_body(target, value, oldvalue, initiator):
        """SQLAlchemy 'set' listener: render Markdown to sanitized HTML."""
        allowed_tags = ['a', 'abbr', 'acronym', 'b', 'blockquote', 'code',
                        'em', 'i', 'li', 'ol', 'pre', 'strong', 'ul',
                        'h1', 'h2', 'h3', 'p', 'img']
        allowed_attrs = {
            '*': ['class'],
            'img': ['src', 'alt'],
            'a': ['href', 'rel']
        }
        # markdown -> bleach.clean (tag/attribute whitelist) -> linkify.
        target.body_html = bleach.linkify(bleach.clean(
            markdown(value, output_format='html'),
            tags=allowed_tags, strip=False, attributes=allowed_attrs))
# Re-render body_html whenever Post.body is assigned.
db.event.listen(Post.body, 'set', Post.on_changed_body)
class Comment(db.Model):
    """A user comment attached to a post."""
    __tablename__ = 'comments'
    id = db.Column(db.Integer, primary_key=True)
    # Raw Markdown source.
    body = db.Column(db.Text)
    # Sanitized HTML rendering of ``body``, kept in sync by on_changed_body.
    body_html = db.Column(db.Text)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    # True when a moderator has suppressed this comment.
    disabled = db.Column(db.Boolean)
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    post_id = db.Column(db.Integer, db.ForeignKey('posts.id'))
    @staticmethod
    def on_changed_body(target, value, oldvalue, initiator):
        """SQLAlchemy 'set' listener: render Markdown to sanitized HTML.

        Comments allow a smaller tag whitelist than posts and strip (rather
        than escape) disallowed markup.
        """
        allowed_tags = ['a', 'abbr', 'acronym', 'b', 'code', 'em', 'i',
                        'strong']
        target.body_html = bleach.linkify(bleach.clean(
            markdown(value, output_format='html'),
            tags=allowed_tags, strip=True))
# Re-render body_html whenever Comment.body is assigned.
db.event.listen(Comment.body, 'set', Comment.on_changed_body)
| |
r"""
SQL Connection object (:mod:`qiita_db.sql_connection`)
======================================================
.. currentmodule:: qiita_db.sql_connection
This modules provides wrappers for the psycopg2 module to allow easy use of
transaction blocks and SQL execution/data retrieval.
This module provides the variable TRN, which is the transaction available
to use in the system. The singleton pattern is applied and this works as long
as the system remains single-threaded.
Classes
-------
.. autosummary::
:toctree: generated/
Transaction
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from contextlib import contextmanager
from itertools import chain
from functools import wraps
from psycopg2 import (connect, ProgrammingError, Error as PostgresError,
OperationalError, errorcodes)
from psycopg2.extras import DictCursor
from psycopg2.extensions import TRANSACTION_STATUS_IDLE
from qiita_core.qiita_settings import qiita_config
def _checker(func):
"""Decorator to check that methods are executed inside the context"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if self._contexts_entered == 0:
raise RuntimeError(
"Operation not permitted. Transaction methods can only be "
"invoked within the context manager.")
return func(self, *args, **kwargs)
return wrapper
class Transaction(object):
    """A context manager that encapsulates a DB transaction
    A transaction is defined by a series of consecutive queries that need to
    be applied to the database as a single block.
    Raises
    ------
    RuntimeError
        If the transaction methods are invoked outside a context.
    Notes
    -----
    When the execution leaves the context manager, any remaining queries in
    the transaction will be executed and committed.
    """
    def __init__(self, admin=False):
        # Pending (sql, args) pairs that have not been executed yet.
        self._queries = []
        # Results of already-executed queries, one entry per query.
        self._results = []
        # Re-entrancy counter; the context manager may be nested.
        self._contexts_entered = 0
        self._connection = None
        # Callbacks queued to run after the next commit / rollback.
        self._post_commit_funcs = []
        self._post_rollback_funcs = []
        # When True, connect with admin credentials and autocommit.
        self.admin = admin
    def _open_connection(self):
        """Open (or reuse) the postgres connection, translating the common
        OperationalError cases into a RuntimeError with a helpful hint."""
        # If the connection already exists and is not closed, don't do anything
        if self._connection is not None and self._connection.closed == 0:
            return
        try:
            if self.admin:
                self._connection = connect(
                    user=qiita_config.admin_user,
                    password=qiita_config.admin_password,
                    host=qiita_config.host,
                    port=qiita_config.port)
                self._connection.autocommit = True
            else:
                self._connection = connect(user=qiita_config.user,
                                           password=qiita_config.password,
                                           database=qiita_config.database,
                                           host=qiita_config.host,
                                           port=qiita_config.port)
        except OperationalError as e:
            # catch three known common exceptions and raise runtime errors
            try:
                # The error class is the second colon-delimited word of the
                # psycopg2 message, e.g. 'FATAL: database "x" does not exist'.
                etype = str(e).split(':')[1].split()[0]
            except IndexError:
                # we received a really unanticipated error without a colon
                etype = ''
            if etype == 'database':
                etext = ('This is likely because the database `%s` has not '
                         'been created or has been dropped.' %
                         qiita_config.database)
            elif etype == 'role':
                etext = ('This is likely because the user string `%s` '
                         'supplied in your configuration file `%s` is '
                         'incorrect or not an authorized postgres user.' %
                         (qiita_config.user, qiita_config.conf_fp))
            elif etype == 'Connection':
                etext = ('This is likely because postgres isn\'t '
                         'running. Check that postgres is correctly '
                         'installed and is running.')
            else:
                # we received a really unanticipated error with a colon
                etext = ''
            ebase = ('An OperationalError with the following message occured'
                     '\n\n\t%s\n%s For more information, review `INSTALL.md`'
                     ' in the Qiita installation base directory.')
            raise RuntimeError(ebase % (str(e), etext))
    def close(self):
        """Close the underlying connection, if one was ever opened."""
        if self._connection is not None:
            self._connection.close()
    @contextmanager
    def _get_cursor(self):
        """Returns a postgres cursor
        Returns
        -------
        psycopg2.cursor
            The psycopg2 cursor
        Raises
        ------
        RuntimeError
            if the cursor cannot be created
        """
        self._open_connection()
        try:
            with self._connection.cursor(cursor_factory=DictCursor) as cur:
                yield cur
        except PostgresError as e:
            raise RuntimeError("Cannot get postgres cursor: %s" % e)
    def __enter__(self):
        self._open_connection()
        # Track nesting depth; clean-up happens only in the outermost exit.
        self._contexts_entered += 1
        return self
    def _clean_up(self, exc_type):
        """Commit remaining work or roll back, depending on whether the
        context is exiting because of an exception (``exc_type`` is the
        exception type, or None on a clean exit)."""
        if exc_type is not None:
            # An exception occurred during the execution of the transaction
            # Make sure that we leave the DB w/o any modification
            self.rollback()
        elif self._queries:
            # There are still queries to be executed, execute them
            # It is safe to use the execute method here, as internally is
            # wrapped in a try/except and rollbacks in case of failure
            self.execute()
            self.commit()
        elif self._connection.get_transaction_status() != \
                TRANSACTION_STATUS_IDLE:
            # There are no queries to be executed, however, the transaction
            # is still not committed. Commit it so the changes are not lost
            self.commit()
    def __exit__(self, exc_type, exc_value, traceback):
        # We only need to perform some action if this is the last context
        # that we are entering
        if self._contexts_entered == 1:
            # We need to wrap the entire function in a try/finally because
            # at the end we need to decrement _contexts_entered
            try:
                self._clean_up(exc_type)
            finally:
                self._contexts_entered -= 1
        else:
            self._contexts_entered -= 1
    def _raise_execution_error(self, sql, sql_args, error):
        """Rollbacks the current transaction and raises a useful error
        The error message contains the name of the transaction, the failed
        query, the arguments of the failed query and the error generated.
        Raises
        ------
        ValueError
        """
        # NOTE(review): the docstring promises the failed query and its
        # arguments in the message, but only the error text is included —
        # confirm whether sql/sql_args should be added to the message.
        self.rollback()
        try:
            ec_lu = errorcodes.lookup(error.pgcode)
            raise ValueError(
                "Error running SQL: %s. MSG: %s\n" % (ec_lu, str(error)))
        except (KeyError, AttributeError):
            # pgcode missing or unknown: fall back to the raw error text.
            raise ValueError("Error running SQL query: %s" % str(error))
    @_checker
    def add(self, sql, sql_args=None, many=False):
        """Add a sql query to the transaction
        Parameters
        ----------
        sql : str
            The sql query
        sql_args : list, tuple or dict of objects, optional
            The arguments to the sql query
        many : bool, optional
            Whether or not we should add the query multiple times to the
            transaction
        Raises
        ------
        TypeError
            If `sql_args` is provided and is not a list, tuple or dict
        RuntimeError
            If invoked outside a context
        Notes
        -----
        If `many` is true, `sql_args` should be a list of lists, tuples or
        dicts, in which each element of the list contains the parameters for
        one SQL query of the many. Each element on the list is all the
        parameters for a single one of the many queries added. The amount of
        SQL queries added to the list is len(sql_args).
        """
        if not many:
            # Normalize to the `many` shape: a list with a single args entry.
            sql_args = [sql_args]
        for args in sql_args:
            if args:
                if not isinstance(args, (list, tuple, dict)):
                    raise TypeError("sql_args should be a list, tuple or dict."
                                    " Found %s" % type(args))
            self._queries.append((sql, args))
    def _execute(self):
        """Internal function that actually executes the transaction
        The `execute` function exposed in the API wraps this one to make sure
        that we catch any exception that happens in here and we rollback the
        transaction
        """
        with self._get_cursor() as cur:
            for sql, sql_args in self._queries:
                # Execute the current SQL command
                try:
                    cur.execute(sql, sql_args)
                except Exception as e:
                    # We catch any exception as we want to make sure that we
                    # rollback every time that something went wrong
                    self._raise_execution_error(sql, sql_args, e)
                try:
                    res = cur.fetchall()
                except ProgrammingError:
                    # At this execution point, we don't know if the sql query
                    # that we executed should retrieve values from the database
                    # If the query was not supposed to retrieve any value
                    # (e.g. an INSERT without a RETURNING clause), it will
                    # raise a ProgrammingError. Otherwise it will just return
                    # an empty list
                    res = None
                except PostgresError as e:
                    # Some other error happened during the execution of the
                    # query, so we need to rollback
                    self._raise_execution_error(sql, sql_args, e)
                # Store the results of the current query
                self._results.append(res)
        # wipe out the already executed queries
        self._queries = []
        return self._results
    @_checker
    def execute(self):
        """Executes the transaction
        Returns
        -------
        list of DictCursor
            The results of all the SQL queries in the transaction
        Raises
        ------
        RuntimeError
            If invoked outside a context
        Notes
        -----
        If any exception occurs during the execution transaction, a rollback
        is executed and no changes are reflected in the database.
        When calling execute, the transaction will never be committed, it will
        be automatically committed when leaving the context
        See Also
        --------
        execute_fetchlast
        execute_fetchindex
        execute_fetchflatten
        """
        try:
            return self._execute()
        except Exception:
            self.rollback()
            raise
    @_checker
    def execute_fetchlast(self):
        """Executes the transaction and returns the last result
        This is a convenient function that is equivalent to
        `self.execute()[-1][0][0]`
        Returns
        -------
        object
            The first value of the last SQL query executed
        See Also
        --------
        execute
        execute_fetchindex
        execute_fetchflatten
        """
        return self.execute()[-1][0][0]
    @_checker
    def execute_fetchindex(self, idx=-1):
        """Executes the transaction and returns the results of the `idx` query
        This is a convenient function that is equivalent to
        `self.execute()[idx]`
        Parameters
        ----------
        idx : int, optional
            The index of the query to return the result. It defaults to -1, the
            last query.
        Returns
        -------
        DictCursor
            The results of the `idx` query in the transaction
        See Also
        --------
        execute
        execute_fetchlast
        execute_fetchflatten
        """
        return self.execute()[idx]
    @_checker
    def execute_fetchflatten(self, idx=-1):
        """Executes the transaction and returns the flattened results of the
        `idx` query
        This is a convenient function that is equivalent to
        `chain.from_iterable(self.execute()[idx])`
        Parameters
        ----------
        idx : int, optional
            The index of the query to return the result. It defaults to -1, the
            last query.
        Returns
        -------
        list of objects
            The flattened results of the `idx` query
        See Also
        --------
        execute
        execute_fetchlast
        execute_fetchindex
        """
        return list(chain.from_iterable(self.execute()[idx]))
    def _funcs_executor(self, funcs, func_str):
        """Run the queued post-commit/post-rollback callbacks, collecting
        failures and raising a single RuntimeError afterwards if any."""
        error_msg = []
        for f, args, kwargs in funcs:
            try:
                f(*args, **kwargs)
            except Exception as e:
                error_msg.append(str(e))
        # The functions in these two lines are mutually exclusive. When one of
        # them is executed, we can restore both of them.
        self._post_commit_funcs = []
        self._post_rollback_funcs = []
        if error_msg:
            raise RuntimeError(
                "An error occurred during the post %s commands:\n%s"
                % (func_str, "\n".join(error_msg)))
    @_checker
    def commit(self):
        """Commits the transaction and reset the queries
        Raises
        ------
        RuntimeError
            If invoked outside a context
        """
        # Reset the queries, the results and the index
        self._queries = []
        self._results = []
        try:
            self._connection.commit()
        except Exception:
            # A failed commit leaves the connection unusable; close it so a
            # fresh one is opened on next use.
            self._connection.close()
            raise
        # Execute the post commit functions
        self._funcs_executor(self._post_commit_funcs, "commit")
    @_checker
    def rollback(self):
        """Rollbacks the transaction and reset the queries
        Raises
        ------
        RuntimeError
            If invoked outside a context
        """
        # Reset the queries, the results and the index
        self._queries = []
        self._results = []
        if self._connection is not None and self._connection.closed == 0:
            try:
                self._connection.rollback()
            except Exception:
                self._connection.close()
                raise
        # Execute the post rollback functions
        self._funcs_executor(self._post_rollback_funcs, "rollback")
    @property
    def index(self):
        """Number of queries added to the transaction so far (pending plus
        already executed)."""
        return len(self._queries) + len(self._results)
    @_checker
    def add_post_commit_func(self, func, *args, **kwargs):
        """Adds a post commit function
        The function added will be executed after the next commit in the
        transaction, unless a rollback is executed. This is useful, for
        example, to perform some filesystem clean up once the transaction is
        committed.
        Parameters
        ----------
        func : function
            The function to add for the post commit functions
        args : tuple
            The arguments of the function
        kwargs : dict
            The keyword arguments of the function
        """
        self._post_commit_funcs.append((func, args, kwargs))
    @_checker
    def add_post_rollback_func(self, func, *args, **kwargs):
        """Adds a post rollback function
        The function added will be executed after the next rollback in the
        transaction, unless a commit is executed. This is useful, for example,
        to restore the filesystem in case a rollback occurs, avoiding leaving
        the database and the filesystem in an out of sync state.
        Parameters
        ----------
        func : function
            The function to add for the post rollback functions
        args : tuple
            The arguments of the function
        kwargs : dict
            The keyword arguments of the function
        """
        self._post_rollback_funcs.append((func, args, kwargs))
# Singleton pattern, create the transaction for the entire system
TRN = Transaction()
# Separate singleton for operations requiring admin credentials/autocommit.
TRNADMIN = Transaction(admin=True)
def perform_as_transaction(sql, parameters=None):
    """Opens, adds and executes sql as a single transaction

    Parameters
    ----------
    sql : str
        The SQL to execute
    parameters : object, optional
        The object of parameters to pass to the TRN.add command
    """
    with TRN:
        # Only forward `parameters` when it is truthy, matching TRN.add's
        # optional-args contract.
        call_args = (sql, parameters) if parameters else (sql,)
        TRN.add(*call_args)
        TRN.execute()
def create_new_transaction():
    """Creates a new global transaction
    This is needed when using multiprocessing
    """
    # Rebind the module-level singleton so a forked/spawned process does not
    # share a postgres connection with its parent.
    global TRN
    TRN = Transaction()
| |
from abstractStatelessProxyTestCase import AbstractStatelessProxyTestCase
from ..sipmessaging import SIPURI
from ..siptransport import SimulatedSIPTransport
from ..sipentity import SIPStatelessProxy
from ..siptransport import SimulatedNetwork
class TestStatelessProxyWithSimulatedTransport(AbstractStatelessProxyTestCase):
    def setUp(self):
        """Build a two-proxy topology (atlanta, biloxi) with endpoint
        transports for alice and bob, wiring event handlers that record
        every request/response each party receives.  Alice is pre-connected
        to atlanta; bob is deliberately left unconnected to biloxi."""
        SimulatedNetwork.clear()
        # Received-message logs appended to by the event handlers below.
        self.aliceReceivedRequests = []
        self.aliceReceivedResponses = []
        self.atlantaReceivedRequests = []
        self.atlantaReceivedResponses = []
        self.biloxiReceivedRequests = []
        self.biloxiReceivedResponses = []
        self.bobReceivedRequests = []
        self.bobReceivedResponses = []
        self.atlanta = SIPStatelessProxy()
        self.atlanta.transports = [SimulatedSIPTransport(self.atlanta_bind_address, self.atlanta_bind_port)]
        self.biloxi = SIPStatelessProxy()
        self.biloxi.transports = [SimulatedSIPTransport(self.biloxi_bind_address, self.biloxi_bind_port)]
        # NOTE(review): attribute naming is mixed (alice_bind_address vs.
        # aliceBindPort) — presumably defined on the abstract base class;
        # confirm there before renaming.
        self.alice_transport = SimulatedSIPTransport(self.alice_bind_address, self.aliceBindPort)
        self.bob_transport = SimulatedSIPTransport(self.bob_bind_address, self.bobBindPort)
        self.alice_transport.when_event_do("receivedValidConnectedRequest", self.aliceRequestEventHandler)
        self.alice_transport.when_event_do("receivedValidConnectedResponse", self.aliceResponseEventHandler)
        self.atlanta.transports[0].when_event_do("receivedValidConnectedRequest", self.atlantaRequestEventHandler)
        self.atlanta.transports[0].when_event_do("receivedValidConnectedResponse", self.atlantaResponseEventHandler)
        self.biloxi.transports[0].when_event_do("receivedValidConnectedRequest", self.biloxiRequestEventHandler)
        self.biloxi.transports[0].when_event_do("receivedValidConnectedResponse", self.biloxiResponseEventHandler)
        self.bob_transport.when_event_do("receivedValidConnectedRequest", self.bobRequestEventHandler)
        self.bob_transport.when_event_do("receivedValidConnectedResponse", self.bobResponseEventHandler)
        self.alice_transport.bind()
        self.bob_transport.bind()
        self.alice_transport.connect_to_address_and_port(self.atlanta_bind_address, self.atlanta_bind_port)
        # Let Biloxi connect to Bob. Don't pre-connect Bob to Biloxi.
        # self.bob_transport.connect_to_address_and_port(self.biloxi_bind_address, self.biloxi_bind_port)
        # TODO: need to bind?
    def test(self):
        """Run the scenario steps explicitly, in order.

        A single entry point is used (instead of separate test_* methods)
        because the steps share state and must execute sequentially.
        """
        self.run_00_initialSanityCheck()
        self.run_01_atlantaToBiloxi()
        self.run_02_biloxiToAtlanta()
    def run_00_initialSanityCheck(self):
        """Verify the pre-traffic topology built by setUp()."""
        self.assertEqual(1, len(self.atlanta.transports))
        self.assertEqual(1, len(self.biloxi.transports))
        # Only Alice pre-connected (to atlanta); biloxi has no connections yet.
        self.assertEqual(1, len(self.atlanta.transports[0].connections))
        self.assertEqual(0, len(self.biloxi.transports[0].connections))
        self.assertEqual(self.atlanta_bind_address, self.atlanta.transports[0].bind_address)
        self.assertEqual(self.atlanta_bind_port, self.atlanta.transports[0].bind_port)
        self.assertEqual(self.atlanta_bind_address, self.atlanta.transports[0].connections[0].bind_address)
        self.assertEqual(self.atlanta_bind_port, self.atlanta.transports[0].connections[0].bind_port)
        self.assertEqual(self.atlanta_bind_address, self.alice_transport.connections[0].remoteAddress)
        self.assertEqual(self.atlanta_bind_port, self.alice_transport.connections[0].remotePort)
        self.assertEqual(self.alice_bind_address, self.atlanta.transports[0].connections[0].remoteAddress)
        self.assertEqual(self.aliceBindPort, self.atlanta.transports[0].connections[0].remotePort)
        self.assertEqual(self.biloxi_bind_address, self.biloxi.transports[0].bind_address)
        self.assertEqual(self.biloxi_bind_port, self.biloxi.transports[0].bind_port)
        # No traffic yet: every capture list is empty.
        self.assertEqual(0, len(self.aliceReceivedRequests))
        self.assertEqual(0, len(self.aliceReceivedResponses))
        self.assertEqual(0, len(self.atlantaReceivedRequests))
        self.assertEqual(0, len(self.atlantaReceivedResponses))
        self.assertEqual(0, len(self.biloxiReceivedRequests))
        self.assertEqual(0, len(self.biloxiReceivedResponses))
        self.assertEqual(0, len(self.bobReceivedRequests))
        self.assertEqual(0, len(self.bobReceivedResponses))
    def run_01_atlantaToBiloxi(self):
        """Alice INVITEs extension 1002 at biloxi through atlanta; biloxi answers 404 for now."""
        self.alice_transport.connections[0].send_string(self.aliceRequestString)
        # Request flowed Alice -> atlanta -> biloxi; the response came back to atlanta.
        self.assertEqual(0, len(self.aliceReceivedRequests))
        # self.assertEqual(1, len(self.aliceReceivedResponses))
        self.assertEqual(1, len(self.atlantaReceivedRequests))
        self.assertEqual(1, len(self.biloxiReceivedRequests))
        self.assertEqual(1, len(self.atlantaReceivedResponses))
        self.assertEqual(0, len(self.biloxiReceivedResponses))
        self.assertEqual(0, len(self.bobReceivedRequests))
        self.assertEqual(0, len(self.bobReceivedResponses))
        # What atlanta saw arrive from Alice.
        self.assertEqual(self.alice_bind_address, self.atlantaReceivedRequests[0].connection.remoteAddress)
        self.assertEqual(self.aliceBindPort, self.atlantaReceivedRequests[0].connection.remotePort)
        self.assertEqual(self.atlanta_bind_address, self.atlantaReceivedRequests[0].connection.bind_address)
        self.assertEqual(self.atlanta_bind_port, self.atlantaReceivedRequests[0].connection.bind_port)
        atlanta_received_request = self.atlantaReceivedRequests[0].sip_message
        ruri = SIPURI.new_parsed_from(atlanta_received_request.start_line.request_uri)
        self.assertEqual(self.aliceRequestString, atlanta_received_request.raw_string)
        self.assertEqual('INVITE', atlanta_received_request.start_line.sip_method)
        self.assertEqual(self.biloxi_bind_address, ruri.host)
        self.assertEqual(1, len(atlanta_received_request.vias))
        # NOTE(review): duplicate of the raw_string assertion a few lines up — harmless, could be dropped.
        self.assertEqual(self.aliceRequestString, atlanta_received_request.raw_string)
        self.assertIsNone(atlanta_received_request.header.to_tag)
        # What biloxi saw arrive from atlanta: same request-URI, but one Via added (2 vs 1).
        self.assertEqual(self.atlanta_bind_address, self.biloxiReceivedRequests[0].connection.remoteAddress)
        self.assertEqual(self.atlanta_bind_port, self.biloxiReceivedRequests[0].connection.remotePort)
        self.assertEqual(self.biloxi_bind_address, self.biloxiReceivedRequests[0].connection.bind_address)
        self.assertEqual(self.biloxi_bind_port, self.biloxiReceivedRequests[0].connection.bind_port)
        biloxi_received_request = self.biloxiReceivedRequests[0].sip_message
        self.assertEqual(atlanta_received_request.start_line.request_uri, biloxi_received_request.start_line.request_uri)
        self.assertEqual('INVITE', biloxi_received_request.start_line.sip_method)
        self.assertEqual(2, len(biloxi_received_request.vias))
        self.assertNotEqual(self.aliceRequestString, biloxi_received_request.raw_string)
        self.assertIsNone(biloxi_received_request.header.to_tag)
        # Response path: biloxi -> atlanta (response carries a to-tag and both Vias)...
        self.assertEqual(self.biloxi_bind_address, self.atlantaReceivedResponses[0].connection.remoteAddress)
        self.assertEqual(self.biloxi_bind_port, self.atlantaReceivedResponses[0].connection.remotePort)
        self.assertEqual(self.atlanta_bind_address, self.atlantaReceivedResponses[0].connection.bind_address)
        self.assertEqual(self.atlanta_bind_port, self.atlantaReceivedResponses[0].connection.bind_port)
        atlanta_received_response = self.atlantaReceivedResponses[0].sip_message
        self.assertIsNotNone(atlanta_received_response.header.to_tag)
        self.assertEqual(2, len(atlanta_received_response.vias))
        # ...then atlanta -> Alice with the proxy's Via stripped (1 Via left).
        self.assertEqual(self.atlanta_bind_address, self.aliceReceivedResponses[0].connection.remoteAddress)
        self.assertEqual(self.atlanta_bind_port, self.aliceReceivedResponses[0].connection.remotePort)
        self.assertEqual(self.alice_bind_address, self.aliceReceivedResponses[0].connection.bind_address)
        self.assertEqual(self.aliceBindPort, self.aliceReceivedResponses[0].connection.bind_port)
        alice_received_response = self.aliceReceivedResponses[0].sip_message
        self.assertIsNotNone(alice_received_response.header.to_tag)
        self.assertEqual(1, len(alice_received_response.vias))
        self.assertEqual(self.alice_bind_address, atlanta_received_request.via_header_fields[0].host)
        # TODO: This 404 nonsense is temporary. Alice sends to a biloxi domain via atlanta, atlanta forwards her request to biloxi,
        # Biloxi sees that it is responsible for the request, and for right now, just answers 404.
        self.assertEqual(404, self.atlantaReceivedResponses[0].sip_message.start_line.status_code)
        self.assertEqual(404, self.aliceReceivedResponses[0].sip_message.start_line.status_code)
        # TODO: Moar!!!
def run_02_biloxiToAtlanta(self):
pass
    # Fixture network constants. The commented-out values are the original
    # physical-testbed addresses; loopback aliases are used instead.
    @property
    def alice_bind_address(self):
        # return '192.168.4.4'
        return '127.0.0.2'
    @property
    def aliceBindPort(self):
        # Note the port in the Via header field...
        # return 5060
        return 63354
    @property
    def atlanta_bind_address(self):
        # return '192.168.4.2'
        return '127.0.0.3'
    @property
    def atlanta_bind_port(self):
        return 5060
    @property
    def biloxi_bind_address(self):
        # return '192.168.4.3'
        return '127.0.0.4'
    @property
    def biloxi_bind_port(self):
        return 5060
    @property
    def bob_bind_address(self):
        # return '192.168.4.5'
        return '127.0.0.5'
    @property
    def bobBindPort(self):
        return 5060
@property
def aliceRequestString(self):
# Bob's extension is 1002
# atlanta == .2 / .97
# biloxi == .3 / .96
# alice == .2 / .188
# bob == .5 / .204
# message_string = ('INVITE sip:1002@192.168.4.3 SIP/2.0\r\n'
# 'Via: SIP/2.0/UDP 192.168.4.4:63354;branch=z9hG4bK-524287-1---7a462a5d1b6fe13b;rport\r\n'
# 'Max-Forwards: 70\r\n'
# 'Contact: <sip:alice@192.168.4.4:63354;rinstance=d875ce4fd8f72441>\r\n'
# 'To: <sip:1002@192.168.4.3>\r\n'
# 'From: "Alice"<sip:alice@192.168.4.2>;tag=9980376d\r\n'
# 'Call-ID: YjBhMDliMWMxNzQ4ZTc5Nzg1ZTcyYTExMWMzZDlhNmQ\r\n'
# 'CSeq: 1 INVITE\r\n'
# 'Allow: INVITE, ACK, CANCEL, BYE, REFER, INFO, NOTIFY, UPDATE, PRACK, MESSAGE, OPTIONS, SUBSCRIBE, OPTIONS\r\n'
# 'Content-Type: application/sdp\r\n'
# 'Supported: replaces, 100rel\r\n'
# 'User-Agent: Bria iOS release 3.6.2 stamp 33024\r\n'
# 'Content-Length: 185\r\n'
# '\r\n'
# 'v=0\r\n'
# 'o=- 1457365987528724 1 IN IP4 192.168.4.4\r\n'
# 's=Cpc session\r\n'
# 'c=IN IP4 192.168.4.4\r\n'
# 't=0 0\r\n'
# 'm=audio 60668 RTP/AVP 0 101\r\n'
# 'a=rtpmap:101 telephone-event/8000\r\n'
# 'a=fmtp:101 0-15\r\n'
# 'a=sendrecv\r\n')
message_string = ('INVITE sip:1002@127.0.0.4 SIP/2.0\r\n'
'Via: SIP/2.0/UDP 127.0.0.2:63354;branch=z9hG4bK-524287-1---7a462a5d1b6fe13b;rport\r\n'
'Max-Forwards: 70\r\n'
'Contact: <sip:alice@127.0.0.2:63354;rinstance=d875ce4fd8f72441>\r\n'
'To: <sip:1002@127.0.0.4>\r\n'
'From: "Alice"<sip:alice@127.0.0.3>;tag=9980376d\r\n'
'Call-ID: YjBhMDliMWMxNzQ4ZTc5Nzg1ZTcyYTExMWMzZDlhNmQ\r\n'
'CSeq: 1 INVITE\r\n'
'Allow: INVITE, ACK, CANCEL, BYE, REFER, INFO, NOTIFY, UPDATE, PRACK, MESSAGE, OPTIONS, SUBSCRIBE, OPTIONS\r\n'
'Content-Type: application/sdp\r\n'
'Supported: replaces, 100rel\r\n'
'User-Agent: Bria iOS release 3.6.2 stamp 33024\r\n'
'Content-Length: 181\r\n'
'\r\n'
'v=0\r\n'
'o=- 1457365987528724 1 IN IP4 127.0.0.2\r\n'
's=Cpc session\r\n'
'c=IN IP4 127.0.0.2\r\n'
't=0 0\r\n'
'm=audio 60668 RTP/AVP 0 101\r\n'
'a=rtpmap:101 telephone-event/8000\r\n'
'a=fmtp:101 0-15\r\n'
'a=sendrecv\r\n')
return message_string
# @property
# def aliceResponseString(self):
# TODO: need to fix up the addresses and transport type and stuff.
# atlanta == .2 / .97
# biloxi == .3 / .96
# alice == .4 / .188
# bob == .5 / .204
# message_string = ('SIP/2.0 180 Ringing\r\n'
# 'Via: SIP/2.0/UDP 192.168.4.2;branch=z9hG4bKeb83.c2fe646b6c2d21c6f9f113d37c474768.0\r\n'
# 'Via: SIP/2.0/UDP 192.168.4.3:56731;received=192.168.4.4;branch=z9hG4bK-524287-1---e500d061e354193a;rport=56731\r\n'
# 'Via: SIP/2.0/UDP 192.168.4.5;branch=z9hG4bKeb83.c2fe646b6c2d21c6f9f113d37c474768.0\r\n'
# 'Record-Route: <sip:192.168.4.2;lr>\r\n'
# 'Record-Route: <sip:192.168.4.3;lr>\r\n'
# 'Require: 100rel\r\n'
# 'Contact: <sip:1002@192.168.0.204:52909;rinstance=7caea32dab180286>\r\n'
# 'To: "Bob"<sip:1002@192.168.0.96>;tag=52e9ef73\r\n'
# 'From: "Alice"<sip:1001@192.168.0.96>;tag=2210ba44\r\n'
# 'Call-ID: NTM5YzAxN2YwZGRhYTg2YjBkNDgyNWQyNTI3ZGNmNTE\r\n'
# 'CSeq: 1 INVITE\r\n'
# 'Allow: INVITE, ACK, CANCEL, BYE, REFER, INFO, NOTIFY, UPDATE, PRACK, MESSAGE, OPTIONS, SUBSCRIBE, OPTIONS\r\n'
# 'Supported: replaces\r\n'
# 'User-Agent: Bria iOS release 3.6.2 stamp 33024\r\n'
# 'Allow-Events: talk, hold\r\n'
# 'RSeq: 1\r\n'
# 'Content-Length: 0\r\n'
# '\r\n')
# message_string = ('SIP/2.0 180 Ringing\r\n'
# 'Via: SIP/2.0/UDP 127.0.0.3;branch=z9hG4bKeb83.c2fe646b6c2d21c6f9f113d37c474768.0\r\n'
# 'Via: SIP/2.0/UDP 127.0.0.4:56731;received=127.0.0.2;branch=z9hG4bK-524287-1---e500d061e354193a;rport=56731\r\n'
# 'Via: SIP/2.0/UDP 127.0.0.5;branch=z9hG4bKeb83.c2fe646b6c2d21c6f9f113d37c474768.0\r\n'
# 'Record-Route: <sip:127.0.0.3;lr>\r\n'
# 'Record-Route: <sip:127.0.0.4;lr>\r\n'
# 'Require: 100rel\r\n'
# 'Contact: <sip:1002@192.168.0.204:52909;rinstance=7caea32dab180286>\r\n'
# 'To: "Bob"<sip:1002@192.168.0.96>;tag=52e9ef73\r\n'
# 'From: "Alice"<sip:1001@192.168.0.96>;tag=2210ba44\r\n'
# 'Call-ID: NTM5YzAxN2YwZGRhYTg2YjBkNDgyNWQyNTI3ZGNmNTE\r\n'
# 'CSeq: 1 INVITE\r\n'
# 'Allow: INVITE, ACK, CANCEL, BYE, REFER, INFO, NOTIFY, UPDATE, PRACK, MESSAGE, OPTIONS, SUBSCRIBE, OPTIONS\r\n'
# 'Supported: replaces\r\n'
# 'User-Agent: Bria iOS release 3.6.2 stamp 33024\r\n'
# 'Allow-Events: talk, hold\r\n'
# 'RSeq: 1\r\n'
# 'Content-Length: 0\r\n'
# '\r\n')
# return message_string
@property
def bobRequestString(self):
# Alice's extension is 1001
# atlanta == .2
# biloxi == .3
# alice == .4
# bob == .5
# message_string = ('INVITE sip:1001@192.168.4.2 SIP/2.0\r\n'
# 'Via: SIP/2.0/UDP 192.168.4.5:63354;branch=z9hG4bK-524287-1---7a462a5d1b6fe13b;rport\r\n'
# 'Max-Forwards: 70\r\n'
# 'Contact: <sip:bob@192.168.4.3:63354;rinstance=d875ce4fd8f72441>\r\n'
# 'To: <sip:1001@192.168.4.2>\r\n'
# 'From: "Alice"<sip:alice@192.168.4.2>;tag=9980376d\r\n'
# 'Call-ID: YjBhMDliMWMxNzQ4ZTc5Nzg1ZTcyYTExMWMzZDlhNmQ\r\n'
# 'CSeq: 1 INVITE\r\n'
# 'Allow: INVITE, ACK, CANCEL, BYE, REFER, INFO, NOTIFY, UPDATE, PRACK, MESSAGE, OPTIONS, SUBSCRIBE, OPTIONS\r\n'
# 'Content-Type: application/sdp\r\n'
# 'Supported: replaces, 100rel\r\n'
# 'User-Agent: Bria iOS release 3.6.2 stamp 33024\r\n'
# 'Content-Length: 185\r\n'
# '\r\n'
# 'v=0\r\n'
# 'o=- 1457365987528724 1 IN IP4 192.168.4.5\r\n'
# 's=Cpc session\r\n'
# 'c=IN IP4 192.168.4.5\r\n'
# 't=0 0\r\n'
# 'm=audio 60668 RTP/AVP 0 101\r\n'
# 'a=rtpmap:101 telephone-event/8000\r\n'
# 'a=fmtp:101 0-15\r\n'
# 'a=sendrecv\r\n')
message_string = ('INVITE sip:1001@127.0.0.3 SIP/2.0\r\n'
'Via: SIP/2.0/UDP 127.0.0.5:63354;branch=z9hG4bK-524287-1---7a462a5d1b6fe13b;rport\r\n'
'Max-Forwards: 70\r\n'
'Contact: <sip:bob@127.0.0.4:63354;rinstance=d875ce4fd8f72441>\r\n'
'To: <sip:1001@127.0.0.3>\r\n'
'From: "Alice"<sip:alice@127.0.0.3>;tag=9980376d\r\n'
'Call-ID: YjBhMDliMWMxNzQ4ZTc5Nzg1ZTcyYTExMWMzZDlhNmQ\r\n'
'CSeq: 1 INVITE\r\n'
'Allow: INVITE, ACK, CANCEL, BYE, REFER, INFO, NOTIFY, UPDATE, PRACK, MESSAGE, OPTIONS, SUBSCRIBE, OPTIONS\r\n'
'Content-Type: application/sdp\r\n'
'Supported: replaces, 100rel\r\n'
'User-Agent: Bria iOS release 3.6.2 stamp 33024\r\n'
'Content-Length: 181\r\n'
'\r\n'
'v=0\r\n'
'o=- 1457365987528724 1 IN IP4 127.0.0.5\r\n'
's=Cpc session\r\n'
'c=IN IP4 127.0.0.5\r\n'
't=0 0\r\n'
'm=audio 60668 RTP/AVP 0 101\r\n'
'a=rtpmap:101 telephone-event/8000\r\n'
'a=fmtp:101 0-15\r\n'
'a=sendrecv\r\n')
return message_string
# @property
# def bobResponseString(self):
# TODO: need to fix up the addresses and transport type and stuff.
# atlanta == .2 / .97
# biloxi == .3 / .96
# alice == .4 / .188
# bob == .5 / .204
# message_string = ('SIP/2.0 180 Ringing\r\n'
# 'Via: SIP/2.0/UDP 192.168.4.3;branch=z9hG4bKeb83.c2fe646b6c2d21c6f9f113d37c474768.0\r\n'
# 'Via: SIP/2.0/UDP 192.168.4.2:56731;received=192.168.4.4;branch=z9hG4bK-524287-1---e500d061e354193a;rport=56731\r\n'
# 'Via: SIP/2.0/UDP 192.168.4.4;branch=z9hG4bKeb83.c2fe646b6c2d21c6f9f113d37c474768.0\r\n'
# 'Record-Route: <sip:192.168.4.3;lr>\r\n'
# 'Record-Route: <sip:192.168.4.2;lr>\r\n'
# 'Require: 100rel\r\n'
# 'Contact: <sip:1002@192.168.0.204:52909;rinstance=7caea32dab180286>\r\n'
# 'To: "Bob"<sip:1002@192.168.0.96>;tag=52e9ef73\r\n'
# 'From: "Alice"<sip:1001@192.168.0.96>;tag=2210ba44\r\n'
# 'Call-ID: NTM5YzAxN2YwZGRhYTg2YjBkNDgyNWQyNTI3ZGNmNTE\r\n'
# 'CSeq: 1 INVITE\r\n'
# 'Allow: INVITE, ACK, CANCEL, BYE, REFER, INFO, NOTIFY, UPDATE, PRACK, MESSAGE, OPTIONS, SUBSCRIBE, OPTIONS\r\n'
# 'Supported: replaces\r\n'
# 'User-Agent: Bria iOS release 3.6.2 stamp 33024\r\n'
# 'Allow-Events: talk, hold\r\n'
# 'RSeq: 1\r\n'
# 'Content-Length: 0\r\n'
# '\r\n')
# message_string = ('SIP/2.0 180 Ringing\r\n'
# 'Via: SIP/2.0/UDP 127.0.0.4;branch=z9hG4bKeb83.c2fe646b6c2d21c6f9f113d37c474768.0\r\n'
# 'Via: SIP/2.0/UDP 127.0.0.3:56731;received=127.0.0.2;branch=z9hG4bK-524287-1---e500d061e354193a;rport=56731\r\n'
# 'Via: SIP/2.0/UDP 127.0.0.2;branch=z9hG4bKeb83.c2fe646b6c2d21c6f9f113d37c474768.0\r\n'
# 'Record-Route: <sip:127.0.0.4;lr>\r\n'
# 'Record-Route: <sip:127.0.0.3;lr>\r\n'
# 'Require: 100rel\r\n'
# 'Contact: <sip:1002@192.168.0.204:52909;rinstance=7caea32dab180286>\r\n'
# 'To: "Bob"<sip:1002@192.168.0.96>;tag=52e9ef73\r\n'
# 'From: "Alice"<sip:1001@192.168.0.96>;tag=2210ba44\r\n'
# 'Call-ID: NTM5YzAxN2YwZGRhYTg2YjBkNDgyNWQyNTI3ZGNmNTE\r\n'
# 'CSeq: 1 INVITE\r\n'
# 'Allow: INVITE, ACK, CANCEL, BYE, REFER, INFO, NOTIFY, UPDATE, PRACK, MESSAGE, OPTIONS, SUBSCRIBE, OPTIONS\r\n'
# 'Supported: replaces\r\n'
# 'User-Agent: Bria iOS release 3.6.2 stamp 33024\r\n'
# 'Allow-Events: talk, hold\r\n'
# 'RSeq: 1\r\n'
# 'Content-Length: 0\r\n'
# '\r\n')
# return message_string
    # Event handlers: each simply records the observed message for later assertions.
    def aliceRequestEventHandler(self, a_connected_aip_message):
        self.aliceReceivedRequests.append(a_connected_aip_message)
    def aliceResponseEventHandler(self, a_connected_aip_message):
        self.aliceReceivedResponses.append(a_connected_aip_message)
    def atlantaRequestEventHandler(self, a_connected_aip_message):
        self.atlantaReceivedRequests.append(a_connected_aip_message)
    def atlantaResponseEventHandler(self, a_connected_aip_message):
        self.atlantaReceivedResponses.append(a_connected_aip_message)
    def biloxiRequestEventHandler(self, a_connected_aip_message):
        self.biloxiReceivedRequests.append(a_connected_aip_message)
    def biloxiResponseEventHandler(self, a_connected_aip_message):
        self.biloxiReceivedResponses.append(a_connected_aip_message)
    def bobRequestEventHandler(self, a_connected_aip_message):
        self.bobReceivedRequests.append(a_connected_aip_message)
    def bobResponseEventHandler(self, a_connected_aip_message):
        self.bobReceivedResponses.append(a_connected_aip_message)
class TestStatelessProxyWithUDPTransport(AbstractStatelessProxyTestCase):
    """Concrete runner for the shared scenarios; no transport override is wired up yet."""
class TestStatelessProxyWithTCPTransport(AbstractStatelessProxyTestCase):
    """Concrete runner for the shared scenarios; no transport override is wired up yet."""
class TestStatelessProxyWithTLSTransport(AbstractStatelessProxyTestCase):
    """Concrete runner for the shared scenarios; no transport override is wired up yet."""
# ---------------------------------------------------------------------------
from os.path import dirname, abspath, join, isfile, expanduser
from subprocess import check_output, CalledProcessError, Popen, check_call
import sys
import vim
from clangd import vimsupport
from clangd.vimsupport import GetBoolValue, GetIntValue, GetVariableValue
from clangd.lsp_client import LSPClient, TimedOutError
from clangd.trie import Trie
from clangd import glog as log
DEFAULT_TRIGGER_STYLE = ['.', '>']
def FilterFileName(file_name):
    """Return False (do NOT filter) iff *file_name* is a loaded C-family buffer.

    Returns True (filter out) for non-C-family buffers and for files with no
    matching buffer.
    """
    for buf in vim.buffers:
        if buf.name == file_name:
            ft = buf.options['filetype']
            # vim may hand option values back as bytes under Python 3;
            # didOpenFile() decodes the same option, so mirror that here —
            # otherwise the membership test below always fails.
            if isinstance(ft, bytes):
                ft = ft.decode('utf-8')
            return ft not in ['c', 'cpp', 'objc', 'objcpp']
    return True
def FilterCurrentFile():
    """Return False (do NOT filter) when the current buffer has a C-family filetype."""
    file_types = vimsupport.CurrentFileTypes()
    if not file_types:
        return True
    c_family = ('c', 'cpp', 'objc', 'objcpp')
    return not any(file_type in c_family for file_type in file_types)
def GetUriFromFilePath(file_path):
    """Build a file:// URI for *file_path* (no percent-escaping is performed)."""
    return 'file://' + file_path
def GetFilePathFromUri(uri):
    """Strip the 7-character 'file://' scheme prefix from *uri*."""
    scheme_len = len('file://')
    return uri[scheme_len:]
def _FixVimLineColumn(buf, line, column):
line = min(line, len(buf))
column = min(
len(buf[line - 1]) - 1
if len(buf[line - 1]) > 0 else 0, column)
return (line, column)
def _MarkHighlights(highlights):
    """Replace clangd highlight matches with *highlights* (LSP 0-based ranges)."""
    vimsupport.ClearClangdSyntaxMatches(groups=['clangdHighlightSection'])
    for highlight in highlights:
        rng = highlight['range']
        # LSP positions are 0-based; vim match positions are 1-based.
        vimsupport.AddDiagnosticSyntaxMatch(
            rng['start']['line'] + 1,
            rng['start']['character'] + 1,
            rng['end']['line'] + 1,
            rng['end']['character'] + 1,
            group='clangdHighlightSection')
# m,f,c,v,t
# ordered
def GetCompletionItemKinds():
    """Ordered list of the one-character vim completion kinds this plugin emits."""
    return list('mfcvtk')
def CompletionItemKind(kind):
    """Map an LSP CompletionItemKind code to vim's one-character kind ('' if unknown)."""
    mapping = {
        1: 'm',   # Text
        2: 'f',   # Method
        3: 'f',   # Function
        4: 'f',   # Constructor
        5: 'v',   # Field
        6: 'v',   # Variable
        7: 'c',   # Class
        8: 'c',   # Interface
        9: 'm',   # Module
        10: 'v',  # Property
        11: 't',  # Unit
        12: 'v',  # Value
        13: 'c',  # Enum
        14: 'k',  # Keyword
        15: 'k',  # Snippet
        16: 'k',  # Color
        17: 'k',  # File
        18: 't',  # Reference
    }
    return mapping.get(kind, '')
def check_alive(f):
    """Decorator: run *f* only while the backend client reports itself alive."""
    def guarded(self, *args, **kwargs):
        if self.isAlive():
            return f(self, *args, **kwargs)
        # backend down: silently do nothing (returns None)
    return guarded
def check_opened_current_file(f):
    """Decorator: ensure the current file is opened on the backend before *f* runs."""
    def guarded(self, *args, **kwargs):
        if not self.OpenCurrentFile():
            vimsupport.EchoMessage('backend refuses to open current file')
            return
        return f(self, *args, **kwargs)
    return guarded
def check_synchronized_current_file(f):
    """Decorator: push the current buffer's text to the backend before *f* runs."""
    def guarded(self, *args, **kwargs):
        try:
            self.didChangeFile(vim.current.buffer)
        except TimedOutError:
            vimsupport.EchoMessage('backend refuses to synchronize source')
            return
        return f(self, *args, **kwargs)
    return guarded
class ClangdManager(object):
    def __init__(self):
        """Initialize session state; the LSP client itself is created in startServer()."""
        self.lined_diagnostics = {}  # 0-based line number -> list of diagnostics
        self.state = {}
        self._client = None  # LSPClient once started
        self._in_shutdown = False  # True once we initiated shutdown ourselves
        self._documents = {}  # uri -> {'version': int, 'diagnostics': list}
        self._triggerCharacters = set(DEFAULT_TRIGGER_STYLE)
        self._signatureTriggerCharacters = set()
        self._computed_completions_words = []
        self._ClearLastCompletions()
    def _ClearLastCompletions(self):
        """Drop cached completion/signature results and their cursor position."""
        self._last_signatures = []
        self._last_completions = self._GetEmptyCompletions()
        self._last_completions_pos = (-1, -1)  # (line, column) sentinel: nothing cached
def _GetEmptyCompletions(self):
completions_tries = {}
for kind in GetCompletionItemKinds():
completions_tries[kind] = Trie()
return completions_tries
def isAlive(self):
return self._client and self._client.isAlive()
def startServer(self, confirmed=False):
if self._client:
vimsupport.EchoMessage(
'clangd is connected, please stop it first!')
return
if confirmed or vimsupport.PresentYesOrNoDialog(
'Should we start clangd?'):
clangd_executable = str(GetVariableValue('g:clangd#clangd_executable'))
if not clangd_executable:
vim_script_folder_path = str(GetVariableValue('s:script_folder_path'))
clangd_executable = join(vim_script_folder_path, '..', 'script', 'bin', 'clangd')
clangd_executable = expanduser(clangd_executable)
clangd_log_path = expanduser(
GetVariableValue('g:clangd#log_path') + '/clangd.log')
try:
self._client = LSPClient(clangd_executable, clangd_log_path,
self)
rr = self._client.initialize()
capabilities = rr['capabilities']
if 'completionProvider' in capabilities and 'triggerCharacters' in capabilities['completionProvider']:
self._triggerCharacters = set(capabilities['completionProvider']['triggerCharacters'])
log.debug('codeComplete triggers: %s' % list(self._triggerCharacters))
if 'signatureHelpProvider' in capabilities and 'triggerCharacters' in capabilities['signatureHelpProvider']:
self._signatureTriggerCharacters = set(capabilities['signatureHelpProvider']['triggerCharacters'])
log.debug('signatureHelp triggers: %s' % list(self._signatureTriggerCharacters))
except:
if self._client:
client = self._client
client.CleanUp()
self._client = None
if confirmed:
raise
else:
log.exception('failed to start backend')
vimsupport.EchoMessage('failed to start backend executable')
    def stopServer(self, confirmed=False, in_shutdown=False):
        """Shut the backend down via LSP shutdown/exit; fall back to CleanUp() on OSError."""
        if in_shutdown:
            # Remember we initiated this so on_server_down() won't auto-restart.
            self._in_shutdown = True
        if confirmed or vimsupport.PresentYesOrNoDialog(
                'Should we stop backend?'):
            try:
                if self._client:
                    client = self._client
                    client.shutdown()
                    client.exit()
                    self._client = None
            except OSError:
                # Transport already gone: just reap what's left of the process.
                # NOTE(review): non-OSError failures propagate — confirm intended.
                if self._client:
                    client = self._client
                    client.CleanUp()
                    self._client = None
                log.exception('failed to stop backend')
                return
    def restartServer(self):
        """Bounce the backend: confirmed stop followed by confirmed start."""
        log.warn('restart backend')
        self.stopServer(confirmed=True)
        self.startServer(confirmed=True)
    def on_server_connected(self):
        """LSP client callback: handshake complete; reset per-document tracking."""
        log.debug('event: backend is up')
        self._client.onInitialized()
        # Forget every previously tracked document; each must be re-opened.
        self._documents = {}
    def on_server_down(self):
        """LSP client callback: backend died; clear UI state, optionally restart."""
        log.debug('event: backend is down unexceptedly')
        self.lined_diagnostics = {}
        vimsupport.ClearClangdSyntaxMatches()
        vimsupport.UnplaceAllSigns()
        # Only auto-restart for unexpected deaths, and only when configured to.
        if not self._in_shutdown:
            self.stopServer(confirmed=True)
            if GetBoolValue('g:clangd#restart_after_crash'):
                self.startServer(confirmed=True)
    def on_bad_message_received(self, wc, message):
        """LSP client callback: malformed message from the backend; just log it."""
        log.warn('event: bad message')
def OpenFile(self, file_name):
if not self.isAlive():
return True
uri = GetUriFromFilePath(file_name)
try:
buf = vimsupport.GetBufferByName(file_name)
self.didOpenFile(buf)
except TimedOutError:
log.exception('failed to open %s' % file_name)
vimsupport.EchoTruncatedText('unable to open %s' % file_name)
return False
return True
def OpenCurrentFile(self):
file_name = vimsupport.CurrentBufferFileName()
if not file_name:
return False
if not self.OpenFile(file_name):
return False
return True
    def SaveFile(self, file_name):
        """Send didSave for *file_name*; True on success or when the backend is down."""
        if not self.isAlive():
            return True
        uri = GetUriFromFilePath(file_name)
        try:
            self._client.didSaveTestDocument(uri)
        except TimedOutError:
            log.exception('unable to save %s' % file_name)
            return False
        log.debug('file %s saved' % file_name)
        return True
def SaveCurrentFile(self):
file_name = vimsupport.CurrentBufferFileName()
if not file_name:
return True
return self.SaveFile(file_name)
def CloseFile(self, file_name):
if not self.isAlive():
return True
uri = GetUriFromFilePath(file_name)
if not uri in self._documents:
return
version = self._documents.pop(uri)['version']
try:
self._client.didCloseTestDocument(uri)
except TimedOutError:
log.exception('failed to close file %s' % file_name)
return False
log.debug('file %s closed' % file_name)
return True
def CloseCurrentFile(self):
file_name = vimsupport.CurrentBufferFileName()
if not file_name:
return True
return self.CloseFile(file_name)
    def onDiagnostics(self, uri, diagnostics):
        """LSP client callback: cache the latest diagnostics for a tracked document."""
        if uri not in self._documents:
            return
        log.debug('diagnostics for %s is updated' % uri)
        self._documents[uri]['diagnostics'] = diagnostics
    def HandleClientRequests(self):
        """Pump pending backend messages (no-op when no client exists)."""
        if self._client:
            self._client.handleClientRequests()
def GetDiagnostics(self, buf):
if not self.isAlive():
return []
file_name = buf.name
uri = GetUriFromFilePath(file_name)
needReopen = False
if not self.OpenFile(file_name):
return []
try:
self._client.handleClientRequests()
except TimedOutError:
log.exception('failed to get diagnostics %s' % file_name)
return []
if not uri in self._documents or not 'diagnostics' in self._documents[uri]:
return []
response = self._documents[uri]['diagnostics']
return vimsupport.ConvertDiagnosticsToQfList(file_name, response)
    def GetDiagnosticsForCurrentFile(self):
        """Fetch diagnostics for the current buffer and refresh signs/highlights."""
        if not self.isAlive():
            return []
        lined_diagnostics = {}
        diagnostics = self.GetDiagnostics(vimsupport.CurrentBuffer())
        # Group diagnostics by 0-based line number.
        for diagnostic in diagnostics:
            # 0-index line
            lnum = diagnostic['lnum'] - 1
            if not lnum in lined_diagnostics:
                lined_diagnostics[lnum] = []
            lined_diagnostics[lnum].append(diagnostic)
        # if we hit the cache, simple ignore
        if lined_diagnostics == self.lined_diagnostics:
            return diagnostics
        # clean up current diagnostics
        self.lined_diagnostics = lined_diagnostics
        vimsupport.ClearClangdSyntaxMatches(groups = [ 'clangdErrorSection', 'clangdWarningSection' ])
        vimsupport.UnplaceAllSigns()
        for diagnostic in diagnostics:
            # severity >= 3 is rendered as an error, lower as a warning
            vimsupport.AddDiagnosticSyntaxMatch(
                diagnostic['lnum'],
                diagnostic['col']+1,
                is_error=diagnostic['severity'] >= 3)
        vimsupport.PlaceSignForErrorMessageArray(self.lined_diagnostics)
        return diagnostics
def NearestDiagnostic(self, line, column):
if len(self.lined_diagnostics[line]) == 1:
return self.lined_diagnostics[line][0]
sorted_diagnostics = sorted(
self.lined_diagnostics[line],
key=lambda diagnostic: abs(diagnostic['col'] - column))
return sorted_diagnostics[0]
    def ErrorStatusForCurrentLine(self):
        """Severity name ('ignored'..'fatal') of the diagnostic nearest the cursor, or ''."""
        if not self.isAlive():
            return ''
        current_line, current_column = vimsupport.CurrentLineAndColumn()
        if not current_line in self.lined_diagnostics:
            return ''
        diagnostic = self.NearestDiagnostic(current_line, current_column)
        # ('serverity' typo is pre-existing; left untouched in this doc pass)
        serverity_strings = [
            'ignored',
            'note',
            'warning',
            'error',
            'fatal',
        ]
        return serverity_strings[int(diagnostic['severity'])]
    @check_alive
    def EchoErrorMessageForCurrentLine(self):
        """Echo the message of the diagnostic nearest the cursor; return it (or None)."""
        current_line, current_column = vimsupport.CurrentLineAndColumn()
        if not current_line in self.lined_diagnostics:
            return
        vimsupport.EchoText('')  # clear the command line first
        diagnostic = self.NearestDiagnostic(current_line, current_column)
        vimsupport.EchoTruncatedText(diagnostic['text'])
        return diagnostic
    @check_alive
    def EchoDetailedErrorMessage(self):
        """Echo every diagnostic on the cursor line, one 'L:C message' per line."""
        current_line, _ = vimsupport.CurrentLineAndColumn()
        if not current_line in self.lined_diagnostics:
            return
        vimsupport.EchoText('')  # clear the command line first
        full_text = ''
        for diagnostic in self.lined_diagnostics[current_line]:
            full_text += 'L%d:C%d %s\n' % (diagnostic['lnum'],
                                           diagnostic['col'],
                                           diagnostic['text'])
        vimsupport.EchoText(full_text[:-1])  # drop the trailing newline
        return full_text
    def didOpenFile(self, buf):
        """Send didOpen for *buf* and start tracking it at version 1 (no-op if tracked)."""
        file_name = buf.name
        uri = GetUriFromFilePath(buf.name)
        if uri in self._documents:
            return
        # the 'filetype' option value is expected as bytes here
        file_type = buf.options['filetype'].decode('utf-8')
        text = vimsupport.ExtractUTF8Text(buf)
        self._documents[uri] = {}
        self._documents[uri]['version'] = 1
        self._documents[uri]['diagnostics'] = []
        self._client.didOpenTestDocument(uri, text, file_type)
        log.debug('file %s opened' % file_name)
def didChangeFile(self, buf):
file_name = buf.name
uri = GetUriFromFilePath(buf.name)
if not uri in self._documents:
# not sure why this happens
self.didOpenFile(buf)
return
version = self._documents[uri][
'version'] = self._documents[uri]['version'] + 1
textbody = vimsupport.ExtractUTF8Text(buf)
self._client.didChangeTestDocument(uri, version, textbody)
    @check_alive
    def UpdateSpecifiedBuffer(self, buf):
        """Sync *buf* to the backend, skipping unmodified non-empty buffers."""
        # FIXME: we need to add a temp name for every unnamed buf?
        if not buf.name:
            return
        if not buf.options['modified']:
            # Unmodified and non-empty: the backend already has this content.
            if (len(buf) > 1) or (len(buf) == 1 and len(buf[0])):
                return
        self.didChangeFile(buf)
@check_alive
def UpdateCurrentBuffer(self):
buf = vimsupport.CurrentBuffer()
try:
self.UpdateSpecifiedBuffer(buf)
except TimedOutError:
log.exception('failed to update curent buffer')
vimsupport.EchoTruncatedText('unable to update curent buffer')
def _CalculateStartColumnAt(self, column, line):
start_column = min(column, len(line))
while start_column:
c = line[start_column - 1]
if not (str.isalnum(c) or c == '_'):
break
start_column -= 1
return start_column, line[start_column:column]
    def onCodeCompletions(self, uri, line, column, completions):
        """LSP client callback: cache completion items for the pending request."""
        if uri not in self._documents:
            return
        if uri != GetUriFromFilePath(vimsupport.CurrentBufferFileName()):
            return
        # Stale reply for an older cursor position: drop it.
        if not self._last_completions_pos == (line, column):
            return
        tries = self._GetEmptyCompletions()
        log.debug('performed clang codecomplete at %d:%d, result %d items' %
                  (line, column, len(completions)))
        for completion in completions:
            if not 'kind' in completion:
                continue
            kind = CompletionItemKind(completion['kind'])
            # insertText is missing from old clangd, we try to keep compatibility here
            word = completion[
                'insertText'] if 'insertText' in completion else completion[
                    'label']
            # description
            info = completion[
                'detail'] if 'detail' in completion else completion['label']
            # actual results to feed vim
            tries[kind].insert(
                word,
                {
                    'word': word,  # The actual completion
                    'kind': kind,  # The type of completion, one character
                    'info': info,  # description
                    'icase': 1,  # ignore case
                    'dup': 1,  # allow duplicates
                })
        # update cache
        self._last_completions = tries
    def onSignatureHelps(self, uri, line, column, activeSignature, activeParameter, signatures):
        """LSP client callback: cache signature-help entries for the pending request."""
        if uri not in self._documents:
            return
        if uri != GetUriFromFilePath(vimsupport.CurrentBufferFileName()):
            return
        # Stale reply for an older cursor position: drop it.
        if not self._last_completions_pos == (line, column):
            return
        last_signatures = []
        log.debug('performed clang signatureHelp at %d:%d, result %d items' %
                  (line, column, len(signatures)))
        for signature in signatures:
            if not 'label' in signature:
                continue
            # one entry per overload: parameter labels joined as the word
            word = u', '.join(map(lambda x: x['label'], signature['parameters']))
            # description
            info = signature['label']
            # actual results to feed vim
            last_signatures.append(
                {
                    'word': word,  # The actual completion
                    'info': info,  # description
                    'icase': 1,  # ignore case
                    'dup': 1,  # allow duplicates
                    'empty': 1,  # this is only a hint
                })
        # update cache
        self._last_signatures = last_signatures
    def CodeCompleteAtCurrent(self):
        """Kick off completion or signature help at the cursor.

        Returns the column vim should complete from, or -1 to decline.
        """
        if not self.isAlive():
            return -1
        if not self.OpenCurrentFile():
            return -1
        line, column = vimsupport.CurrentLineAndColumn()
        start_column, start_word = self._CalculateStartColumnAt(
            column, vimsupport.CurrentLine())
        # The character just before the identifier prefix decides what to do.
        trigger_word = None
        if start_column:
            trigger_word = vimsupport.CurrentLine()[start_column - 1]
        # skip from ';' and '}'
        if trigger_word == ';' or trigger_word == '}' or trigger_word == ']':
            return -1
        if trigger_word in self._triggerCharacters:
            return self.CodeCompleteImpl(start_column, line, column, trigger_word, start_word)
        if trigger_word in self._signatureTriggerCharacters:
            return self.SignatureHelpImpl(start_column, line, column, trigger_word, start_word)
        return -1
    def CodeCompleteImpl(self, start_column, line, column, trigger_word, start_word):
        """Request completions (unless cached for this position) and stage matches.

        Returns the 1-based column vim should complete from.
        """
        if not self._last_completions_pos == (line, start_column):
            timeout_ms = GetIntValue('g:clangd#codecomplete_timeout')
            self._last_completions_pos = (line, start_column)
            uri = GetUriFromFilePath(vimsupport.CurrentBufferFileName())
            try:
                self._client.codeCompleteAt(
                    uri, line, start_column, timeout_ms=timeout_ms)
            except TimedOutError:
                log.warn('perform clang codeComplete timed out at %d:%d' %
                         (line, column))
        # fetch cachable completions
        tries = self._last_completions
        flat_completions = []
        for kind, trie in tries.items():
            # at most 10 matches per kind
            flat_completions.extend(trie.searchPrefix(start_word)[0:10])
        self._computed_completions_words = flat_completions
        return start_column + 1
    def SignatureHelpImpl(self, start_column, line, column, trigger_word, start_word):
        """Request signature help (unless cached for this position) and stage results.

        Returns the 1-based column vim should complete from.
        """
        if not self._last_completions_pos == (line, start_column):
            timeout_ms = GetIntValue('g:clangd#codecomplete_timeout')
            self._last_completions_pos = (line, start_column)
            uri = GetUriFromFilePath(vimsupport.CurrentBufferFileName())
            try:
                self._client.signatureHelp(
                    uri, line, start_column, timeout_ms=timeout_ms)
            except TimedOutError:
                log.warn('perform clang signatureHelp timed out at %d:%d' %
                         (line, column))
        self._computed_completions_words = self._last_signatures
        return start_column + 1
def GetCompletions(self):
    """Return the completion menu payload computed by the last
    CodeCompleteImpl/SignatureHelpImpl call.

    Returns a dict of the shape Vim's completefunc expects:
    {'words': [...], 'refresh': 'always'}; the word list is empty when
    no completions are cached.
    """
    if len(self._last_completions) == 0:
        return {'words': [], 'refresh': 'always'}
    # The original queried vimsupport.CurrentLineAndColumn() here and
    # discarded the result; the dead call has been removed.
    words = self._computed_completions_words
    return {'words': words, 'refresh': 'always'}
@check_alive
def CloseAllFiles(self):
    """Send a close notification for every document tracked in self._documents."""
    try:
        for uri in list(self._documents.keys()):
            # NOTE(review): 'didCloseTestDocument' looks like a typo of
            # 'didCloseTextDocument' (LSP textDocument/didClose) — confirm
            # against the client implementation before renaming.
            self._client.didCloseTestDocument(uri)
    except TimedOutError:
        log.exception('failed to close all files')
def _UpdateBufferByTextEdits(self, buf, textedits):
    """Apply LSP TextEdits to a Vim buffer.

    Scans the buffer text once, translating each edit's 0-based
    line/character range into flat string offsets, then splices in the
    replacement text.  NOTE(review): the single forward scan appears to
    assume `textedits` are sorted and non-overlapping — confirm the
    server guarantees this.
    """
    text = vimsupport.ExtractUTF8Text(buf)
    l = 0  # 0-based line of the current offset n
    c = 0  # 0-based column of the current offset n
    n = 0  # flat character offset into `text`
    for textedit in textedits:
        start_line = textedit['range']['start']['line']
        start_column = textedit['range']['start']['character']
        end_line = textedit['range']['end']['line']
        end_column = textedit['range']['end']['character']
        # Advance n until it sits on the edit's start position.
        while not (l == start_line and c == start_column):
            if n + 1 >= len(text):
                break
            n += 1
            if text[n - 1] == u'\n':
                l += 1
                c = 0
            else:
                c += 1
        # unexcepted result
        if n + 1 >= len(text):
            break
        bn = n  # offset where the replaced range begins
        # Advance n until it sits on the edit's end position.
        while not (l == end_line and c == end_column):
            if n + 1 >= len(text):
                n += 1
                break
            n += 1
            if text[n - 1] == u'\n':
                l += 1
                c = 0
            else:
                c += 1
        # Splice in the replacement and continue after the inserted text.
        nn = bn + len(textedit['newText'])
        text = text[0:bn] + textedit['newText'] + text[n:]
        n = nn
    vim.current.buffer[:] = text.split('\n')
@check_alive
@check_synchronized_current_file
def format(self):
    """Format the whole buffer, or only the visual selection when one exists."""
    buf = vim.current.buffer
    # using current document's settings
    tabSize = int(buf.options['tabstop'])
    insertSpaces = bool(buf.options['expandtab'])
    # BUGFIX: the original tested mark('<') twice and never mark('>'), so a
    # selection with an unset end mark was still range-formatted with None.
    if buf.mark('<') is None or buf.mark('>') is None:
        self._FormatBuffer(buf, tabSize, insertSpaces)
    else:
        self._FormatOnRange(buf, buf.mark('<'), buf.mark('>'), tabSize, insertSpaces)
def _FormatBuffer(self, buf, tabSize, insertSpaces):
    """Whole-document format via the language server; applies returned edits."""
    document_uri = GetUriFromFilePath(buf.name)
    try:
        # actual format rpc
        edits = self._client.format(document_uri, tabSize, insertSpaces)
    except TimedOutError:
        log.exception('code format timed out')
        vimsupport.EchoMessage('backend refuses to perform code format')
        return
    self._UpdateBufferByTextEdits(buf, edits)
def _FormatOnRange(self, buf, start, end, tabSize, insertSpaces):
    """Range-format the span between two Vim marks via the language server."""
    begin_line, begin_col = _FixVimLineColumn(buf, start[0], start[1])
    last_line, last_col = _FixVimLineColumn(buf, end[0], end[1])
    document_uri = GetUriFromFilePath(vimsupport.CurrentBufferFileName())
    try:
        # actual format rpc; server lines are 0-based, hence the -1
        edits = self._client.rangeFormat(document_uri, begin_line - 1, begin_col,
                                         last_line - 1, last_col, tabSize, insertSpaces)
    except TimedOutError:
        log.exception('code format')
        vimsupport.EchoMessage("clangd refuse to perform code format")
        return
    self._UpdateBufferByTextEdits(buf, edits)
@check_alive
@check_synchronized_current_file
def rename(self, line=None, character=None, new_name=None):
    """Rename the symbol at (line, character) — cursor position when omitted —
    and apply the resulting workspace edits to every loaded buffer.
    """
    buf = vim.current.buffer
    uri = GetUriFromFilePath(buf.name)
    # BUGFIX: compare against None explicitly — a character of 0 (first
    # column) is falsy and previously fell back to the cursor position.
    if line is not None and character is not None:
        line, character = _FixVimLineColumn(buf, line, character)
    else:
        line, character = vimsupport.CurrentLineAndColumn()
    try:
        workspace_edits = self._client.rename(
            uri, line=line, character=character, new_name=new_name)
    except TimedOutError:
        log.exception('rename')
        vimsupport.EchoMessage("clangd refuse to perform rename")
        return
    # update each file in workspace
    self._UpdateBufferByWorkspaceEdits(workspace_edits)
def _UpdateBufferByWorkspaceEdits(self, workspace_edits):
    """Route each uri's TextEdits in a WorkspaceEdit to its loaded buffer.

    Files without a loaded buffer are skipped.
    """
    changes = workspace_edits["changes"]
    for document_uri, edits in changes.items():
        target = vimsupport.GetBufferByName(GetFilePathFromUri(document_uri))
        if not target:
            continue
        self._UpdateBufferByTextEdits(target, edits)
@check_alive
@check_synchronized_current_file
def showHover(self):
    """Query hover information at the cursor and echo it.

    Returns the raw hover response, or None on timeout.
    """
    buf = vim.current.buffer
    uri = GetUriFromFilePath(buf.name)
    line, character = vimsupport.CurrentLineAndColumn()
    try:
        hover = self._client.hover(
            uri, line=line, character=character)
    except TimedOutError:
        log.exception('hover')
        return
    if hover['contents']:
        # Clear the command line before echoing the hover text.
        vimsupport.EchoText('')
        full_text = hover['contents']['value']
        # clangd doesn't implement range or kind
        vimsupport.EchoTruncatedText(full_text)
    return hover
@check_alive
@check_synchronized_current_file
def gotoDefinition(self):
    """Jump to the definition of the symbol under the cursor.

    Echoes 'Not found' when the server returns no locations; otherwise
    jumps to the first location and marks all of them.
    """
    buf = vim.current.buffer
    uri = GetUriFromFilePath(buf.name)
    line, character = vimsupport.CurrentLineAndColumn()
    try:
        highlights = self._client.gotoDefinition(
            uri, line=line, character=character)
    except TimedOutError:
        log.exception('gotoDefinition')
        return
    if not highlights:
        vimsupport.EchoText('')
        vimsupport.EchoText('Not found')
        return
    # XXX we can use kind? or pop a table just like ctags
    # See DocumentHighlightKind
    # Only the first returned location is jumped to.
    highlight = highlights[0]
    uri = highlight['uri']
    file_name = GetFilePathFromUri(uri)
    rangeDict = highlight['range']
    line = rangeDict['start']['line']
    column = rangeDict['start']['character']
    vimsupport.EchoText('')
    # Fix from 0-based line/column
    line += 1
    #column -= 1
    vimsupport.EchoText('Jumping to L%d:C%d' % (line, column))
    vimsupport.GotoBuffer(file_name, line, column)
    _MarkHighlights(highlights)
@check_alive
@check_synchronized_current_file
def showHighlight(self):
    """Mark every reference to the symbol under the cursor."""
    current_buf = vim.current.buffer
    document_uri = GetUriFromFilePath(current_buf.name)
    cursor_line, cursor_col = vimsupport.CurrentLineAndColumn()
    try:
        found = self._client.highlight(
            document_uri, line=cursor_line, character=cursor_col)
    except TimedOutError:
        log.exception('highlight')
        return
    _MarkHighlights(found)
@check_alive
@check_synchronized_current_file
def fixit(self):
    """Collect clangd fix-its for the visual selection (or the whole
    document when no selection exists) and apply the one the user picks.
    """
    buf = vim.current.buffer
    uri = GetUriFromFilePath(buf.name)
    # using current document's settings
    # BUGFIX: the original tested mark('<') twice; the second check must be
    # the end mark ('>') so a real selection is detected.
    if buf.mark('<') is None or buf.mark('>') is None:
        # No selection: consider the whole document.
        start_line, start_column = (1, 1)
        # BUGFIX: sys.maxint is Python 2 only; sys.maxsize exists in 2 and 3.
        end_line, end_column = (sys.maxsize, sys.maxsize)
    else:
        start_line, start_column = buf.mark('<')
        end_line, end_column = buf.mark('>')
    start_line, start_column = _FixVimLineColumn(buf, start_line, start_column)
    end_line, end_column = _FixVimLineColumn(buf, end_line, end_column)
    # XXX we should use more accurate diagnostics
    commands = self._client.codeAction(
        uri,
        start_line,
        start_column,
        end_line,
        end_column,
        diagnostics=self._documents[uri]['diagnostics'])
    quickfix_list = []
    quickfix_edits = []
    # (removed an unused counter variable `i` from the original)
    for command in commands:
        title = command['title']
        if command['command'] == 'clangd.applyFix':
            # Each argument is a WorkspaceEdit that implements this fix.
            for argument in command['arguments']:
                workspace_edits = argument
                quickfix_edits.append(workspace_edits)
                quickfix_list.append(title)
        else:
            log.debug('not supported command %s' % command['command'])
    if not quickfix_list:
        vimsupport.EchoText('No fix-its found')
        return
    try:
        index = vimsupport.SelectFromList('Select one selection of fix-it:', quickfix_list)
    except RuntimeError:
        vimsupport.EchoText('No fix-it applied')
        return
    if index >= 0:
        workspace_edits = quickfix_edits[index]
        self._UpdateBufferByWorkspaceEdits(workspace_edits)
| |
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import fnmatch
import threading
from json import dumps, loads
import rclpy
from rcl_interfaces.msg import Parameter, ParameterType, ParameterValue
from rcl_interfaces.srv import ListParameters
from ros2node.api import get_absolute_node_name
from ros2param.api import call_get_parameters, call_set_parameters, get_parameter_value
from rosapi.proxy import get_nodes
""" Methods to interact with the param server. Values have to be passed
as JSON in order to facilitate dynamically typed SRV messages """
# Ensure thread safety for setting / getting parameters.
param_server_lock = threading.RLock()
_node = None
_parent_node_name = ""
_parameter_type_mapping = [
"",
"bool_value",
"integer_value",
"double_value",
"string_value",
"byte_array_value" "bool_array_value",
"integer_array_value",
"double_array_value",
"string_array_value",
]
def init(parent_node_name):
    """
    Initializes params module with a rclpy.node.Node for further use.
    This function has to be called before any other for the module to work.
    """
    global _node, _parent_node_name
    # TODO(@jubeira): remove this node; use rosapi node with MultiThreadedExecutor or
    # async / await to prevent the service calls from blocking.
    parent_node_basename = parent_node_name.split("/")[-1]
    param_node_name = f"{parent_node_basename}_params"
    # Remap the node name explicitly so the helper node is unique per rosapi instance.
    _node = rclpy.create_node(
        param_node_name, cli_args=["--ros-args", "-r", f"__node:={param_node_name}"]
    )
    _parent_node_name = get_absolute_node_name(parent_node_name)
def set_param(node_name, name, value, params_glob):
    """Sets a parameter in a given node.

    `value` must be a JSON-formatted string; raises Exception otherwise.
    """
    if params_glob and not any(fnmatch.fnmatch(str(name), glob) for glob in params_glob):
        # If the glob list is not empty and there are no glob matches,
        # stop the attempt to set the parameter.
        return
    # If the glob list is empty (i.e. false) or the parameter matches
    # one of the glob strings, continue to set the parameter.
    d = None
    try:
        d = loads(value)
        # NOTE(review): only JSON strings replace `value`; for other JSON
        # types the raw string is kept and the type is deduced downstream
        # by get_parameter_value — confirm this asymmetry is intentional.
        value = d if isinstance(d, str) else value
    except ValueError:
        raise Exception(
            "Due to the type flexibility of the ROS parameter server, the value argument to set_param must be a JSON-formatted string."
        )
    node_name = get_absolute_node_name(node_name)
    with param_server_lock:
        _set_param(node_name, name, value)
def _set_param(node_name, name, value, parameter_type=None):
    """
    Internal helper function for set_param.
    Attempts to set the given parameter in the target node with the desired value,
    deducing the parameter type if it's not specified.
    parameter_type allows forcing a type for the given value; this is useful to delete parameters.
    """
    parameter = Parameter()
    parameter.name = name
    if parameter_type is None:
        # Let ros2param deduce the type from the string representation.
        parameter.value = get_parameter_value(string_value=value)
    else:
        parameter.value = ParameterValue()
        parameter.value.type = parameter_type
        if parameter_type != ParameterType.PARAMETER_NOT_SET:
            # BUGFIX: setattr requires the value as its third argument; the
            # original two-argument call raised TypeError for any
            # explicitly-typed parameter.
            setattr(parameter.value, _parameter_type_mapping[parameter_type], value)
    try:
        # call_set_parameters will fail if the node does not exist.
        call_set_parameters(node=_node, node_name=node_name, parameters=[parameter])
    except Exception:
        # Best-effort: a missing node is not an error for the caller.
        pass
def get_param(node_name, name, default, params_glob):
    """Gets a parameter from a given node.

    Returns the value serialized as JSON; `default` is returned
    (also JSON-encoded) when the node or parameter does not exist.
    """
    if params_glob and not any(fnmatch.fnmatch(str(name), glob) for glob in params_glob):
        # If the glob list is not empty and there are no glob matches,
        # stop the attempt to get the parameter.
        return
    # If the glob list is empty (i.e. false) or the parameter matches
    # one of the glob strings, continue to get the parameter.
    if default != "":
        try:
            default = loads(default)
        except ValueError:
            pass  # Keep default without modifications.
    node_name = get_absolute_node_name(node_name)
    with param_server_lock:
        try:
            # call_get_parameters will fail if node does not exist.
            response = call_get_parameters(node=_node, node_name=node_name, parameter_names=[name])
            pvalue = response.values[0]
            # if type is 0 (parameter not set), the next line will raise an exception
            # and return value shall go to default.
            value = getattr(pvalue, _parameter_type_mapping[pvalue.type])
        except Exception:
            # If either the node or the parameter does not exist, return default.
            value = default
    return dumps(value)
def has_param(node_name, name, params_glob):
    """Checks whether a given node has a parameter or not"""
    # An empty glob list means "allow all"; otherwise the name must match.
    if params_glob and not any(fnmatch.fnmatch(str(name), glob) for glob in params_glob):
        return False
    absolute_name = get_absolute_node_name(node_name)
    with param_server_lock:
        try:
            response = call_get_parameters(
                node=_node, node_name=absolute_name, parameter_names=[name])
        except Exception:
            # Node unreachable: treat as "parameter absent".
            return False
    ptype = response.values[0].type
    return 0 < ptype < len(_parameter_type_mapping)
def delete_param(node_name, name, params_glob):
    """Deletes a parameter in a given node"""
    # An empty glob list means "allow all"; otherwise the name must match.
    if params_glob and not any(fnmatch.fnmatch(str(name), glob) for glob in params_glob):
        return
    absolute_name = get_absolute_node_name(node_name)
    if has_param(absolute_name, name, params_glob):
        with param_server_lock:
            # Forcing type PARAMETER_NOT_SET undeclares the parameter.
            _set_param(absolute_name, name, None, ParameterType.PARAMETER_NOT_SET)
def get_param_names(params_glob):
    """Collect parameter names from every known node, filtered by glob."""
    all_names = []
    for node in get_nodes():
        all_names.extend(get_node_param_names(node, params_glob))
    return all_names
def get_node_param_names(node_name, params_glob):
    """Gets list of parameter names for a given node"""
    absolute_name = get_absolute_node_name(node_name)
    with param_server_lock:
        names = _get_param_names(absolute_name)
        if not params_glob:
            # No glob restriction: return everything.
            return names

        def matches(item):
            return any(fnmatch.fnmatch(str(item), glob) for glob in params_glob)

        return [item for item in names if matches(item)]
def _get_param_names(node_name):
    """List parameter names of `node_name` via its list_parameters service.

    Returns names qualified as "<node>:<param>"; raises RuntimeError when
    the service does not appear within 5 seconds.
    """
    # This method is called in a service callback; calling a service of the same node
    # will cause a deadlock.
    global _parent_node_name
    if node_name == _parent_node_name:
        return []
    client = _node.create_client(ListParameters, f"{node_name}/list_parameters")
    ready = client.wait_for_service(timeout_sec=5.0)
    if not ready:
        raise RuntimeError("Wait for list_parameters service timed out")
    request = ListParameters.Request()
    future = client.call_async(request)
    rclpy.spin_until_future_complete(_node, future)
    response = future.result()
    if response is not None:
        # Qualify each parameter with its node for global listings.
        return [f"{node_name}:{param_name}" for param_name in response.result.names]
    else:
        return []
| |
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
r"""Shared subfunctions.
"""
import random
import textwrap
from math import ceil, log2
def wrapped_docstring():
    '''Return a text-wrapped version of the module docstring'''
    paragraphs = []
    current = []
    # Group non-empty lines into paragraphs separated by blank lines.
    for raw_line in __doc__.strip().split('\n'):
        stripped = raw_line.strip()
        if stripped:
            current.append(stripped)
        elif current:
            paragraphs.append('\n'.join(current))
            current = []
    if current:
        paragraphs.append('\n'.join(current))
    return '\n\n'.join(textwrap.fill(p) for p in paragraphs)
def check_bool(x):
    """check_bool checks if input 'x' either a bool or
    one of the following strings: ["true", "false"]
    It returns value as Bool type.
    """
    if isinstance(x, bool):
        return x
    lowered = x.lower()
    if lowered in ("true", "false"):
        return lowered == "true"
    raise RuntimeError("{} is not a boolean value.".format(x))
def check_int(x):
    """check_int checks if input 'x' is decimal integer.
    It returns value as an int type.
    """
    if isinstance(x, int):
        return x
    if x.isdecimal():
        return int(x)
    raise RuntimeError("{} is not a decimal number".format(x))
def as_snake_case_prefix(name):
    """ Convert PascalCase name into snake_case name"""
    pieces = []
    for ch in name:
        # Insert a separator before every upper-case letter except the first.
        if ch.isupper() and pieces:
            pieces.append('_')
        pieces.append(ch.lower())
    if name:
        pieces.append('_')
    return ''.join(pieces)
def get_random_data_hex_literal(width):
    """ Fetch 'width' random bits and return them as hex literal"""
    width = int(width)
    # Strip the "0x" prefix and prepend the SystemVerilog width specifier.
    random_hex = hex(random.getrandbits(width))[2:]
    return "{}'h{}".format(width, random_hex)
def blockify(s, size, limit):
    """ Make sure the output does not exceed a certain size per line"""
    bits_per_line = limit * 4
    # The first (possibly partial) chunk carries size % bits_per_line bits.
    leftover = size % bits_per_line
    chunk_bits = leftover if leftover else bits_per_line
    pos = 2  # skip the leading "0x" of the hex literal
    chunks = []
    bits_left = size
    while bits_left > 0:
        nibbles = int(chunk_bits / 4)
        chunks.append("{}'h{}".format(chunk_bits, s[pos:pos + nibbles]))
        pos += nibbles
        bits_left -= chunk_bits
        chunk_bits = bits_per_line
    return (",\n ".join(chunks))
def get_random_perm_hex_literal(numel):
    """ Compute a random permutation of 'numel' elements and
    return as packed hex literal"""
    num_elements = int(numel)
    index_width = int(ceil(log2(num_elements)))
    indices = list(range(num_elements))
    random.shuffle(indices)
    # Pack each index as a fixed-width binary field.
    packed = ''.join(format(k, '0' + str(index_width) + 'b') for k in indices)
    # convert to hex for space efficiency
    packed = hex(int(packed, 2))
    # NOTE(review): `numel` is used unconverted here; a str argument would
    # repeat the string instead of multiplying — confirm callers pass ints.
    return blockify(packed, index_width * numel, 64)
def hist_to_bars(hist, m):
    '''Convert histogramm list into ASCII bar plot'''
    peak = max(hist)
    total = sum(hist)
    bars = []
    for idx, count in enumerate(hist):
        prefix = "{:2}: ".format(idx)
        pad = len(str(m)) - len(prefix)
        bar = prefix + (" " * pad)
        # Scale the bar to at most 20 characters relative to the peak bin.
        bar += "|" * (count * 20 // peak)
        bar += " ({:.2f}%)".format(100.0 * count / total) if count else "--"
        bars.append(bar)
    return bars
def get_hd(word1, word2):
    '''Calculate Hamming distance between two words.'''
    if len(word1) != len(word2):
        raise RuntimeError('Words are not of equal size')
    # XOR the two bitstrings and count the differing positions.
    diff = int(word1, 2) ^ int(word2, 2)
    return bin(diff).count('1')
def hd_histogram(existing_words):
    '''Build Hamming distance histogram'''
    width = len(existing_words[0])
    minimum_hd, maximum_hd = width, 0
    minimum_hw, maximum_hw = width, 0
    hist = [0] * (width + 1)
    for idx, word in enumerate(existing_words):
        # Track Hamming-weight extremes per word.
        weight = word.count('1')
        minimum_hw = min(weight, minimum_hw)
        maximum_hw = max(weight, maximum_hw)
        # Pairwise distances against all later words only (each pair once).
        for other in existing_words[idx + 1:]:
            dist = get_hd(word, other)
            hist[dist] += 1
            minimum_hd = min(dist, minimum_hd)
            maximum_hd = max(dist, maximum_hd)
    return {
        "hist": hist,
        "bars": hist_to_bars(hist, len(existing_words)),
        "min_hd": minimum_hd,
        "max_hd": maximum_hd,
        "min_hw": minimum_hw,
        "max_hw": maximum_hw,
    }
def is_valid_codeword(config, codeword):
    '''Checks whether the bitstring is a valid ECC codeword.'''
    data_width = config['secded']['data_width']
    ecc_width = config['secded']['ecc_width']
    if len(codeword) != (data_width + ecc_width):
        raise RuntimeError("Invalid codeword length {}".format(len(codeword)))
    # The bitstring must be formatted as "data bits[N-1:0]" + "ecc bits[M-1:0]".
    # Recompute each parity bit, folding in the stored one; every syndrome
    # bit must cancel to zero for a valid codeword.
    for j, fanin in enumerate(config['secded']['ecc_matrix']):
        parity = int(codeword[ecc_width - 1 - j])
        for k in fanin:
            parity ^= int(codeword[ecc_width + data_width - 1 - k])
        if parity:
            return False
    return True
def ecc_encode(config, dataword):
    '''Calculate and prepend ECC bits.'''
    data_width = config['secded']['data_width']
    if len(dataword) != data_width:
        raise RuntimeError("Invalid codeword length {}".format(len(dataword)))
    # Codes like Hamming reference previously calculated parity bits, so the
    # codeword is grown incrementally and earlier bits can be referenced.
    codeword = dataword
    for j, fanin in enumerate(config['secded']['ecc_matrix']):
        parity = 0
        for k in fanin:
            parity ^= int(codeword[data_width + j - 1 - k])
        codeword = str(parity) + codeword
    return codeword
def scatter_bits(mask, bits):
    '''Scatter the bits into unset positions of mask.'''
    out = []
    consumed = 0
    for m in mask:
        if m == '1':
            # Set mask positions stay '1'.
            out.append('1')
        else:
            # Unset positions are filled from `bits` in order.
            out.append(bits[consumed])
            consumed += 1
    return ''.join(out)
def permute_bits(bits, permutation):
    '''Permute the bits in a bitstring'''
    width = len(bits)
    assert width == len(permutation)
    # Indexing is LSB-first: permutation entry i selects which input bit
    # lands at output position i (prepending builds MSB-last).
    out = ''
    for k in permutation:
        out = bits[width - k - 1] + out
    return out
def _parse_hex(value):
'''Parse a hex value into an integer.
Args:
value: list[str] or str:
If a `list[str]`, parse each element as a 32-bit integer.
If a `str`, parse as a single hex string.
Returns:
int
'''
if isinstance(value, list):
result = 0
for (i, v) in enumerate(value):
result |= int(v, 16) << (i * 32)
return result
else:
value = value.translate(str.maketrans('', '', ' \r\n\t'))
return int(value, 16)
def random_or_hexvalue(dict_obj, key, num_bits):
    '''Convert hex value at "key" to an integer or draw a random number.'''
    # Default to '0x0' when the key is absent.
    dict_obj.setdefault(key, '0x0')
    if dict_obj[key] == '<random>':
        # Draw a fresh random number of the requested width.
        dict_obj[key] = random.getrandbits(num_bits)
        return
    # Otherwise parse as hex in place and range-check the result.
    try:
        dict_obj[key] = _parse_hex(dict_obj[key])
    except ValueError:
        raise RuntimeError(
            'Invalid value "{}". Must be hex or "<random>".'
            .format(dict_obj[key]))
    if dict_obj[key] >= 2**num_bits:
        raise RuntimeError(
            'Value "{}" is out of range.'
            .format(dict_obj[key]))
| |
# -*- coding: utf-8 -*-
"""Validation classes for various types of data."""
from __future__ import unicode_literals
import re
from operator import attrgetter
from marshmallow.compat import basestring, text_type, zip_longest
from marshmallow.exceptions import ValidationError
class Validator(object):
    """Base abstract class for validators.
    .. note::
        This class does not provide any behavior. It is only used to
        add a useful `__repr__` implementation for validators.
    """

    def __repr__(self):
        arg_text = self._repr_args()
        if arg_text:
            arg_text = '{0}, '.format(arg_text)
        return (
            '<{self.__class__.__name__}({args}error={self.error!r})>'
            .format(self=self, args=arg_text)
        )

    def _repr_args(self):
        """A string representation of the args passed to this validator. Used by
        `__repr__`.
        """
        return ''
class URL(Validator):
    """Validate a URL.
    :param bool relative: Whether to allow relative URLs.
    :param str error: Error message to raise in case of a validation error.
        Can be interpolated with `{input}`.
    """
    URL_REGEX = re.compile(
        r'^(?:http|ftp)s?://'  # http:// or https://
        r'(?:[^:@]+?:[^:@]*?@|)'  # basic auth
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+'
        r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain...
        r'localhost|'  # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|'  # ...or ipv4
        r'\[?[A-F0-9]*:[A-F0-9:]+\]?)'  # ...or ipv6
        r'(?::\d+)?'  # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    RELATIVE_URL_REGEX = re.compile(
        r'^((?:http|ftp)s?://'  # http:// or https://
        r'(?:[^:@]+?:[^:@]*?@|)'  # basic auth
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+'
        r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain...
        r'localhost|'  # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|'  # ...or ipv4
        r'\[?[A-F0-9]*:[A-F0-9:]+\]?)'  # ...or ipv6
        r'(?::\d+)?)?'  # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)  # host is optional, allow for relative URLs
    default_message = 'Invalid URL.'

    def __init__(self, relative=False, error=None):
        self.relative = relative
        self.error = error if error else self.default_message

    def _repr_args(self):
        return 'relative={0!r}'.format(self.relative)

    def _format_error(self, value):
        return self.error.format(input=value)

    def __call__(self, value):
        message = self._format_error(value)
        if not value:
            raise ValidationError(message)
        if self.relative:
            pattern = self.RELATIVE_URL_REGEX
        else:
            pattern = self.URL_REGEX
        if pattern.search(value) is None:
            raise ValidationError(message)
        return value
class Email(Validator):
    """Validate an email address.
    :param str error: Error message to raise in case of a validation error. Can be
        interpolated with `{input}`.
    """
    USER_REGEX = re.compile(
        r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*$"  # dot-atom
        # quoted-string
        r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]'
        r'|\\[\001-\011\013\014\016-\177])*"$)', re.IGNORECASE)
    DOMAIN_REGEX = re.compile(
        # domain
        r'(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+'
        r'(?:[A-Z]{2,6}|[A-Z0-9-]{2,})$'
        # literal form, ipv4 address (SMTP 4.1.3)
        r'|^\[(25[0-5]|2[0-4]\d|[0-1]?\d?\d)'
        r'(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\]$', re.IGNORECASE)
    DOMAIN_WHITELIST = ('localhost',)
    default_message = 'Invalid email address.'

    def __init__(self, error=None):
        self.error = error if error else self.default_message

    def _format_error(self, value):
        return self.error.format(input=value)

    def __call__(self, value):
        message = self._format_error(value)
        if not value or '@' not in value:
            raise ValidationError(message)
        user_part, domain_part = value.rsplit('@', 1)
        if not self.USER_REGEX.match(user_part):
            raise ValidationError(message)
        if domain_part in self.DOMAIN_WHITELIST:
            return value
        if self.DOMAIN_REGEX.match(domain_part):
            return value
        # Retry with the IDNA-encoded form for internationalized domains.
        try:
            idna_domain = domain_part.encode('idna').decode('ascii')
        except UnicodeError:
            raise ValidationError(message)
        if self.DOMAIN_REGEX.match(idna_domain):
            return value
        raise ValidationError(message)
class Range(Validator):
    """Validator which succeeds if the value it is passed is greater
    or equal to ``min`` and less than or equal to ``max``. If ``min``
    is not specified, or is specified as `None`, no lower bound
    exists. If ``max`` is not specified, or is specified as `None`,
    no upper bound exists.
    :param min: The minimum value (lower bound). If not provided, minimum
        value will not be checked.
    :param max: The maximum value (upper bound). If not provided, maximum
        value will not be checked.
    :param str error: Error message to raise in case of a validation error.
        Can be interpolated with `{input}`, `{min}` and `{max}`.
    """
    message_min = 'Must be at least {min}.'
    message_max = 'Must be at most {max}.'
    message_all = 'Must be between {min} and {max}.'

    def __init__(self, min=None, max=None, error=None):
        self.min = min
        self.max = max
        self.error = error

    def _repr_args(self):
        return 'min={0!r}, max={1!r}'.format(self.min, self.max)

    def _format_error(self, value, message):
        template = self.error if self.error else message
        return template.format(input=value, min=self.min, max=self.max)

    def __call__(self, value):
        if self.min is not None and value < self.min:
            # Mention both bounds when both are set.
            template = self.message_all if self.max is not None else self.message_min
            raise ValidationError(self._format_error(value, template))
        if self.max is not None and value > self.max:
            template = self.message_all if self.min is not None else self.message_max
            raise ValidationError(self._format_error(value, template))
        return value
class Length(Range):
    """Validator which succeeds if the value passed to it has a
    length between a minimum and maximum. Uses len(), so it
    can work for strings, lists, or anything with length.
    :param int min: The minimum length. If not provided, minimum length
        will not be checked.
    :param int max: The maximum length. If not provided, maximum length
        will not be checked.
    :param str error: Error message to raise in case of a validation error.
        Can be interpolated with `{input}`, `{min}` and `{max}`.
    """
    message_min = 'Shorter than minimum length {min}.'
    message_max = 'Longer than maximum length {max}.'
    message_all = 'Length must be between {min} and {max}.'

    def __call__(self, value):
        size = len(value)
        if self.min is not None and size < self.min:
            # Mention both bounds when both are set.
            template = self.message_all if self.max is not None else self.message_min
            raise ValidationError(self._format_error(value, template))
        if self.max is not None and size > self.max:
            template = self.message_all if self.min is not None else self.message_max
            raise ValidationError(self._format_error(value, template))
        return value
class Equal(Validator):
    """Validator which succeeds if the ``value`` passed to it is
    equal to ``comparable``.
    :param comparable: The object to compare to.
    :param str error: Error message to raise in case of a validation error.
        Can be interpolated with `{input}` and `{other}`.
    """
    default_message = 'Must be equal to {other}.'

    def __init__(self, comparable, error=None):
        self.comparable = comparable
        self.error = error if error else self.default_message

    def _repr_args(self):
        return 'comparable={0!r}'.format(self.comparable)

    def _format_error(self, value):
        return self.error.format(input=value, other=self.comparable)

    def __call__(self, value):
        if value != self.comparable:
            raise ValidationError(self._format_error(value))
        return value
class Regexp(Validator):
    """Validate ``value`` against the provided regex.
    :param regex: The regular expression string to use. Can also be a compiled
        regular expression pattern.
    :param flags: The regexp flags to use, for example re.IGNORECASE. Ignored
        if ``regex`` is not a string.
    :param str error: Error message to raise in case of a validation error.
        Can be interpolated with `{input}` and `{regex}`.
    """
    default_message = 'String does not match expected pattern.'

    def __init__(self, regex, flags=0, error=None):
        # Compile plain strings; accept pre-compiled patterns unchanged.
        if isinstance(regex, basestring):
            regex = re.compile(regex, flags)
        self.regex = regex
        self.error = error if error else self.default_message

    def _repr_args(self):
        return 'regex={0!r}'.format(self.regex)

    def _format_error(self, value):
        return self.error.format(input=value, regex=self.regex.pattern)

    def __call__(self, value):
        if self.regex.match(value) is not None:
            return value
        raise ValidationError(self._format_error(value))
class Predicate(Validator):
    """Call the specified ``method`` of the ``value`` object. The
    validator succeeds if the invoked method returns an object that
    evaluates to True in a Boolean context. Any additional keyword
    argument will be passed to the method.
    :param str method: The name of the method to invoke.
    :param str error: Error message to raise in case of a validation error.
        Can be interpolated with `{input}` and `{method}`.
    :param kwargs: Additional keyword arguments to pass to the method.
    """
    default_message = 'Invalid input.'

    def __init__(self, method, error=None, **kwargs):
        self.method = method
        self.error = error if error else self.default_message
        self.kwargs = kwargs

    def _repr_args(self):
        return 'method={0!r}, kwargs={1!r}'.format(self.method, self.kwargs)

    def _format_error(self, value):
        return self.error.format(input=value, method=self.method)

    def __call__(self, value):
        bound_method = getattr(value, self.method)
        if bound_method(**self.kwargs):
            return value
        raise ValidationError(self._format_error(value))
class NoneOf(Validator):
    """Validator which fails if ``value`` is a member of ``iterable``.
    :param iterable iterable: A sequence of invalid values.
    :param str error: Error message to raise in case of a validation error. Can be
        interpolated using `{input}` and `{values}`.
    """
    default_message = 'Invalid input.'

    def __init__(self, iterable, error=None):
        self.iterable = iterable
        self.values_text = ', '.join(text_type(each) for each in self.iterable)
        self.error = error if error else self.default_message

    def _repr_args(self):
        return 'iterable={0!r}'.format(self.iterable)

    def _format_error(self, value):
        return self.error.format(
            input=value,
            values=self.values_text,
        )

    def __call__(self, value):
        try:
            forbidden = value in self.iterable
        except TypeError:
            # Values incomparable with the iterable cannot be members.
            return value
        if forbidden:
            raise ValidationError(self._format_error(value))
        return value
class OneOf(Validator):
    """Validator which succeeds if ``value`` is a member of ``choices``.
    :param iterable choices: A sequence of valid values.
    :param iterable labels: Optional sequence of labels to pair with the choices.
    :param str error: Error message to raise in case of a validation error. Can be
        interpolated using `{input}`, `{choices}` and `{labels}`.
    """
    default_message = 'Not a valid choice.'

    def __init__(self, choices, labels=None, error=None):
        self.choices = choices
        self.choices_text = ', '.join(text_type(choice) for choice in self.choices)
        self.labels = [] if labels is None else labels
        self.labels_text = ', '.join(text_type(label) for label in self.labels)
        self.error = error if error else self.default_message

    def _repr_args(self):
        return 'choices={0!r}, labels={1!r}'.format(self.choices, self.labels)

    def _format_error(self, value):
        return self.error.format(
            input=value,
            choices=self.choices_text,
            labels=self.labels_text,
        )

    def __call__(self, value):
        try:
            is_member = value in self.choices
        except TypeError:
            # Incomparable values are never valid choices.
            raise ValidationError(self._format_error(value))
        if not is_member:
            raise ValidationError(self._format_error(value))
        return value

    def options(self, valuegetter=text_type):
        """Return a generator over the (value, label) pairs, where value
        is a string associated with each choice. This convenience method
        is useful to populate, for instance, a form select field.
        :param valuegetter: Can be a callable or a string. In the former case, it must
            be a one-argument callable which returns the value of a
            choice. In the latter case, the string specifies the name
            of an attribute of the choice objects. Defaults to `str()`
            or `unicode()`.
        """
        if callable(valuegetter):
            getter = valuegetter
        else:
            getter = attrgetter(valuegetter)
        pairs = zip_longest(self.choices, self.labels, fillvalue='')
        return ((getter(choice), label) for choice, label in pairs)
class ContainsOnly(OneOf):
    """Validator that succeeds if every element of the sequence ``value``
    also appears in ``choices`` (respecting multiplicity).

    :param iterable choices: Same as :class:`OneOf`.
    :param iterable labels: Same as :class:`OneOf`.
    :param str error: Same as :class:`OneOf`.
    """
    default_message = 'One or more of the choices you made was not acceptable.'

    def _format_error(self, value):
        joined = ', '.join(text_type(item) for item in value)
        return super(ContainsOnly, self)._format_error(joined)

    def __call__(self, value):
        # Work on a mutable copy so each duplicate in ``value`` consumes
        # one occurrence from ``choices``.
        remaining = list(self.choices)
        if not value and remaining:
            raise ValidationError(self._format_error(value))
        for item in value:
            # list.remove does a linear scan, so unhashable elements are
            # handled; it raises ValueError when the item is absent.
            try:
                remaining.remove(item)
            except ValueError:
                raise ValidationError(self._format_error(value))
        return value
| |
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import glob
from urlparse import urlparse
from resource_management import PropertiesFile
from resource_management.libraries.script.script import Script
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
from resource_management.libraries.functions.version import compare_versions
from resource_management.core.resources.service import ServiceConfig
from resource_management.core.resources.system import File, Execute, Directory
from resource_management.core.source import StaticFile, Template, DownloadSource, InlineTemplate
from resource_management.core.shell import as_user
from resource_management.libraries.functions.is_empty import is_empty
from resource_management.libraries.resources.xml_config import XmlConfig
from resource_management.libraries.functions.format import format
from resource_management.core.exceptions import Fail
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from ambari_commons import OSConst
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def hive(name=None):
    """Configure the Hive component ``name`` on Windows.

    Writes hive-site.xml, overrides the Windows service logon user for
    hiveserver2/metastore, creates the warehouse dir in HDFS, initialises
    the metastore schema if requested, and runs the Tez setup script for
    hiveserver2 when the execution engine is tez.
    """
    import params
    XmlConfig("hive-site.xml",
              conf_dir = params.hive_conf_dir,
              configurations = params.config['configurations']['hive-site'],
              owner=params.hive_user,
              configuration_attributes=params.config['configuration_attributes']['hive-site']
    )
    if name in ["hiveserver2","metastore"]:
        # Manually overriding service logon user & password set by the installation package
        service_name = params.service_map[name]
        ServiceConfig(service_name,
                      action="change_user",
                      username = params.hive_user,
                      password = Script.get_password(params.hive_user))
        Execute(format("cmd /c hadoop fs -mkdir -p {hive_warehouse_dir}"), logoutput=True, user=params.hadoop_user)
    if name == 'metastore':
        if params.init_metastore_schema:
            check_schema_created_cmd = format('cmd /c "{hive_bin}\\hive.cmd --service schematool -info '
                                              '-dbType {hive_metastore_db_type} '
                                              '-userName {hive_metastore_user_name} '
                                              '-passWord {hive_metastore_user_passwd!p}'
                                              '&set EXITCODE=%ERRORLEVEL%&exit /B %EXITCODE%"', #cmd "feature", propagate the process exit code manually
                                              hive_bin=params.hive_bin,
                                              hive_metastore_db_type=params.hive_metastore_db_type,
                                              hive_metastore_user_name=params.hive_metastore_user_name,
                                              hive_metastore_user_passwd=params.hive_metastore_user_passwd)
            try:
                # Probe for an existing schema; schematool -info fails when absent.
                Execute(check_schema_created_cmd)
            except Fail:
                # Schema does not exist yet -- create it.
                create_schema_cmd = format('cmd /c {hive_bin}\\hive.cmd --service schematool -initSchema '
                                           '-dbType {hive_metastore_db_type} '
                                           '-userName {hive_metastore_user_name} '
                                           '-passWord {hive_metastore_user_passwd!p}',
                                           hive_bin=params.hive_bin,
                                           hive_metastore_db_type=params.hive_metastore_db_type,
                                           hive_metastore_user_name=params.hive_metastore_user_name,
                                           hive_metastore_user_passwd=params.hive_metastore_user_passwd)
                Execute(create_schema_cmd,
                        user = params.hive_user,
                        logoutput=True
                )
    if name == "hiveserver2":
        if params.hive_execution_engine == "tez":
            # Init the tez app dir in hadoop
            script_file = __file__.replace('/', os.sep)
            cmd_file = os.path.normpath(os.path.join(os.path.dirname(script_file), "..", "files", "hiveTezSetup.cmd"))
            Execute("cmd /c " + cmd_file, logoutput=True, user=params.hadoop_user)
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def hive(name=None):
    """Configure the Hive component ``name`` on Linux-family hosts.

    For hiveserver2: creates webhcat/hive HDFS dirs and copies tarballs
    to HDFS. For every component: writes conf dirs, hive-site.xml,
    hive-env.sh and ulimit config. For metastore/hiveserver2: installs
    the JDBC connector, start scripts and (optionally) inits the schema.
    """
    import params
    if name == 'hiveserver2':
        # BigInsights 4.0.* or lower
        if params.stack_version != "" and compare_versions(params.stack_version, "4.1.0.0") < 0:
            params.HdfsResource(params.webhcat_apps_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.webhcat_user,
                                mode=0755
                                )
        # Create webhcat dirs.
        if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
            params.HdfsResource(params.hcat_hdfs_user_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.hcat_user,
                                mode=params.hcat_hdfs_user_mode
                                )
        params.HdfsResource(params.webhcat_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.webhcat_user,
                            mode=params.webhcat_hdfs_user_mode
                            )
        # ****** Begin Copy Tarballs ******
        # *********************************
        if params.stack_version != "" and compare_versions(params.stack_version, '4.0.0.0') >= 0:
            copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user)
        # Always copy pig.tar.gz and hive.tar.gz using the appropriate mode.
        copy_to_hdfs("pig",
                     params.user_group,
                     params.hdfs_user,
                     file_mode=params.tarballs_mode,
                     custom_source_file=params.pig_tar_source,
                     custom_dest_file=params.pig_tar_dest_file)
        copy_to_hdfs("hive",
                     params.user_group,
                     params.hdfs_user,
                     file_mode=params.tarballs_mode,
                     custom_source_file=params.hive_tar_source,
                     custom_dest_file=params.hive_tar_dest_file)
        wildcard_tarballs = ["sqoop", "hadoop_streaming"]
        for tarball_name in wildcard_tarballs:
            # NOTE(review): eval on a constructed attribute name;
            # getattr(params, tarball_name + "_tar_source") would be safer.
            source_file_pattern = eval("params." + tarball_name + "_tar_source")
            dest_dir = eval("params." + tarball_name + "_tar_dest_dir")
            if source_file_pattern is None or dest_dir is None:
                continue
            # Patterns containing "*" are expanded with glob; plain paths pass through.
            source_files = glob.glob(source_file_pattern) if "*" in source_file_pattern else [source_file_pattern]
            for source_file in source_files:
                src_filename = os.path.basename(source_file)
                dest_file = os.path.join(dest_dir, src_filename)
                copy_to_hdfs(tarball_name,
                             params.user_group,
                             params.hdfs_user,
                             file_mode=params.tarballs_mode,
                             custom_source_file=source_file,
                             custom_dest_file=dest_file)
        # ******* End Copy Tarballs *******
        # *********************************
        params.HdfsResource(params.hive_apps_whs_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hive_user,
                            group=params.user_group,
                            mode=0770
                            )
        # Create Hive User Dir
        params.HdfsResource(params.hive_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hive_user,
                            mode=params.hive_hdfs_user_mode
                            )
        # hive.exec.scratchdir should be created via hive_user
        # otherwise, hive.start.cleanup.scratchdir won't work, because ambari services always started by hive_user
        if not is_empty(params.hive_exec_scratchdir):
            params.HdfsResource(params.hive_exec_scratchdir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.hive_user,
                                group=params.hdfs_user,
                                mode=0777) # Hive expects this dir to be writeable by everyone as it is used as a temp dir
        # Flush all pending HDFS resource declarations above in one batch.
        params.HdfsResource(None, action="execute")
    Directory(params.hive_etc_dir_prefix,
              mode=0755
    )
    # We should change configurations for client as well as for server.
    # The reason is that stale-configs are service-level, not component.
    for conf_dir in params.hive_conf_dirs_list:
        fill_conf_dir(conf_dir)
    # Clients get world-readable config; servers keep it group-private.
    if name == "client":
        permissions = 0644
    else:
        permissions = 0660
    XmlConfig("hive-site.xml",
              conf_dir=params.hive_config_dir,
              configurations=params.hive_site_config,
              configuration_attributes=params.config['configuration_attributes']['hive-site'],
              owner=params.hive_user,
              group=params.user_group,
              mode=permissions)
    if params.hive_specific_configs_supported and name == 'hiveserver2':
        XmlConfig("hiveserver2-site.xml",
                  conf_dir=params.hive_server_conf_dir,
                  configurations=params.config['configurations']['hiveserver2-site'],
                  configuration_attributes=params.config['configuration_attributes']['hiveserver2-site'],
                  owner=params.hive_user,
                  group=params.user_group,
                  mode=0644)
    File(format("{hive_config_dir}/hive-env.sh"),
         owner=params.hive_user,
         group=params.user_group,
         content=InlineTemplate(params.hive_env_sh_template)
    )
    # On some OS this folder could be not exists, so we will create it before pushing there files
    Directory(params.limits_conf_dir,
              create_parents=True,
              owner='root',
              group='root'
    )
    File(os.path.join(params.limits_conf_dir, 'hive.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("hive.conf.j2")
    )
    if name == 'metastore' or name == 'hiveserver2':
        jdbc_connector()
    File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
         content = DownloadSource(format("{jdk_location}{check_db_connection_jar_name}")),
         mode = 0644,
    )
    if name == 'metastore':
        File(params.start_metastore_path,
             mode=0755,
             content=StaticFile('startMetastore.sh')
        )
        if params.init_metastore_schema:
            create_schema_cmd = format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
                                       "{hive_bin}/schematool -initSchema "
                                       "-dbType {hive_metastore_db_type} "
                                       "-userName {hive_metastore_user_name} "
                                       "-passWord {hive_metastore_user_passwd!p}")
            check_schema_created_cmd = as_user(format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
                                                      "{hive_bin}/schematool -info "
                                                      "-dbType {hive_metastore_db_type} "
                                                      "-userName {hive_metastore_user_name} "
                                                      "-passWord {hive_metastore_user_passwd!p}"), params.hive_user)
            # not_if: skip schema creation when -info succeeds (schema exists).
            Execute(create_schema_cmd,
                    not_if = check_schema_created_cmd,
                    user = params.hive_user
            )
    elif name == 'hiveserver2':
        File(params.start_hiveserver2_path,
             mode=0755,
             content=Template(format('{start_hiveserver2_script}'))
        )
    if name != "client":
        # Server components need pid/log/lib dirs owned by the hive user.
        crt_directory(params.hive_pid_dir)
        crt_directory(params.hive_log_dir)
        crt_directory(params.hive_var_lib)
def fill_conf_dir(component_conf_dir):
import params
Directory(component_conf_dir,
owner=params.hive_user,
group=params.user_group,
create_parents=True
)
XmlConfig("mapred-site.xml",
conf_dir=component_conf_dir,
configurations=params.config['configurations']['mapred-site'],
configuration_attributes=params.config['configuration_attributes']['mapred-site'],
owner=params.hive_user,
group=params.user_group,
mode=0644)
crt_file(format("{component_conf_dir}/hive-default.xml.template"))
crt_file(format("{component_conf_dir}/hive-env.sh.template"))
log4j_exec_filename = 'hive-exec-log4j.properties'
if (params.log4j_exec_props != None):
File(format("{component_conf_dir}/{log4j_exec_filename}"),
mode=0644,
group=params.user_group,
owner=params.hive_user,
content=params.log4j_exec_props
)
elif (os.path.exists("{component_conf_dir}/{log4j_exec_filename}.template")):
File(format("{component_conf_dir}/{log4j_exec_filename}"),
mode=0644,
group=params.user_group,
owner=params.hive_user,
content=StaticFile(format("{component_conf_dir}/{log4j_exec_filename}.template"))
)
log4j_filename = 'hive-log4j.properties'
if (params.log4j_props != None):
File(format("{component_conf_dir}/{log4j_filename}"),
mode=0644,
group=params.user_group,
owner=params.hive_user,
content=params.log4j_props
)
elif (os.path.exists("{component_conf_dir}/{log4j_filename}.template")):
File(format("{component_conf_dir}/{log4j_filename}"),
mode=0644,
group=params.user_group,
owner=params.hive_user,
content=StaticFile(format("{component_conf_dir}/{log4j_filename}.template"))
)
def crt_directory(name):
import params
Directory(name,
create_parents=True,
cd_access='a',
owner=params.hive_user,
group=params.user_group,
mode=0755)
def crt_file(name):
    """Declare ``name`` as a file owned by the Hive user and group."""
    import params
    File(name, owner=params.hive_user, group=params.user_group)
def jdbc_connector():
    """Install the JDBC driver jar used by the Hive metastore.

    When an external DB is configured, downloads the custom connector and
    copies it to params.target; otherwise copies the default (MySQL) jar
    from /usr/share/java. Finally forces 0644 permissions on the target.
    """
    import params
    if params.hive_jdbc_driver in params.hive_jdbc_drivers_list and params.hive_use_existing_db:
        # NOTE(review): ``environment`` is built but never passed to any
        # resource below — dead assignment, or a missing kwarg; confirm.
        environment = {
            "no_proxy": format("{ambari_server_hostname}")
        }
        # TODO: should be removed after ranger_hive_plugin will not provide jdbc
        Execute(('rm', '-f', params.prepackaged_ojdbc_symlink),
                path=["/bin", "/usr/bin/"],
                sudo = True)
        File(params.downloaded_custom_connector,
             content = DownloadSource(params.driver_curl_source),
        )
        Execute(('cp', '--remove-destination', params.downloaded_custom_connector, params.target),
                #creates=params.target, TODO: uncomment after ranger_hive_plugin will not provide jdbc
                path=["/bin", "/usr/bin/"],
                sudo = True)
    else:
        #for default hive db (Mysql)
        Execute(('cp', '--remove-destination', format('/usr/share/java/{jdbc_jar_name}'), params.target),
                #creates=params.target, TODO: uncomment after ranger_hive_plugin will not provide jdbc
                path=["/bin", "/usr/bin/"],
                sudo=True
                )
    File(params.target,
         mode = 0644,
    )
| |
#!/usr/bin/env python
# Copyright (c) 2014-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pyexpect.replwrap will not work with unicode_literals
# from __future__ import unicode_literals
import os
import random
import sys
import unittest
import utils
# osquery-specific testing utils
import test_base
SHELL_TIMEOUT = 10
EXIT_CATASTROPHIC = 78
class OsqueryiTest(unittest.TestCase):
    """Integration tests for the osqueryi interactive shell binary."""

    def setUp(self):
        # Fresh shell wrapper and a randomized DB path per test so runs
        # do not interfere through a shared database file.
        self.binary = test_base.getLatestOsqueryBinary('osqueryi')
        self.osqueryi = test_base.OsqueryWrapper(command=self.binary)
        self.dbpath = "%s%s" % (
            test_base.CONFIG["options"]["database_path"],
            str(random.randint(1000, 9999)))

    @unittest.skipIf(os.name == "nt", "stderr tests not supported on Windows.")
    def test_error(self):
        '''Test that we throw an error on bad query'''
        self.osqueryi.run_command(' ')
        self.assertRaises(test_base.OsqueryException,
                          self.osqueryi.run_query, 'foo')

    @test_base.flaky
    def test_config_check_success(self):
        '''Test that a 0-config passes'''
        proc = test_base.TimeoutRunner([
            self.binary,
            "--config_check",
            "--database_path=%s" % (self.dbpath),
            "--config_path=%s/test.config" % test_base.SCRIPT_DIR
        ],
            SHELL_TIMEOUT)
        self.assertEqual(proc.stdout, "")
        print(proc.stdout)
        print(proc.stderr)
        self.assertEqual(proc.proc.poll(), 0)

    @test_base.flaky
    def test_config_dump(self):
        '''Test that config raw output is dumped when requested'''
        config = os.path.join(test_base.SCRIPT_DIR, "test_noninline_packs.conf")
        proc = test_base.TimeoutRunner([
            self.binary,
            "--config_dump",
            "--config_path=%s" % config
        ],
            SHELL_TIMEOUT)
        content = ""
        with open(config, 'r') as fh:
            content = fh.read()
        actual = proc.stdout
        # Windows emits CRLF line endings; normalize before comparing.
        if os.name == "nt":
            actual = actual.replace('\r', '')
        self.assertEqual(actual, '{"%s": %s}\n' % (config, content))
        print(proc.stderr)
        self.assertEqual(proc.proc.poll(), 0)

    @test_base.flaky
    def test_config_check_failure_invalid_path(self):
        '''Test that a missing config fails'''
        proc = test_base.TimeoutRunner([
            self.binary,
            "--config_check",
            "--database_path=%s" % (self.dbpath),
            "--config_path=/this/path/does/not/exist"
        ],
            SHELL_TIMEOUT)
        self.assertNotEqual(proc.stderr, "")
        print(proc.stdout)
        print(proc.stderr)
        self.assertEqual(proc.proc.poll(), 1)

    @test_base.flaky
    def test_config_check_failure_valid_path(self):
        '''Config check must fail on a valid path with invalid content.'''
        proc = test_base.TimeoutRunner([
            self.binary,
            "--config_check",
            "--database_path=%s" % (self.dbpath),
            "--config_path=%s" % os.path.join(test_base.SCRIPT_DIR, "test.badconfig")
        ],
            SHELL_TIMEOUT)
        self.assertEqual(proc.proc.poll(), 1)
        self.assertNotEqual(proc.stderr, "")

    @test_base.flaky
    def test_config_check_failure_missing_plugin(self):
        '''Config check must fail hard when the config plugin is unknown.'''
        proc = test_base.TimeoutRunner([
            self.binary,
            "--config_check",
            "--database_path=%s" % (self.dbpath),
            "--config_plugin=does_not_exist"
        ],
            SHELL_TIMEOUT)
        self.assertNotEqual(proc.stderr, "")
        self.assertNotEqual(proc.proc.poll(), 0)
        # Also do not accept a SIGSEG
        self.assertEqual(proc.proc.poll(), EXIT_CATASTROPHIC)

    @test_base.flaky
    def test_config_check_example(self):
        '''Test that the example config passes'''
        example_path = os.path.join("deployment", "osquery.example.conf")
        proc = test_base.TimeoutRunner([
            self.binary,
            "--config_check",
            "--config_path=%s" % os.path.join(test_base.SCRIPT_DIR, "..", example_path)
        ],
            SHELL_TIMEOUT)
        self.assertEqual(proc.stdout, "")
        print(proc.stdout)
        print(proc.stderr)
        self.assertEqual(proc.proc.poll(), 0)

    def test_meta_commands(self):
        '''Test the supported meta shell/help/info commands'''
        commands = [
            '.help',
            '.all',
            '.all osquery_info',
            '.all this_table_does_not_exist',
            '.echo',
            '.echo on',
            '.echo off',
            '.header',
            '.header off',
            '.header on',
            '.mode',
            '.mode csv',
            '.mode column',
            '.mode line',
            '.mode list',
            '.mode pretty',
            '.mode this_mode_does_not_exists',
            '.nullvalue',
            '.nullvalue ""',
            '.print',
            '.print hello',
            '.schema osquery_info',
            '.schema this_table_does_not_exist',
            '.schema',
            '.separator',
            '.separator ,',
            '.show',
            '.tables osquery',
            '.tables osquery_info',
            '.tables this_table_does_not_exist',
            '.tables',
            '.trace',
            '.width',
            '.width 80',
            '.timer',
            '.timer on',
            '.timer off'
        ]
        # Smoke test: each meta command must run without crashing the shell.
        # (Previously the result was bound to an unused variable followed by
        # a redundant ``pass``.)
        for command in commands:
            self.osqueryi.run_command(command)

    @test_base.flaky
    def test_time(self):
        '''Demonstrating basic usage of OsqueryWrapper with the time table'''
        self.osqueryi.run_command(' ')  # flush error output
        result = self.osqueryi.run_query(
            'SELECT hour, minutes, seconds FROM time;')
        self.assertEqual(len(result), 1)
        row = result[0]
        self.assertTrue(0 <= int(row['hour']) <= 24)
        self.assertTrue(0 <= int(row['minutes']) <= 60)
        self.assertTrue(0 <= int(row['seconds']) <= 60)

    # TODO: Running foreign table tests as non-priv user fails
    @test_base.flaky
    @unittest.skipIf(os.name == "nt", "foreign table tests not supported on Windows.")
    def test_foreign_tables(self):
        '''Requires the --enable_foreign flag to add at least one table.'''
        self.osqueryi.run_command(' ')
        query = 'SELECT count(1) c FROM osquery_registry;'
        result = self.osqueryi.run_query(query)
        before = int(result[0]['c'])
        osqueryi2 = test_base.OsqueryWrapper(self.binary,
                                             args={"enable_foreign": True})
        osqueryi2.run_command(' ')
        # This execution fails if the user is not Administrator on Windows
        result = osqueryi2.run_query(query)
        after = int(result[0]['c'])
        self.assertGreater(after, before)

    @test_base.flaky
    def test_time_using_all(self):
        '''``.all time`` must not report a table query error.'''
        self.osqueryi.run_command(' ')
        result = self.osqueryi.run_command('.all time')
        self.assertNotEqual(result.rstrip(), "Error querying table: time")

    @test_base.flaky
    def test_config_bad_json(self):
        '''A bogus config path must not prevent queries from running.'''
        self.osqueryi = test_base.OsqueryWrapper(self.binary,
                                                 args={"config_path": "/"})
        result = self.osqueryi.run_query('SELECT * FROM time;')
        self.assertEqual(len(result), 1)
# Entry point: run the suite through osquery's harness, not unittest.main().
if __name__ == '__main__':
    test_base.Tester().run()
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import colorful.fields
import django.contrib.gis.db.models.fields
from django.conf import settings
import webmap.utils
class Migration(migrations.Migration):
    """Initial schema for the ``webmap`` app.

    Creates Layer (+ BaseLayer/OverlayLayer subclasses), Legend, License,
    Marker, Photo, Poi, Property, Sector and Status, then wires up the
    foreign keys between them. Do not edit code in applied migrations.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Layer',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(default='', help_text='Name of the layer', max_length=255, verbose_name='name')),
                ('slug', models.SlugField(unique=True, verbose_name='name in URL')),
                ('desc', models.TextField(help_text='Layer description.', null=True, verbose_name='description', blank=True)),
                ('order', models.IntegerField(default=0, verbose_name='order')),
                ('remark', models.TextField(help_text='Internal information about layer.', null=True, verbose_name='internal remark', blank=True)),
                ('enabled', models.BooleanField(default=True, help_text='True = the layer is enabled on map load', verbose_name='Enabled by defalut')),
            ],
            options={
                'ordering': ['order'],
                'verbose_name': 'layer',
                'verbose_name_plural': 'layers',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='BaseLayer',
            fields=[
                ('layer_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='webmap.Layer', on_delete=models.CASCADE)),
                ('url', models.URLField(help_text='Base layer tiles url. e.g. ', null=True, verbose_name='URL', blank=True)),
            ],
            options={
                'verbose_name': 'base layer',
                'verbose_name_plural': 'base layers',
            },
            bases=('webmap.layer',),
        ),
        migrations.CreateModel(
            name='Legend',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=255, verbose_name='name')),
                ('slug', models.SlugField(unique=True, verbose_name='name in URL')),
                ('desc', models.TextField(null=True, verbose_name='description', blank=True)),
                ('image', models.ImageField(upload_to='ikony', storage=webmap.utils.SlugifyFileSystemStorage(), verbose_name='image')),
            ],
            options={
                'verbose_name': 'legend item',
                'verbose_name_plural': 'legend items',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='License',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(help_text='License name', max_length=255, verbose_name='name')),
                ('desc', models.TextField(help_text='License description.', null=True, verbose_name='description', blank=True)),
            ],
            options={
                'verbose_name': 'license',
                'verbose_name_plural': 'licenses',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Marker',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(help_text='Name of the marker.', unique=True, max_length=255, verbose_name='name')),
                ('slug', models.SlugField(unique=True, null=True, verbose_name='name in URL')),
                ('desc', models.TextField(help_text='Detailed marker descrption.', null=True, verbose_name='description', blank=True)),
                ('remark', models.TextField(help_text='Internal information about layer.', null=True, verbose_name='internal remark', blank=True)),
                ('default_icon', models.ImageField(storage=webmap.utils.SlugifyFileSystemStorage(), upload_to='icons', null=True, verbose_name='default icon', blank=True)),
                ('menu_icon', models.ImageField(storage=webmap.utils.SlugifyFileSystemStorage(), upload_to='icons/marker/menu', null=True, verbose_name='menu icon', blank=True)),
                ('minzoom', models.PositiveIntegerField(default=1, help_text='Minimal zoom in which the POIs of this marker will be shown on the map.', verbose_name='Minimal zoom')),
                ('maxzoom', models.PositiveIntegerField(default=10, help_text='Maximal zoom in which the POIs of this marker will be shown on the map.', verbose_name='Maximal zoom')),
                ('line_width', models.FloatField(default=2, verbose_name='line width')),
                ('line_color', colorful.fields.RGBColorField(default='#ffc90e', verbose_name='line color')),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
                ('last_modification', models.DateTimeField(auto_now=True, verbose_name='last modification at')),
            ],
            options={
                'ordering': ['-layer__order', 'name'],
                'verbose_name': 'marker',
                'verbose_name_plural': 'markers',
                'permissions': [('can_only_view', 'Can only view')],
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='OverlayLayer',
            fields=[
                ('layer_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='webmap.Layer', on_delete=models.CASCADE)),
            ],
            options={
                'verbose_name': 'overlay layer',
                'verbose_name_plural': 'overlay layers',
            },
            bases=('webmap.layer',),
        ),
        migrations.CreateModel(
            name='Photo',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(help_text='Photo name', max_length=255, verbose_name='name', blank=True)),
                ('desc', models.TextField(help_text='Photo description.', null=True, verbose_name='description', blank=True)),
                ('order', models.IntegerField(default=0, verbose_name='order')),
                ('photographer', models.CharField(help_text='Full name of the author of the photography', max_length=255, verbose_name='Photography author', blank=True)),
                ('photo', models.ImageField(help_text='Upload photo in full resolution.', upload_to='photo', storage=webmap.utils.SlugifyFileSystemStorage(), verbose_name='photo')),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at', null=True)),
                ('last_modification', models.DateTimeField(auto_now=True, verbose_name='last modification at', null=True)),
                ('author', models.ForeignKey(related_name='photo_create', verbose_name='author', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE)),
                ('license', models.ForeignKey(verbose_name='license', to='webmap.License', on_delete=models.CASCADE)),
            ],
            options={
                'ordering': ['order'],
                'verbose_name': 'photo',
                'verbose_name_plural': 'photographies',
                'permissions': [('can_view_photo_list', 'Can view photo list')],
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Poi',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(help_text='Exact place name', max_length=255, verbose_name='name')),
                ('importance', models.SmallIntegerField(default=0, help_text='Minimal zoom modificator (use 20+ to show always).<br/>', verbose_name='importance')),
                ('geom', django.contrib.gis.db.models.fields.GeometryField(help_text='Add point: Select pencil with plus sign icon and place your point to the map.<br/>\n            Add line: Select line icon and by clicking to map draw the line. Finish drawing with double click.<br/>\n            Add area: Select area icon and by clicking to mapy draw the area. Finish drawing with double click.<br/>\n            Object edition: Select the first icon and then select object in map. Draw points in map to move them, use points in the middle of sections to add new edges.', srid=4326, verbose_name='place geometry')),
                ('desc', models.TextField(help_text='Text that will be shown after selecting POI.', null=True, verbose_name='description', blank=True)),
                ('desc_extra', models.TextField(help_text='Text that extends the description.', null=True, verbose_name='detailed description', blank=True)),
                ('url', models.URLField(help_text='Link to the web page of the place.', null=True, verbose_name='URL', blank=True)),
                ('address', models.CharField(help_text='Poi address (street, house number)', max_length=255, null=True, verbose_name='adress', blank=True)),
                ('remark', models.TextField(help_text='Internal information about POI.', null=True, verbose_name='Internal remark', blank=True)),
                ('properties_cache', models.CharField(max_length=255, null=True, blank=True)),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
                ('last_modification', models.DateTimeField(auto_now=True, verbose_name='last modification at')),
                ('author', models.ForeignKey(related_name='poi_create', verbose_name='author', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE)),
                ('marker', models.ForeignKey(related_name='pois', verbose_name='marker', to='webmap.Marker', help_text='Select icon, that will be shown in map', on_delete=models.CASCADE)),
            ],
            options={
                'verbose_name': 'place',
                'verbose_name_plural': 'places',
                'permissions': [('can_only_own_data_only', 'Can only edit his own data'), ('can_edit_advanced_fields', 'Can edit importance status')],
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Property',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(help_text='Status name', max_length=255, verbose_name='name')),
                ('as_filter', models.BooleanField(default=False, help_text='Show as a filter in right map menu?', verbose_name='as filter?')),
                ('order', models.IntegerField(default=0, verbose_name='order')),
                ('slug', models.SlugField(unique=True, verbose_name='Name in URL')),
                ('desc', models.TextField(help_text='Property description.', null=True, verbose_name='description', blank=True)),
                ('remark', models.TextField(help_text='Internal information about the property.', null=True, verbose_name='Internal remark', blank=True)),
                ('default_icon', models.ImageField(storage=webmap.utils.SlugifyFileSystemStorage(), upload_to='icons', null=True, verbose_name='default icon', blank=True)),
            ],
            options={
                'ordering': ['order'],
                'verbose_name': 'property',
                'verbose_name_plural': 'properties',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Sector',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255, verbose_name='name')),
                ('slug', models.SlugField(unique=True, verbose_name='name in URL')),
                ('geom', django.contrib.gis.db.models.fields.PolygonField(help_text='Sector area', srid=4326, verbose_name='area')),
            ],
            options={
                'verbose_name': 'sector',
                'verbose_name_plural': 'sectors',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Status',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(help_text='Status name', unique=True, max_length=255, verbose_name='name')),
                ('desc', models.TextField(help_text='Status description.', null=True, verbose_name='description', blank=True)),
                ('show', models.BooleanField(default=False, help_text='Show to map user', verbose_name='show')),
                ('show_to_mapper', models.BooleanField(default=False, help_text='Show to mapper', verbose_name='show to mapper')),
            ],
            options={
                'verbose_name': 'status',
                'verbose_name_plural': 'statuses',
            },
            bases=(models.Model,),
        ),
        # Cross-model foreign keys, added after all models exist.
        migrations.AddField(
            model_name='property',
            name='status',
            field=models.ForeignKey(verbose_name='status', to='webmap.Status', on_delete=models.CASCADE),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='poi',
            name='properties',
            field=models.ManyToManyField(help_text='POI properties', to='webmap.Property', null=True, verbose_name='properties', blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='poi',
            name='status',
            field=models.ForeignKey(default=0, verbose_name='status', to='webmap.Status', help_text='POI status, determinse if it will be shown in map', on_delete=models.CASCADE),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='poi',
            name='updated_by',
            field=models.ForeignKey(related_name='poi_update', verbose_name='last updated by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='photo',
            name='poi',
            field=models.ForeignKey(related_name='photos', verbose_name='poi', to='webmap.Poi', on_delete=models.CASCADE),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='photo',
            name='updated_by',
            field=models.ForeignKey(related_name='photo_update', verbose_name='last updated by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='marker',
            name='layer',
            field=models.ForeignKey(verbose_name='layer', to='webmap.Layer', on_delete=models.CASCADE),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='marker',
            name='status',
            field=models.ForeignKey(verbose_name='status', to='webmap.Status', on_delete=models.CASCADE),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='layer',
            name='status',
            field=models.ForeignKey(verbose_name='status', to='webmap.Status', on_delete=models.CASCADE),
            preserve_default=True,
        ),
    ]
| |
from collections import defaultdict, namedtuple
import glob
import gzip
import logging
import os
import shutil
import StringIO
import stat
import subprocess
import tempfile
import common
import opkg
import release as bismark_release
# A package assignment resolved down to one concrete node.
_NodePackage = namedtuple('NodePackage',
                          ['node', 'name', 'version', 'architecture'])


class NodePackage(_NodePackage):
    """(node, name, version, architecture) tuple for a per-node package."""

    @property
    def package(self):
        # Strip the node field back off, yielding the release-level
        # Package record used as a key elsewhere in this module.
        return bismark_release.Package(self.name, self.version, self.architecture)
def deploy(releases_root,
           destination,
           signing_key,
           releases,
           experiments,
           node_groups):
    """Stage a complete downloads tree locally, then interactively rsync it.

    Builds the full deployment (packages, images, per-node symlink farms,
    package indices, signatures, static files) in a temporary staging
    directory, shows an rsync dry-run diff against *destination*, and only
    copies after the operator confirms.

    NOTE: Python 2 only (print statements, raw_input).
    """
    deployment_path = tempfile.mkdtemp(prefix='bismark-downloads-staging-')
    logging.info('staging deployment in %s', deployment_path)
    # Fix permissons of the deployment path. mkdtemp gives 700 permissions,
    # and rsync will copy those permissions to the Web server, so we end
    # up with permission denied errors unless we fix permissions here.
    user_perms = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
    group_perms = stat.S_IRGRP | stat.S_IXGRP
    other_perms = stat.S_IROTH | stat.S_IXOTH
    os.chmod(deployment_path, user_perms | group_perms | other_perms)
    # Per-release content first, then the deployment-wide passes below.
    for release in releases:
        _deploy_packages(release, deployment_path)
        _deploy_images(release, deployment_path)
        _deploy_builtin_packages(release, deployment_path)
        _deploy_extra_packages(release, deployment_path)
        _deploy_upgrades(release, node_groups, deployment_path)
        _deploy_experiment_packages(release,
                                    experiments,
                                    node_groups,
                                    deployment_path)
        _deploy_experiment_configurations(release,
                                          experiments,
                                          node_groups,
                                          deployment_path)
    _make_dummy_directories(deployment_path)
    _deploy_dummy_experiment_configurations(deployment_path)
    _deploy_packages_gz(deployment_path)
    _deploy_packages_sig(deployment_path, signing_key)
    _deploy_upgradable_sentinels(deployment_path)
    _deploy_static(releases_root, deployment_path)
    print 'The following files differ at the destination:'
    diff_success = _diff_from_destination(deployment_path, destination)
    if diff_success:
        deploy_response = raw_input('\nDeploy to %s? (y/N) ' % (destination,))
        if deploy_response == 'y':
            print 'Deploying from %s to %s' % (deployment_path, destination)
            _copy_to_destination(deployment_path, destination)
        else:
            print 'Skipping deployment'
    # Always offer cleanup, even when the diff (and thus the deploy) failed.
    clean_response = raw_input(
        '\nDelete staging directory %s? (Y/n) ' % (deployment_path,))
    if clean_response != 'n':
        print 'Removing staging directory %s' % (deployment_path,)
        shutil.rmtree(deployment_path)
    else:
        print 'Staging directory %s left intact' % (deployment_path,)
def _deploy_packages(release, deployment_path):
    """Copy every package of *release* into the staged package store.

    Packages live on disk as <sha1>.ipk under release.packages_path and
    are copied (with metadata, via copy2) to
    <deployment_path>/packages/<release>/<architecture>/<filename>.
    """
    source_root = release.packages_path
    for pkg in release.packages:
        target_dir = os.path.join(
            deployment_path, 'packages', release.name, pkg.architecture)
        common.makedirs(target_dir)
        src = os.path.join(source_root, '%s.ipk' % pkg.sha1)
        dst = os.path.join(target_dir, pkg.filename)
        shutil.copy2(src, dst)
def _deploy_images(release, deployment_path):
    """Copy each firmware image into <deployment_path>/<release>/<arch>/."""
    source_root = release.images_path
    for image in release.images:
        target_dir = os.path.join(
            deployment_path, release.name, image.architecture)
        common.makedirs(target_dir)
        shutil.copy2(os.path.join(source_root, image.name), target_dir)
def _deployment_package_paths(release, deployment_path):
    """Map each release-level package to its staged .ipk path.

    Returns {package: <deployment_path>/packages/<release>/<arch>/<file>}.
    """
    logging.info('locating package in deployed path')
    return dict(
        (pkg.package,
         os.path.join(deployment_path,
                      'packages',
                      release.name,
                      pkg.architecture,
                      pkg.filename))
        for pkg in release.packages)
def _deploy_builtin_packages(release, deployment_path):
    """Symlink the release's builtin packages into <release>/<arch>/packages.

    Links are relative so the tree stays valid after rsync to the server.
    """
    staged = _deployment_package_paths(release, deployment_path)
    for pkg in release.builtin_packages:
        target = staged[pkg]
        for arch in release.normalize_architecture(pkg.architecture):
            link_dir = os.path.join(
                deployment_path, release.name, arch, 'packages')
            common.makedirs(link_dir)
            os.symlink(os.path.relpath(target, link_dir),
                       os.path.join(link_dir, os.path.basename(target)))
def _deploy_extra_packages(release, deployment_path):
    """Symlink the release's extra packages into <release>/<arch>/extra-packages.

    Same relative-symlink scheme as _deploy_builtin_packages, different feed.
    """
    staged = _deployment_package_paths(release, deployment_path)
    for pkg in release.extra_packages:
        target = staged[pkg]
        for arch in release.normalize_architecture(pkg.architecture):
            link_dir = os.path.join(
                deployment_path, release.name, arch, 'extra-packages')
            common.makedirs(link_dir)
            os.symlink(os.path.relpath(target, link_dir),
                       os.path.join(link_dir, os.path.basename(target)))
def _resolve_groups_to_nodes(node_groups, group_packages):
    """Expand group-level package assignments into per-node NodePackages.

    Raises if the expansion assigns two different versions of the same
    package (name, architecture) to one node.
    """
    logging.info('resolving groups to nodes')
    node_packages = set(
        NodePackage(node, gp.name, gp.version, gp.architecture)
        for gp in group_packages
        for node in node_groups.resolve_to_nodes(gp.group))
    # TODO(sburnett): Change this to pick the latest version instead of
    # throwing an error.
    seen = set()
    for assignment in node_packages:
        key = (assignment.node, assignment.name, assignment.architecture)
        if key in seen:
            raise Exception('Conflicting package versions for a node: %s' % (key,))
        seen.add(key)
    return node_packages
def _normalize_default_packages(node_packages, nodes):
    """Expand 'default' package assignments to every node in *nodes*.

    Explicit per-node assignments win over the 'default' version; the
    result contains no 'default' pseudo-node entries.
    """
    logging.info('normalizing packages')
    # (name, architecture) -> {node: version}, explicit assignments first.
    versions_by_package = defaultdict(dict)
    for assignment in node_packages:
        if assignment.node != 'default':
            key = (assignment.name, assignment.architecture)
            versions_by_package[key][assignment.node] = assignment.version
    # Then fill the default version in for every node lacking an explicit one.
    for assignment in node_packages:
        if assignment.node != 'default':
            continue
        per_node = versions_by_package[(assignment.name, assignment.architecture)]
        for node in nodes:
            if node not in per_node:
                per_node[node] = assignment.version
    return set(
        NodePackage(node, name, version, architecture)
        for (name, architecture), per_node in versions_by_package.items()
        for node, version in per_node.items())
def _symlink_packages(release, packages, subdirectory, deployment_path):
    """Symlink per-node package assignments under <release>/<arch>/<subdirectory>/<node>/.

    *packages* is an iterable of NodePackage; links point (relatively)
    into the shared staged package store.
    """
    staged = _deployment_package_paths(release, deployment_path)
    for assignment in packages:
        target = staged[assignment.package]
        for arch in release.normalize_architecture(assignment.architecture):
            link_dir = os.path.join(deployment_path,
                                    release.name,
                                    arch,
                                    subdirectory,
                                    assignment.node)
            common.makedirs(link_dir)
            os.symlink(os.path.relpath(target, link_dir),
                       os.path.join(link_dir, os.path.basename(target)))
def _deploy_upgrades(release, node_groups, deployment_path):
    """Stage the release's per-node package upgrades under updates-device/."""
    resolved = _resolve_groups_to_nodes(node_groups, release.package_upgrades)
    # Expand 'default' entries only over nodes that have some upgrade.
    upgrade_nodes = set(assignment.node for assignment in resolved)
    normalized = _normalize_default_packages(resolved, upgrade_nodes)
    _symlink_packages(release, normalized, 'updates-device', deployment_path)
def _deploy_experiment_packages(release,
                                experiments,
                                node_groups,
                                deployment_path):
    """Symlink experiment packages per node under experiments-device/.

    Collects every experiment package targeted at *release*, resolves
    group assignments to concrete nodes, and expands 'default' entries
    over all nodes that appear in any experiment header group.
    NOTE: Python 2 only (dict.iteritems).
    """
    all_group_packages = set()
    for name, experiment in experiments.iteritems():
        for group_package in experiment.packages:
            if group_package.release != release.name:
                continue
            all_group_packages.add(group_package)
    node_packages = _resolve_groups_to_nodes(node_groups, all_group_packages)
    # Nodes eligible for 'default' expansion: every node with a package
    # plus every node reachable from an experiment header group.
    nodes = set()
    for node_package in node_packages:
        nodes.add(node_package.node)
    for _, experiment in experiments.iteritems():
        for group in experiment.header_groups:
            nodes.update(node_groups.resolve_to_nodes(group))
    normalized_packages = _normalize_default_packages(node_packages, nodes)
    _symlink_packages(release,
                      normalized_packages,
                      'experiments-device',
                      deployment_path)
def _normalize_default_experiments(node_dicts):
logging.info('normalizing experiments')
if 'default' not in node_dicts:
return node_dicts
default_dict = node_dicts['default']
for key, default_value in default_dict.items():
for node, value_dict in node_dicts.items():
if node == 'default':
continue
if key in value_dict:
continue
value_dict[key] = default_value
return node_dicts
def _bool_to_string(b):
if b:
return '1'
else:
return '0'
def _normalized_configuration_headers(experiments, node_groups):
    """Render the UCI 'config experiment' stanza for each node.

    Returns {node: {experiment_name: uci_text}}, with 'default' entries
    expanded by _normalize_default_experiments.  Raises if two groups
    give the same node conflicting definitions of one experiment.
    NOTE: Python 2 only (print >>, StringIO module, iteritems).
    """
    group_configuration_headers = defaultdict(dict)
    for name, experiment in experiments.iteritems():
        for group in experiment.header_groups:
            s = StringIO.StringIO()
            print >>s, "config 'experiment' '%s'" % experiment.name
            print >>s, " option 'display_name' '%s'" % experiment.display_name
            print >>s, " option 'description' '%s'" % experiment.description
            for conflict in experiment.conflicts:
                print >>s, " list 'conflicts' '%s'" % conflict
            # required/revoked/installed may vary per header group.
            required = _bool_to_string(experiment.is_required(group))
            print >>s, " option 'required' '%s'" % required
            revoked = _bool_to_string(experiment.is_revoked(group))
            print >>s, " option 'revoked' '%s'" % revoked
            installed = _bool_to_string(
                experiment.is_installed_by_default(group))
            print >>s, " option 'installed' '%s'" % installed
            group_configuration_headers[group][name] = s.getvalue()
    # Fan group-level stanzas out to concrete nodes, rejecting conflicts.
    node_configuration_headers = defaultdict(dict)
    for group, headers in group_configuration_headers.items():
        for node in node_groups.resolve_to_nodes(group):
            for experiment, header in headers.items():
                if (experiment in node_configuration_headers[node] and
                        node_configuration_headers[node][experiment] != header):
                    raise Exception('conflicting experiment defintions')
                node_configuration_headers[node][experiment] = header
    return _normalize_default_experiments(node_configuration_headers)
def _normalized_configuration_bodies(release, experiments, node_groups):
    """Collect per-node experiment package versions for *release*.

    Returns {node: {(architecture, experiment, package_name): version}},
    with 'default' entries expanded.  Raises when one node is assigned
    two different versions of the same key.
    NOTE: Python 2 only (dict.iteritems).
    """
    group_experiment_packages = defaultdict(lambda: defaultdict(set))
    for name, experiment in experiments.iteritems():
        for group_package in experiment.packages:
            if group_package.release != release.name:
                continue
            group_experiment_packages[
                group_package.group][name].add(group_package)
    bodies = defaultdict(dict)
    for group, experiment_packages in group_experiment_packages.items():
        for node in node_groups.resolve_to_nodes(group):
            for experiment, packages in experiment_packages.items():
                for package in packages:
                    # One package line may expand to several concrete
                    # architectures (e.g. wildcard architectures).
                    architectures = release.normalize_architecture(
                        package.architecture)
                    for architecture in architectures:
                        key = architecture, experiment, package.name
                        if key in bodies[node] and bodies[node][key] != package.version:
                            raise Exception(
                                'conflicting versions for package in experiment: %s' % (key,))
                        bodies[node][key] = package.version
    return _normalize_default_experiments(bodies)
def _deploy_experiment_configurations(release,
                                      experiments,
                                      node_groups,
                                      deployment_path):
    """Write each node's 'Experiments' UCI file into experiments-device/.

    Combines the per-node stanza headers with the per-node package lists,
    falling back to the 'default' pseudo-node where a node has no
    specific entry.  NOTE: Python 2 only (print >>).
    """
    normalized_headers = _normalized_configuration_headers(
        experiments, node_groups)
    normalized_bodies = _normalized_configuration_bodies(
        release, experiments, node_groups)
    all_nodes = set()
    all_nodes.update(normalized_headers.keys())
    all_nodes.update(normalized_bodies.keys())
    # (architecture, node) -> {experiment: rendered stanza text}
    configurations = defaultdict(dict)
    for node in all_nodes:
        if node in normalized_bodies:
            packages = normalized_bodies[node]
        elif 'default' in normalized_bodies:
            packages = normalized_bodies['default']
        else:
            continue
        for architecture, experiment, name in sorted(packages):
            if experiment not in configurations[architecture, node]:
                # First package line for this experiment: emit its header.
                if node in normalized_headers and experiment in normalized_headers[node]:
                    headers = normalized_headers[node][experiment]
                else:
                    headers = normalized_headers['default'][experiment]
                configurations[architecture, node][experiment] = headers
            configurations[architecture, node][experiment] += (
                " list 'package' '%s'\n" % name)
    for (architecture, node), experiments in configurations.items():
        filename = os.path.join(deployment_path,
                                release.name,
                                architecture,
                                'experiments-device',
                                node,
                                'Experiments')
        common.makedirs(os.path.dirname(filename))
        with open(filename, 'w') as handle:
            for name, configuration in sorted(experiments.items()):
                handle.write(configuration)
                print >>handle, ''
def _make_dummy_directories(deployment_path):
    """Create empty 'experiments' and 'updates' feed directories.

    Every <release>/<architecture> directory must contain these feeds
    even when empty; the staged package store under
    <deployment_path>/packages/ must be left alone.
    """
    patterns = [
        '*/*',
    ]
    for pattern in patterns:
        full_pattern = os.path.join(deployment_path, pattern)
        for dirname in glob.iglob(full_pattern):
            # BUG FIX: the original compared the full parent path
            # (os.path.dirname(dirname)) against 'packages', which can
            # never match; compare the parent's basename so entries under
            # the package store are actually skipped.
            if os.path.basename(os.path.dirname(dirname)) == 'packages':
                continue
            common.makedirs(os.path.join(dirname, 'experiments'))
            common.makedirs(os.path.join(dirname, 'updates'))
def _deploy_dummy_experiment_configurations(deployment_path):
    """Write an (almost) empty 'Experiments' file in each generic
    experiments/ feed so clients always find one.
    NOTE: Python 2 only (print >> writes a single newline).
    """
    patterns = [
        '*/*/experiments',
    ]
    for pattern in patterns:
        full_pattern = os.path.join(deployment_path, pattern)
        for dirname in glob.iglob(full_pattern):
            experiments_filename = os.path.join(dirname, 'Experiments')
            with open(experiments_filename, 'w') as handle:
                print >>handle
def _deploy_packages_gz(deployment_path):
    """Generate an opkg Packages.gz index in every package feed directory.

    mtime=0 keeps the gzip output byte-identical across runs, so rsync
    does not re-upload unchanged indices.
    """
    patterns = [
        '*/*/experiments',
        '*/*/experiments-device/*',
        '*/*/extra-packages',
        '*/*/packages',
        '*/*/updates',
        '*/*/updates-device/*',
    ]
    for pattern in patterns:
        full_pattern = os.path.join(deployment_path, pattern)
        for dirname in glob.iglob(full_pattern):
            package_indices = []
            # Sort so the concatenated index is deterministic.
            for filename in sorted(glob.glob(os.path.join(dirname, '*.ipk'))):
                package_index = opkg.generate_package_index(filename)
                package_indices.append(package_index)
            index_contents = '\n'.join(package_indices)
            index_filename = os.path.join(dirname, 'Packages.gz')
            handle = gzip.GzipFile(index_filename, 'wb', mtime=0)
            handle.write(index_contents)
            handle.close()
def _deploy_packages_sig(deployment_path, signing_key):
    """Sign every Packages.gz with openssl smime, producing Packages.sig.

    Refuses to run unless the signing key has exactly 0400 permissions.
    NOTE(review): the openssl command is built by string interpolation
    and run with shell=True; paths are operator-supplied here, but an
    argument list with shell=False would be safer.
    NOTE: Python 2 only (0400 octal literal).
    """
    signing_key_path = os.path.expanduser(signing_key)
    if not os.path.isfile(signing_key_path):
        raise Exception('Cannot find signing key %r' % (signing_key_path,))
    if stat.S_IMODE(os.stat(signing_key_path).st_mode) != 0400:
        raise Exception('For security, %r must have 0400 permissions' % (
            signing_key_path,))
    patterns = [
        '*/*/experiments',
        '*/*/experiments-device/*',
        '*/*/extra-packages',
        '*/*/packages',
        '*/*/updates',
        '*/*/updates-device/*',
    ]
    for pattern in patterns:
        full_pattern = os.path.join(deployment_path, pattern)
        for dirname in glob.iglob(full_pattern):
            packages_gz_filename = os.path.join(dirname, 'Packages.gz')
            if not os.path.isfile(packages_gz_filename):
                continue
            packages_sig_filename = os.path.join(dirname, 'Packages.sig')
            command = 'openssl smime -in %s -sign -signer %s -binary -outform PEM -out %s' % (
                packages_gz_filename, signing_key_path, packages_sig_filename)
            logging.info('Going to run: %s', command)
            return_code = subprocess.call(command, shell=True)
            if return_code != 0:
                logging.error('openssl smime exited with error code %s',
                              return_code)
                raise Exception('Error signing Packages.gz')
def _deploy_upgradable_sentinels(deployment_path):
patterns = [
'*/*/updates-device/*',
'*/*/experiments-device/*',
]
for pattern in patterns:
full_pattern = os.path.join(deployment_path, pattern)
for dirname in glob.iglob(full_pattern):
if not os.path.isdir(dirname):
continue
with open(os.path.join(dirname, 'Upgradable'), 'w'):
pass
def _deploy_static(releases_root, deployment_path):
static_pattern = os.path.join(releases_root, 'static', '*')
for filename in glob.iglob(static_pattern):
if os.path.isdir(filename):
continue
if os.path.islink(filename):
destination = os.readlink(filename)
source = os.path.join(deployment_path, os.path.basename(filename))
os.symlink(destination, source)
continue
shutil.copy2(filename, deployment_path)
def _diff_from_destination(deployment_path, destination):
    """Dry-run rsync against *destination*, printing the itemized diff.

    Returns True when rsync succeeded, False otherwise.  Packages.sig is
    excluded because signatures change on every run.
    NOTE: Python 2 only (print statement).
    """
    command = 'rsync -n -icvlrz --exclude=Packages.sig --delete %s/ %s' % (
        deployment_path, destination)
    logging.info('Going to run: %s', command)
    return_code = subprocess.call(command, shell=True)
    if return_code != 0:
        print 'rsync exited with error code %d' % return_code
        return False
    return True
def _copy_to_destination(deployment_path, destination):
    """rsync the staged tree to *destination* (deleting stale files).

    Errors are reported on stdout but not raised.
    NOTE: Python 2 only (print statement).
    """
    command = 'rsync -cvaz --delete %s/ %s' % (deployment_path, destination)
    logging.info('Going to run: %s', command)
    return_code = subprocess.call(command, shell=True)
    if return_code != 0:
        print 'rsync exited with error code %d' % return_code
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: mso_schema_template_anp_epg
short_description: Manage Endpoint Groups (EPGs) in schema templates
description:
- Manage EPGs in schema templates on Cisco ACI Multi-Site.
author:
- Dag Wieers (@dagwieers)
version_added: '2.8'
options:
schema:
description:
- The name of the schema.
type: str
required: yes
template:
description:
- The name of the template.
type: str
required: yes
anp:
description:
- The name of the ANP.
type: str
required: yes
epg:
description:
- The name of the EPG to manage.
type: str
aliases: [ name ]
display_name:
description:
- The name as displayed on the MSO web interface.
type: str
# contracts:
# description:
# - A list of contracts associated to this ANP.
# type: list
bd:
description:
- The BD associated to this ANP.
type: dict
suboptions:
name:
description:
- The name of the BD to associate with.
required: true
type: str
schema:
description:
- The schema that defines the referenced BD.
- If this parameter is unspecified, it defaults to the current schema.
type: str
template:
description:
- The template that defines the referenced BD.
type: str
vrf:
version_added: '2.9'
description:
- The VRF associated to this ANP.
type: dict
suboptions:
name:
description:
- The name of the VRF to associate with.
required: true
type: str
schema:
description:
- The schema that defines the referenced VRF.
- If this parameter is unspecified, it defaults to the current schema.
type: str
template:
description:
- The template that defines the referenced VRF.
type: str
subnets:
description:
- The subnets associated to this ANP.
type: list
suboptions:
subnet:
description:
- The IP range in CIDR notation.
type: str
required: true
aliases: [ ip ]
description:
description:
- The description of this subnet.
type: str
scope:
description:
- The scope of the subnet.
type: str
choices: [ private, public ]
shared:
description:
- Whether this subnet is shared between VRFs.
type: bool
no_default_gateway:
description:
- Whether this subnet has a default gateway.
type: bool
useg_epg:
description:
- Whether this is a USEG EPG.
type: bool
# useg_epg_attributes:
# description:
# - A dictionary consisting of USEG attributes.
# type: dict
intra_epg_isolation:
description:
- Whether intra EPG isolation is enforced.
- When not specified, this parameter defaults to C(unenforced).
type: str
choices: [ enforced, unenforced ]
intersite_multicaste_source:
description:
- Whether intersite multicast source is enabled.
- When not specified, this parameter defaults to C(no).
type: bool
preferred_group:
description:
- Whether this EPG is added to preferred group or not.
- When not specified, this parameter defaults to C(no).
type: bool
version_added: 2.9
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
seealso:
- module: mso_schema_template_anp
- module: mso_schema_template_anp_epg_subnet
- module: mso_schema_template_bd
- module: mso_schema_template_contract_filter
extends_documentation_fragment: mso
'''
EXAMPLES = r'''
- name: Add a new EPG
mso_schema_template_anp_epg:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
anp: ANP 1
epg: EPG 1
bd:
name: bd1
vrf:
name: vrf1
state: present
delegate_to: localhost
- name: Add a new EPG with preferred group.
mso_schema_template_anp_epg:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
anp: ANP 1
epg: EPG 1
state: present
preferred_group: yes
delegate_to: localhost
- name: Remove an EPG
mso_schema_template_anp_epg:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
anp: ANP 1
epg: EPG 1
bd:
name: bd1
vrf:
name: vrf1
state: absent
delegate_to: localhost
- name: Query a specific EPG
mso_schema_template_anp_epg:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
anp: ANP 1
epg: EPG 1
bd:
name: bd1
vrf:
name: vrf1
state: query
delegate_to: localhost
register: query_result
- name: Query all EPGs
mso_schema_template_anp_epg:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
anp: ANP 1
epg: EPG 1
bd:
name: bd1
vrf:
name: vrf1
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.mso import MSOModule, mso_argument_spec, mso_reference_spec, mso_subnet_spec, issubset
def main():
    """Ansible entry point: create, update, remove or query an EPG.

    Looks up the schema/template/ANP hierarchy on the MSO controller,
    then issues a single JSON-patch request (add/replace/remove) against
    the EPG path, honoring check mode.
    """
    argument_spec = mso_argument_spec()
    argument_spec.update(
        schema=dict(type='str', required=True),
        template=dict(type='str', required=True),
        anp=dict(type='str', required=True),
        epg=dict(type='str', aliases=['name']),  # This parameter is not required for querying all objects
        bd=dict(type='dict', options=mso_reference_spec()),
        vrf=dict(type='dict', options=mso_reference_spec()),
        display_name=dict(type='str'),
        useg_epg=dict(type='bool'),
        intra_epg_isolation=dict(type='str', choices=['enforced', 'unenforced']),
        intersite_multicaste_source=dict(type='bool'),
        subnets=dict(type='list', options=mso_subnet_spec()),
        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
        preferred_group=dict(type='bool'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[
            ['state', 'absent', ['epg']],
            ['state', 'present', ['epg']],
        ],
    )
    schema = module.params['schema']
    template = module.params['template']
    anp = module.params['anp']
    epg = module.params['epg']
    display_name = module.params['display_name']
    bd = module.params['bd']
    vrf = module.params['vrf']
    useg_epg = module.params['useg_epg']
    intra_epg_isolation = module.params['intra_epg_isolation']
    intersite_multicaste_source = module.params['intersite_multicaste_source']
    subnets = module.params['subnets']
    state = module.params['state']
    preferred_group = module.params['preferred_group']
    mso = MSOModule(module)
    # Get schema_id
    schema_obj = mso.get_obj('schemas', displayName=schema)
    if schema_obj:
        schema_id = schema_obj['id']
    else:
        mso.fail_json(msg="Provided schema '{0}' does not exist".format(schema))
    schema_path = 'schemas/{id}'.format(**schema_obj)
    # Get template
    templates = [t['name'] for t in schema_obj['templates']]
    if template not in templates:
        mso.fail_json(msg="Provided template '{0}' does not exist. Existing templates: {1}".format(template, ', '.join(templates)))
    template_idx = templates.index(template)
    # Get ANP
    anps = [a['name'] for a in schema_obj['templates'][template_idx]['anps']]
    if anp not in anps:
        mso.fail_json(msg="Provided anp '{0}' does not exist. Existing anps: {1}".format(anp, ', '.join(anps)))
    anp_idx = anps.index(anp)
    # Get EPG (may be absent when creating, or None when querying all)
    epgs = [e['name'] for e in schema_obj['templates'][template_idx]['anps'][anp_idx]['epgs']]
    if epg is not None and epg in epgs:
        epg_idx = epgs.index(epg)
        mso.existing = schema_obj['templates'][template_idx]['anps'][anp_idx]['epgs'][epg_idx]
    if state == 'query':
        # epg=None means "list every EPG under this ANP".
        if epg is None:
            mso.existing = schema_obj['templates'][template_idx]['anps'][anp_idx]['epgs']
        elif not mso.existing:
            mso.fail_json(msg="EPG '{epg}' not found".format(epg=epg))
        mso.exit_json()
    epgs_path = '/templates/{0}/anps/{1}/epgs'.format(template, anp)
    epg_path = '/templates/{0}/anps/{1}/epgs/{2}'.format(template, anp, epg)
    ops = []
    mso.previous = mso.existing
    if state == 'absent':
        if mso.existing:
            mso.sent = mso.existing = {}
            ops.append(dict(op='remove', path=epg_path))
    elif state == 'present':
        bd_ref = mso.make_reference(bd, 'bd', schema_id, template)
        vrf_ref = mso.make_reference(vrf, 'vrf', schema_id, template)
        subnets = mso.make_subnets(subnets)
        # New EPGs default their display name to the EPG name.
        if display_name is None and not mso.existing:
            display_name = epg
        payload = dict(
            name=epg,
            displayName=display_name,
            uSegEpg=useg_epg,
            intraEpg=intra_epg_isolation,
            proxyArp=intersite_multicaste_source,
            # FIXME: Missing functionality
            # uSegAttrs=[],
            contractRelationships=[],
            subnets=subnets,
            bdRef=bd_ref,
            preferredGroup=preferred_group,
            vrfRef=vrf_ref,
        )
        mso.sanitize(payload, collate=True)
        # Replace an existing EPG in place; otherwise append to the list.
        if mso.existing:
            ops.append(dict(op='replace', path=epg_path, value=mso.sent))
        else:
            ops.append(dict(op='add', path=epgs_path + '/-', value=mso.sent))
        mso.existing = mso.proposed
    if not module.check_mode:
        mso.request(schema_path, method='PATCH', data=ops)
    mso.exit_json()
if __name__ == "__main__":
main()
| |
"""
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import combinations
from itertools import product
import numpy as np
from scipy.misc import comb
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.fixes import bincount
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also make a hastie_10_2 dataset
hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
hastie_X = hastie_X.astype(np.float32)
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
    """Check classification on a toy dataset."""
    ForestClassifier = FOREST_CLASSIFIERS[name]
    # Fit once with defaults and once with max_features=1; both must
    # recover the toy labels exactly and expose 10 estimators.
    for extra_params in ({}, {'max_features': 1}):
        clf = ForestClassifier(n_estimators=10, random_state=1,
                               **extra_params)
        clf.fit(X, y)
        assert_array_equal(clf.predict(T), true_result)
        assert_equal(10, len(clf))
    # also test apply on the last fitted forest
    leaf_indices = clf.apply(X)
    assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
    # nose-style generator test: one case per forest classifier.
    for name in FOREST_CLASSIFIERS:
        yield check_classification_toy, name
def check_iris_criterion(name, criterion):
    # Check consistency on dataset iris.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    clf = ForestClassifier(n_estimators=10, criterion=criterion,
                           random_state=1)
    clf.fit(iris.data, iris.target)
    score = clf.score(iris.data, iris.target)
    assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
                   % (criterion, score))
    # Restricting max_features makes the fit harder, so accept a lower
    # training score.
    clf = ForestClassifier(n_estimators=10, criterion=criterion,
                           max_features=2, random_state=1)
    clf.fit(iris.data, iris.target)
    score = clf.score(iris.data, iris.target)
    assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
                   % (criterion, score))
def test_iris():
    # nose-style generator test: every classifier x criterion pair.
    for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
        yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
    # Check consistency on dataset boston house prices.
    ForestRegressor = FOREST_REGRESSORS[name]
    clf = ForestRegressor(n_estimators=5, criterion=criterion,
                          random_state=1)
    clf.fit(boston.data, boston.target)
    score = clf.score(boston.data, boston.target)
    assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
                   "and score = %f" % (criterion, score))
    # Same check with a restricted number of features per split.
    clf = ForestRegressor(n_estimators=5, criterion=criterion,
                          max_features=6, random_state=1)
    clf.fit(boston.data, boston.target)
    score = clf.score(boston.data, boston.target)
    assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
                   "and score = %f" % (criterion, score))
def test_boston():
    # nose-style generator test: every regressor with the mse criterion.
    for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
        yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
    """Regression forests must never expose classes_ / n_classes_."""
    regressor = FOREST_REGRESSORS[name](random_state=0)
    # Neither before...
    assert_false(hasattr(regressor, "classes_"))
    assert_false(hasattr(regressor, "n_classes_"))
    # ...nor after fitting.
    regressor.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
    assert_false(hasattr(regressor, "classes_"))
    assert_false(hasattr(regressor, "n_classes_"))
def test_regressor_attributes():
    # nose-style generator test: one case per forest regressor.
    for name in FOREST_REGRESSORS:
        yield check_regressor_attributes, name
def check_probability(name):
    # Predict probabilities.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    with np.errstate(divide="ignore"):
        clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
                               max_depth=1)
        clf.fit(iris.data, iris.target)
        # Class probabilities must sum to one for every sample.
        assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
                                  np.ones(iris.data.shape[0]))
        # predict_log_proba must agree with log(predict_proba).
        assert_array_almost_equal(clf.predict_proba(iris.data),
                                  np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
    # nose-style generator test: one case per forest classifier.
    for name in FOREST_CLASSIFIERS:
        yield check_probability, name
def check_importances(name, criterion, X, y):
    """Check that feature importances flag exactly the informative features
    and are stable under parallelism and sample-weight scaling."""
    ForestEstimator = FOREST_ESTIMATORS[name]
    est = ForestEstimator(n_estimators=20, criterion=criterion,
                          random_state=0)
    est.fit(X, y)
    importances = est.feature_importances_
    # The dataset has 10 features, 3 of them informative (see caller).
    n_important = np.sum(importances > 0.1)
    assert_equal(importances.shape[0], 10)
    assert_equal(n_important, 3)
    # XXX: Remove this test in 0.19 after transform support to estimators
    # is removed.
    X_new = assert_warns(
        DeprecationWarning, est.transform, X, threshold="mean")
    # BUG FIX: the original called assert_less(0 < X_new.shape[1], ...),
    # comparing a bool (always 0 or 1) to X.shape[1], which passed
    # trivially.  Assert both bounds explicitly instead.
    assert_greater(X_new.shape[1], 0)
    assert_less(X_new.shape[1], X.shape[1])
    # Check with parallel
    importances = est.feature_importances_
    est.set_params(n_jobs=2)
    importances_parallel = est.feature_importances_
    assert_array_almost_equal(importances, importances_parallel)
    # Check with sample weights
    sample_weight = check_random_state(0).randint(1, 10, len(X))
    est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
    est.fit(X, y, sample_weight=sample_weight)
    importances = est.feature_importances_
    assert_true(np.all(importances >= 0.0))
    # Uniformly rescaling the weights must leave importances (nearly)
    # unchanged.
    for scale in [0.5, 10, 100]:
        est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
        est.fit(X, y, sample_weight=scale * sample_weight)
        importances_bis = est.feature_importances_
        assert_less(np.abs(importances - importances_bis).mean(), 0.001)
def test_importances():
    """Yield importance checks for all forests on a 3-informative-feature task."""
    X, y = datasets.make_classification(n_samples=500, n_features=10,
                                        n_informative=3, n_redundant=0,
                                        n_repeated=0, shuffle=False,
                                        random_state=0)

    cases = list(product(FOREST_CLASSIFIERS, ["gini", "entropy"]))
    cases += list(product(FOREST_REGRESSORS, ["mse", "friedman_mse"]))
    for estimator_name, criterion in cases:
        yield check_importances, estimator_name, criterion, X, y
def test_importances_asymptotic():
    # Check whether variable importances of totally randomized trees
    # converge towards their theoretical values (See Louppe et al,
    # Understanding variable importances in forests of randomized trees, 2013).

    def binomial(k, n):
        # Binomial coefficient C(n, k); defined as 0 outside 0 <= k <= n.
        return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)

    def entropy(samples):
        # Shannon entropy (in bits) of a vector of integer labels.
        n_samples = len(samples)
        entropy = 0.

        for count in bincount(samples):
            p = 1. * count / n_samples
            if p > 0:
                entropy -= p * np.log2(p)

        return entropy

    def mdi_importance(X_m, X, y):
        # Theoretical Mean Decrease Impurity importance of feature X_m:
        # weighted average, over all conditioning subsets B of the other
        # features and all their values b, of the conditional mutual
        # information contributed by splitting on X_m.
        n_samples, n_features = X.shape

        features = list(range(n_features))
        features.pop(X_m)
        values = [np.unique(X[:, i]) for i in range(n_features)]

        imp = 0.

        for k in range(n_features):
            # Weight of each B of size k
            coef = 1. / (binomial(k, n_features) * (n_features - k))

            # For all B of size k
            for B in combinations(features, k):
                # For all values B=b
                for b in product(*[values[B[j]] for j in range(k)]):
                    mask_b = np.ones(n_samples, dtype=np.bool)

                    for j in range(k):
                        mask_b &= X[:, B[j]] == b[j]

                    X_, y_ = X[mask_b, :], y[mask_b]
                    n_samples_b = len(X_)

                    if n_samples_b > 0:
                        children = []

                        for xi in values[X_m]:
                            mask_xi = X_[:, X_m] == xi
                            children.append(y_[mask_xi])

                        imp += (coef
                                * (1. * n_samples_b / n_samples)  # P(B=b)
                                * (entropy(y_) -
                                   sum([entropy(c) * len(c) / n_samples_b
                                        for c in children])))

        return imp

    # 7 binary features; last column is the (distinct-valued) target
    data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
                     [1, 0, 1, 1, 1, 0, 1, 2],
                     [1, 0, 1, 1, 0, 1, 1, 3],
                     [0, 1, 1, 1, 0, 1, 0, 4],
                     [1, 1, 0, 1, 0, 1, 1, 5],
                     [1, 1, 0, 1, 1, 1, 1, 6],
                     [1, 0, 1, 0, 0, 1, 0, 7],
                     [1, 1, 1, 1, 1, 1, 1, 8],
                     [1, 1, 1, 1, 0, 1, 1, 9],
                     [1, 1, 1, 0, 1, 1, 1, 0]])

    X, y = np.array(data[:, :7], dtype=np.bool), data[:, 7]
    n_features = X.shape[1]

    # Compute true importances
    true_importances = np.zeros(n_features)

    for i in range(n_features):
        true_importances[i] = mdi_importance(i, X, y)

    # Estimate importances with totally randomized trees
    clf = ExtraTreesClassifier(n_estimators=500,
                               max_features=1,
                               criterion="entropy",
                               random_state=0).fit(X, y)

    importances = sum(tree.tree_.compute_feature_importances(normalize=False)
                      for tree in clf.estimators_) / clf.n_estimators

    # Check correctness: unnormalized importances sum to the target entropy,
    # and the estimates are close to the theoretical values.
    assert_almost_equal(entropy(y), sum(importances))
    assert_less(np.abs(true_importances - importances).mean(), 0.01)
def check_unfitted_feature_importances(name):
    """Accessing feature_importances_ before fit must raise ValueError."""
    unfitted = FOREST_ESTIMATORS[name](random_state=0)
    assert_raises(ValueError, getattr, unfitted, "feature_importances_")
def test_unfitted_feature_importances():
    """Yield the unfitted-importances check for every forest estimator."""
    for estimator_name in FOREST_ESTIMATORS:
        yield check_unfitted_feature_importances, estimator_name
def check_oob_score(name, X, y, n_estimators=20):
    # Check that oob prediction is a good estimation of the generalization
    # error.

    # Proper behavior
    est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
                                  n_estimators=n_estimators, bootstrap=True)
    n_samples = X.shape[0]
    # fit on the first half, score on the held-out second half
    est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
    test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])

    if name in FOREST_CLASSIFIERS:
        # accuracy: OOB estimate should be close to the held-out score
        assert_less(abs(test_score - est.oob_score_), 0.1)
    else:
        # R^2: OOB estimate is pessimistic but should still be high
        assert_greater(test_score, est.oob_score_)
        assert_greater(est.oob_score_, .8)

    # Check warning if not enough estimators
    with np.errstate(divide="ignore", invalid="ignore"):
        est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
                                      n_estimators=1, bootstrap=True)
        assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
    """Yield OOB-score checks: classifiers on iris, regressors on boston."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_oob_score, clf_name, iris.data, iris.target
        # sparse (csc) input
        yield check_oob_score, clf_name, csc_matrix(iris.data), iris.target
        # non-contiguous class labels
        yield check_oob_score, clf_name, iris.data, iris.target * 2 + 1

    for reg_name in FOREST_REGRESSORS:
        yield check_oob_score, reg_name, boston.data, boston.target, 50
        # sparse (csc) input
        yield check_oob_score, reg_name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
    # oob_score must be rejected where unsupported or inconsistent with
    # the other settings.
    ForestEstimator = FOREST_ESTIMATORS[name]

    if name in FOREST_TRANSFORMERS:
        # Transformers never accept oob_score as a constructor argument
        for oob_score in [True, False]:
            assert_raises(TypeError, ForestEstimator, oob_score=oob_score)

        # NOTE(review): X and y here are module-level globals, not parameters.
        assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
                      X, y)

    else:
        # Unfitted / no bootstrap / no oob_score
        for oob_score, bootstrap in [(True, False), (False, True),
                                     (False, False)]:
            est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
                                  random_state=0)
            # oob_score_ must not exist before fit
            assert_false(hasattr(est, "oob_score_"))

        # No bootstrap: oob_score=True is incompatible and must raise at fit
        assert_raises(ValueError, ForestEstimator(oob_score=True,
                                                  bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
    """Yield the oob_score error-handling check for every forest estimator."""
    for estimator_name in FOREST_ESTIMATORS:
        yield check_oob_score_raise_error, estimator_name
def check_gridsearch(name):
    """Smoke-test grid-searching a forest classifier on iris."""
    param_grid = {'n_estimators': (1, 2), 'max_depth': (1, 2)}
    search = GridSearchCV(FOREST_CLASSIFIERS[name](), param_grid)
    search.fit(iris.data, iris.target)
def test_gridsearch():
    # Check that base trees can be grid-searched.
    for classifier_name in FOREST_CLASSIFIERS:
        yield check_gridsearch, classifier_name
def check_parallel(name, X, y):
    """Check parallel computations in classification"""
    forest = FOREST_ESTIMATORS[name](n_estimators=10, n_jobs=3, random_state=0)
    forest.fit(X, y)
    assert_equal(len(forest), 10)

    # Predictions must not depend on the number of jobs.
    forest.set_params(n_jobs=1)
    pred_one_job = forest.predict(X)
    forest.set_params(n_jobs=2)
    pred_two_jobs = forest.predict(X)
    assert_array_almost_equal(pred_one_job, pred_two_jobs, 3)
def test_parallel():
    """Yield n_jobs-invariance checks on iris (classifiers) and boston (regressors)."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_parallel, clf_name, iris.data, iris.target
    for reg_name in FOREST_REGRESSORS:
        yield check_parallel, reg_name, boston.data, boston.target
def check_pickle(name, X, y):
    """Round-trip a fitted estimator through pickle and compare scores."""
    estimator = FOREST_ESTIMATORS[name](random_state=0)
    estimator.fit(X, y)
    original_score = estimator.score(X, y)

    restored = pickle.loads(pickle.dumps(estimator))
    assert_equal(type(restored), estimator.__class__)

    # the unpickled estimator must behave exactly like the original
    assert_equal(original_score, restored.score(X, y))
def test_pickle():
    """Yield pickle round-trip checks on subsampled iris/boston data."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_pickle, clf_name, iris.data[::2], iris.target[::2]
    for reg_name in FOREST_REGRESSORS:
        yield check_pickle, reg_name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
    # Check estimators on multi-output problems.
    # Four clusters in the plane, each mapped to a distinct pair of targets.
    X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
               [-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
    y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
               [-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
    X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
    y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]

    est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
    y_pred = est.fit(X_train, y_train).predict(X_test)
    assert_array_almost_equal(y_pred, y_test)

    if name in FOREST_CLASSIFIERS:
        with np.errstate(divide="ignore"):
            # predict_proba returns one array per output:
            # first output has 2 classes, second has 4
            proba = est.predict_proba(X_test)
            assert_equal(len(proba), 2)
            assert_equal(proba[0].shape, (4, 2))
            assert_equal(proba[1].shape, (4, 4))

            log_proba = est.predict_log_proba(X_test)
            assert_equal(len(log_proba), 2)
            assert_equal(log_proba[0].shape, (4, 2))
            assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
    """Yield multi-output checks for every classifier and regressor."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_multioutput, clf_name
    for reg_name in FOREST_REGRESSORS:
        yield check_multioutput, reg_name
def check_classes_shape(name):
    # Test that n_classes_ and classes_ have proper shape.
    # NOTE(review): X and y are module-level globals (binary -1/+1 labels).
    ForestClassifier = FOREST_CLASSIFIERS[name]

    # Classification, single output
    clf = ForestClassifier(random_state=0).fit(X, y)

    assert_equal(clf.n_classes_, 2)
    assert_array_equal(clf.classes_, [-1, 1])

    # Classification, multi-output
    _y = np.vstack((y, np.array(y) * 2)).T
    clf = ForestClassifier(random_state=0).fit(X, _y)

    # one entry per output
    assert_array_equal(clf.n_classes_, [2, 2])
    assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
    """Yield classes_/n_classes_ shape checks for each forest classifier."""
    for classifier_name in FOREST_CLASSIFIERS:
        yield check_classes_shape, classifier_name
def test_random_trees_dense_type():
    """RandomTreesEmbedding with sparse_output=False must return an ndarray."""
    embedder = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
    X, y = datasets.make_circles(factor=0.5)
    transformed = embedder.fit_transform(X)

    # not a scipy.sparse matrix
    assert_equal(type(transformed), np.ndarray)
def test_random_trees_dense_equal():
    """Dense and sparse RandomTreesEmbedding outputs must be identical."""
    dense_embedder = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
                                          random_state=0)
    sparse_embedder = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
                                           random_state=0)
    X, y = datasets.make_circles(factor=0.5)

    dense_out = dense_embedder.fit_transform(X)
    sparse_out = sparse_embedder.fit_transform(X)
    assert_array_equal(sparse_out.toarray(), dense_out)
def test_random_hasher():
    # test random forest hashing on circles dataset
    # make sure that it is linearly separable.
    # even after projected to two SVD dimensions
    # Note: Not all random_states produce perfect results.
    hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
    X, y = datasets.make_circles(factor=0.5)
    X_transformed = hasher.fit_transform(X)

    # test fit and transform: fit().transform() must equal fit_transform()
    hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
    assert_array_equal(hasher.fit(X).transform(X).toarray(),
                       X_transformed.toarray())

    # one leaf active per data point per forest
    assert_equal(X_transformed.shape[0], X.shape[0])
    assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)

    # the embedding must make the circles linearly separable in 2 SVD dims
    svd = TruncatedSVD(n_components=2)
    X_reduced = svd.fit_transform(X_transformed)
    linear_clf = LinearSVC()
    linear_clf.fit(X_reduced, y)
    assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
    """Hashing sparse input must match hashing the dense equivalent."""
    X, y = datasets.make_multilabel_classification(random_state=0)
    embedder = RandomTreesEmbedding(n_estimators=30, random_state=1)

    dense_out = embedder.fit_transform(X)
    sparse_out = embedder.fit_transform(csc_matrix(X))
    assert_array_equal(sparse_out.toarray(), dense_out.toarray())
def test_parallel_train():
    # Training with different n_jobs must yield identical probabilities.
    rng = check_random_state(12321)
    n_samples, n_features = 80, 30
    X_train = rng.randn(n_samples, n_features)
    y_train = rng.randint(0, 2, n_samples)

    clfs = [
        RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
                               random_state=12345).fit(X_train, y_train)
        for n_jobs in [1, 2, 3, 8, 16, 32]
    ]

    X_test = rng.randn(n_samples, n_features)
    probas = [clf.predict_proba(X_test) for clf in clfs]
    # compare each consecutive pair of n_jobs settings
    for proba1, proba2 in zip(probas, probas[1:]):
        assert_array_almost_equal(proba1, proba2)
def test_distribution():
    """Check the distribution of tree structures built by totally random splits."""
    rng = check_random_state(12321)

    # Single variable with 4 values
    X = rng.randint(0, 4, size=(1000, 1))
    y = rng.rand(1000)
    n_trees = 500

    clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)

    uniques = defaultdict(int)
    for tree in clf.estimators_:
        # encode each tree structure as a string of (feature, threshold) pairs
        tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
                       for f, t in zip(tree.tree_.feature,
                                       tree.tree_.threshold))
        uniques[tree] += 1

    uniques = sorted([(1. * count / n_trees, tree)
                      for tree, count in uniques.items()])

    # On a single variable problem where X_0 has 4 equiprobable values, there
    # are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
    # them has probability 1/3 while the 4 others have probability 1/6.
    assert_equal(len(uniques), 5)
    assert_greater(0.20, uniques[0][0])  # Rough approximation of 1/6.
    assert_greater(0.20, uniques[1][0])
    assert_greater(0.20, uniques[2][0])
    assert_greater(0.20, uniques[3][0])
    assert_greater(uniques[4][0], 0.3)
    assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")

    # Two variables, one with 2 values, one with 3 values
    X = np.empty((1000, 2))
    # BUG FIX: the original drew from the unseeded global np.random here,
    # making the test nondeterministic; use the seeded rng instead.
    X[:, 0] = rng.randint(0, 2, 1000)
    X[:, 1] = rng.randint(0, 3, 1000)
    y = rng.rand(1000)

    clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
                              random_state=1).fit(X, y)

    uniques = defaultdict(int)
    for tree in clf.estimators_:
        tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
                       for f, t in zip(tree.tree_.feature,
                                       tree.tree_.threshold))
        uniques[tree] += 1

    # all 8 possible tree structures should appear among 100 trees
    uniques = [(count, tree) for tree, count in uniques.items()]
    assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name):
    X, y = hastie_X, hastie_y

    # Test precedence of max_leaf_nodes over max_depth.
    ForestEstimator = FOREST_ESTIMATORS[name]
    # with max_leaf_nodes set, trees may grow deeper than max_depth
    est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
                          n_estimators=1, random_state=0).fit(X, y)
    assert_greater(est.estimators_[0].tree_.max_depth, 1)

    # without max_leaf_nodes, max_depth is honored exactly
    est = ForestEstimator(max_depth=1, n_estimators=1,
                          random_state=0).fit(X, y)
    assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
    """Yield the max_leaf_nodes/max_depth precedence check for all forests."""
    for estimator_name in FOREST_ESTIMATORS:
        yield check_max_leaf_nodes_max_depth, estimator_name
def check_min_samples_split(name):
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]

    # test boundary value: invalid min_samples_split must raise
    assert_raises(ValueError,
                  ForestEstimator(min_samples_split=-1).fit, X, y)
    assert_raises(ValueError,
                  ForestEstimator(min_samples_split=0).fit, X, y)
    assert_raises(ValueError,
                  ForestEstimator(min_samples_split=1.1).fit, X, y)

    # integer value
    est = ForestEstimator(min_samples_split=10, n_estimators=1, random_state=0)
    est.fit(X, y)
    node_idx = est.estimators_[0].tree_.children_left != -1
    node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]

    # NOTE(review): the bound len(X) * 0.5 - 1 looks copied from the
    # fractional case below; for min_samples_split=10 a bound of 9 would be
    # expected — confirm against upstream.
    assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
                   "Failed with {0}".format(name))

    # float value: interpreted as a fraction of the training set size
    est = ForestEstimator(min_samples_split=0.5, n_estimators=1, random_state=0)
    est.fit(X, y)
    node_idx = est.estimators_[0].tree_.children_left != -1
    node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]

    assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
                   "Failed with {0}".format(name))
def test_min_samples_split():
    """Yield the min_samples_split check for every forest estimator."""
    for estimator_name in FOREST_ESTIMATORS:
        yield check_min_samples_split, estimator_name
def check_min_samples_leaf(name):
    """Leaves must hold at least min_samples_leaf (count or fraction) samples."""
    X, y = hastie_X, hastie_y

    # Test if leaves contain more than leaf_count training examples
    ForestEstimator = FOREST_ESTIMATORS[name]

    # test boundary value: invalid min_samples_leaf must raise
    assert_raises(ValueError,
                  ForestEstimator(min_samples_leaf=-1).fit, X, y)
    assert_raises(ValueError,
                  ForestEstimator(min_samples_leaf=0).fit, X, y)

    # integer value
    est = ForestEstimator(min_samples_leaf=5, n_estimators=1, random_state=0)
    est.fit(X, y)
    out = est.estimators_[0].tree_.apply(X)
    node_counts = bincount(out)
    # drop inner nodes
    leaf_count = node_counts[node_counts != 0]
    assert_greater(np.min(leaf_count), 4,
                   "Failed with {0}".format(name))

    # float value: interpreted as a fraction of the training set size
    est = ForestEstimator(min_samples_leaf=0.25, n_estimators=1,
                          random_state=0)
    est.fit(X, y)
    out = est.estimators_[0].tree_.apply(X)
    # CONSISTENCY FIX: use the same `bincount` helper as above (the original
    # mixed `bincount` and `np.bincount`, which are equivalent here).
    node_counts = bincount(out)
    # drop inner nodes
    leaf_count = node_counts[node_counts != 0]
    assert_greater(np.min(leaf_count), len(X) * 0.25 - 1,
                   "Failed with {0}".format(name))
def test_min_samples_leaf():
    """Yield the min_samples_leaf check for every forest estimator."""
    for estimator_name in FOREST_ESTIMATORS:
        yield check_min_samples_leaf, estimator_name
def check_min_weight_fraction_leaf(name):
    X, y = hastie_X, hastie_y

    # Test if leaves contain at least min_weight_fraction_leaf of the
    # training set
    ForestEstimator = FOREST_ESTIMATORS[name]
    rng = np.random.RandomState(0)
    weights = rng.rand(X.shape[0])
    total_weight = np.sum(weights)

    # test both DepthFirstTreeBuilder and BestFirstTreeBuilder
    # by setting max_leaf_nodes
    # NOTE(review): despite the comment above, max_leaf_nodes is never set
    # in this function — confirm against upstream.
    for frac in np.linspace(0, 0.5, 6):
        est = ForestEstimator(min_weight_fraction_leaf=frac, n_estimators=1,
                              random_state=0)
        if "RandomForest" in name:
            # disable bootstrap so every sample (weight) reaches the tree
            est.bootstrap = False

        est.fit(X, y, sample_weight=weights)
        out = est.estimators_[0].tree_.apply(X)
        node_weights = bincount(out, weights=weights)
        # drop inner nodes
        leaf_weights = node_weights[node_weights != 0]
        assert_greater_equal(
            np.min(leaf_weights),
            total_weight * est.min_weight_fraction_leaf,
            "Failed with {0} "
            "min_weight_fraction_leaf={1}".format(
                name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
    """Yield the min_weight_fraction_leaf check for every forest estimator."""
    for estimator_name in FOREST_ESTIMATORS:
        yield check_min_weight_fraction_leaf, estimator_name
def check_sparse_input(name, X, X_sparse, y):
    # Fitting on sparse input must match fitting on the dense equivalent.
    ForestEstimator = FOREST_ESTIMATORS[name]

    dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
    sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)

    # identical leaf assignments
    assert_array_almost_equal(sparse.apply(X), dense.apply(X))

    if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
        assert_array_almost_equal(sparse.predict(X), dense.predict(X))
        assert_array_almost_equal(sparse.feature_importances_,
                                  dense.feature_importances_)

    if name in FOREST_CLASSIFIERS:
        assert_array_almost_equal(sparse.predict_proba(X),
                                  dense.predict_proba(X))
        assert_array_almost_equal(sparse.predict_log_proba(X),
                                  dense.predict_log_proba(X))

    if name in FOREST_TRANSFORMERS:
        assert_array_almost_equal(sparse.transform(X).toarray(),
                                  dense.transform(X).toarray())
        assert_array_almost_equal(sparse.fit_transform(X).toarray(),
                                  dense.fit_transform(X).toarray())
def test_sparse_input():
    """Yield sparse-vs-dense equivalence checks for csr/csc/coo inputs."""
    X, y = datasets.make_multilabel_classification(random_state=0,
                                                   n_samples=50)

    sparse_formats = (csr_matrix, csc_matrix, coo_matrix)
    for estimator_name, to_sparse in product(FOREST_ESTIMATORS, sparse_formats):
        yield check_sparse_input, estimator_name, X, to_sparse(X), y
def check_memory_layout(name, dtype):
    # Check that it works no matter the memory layout
    est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)

    # Nothing (default layout)
    X = np.asarray(iris.data, dtype=dtype)
    y = iris.target
    assert_array_equal(est.fit(X, y).predict(X), y)

    # C-order
    X = np.asarray(iris.data, order="C", dtype=dtype)
    y = iris.target
    assert_array_equal(est.fit(X, y).predict(X), y)

    # F-order
    X = np.asarray(iris.data, order="F", dtype=dtype)
    y = iris.target
    assert_array_equal(est.fit(X, y).predict(X), y)

    # Contiguous
    X = np.ascontiguousarray(iris.data, dtype=dtype)
    y = iris.target
    assert_array_equal(est.fit(X, y).predict(X), y)

    # sparse layouts only if the base tree's splitter supports them
    if est.base_estimator.splitter in SPARSE_SPLITTERS:
        # csr matrix
        X = csr_matrix(iris.data, dtype=dtype)
        y = iris.target
        assert_array_equal(est.fit(X, y).predict(X), y)

        # csc_matrix
        X = csc_matrix(iris.data, dtype=dtype)
        y = iris.target
        assert_array_equal(est.fit(X, y).predict(X), y)

        # coo_matrix
        X = coo_matrix(iris.data, dtype=dtype)
        y = iris.target
        assert_array_equal(est.fit(X, y).predict(X), y)

    # Strided
    X = np.asarray(iris.data[::3], dtype=dtype)
    y = iris.target[::3]
    assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
    """Yield memory-layout checks for both float dtypes on every forest."""
    dtypes = [np.float64, np.float32]
    for clf_name, dtype in product(FOREST_CLASSIFIERS, dtypes):
        yield check_memory_layout, clf_name, dtype

    for reg_name, dtype in product(FOREST_REGRESSORS, dtypes):
        yield check_memory_layout, reg_name, dtype
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
    # A 1-D feature array must be rejected both by fit and by predict.
    ForestEstimator = FOREST_ESTIMATORS[name]
    assert_raises(ValueError, ForestEstimator(n_estimators=1,
                                              random_state=0).fit, X, y)

    est = ForestEstimator(random_state=0)
    est.fit(X_2d, y)

    if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
        assert_raises(ValueError, est.predict, X)
@ignore_warnings
def test_1d_input():
    """Yield 1-D input rejection checks for every forest estimator."""
    X_1d = iris.data[:, 0]
    X_2d = iris.data[:, 0].reshape((-1, 1))
    target = iris.target

    for estimator_name in FOREST_ESTIMATORS:
        yield check_1d_input, estimator_name, X_1d, X_2d, target
def check_class_weights(name):
    # Check class_weights resemble sample_weights behavior.
    ForestClassifier = FOREST_CLASSIFIERS[name]

    # Iris is balanced, so no effect expected for using 'balanced' weights
    clf1 = ForestClassifier(random_state=0)
    clf1.fit(iris.data, iris.target)
    clf2 = ForestClassifier(class_weight='balanced', random_state=0)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)

    # Make a multi-output problem with three copies of Iris
    iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
    # Create user-defined weights that should balance over the outputs
    clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
                                          {0: 2., 1: 1., 2: 2.},
                                          {0: 1., 1: 2., 2: 2.}],
                            random_state=0)
    clf3.fit(iris.data, iris_multi)
    assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
    # Check against multi-output "balanced" which should also have no effect
    clf4 = ForestClassifier(class_weight='balanced', random_state=0)
    clf4.fit(iris.data, iris_multi)
    assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)

    # Inflate importance of class 1, check against user-defined weights
    sample_weight = np.ones(iris.target.shape)
    sample_weight[iris.target == 1] *= 100
    class_weight = {0: 1., 1: 100., 2: 1.}
    clf1 = ForestClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, sample_weight)
    clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)

    # Check that sample_weight and class_weight are multiplicative
    clf1 = ForestClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, sample_weight ** 2)
    clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target, sample_weight)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
    """Yield the class_weight/sample_weight equivalence check per classifier."""
    for classifier_name in FOREST_CLASSIFIERS:
        yield check_class_weights, classifier_name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test that class_weight works for multi-output problems.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    # NOTE(review): X and y are module-level globals.
    _y = np.vstack((y, np.array(y) * 2)).T
    clf = ForestClassifier(class_weight='balanced', random_state=0)
    clf.fit(X, _y)

    # one weight dictionary per output
    clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
                           random_state=0)
    clf.fit(X, _y)

    # smoke test for subsample and balanced subsample
    clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
    clf.fit(X, _y)
    # 'subsample' is deprecated, hence the warning suppression on fit
    clf = ForestClassifier(class_weight='subsample', random_state=0)
    ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
    """Yield the multi-output class_weight smoke test per classifier."""
    for classifier_name in FOREST_CLASSIFIERS:
        yield check_class_weight_balanced_and_bootstrap_multi_output, classifier_name
def check_class_weight_errors(name):
    # Test if class_weight raises errors and warnings when expected.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    # NOTE(review): X and y are module-level globals.
    _y = np.vstack((y, np.array(y) * 2)).T

    # Invalid preset string
    clf = ForestClassifier(class_weight='the larch', random_state=0)
    assert_raises(ValueError, clf.fit, X, y)
    assert_raises(ValueError, clf.fit, X, _y)

    # Warning warm_start with preset
    clf = ForestClassifier(class_weight='auto', warm_start=True,
                           random_state=0)
    assert_warns(UserWarning, clf.fit, X, y)
    assert_warns(UserWarning, clf.fit, X, _y)

    # Not a list or preset for multi-output
    clf = ForestClassifier(class_weight=1, random_state=0)
    assert_raises(ValueError, clf.fit, X, _y)

    # Incorrect length list for multi-output
    clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
    assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
    """Yield the class_weight error-handling check per classifier."""
    for classifier_name in FOREST_CLASSIFIERS:
        yield check_class_weight_errors, classifier_name
def check_warm_start(name, random_state=42):
    # Test if fitting incrementally with warm start gives a forest of the
    # right size and the same results as a normal fit.
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]
    clf_ws = None
    for n_estimators in [5, 10]:
        if clf_ws is None:
            clf_ws = ForestEstimator(n_estimators=n_estimators,
                                     random_state=random_state,
                                     warm_start=True)
        else:
            # grow the existing forest instead of refitting from scratch
            clf_ws.set_params(n_estimators=n_estimators)
        clf_ws.fit(X, y)
        assert_equal(len(clf_ws), n_estimators)

    clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
                                warm_start=False)
    clf_no_ws.fit(X, y)

    # same per-tree seeds and identical leaf assignments as a one-shot fit
    assert_equal(set([tree.random_state for tree in clf_ws]),
                 set([tree.random_state for tree in clf_no_ws]))

    assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
                       err_msg="Failed with {0}".format(name))
def test_warm_start():
    """Yield the incremental warm-start check for every forest estimator."""
    for estimator_name in FOREST_ESTIMATORS:
        yield check_warm_start, estimator_name
def check_warm_start_clear(name):
    # Test if fit clears state and grows a new forest when warm_start==False.
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]
    clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
                          random_state=1)
    clf.fit(X, y)

    clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
                            random_state=2)
    clf_2.fit(X, y)  # inits state
    clf_2.set_params(warm_start=False, random_state=1)
    clf_2.fit(X, y)  # clears old state and equals clf

    # identical leaf assignments prove the old forest was discarded
    assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
    """Yield the warm-start state-clearing check for every forest estimator."""
    for estimator_name in FOREST_ESTIMATORS:
        yield check_warm_start_clear, estimator_name
def check_warm_start_smaller_n_estimators(name):
    """A warm-started refit with fewer trees than already grown must raise."""
    X, y = hastie_X, hastie_y
    forest_cls = FOREST_ESTIMATORS[name]

    est = forest_cls(n_estimators=5, max_depth=1, warm_start=True)
    est.fit(X, y)
    est.set_params(n_estimators=4)
    assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
    """Yield the shrinking-n_estimators error check for every forest."""
    for estimator_name in FOREST_ESTIMATORS:
        yield check_warm_start_smaller_n_estimators, estimator_name
def check_warm_start_equal_n_estimators(name):
    # Test if warm start with equal n_estimators does nothing and returns the
    # same forest and raises a warning.
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]
    clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
                          random_state=1)
    clf.fit(X, y)

    clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
                            random_state=1)
    clf_2.fit(X, y)
    # Now clf_2 equals clf.

    clf_2.set_params(random_state=2)
    assert_warns(UserWarning, clf_2.fit, X, y)
    # If we had fit the trees again we would have got a different forest as we
    # changed the random state.
    assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
    """Yield the no-op warm-start (equal n_estimators) check for all forests."""
    for estimator_name in FOREST_ESTIMATORS:
        yield check_warm_start_equal_n_estimators, estimator_name
def check_warm_start_oob(name):
    # Test that the warm start computes oob score when asked.
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]
    # Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
    clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
                          random_state=1, bootstrap=True, oob_score=True)
    clf.fit(X, y)

    clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
                            random_state=1, bootstrap=True, oob_score=False)
    clf_2.fit(X, y)

    # growing from 5 to 15 trees with oob_score enabled must reproduce
    # the one-shot OOB score
    clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
    clf_2.fit(X, y)

    assert_true(hasattr(clf_2, 'oob_score_'))
    assert_equal(clf.oob_score_, clf_2.oob_score_)

    # Test that oob_score is computed even if we don't need to train
    # additional trees.
    clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
                            random_state=1, bootstrap=True, oob_score=False)
    clf_3.fit(X, y)
    assert_true(not(hasattr(clf_3, 'oob_score_')))

    clf_3.set_params(oob_score=True)
    ignore_warnings(clf_3.fit)(X, y)

    assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
    """Yield warm-start OOB checks for classifiers and regressors."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_warm_start_oob, clf_name
    for reg_name in FOREST_REGRESSORS:
        yield check_warm_start_oob, reg_name
def test_dtype_convert(n_classes=15):
    """String class labels must survive fit/predict unchanged."""
    classifier = RandomForestClassifier(random_state=0, bootstrap=False)

    X = np.eye(n_classes)
    y = list('ABCDEFGHIJKLMNOPQRSTU'[:n_classes])

    result = classifier.fit(X, y).predict(X)
    assert_array_equal(classifier.classes_, y)
    assert_array_equal(result, y)
def check_decision_path(name):
    # decision_path must be consistent with apply() and per-tree node counts.
    X, y = hastie_X, hastie_y
    n_samples = X.shape[0]
    ForestEstimator = FOREST_ESTIMATORS[name]
    est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
                          random_state=1)
    est.fit(X, y)
    indicator, n_nodes_ptr = est.decision_path(X)

    assert_equal(indicator.shape[1], n_nodes_ptr[-1])
    assert_equal(indicator.shape[0], n_samples)
    # n_nodes_ptr partitions the indicator columns per estimator
    assert_array_equal(np.diff(n_nodes_ptr),
                       [e.tree_.node_count for e in est.estimators_])

    # Assert that leaves index are correct
    leaves = est.apply(X)
    for est_id in range(leaves.shape[1]):
        leave_indicator = [indicator[i, n_nodes_ptr[est_id] + j]
                           for i, j in enumerate(leaves[:, est_id])]
        assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
def test_decision_path():
    """Yield decision_path consistency checks for classifiers and regressors."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_decision_path, clf_name
    for reg_name in FOREST_REGRESSORS:
        yield check_decision_path, reg_name
# --- end of forest tests; the following is a separate script ---
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Variational Inference on Gridworld
@author: thomas
"""
import tensorflow as tf
import numpy as np
from tfutils.helpers import anneal_linear, HParams
import matplotlib.pyplot as plt
from rlenv.grid import grid_env as grid_env
from rlutils.policies import egreedy
from rlutils.helpers import make_rl_data, make_test_data, plot_predictions, kl_preds_v2
from pythonutils.helpers import save, make_name, nested_list
import os
import logging
# Tensorflow parser
# Command-line flags; FLAGS is read throughout the script.
flags = tf.app.flags
flags.DEFINE_string("hpconfig", "", "Overrides default hyper-parameters.")
flags.DEFINE_string("save_dir", "results/grid", "Results directory.")
flags.DEFINE_string("check_dir", "/tmp/best_model", "Checkpoint directory.")
FLAGS = flags.FLAGS
def get_hps():
    '''Hyperparameter settings.

    Returns an HParams container with every tunable setting: the training
    schedule, the variational posterior q(z|x,y), the decoder likelihood
    p(y|x,z), the VAE objective, the RL data-collection options, and the
    hyperparameter-loop configuration. Individual values can be overridden
    on the command line via --hpconfig.
    '''
    return HParams(
        # General learning set-up
        network = 1,  # which network to run
        n_epochs = 75000,  # number of batches
        batch_size = 32,  # batch size
        eval_freq = 500,  # Evaluation frequency
        test_size = 1500,  # Test set size
        debug = False,  # tf debugging
        lr_init = 0.0005,  # Initial learning rate
        lr_final_frac = 0.2,  # lr_final = lr_init * lr_final_frac
        anneal_frac_lr = 0.7,  # percentage of n_epochs to anneal over
        max_grad = None,  # gradient clip size, None is no clipping
        verbose = True,  # print verbosity

        # q(z|x,y) (Variational approximation)
        var_type = ['continuous'],  # Distribution for q(z|x,y), either ['continuous'] for Gaussian, or ['discrete'] for categorical
        # repeated if depth>1, for multiple layers use: var_type = ['continuous','discrete'],
        depth = 1,  # depth of stochastic layers
        h_size = 100,  # dimensionality in variatonal ladder (deterministic part)
        resnet = False,  # Whether to use Resnet like architecture

        # Discrete latent variables
        K = 3,  # categories per discrete latent variable
        N = 3,  # number of variables
        tau_init = 2.0,  # softmax temperature initial
        tau_final = 0.001,  # softmax temperature final
        anneal_frac_tau = 0.7,  # anneal fraction

        # Continuous latent variables
        z_size = 3,  # number of continuous latent variables
        n_flow = 5,  # depth of flow (if n_flow=0 --> no flow)
        ar = False,  # type of flow, if False: affine coupling layer (Real NVP), if True: inverse autoregressive flow (IAF)
        ignore_sigma_latent = False,  # use sigma in variational approximation
        ignore_sigma_flow = False,  # use sigma in flow transformations

        # p(y|x,z) (Decoder distribution)
        out_lik = 'discrete',  # distribution for p(y|x,z), can be 'normal', 'discrete' or 'discretized logistic'
        ignore_sigma_outcome = True,  # Whether to learn the SD of p(y|x,z).
        # For discrete, whether to sample for categorical or deterministically argmax over the predicted class probabilities.

        # VAE objective
        k = 3,  # number of importance samples
        alpha = 0.5,  # alpha in Renyi alpha divergence
        kl_min = 0.07,  # Number of "free bits/nats", only used if kl_min>0
        use_lamb = False,  # KL annealing (alternative to kl_min)
        lamb_init = 0.1,  # Initial contribution of KL to loss : L = p(y|x,z) + lambda*KL(q|p)
        lamb_final = 1.0,  # Final lambda
        anneal_frac_lamb = 0.3,  # anneal iteration fraction

        # Reinforcement learning settings
        artificial_data = True,  # if True: no RL but sample data across state-space (decorrelated)
        use_target_net = False,  # if True: use target net in DQN
        eps_init = 1.0,  # initial epsilon in e-greedy action selection
        eps_final = 0.10,  # final epsilon
        anneal_frac_eps = 0.6,  # fraction of n_epochs to anneal over
        gamma = 0.99,  # discount factor
        test_on_policy = False,  # if True: plot evalutions while following policy (only useful with artificial_data=False)

        # Hyperparameter looping
        n_rep = 10,  # number of repetitions per setting
        loop_hyper = False,  # If False, no looping (ignores other settings below)
        item1 = 'kl_min',  # First hyperparameter to loop over (should appear in settings above)
        seq1 = [0,0.04,0.07,0.10,0.20],  # Values to loop over
        item2 = 'use_lamb',  # Second hyperparameter
        seq2 = [False, True],  # Second loop values
    )
def run(hps):
    ''' Main function: run training and evaluation '''
    # Training env and a separate env used to draw evaluation transitions.
    Env = grid_env(False)
    Test_env = grid_env(False)
    if hps.artificial_data:
        # Pre-sample fixed validation/test sets of transitions across the
        # state-space (decorrelated, no policy roll-out).
        s_valid_pre, a_valid_pre, s1_valid_pre, r_valid_pre, term_valid_pre = make_rl_data(Test_env,int(hps.test_size/2))
        s_test_pre, a_test_pre, s1_test_pre, r_test_pre, term_test_pre = make_rl_data(Test_env,hps.test_size)
    # Set-up hyperparameter loop
    n_rep = hps.n_rep
    seq1 = hps.seq1
    seq2 = hps.seq2
    # Result tensors indexed as [hyper1, hyper2, repetition].
    results = np.empty([len(seq1),len(seq2),n_rep])
    results_elbo = np.empty([len(seq1),len(seq2),n_rep])
    results_distances = np.empty([len(seq1),len(seq2),3,n_rep])
    av_rewards = nested_list(len(seq1),len(seq2),n_rep)
    for j,item1 in enumerate(seq1):
        hps._set(hps.item1,item1)
        for l,item2 in enumerate(seq2):
            hps._set(hps.item2,item2)
            for rep in range(n_rep):
                # Fresh graph per repetition.
                tf.reset_default_graph()
                hps.lr_final = hps.lr_init*hps.lr_final_frac
                # Initialize anneal parameters
                np_lr= anneal_linear(0,hps.n_epochs * hps.anneal_frac_lr,hps.lr_final,hps.lr_init)
                np_temp = anneal_linear(0,hps.n_epochs * hps.anneal_frac_tau,hps.tau_final,hps.tau_init)
                np_lamb = anneal_linear(0,hps.n_epochs * hps.anneal_frac_lamb,hps.lamb_final,hps.lamb_init)
                np_eps = anneal_linear(0,hps.n_epochs * hps.anneal_frac_eps,hps.eps_final,hps.eps_init)
                # Build network
                # NOTE(review): `model` is only bound when hps.network == 1;
                # other values would raise NameError below — confirm intended.
                if hps.network == 1:
                    import networks.network_rl as net
                    model = net.Network(hps,Env.observation_shape)
                # Check model size
                total_size = 0
                for v in tf.trainable_variables():
                    total_size += np.prod([int(s) for s in v.get_shape()])
                print("Total number of trainable variables: {}".format(total_size))
                # Session and initialization
                with tf.Session() as sess:
                    if hps.debug:
                        sess = tf.python.debug.LocalCLIDebugWrapperSession(sess)
                        sess.add_tensor_filter("has_inf_or_nan", tf.python.debug.has_inf_or_nan)
                    sess.run(model.init_op)
                    saver = tf.train.Saver()
                    # Some storage
                    t = []
                    lr = []
                    elbo_keep = []
                    train_nats_keep = []
                    valid_nats_keep = []
                    test_nats_keep = []
                    min_valid_nats = 1e50
                    best_test_nats = 0.0
                    best_elbo = 0.0
                    best_iter = 0
                    died_ep = []
                    epoch_reward = []
                    # Train
                    print('Initialized, starting to train')
                    s = Env.reset()
                    for i in range(hps.n_epochs):
                        if not hps.artificial_data: # roll out in Env
                            s_batch = np.empty(np.append(hps.batch_size,Env.observation_shape),dtype='float32')
                            a_batch = np.empty([hps.batch_size,1],dtype='float32')
                            r_batch = np.empty([hps.batch_size,1],dtype='float32')
                            term_batch = np.empty([hps.batch_size,1],dtype='float32')
                            s1_batch = np.empty(np.append(hps.batch_size,Env.observation_shape),dtype='float32')
                            # e-greedy roll-out: one transition per batch slot.
                            for _ in range(hps.batch_size):
                                Qsa = sess.run(model.Qsa, feed_dict = {model.x :s[None,:],
                                                                       model.k : 1,
                                                                       })
                                a = egreedy(Qsa[0],np_eps)
                                s1,r,dead = Env.step([a])
                                s_batch[_,],a_batch[_,],r_batch[_,],s1_batch[_,],term_batch[_,] = s,a,r,s1,dead
                                s = s1
                                #Env.plot()
                                if dead:
                                    s = Env.reset() # process smaller batch
                                    died_ep.extend([i])
                        else: # Sample some transitions across state-space
                            s_batch, a_batch, s1_batch, r_batch,term_batch = make_rl_data(Env,hps.batch_size)
                        # Calculate targets
                        if hps.use_target_net:
                            Qsa1 = sess.run(model.Qsa_t, feed_dict = {model.x : s1_batch,model.k : 1})
                        else:
                            Qsa1 = sess.run(model.Qsa, feed_dict = {model.x : s1_batch,model.k : 1})
                        Qmax = np.max(Qsa1,axis=1)[:,None]
                        # Zero out the bootstrap value for terminal transitions.
                        Qmax *= (1. - term_batch)
                        Qtarget_batch = r_batch + hps.gamma * Qmax
                        # store stuff
                        epoch_reward.extend([np.mean(r_batch)])
                        # draw batch
                        __,__, np_elbo = sess.run([model.train_op,model.train_op_rl,model.elbo],{ model.x : s_batch,
                                                  model.y : s1_batch,
                                                  model.a : a_batch,
                                                  model.Qtarget : Qtarget_batch,
                                                  model.lr : np_lr,
                                                  model.lamb : np_lamb,
                                                  model.temp : np_temp,
                                                  model.is_training : True,
                                                  model.k: hps.k} )
                        # Annealing: refresh the schedules every 250 steps.
                        if i % 250 == 1:
                            np_lr= anneal_linear(i,hps.n_epochs * hps.anneal_frac_lr,hps.lr_final,hps.lr_init)
                            np_temp = anneal_linear(i,hps.n_epochs * hps.anneal_frac_tau,hps.tau_final,hps.tau_init)
                            np_lamb = anneal_linear(i,hps.n_epochs * hps.anneal_frac_lamb,hps.lamb_final,hps.lamb_init)
                            np_eps = anneal_linear(i,hps.n_epochs * hps.anneal_frac_eps,hps.eps_final,hps.eps_init)
                        # Evaluate
                        if i % hps.eval_freq == 1:
                            if hps.use_target_net:
                                sess.run([model.copy_op])
                            if (not hps.artificial_data) and hps.test_on_policy:
                                s_valid, a_valid, s1_valid, r_valid, term_valid = make_test_data(sess,model,Test_env,hps.test_size,epsilon=0.05)
                                s_test, a_test, s1_test, r_test, term_test = make_test_data(sess,model,Test_env,hps.test_size,epsilon=0.05)
                            else:
                                s_valid, a_valid, s1_valid, r_valid, term_valid = s_valid_pre, a_valid_pre, s1_valid_pre, r_valid_pre, term_valid_pre
                                s_test, a_test, s1_test, r_test, term_test = s_test_pre, a_test_pre, s1_test_pre, r_test_pre, term_test_pre
                            # Evaluation uses a near-zero temperature and k=40
                            # importance samples.
                            train_elbo,train_nats,train_kl,train_rl_cost = sess.run([model.elbo,model.nats,model.kl,model.rl_cost],{ model.x : s_batch,
                                                  model.y : s1_batch,
                                                  model.a : a_batch,
                                                  model.Qtarget : Qtarget_batch,
                                                  model.lamb : np_lamb,
                                                  model.temp : 0.0001,
                                                  model.is_training : True,
                                                  model.k: 40})
                            valid_nats = sess.run(model.nats,{ model.x : s_valid,
                                                  model.y : s1_valid,
                                                  model.a : a_valid,
                                                  model.Qtarget : np.zeros(np.shape(a_valid)),
                                                  model.lamb : np_lamb,
                                                  model.temp : 0.0001,
                                                  model.is_training : True,
                                                  model.k: 40})
                            test_nats = sess.run(model.nats,{ model.x : s_test,
                                                  model.y : s1_test,
                                                  model.a : a_test,
                                                  model.Qtarget : np.zeros(np.shape(a_test)),
                                                  model.lamb : np_lamb,
                                                  model.temp : 0.0001,
                                                  model.is_training : True,
                                                  model.k: 40})
                            if hps.verbose:
                                print('Step',i,'ELBO: ',train_elbo, 'Training nats:',train_nats, 'Training KL:',train_kl, 'RL cost:',train_rl_cost,
                                      ' \n Valid nats',valid_nats, ' Test set nats',test_nats,
                                      ' \n Average reward in last 50 batches',np.mean(epoch_reward[-50:]), 'Learning rate',np_lr,'Softmax Temp',np_temp,'Epsilon:',np_eps)
                            t.extend([i])
                            lr.extend([np_lr])
                            train_nats_keep.extend([train_nats])
                            valid_nats_keep.extend([valid_nats])
                            test_nats_keep.extend([test_nats])
                            elbo_keep.extend([train_elbo])
                            # Early-stopping bookkeeping on validation nats.
                            if valid_nats < min_valid_nats:
                                min_valid_nats = valid_nats
                                #best_sample = y_samples # keep the sample
                                best_test_nats = test_nats
                                best_elbo = train_nats
                                best_iter = i
                                saver.save(sess,FLAGS.check_dir)
                    # VAE storage
                    print('Best result in iteration',best_iter,'with valid_nats',min_valid_nats,'and test nats',best_test_nats)
                    saver.restore(sess,FLAGS.check_dir)
                    print('Restored best VAE model')
                    # nats
                    fig = plt.figure()
                    plt.plot(t,train_nats_keep,label='train nats')
                    plt.plot(t,valid_nats_keep,label='valid nats')
                    plt.plot(t,test_nats_keep,label='test nats')
                    plt.plot(t,elbo_keep,label='ELBO')
                    plt.legend(loc=0)
                    if hps.loop_hyper:
                        save(os.path.join(hps.my_dir,'nats_{}={}_{}={}_{}'.format(hps.item1,item1,hps.item2,item2,rep)))
                    else:
                        save(os.path.join(hps.my_dir,'nats{}'.format(rep)),ext='png',close=True,verbose=False)
                    results[j,l,rep] = best_test_nats
                    # NOTE(review): best_elbo is assigned from train_nats above,
                    # not from train_elbo — confirm this is intended.
                    results_elbo[j,l,rep] = best_elbo
                    # Distances from true distribution
                    distances = kl_preds_v2(model,sess,s_test,a_test)
                    results_distances[j,l,:,rep] = distances
                    # Visualize some predictions
                    n_row = 2
                    n_col = 2
                    s_start=np.array([0,0,1,3,5,3])
                    for extra_rep in range(3):
                        if hps.test_on_policy:
                            s_start = plot_predictions(model,sess,n_row,n_col,rep,hps,True,s_start)
                        else:
                            s_start = plot_predictions(model,sess,n_row,n_col,rep,hps,False)
                        if hps.loop_hyper:
                            name = os.path.join(hps.my_dir,'predictions_{}={}_{}={}_{}'.format(hps.item1,item1,hps.item2,item2,rep))
                        else:
                            name = os.path.join(hps.my_dir,'predictions_{}{}'.format(rep,extra_rep))
                        save(name,ext='png',close=True,verbose=False)
                    ############# RL ################
                    if not hps.artificial_data:
                        # Smooth the per-batch reward with a moving average.
                        window = 200
                        av_reward = np.convolve(epoch_reward, np.ones((window,))/window, mode='valid')
                        av_rewards[j][l][rep] = av_reward # average rewards of RL agent
                    # Show learned behaviour
                    if (not hps.artificial_data) and hps.verbose:
                        print('Start evaluating policy')
                        Env = grid_env(True)
                        Env.reset()
                        for lll in range(100):
                            Qsa = sess.run(model.Qsa, feed_dict = {model.x :s[None,:],
                                                                   model.k : 1,
                                                                   })
                            a = egreedy(Qsa[0],0.01)
                            s,r,dead = Env.step([a])
                            Env.plot()
                            if dead:
                                print('Died in step',lll,', restarting')
                                s = Env.reset()
                        plt.close()
    # Overall results
    results_raw = results
    results = np.mean(results,axis=2)
    results_raw_elbo = results_elbo
    results_elbo = np.mean(results_elbo,axis=2)
    results_raw_distances = results_distances
    results_distances = np.mean(results_distances,axis=3)
    logging.info('-------------------- Overall Results --------------------------')
    logging.info('vae' if hps.network == 1 else 'mlp_{}'.format('deterministic' if hps.deterministic else 'stochastic'))
    logging.info('Latent type %s of depth %s',hps.var_type[0],hps.depth)
    logging.info('(z_size,n_flow) %s %s and (n,k) %s %s',hps.z_size,hps.n_flow,hps.N,hps.K)
    logging.info('Results over %s runs',n_rep)
    logging.info('Test nats: %s',results[0,0])
    logging.info('Elbo: %s',-1*results_elbo[0,0])
    logging.info('KL with true distr: %s',results_distances[0,0,:])
    logging.info('Raw data over repetitions \n %s \n %s \n %s',results_raw,results_raw_elbo,results_raw_distances)
    if hps.loop_hyper:
        # One curve per value of the second hyperparameter.
        fig = plt.figure()
        for i in range(results.shape[1]):
            plt.plot([i for i in range(len(seq1))],results[:,i],label='{} = {}'.format(hps.item2,hps.seq2[i]))
        plt.xlabel(hps.item1)
        plt.gca().set_xticklabels(hps.seq1)
        plt.legend(loc=0)
        save(os.path.join(os.getcwd(),FLAGS.save_dir,'run_{}/looped'.format(make_name(hps))),ext='png',close=True,verbose=False)
    if not hps.artificial_data:
        # Smoothed reward curves per hyperparameter combination.
        fig = plt.figure()
        for ii in range(len(seq1)):
            for jj in range(len(seq2)):
                signal = np.mean(np.array(av_rewards[ii][jj]),axis=0)
                plt.plot([i for i in range(len(signal))],signal,label='{} = {},{} = {}'.format(hps.item1,hps.seq1[ii],hps.item2,hps.seq2[jj]))
        plt.xlabel('Steps')
        plt.legend(loc=0)
        save(os.path.join(os.getcwd(),FLAGS.save_dir,'run_{}/looped_reward'.format(make_name(hps))),ext='png',close=True,verbose=False)
def init_logger(hps, my_dir=None):
    """Route INFO-level logging to both `my_dir`/results.txt and the console.

    `hps` is unused; kept for call-site compatibility. `my_dir` is created
    if it does not exist yet.
    """
    if not os.path.exists(my_dir):
        os.makedirs(my_dir)
    log_file = os.path.join(my_dir, 'results.txt')
    logging.basicConfig(
        level=logging.INFO,
        format='%(message)s',
        handlers=[logging.FileHandler(log_file, mode='w'),
                  logging.StreamHandler()])
def main(_):
    """Parse hyperparameters, prepare output/logging dirs and launch run()."""
    hps = get_hps().parse(FLAGS.hpconfig)
    # Randomise the checkpoint path so parallel runs do not clash.
    FLAGS.check_dir = FLAGS.check_dir + str(np.random.randint(0, 1e7, 1)[0])
    if hps.depth > 1 and len(hps.var_type) == 1:
        # Replicate a single latent type across all depth levels.
        hps.var_type = [hps.var_type[0] for i in range(hps.depth)]
        print(hps.var_type)
    if not hps.loop_hyper:
        # Collapse both hyperparameter sequences to the current single value.
        hps._set('seq1', [hps._items[hps.item1]])
        hps._set('seq2', [hps._items[hps.item2]])
    # logging and saving
    hps.my_dir = os.path.join(os.getcwd(), FLAGS.save_dir, '{}'.format(make_name(hps)))
    init_logger(hps, hps.my_dir)
    with open(os.path.join(hps.my_dir, 'hps.txt'), 'w') as hps_file:
        hps_file.write(repr(hps._items))
    run(hps)
if __name__ == "__main__":
    # tf.app.run() parses the command-line FLAGS and then calls main(_).
    tf.app.run()
| |
# -*- coding: utf-8 -*-
#
# Receives and responds to the HTTP request from the Python server.
#
# Copyright (c) 2015 carlosperate https://github.com/carlosperate/
# Licensed under the Apache License, Version 2.0 (the "License"):
# http://www.apache.org/licenses/LICENSE-2.0
#
from __future__ import unicode_literals, absolute_import
import subprocess
import time
import json
import cgi
import re
try:
# 2.x name
import Tkinter
import urlparse
import tkFileDialog
import SimpleHTTPServer
except ImportError:
# 3.x name
import tkinter as Tkinter
import urllib.parse as urlparse
import tkinter.filedialog as tkFileDialog
import http.server as SimpleHTTPServer
from ardublocklyserver.py23 import py23
from ardublocklyserver.compilersettings import ServerCompilerSettings
from ardublocklyserver.sketchcreator import SketchCreator
class BlocklyRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """
    Simple Python HTTP request handler to pass over the AJAX requests.
    """
    def do_POST(self):
        """
        Serves the POST request, using form-like data.

        Form-encoded bodies carry settings get/set commands; text/plain
        bodies carry generated sketch code to be compiled/loaded. The
        response (when there is one) is a JSON string.
        """
        message_back = None
        content_type, parameters_dict = cgi.parse_header(
            self.headers.get("Content-type"))
        content_length = int(self.headers.get('content-length'))
        if content_type == 'application/x-www-form-urlencoded':
            parameters = urlparse.parse_qs(
                py23.b_unicode(self.rfile.read(content_length)),
                keep_blank_values=False)
            message_back = handle_settings(parameters)
        elif content_type == 'text/plain':
            data_string = self.rfile.read(content_length)
            try:
                # At this point message back should contain a normal string
                # with the sketch code
                message_back = \
                    '// Ardublockly generated sketch\n' + \
                    data_string.decode('utf-8')
            except Exception as e:
                print(e)
                print('\nThere was an error manipulating the sketch data!!!')
            # Returning data is a JSON string with the Arduino CLI output
            message_back = handle_sketch(message_back)
        else:
            print('\nError, content type not recognised: ' + str(content_type))
            self.send_response(404, "Ups, not found!")
            self.send_header('Content-type', 'text/plain')
            self.end_headers()
            # Bug fix: wfile requires bytes on Python 3 (str == bytes on Py2),
            # so the error body must be encoded before writing.
            self.wfile.write('Error: invalid content type'.encode('utf-8'))
            return
        # Responding
        # NOTE(review): if message_back is falsy no response is sent at all
        # and the client will time out — confirm this path is unreachable.
        if message_back:
            self.send_response(200)
            self.send_header('Content-type', 'application/json')
            self.end_headers()
            self.wfile.write(message_back.encode("utf-8"))

    def log_request(self, code='-', size='-'):
        """
        Log an accepted request.
        This is called by send_response(), and printed to the stderr by
        log_message. No need to fill the command line with successful responses,
        so only print any non 200.
        :param code: HTTP status code of the response.
        :param size: Size of the response.
        :return: None.
        """
        if code != 200:
            self.log_message('"%s" %s %s',
                             self.requestline, str(code), str(size))
#################
# Main Handlers #
#################
def handle_settings(parameters):
    """
    Dispatch a parsed settings POST body to the matching get/set handler.

    :param parameters: dict as produced by urlparse.parse_qs, so each value
                       is a list whose repr looks like "['get']" / "['set']".
    :return: JSON string from the matching handler, or None if no recognised
             command key was found.
    """
    def _get_value(parameters2):
        """ Searches for a 'value' parameter in the dictionary. """
        value2 = None
        for key2 in parameters2:
            if str(key2) == 'value':
                value2 = str(parameters2[key2])
        return value2

    def _get_stripped_value(parameters2):
        """ Returns the 'value' parameter without its "['...']" wrapping. """
        # NOTE(review): if no 'value' key is present this raises TypeError
        # (re.sub on None) — same behaviour as the original triplicated code.
        value2 = _get_value(parameters2)
        value2 = re.sub(r'^\[\'', '', value2)
        value2 = re.sub(r'\'\]', '', value2)
        return value2

    message_back = None
    for key in parameters:
        # Compiler
        if str(key) == 'compiler':
            if str(parameters[key]) == "['get']":
                message_back = get_compiler_path()
            elif str(parameters[key]) == "['set']":
                message_back = set_compiler_path()
        # Sketch
        elif str(key) == 'sketch':
            if str(parameters[key]) == "['get']":
                message_back = get_sketch_path()
            elif str(parameters[key]) == "['set']":
                message_back = set_sketch_path()
        # Arduino Board
        elif str(key) == 'board':
            if str(parameters[key]) == "['get']":
                message_back = get_arduino_boards()
            elif str(parameters[key]) == "['set']":
                message_back = set_arduino_board(_get_stripped_value(parameters))
        # Serial port
        elif str(key) == 'serial':
            if str(parameters[key]) == "['get']":
                message_back = get_serial_ports()
            elif str(parameters[key]) == "['set']":
                message_back = set_serial_port(_get_stripped_value(parameters))
        # Launch Only Options
        elif str(key) == 'ide':
            if str(parameters[key]) == "['get']":
                message_back = get_load_ide_only()
            elif str(parameters[key]) == "['set']":
                message_back = set_load_ide_only(_get_stripped_value(parameters))
        # The Value parameter is only used in some cases
        elif str(key) == 'value':
            pass
        # Parameter not recognised
        else:
            print('The "' + str(key) + '" = ' + str(parameters[key]) +
                  ' parameter is not recognised!')
    return message_back
def handle_sketch(sketch_code):
    """
    Write `sketch_code` into an Arduino sketch, invoke the Arduino CLI on it
    and wrap the outcome in a JSON string of the form:
    {"response_type": "ide_output",
     "element" : "div_ide_output",
     "success" : bool,
     "conclusion" : short summary text,
     "output" : CLI stdout,
     "error_output" : CLI stderr,
     "exit_code": CLI exit code}
    """
    sketch_path = create_sketch_from_string(sketch_code)
    success, conclusion, out, error, exit_code = load_arduino_cli(sketch_path)
    return json.dumps({'response_type': 'ide_output',
                       'element': 'div_ide_output',
                       'success': success,
                       'conclusion': conclusion,
                       'output': out,
                       'error_output': error,
                       'exit_code': exit_code})
#######################################
# Sketch loading to Arduino functions #
#######################################
def load_arduino_cli(sketch_path=None):
    """
    Launches a command line that invokes the Arduino IDE to open, verify or
    upload an sketch, which address is indicated in the input parameter.

    :param sketch_path: Path to the sketch; a default sketch is created when
                        it is missing or not a string.
    :return: Tuple (success, conclusion, out, error, exit_code).
    """
    # Input sanitation and output defaults
    if not isinstance(sketch_path, py23.string_type_compare) \
            or not sketch_path:
        sketch_path = create_sketch_default()
    success = True
    conclusion = ''
    error = ''
    out = ''
    exit_code = ''
    # Check if CLI flags have been set
    if not ServerCompilerSettings().compiler_dir:
        success = False
        conclusion = 'Unable to find Arduino IDE'
        error = 'The compiler directory has not been set.\n\r' + \
                'Please set it in the Settings.'
    else:
        if not ServerCompilerSettings().launch_IDE_option:
            success = False
            conclusion = 'What should we do with the Sketch?'
            # Bug fix: the original string contained '.n\r' (missing backslash
            # in the newline escape).
            error = 'The launch IDE option has not been set.\n\r' + \
                    'Please select an IDE option in the Settings.'
        elif ServerCompilerSettings().launch_IDE_option == 'upload':
            if not ServerCompilerSettings().get_serial_port_flag():
                success = False
                conclusion = 'Serial Port unavailable'
                error = 'The Serial Port does not exist.\n\r' + \
                        'Please check if the Arduino is correctly ' + \
                        'connected to the PC and select the Serial Port in ' +\
                        'the Settings.'
            if not ServerCompilerSettings().get_arduino_board_flag():
                success = False
                conclusion = 'Unknown Arduino Board'
                error = 'The Arduino Board has not been set.\n\r' + \
                        'Please select the appropriate Arduino Board from ' + \
                        'the settings.'
    if success:
        # Concatenates the CLI command and execute if the flags are valid
        cli_command = [ServerCompilerSettings().compiler_dir]
        if ServerCompilerSettings().launch_IDE_option == 'upload':
            conclusion = 'Successfully Uploaded Sketch'
            cli_command.append('--upload')
            cli_command.append('--port')
            cli_command.append(ServerCompilerSettings().get_serial_port_flag())
            cli_command.append('--board')
            cli_command.append(
                ServerCompilerSettings().get_arduino_board_flag())
        elif ServerCompilerSettings().launch_IDE_option == 'verify':
            conclusion = 'Successfully Verified Sketch'
            cli_command.append('--verify')
        cli_command.append(sketch_path)
        #cli_command = ' '.join(cli_command)
        print('\n\rCLI command:')
        print(cli_command)
        if ServerCompilerSettings().launch_IDE_option == 'open':
            # Launch Arduino IDE in a subprocess without blocking server
            subprocess.Popen(cli_command, shell=False)
            conclusion = 'Sketch opened in IDE'
            out = 'The sketch should be loaded in the Arduino IDE.'
            # Wait a few seconds to allow IDE to launch before sending back data
            time.sleep(2)
        else:
            # Launch the Arduino CLI in a subprocess and capture output data
            process = subprocess.Popen(
                cli_command,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                shell=False)
            out, error = process.communicate()
            # Bug fix: on Python 3 communicate() returns bytes, which breaks
            # both the prints below and json.dumps() in handle_sketch(). On
            # Python 2 bytes is str, so the branch is a no-op there.
            if isinstance(out, bytes) and not isinstance(out, str):
                out = out.decode('utf-8', 'replace')
            if isinstance(error, bytes) and not isinstance(error, str):
                error = error.decode('utf-8', 'replace')
            exit_code = str(process.returncode)
            print('Arduino output:\n' + out)
            print('Arduino Error output:\n' + error)
            print('Arduino Exit code: ' + exit_code)
            # For some reason Arduino CLI can return 256 on success
            if (process.returncode != 0) and (process.returncode != 256):
                success = False
                if exit_code == str(1):
                    conclusion = 'Build or Upload failed'
                if exit_code == str(2):
                    conclusion = 'Sketch not found'
                if exit_code == str(3):
                    conclusion = 'Invalid command line argument'
                if exit_code == str(4):
                    conclusion =\
                        'Preference passed to "get-pref" flag does not exist'
    return success, conclusion, out, error, exit_code
def create_sketch_default():
    """Create a sketch with default content via SketchCreator; returns the
    value of SketchCreator.create_sketch() (used as a path by callers)."""
    return SketchCreator().create_sketch()
def create_sketch_from_string(sketch_code):
    """Create a sketch containing `sketch_code` via SketchCreator; returns
    the value of SketchCreator.create_sketch() (used as a path by callers)."""
    return SketchCreator().create_sketch(sketch_code)
######################################
# Dealing with Directories and files #
######################################
def browse_file():
    """
    Opens a native file browser dialog for selecting a file.
    :return: Full path to selected file ('' if the dialog is cancelled).
    """
    dialog_root = Tkinter.Tk()
    # Shrink and hide the root window, then force focus on it so the file
    # dialog does not open behind other windows.
    dialog_root.withdraw()
    dialog_root.overrideredirect(True)
    dialog_root.geometry('0x0+0+0')
    dialog_root.deiconify()
    dialog_root.lift()
    dialog_root.focus_force()
    dialog_root.update()
    chosen_path = tkFileDialog.askopenfilename()
    dialog_root.destroy()
    return chosen_path
def browse_dir():
    """
    Opens a native directory browser dialog to select a folder.
    :return: Full path to the selected folder ('' if cancelled).
    """
    dialog_root = Tkinter.Tk()
    # Shrink and hide the root window, then force focus on it so the
    # directory dialog does not open behind other windows.
    dialog_root.withdraw()
    dialog_root.overrideredirect(True)
    dialog_root.geometry('0x0+0+0')
    dialog_root.deiconify()
    dialog_root.lift()
    dialog_root.focus_force()
    chosen_path = tkFileDialog.askdirectory(
        parent=dialog_root, initialdir="/", title='Please select a directory')
    dialog_root.destroy()
    return chosen_path
#####################
# Compiler Settings #
#####################
def set_compiler_path():
    """
    Ask the user for the compiler executable via a file browser and store it
    in ServerCompilerSettings. An empty selection (cancelled dialog) leaves
    the stored value untouched.
    :return: JSON string describing the stored compiler path.
    """
    selected_path = browse_file()
    if selected_path != '':
        ServerCompilerSettings().compiler_dir = selected_path
    return get_compiler_path()
def get_compiler_path():
    """
    Build the JSON response describing the stored compiler directory:
    {"setting_type": "compiler",
     "element": "text_input",
     "display_text": <path, or a prompt when unset>}
    """
    display_text = ServerCompilerSettings().compiler_dir
    if not display_text:
        display_text = 'Please select a valid Arduino compiler directory'
    return json.dumps({'setting_type': 'compiler',
                       'element': 'text_input',
                       'display_text': display_text})
###################
# Sketch settings #
###################
def set_sketch_path():
    """
    Ask the user for the sketch directory via a folder browser and store it
    in ServerCompilerSettings. An empty selection (cancelled dialog) leaves
    the stored value untouched.
    :return: JSON string describing the stored sketch directory.
    """
    selected_dir = browse_dir()
    if selected_dir != '':
        ServerCompilerSettings().sketch_dir = selected_dir
    return get_sketch_path()
def get_sketch_path():
    """
    Build the JSON response describing the stored sketch directory:
    {"setting_type": "compiler",
     "element": "text_input",
     "display_text": <directory, or a prompt when unset>}
    """
    display_text = ServerCompilerSettings().sketch_dir
    if not display_text:
        display_text = 'Please select a valid Sketch directory.'
    return json.dumps({'setting_type': 'compiler',
                       'element': 'text_input',
                       'display_text': display_text})
##########################
# Arduino Board settings #
##########################
def set_arduino_board(new_value):
    """Store the selected Arduino board and return the boards JSON response."""
    ServerCompilerSettings().arduino_board = new_value
    return get_arduino_boards()
def get_arduino_boards():
    """
    Build the JSON dropdown description of the available Arduino boards:
    {"setting_type": "ide",
     "element": "dropdown",
     "options": [{"value": <board>, "display_text": <board>}, ...],
     "selected": <currently stored board>}
    """
    # NOTE(review): 'setting_type' is 'ide' here, same as the serial and IDE
    # responses — looks copy-pasted; verify against the page JavaScript.
    response = {'setting_type': 'ide',
                'element': 'dropdown',
                'options': []}
    #TODO: Check for None, however won't happen because static dict in settings
    for board in ServerCompilerSettings().get_arduino_board_types():
        response['options'].append({'value': board, 'display_text': board})
    response['selected'] = ServerCompilerSettings().arduino_board
    return json.dumps(response)
########################
# Serial Port settings #
########################
def set_serial_port(new_value):
    """Store the selected serial port and return the ports JSON response."""
    ServerCompilerSettings().serial_port = new_value
    return get_serial_ports()
def get_serial_ports():
    """
    Build the JSON dropdown description of the available serial ports:
    {"setting_type": "ide",
     "element": "dropdown",
     "options": [{"value": <port>, "display_text": <description>}, ...],
     "selected": <stored port, or 'no_ports' when none are available>}
    """
    response = {'setting_type': 'ide',
                'element': 'dropdown',
                'options': []}
    ports = ServerCompilerSettings().get_serial_ports()
    if not ports:
        # No hardware detected: offer a single informative placeholder entry.
        response['options'].append(
            {'value': 'no_ports',
             'display_text': 'There are no available Serial Ports'})
        response['selected'] = 'no_ports'
    else:
        for port_key in ports:
            response['options'].append(
                {'value': port_key, 'display_text': ports[port_key]})
        response['selected'] = ServerCompilerSettings().serial_port
    return json.dumps(response)
#######################
# Launch IDE settings #
#######################
def set_load_ide_only(new_value):
    """Store the selected IDE launch option and return the options JSON."""
    ServerCompilerSettings().launch_IDE_option = new_value
    return get_load_ide_only()
def get_load_ide_only():
    """
    Build the JSON dropdown description of the IDE launch options:
    {"setting_type": "ide",
     "element": "dropdown",
     "options": [{"value": <option>, "display_text": <description>}, ...],
     "selected": <currently stored launch option>}
    """
    response = {'setting_type': 'ide',
                'element': 'dropdown',
                'options': []}
    #TODO: Check for None, however won't happen because static dict in settings
    ide_options = ServerCompilerSettings().get_launch_ide_options()
    for option_key in ide_options:
        response['options'].append(
            {'value': option_key, 'display_text': ide_options[option_key]})
    response['selected'] = ServerCompilerSettings().launch_IDE_option
    return json.dumps(response)
| |
#!/usr/bin/python
import getpass as gp
import os,re,sys,curses
from Crypto.Cipher import AES
from Clipboard import Clipboard
import WiKID
from WiKID import Domain,TokenConfig
def clear():
    # Clear the terminal via the terminfo 'clear' capability.
    curses.setupterm()
    sys.stdout.write(curses.tigetstr("clear"))
    sys.stdout.flush()
def daemonize(tc, sc, pidfile = None):
import socket,tempfile
# Question: is this secure?
sockfile = tempfile.mkdtemp() + '/pywikid.sock'
s = socket.socket(socket.AF_UNIX)
pid = os.fork()
if pid != 0:
print "PYWIKID_SOCK=%s" % sockfile
if pidfile != None:
f = open(pidfile, 'w')
f.write(pid)
f.close()
sys.exit()
# Listen on a Unix socket
s.bind(sockfile)
s.listen(1)
while(1):
(client, addr) = s.accept()
Pin = client.recv(16)
if Pin == 'exit':
break
try:
passcode = tc.domains[sc].getPasscode(Pin)
client.send(passcode)
except:
try:
client.send('Error')
except:
pass
sys.exit(0)
def listDomains(tc):
serverCodeList = []
counter = 0
for name in tc.domains:
domain = tc.domains[name]
print "%s) %s" % (str(counter), domain.name)
serverCodeList.append(domain.serverCode)
counter += 1
return serverCodeList
def usage():
print "PyWikid 0.4 - (c) 2009 Hurricane Labs LLC"
print "Distributed under the GPL"
print "Usage:"
print "pywikid.py"
print " Display all domains and generate a code"
print ""
print "pywikid.py -xml"
print " Output the unencrypted XML version of your token"
print ""
print "pywikid.py <domain code>"
print " Generate a request for the given domain"
print ""
print "pywikid.py -d"
print " Present a menu of domains, and then fork a"
print " listener to handle client connections"
print ""
print "pywikid.py -d <domain code>"
print " Fork a listener to handle client connections for"
print " the given domain code."
print ""
print "pywikid.py -c"
print " Act as a client for an existing listener (above)."
print " The PYWIKID_SOCK environment variable must be set"
print " as shown in the output when the listener is started"
sys.exit(0)
def DeleteDomain(tc):
clear()
print "-" * 25
serverCodeList = listDomains(tc)
print "-" * 25
print "x) Exit and Cancel"
try:
choice = raw_input("Select a domain to delete: ")
except KeyboardInterrupt:
print ""
sys.exit()
if choice == 'x':
sys.exit(0)
try:
serverCode = serverCodeList[int(choice)]
ret = tc.deleteDomain(serverCode)
if ret == 1:
print "Success!"
else:
print "Failure!"
except ValueError:
print "Invalid choice!"
sys.exit(2)
def NewDomain(tc):
try:
sc = raw_input("Enter server code: ")
except KeyboardInterrupt:
print ""
sys.exit()
try:
pin1 = gp.getpass("Enter pin for this domain: ")
pin2 = gp.getpass("Verify pin for this domain: ")
if pin1 != pin2:
print "Pins don't match!"
print ""
sys.exit(2)
except KeyboardInterrupt:
print ""
sys.exit()
regcode = tc.newDomain(sc, pin1)
print "Domain successfully registered!"
print "Registration code is: %s" % regcode
def ChangePassphrase(tc):
try:
newPassphrase1 = gp.getpass("Enter new passphrase: ", sys.stderr)
newPassphrase2 = gp.getpass("Confirm new passphrase: ", sys.stderr)
except KeyboardInterrupt:
print ""
sys.exit(0)
if (newPassphrase1 != newPassphrase2):
print "Passwords don't match!"
sys.exit(2)
tc.updateFile(newPassphrase1)
def SocketClient(sockfile, close = 0):
import socket
if close == 1:
s = socket.socket(socket.AF_UNIX)
s.connect(sockfile)
s.send('exit')
return
try:
# Fix: prompt on stderr not stdin
Pin = gp.getpass("Pin: ", sys.stderr)
except KeyboardInterrupt:
print ""
sys.exit()
if len(Pin) == 0:
sys.exit()
s = socket.socket(socket.AF_UNIX)
s.connect(sockfile)
s.send(Pin)
print s.recv(16)
def ShowResponse(response, clipboard = 0):
    # Print the passcode response; clipboard != 0 is meant to also copy it.
    print response
    if clipboard != 0:
        # NOTE(review): a Clipboard is created but `response` is never copied
        # into it — this branch looks unfinished; confirm the Clipboard API
        # and the intended call.
        cb = Clipboard()
def main():
    # Entry point: dispatch on sys.argv (client mode, XML dumps, daemon
    # mode, direct 12-digit domain code) or show the interactive menu.
    try:
        sockfile = os.environ['PYWIKID_SOCK']
    except:
        sockfile = None
    # Client mode (-c): talk to an already-running daemon over its socket.
    if (sockfile != None and len(sys.argv) > 1 and sys.argv[1] == '-c'):
        if not os.path.exists(sockfile):
            print "PyWikid 0.4 - (c) 2009 Hurricane Labs LLC"
            print "Distributed under the GPL"
            print "ERROR:"
            print " Socket file not found. Please make sure that the"
            print " daemon is running, and that PYWIKID_SOCK reflects"
            print " the correct socket location."
            sys.exit(1)
        if (len(sys.argv) > 2 and sys.argv[2] == 'exit'):
            SocketClient(sockfile, 1)
        else:
            SocketClient(sockfile)
        sys.exit()
    if len(sys.argv) > 1:
        if (sys.argv[1] == '-h' or sys.argv[1] == '--help'):
            usage()
    # Try to use the TOKENFILE environment variable. Fall back to
    # the current directory instead
    try:
        TokenFile = os.environ['TOKENFILE']
    except:
        TokenFile = './WiKIDToken.wkd'
    # Quit if the token doesn't exist
    if not os.path.exists(TokenFile):
        print "PyWikid 0.4 - (c) 2009 Hurricane Labs LLC"
        print "Distributed under the GPL"
        print "ERROR:"
        print " Token file not found. Please place the token file"
        print " (WiKIDToken.wkd) in the current working directory"
        sys.exit(1)
    # Ask the user for the token passphrase
    try:
        # Fix: prompt on stderr not stdin
        Passphrase = gp.getpass('Token Passphrase: ', sys.stderr)
    except KeyboardInterrupt:
        print ""
        sys.exit()
    # Some rudimentary options that we accept
    # -xml :: Output the unencrypted token file
    tc = TokenConfig(TokenFile)
    if tc.loadToken(Passphrase) != 0:
        sys.stderr.write("Error decrypting token. Probably bad passphrase.\n")
        sys.exit()
    if len(sys.argv) > 1:
        if (sys.argv[1] == '-xml'):
            print tc.toXml()
            sys.exit()
        elif (sys.argv[1] == '-aesxml'):
            # Dump the AES-encrypted XML, to a file when one is given.
            aesxml = tc.toAesXml()
            if (len(sys.argv) > 2):
                f = open(sys.argv[2], 'w')
                f.write(aesxml)
                f.close()
            else:
                print aesxml
            sys.exit()
        elif (sys.argv[1] == '-decrypt'):
            # Decrypt and print the raw token file (AES-ECB with a
            # passphrase-derived key).
            f = open(TokenFile, 'r')
            c = AES.new(WiKID.getKeyFromPassphrase(Passphrase), AES.MODE_ECB)
            decToken = c.decrypt(f.read())
            print WiKID.aesUnpad(decToken)
            sys.exit()
        elif re.compile('[0-9]{12}').match(sys.argv[1]):
            # A 12-digit server code was given directly on the command line.
            # Get the PIN from the user
            sc = sys.argv[1]
            try:
                # Fix: prompt on stderr not stdin
                Pin = gp.getpass("Pin for '%s' domain: " % tc.domains[sc].name, sys.stderr)
            except KeyboardInterrupt:
                print ""
                sys.exit()
            print tc.domains[sys.argv[1]].getPasscode(Pin)
            sys.exit(0)
        elif (sys.argv[1] == '-d'):
            try:
                sc = sys.argv[2]
            except:
                # No domain code supplied: ask for one interactively.
                print "Please choose a domain, or select an option"
                print "-" * 25
                serverCodeList = listDomains(tc)
                print "-" * 25
                print "x) Exit"
                try:
                    choice = raw_input("Please select an option: ")
                except KeyboardInterrupt:
                    print ""
                    sys.exit()
                try:
                    sc = serverCodeList[int(choice)]
                except ValueError:
                    print "Invalid choice!"
                    sys.exit(2)
            # Daemonize
            daemonize(tc, sc)
    # Display a menu to the user
    clear()
    print "Please choose a domain, or select an option"
    print "-" * 25
    serverCodeList = listDomains(tc)
    print "-" * 25
    print "a) Add Domain"
    print "c) Change Passphrase"
    print "d) Delete Domain"
    print "x) Exit"
    try:
        choice = raw_input("Please select an option: ")
        if choice == "a":
            NewDomain(tc)
            sys.exit(0)
        elif choice == "c":
            ChangePassphrase(tc)
            sys.exit(0)
        elif choice == "d":
            DeleteDomain(tc)
            sys.exit(0)
        elif choice == "x":
            sys.exit(0)
    except KeyboardInterrupt:
        print ""
        sys.exit()
    try:
        serverCode = serverCodeList[int(choice)]
    except ValueError:
        print "Invalid choice!"
        sys.exit(2)
    # Get the PIN from the user
    try:
        # Fix: prompt on stderr not stdin
        Pin = gp.getpass("Pin for '%s' domain: " % tc.domains[serverCode].name, sys.stderr)
    except KeyboardInterrupt:
        print ""
        sys.exit()
    # Generate a pass code and hope for the best
    print tc.domains[serverCode].getPasscode(Pin)
'''
TODO: We should probably do a little cleanup here. getPasscode might very
well make configuration changes, and we should take those into account.
'''
# Script entry point.
if __name__ == '__main__':
    main()
| |
#!/usr/bin/python
import sys
import paho.mqtt.client as paho
import json
import threading
import Queue
from time import sleep
import numpy as np
import time
from random import random
from random import randint
# queue of commands for inter thread communication
file_lock = threading.Lock()
# Create topic from bot_id
def get_topic(bot_id):
    """Return the MQTT command topic dedicated to the given bot id."""
    return "wolfbot/%s/command" % bot_id
# initialze a client and connect to the server
def prepare_mqttc(mqtt_host, bot_id, mqtt_port, command_q):
    """Create, connect and subscribe an MQTT client for one bot.

    The bot's command queue is attached as userdata so that on_command
    can push TIM commands to the bot's driver thread.
    """
    # create a mqtt client; clean_session drops stale broker state
    mqttc = paho.Client(client_id="bot_" + bot_id, clean_session=True, userdata=command_q)
    mqttc.on_message = on_command
    mqttc.connect(host=mqtt_host, port=mqtt_port, keepalive=60)
    # subscribe to the bot's own command topic
    topic = get_topic(bot_id)
    mqttc.subscribe(topic)
    return mqttc
# create request json
def create_pass_request(bot_id, bot_type, enter_lane, exit_lane):
    """Build the JSON intersection-pass request published to TIM."""
    return json.dumps({
        "bot_id": bot_id,
        "bot_type": bot_type,
        "enter": enter_lane,
        "exit": exit_lane,
        # TIM replies on the bot's own command topic
        "respond_to": get_topic(bot_id),
    })
# create complete json
def create_complete_msg(bot_id, bot_type):
    """Build the JSON completion notice published to TIM."""
    return json.dumps({
        "bot_id": bot_id,
        "bot_type": bot_type,
        "status": "complete",
    })
# The callback for when a PUBLISH message is received from the server.
def on_command(mqttc, userdata, msg):
    """MQTT callback: translate a TIM message into a driver command.

    *userdata* is the bot's command queue (set in prepare_mqttc).
    """
    pass_comm = json.loads(msg.payload)
    # Anything other than an explicit "go" keeps the bot stopped at red.
    command = "GO_AT_RED" if pass_comm["command"] == "go" else "STOP_AT_RED"
    userdata.put(command)
# the driver function which controls the bot.
def driver(mqttc, bot_id, bot_type, entry_lane, exit_lane, command_q, log_fname):
    """Drive one bot through the intersection state machine.

    Publishes a pass request to TIM, waits for a "go" command (delivered
    via command_q by on_command), simulates the crossing, publishes a
    completion notice, appends a timing log line to log_fname, then
    disconnects the MQTT client.
    """
    # check entry and exit lanes (valid lanes are 1-4 and must differ)
    global file_lock
    logs = []
    if entry_lane < 1 or entry_lane > 4 or exit_lane < 1 or exit_lane > 4 or entry_lane == exit_lane:
        print "Invalid entry or exit lane"
        return
    #journey_state : AT_SRC, NEED_BLACK, REQUEST, NEED_RED, WAITING, CROSSING, DEPARTING, AT_DEST
    journey_state = "REQUEST"
    # by default, stop at red
    command = "STOP_AT_RED"
    # start listening to mqtt msgs
    mqttc.loop_start()
    # loop to control the motion and sensors based on TIM command
    # NOTE(review): this is a busy-wait loop; only CROSSING sleeps.
    while (True):
        # check for any commands from master thread
        if command_q.empty() == False:
            command = command_q.get()
        # state machine using ifelse control
        if journey_state == "REQUEST":
            # record identity and start time for the eventual log line
            logs.append(bot_id)
            logs.append(bot_type)
            start_time = int(time.time())
            logs.append(start_time)
            # at the start of the entry lane
            # request TIM to pass the intersection
            pass_req = create_pass_request(bot_id, bot_type, entry_lane, exit_lane)
            mqttc.publish("tim/jid_1/request", pass_req)
            journey_state = "WAITING"
        elif journey_state == "WAITING":
            # waiting at red line for a go command from TIM
            if command == "STOP_AT_RED":
                continue
            journey_state = "CROSSING"
        elif journey_state == "CROSSING":
            # sleep to simulate crossing the intersection
            sleep(1)
            journey_state = "COMPLETED"
        elif journey_state == "COMPLETED":
            # tell TIM we are through, and record the elapsed time
            complete_msg = create_complete_msg(bot_id, bot_type)
            mqttc.publish("tim/jid_1/complete", complete_msg)
            end_time = int(time.time())
            logs.append(end_time)
            logs.append(end_time - start_time)
            journey_state = "AT_DEST"
        elif journey_state == "AT_DEST":
            # log all the data; file_lock serializes appends from
            # concurrent driver threads sharing one log file
            file_lock.acquire()
            f = open(log_fname, 'a')
            for log in logs:
                f.write(str(log) + ",")
            f.write("\n")
            f.close()
            file_lock.release()
            # disconnect after reaching the destination
            mqttc.loop_stop()
            mqttc.disconnect()
            print str(bot_id) + " COMPLETED"
            break
def generateSleepValues(n, delay):
    """Return *n* random inter-arrival delays drawn uniformly from [0, delay)."""
    return [delay * random() for _ in range(n)]
def generateTrafficPerLane(enter_lane, n, log_fname):
    """Sequentially spawn *n* bots entering at *enter_lane* (1-4).

    Each bot gets its own MQTT client, command queue and driver thread;
    the thread is joined immediately, so bots within one lane run one
    at a time.
    """
    # NOTE(review): delay scales with the lane number, so higher-numbered
    # lanes get longer random inter-arrival gaps -- confirm intended.
    sleep_dur = generateSleepValues(n, delay=enter_lane)
    for x in range(0, n):
        sleep(sleep_dur[x])
        # create parameters for bot
        bot_id = str(enter_lane) + "_" + str(x)
        bot_type = "civilian"
        # pick a random exit lane different from the entry lane
        exit_lane = enter_lane
        while exit_lane == enter_lane:
            exit_lane = randint(1, 4)
        # separate command queue for each bot
        command_q = Queue.Queue() # STOP_AT_RED, GO_AT_RED
        # separate mqtt client for each bot
        client = prepare_mqttc("localhost", bot_id, 1883, command_q)
        # create a thread for the driver function
        driver_thread = threading.Thread(target = driver, args = (client, bot_id, bot_type, enter_lane, exit_lane, command_q, log_fname))
        driver_thread.start()
        # join => bots within a lane are generated strictly sequentially
        driver_thread.join()
    print "Lane thread exiting " + str(enter_lane)
# main function
def main():
    """Spawn one traffic-generator thread per lane (1-4) and wait for all."""
    # check usage
    if len(sys.argv) != 3:
        print "usage : NUM_BOTS LOG_FILE"
        return
    n_bots = int(sys.argv[1])   # bots generated per lane
    log_fname = sys.argv[2]     # shared CSV-ish log file, appended by drivers
    threads = []
    # one generator thread per entry lane
    for x in range(1, 5):
        thread = threading.Thread(target = generateTrafficPerLane, args = (x, n_bots, log_fname))
        thread.start()
        threads.append(thread)
    for thread in threads:
        thread.join()
    print "Simulation done"
# Entry point when executed as a script.
if __name__ == "__main__":
    main()
| |
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import math
import numpy as np
from coremltools.converters.mil.mil import types
from coremltools.converters.mil.mil.operation import Operation, precondition, SYMBOL, VALUE
from coremltools.converters.mil.mil.types.symbolic import is_symbolic
from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
from coremltools.converters.mil.mil.input_type import (
DefaultInputs,
FloatInputType,
InputSpec,
ScalarOrTensorInputType,
StringInputType
)
class elementwise_unary(Operation):
    """
    Superclass for elementwise unary operations.

    Provides the shared input spec (a single scalar-or-tensor ``x``) and
    type inference: the output symbolic type mirrors the input's.
    """
    input_spec = InputSpec(x=ScalarOrTensorInputType(),)

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def type_inference(self):
        # Elementwise ops never change shape or dtype.
        return self.x.sym_type
"""
Elementwise unary op implementation(s)
"""
@register_op(doc_str="")
class abs(elementwise_unary):
"""
Return the absolute values of the input ``x``, element-wise.
Parameters
----------
x: tensor<[\*d], T> (Required)
Returns
-------
tensor<[\*d], f32>
* A tensor of the same shape as ``x``.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(abs, self).__init__(**kwargs)
@precondition(allow=VALUE)
def value_inference(self):
return np.abs(self.x.val)
@register_op(doc_str="")
class acos(elementwise_unary):
"""
Return the inverse cosine values of the input ``x``, element-wise.
Parameters
----------
x: tensor<[\*d], T> (Required)
Returns
-------
tensor<[\*d], f32>
* A tensor of the same shape as ``x``.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(acos, self).__init__(**kwargs)
@precondition(allow=VALUE)
def value_inference(self):
return np.arccos(self.x.val)
@register_op(doc_str="")
class asin(elementwise_unary):
"""
Return the inverse sine of the input ``x``, element-wise.
Parameters
----------
x: tensor<[\*d], T> (Required)
Returns
-------
tensor<[\*d], f32>
* A tensor of the same shape as ``x``.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(asin, self).__init__(**kwargs)
@precondition(allow=VALUE)
def value_inference(self):
return np.arcsin(self.x.val)
@register_op(doc_str="")
class atan(elementwise_unary):
"""
Return the inverse tangent of the input ``x``, element-wise.
Parameters
----------
x: tensor<[\*d], T> (Required)
Returns
-------
tensor<[\*d], f32>
* A tensor of the same shape as ``x``.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(atan, self).__init__(**kwargs)
@precondition(allow=VALUE)
def value_inference(self):
return np.arctan(self.x.val)
@register_op(doc_str="")
class atanh(elementwise_unary):
"""
Return the inverse hyperbolic tangent values of the input
``x``, element-wise.
Parameters
----------
x: tensor<[\*d], T> (Required)
Returns
-------
tensor<[\*d], f32>
* A tensor of the same shape as ``x``.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(atanh, self).__init__(**kwargs)
@precondition(allow=VALUE)
def value_inference(self):
return np.arctanh(self.x.val)
@register_op(doc_str="")
class ceil(elementwise_unary):
"""
Return the ceil values of the input ``x``, element-wise.
Parameters
----------
x: tensor<[\*d], T> (Required)
Returns
-------
tensor<[\*d], f32>
* A tensor of the same shape as ``x``.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(ceil, self).__init__(**kwargs)
@precondition(allow=VALUE)
def value_inference(self):
return np.ceil(self.x.val)
@register_op(doc_str="")
class clip(Operation):
"""
Clip the values in the input ``x`` to ``[alpha, beta]``, element-wise.
Any values less than ``alpha`` are set to ``alpha``, and any values greater
than ``beta`` are set to ``beta``.
Parameters
----------
x: tensor<[\*d], T> (Required)
alpha: const f32 (Required)
beta: const f32 (Required)
Returns
-------
tensor<[\*d], f32>
* A tensor of the same shape as ``x``.
Attributes
----------
T: fp32
"""
input_spec = InputSpec(
x=ScalarOrTensorInputType(),
alpha=FloatInputType(const=True),
beta=FloatInputType(const=True),
)
def __init__(self, **kwargs):
super(clip, self).__init__(**kwargs)
def type_inference(self):
return self.x.sym_type
@precondition(allow=VALUE)
def value_inference(self):
return np.minimum(np.maximum(self.x.val, self.alpha.val), self.beta.val)
@register_op(doc_str="")
class cos(elementwise_unary):
"""
Return cosine of ``x`` element-wise. Input domain is ``(-inf, inf)`` and
output range is ``[-1,1]``.
Parameters
----------
x: tensor<[\*d], T> (Required)
Returns
-------
tensor<[\*d], T>
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(cos, self).__init__(**kwargs)
@precondition(allow=VALUE)
def value_inference(self):
return np.cos(self.x.val)
@register_op(doc_str="")
class cosh(elementwise_unary):
"""
Return hyperbolic cosine of the input ``x``, element-wise.
Parameters
----------
x: tensor<[\*d], T> (Required)
Returns
-------
tensor<[\*d], T>
* A tensor of the same shape as ``x``.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(cosh, self).__init__(**kwargs)
@precondition(allow=VALUE)
def value_inference(self):
return np.cosh(self.x.val)
@register_op(doc_str="")
class erf(elementwise_unary):
"""
Return the gauss error function of the input ``x``, element-wise.
Parameters
----------
x: tensor<[\*d], T> (Required)
Returns
-------
tensor<[\*d], f32>
* A tensor of the same shape as ``x``.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(erf, self).__init__(**kwargs)
@precondition(allow=VALUE)
def value_inference(self):
erf_vector_function = np.vectorize(math.erf)
return erf_vector_function(self.x.val)
@register_op(doc_str="")
class exp(elementwise_unary):
"""
Return e^x, element-wise.
Parameters
----------
x: tensor<[\*d], T> (Required)
Returns
-------
tensor<[\*d], f32>
* A tensor of the same shape as ``x``.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(exp, self).__init__(**kwargs)
@precondition(allow=VALUE)
def value_inference(self):
return np.exp(self.x.val)
@register_op(doc_str="")
class exp2(elementwise_unary):
"""
Return 2^x, element-wise.
Parameters
----------
x: tensor<[\*d], T> (Required)
Returns
-------
tensor<[\*d], f32>
* A tensor of the same shape as ``x``.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(exp2, self).__init__(**kwargs)
@precondition(allow=VALUE)
def value_inference(self):
return np.exp2(self.x.val)
@register_op(doc_str="")
class floor(elementwise_unary):
"""
Return the floor of the input ``x``, element-wise, the same as rounding
towards negative infinity.
Parameters
----------
x: tensor<[\*d], T> (Required)
Returns
-------
tensor<[\*d], f32>
* A tensor of the same shape as ``x``.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(floor, self).__init__(**kwargs)
@precondition(allow=VALUE)
def value_inference(self):
return np.floor(self.x.val)
@register_op(doc_str="")
class inverse(Operation):
"""
Return the reciprocal value of the input ``x``, element-wise.
Parameters
----------
x: tensor<[\*d], T> (Required)
epsilon: const fp32 (Optional, default=1e-4)
* This is a small constant that is added to the input, before taking its
inverse, for stability.
* ``y = 1 / (x + epsilon)``.
Returns
-------
tensor<[\*d], f32>
* A tensor of the same shape as ``x``.
Attributes
----------
T: fp32
"""
input_spec = InputSpec(
x=ScalarOrTensorInputType(),
epsilon=FloatInputType(const=True, optional=True),
)
def default_inputs(self):
return DefaultInputs(
epsilon=1e-4,
)
def __init__(self, **kwargs):
super(inverse, self).__init__(**kwargs)
def type_inference(self):
return self.x.sym_type
@precondition(allow=VALUE)
def value_inference(self):
return np.reciprocal(self.x.val + self.epsilon.val)
@register_op(doc_str="")
class log(Operation):
"""
Return the natural logarithm value of the input ``x``, element-wise.
Parameters
----------
x: tensor<[\*d], T> (Required)
epsilon: const fp32 (Optional, default=1e-45)
* This is a small constant that is added to the input, before taking log.
* ``y = log(x + epsilon)``.
Returns
-------
tensor<[\*d], f32>
* A tensor of the same shape as ``x``.
Attributes
----------
T: fp32
"""
input_spec = InputSpec(
x=ScalarOrTensorInputType(),
epsilon=FloatInputType(const=True, optional=True),
)
def default_inputs(self):
return DefaultInputs(
epsilon=1e-45)
def __init__(self, **kwargs):
super(log, self).__init__(**kwargs)
def type_inference(self):
return self.x.sym_type
@precondition(allow=VALUE)
def value_inference(self):
return np.log(self.x.val + self.epsilon.val)
@register_op(doc_str="")
class logical_not(elementwise_unary):
"""
Return the value of NOT the input ``x``, element-wise. (``1`` for true, ``0``
for false in numeric domain.) A numeric value ``t`` is evaluated to true
``iff t != 0``.
Parameters
----------
x: tensor<[\*d], T> (Required)
Returns
-------
tensor<[\*d], f32>
* A tensor of the same shape as ``x``.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(logical_not, self).__init__(**kwargs)
@precondition(allow=VALUE)
def value_inference(self):
return np.logical_not(self.x.val)
@register_op(doc_str="")
class round(elementwise_unary):
"""
Return the round value of the input ``x`` to nearest integer, element-wise.
``0.5`` is rounded to ``0``.
Parameters
----------
x: tensor<[\*d], T> (Required)
Returns
-------
tensor<[\*d], f32>
* A tensor of the same shape as ``x``.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(round, self).__init__(**kwargs)
@precondition(allow=VALUE)
def value_inference(self):
return np.round(self.x.val)
@register_op(doc_str="")
class rsqrt(Operation):
"""
Return the reciprocal value of the square root of the input ``x``, element-wise.
Parameters
----------
x: tensor<[\*d], T> (Required)
epsilon: const fp32 (Optional, default=1e-12)
* This is a small constant that is added to the input, before applying the
``rsqrt`` function, for stability.
* ``y = 1 / sqrt(x + epsilon)``.
Returns
-------
tensor<[\*d], f32>
* A tensor of the same shape as ``x``.
Attributes
----------
T: fp32
"""
input_spec = InputSpec(
x=ScalarOrTensorInputType(),
epsilon=FloatInputType(const=True, optional=True),
)
def default_inputs(self):
return DefaultInputs(
epsilon=1e-12,
)
def __init__(self, **kwargs):
super(rsqrt, self).__init__(**kwargs)
def type_inference(self):
return self.x.sym_type
@precondition(allow=VALUE)
def value_inference(self):
return 1.0 / np.sqrt(self.x.val + self.epsilon.val)
@register_op(doc_str="")
class sign(elementwise_unary):
"""
Return the sign value of the input ``x``, element-wise.
All elements in the output will be either ``-1``. or ``1``.
Parameters
----------
x: tensor<[\*d], T> (Required)
Returns
-------
tensor<[\*d], f32>
* A tensor of the same shape as ``x``.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(sign, self).__init__(**kwargs)
@precondition(allow=VALUE)
def value_inference(self):
return np.sign(self.x.val)
@register_op(doc_str="")
class sin(elementwise_unary):
"""
Return the sine value of the input ``x``, element-wise.
Parameters
----------
x: tensor<[\*d], T> (Required)
Returns
-------
tensor<[\*d], f32>
* A tensor of the same shape as ``x``.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(sin, self).__init__(**kwargs)
@precondition(allow=VALUE)
def value_inference(self):
return np.sin(self.x.val)
@register_op(doc_str="")
class sinh(elementwise_unary):
"""
Return the hyperbolic sine value of the input ``x``, element-wise.
Parameters
----------
x: tensor<[\*d], T> (Required)
Returns
-------
tensor<[\*d], f32>
* A tensor of the same shape as ``x``.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(sinh, self).__init__(**kwargs)
@precondition(allow=VALUE)
def value_inference(self):
return np.sinh(self.x.val)
@register_op(doc_str="")
class sqrt(elementwise_unary):
"""
Returns the square root value of the input ``x``, element-wise.
Parameters
----------
x: tensor<[\*d], T> (Required)
Returns
-------
tensor<[\*d], f32>
* A tensor of the same shape as ``x``.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(sqrt, self).__init__(**kwargs)
@precondition(allow=VALUE)
def value_inference(self):
return np.sqrt(self.x.val)
@register_op(doc_str="")
class square(elementwise_unary):
"""
Return ``x^2``, element-wise.
Parameters
----------
x: tensor<[\*d], T> (Required)
Returns
-------
tensor<[\*d], f32>
* A tensor of the same shape as ``x``.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(square, self).__init__(**kwargs)
@precondition(allow=VALUE)
def value_inference(self):
return np.square(self.x.val)
@register_op(doc_str="")
class tan(elementwise_unary):
"""
Return the tangent value of the input ``x``, element-wise. Both input and output
ranges are ``(-inf, inf)``.
Parameters
----------
x: tensor<[\*d], T> (Required)
Returns
-------
tensor<[\*d], f32>
* A tensor of the same shape as ``x``.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(tan, self).__init__(**kwargs)
@precondition(allow=VALUE)
def value_inference(self):
return np.tan(self.x.val)
@register_op(doc_str="")
class tanh(elementwise_unary):
"""
Return the hyperbolic tangent value of the input ``x``, element-wise. Both input
and output ranges are ``(-inf, inf)`` while output range is ``[-1, 1]``.
Parameters
----------
x: tensor<[\*d], T> (Required)
Returns
-------
tensor<[\*d], f32>
* A tensor of the same shape as ``x``.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(tanh, self).__init__(**kwargs)
@precondition(allow=VALUE)
def value_inference(self):
return np.tanh(self.x.val)
@register_op(doc_str="")
class threshold(Operation):
"""
Set a lower bound ``alpha`` to the values in the input ``x``, element-wise.
Any values less than ``alpha`` are set to ``alpha``.
Parameters
----------
x: tensor<[\*d], T> (Required)
alpha: const fp32 (Required)
Returns
-------
tensor<[\*d], f32>
* A tensor of the same shape as ``x``.
Attributes
----------
T: fp32
"""
input_spec = InputSpec(
x=ScalarOrTensorInputType(), alpha=FloatInputType(const=True),
)
def __init__(self, **kwargs):
super(threshold, self).__init__(**kwargs)
def type_inference(self):
return self.x.sym_type
@precondition(allow=VALUE)
def value_inference(self):
return np.maximum(self.x.val, self.alpha.val)
@register_op(doc_str="")
class cast(Operation):
"""
Cast the input ``x`` to the new type ``dtype``.
Parameters
----------
x: tensor<[\*d], T> (Required)
dtype: const str (Required)
* Can be one of the following types: ``int32``, ``int64``, ``fp32``, ``fp64``.
Returns
-------
tensor<[\*d], dtype>
* A tensor of the same shape as ``x``, with type ``dtype``.
Attributes
----------
T: i32, i64, fp16, fp32, fp64, bool.
"""
input_spec = InputSpec(
x=ScalarOrTensorInputType(), dtype=StringInputType(const=True)
)
def __init__(self, **kwargs):
super(cast, self).__init__(**kwargs)
def type_inference(self):
type_map = {
"int32": types.int32,
"int64": types.int64,
"fp16": types.fp16,
"fp32": types.fp32,
"fp64": types.fp64,
"bool": types.bool,
}
if self.dtype.val not in type_map.keys():
raise NotImplementedError(
"Parameter dtype of the cast operation can be one of the {}. "
"Provided {}".format(type_map.keys(), self.dtype.val)
)
if not types.is_tensor(self.x.sym_type):
return type_map[self.dtype.val]
ret_shape = self.x.shape
return types.tensor(type_map[self.dtype.val], ret_shape)
@precondition(allow=VALUE | SYMBOL)
def value_inference(self):
type_map = {
"int32": np.int32,
"int64": np.int64,
"fp16": np.float16,
"fp32": np.float32,
"fp64": np.float64,
"bool": np.bool,
}
if self.dtype.val not in type_map.keys():
raise NotImplementedError(
"Parameter dtype of the cast operation can be one of the {}. "
"Provided {}".format(type_map.keys(), self.dtype.val)
)
if self.x.val is None:
if self.x.sym_val is not None and not is_symbolic(self.x.sym_val) and len(self.x.sym_val.shape) == 1:
result = [np.array(val).astype(dtype=type_map[self.dtype.val]).item() if not is_symbolic(val) else val for val in self.x.sym_val]
return np.array(result)
return None
if not types.is_tensor(self.x.sym_type):
return self.x.val.astype(dtype=type_map[self.dtype.val])
else:
return np.array(self.x.val).astype(dtype=type_map[self.dtype.val])
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Shared code between AMQP based openstack.common.rpc implementations.
The code in this module is shared between the rpc implemenations based on AMQP.
Specifically, this includes impl_kombu and impl_qpid. impl_carrot also uses
AMQP, but is deprecated and predates this code.
"""
import collections
import inspect
import sys
import uuid
from eventlet import greenpool
from eventlet import pools
from eventlet import queue
from eventlet import semaphore
# TODO(pekowsk): Remove import cfg and below comment in Havana.
# This import should no longer be needed when the amqp_rpc_single_reply_queue
# option is removed.
from oslo.config import cfg
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import local
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
# TODO(pekowski): Remove this option in Havana.
amqp_opts = [
cfg.BoolOpt('amqp_rpc_single_reply_queue',
default=False,
help='Enable a fast single reply queue if using AMQP based '
'RPC like RabbitMQ or Qpid.'),
]
cfg.CONF.register_opts(amqp_opts)
UNIQUE_ID = '_unique_id'
LOG = logging.getLogger(__name__)
class Pool(pools.Pool):
    """Class that implements a Pool of Connections."""
    def __init__(self, conf, connection_cls, *args, **kwargs):
        # Remember the class and config needed to build connections on
        # demand in create().
        self.connection_cls = connection_cls
        self.conf = conf
        kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size)
        # LIFO order keeps recently used (warm) connections on top.
        kwargs.setdefault("order_as_stack", True)
        super(Pool, self).__init__(*args, **kwargs)
        self.reply_proxy = None
    # TODO(comstud): Timeout connections not used in a while
    def create(self):
        """Build a brand new connection (called by the pool on demand)."""
        LOG.debug(_('Pool creating new connection'))
        return self.connection_cls(self.conf)
    def empty(self):
        """Close every cached connection and drop the shared pool."""
        while self.free_items:
            self.get().close()
        # Force a new connection pool to be created.
        # Note that this was added due to failing unit test cases. The issue
        # is the above "while loop" gets all the cached connections from the
        # pool and closes them, but never returns them to the pool, a pool
        # leak. The unit tests hang waiting for an item to be returned to the
        # pool. The unit tests get here via the tearDown() method. In the run
        # time code, it gets here via cleanup() and only appears in service.py
        # just before doing a sys.exit(), so cleanup() only happens once and
        # the leakage is not a problem.
        self.connection_cls.pool = None
# Guards lazy creation of the per-connection-class pool.
_pool_create_sem = semaphore.Semaphore()
def get_connection_pool(conf, connection_cls):
    """Return the pool shared by all users of *connection_cls*,
    creating it on first use.
    """
    with _pool_create_sem:
        # Make sure only one thread tries to create the connection pool.
        # NOTE(review): truthiness (not an `is None` check) is relied on
        # here -- Pool.empty() resets the attribute to None.
        if not connection_cls.pool:
            connection_cls.pool = Pool(conf, connection_cls)
        return connection_cls.pool
class ConnectionContext(rpc_common.Connection):
    """The class that is actually returned to the caller of
    create_connection(). This is essentially a wrapper around
    Connection that supports 'with'. It can also return a new
    Connection, or one from a pool. The function will also catch
    when an instance of this class is to be deleted. With that
    we can return Connections to the pool on exceptions and so
    forth without making the caller be responsible for catching
    them. If possible the function makes sure to return a
    connection to the pool.
    """
    def __init__(self, conf, connection_pool, pooled=True, server_params=None):
        """Create a new connection, or get one from the pool"""
        self.connection = None
        self.conf = conf
        self.connection_pool = connection_pool
        if pooled:
            # Borrow a (possibly lazily created) connection from the pool.
            self.connection = connection_pool.get()
        else:
            # Dedicated connection, e.g. targeting specific server_params.
            self.connection = connection_pool.connection_cls(
                conf,
                server_params=server_params)
        self.pooled = pooled
    def __enter__(self):
        """When with ConnectionContext() is used, return self"""
        return self
    def _done(self):
        """If the connection came from a pool, clean it up and put it back.
        If it did not come from a pool, close it.
        """
        # Idempotent: self.connection is cleared after the first call, so
        # __exit__, __del__ and close() can all safely invoke this.
        if self.connection:
            if self.pooled:
                # Reset the connection so it's ready for the next caller
                # to grab from the pool
                self.connection.reset()
                self.connection_pool.put(self.connection)
            else:
                try:
                    self.connection.close()
                except Exception:
                    # Best-effort close; the connection is discarded anyway.
                    pass
            self.connection = None
    def __exit__(self, exc_type, exc_value, tb):
        """End of 'with' statement. We're done here."""
        self._done()
    def __del__(self):
        """Caller is done with this connection. Make sure we cleaned up."""
        self._done()
    def close(self):
        """Caller is done with this connection."""
        self._done()
    def create_consumer(self, topic, proxy, fanout=False):
        # Delegate to the wrapped connection.
        self.connection.create_consumer(topic, proxy, fanout)
    def create_worker(self, topic, proxy, pool_name):
        self.connection.create_worker(topic, proxy, pool_name)
    def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
        self.connection.join_consumer_pool(callback,
                                           pool_name,
                                           topic,
                                           exchange_name)
    def consume_in_thread(self):
        self.connection.consume_in_thread()
    def __getattr__(self, key):
        """Proxy all other calls to the Connection instance"""
        if self.connection:
            return getattr(self.connection, key)
        else:
            # Connection already returned/closed; reuse is a bug.
            raise rpc_common.InvalidRPCConnectionReuse()
class ReplyProxy(ConnectionContext):
    """ Connection class for RPC replies / callbacks """
    def __init__(self, conf, connection_pool):
        # Waiters keyed by _msg_id; incoming replies are demultiplexed
        # onto them by _process_data.
        self._call_waiters = {}
        self._num_call_waiters = 0
        self._num_call_waiters_wrn_threshhold = 10
        # One process-wide direct reply queue shared by all calls.
        self._reply_q = 'reply_' + uuid.uuid4().hex
        super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False)
        self.declare_direct_consumer(self._reply_q, self._process_data)
        self.consume_in_thread()
    def _process_data(self, message_data):
        # Route an incoming reply to the waiter registered for its msg_id.
        msg_id = message_data.pop('_msg_id', None)
        waiter = self._call_waiters.get(msg_id)
        if not waiter:
            LOG.warn(_('no calling threads waiting for msg_id : %s'
                       ', message : %s') % (msg_id, message_data))
        else:
            waiter.put(message_data)
    def add_call_waiter(self, waiter, msg_id):
        self._num_call_waiters += 1
        if self._num_call_waiters > self._num_call_waiters_wrn_threshhold:
            LOG.warn(_('Number of call waiters is greater than warning '
                       'threshhold: %d. There could be a MulticallProxyWaiter '
                       'leak.') % self._num_call_waiters_wrn_threshhold)
            # Double the threshold so the warning doesn't flood the logs.
            self._num_call_waiters_wrn_threshhold *= 2
        self._call_waiters[msg_id] = waiter
    def del_call_waiter(self, msg_id):
        self._num_call_waiters -= 1
        del self._call_waiters[msg_id]
    def get_reply_q(self):
        return self._reply_q
def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
              failure=None, ending=False, log_failure=True):
    """Sends a reply or an error on the channel signified by msg_id.
    Failure should be a sys.exc_info() tuple.
    """
    with ConnectionContext(conf, connection_pool) as conn:
        if failure:
            # Serialize the exception info so it can cross the wire.
            failure = rpc_common.serialize_remote_exception(failure,
                                                            log_failure)
        try:
            msg = {'result': reply, 'failure': failure}
        except TypeError:
            # NOTE(review): building the dict above cannot raise TypeError;
            # presumably this guards against unserializable replies seen at
            # a later stage -- the fallback sends repr() of the reply's
            # attributes instead. Confirm whether this branch is reachable.
            msg = {'result': dict((k, repr(v))
                   for k, v in reply.__dict__.iteritems()),
                   'failure': failure}
        if ending:
            # Marks the last message of a multicall sequence.
            msg['ending'] = True
        _add_unique_id(msg)
        # If a reply_q exists, add the msg_id to the reply and pass the
        # reply_q to direct_send() to use it as the response queue.
        # Otherwise use the msg_id for backward compatibilty.
        if reply_q:
            msg['_msg_id'] = msg_id
            conn.direct_send(reply_q, rpc_common.serialize_msg(msg))
        else:
            conn.direct_send(msg_id, rpc_common.serialize_msg(msg))
class RpcContext(rpc_common.CommonRpcContext):
    """Context that supports replying to a rpc.call"""
    def __init__(self, **kwargs):
        # Pull the AMQP bookkeeping fields out before delegating the
        # remaining kwargs to the common context.
        self.msg_id = kwargs.pop('msg_id', None)
        self.reply_q = kwargs.pop('reply_q', None)
        self.conf = kwargs.pop('conf')
        super(RpcContext, self).__init__(**kwargs)

    def deepcopy(self):
        """Return a new RpcContext carrying the same values."""
        values = self.to_dict()
        values.update(conf=self.conf, msg_id=self.msg_id,
                      reply_q=self.reply_q)
        return self.__class__(**values)

    def reply(self, reply=None, failure=None, ending=False,
              connection_pool=None, log_failure=True):
        """Send *reply* (or *failure*) back on this call's reply channel."""
        if self.msg_id:
            msg_reply(self.conf, self.msg_id, self.reply_q, connection_pool,
                      reply, failure, ending, log_failure)
            if ending:
                # Call finished; no further replies may be sent.
                self.msg_id = None
def unpack_context(conf, msg):
    """Unpack context from msg."""
    context_dict = {}
    for key in list(msg.keys()):
        # NOTE(vish): Some versions of python don't like unicode keys
        #             in kwargs.
        key = str(key)
        if key.startswith('_context_'):
            # Strip the '_context_' prefix and move the value over.
            context_dict[key[9:]] = msg.pop(key)
    context_dict.update(msg_id=msg.pop('_msg_id', None),
                        reply_q=msg.pop('_reply_q', None),
                        conf=conf)
    ctx = RpcContext.from_dict(context_dict)
    rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
    return ctx
def pack_context(msg, context):
    """Pack context into msg.
    Values for message keys need to be less than 255 chars, so we pull
    context out into a bunch of separate keys. If we want to support
    more arguments in rabbit messages, we may want to do the same
    for args at some point.
    """
    # Use items() rather than the Python-2-only iteritems() so this also
    # runs under Python 3; context dicts are tiny, so the extra list copy
    # on Python 2 is negligible.
    context_d = dict(('_context_%s' % key, value)
                     for (key, value) in context.to_dict().items())
    msg.update(context_d)
class _MsgIdCache(object):
    """Detects duplicate AMQP message deliveries by their unique id."""
    # NOTE: This value is considered can be a configuration item, but
    #       it is not necessary to change its value in most cases,
    #       so let this value as static for now.
    DUP_MSG_CHECK_SIZE = 16

    def __init__(self, **kwargs):
        # Bounded deque: only the most recent ids are remembered.
        self.prev_msgids = collections.deque([],
                                             maxlen=self.DUP_MSG_CHECK_SIZE)

    def check_duplicate_message(self, message_data):
        """AMQP consumers may read same message twice when exceptions occur
        before ack is returned. This method prevents doing it.
        """
        if UNIQUE_ID not in message_data:
            # Message carries no unique id; nothing to check.
            return
        msg_id = message_data[UNIQUE_ID]
        if msg_id in self.prev_msgids:
            raise rpc_common.DuplicateMessageError(msg_id=msg_id)
        self.prev_msgids.append(msg_id)
def _add_unique_id(msg):
    """Add unique_id for checking duplicate messages."""
    # The id is later inspected by _MsgIdCache.check_duplicate_message.
    unique_id = uuid.uuid4().hex
    msg[UNIQUE_ID] = unique_id
    LOG.debug(_('UNIQUE_ID is %s.') % (unique_id))
class _ThreadPoolWithWait(object):
    """Base class for a delayed invocation manager used by
    the Connection class to start up green threads
    to handle incoming messages.
    """
    def __init__(self, conf, connection_pool):
        # Green thread pool sized from configuration; each incoming
        # message is handled in its own green thread.
        self.conf = conf
        self.connection_pool = connection_pool
        self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size)

    def wait(self):
        """Wait for all callback threads to exit."""
        self.pool.waitall()
class CallbackWrapper(_ThreadPoolWithWait):
    """Wraps a straight callback to allow it to be invoked in a green
    thread.
    """
    def __init__(self, conf, callback, connection_pool):
        """
        :param conf: cfg.CONF instance
        :param callback: a callable (probably a function)
        :param connection_pool: connection pool as returned by
                                get_connection_pool()
        """
        super(CallbackWrapper, self).__init__(conf=conf,
                                              connection_pool=connection_pool)
        self.callback = callback

    def __call__(self, message_data):
        # Hand the message off to a green thread so the consumer loop is
        # never blocked by a slow callback.
        self.pool.spawn_n(self.callback, message_data)
class ProxyCallback(_ThreadPoolWithWait):
    """Calls methods on a proxy object based on method and args."""

    def __init__(self, conf, proxy, connection_pool):
        # proxy: the object (typically an rpc dispatcher) that actually
        # handles incoming method calls.
        super(ProxyCallback, self).__init__(
            conf=conf,
            connection_pool=connection_pool,
        )
        self.proxy = proxy
        # Guards against re-delivery of the same AMQP message.
        self.msg_id_cache = _MsgIdCache()

    def __call__(self, message_data):
        """Consumer callback to call a method on a proxy object.

        Parses the message for validity and fires off a thread to call the
        proxy object method.

        Message data should be a dictionary with two keys:
            method: string representing the method to call
            args: dictionary of arg: value

        Example: {'method': 'echo', 'args': {'value': 42}}
        """
        # It is important to clear the context here, because at this point
        # the previous context is stored in local.store.context
        if hasattr(local.store, 'context'):
            del local.store.context
        rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
        # Raises DuplicateMessageError if this delivery was already seen.
        self.msg_id_cache.check_duplicate_message(message_data)
        ctxt = unpack_context(self.conf, message_data)
        method = message_data.get('method')
        args = message_data.get('args', {})
        version = message_data.get('version', None)
        if not method:
            # A message without a method cannot be dispatched; report the
            # problem back to the caller and drop it.
            LOG.warn(_('no method for message: %s') % message_data)
            ctxt.reply(_('No method for message: %s') % message_data,
                       connection_pool=self.connection_pool)
            return
        # Dispatch in a green thread so the consumer loop is not blocked.
        self.pool.spawn_n(self._process_data, ctxt, version, method, args)

    def _process_data(self, ctxt, version, method, args):
        """Process a message in a new thread.

        If the proxy object we have has a dispatch method
        (see rpc.dispatcher.RpcDispatcher), pass it the version,
        method, and args and let it dispatch as appropriate. If not, use
        the old behavior of magically calling the specified method on the
        proxy we have here.
        """
        ctxt.update_store()
        try:
            rval = self.proxy.dispatch(ctxt, version, method, **args)
            # Check if the result was a generator
            if inspect.isgenerator(rval):
                # Stream each yielded value back as a separate reply.
                for x in rval:
                    ctxt.reply(x, None, connection_pool=self.connection_pool)
            else:
                ctxt.reply(rval, None, connection_pool=self.connection_pool)
            # This final None tells multicall that it is done.
            ctxt.reply(ending=True, connection_pool=self.connection_pool)
        except rpc_common.ClientException as e:
            # Client-caused failures are expected; log at DEBUG and send the
            # exception info back without logging it as a server failure.
            LOG.debug(_('Expected exception during message handling (%s)') %
                      e._exc_info[1])
            ctxt.reply(None, e._exc_info,
                       connection_pool=self.connection_pool,
                       log_failure=False)
        except Exception:
            # sys.exc_info() is deleted by LOG.exception().
            exc_info = sys.exc_info()
            LOG.error(_('Exception during message handling'),
                      exc_info=exc_info)
            ctxt.reply(None, exc_info, connection_pool=self.connection_pool)
class MulticallProxyWaiter(object):
    """Collects streamed replies for one msg_id via the shared ReplyProxy.

    Instances register themselves with the reply proxy; replies are pushed
    into an internal queue by put() and consumed by iterating the waiter.
    """

    def __init__(self, conf, msg_id, timeout, connection_pool):
        self._msg_id = msg_id
        self._timeout = timeout or conf.rpc_response_timeout
        self._reply_proxy = connection_pool.reply_proxy
        self._done = False
        self._got_ending = False
        self._conf = conf
        self._dataqueue = queue.LightQueue()
        # Add this caller to the reply proxy's call_waiters
        self._reply_proxy.add_call_waiter(self, self._msg_id)
        self.msg_id_cache = _MsgIdCache()

    def put(self, data):
        # Called by the reply proxy when a reply for our msg_id arrives.
        self._dataqueue.put(data)

    def done(self):
        if self._done:
            return
        self._done = True
        # Remove this caller from reply proxy's call_waiters
        self._reply_proxy.del_call_waiter(self._msg_id)

    def _process_data(self, data):
        """Unpack one reply: a result value, a deserialized remote failure,
        or (via the _got_ending flag) the end-of-stream sentinel."""
        result = None
        self.msg_id_cache.check_duplicate_message(data)
        if data['failure']:
            failure = data['failure']
            result = rpc_common.deserialize_remote_exception(self._conf,
                                                             failure)
        elif data.get('ending', False):
            self._got_ending = True
        else:
            result = data['result']
        return result

    def __iter__(self):
        """Return a result until we get a reply with an 'ending' flag."""
        if self._done:
            # NOTE: bare 'return' instead of 'raise StopIteration' -- same
            # semantics in a generator, but raising StopIteration here is a
            # RuntimeError under PEP 479 (Python 3.7+).
            return
        while True:
            try:
                data = self._dataqueue.get(timeout=self._timeout)
                result = self._process_data(data)
            except queue.Empty:
                # No reply within the timeout window: give up.
                self.done()
                raise rpc_common.Timeout()
            except Exception:
                with excutils.save_and_reraise_exception():
                    self.done()
            if self._got_ending:
                self.done()
                return
            if isinstance(result, Exception):
                # Remote side failed; re-raise the deserialized exception.
                self.done()
                raise result
            yield result
#TODO(pekowski): Remove MulticallWaiter() in Havana.
class MulticallWaiter(object):
    """Legacy reply collector that consumes results directly from a
    dedicated connection (pre reply-proxy code path)."""

    def __init__(self, conf, connection, timeout):
        self._connection = connection
        self._iterator = connection.iterconsume(timeout=timeout or
                                                conf.rpc_response_timeout)
        self._result = None
        self._done = False
        self._got_ending = False
        self._conf = conf
        self.msg_id_cache = _MsgIdCache()

    def done(self):
        if self._done:
            return
        self._done = True
        # Tear down the consumer iterator and release the connection.
        self._iterator.close()
        self._iterator = None
        self._connection.close()

    def __call__(self, data):
        """The consume() callback will call this. Store the result."""
        self.msg_id_cache.check_duplicate_message(data)
        if data['failure']:
            failure = data['failure']
            self._result = rpc_common.deserialize_remote_exception(self._conf,
                                                                   failure)
        elif data.get('ending', False):
            self._got_ending = True
        else:
            self._result = data['result']

    def __iter__(self):
        """Return a result until we get a 'None' response from consumer"""
        if self._done:
            # NOTE: bare 'return' instead of 'raise StopIteration' -- same
            # semantics in a generator, but raising StopIteration here is a
            # RuntimeError under PEP 479 (Python 3.7+).
            return
        while True:
            try:
                # next() builtin works on Python 2.6+ and Python 3, unlike
                # the Python-2-only .next() method.
                next(self._iterator)
            except Exception:
                with excutils.save_and_reraise_exception():
                    self.done()
            if self._got_ending:
                self.done()
                return
            result = self._result
            if isinstance(result, Exception):
                self.done()
                raise result
            yield result
def create_connection(conf, new, connection_pool):
    """Create a connection.

    A 'new' connection bypasses the pool; otherwise a pooled one is used.
    """
    use_pool = not new
    return ConnectionContext(conf, connection_pool, pooled=use_pool)
# Serializes lazy creation of the process-wide ReplyProxy in multicall().
_reply_proxy_create_sem = semaphore.Semaphore()
def multicall(conf, context, topic, msg, timeout, connection_pool):
    """Make a call that returns multiple times.

    Returns a waiter object that yields each streamed reply; see
    MulticallWaiter / MulticallProxyWaiter.
    """
    # TODO(pekowski): Remove all these comments in Havana.
    # For amqp_rpc_single_reply_queue = False,
    # Can't use 'with' for multicall, as it returns an iterator
    # that will continue to use the connection. When it's done,
    # connection.close() will get called which will put it back into
    # the pool
    # For amqp_rpc_single_reply_queue = True,
    # The 'with' statement is mandatory for closing the connection
    LOG.debug(_('Making synchronous call on %s ...'), topic)
    msg_id = uuid.uuid4().hex
    msg.update({'_msg_id': msg_id})
    # Lazy logging argument instead of eager '%' formatting: only
    # interpolated when DEBUG is enabled.
    LOG.debug(_('MSG_ID is %s'), msg_id)
    _add_unique_id(msg)
    pack_context(msg, context)

    # TODO(pekowski): Remove this flag and the code under the if clause
    # in Havana.
    if not conf.amqp_rpc_single_reply_queue:
        conn = ConnectionContext(conf, connection_pool)
        wait_msg = MulticallWaiter(conf, conn, timeout)
        conn.declare_direct_consumer(msg_id, wait_msg)
        conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
    else:
        with _reply_proxy_create_sem:
            # Lazily create the single process-wide reply proxy.
            if not connection_pool.reply_proxy:
                connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
        msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
        wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
        with ConnectionContext(conf, connection_pool) as conn:
            conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
    return wait_msg
def call(conf, context, topic, msg, timeout, connection_pool):
    """Sends a message on a topic and wait for a response."""
    replies = multicall(conf, context, topic, msg, timeout, connection_pool)
    # NOTE(vish): return the last result from the multicall
    last = None
    for last in replies:
        pass
    return last
def cast(conf, context, topic, msg, connection_pool):
    """Sends a message on a topic without waiting for a response."""
    LOG.debug(_('Making asynchronous cast on %s...'), topic)
    _add_unique_id(msg)
    pack_context(msg, context)
    payload = rpc_common.serialize_msg(msg)
    with ConnectionContext(conf, connection_pool) as conn:
        conn.topic_send(topic, payload)
def fanout_cast(conf, context, topic, msg, connection_pool):
    """Sends a message on a fanout exchange without waiting for a response."""
    LOG.debug(_('Making asynchronous fanout cast...'))
    _add_unique_id(msg)
    pack_context(msg, context)
    payload = rpc_common.serialize_msg(msg)
    with ConnectionContext(conf, connection_pool) as conn:
        conn.fanout_send(topic, payload)
def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
    """Sends a message on a topic to a specific server."""
    _add_unique_id(msg)
    pack_context(msg, context)
    payload = rpc_common.serialize_msg(msg)
    # A non-pooled connection is needed because server_params may point at
    # a different broker than the pooled connections.
    with ConnectionContext(conf, connection_pool, pooled=False,
                           server_params=server_params) as connection:
        connection.topic_send(topic, payload)
def fanout_cast_to_server(conf, context, server_params, topic, msg,
                          connection_pool):
    """Sends a message on a fanout exchange to a specific server."""
    _add_unique_id(msg)
    pack_context(msg, context)
    payload = rpc_common.serialize_msg(msg)
    # Non-pooled: server_params may point at a different broker.
    with ConnectionContext(conf, connection_pool, pooled=False,
                           server_params=server_params) as connection:
        connection.fanout_send(topic, payload)
def notify(conf, context, topic, msg, connection_pool, envelope):
    """Sends a notification event on a topic."""
    event_info = dict(event_type=msg.get('event_type'), topic=topic)
    LOG.debug(_('Sending %(event_type)s on %(topic)s'), event_info)
    _add_unique_id(msg)
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool) as conn:
        # Only wrap the message in the rpc envelope when requested.
        payload = rpc_common.serialize_msg(msg) if envelope else msg
        conn.notify_send(topic, payload)
def cleanup(connection_pool):
    """Drain the given connection pool, if one exists."""
    if not connection_pool:
        return
    connection_pool.empty()
def get_control_exchange(conf):
    """Return the configured AMQP control exchange name."""
    exchange = conf.control_exchange
    return exchange
| |
# (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
import os
import time
import socket
# 3p
from nose.plugins.attrib import attr
import requests
# project
from config import get_version
from tests.checks.common import AgentCheckTest, load_check
# Clusterwise metrics, pre aggregated on ES, compatible with all ES versions
# Each entry maps a Datadog metric name to a tuple:
#   (metric type, dotted path into the ES stats payload[, value transform])
PRIMARY_SHARD_METRICS = {
    "elasticsearch.primaries.docs.count": ("gauge", "_all.primaries.docs.count"),
    "elasticsearch.primaries.docs.deleted": ("gauge", "_all.primaries.docs.deleted"),
    "elasticsearch.primaries.store.size": ("gauge", "_all.primaries.store.size_in_bytes"),
    "elasticsearch.primaries.indexing.index.total": ("gauge", "_all.primaries.indexing.index_total"),
    "elasticsearch.primaries.indexing.index.time": ("gauge", "_all.primaries.indexing.index_time_in_millis", lambda v: float(v)/1000),
    "elasticsearch.primaries.indexing.index.current": ("gauge", "_all.primaries.indexing.index_current"),
    "elasticsearch.primaries.indexing.delete.total": ("gauge", "_all.primaries.indexing.delete_total"),
    "elasticsearch.primaries.indexing.delete.time": ("gauge", "_all.primaries.indexing.delete_time_in_millis", lambda v: float(v)/1000),
    "elasticsearch.primaries.indexing.delete.current": ("gauge", "_all.primaries.indexing.delete_current"),
    "elasticsearch.primaries.get.total": ("gauge", "_all.primaries.get.total"),
    "elasticsearch.primaries.get.time": ("gauge", "_all.primaries.get.time_in_millis", lambda v: float(v)/1000),
    "elasticsearch.primaries.get.current": ("gauge", "_all.primaries.get.current"),
    "elasticsearch.primaries.get.exists.total": ("gauge", "_all.primaries.get.exists_total"),
    "elasticsearch.primaries.get.exists.time": ("gauge", "_all.primaries.get.exists_time_in_millis", lambda v: float(v)/1000),
    "elasticsearch.primaries.get.missing.total": ("gauge", "_all.primaries.get.missing_total"),
    "elasticsearch.primaries.get.missing.time": ("gauge", "_all.primaries.get.missing_time_in_millis", lambda v: float(v)/1000),
    "elasticsearch.primaries.search.query.total": ("gauge", "_all.primaries.search.query_total"),
    "elasticsearch.primaries.search.query.time": ("gauge", "_all.primaries.search.query_time_in_millis", lambda v: float(v)/1000),
    "elasticsearch.primaries.search.query.current": ("gauge", "_all.primaries.search.query_current"),
    "elasticsearch.primaries.search.fetch.total": ("gauge", "_all.primaries.search.fetch_total"),
    "elasticsearch.primaries.search.fetch.time": ("gauge", "_all.primaries.search.fetch_time_in_millis", lambda v: float(v)/1000),
    "elasticsearch.primaries.search.fetch.current": ("gauge", "_all.primaries.search.fetch_current"),
    "elasticsearch.indices.count": ("gauge", "indices", lambda indices: len(indices))
}
# Primary-shard metrics only available from Elasticsearch 1.0 onward.
# Same (type, path[, transform]) tuple format as PRIMARY_SHARD_METRICS.
PRIMARY_SHARD_METRICS_POST_1_0 = {
    "elasticsearch.primaries.merges.current": ("gauge", "_all.primaries.merges.current"),
    "elasticsearch.primaries.merges.current.docs": ("gauge", "_all.primaries.merges.current_docs"),
    "elasticsearch.primaries.merges.current.size": ("gauge", "_all.primaries.merges.current_size_in_bytes"),
    "elasticsearch.primaries.merges.total": ("gauge", "_all.primaries.merges.total"),
    "elasticsearch.primaries.merges.total.time": ("gauge", "_all.primaries.merges.total_time_in_millis", lambda v: float(v)/1000),
    "elasticsearch.primaries.merges.total.docs": ("gauge", "_all.primaries.merges.total_docs"),
    "elasticsearch.primaries.merges.total.size": ("gauge", "_all.primaries.merges.total_size_in_bytes"),
    "elasticsearch.primaries.refresh.total": ("gauge", "_all.primaries.refresh.total"),
    "elasticsearch.primaries.refresh.total.time": ("gauge", "_all.primaries.refresh.total_time_in_millis", lambda v: float(v)/1000),
    "elasticsearch.primaries.flush.total": ("gauge", "_all.primaries.flush.total"),
    "elasticsearch.primaries.flush.total.time": ("gauge", "_all.primaries.flush.total_time_in_millis", lambda v: float(v)/1000)
}
STATS_METRICS = {  # Metrics that are common to all Elasticsearch versions
    # Tuple format: (metric type, dotted path in node stats[, transform]);
    # *_in_millis values are converted to seconds by the lambda transforms.
    "elasticsearch.docs.count": ("gauge", "indices.docs.count"),
    "elasticsearch.docs.deleted": ("gauge", "indices.docs.deleted"),
    "elasticsearch.store.size": ("gauge", "indices.store.size_in_bytes"),
    "elasticsearch.indexing.index.total": ("gauge", "indices.indexing.index_total"),
    "elasticsearch.indexing.index.time": ("gauge", "indices.indexing.index_time_in_millis", lambda v: float(v)/1000),
    "elasticsearch.indexing.index.current": ("gauge", "indices.indexing.index_current"),
    "elasticsearch.indexing.delete.total": ("gauge", "indices.indexing.delete_total"),
    "elasticsearch.indexing.delete.time": ("gauge", "indices.indexing.delete_time_in_millis", lambda v: float(v)/1000),
    "elasticsearch.indexing.delete.current": ("gauge", "indices.indexing.delete_current"),
    "elasticsearch.get.total": ("gauge", "indices.get.total"),
    "elasticsearch.get.time": ("gauge", "indices.get.time_in_millis", lambda v: float(v)/1000),
    "elasticsearch.get.current": ("gauge", "indices.get.current"),
    "elasticsearch.get.exists.total": ("gauge", "indices.get.exists_total"),
    "elasticsearch.get.exists.time": ("gauge", "indices.get.exists_time_in_millis", lambda v: float(v)/1000),
    "elasticsearch.get.missing.total": ("gauge", "indices.get.missing_total"),
    "elasticsearch.get.missing.time": ("gauge", "indices.get.missing_time_in_millis", lambda v: float(v)/1000),
    "elasticsearch.search.query.total": ("gauge", "indices.search.query_total"),
    "elasticsearch.search.query.time": ("gauge", "indices.search.query_time_in_millis", lambda v: float(v)/1000),
    "elasticsearch.search.query.current": ("gauge", "indices.search.query_current"),
    "elasticsearch.search.fetch.total": ("gauge", "indices.search.fetch_total"),
    "elasticsearch.search.fetch.time": ("gauge", "indices.search.fetch_time_in_millis", lambda v: float(v)/1000),
    "elasticsearch.search.fetch.current": ("gauge", "indices.search.fetch_current"),
    "elasticsearch.indices.segments.count": ("gauge", "indices.segments.count"),
    "elasticsearch.indices.segments.memory_in_bytes": ("gauge", "indices.segments.memory_in_bytes"),
    "elasticsearch.merges.current": ("gauge", "indices.merges.current"),
    "elasticsearch.merges.current.docs": ("gauge", "indices.merges.current_docs"),
    "elasticsearch.merges.current.size": ("gauge", "indices.merges.current_size_in_bytes"),
    "elasticsearch.merges.total": ("gauge", "indices.merges.total"),
    "elasticsearch.merges.total.time": ("gauge", "indices.merges.total_time_in_millis", lambda v: float(v)/1000),
    "elasticsearch.merges.total.docs": ("gauge", "indices.merges.total_docs"),
    "elasticsearch.merges.total.size": ("gauge", "indices.merges.total_size_in_bytes"),
    "elasticsearch.refresh.total": ("gauge", "indices.refresh.total"),
    "elasticsearch.refresh.total.time": ("gauge", "indices.refresh.total_time_in_millis", lambda v: float(v)/1000),
    "elasticsearch.flush.total": ("gauge", "indices.flush.total"),
    "elasticsearch.flush.total.time": ("gauge", "indices.flush.total_time_in_millis", lambda v: float(v)/1000),
    "elasticsearch.process.open_fd": ("gauge", "process.open_file_descriptors"),
    "elasticsearch.transport.rx_count": ("gauge", "transport.rx_count"),
    "elasticsearch.transport.tx_count": ("gauge", "transport.tx_count"),
    "elasticsearch.transport.rx_size": ("gauge", "transport.rx_size_in_bytes"),
    "elasticsearch.transport.tx_size": ("gauge", "transport.tx_size_in_bytes"),
    "elasticsearch.transport.server_open": ("gauge", "transport.server_open"),
    # Thread-pool metrics: rejected counts are submitted as rates.
    "elasticsearch.thread_pool.bulk.active": ("gauge", "thread_pool.bulk.active"),
    "elasticsearch.thread_pool.bulk.threads": ("gauge", "thread_pool.bulk.threads"),
    "elasticsearch.thread_pool.bulk.queue": ("gauge", "thread_pool.bulk.queue"),
    "elasticsearch.thread_pool.bulk.rejected": ("rate", "thread_pool.bulk.rejected"),
    "elasticsearch.thread_pool.flush.active": ("gauge", "thread_pool.flush.active"),
    "elasticsearch.thread_pool.flush.threads": ("gauge", "thread_pool.flush.threads"),
    "elasticsearch.thread_pool.flush.queue": ("gauge", "thread_pool.flush.queue"),
    "elasticsearch.thread_pool.flush.rejected": ("rate", "thread_pool.flush.rejected"),
    "elasticsearch.thread_pool.generic.active": ("gauge", "thread_pool.generic.active"),
    "elasticsearch.thread_pool.generic.threads": ("gauge", "thread_pool.generic.threads"),
    "elasticsearch.thread_pool.generic.queue": ("gauge", "thread_pool.generic.queue"),
    "elasticsearch.thread_pool.generic.rejected": ("rate", "thread_pool.generic.rejected"),
    "elasticsearch.thread_pool.get.active": ("gauge", "thread_pool.get.active"),
    "elasticsearch.thread_pool.get.threads": ("gauge", "thread_pool.get.threads"),
    "elasticsearch.thread_pool.get.queue": ("gauge", "thread_pool.get.queue"),
    "elasticsearch.thread_pool.get.rejected": ("rate", "thread_pool.get.rejected"),
    "elasticsearch.thread_pool.index.active": ("gauge", "thread_pool.index.active"),
    "elasticsearch.thread_pool.index.threads": ("gauge", "thread_pool.index.threads"),
    "elasticsearch.thread_pool.index.queue": ("gauge", "thread_pool.index.queue"),
    "elasticsearch.thread_pool.index.rejected": ("rate", "thread_pool.index.rejected"),
    "elasticsearch.thread_pool.management.active": ("gauge", "thread_pool.management.active"),
    "elasticsearch.thread_pool.management.threads": ("gauge", "thread_pool.management.threads"),
    "elasticsearch.thread_pool.management.queue": ("gauge", "thread_pool.management.queue"),
    "elasticsearch.thread_pool.management.rejected": ("rate", "thread_pool.management.rejected"),
    "elasticsearch.thread_pool.percolate.active": ("gauge", "thread_pool.percolate.active"),
    "elasticsearch.thread_pool.percolate.threads": ("gauge", "thread_pool.percolate.threads"),
    "elasticsearch.thread_pool.percolate.queue": ("gauge", "thread_pool.percolate.queue"),
    "elasticsearch.thread_pool.percolate.rejected": ("rate", "thread_pool.percolate.rejected"),
    "elasticsearch.thread_pool.refresh.active": ("gauge", "thread_pool.refresh.active"),
    "elasticsearch.thread_pool.refresh.threads": ("gauge", "thread_pool.refresh.threads"),
    "elasticsearch.thread_pool.refresh.queue": ("gauge", "thread_pool.refresh.queue"),
    "elasticsearch.thread_pool.refresh.rejected": ("rate", "thread_pool.refresh.rejected"),
    "elasticsearch.thread_pool.search.active": ("gauge", "thread_pool.search.active"),
    "elasticsearch.thread_pool.search.threads": ("gauge", "thread_pool.search.threads"),
    "elasticsearch.thread_pool.search.queue": ("gauge", "thread_pool.search.queue"),
    "elasticsearch.thread_pool.search.rejected": ("rate", "thread_pool.search.rejected"),
    "elasticsearch.thread_pool.snapshot.active": ("gauge", "thread_pool.snapshot.active"),
    "elasticsearch.thread_pool.snapshot.threads": ("gauge", "thread_pool.snapshot.threads"),
    "elasticsearch.thread_pool.snapshot.queue": ("gauge", "thread_pool.snapshot.queue"),
    "elasticsearch.thread_pool.snapshot.rejected": ("rate", "thread_pool.snapshot.rejected"),
    "elasticsearch.thread_pool.suggest.active": ("gauge", "thread_pool.suggest.active"),
    "elasticsearch.thread_pool.suggest.threads": ("gauge", "thread_pool.suggest.threads"),
    "elasticsearch.thread_pool.suggest.queue": ("gauge", "thread_pool.suggest.queue"),
    "elasticsearch.thread_pool.suggest.rejected": ("rate", "thread_pool.suggest.rejected"),
    "elasticsearch.thread_pool.warmer.active": ("gauge", "thread_pool.warmer.active"),
    "elasticsearch.thread_pool.warmer.threads": ("gauge", "thread_pool.warmer.threads"),
    "elasticsearch.thread_pool.warmer.queue": ("gauge", "thread_pool.warmer.queue"),
    "elasticsearch.thread_pool.warmer.rejected": ("rate", "thread_pool.warmer.rejected"),
    "elasticsearch.http.current_open": ("gauge", "http.current_open"),
    "elasticsearch.http.total_opened": ("gauge", "http.total_opened"),
    # JVM memory / thread metrics reported per node.
    "jvm.mem.heap_committed": ("gauge", "jvm.mem.heap_committed_in_bytes"),
    "jvm.mem.heap_used": ("gauge", "jvm.mem.heap_used_in_bytes"),
    "jvm.mem.heap_in_use": ("gauge", "jvm.mem.heap_used_percent"),
    "jvm.mem.heap_max": ("gauge", "jvm.mem.heap_max_in_bytes"),
    "jvm.mem.non_heap_committed": ("gauge", "jvm.mem.non_heap_committed_in_bytes"),
    "jvm.mem.non_heap_used": ("gauge", "jvm.mem.non_heap_used_in_bytes"),
    "jvm.mem.pools.young.used": ("gauge", "jvm.mem.pools.young.used_in_bytes"),
    "jvm.mem.pools.young.max": ("gauge", "jvm.mem.pools.young.max_in_bytes"),
    "jvm.mem.pools.old.used": ("gauge", "jvm.mem.pools.old.used_in_bytes"),
    "jvm.mem.pools.old.max": ("gauge", "jvm.mem.pools.old.max_in_bytes"),
    "jvm.mem.pools.survivor.used": ("gauge", "jvm.mem.pools.survivor.used_in_bytes"),
    "jvm.mem.pools.survivor.max": ("gauge", "jvm.mem.pools.survivor.max_in_bytes"),
    "jvm.threads.count": ("gauge", "jvm.threads.count"),
    "jvm.threads.peak_count": ("gauge", "jvm.threads.peak_count"),
    "elasticsearch.fs.total.total_in_bytes": ("gauge", "fs.total.total_in_bytes"),
    "elasticsearch.fs.total.free_in_bytes": ("gauge", "fs.total.free_in_bytes"),
    "elasticsearch.fs.total.available_in_bytes": ("gauge", "fs.total.available_in_bytes"),
}
# JVM GC metrics: ES >= 0.90.10 renamed collectors to 'young'/'old'.
JVM_METRICS_POST_0_90_10 = {
    "jvm.gc.collectors.young.count": ("gauge", "jvm.gc.collectors.young.collection_count"),
    "jvm.gc.collectors.young.collection_time": ("gauge", "jvm.gc.collectors.young.collection_time_in_millis", lambda v: float(v)/1000),
    "jvm.gc.collectors.old.count": ("gauge", "jvm.gc.collectors.old.collection_count"),
    "jvm.gc.collectors.old.collection_time": ("gauge", "jvm.gc.collectors.old.collection_time_in_millis", lambda v: float(v)/1000)
}
# Pre-0.90.10 JVM GC metrics use the concrete collector class names.
JVM_METRICS_PRE_0_90_10 = {
    "jvm.gc.concurrent_mark_sweep.count": ("gauge", "jvm.gc.collectors.ConcurrentMarkSweep.collection_count"),
    "jvm.gc.concurrent_mark_sweep.collection_time": ("gauge", "jvm.gc.collectors.ConcurrentMarkSweep.collection_time_in_millis", lambda v: float(v)/1000),
    "jvm.gc.par_new.count": ("gauge", "jvm.gc.collectors.ParNew.collection_count"),
    "jvm.gc.par_new.collection_time": ("gauge", "jvm.gc.collectors.ParNew.collection_time_in_millis", lambda v: float(v)/1000),
    "jvm.gc.collection_count": ("gauge", "jvm.gc.collection_count"),
    "jvm.gc.collection_time": ("gauge", "jvm.gc.collection_time_in_millis", lambda v: float(v)/1000),
}
# Version-gated metric sets; the test selects them based on the detected
# ES version (see the version checks in the test class below).
ADDITIONAL_METRICS_POST_0_90_5 = {
    "elasticsearch.search.fetch.open_contexts": ("gauge", "indices.search.open_contexts"),
    "elasticsearch.fielddata.size": ("gauge", "indices.fielddata.memory_size_in_bytes"),
    "elasticsearch.fielddata.evictions": ("gauge", "indices.fielddata.evictions"),
}
ADDITIONAL_METRICS_POST_0_90_5_PRE_2_0 = {
    "elasticsearch.cache.filter.evictions": ("gauge", "indices.filter_cache.evictions"),
    "elasticsearch.cache.filter.size": ("gauge", "indices.filter_cache.memory_size_in_bytes"),
    "elasticsearch.id_cache.size": ("gauge", "indices.id_cache.memory_size_in_bytes"),
}
ADDITIONAL_METRICS_PRE_0_90_5 = {
    "elasticsearch.cache.field.evictions": ("gauge", "indices.cache.field_evictions"),
    "elasticsearch.cache.field.size": ("gauge", "indices.cache.field_size_in_bytes"),
    "elasticsearch.cache.filter.count": ("gauge", "indices.cache.filter_count"),
    "elasticsearch.cache.filter.evictions": ("gauge", "indices.cache.filter_evictions"),
    "elasticsearch.cache.filter.size": ("gauge", "indices.cache.filter_size_in_bytes"),
}
ADDITIONAL_METRICS_POST_1_0_0 = {
    "elasticsearch.indices.translog.size_in_bytes": ("gauge", "indices.translog.size_in_bytes"),
    "elasticsearch.indices.translog.operations": ("gauge", "indices.translog.operations"),
}
# Metrics specific to the 1.x series and later minor releases.
ADDITIONAL_METRICS_1_x = {
    # Currently has issues in test framework:
    # "elasticsearch.fs.total.disk_reads": ("rate", "fs.total.disk_reads"),
    # "elasticsearch.fs.total.disk_writes": ("rate", "fs.total.disk_writes"),
    # "elasticsearch.fs.total.disk_io_op": ("rate", "fs.total.disk_io_op"),
    # "elasticsearch.fs.total.disk_read_size_in_bytes": ("gauge", "fs.total.disk_read_size_in_bytes"),
    # "elasticsearch.fs.total.disk_write_size_in_bytes": ("gauge", "fs.total.disk_write_size_in_bytes"),
    # "elasticsearch.fs.total.disk_io_size_in_bytes": ("gauge", "fs.total.disk_io_size_in_bytes"),
}
ADDITIONAL_METRICS_POST_1_3_0 = {
    "elasticsearch.indices.segments.index_writer_memory_in_bytes": ("gauge", "indices.segments.index_writer_memory_in_bytes"),
    "elasticsearch.indices.segments.version_map_memory_in_bytes": ("gauge", "indices.segments.version_map_memory_in_bytes"),
}
ADDITIONAL_METRICS_POST_1_4_0 = {
    "elasticsearch.indices.indexing.throttle_time": ("rate", "indices.indexing.throttle_time_in_millis", lambda v: float(v)/1000),
    "elasticsearch.indices.query_cache.memory_size_in_bytes": ("gauge", "indices.query_cache.memory_size_in_bytes"),
    "elasticsearch.indices.query_cache.hit_count": ("rate", "indices.query_cache.hit_count"),
    "elasticsearch.indices.query_cache.miss_count": ("rate", "indices.query_cache.miss_count"),
    "elasticsearch.indices.query_cache.evictions": ("rate", "indices.query_cache.evictions"),
    "elasticsearch.indices.segments.index_writer_max_memory_in_bytes": ("gauge", "indices.segments.index_writer_max_memory_in_bytes"),
    "elasticsearch.indices.segments.fixed_bit_set_memory_in_bytes": ("gauge", "indices.segments.fixed_bit_set_memory_in_bytes"),
    "elasticsearch.breakers.fielddata.estimated_size_in_bytes": ("gauge", "breakers.fielddata.estimated_size_in_bytes"),
    "elasticsearch.breakers.fielddata.overhead": ("gauge", "breakers.fielddata.overhead"),
    "elasticsearch.breakers.fielddata.tripped": ("rate", "breakers.fielddata.tripped"),
    "elasticsearch.breakers.parent.estimated_size_in_bytes": ("gauge", "breakers.parent.estimated_size_in_bytes"),
    "elasticsearch.breakers.parent.overhead": ("gauge", "breakers.parent.overhead"),
    "elasticsearch.breakers.parent.tripped": ("rate", "breakers.parent.tripped"),
    "elasticsearch.breakers.request.estimated_size_in_bytes": ("gauge", "breakers.request.estimated_size_in_bytes"),
    "elasticsearch.breakers.request.overhead": ("gauge", "breakers.request.overhead"),
    "elasticsearch.breakers.request.tripped": ("rate", "breakers.request.tripped"),
    "elasticsearch.thread_pool.listener.active": ("gauge", "thread_pool.listener.active"),
    "elasticsearch.thread_pool.listener.threads": ("gauge", "thread_pool.listener.threads"),
    "elasticsearch.thread_pool.listener.queue": ("gauge", "thread_pool.listener.queue"),
    "elasticsearch.thread_pool.listener.rejected": ("rate", "thread_pool.listener.rejected"),
}
ADDITIONAL_METRICS_POST_1_5_0 = {
    "elasticsearch.indices.recovery.current_as_source": ("gauge", "indices.recovery.current_as_source"),
    "elasticsearch.indices.recovery.current_as_target": ("gauge", "indices.recovery.current_as_target"),
    "elasticsearch.indices.recovery.throttle_time": ("rate", "indices.recovery.throttle_time_in_millis", lambda v: float(v)/1000),
}
ADDITIONAL_METRICS_POST_1_6_0 = {
    "elasticsearch.thread_pool.fetch_shard_started.active": ("gauge", "thread_pool.fetch_shard_started.active"),
    "elasticsearch.thread_pool.fetch_shard_started.threads": ("gauge", "thread_pool.fetch_shard_started.threads"),
    "elasticsearch.thread_pool.fetch_shard_started.queue": ("gauge", "thread_pool.fetch_shard_started.queue"),
    "elasticsearch.thread_pool.fetch_shard_started.rejected": ("rate", "thread_pool.fetch_shard_started.rejected"),
    "elasticsearch.thread_pool.fetch_shard_store.active": ("gauge", "thread_pool.fetch_shard_store.active"),
    "elasticsearch.thread_pool.fetch_shard_store.threads": ("gauge", "thread_pool.fetch_shard_store.threads"),
    "elasticsearch.thread_pool.fetch_shard_store.queue": ("gauge", "thread_pool.fetch_shard_store.queue"),
    "elasticsearch.thread_pool.fetch_shard_store.rejected": ("rate", "thread_pool.fetch_shard_store.rejected"),
}
# The merge thread pool was removed in ES 2.0.
ADDITIONAL_METRICS_PRE_2_0 = {
    "elasticsearch.thread_pool.merge.active": ("gauge", "thread_pool.merge.active"),
    "elasticsearch.thread_pool.merge.threads": ("gauge", "thread_pool.merge.threads"),
    "elasticsearch.thread_pool.merge.queue": ("gauge", "thread_pool.merge.queue"),
    "elasticsearch.thread_pool.merge.rejected": ("rate", "thread_pool.merge.rejected"),
}
# Metrics introduced in the 2.x series.
ADDITIONAL_METRICS_POST_2_0 = {
    "elasticsearch.indices.query_cache.cache_size": ("gauge", "indices.query_cache.cache_size"),
    "elasticsearch.indices.query_cache.cache_count": ("rate", "indices.query_cache.cache_count"),
    "elasticsearch.indices.query_cache.total_count": ("rate", "indices.query_cache.total_count"),
    "elasticsearch.indices.segments.doc_values_memory_in_bytes": ("gauge", "indices.segments.doc_values_memory_in_bytes"),
    "elasticsearch.indices.segments.norms_memory_in_bytes": ("gauge", "indices.segments.norms_memory_in_bytes"),
    "elasticsearch.indices.segments.stored_fields_memory_in_bytes": ("gauge", "indices.segments.stored_fields_memory_in_bytes"),
    "elasticsearch.indices.segments.term_vectors_memory_in_bytes": ("gauge", "indices.segments.term_vectors_memory_in_bytes"),
    "elasticsearch.indices.segments.terms_memory_in_bytes": ("gauge", "indices.segments.terms_memory_in_bytes"),
    "elasticsearch.indices.request_cache.memory_size_in_bytes": ("gauge", "indices.request_cache.memory_size_in_bytes"),
    "elasticsearch.indices.request_cache.evictions": ("rate", "indices.request_cache.evictions"),
    "elasticsearch.indices.request_cache.hit_count": ("rate", "indices.request_cache.hit_count"),
    "elasticsearch.indices.request_cache.miss_count": ("rate", "indices.request_cache.miss_count"),
}
ADDITIONAL_METRICS_POST_2_1 = {
    "elasticsearch.indices.indexing.index_failed": ("rate", "indices.indexing.index_failed"),
    "elasticsearch.thread_pool.force_merge.active": ("gauge", "thread_pool.force_merge.active"),
    "elasticsearch.thread_pool.force_merge.threads": ("gauge", "thread_pool.force_merge.threads"),
    "elasticsearch.thread_pool.force_merge.queue": ("gauge", "thread_pool.force_merge.queue"),
    "elasticsearch.thread_pool.force_merge.rejected": ("rate", "thread_pool.force_merge.rejected"),
}
# Cluster health API metrics; 'status' is mapped to a numeric gauge
# (red=0, yellow=1, green=2, unknown=-1).
CLUSTER_HEALTH_METRICS = {
    "elasticsearch.number_of_nodes": ("gauge", "number_of_nodes"),
    "elasticsearch.number_of_data_nodes": ("gauge", "number_of_data_nodes"),
    "elasticsearch.active_primary_shards": ("gauge", "active_primary_shards"),
    "elasticsearch.active_shards": ("gauge", "active_shards"),
    "elasticsearch.relocating_shards": ("gauge", "relocating_shards"),
    "elasticsearch.initializing_shards": ("gauge", "initializing_shards"),
    "elasticsearch.unassigned_shards": ("gauge", "unassigned_shards"),
    "elasticsearch.cluster_status": ("gauge", "status", lambda v: {"red": 0, "yellow": 1, "green": 2}.get(v, -1)),
}
# Pending cluster tasks metrics (computed by the check itself).
CLUSTER_PENDING_TASKS = {
    "elasticsearch.pending_tasks_total": ("gauge", "pending_task_total"),
    "elasticsearch.pending_tasks_priority_high": ("gauge", "pending_tasks_priority_high"),
    "elasticsearch.pending_tasks_priority_urgent": ("gauge", "pending_tasks_priority_urgent"),
    "elasticsearch.pending_tasks_time_in_queue": ("gauge", "pending_tasks_time_in_queue"),
}
def get_es_version():
    """Return the Elasticsearch version under test as a list of ints.

    Reads FLAVOR_VERSION from the environment (e.g. "1.6.0"); falls back
    to [1, 6, 0] when the variable is unset.
    """
    raw = os.environ.get("FLAVOR_VERSION")
    if raw is not None:
        return [int(part) for part in raw.split(".")]
    return [1, 6, 0]
@attr(requires='elastic')
class TestElastic(AgentCheckTest):
CHECK_NAME = "elastic"
    def test_check(self):
        """End-to-end run against a local ES: one tagged instance, one
        cluster-stats instance and one bad URL; then assert that every
        version-appropriate metric, service check and event was emitted."""
        conf_hostname = "foo"
        port = 9200
        bad_port = 9405
        agent_config = {
            "hostname": conf_hostname, "version": get_version(),
            "api_key": "bar"
        }
        tags = [u"foo:bar", u"baz"]
        cluster_tag = [u"cluster_name:elasticsearch"]
        url = 'http://localhost:{0}'.format(port)
        bad_url = 'http://localhost:{0}'.format(bad_port)
        config = {
            'instances': [
                {'url': url, 'tags': tags},  # One with tags not external
                {'url': url, 'cluster_stats': True},  # One without tags, external
                {'url': bad_url},  # One bad url
            ]
        }
        # The unreachable instance must surface a connection error.
        self.assertRaises(
            requests.exceptions.ConnectionError,
            self.run_check, config=config, agent_config=agent_config)
        default_tags = ["url:http://localhost:{0}".format(port)]
        expected_metrics = dict(STATS_METRICS)
        # NOTE: this intentionally mutates the module-level dict -- the
        # hostname override below (`m_name in CLUSTER_HEALTH_METRICS`)
        # relies on it also containing the pending-task metrics.
        CLUSTER_HEALTH_METRICS.update(CLUSTER_PENDING_TASKS)
        expected_metrics.update(CLUSTER_HEALTH_METRICS)
        instance_config = self.check.get_instance_config(config['instances'][0])
        es_version = self.check._get_es_version(instance_config)
        self.assertEquals(es_version, get_es_version())
        # Build the expected metric set according to the ES version under test.
        if es_version >= [0, 90, 5]:
            expected_metrics.update(ADDITIONAL_METRICS_POST_0_90_5)
            if es_version >= [0, 90, 10]:
                expected_metrics.update(JVM_METRICS_POST_0_90_10)
            else:
                expected_metrics.update(JVM_METRICS_PRE_0_90_10)
        else:
            expected_metrics.update(ADDITIONAL_METRICS_PRE_0_90_5)
            expected_metrics.update(JVM_METRICS_PRE_0_90_10)
        if es_version >= [1, 0, 0]:
            expected_metrics.update(ADDITIONAL_METRICS_POST_1_0_0)
        if es_version < [2, 0, 0]:
            expected_metrics.update(ADDITIONAL_METRICS_PRE_2_0)
            if es_version >= [0, 90, 5]:
                expected_metrics.update(ADDITIONAL_METRICS_POST_0_90_5_PRE_2_0)
            if es_version >= [1, 0, 0]:
                expected_metrics.update(ADDITIONAL_METRICS_1_x)
        if es_version >= [1, 3, 0]:
            expected_metrics.update(ADDITIONAL_METRICS_POST_1_3_0)
        if es_version >= [1, 4, 0]:
            expected_metrics.update(ADDITIONAL_METRICS_POST_1_4_0)
        if es_version >= [1, 5, 0]:
            expected_metrics.update(ADDITIONAL_METRICS_POST_1_5_0)
        if es_version >= [1, 6, 0]:
            expected_metrics.update(ADDITIONAL_METRICS_POST_1_6_0)
        if es_version >= [2, 0, 0]:
            expected_metrics.update(ADDITIONAL_METRICS_POST_2_0)
        if es_version >= [2, 1, 0]:
            expected_metrics.update(ADDITIONAL_METRICS_POST_2_1)
        # Node-local metrics are attached to the ES node's own hostname,
        # which changed with ES 2.0; an env var allows a CI override.
        if os.environ.get("DD_ELASTIC_LOCAL_HOSTNAME"):
            local_hostname = os.environ.get("DD_ELASTIC_LOCAL_HOSTNAME")
        elif es_version < [2, 0, 0]:
            local_hostname = socket.gethostname()
        else:
            local_hostname = '127.0.0.1'
        contexts = [
            (conf_hostname, default_tags + tags),
            (local_hostname, default_tags)
        ]
        # Node-stats metrics = everything except cluster health/pending tasks.
        stats_keys = (
            set(expected_metrics.keys()) - set(CLUSTER_HEALTH_METRICS.keys()) -
            set(CLUSTER_PENDING_TASKS.keys())
        )
        for m_name, desc in expected_metrics.iteritems():
            for hostname, m_tags in contexts:
                m_tags = m_tags + cluster_tag
                if (m_name in CLUSTER_HEALTH_METRICS and
                        hostname == local_hostname):
                    # Cluster-level metrics are reported against the
                    # configured hostname, not the node's.
                    hostname = conf_hostname
                if m_name in stats_keys:
                    m_tags = m_tags + [u"node_name:batman"]
                if desc[0] == "gauge":
                    self.assertMetric(
                        m_name, tags=m_tags, count=1, hostname=hostname)
        good_sc_tags = ['host:localhost', 'port:{0}'.format(port)]
        bad_sc_tags = ['host:localhost', 'port:{0}'.format(bad_port)]
        self.assertServiceCheckOK('elasticsearch.can_connect',
                                  tags=good_sc_tags + tags,
                                  count=1)
        self.assertServiceCheckOK('elasticsearch.can_connect',
                                  tags=good_sc_tags,
                                  count=1)
        self.assertServiceCheckCritical('elasticsearch.can_connect',
                                        tags=bad_sc_tags,
                                        count=1)
        # Assert service metadata
        self.assertServiceMetadata(['version'], count=3)
        # FIXME: 0.90.13 returns randomly a red status instead of yellow,
        # so we don't do a coverage test for it
        # Remove me when we stop supporting 0.90.x (not supported anymore by ES)
        if get_es_version() != [0, 90, 13]:
            # Warning because elasticsearch status should be yellow, according to
            # http://chrissimpson.co.uk/elasticsearch-yellow-cluster-status-explained.html
            self.assertServiceCheckWarning('elasticsearch.cluster_health',
                                           tags=good_sc_tags + tags,
                                           count=1)
            self.assertServiceCheckWarning('elasticsearch.cluster_health',
                                           tags=good_sc_tags,
                                           count=1)
            # Assert event
            self.assertEvent('ElasticSearch: foo just reported as yellow', count=1,
                             tags=default_tags+tags+cluster_tag,
                             msg_title='foo is yellow',
                             event_type='elasticsearch', alert_type='warning',
                             source_type_name='elasticsearch')
            self.coverage_report()
def test_config_parser(self):
check = load_check(self.CHECK_NAME, {}, {})
instance = {
"username": "user",
"password": "pass",
"is_external": "yes",
"url": "http://foo.bar",
"tags": ["a", "b:c"],
}
c = check.get_instance_config(instance)
self.assertEquals(c.username, "user")
self.assertEquals(c.password, "pass")
self.assertEquals(c.cluster_stats, True)
self.assertEquals(c.url, "http://foo.bar")
self.assertEquals(c.tags, ["url:http://foo.bar", "a", "b:c"])
self.assertEquals(c.timeout, check.DEFAULT_TIMEOUT)
self.assertEquals(c.service_check_tags, ["host:foo.bar", "port:None", "a", "b:c"])
instance = {
"url": "http://192.168.42.42:12999",
"timeout": 15
}
c = check.get_instance_config(instance)
self.assertEquals(c.username, None)
self.assertEquals(c.password, None)
self.assertEquals(c.cluster_stats, False)
self.assertEquals(c.url, "http://192.168.42.42:12999")
self.assertEquals(c.tags, ["url:http://192.168.42.42:12999"])
self.assertEquals(c.timeout, 15)
self.assertEquals(c.service_check_tags,
["host:192.168.42.42", "port:12999"])
instance = {
"username": "user",
"password": "pass",
"url": "https://foo.bar:9200",
"ssl_verify": "true",
"ssl_cert": "/path/to/cert.pem",
"ssl_key": "/path/to/cert.key",
}
c = check.get_instance_config(instance)
self.assertEquals(c.username, "user")
self.assertEquals(c.password, "pass")
self.assertEquals(c.cluster_stats, False)
self.assertEquals(c.url, "https://foo.bar:9200")
self.assertEquals(c.tags, ["url:https://foo.bar:9200"])
self.assertEquals(c.timeout, check.DEFAULT_TIMEOUT)
self.assertEquals(c.service_check_tags, ["host:foo.bar", "port:9200"])
self.assertEquals(c.ssl_verify, "true")
self.assertEquals(c.ssl_cert, "/path/to/cert.pem")
self.assertEquals(c.ssl_key, "/path/to/cert.key")
    def test_health_event(self):
        """Each run must emit exactly one event and a matching
        cluster_health service check as the cluster goes yellow -> green."""
        dummy_tags = ['foo:bar', 'elastique:recherche']
        server_tags = ['cluster_name:elasticsearch']
        config = {'instances': [
            {'url': 'http://localhost:9200', 'tags': dummy_tags}
        ]}
        # Should be yellow at first: with one replica configured, a
        # single-node cluster cannot allocate the replica shards.
        requests.put('http://localhost:9200/_settings', data='{"index": {"number_of_replicas": 1}}')
        self.run_check(config)
        self.assertEquals(len(self.events), 1)
        self.assertIn('yellow', self.events[0]['msg_title'])
        self.assertEquals(
            ['url:http://localhost:9200'] + dummy_tags + server_tags,
            self.events[0]['tags']
        )
        self.assertServiceCheckWarning(
            'elasticsearch.cluster_health',
            tags=['host:localhost', 'port:9200'] + dummy_tags,
            count=1
        )
        # Set number of replicas to 0 for all indices
        requests.put('http://localhost:9200/_settings', data='{"index": {"number_of_replicas": 0}}')
        # Give ES time to apply the settings change.
        # NOTE(review): presumably 5s is enough on CI -- confirm.
        time.sleep(5)
        # Now shards should be green
        self.run_check(config)
        self.assertEquals(len(self.events), 1)
        self.assertIn('green', self.events[0]['msg_title'])
        self.assertEquals(
            ['url:http://localhost:9200'] + dummy_tags + server_tags,
            self.events[0]['tags']
        )
        self.assertServiceCheckOK(
            'elasticsearch.cluster_health',
            tags=['host:localhost', 'port:9200'] + dummy_tags,
            count=1
        )
def test_pshard_metrics(self):
""" Tests that the pshard related metrics are forwarded and that the
document count for primary indexes is twice smaller as the global
document count when "number_of_replicas" is set to 1 """
elastic_latency = 10
config = {'instances': [
{'url': 'http://localhost:9200', 'pshard_stats': True}
]}
# Cleaning up everything won't hurt.
req = requests.get('http://localhost:9200/_cat/indices?v')
indices_info = req.text.split('\n')[1::-1]
for index_info in indices_info:
index_name = index_info.split()[1]
requests.delete('http://localhost:9200/' + index_name)
requests.put('http://localhost:9200/_settings', data='{"index": {"number_of_replicas": 1}}')
requests.put('http://localhost:9200/testindex/testtype/2', data='{"name": "Jane Doe", "age": 27}')
requests.put('http://localhost:9200/testindex/testtype/1', data='{"name": "John Doe", "age": 42}')
time.sleep(elastic_latency)
self.run_check(config)
pshard_stats_metrics = dict(PRIMARY_SHARD_METRICS)
if get_es_version() >= [1, 0, 0]:
pshard_stats_metrics.update(PRIMARY_SHARD_METRICS_POST_1_0)
for m_name, desc in pshard_stats_metrics.iteritems():
if desc[0] == "gauge":
self.assertMetric(m_name, count=1)
# Our pshard metrics are getting sent, let's check that they're accurate
# Note: please make sure you don't install Maven on the CI for future
# elastic search CI integrations. It would make the line below fail :/
self.assertMetric('elasticsearch.primaries.docs.count', value=2)
# ----- unrelated embedded content follows: CPython Lib/os.py -----
r"""OS routines for Mac, NT, or Posix depending on what system we're on.
This exports:
- all functions from posix, nt, os2, or ce, e.g. unlink, stat, etc.
- os.path is either posixpath or ntpath
- os.name is either 'posix', 'nt', 'os2' or 'ce'.
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.extsep is the extension separator (always '.')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
import sys, errno
import stat as st
# Modules compiled into this interpreter; used below to pick the platform
# backend (posix / nt / os2 / ce).
_names = sys.builtin_module_names

# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep",
           "defpath", "name", "path", "devnull", "SEEK_SET", "SEEK_CUR",
           "SEEK_END", "fsencode", "fsdecode", "get_exec_path", "fdopen",
           "popen", "extsep"]
def _exists(name):
return name in globals()
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
# Any new dependencies of the os module and/or changes in path separator
# requires updating importlib as well.
# Exactly one of the branches below runs, pulling the whole platform
# module into this namespace via import-* and binding `path` to the
# matching path-manipulation module.
if 'posix' in _names:
    name = 'posix'
    linesep = '\n'
    from posix import *
    try:
        from posix import _exit
        __all__.append('_exit')
    except ImportError:
        pass
    import posixpath as path

    try:
        from posix import _have_functions
    except ImportError:
        pass

elif 'nt' in _names:
    name = 'nt'
    linesep = '\r\n'
    from nt import *
    try:
        from nt import _exit
        __all__.append('_exit')
    except ImportError:
        pass
    import ntpath as path

    import nt
    __all__.extend(_get_exports_list(nt))
    del nt

    try:
        from nt import _have_functions
    except ImportError:
        pass

elif 'os2' in _names:
    name = 'os2'
    linesep = '\r\n'
    from os2 import *
    try:
        from os2 import _exit
        __all__.append('_exit')
    except ImportError:
        pass
    # EMX (OS/2 port of GCC) uses forward slashes; plain OS/2 builds use
    # the NT path rules.
    if sys.version.find('EMX GCC') == -1:
        import ntpath as path
    else:
        import os2emxpath as path
        from _emx_link import link

    import os2
    __all__.extend(_get_exports_list(os2))
    del os2

    try:
        from os2 import _have_functions
    except ImportError:
        pass

elif 'ce' in _names:
    name = 'ce'
    linesep = '\r\n'
    from ce import *
    try:
        from ce import _exit
        __all__.append('_exit')
    except ImportError:
        pass
    # We can use the standard Windows path.
    import ntpath as path

    import ce
    __all__.extend(_get_exports_list(ce))
    del ce

    try:
        from ce import _have_functions
    except ImportError:
        pass

else:
    raise ImportError('no os specific module found')
# Register the chosen path module so "import os.path" works.
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
                     devnull)

del _names
if _exists("_have_functions"):
    # Translate the platform's HAVE_* capability strings into sets of the
    # actual function objects that accept dir_fd, an fd in place of a path,
    # or follow_symlinks.  Callers test e.g. `os.chmod in os.supports_dir_fd`.
    _globals = globals()
    def _add(str, fn):
        # Add the function named *fn* to _set when the platform advertises
        # capability *str* and the function exists in this namespace.
        if (fn in _globals) and (str in _have_functions):
            _set.add(_globals[fn])

    _set = set()
    _add("HAVE_FACCESSAT",  "access")
    _add("HAVE_FCHMODAT",   "chmod")
    _add("HAVE_FCHOWNAT",   "chown")
    _add("HAVE_FSTATAT",    "stat")
    _add("HAVE_FUTIMESAT",  "utime")
    _add("HAVE_LINKAT",     "link")
    _add("HAVE_MKDIRAT",    "mkdir")
    _add("HAVE_MKFIFOAT",   "mkfifo")
    _add("HAVE_MKNODAT",    "mknod")
    _add("HAVE_OPENAT",     "open")
    _add("HAVE_READLINKAT", "readlink")
    _add("HAVE_RENAMEAT",   "rename")
    _add("HAVE_SYMLINKAT",  "symlink")
    _add("HAVE_UNLINKAT",   "unlink")
    _add("HAVE_UNLINKAT",   "rmdir")
    _add("HAVE_UTIMENSAT",  "utime")
    supports_dir_fd = _set

    _set = set()
    _add("HAVE_FACCESSAT",  "access")
    supports_effective_ids = _set

    _set = set()
    _add("HAVE_FCHDIR",     "chdir")
    _add("HAVE_FCHMOD",     "chmod")
    _add("HAVE_FCHOWN",     "chown")
    _add("HAVE_FDOPENDIR",  "listdir")
    _add("HAVE_FEXECVE",    "execve")
    _set.add(stat) # fstat always works
    _add("HAVE_FTRUNCATE",  "truncate")
    _add("HAVE_FUTIMENS",   "utime")
    _add("HAVE_FUTIMES",    "utime")
    _add("HAVE_FPATHCONF",  "pathconf")
    if _exists("statvfs") and _exists("fstatvfs"): # mac os x10.3
        _add("HAVE_FSTATVFS", "statvfs")
    supports_fd = _set

    _set = set()
    _add("HAVE_FACCESSAT",  "access")
    # Some platforms don't support lchmod().  Often the function exists
    # anyway, as a stub that always returns ENOSUP or perhaps EOPNOTSUPP.
    # (No, I don't know why that's a good design.)  ./configure will detect
    # this and reject it--so HAVE_LCHMOD still won't be defined on such
    # platforms.  This is Very Helpful.
    #
    # However, sometimes platforms without a working lchmod() *do* have
    # fchmodat().  (Examples: Linux kernel 3.2 with glibc 2.15,
    # OpenIndiana 3.x.)  And fchmodat() has a flag that theoretically makes
    # it behave like lchmod().  So in theory it would be a suitable
    # replacement for lchmod().  But when lchmod() doesn't work, fchmodat()'s
    # flag doesn't work *either*.  Sadly ./configure isn't sophisticated
    # enough to detect this condition--it only determines whether or not
    # fchmodat() minimally works.
    #
    # Therefore we simply ignore fchmodat() when deciding whether or not
    # os.chmod supports follow_symlinks.  Just checking lchmod() is
    # sufficient.  After all--if you have a working fchmodat(), your
    # lchmod() almost certainly works too.
    #
    # _add("HAVE_FCHMODAT",   "chmod")
    _add("HAVE_FCHOWNAT",   "chown")
    _add("HAVE_FSTATAT",    "stat")
    _add("HAVE_LCHFLAGS",   "chflags")
    _add("HAVE_LCHMOD",     "chmod")
    if _exists("lchown"): # mac os x10.3
        _add("HAVE_LCHOWN", "chown")
    _add("HAVE_LINKAT",     "link")
    _add("HAVE_LUTIMES",    "utime")
    _add("HAVE_LSTAT",      "stat")
    _add("HAVE_FSTATAT",    "stat")
    _add("HAVE_UTIMENSAT",  "utime")
    _add("MS_WINDOWS",      "stat")
    supports_follow_symlinks = _set

    # Clean the helpers out of the module namespace.
    del _set
    del _have_functions
    del _globals
    del _add


# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
# Other possible SEEK values are directly imported from posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
def _get_masked_mode(mode):
mask = umask(0)
umask(mask)
return mode & ~mask
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(path [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones.
Works like mkdir, except that any intermediate path segment (not
just the rightmost) will be created if it does not exist. If the
target directory with the same mode as we specified already exists,
raises an OSError if exist_ok is False, otherwise no exception is
raised. This is recursive.
"""
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
try:
makedirs(head, mode, exist_ok)
except OSError as e:
# be happy if someone already created the path
if e.errno != errno.EEXIST:
raise
cdir = curdir
if isinstance(tail, bytes):
cdir = bytes(curdir, 'ASCII')
if tail == cdir: # xxx/newdir/. exists if xxx/newdir exists
return
try:
mkdir(name, mode)
except OSError as e:
dir_exists = path.isdir(name)
expected_mode = _get_masked_mode(mode)
if dir_exists:
# S_ISGID is automatically copied by the OS from parent to child
# directories on mkdir. Don't consider it being set to be a mode
# mismatch as mkdir does not unset it when not specified in mode.
actual_mode = st.S_IMODE(lstat(name).st_mode) & ~st.S_ISGID
else:
actual_mode = -1
if not (e.errno == errno.EEXIST and exist_ok and dir_exists and
actual_mode == expected_mode):
if dir_exists and actual_mode != expected_mode:
e.strerror += ' (mode %o != expected mode %o)' % (
actual_mode, expected_mode)
raise
def removedirs(name):
    """removedirs(path)

    Super-rmdir; remove a leaf directory, then prune every now-empty
    ancestor directory along the rightmost path segments.  The pruning
    stops silently at the first ancestor that cannot be removed
    (typically because it is not empty).  Only the initial rmdir of the
    leaf itself propagates errors.
    """
    # The leaf must go first; if this raises, nothing is pruned.
    rmdir(name)
    parent, leaf = path.split(name)
    if not leaf:
        # Trailing separator ("a/b/"): split again to get the real leaf.
        parent, leaf = path.split(parent)
    while parent and leaf:
        try:
            rmdir(parent)
        except error:
            # Parent not removable (usually non-empty): stop pruning.
            break
        parent, leaf = path.split(parent)
def renames(old, new):
    """renames(old, new)

    Super-rename; create any directories needed to make *new* valid
    before renaming, then prune every directory left empty along the
    rightmost path segments of *old*.

    Note: this function can fail with the new directory structure made
    if you lack permissions needed to unlink the leaf directory or
    file.
    """
    target_dir, target_leaf = path.split(new)
    if target_dir and target_leaf and not path.exists(target_dir):
        makedirs(target_dir)
    rename(old, new)
    source_dir, source_leaf = path.split(old)
    if source_dir and source_leaf:
        # Best-effort pruning of the now-possibly-empty source directories.
        try:
            removedirs(source_dir)
        except error:
            pass
# Publish the directory-tree helpers defined above.
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
    """Directory tree generator.

    Yield a 3-tuple ``(dirpath, dirnames, filenames)`` for every directory
    in the tree rooted at *top* (including *top* itself, excluding '.' and
    '..').  dirnames/filenames contain bare names; join with dirpath for
    full paths.

    With *topdown* true (the default) a directory is yielded before its
    subdirectories, and the caller may prune or reorder the walk by editing
    dirnames in place.  With *topdown* false it is yielded afterwards, and
    editing dirnames has no effect.

    Errors from the listdir() call are ignored unless *onerror* is given;
    it is called with the os.error instance and may re-raise to abort.
    Symlinks to directories are not descended into unless *followlinks* is
    true.

    Caution: if you pass a relative pathname for top, don't change the
    current working directory between resumptions of walk; walk never
    changes it itself.
    """
    join, isdir, islink = path.join, path.isdir, path.islink

    # We may lack read permission on top; historically walk suppresses
    # that error unless the caller explicitly asked to see it.
    try:
        entries = listdir(top)
    except error as err:
        if onerror is not None:
            onerror(err)
        return

    subdirs, regular = [], []
    for entry in entries:
        bucket = subdirs if isdir(join(top, entry)) else regular
        bucket.append(entry)

    if topdown:
        yield top, subdirs, regular
    for entry in subdirs:
        child = join(top, entry)
        if followlinks or not islink(child):
            yield from walk(child, topdown, onerror, followlinks)
    if not topdown:
        yield top, subdirs, regular
# walk() is defined unconditionally (unlike fwalk() below).
__all__.append("walk")
# fwalk() is only available when the platform supports fd-relative
# open/stat (dir_fd) and fd-based listdir/stat.
if {open, stat} <= supports_dir_fd and {listdir, stat} <= supports_fd:

    def fwalk(top=".", topdown=True, onerror=None, *, follow_symlinks=False, dir_fd=None):
        """Directory tree generator.

        This behaves exactly like walk(), except that it yields a 4-tuple

            dirpath, dirnames, filenames, dirfd

        `dirpath`, `dirnames` and `filenames` are identical to walk() output,
        and `dirfd` is a file descriptor referring to the directory `dirpath`.

        The advantage of fwalk() over walk() is that it's safe against symlink
        races (when follow_symlinks is False).

        If dir_fd is not None, it should be a file descriptor open to a directory,
        and top should be relative; top will then be relative to that directory.
        (dir_fd is always supported for fwalk.)

        Caution:
        Since fwalk() yields file descriptors, those are only valid until the
        next iteration step, so you should dup() them if you want to keep them
        for a longer period.

        Example:

        import os
        for root, dirs, files, rootfd in os.fwalk('python/Lib/email'):
            print(root, "consumes", end="")
            print(sum([os.stat(name, dir_fd=rootfd).st_size for name in files]),
                  end="")
            print("bytes in", len(files), "non-directory files")
            if 'CVS' in dirs:
                dirs.remove('CVS')  # don't visit CVS directories
        """
        # Note: To guard against symlink races, we use the standard
        # lstat()/open()/fstat() trick.
        orig_st = stat(top, follow_symlinks=False, dir_fd=dir_fd)
        topfd = open(top, O_RDONLY, dir_fd=dir_fd)
        try:
            # Only recurse when top really is (still) the directory we
            # stat'ed -- this is the race check.
            if (follow_symlinks or (st.S_ISDIR(orig_st.st_mode) and
                                    path.samestat(orig_st, stat(topfd)))):
                yield from _fwalk(topfd, top, topdown, onerror, follow_symlinks)
        finally:
            close(topfd)

    def _fwalk(topfd, toppath, topdown, onerror, follow_symlinks):
        # Note: This uses O(depth of the directory tree) file descriptors: if
        # necessary, it can be adapted to only require O(1) FDs, see issue
        # #13734.

        names = listdir(topfd)
        dirs, nondirs = [], []
        for name in names:
            try:
                # Here, we don't use AT_SYMLINK_NOFOLLOW to be consistent with
                # walk() which reports symlinks to directories as directories.
                # We do however check for symlinks before recursing into
                # a subdirectory.
                if st.S_ISDIR(stat(name, dir_fd=topfd).st_mode):
                    dirs.append(name)
                else:
                    nondirs.append(name)
            except FileNotFoundError:
                try:
                    # Add dangling symlinks, ignore disappeared files
                    if st.S_ISLNK(stat(name, dir_fd=topfd, follow_symlinks=False)
                                  .st_mode):
                        nondirs.append(name)
                except FileNotFoundError:
                    continue

        if topdown:
            yield toppath, dirs, nondirs, topfd

        for name in dirs:
            try:
                orig_st = stat(name, dir_fd=topfd, follow_symlinks=follow_symlinks)
                dirfd = open(name, O_RDONLY, dir_fd=topfd)
            except error as err:
                if onerror is not None:
                    onerror(err)
                return
            try:
                # Same symlink-race guard as in fwalk() itself.
                if follow_symlinks or path.samestat(orig_st, stat(dirfd)):
                    dirpath = path.join(toppath, name)
                    yield from _fwalk(dirfd, dirpath, topdown, onerror, follow_symlinks)
            finally:
                close(dirfd)

        if not topdown:
            yield toppath, dirs, nondirs, topfd

    __all__.append("fwalk")
# Make sure os.environ exists, at least
# (the platform module normally provides it via the import-* above).
try:
    environ
except NameError:
    environ = {}
def execl(file, *args):
    """execl(file, *args)

    Execute the executable file with argument list args, replacing the
    current process. """
    # Thin varargs wrapper over execv(); on success this never returns.
    execv(file, args)
def execle(file, *args):
    """execle(file, *args, env)

    Execute the executable file with argument list args and
    environment env, replacing the current process. """
    # The environment mapping travels as the LAST positional argument.
    env = args[-1]
    execve(file, args[:-1], env)
def execlp(file, *args):
    """execlp(file, *args)

    Execute the executable file (which is searched for along $PATH)
    with argument list args, replacing the current process. """
    # Thin varargs wrapper over execvp(); on success this never returns.
    execvp(file, args)
def execlpe(file, *args):
    """execlpe(file, *args, env)

    Execute the executable file (which is searched for along $PATH)
    with argument list args and environment env, replacing the current
    process. """
    # The environment mapping travels as the LAST positional argument.
    env = args[-1]
    execvpe(file, args[:-1], env)
def execvp(file, args):
    """execvp(file, args)

    Execute the executable file (which is searched for along $PATH)
    with argument list args, replacing the current process.
    args may be a list or tuple of strings. """
    # PATH search and exec are shared with execvpe() in _execvpe().
    _execvpe(file, args)
def execvpe(file, args, env):
    """execvpe(file, args, env)

    Execute the executable file (which is searched for along $PATH)
    with argument list args and environment env , replacing the
    current process.
    args may be a list or tuple of strings. """
    # PATH search and exec are shared with execvp() in _execvpe().
    _execvpe(file, args, env)
# Publish the exec*() convenience wrappers defined above.
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
    # Shared implementation for execvp()/execvpe(): pick execv or execve
    # depending on whether an environment was supplied, then search PATH
    # when the command has no directory component.
    if env is not None:
        exec_func = execve
        argrest = (args, env)
    else:
        exec_func = execv
        argrest = (args,)
        env = environ

    head, tail = path.split(file)
    if head:
        # An explicit directory was given: no PATH search.
        exec_func(file, *argrest)
        return
    last_exc = saved_exc = None
    saved_tb = None
    path_list = get_exec_path(env)
    if name != 'nt':
        file = fsencode(file)
        path_list = map(fsencode, path_list)
    for dir in path_list:
        fullname = path.join(dir, file)
        try:
            exec_func(fullname, *argrest)
        except error as e:
            last_exc = e
            tb = sys.exc_info()[2]
            # Remember the first "interesting" failure -- anything other
            # than "not found" -- so it wins over later ENOENT errors.
            if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
                and saved_exc is None):
                saved_exc = e
                saved_tb = tb
    if saved_exc:
        raise saved_exc.with_traceback(saved_tb)
    # NOTE(review): if path_list were empty, last_exc/tb would be unbound
    # here -- presumably PATH is never empty in practice; confirm.
    raise last_exc.with_traceback(tb)
def get_exec_path(env=None):
    """Returns the sequence of directories that will be searched for the
    named executable (similar to a shell) when launching a process.

    *env* must be an environment variable dict or None.  If *env* is None,
    os.environ will be used.
    """
    # Local import: keep interpreter startup lean and avoid a bootstrap
    # issue, since os itself is imported very early.
    import warnings

    if env is None:
        env = environ

    # {b'PATH': ...}.get('PATH') and {'PATH': ...}.get(b'PATH') emit a
    # BytesWarning under python -b / -bb; silence it for these lookups.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", BytesWarning)

        try:
            search_path = env.get('PATH')
        except TypeError:
            search_path = None

        if supports_bytes_environ:
            try:
                bytes_path = env[b'PATH']
            except (KeyError, TypeError):
                pass
            else:
                # Refuse ambiguous input with both key spellings present.
                if search_path is not None:
                    raise ValueError(
                        "env cannot contain 'PATH' and b'PATH' keys")
                search_path = bytes_path

            if search_path is not None and isinstance(search_path, bytes):
                search_path = fsdecode(search_path)

    if search_path is None:
        search_path = defpath
    return search_path.split(pathsep)
# Change environ to automatically call putenv(), unsetenv if they exist.
from collections.abc import MutableMapping
class _Environ(MutableMapping):
def __init__(self, data, encodekey, decodekey, encodevalue, decodevalue, putenv, unsetenv):
self.encodekey = encodekey
self.decodekey = decodekey
self.encodevalue = encodevalue
self.decodevalue = decodevalue
self.putenv = putenv
self.unsetenv = unsetenv
self._data = data
def __getitem__(self, key):
try:
value = self._data[self.encodekey(key)]
except KeyError:
# raise KeyError with the original key value
raise KeyError(key) from None
return self.decodevalue(value)
def __setitem__(self, key, value):
key = self.encodekey(key)
value = self.encodevalue(value)
self.putenv(key, value)
self._data[key] = value
def __delitem__(self, key):
encodedkey = self.encodekey(key)
self.unsetenv(encodedkey)
try:
del self._data[encodedkey]
except KeyError:
# raise KeyError with the original key value
raise KeyError(key) from None
def __iter__(self):
for key in self._data:
yield self.decodekey(key)
def __len__(self):
return len(self._data)
def __repr__(self):
return 'environ({{{}}})'.format(', '.join(
('{!r}: {!r}'.format(self.decodekey(key), self.decodevalue(value))
for key, value in self._data.items())))
def copy(self):
return dict(self)
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
# Bind the real putenv/unsetenv when the platform module provides them;
# otherwise fall back to no-ops so _Environ still works.
try:
    _putenv = putenv
except NameError:
    _putenv = lambda key, value: None
else:
    __all__.append("putenv")

try:
    _unsetenv = unsetenv
except NameError:
    # Emulate unsetenv by setting the variable to an empty string.
    _unsetenv = lambda key: _putenv(key, "")
else:
    __all__.append("unsetenv")
def _createenviron():
    # Build the os.environ mapping with platform-appropriate key/value
    # codecs wired into _Environ.
    if name in ('os2', 'nt'):
        # Where Env Var Names Must Be UPPERCASE
        def check_str(value):
            if not isinstance(value, str):
                raise TypeError("str expected, not %s" % type(value).__name__)
            return value
        encode = check_str
        decode = str
        def encodekey(key):
            return encode(key).upper()
        # Copy into a fresh dict, upper-casing the keys on the way in.
        data = {}
        for key, value in environ.items():
            data[encodekey(key)] = value
    else:
        # Where Env Var Names Can Be Mixed Case
        encoding = sys.getfilesystemencoding()
        def encode(value):
            if not isinstance(value, str):
                raise TypeError("str expected, not %s" % type(value).__name__)
            return value.encode(encoding, 'surrogateescape')
        def decode(value):
            return value.decode(encoding, 'surrogateescape')
        encodekey = encode
        # Share the platform's raw (bytes-keyed) environ dict directly.
        data = environ
    return _Environ(data,
        encodekey, decode,
        encode, decode,
        _putenv, _unsetenv)
# unicode environ
environ = _createenviron()
del _createenviron
def getenv(key, default=None):
    """Get an environment variable, return None if it doesn't exist.
    The optional second argument can specify an alternate default.
    key, default and the result are str."""
    try:
        return environ[key]
    except KeyError:
        return default
# Windows/OS2 environments are natively str-only; bytes access is posix-only.
supports_bytes_environ = name not in ('os2', 'nt')
__all__.extend(("getenv", "supports_bytes_environ"))
if supports_bytes_environ:
    def _check_bytes(value):
        if not isinstance(value, bytes):
            raise TypeError("bytes expected, not %s" % type(value).__name__)
        return value

    # bytes environ
    # NOTE: environb shares environ's underlying dict, so the two views
    # always stay in sync.
    environb = _Environ(environ._data,
        _check_bytes, bytes,
        _check_bytes, bytes,
        _putenv, _unsetenv)
    del _check_bytes

    def getenvb(key, default=None):
        """Get an environment variable, return None if it doesn't exist.
        The optional second argument can specify an alternate default.
        key, default and the result are bytes."""
        return environb.get(key, default)

    __all__.extend(("environb", "getenvb"))
def _fscodec():
encoding = sys.getfilesystemencoding()
if encoding == 'mbcs':
errors = 'strict'
else:
errors = 'surrogateescape'
def fsencode(filename):
"""
Encode filename to the filesystem encoding with 'surrogateescape' error
handler, return bytes unchanged. On Windows, use 'strict' error handler if
the file system encoding is 'mbcs' (which is the default encoding).
"""
if isinstance(filename, bytes):
return filename
elif isinstance(filename, str):
return filename.encode(encoding, errors)
else:
raise TypeError("expect bytes or str, not %s" % type(filename).__name__)
def fsdecode(filename):
"""
Decode filename from the filesystem encoding with 'surrogateescape' error
handler, return str unchanged. On Windows, use 'strict' error handler if
the file system encoding is 'mbcs' (which is the default encoding).
"""
if isinstance(filename, str):
return filename
elif isinstance(filename, bytes):
return filename.decode(encoding, errors)
else:
raise TypeError("expect bytes or str, not %s" % type(filename).__name__)
return fsencode, fsdecode
# Materialize the codec pair and drop the factory.
fsencode, fsdecode = _fscodec()
del _fscodec
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):

    P_WAIT = 0
    P_NOWAIT = P_NOWAITO = 1

    __all__.extend(["P_WAIT", "P_NOWAIT", "P_NOWAITO"])

    # XXX Should we support P_DETACH?  I suppose it could fork()**2
    # and close the std I/O streams.  Also, P_OVERLAY is the same
    # as execv*()?

    def _spawnvef(mode, file, args, env, func):
        # Internal helper; func is the exec*() function to use
        pid = fork()
        if not pid:
            # Child
            try:
                if env is None:
                    func(file, args)
                else:
                    func(file, args, env)
            except:
                # Bare except on purpose: the child must never fall back into
                # the parent's Python code, so any exec failure exits with 127.
                _exit(127)
        else:
            # Parent
            if mode == P_NOWAIT:
                return pid # Caller is responsible for waiting!
            # P_WAIT: reap the child and translate the raw wait status.
            while 1:
                wpid, sts = waitpid(pid, 0)
                if WIFSTOPPED(sts):
                    # Child merely stopped (e.g. SIGSTOP); keep waiting.
                    continue
                elif WIFSIGNALED(sts):
                    # Killed by a signal: report as negated signal number.
                    return -WTERMSIG(sts)
                elif WIFEXITED(sts):
                    return WEXITSTATUS(sts)
                else:
                    raise error("Not stopped, signaled or exited???")

    def spawnv(mode, file, args):
        """spawnv(mode, file, args) -> integer

Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, None, execv)

    def spawnve(mode, file, args, env):
        """spawnve(mode, file, args, env) -> integer

Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, env, execve)

    # Note: spawnvp[e] isn't currently supported on Windows

    def spawnvp(mode, file, args):
        """spawnvp(mode, file, args) -> integer

Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, None, execvp)

    def spawnvpe(mode, file, args, env):
        """spawnvpe(mode, file, args, env) -> integer

Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, env, execvpe)
if _exists("spawnv"):
    # These aren't supplied by the basic Windows code
    # but can be easily implemented in Python

    def spawnl(mode, file, *args):
        """spawnl(mode, file, *args) -> integer

        Execute file with arguments from args in a subprocess.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits
        normally; otherwise return -SIG, where SIG is the signal that
        killed it."""
        return spawnv(mode, file, args)

    def spawnle(mode, file, *args):
        """spawnle(mode, file, *args, env) -> integer

        Execute file with arguments from args in a subprocess with the
        supplied environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits
        normally; otherwise return -SIG, where SIG is the signal that
        killed it."""
        # The environment mapping is passed as the last positional argument.
        return spawnve(mode, file, args[:-1], args[-1])

    __all__.extend(["spawnv", "spawnve", "spawnl", "spawnle",])
if _exists("spawnvp"):
    # At the moment, Windows doesn't implement spawnvp[e],
    # so it won't have spawnlp[e] either.

    def spawnlp(mode, file, *args):
        """spawnlp(mode, file, *args) -> integer

        Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits
        normally; otherwise return -SIG, where SIG is the signal that
        killed it."""
        # Fix: the old docstring claimed "with the supplied environment",
        # but spawnlp takes no environment (spawnvp passes env=None) --
        # that phrase was copy-pasted from spawnlpe.
        return spawnvp(mode, file, args)

    def spawnlpe(mode, file, *args):
        """spawnlpe(mode, file, *args, env) -> integer

        Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess with the supplied environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits
        normally; otherwise return -SIG, where SIG is the signal that
        killed it."""
        # The environment mapping is passed as the last positional argument.
        env = args[-1]
        return spawnvpe(mode, file, args[:-1], env)

    __all__.extend(["spawnvp", "spawnvpe", "spawnlp", "spawnlpe",])
import copyreg as _copyreg

def _make_stat_result(tup, dict):
    # Reconstructor used when unpickling: rebuild the structseq from its
    # (tuple, dict) state.
    return stat_result(tup, dict)

def _pickle_stat_result(sr):
    # Reduce a stat_result to (reconstructor, args) so it pickles by name
    # through _make_stat_result instead of the C type.
    _, args = sr.__reduce__()
    return (_make_stat_result, args)

try:
    _copyreg.pickle(stat_result, _pickle_stat_result, _make_stat_result)
except NameError: # stat_result may not exist
    pass
def _make_statvfs_result(tup, dict):
    # Reconstructor used when unpickling a statvfs_result structseq.
    return statvfs_result(tup, dict)

def _pickle_statvfs_result(sr):
    # Reduce a statvfs_result to (reconstructor, args) for pickling.
    _, args = sr.__reduce__()
    return (_make_statvfs_result, args)

try:
    _copyreg.pickle(statvfs_result, _pickle_statvfs_result,
                    _make_statvfs_result)
except NameError: # statvfs_result may not exist
    pass
# Supply os.popen()
def popen(cmd, mode="r", buffering=-1):
    """Open a pipe to or from the shell command *cmd*.

    mode "r" reads the child's stdout; mode "w" writes its stdin.  Returns
    a text-mode file-like proxy whose close() waits for the process and
    returns its encoded exit status (None on success).
    """
    if not isinstance(cmd, str):
        raise TypeError("invalid cmd type (%s, expected string)" % type(cmd))
    if mode not in ("r", "w"):
        raise ValueError("invalid mode %r" % mode)
    if buffering == 0 or buffering is None:
        # The TextIOWrapper layer requires a buffered binary pipe.
        raise ValueError("popen() does not support unbuffered streams")
    import subprocess, io
    reading = mode == "r"
    pipes = {"stdout": subprocess.PIPE} if reading else {"stdin": subprocess.PIPE}
    proc = subprocess.Popen(cmd, shell=True, bufsize=buffering, **pipes)
    raw = proc.stdout if reading else proc.stdin
    return _wrap_close(io.TextIOWrapper(raw), proc)
# Helper for popen() -- a proxy for a file whose close waits for the process
class _wrap_close:
    """Delegate to a pipe stream, but make close() also reap the child."""

    def __init__(self, stream, proc):
        self._stream = stream
        self._proc = proc

    def close(self):
        self._stream.close()
        returncode = self._proc.wait()
        if returncode == 0:
            # Success is reported as None, matching historical os.popen().
            return None
        # Windows returns the raw code; Unix shifts it into the high byte
        # to match the old wait()-style status encoding.
        return returncode if name == 'nt' else returncode << 8

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    def __getattr__(self, name):
        # Everything else (read, write, fileno, ...) comes from the stream.
        return getattr(self._stream, name)

    def __iter__(self):
        return iter(self._stream)
# Supply os.fdopen()
def fdopen(fd, *args, **kwargs):
    """Return a file object connected to the file descriptor *fd*.

    Thin wrapper around io.open(); extra arguments are passed through.
    Raises TypeError if *fd* is not an integer.
    """
    if isinstance(fd, int):
        import io
        return io.open(fd, *args, **kwargs)
    raise TypeError("invalid fd type (%s, expected integer)" % type(fd))
# (trailing non-code artifact removed: stray "|" and dataset-viewer
# boilerplate "Subsets and Splits ..." accidentally appended to the file)