CombinedText stringlengths 4 3.42M |
|---|
from sympy import (symbols, factorial, sqrt, Rational, atan, I, log, fps, O,
Sum, oo, S, pi, cos, sin, Function, exp, Derivative, asin,
airyai, acos, acosh, gamma, erf, asech)
from sympy.series.formal import (rational_algorithm, FormalPowerSeries,
rational_independent, simpleDE, exp_re,
hyper_re)
from sympy.utilities.pytest import raises, XFAIL
# Symbols used throughout the tests.  n, m and k are declared integer so
# that symbolic powers such as (-1)**(-k) simplify correctly in the
# formal-power-series tests for symbolic functions (this was the fix for
# the failing symbolic tests; see test_fps__symbolic).
x, y, z = symbols('x y z')
n, m, k = symbols('n m k', integer=True)
f, r = Function('f'), Function('r')
def test_rational_algorithm():
    """Check the (coefficient formula, independent part, shift) triples
    returned by ``rational_algorithm`` for rational functions and for
    functions whose derivatives are rational (log, atan)."""
    f = 1 / ((x - 1)**2 * (x - 2))
    assert rational_algorithm(f, x, k) == \
        (-2**(-k - 1) + 1 - (factorial(k + 1) / factorial(k)), 0, 0)
    f = (1 + x + x**2 + x**3) / ((x - 1) * (x - 2))
    assert rational_algorithm(f, x, k) == \
        (-15*2**(-k - 1) + 4, x + 4, 0)
    f = z / (y*m - m*x - y*x + x**2)
    assert rational_algorithm(f, x, k) == \
        (((-y**(-k - 1)*z) / (y - m)) + ((m**(-k - 1)*z) / (y - m)), 0, 0)
    # Denominators with irrational roots are handled only with full=True.
    f = x / (1 - x - x**2)
    assert rational_algorithm(f, x, k) is None
    assert rational_algorithm(f, x, k, full=True) == \
        (((-Rational(1, 2) + sqrt(5)/2)**(-k - 1) *
          (-sqrt(5)/10 + Rational(1, 2))) +
         ((-sqrt(5)/2 - Rational(1, 2))**(-k - 1) *
          (sqrt(5)/10 + Rational(1, 2))), 0, 0)
    f = 1 / (x**2 + 2*x + 2)
    assert rational_algorithm(f, x, k) is None
    assert rational_algorithm(f, x, k, full=True) == \
        ((I*(-1 + I)**(-k - 1)) / 2 - (I*(-1 - I)**(-k - 1)) / 2, 0, 0)
    # Functions with a rational derivative: non-zero shift in the result.
    f = log(1 + x)
    assert rational_algorithm(f, x, k) == \
        (-(-1)**(-k) / k, 0, 1)
    f = atan(x)
    assert rational_algorithm(f, x, k) is None
    assert rational_algorithm(f, x, k, full=True) == \
        (((I*I**(-k)) / 2 - (I*(-I)**(-k)) / 2) / k, 0, 1)
    f = x*atan(x) - log(1 + x**2) / 2
    assert rational_algorithm(f, x, k) is None
    assert rational_algorithm(f, x, k, full=True) == \
        (((I*I**(-k + 1)) / 2 - (I*(-I)**(-k + 1)) / 2) /
         (k*(k - 1)), 0, 2)
    f = log((1 + x) / (1 - x)) / 2 - atan(x)
    assert rational_algorithm(f, x, k) is None
    assert rational_algorithm(f, x, k, full=True) == \
        ((-(-1)**(-k) / 2 - (I*I**(-k)) / 2 + (I*(-I)**(-k)) / 2 +
          Rational(1, 2)) / k, 0, 1)
    # cos(x) is not rational and has no rational derivative of any order.
    assert rational_algorithm(cos(x), x, k) is None
def test_rational_independent():
    """``rational_independent`` merges terms that differ only by a rational
    factor in x, leaving one representative sum per independent family."""
    ri = rational_independent
    assert ri([], x) == []
    assert ri([cos(x), sin(x)], x) == [cos(x), sin(x)]
    assert ri([x**2, sin(x), x*sin(x), x**3], x) == \
        [x**3 + x**2, x*sin(x) + sin(x)]
    assert ri([S.One, x*log(x), log(x), sin(x)/x, cos(x), sin(x), x], x) == \
        [x + 1, x*log(x) + log(x), sin(x)/x + sin(x), cos(x)]
def test_simpleDE():
    """``simpleDE`` returns a simple linear ODE satisfied by the function."""
    assert simpleDE(exp(x), x, f) == -f(x) + Derivative(f(x), x)
    assert simpleDE(sin(x), x, f) == f(x) + Derivative(f(x), x, x)
    assert simpleDE(log(1 + x), x, f) == \
        (x + 1)*Derivative(f(x), x, 2) + Derivative(f(x), x)
    assert simpleDE(asin(x), x, f) == \
        x*Derivative(f(x), x) + (x**2 - 1)*Derivative(f(x), x, x)
    assert simpleDE(exp(x)*sin(x), x, f) == \
        2*f(x) - 2*Derivative(f(x)) + Derivative(f(x), x, x)
    assert simpleDE(((1 + x)/(1 - x))**n, x, f) == \
        2*n*f(x) + (x**2 - 1)*Derivative(f(x), x)
    assert simpleDE(airyai(x), x, f) == -x*f(x) + Derivative(f(x), x, x)
def test_exp_re():
d = -f(x) + Derivative(f(x), x)
assert exp_re(d, r, k) == -r(k) + r(k + 1)
d = f(x) + Derivative(f(x), x, x)
assert exp_re(d, r, k) == r(k) + r(k + 2)
d = f(x) + Derivative(f(x), x) + Derivative(f(x), x, x)
assert exp_re(d, r, k) == r(k) + r(k + 1) + r(k + 2)
d = Derivative(f(x), x) + Derivative(f(x), x, x)
assert exp_re(d, r, k) == r(k) + r(k + 1)
d = Derivative(f(x), x, 3) + Derivative(f(x), x, 4) + Derivative(f(x))
assert exp_re(d, r, k) == r(k) + r(k + 2) + r(k + 3)
def test_hyper_re():
    """``hyper_re`` turns a polynomial-coefficient DE in f(x) into a
    recurrence for the series coefficients r(k)."""
    d = f(x) + Derivative(f(x), x, x)
    assert hyper_re(d, r, k) == r(k) + (k+1)*(k+2)*r(k + 2)
    d = -x*f(x) + Derivative(f(x), x, x)
    assert hyper_re(d, r, k) == (k + 2)*(k + 3)*r(k + 3) - r(k)
    d = 2*f(x) - 2*Derivative(f(x), x) + Derivative(f(x), x, x)
    assert hyper_re(d, r, k) == \
        (-2*k - 2)*r(k + 1) + (k + 1)*(k + 2)*r(k + 2) + 2*r(k)
    d = 2*n*f(x) + (x**2 - 1)*Derivative(f(x), x)
    assert hyper_re(d, r, k) == \
        k*r(k) + 2*n*r(k + 1) + (-k - 2)*r(k + 2)
    d = (x**10 + 4)*Derivative(f(x), x) + x*(x**10 - 1)*Derivative(f(x), x, x)
    assert hyper_re(d, r, k) == \
        (k*(k - 1) + k)*r(k) + (4*k - (k + 9)*(k + 10) + 40)*r(k + 10)
    d = ((x**2 - 1)*Derivative(f(x), x, 3) + 3*x*Derivative(f(x), x, x) +
         Derivative(f(x), x))
    assert hyper_re(d, r, k) == \
        ((k*(k - 2)*(k - 1) + 3*k*(k - 1) + k)*r(k) +
         (-k*(k + 1)*(k + 2))*r(k + 2))
def test_fps():
    """Basic ``fps`` behavior: pass-through of polynomials/constants and
    the FormalPowerSeries interface (indexing, polynomial, infinite sum)."""
    assert fps(1) == 1
    assert fps(2, x) == 2
    assert fps(2, x, dir='+') == 2
    assert fps(2, x, dir='-') == 2
    assert fps(x**2 + x + 1) == x**2 + x + 1
    assert fps(1/x + 1/x**2) == 1/x + 1/x**2
    # With both algorithms disabled, the input comes back unchanged.
    assert fps(log(1 + x), hyper=False, rational=False) == log(1 + x)
    f = fps(log(1 + x))
    assert isinstance(f, FormalPowerSeries)
    assert f.function == log(1 + x)
    # Substituting a symbol for x leaves the series itself unchanged.
    assert f.subs(x, y) == f
    assert f[:5] == [0, x, -x**2/2, x**3/3, -x**4/4]
    assert f.as_leading_term(x) == x
    assert f.polynomial(6) == x - x**2/2 + x**3/3 - x**4/4 + x**5/5
    k = f.ak.variables[0]
    assert f.infinite == Sum((-(-1)**(-k)*x**k)/k, (k, 1, oo))
    # truncate(n=None) yields terms lazily; compare the first five.
    ft, s = f.truncate(n=None), f[:5]
    for i, t in enumerate(ft):
        if i == 5:
            break
        assert s[i] == t
    raises(NotImplementedError, lambda: fps(y*x))
    raises(ValueError, lambda: fps(x, dir=0))
def test_fps__rational():
    """Series produced by the rational algorithm, including expansion about
    a nonzero point (x0=2) and both directions (dir=1 / dir=-1)."""
    assert fps(1/x) == (1/x)
    assert fps((x**2 + x + 1) / x**3, dir=-1) == (x**2 + x + 1) / x**3
    f = 1 / ((x - 1)**2 * (x - 2))
    assert fps(f, x).truncate() == \
        (-Rational(1, 2) - 5*x/4 - 17*x**2/8 - 49*x**3/16 - 129*x**4/32 -
         321*x**5/64 + O(x**6))
    f = (1 + x + x**2 + x**3) / ((x - 1) * (x - 2))
    assert fps(f, x).truncate() == \
        (Rational(1, 2) + 5*x/4 + 17*x**2/8 + 49*x**3/16 + 113*x**4/32 +
         241*x**5/64 + O(x**6))
    f = x / (1 - x - x**2)
    assert fps(f, x, full=True).truncate() == \
        x + x**2 + 2*x**3 + 3*x**4 + 5*x**5 + O(x**6)
    f = 1 / (x**2 + 2*x + 2)
    assert fps(f, x, full=True).truncate() == \
        Rational(1, 2) - x/2 + x**2/4 - x**4/8 + x**5/8 + O(x**6)
    f = log(1 + x)
    assert fps(f, x).truncate() == \
        x - x**2/2 + x**3/3 - x**4/4 + x**5/5 + O(x**6)
    # At x0=0 both directions agree.
    assert fps(f, x, dir=1).truncate() == fps(f, x, dir=-1).truncate()
    assert fps(f, x, 2).truncate() == \
        (log(3) - Rational(2, 3) - (x - 2)**2/18 + (x - 2)**3/81 -
         (x - 2)**4/324 + (x - 2)**5/1215 + x/3 + O((x - 2)**6, (x, 2)))
    assert fps(f, x, 2, dir=-1).truncate() == \
        (log(3) - Rational(2, 3) - (-x + 2)**2/18 - (-x + 2)**3/81 -
         (-x + 2)**4/324 - (-x + 2)**5/1215 + x/3 + O((x - 2)**6, (x, 2)))
    f = atan(x)
    assert fps(f, x, full=True).truncate() == x - x**3/3 + x**5/5 + O(x**6)
    assert fps(f, x, full=True, dir=1).truncate() == \
        fps(f, x, full=True, dir=-1).truncate()
    assert fps(f, x, 2, full=True).truncate() == \
        (atan(2) - Rational(2, 5) - 2*(x - 2)**2/25 + 11*(x - 2)**3/375 -
         6*(x - 2)**4/625 + 41*(x - 2)**5/15625 + x/5 + O((x - 2)**6, (x, 2)))
    assert fps(f, x, 2, full=True, dir=-1).truncate() == \
        (atan(2) - Rational(2, 5) - 2*(-x + 2)**2/25 - 11*(-x + 2)**3/375 -
         6*(-x + 2)**4/625 - 41*(-x + 2)**5/15625 + x/5 + O((x - 2)**6, (x, 2)))
    f = x*atan(x) - log(1 + x**2) / 2
    assert fps(f, x, full=True).truncate() == x**2/2 - x**4/12 + O(x**6)
    f = log((1 + x) / (1 - x)) / 2 - atan(x)
    assert fps(f, x, full=True).truncate(n=10) == 2*x**3/3 + 2*x**7/7 + O(x**10)
def test_fps__hyper():
    """Series produced by the hypergeometric algorithm (rational disabled
    where needed via rational=False)."""
    f = sin(x)
    assert fps(f, x).truncate() == x - x**3/6 + x**5/120 + O(x**6)
    f = cos(x)
    assert fps(f, x).truncate() == 1 - x**2/2 + x**4/24 + O(x**6)
    f = exp(x)
    assert fps(f, x).truncate() == \
        1 + x + x**2/2 + x**3/6 + x**4/24 + x**5/120 + O(x**6)
    f = atan(x)
    assert fps(f, x).truncate() == x - x**3/3 + x**5/5 + O(x**6)
    f = exp(acos(x))
    assert fps(f, x).truncate() == \
        (exp(pi/2) - x*exp(pi/2) + x**2*exp(pi/2)/2 - x**3*exp(pi/2)/3 +
         5*x**4*exp(pi/2)/24 - x**5*exp(pi/2)/6 + O(x**6))
    f = exp(acosh(x))
    assert fps(f, x).truncate() == I + x - I*x**2/2 - I*x**4/8 + O(x**6)
    f = atan(1/x)
    assert fps(f, x).truncate() == pi/2 - x + x**3/3 - x**5/5 + O(x**6)
    f = x*atan(x) - log(1 + x**2) / 2
    assert fps(f, x, rational=False).truncate() == x**2/2 - x**4/12 + O(x**6)
    f = log(1 + x)
    assert fps(f, x, rational=False).truncate() == \
        x - x**2/2 + x**3/3 - x**4/4 + x**5/5 + O(x**6)
    f = x*exp(x)*sin(2*x)  # TODO: solved using rsolve, improve simpleDE
    assert fps(f, x).truncate() == 2*x**2 + 2*x**3 - x**4/3 - x**5 + O(x**6)
    f = airyai(x**2)
    assert fps(f, x).truncate() == \
        (3**Rational(5, 6)*gamma(Rational(1, 3))/(6*pi) -
         3**Rational(2, 3)*x**2/(3*gamma(Rational(1, 3))) + O(x**6))
    f = exp(x)*sin(x)
    assert fps(f, x).truncate() == x + x**2 + x**3/3 - x**5/30 + O(x**6)
def test_fps_shift():
    """Functions multiplied by a power of x: the series is shifted, and
    negative/fractional powers are handled."""
    f = x**-5*sin(x)
    assert fps(f, x).truncate() == \
        1/x**4 - 1/(6*x**2) + S.One/120 - x**2/5040 + x**4/362880 + O(x**6)
    f = x**2*atan(x)
    assert fps(f, x, rational=False).truncate() == \
        x**3 - x**5/3 + O(x**6)
    f = cos(sqrt(x))*x
    assert fps(f, x).truncate() == \
        x - x**2/2 + x**3/24 - x**4/720 + x**5/40320 + O(x**6)
    f = x**2*cos(sqrt(x))
    assert fps(f, x).truncate() == \
        x**2 - x**3/2 + x**4/24 - x**5/720 + O(x**6)
def test_fps__Add_expr():
    """Series of sums of functions are computed term-wise."""
    f = x*atan(x) - log(1 + x**2) / 2
    assert fps(f, x).truncate() == x**2/2 - x**4/12 + O(x**6)
    f = sin(x) + cos(x) - exp(x) + log(1 + x)
    assert fps(f, x).truncate() == x - 3*x**2/2 - x**4/4 + x**5/5 + O(x**6)
    f = 1/x + sin(x)
    assert fps(f, x).truncate() == 1/x + x - x**3/6 + x**5/120 + O(x**6)
def test_fps__asymptotic():
    """Asymptotic expansions at x0 = oo and x0 = -oo."""
    f = exp(x)
    assert fps(f, x, oo) == f
    assert fps(f, x, -oo).truncate() == O(1/x**6, (x, oo))
    f = erf(x)
    assert fps(f, x, oo).truncate() == 1 + O(1/x**6, (x, oo))
    assert fps(f, x, -oo).truncate() == -1 + O(1/x**6, (x, oo))
    f = atan(x)
    assert fps(f, x, oo, full=True).truncate() == \
        -1/(5*x**5) + 1/(3*x**3) - 1/x + pi/2 + O(1/x**6, (x, oo))
    assert fps(f, x, -oo, full=True).truncate() == \
        -1/(5*x**5) + 1/(3*x**3) - 1/x - pi/2 + O(1/x**6, (x, oo))
    # log(1 + x) at oo has a logarithmic term, so fps does NOT return
    # these purely algebraic expansions (hence !=).
    f = log(1 + x)
    assert fps(f, x, oo) != \
        (-1/(5*x**5) - 1/(4*x**4) + 1/(3*x**3) - 1/(2*x**2) + 1/x - log(1/x) +
         O(1/x**6, (x, oo)))
    assert fps(f, x, -oo) != \
        (-1/(5*x**5) - 1/(4*x**4) + 1/(3*x**3) - 1/(2*x**2) + 1/x + I*pi -
         log(-1/x) + O(1/x**6, (x, oo)))
def test_fps__fractional():
    """Series with half-integer powers of x (sqrt(x) arguments)."""
    f = sin(sqrt(x)) / x
    assert fps(f, x).truncate() == \
        (1/sqrt(x) - sqrt(x)/6 + x**Rational(3, 2)/120 -
         x**Rational(5, 2)/5040 + x**Rational(7, 2)/362880 -
         x**Rational(9, 2)/39916800 + x**Rational(11, 2)/6227020800 + O(x**6))
    f = sin(sqrt(x)) * x
    assert fps(f, x).truncate() == \
        (x**Rational(3, 2) - x**Rational(5, 2)/6 + x**Rational(7, 2)/120 -
         x**Rational(9, 2)/5040 + x**Rational(11, 2)/362880 + O(x**6))
    f = atan(sqrt(x)) / x**2
    assert fps(f, x).truncate() == \
        (x**Rational(-3, 2) - x**Rational(-1, 2)/3 + x**Rational(1, 2)/5 -
         x**Rational(3, 2)/7 + x**Rational(5, 2)/9 - x**Rational(7, 2)/11 +
         x**Rational(9, 2)/13 - x**Rational(11, 2)/15 + O(x**6))
    f = exp(sqrt(x))
    assert fps(f, x).truncate().expand() == \
        (1 + x/2 + x**2/24 + x**3/720 + x**4/40320 + x**5/3628800 + sqrt(x) +
         x**Rational(3, 2)/6 + x**Rational(5, 2)/120 + x**Rational(7, 2)/5040 +
         x**Rational(9, 2)/362880 + x**Rational(11, 2)/39916800 + O(x**6))
    f = exp(sqrt(x))*x
    assert fps(f, x).truncate().expand() == \
        (x + x**2/2 + x**3/24 + x**4/720 + x**5/40320 + x**Rational(3, 2) +
         x**Rational(5, 2)/6 + x**Rational(7, 2)/120 + x**Rational(9, 2)/5040 +
         x**Rational(11, 2)/362880 + O(x**6))
def test_fps__logarithmic_singularity():
    """Functions with a log singularity at 0 are not expanded into the
    naive log(x) series (hence != assertions)."""
    f = log(1 + 1/x)
    assert fps(f, x) != \
        -log(x) + x - x**2/2 + x**3/3 - x**4/4 + x**5/5 + O(x**6)
    assert fps(f, x, rational=False) != \
        -log(x) + x - x**2/2 + x**3/3 - x**4/4 + x**5/5 + O(x**6)
@XFAIL
def test_fps__logarithmic_singularity_fail():
    """Known failure: asech(x) has a log singularity at 0."""
    f = asech(x)  # Algorithms for computing limits probably need improvements
    assert fps(f, x) == log(2) - log(x) - x**2/4 - 3*x**4/64 + O(x**6)
def test_fps__symbolic():
    """Series of functions multiplied by a symbolic power x**n."""
    f = x**n*sin(x**2)
    assert fps(f, x).truncate(8) == x**2*x**n - x**6*x**n/6 + O(x**(n + 8), x)
    f = x**(n - 2)*cos(x)
    assert fps(f, x).truncate() == \
        (x**n*(-S(1)/2 + x**(-2)) + x**2*x**n/24 - x**4*x**n/720 +
         O(x**(n + 6), x))
    f = x**n*log(1 + x)
    fp = fps(f, x)
    k = fp.ak.variables[0]
    assert fp.infinite == \
        Sum((-(-1)**(-k)*x**k*x**n)/k, (k, 1, oo))
    f = x**(n - 2)*sin(x) + x**n*exp(x)
    assert fps(f, x).truncate() == \
        (x**n*(1 + 1/x) + 5*x*x**n/6 + x**2*x**n/2 + 7*x**3*x**n/40 +
         x**4*x**n/24 + 41*x**5*x**n/5040 + O(x**(n + 6), x))
    # Symbolic power with expansion about x0 = 2.
    f = (x - 2)**n*log(1 + x)
    assert fps(f, x, 2).truncate() == \
        ((x - 2)**n*log(3) - (x - 2)**2*(x - 2)**n/18 +
         (x - 2)**3*(x - 2)**n/81 - (x - 2)**4*(x - 2)**n/324 +
         (x - 2)**5*(x - 2)**n/1215 + (x/3 - S(2)/3)*(x - 2)**n +
         O((x - 2)**(n + 6), (x, 2)))
    # Symbolic power combined with an asymptotic expansion.
    f = x**n*atan(x)
    assert fps(f, x, oo).truncate() == \
        (-x**n/(5*x**5) + x**n/(3*x**3) + x**n*(pi/2 - 1/x) +
         O(x**(n - 6), (x, oo)))
@XFAIL
def test_xfail_fps__simpleDE():
    """Known failure: simpleDE cannot yet handle exp(x)*sin(x)/x."""
    f = exp(x)*sin(x)/x
    assert fps(f, x).truncate() == 1 + x + x**2/3 - x**4/30 - x**5/90 + O(x**6)
Fixed failing test for symbolic functions.
from sympy import (symbols, factorial, sqrt, Rational, atan, I, log, fps, O,
Sum, oo, S, pi, cos, sin, Function, exp, Derivative, asin,
airyai, acos, acosh, gamma, erf, asech)
from sympy.series.formal import (rational_algorithm, FormalPowerSeries,
rational_independent, simpleDE, exp_re,
hyper_re)
from sympy.utilities.pytest import raises, XFAIL
# Symbols used throughout the tests.  n, m and k are integer-valued so
# that symbolic powers such as (-1)**(-k) simplify correctly in the
# symbolic formal-power-series tests.
x, y, z = symbols('x y z')
n, m, k = symbols('n m k', integer=True)
f, r = Function('f'), Function('r')
def test_rational_algorithm():
    """Check the (coefficient formula, independent part, shift) triples
    returned by ``rational_algorithm`` for rational functions and for
    functions whose derivatives are rational (log, atan)."""
    f = 1 / ((x - 1)**2 * (x - 2))
    assert rational_algorithm(f, x, k) == \
        (-2**(-k - 1) + 1 - (factorial(k + 1) / factorial(k)), 0, 0)
    f = (1 + x + x**2 + x**3) / ((x - 1) * (x - 2))
    assert rational_algorithm(f, x, k) == \
        (-15*2**(-k - 1) + 4, x + 4, 0)
    f = z / (y*m - m*x - y*x + x**2)
    assert rational_algorithm(f, x, k) == \
        (((-y**(-k - 1)*z) / (y - m)) + ((m**(-k - 1)*z) / (y - m)), 0, 0)
    # Denominators with irrational roots are handled only with full=True.
    f = x / (1 - x - x**2)
    assert rational_algorithm(f, x, k) is None
    assert rational_algorithm(f, x, k, full=True) == \
        (((-Rational(1, 2) + sqrt(5)/2)**(-k - 1) *
          (-sqrt(5)/10 + Rational(1, 2))) +
         ((-sqrt(5)/2 - Rational(1, 2))**(-k - 1) *
          (sqrt(5)/10 + Rational(1, 2))), 0, 0)
    f = 1 / (x**2 + 2*x + 2)
    assert rational_algorithm(f, x, k) is None
    assert rational_algorithm(f, x, k, full=True) == \
        ((I*(-1 + I)**(-k - 1)) / 2 - (I*(-1 - I)**(-k - 1)) / 2, 0, 0)
    # Functions with a rational derivative: non-zero shift in the result.
    f = log(1 + x)
    assert rational_algorithm(f, x, k) == \
        (-(-1)**(-k) / k, 0, 1)
    f = atan(x)
    assert rational_algorithm(f, x, k) is None
    assert rational_algorithm(f, x, k, full=True) == \
        (((I*I**(-k)) / 2 - (I*(-I)**(-k)) / 2) / k, 0, 1)
    f = x*atan(x) - log(1 + x**2) / 2
    assert rational_algorithm(f, x, k) is None
    assert rational_algorithm(f, x, k, full=True) == \
        (((I*I**(-k + 1)) / 2 - (I*(-I)**(-k + 1)) / 2) /
         (k*(k - 1)), 0, 2)
    f = log((1 + x) / (1 - x)) / 2 - atan(x)
    assert rational_algorithm(f, x, k) is None
    assert rational_algorithm(f, x, k, full=True) == \
        ((-(-1)**(-k) / 2 - (I*I**(-k)) / 2 + (I*(-I)**(-k)) / 2 +
          Rational(1, 2)) / k, 0, 1)
    # cos(x) is not rational and has no rational derivative of any order.
    assert rational_algorithm(cos(x), x, k) is None
def test_rational_independent():
    """``rational_independent`` merges terms that differ only by a rational
    factor in x, leaving one representative sum per independent family."""
    ri = rational_independent
    assert ri([], x) == []
    assert ri([cos(x), sin(x)], x) == [cos(x), sin(x)]
    assert ri([x**2, sin(x), x*sin(x), x**3], x) == \
        [x**3 + x**2, x*sin(x) + sin(x)]
    assert ri([S.One, x*log(x), log(x), sin(x)/x, cos(x), sin(x), x], x) == \
        [x + 1, x*log(x) + log(x), sin(x)/x + sin(x), cos(x)]
def test_simpleDE():
    """``simpleDE`` returns a simple linear ODE satisfied by the function."""
    assert simpleDE(exp(x), x, f) == -f(x) + Derivative(f(x), x)
    assert simpleDE(sin(x), x, f) == f(x) + Derivative(f(x), x, x)
    assert simpleDE(log(1 + x), x, f) == \
        (x + 1)*Derivative(f(x), x, 2) + Derivative(f(x), x)
    assert simpleDE(asin(x), x, f) == \
        x*Derivative(f(x), x) + (x**2 - 1)*Derivative(f(x), x, x)
    assert simpleDE(exp(x)*sin(x), x, f) == \
        2*f(x) - 2*Derivative(f(x)) + Derivative(f(x), x, x)
    assert simpleDE(((1 + x)/(1 - x))**n, x, f) == \
        2*n*f(x) + (x**2 - 1)*Derivative(f(x), x)
    assert simpleDE(airyai(x), x, f) == -x*f(x) + Derivative(f(x), x, x)
def test_exp_re():
    """``exp_re`` turns a constant-coefficient DE in f(x) into a recurrence
    for the series coefficients r(k)."""
    d = -f(x) + Derivative(f(x), x)
    assert exp_re(d, r, k) == -r(k) + r(k + 1)
    d = f(x) + Derivative(f(x), x, x)
    assert exp_re(d, r, k) == r(k) + r(k + 2)
    d = f(x) + Derivative(f(x), x) + Derivative(f(x), x, x)
    assert exp_re(d, r, k) == r(k) + r(k + 1) + r(k + 2)
    d = Derivative(f(x), x) + Derivative(f(x), x, x)
    assert exp_re(d, r, k) == r(k) + r(k + 1)
    d = Derivative(f(x), x, 3) + Derivative(f(x), x, 4) + Derivative(f(x))
    assert exp_re(d, r, k) == r(k) + r(k + 2) + r(k + 3)
def test_hyper_re():
    """``hyper_re`` turns a polynomial-coefficient DE in f(x) into a
    recurrence for the series coefficients r(k)."""
    d = f(x) + Derivative(f(x), x, x)
    assert hyper_re(d, r, k) == r(k) + (k+1)*(k+2)*r(k + 2)
    d = -x*f(x) + Derivative(f(x), x, x)
    assert hyper_re(d, r, k) == (k + 2)*(k + 3)*r(k + 3) - r(k)
    d = 2*f(x) - 2*Derivative(f(x), x) + Derivative(f(x), x, x)
    assert hyper_re(d, r, k) == \
        (-2*k - 2)*r(k + 1) + (k + 1)*(k + 2)*r(k + 2) + 2*r(k)
    d = 2*n*f(x) + (x**2 - 1)*Derivative(f(x), x)
    assert hyper_re(d, r, k) == \
        k*r(k) + 2*n*r(k + 1) + (-k - 2)*r(k + 2)
    d = (x**10 + 4)*Derivative(f(x), x) + x*(x**10 - 1)*Derivative(f(x), x, x)
    assert hyper_re(d, r, k) == \
        (k*(k - 1) + k)*r(k) + (4*k - (k + 9)*(k + 10) + 40)*r(k + 10)
    d = ((x**2 - 1)*Derivative(f(x), x, 3) + 3*x*Derivative(f(x), x, x) +
         Derivative(f(x), x))
    assert hyper_re(d, r, k) == \
        ((k*(k - 2)*(k - 1) + 3*k*(k - 1) + k)*r(k) +
         (-k*(k + 1)*(k + 2))*r(k + 2))
def test_fps():
    """Basic ``fps`` behavior: pass-through of polynomials/constants and
    the FormalPowerSeries interface (indexing, polynomial, infinite sum)."""
    assert fps(1) == 1
    assert fps(2, x) == 2
    assert fps(2, x, dir='+') == 2
    assert fps(2, x, dir='-') == 2
    assert fps(x**2 + x + 1) == x**2 + x + 1
    assert fps(1/x + 1/x**2) == 1/x + 1/x**2
    # With both algorithms disabled, the input comes back unchanged.
    assert fps(log(1 + x), hyper=False, rational=False) == log(1 + x)
    f = fps(log(1 + x))
    assert isinstance(f, FormalPowerSeries)
    assert f.function == log(1 + x)
    # Substituting a symbol for x leaves the series itself unchanged.
    assert f.subs(x, y) == f
    assert f[:5] == [0, x, -x**2/2, x**3/3, -x**4/4]
    assert f.as_leading_term(x) == x
    assert f.polynomial(6) == x - x**2/2 + x**3/3 - x**4/4 + x**5/5
    k = f.ak.variables[0]
    assert f.infinite == Sum((-(-1)**(-k)*x**k)/k, (k, 1, oo))
    # truncate(n=None) yields terms lazily; compare the first five.
    ft, s = f.truncate(n=None), f[:5]
    for i, t in enumerate(ft):
        if i == 5:
            break
        assert s[i] == t
    raises(NotImplementedError, lambda: fps(y*x))
    raises(ValueError, lambda: fps(x, dir=0))
def test_fps__rational():
    """Series produced by the rational algorithm, including expansion about
    a nonzero point (x0=2) and both directions (dir=1 / dir=-1)."""
    assert fps(1/x) == (1/x)
    assert fps((x**2 + x + 1) / x**3, dir=-1) == (x**2 + x + 1) / x**3
    f = 1 / ((x - 1)**2 * (x - 2))
    assert fps(f, x).truncate() == \
        (-Rational(1, 2) - 5*x/4 - 17*x**2/8 - 49*x**3/16 - 129*x**4/32 -
         321*x**5/64 + O(x**6))
    f = (1 + x + x**2 + x**3) / ((x - 1) * (x - 2))
    assert fps(f, x).truncate() == \
        (Rational(1, 2) + 5*x/4 + 17*x**2/8 + 49*x**3/16 + 113*x**4/32 +
         241*x**5/64 + O(x**6))
    f = x / (1 - x - x**2)
    assert fps(f, x, full=True).truncate() == \
        x + x**2 + 2*x**3 + 3*x**4 + 5*x**5 + O(x**6)
    f = 1 / (x**2 + 2*x + 2)
    assert fps(f, x, full=True).truncate() == \
        Rational(1, 2) - x/2 + x**2/4 - x**4/8 + x**5/8 + O(x**6)
    f = log(1 + x)
    assert fps(f, x).truncate() == \
        x - x**2/2 + x**3/3 - x**4/4 + x**5/5 + O(x**6)
    # At x0=0 both directions agree.
    assert fps(f, x, dir=1).truncate() == fps(f, x, dir=-1).truncate()
    assert fps(f, x, 2).truncate() == \
        (log(3) - Rational(2, 3) - (x - 2)**2/18 + (x - 2)**3/81 -
         (x - 2)**4/324 + (x - 2)**5/1215 + x/3 + O((x - 2)**6, (x, 2)))
    assert fps(f, x, 2, dir=-1).truncate() == \
        (log(3) - Rational(2, 3) - (-x + 2)**2/18 - (-x + 2)**3/81 -
         (-x + 2)**4/324 - (-x + 2)**5/1215 + x/3 + O((x - 2)**6, (x, 2)))
    f = atan(x)
    assert fps(f, x, full=True).truncate() == x - x**3/3 + x**5/5 + O(x**6)
    assert fps(f, x, full=True, dir=1).truncate() == \
        fps(f, x, full=True, dir=-1).truncate()
    assert fps(f, x, 2, full=True).truncate() == \
        (atan(2) - Rational(2, 5) - 2*(x - 2)**2/25 + 11*(x - 2)**3/375 -
         6*(x - 2)**4/625 + 41*(x - 2)**5/15625 + x/5 + O((x - 2)**6, (x, 2)))
    assert fps(f, x, 2, full=True, dir=-1).truncate() == \
        (atan(2) - Rational(2, 5) - 2*(-x + 2)**2/25 - 11*(-x + 2)**3/375 -
         6*(-x + 2)**4/625 - 41*(-x + 2)**5/15625 + x/5 + O((x - 2)**6, (x, 2)))
    f = x*atan(x) - log(1 + x**2) / 2
    assert fps(f, x, full=True).truncate() == x**2/2 - x**4/12 + O(x**6)
    f = log((1 + x) / (1 - x)) / 2 - atan(x)
    assert fps(f, x, full=True).truncate(n=10) == 2*x**3/3 + 2*x**7/7 + O(x**10)
def test_fps__hyper():
    """Series produced by the hypergeometric algorithm (rational disabled
    where needed via rational=False)."""
    f = sin(x)
    assert fps(f, x).truncate() == x - x**3/6 + x**5/120 + O(x**6)
    f = cos(x)
    assert fps(f, x).truncate() == 1 - x**2/2 + x**4/24 + O(x**6)
    f = exp(x)
    assert fps(f, x).truncate() == \
        1 + x + x**2/2 + x**3/6 + x**4/24 + x**5/120 + O(x**6)
    f = atan(x)
    assert fps(f, x).truncate() == x - x**3/3 + x**5/5 + O(x**6)
    f = exp(acos(x))
    assert fps(f, x).truncate() == \
        (exp(pi/2) - x*exp(pi/2) + x**2*exp(pi/2)/2 - x**3*exp(pi/2)/3 +
         5*x**4*exp(pi/2)/24 - x**5*exp(pi/2)/6 + O(x**6))
    f = exp(acosh(x))
    assert fps(f, x).truncate() == I + x - I*x**2/2 - I*x**4/8 + O(x**6)
    f = atan(1/x)
    assert fps(f, x).truncate() == pi/2 - x + x**3/3 - x**5/5 + O(x**6)
    f = x*atan(x) - log(1 + x**2) / 2
    assert fps(f, x, rational=False).truncate() == x**2/2 - x**4/12 + O(x**6)
    f = log(1 + x)
    assert fps(f, x, rational=False).truncate() == \
        x - x**2/2 + x**3/3 - x**4/4 + x**5/5 + O(x**6)
    f = x*exp(x)*sin(2*x)  # TODO: solved using rsolve, improve simpleDE
    assert fps(f, x).truncate() == 2*x**2 + 2*x**3 - x**4/3 - x**5 + O(x**6)
    f = airyai(x**2)
    assert fps(f, x).truncate() == \
        (3**Rational(5, 6)*gamma(Rational(1, 3))/(6*pi) -
         3**Rational(2, 3)*x**2/(3*gamma(Rational(1, 3))) + O(x**6))
    f = exp(x)*sin(x)
    assert fps(f, x).truncate() == x + x**2 + x**3/3 - x**5/30 + O(x**6)
def test_fps_shift():
    """Functions multiplied by a power of x: the series is shifted, and
    negative/fractional powers are handled."""
    f = x**-5*sin(x)
    assert fps(f, x).truncate() == \
        1/x**4 - 1/(6*x**2) + S.One/120 - x**2/5040 + x**4/362880 + O(x**6)
    f = x**2*atan(x)
    assert fps(f, x, rational=False).truncate() == \
        x**3 - x**5/3 + O(x**6)
    f = cos(sqrt(x))*x
    assert fps(f, x).truncate() == \
        x - x**2/2 + x**3/24 - x**4/720 + x**5/40320 + O(x**6)
    f = x**2*cos(sqrt(x))
    assert fps(f, x).truncate() == \
        x**2 - x**3/2 + x**4/24 - x**5/720 + O(x**6)
def test_fps__Add_expr():
    """Series of sums of functions are computed term-wise."""
    f = x*atan(x) - log(1 + x**2) / 2
    assert fps(f, x).truncate() == x**2/2 - x**4/12 + O(x**6)
    f = sin(x) + cos(x) - exp(x) + log(1 + x)
    assert fps(f, x).truncate() == x - 3*x**2/2 - x**4/4 + x**5/5 + O(x**6)
    f = 1/x + sin(x)
    assert fps(f, x).truncate() == 1/x + x - x**3/6 + x**5/120 + O(x**6)
def test_fps__asymptotic():
    """Asymptotic expansions at x0 = oo and x0 = -oo."""
    f = exp(x)
    assert fps(f, x, oo) == f
    assert fps(f, x, -oo).truncate() == O(1/x**6, (x, oo))
    f = erf(x)
    assert fps(f, x, oo).truncate() == 1 + O(1/x**6, (x, oo))
    assert fps(f, x, -oo).truncate() == -1 + O(1/x**6, (x, oo))
    f = atan(x)
    assert fps(f, x, oo, full=True).truncate() == \
        -1/(5*x**5) + 1/(3*x**3) - 1/x + pi/2 + O(1/x**6, (x, oo))
    assert fps(f, x, -oo, full=True).truncate() == \
        -1/(5*x**5) + 1/(3*x**3) - 1/x - pi/2 + O(1/x**6, (x, oo))
    # log(1 + x) at oo has a logarithmic term, so fps does NOT return
    # these purely algebraic expansions (hence !=).
    f = log(1 + x)
    assert fps(f, x, oo) != \
        (-1/(5*x**5) - 1/(4*x**4) + 1/(3*x**3) - 1/(2*x**2) + 1/x - log(1/x) +
         O(1/x**6, (x, oo)))
    assert fps(f, x, -oo) != \
        (-1/(5*x**5) - 1/(4*x**4) + 1/(3*x**3) - 1/(2*x**2) + 1/x + I*pi -
         log(-1/x) + O(1/x**6, (x, oo)))
def test_fps__fractional():
    """Series with half-integer powers of x (sqrt(x) arguments)."""
    f = sin(sqrt(x)) / x
    assert fps(f, x).truncate() == \
        (1/sqrt(x) - sqrt(x)/6 + x**Rational(3, 2)/120 -
         x**Rational(5, 2)/5040 + x**Rational(7, 2)/362880 -
         x**Rational(9, 2)/39916800 + x**Rational(11, 2)/6227020800 + O(x**6))
    f = sin(sqrt(x)) * x
    assert fps(f, x).truncate() == \
        (x**Rational(3, 2) - x**Rational(5, 2)/6 + x**Rational(7, 2)/120 -
         x**Rational(9, 2)/5040 + x**Rational(11, 2)/362880 + O(x**6))
    f = atan(sqrt(x)) / x**2
    assert fps(f, x).truncate() == \
        (x**Rational(-3, 2) - x**Rational(-1, 2)/3 + x**Rational(1, 2)/5 -
         x**Rational(3, 2)/7 + x**Rational(5, 2)/9 - x**Rational(7, 2)/11 +
         x**Rational(9, 2)/13 - x**Rational(11, 2)/15 + O(x**6))
    f = exp(sqrt(x))
    assert fps(f, x).truncate().expand() == \
        (1 + x/2 + x**2/24 + x**3/720 + x**4/40320 + x**5/3628800 + sqrt(x) +
         x**Rational(3, 2)/6 + x**Rational(5, 2)/120 + x**Rational(7, 2)/5040 +
         x**Rational(9, 2)/362880 + x**Rational(11, 2)/39916800 + O(x**6))
    f = exp(sqrt(x))*x
    assert fps(f, x).truncate().expand() == \
        (x + x**2/2 + x**3/24 + x**4/720 + x**5/40320 + x**Rational(3, 2) +
         x**Rational(5, 2)/6 + x**Rational(7, 2)/120 + x**Rational(9, 2)/5040 +
         x**Rational(11, 2)/362880 + O(x**6))
def test_fps__logarithmic_singularity():
    """Functions with a log singularity at 0 are not expanded into the
    naive log(x) series (hence != assertions)."""
    f = log(1 + 1/x)
    assert fps(f, x) != \
        -log(x) + x - x**2/2 + x**3/3 - x**4/4 + x**5/5 + O(x**6)
    assert fps(f, x, rational=False) != \
        -log(x) + x - x**2/2 + x**3/3 - x**4/4 + x**5/5 + O(x**6)
@XFAIL
def test_fps__logarithmic_singularity_fail():
    """Known failure: asech(x) has a log singularity at 0."""
    f = asech(x)  # Algorithms for computing limits probably need improvements
    assert fps(f, x) == log(2) - log(x) - x**2/4 - 3*x**4/64 + O(x**6)
def test_fps__symbolic():
    """Series of functions multiplied by a symbolic power x**n.
    Requires n, k to be integer symbols so (-1)**(-k) etc. simplify."""
    f = x**n*sin(x**2)
    assert fps(f, x).truncate(8) == x**2*x**n - x**6*x**n/6 + O(x**(n + 8), x)
    f = x**(n - 2)*cos(x)
    assert fps(f, x).truncate() == \
        (x**n*(-S(1)/2 + x**(-2)) + x**2*x**n/24 - x**4*x**n/720 +
         O(x**(n + 6), x))
    f = x**n*log(1 + x)
    fp = fps(f, x)
    k = fp.ak.variables[0]
    assert fp.infinite == \
        Sum((-(-1)**(-k)*x**k*x**n)/k, (k, 1, oo))
    f = x**(n - 2)*sin(x) + x**n*exp(x)
    assert fps(f, x).truncate() == \
        (x**n*(1 + 1/x) + 5*x*x**n/6 + x**2*x**n/2 + 7*x**3*x**n/40 +
         x**4*x**n/24 + 41*x**5*x**n/5040 + O(x**(n + 6), x))
    # Symbolic power with expansion about x0 = 2.
    f = (x - 2)**n*log(1 + x)
    assert fps(f, x, 2).truncate() == \
        ((x - 2)**n*log(3) - (x - 2)**2*(x - 2)**n/18 +
         (x - 2)**3*(x - 2)**n/81 - (x - 2)**4*(x - 2)**n/324 +
         (x - 2)**5*(x - 2)**n/1215 + (x/3 - S(2)/3)*(x - 2)**n +
         O((x - 2)**(n + 6), (x, 2)))
    # Symbolic power combined with an asymptotic expansion.
    f = x**n*atan(x)
    assert fps(f, x, oo).truncate() == \
        (-x**n/(5*x**5) + x**n/(3*x**3) + x**n*(pi/2 - 1/x) +
         O(x**(n - 6), (x, oo)))
@XFAIL
def test_xfail_fps__simpleDE():
    """Known failure: simpleDE cannot yet handle exp(x)*sin(x)/x."""
    f = exp(x)*sin(x)/x
    assert fps(f, x).truncate() == 1 + x + x**2/3 - x**4/30 - x**5/90 + O(x**6)
|
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
# Absolute path of the tracing/ directory (the parent of this file's
# directory).
_TRACING_DIR = os.path.abspath(os.path.join(
    os.path.dirname(__file__), os.path.pardir))
def IsTestExpectedToFail(test_file_name):
  """Return whether the given html/js file is not yet runnable by d8_runner.

  This offers a way to incrementally enable more html files that are
  runnable by d8. If you add new html/js files that are not compatible
  with d8_runner, please contact dsinclair@ or nduca@ for how to proceed.
  """
  return os.path.abspath(test_file_name) in GetD8NonCompatibleFiles()
def GetD8NonCompatibleFiles():
  """Return the set of absolute paths of files expected to raise an
  exception when run by d8_runner.
  """
  # Lazily populate the absolute-path cache from the relative blacklist.
  if not _D8_BLACK_LIST_FILES:
    for f in _REL_PATH_D8_BLACK_LIST_FILES:
      _D8_BLACK_LIST_FILES.add(os.path.join(_TRACING_DIR, f))
  return _D8_BLACK_LIST_FILES
# Cache of absolute paths, populated lazily by GetD8NonCompatibleFiles().
_D8_BLACK_LIST_FILES = set()
# TODO(dsinclair, nduca, nednguyen): burn down this set.
# (https://github.com/google/trace-viewer/issues/984)
_REL_PATH_D8_BLACK_LIST_FILES = {
"tracing/base/base64_test.html",
"tracing/base/bbox2.html",
"tracing/base/bbox2_test.html",
"tracing/base/color_test.html",
"tracing/base/deep_utils_test.html",
"tracing/base/event.html",
"tracing/base/event_target_test.html",
"tracing/base/extension_registry_basic.html",
"tracing/base/extension_registry.html",
"tracing/base/extension_registry_test.html",
"tracing/base/extension_registry_type_based.html",
"tracing/base/interval_tree_test.html",
"tracing/base/iteration_helpers_test.html",
"tracing/base/math.html",
"tracing/base/math_test.html",
"tracing/base/properties.html",
"tracing/base/properties_test.html",
"tracing/base/quad.html",
"tracing/base/quad_test.html",
"tracing/base/raf_test.html",
"tracing/base/range_test.html",
"tracing/base/range_utils_test.html",
"tracing/base/rect.html",
"tracing/base/rect_test.html",
"tracing/base/settings.html",
"tracing/base/settings_test.html",
"tracing/base/sorted_array_utils_test.html",
"tracing/base/statistics_test.html",
"tracing/base/task_test.html",
"tracing/base/tests.html",
"tracing/base/units/size_in_bytes_test.html",
"tracing/base/units/time_duration_test.html",
"tracing/base/units/time_stamp_test.html",
"tracing/base/units/time_test.html",
"tracing/base/unittest.html",
"tracing/base/unittest/html_test_results.html",
"tracing/base/unittest/interactive_test_runner.html",
"tracing/base/unittest/test_case_test.html",
"tracing/base/unittest_test.html",
"tracing/base/utils_test.html",
"tracing/core/auditor.html",
"tracing/core/filter_test.html",
"tracing/core/scripting_control.html",
"tracing/core/scripting_controller.html",
"tracing/core/scripting_controller_test.html",
"tracing/core/scripting_control_test.html",
"tracing/core/selection_controller.html",
"tracing/core/selection_controller_test.html",
"tracing/core/selection.html",
"tracing/core/selection_test.html",
"tracing/core/test_utils.html",
"tracing/extras/android/android_app.html",
"tracing/extras/android/android_auditor.html",
"tracing/extras/android/android_auditor_test.html",
"tracing/extras/android/android_model_helper.html",
"tracing/extras/android/android_model_helper_test.html",
"tracing/extras/chrome/cc/cc.html",
"tracing/extras/chrome/cc/display_item_list.html",
"tracing/extras/chrome/cc/display_item_list_test.html",
"tracing/extras/chrome/cc/input_latency_async_slice.html",
"tracing/extras/chrome/cc/input_latency_async_slice_test.html",
"tracing/extras/chrome/cc/layer_impl.html",
"tracing/extras/chrome/cc/layer_tree_host_impl.html",
"tracing/extras/chrome/cc/layer_tree_host_impl_test.html",
"tracing/extras/chrome/cc/layer_tree_impl.html",
"tracing/extras/chrome/cc/picture.html",
"tracing/extras/chrome/cc/picture_test.html",
"tracing/extras/chrome/cc/raster_task.html",
"tracing/extras/chrome/cc/raster_task_selection.html",
"tracing/extras/chrome/cc/raster_task_selection_test.html",
"tracing/extras/chrome/cc/region.html",
"tracing/extras/chrome/cc/render_pass.html",
"tracing/extras/chrome/cc/selection.html",
"tracing/extras/chrome/cc/tile.html",
"tracing/extras/chrome/cc/tile_test.html",
"tracing/extras/chrome/cc/util.html",
"tracing/extras/chrome/cc/util_test.html",
"tracing/extras/chrome/chrome_auditor.html",
"tracing/extras/chrome/chrome_auditor_test.html",
"tracing/extras/chrome/chrome_browser_helper_test.html",
"tracing/extras/chrome/chrome_model_helper_test.html",
"tracing/extras/chrome/chrome_test_utils.html",
"tracing/extras/chrome_config.html",
"tracing/extras/chrome/gpu/gpu_async_slice.html",
"tracing/extras/chrome/gpu/gpu_async_slice_test.html",
"tracing/extras/chrome/gpu/state.html",
"tracing/extras/chrome/gpu/state_test.html",
"tracing/extras/chrome/layout_object_test.html",
"tracing/extras/full_config.html",
"tracing/extras/importer/battor_importer.html",
"tracing/extras/importer/battor_importer_test.html",
"tracing/extras/importer/ddms_importer.html",
"tracing/extras/importer/ddms_importer_test.html",
"tracing/extras/importer/etw/etw_importer.html",
"tracing/extras/importer/etw/etw_importer_test.html",
"tracing/extras/importer/etw/eventtrace_parser.html",
"tracing/extras/importer/etw/eventtrace_parser_test.html",
"tracing/extras/importer/etw/parser.html",
"tracing/extras/importer/etw/process_parser.html",
"tracing/extras/importer/etw/process_parser_test.html",
"tracing/extras/importer/etw/thread_parser.html",
"tracing/extras/importer/etw/thread_parser_test.html",
"tracing/extras/importer/gzip_importer.html",
"tracing/extras/importer/gzip_importer_test.html",
"tracing/extras/importer/jszip.html",
"tracing/extras/importer/linux_perf/android_parser.html",
"tracing/extras/importer/linux_perf/android_parser_test.html",
"tracing/extras/importer/linux_perf/bus_parser.html",
"tracing/extras/importer/linux_perf/bus_parser_test.html",
"tracing/extras/importer/linux_perf/clock_parser.html",
"tracing/extras/importer/linux_perf/clock_parser_test.html",
"tracing/extras/importer/linux_perf/cpufreq_parser.html",
"tracing/extras/importer/linux_perf/cpufreq_parser_test.html",
"tracing/extras/importer/linux_perf/disk_parser.html",
"tracing/extras/importer/linux_perf/disk_parser_test.html",
"tracing/extras/importer/linux_perf/drm_parser.html",
"tracing/extras/importer/linux_perf/drm_parser_test.html",
"tracing/extras/importer/linux_perf/exynos_parser.html",
"tracing/extras/importer/linux_perf/exynos_parser_test.html",
"tracing/extras/importer/linux_perf/ftrace_importer.html",
"tracing/extras/importer/linux_perf/ftrace_importer_test.html",
"tracing/extras/importer/linux_perf/gesture_parser.html",
"tracing/extras/importer/linux_perf/gesture_parser_test.html",
"tracing/extras/importer/linux_perf/i915_parser.html",
"tracing/extras/importer/linux_perf/i915_parser_test.html",
"tracing/extras/importer/linux_perf/irq_parser.html",
"tracing/extras/importer/linux_perf/irq_parser_test.html",
"tracing/extras/importer/linux_perf/kfunc_parser.html",
"tracing/extras/importer/linux_perf/kfunc_parser_test.html",
"tracing/extras/importer/linux_perf/mali_parser.html",
"tracing/extras/importer/linux_perf/mali_parser_test.html",
"tracing/extras/importer/linux_perf/memreclaim_parser.html",
"tracing/extras/importer/linux_perf/memreclaim_parser_test.html",
"tracing/extras/importer/linux_perf/parser.html",
"tracing/extras/importer/linux_perf/power_parser.html",
"tracing/extras/importer/linux_perf/power_parser_test.html",
"tracing/extras/importer/linux_perf/regulator_parser.html",
"tracing/extras/importer/linux_perf/regulator_parser_test.html",
"tracing/extras/importer/linux_perf/sched_parser.html",
"tracing/extras/importer/linux_perf/sched_parser_test.html",
"tracing/extras/importer/linux_perf/sync_parser.html",
"tracing/extras/importer/linux_perf/sync_parser_test.html",
"tracing/extras/importer/linux_perf/workqueue_parser.html",
"tracing/extras/importer/linux_perf/workqueue_parser_test.html",
"tracing/extras/importer/trace2html_importer.html",
"tracing/extras/importer/trace2html_importer_test.html",
"tracing/extras/importer/trace_event_importer.html",
"tracing/extras/importer/trace_event_importer_perf_test.html",
"tracing/extras/importer/trace_event_importer_test.html",
"tracing/extras/importer/v8/v8_log_importer.html",
"tracing/extras/importer/v8/v8_log_importer_test.html",
"tracing/extras/importer/zip_importer.html",
"tracing/extras/lean_config.html",
"tracing/extras/net/net_async_slice.html",
"tracing/extras/net/net_async_slice_test.html",
"tracing/extras/net/net.html",
"tracing/extras/rail/animation_interaction_record.html",
"tracing/extras/rail/idle_interaction_record.html",
"tracing/extras/rail/load_interaction_record.html",
"tracing/extras/rail/rail_interaction_record.html",
"tracing/extras/rail/rail_interaction_record_test.html",
"tracing/extras/rail/rail_ir_finder.html",
"tracing/extras/rail/rail_ir_finder_test.html",
"tracing/extras/rail/rail_score.html",
"tracing/extras/rail/rail_score_test.html",
"tracing/extras/rail/response_interaction_record.html",
"tracing/extras/rail/response_interaction_record_test.html",
"tracing/extras/rail/stub_rail_interaction_record.html",
"tracing/extras/system_stats/system_stats_snapshot.html",
"tracing/extras/systrace_config.html",
"tracing/extras/tcmalloc/heap.html",
"tracing/extras/tcmalloc/heap_test.html",
"tracing/extras/tquery/filter_all_of.html",
"tracing/extras/tquery/filter_any_of.html",
"tracing/extras/tquery/filter_has_ancestor.html",
"tracing/extras/tquery/filter_has_duration.html",
"tracing/extras/tquery/filter_has_title.html",
"tracing/extras/tquery/filter_is_top_level.html",
"tracing/extras/tquery/tquery.html",
"tracing/extras/tquery/tquery_test.html",
"tracing/importer/empty_importer.html",
"tracing/importer/importer.html",
"tracing/model/alert.html",
"tracing/model/annotation.html",
"tracing/model/annotation_test.html",
"tracing/model/async_slice_group.html",
"tracing/model/async_slice_group_test.html",
"tracing/model/async_slice.html",
"tracing/model/attribute.html",
"tracing/model/attribute_test.html",
"tracing/model/comment_box_annotation.html",
"tracing/model/container_memory_dump.html",
"tracing/model/container_memory_dump_test.html",
"tracing/model/counter.html",
"tracing/model/counter_sample.html",
"tracing/model/counter_sample_test.html",
"tracing/model/counter_series.html",
"tracing/model/counter_test.html",
"tracing/model/cpu.html",
"tracing/model/cpu_slice.html",
"tracing/model/cpu_test.html",
"tracing/model/event.html",
"tracing/model/event_test.html",
"tracing/model/flow_event.html",
"tracing/model/frame.html",
"tracing/model/global_memory_dump.html",
"tracing/model/global_memory_dump_test.html",
"tracing/model/instant_event.html",
"tracing/model/interaction_record.html",
"tracing/model/kernel.html",
"tracing/model/kernel_test.html",
"tracing/model/memory_allocator_dump.html",
"tracing/model/memory_allocator_dump_test.html",
"tracing/model/model.html",
"tracing/model/model_indices_test.html",
"tracing/model/model_settings.html",
"tracing/model/model_settings_test.html",
"tracing/model/model_test.html",
"tracing/model/multi_async_slice_sub_view_test.html",
"tracing/model/object_collection.html",
"tracing/model/object_collection_test.html",
"tracing/model/object_instance.html",
"tracing/model/object_instance_test.html",
"tracing/model/object_snapshot.html",
"tracing/model/object_snapshot_test.html",
"tracing/model/process_base.html",
"tracing/model/process.html",
"tracing/model/process_memory_dump.html",
"tracing/model/process_memory_dump_test.html",
"tracing/model/process_test.html",
"tracing/model/proxy_selectable_item_test.html",
"tracing/model/rect_annotation.html",
"tracing/model/sample.html",
"tracing/model/sample_test.html",
"tracing/model/selectable_item_test.html",
"tracing/model/single_async_slice_sub_view_test.html",
"tracing/model/slice_group_test.html",
"tracing/model/slice.html",
"tracing/model/slice_test.html",
"tracing/model/thread.html",
"tracing/model/thread_slice.html",
"tracing/model/thread_test.html",
"tracing/model/thread_time_slice.html",
"tracing/model/timed_event.html",
"tracing/model/timed_event_test.html",
"tracing/model/time_to_object_instance_map_test.html",
"tracing/model/x_marker_annotation.html",
"tracing/trace2html.html",
"tracing/trace_viewer.html",
}
Add suppression for d8_tests.html to fix build.
Review URL: https://codereview.appspot.com/246710043
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
# Absolute path to the tracing/ directory (the parent of this file's directory);
# used to resolve the relative blacklist entries below.
_TRACING_DIR = os.path.abspath(os.path.join(
    os.path.dirname(__file__), os.path.pardir))
def IsTestExpectedToFail(test_file_name):
    """Return True if the given html/js file is not yet runnable by d8_runner.

    This offers a way to incrementally enable more html files that are
    runnable by d8. If you add new html/js files that are not compatible
    with d8_runner, please contact dsinclair@ or nduca@ for how to proceed.
    """
    absolute_path = os.path.abspath(test_file_name)
    return absolute_path in GetD8NonCompatibleFiles()
def GetD8NonCompatibleFiles():
    """Return the set of files expected to throw an exception when run by
    d8_runner.

    The set of absolute paths is built lazily from
    _REL_PATH_D8_BLACK_LIST_FILES on first call and cached in
    _D8_BLACK_LIST_FILES for subsequent calls.
    """
    if not _D8_BLACK_LIST_FILES:
        _D8_BLACK_LIST_FILES.update(
            os.path.join(_TRACING_DIR, rel_path)
            for rel_path in _REL_PATH_D8_BLACK_LIST_FILES)
    return _D8_BLACK_LIST_FILES
# Cache of absolute blacklist paths; populated lazily by
# GetD8NonCompatibleFiles() from _REL_PATH_D8_BLACK_LIST_FILES.
_D8_BLACK_LIST_FILES = set()
# TODO(dsinclair, nduca, nednguyen): burn down this set.
# (https://github.com/google/trace-viewer/issues/984)
_REL_PATH_D8_BLACK_LIST_FILES = {
"tracing/base/base64_test.html",
"tracing/base/bbox2.html",
"tracing/base/bbox2_test.html",
"tracing/base/color_test.html",
"tracing/base/deep_utils_test.html",
"tracing/base/event.html",
"tracing/base/event_target_test.html",
"tracing/base/extension_registry_basic.html",
"tracing/base/extension_registry.html",
"tracing/base/extension_registry_test.html",
"tracing/base/extension_registry_type_based.html",
"tracing/base/interval_tree_test.html",
"tracing/base/iteration_helpers_test.html",
"tracing/base/math.html",
"tracing/base/math_test.html",
"tracing/base/properties.html",
"tracing/base/properties_test.html",
"tracing/base/quad.html",
"tracing/base/quad_test.html",
"tracing/base/raf_test.html",
"tracing/base/range_test.html",
"tracing/base/range_utils_test.html",
"tracing/base/rect.html",
"tracing/base/rect_test.html",
"tracing/base/settings.html",
"tracing/base/settings_test.html",
"tracing/base/sorted_array_utils_test.html",
"tracing/base/statistics_test.html",
"tracing/base/task_test.html",
"tracing/base/tests.html",
"tracing/base/d8_tests.html",
"tracing/base/units/size_in_bytes_test.html",
"tracing/base/units/time_duration_test.html",
"tracing/base/units/time_stamp_test.html",
"tracing/base/units/time_test.html",
"tracing/base/unittest.html",
"tracing/base/unittest/html_test_results.html",
"tracing/base/unittest/interactive_test_runner.html",
"tracing/base/unittest/test_case_test.html",
"tracing/base/unittest_test.html",
"tracing/base/utils_test.html",
"tracing/core/auditor.html",
"tracing/core/filter_test.html",
"tracing/core/scripting_control.html",
"tracing/core/scripting_controller.html",
"tracing/core/scripting_controller_test.html",
"tracing/core/scripting_control_test.html",
"tracing/core/selection_controller.html",
"tracing/core/selection_controller_test.html",
"tracing/core/selection.html",
"tracing/core/selection_test.html",
"tracing/core/test_utils.html",
"tracing/extras/android/android_app.html",
"tracing/extras/android/android_auditor.html",
"tracing/extras/android/android_auditor_test.html",
"tracing/extras/android/android_model_helper.html",
"tracing/extras/android/android_model_helper_test.html",
"tracing/extras/chrome/cc/cc.html",
"tracing/extras/chrome/cc/display_item_list.html",
"tracing/extras/chrome/cc/display_item_list_test.html",
"tracing/extras/chrome/cc/input_latency_async_slice.html",
"tracing/extras/chrome/cc/input_latency_async_slice_test.html",
"tracing/extras/chrome/cc/layer_impl.html",
"tracing/extras/chrome/cc/layer_tree_host_impl.html",
"tracing/extras/chrome/cc/layer_tree_host_impl_test.html",
"tracing/extras/chrome/cc/layer_tree_impl.html",
"tracing/extras/chrome/cc/picture.html",
"tracing/extras/chrome/cc/picture_test.html",
"tracing/extras/chrome/cc/raster_task.html",
"tracing/extras/chrome/cc/raster_task_selection.html",
"tracing/extras/chrome/cc/raster_task_selection_test.html",
"tracing/extras/chrome/cc/region.html",
"tracing/extras/chrome/cc/render_pass.html",
"tracing/extras/chrome/cc/selection.html",
"tracing/extras/chrome/cc/tile.html",
"tracing/extras/chrome/cc/tile_test.html",
"tracing/extras/chrome/cc/util.html",
"tracing/extras/chrome/cc/util_test.html",
"tracing/extras/chrome/chrome_auditor.html",
"tracing/extras/chrome/chrome_auditor_test.html",
"tracing/extras/chrome/chrome_browser_helper_test.html",
"tracing/extras/chrome/chrome_model_helper_test.html",
"tracing/extras/chrome/chrome_test_utils.html",
"tracing/extras/chrome_config.html",
"tracing/extras/chrome/gpu/gpu_async_slice.html",
"tracing/extras/chrome/gpu/gpu_async_slice_test.html",
"tracing/extras/chrome/gpu/state.html",
"tracing/extras/chrome/gpu/state_test.html",
"tracing/extras/chrome/layout_object_test.html",
"tracing/extras/full_config.html",
"tracing/extras/importer/battor_importer.html",
"tracing/extras/importer/battor_importer_test.html",
"tracing/extras/importer/ddms_importer.html",
"tracing/extras/importer/ddms_importer_test.html",
"tracing/extras/importer/etw/etw_importer.html",
"tracing/extras/importer/etw/etw_importer_test.html",
"tracing/extras/importer/etw/eventtrace_parser.html",
"tracing/extras/importer/etw/eventtrace_parser_test.html",
"tracing/extras/importer/etw/parser.html",
"tracing/extras/importer/etw/process_parser.html",
"tracing/extras/importer/etw/process_parser_test.html",
"tracing/extras/importer/etw/thread_parser.html",
"tracing/extras/importer/etw/thread_parser_test.html",
"tracing/extras/importer/gzip_importer.html",
"tracing/extras/importer/gzip_importer_test.html",
"tracing/extras/importer/jszip.html",
"tracing/extras/importer/linux_perf/android_parser.html",
"tracing/extras/importer/linux_perf/android_parser_test.html",
"tracing/extras/importer/linux_perf/bus_parser.html",
"tracing/extras/importer/linux_perf/bus_parser_test.html",
"tracing/extras/importer/linux_perf/clock_parser.html",
"tracing/extras/importer/linux_perf/clock_parser_test.html",
"tracing/extras/importer/linux_perf/cpufreq_parser.html",
"tracing/extras/importer/linux_perf/cpufreq_parser_test.html",
"tracing/extras/importer/linux_perf/disk_parser.html",
"tracing/extras/importer/linux_perf/disk_parser_test.html",
"tracing/extras/importer/linux_perf/drm_parser.html",
"tracing/extras/importer/linux_perf/drm_parser_test.html",
"tracing/extras/importer/linux_perf/exynos_parser.html",
"tracing/extras/importer/linux_perf/exynos_parser_test.html",
"tracing/extras/importer/linux_perf/ftrace_importer.html",
"tracing/extras/importer/linux_perf/ftrace_importer_test.html",
"tracing/extras/importer/linux_perf/gesture_parser.html",
"tracing/extras/importer/linux_perf/gesture_parser_test.html",
"tracing/extras/importer/linux_perf/i915_parser.html",
"tracing/extras/importer/linux_perf/i915_parser_test.html",
"tracing/extras/importer/linux_perf/irq_parser.html",
"tracing/extras/importer/linux_perf/irq_parser_test.html",
"tracing/extras/importer/linux_perf/kfunc_parser.html",
"tracing/extras/importer/linux_perf/kfunc_parser_test.html",
"tracing/extras/importer/linux_perf/mali_parser.html",
"tracing/extras/importer/linux_perf/mali_parser_test.html",
"tracing/extras/importer/linux_perf/memreclaim_parser.html",
"tracing/extras/importer/linux_perf/memreclaim_parser_test.html",
"tracing/extras/importer/linux_perf/parser.html",
"tracing/extras/importer/linux_perf/power_parser.html",
"tracing/extras/importer/linux_perf/power_parser_test.html",
"tracing/extras/importer/linux_perf/regulator_parser.html",
"tracing/extras/importer/linux_perf/regulator_parser_test.html",
"tracing/extras/importer/linux_perf/sched_parser.html",
"tracing/extras/importer/linux_perf/sched_parser_test.html",
"tracing/extras/importer/linux_perf/sync_parser.html",
"tracing/extras/importer/linux_perf/sync_parser_test.html",
"tracing/extras/importer/linux_perf/workqueue_parser.html",
"tracing/extras/importer/linux_perf/workqueue_parser_test.html",
"tracing/extras/importer/trace2html_importer.html",
"tracing/extras/importer/trace2html_importer_test.html",
"tracing/extras/importer/trace_event_importer.html",
"tracing/extras/importer/trace_event_importer_perf_test.html",
"tracing/extras/importer/trace_event_importer_test.html",
"tracing/extras/importer/v8/v8_log_importer.html",
"tracing/extras/importer/v8/v8_log_importer_test.html",
"tracing/extras/importer/zip_importer.html",
"tracing/extras/lean_config.html",
"tracing/extras/net/net_async_slice.html",
"tracing/extras/net/net_async_slice_test.html",
"tracing/extras/net/net.html",
"tracing/extras/rail/animation_interaction_record.html",
"tracing/extras/rail/idle_interaction_record.html",
"tracing/extras/rail/load_interaction_record.html",
"tracing/extras/rail/rail_interaction_record.html",
"tracing/extras/rail/rail_interaction_record_test.html",
"tracing/extras/rail/rail_ir_finder.html",
"tracing/extras/rail/rail_ir_finder_test.html",
"tracing/extras/rail/rail_score.html",
"tracing/extras/rail/rail_score_test.html",
"tracing/extras/rail/response_interaction_record.html",
"tracing/extras/rail/response_interaction_record_test.html",
"tracing/extras/rail/stub_rail_interaction_record.html",
"tracing/extras/system_stats/system_stats_snapshot.html",
"tracing/extras/systrace_config.html",
"tracing/extras/tcmalloc/heap.html",
"tracing/extras/tcmalloc/heap_test.html",
"tracing/extras/tquery/filter_all_of.html",
"tracing/extras/tquery/filter_any_of.html",
"tracing/extras/tquery/filter_has_ancestor.html",
"tracing/extras/tquery/filter_has_duration.html",
"tracing/extras/tquery/filter_has_title.html",
"tracing/extras/tquery/filter_is_top_level.html",
"tracing/extras/tquery/tquery.html",
"tracing/extras/tquery/tquery_test.html",
"tracing/importer/empty_importer.html",
"tracing/importer/importer.html",
"tracing/model/alert.html",
"tracing/model/annotation.html",
"tracing/model/annotation_test.html",
"tracing/model/async_slice_group.html",
"tracing/model/async_slice_group_test.html",
"tracing/model/async_slice.html",
"tracing/model/attribute.html",
"tracing/model/attribute_test.html",
"tracing/model/comment_box_annotation.html",
"tracing/model/container_memory_dump.html",
"tracing/model/container_memory_dump_test.html",
"tracing/model/counter.html",
"tracing/model/counter_sample.html",
"tracing/model/counter_sample_test.html",
"tracing/model/counter_series.html",
"tracing/model/counter_test.html",
"tracing/model/cpu.html",
"tracing/model/cpu_slice.html",
"tracing/model/cpu_test.html",
"tracing/model/event.html",
"tracing/model/event_test.html",
"tracing/model/flow_event.html",
"tracing/model/frame.html",
"tracing/model/global_memory_dump.html",
"tracing/model/global_memory_dump_test.html",
"tracing/model/instant_event.html",
"tracing/model/interaction_record.html",
"tracing/model/kernel.html",
"tracing/model/kernel_test.html",
"tracing/model/memory_allocator_dump.html",
"tracing/model/memory_allocator_dump_test.html",
"tracing/model/model.html",
"tracing/model/model_indices_test.html",
"tracing/model/model_settings.html",
"tracing/model/model_settings_test.html",
"tracing/model/model_test.html",
"tracing/model/multi_async_slice_sub_view_test.html",
"tracing/model/object_collection.html",
"tracing/model/object_collection_test.html",
"tracing/model/object_instance.html",
"tracing/model/object_instance_test.html",
"tracing/model/object_snapshot.html",
"tracing/model/object_snapshot_test.html",
"tracing/model/process_base.html",
"tracing/model/process.html",
"tracing/model/process_memory_dump.html",
"tracing/model/process_memory_dump_test.html",
"tracing/model/process_test.html",
"tracing/model/proxy_selectable_item_test.html",
"tracing/model/rect_annotation.html",
"tracing/model/sample.html",
"tracing/model/sample_test.html",
"tracing/model/selectable_item_test.html",
"tracing/model/single_async_slice_sub_view_test.html",
"tracing/model/slice_group_test.html",
"tracing/model/slice.html",
"tracing/model/slice_test.html",
"tracing/model/thread.html",
"tracing/model/thread_slice.html",
"tracing/model/thread_test.html",
"tracing/model/thread_time_slice.html",
"tracing/model/timed_event.html",
"tracing/model/timed_event_test.html",
"tracing/model/time_to_object_instance_map_test.html",
"tracing/model/x_marker_annotation.html",
"tracing/trace2html.html",
"tracing/trace_viewer.html",
}
|
"""
Models for notifications app.
"""
import json
import jsonpickle
import six
from django.conf import settings
from django.db import models
from django.utils.module_loading import import_string
@six.python_2_unicode_compatible
class SentNotification(models.Model):
    """
    Stores info on the notification that was sent.
    """
    # Lifecycle states for a sent notification.
    STATUS_PENDING = 0
    STATUS_SUCCESS = 1
    STATUS_FAILED = 2
    STATUS_USER_DISABLED = 3
    # (value, label) choices for the `status` field.  Use the named constants
    # so the choices cannot drift from the STATUS_* values above.
    STATUSES = (
        (STATUS_PENDING, 'Pending'),
        (STATUS_SUCCESS, 'Success'),
        (STATUS_FAILED, 'Failed'),
        (STATUS_USER_DISABLED, 'User Disabled')
    )
    text_content = models.TextField(null=True, blank=True)
    html_content = models.TextField(null=True, blank=True)
    sent_from = models.CharField(max_length=100, null=True, blank=True)
    recipients = models.CharField(max_length=2000)  # Comma separated list of emails or numbers
    subject = models.CharField(max_length=255, null=True, blank=True)
    extra_data = models.TextField(null=True, blank=True)  # json dictionary
    date_sent = models.DateTimeField()
    status = models.PositiveSmallIntegerField(choices=STATUSES, default=STATUS_PENDING)
    notification_class = models.CharField(max_length=255)  # dotted path of the notification class
    error_message = models.TextField(null=True, blank=True)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, default=None, null=True, on_delete=models.SET_NULL)
    attachments = models.TextField(null=True, blank=True)  # jsonpickle-encoded attachments

    def __str__(self):
        return self.notification_class

    def get_recipients(self):
        """
        Return the list of recipients for the notification. Recipient is defined by the notification class.
        """
        return self.recipients.split(',')

    def resend(self):
        """
        Re-sends the notification by calling the notification class' resend method.

        Returns the result of the resend call so callers (e.g. the admin
        "Resend" action) can report success/failure; the original dropped the
        return value, making the admin always report failure.
        """
        notification_class = import_string(self.notification_class)
        return notification_class.resend(self)

    def get_extra_data(self):
        """
        Return extra data that was saved, as a dict ({} when none was stored).
        """
        if not self.extra_data:
            return {}
        else:
            return json.loads(self.extra_data)

    def get_attachments(self):
        """Decode and return the stored attachments, or None if there are none."""
        # NOTE(review): jsonpickle can reconstruct arbitrary classes; only
        # trusted data should ever be stored in `attachments`.
        if self.attachments:
            return jsonpickle.loads(self.attachments)
        else:
            return None
@six.python_2_unicode_compatible
class Notification(models.Model):
    """
    NotificationClasses are created on app init.
    """
    # Dotted import path of the notification class; unique per registered class.
    notification_class = models.CharField(max_length=255, unique=True)
    # Human-friendly name shown in place of the class path when present.
    verbose_name = models.CharField(max_length=255, blank=True, null=True)
    # Whether users may opt out of this notification type.
    can_disable = models.BooleanField(default=True)

    def __str__(self):
        # Prefer the verbose name; fall back to the dotted class path.
        return self.verbose_name if self.verbose_name else self.notification_class
class UserNotification(models.Model):
    """
    Add a User Notification record, then add disabled notifications to disable records.
    On your user Admin, add the field user_notification
    """
    # One settings record per user; deleting the user removes the record.
    user = models.OneToOneField(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        primary_key=True
    )
    # Notification types this user has opted out of receiving.
    disabled_notifications = models.ManyToManyField(Notification)
Fixed a missing return statement in `resend`: without it, the "Resend" action in the admin always reported "The notification failed to resend."
"""
Models for notifications app.
"""
import json
import jsonpickle
import six
from django.conf import settings
from django.db import models
from django.utils.module_loading import import_string
@six.python_2_unicode_compatible
class SentNotification(models.Model):
    """
    Stores info on the notification that was sent.
    """
    # Lifecycle states for a sent notification.
    STATUS_PENDING = 0
    STATUS_SUCCESS = 1
    STATUS_FAILED = 2
    STATUS_USER_DISABLED = 3
    # (value, label) choices for the `status` field.
    STATUSES = (
        (0, 'Pending'),
        (1, 'Success'),
        (2, 'Failed'),
        (3, 'User Disabled')
    )
    text_content = models.TextField(null=True, blank=True)
    html_content = models.TextField(null=True, blank=True)
    sent_from = models.CharField(max_length=100, null=True, blank=True)
    recipients = models.CharField(max_length=2000)  # Comma separated list of emails or numbers
    subject = models.CharField(max_length=255, null=True, blank=True)
    extra_data = models.TextField(null=True, blank=True)  # json dictionary
    date_sent = models.DateTimeField()
    status = models.PositiveSmallIntegerField(choices=STATUSES, default=STATUS_PENDING)
    notification_class = models.CharField(max_length=255)  # dotted path of the notification class
    error_message = models.TextField(null=True, blank=True)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, default=None, null=True, on_delete=models.SET_NULL)
    attachments = models.TextField(null=True, blank=True)  # jsonpickle-encoded attachments

    def __str__(self):
        return self.notification_class

    def get_recipients(self):
        """
        Return the list of recipients for the notification. Recipient is defined by the notification class.
        """
        return self.recipients.split(',')

    def resend(self):
        """
        Re-sends the notification by calling the notification class' resend method
        and returns its result so callers (e.g. the admin action) can report success.
        """
        notification_class = import_string(self.notification_class)
        return notification_class.resend(self)

    def get_extra_data(self):
        """
        Return extra data that was saved ({} when none was stored).
        """
        if not self.extra_data:
            return {}
        else:
            return json.loads(self.extra_data)

    def get_attachments(self):
        """Decode and return the stored attachments, or None if there are none."""
        # NOTE(review): jsonpickle can reconstruct arbitrary classes; only
        # trusted data should ever be stored in `attachments`.
        if self.attachments:
            return jsonpickle.loads(self.attachments)
        else:
            return None
@six.python_2_unicode_compatible
class Notification(models.Model):
    """
    NotificationClasses are created on app init.
    """
    # Dotted import path of the notification class; unique per registered class.
    notification_class = models.CharField(max_length=255, unique=True)
    # Human-friendly name shown in place of the class path when present.
    verbose_name = models.CharField(max_length=255, blank=True, null=True)
    # Whether users may opt out of this notification type.
    can_disable = models.BooleanField(default=True)

    def __str__(self):
        # Prefer the verbose name; fall back to the dotted class path.
        return self.verbose_name if self.verbose_name else self.notification_class
class UserNotification(models.Model):
    """
    Add a User Notification record, then add disabled notifications to disable records.
    On your user Admin, add the field user_notification
    """
    # One settings record per user; deleting the user removes the record.
    user = models.OneToOneField(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        primary_key=True
    )
    # Notification types this user has opted out of receiving.
    disabled_notifications = models.ManyToManyField(Notification)
|
"""An improved base task implementing easy (and explicit) saving of outputs."""
import os
import logging
from inspect import getfullargspec
import numpy as np
from caput import pipeline, config, memh5
class MPILogFilter(logging.Filter):
    """Filter log entries by MPI rank.

    Also this will optionally add MPI rank information, and add an elapsed time
    entry.

    Parameters
    ----------
    add_mpi_info : boolean, optional
        Add MPI rank/size info to log records that don't already have it.
    level_rank0 : int
        Log level for messages from rank=0.
    level_all : int
        Log level for messages from all other ranks.
    """

    def __init__(
        self, add_mpi_info=True, level_rank0=logging.INFO, level_all=logging.WARN
    ):
        from mpi4py import MPI

        self.add_mpi_info = add_mpi_info
        self.level_rank0 = level_rank0
        self.level_all = level_all
        # Rank/size are read from the world communicator.
        self.comm = MPI.COMM_WORLD

    def filter(self, record):
        """Return True if `record` should be emitted.

        Rank-0 records pass at or above `level_rank0`; records from any other
        rank pass at or above `level_all`.
        """
        # Add MPI info if desired
        try:
            record.mpi_rank
        except AttributeError:
            if self.add_mpi_info:
                record.mpi_rank = self.comm.rank
                record.mpi_size = self.comm.size
        # NOTE(review): if `add_mpi_info` is False and a record arrives without
        # `mpi_rank`, the comparison below raises AttributeError — confirm all
        # records carry rank info in that configuration.

        # Add a new field with the elapsed time in seconds (as a float)
        record.elapsedTime = record.relativeCreated * 1e-3

        # Return whether we should filter the record or not.
        return (record.mpi_rank == 0 and record.levelno >= self.level_rank0) or (
            record.mpi_rank > 0 and record.levelno >= self.level_all
        )
def _log_level(x):
"""Interpret the input as a logging level.
Parameters
----------
x : int or str
Explicit integer logging level or one of 'DEBUG', 'INFO', 'WARN',
'ERROR' or 'CRITICAL'.
Returns
-------
level : int
"""
level_dict = {
"DEBUG": logging.DEBUG,
"INFO": logging.INFO,
"WARN": logging.WARN,
"WARNING": logging.WARN,
"ERROR": logging.ERROR,
"CRITICAL": logging.CRITICAL,
}
if isinstance(x, int):
return x
elif isinstance(x, str) and x in level_dict:
return level_dict[x.upper()]
else:
raise ValueError("Logging level %s not understood" % repr(x))
class SetMPILogging(pipeline.TaskBase):
    """A task used to configure MPI aware logging.

    Attributes
    ----------
    level_rank0, level_all : int or str
        Log level for rank=0, and other ranks respectively.
    """

    level_rank0 = config.Property(proptype=_log_level, default=logging.INFO)
    level_all = config.Property(proptype=_log_level, default=logging.WARN)

    def __init__(self):
        from mpi4py import MPI
        import math

        # Route Python warnings through the logging system as well.
        logging.captureWarnings(True)

        # Number of digits needed to print any rank in this communicator.
        rank_length = int(math.log10(MPI.COMM_WORLD.size)) + 1

        # %-style fragment like "[MPI %(mpi_rank)2d/%(mpi_size)2d]"; the %%
        # escapes survive the first interpolation of the field widths.
        mpi_fmt = "[MPI %%(mpi_rank)%id/%%(mpi_size)%id]" % (rank_length, rank_length)

        filt = MPILogFilter(level_all=self.level_all, level_rank0=self.level_rank0)

        # This uses the fact that caput.pipeline.Manager has already
        # attempted to set up the logging. We just insert our custom filter
        root_logger = logging.getLogger()
        ch = root_logger.handlers[0]
        ch.addFilter(filt)

        formatter = logging.Formatter(
            "%(elapsedTime)8.1fs "
            + mpi_fmt
            + " - %(levelname)-8s %(name)s: %(message)s"
        )
        ch.setFormatter(formatter)
class LoggedTask(pipeline.TaskBase):
    """Task base class that provides a per-task `logging` logger."""

    log_level = config.Property(proptype=_log_level, default=None)

    def __init__(self):
        # Name the logger after the fully qualified class name.
        logger_name = "%s.%s" % (self.__module__, self.__class__.__name__)
        self._log = logging.getLogger(logger_name)

        # Apply an explicit level only when one was configured.
        if self.log_level is not None:
            self.log.setLevel(self.log_level)

    @property
    def log(self):
        """The logger object for this task."""
        return self._log
class MPITask(pipeline.TaskBase):
    """Base class for MPI using tasks. Just ensures that the task gets a `comm`
    attribute.
    """

    # Communicator used by the task; set in __init__.
    comm = None

    def __init__(self):
        from mpi4py import MPI

        # Default to the world communicator covering all processes.
        self.comm = MPI.COMM_WORLD
class _AddRankLogAdapter(logging.LoggerAdapter):
"""Add the rank of the logging process to a log message.
Attributes
----------
calling_obj : object
An object with a `comm` property that will be queried for the rank.
"""
calling_obj = None
def process(self, msg, kwargs):
if "extra" not in kwargs:
kwargs["extra"] = {}
kwargs["extra"]["mpi_rank"] = self.calling_obj.comm.rank
kwargs["extra"]["mpi_size"] = self.calling_obj.comm.size
return msg, kwargs
class MPILoggedTask(MPITask, LoggedTask):
    """A task base that has MPI aware logging."""

    def __init__(self):
        # Set up both parents: MPITask provides `comm`, LoggedTask `_log`.
        MPITask.__init__(self)
        LoggedTask.__init__(self)

        # Wrap the plain logger in an adapter that stamps every record with
        # the MPI rank/size of this task's communicator.
        adapter = _AddRankLogAdapter(self._log, None)
        adapter.calling_obj = self
        self._log = adapter
class SingleTask(MPILoggedTask, pipeline.BasicContMixin):
"""Process a task with at most one input and output.
Both input and output are expected to be :class:`memh5.BasicCont` objects.
This class allows writing of the output when requested.
Tasks inheriting from this class should override `process` and optionally
:meth:`setup` or :meth:`finish`. They should not override :meth:`next`.
If the value of :attr:`input_root` is anything other than the string "None"
then the input will be read (using :meth:`read_input`) from the file
``self.input_root + self.input_filename``. If the input is specified both as
a filename and as a product key in the pipeline configuration, an error
will be raised upon initialization.
If the value of :attr:`output_root` is anything other than the string
"None" then the output will be written (using :meth:`write_output`) to the
file ``self.output_root + self.output_filename``.
Attributes
----------
save : bool
Whether to save the output to disk or not.
output_name : string
A python format string used to construct the filename. Valid identifiers are:
- `count`: an integer giving which iteration of the task is this.
- `tag`: a string identifier for the output derived from the
containers `tag` attribute. If that attribute is not present
`count` is used instead.
- `key`: the name of the output key.
- `task`: the (unqualified) name of the task.
- `output_root`: the value of the output root argument. This is deprecated
and is just used for legacy support. The default value of
`output_name` means the previous behaviour works.
output_root : string
Pipeline settable parameter giving the first part of the output path.
Deprecated in favour of `output_name`.
nan_check : bool
Check the output for NaNs (and infs) logging if they are present.
nan_dump : bool
If NaN's are found, dump the container to disk.
nan_skip : bool
If NaN's are found, don't pass on the output.
versions : dict
Keys are module names (str) and values are their version strings. This is
attached to output metadata.
pipeline_config : dict
Global pipeline configuration. This is attached to output metadata.
Raises
------
`caput.pipeline.PipelineRuntimeError`
If this is used as a baseclass to a task overriding `self.process` with variable length or optional arguments.
"""
# Whether to write the task's output to disk.
save = config.Property(default=False, proptype=bool)

# Deprecated path prefix; still available as {output_root} in `output_name`.
output_root = config.Property(default="", proptype=str)
# Format string used to construct the output file name (see class docstring).
output_name = config.Property(default="{output_root}{tag}.h5", proptype=str)

# NaN/Inf handling switches (see class docstring).
nan_check = config.Property(default=True, proptype=bool)
nan_skip = config.Property(default=True, proptype=bool)
nan_dump = config.Property(default=True, proptype=bool)

# Metadata to get attached to the output
versions = config.Property(default={}, proptype=dict)
pipeline_config = config.Property(default={}, proptype=dict)

# Number of completed `process` iterations; used in output names.
_count = 0

# Flag consulted by `next` to stop iteration.
done = False
# True when `process` takes no input; set in __init__.
_no_input = False
def __init__(self):
    """Inspect `process` and record whether it expects pipeline input."""
    super(SingleTask, self).__init__()

    # Inspect the `process` method to see how many arguments it takes.
    spec = getfullargspec(self.process)

    # Reject *args/**kwargs and default values: the pipeline requires a
    # fixed, explicit argument count.
    if spec.varargs or spec.varkw or spec.defaults:
        raise pipeline.PipelineRuntimeError(
            "`process` method may not have variable length or optional"
            " arguments."
        )

    # Count everything except `self`; zero means the task generates its
    # own input.
    self._no_input = (len(spec.args) - 1) == 0
def next(self, *input):
    """Should not need to override. Implement `process` instead.

    Runs `process` on the pipeline input (if any), tags the output,
    NaN-checks it, optionally writes it to disk, and returns it for the
    next task. Returns None (skipping the write phase) when `process`
    produces no output.
    """
    self.log.info("Starting next for task %s" % self.__class__.__name__)

    self.comm.Barrier()

    # This should only be called once.
    try:
        if self.done:
            raise pipeline.PipelineStopIteration()
    except AttributeError:
        # NOTE(review): `done` has a class-level default of False, so this
        # branch looks unreachable from here; presumably subclasses set
        # `done` in `process` to end iteration — confirm.
        self.done = True

    # Process input and fetch output
    if self._no_input:
        if len(input) > 0:
            # This should never happen. Just here to catch bugs.
            raise RuntimeError("Somehow `input` was set.")
        output = self.process()
    else:
        output = self.process(*input)

    # Return immediately if output is None to skip writing phase.
    if output is None:
        return

    # Set a tag in output if needed (propagate it from the first input).
    if "tag" not in output.attrs and len(input) > 0 and "tag" in input[0].attrs:
        output.attrs["tag"] = input[0].attrs["tag"]

    # Check for NaN's etc
    output = self._nan_process_output(output)

    # Write the output if needed
    self._save_output(output)

    # Increment internal counter
    self._count = self._count + 1

    self.log.info("Leaving next for task %s" % self.__class__.__name__)

    # Return the output for the next task
    return output
def finish(self):
    """Should not need to override. Implement `process_finish` instead.

    If the subclass defines `process_finish`, its output is NaN-checked,
    optionally saved, and returned. Otherwise nothing is done.

    Returns
    -------
    output : memh5 container or None
        The final output, or None if there is no `process_finish` or its
        output was skipped.
    """
    self.log.info("Starting finish for task %s" % self.__class__.__name__)

    # Use an explicit attribute test rather than wrapping the whole body
    # in `except AttributeError`: the old pattern silently swallowed
    # genuine AttributeErrors raised *inside* process_finish (or the
    # NaN-check/save helpers), misreporting them as "no finish".
    if not hasattr(self, "process_finish"):
        self.log.info("No finish for task %s" % self.__class__.__name__)
        return None

    output = self.process_finish()

    # Check for NaN's etc
    output = self._nan_process_output(output)

    # Write the output if needed
    self._save_output(output)

    self.log.info("Leaving finish for task %s" % self.__class__.__name__)
    return output
def _save_output(self, output):
    """Write `output` to disk when `self.save` is set; no-op otherwise."""
    # Routine to write output if needed.
    if self.save and output is not None:

        # add metadata to output
        metadata = {"versions": self.versions, "config": self.pipeline_config}
        for key, value in metadata.items():
            output.add_history(key, value)

        # Create a tag for the output file name, falling back to the
        # iteration counter when the container carries no tag.
        tag = output.attrs["tag"] if "tag" in output.attrs else self._count

        # Construct the filename from the configured format string.
        # NOTE(review): `self._out_keys` is presumably provided by the
        # pipeline base class — confirm it is always present.
        name_parts = {
            "tag": tag,
            "count": self._count,
            "task": self.__class__.__name__,
            "key": self._out_keys[0] if self._out_keys else "",
            "output_root": self.output_root,
        }
        outfile = self.output_name.format(**name_parts)

        # Expand any variables in the path (e.g. `~` and `$VAR`).
        outfile = os.path.expanduser(outfile)
        outfile = os.path.expandvars(outfile)

        self.log.debug("Writing output %s to disk.", outfile)
        self.write_output(outfile, output)
def _nan_process_output(self, output):
# Process the output to check for NaN's
# Returns the output or, None if it should be skipped
if self.nan_check:
nan_found = self._nan_check_walk(output)
if nan_found and self.nan_dump:
# Construct the filename
tag = output.attrs["tag"] if "tag" in output.attrs else self._count
outfile = "nandump_" + self.__class__.__name__ + "_" + str(tag) + ".h5"
self.log.debug("NaN found. Dumping %s", outfile)
self.write_output(outfile, output)
if nan_found and self.nan_skip:
self.log.debug("NaN found. Skipping output.")
return None
return output
def _nan_check_walk(self, cont):
    """Walk a memh5 container tree checking datasets for NaN's and Inf's.

    Logs any problems found. Returns True (on every rank) if any rank
    found a non-finite value.
    """
    from mpi4py import MPI

    if isinstance(cont, memh5.MemDiskGroup):
        cont = cont._data

    stack = [cont]
    found = False

    # Walk over the container tree...
    while stack:
        n = stack.pop()

        # Check the dataset for non-finite numbers
        if isinstance(n, memh5.MemDataset):
            # Slice once up front; the redundant second `arr = n[:]`
            # that used to sit inside the try had no effect and has
            # been removed.
            arr = n[:]
            # Try to test for NaN's and infs. This will fail for compound
            # datatypes...
            try:
                is_nan = np.isnan(arr)
                is_inf = np.isinf(arr)
            except TypeError:
                continue

            if is_nan.any():
                self.log.info(
                    "NaN's found in dataset %s [%i of %i elements]",
                    n.name,
                    is_nan.sum(),
                    arr.size,
                )
                found = True

            if is_inf.any():
                self.log.info(
                    "Inf's found in dataset %s [%i of %i elements]",
                    n.name,
                    is_inf.sum(),
                    arr.size,
                )
                found = True

        elif isinstance(n, (memh5.MemGroup, memh5.MemDiskGroup)):
            for item in n.values():
                stack.append(item)

    # All ranks need to know if any rank found a NaN/Inf
    found = self.comm.allreduce(found, op=MPI.MAX)

    return found
class ReturnLastInputOnFinish(SingleTask):
    """Workaround for `caput.pipeline` issues.

    Every call to `process` stashes a reference to its argument, and
    `process_finish` hands the most recent one back.
    """

    x = None

    def process(self, x):
        """Cache a reference to the input.

        Parameters
        ----------
        x : object
            Replaces any previously cached input.
        """
        self.x = x

    def process_finish(self):
        """Return the most recently cached input.

        Returns
        -------
        x : object
            Last input to process.
        """
        return self.x
class ReturnFirstInputOnFinish(SingleTask):
    """Workaround for `caput.pipeline` issues.

    The first call to `process` caches its argument; later calls are
    ignored, and `process_finish` returns the cached value.
    """

    x = None

    def process(self, x):
        """Cache a reference to the first input seen.

        Parameters
        ----------
        x : object
            Ignored if an input has already been cached.
        """
        # Guard clause: keep only the very first input.
        if self.x is not None:
            return
        self.x = x

    def process_finish(self):
        """Return the first input passed to process.

        Returns
        -------
        x : object
            First input to process.
        """
        return self.x
class Delete(SingleTask):
    """Delete pipeline products to free memory."""

    def process(self, x):
        """Drop the reference to `x` and force a garbage collection pass.

        Parameters
        ----------
        x : object
            The object to be deleted.
        """
        import gc

        self.log.info("Deleting %s" % type(x))

        # Release our reference and immediately collect so the memory is
        # reclaimed before the next pipeline step runs.
        del x
        gc.collect()

        return None
def group_tasks(*tasks):
    """Create a Task that groups a bunch of tasks together.

    This method creates a class that inherits from all the subtasks, and
    calls each `process` method in sequence, passing the output of one to the
    input of the next.

    This should be used like:

    >>> class SuperTask(group_tasks(SubTask1, SubTask2)):
    >>> pass

    At the moment if the ensemble has more than one setup method, the
    SuperTask will need to implement an override that correctly calls each.
    """

    # The generated class inherits from *all* the grouped tasks so their
    # attributes (e.g. config properties) remain available.
    class TaskGroup(*tasks):
        # TODO: figure out how to make the setup work at the moment it just picks the first in MRO
        # def setup(self, x): pass

        def process(self, x):
            # Chain the subtask `process` methods, threading the product
            # through them in the order the tasks were given. Note the
            # unbound call: each subtask method runs against `self`.
            for t in tasks:
                self.log.debug("Calling process for subtask %s", t.__name__)
                x = t.process(self, x)
            return x

    return TaskGroup
perf(SingleTask): remove no-effect-__getitem__ call
"""An improved base task implementing easy (and explicit) saving of outputs."""
import os
import logging
from inspect import getfullargspec
import numpy as np
from caput import pipeline, config, memh5
class MPILogFilter(logging.Filter):
    """Filter log entries by MPI rank.

    Also this will optionally add MPI rank information, and add an elapsed time
    entry.

    Parameters
    ----------
    add_mpi_info : boolean, optional
        Add MPI rank/size info to log records that don't already have it.
    level_rank0 : int
        Log level for messages from rank=0.
    level_all : int
        Log level for messages from all other ranks.
    """

    def __init__(
        self, add_mpi_info=True, level_rank0=logging.INFO, level_all=logging.WARN
    ):
        from mpi4py import MPI

        self.add_mpi_info = add_mpi_info

        self.level_rank0 = level_rank0
        self.level_all = level_all

        self.comm = MPI.COMM_WORLD

    def filter(self, record):
        """Annotate `record` and decide whether it should be emitted."""
        # Add MPI info if desired; an existing mpi_rank attribute (e.g. set
        # by an adapter) is left untouched.
        try:
            record.mpi_rank
        except AttributeError:
            if self.add_mpi_info:
                record.mpi_rank = self.comm.rank
                record.mpi_size = self.comm.size

        # Add a new field with the elapsed time in seconds (as a float)
        record.elapsedTime = record.relativeCreated * 1e-3

        # Return whether we should filter the record or not: rank 0 uses
        # `level_rank0`, every other rank uses `level_all`.
        return (record.mpi_rank == 0 and record.levelno >= self.level_rank0) or (
            record.mpi_rank > 0 and record.levelno >= self.level_all
        )
def _log_level(x):
"""Interpret the input as a logging level.
Parameters
----------
x : int or str
Explicit integer logging level or one of 'DEBUG', 'INFO', 'WARN',
'ERROR' or 'CRITICAL'.
Returns
-------
level : int
"""
level_dict = {
"DEBUG": logging.DEBUG,
"INFO": logging.INFO,
"WARN": logging.WARN,
"WARNING": logging.WARN,
"ERROR": logging.ERROR,
"CRITICAL": logging.CRITICAL,
}
if isinstance(x, int):
return x
elif isinstance(x, str) and x in level_dict:
return level_dict[x.upper()]
else:
raise ValueError("Logging level %s not understood" % repr(x))
class SetMPILogging(pipeline.TaskBase):
    """A task used to configure MPI aware logging.

    Attributes
    ----------
    level_rank0, level_all : int or str
        Log level for rank=0, and other ranks respectively.
    """

    level_rank0 = config.Property(proptype=_log_level, default=logging.INFO)
    level_all = config.Property(proptype=_log_level, default=logging.WARN)

    def __init__(self):

        from mpi4py import MPI
        import math

        logging.captureWarnings(True)

        # Width of the rank field: enough decimal digits for the largest
        # rank (size 1 -> 1 digit, size 10..99 -> 2 digits, ...).
        rank_length = int(math.log10(MPI.COMM_WORLD.size)) + 1

        # Doubled % signs survive the first substitution so the result is a
        # logging format fragment like "[MPI %(mpi_rank)2d/%(mpi_size)2d]".
        mpi_fmt = "[MPI %%(mpi_rank)%id/%%(mpi_size)%id]" % (rank_length, rank_length)
        filt = MPILogFilter(level_all=self.level_all, level_rank0=self.level_rank0)

        # This uses the fact that caput.pipeline.Manager has already
        # attempted to set up the logging. We just insert our custom filter
        root_logger = logging.getLogger()
        ch = root_logger.handlers[0]
        ch.addFilter(filt)

        formatter = logging.Formatter(
            "%(elapsedTime)8.1fs "
            + mpi_fmt
            + " - %(levelname)-8s %(name)s: %(message)s"
        )

        ch.setFormatter(formatter)
class LoggedTask(pipeline.TaskBase):
    """A task with logger support."""

    log_level = config.Property(proptype=_log_level, default=None)

    def __init__(self):

        # Create a per-task logger named after the defining module and class.
        logger_name = "%s.%s" % (self.__module__, self.__class__.__name__)
        self._log = logging.getLogger(logger_name)

        # Apply an explicitly configured level, if any.
        if self.log_level is not None:
            self.log.setLevel(self.log_level)

    @property
    def log(self):
        """The logger object for this task."""
        return self._log
class MPITask(pipeline.TaskBase):
    """Base class for tasks that use MPI.

    Just ensures that the task gets a `comm` attribute.
    """

    comm = None

    def __init__(self):

        from mpi4py import MPI

        # Default to the world communicator; subclasses may replace it.
        self.comm = MPI.COMM_WORLD
class _AddRankLogAdapter(logging.LoggerAdapter):
"""Add the rank of the logging process to a log message.
Attributes
----------
calling_obj : object
An object with a `comm` property that will be queried for the rank.
"""
calling_obj = None
def process(self, msg, kwargs):
if "extra" not in kwargs:
kwargs["extra"] = {}
kwargs["extra"]["mpi_rank"] = self.calling_obj.comm.rank
kwargs["extra"]["mpi_size"] = self.calling_obj.comm.size
return msg, kwargs
class MPILoggedTask(MPITask, LoggedTask):
    """A task base that has MPI aware logging."""

    def __init__(self):

        # Initialise the base classes explicitly (both have __init__ side
        # effects: `comm` from MPITask, `_log` from LoggedTask).
        MPITask.__init__(self)
        LoggedTask.__init__(self)

        # Replace the logger with a LogAdapter instance that adds MPI process
        # information
        logadapter = _AddRankLogAdapter(self._log, None)
        logadapter.calling_obj = self
        self._log = logadapter
class SingleTask(MPILoggedTask, pipeline.BasicContMixin):
    """Process a task with at most one input and output.

    Both input and output are expected to be :class:`memh5.BasicCont` objects.
    This class allows writing of the output when requested.

    Tasks inheriting from this class should override `process` and optionally
    :meth:`setup` or :meth:`finish`. They should not override :meth:`next`.

    If the value of :attr:`input_root` is anything other than the string "None"
    then the input will be read (using :meth:`read_input`) from the file
    ``self.input_root + self.input_filename``. If the input is specified both as
    a filename and as a product key in the pipeline configuration, an error
    will be raised upon initialization.

    If the value of :attr:`output_root` is anything other than the string
    "None" then the output will be written (using :meth:`write_output`) to the
    file ``self.output_root + self.output_filename``.

    Attributes
    ----------
    save : bool
        Whether to save the output to disk or not.
    output_name : string
        A python format string used to construct the filename. Valid identifiers are:
          - `count`: an integer giving which iteration of the task is this.
          - `tag`: a string identifier for the output derived from the
                   containers `tag` attribute. If that attribute is not present
                   `count` is used instead.
          - `key`: the name of the output key.
          - `task`: the (unqualified) name of the task.
          - `output_root`: the value of the output root argument. This is deprecated
                           and is just used for legacy support. The default value of
                           `output_name` means the previous behaviour works.
    output_root : string
        Pipeline settable parameter giving the first part of the output path.
        Deprecated in favour of `output_name`.
    nan_check : bool
        Check the output for NaNs (and infs) logging if they are present.
    nan_dump : bool
        If NaN's are found, dump the container to disk.
    nan_skip : bool
        If NaN's are found, don't pass on the output.
    versions : dict
        Keys are module names (str) and values are their version strings. This is
        attached to output metadata.
    pipeline_config : dict
        Global pipeline configuration. This is attached to output metadata.

    Raises
    ------
    `caput.pipeline.PipelineRuntimeError`
        If this is used as a baseclass to a task overriding `self.process`
        with variable length or optional arguments.
    """

    save = config.Property(default=False, proptype=bool)
    output_root = config.Property(default="", proptype=str)
    output_name = config.Property(default="{output_root}{tag}.h5", proptype=str)
    nan_check = config.Property(default=True, proptype=bool)
    nan_skip = config.Property(default=True, proptype=bool)
    nan_dump = config.Property(default=True, proptype=bool)

    # Metadata to get attached to the output
    versions = config.Property(default={}, proptype=dict)
    pipeline_config = config.Property(default={}, proptype=dict)

    # Iteration counter, "done" flag and no-input marker (set in __init__).
    _count = 0
    done = False
    _no_input = False

    def __init__(self):
        """Validate the subclass `process` signature and set up the task."""
        super(SingleTask, self).__init__()

        # Inspect the `process` method to see how many arguments it takes.
        pro_argspec = getfullargspec(self.process)
        n_args = len(pro_argspec.args) - 1

        # `process` must take a fixed, explicit set of inputs.
        if pro_argspec.varargs or pro_argspec.varkw or pro_argspec.defaults:
            msg = (
                "`process` method may not have variable length or optional"
                " arguments."
            )
            raise pipeline.PipelineRuntimeError(msg)

        if n_args == 0:
            self._no_input = True
        else:
            self._no_input = False

    def next(self, *input):
        """Should not need to override. Implement `process` instead."""

        self.log.info("Starting next for task %s" % self.__class__.__name__)

        self.comm.Barrier()

        # This should only be called once.
        try:
            if self.done:
                raise pipeline.PipelineStopIteration()
        except AttributeError:
            # NOTE(review): `done` is a class attribute above, so this
            # branch looks unreachable — confirm intent.
            self.done = True

        # Process input and fetch output
        if self._no_input:
            if len(input) > 0:
                # This should never happen.  Just here to catch bugs.
                raise RuntimeError("Somehow `input` was set.")
            output = self.process()
        else:
            output = self.process(*input)

        # Return immediately if output is None to skip writing phase.
        if output is None:
            return

        # Set a tag in output if needed, inheriting it from the first input.
        if "tag" not in output.attrs and len(input) > 0 and "tag" in input[0].attrs:
            output.attrs["tag"] = input[0].attrs["tag"]

        # Check for NaN's etc
        output = self._nan_process_output(output)

        # Write the output if needed
        self._save_output(output)

        # Increment internal counter
        self._count = self._count + 1

        self.log.info("Leaving next for task %s" % self.__class__.__name__)

        # Return the output for the next task
        return output

    def finish(self):
        """Should not need to override. Implement `process_finish` instead."""

        self.log.info("Starting finish for task %s" % self.__class__.__name__)

        # NOTE(review): this except also swallows AttributeErrors raised
        # *inside* process_finish and the helpers below, reporting them as
        # "No finish" — consider an explicit hasattr check instead.
        try:
            output = self.process_finish()

            # Check for NaN's etc
            output = self._nan_process_output(output)

            # Write the output if needed
            self._save_output(output)

            self.log.info("Leaving finish for task %s" % self.__class__.__name__)

            return output

        except AttributeError:
            self.log.info("No finish for task %s" % self.__class__.__name__)
            pass

    def _save_output(self, output):
        """Write `output` to disk when `self.save` is set; no-op otherwise."""
        # Routine to write output if needed.

        if self.save and output is not None:

            # add metadata to output
            metadata = {"versions": self.versions, "config": self.pipeline_config}
            for key, value in metadata.items():
                output.add_history(key, value)

            # Create a tag for the output file name, falling back to the
            # iteration counter when the container carries no tag.
            tag = output.attrs["tag"] if "tag" in output.attrs else self._count

            # Construct the filename
            name_parts = {
                "tag": tag,
                "count": self._count,
                "task": self.__class__.__name__,
                "key": self._out_keys[0] if self._out_keys else "",
                "output_root": self.output_root,
            }
            outfile = self.output_name.format(**name_parts)

            # Expand any variables in the path (e.g. `~` and `$VAR`)
            outfile = os.path.expanduser(outfile)
            outfile = os.path.expandvars(outfile)

            self.log.debug("Writing output %s to disk.", outfile)
            self.write_output(outfile, output)

    def _nan_process_output(self, output):
        """Apply the configured NaN policy to `output`."""
        # Process the output to check for NaN's
        # Returns the output or, None if it should be skipped

        if self.nan_check:
            nan_found = self._nan_check_walk(output)

            if nan_found and self.nan_dump:
                # Construct the filename for the debug dump
                tag = output.attrs["tag"] if "tag" in output.attrs else self._count
                outfile = "nandump_" + self.__class__.__name__ + "_" + str(tag) + ".h5"
                self.log.debug("NaN found. Dumping %s", outfile)
                self.write_output(outfile, output)

            if nan_found and self.nan_skip:
                self.log.debug("NaN found. Skipping output.")
                return None

        return output

    def _nan_check_walk(self, cont):
        """Walk a memh5 container checking datasets for NaN's and Inf's.

        Logs any problems and returns True (on all ranks) if any rank
        found a non-finite value.
        """
        # Walk through a memh5 container and check for NaN's and Inf's.
        # Logs any issues found and returns True if there were any found.
        from mpi4py import MPI

        if isinstance(cont, memh5.MemDiskGroup):
            cont = cont._data

        stack = [cont]
        found = False

        # Walk over the container tree...
        while stack:
            n = stack.pop()

            # Check the dataset for non-finite numbers
            if isinstance(n, memh5.MemDataset):
                # Try to test for NaN's and infs. This will fail for compound datatypes...
                arr = n[:]
                try:
                    is_nan = np.isnan(arr)
                    is_inf = np.isinf(arr)
                except TypeError:
                    continue

                if is_nan.any():
                    self.log.info(
                        "NaN's found in dataset %s [%i of %i elements]",
                        n.name,
                        is_nan.sum(),
                        arr.size,
                    )
                    found = True

                if is_inf.any():
                    self.log.info(
                        "Inf's found in dataset %s [%i of %i elements]",
                        n.name,
                        is_inf.sum(),
                        arr.size,
                    )
                    found = True

            elif isinstance(n, (memh5.MemGroup, memh5.MemDiskGroup)):
                for item in n.values():
                    stack.append(item)

        # All ranks need to know if any rank found a NaN/Inf
        found = self.comm.allreduce(found, op=MPI.MAX)

        return found
class ReturnLastInputOnFinish(SingleTask):
    """Workaround for `caput.pipeline` issues.

    This caches its input on every call to `process` and then returns
    the last one for a finish call.
    """

    # Most recently cached input (replaced on every process call).
    x = None

    def process(self, x):
        """Take a reference to the input.

        Parameters
        ----------
        x : object
        """
        self.x = x

    def process_finish(self):
        """Return the last input to process.

        Returns
        -------
        x : object
            Last input to process.
        """
        return self.x
class ReturnFirstInputOnFinish(SingleTask):
    """Workaround for `caput.pipeline` issues.

    This caches its input on the first call to `process` and
    then returns it for a finish call.
    """

    # Cached first input (None until the first process call).
    x = None

    def process(self, x):
        """Take a reference to the input.

        Parameters
        ----------
        x : object
        """
        # Only the very first input is kept; later ones are ignored.
        if self.x is None:
            self.x = x

    def process_finish(self):
        """Return the first input passed to process.

        Returns
        -------
        x : object
            First input to process.
        """
        return self.x
class Delete(SingleTask):
    """Delete pipeline products to free memory."""

    def process(self, x):
        """Delete the input and collect garbage.

        Parameters
        ----------
        x : object
            The object to be deleted.
        """
        import gc

        self.log.info("Deleting %s" % type(x))

        # Drop our reference and collect immediately so memory is freed
        # before the next pipeline step.
        del x
        gc.collect()

        return None
def group_tasks(*tasks):
    """Build a Task class that chains several subtasks together.

    The returned class inherits from every subtask and its `process`
    method runs each subtask's `process` in turn, feeding the output of
    one into the input of the next.

    Use it like:

    >>> class SuperTask(group_tasks(SubTask1, SubTask2)):
    >>> pass

    If the ensemble defines more than one `setup` method the SuperTask
    must override `setup` to call each one correctly.
    """

    class TaskGroup(*tasks):
        # TODO: figure out how to make the setup work at the moment it just picks the first in MRO
        # def setup(self, x): pass

        def process(self, x):
            product = x
            # Run the subtask methods unbound against `self`, in the
            # order the tasks were supplied.
            for task in tasks:
                self.log.debug("Calling process for subtask %s", task.__name__)
                product = task.process(self, product)
            return product

    return TaskGroup
|
# -*- coding: utf-8 -*-
import re
import sublime, sublime_plugin
class TotalFileCommand(sublime_plugin.TextCommand):
    """Reformat '£<amount> <description>' lines and append their total."""

    def run(self, edit):
        cleaned = []
        numbers = []
        region = sublime.Region(0, self.view.size())

        for lineRegion in self.view.lines(region):
            line = self.view.substr(lineRegion)
            # Stop at the first empty line so any previously appended
            # total/footer is discarded rather than double counted.
            if (line == ""):
                break
            try:
                # u"" + re.U instead of the Python-2-only ur"" prefix;
                # \s* also tolerates a missing space after the amount.
                m = re.match(u"£\s*([0-9\.,]{1,9})\s*(.*)", line, re.U)
                if (m):
                    cost = float(m.group(1).strip(' '))
                    numbers.append(cost)
                    desc = m.group(2)
                    cleaned.append(u"£{0:>9.2f} {1}".format(cost, desc))
                else:
                    # Keep non-matching lines; previously they were
                    # silently dropped from the output.
                    cleaned.append(line)
            except ValueError:
                # Amounts like "1,234" fail float(); keep the raw line.
                cleaned.append(line)

        total = sum(numbers)
        # Guard before stripping trailing blanks: indexing cleaned[-1]
        # raised IndexError when no lines were cleaned at all.
        if (len(cleaned) > 0):
            while cleaned[-1].strip() == '':
                del cleaned[-1]

        cleaned.append("")
        cleaned.append(u"£{0:>9.2f} Total".format(total))
        cleaned = '\n'.join(cleaned)

        self.view.erase(edit, region)
        self.view.insert(edit, 0, cleaned)
Handle non-matching lines that don't raise a ValueError (previously they were silently dropped), and skip the trailing-empty-line cleanup when there are no cleaned lines.
# -*- coding: utf-8 -*-
import re
import sublime, sublime_plugin
class TotalFileCommand(sublime_plugin.TextCommand):
    """Reformat '£<amount> <description>' lines in the view and append a total."""

    def run(self, edit):
        cleaned = []
        numbers = []
        region = sublime.Region(0, self.view.size());

        for lineRegion in self.view.lines(region):
            line = self.view.substr(lineRegion)
            # Stop at the first empty line so any previously appended
            # total/footer is discarded rather than double counted.
            if (line == ""):
                break
            try:
                m = re.match(u"£\s*([0-9\.,]{1,9})\s*(.*)", line, re.U)
                if (m):
                    cost = float(m.group(1).strip(' '))
                    numbers.append(cost)
                    desc = m.group(2)
                    # Re-emit the line with the amount right-aligned.
                    cleaned.append(u"£{0:>9.2f} {1}".format(cost, desc))
                else:
                    # Non-matching lines are kept verbatim.
                    cleaned.append(line)
            except ValueError:
                # float() fails on amounts like "1,234"; keep the raw line.
                cleaned.append(line)

        total = sum(numbers)
        # Strip trailing blank lines, guarding against an empty result.
        if (len(cleaned) > 0):
            while cleaned[-1].strip() == '':
                del cleaned[-1]

        cleaned.append("")
        cleaned.append(u"£{0:>9.2f} Total".format(total))
        cleaned = '\n'.join(cleaned)
        # Replace the whole buffer with the cleaned text.
        #edit = self.view.begin_edit("")
        self.view.erase(edit, region)
        self.view.insert(edit, 0, cleaned)
        #self.view.end_edit(edit)
|
# Copyright 2009-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for using Python's :mod:`json` module with BSON documents.
This module provides two helper methods `dumps` and `loads` that wrap the
native :mod:`json` methods and provide explicit BSON conversion to and from
JSON. :class:`~bson.json_util.JSONOptions` provides a way to control how JSON
is emitted and parsed, with the default being the Relaxed Extended JSON format.
:mod:`~bson.json_util` can also generate Canonical or legacy `Extended JSON`_
when :const:`CANONICAL_JSON_OPTIONS` or :const:`LEGACY_JSON_OPTIONS` is
provided, respectively.
.. _Extended JSON: https://github.com/mongodb/specifications/blob/master/source/extended-json.rst
Example usage (deserialization):
.. doctest::
>>> from bson.json_util import loads
>>> loads('[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$scope": {}, "$code": "function x() { return 1; }"}}, {"bin": {"$type": "80", "$binary": "AQIDBA=="}}]')
[{'foo': [1, 2]}, {'bar': {'hello': 'world'}}, {'code': Code('function x() { return 1; }', {})}, {'bin': Binary(b'...', 128)}]
Example usage with :const:`RELAXED_JSON_OPTIONS` (the default):
.. doctest::
>>> from bson import Binary, Code
>>> from bson.json_util import dumps
>>> dumps([{'foo': [1, 2]},
... {'bar': {'hello': 'world'}},
... {'code': Code("function x() { return 1; }")},
... {'bin': Binary(b"\x01\x02\x03\x04")}])
'[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }"}}, {"bin": {"$binary": {"base64": "AQIDBA==", "subType": "00"}}}]'
Example usage (with :const:`CANONICAL_JSON_OPTIONS`):
.. doctest::
>>> from bson import Binary, Code
>>> from bson.json_util import dumps, CANONICAL_JSON_OPTIONS
>>> dumps([{'foo': [1, 2]},
... {'bar': {'hello': 'world'}},
... {'code': Code("function x() { return 1; }")},
... {'bin': Binary(b"\x01\x02\x03\x04")}],
... json_options=CANONICAL_JSON_OPTIONS)
'[{"foo": [{"$numberInt": "1"}, {"$numberInt": "2"}]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }"}}, {"bin": {"$binary": {"base64": "AQIDBA==", "subType": "00"}}}]'
Example usage (with :const:`LEGACY_JSON_OPTIONS`):
.. doctest::
>>> from bson import Binary, Code
>>> from bson.json_util import dumps, LEGACY_JSON_OPTIONS
>>> dumps([{'foo': [1, 2]},
... {'bar': {'hello': 'world'}},
... {'code': Code("function x() { return 1; }", {})},
... {'bin': Binary(b"\x01\x02\x03\x04")}],
... json_options=LEGACY_JSON_OPTIONS)
'[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }", "$scope": {}}}, {"bin": {"$binary": "AQIDBA==", "$type": "00"}}]'
Alternatively, you can manually pass the `default` to :func:`json.dumps`.
It won't handle :class:`~bson.binary.Binary` and :class:`~bson.code.Code`
instances (as they are extended strings you can't provide custom defaults),
but it will be faster as there is less recursion.
.. note::
If your application does not need the flexibility offered by
:class:`JSONOptions` and spends a large amount of time in the `json_util`
module, look to
`python-bsonjs <https://pypi.python.org/pypi/python-bsonjs>`_ for a nice
performance improvement. `python-bsonjs` is a fast BSON to MongoDB
Extended JSON converter for Python built on top of
`libbson <https://github.com/mongodb/libbson>`_. `python-bsonjs` works best
with PyMongo when using :class:`~bson.raw_bson.RawBSONDocument`.
"""
import base64
import datetime
import json
import math
import re
import uuid
from pymongo.errors import ConfigurationError
import bson
from bson import EPOCH_AWARE, RE_TYPE, SON
from bson.binary import (Binary, UuidRepresentation, ALL_UUID_SUBTYPES,
UUID_SUBTYPE)
from bson.code import Code
from bson.codec_options import CodecOptions
from bson.dbref import DBRef
from bson.decimal128 import Decimal128
from bson.int64 import Int64
from bson.max_key import MaxKey
from bson.min_key import MinKey
from bson.objectid import ObjectId
from bson.regex import Regex
from bson.timestamp import Timestamp
from bson.tz_util import utc
# Map Extended JSON "$options" flag characters to Python `re` flags.
_RE_OPT_TABLE = {
    "i": re.I,
    "l": re.L,
    "m": re.M,
    "s": re.S,
    "u": re.U,
    "x": re.X,
}
class DatetimeRepresentation:
    """Namespace of the supported JSON encodings for datetimes."""

    LEGACY = 0
    """Legacy MongoDB Extended JSON datetime representation.

    :class:`datetime.datetime` instances will be encoded to JSON in the
    format `{"$date": <dateAsMilliseconds>}`, where `dateAsMilliseconds` is
    a 64-bit signed integer giving the number of milliseconds since the Unix
    epoch UTC. This was the default encoding before PyMongo version 3.4.

    .. versionadded:: 3.4
    """

    NUMBERLONG = 1
    """NumberLong datetime representation.

    :class:`datetime.datetime` instances will be encoded to JSON in the
    format `{"$date": {"$numberLong": "<dateAsMilliseconds>"}}`,
    where `dateAsMilliseconds` is the string representation of a 64-bit signed
    integer giving the number of milliseconds since the Unix epoch UTC.

    .. versionadded:: 3.4
    """

    ISO8601 = 2
    """ISO-8601 datetime representation.

    :class:`datetime.datetime` instances greater than or equal to the Unix
    epoch UTC will be encoded to JSON in the format `{"$date": "<ISO-8601>"}`.
    :class:`datetime.datetime` instances before the Unix epoch UTC will be
    encoded as if the datetime representation is
    :const:`~DatetimeRepresentation.NUMBERLONG`.

    .. versionadded:: 3.4
    """
class JSONMode:
    """Namespace of the supported Extended JSON output modes."""

    LEGACY = 0
    """Legacy Extended JSON representation.

    In this mode, :func:`~bson.json_util.dumps` produces PyMongo's legacy
    non-standard JSON output. Consider using
    :const:`~bson.json_util.JSONMode.RELAXED` or
    :const:`~bson.json_util.JSONMode.CANONICAL` instead.

    .. versionadded:: 3.5
    """

    RELAXED = 1
    """Relaxed Extended JSON representation.

    In this mode, :func:`~bson.json_util.dumps` produces Relaxed Extended JSON,
    a mostly JSON-like format. Consider using this for things like a web API,
    where one is sending a document (or a projection of a document) that only
    uses ordinary JSON type primitives. In particular, the ``int``,
    :class:`~bson.int64.Int64`, and ``float`` numeric types are represented in
    the native JSON number format. This output is also the most human readable
    and is useful for debugging and documentation.

    .. seealso:: The specification for Relaxed `Extended JSON`_.

    .. versionadded:: 3.5
    """

    CANONICAL = 2
    """Canonical Extended JSON representation.

    In this mode, :func:`~bson.json_util.dumps` produces Canonical Extended
    JSON, a type preserving format. Consider using this for things like
    testing, where one has to precisely specify expected types in JSON. In
    particular, the ``int``, :class:`~bson.int64.Int64`, and ``float`` numeric
    types are encoded with type wrappers.

    .. seealso:: The specification for Canonical `Extended JSON`_.

    .. versionadded:: 3.5
    """
class JSONOptions(CodecOptions):
    """Encapsulates JSON options for :func:`dumps` and :func:`loads`.

    :Parameters:
      - `strict_number_long`: If ``True``, :class:`~bson.int64.Int64` objects
        are encoded to MongoDB Extended JSON's *Strict mode* type
        `NumberLong`, ie ``'{"$numberLong": "<number>" }'``. Otherwise they
        will be encoded as an `int`. Defaults to ``False``.
      - `datetime_representation`: The representation to use when encoding
        instances of :class:`datetime.datetime`. Defaults to
        :const:`~DatetimeRepresentation.LEGACY`.
      - `strict_uuid`: If ``True``, :class:`uuid.UUID` object are encoded to
        MongoDB Extended JSON's *Strict mode* type `Binary`. Otherwise it
        will be encoded as ``'{"$uuid": "<hex>" }'``. Defaults to ``False``.
      - `json_mode`: The :class:`JSONMode` to use when encoding BSON types to
        Extended JSON. Defaults to :const:`~JSONMode.LEGACY`.
      - `document_class`: BSON documents returned by :func:`loads` will be
        decoded to an instance of this class. Must be a subclass of
        :class:`collections.MutableMapping`. Defaults to :class:`dict`.
      - `uuid_representation`: The :class:`~bson.binary.UuidRepresentation`
        to use when encoding and decoding instances of :class:`uuid.UUID`.
        Defaults to :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`.
      - `tz_aware`: If ``True``, MongoDB Extended JSON's *Strict mode* type
        `Date` will be decoded to timezone aware instances of
        :class:`datetime.datetime`. Otherwise they will be naive. Defaults
        to ``False``.
      - `tzinfo`: A :class:`datetime.tzinfo` subclass that specifies the
        timezone from which :class:`~datetime.datetime` objects should be
        decoded. Defaults to :const:`~bson.tz_util.utc`.
      - `args`: arguments to :class:`~bson.codec_options.CodecOptions`
      - `kwargs`: arguments to :class:`~bson.codec_options.CodecOptions`

    .. seealso:: The specification for Relaxed and Canonical `Extended JSON`_.

    .. versionchanged:: 4.0
       The default for `json_mode` was changed from :const:`JSONMode.LEGACY`
       to :const:`JSONMode.RELAXED`.
       The default for `uuid_representation` was changed from
       :const:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` to
       :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`.

    .. versionchanged:: 3.5
       Accepts the optional parameter `json_mode`.

    .. versionchanged:: 4.0
       Changed default value of `tz_aware` to False.
    """

    def __new__(cls, strict_number_long=None,
                datetime_representation=None,
                strict_uuid=None, json_mode=JSONMode.RELAXED,
                *args, **kwargs):
        kwargs["tz_aware"] = kwargs.get("tz_aware", False)
        if kwargs["tz_aware"]:
            kwargs["tzinfo"] = kwargs.get("tzinfo", utc)
        if datetime_representation not in (DatetimeRepresentation.LEGACY,
                                           DatetimeRepresentation.NUMBERLONG,
                                           DatetimeRepresentation.ISO8601,
                                           None):
            raise ConfigurationError(
                "JSONOptions.datetime_representation must be one of LEGACY, "
                "NUMBERLONG, or ISO8601 from DatetimeRepresentation.")
        self = super(JSONOptions, cls).__new__(cls, *args, **kwargs)
        if json_mode not in (JSONMode.LEGACY,
                             JSONMode.RELAXED,
                             JSONMode.CANONICAL):
            raise ConfigurationError(
                "JSONOptions.json_mode must be one of LEGACY, RELAXED, "
                "or CANONICAL from JSONMode.")
        self.json_mode = json_mode
        if self.json_mode == JSONMode.RELAXED:
            if strict_number_long:
                raise ConfigurationError(
                    "Cannot specify strict_number_long=True with"
                    " JSONMode.RELAXED")
            if datetime_representation not in (None,
                                               DatetimeRepresentation.ISO8601):
                raise ConfigurationError(
                    "datetime_representation must be DatetimeRepresentation."
                    "ISO8601 or omitted with JSONMode.RELAXED")
            if strict_uuid not in (None, True):
                raise ConfigurationError(
                    "Cannot specify strict_uuid=False with JSONMode.RELAXED")
            self.strict_number_long = False
            self.datetime_representation = DatetimeRepresentation.ISO8601
            self.strict_uuid = True
        elif self.json_mode == JSONMode.CANONICAL:
            # BUGFIX: these three error messages previously said
            # "JSONMode.RELAXED" (copy-paste from the branch above) even
            # though they are raised for JSONMode.CANONICAL.
            if strict_number_long not in (None, True):
                raise ConfigurationError(
                    "Cannot specify strict_number_long=False with"
                    " JSONMode.CANONICAL")
            if datetime_representation not in (
                    None, DatetimeRepresentation.NUMBERLONG):
                raise ConfigurationError(
                    "datetime_representation must be DatetimeRepresentation."
                    "NUMBERLONG or omitted with JSONMode.CANONICAL")
            if strict_uuid not in (None, True):
                raise ConfigurationError(
                    "Cannot specify strict_uuid=False with JSONMode.CANONICAL")
            self.strict_number_long = True
            self.datetime_representation = DatetimeRepresentation.NUMBERLONG
            self.strict_uuid = True
        else:  # JSONMode.LEGACY
            self.strict_number_long = False
            self.datetime_representation = DatetimeRepresentation.LEGACY
            self.strict_uuid = False
        # Explicit keyword arguments override the mode defaults.
        if strict_number_long is not None:
            self.strict_number_long = strict_number_long
        if datetime_representation is not None:
            self.datetime_representation = datetime_representation
        if strict_uuid is not None:
            self.strict_uuid = strict_uuid
        return self

    def _arguments_repr(self):
        # Repr fragment for the JSON-specific options, chained onto the
        # CodecOptions fragment.
        return ('strict_number_long=%r, '
                'datetime_representation=%r, '
                'strict_uuid=%r, json_mode=%r, %s' % (
                    self.strict_number_long,
                    self.datetime_representation,
                    self.strict_uuid,
                    self.json_mode,
                    super(JSONOptions, self)._arguments_repr()))

    def _options_dict(self):
        # TODO: PYTHON-2442 use _asdict() instead
        options_dict = super(JSONOptions, self)._options_dict()
        options_dict.update({
            'strict_number_long': self.strict_number_long,
            'datetime_representation': self.datetime_representation,
            'strict_uuid': self.strict_uuid,
            'json_mode': self.json_mode})
        return options_dict

    def with_options(self, **kwargs):
        """
        Make a copy of this JSONOptions, overriding some options::

            >>> from bson.json_util import CANONICAL_JSON_OPTIONS
            >>> CANONICAL_JSON_OPTIONS.tz_aware
            True
            >>> json_options = CANONICAL_JSON_OPTIONS.with_options(tz_aware=False, tzinfo=None)
            >>> json_options.tz_aware
            False

        .. versionadded:: 3.12
        """
        opts = self._options_dict()
        # Carry over the JSON-specific options, then let the caller's
        # keyword arguments take precedence.
        for opt in ('strict_number_long', 'datetime_representation',
                    'strict_uuid', 'json_mode'):
            opts[opt] = kwargs.get(opt, getattr(self, opt))
        opts.update(kwargs)
        return JSONOptions(**opts)
LEGACY_JSON_OPTIONS = JSONOptions(json_mode=JSONMode.LEGACY)
""":class:`JSONOptions` for encoding to PyMongo's legacy JSON format.
.. seealso:: The documentation for :const:`bson.json_util.JSONMode.LEGACY`.
.. versionadded:: 3.5
"""
CANONICAL_JSON_OPTIONS = JSONOptions(json_mode=JSONMode.CANONICAL)
""":class:`JSONOptions` for Canonical Extended JSON.
.. seealso:: The documentation for :const:`bson.json_util.JSONMode.CANONICAL`.
.. versionadded:: 3.5
"""
RELAXED_JSON_OPTIONS = JSONOptions(json_mode=JSONMode.RELAXED)
""":class:`JSONOptions` for Relaxed Extended JSON.
.. seealso:: The documentation for :const:`bson.json_util.JSONMode.RELAXED`.
.. versionadded:: 3.5
"""
DEFAULT_JSON_OPTIONS = RELAXED_JSON_OPTIONS
"""The default :class:`JSONOptions` for JSON encoding/decoding.
The same as :const:`RELAXED_JSON_OPTIONS`.
.. versionchanged:: 4.0
Changed from :const:`LEGACY_JSON_OPTIONS` to
:const:`RELAXED_JSON_OPTIONS`.
.. versionadded:: 3.4
"""
def dumps(obj, *args, **kwargs):
    """Helper function that wraps :func:`json.dumps`.
    Recursive function that handles all BSON types including
    :class:`~bson.binary.Binary` and :class:`~bson.code.Code`.
    :Parameters:
      - `json_options`: A :class:`JSONOptions` instance used to modify the
        encoding of MongoDB Extended JSON types. Defaults to
        :const:`DEFAULT_JSON_OPTIONS`.
    .. versionchanged:: 4.0
       Now outputs MongoDB Relaxed Extended JSON by default (using
       :const:`DEFAULT_JSON_OPTIONS`).
    .. versionchanged:: 3.4
       Accepts optional parameter `json_options`. See :class:`JSONOptions`.
    """
    opts = kwargs.pop("json_options", DEFAULT_JSON_OPTIONS)
    converted = _json_convert(obj, opts)
    return json.dumps(converted, *args, **kwargs)
def loads(s, *args, **kwargs):
    """Helper function that wraps :func:`json.loads`.
    Automatically passes the object_hook for BSON type conversion.
    Raises ``TypeError``, ``ValueError``, ``KeyError``, or
    :exc:`~bson.errors.InvalidId` on invalid MongoDB Extended JSON.
    :Parameters:
      - `json_options`: A :class:`JSONOptions` instance used to modify the
        decoding of MongoDB Extended JSON types. Defaults to
        :const:`DEFAULT_JSON_OPTIONS`.
    .. versionchanged:: 3.5
       Parses Relaxed and Canonical Extended JSON as well as PyMongo's legacy
       format. Now raises ``TypeError`` or ``ValueError`` when parsing JSON
       type wrappers with values of the wrong type or any extra keys.
    .. versionchanged:: 3.4
       Accepts optional parameter `json_options`. See :class:`JSONOptions`.
    """
    opts = kwargs.pop("json_options", DEFAULT_JSON_OPTIONS)

    def _pairs_hook(pairs):
        # Bind the resolved options into the hook passed to json.loads.
        return object_pairs_hook(pairs, opts)

    kwargs["object_pairs_hook"] = _pairs_hook
    return json.loads(s, *args, **kwargs)
def _json_convert(obj, json_options=DEFAULT_JSON_OPTIONS):
    """Recursive helper method that converts BSON types so they can be
    converted into json.
    """
    # Mappings first: anything with .items() becomes a key-ordered SON.
    if hasattr(obj, 'items'):
        pairs = ((key, _json_convert(val, json_options))
                 for key, val in obj.items())
        return SON(pairs)
    # Other iterables become lists, but strings/bytes are scalars here.
    if hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes)):
        return [_json_convert(item, json_options) for item in obj]
    # Scalars: let default() translate BSON types; anything it rejects
    # is already JSON-serializable and passes through unchanged.
    try:
        return default(obj, json_options)
    except TypeError:
        return obj
def object_pairs_hook(pairs, json_options=DEFAULT_JSON_OPTIONS):
    """Build a document from JSON key/value pairs, then decode any Extended
    JSON type wrappers it contains via :func:`object_hook`."""
    document = json_options.document_class(pairs)
    return object_hook(document, json_options)
def object_hook(dct, json_options=DEFAULT_JSON_OPTIONS):
    """Decode a single decoded-JSON document, replacing Extended JSON type
    wrappers (legacy, Relaxed, and Canonical forms) with BSON types.

    Documents that match no wrapper are returned unchanged.  NOTE: the
    dispatch order below is significant — e.g. a doc with both "$binary"
    and "$type" must take the legacy-binary path.
    """
    if "$oid" in dct:
        return _parse_canonical_oid(dct)
    # A DBRef requires a string $ref, an $id, and an optional string $db.
    if (isinstance(dct.get('$ref'), str) and
            "$id" in dct and
            isinstance(dct.get('$db'), (str, type(None)))):
        return _parse_canonical_dbref(dct)
    if "$date" in dct:
        return _parse_canonical_datetime(dct, json_options)
    if "$regex" in dct:
        return _parse_legacy_regex(dct)
    if "$minKey" in dct:
        return _parse_canonical_minkey(dct)
    if "$maxKey" in dct:
        return _parse_canonical_maxkey(dct)
    if "$binary" in dct:
        # "$type" alongside "$binary" marks the legacy wire format.
        if "$type" in dct:
            return _parse_legacy_binary(dct, json_options)
        else:
            return _parse_canonical_binary(dct, json_options)
    if "$code" in dct:
        return _parse_canonical_code(dct)
    if "$uuid" in dct:
        return _parse_legacy_uuid(dct, json_options)
    if "$undefined" in dct:
        # BSON "undefined" is deprecated; decode it as None.
        return None
    if "$numberLong" in dct:
        return _parse_canonical_int64(dct)
    if "$timestamp" in dct:
        tsp = dct["$timestamp"]
        return Timestamp(tsp["t"], tsp["i"])
    if "$numberDecimal" in dct:
        return _parse_canonical_decimal128(dct)
    if "$dbPointer" in dct:
        return _parse_canonical_dbpointer(dct)
    if "$regularExpression" in dct:
        return _parse_canonical_regex(dct)
    if "$symbol" in dct:
        return _parse_canonical_symbol(dct)
    if "$numberInt" in dct:
        return _parse_canonical_int32(dct)
    if "$numberDouble" in dct:
        return _parse_canonical_double(dct)
    return dct
def _parse_legacy_regex(doc):
    """Decode a legacy Extended JSON $regex wrapper to bson.regex.Regex."""
    pattern = doc["$regex"]
    if isinstance(pattern, Regex):
        # This is the $regex query operator, not a type wrapper; pass the
        # whole document through untouched.
        return doc
    # PyMongo always adds $options but some other tools may not.
    opts = doc.get("$options", "")
    flags = 0
    for char in opts:
        flags |= _RE_OPT_TABLE.get(char, 0)
    return Regex(pattern, flags)
def _parse_legacy_uuid(doc, json_options):
    """Decode a JSON legacy $uuid to Python UUID."""
    if len(doc) != 1:
        raise TypeError('Bad $uuid, extra field(s): %s' % (doc,))
    hex_str = doc["$uuid"]
    if not isinstance(hex_str, str):
        raise TypeError('$uuid must be a string: %s' % (doc,))
    parsed = uuid.UUID(hex_str)
    # With UNSPECIFIED there is no way to pick a byte order, so the value
    # is preserved as a Binary instead of a native UUID.
    if json_options.uuid_representation == UuidRepresentation.UNSPECIFIED:
        return Binary.from_uuid(parsed)
    return parsed
def _binary_or_uuid(data, subtype, json_options):
    """Wrap raw bytes as Binary, bytes, or uuid.UUID depending on subtype.

    Subtype 0 decodes to plain bytes; UUID subtypes decode to uuid.UUID
    (unless the representation is UNSPECIFIED); everything else to Binary.
    """
    # special handling for UUID
    if subtype in ALL_UUID_SUBTYPES:
        uuid_representation = json_options.uuid_representation
        binary_value = Binary(data, subtype)
        if uuid_representation == UuidRepresentation.UNSPECIFIED:
            # Preserve the Binary as-is; no byte-order policy was chosen.
            return binary_value
        if subtype == UUID_SUBTYPE:
            # Legacy behavior: use STANDARD with binary subtype 4.
            uuid_representation = UuidRepresentation.STANDARD
        elif uuid_representation == UuidRepresentation.STANDARD:
            # subtype == OLD_UUID_SUBTYPE
            # Legacy behavior: STANDARD is the same as PYTHON_LEGACY.
            uuid_representation = UuidRepresentation.PYTHON_LEGACY
        return binary_value.as_uuid(uuid_representation)
    if subtype == 0:
        # Subtype 0 (generic) round-trips as plain bytes.
        return data
    return Binary(data, subtype)
def _parse_legacy_binary(doc, json_options):
    """Decode a legacy Extended JSON {"$binary": ..., "$type": ...} wrapper.

    ``$type`` may be an int or a hex string; mutates ``doc`` to normalize
    it to the hex-string form before parsing.
    """
    if isinstance(doc["$type"], int):
        doc["$type"] = "%02x" % doc["$type"]
    subtype = int(doc["$type"], 16)
    if subtype >= 0xffffff80:  # Handle mongoexport values
        # mongoexport emits 8-hex-digit sign-extended subtypes; the real
        # subtype is in the low byte (last two hex digits).
        subtype = int(doc["$type"][6:], 16)
    data = base64.b64decode(doc["$binary"].encode())
    return _binary_or_uuid(data, subtype, json_options)
def _parse_canonical_binary(doc, json_options):
binary = doc["$binary"]
b64 = binary["base64"]
subtype = binary["subType"]
if not isinstance(b64, str):
raise TypeError('$binary base64 must be a string: %s' % (doc,))
if not isinstance(subtype, str) or len(subtype) > 2:
raise TypeError('$binary subType must be a string at most 2 '
'characters: %s' % (doc,))
if len(binary) != 2:
raise TypeError('$binary must include only "base64" and "subType" '
'components: %s' % (doc,))
data = base64.b64decode(b64.encode())
return _binary_or_uuid(data, int(subtype, 16), json_options)
def _parse_canonical_datetime(doc, json_options):
    """Decode a JSON datetime to python datetime.datetime.

    Accepts both ISO-8601 strings (mongoexport 2.6+) and integer millis
    (legacy / $numberLong already unwrapped by object_hook).
    """
    dtm = doc["$date"]
    if len(doc) != 1:
        raise TypeError('Bad $date, extra field(s): %s' % (doc,))
    # mongoexport 2.6 and newer
    if isinstance(dtm, str):
        # Parse offset; recognized suffixes are Z, (+|-)HH:MM, (+|-)HHMM,
        # and (+|-)HH.
        if dtm[-1] == 'Z':
            dt = dtm[:-1]
            offset = 'Z'
        elif dtm[-6] in ('+', '-') and dtm[-3] == ':':
            # (+|-)HH:MM
            dt = dtm[:-6]
            offset = dtm[-6:]
        elif dtm[-5] in ('+', '-'):
            # (+|-)HHMM
            dt = dtm[:-5]
            offset = dtm[-5:]
        elif dtm[-3] in ('+', '-'):
            # (+|-)HH
            dt = dtm[:-3]
            offset = dtm[-3:]
        else:
            dt = dtm
            offset = ''
        # Parse the optional fractional seconds portion.
        dot_index = dt.rfind('.')
        microsecond = 0
        if dot_index != -1:
            microsecond = int(float(dt[dot_index:]) * 1000000)
            dt = dt[:dot_index]
        aware = datetime.datetime.strptime(
            dt, "%Y-%m-%dT%H:%M:%S").replace(microsecond=microsecond,
                                             tzinfo=utc)
        if offset and offset != 'Z':
            # Convert the textual offset to seconds and normalize to UTC.
            if len(offset) == 6:
                hours, minutes = offset[1:].split(':')
                secs = (int(hours) * 3600 + int(minutes) * 60)
            elif len(offset) == 5:
                secs = (int(offset[1:3]) * 3600 + int(offset[3:]) * 60)
            elif len(offset) == 3:
                secs = int(offset[1:3]) * 3600
            if offset[0] == "-":
                secs *= -1
            aware = aware - datetime.timedelta(seconds=secs)
        if json_options.tz_aware:
            if json_options.tzinfo:
                aware = aware.astimezone(json_options.tzinfo)
            return aware
        else:
            return aware.replace(tzinfo=None)
    # Integer millis since the Unix epoch.
    return bson._millis_to_datetime(int(dtm), json_options)
def _parse_canonical_oid(doc):
"""Decode a JSON ObjectId to bson.objectid.ObjectId."""
if len(doc) != 1:
raise TypeError('Bad $oid, extra field(s): %s' % (doc,))
return ObjectId(doc['$oid'])
def _parse_canonical_symbol(doc):
"""Decode a JSON symbol to Python string."""
symbol = doc['$symbol']
if len(doc) != 1:
raise TypeError('Bad $symbol, extra field(s): %s' % (doc,))
return str(symbol)
def _parse_canonical_code(doc):
"""Decode a JSON code to bson.code.Code."""
for key in doc:
if key not in ('$code', '$scope'):
raise TypeError('Bad $code, extra field(s): %s' % (doc,))
return Code(doc['$code'], scope=doc.get('$scope'))
def _parse_canonical_regex(doc):
"""Decode a JSON regex to bson.regex.Regex."""
regex = doc['$regularExpression']
if len(doc) != 1:
raise TypeError('Bad $regularExpression, extra field(s): %s' % (doc,))
if len(regex) != 2:
raise TypeError('Bad $regularExpression must include only "pattern"'
'and "options" components: %s' % (doc,))
opts = regex['options']
if not isinstance(opts, str):
raise TypeError('Bad $regularExpression options, options must be '
'string, was type %s' % (type(opts)))
return Regex(regex['pattern'], opts)
def _parse_canonical_dbref(doc):
    """Decode a JSON DBRef to bson.dbref.DBRef."""
    # Pop the reserved keys; whatever remains becomes extra DBRef kwargs.
    collection = doc.pop('$ref')
    oid = doc.pop('$id')
    database = doc.pop('$db', None)
    return DBRef(collection, oid, database=database, **doc)
def _parse_canonical_dbpointer(doc):
"""Decode a JSON (deprecated) DBPointer to bson.dbref.DBRef."""
dbref = doc['$dbPointer']
if len(doc) != 1:
raise TypeError('Bad $dbPointer, extra field(s): %s' % (doc,))
if isinstance(dbref, DBRef):
dbref_doc = dbref.as_doc()
# DBPointer must not contain $db in its value.
if dbref.database is not None:
raise TypeError(
'Bad $dbPointer, extra field $db: %s' % (dbref_doc,))
if not isinstance(dbref.id, ObjectId):
raise TypeError(
'Bad $dbPointer, $id must be an ObjectId: %s' % (dbref_doc,))
if len(dbref_doc) != 2:
raise TypeError(
'Bad $dbPointer, extra field(s) in DBRef: %s' % (dbref_doc,))
return dbref
else:
raise TypeError('Bad $dbPointer, expected a DBRef: %s' % (doc,))
def _parse_canonical_int32(doc):
"""Decode a JSON int32 to python int."""
i_str = doc['$numberInt']
if len(doc) != 1:
raise TypeError('Bad $numberInt, extra field(s): %s' % (doc,))
if not isinstance(i_str, str):
raise TypeError('$numberInt must be string: %s' % (doc,))
return int(i_str)
def _parse_canonical_int64(doc):
"""Decode a JSON int64 to bson.int64.Int64."""
l_str = doc['$numberLong']
if len(doc) != 1:
raise TypeError('Bad $numberLong, extra field(s): %s' % (doc,))
return Int64(l_str)
def _parse_canonical_double(doc):
"""Decode a JSON double to python float."""
d_str = doc['$numberDouble']
if len(doc) != 1:
raise TypeError('Bad $numberDouble, extra field(s): %s' % (doc,))
if not isinstance(d_str, str):
raise TypeError('$numberDouble must be string: %s' % (doc,))
return float(d_str)
def _parse_canonical_decimal128(doc):
"""Decode a JSON decimal128 to bson.decimal128.Decimal128."""
d_str = doc['$numberDecimal']
if len(doc) != 1:
raise TypeError('Bad $numberDecimal, extra field(s): %s' % (doc,))
if not isinstance(d_str, str):
raise TypeError('$numberDecimal must be string: %s' % (doc,))
return Decimal128(d_str)
def _parse_canonical_minkey(doc):
"""Decode a JSON MinKey to bson.min_key.MinKey."""
if type(doc['$minKey']) is not int or doc['$minKey'] != 1:
raise TypeError('$minKey value must be 1: %s' % (doc,))
if len(doc) != 1:
raise TypeError('Bad $minKey, extra field(s): %s' % (doc,))
return MinKey()
def _parse_canonical_maxkey(doc):
"""Decode a JSON MaxKey to bson.max_key.MaxKey."""
if type(doc['$maxKey']) is not int or doc['$maxKey'] != 1:
raise TypeError('$maxKey value must be 1: %s', (doc,))
if len(doc) != 1:
raise TypeError('Bad $minKey, extra field(s): %s' % (doc,))
return MaxKey()
def _encode_binary(data, subtype, json_options):
    """Encode raw bytes/Binary as a legacy or canonical $binary wrapper."""
    payload = base64.b64encode(data).decode()
    subtype_hex = "%02x" % subtype
    if json_options.json_mode == JSONMode.LEGACY:
        return SON([('$binary', payload), ('$type', subtype_hex)])
    return {'$binary': SON([('base64', payload), ('subType', subtype_hex)])}
def default(obj, json_options=DEFAULT_JSON_OPTIONS):
    """Convert one BSON-typed value to its Extended JSON form.

    Raises TypeError for values with no BSON mapping so json.dumps can
    report them (and _json_convert can pass them through).  The isinstance
    dispatch order below is significant — see the bool note.
    """
    # We preserve key order when rendering SON, DBRef, etc. as JSON by
    # returning a SON for those types instead of a dict.
    if isinstance(obj, ObjectId):
        return {"$oid": str(obj)}
    if isinstance(obj, DBRef):
        return _json_convert(obj.as_doc(), json_options=json_options)
    if isinstance(obj, datetime.datetime):
        if (json_options.datetime_representation ==
                DatetimeRepresentation.ISO8601):
            if not obj.tzinfo:
                obj = obj.replace(tzinfo=utc)
            # Pre-epoch datetimes fall through to the $numberLong form.
            if obj >= EPOCH_AWARE:
                off = obj.tzinfo.utcoffset(obj)
                if (off.days, off.seconds, off.microseconds) == (0, 0, 0):
                    tz_string = 'Z'
                else:
                    tz_string = obj.strftime('%z')
                millis = int(obj.microsecond / 1000)
                fracsecs = ".%03d" % (millis,) if millis else ""
                return {"$date": "%s%s%s" % (
                    obj.strftime("%Y-%m-%dT%H:%M:%S"), fracsecs, tz_string)}
        millis = bson._datetime_to_millis(obj)
        if (json_options.datetime_representation ==
                DatetimeRepresentation.LEGACY):
            return {"$date": millis}
        return {"$date": {"$numberLong": str(millis)}}
    if json_options.strict_number_long and isinstance(obj, Int64):
        return {"$numberLong": str(obj)}
    if isinstance(obj, (RE_TYPE, Regex)):
        # Translate re module flags back to Extended JSON option letters.
        flags = ""
        if obj.flags & re.IGNORECASE:
            flags += "i"
        if obj.flags & re.LOCALE:
            flags += "l"
        if obj.flags & re.MULTILINE:
            flags += "m"
        if obj.flags & re.DOTALL:
            flags += "s"
        if obj.flags & re.UNICODE:
            flags += "u"
        if obj.flags & re.VERBOSE:
            flags += "x"
        if isinstance(obj.pattern, str):
            pattern = obj.pattern
        else:
            pattern = obj.pattern.decode('utf-8')
        if json_options.json_mode == JSONMode.LEGACY:
            return SON([("$regex", pattern), ("$options", flags)])
        return {'$regularExpression': SON([("pattern", pattern),
                                           ("options", flags)])}
    if isinstance(obj, MinKey):
        return {"$minKey": 1}
    if isinstance(obj, MaxKey):
        return {"$maxKey": 1}
    if isinstance(obj, Timestamp):
        return {"$timestamp": SON([("t", obj.time), ("i", obj.inc)])}
    if isinstance(obj, Code):
        if obj.scope is None:
            return {'$code': str(obj)}
        return SON([
            ('$code', str(obj)),
            ('$scope', _json_convert(obj.scope, json_options))])
    if isinstance(obj, Binary):
        return _encode_binary(obj, obj.subtype, json_options)
    if isinstance(obj, bytes):
        return _encode_binary(obj, 0, json_options)
    if isinstance(obj, uuid.UUID):
        if json_options.strict_uuid:
            binval = Binary.from_uuid(
                obj, uuid_representation=json_options.uuid_representation)
            return _encode_binary(binval, binval.subtype, json_options)
        else:
            return {"$uuid": obj.hex}
    if isinstance(obj, Decimal128):
        return {"$numberDecimal": str(obj)}
    # bool must be handled before the int branch below: bool subclasses
    # int, and True/False must stay native JSON booleans.
    if isinstance(obj, bool):
        return obj
    if (json_options.json_mode == JSONMode.CANONICAL and
            isinstance(obj, int)):
        if -2 ** 31 <= obj < 2 ** 31:
            return {'$numberInt': str(obj)}
        return {'$numberLong': str(obj)}
    if json_options.json_mode != JSONMode.LEGACY and isinstance(obj, float):
        if math.isnan(obj):
            return {'$numberDouble': 'NaN'}
        elif math.isinf(obj):
            representation = 'Infinity' if obj > 0 else '-Infinity'
            return {'$numberDouble': representation}
        elif json_options.json_mode == JSONMode.CANONICAL:
            # repr() will return the shortest string guaranteed to produce the
            # original value, when float() is called on it.
            return {'$numberDouble': str(repr(obj))}
    raise TypeError("%r is not JSON serializable" % obj)
# PYTHON-1965 The bson package should not depend on the pymongo package (#725)
# Copyright 2009-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for using Python's :mod:`json` module with BSON documents.
This module provides two helper methods `dumps` and `loads` that wrap the
native :mod:`json` methods and provide explicit BSON conversion to and from
JSON. :class:`~bson.json_util.JSONOptions` provides a way to control how JSON
is emitted and parsed, with the default being the Relaxed Extended JSON format.
:mod:`~bson.json_util` can also generate Canonical or legacy `Extended JSON`_
when :const:`CANONICAL_JSON_OPTIONS` or :const:`LEGACY_JSON_OPTIONS` is
provided, respectively.
.. _Extended JSON: https://github.com/mongodb/specifications/blob/master/source/extended-json.rst
Example usage (deserialization):
.. doctest::
>>> from bson.json_util import loads
>>> loads('[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$scope": {}, "$code": "function x() { return 1; }"}}, {"bin": {"$type": "80", "$binary": "AQIDBA=="}}]')
[{'foo': [1, 2]}, {'bar': {'hello': 'world'}}, {'code': Code('function x() { return 1; }', {})}, {'bin': Binary(b'...', 128)}]
Example usage with :const:`RELAXED_JSON_OPTIONS` (the default):
.. doctest::
>>> from bson import Binary, Code
>>> from bson.json_util import dumps
>>> dumps([{'foo': [1, 2]},
... {'bar': {'hello': 'world'}},
... {'code': Code("function x() { return 1; }")},
... {'bin': Binary(b"\x01\x02\x03\x04")}])
'[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }"}}, {"bin": {"$binary": {"base64": "AQIDBA==", "subType": "00"}}}]'
Example usage (with :const:`CANONICAL_JSON_OPTIONS`):
.. doctest::
>>> from bson import Binary, Code
>>> from bson.json_util import dumps, CANONICAL_JSON_OPTIONS
>>> dumps([{'foo': [1, 2]},
... {'bar': {'hello': 'world'}},
... {'code': Code("function x() { return 1; }")},
... {'bin': Binary(b"\x01\x02\x03\x04")}],
... json_options=CANONICAL_JSON_OPTIONS)
'[{"foo": [{"$numberInt": "1"}, {"$numberInt": "2"}]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }"}}, {"bin": {"$binary": {"base64": "AQIDBA==", "subType": "00"}}}]'
Example usage (with :const:`LEGACY_JSON_OPTIONS`):
.. doctest::
>>> from bson import Binary, Code
>>> from bson.json_util import dumps, LEGACY_JSON_OPTIONS
>>> dumps([{'foo': [1, 2]},
... {'bar': {'hello': 'world'}},
... {'code': Code("function x() { return 1; }", {})},
... {'bin': Binary(b"\x01\x02\x03\x04")}],
... json_options=LEGACY_JSON_OPTIONS)
'[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }", "$scope": {}}}, {"bin": {"$binary": "AQIDBA==", "$type": "00"}}]'
Alternatively, you can manually pass the `default` to :func:`json.dumps`.
It won't handle :class:`~bson.binary.Binary` and :class:`~bson.code.Code`
instances (as they are extended strings you can't provide custom defaults),
but it will be faster as there is less recursion.
.. note::
If your application does not need the flexibility offered by
:class:`JSONOptions` and spends a large amount of time in the `json_util`
module, look to
`python-bsonjs <https://pypi.python.org/pypi/python-bsonjs>`_ for a nice
performance improvement. `python-bsonjs` is a fast BSON to MongoDB
Extended JSON converter for Python built on top of
`libbson <https://github.com/mongodb/libbson>`_. `python-bsonjs` works best
with PyMongo when using :class:`~bson.raw_bson.RawBSONDocument`.
"""
import base64
import datetime
import json
import math
import re
import uuid
import bson
from bson import EPOCH_AWARE, RE_TYPE, SON
from bson.binary import (Binary, UuidRepresentation, ALL_UUID_SUBTYPES,
UUID_SUBTYPE)
from bson.code import Code
from bson.codec_options import CodecOptions
from bson.dbref import DBRef
from bson.decimal128 import Decimal128
from bson.int64 import Int64
from bson.max_key import MaxKey
from bson.min_key import MinKey
from bson.objectid import ObjectId
from bson.regex import Regex
from bson.timestamp import Timestamp
from bson.tz_util import utc
# Maps legacy Extended JSON "$options" flag characters to re module flags.
_RE_OPT_TABLE = {
    "i": re.I,
    "l": re.L,
    "m": re.M,
    "s": re.S,
    "u": re.U,
    "x": re.X,
}
class DatetimeRepresentation:
    """Namespace of constants selecting how :class:`datetime.datetime`
    instances are encoded to Extended JSON (see :class:`JSONOptions`)."""
    LEGACY = 0
    """Legacy MongoDB Extended JSON datetime representation.
    :class:`datetime.datetime` instances will be encoded to JSON in the
    format `{"$date": <dateAsMilliseconds>}`, where `dateAsMilliseconds` is
    a 64-bit signed integer giving the number of milliseconds since the Unix
    epoch UTC. This was the default encoding before PyMongo version 3.4.
    .. versionadded:: 3.4
    """
    NUMBERLONG = 1
    """NumberLong datetime representation.
    :class:`datetime.datetime` instances will be encoded to JSON in the
    format `{"$date": {"$numberLong": "<dateAsMilliseconds>"}}`,
    where `dateAsMilliseconds` is the string representation of a 64-bit signed
    integer giving the number of milliseconds since the Unix epoch UTC.
    .. versionadded:: 3.4
    """
    ISO8601 = 2
    """ISO-8601 datetime representation.
    :class:`datetime.datetime` instances greater than or equal to the Unix
    epoch UTC will be encoded to JSON in the format `{"$date": "<ISO-8601>"}`.
    :class:`datetime.datetime` instances before the Unix epoch UTC will be
    encoded as if the datetime representation is
    :const:`~DatetimeRepresentation.NUMBERLONG`.
    .. versionadded:: 3.4
    """
class JSONMode:
    """Namespace of constants selecting the overall Extended JSON dialect
    produced by :func:`dumps` (see :class:`JSONOptions`)."""
    LEGACY = 0
    """Legacy Extended JSON representation.
    In this mode, :func:`~bson.json_util.dumps` produces PyMongo's legacy
    non-standard JSON output. Consider using
    :const:`~bson.json_util.JSONMode.RELAXED` or
    :const:`~bson.json_util.JSONMode.CANONICAL` instead.
    .. versionadded:: 3.5
    """
    RELAXED = 1
    """Relaxed Extended JSON representation.
    In this mode, :func:`~bson.json_util.dumps` produces Relaxed Extended JSON,
    a mostly JSON-like format. Consider using this for things like a web API,
    where one is sending a document (or a projection of a document) that only
    uses ordinary JSON type primitives. In particular, the ``int``,
    :class:`~bson.int64.Int64`, and ``float`` numeric types are represented in
    the native JSON number format. This output is also the most human readable
    and is useful for debugging and documentation.
    .. seealso:: The specification for Relaxed `Extended JSON`_.
    .. versionadded:: 3.5
    """
    CANONICAL = 2
    """Canonical Extended JSON representation.
    In this mode, :func:`~bson.json_util.dumps` produces Canonical Extended
    JSON, a type preserving format. Consider using this for things like
    testing, where one has to precisely specify expected types in JSON. In
    particular, the ``int``, :class:`~bson.int64.Int64`, and ``float`` numeric
    types are encoded with type wrappers.
    .. seealso:: The specification for Canonical `Extended JSON`_.
    .. versionadded:: 3.5
    """
class JSONOptions(CodecOptions):
    """Encapsulates JSON options for :func:`dumps` and :func:`loads`.
    :Parameters:
      - `strict_number_long`: If ``True``, :class:`~bson.int64.Int64` objects
        are encoded to MongoDB Extended JSON's *Strict mode* type
        `NumberLong`, ie ``'{"$numberLong": "<number>" }'``. Otherwise they
        will be encoded as an `int`. Defaults to ``False``.
      - `datetime_representation`: The representation to use when encoding
        instances of :class:`datetime.datetime`. Defaults to
        :const:`~DatetimeRepresentation.LEGACY`.
      - `strict_uuid`: If ``True``, :class:`uuid.UUID` object are encoded to
        MongoDB Extended JSON's *Strict mode* type `Binary`. Otherwise it
        will be encoded as ``'{"$uuid": "<hex>" }'``. Defaults to ``False``.
      - `json_mode`: The :class:`JSONMode` to use when encoding BSON types to
        Extended JSON. Defaults to :const:`~JSONMode.LEGACY`.
      - `document_class`: BSON documents returned by :func:`loads` will be
        decoded to an instance of this class. Must be a subclass of
        :class:`collections.MutableMapping`. Defaults to :class:`dict`.
      - `uuid_representation`: The :class:`~bson.binary.UuidRepresentation`
        to use when encoding and decoding instances of :class:`uuid.UUID`.
        Defaults to :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`.
      - `tz_aware`: If ``True``, MongoDB Extended JSON's *Strict mode* type
        `Date` will be decoded to timezone aware instances of
        :class:`datetime.datetime`. Otherwise they will be naive. Defaults
        to ``False``.
      - `tzinfo`: A :class:`datetime.tzinfo` subclass that specifies the
        timezone from which :class:`~datetime.datetime` objects should be
        decoded. Defaults to :const:`~bson.tz_util.utc`.
      - `args`: arguments to :class:`~bson.codec_options.CodecOptions`
      - `kwargs`: arguments to :class:`~bson.codec_options.CodecOptions`
    .. seealso:: The specification for Relaxed and Canonical `Extended JSON`_.
    .. versionchanged:: 4.0
       The default for `json_mode` was changed from :const:`JSONMode.LEGACY`
       to :const:`JSONMode.RELAXED`.
       The default for `uuid_representation` was changed from
       :const:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` to
       :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`.
    .. versionchanged:: 3.5
       Accepts the optional parameter `json_mode`.
    .. versionchanged:: 4.0
       Changed default value of `tz_aware` to False.
    """

    def __new__(cls, strict_number_long=None,
                datetime_representation=None,
                strict_uuid=None, json_mode=JSONMode.RELAXED,
                *args, **kwargs):
        kwargs["tz_aware"] = kwargs.get("tz_aware", False)
        if kwargs["tz_aware"]:
            kwargs["tzinfo"] = kwargs.get("tzinfo", utc)
        if datetime_representation not in (DatetimeRepresentation.LEGACY,
                                           DatetimeRepresentation.NUMBERLONG,
                                           DatetimeRepresentation.ISO8601,
                                           None):
            raise ValueError(
                "JSONOptions.datetime_representation must be one of LEGACY, "
                "NUMBERLONG, or ISO8601 from DatetimeRepresentation.")
        self = super(JSONOptions, cls).__new__(cls, *args, **kwargs)
        if json_mode not in (JSONMode.LEGACY,
                             JSONMode.RELAXED,
                             JSONMode.CANONICAL):
            raise ValueError(
                "JSONOptions.json_mode must be one of LEGACY, RELAXED, "
                "or CANONICAL from JSONMode.")
        self.json_mode = json_mode
        if self.json_mode == JSONMode.RELAXED:
            # RELAXED implies native int64, ISO-8601 dates, strict UUIDs;
            # explicitly conflicting options are rejected.
            if strict_number_long:
                raise ValueError(
                    "Cannot specify strict_number_long=True with"
                    " JSONMode.RELAXED")
            if datetime_representation not in (None,
                                               DatetimeRepresentation.ISO8601):
                raise ValueError(
                    "datetime_representation must be DatetimeRepresentation."
                    "ISO8601 or omitted with JSONMode.RELAXED")
            if strict_uuid not in (None, True):
                raise ValueError(
                    "Cannot specify strict_uuid=False with JSONMode.RELAXED")
            self.strict_number_long = False
            self.datetime_representation = DatetimeRepresentation.ISO8601
            self.strict_uuid = True
        elif self.json_mode == JSONMode.CANONICAL:
            # CANONICAL implies $numberLong ints, NUMBERLONG dates, strict
            # UUIDs.  (The error messages below previously referred to
            # JSONMode.RELAXED due to a copy/paste mistake.)
            if strict_number_long not in (None, True):
                raise ValueError(
                    "Cannot specify strict_number_long=False with"
                    " JSONMode.CANONICAL")
            if datetime_representation not in (
                    None, DatetimeRepresentation.NUMBERLONG):
                raise ValueError(
                    "datetime_representation must be DatetimeRepresentation."
                    "NUMBERLONG or omitted with JSONMode.CANONICAL")
            if strict_uuid not in (None, True):
                raise ValueError(
                    "Cannot specify strict_uuid=False with JSONMode.CANONICAL")
            self.strict_number_long = True
            self.datetime_representation = DatetimeRepresentation.NUMBERLONG
            self.strict_uuid = True
        else:  # JSONMode.LEGACY
            # LEGACY supplies defaults but allows each option to be
            # overridden individually below.
            self.strict_number_long = False
            self.datetime_representation = DatetimeRepresentation.LEGACY
            self.strict_uuid = False
        if strict_number_long is not None:
            self.strict_number_long = strict_number_long
        if datetime_representation is not None:
            self.datetime_representation = datetime_representation
        if strict_uuid is not None:
            self.strict_uuid = strict_uuid
        return self

    def _arguments_repr(self):
        """Return the repr fragment for this instance's options."""
        return ('strict_number_long=%r, '
                'datetime_representation=%r, '
                'strict_uuid=%r, json_mode=%r, %s' % (
                    self.strict_number_long,
                    self.datetime_representation,
                    self.strict_uuid,
                    self.json_mode,
                    super(JSONOptions, self)._arguments_repr()))

    def _options_dict(self):
        """Return a dict of this instance's options, including inherited ones."""
        # TODO: PYTHON-2442 use _asdict() instead
        options_dict = super(JSONOptions, self)._options_dict()
        options_dict.update({
            'strict_number_long': self.strict_number_long,
            'datetime_representation': self.datetime_representation,
            'strict_uuid': self.strict_uuid,
            'json_mode': self.json_mode})
        return options_dict

    def with_options(self, **kwargs):
        """
        Make a copy of this JSONOptions, overriding some options::
            >>> from bson.json_util import CANONICAL_JSON_OPTIONS
            >>> CANONICAL_JSON_OPTIONS.tz_aware
            True
            >>> json_options = CANONICAL_JSON_OPTIONS.with_options(tz_aware=False, tzinfo=None)
            >>> json_options.tz_aware
            False
        .. versionadded:: 3.12
        """
        # _options_dict() already carries the current values; update()
        # applies any caller-supplied overrides.  (A redundant pre-seeding
        # loop over the four JSON-specific keys was removed.)
        opts = self._options_dict()
        opts.update(kwargs)
        return JSONOptions(**opts)
LEGACY_JSON_OPTIONS = JSONOptions(json_mode=JSONMode.LEGACY)
""":class:`JSONOptions` for encoding to PyMongo's legacy JSON format.
.. seealso:: The documentation for :const:`bson.json_util.JSONMode.LEGACY`.
.. versionadded:: 3.5
"""
CANONICAL_JSON_OPTIONS = JSONOptions(json_mode=JSONMode.CANONICAL)
""":class:`JSONOptions` for Canonical Extended JSON.
.. seealso:: The documentation for :const:`bson.json_util.JSONMode.CANONICAL`.
.. versionadded:: 3.5
"""
RELAXED_JSON_OPTIONS = JSONOptions(json_mode=JSONMode.RELAXED)
""":class:`JSONOptions` for Relaxed Extended JSON.
.. seealso:: The documentation for :const:`bson.json_util.JSONMode.RELAXED`.
.. versionadded:: 3.5
"""
DEFAULT_JSON_OPTIONS = RELAXED_JSON_OPTIONS
"""The default :class:`JSONOptions` for JSON encoding/decoding.
The same as :const:`RELAXED_JSON_OPTIONS`.
.. versionchanged:: 4.0
Changed from :const:`LEGACY_JSON_OPTIONS` to
:const:`RELAXED_JSON_OPTIONS`.
.. versionadded:: 3.4
"""
def dumps(obj, *args, **kwargs):
    """Helper function that wraps :func:`json.dumps`.
    Recursive function that handles all BSON types including
    :class:`~bson.binary.Binary` and :class:`~bson.code.Code`.
    :Parameters:
      - `json_options`: A :class:`JSONOptions` instance used to modify the
        encoding of MongoDB Extended JSON types. Defaults to
        :const:`DEFAULT_JSON_OPTIONS`.
    .. versionchanged:: 4.0
       Now outputs MongoDB Relaxed Extended JSON by default (using
       :const:`DEFAULT_JSON_OPTIONS`).
    .. versionchanged:: 3.4
       Accepts optional parameter `json_options`. See :class:`JSONOptions`.
    """
    opts = kwargs.pop("json_options", DEFAULT_JSON_OPTIONS)
    # Translate BSON types first, then hand off to the stdlib encoder.
    return json.dumps(_json_convert(obj, opts), *args, **kwargs)
def loads(s, *args, **kwargs):
    """Helper function that wraps :func:`json.loads`.
    Automatically passes the object_hook for BSON type conversion.
    Raises ``TypeError``, ``ValueError``, ``KeyError``, or
    :exc:`~bson.errors.InvalidId` on invalid MongoDB Extended JSON.
    :Parameters:
      - `json_options`: A :class:`JSONOptions` instance used to modify the
        decoding of MongoDB Extended JSON types. Defaults to
        :const:`DEFAULT_JSON_OPTIONS`.
    .. versionchanged:: 3.5
       Parses Relaxed and Canonical Extended JSON as well as PyMongo's legacy
       format. Now raises ``TypeError`` or ``ValueError`` when parsing JSON
       type wrappers with values of the wrong type or any extra keys.
    .. versionchanged:: 3.4
       Accepts optional parameter `json_options`. See :class:`JSONOptions`.
    """
    opts = kwargs.pop("json_options", DEFAULT_JSON_OPTIONS)
    # Bind the resolved options into the pairs hook handed to json.loads.
    kwargs["object_pairs_hook"] = (
        lambda pairs: object_pairs_hook(pairs, opts))
    return json.loads(s, *args, **kwargs)
def _json_convert(obj, json_options=DEFAULT_JSON_OPTIONS):
    """Recursively convert BSON types into JSON-serializable equivalents.

    Mappings become :class:`SON` (preserving key order), other iterables
    become lists, and scalars are handed to :func:`default`; anything
    ``default`` rejects is returned unchanged.
    """
    if hasattr(obj, "items"):
        return SON((key, _json_convert(value, json_options))
                   for key, value in obj.items())
    if hasattr(obj, "__iter__") and not isinstance(obj, (str, bytes)):
        return [_json_convert(item, json_options) for item in obj]
    try:
        return default(obj, json_options)
    except TypeError:
        # Plain JSON-native value (int, str, None, ...): pass through.
        return obj
def object_pairs_hook(pairs, json_options=DEFAULT_JSON_OPTIONS):
    """Build the configured document class from pairs, then decode wrappers."""
    document = json_options.document_class(pairs)
    return object_hook(document, json_options)
def object_hook(dct, json_options=DEFAULT_JSON_OPTIONS):
    """Decode one JSON document, converting any recognized Extended JSON
    type wrapper (``$oid``, ``$date``, ``$binary``, ...) into its
    BSON/Python equivalent. Documents matching no wrapper are returned
    unchanged.
    """
    if "$oid" in dct:
        return _parse_canonical_oid(dct)
    # DBRef: a string $ref, an $id, and an optional string $db.
    if (isinstance(dct.get('$ref'), str) and
            "$id" in dct and
            isinstance(dct.get('$db'), (str, type(None)))):
        return _parse_canonical_dbref(dct)
    if "$date" in dct:
        return _parse_canonical_datetime(dct, json_options)
    if "$regex" in dct:
        return _parse_legacy_regex(dct)
    if "$minKey" in dct:
        return _parse_canonical_minkey(dct)
    if "$maxKey" in dct:
        return _parse_canonical_maxkey(dct)
    if "$binary" in dct:
        # "$type" alongside "$binary" marks the legacy (pre-spec) format.
        if "$type" in dct:
            return _parse_legacy_binary(dct, json_options)
        else:
            return _parse_canonical_binary(dct, json_options)
    if "$code" in dct:
        return _parse_canonical_code(dct)
    if "$uuid" in dct:
        return _parse_legacy_uuid(dct, json_options)
    if "$undefined" in dct:
        # BSON "undefined" (deprecated) decodes to None.
        return None
    if "$numberLong" in dct:
        return _parse_canonical_int64(dct)
    if "$timestamp" in dct:
        tsp = dct["$timestamp"]
        return Timestamp(tsp["t"], tsp["i"])
    if "$numberDecimal" in dct:
        return _parse_canonical_decimal128(dct)
    if "$dbPointer" in dct:
        return _parse_canonical_dbpointer(dct)
    if "$regularExpression" in dct:
        return _parse_canonical_regex(dct)
    if "$symbol" in dct:
        return _parse_canonical_symbol(dct)
    if "$numberInt" in dct:
        return _parse_canonical_int32(dct)
    if "$numberDouble" in dct:
        return _parse_canonical_double(dct)
    return dct
def _parse_legacy_regex(doc):
    """Decode a legacy ``{"$regex": ..., "$options": ...}`` wrapper."""
    pattern = doc["$regex"]
    if isinstance(pattern, Regex):
        # Already decoded: this is the $regex *query operator*, not a wrapper.
        return doc
    flags = 0
    # PyMongo always adds $options but some other tools may not.
    option_chars = doc.get("$options", "")
    for opt in option_chars:
        flags |= _RE_OPT_TABLE.get(opt, 0)
    return Regex(pattern, flags)
def _parse_legacy_uuid(doc, json_options):
    """Decode a JSON legacy ``$uuid`` wrapper to a Python UUID (or Binary)."""
    if len(doc) != 1:
        raise TypeError('Bad $uuid, extra field(s): %s' % (doc,))
    value = doc["$uuid"]
    if not isinstance(value, str):
        raise TypeError('$uuid must be a string: %s' % (doc,))
    parsed = uuid.UUID(value)
    if json_options.uuid_representation == UuidRepresentation.UNSPECIFIED:
        # No representation configured: keep the value as Binary.
        return Binary.from_uuid(parsed)
    return parsed
def _binary_or_uuid(data, subtype, json_options):
    """Convert raw bytes + BSON subtype into bytes, Binary, or uuid.UUID."""
    # special handling for UUID
    if subtype in ALL_UUID_SUBTYPES:
        uuid_representation = json_options.uuid_representation
        binary_value = Binary(data, subtype)
        if uuid_representation == UuidRepresentation.UNSPECIFIED:
            # No representation configured: surface the raw Binary.
            return binary_value
        if subtype == UUID_SUBTYPE:
            # Legacy behavior: use STANDARD with binary subtype 4.
            uuid_representation = UuidRepresentation.STANDARD
        elif uuid_representation == UuidRepresentation.STANDARD:
            # subtype == OLD_UUID_SUBTYPE
            # Legacy behavior: STANDARD is the same as PYTHON_LEGACY.
            uuid_representation = UuidRepresentation.PYTHON_LEGACY
        return binary_value.as_uuid(uuid_representation)
    if subtype == 0:
        # Subtype 0 (generic binary) decodes to plain bytes.
        return data
    return Binary(data, subtype)
def _parse_legacy_binary(doc, json_options):
    """Decode a legacy ``$binary``/``$type`` wrapper to Binary/bytes/UUID."""
    raw_type = doc["$type"]
    if isinstance(raw_type, int):
        raw_type = "%02x" % raw_type
        doc["$type"] = raw_type
    subtype = int(raw_type, 16)
    if subtype >= 0xffffff80:  # Handle mongoexport values
        subtype = int(raw_type[6:], 16)
    payload = base64.b64decode(doc["$binary"].encode())
    return _binary_or_uuid(payload, subtype, json_options)
def _parse_canonical_binary(doc, json_options):
    """Decode a canonical ``$binary`` wrapper to Binary/bytes/UUID."""
    wrapper = doc["$binary"]
    b64, subtype = wrapper["base64"], wrapper["subType"]
    # Validate field types first, then reject extra keys.
    if not isinstance(b64, str):
        raise TypeError('$binary base64 must be a string: %s' % (doc,))
    if not isinstance(subtype, str) or len(subtype) > 2:
        raise TypeError('$binary subType must be a string at most 2 '
                        'characters: %s' % (doc,))
    if len(wrapper) != 2:
        raise TypeError('$binary must include only "base64" and "subType" '
                        'components: %s' % (doc,))
    payload = base64.b64decode(b64.encode())
    return _binary_or_uuid(payload, int(subtype, 16), json_options)
def _parse_canonical_datetime(doc, json_options):
    """Decode a JSON datetime to python datetime.datetime.

    Accepts either an ISO-8601 string (mongoexport 2.6+) or a numeric /
    ``$numberLong`` milliseconds-since-epoch value.
    """
    dtm = doc["$date"]
    if len(doc) != 1:
        raise TypeError('Bad $date, extra field(s): %s' % (doc,))
    # mongoexport 2.6 and newer
    if isinstance(dtm, str):
        # Parse offset suffix; order matters — test the longest forms first.
        if dtm[-1] == 'Z':
            dt = dtm[:-1]
            offset = 'Z'
        elif dtm[-6] in ('+', '-') and dtm[-3] == ':':
            # (+|-)HH:MM
            dt = dtm[:-6]
            offset = dtm[-6:]
        elif dtm[-5] in ('+', '-'):
            # (+|-)HHMM
            dt = dtm[:-5]
            offset = dtm[-5:]
        elif dtm[-3] in ('+', '-'):
            # (+|-)HH
            dt = dtm[:-3]
            offset = dtm[-3:]
        else:
            dt = dtm
            offset = ''
        # Parse the optional fractional seconds portion.
        dot_index = dt.rfind('.')
        microsecond = 0
        if dot_index != -1:
            microsecond = int(float(dt[dot_index:]) * 1000000)
            dt = dt[:dot_index]
        aware = datetime.datetime.strptime(
            dt, "%Y-%m-%dT%H:%M:%S").replace(microsecond=microsecond,
                                             tzinfo=utc)
        if offset and offset != 'Z':
            # Convert the parsed wall-clock time back to UTC.
            if len(offset) == 6:
                hours, minutes = offset[1:].split(':')
                secs = (int(hours) * 3600 + int(minutes) * 60)
            elif len(offset) == 5:
                secs = (int(offset[1:3]) * 3600 + int(offset[3:]) * 60)
            elif len(offset) == 3:
                secs = int(offset[1:3]) * 3600
            if offset[0] == "-":
                secs *= -1
            aware = aware - datetime.timedelta(seconds=secs)
        if json_options.tz_aware:
            if json_options.tzinfo:
                aware = aware.astimezone(json_options.tzinfo)
            return aware
        else:
            return aware.replace(tzinfo=None)
    # Numeric $date: milliseconds since the Unix epoch.
    return bson._millis_to_datetime(int(dtm), json_options)
def _parse_canonical_oid(doc):
    """Decode a JSON ``$oid`` wrapper to bson.objectid.ObjectId."""
    if len(doc) != 1:
        raise TypeError('Bad $oid, extra field(s): %s' % (doc,))
    oid_str = doc['$oid']
    return ObjectId(oid_str)
def _parse_canonical_symbol(doc):
"""Decode a JSON symbol to Python string."""
symbol = doc['$symbol']
if len(doc) != 1:
raise TypeError('Bad $symbol, extra field(s): %s' % (doc,))
return str(symbol)
def _parse_canonical_code(doc):
"""Decode a JSON code to bson.code.Code."""
for key in doc:
if key not in ('$code', '$scope'):
raise TypeError('Bad $code, extra field(s): %s' % (doc,))
return Code(doc['$code'], scope=doc.get('$scope'))
def _parse_canonical_regex(doc):
"""Decode a JSON regex to bson.regex.Regex."""
regex = doc['$regularExpression']
if len(doc) != 1:
raise TypeError('Bad $regularExpression, extra field(s): %s' % (doc,))
if len(regex) != 2:
raise TypeError('Bad $regularExpression must include only "pattern"'
'and "options" components: %s' % (doc,))
opts = regex['options']
if not isinstance(opts, str):
raise TypeError('Bad $regularExpression options, options must be '
'string, was type %s' % (type(opts)))
return Regex(regex['pattern'], opts)
def _parse_canonical_dbref(doc):
    """Decode a JSON DBRef to bson.dbref.DBRef.

    Any keys beyond $ref/$id/$db are forwarded as extra DBRef kwargs.
    Works on a shallow copy so the caller's document is not mutated
    (the original implementation popped keys out of ``doc`` in place).
    """
    doc = dict(doc)
    return DBRef(doc.pop('$ref'), doc.pop('$id'),
                 database=doc.pop('$db', None), **doc)
def _parse_canonical_dbpointer(doc):
    """Decode a JSON (deprecated) DBPointer to bson.dbref.DBRef."""
    dbref = doc['$dbPointer']
    if len(doc) != 1:
        raise TypeError('Bad $dbPointer, extra field(s): %s' % (doc,))
    # The inner value was already decoded to a DBRef by object_hook
    # (it matches the $ref/$id shape); validate its constraints here.
    if isinstance(dbref, DBRef):
        dbref_doc = dbref.as_doc()
        # DBPointer must not contain $db in its value.
        if dbref.database is not None:
            raise TypeError(
                'Bad $dbPointer, extra field $db: %s' % (dbref_doc,))
        if not isinstance(dbref.id, ObjectId):
            raise TypeError(
                'Bad $dbPointer, $id must be an ObjectId: %s' % (dbref_doc,))
        # Only $ref and $id may be present.
        if len(dbref_doc) != 2:
            raise TypeError(
                'Bad $dbPointer, extra field(s) in DBRef: %s' % (dbref_doc,))
        return dbref
    else:
        raise TypeError('Bad $dbPointer, expected a DBRef: %s' % (doc,))
def _parse_canonical_int32(doc):
"""Decode a JSON int32 to python int."""
i_str = doc['$numberInt']
if len(doc) != 1:
raise TypeError('Bad $numberInt, extra field(s): %s' % (doc,))
if not isinstance(i_str, str):
raise TypeError('$numberInt must be string: %s' % (doc,))
return int(i_str)
def _parse_canonical_int64(doc):
    """Decode a JSON ``$numberLong`` wrapper to bson.int64.Int64."""
    value = doc['$numberLong']
    if len(doc) != 1:
        raise TypeError('Bad $numberLong, extra field(s): %s' % (doc,))
    return Int64(value)
def _parse_canonical_double(doc):
"""Decode a JSON double to python float."""
d_str = doc['$numberDouble']
if len(doc) != 1:
raise TypeError('Bad $numberDouble, extra field(s): %s' % (doc,))
if not isinstance(d_str, str):
raise TypeError('$numberDouble must be string: %s' % (doc,))
return float(d_str)
def _parse_canonical_decimal128(doc):
    """Decode a JSON ``$numberDecimal`` to bson.decimal128.Decimal128."""
    value = doc['$numberDecimal']
    if len(doc) != 1:
        raise TypeError('Bad $numberDecimal, extra field(s): %s' % (doc,))
    if not isinstance(value, str):
        raise TypeError('$numberDecimal must be string: %s' % (doc,))
    return Decimal128(value)
def _parse_canonical_minkey(doc):
"""Decode a JSON MinKey to bson.min_key.MinKey."""
if type(doc['$minKey']) is not int or doc['$minKey'] != 1:
raise TypeError('$minKey value must be 1: %s' % (doc,))
if len(doc) != 1:
raise TypeError('Bad $minKey, extra field(s): %s' % (doc,))
return MinKey()
def _parse_canonical_maxkey(doc):
"""Decode a JSON MaxKey to bson.max_key.MaxKey."""
if type(doc['$maxKey']) is not int or doc['$maxKey'] != 1:
raise TypeError('$maxKey value must be 1: %s', (doc,))
if len(doc) != 1:
raise TypeError('Bad $minKey, extra field(s): %s' % (doc,))
return MaxKey()
def _encode_binary(data, subtype, json_options):
    """Encode raw bytes + subtype as a legacy or canonical $binary document."""
    b64 = base64.b64encode(data).decode()
    hex_subtype = "%02x" % subtype
    if json_options.json_mode == JSONMode.LEGACY:
        return SON([('$binary', b64), ('$type', hex_subtype)])
    return {'$binary': SON([('base64', b64), ('subType', hex_subtype)])}
def default(obj, json_options=DEFAULT_JSON_OPTIONS):
    """Convert one BSON/Python value into its Extended JSON representation.

    Raises ``TypeError`` for values with no special representation, which
    lets :func:`_json_convert` fall back to returning the value unchanged.
    """
    # We preserve key order when rendering SON, DBRef, etc. as JSON by
    # returning a SON for those types instead of a dict.
    if isinstance(obj, ObjectId):
        return {"$oid": str(obj)}
    if isinstance(obj, DBRef):
        return _json_convert(obj.as_doc(), json_options=json_options)
    if isinstance(obj, datetime.datetime):
        if (json_options.datetime_representation ==
                DatetimeRepresentation.ISO8601):
            if not obj.tzinfo:
                # Naive datetimes are treated as UTC.
                obj = obj.replace(tzinfo=utc)
            if obj >= EPOCH_AWARE:
                off = obj.tzinfo.utcoffset(obj)
                if (off.days, off.seconds, off.microseconds) == (0, 0, 0):
                    tz_string = 'Z'
                else:
                    tz_string = obj.strftime('%z')
                millis = int(obj.microsecond / 1000)
                fracsecs = ".%03d" % (millis,) if millis else ""
                return {"$date": "%s%s%s" % (
                    obj.strftime("%Y-%m-%dT%H:%M:%S"), fracsecs, tz_string)}
        # Pre-epoch (or non-ISO8601 modes): encode as millis since epoch.
        millis = bson._datetime_to_millis(obj)
        if (json_options.datetime_representation ==
                DatetimeRepresentation.LEGACY):
            return {"$date": millis}
        return {"$date": {"$numberLong": str(millis)}}
    if json_options.strict_number_long and isinstance(obj, Int64):
        return {"$numberLong": str(obj)}
    if isinstance(obj, (RE_TYPE, Regex)):
        flags = ""
        if obj.flags & re.IGNORECASE:
            flags += "i"
        if obj.flags & re.LOCALE:
            flags += "l"
        if obj.flags & re.MULTILINE:
            flags += "m"
        if obj.flags & re.DOTALL:
            flags += "s"
        if obj.flags & re.UNICODE:
            flags += "u"
        if obj.flags & re.VERBOSE:
            flags += "x"
        if isinstance(obj.pattern, str):
            pattern = obj.pattern
        else:
            pattern = obj.pattern.decode('utf-8')
        if json_options.json_mode == JSONMode.LEGACY:
            return SON([("$regex", pattern), ("$options", flags)])
        return {'$regularExpression': SON([("pattern", pattern),
                                           ("options", flags)])}
    if isinstance(obj, MinKey):
        return {"$minKey": 1}
    if isinstance(obj, MaxKey):
        return {"$maxKey": 1}
    if isinstance(obj, Timestamp):
        return {"$timestamp": SON([("t", obj.time), ("i", obj.inc)])}
    if isinstance(obj, Code):
        if obj.scope is None:
            return {'$code': str(obj)}
        return SON([
            ('$code', str(obj)),
            ('$scope', _json_convert(obj.scope, json_options))])
    if isinstance(obj, Binary):
        return _encode_binary(obj, obj.subtype, json_options)
    if isinstance(obj, bytes):
        return _encode_binary(obj, 0, json_options)
    if isinstance(obj, uuid.UUID):
        if json_options.strict_uuid:
            binval = Binary.from_uuid(
                obj, uuid_representation=json_options.uuid_representation)
            return _encode_binary(binval, binval.subtype, json_options)
        else:
            return {"$uuid": obj.hex}
    if isinstance(obj, Decimal128):
        return {"$numberDecimal": str(obj)}
    if isinstance(obj, bool):
        # bool is a subclass of int: must be checked before the int branch.
        return obj
    if (json_options.json_mode == JSONMode.CANONICAL and
            isinstance(obj, int)):
        if -2 ** 31 <= obj < 2 ** 31:
            return {'$numberInt': str(obj)}
        return {'$numberLong': str(obj)}
    if json_options.json_mode != JSONMode.LEGACY and isinstance(obj, float):
        if math.isnan(obj):
            return {'$numberDouble': 'NaN'}
        elif math.isinf(obj):
            representation = 'Infinity' if obj > 0 else '-Infinity'
            return {'$numberDouble': representation}
        elif json_options.json_mode == JSONMode.CANONICAL:
            # repr() will return the shortest string guaranteed to produce the
            # original value, when float() is called on it.
            return {'$numberDouble': str(repr(obj))}
    raise TypeError("%r is not JSON serializable" % obj)
|
# coding=utf-8
# noinspection PyUnresolvedReferences
from chatcommunicate import add_room, block_room, CmdException, command, get_report_data, is_privileged, message, \
tell_rooms, tell_rooms_with, get_message
# noinspection PyUnresolvedReferences
from globalvars import GlobalVars
import findspam
# noinspection PyUnresolvedReferences
from datetime import datetime
from apigetpost import api_get_post, PostData
import datahandling
from datahandling import *
from metasmoke import Metasmoke
from blacklists import load_blacklists
from parsing import *
from spamhandling import check_if_spam, handle_spam
from gitmanager import GitManager
import threading
import random
import requests
import sys
import os
import time
from html import unescape
from ast import literal_eval
# noinspection PyCompatibility
import regex
from helpers import only_blacklists_changed, only_modules_changed, log, expand_shorthand_link, reload_modules
from classes import Post
from classes.feedback import *
# TODO: Do we need uid == -2 check? Turn into "is_user_valid" check
#
#
# System command functions below here
# This "null" command is just bypass for the "unrecognized command" message,
# so that pingbot can respond instead.
@command(aliases=['ping-help', 'groups'])
def null():
    """Deliberately do nothing, so pingbot can answer instead of Smokey
    emitting an "unrecognized command" message."""
    return
# --- Blacklist Functions --- #
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(str, whole_msg=True, privileged=True)
def addblu(msg, user):
    """
    Adds a user to site blacklist
    :param msg: ChatExchange message
    :param user:
    :return: A string
    """
    uid, val = get_user_from_list_command(user)
    if int(uid) == -2:
        raise CmdException("Error: {}".format(val))
    if int(uid) > -1 and val != "":
        message_url = "https://chat.{}/transcript/{}?m={}".format(msg._client.host, msg.room.id, msg.id)
        add_blacklisted_user((uid, val), message_url, "")
        return "User blacklisted (`{}` on `{}`).".format(uid, val)
    raise CmdException("Invalid format. Valid format: `!!/addblu profileurl` *or* `!!/addblu userid sitename`.")
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(str)
def isblu(user):
    """
    Check if a user is blacklisted
    :param user:
    :return: A string
    """
    uid, val = get_user_from_list_command(user)
    if int(uid) == -2:
        raise CmdException("Error: {}".format(val))
    if int(uid) > -1 and val != "":
        if is_blacklisted_user((uid, val)):
            template = "User is blacklisted (`{}` on `{}`)."
        else:
            template = "User is not blacklisted (`{}` on `{}`)."
        return template.format(uid, val)
    raise CmdException("Invalid format. Valid format: `!!/isblu profileurl` *or* `!!/isblu userid sitename`.")
# noinspection PyIncorrectDocstring,PyUnusedLocal
@command(str, privileged=True)
def rmblu(user):
    """
    Removes user from site blacklist
    :param user:
    :return: A string
    """
    uid, val = get_user_from_list_command(user)
    if int(uid) == -2:
        raise CmdException("Error: {}".format(val))
    if int(uid) > -1 and val != "":
        if remove_blacklisted_user((uid, val)):
            return "User removed from blacklist (`{}` on `{}`).".format(uid, val)
        return "User is not blacklisted."
    raise CmdException("Invalid format. Valid format: `!!/rmblu profileurl` *or* `!!/rmblu userid sitename`.")
# --- Whitelist functions --- #
# noinspection PyIncorrectDocstring,PyUnusedLocal,PyMissingTypeHints
@command(str, privileged=True)
def addwlu(user):
    """
    Adds a user to site whitelist
    :param user:
    :return: A string
    """
    uid, val = get_user_from_list_command(user)
    if int(uid) == -2:
        raise CmdException("Error: {}".format(val))
    if int(uid) > -1 and val != "":
        add_whitelisted_user((uid, val))
        return "User whitelisted (`{}` on `{}`).".format(uid, val)
    raise CmdException("Invalid format. Valid format: `!!/addwlu profileurl` *or* `!!/addwlu userid sitename`.")
# noinspection PyIncorrectDocstring,PyUnusedLocal,PyMissingTypeHints
@command(str)
def iswlu(user):
    """
    Checks if a user is whitelisted
    :param user:
    :return: A string
    """
    uid, val = get_user_from_list_command(user)
    if int(uid) == -2:
        raise CmdException("Error: {}".format(val))
    if int(uid) > -1 and val != "":
        if is_whitelisted_user((uid, val)):
            template = "User is whitelisted (`{}` on `{}`)."
        else:
            template = "User is not whitelisted (`{}` on `{}`)."
        return template.format(uid, val)
    raise CmdException("Invalid format. Valid format: `!!/iswlu profileurl` *or* `!!/iswlu userid sitename`.")
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(str, privileged=True)
def rmwlu(user):
    """
    Removes a user from site whitelist
    :param user:
    :return: A string
    """
    uid, val = get_user_from_list_command(user)
    # Bug fix: this previously tested `int(uid) != -1`, which also matched
    # the error sentinel -2 and made the error branch below unreachable.
    # Use `> -1`, consistent with addwlu/iswlu/addblu/isblu/rmblu.
    if int(uid) > -1 and val != "":
        if remove_whitelisted_user((uid, val)):
            return "User removed from whitelist (`{}` on `{}`).".format(uid, val)
        else:
            return "User is not whitelisted."
    elif int(uid) == -2:
        raise CmdException("Error: {}".format(val))
    else:
        raise CmdException("Invalid format. Valid format: `!!/rmwlu profileurl` *or* `!!/rmwlu userid sitename`.")
# noinspection PyIncorrectDocstring
@command(str)
def blacklist(_):
    """
    Returns a string which explains the usage of the new blacklist commands.
    :return: A string
    """
    deprecation_msg = ("The !!/blacklist command has been deprecated. "
                       "Please use !!/blacklist-website, !!/blacklist-username,"
                       "!!/blacklist-keyword, or perhaps !!/watch-keyword. "
                       "Remember to escape dots in URLs using \\.")
    raise CmdException(deprecation_msg)
def check_blacklist(string_to_test, is_username, is_watchlist, is_phone):
    """Run the spam checks against a candidate pattern and return the list
    of reasons that already catch it (with watchlist-only and
    phone-irrelevant reasons filtered out as appropriate)."""
    # Build a fake question and answer containing the test string either as
    # the author's username or as the post body.
    if is_username:
        owner = {'display_name': string_to_test, 'reputation': 1, 'link': ''}
        body = 'Valid body'
    else:
        owner = {'display_name': "Valid username", 'reputation': 1, 'link': ''}
        body = string_to_test
    posts = [Post(api_response={'title': 'Valid title', 'body': body,
                                'owner': owner, 'site': "",
                                'IsAnswer': is_answer, 'score': 0})
             for is_answer in (False, True)]
    # Deduplicate reasons across the question and answer checks.
    reason_sets = [set(findspam.FindSpam.test_post(post)[0]) for post in posts]
    reasons = list(reason_sets[0] | reason_sets[1])
    # Always ignore watchlist-style network reasons.
    filter_out = ["potentially bad ns", "potentially bad asn"]
    if not is_watchlist:
        filter_out.append("potentially bad keyword")
    # Ignore "Mostly non-latin body/answer" etc. for phone number watches.
    if is_phone:
        filter_out.extend(["mostly non-latin", "phone number detected",
                           "messaging number detected"])
    if filter_out:
        reasons = [reason for reason in reasons
                   if all(x not in reason.lower() for x in filter_out)]
    return reasons
def format_blacklist_reasons(reasons):
    """Join capitalized reasons into an English list ("A, B, and C")."""
    capitalized = [reason.capitalize() for reason in reasons]
    if len(capitalized) < 3:
        return " and ".join(capitalized)
    return "{}, and {}".format(", ".join(capitalized[:-1]), capitalized[-1])
def do_blacklist(blacklist_type, msg, force=False):
    """
    Adds a string to the website blacklist and commits/pushes to GitHub
    :param blacklist_type: Which list to modify (e.g. "website", "keyword",
                           "watch_keyword", "watch_number")
    :param msg: The chat message that triggered the command; the pattern is
                re-read from its raw source text
    :param force: Skip the "already caught" safety checks
    :return: A string (or None when a silent hot reload succeeded)
    """
    chat_user_profile_link = "https://chat.{host}/users/{id}".format(host=msg._client.host,
                                                                     id=msg.owner.id)
    # Use the raw message source so chat markdown doesn't mangle the pattern.
    pattern = rebuild_str(msg.content_source.split(" ", 1)[1])
    if "number" not in blacklist_type:
        try:
            # `city=` supplies the named list for \L<city> pattern references
            # (python `regex` module named-list feature).
            r = regex.compile(pattern, city=findspam.FindSpam.city_list)
        except regex._regex_core.error:
            raise CmdException("An invalid pattern was provided, please check your command.")
        if r.search(GlobalVars.valid_content):
            # The pattern matches known-good content: almost certainly too broad.
            raise CmdException("That pattern is probably too broad, refusing to commit.")
    if not force:
        # Heuristic: does the pattern look like a phone number watch?
        if "number" in blacklist_type or \
                regex.match(r'(?:\[a-z_]\*)?(?:\(\?:)?\d+(?:[][\\W_*()?:]+\d+)+(?:\[a-z_]\*)?$', pattern):
            is_phone = True
        else:
            is_phone = False
        is_watchlist = bool("watch" in blacklist_type)
        # Turn the regex into a plausible concrete string so it can be run
        # through the spam checks as if it were post content.
        concretized_pattern = pattern.replace("\\W", "-").replace("\\.", ".").replace("\\d", "8")
        concretized_pattern = regex.sub(r"[+*?][+?]?|\{\d*(?:,\d*)?\}", "", concretized_pattern)
        for username in False, True:
            reasons = check_blacklist(
                concretized_pattern, is_username=username, is_watchlist=is_watchlist, is_phone=is_phone)
            if reasons:
                raise CmdException(
                    "That pattern looks like it's already caught by " +
                    format_blacklist_reasons(reasons) +
                    "; append `-force` if you really want to do that.")
    metasmoke_down = False
    try:
        code_permissions = is_code_privileged(msg._client.host, msg.owner.id)
    except (requests.exceptions.ConnectionError, ValueError, TypeError):
        code_permissions = False  # Because we need the system to assume that we don't have code privs.
        metasmoke_down = True
    _status, result = GitManager.add_to_blacklist(
        blacklist=blacklist_type,
        item_to_blacklist=pattern,
        username=msg.owner.name,
        chat_profile_link=chat_user_profile_link,
        code_permissions=code_permissions,
        metasmoke_down=metasmoke_down
    )
    if not _status:
        raise CmdException(result)
    # If only blacklist files changed, hot-reload instead of a full restart.
    if code_permissions and only_blacklists_changed(GitManager.get_local_diff()):
        try:
            if not GlobalVars.on_master:
                # Restart if HEAD detached
                log('warning', "Pulling local with HEAD detached, checkout deploy", f=True)
                os._exit(8)
            GitManager.pull_local()
            GlobalVars.reload()
            findspam.FindSpam.reload_blacklists()
            tell_rooms_with('debug', GlobalVars.s_norestart)
            time.sleep(2)
            return None
        except Exception:
            # Best-effort hot reload; fall through to returning the result.
            pass
    return result
# noinspection PyIncorrectDocstring
@command(str, whole_msg=True, privileged=True, give_name=True, aliases=["blacklist-keyword",
                                                                        "blacklist-website",
                                                                        "blacklist-username",
                                                                        "blacklist-number",
                                                                        "blacklist-keyword-force",
                                                                        "blacklist-website-force",
                                                                        "blacklist-username-force",
                                                                        "blacklist-number-force"])
def blacklist_keyword(msg, pattern, alias_used="blacklist-keyword"):
    """
    Adds a pattern to the blacklist and commits/pushes to GitHub
    :param msg:
    :param pattern:
    :return: A string
    """
    # Alias shape: blacklist-<type>[-force]
    parts = alias_used.split("-")
    list_name = parts[1]
    force = len(parts) > 2
    return do_blacklist(list_name, msg, force=force)
# noinspection PyIncorrectDocstring
@command(str, whole_msg=True, privileged=True, give_name=True,
         aliases=["watch-keyword", "watch-force", "watch-keyword-force",
                  "watch-number", "watch-number-force"])
def watch(msg, pattern, alias_used="watch"):
    """
    Adds a pattern to the watched keywords list and commits/pushes to GitHub
    :param msg:
    :param pattern:
    :return: A string
    """
    if "number" in alias_used:
        list_name = "watch_number"
    else:
        list_name = "watch_keyword"
    force = alias_used.split("-")[-1] == "force"
    return do_blacklist(list_name, msg, force=force)
@command(str, whole_msg=True, privileged=True, give_name=True, aliases=["unwatch"])
def unblacklist(msg, item, alias_used="unwatch"):
    """
    Removes a pattern from watchlist/blacklist and commits/pushes to GitHub
    :param msg:
    :param item: The pattern to remove (re-read from the raw message source)
    :return: A string (or None when a silent hot reload succeeded)
    """
    if alias_used == "unwatch":
        blacklist_type = "watch"
    elif alias_used == "unblacklist":
        blacklist_type = "blacklist"
    else:
        raise CmdException("Invalid blacklist type.")
    metasmoke_down = False
    try:
        code_privs = is_code_privileged(msg._client.host, msg.owner.id)
    except (requests.exceptions.ConnectionError, ValueError):
        # Metasmoke unreachable: assume no code privileges.
        code_privs = False
        metasmoke_down = True
    # Re-read the raw message so chat markdown doesn't mangle the pattern;
    # `item` (the parsed argument) is deliberately unused.
    pattern = msg.content_source.split(" ", 1)[1]
    _status, result = GitManager.remove_from_blacklist(
        rebuild_str(pattern), msg.owner.name, blacklist_type,
        code_privileged=code_privs, metasmoke_down=metasmoke_down)
    if not _status:
        raise CmdException(result)
    # If only blacklist files changed, hot-reload instead of a full restart.
    if only_blacklists_changed(GitManager.get_local_diff()):
        try:
            if not GlobalVars.on_master:
                # Restart if HEAD detached
                log('warning', "Pulling local with HEAD detached, checkout deploy", f=True)
                os._exit(8)
            GitManager.pull_local()
            GlobalVars.reload()
            findspam.FindSpam.reload_blacklists()
            tell_rooms_with('debug', GlobalVars.s_norestart)
            time.sleep(2)
            return None
        except Exception:
            # Best-effort hot reload; fall through to returning the result.
            pass
    return result
@command(int, privileged=True, whole_msg=True)
def approve(msg, pr_id):
    """
    Approves and merges a GitHub pull request from chat.
    :param msg:
    :param pr_id: The pull request number to merge
    :return: A string (or None when a silent hot reload succeeded)
    """
    code_permissions = is_code_privileged(msg._client.host, msg.owner.id)
    if not code_permissions:
        raise CmdException("You need code privileges to approve pull requests")
    # Forward this, because checks are better placed in gitmanager.py
    try:
        message_url = "https://chat.{}/transcript/{}?m={}".format(msg._client.host, msg.room.id, msg.id)
        comment = "{} [approved]({}) this PR in {}\n\n{}".format(
            msg.owner.name, message_url, msg.room.name,
            # The image of (code-admins|approved) from PullApprove
            "https://camo.githubusercontent.com/18c997a6b1ac764dfd43963f5071d03a3c7fc97b/68747470733a2f2f696d672e7368"
            "69656c64732e696f2f62616467652f636f64652d2d61646d696e732d617070726f7665642d627269676874677265656e2e737667")
        # NOTE(review): this local `message` shadows the `message` name
        # imported from chatcommunicate at the top of the file.
        message = GitManager.merge_pull_request(pr_id, comment)
        # If only blacklist files changed, hot-reload instead of a restart.
        if only_blacklists_changed(GitManager.get_local_diff()):
            try:
                if not GlobalVars.on_master:
                    # Restart if HEAD detached
                    log('warning', "Pulling local with HEAD detached, checkout deploy", f=True)
                    os._exit(8)
                GitManager.pull_local()
                GlobalVars.reload()
                findspam.FindSpam.reload_blacklists()
                tell_rooms_with('debug', GlobalVars.s_norestart)
                time.sleep(2)
                return None
            except Exception:
                # Best-effort hot reload; fall through to returning the result.
                pass
        return message
    except Exception as e:
        raise CmdException(str(e))
@command(privileged=True, aliases=["remote-diff", "remote_diff"])
def remotediff():
    """
    Shows the diff against the remote, plus whether pulling it would need a
    full restart (anything other than blacklist-only changes requires one).
    :return: A string
    """
    # Fetch the diff once: the previous version called get_remote_diff()
    # twice, doing the work twice and racing against concurrent pushes.
    diff = GitManager.get_remote_diff()
    will_require_full_restart = "SmokeDetector will require a full restart to pull changes: " \
                                "{}".format(str(not only_blacklists_changed(diff)))
    return "{}\n\n{}".format(diff, will_require_full_restart)
# --- Joke Commands --- #
@command(whole_msg=True)
def blame(msg):
    """Blame a random user currently in the room (joke command)."""
    victim_id = random.choice(msg.room.get_current_user_ids())
    victim = msg._client.get_user(victim_id)
    return "It's [{}](https://chat.{}/users/{})'s fault.".format(
        victim.name, msg._client.host, victim.id)
@command(str, whole_msg=True, aliases=["blame\u180E"])
def blame2(msg, x):
    """Blame a specific user whose id is encoded base-7 in invisible
    characters appended to the command (joke command)."""
    base = {"\u180E": 0, "\u200B": 1, "\u200C": 2, "\u200D": 3, "\u2060": 4, "\u2063": 5, "\uFEFF": 6}
    # Horner's rule: same value as summing digit * 7**position.
    user = 0
    for char in x:
        user = user * len(base) + base[char]
    try:
        unlucky_victim = msg._client.get_user(user)
    except requests.exceptions.HTTPError:
        # Unknown user id: blame the sender instead.
        unlucky_victim = msg.owner
    return "It's [{}](https://chat.{}/users/{})'s fault.".format(
        unlucky_victim.name, msg._client.host, unlucky_victim.id)
# noinspection PyIncorrectDocstring
@command()
def brownie():
    """
    Joke command: responds to !!/brownie.
    :return: The string "Brown!"
    """
    return "Brown!"
# Coffee varieties served by the joke command below.
COFFEES = ['Espresso', 'Macchiato', 'Ristretto', 'Americano', 'Latte', 'Cappuccino', 'Mocha', 'Affogato', 'jQuery']
# noinspection PyIncorrectDocstring
@command(str, whole_msg=True, arity=(0, 1))
def coffee(msg, other_user):
    """
    Returns a string stating who the coffee is for (This is a joke command)
    :param msg:
    :param other_user:
    :return: A string
    """
    if other_user is None:
        recipient = msg.owner.name.replace(" ", "")
    else:
        # Strip a leading @ and anything after the first word.
        recipient = regex.sub(r'^@*|\b\s.{1,}', '', other_user)
    return "*brews a cup of {} for @{}*".format(random.choice(COFFEES), recipient)
# noinspection PyIncorrectDocstring
@command()
def lick():
    """
    Joke command: responds to !!/lick.
    :return: The string "*licks ice cream cone*"
    """
    return "*licks ice cream cone*"
# Tea varieties served by the joke command below.
TEAS = ['earl grey', 'green', 'chamomile', 'lemon', 'darjeeling', 'mint', 'jasmine', 'passionfruit']
# noinspection PyIncorrectDocstring
@command(str, whole_msg=True, arity=(0, 1))
def tea(msg, other_user):
    """
    Returns a string stating who the tea is for (This is a joke command)
    :param msg:
    :param other_user:
    :return: A string
    """
    if other_user is None:
        recipient = msg.owner.name.replace(" ", "")
    else:
        # Strip a leading @ and anything after the first word.
        recipient = regex.sub(r'^@*|\b\s.{1,}', '', other_user)
    return "*brews a cup of {} tea for @{}*".format(random.choice(TEAS), recipient)
# noinspection PyIncorrectDocstring
@command()
def wut():
    """
    Joke command: responds to !!/wut.
    :return: A string
    """
    return "Whaddya mean, 'wut'? Humans..."
def _countdown_phrase(diff):
    """Format a timedelta as "D day(s), H hour(s), M minute(s), and S second(s)"."""
    hours, remainder = divmod(diff.seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    parts = []
    for value, noun in ((diff.days, "day"), (hours, "hour"),
                        (minutes, "minute"), (seconds, "second")):
        parts.append("{} {}".format(value, noun if value == 1 else noun + "s"))
    return "{}, {}, {}, and {}".format(*parts)
@command(aliases=["zomg_hats"])
def hats():
    """
    Countdown to the start/end of Winter Bash (hat season).
    :return: A string (empty outside the countdown windows)
    """
    # Refactor: the pluralized countdown formatting was duplicated verbatim
    # for both branches; it now lives in _countdown_phrase.
    wb_start = datetime(2017, 12, 13, 0, 0, 0)
    wb_end = datetime(2018, 1, 3, 0, 0, 0)
    now = datetime.utcnow()
    if wb_start > now:
        return "WE LOVE HATS! Winter Bash will begin in {}.".format(
            _countdown_phrase(wb_start - now))
    if wb_end > now:
        return "Winter Bash won't end for {}. GO EARN SOME HATS!".format(
            _countdown_phrase(wb_end - now))
    return ""
# --- Block application from posting functions --- #
# noinspection PyIncorrectDocstring
@command(int, int, whole_msg=True, privileged=True, arity=(1, 2))
def block(msg, block_time, room_id):
    """
    Blocks posts from application for a period of time
    :param msg:
    :param block_time: Seconds to block for (sane range 1-14399)
    :param room_id: Room to block, or None for a global block
    :return: None
    """
    if 0 < block_time < 14400:
        time_to_block = block_time
    else:
        # Out-of-range request: fall back to 15 minutes.
        time_to_block = 900
    if room_id is None:
        which_room = "globally"
    else:
        which_room = "in room {} on {}".format(room_id, msg._client.host)
    block_message = "Reports blocked for {} second(s) {}.".format(time_to_block, which_room)
    tell_rooms(block_message, ((msg._client.host, msg.room.id), "debug", "metatavern"), ())
    block_room(room_id, msg._client.host, time.time() + time_to_block)
# noinspection PyIncorrectDocstring,PyUnusedLocal
@command(int, int, whole_msg=True, privileged=True, arity=(1, 2))
def unblock(msg, room_id):
    """
    Unblocks posting to a room
    :param msg:
    :param room_id: Room to unblock, or None for the global block
    :return: None
    """
    # NOTE(review): the decorator declares two int arguments with
    # arity (1, 2), but the function only accepts room_id — compare with
    # sibling `block(msg, block_time, room_id)` and confirm against the
    # @command implementation in chatcommunicate.
    # A negative expiry timestamp lifts the block immediately.
    block_room(room_id, msg._client.host, -1)
    which_room = "globally" if room_id is None else "in room {} on {}".format(room_id, msg._client.host)
    unblock_message = "Reports unblocked {}.".format(which_room)
    tell_rooms(unblock_message, ((msg._client.host, msg.room.id), "debug", "metatavern"), ())
# --- Administration Commands --- #
# Canned responses for the liveness check below; one is picked at random.
ALIVE_MSG = [
    'Yup', 'You doubt me?', 'Of course', '... did I miss something?', 'plz send teh coffee',
    'Watching this endless list of new questions *never* gets boring', 'Kinda sorta',
    'You should totally drop that and use jQuery', r'¯\\_(ツ)\_/¯',
]
# noinspection PyIncorrectDocstring
@command(aliases=["live"])
def alive():
    """
    Returns a string indicating the process is still active
    :return: A string
    """
    return random.choice(ALIVE_MSG)
# noinspection PyIncorrectDocstring
@command(int, privileged=True, arity=(0, 1), aliases=["errlogs", "errlog", "errorlog"])
def errorlogs(count):
    """
    Shows the most recent lines in the error logs

    :param count: number of lines to fetch; defaults to 50 when omitted
    :return: A string
    """
    line_count = count if count else 50
    return fetch_lines_from_error_log(line_count)
@command(whole_msg=True, aliases=["ms-status", "ms-down", "ms-up"], give_name=True)
def metasmoke(msg, alias_used):
    """Report metasmoke's up/down state, or (privileged) override it."""
    if alias_used in {"metasmoke", "ms-status"}:
        if GlobalVars.metasmoke_down:
            return "metasmoke is down. Current failure count: {}".format(GlobalVars.metasmoke_failures)
        return "metasmoke is up. Current failure count: {}".format(GlobalVars.metasmoke_failures)

    # The next aliases/functionalities require privilege
    if not is_privileged(msg.owner, msg.room):
        raise CmdException(GlobalVars.not_privileged_warning)

    if alias_used == "ms-down":
        GlobalVars.metasmoke_down = True
        GlobalVars.metasmoke_failures = 999
        return "metasmoke is now considered down."
    if alias_used == "ms-up":
        GlobalVars.metasmoke_down = False
        GlobalVars.metasmoke_failures = 0
        return "metasmoke is now considered up."
    raise CmdException("Bad command alias. Blame a developer.")
# noinspection PyIncorrectDocstring
@command(aliases=["commands", "help"])
def info():
    """
    Returns the help text

    :return: A string
    """
    return ("I'm " + GlobalVars.chatmessage_prefix +
            " a bot that detects spam and offensive posts on the network and"
            " posts alerts to chat."
            " [A command list is available here](https://charcoal-se.org/smokey/Commands).")
# noinspection PyIncorrectDocstring
@command(str, whole_msg=True, arity=(0, 1))
def welcome(msg, other_user):
    """
    Returns the welcome text

    :param msg:
    :param other_user: optional name of another user to greet
    :return: A string (delivered by raising CmdException)
    """
    w_msg = ("Welcome to {room}{user}! I'm {me}, a bot that detects spam and offensive posts on the network, "
             "and posts alerts to chat. You can find more about me on the "
             "[Charcoal website](https://charcoal-se.org/).")
    if other_user is None:
        user_part = ""
    else:
        # Strip any leading '@' run and everything from the first whitespace on.
        user_part = " @" + regex.sub(r'^@*|\b\s.{1,}', '', other_user)
    raise CmdException(w_msg.format(room=msg.room.name, user=user_part, me=GlobalVars.chatmessage_prefix))
# noinspection PyIncorrectDocstring
@command()
def location():
    """
    Returns the current location the application is running from

    :return: A string naming the current host/location
    """
    return GlobalVars.location
# noinspection PyIncorrectDocstring,PyProtectedMember
@command(privileged=True)
def master():
    """
    Forces a system exit with exit code = 8

    :return: None (the process terminates)
    """
    # NOTE(review): exit code 8 presumably tells the supervising script to switch
    # to the master branch -- confirm against the launcher script.
    os._exit(8)
# noinspection PyIncorrectDocstring,PyProtectedMember
@command(privileged=True)
def pull():
    """
    Pull an update from GitHub

    :return: String on failure, None on success
    """
    remote_diff = GitManager.get_remote_diff()

    # Blacklist-only changes can be applied without restarting.
    if only_blacklists_changed(remote_diff):
        GitManager.pull_remote()
        findspam.FindSpam.reload_blacklists()
        GlobalVars.reload()
        tell_rooms_with('debug', GlobalVars.s_norestart)
        return

    # Otherwise only pull when CI on the deploy branch has succeeded.
    ref_response = requests.get('https://api.github.com/repos/{}/git/refs/heads/deploy'.format(
        GlobalVars.bot_repo_slug))
    latest_sha = ref_response.json()["object"]["sha"]
    status_response = requests.get(
        'https://api.github.com/repos/{}/commits/{}/statuses'.format(
            GlobalVars.bot_repo_slug, latest_sha))
    states = [ci_status["state"] for ci_status in status_response.json()]

    if "success" in states:
        if only_modules_changed(remote_diff):
            # Module-only changes can be hot-reloaded without a restart.
            GitManager.pull_remote()
            reload_modules()
            GlobalVars.reload()
            tell_rooms_with('debug', GlobalVars.s_norestart2)
            return
        # NOTE(review): exit code 3 presumably signals the supervisor to pull and restart.
        os._exit(3)
    elif "error" in states or "failure" in states:
        raise CmdException("CI build failed! :( Please check your commit.")
    elif "pending" in states or not states:
        raise CmdException("CI build is still pending, wait until the build has finished and then pull again.")
@command(whole_msg=True, aliases=['pull-sync'])
def sync_remote(msg):
    """
    Force a branch sync from origin/master with [git branch -M]

    :param msg:
    :return: A string containing a response message
    """
    if is_code_privileged(msg._client.host, msg.owner.id):
        return GitManager.sync_remote()[1]
    raise CmdException("You don't have code privileges to run this command.")
@command(privileged=True, give_name=True, aliases=[
    "gitstatus", "git-status", "git-merge-abort", "git-reset", "git-help"
])
def git(alias_used="git"):
    """
    Git maintenance subcommands, dispatched on the alias used.

    Fixed: the help text advertised git-merge-abort and git-reset, but those
    aliases were never registered, so their branches were unreachable.

    :param alias_used: which alias invoked the command
    :return: A string
    """
    if alias_used == "git":
        raise CmdException("Bad alias. Try another command")
    if alias_used == "git-help":
        return "Available commands: git-help, git-status, git-merge-abort, git-reset"

    alias_used = alias_used.replace("-", "")
    if alias_used == "gitstatus":
        return GitManager.current_git_status()
    elif alias_used == "gitmergeabort":
        return GitManager.merge_abort()
    elif alias_used == "gitreset":
        return GitManager.reset_head()
# noinspection PyIncorrectDocstring,PyProtectedMember
@command(whole_msg=True, privileged=True, give_name=True, aliases=["restart", "reload"])
def reboot(msg, alias_used="reboot"):
    """
    Forces a system exit with exit code = 5

    :param msg:
    :param alias_used: "reboot"/"restart" exit the process; "reload" hot-reloads modules
    :return: None
    """
    if alias_used == "reload":
        reload_modules()
        tell_rooms_with('debug', GlobalVars.s_norestart2)
        time.sleep(3)
        return
    if alias_used in {"reboot", "restart"}:
        tell_rooms("Goodbye, cruel world", ("debug", (msg._client.host, msg.room.id)), ())
        time.sleep(3)
        # NOTE(review): exit code 5 presumably signals the supervisor to restart the bot.
        os._exit(5)
    raise RuntimeError("Invalid alias!")
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(whole_msg=True)
def amiprivileged(msg):
    """
    Tells user whether or not they have privileges

    :param msg:
    :return: A string
    """
    if not is_privileged(msg.owner, msg.room):
        return "\u2573 " + GlobalVars.not_privileged_warning
    return "\u2713 You are a privileged user."
# noinspection PyIncorrectDocstring,
@command(whole_msg=True)
def amicodeprivileged(msg):
    """
    Tells user whether or not they have code privileges

    :param msg:
    :return: A string
    """
    # Refresh the cached privilege list before checking.
    update_code_privileged_users_list()
    if not is_code_privileged(msg._client.host, msg.owner.id):
        return "\u2573 No, you are not a code-privileged user."
    return "\u2713 You are a code-privileged user."
# noinspection PyIncorrectDocstring
@command()
def apiquota():
    """
    Report how many API hits remain for the day

    :return: A string
    """
    remaining = GlobalVars.apiquota
    return "The current API quota remaining is {}.".format(remaining)
# noinspection PyIncorrectDocstring
@command()
def queuestatus():
    """
    Report current API queue

    :return: A string describing the body-fetcher queue
    """
    return GlobalVars.bodyfetcher.print_queue()
@command(str)
def inqueue(url):
    """
    Report a question's position in the body-fetcher queue.

    :param url: link to the post to look for
    :return: A string
    """
    post_id, site, post_type = fetch_post_id_and_site_from_url(url)

    # Only questions are queued, so answers can't be looked up.
    if post_type != "question":
        raise CmdException("Can't check for answers.")
    if site in GlobalVars.bodyfetcher.queue:
        # `queued_id` instead of `id`, which shadowed the builtin; enumerate
        # from 1 for a human-friendly position.
        for position, queued_id in enumerate(GlobalVars.bodyfetcher.queue[site].keys(), start=1):
            if queued_id == post_id:
                return "#{} in queue.".format(position)
    return "Not in queue."
@command()
def listening():
    """Show the posts currently watched for deletion (debug helper)."""
    # return "{} post(s) currently monitored for deletion.".format(len(GlobalVars.deletion_watcher.posts))
    watched = repr(GlobalVars.deletion_watcher.posts)
    return "Currently listening to:\n" + watched
@command()
def last_feedbacked():
    """Return the value of ``datahandling.last_feedbacked``."""
    return datahandling.last_feedbacked
# noinspection PyIncorrectDocstring,PyProtectedMember
@command(str, whole_msg=True, privileged=True, arity=(0, 1))
def stappit(msg, location_search):
    """
    Forces a system exit with exit code = 6

    :param msg:
    :param location_search: only exit when this substring matches the current location
                            (None always matches)
    :return: None
    """
    if location_search is not None and location_search.lower() not in GlobalVars.location.lower():
        return
    # The rooms argument is a tuple OF (host, room_id) specs, matching block()/reboot();
    # previously the trailing comma was missing, passing the bare (host, id) pair instead.
    tell_rooms("Goodbye, cruel world", ((msg._client.host, msg.room.id),), ())
    time.sleep(3)
    os._exit(6)
def td_format(td_object):
    """
    Render a timedelta as a human-readable string, e.g. "2 days, 3 hours".

    Based on http://stackoverflow.com/a/13756038/5244995, with the comparison
    changed from ``>`` to ``>=`` so exact multiples format correctly: previously
    exactly 1 minute rendered as "60 seconds" and exactly 1 second as "".

    :param td_object: a datetime.timedelta
    :return: A string (empty for durations under one second)
    """
    seconds = int(td_object.total_seconds())
    periods = [
        ('year', 60 * 60 * 24 * 365),
        ('month', 60 * 60 * 24 * 30),
        ('day', 60 * 60 * 24),
        ('hour', 60 * 60),
        ('minute', 60),
        ('second', 1)
    ]

    strings = []
    for period_name, period_seconds in periods:
        if seconds >= period_seconds:
            period_value, seconds = divmod(seconds, period_seconds)
            plural = "" if period_value == 1 else "s"
            strings.append("{} {}{}".format(period_value, period_name, plural))

    return ", ".join(strings)
# noinspection PyIncorrectDocstring
@command()
def status():
    """
    Returns the amount of time the application has been running

    :return: A string
    """
    uptime = datetime.utcnow() - GlobalVars.startup_utc_date
    return 'Running since {time} UTC ({relative})'.format(time=GlobalVars.startup_utc, relative=td_format(uptime))
# noinspection PyIncorrectDocstring
@command(privileged=True, whole_msg=True)
def stopflagging(msg):
    """Schedule metasmoke autoflagging to stop, logging who requested it."""
    Tasks.do(Metasmoke.stop_autoflagging)
    log('warning', 'Disabling autoflagging ({} ran !!/stopflagging, message {})'.format(msg.owner.name, msg.id))
    return 'Stopping'
# noinspection PyIncorrectDocstring,PyProtectedMember
@command(str, whole_msg=True, privileged=True, aliases=["standby-except"], give_name=True)
def standby(msg, location_search, alias_used="standby"):
    """
    Forces a system exit with exit code = 7

    :param msg:
    :param location_search: substring matched against the current location
    :param alias_used: "standby-except" inverts the match
    :return: None
    """
    matched = location_search.lower() in GlobalVars.location.lower()
    inverted = "except" in alias_used
    # Logical XOR via `==`: act when matched (standby) or when not matched (standby-except).
    if matched == inverted:
        return
    tell_rooms("{location} is switching to standby".format(location=GlobalVars.location),
               ("debug", (msg._client.host, msg.room.id)), (), notify_site="/standby")
    time.sleep(3)
    os._exit(7)
# noinspection PyIncorrectDocstring
@command(str, aliases=["test-q", "test-a", "test-u", "test-t", "test-json"], give_name=True)
def test(content, alias_used="test"):
    """
    Test an answer to determine if it'd be automatically reported

    :param content: optional leading "site=..." options, then the text (or JSON object) to test
    :param alias_used: selects how the content is interpreted: question body, answer body,
                       username, title, JSON describing a whole post, or (default) all at once
    :return: A string
    """
    result = "> "
    site = ""
    option_count = 0
    # Leading words of the form "site=..." are options; everything from the first
    # non-option word on is the content under test.
    for segment in content.split():
        if segment.startswith("site="):
            site = expand_shorthand_link(segment[5:])
        else:
            # Stop parsing options at first non-option
            break
        option_count += 1
    content = content.split(' ', option_count)[-1]  # Strip parsed options
    if alias_used == "test-q":
        kind = "a question"
        fakepost = Post(api_response={'title': 'Valid title', 'body': content,
                                      'owner': {'display_name': "Valid username", 'reputation': 1, 'link': ''},
                                      'site': site, 'IsAnswer': False, 'score': 0})
    elif alias_used == "test-a":
        kind = "an answer"
        fakepost = Post(api_response={'title': 'Valid title', 'body': content,
                                      'owner': {'display_name': "Valid username", 'reputation': 1, 'link': ''},
                                      'site': site, 'IsAnswer': True, 'score': 0})
    elif alias_used == "test-u":
        kind = "a username"
        fakepost = Post(api_response={'title': 'Valid title', 'body': "Valid question body",
                                      'owner': {'display_name': content, 'reputation': 1, 'link': ''},
                                      'site': site, 'IsAnswer': False, 'score': 0})
    elif alias_used == "test-t":
        kind = "a title"
        fakepost = Post(api_response={'title': content, 'body': "Valid question body",
                                      'owner': {'display_name': "Valid username", 'reputation': 1, 'link': ''},
                                      'site': site, 'IsAnswer': False, 'score': 0})
    elif alias_used == "test-json":
        # Only load legit json object
        try:
            json_obj = json.loads(content)
        except ValueError as e:
            raise CmdException("Error: {}".format(e))
        if not isinstance(json_obj, dict):
            raise CmdException("Only accepts a json object as input")
        # List of valid keys and their corresponding classes
        valid_keys = [
            ('title', str), ('body', str), ('username', str), ('type', str),
            ('reputation', int), ('score', int)
        ]
        right_types = list(filter(lambda p: p[0] in json_obj and isinstance(json_obj[p[0]], p[1]), valid_keys))
        wrong_types = list(filter(lambda p: p[0] in json_obj and not isinstance(json_obj[p[0]], p[1]), valid_keys))
        # Alert if valid key is of wrong class
        if len(wrong_types) > 0:
            raise CmdException("Invalid type: {}".format(", ".join(
                ["{} should be {}".format(x, y.__name__) for (x, y) in wrong_types])))
        # Alert if none of the valid keys are used
        elif len(right_types) == 0:
            raise CmdException("At least one of the following keys needed: {}".format(", ".join(
                ["{} ({})".format(x, y.__name__) for (x, y) in valid_keys])))
        # Craft a fake response; missing keys fall back to harmless defaults.
        fake_response = {
            'title': json_obj['title'] if 'title' in json_obj else 'Valid post title',
            'body': json_obj['body'] if 'body' in json_obj else 'Valid post body',
            'owner': {
                'display_name': json_obj['username'] if 'username' in json_obj else 'Valid username',
                'reputation': json_obj['reputation'] if 'reputation' in json_obj else 0,
                'link': ''
            },
            'IsAnswer': 'type' in json_obj and not json_obj['type'] == "question",
            'site': site,
            'score': json_obj['score'] if 'score' in json_obj else 0
        }
        # Handle that pluralization bug
        kind = "an answer" if fake_response['IsAnswer'] else "a question"
        fakepost = Post(api_response=fake_response)
    else:
        # Plain !!/test: run the content as title, body AND username simultaneously.
        kind = "a post, title or username"
        fakepost = Post(api_response={'title': content, 'body': content,
                                      'owner': {'display_name': content, 'reputation': 1, 'link': ''},
                                      'site': site, 'IsAnswer': False, 'score': 0})
    reasons, why_response = findspam.FindSpam.test_post(fakepost)
    if len(reasons) == 0:
        result += "Would not be caught as {}".format(kind)
        if site == "chat.stackexchange.com":
            result += " on this magic userspace"
        elif len(site) > 0:
            result += " on site `{}`".format(site)
        result += "."
    else:
        result += ", ".join(reasons).capitalize()
        if why_response is not None and len(why_response) > 0:
            result += "\n----------\n"
            result += why_response
    return result
# noinspection PyIncorrectDocstring
@command()
def threads():
    """
    Returns a description of current threads, for debugging

    :return: A string with one "ident: name" line per live thread
    """
    descriptions = ("{ident}: {name}".format(ident=t.ident, name=t.name) for t in threading.enumerate())
    return "\n".join(descriptions)
# noinspection PyIncorrectDocstring
@command(aliases=["rev", "ver"])
def version():
    """
    Returns the current version of the application

    :return: A string
    """
    return '{id} [{commit_name}]({repository}/commit/{commit_code})'.format(
        id=GlobalVars.location,
        commit_name=GlobalVars.commit_with_author,
        commit_code=GlobalVars.commit['id'],
        repository=GlobalVars.bot_repository)
# noinspection PyIncorrectDocstring
@command(whole_msg=True)
def whoami(msg):
    """
    Returns user id of smoke detector

    :param msg:
    :return: A string
    """
    my_id = msg._client._br.user_id
    return "My id for this room is {}, and it's not apnorton's fault.".format(my_id)
# --- Notification functions --- #
# noinspection PyIncorrectDocstring
@command(int, whole_msg=True, aliases=["allnotifications", "allnoti"])
def allnotificationsites(msg, room_id):
    """
    Returns a string stating what sites a user will be notified about

    :param msg:
    :param room_id:
    :return: A string
    """
    sites = get_all_notification_sites(msg.owner.id, msg._client.host, room_id)
    if not sites:
        return "You won't get notified for any sites in that room."
    return "You will get notified for these sites:\r\n" + ", ".join(sites)
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(int, str, literal_eval, whole_msg=True, arity=(2, 3))
def notify(msg, room_id, se_site, always_ping):
    """
    Subscribe a user to events on a site in a single room

    :param msg:
    :param room_id:
    :param se_site:
    :param always_ping: optional bool; defaults to True when omitted
    :return: A string
    """
    # TODO: Add check whether smokey reports in that room
    ping = True if always_ping is None else always_ping
    response, full_site = add_to_notification_list(msg.owner.id, msg._client.host, room_id, se_site,
                                                   always_ping=ping)
    if response == 0:
        return "You'll now get pings from me if I report a post on `{site}`, in room "\
               "`{room}` on `chat.{domain}`".format(site=se_site, room=room_id, domain=msg._client.host)
    if response == -1:
        raise CmdException("That notification configuration is already registered.")
    if response == -2:
        raise CmdException("The given SE site does not exist.")
    raise CmdException("Unrecognized code returned when adding notification.")
# temp command
@command(privileged=True)
def migrate_notifications():
    """One-off migration: append the always_ping flag to old 4-tuple notifications."""
    for i, notification in enumerate(GlobalVars.notifications):
        if len(notification) == 4:
            GlobalVars.notifications[i] = notification + (True,)
    # Persist the migrated list immediately.
    with open("notifications.p", "wb") as f:
        pickle.dump(GlobalVars.notifications, f, protocol=pickle.HIGHEST_PROTOCOL)
    return "shoutouts to simpleflips"
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(whole_msg=True, aliases=["unnotify-all"])
def unnotify_all(msg):
    """
    Unsubscribes a user to all events

    :param msg:
    :return: A string
    """
    remove_all_from_notification_list(msg.owner.id)
    return "I will no longer ping you if I report a post anywhere."
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(int, str, whole_msg=True)
def unnotify(msg, room_id, se_site):
    """
    Unsubscribes a user to specific events

    :param msg:
    :param room_id:
    :param se_site:
    :return: A string
    """
    removed = remove_from_notification_list(msg.owner.id, msg._client.host, room_id, se_site)
    if not removed:
        raise CmdException("That configuration doesn't exist.")
    return "I will no longer ping you if I report a post on `{site}`, in room `{room}` "\
           "on `chat.{domain}`".format(site=se_site, room=room_id, domain=msg._client.host)
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(int, str, whole_msg=True)
def willbenotified(msg, room_id, se_site):
    """
    Returns a string stating whether a user will be notified or not

    :param msg:
    :param room_id:
    :param se_site:
    :return: A string
    """
    notified = will_i_be_notified(msg.owner.id, msg._client.host, room_id, se_site)
    if notified:
        return "Yes, you will be notified for that site in that room."
    return "No, you won't be notified for that site in that room."
# Singular/plural display names per role, indexed in whois() by [0 if count == 1 else 1].
RETURN_NAMES = {"admin": ["admin", "admins"], "code_admin": ["code admin", "code admins"]}
# Maps user-typed role names to the canonical role identifier used by metasmoke.
VALID_ROLES = {"admin": "admin",
               "code_admin": "code_admin",
               "admins": "admin",
               "codeadmins": "code_admin"}
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(str, whole_msg=True)
def whois(msg, role):
    """
    Return a list of important users

    Fixed: each admin's name/last_message/last_seen previously triggered three
    separate ``msg._client.get_user`` calls per field; each user is now fetched once.

    :param msg:
    :param role: one of the keys of VALID_ROLES
    :return: A string
    """
    if role not in VALID_ROLES:
        raise CmdException("That is not a user level I can check. "
                           "I know about {0}".format(", ".join(set(VALID_ROLES.values()))))

    ms_route = "https://metasmoke.erwaysoftware.com/api/v2.0/users/with_role/{}".format(VALID_ROLES[role])
    params = {
        'filter': 'HMMKFJ',
        'key': GlobalVars.metasmoke_key,
        'per_page': 100
    }
    user_response = requests.get(ms_route, params=params)
    user_response.encoding = 'utf-8-sig'
    user_response = user_response.json()

    chat_host = msg._client.host

    # Build our list of admin chat ids
    key = ""
    if chat_host == "stackexchange.com":
        key = 'stackexchange_chat_id'
    elif chat_host == "meta.stackexchange.com":
        key = 'meta_stackexchange_chat_id'
    elif chat_host == "stackoverflow.com":
        key = 'stackoverflow_chat_id'
    # NOTE(review): an unrecognized chat_host leaves key == "" and the lookup below
    # raises KeyError -- confirm all chat hosts are covered.
    admin_ids = [a[key] for a in user_response['items'] if a[key] and a['id'] != -1]

    all_users_in_room = msg.room.get_current_user_ids()
    admins_in_room = list(set(admin_ids) & set(all_users_in_room))
    admins_not_in_room = list(set(admin_ids) - set(admins_in_room))

    def _user_tuples(ids):
        # Fetch each user once and build (id, name, last_message, last_seen).
        tuples = []
        for admin in ids:
            user = msg._client.get_user(admin)
            tuples.append((admin, user.name, user.last_message, user.last_seen))
        return tuples

    admins_list = _user_tuples(admin_ids)
    admins_in_room_list = _user_tuples(admins_in_room)
    admins_not_in_room_list = _user_tuples(admins_not_in_room)

    return_name = RETURN_NAMES[VALID_ROLES[role]][0 if len(admin_ids) == 1 else 1]

    response = "I am aware of {} {}".format(len(admin_ids), return_name)

    if admins_in_room_list:
        admins_in_room_list.sort(key=lambda x: x[2])  # Sort by last message (last seen = x[3])
        response += ". Currently in this room: **"
        for admin in admins_in_room_list:
            response += "{}, ".format(admin[1])
        response = response[:-2] + "**. "
        response += "Not currently in this room: "
        for admin in admins_not_in_room_list:
            response += "{}, ".format(admin[1])
        response = response[:-2] + "."
    else:
        response += ": "
        for admin in admins_list:
            response += "{}, ".format(admin[1])
        response = response[:-2] + ". "
        response += "None of them are currently in this room. Other users in this room might be able to help you."

    return response
@command(int, str, privileged=True, whole_msg=True)
def invite(msg, room_id, roles):
    """Start sending the given comma-separated message types to another room (until restart)."""
    add_room((msg._client.host, room_id), roles.split(","))
    return "I'll now send messages with types `{}` to room `{}` on `{}`." \
           " (Note that this will not persist after restarts.)".format(roles, room_id, msg._client.host)
# --- Post Responses --- #
# noinspection PyIncorrectDocstring
@command(str, whole_msg=True, privileged=False, give_name=True, aliases=["scan", "report-force"])
def report(msg, args, alias_used="report"):
    """
    Report a post (or posts)

    :param msg:
    :param args: one to five post URLs, optionally followed by a quoted "custom reason"
    :param alias_used: "report", "report-force" or "scan"
    :return: A string (or None)
    """
    if not is_privileged(msg.owner, msg.room) and alias_used != "scan":
        raise CmdException(GlobalVars.not_privileged_warning)

    crn, wait = can_report_now(msg.owner.id, msg._client.host)
    if not crn:
        raise CmdException("You can execute the !!/{} command again in {} seconds. "
                           "To avoid one user sending lots of reports in a few commands and "
                           "slowing SmokeDetector down due to rate-limiting, you have to "
                           "wait 30 seconds after you've reported multiple posts in "
                           "one go.".format(alias_used, wait))

    alias_used = alias_used or "report"

    argsraw = args.split(' "', 1)
    urls = argsraw[0].split(' ')

    message_url = "https://chat.{0}/transcript/{1}?m={2}".format(msg._client.host, msg.room.id, msg.id)

    # Handle determining whether a custom report reason was provided.
    try:
        # Custom handle trailing quotation marks at the end of the custom reason, which could happen.
        # Use `==`, not `is`: identity comparison against a string literal only
        # worked by CPython's interning of one-character strings.
        if argsraw[1][-1] == '"':
            custom_reason = argsraw[1][:-1]
        else:
            custom_reason = argsraw[1]
    except IndexError:
        custom_reason = None  # No custom reason was provided.

    if len(urls) > 5:
        raise CmdException("To avoid SmokeDetector reporting posts too slowly, you can "
                           "{} at most 5 posts at a time. This is to avoid "
                           "SmokeDetector's chat messages getting rate-limited too much, "
                           "which would slow down reports.".format(alias_used))

    # report_posts(urls, reported_by, reported_in, blacklist_by, operation="report", custom_reason=None):
    output = report_posts(urls, msg.owner.name, msg.room.name, message_url, alias_used, custom_reason)

    if output:
        if 1 < len(urls) > output.count("\n") + 1:
            add_or_update_multiple_reporter(msg.owner.id, msg._client.host, time.time())
        return output
# noinspection PyIncorrectDocstring,PyUnusedLocal
@command(str, whole_msg=True, privileged=True, aliases=['reportuser'])
def allspam(msg, url):
    """
    Reports all of a user's posts as spam

    Fixed: the backoff check after fetching an answer's question id read the
    stale outer ``res`` instead of ``answer_res``.

    :param msg:
    :param url: A user profile URL
    :return:
    """
    api_key = 'IAkbitmze4B8KpacUfLqkw(('
    crn, wait = can_report_now(msg.owner.id, msg._client.host)
    if not crn:
        raise CmdException("You can execute the !!/allspam command again in {} seconds. "
                           "To avoid one user sending lots of reports in a few commands and "
                           "slowing SmokeDetector down due to rate-limiting, you have to "
                           "wait 30 seconds after you've reported multiple posts in "
                           "one go.".format(wait))
    user = get_user_from_url(url)
    if user is None:
        raise CmdException("That doesn't look like a valid user URL.")
    user_sites = []
    user_posts = []
    # Detect whether link is to network profile or site profile
    if user[1] == 'stackexchange.com':
        # Respect backoffs etc
        GlobalVars.api_request_lock.acquire()
        if GlobalVars.api_backoff_time > time.time():
            time.sleep(GlobalVars.api_backoff_time - time.time() + 2)
        # Fetch sites
        request_url = "http://api.stackexchange.com/2.2/users/{}/associated".format(user[0])
        params = {
            'filter': '!6Pbp)--cWmv(1',
            'key': api_key
        }
        res = requests.get(request_url, params=params).json()
        if "backoff" in res:
            if GlobalVars.api_backoff_time < time.time() + res["backoff"]:
                GlobalVars.api_backoff_time = time.time() + res["backoff"]
        GlobalVars.api_request_lock.release()

        if 'items' not in res or len(res['items']) == 0:
            raise CmdException("The specified user does not appear to exist.")
        if res['has_more']:
            raise CmdException("The specified user has an abnormally high number of accounts. Please consider flagging "
                               "for moderator attention, otherwise use !!/report on the user's posts individually.")
        # Add accounts with posts
        for site in res['items']:
            if site['question_count'] > 0 or site['answer_count'] > 0:
                user_sites.append((site['user_id'], get_api_sitename_from_url(site['site_url'])))
    else:
        user_sites.append((user[0], get_api_sitename_from_url(user[1])))
    # Fetch posts
    for u_id, u_site in user_sites:
        # Respect backoffs etc
        GlobalVars.api_request_lock.acquire()
        if GlobalVars.api_backoff_time > time.time():
            time.sleep(GlobalVars.api_backoff_time - time.time() + 2)
        # Fetch posts
        request_url = "http://api.stackexchange.com/2.2/users/{}/posts".format(u_id)
        params = {
            'filter': '!)Q4RrMH0DC96Y4g9yVzuwUrW',
            'key': api_key,
            'site': u_site
        }
        res = requests.get(request_url, params=params).json()
        if "backoff" in res:
            if GlobalVars.api_backoff_time < time.time() + res["backoff"]:
                GlobalVars.api_backoff_time = time.time() + res["backoff"]
        GlobalVars.api_request_lock.release()

        if 'items' not in res or len(res['items']) == 0:
            raise CmdException("The specified user has no posts on this site.")
        posts = res['items']
        if posts[0]['owner']['reputation'] > 100:
            raise CmdException("The specified user's reputation is abnormally high. Please consider flagging for "
                               "moderator attention, otherwise use !!/report on the posts individually.")
        # Add blacklisted user - use most downvoted post as post URL
        message_url = "https://chat.{}/transcript/{}?m={}".format(msg._client.host, msg.room.id, msg.id)
        add_blacklisted_user(user, message_url, sorted(posts, key=lambda x: x['score'])[0]['owner']['link'])
        # TODO: Postdata refactor, figure out a better way to use apigetpost
        for post in posts:
            post_data = PostData()
            post_data.post_id = post['post_id']
            post_data.post_url = url_to_shortlink(post['link'])
            *discard, post_data.site, post_data.post_type = fetch_post_id_and_site_from_url(
                url_to_shortlink(post['link']))
            post_data.title = unescape(post['title'])
            post_data.owner_name = unescape(post['owner']['display_name'])
            post_data.owner_url = post['owner']['link']
            post_data.owner_rep = post['owner']['reputation']
            post_data.body = post['body']
            post_data.score = post['score']
            post_data.up_vote_count = post['up_vote_count']
            post_data.down_vote_count = post['down_vote_count']
            if post_data.post_type == "answer":
                # Annoyingly we have to make another request to get the question ID, since it is only returned by the
                # /answers route
                # Respect backoffs etc
                GlobalVars.api_request_lock.acquire()
                if GlobalVars.api_backoff_time > time.time():
                    time.sleep(GlobalVars.api_backoff_time - time.time() + 2)
                # Fetch posts
                req_url = "http://api.stackexchange.com/2.2/answers/{}".format(post['post_id'])
                params = {
                    'filter': '!*Jxb9s5EOrE51WK*',
                    'key': api_key,
                    'site': u_site
                }
                answer_res = requests.get(req_url, params=params).json()
                # Fix: read the backoff from this request's response, not the stale `res`.
                if "backoff" in answer_res:
                    if GlobalVars.api_backoff_time < time.time() + answer_res["backoff"]:
                        GlobalVars.api_backoff_time = time.time() + answer_res["backoff"]
                GlobalVars.api_request_lock.release()
                # Finally, set the attribute
                post_data.question_id = answer_res['items'][0]['question_id']
                post_data.is_answer = True
            user_posts.append(post_data)
    if len(user_posts) == 0:
        raise CmdException("The specified user hasn't posted anything.")
    if len(user_posts) > 15:
        raise CmdException("The specified user has an abnormally high number of spam posts. Please consider flagging "
                           "for moderator attention, otherwise use !!/report on the posts individually.")
    why_info = u"User manually reported by *{}* in room *{}*.\n".format(msg.owner.name, msg.room.name)
    # Handle all posts
    for index, post in enumerate(user_posts, start=1):
        batch = ""
        if len(user_posts) > 1:
            batch = " (batch report: post {} out of {})".format(index, len(user_posts))
        handle_spam(post=Post(api_response=post.as_dict),
                    reasons=["Manually reported " + post.post_type + batch],
                    why=why_info)
        time.sleep(2)  # Should this be implemented differently?
    if len(user_posts) > 2:
        add_or_update_multiple_reporter(msg.owner.id, msg._client.host, time.time())
def report_posts(urls, reported_by, reported_in=None, blacklist_by=None, operation="report", custom_reason=None):
    """
    Scan and/or report a batch of posts.

    :param urls: iterable of post URLs (already split by the caller)
    :param reported_by: display name of the reporting user
    :param reported_in: room name, True for "from the metasmoke API", or None
    :param blacklist_by: message URL recorded when blacklisting post authors
    :param operation: "report", "report-force" or "scan"
    :param custom_reason: optional reason text supplied by the reporter
    :return: a newline-joined status string, or None when there is no output
    """
    operation = operation or "report"
    action_done = {"report": "reported", "report-force": "reported", "scan": "scanned"}[operation]
    if reported_in is None:
        reported_from = " by *{}*".format(reported_by)
    elif reported_in is True:
        reported_from = " by *{}* from the metasmoke API".format(reported_by)
    else:
        reported_from = " by user *{}* in room *{}*".format(reported_by, reported_in)
    if custom_reason:
        with_reason = " with reason: *{}*".format(custom_reason)
    else:
        with_reason = ""
    report_info = "Post manually {}{}{}.\n\n".format(action_done, reported_from, with_reason)
    # Normalize URLs to shortlinks; invalid or duplicate entries are replaced with
    # placeholder error strings which are reported per-post below.
    normalized_urls = []
    for url in urls:
        t = url_to_shortlink(url)
        if not t:
            normalized_urls.append("That does not look like a valid post URL.")
        elif t not in normalized_urls:
            normalized_urls.append(t)
        else:
            normalized_urls.append("A duplicate URL was provided.")
    urls = normalized_urls
    users_to_blacklist = []
    output = []
    for index, url in enumerate(urls, start=1):
        if not url.startswith("http://") and not url.startswith("https://"):
            # Return the bad URL directly.
            output.append("Post {}: {}".format(index, url))
            continue
        post_data = api_get_post(rebuild_str(url))
        if post_data is None:
            output.append("Post {}: That does not look like a valid post URL.".format(index))
            continue
        if post_data is False:
            output.append("Post {}: Could not find data for this post in the API. "
                          "It may already have been deleted.".format(index))
            continue
        if has_already_been_posted(post_data.site, post_data.post_id, post_data.title) and not is_false_positive(
                (post_data.post_id, post_data.site)):
            # Don't re-report if the post wasn't marked as a false positive. If it was marked as a false positive,
            # this re-report might be attempting to correct that/fix a mistake/etc.
            if GlobalVars.metasmoke_key is not None:
                se_link = to_protocol_relative(post_data.post_url)
                ms_link = resolve_ms_link(se_link) or to_metasmoke_link(se_link)
                output.append("Post {}: Already recently reported [ [MS]({}) ]".format(index, ms_link))
                continue
            else:
                output.append("Post {}: Already recently reported".format(index))
                continue
        url = to_protocol_relative(post_data.post_url)
        post = Post(api_response=post_data.as_dict)
        user = get_user_from_url(post_data.owner_url)
        # Answers need their parent question attached before scanning.
        if fetch_post_id_and_site_from_url(url)[2] == "answer":
            parent_data = api_get_post("https://{}/q/{}".format(post.post_site, post_data.question_id))
            post._is_answer = True
            post._parent = Post(api_response=parent_data.as_dict)
        scan_spam, scan_reasons, scan_why = check_if_spam(post)  # Scan it first
        if operation in {"report", "report-force"}:  # Force blacklist user even if !!/report falls back to scan
            if user is not None:
                users_to_blacklist.append((user, blacklist_by, post_data.post_url))
        # Expand real scan results from dirty return value when not "!!/scan"
        # Presence of "scan_why" indicates the post IS spam but ignored
        if operation != "scan" and (not scan_spam) and scan_why:
            scan_spam = True
            scan_reasons, scan_why = scan_reasons
        # If "report-force" then jump to the next block
        if scan_spam and operation in {"scan", "report"}:
            handle_spam(post=post, reasons=scan_reasons, why=report_info + scan_why.lstrip())
            continue
        # scan_spam == False or "report-force"
        if operation in {"report", "report-force"}:
            batch = ""
            if len(urls) > 1:
                batch = " (batch report: post {} out of {})".format(index, len(urls))
            if scan_spam:
                why_append = "This post would have also been caught for: " + ", ".join(scan_reasons).capitalize() + \
                    '\n' + scan_why
            else:
                why_append = "This post would not have been caught otherwise."
            handle_spam(post=post,
                        reasons=["Manually reported " + post_data.post_type + batch],
                        why=report_info + why_append)
            continue
        # scan_spam == False and "scan"
        else:
            if scan_why:
                output.append("Post {}: Looks like spam but not reported: {}".format(index, scan_why.capitalize()))
            else:
                output.append("Post {}: This does not look like spam".format(index))
    # Blacklist the collected authors after all posts are processed.
    for item in users_to_blacklist:
        add_blacklisted_user(*item)
    if len(output):
        return "\n".join(output)
    return None
@command(str, str, privileged=True, whole_msg=True)
def feedback(msg, post_url, feedback):
    """
    Forwards an arbitrary feedback keyword to metasmoke for the given post.
    :param msg:
    :param post_url:
    :param feedback: feedback keyword, looked up in the known feedback tables
    :return: None
    """
    post_url = url_to_shortlink(post_url)[6:]
    if not post_url:
        raise CmdException("No such feedback.")
    # Search each known feedback table for the keyword; first match wins.
    for table in (TRUE_FEEDBACKS, FALSE_FEEDBACKS, NAA_FEEDBACKS):
        try:
            handler = table[feedback]
        except KeyError:
            continue
        handler.send(post_url, msg)
        return
    raise CmdException("No such feedback.")
@command(privileged=True, aliases=['dump-data'])
def dump_data():
    """
    Dumps current state via SmokeyTransfer and posts it to the 'dump' rooms.
    :return: A string confirming success
    """
    try:
        s, metadata = SmokeyTransfer.dump()
        # Prepend provenance (when / where / which revision) so a dump can be
        # matched to the instance that produced it.
        s = "{}, {}, {}\n{}".format(metadata['time'], metadata['location'], metadata['rev'], s)
        tell_rooms_with('dump', s)
    except Exception:
        log_exception(*sys.exc_info())
        raise CmdException("Failed to dump data. Run `!!/errorlogs` for details.")
    return "Data successfully dumped"
@command(int, privileged=True, aliases=['load-data'])
def load_data(msg_id):
    """
    Loads a previously dumped state from the chat message with the given id.
    :param msg_id: id of the chat message containing the dump
    :return: A string confirming success
    """
    msg = get_message(msg_id)
    # Only trust dumps posted by SmokeDetector itself.
    if msg.owner.id != 120914:  # TODO: implement an is_self() in chatcommunicate, don't use magic numbers
        raise CmdException("Message owner is not SmokeDetector, refusing to load")
    try:
        SmokeyTransfer.load(msg.content_source)
    except ValueError as e:
        # Validation errors carry a user-readable explanation; surface it verbatim.
        raise CmdException(str(e)) from None
    except Exception:
        log_exception(*sys.exc_info())
        raise CmdException("Failed to load data. Run `!!/errorlogs` for details.")
    return "Data successfully loaded"
#
#
# Subcommands go below here
# noinspection PyIncorrectDocstring,PyBroadException
# Aliases accepted for the message-deletion commands below; the "-force"
# variants are derived from these in delete_force's decorator.
DELETE_ALIASES = ["delete", "del", "remove", "poof", "gone", "kaboom"]
@command(message, reply=True, privileged=True, aliases=[alias + "-force" for alias in DELETE_ALIASES])
def delete_force(msg):
    """
    Delete a post from the room, ignoring protection for Charcoal HQ
    :param msg:
    :return: None
    """
    # noinspection PyBroadException
    try:
        msg.delete()
    except Exception:  # I don't want to dig into ChatExchange
        pass  # couldn't delete message; deliberately best-effort, failure is not actionable
# noinspection PyIncorrectDocstring,PyUnusedLocal,PyBroadException
@command(message, reply=True, privileged=True, aliases=DELETE_ALIASES)
def delete(msg):
    """
    Delete a post from a chatroom, with an override for Charcoal HQ.
    :param msg:
    :return: None, or a string explaining why the message was kept
    """
    post_data = get_report_data(msg)
    # 11540 is the Charcoal HQ room id; reports there are kept as records.
    if post_data and msg.room.id == 11540:
        return "Reports from SmokeDetector in Charcoal HQ are generally kept "\
               "as records. If you really need to delete a report, please use "\
               "`sd delete-force`. See [this note on message deletion]"\
               "(https://charcoal-se.org/smokey/Commands"\
               "#a-note-on-message-deletion) for more details."
    else:
        try:
            msg.delete()
        except Exception:  # I don't want to dig into ChatExchange
            pass
# noinspection PyIncorrectDocstring,PyUnusedLocal
@command(message, reply=True, privileged=True)
def postgone(msg):
    """
    Removes link from a marked report message
    :param msg:
    :return: None
    """
    replacement = edited_message_after_postgone_command(msg.content)
    if replacement is not None:
        msg.edit(replacement)
    else:
        # The helper returns None when the message isn't a report.
        raise CmdException("That's not a report.")
# noinspection PyIncorrectDocstring
@command(message, str, reply=True, privileged=True, whole_msg=True, give_name=True, aliases=FALSE_FEEDBACKS.keys(),
         arity=(1, 2))
def false(feedback, msg, comment, alias_used="false"):
    """
    Marks a post as a false positive
    :param feedback:
    :param msg:
    :param comment: optional text posted as an auto-comment on the post
    :return: String
    """
    post_data = get_report_data(msg)
    if not post_data:
        raise CmdException("That message is not a report.")
    post_url, owner_url = post_data
    feedback_type = FALSE_FEEDBACKS[alias_used]
    feedback_type.send(post_url, feedback)
    post_id, site, post_type = fetch_post_id_and_site_from_url(post_url)
    add_false_positive((post_id, site))
    user = get_user_from_url(owner_url)
    if user is not None:
        # NOTE(review): for false-positive feedback, feedback_type.blacklist
        # presumably flags the "fpu"-style aliases that also whitelist the
        # author — confirm against classes.feedback.
        if feedback_type.blacklist:
            add_whitelisted_user(user)
            result = "Registered " + post_type + " as false positive and whitelisted user."
        elif is_blacklisted_user(user):
            remove_blacklisted_user(user)
            result = "Registered " + post_type + " as false positive and removed user from the blacklist."
        else:
            result = "Registered " + post_type + " as false positive."
    else:
        result = "Registered " + post_type + " as false positive."
    try:
        # 11540 is Charcoal HQ, where report messages are kept as records.
        if msg.room.id != 11540:
            msg.delete()
    except Exception:  # I don't want to dig into ChatExchange
        pass
    if comment:
        Tasks.do(Metasmoke.post_auto_comment, comment, feedback.owner, url=post_url)
    return result if not feedback_type.always_silent else ""
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(message, str, reply=True, privileged=True, whole_msg=True, arity=(1, 2), give_name=True, aliases=["ig"])
def ignore(feedback, msg, comment, alias_used="ignore"):
    """
    Marks a post to be ignored
    :param feedback:
    :param msg:
    :param comment: optional text posted as an auto-comment on the post
    :return: A string, or None when invoked via the short alias
    """
    report = get_report_data(msg)
    if not report:
        raise CmdException("That message is not a report.")
    post_url = report[0]
    Feedback.send_custom("ignore", post_url, feedback)
    post_id, site, _ = fetch_post_id_and_site_from_url(post_url)
    add_ignored_post((post_id, site))
    if comment:
        Tasks.do(Metasmoke.post_auto_comment, comment, feedback.owner, url=post_url)
    # The terse alias gets a silent response.
    return None if alias_used == "ig" else "Post ignored; alerts about it will no longer be posted."
# noinspection PyIncorrectDocstring
@command(message, str, reply=True, privileged=True, whole_msg=True, give_name=True, aliases=NAA_FEEDBACKS.keys(),
         arity=(1, 2))
def naa(feedback, msg, comment, alias_used="naa"):
    """
    Marks a post as NAA
    :param feedback:
    :param msg:
    :param comment: optional text posted as an auto-comment on the post
    :return: String
    """
    post_data = get_report_data(msg)
    if not post_data:
        raise CmdException("That message is not a report.")
    post_url, _ = post_data
    post_id, site, post_type = fetch_post_id_and_site_from_url(post_url)
    if post_type != "answer":
        raise CmdException("That report was a question; questions cannot be marked as NAAs.")
    feedback_type = NAA_FEEDBACKS[alias_used]
    feedback_type.send(post_url, feedback)
    # post_id and site were already parsed above; the original re-fetched them
    # here with a redundant second fetch_post_id_and_site_from_url call.
    add_ignored_post((post_id, site))
    if comment:
        Tasks.do(Metasmoke.post_auto_comment, comment, feedback.owner, url=post_url)
    return "Recorded answer as an NAA in metasmoke." if not feedback_type.always_silent else ""
# noinspection PyIncorrectDocstring
@command(message, str, reply=True, privileged=True, whole_msg=True, give_name=True, aliases=TRUE_FEEDBACKS.keys(),
         arity=(1, 2))
def true(feedback, msg, comment, alias_used="true"):
    """
    Marks a post as a true positive
    :param feedback:
    :param msg:
    :param comment: optional text posted as an auto-comment on the post
    :return: string
    """
    post_data = get_report_data(msg)
    if not post_data:
        raise CmdException("That message is not a report.")
    post_url, owner_url = post_data
    feedback_type = TRUE_FEEDBACKS[alias_used]
    feedback_type.send(post_url, feedback)
    post_id, site, post_type = fetch_post_id_and_site_from_url(post_url)
    try:
        user = get_user_from_url(owner_url)
    except TypeError as e:
        raise CmdException('Could not get user from URL {0!r}'.format(owner_url))
    if user is not None:
        # "trueu"/"tpu"-style aliases also blacklist the author, recording the
        # chat message the blacklisting came from.
        if feedback_type.blacklist:
            message_url = "https://chat.{}/transcript/{}?m={}".format(msg._client.host, msg.room.id, msg.id)
            add_blacklisted_user(user, message_url, post_url)
            result = "Registered " + post_type + " as true positive and blacklisted user."
        else:
            result = "Registered " + post_type + " as true positive. If you want to "\
                     "blacklist the poster, use `trueu` or `tpu`."
    else:
        result = "Registered " + post_type + " as true positive."
    if comment:
        Tasks.do(Metasmoke.post_auto_comment, comment, feedback.owner, url=post_url)
    # Remember this post for 60s — presumably to suppress duplicate handling
    # immediately after feedback; confirm in datahandling.
    datahandling.last_feedbacked = ((post_id, site), time.time() + 60)
    return result if not feedback_type.always_silent else ""
# noinspection PyIncorrectDocstring,PyUnusedLocal
@command(message, reply=True, aliases=['wtf'])
def why(msg):
    """
    Returns reasons a post was reported
    :param msg:
    :return: A string
    """
    report = get_report_data(msg)
    if not report:
        raise CmdException("That's not a report.")
    post_id, site, _ = fetch_post_id_and_site_from_url(report[0])
    reasons = get_why(site, post_id)
    if not reasons:
        raise CmdException("There is no `why` data for that user (anymore).")
    return reasons
# noinspection PyIncorrectDocstring,PyUnusedLocal
@command(message, reply=True)
def autoflagged(msg):
    """
    Determines whether a post was automatically flagged by Metasmoke
    :param msg:
    :return: A string
    """
    # sneaky! (piggyback a reason-weight refresh on this query)
    update_reason_weights()
    report = get_report_data(msg)
    if not report:
        raise CmdException("That's not a report.")
    flagged, names = Metasmoke.determine_if_autoflagged(report[0])
    if not flagged:
        return "That post was **not** automatically flagged by metasmoke."
    return "That post was automatically flagged, using flags from: {}.".format(", ".join(names))
image ......
# coding=utf-8
# noinspection PyUnresolvedReferences
from chatcommunicate import add_room, block_room, CmdException, command, get_report_data, is_privileged, message, \
tell_rooms, tell_rooms_with, get_message
# noinspection PyUnresolvedReferences
from globalvars import GlobalVars
import findspam
# noinspection PyUnresolvedReferences
from datetime import datetime
from apigetpost import api_get_post, PostData
import datahandling
from datahandling import *
from metasmoke import Metasmoke
from blacklists import load_blacklists
from parsing import *
from spamhandling import check_if_spam, handle_spam
from gitmanager import GitManager
import threading
import random
import requests
import sys
import os
import time
from html import unescape
from ast import literal_eval
# noinspection PyCompatibility
import regex
from helpers import only_blacklists_changed, only_modules_changed, log, expand_shorthand_link, reload_modules
from classes import Post
from classes.feedback import *
# TODO: Do we need uid == -2 check? Turn into "is_user_valid" check
#
#
# System command functions below here
# This "null" command is just bypass for the "unrecognized command" message,
# so that pingbot can respond instead.
@command(aliases=['ping-help', 'groups'])
def null():
    # Intentional no-op: returning None suppresses the "unrecognized command"
    # reply so pingbot can answer these commands instead.
    return None
# --- Blacklist Functions --- #
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(str, whole_msg=True, privileged=True)
def addblu(msg, user):
    """
    Adds a user to the site blacklist
    :param msg: ChatExchange message
    :param user: profile URL, or "userid sitename"
    :return: A string
    """
    uid, val = get_user_from_list_command(user)
    if int(uid) > -1 and val != "":
        # Record which chat message triggered the blacklisting.
        message_url = "https://chat.{}/transcript/{}?m={}".format(msg._client.host, msg.room.id, msg.id)
        add_blacklisted_user((uid, val), message_url, "")
        return "User blacklisted (`{}` on `{}`).".format(uid, val)
    elif int(uid) == -2:
        # get_user_from_list_command signals an error with uid == -2 and the
        # error message in val.
        raise CmdException("Error: {}".format(val))
    else:
        raise CmdException("Invalid format. Valid format: `!!/addblu profileurl` *or* `!!/addblu userid sitename`.")
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(str)
def isblu(user):
    """
    Check if a user is blacklisted
    :param user: profile URL, or "userid sitename"
    :return: A string
    """
    uid, val = get_user_from_list_command(user)
    uid_int = int(uid)
    # uid == -2 signals a lookup error, with the message in val.
    if uid_int == -2:
        raise CmdException("Error: {}".format(val))
    if uid_int > -1 and val != "":
        template = "User is blacklisted (`{}` on `{}`)." if is_blacklisted_user((uid, val)) \
            else "User is not blacklisted (`{}` on `{}`)."
        return template.format(uid, val)
    raise CmdException("Invalid format. Valid format: `!!/isblu profileurl` *or* `!!/isblu userid sitename`.")
# noinspection PyIncorrectDocstring,PyUnusedLocal
@command(str, privileged=True)
def rmblu(user):
    """
    Removes user from site blacklist
    :param user: profile URL, or "userid sitename"
    :return: A string
    """
    uid, val = get_user_from_list_command(user)
    uid_int = int(uid)
    # uid == -2 signals a lookup error, with the message in val.
    if uid_int == -2:
        raise CmdException("Error: {}".format(val))
    if uid_int > -1 and val != "":
        if not remove_blacklisted_user((uid, val)):
            return "User is not blacklisted."
        return "User removed from blacklist (`{}` on `{}`).".format(uid, val)
    raise CmdException("Invalid format. Valid format: `!!/rmblu profileurl` *or* `!!/rmblu userid sitename`.")
# --- Whitelist functions --- #
# noinspection PyIncorrectDocstring,PyUnusedLocal,PyMissingTypeHints
@command(str, privileged=True)
def addwlu(user):
    """
    Adds a user to site whitelist
    :param user: profile URL, or "userid sitename"
    :return: A string
    """
    uid, val = get_user_from_list_command(user)
    uid_int = int(uid)
    # uid == -2 signals a lookup error, with the message in val.
    if uid_int == -2:
        raise CmdException("Error: {}".format(val))
    if uid_int > -1 and val != "":
        add_whitelisted_user((uid, val))
        return "User whitelisted (`{}` on `{}`).".format(uid, val)
    raise CmdException("Invalid format. Valid format: `!!/addwlu profileurl` *or* `!!/addwlu userid sitename`.")
# noinspection PyIncorrectDocstring,PyUnusedLocal,PyMissingTypeHints
@command(str)
def iswlu(user):
    """
    Checks if a user is whitelisted
    :param user: profile URL, or "userid sitename"
    :return: A string
    """
    uid, val = get_user_from_list_command(user)
    uid_int = int(uid)
    # uid == -2 signals a lookup error, with the message in val.
    if uid_int == -2:
        raise CmdException("Error: {}".format(val))
    if uid_int > -1 and val != "":
        template = "User is whitelisted (`{}` on `{}`)." if is_whitelisted_user((uid, val)) \
            else "User is not whitelisted (`{}` on `{}`)."
        return template.format(uid, val)
    raise CmdException("Invalid format. Valid format: `!!/iswlu profileurl` *or* `!!/iswlu userid sitename`.")
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(str, privileged=True)
def rmwlu(user):
    """
    Removes a user from site whitelist
    :param user: profile URL, or "userid sitename"
    :return: A string
    """
    uid, val = get_user_from_list_command(user)
    # Bug fix: this was `int(uid) != -1`, which let the error sentinel
    # uid == -2 (val then holds an error message, not a site name) fall into
    # the success branch and made the `== -2` error branch unreachable.
    # Siblings (rmblu, addwlu, iswlu) all use `> -1`.
    if int(uid) > -1 and val != "":
        if remove_whitelisted_user((uid, val)):
            return "User removed from whitelist (`{}` on `{}`).".format(uid, val)
        else:
            return "User is not whitelisted."
    elif int(uid) == -2:
        raise CmdException("Error: {}".format(val))
    else:
        raise CmdException("Invalid format. Valid format: `!!/rmwlu profileurl` *or* `!!/rmwlu userid sitename`.")
# noinspection PyIncorrectDocstring
@command(str)
def blacklist(_):
    """
    Returns a string which explains the usage of the new blacklist commands.
    :return: A string
    """
    # Bug fix: the second and third string literals were concatenated without
    # a separating space, producing "!!/blacklist-username,!!/blacklist-keyword".
    raise CmdException("The !!/blacklist command has been deprecated. "
                       "Please use !!/blacklist-website, !!/blacklist-username, "
                       "!!/blacklist-keyword, or perhaps !!/watch-keyword. "
                       "Remember to escape dots in URLs using \\.")
def check_blacklist(string_to_test, is_username, is_watchlist, is_phone):
    """
    Checks whether a candidate blacklist/watchlist entry is already caught.

    Builds synthetic question and answer posts containing the string (as the
    author name, or as the post body) and returns the spam-check reasons that
    already fire on them, minus reasons that are expected or irrelevant for
    this kind of entry.
    :param string_to_test: concretized pattern text to test
    :param is_username: test the string as the author name instead of the body
    :param is_watchlist: keep "potentially bad keyword" hits (watch entries)
    :param is_phone: drop phone-number/non-latin hits (number watches)
    :return: list of reason strings
    """
    # Test the string and provide a warning message if it is already caught.
    if is_username:
        question = Post(api_response={'title': 'Valid title', 'body': 'Valid body',
                                      'owner': {'display_name': string_to_test, 'reputation': 1, 'link': ''},
                                      'site': "", 'IsAnswer': False, 'score': 0})
        answer = Post(api_response={'title': 'Valid title', 'body': 'Valid body',
                                    'owner': {'display_name': string_to_test, 'reputation': 1, 'link': ''},
                                    'site': "", 'IsAnswer': True, 'score': 0})
    else:
        question = Post(api_response={'title': 'Valid title', 'body': string_to_test,
                                      'owner': {'display_name': "Valid username", 'reputation': 1, 'link': ''},
                                      'site': "", 'IsAnswer': False, 'score': 0})
        answer = Post(api_response={'title': 'Valid title', 'body': string_to_test,
                                    'owner': {'display_name': "Valid username", 'reputation': 1, 'link': ''},
                                    'site': "", 'IsAnswer': True, 'score': 0})
    question_reasons, _ = findspam.FindSpam.test_post(question)
    answer_reasons, _ = findspam.FindSpam.test_post(answer)
    # Filter out duplicates
    reasons = list(set(question_reasons) | set(answer_reasons))
    # Filter out watchlist results
    filter_out = ["potentially bad ns", "potentially bad asn"]
    if not is_watchlist:
        filter_out.append("potentially bad keyword")
    # Ignore "Mostly non-latin body/answer" for phone number watches
    if is_phone:
        filter_out.extend(["mostly non-latin", "phone number detected", "messaging number detected"])
    if filter_out:
        reasons = list(filter(
            lambda reason: all([x not in reason.lower() for x in filter_out]), reasons))
    return reasons
def format_blacklist_reasons(reasons):
    """
    Joins a list of reason strings into one human-readable, capitalized list.
    :param reasons: list of reason strings
    :return: A string
    """
    capitalized = [reason.capitalize() for reason in reasons]
    # Use an Oxford comma only when there are three or more reasons.
    if len(capitalized) >= 3:
        return ", and ".join([", ".join(capitalized[:-1]), capitalized[-1]])
    return " and ".join(capitalized)
def do_blacklist(blacklist_type, msg, force=False):
    """
    Adds a string to the website blacklist and commits/pushes to GitHub
    :param blacklist_type: which list to add to (e.g. "website", "keyword",
                           "number", "watch_keyword", "watch_number")
    :param msg: the chat message; the pattern is re-read from its raw source
    :param force: skip the "already caught" safety check
    :return: A string
    """
    chat_user_profile_link = "https://chat.{host}/users/{id}".format(host=msg._client.host,
                                                                     id=msg.owner.id)
    # Re-read the pattern from the raw message so chat markup can't mangle it.
    pattern = rebuild_str(msg.content_source.split(" ", 1)[1])
    if "number" not in blacklist_type:
        try:
            r = regex.compile(pattern, city=findspam.FindSpam.city_list)
        except regex._regex_core.error:
            raise CmdException("An invalid pattern was provided, please check your command.")
        # A pattern that matches known-good content is too broad to ship.
        if r.search(GlobalVars.valid_content):
            raise CmdException("That pattern is probably too broad, refusing to commit.")
    if not force:
        # Heuristic: digit runs separated by punctuation look like phone numbers.
        if "number" in blacklist_type or \
                regex.match(r'(?:\[a-z_]\*)?(?:\(\?:)?\d+(?:[][\\W_*()?:]+\d+)+(?:\[a-z_]\*)?$', pattern):
            is_phone = True
        else:
            is_phone = False
        is_watchlist = bool("watch" in blacklist_type)
        # Turn the regex into a plausible literal string so it can be run
        # through the spam checks as post content.
        concretized_pattern = pattern.replace("\\W", "-").replace("\\.", ".").replace("\\d", "8")
        concretized_pattern = regex.sub(r"[+*?][+?]?|\{\d*(?:,\d*)?\}", "", concretized_pattern)
        for username in False, True:
            reasons = check_blacklist(
                concretized_pattern, is_username=username, is_watchlist=is_watchlist, is_phone=is_phone)
            if reasons:
                raise CmdException(
                    "That pattern looks like it's already caught by " +
                    format_blacklist_reasons(reasons) +
                    "; append `-force` if you really want to do that.")
    metasmoke_down = False
    try:
        code_permissions = is_code_privileged(msg._client.host, msg.owner.id)
    except (requests.exceptions.ConnectionError, ValueError, TypeError):
        code_permissions = False  # Because we need the system to assume that we don't have code privs.
        metasmoke_down = True
    _status, result = GitManager.add_to_blacklist(
        blacklist=blacklist_type,
        item_to_blacklist=pattern,
        username=msg.owner.name,
        chat_profile_link=chat_user_profile_link,
        code_permissions=code_permissions,
        metasmoke_down=metasmoke_down
    )
    if not _status:
        raise CmdException(result)
    # Blacklist-only commits can be applied live without restarting the bot.
    if code_permissions and only_blacklists_changed(GitManager.get_local_diff()):
        try:
            if not GlobalVars.on_master:
                # Restart if HEAD detached
                log('warning', "Pulling local with HEAD detached, checkout deploy", f=True)
                os._exit(8)
            GitManager.pull_local()
            GlobalVars.reload()
            findspam.FindSpam.reload_blacklists()
            tell_rooms_with('debug', GlobalVars.s_norestart)
            time.sleep(2)
            return None
        except Exception:
            pass
    return result
# noinspection PyIncorrectDocstring
@command(str, whole_msg=True, privileged=True, give_name=True, aliases=["blacklist-keyword",
                                                                        "blacklist-website",
                                                                        "blacklist-username",
                                                                        "blacklist-number",
                                                                        "blacklist-keyword-force",
                                                                        "blacklist-website-force",
                                                                        "blacklist-username-force",
                                                                        "blacklist-number-force"])
def blacklist_keyword(msg, pattern, alias_used="blacklist-keyword"):
    """
    Adds a pattern to the blacklist and commits/pushes to GitHub
    :param msg:
    :param pattern:
    :return: A string
    """
    # The alias looks like "blacklist-<type>[-force]"; the middle part picks
    # the target list and a trailing "-force" skips the safety check.
    _, list_type, *force_flag = alias_used.split("-")
    return do_blacklist(list_type, msg, force=bool(force_flag))
# noinspection PyIncorrectDocstring
@command(str, whole_msg=True, privileged=True, give_name=True,
         aliases=["watch-keyword", "watch-force", "watch-keyword-force",
                  "watch-number", "watch-number-force"])
def watch(msg, pattern, alias_used="watch"):
    """
    Adds a pattern to the watched keywords list and commits/pushes to GitHub
    :param msg:
    :param pattern:
    :return: A string
    """
    # "number" anywhere in the alias selects the number watchlist; a trailing
    # "force" skips the already-caught safety check.
    watch_type = "watch_number" if "number" in alias_used else "watch_keyword"
    is_forced = alias_used.split("-")[-1] == "force"
    return do_blacklist(watch_type, msg, force=is_forced)
@command(str, whole_msg=True, privileged=True, give_name=True, aliases=["unwatch"])
def unblacklist(msg, item, alias_used="unwatch"):
    """
    Removes a pattern from watchlist/blacklist and commits/pushes to GitHub
    :param msg:
    :param item: unused; the pattern is re-read from the raw message below so
                 chat formatting can't mangle it
    :return: A string
    """
    if alias_used == "unwatch":
        blacklist_type = "watch"
    elif alias_used == "unblacklist":
        blacklist_type = "blacklist"
    else:
        raise CmdException("Invalid blacklist type.")
    metasmoke_down = False
    try:
        code_privs = is_code_privileged(msg._client.host, msg.owner.id)
    except (requests.exceptions.ConnectionError, ValueError):
        # If metasmoke can't be reached, proceed without code privileges.
        code_privs = False
        metasmoke_down = True
    # Re-parse from content_source rather than using `item`.
    pattern = msg.content_source.split(" ", 1)[1]
    _status, result = GitManager.remove_from_blacklist(
        rebuild_str(pattern), msg.owner.name, blacklist_type,
        code_privileged=code_privs, metasmoke_down=metasmoke_down)
    if not _status:
        raise CmdException(result)
    # Blacklist-only commits can be applied live without restarting the bot.
    if only_blacklists_changed(GitManager.get_local_diff()):
        try:
            if not GlobalVars.on_master:
                # Restart if HEAD detached
                log('warning', "Pulling local with HEAD detached, checkout deploy", f=True)
                os._exit(8)
            GitManager.pull_local()
            GlobalVars.reload()
            findspam.FindSpam.reload_blacklists()
            tell_rooms_with('debug', GlobalVars.s_norestart)
            time.sleep(2)
            return None
        except Exception:
            pass
    return result
@command(int, privileged=True, whole_msg=True)
def approve(msg, pr_id):
    """
    Approves and merges a pull request by number, then hot-reloads if only
    blacklists changed.
    :param msg:
    :param pr_id: GitHub pull request number
    :return: A string, or None after a successful hot reload
    """
    code_permissions = is_code_privileged(msg._client.host, msg.owner.id)
    if not code_permissions:
        raise CmdException("You need code privileges to approve pull requests")
    # Forward this, because checks are better placed in gitmanager.py
    try:
        message_url = "https://chat.{}/transcript/{}?m={}".format(msg._client.host, msg.room.id, msg.id)
        # Bug fix: the format string had only three placeholders for four
        # arguments, so the badge image URL was silently dropped by
        # str.format; a fourth placeholder now embeds it.
        comment = "{} [approved]({}) this PR in {}\n\n![approved]({})".format(
            msg.owner.name, message_url, msg.room.name,
            # The image of (code-admins|approved) from PullApprove
            "https://camo.githubusercontent.com/18c997a6b1ac764dfd43963f5071d03a3c7fc97b/68747470733a2f2f696d672e7368"
            "69656c64732e696f2f62616467652f636f64652d2d61646d696e732d617070726f7665642d627269676874677265656e2e737667")
        message = GitManager.merge_pull_request(pr_id, comment)
        # Blacklist-only merges can be applied live without a restart.
        if only_blacklists_changed(GitManager.get_local_diff()):
            try:
                if not GlobalVars.on_master:
                    # Restart if HEAD detached
                    log('warning', "Pulling local with HEAD detached, checkout deploy", f=True)
                    os._exit(8)
                GitManager.pull_local()
                GlobalVars.reload()
                findspam.FindSpam.reload_blacklists()
                tell_rooms_with('debug', GlobalVars.s_norestart)
                time.sleep(2)
                return None
            except Exception:
                pass
        return message
    except Exception as e:
        raise CmdException(str(e))
@command(privileged=True, aliases=["remote-diff", "remote_diff"])
def remotediff():
    """
    Shows the diff against the remote git state, plus whether pulling it
    would require a full restart.
    :return: A string
    """
    # Fetch the remote diff once; the original called get_remote_diff() twice,
    # repeating the underlying git work.
    remote_diff = GitManager.get_remote_diff()
    will_require_full_restart = "SmokeDetector will require a full restart to pull changes: " \
                                "{}".format(str(not only_blacklists_changed(remote_diff)))
    return "{}\n\n{}".format(remote_diff, will_require_full_restart)
# --- Joke Commands --- #
@command(whole_msg=True)
def blame(msg):
    """
    Pins the blame on a random user currently in the room (joke command).
    :param msg:
    :return: A string
    """
    victim_id = random.choice(msg.room.get_current_user_ids())
    victim = msg._client.get_user(victim_id)
    return "It's [{}](https://chat.{}/users/{})'s fault.".format(
        victim.name, msg._client.host, victim.id)
@command(str, whole_msg=True, aliases=["blame\u180E"])
def blame2(msg, x):
    """
    Joke command: decodes a user id hidden in invisible characters and blames
    that user, falling back to blaming the sender if the id is invalid.
    :param msg:
    :param x: string of zero-width/invisible characters encoding a user id
    :return: A string
    """
    # Each invisible character is a digit in base len(base) (7); the string
    # spells out a chat user id, most significant digit first.
    base = {"\u180E": 0, "\u200B": 1, "\u200C": 2, "\u200D": 3, "\u2060": 4, "\u2063": 5, "\uFEFF": 6}
    user = sum([(len(base)**i) * base[char] for i, char in enumerate(reversed(x))])
    try:
        unlucky_victim = msg._client.get_user(user)
        return "It's [{}](https://chat.{}/users/{})'s fault.".format(
            unlucky_victim.name, msg._client.host, unlucky_victim.id)
    except requests.exceptions.HTTPError:
        # Unknown/invalid id: blame the requester instead.
        unlucky_victim = msg.owner
        return "It's [{}](https://chat.{}/users/{})'s fault.".format(
            unlucky_victim.name, msg._client.host, unlucky_victim.id)
# noinspection PyIncorrectDocstring
@command()
def brownie():
    """
    Joke command: always replies with "Brown!".
    :return: A string
    """
    return "Brown!"
# Coffee varieties served at random by the !!/coffee joke command.
COFFEES = ['Espresso', 'Macchiato', 'Ristretto', 'Americano', 'Latte', 'Cappuccino', 'Mocha', 'Affogato', 'jQuery']
# noinspection PyIncorrectDocstring
@command(str, whole_msg=True, arity=(0, 1))
def coffee(msg, other_user):
    """
    Returns a string stating who the coffee is for (This is a joke command)
    :param msg:
    :param other_user:
    :return: A string
    """
    if other_user is None:
        recipient = msg.owner.name.replace(" ", "")
    else:
        # Strip any leading @s and everything after the first whitespace.
        recipient = regex.sub(r'^@*|\b\s.{1,}', '', other_user)
    return "*brews a cup of {} for @{}*".format(random.choice(COFFEES), recipient)
# noinspection PyIncorrectDocstring
@command()
def lick():
    """
    Joke command: replies with an ice-cream action message.
    :return: A string
    """
    return "*licks ice cream cone*"
# Tea varieties served at random by the !!/tea joke command.
TEAS = ['earl grey', 'green', 'chamomile', 'lemon', 'darjeeling', 'mint', 'jasmine', 'passionfruit']
# noinspection PyIncorrectDocstring
@command(str, whole_msg=True, arity=(0, 1))
def tea(msg, other_user):
    """
    Returns a string stating who the tea is for (This is a joke command)
    :param msg:
    :param other_user:
    :return: A string
    """
    if other_user is None:
        recipient = msg.owner.name.replace(" ", "")
    else:
        # Strip any leading @s and everything after the first whitespace.
        recipient = regex.sub(r'^@*|\b\s.{1,}', '', other_user)
    return "*brews a cup of {} tea for @{}*".format(random.choice(TEAS), recipient)
# noinspection PyIncorrectDocstring
@command()
def wut():
    """
    Joke command: canned response to 'wut'.
    :return: A string
    """
    return "Whaddya mean, 'wut'? Humans..."
def _pluralized(value, unit):
    """Return e.g. "3 days" / "1 day" for a value and its singular unit name."""
    return "{} {}".format(value, unit if value == 1 else unit + "s")


def _countdown_parts(diff):
    """Split a timedelta into pluralized day/hour/minute/second strings."""
    hours, remainder = divmod(diff.seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    return (_pluralized(diff.days, "day"), _pluralized(hours, "hour"),
            _pluralized(minutes, "minute"), _pluralized(seconds, "second"))


@command(aliases=["zomg_hats"])
def hats():
    """
    Reports the time until Winter Bash starts or ends (joke/seasonal command).
    The two formatting branches were near-duplicates; the pluralization and
    divmod bookkeeping now live in the helpers above.
    :return: A string, or "" once the event window has passed
    """
    wb_start = datetime(2017, 12, 13, 0, 0, 0)
    wb_end = datetime(2018, 1, 3, 0, 0, 0)
    now = datetime.utcnow()
    if wb_start > now:
        return "WE LOVE HATS! Winter Bash will begin in {}, {}, {}, and {}.".format(
            *_countdown_parts(wb_start - now))
    if wb_end > now:
        return "Winter Bash won't end for {}, {}, {}, and {}. GO EARN SOME HATS!".format(
            *_countdown_parts(wb_end - now))
    # NOTE(review): both hard-coded dates are in the past, so this currently
    # always returns "" — presumably the dates need updating each season.
    return ""
# --- Block application from posting functions --- #
# noinspection PyIncorrectDocstring
@command(int, int, whole_msg=True, privileged=True, arity=(1, 2))
def block(msg, block_time, room_id):
    """
    Blocks posts from application for a period of time
    :param msg:
    :param block_time: seconds to block for; values outside (0, 14400) fall back to 900
    :param room_id: room to block in; None means block globally
    :return: None
    """
    # Clamp nonsensical durations (non-positive or over 4 hours) to 15 minutes.
    time_to_block = block_time if 0 < block_time < 14400 else 900
    which_room = "globally" if room_id is None else "in room {} on {}".format(room_id, msg._client.host)
    block_message = "Reports blocked for {} second(s) {}.".format(time_to_block, which_room)
    tell_rooms(block_message, ((msg._client.host, msg.room.id), "debug", "metatavern"), ())
    block_room(room_id, msg._client.host, time.time() + time_to_block)
# noinspection PyIncorrectDocstring,PyUnusedLocal
@command(int, int, whole_msg=True, privileged=True, arity=(1, 2))
def unblock(msg, room_id):
    """
    Unblocks posting to a room
    :param msg:
    :param room_id: room to unblock; None means unblock globally
    :return: None
    """
    # NOTE(review): the decorator declares two int arguments but the function
    # only takes room_id — presumably the extra argument is tolerated/ignored
    # by chatcommunicate's dispatch; confirm.
    # An expiry of -1 clears the block.
    block_room(room_id, msg._client.host, -1)
    which_room = "globally" if room_id is None else "in room {} on {}".format(room_id, msg._client.host)
    unblock_message = "Reports unblocked {}.".format(which_room)
    tell_rooms(unblock_message, ((msg._client.host, msg.room.id), "debug", "metatavern"), ())
# --- Administration Commands --- #
# Canned responses for the !!/alive liveness check.
ALIVE_MSG = [
    'Yup', 'You doubt me?', 'Of course', '... did I miss something?', 'plz send teh coffee',
    'Watching this endless list of new questions *never* gets boring', 'Kinda sorta',
    'You should totally drop that and use jQuery', r'¯\\_(ツ)\_/¯',
]
# noinspection PyIncorrectDocstring
@command(aliases=["live"])
def alive():
    """
    Returns a string indicating the process is still active
    :return: A string
    """
    # Any reply at all proves liveness; pick a random canned one.
    return random.choice(ALIVE_MSG)
# noinspection PyIncorrectDocstring
@command(int, privileged=True, arity=(0, 1), aliases=["errlogs", "errlog", "errorlog"])
def errorlogs(count):
    """
    Shows the most recent lines in the error logs
    :param count: number of lines to fetch; falsy (omitted/0) means 50
    :return: A string
    """
    if not count:
        count = 50
    return fetch_lines_from_error_log(count)
@command(whole_msg=True, aliases=["ms-status", "ms-down", "ms-up"], give_name=True)
def metasmoke(msg, alias_used):
    """
    Reports, or forcibly overrides, the tracked up/down state of metasmoke.
    :param msg:
    :param alias_used:
    :return: A string
    """
    if alias_used in {"metasmoke", "ms-status"}:
        if GlobalVars.metasmoke_down:
            return "metasmoke is down. Current failure count: {}".format(GlobalVars.metasmoke_failures)
        return "metasmoke is up. Current failure count: {}".format(GlobalVars.metasmoke_failures)
    # The next aliases/functionalities require privilege
    if not is_privileged(msg.owner, msg.room):
        raise CmdException(GlobalVars.not_privileged_warning)
    if alias_used == "ms-down":
        GlobalVars.metasmoke_down = True
        GlobalVars.metasmoke_failures = 999
        return "metasmoke is now considered down."
    if alias_used == "ms-up":
        GlobalVars.metasmoke_down = False
        GlobalVars.metasmoke_failures = 0
        return "metasmoke is now considered up."
    raise CmdException("Bad command alias. Blame a developer.")
# noinspection PyIncorrectDocstring
@command(aliases=["commands", "help"])
def info():
    """
    Returns the help text
    :return: A string
    """
    return ("I'm " + GlobalVars.chatmessage_prefix +
            " a bot that detects spam and offensive posts on the network and"
            " posts alerts to chat."
            " [A command list is available here](https://charcoal-se.org/smokey/Commands).")
# noinspection PyIncorrectDocstring
@command(str, whole_msg=True, arity=(0, 1))
def welcome(msg, other_user):
    """
    Returns the welcome text
    :param msg:
    :param other_user: optional name of the user to greet
    :return: A string
    """
    w_msg = ("Welcome to {room}{user}! I'm {me}, a bot that detects spam and offensive posts on the network, "
             "and posts alerts to chat. You can find more about me on the "
             "[Charcoal website](https://charcoal-se.org/).")
    # NOTE(review): the reply is delivered by raising CmdException — presumably
    # the dispatcher posts the exception text as the response; confirm in
    # chatcommunicate.
    if other_user is None:
        raise CmdException(w_msg.format(room=msg.room.name, user="", me=GlobalVars.chatmessage_prefix))
    else:
        # Strip any leading @s and everything after the first whitespace.
        other_user = regex.sub(r'^@*|\b\s.{1,}', '', other_user)
        raise CmdException(w_msg.format(room=msg.room.name, user=" @" + other_user, me=GlobalVars.chatmessage_prefix))
# noinspection PyIncorrectDocstring
@command()
def location():
    """
    Returns the current location the application is running from
    :return: A string with current location
    """
    return GlobalVars.location
# noinspection PyIncorrectDocstring,PyProtectedMember
@command(privileged=True)
def master():
    """
    Forces a system exit with exit code = 8
    :return: None
    """
    # NOTE(review): exit code 8 is interpreted by the supervising wrapper —
    # presumably it triggers a restart on the master branch; confirm there.
    os._exit(8)
# noinspection PyIncorrectDocstring,PyProtectedMember
@command(privileged=True)
def pull():
    """
    Pull an update from GitHub
    :return: String on failure, None on success
    """
    remote_diff = GitManager.get_remote_diff()
    # Blacklist-only changes can be applied live without restarting.
    if only_blacklists_changed(remote_diff):
        GitManager.pull_remote()
        findspam.FindSpam.reload_blacklists()
        GlobalVars.reload()
        tell_rooms_with('debug', GlobalVars.s_norestart)
        return
    # Otherwise, only pull if CI for the latest commit on the deploy branch
    # reports success.
    request = requests.get('https://api.github.com/repos/{}/git/refs/heads/deploy'.format(
        GlobalVars.bot_repo_slug))
    latest_sha = request.json()["object"]["sha"]
    request = requests.get(
        'https://api.github.com/repos/{}/commits/{}/statuses'.format(
            GlobalVars.bot_repo_slug, latest_sha))
    states = []
    for ci_status in request.json():
        state = ci_status["state"]
        states.append(state)
    if "success" in states:
        # Module-only changes can be hot-reloaded; anything else exits so the
        # supervisor restarts the process with fresh code.
        if only_modules_changed(remote_diff):
            GitManager.pull_remote()
            reload_modules()
            GlobalVars.reload()
            tell_rooms_with('debug', GlobalVars.s_norestart2)
            return
        else:
            os._exit(3)
    elif "error" in states or "failure" in states:
        raise CmdException("CI build failed! :( Please check your commit.")
    elif "pending" in states or not states:
        raise CmdException("CI build is still pending, wait until the build has finished and then pull again.")
@command(whole_msg=True, aliases=['pull-sync'])
def sync_remote(msg):
    """
    Force a branch sync from origin/master with [git branch -M]
    :param msg:
    :return: A string containing a response message
    """
    if is_code_privileged(msg._client.host, msg.owner.id):
        return GitManager.sync_remote()[1]
    raise CmdException("You don't have code privileges to run this command.")
@command(privileged=True, give_name=True, aliases=[
    "gitstatus", "git-status", "git-help", "git-merge-abort", "git-reset"
])
def git(alias_used="git"):
    """
    Dispatches git maintenance subcommands based on the alias used.
    Bug fix: "git-merge-abort" and "git-reset" were advertised in the help
    text and handled below, but missing from the aliases list, so they could
    never actually be invoked.
    :param alias_used:
    :return: A string
    """
    if alias_used == "git":
        raise CmdException("Bad alias. Try another command")
    if alias_used == "git-help":
        return "Available commands: git-help, git-status, git-merge-abort, git-reset"
    # Normalize "git-status" → "gitstatus" etc. before dispatching.
    alias_used = alias_used.replace("-", "")
    if alias_used == "gitstatus":
        return GitManager.current_git_status()
    elif alias_used == "gitmergeabort":
        return GitManager.merge_abort()
    elif alias_used == "gitreset":
        return GitManager.reset_head()
# noinspection PyIncorrectDocstring,PyProtectedMember
@command(whole_msg=True, privileged=True, give_name=True, aliases=["restart", "reload"])
def reboot(msg, alias_used="reboot"):
    """
    Exit with code 5 (reboot/restart aliases) or hot-reload modules (reload).

    :param msg: The chat message that triggered this command
    :param alias_used: Which alias invoked the command
    :return: None
    """
    if alias_used == "reload":
        reload_modules()
        tell_rooms_with('debug', GlobalVars.s_norestart2)
        time.sleep(3)
        return
    if alias_used in {"reboot", "restart"}:
        tell_rooms("Goodbye, cruel world", ("debug", (msg._client.host, msg.room.id)), ())
        time.sleep(3)
        os._exit(5)
    raise RuntimeError("Invalid alias!")
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(whole_msg=True)
def amiprivileged(msg):
    """
    Tell the invoking user whether they hold reporting privileges.

    :param msg: The chat message that triggered this command
    :return: A string
    """
    privileged = is_privileged(msg.owner, msg.room)
    if privileged:
        return "\u2713 You are a privileged user."
    return "\u2573 " + GlobalVars.not_privileged_warning
# noinspection PyIncorrectDocstring,
@command(whole_msg=True)
def amicodeprivileged(msg):
    """
    Tell the invoking user whether they hold code privileges.

    :param msg: The chat message that triggered this command
    :return: A string
    """
    # Refresh the list first so the answer reflects the latest state.
    update_code_privileged_users_list()
    has_priv = is_code_privileged(msg._client.host, msg.owner.id)
    return ("\u2713 You are a code-privileged user." if has_priv
            else "\u2573 No, you are not a code-privileged user.")
# noinspection PyIncorrectDocstring
@command()
def apiquota():
    """
    Report how many API hits remain for the day
    :return: A string
    """
    remaining = GlobalVars.apiquota
    return "The current API quota remaining is {}.".format(remaining)
# noinspection PyIncorrectDocstring
@command()
def queuestatus():
    """
    Report current API queue
    :return: A string describing the body-fetcher queue, as rendered by
             GlobalVars.bodyfetcher.print_queue()
    """
    return GlobalVars.bodyfetcher.print_queue()
@command(str)
def inqueue(url):
    """
    Report a question's position in the body-fetcher queue.

    :param url: URL of the post to look for
    :return: A string describing the queue position (or absence)
    :raises CmdException: if the URL points at an answer rather than a question
    """
    post_id, site, post_type = fetch_post_id_and_site_from_url(url)
    if post_type != "question":
        raise CmdException("Can't check for answers.")
    if site in GlobalVars.bodyfetcher.queue:
        # enumerate from 1 so the reported position is human-friendly; the
        # original loop variable shadowed the builtin `id`.
        for position, queued_id in enumerate(GlobalVars.bodyfetcher.queue[site].keys(), start=1):
            if queued_id == post_id:
                return "#" + str(position) + " in queue."
    return "Not in queue."
@command()
def listening():
    """Report the posts currently monitored for deletion."""
    # return "{} post(s) currently monitored for deletion.".format(len(GlobalVars.deletion_watcher.posts))
    watched = repr(GlobalVars.deletion_watcher.posts)
    return "Currently listening to:\n" + watched
@command()
def last_feedbacked():
    """Return ``datahandling.last_feedbacked`` (the last recorded feedback info)."""
    return datahandling.last_feedbacked
# noinspection PyIncorrectDocstring,PyProtectedMember
@command(str, whole_msg=True, privileged=True, arity=(0, 1))
def stappit(msg, location_search):
    """
    Forces a system exit with exit code = 6
    :param msg:
    :param location_search: optional; when given, only exit if it is a
                            (case-insensitive) substring of this instance's location
    :return: None
    """
    if location_search is None or location_search.lower() in GlobalVars.location.lower():
        # NOTE(review): the rooms argument ((host, id)) is just a 2-tuple, unlike
        # reboot's ("debug", (host, id)) -- confirm tell_rooms accepts this shape.
        tell_rooms("Goodbye, cruel world", ((msg._client.host, msg.room.id)), ())
        time.sleep(3)
        os._exit(6)
def td_format(td_object):
    """
    Render a timedelta as a human-readable string like "3 days, 2 hours".

    Zero-valued periods are omitted; durations under one second yield "".

    :param td_object: a datetime.timedelta
    :return: A comma-separated string of period counts
    """
    # source: http://stackoverflow.com/a/13756038/5244995
    seconds = int(td_object.total_seconds())
    periods = [
        ('year', 60 * 60 * 24 * 365),
        ('month', 60 * 60 * 24 * 30),
        ('day', 60 * 60 * 24),
        ('hour', 60 * 60),
        ('minute', 60),
        ('second', 1)
    ]
    strings = []
    for period_name, period_seconds in periods:
        # Use >= (not >) so exact multiples roll into the larger unit:
        # 60 seconds -> "1 minute", not "60 seconds".
        if seconds >= period_seconds:
            period_value, seconds = divmod(seconds, period_seconds)
            if period_value == 1:
                strings.append("%s %s" % (period_value, period_name))
            else:
                strings.append("%s %ss" % (period_value, period_name))
    return ", ".join(strings)
# noinspection PyIncorrectDocstring
@command()
def status():
    """
    Returns the amount of time the application has been running
    :return: A string
    """
    uptime = datetime.utcnow() - GlobalVars.startup_utc_date
    return 'Running since {time} UTC ({relative})'.format(time=GlobalVars.startup_utc,
                                                          relative=td_format(uptime))
# noinspection PyIncorrectDocstring
@command(privileged=True, whole_msg=True)
def stopflagging(msg):
    """
    Disable metasmoke autoflagging (asynchronously) and log who requested it.
    :param msg:
    :return: A string
    """
    Tasks.do(Metasmoke.stop_autoflagging)
    log('warning', 'Disabling autoflagging ({} ran !!/stopflagging, message {})'.format(msg.owner.name, msg.id))
    return 'Stopping'
# noinspection PyIncorrectDocstring,PyProtectedMember
@command(str, whole_msg=True, privileged=True, aliases=["standby-except"], give_name=True)
def standby(msg, location_search, alias_used="standby"):
    """
    Forces a system exit with exit code = 7
    :param msg:
    :param location_search: substring matched (case-insensitively) against this
                            instance's location
    :param alias_used: "standby" exits on a match; "standby-except" exits on a non-match
    :return: None
    """
    match = location_search.lower() in GlobalVars.location.lower()
    reverse_search = "except" in alias_used
    # Use `!=` as Logical XOR
    if match != reverse_search:
        tell_rooms("{location} is switching to standby".format(location=GlobalVars.location),
                   ("debug", (msg._client.host, msg.room.id)), (), notify_site="/standby")
        time.sleep(3)
        os._exit(7)
# noinspection PyIncorrectDocstring
@command(str, aliases=["test-q", "test-a", "test-u", "test-t", "test-json"], give_name=True)
def test(content, alias_used="test"):
    """
    Test an answer to determine if it'd be automatically reported

    The alias chooses what `content` is treated as: question body (test-q),
    answer body (test-a), username (test-u), title (test-t), a JSON object
    describing the post (test-json), or all fields at once (plain test).

    :param content: Text (or JSON) to check, optionally preceded by
                    "site=<name>" option segments
    :param alias_used: Which alias invoked the command
    :return: A string
    """
    result = "> "
    site = ""
    option_count = 0
    # Consume leading "site=" options; parsing stops at the first non-option.
    for segment in content.split():
        if segment.startswith("site="):
            site = expand_shorthand_link(segment[5:])
        else:
            # Stop parsing options at first non-option
            break
        option_count += 1
    content = content.split(' ', option_count)[-1]  # Strip parsed options
    if alias_used == "test-q":
        kind = "a question"
        fakepost = Post(api_response={'title': 'Valid title', 'body': content,
                                      'owner': {'display_name': "Valid username", 'reputation': 1, 'link': ''},
                                      'site': site, 'IsAnswer': False, 'score': 0})
    elif alias_used == "test-a":
        kind = "an answer"
        fakepost = Post(api_response={'title': 'Valid title', 'body': content,
                                      'owner': {'display_name': "Valid username", 'reputation': 1, 'link': ''},
                                      'site': site, 'IsAnswer': True, 'score': 0})
    elif alias_used == "test-u":
        kind = "a username"
        fakepost = Post(api_response={'title': 'Valid title', 'body': "Valid question body",
                                      'owner': {'display_name': content, 'reputation': 1, 'link': ''},
                                      'site': site, 'IsAnswer': False, 'score': 0})
    elif alias_used == "test-t":
        kind = "a title"
        fakepost = Post(api_response={'title': content, 'body': "Valid question body",
                                      'owner': {'display_name': "Valid username", 'reputation': 1, 'link': ''},
                                      'site': site, 'IsAnswer': False, 'score': 0})
    elif alias_used == "test-json":
        # Only load legit json object
        try:
            json_obj = json.loads(content)
        except ValueError as e:
            raise CmdException("Error: {}".format(e))
        if not isinstance(json_obj, dict):
            raise CmdException("Only accepts a json object as input")
        # List of valid keys and their corresponding classes
        valid_keys = [
            ('title', str), ('body', str), ('username', str), ('type', str),
            ('reputation', int), ('score', int)
        ]
        right_types = list(filter(lambda p: p[0] in json_obj and isinstance(json_obj[p[0]], p[1]), valid_keys))
        wrong_types = list(filter(lambda p: p[0] in json_obj and not isinstance(json_obj[p[0]], p[1]), valid_keys))
        # Alert if valid key is of wrong class
        if len(wrong_types) > 0:
            raise CmdException("Invalid type: {}".format(", ".join(
                ["{} should be {}".format(x, y.__name__) for (x, y) in wrong_types])))
        # Alert if none of the valid keys are used
        elif len(right_types) == 0:
            raise CmdException("At least one of the following keys needed: {}".format(", ".join(
                ["{} ({})".format(x, y.__name__) for (x, y) in valid_keys])))
        # Craft a fake response, filling unspecified fields with innocuous defaults.
        fake_response = {
            'title': json_obj['title'] if 'title' in json_obj else 'Valid post title',
            'body': json_obj['body'] if 'body' in json_obj else 'Valid post body',
            'owner': {
                'display_name': json_obj['username'] if 'username' in json_obj else 'Valid username',
                'reputation': json_obj['reputation'] if 'reputation' in json_obj else 0,
                'link': ''
            },
            'IsAnswer': 'type' in json_obj and not json_obj['type'] == "question",
            'site': site,
            'score': json_obj['score'] if 'score' in json_obj else 0
        }
        # Handle that pluralization bug
        kind = "an answer" if fake_response['IsAnswer'] else "a question"
        fakepost = Post(api_response=fake_response)
    else:
        # Plain "test": use the content as title, body and username simultaneously.
        kind = "a post, title or username"
        fakepost = Post(api_response={'title': content, 'body': content,
                                      'owner': {'display_name': content, 'reputation': 1, 'link': ''},
                                      'site': site, 'IsAnswer': False, 'score': 0})
    reasons, why_response = findspam.FindSpam.test_post(fakepost)
    if len(reasons) == 0:
        result += "Would not be caught as {}".format(kind)
        if site == "chat.stackexchange.com":
            result += " on this magic userspace"
        elif len(site) > 0:
            result += " on site `{}`".format(site)
        result += "."
    else:
        result += ", ".join(reasons).capitalize()
        if why_response is not None and len(why_response) > 0:
            result += "\n----------\n"
            result += why_response
    return result
# noinspection PyIncorrectDocstring
@command()
def threads():
    """
    Returns a description of current threads, for debugging
    :return: A string
    """
    descriptions = ("{ident}: {name}".format(ident=t.ident, name=t.name)
                    for t in threading.enumerate())
    return "\n".join(descriptions)
# noinspection PyIncorrectDocstring
@command(aliases=["rev", "ver"])
def version():
    """
    Returns the current version of the application
    :return: A string
    """
    template = '{id} [{commit_name}]({repository}/commit/{commit_code})'
    return template.format(id=GlobalVars.location,
                           commit_name=GlobalVars.commit_with_author,
                           commit_code=GlobalVars.commit['id'],
                           repository=GlobalVars.bot_repository)
# noinspection PyIncorrectDocstring
@command(whole_msg=True)
def whoami(msg):
    """
    Returns user id of smoke detector
    :param msg:
    :return: A string
    """
    my_id = msg._client._br.user_id
    return "My id for this room is {}, and it's not apnorton's fault.".format(my_id)
# --- Notification functions --- #
# noinspection PyIncorrectDocstring
@command(int, whole_msg=True, aliases=["allnotifications", "allnoti"])
def allnotificationsites(msg, room_id):
    """
    Returns a string stating what sites a user will be notified about
    :param msg:
    :param room_id:
    :return: A string
    """
    sites = get_all_notification_sites(msg.owner.id, msg._client.host, room_id)
    if not sites:
        return "You won't get notified for any sites in that room."
    return "You will get notified for these sites:\r\n" + ", ".join(sites)
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(int, str, literal_eval, whole_msg=True, arity=(2, 3))
def notify(msg, room_id, se_site, always_ping):
    """
    Subscribe a user to events on a site in a single room
    :param msg:
    :param room_id:
    :param se_site:
    :param always_ping: optional; treated as True when omitted
    :return: A string
    """
    # TODO: Add check whether smokey reports in that room
    ping = always_ping if always_ping is not None else True
    response, full_site = add_to_notification_list(msg.owner.id, msg._client.host, room_id, se_site,
                                                   always_ping=ping)
    if response == 0:
        return "You'll now get pings from me if I report a post on `{site}`, in room "\
               "`{room}` on `chat.{domain}`".format(site=se_site, room=room_id, domain=msg._client.host)
    if response == -1:
        raise CmdException("That notification configuration is already registered.")
    if response == -2:
        raise CmdException("The given SE site does not exist.")
    raise CmdException("Unrecognized code returned when adding notification.")
# temp command
@command(privileged=True)
def migrate_notifications():
    """Append the default always-ping flag to legacy 4-tuple notifications and persist them."""
    for index, entry in enumerate(GlobalVars.notifications):
        if len(entry) == 4:
            GlobalVars.notifications[index] = entry + (True,)
    with open("notifications.p", "wb") as f:
        pickle.dump(GlobalVars.notifications, f, protocol=pickle.HIGHEST_PROTOCOL)
    return "shoutouts to simpleflips"
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(whole_msg=True, aliases=["unnotify-all"])
def unnotify_all(msg):
    """
    Remove every notification subscription for the invoking user.
    :param msg:
    :return: A string
    """
    user_id = msg.owner.id
    remove_all_from_notification_list(user_id)
    return "I will no longer ping you if I report a post anywhere."
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(int, str, whole_msg=True)
def unnotify(msg, room_id, se_site):
    """
    Unsubscribes a user to specific events
    :param msg:
    :param room_id:
    :param se_site:
    :return: A string
    """
    removed = remove_from_notification_list(msg.owner.id, msg._client.host, room_id, se_site)
    if not removed:
        raise CmdException("That configuration doesn't exist.")
    return "I will no longer ping you if I report a post on `{site}`, in room `{room}` "\
           "on `chat.{domain}`".format(site=se_site, room=room_id, domain=msg._client.host)
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(int, str, whole_msg=True)
def willbenotified(msg, room_id, se_site):
    """
    Returns a string stating whether a user will be notified or not
    :param msg:
    :param room_id:
    :param se_site:
    :return: A string
    """
    notified = will_i_be_notified(msg.owner.id, msg._client.host, room_id, se_site)
    if notified:
        return "Yes, you will be notified for that site in that room."
    return "No, you won't be notified for that site in that room."
# Singular/plural display names for each canonical role.
RETURN_NAMES = {"admin": ["admin", "admins"], "code_admin": ["code admin", "code admins"]}
# Maps accepted user input to the canonical role name used by the metasmoke API.
VALID_ROLES = {"admin": "admin",
               "code_admin": "code_admin",
               "admins": "admin",
               "codeadmins": "code_admin"}
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(str, whole_msg=True)
def whois(msg, role):
    """
    Return a list of important users
    :param msg:
    :param role: One of the keys of VALID_ROLES
    :return: A string
    :raises CmdException: if the role is not recognized
    """
    if role not in VALID_ROLES:
        raise CmdException("That is not a user level I can check. "
                           "I know about {0}".format(", ".join(set(VALID_ROLES.values()))))
    ms_route = "https://metasmoke.erwaysoftware.com/api/v2.0/users/with_role/{}".format(VALID_ROLES[role])
    params = {
        'filter': 'HMMKFJ',
        'key': GlobalVars.metasmoke_key,
        'per_page': 100
    }
    user_response = requests.get(ms_route, params=params)
    user_response.encoding = 'utf-8-sig'
    user_response = user_response.json()
    chat_host = msg._client.host
    # Pick the metasmoke field holding the chat id for this chat server.
    key = ""
    if chat_host == "stackexchange.com":
        key = 'stackexchange_chat_id'
    elif chat_host == "meta.stackexchange.com":
        key = 'meta_stackexchange_chat_id'
    elif chat_host == "stackoverflow.com":
        key = 'stackoverflow_chat_id'
    admin_ids = [a[key] for a in user_response['items'] if a[key] and a['id'] != -1]
    all_users_in_room = msg.room.get_current_user_ids()
    admins_in_room = list(set(admin_ids) & set(all_users_in_room))
    admins_not_in_room = list(set(admin_ids) - set(admins_in_room))

    def _admin_info(admin):
        # Fetch the chat user once per admin; the previous version called
        # get_user three times per admin per list.
        user = msg._client.get_user(admin)
        return (admin, user.name, user.last_message, user.last_seen)

    return_name = RETURN_NAMES[VALID_ROLES[role]][0 if len(admin_ids) == 1 else 1]
    response = "I am aware of {} {}".format(len(admin_ids), return_name)
    if admins_in_room:
        # Sort by last message (last seen = info[3])
        in_room_infos = sorted((_admin_info(a) for a in admins_in_room), key=lambda info: info[2])
        response += ". Currently in this room: **"
        for info in in_room_infos:
            response += "{}, ".format(info[1])
        response = response[:-2] + "**. "
        response += "Not currently in this room: "
        for admin in admins_not_in_room:
            response += "{}, ".format(_admin_info(admin)[1])
        response = response[:-2] + "."
    else:
        response += ": "
        for admin in admin_ids:
            response += "{}, ".format(_admin_info(admin)[1])
        response = response[:-2] + ". "
        response += "None of them are currently in this room. Other users in this room might be able to help you."
    return response
@command(int, str, privileged=True, whole_msg=True)
def invite(msg, room_id, roles):
    """
    Route messages with the given types to an additional room.
    :param msg:
    :param room_id:
    :param roles: Comma-separated message types
    :return: A string
    """
    role_list = roles.split(",")
    add_room((msg._client.host, room_id), role_list)
    return "I'll now send messages with types `{}` to room `{}` on `{}`." \
           " (Note that this will not persist after restarts.)".format(roles, room_id, msg._client.host)
# --- Post Responses --- #
# noinspection PyIncorrectDocstring
@command(str, whole_msg=True, privileged=False, give_name=True, aliases=["scan", "report-force"])
def report(msg, args, alias_used="report"):
    """
    Report a post (or posts)
    :param msg:
    :param args: Space-separated post URLs, optionally followed by a "custom reason"
    :param alias_used: "report", "report-force" or "scan"
    :return: A string (or None)
    :raises CmdException: on missing privileges, rate limiting, or too many URLs
    """
    if not is_privileged(msg.owner, msg.room) and alias_used != "scan":
        raise CmdException(GlobalVars.not_privileged_warning)
    crn, wait = can_report_now(msg.owner.id, msg._client.host)
    if not crn:
        raise CmdException("You can execute the !!/{} command again in {} seconds. "
                           "To avoid one user sending lots of reports in a few commands and "
                           "slowing SmokeDetector down due to rate-limiting, you have to "
                           "wait 30 seconds after you've reported multiple posts in "
                           "one go.".format(alias_used, wait))
    alias_used = alias_used or "report"
    argsraw = args.split(' "', 1)
    urls = argsraw[0].split(' ')
    message_url = "https://chat.{0}/transcript/{1}?m={2}".format(msg._client.host, msg.room.id, msg.id)
    # Handle determining whether a custom report reason was provided.
    try:
        # Custom handle trailing quotation marks at the end of the custom reason, which could happen.
        # Use `==` here: the previous `is '"'` compared identity with a literal,
        # which only worked by CPython string interning (SyntaxWarning on 3.8+).
        if argsraw[1][-1] == '"':
            custom_reason = argsraw[1][:-1]
        else:
            custom_reason = argsraw[1]
    except IndexError:
        custom_reason = None  # No custom reason was supplied.
    if len(urls) > 5:
        raise CmdException("To avoid SmokeDetector reporting posts too slowly, you can "
                           "{} at most 5 posts at a time. This is to avoid "
                           "SmokeDetector's chat messages getting rate-limited too much, "
                           "which would slow down reports.".format(alias_used))
    # report_posts(urls, reported_by, reported_in, blacklist_by, operation="report", custom_reason=None):
    output = report_posts(urls, msg.owner.name, msg.room.name, message_url, alias_used, custom_reason)
    if output:
        # Fewer output lines than URLs means at least one post was actually
        # reported (successes produce no output line), so count this toward
        # the multiple-report rate limit.
        if 1 < len(urls) > output.count("\n") + 1:
            add_or_update_multiple_reporter(msg.owner.id, msg._client.host, time.time())
        return output
# noinspection PyIncorrectDocstring,PyUnusedLocal
@command(str, whole_msg=True, privileged=True, aliases=['reportuser'])
def allspam(msg, url):
    """
    Reports all of a user's posts as spam
    :param msg:
    :param url: A user profile URL
    :return: None
    :raises CmdException: on rate limiting, invalid input, or suspicious accounts
    """
    api_key = 'IAkbitmze4B8KpacUfLqkw(('
    crn, wait = can_report_now(msg.owner.id, msg._client.host)
    if not crn:
        raise CmdException("You can execute the !!/allspam command again in {} seconds. "
                           "To avoid one user sending lots of reports in a few commands and "
                           "slowing SmokeDetector down due to rate-limiting, you have to "
                           "wait 30 seconds after you've reported multiple posts in "
                           "one go.".format(wait))
    user = get_user_from_url(url)
    if user is None:
        raise CmdException("That doesn't look like a valid user URL.")
    user_sites = []
    user_posts = []
    # Detect whether link is to network profile or site profile
    if user[1] == 'stackexchange.com':
        # Respect backoffs etc
        GlobalVars.api_request_lock.acquire()
        if GlobalVars.api_backoff_time > time.time():
            time.sleep(GlobalVars.api_backoff_time - time.time() + 2)
        # Fetch sites
        request_url = "http://api.stackexchange.com/2.2/users/{}/associated".format(user[0])
        params = {
            'filter': '!6Pbp)--cWmv(1',
            'key': api_key
        }
        res = requests.get(request_url, params=params).json()
        if "backoff" in res:
            if GlobalVars.api_backoff_time < time.time() + res["backoff"]:
                GlobalVars.api_backoff_time = time.time() + res["backoff"]
        GlobalVars.api_request_lock.release()
        if 'items' not in res or len(res['items']) == 0:
            raise CmdException("The specified user does not appear to exist.")
        if res['has_more']:
            raise CmdException("The specified user has an abnormally high number of accounts. Please consider "
                               "flagging for moderator attention, otherwise use !!/report on the user's posts "
                               "individually.")
        # Add accounts with posts
        for site in res['items']:
            if site['question_count'] > 0 or site['answer_count'] > 0:
                user_sites.append((site['user_id'], get_api_sitename_from_url(site['site_url'])))
    else:
        user_sites.append((user[0], get_api_sitename_from_url(user[1])))
    # Fetch posts
    for u_id, u_site in user_sites:
        # Respect backoffs etc
        GlobalVars.api_request_lock.acquire()
        if GlobalVars.api_backoff_time > time.time():
            time.sleep(GlobalVars.api_backoff_time - time.time() + 2)
        # Fetch posts
        request_url = "http://api.stackexchange.com/2.2/users/{}/posts".format(u_id)
        params = {
            'filter': '!)Q4RrMH0DC96Y4g9yVzuwUrW',
            'key': api_key,
            'site': u_site
        }
        res = requests.get(request_url, params=params).json()
        if "backoff" in res:
            if GlobalVars.api_backoff_time < time.time() + res["backoff"]:
                GlobalVars.api_backoff_time = time.time() + res["backoff"]
        GlobalVars.api_request_lock.release()
        if 'items' not in res or len(res['items']) == 0:
            raise CmdException("The specified user has no posts on this site.")
        posts = res['items']
        if posts[0]['owner']['reputation'] > 100:
            raise CmdException("The specified user's reputation is abnormally high. Please consider flagging for "
                               "moderator attention, otherwise use !!/report on the posts individually.")
        # Add blacklisted user - use most downvoted post as post URL
        message_url = "https://chat.{}/transcript/{}?m={}".format(msg._client.host, msg.room.id, msg.id)
        add_blacklisted_user(user, message_url, sorted(posts, key=lambda x: x['score'])[0]['owner']['link'])
        # TODO: Postdata refactor, figure out a better way to use apigetpost
        for post in posts:
            post_data = PostData()
            post_data.post_id = post['post_id']
            post_data.post_url = url_to_shortlink(post['link'])
            *discard, post_data.site, post_data.post_type = fetch_post_id_and_site_from_url(
                url_to_shortlink(post['link']))
            post_data.title = unescape(post['title'])
            post_data.owner_name = unescape(post['owner']['display_name'])
            post_data.owner_url = post['owner']['link']
            post_data.owner_rep = post['owner']['reputation']
            post_data.body = post['body']
            post_data.score = post['score']
            post_data.up_vote_count = post['up_vote_count']
            post_data.down_vote_count = post['down_vote_count']
            if post_data.post_type == "answer":
                # Annoyingly we have to make another request to get the question ID, since it is only returned by the
                # /answers route
                # Respect backoffs etc
                GlobalVars.api_request_lock.acquire()
                if GlobalVars.api_backoff_time > time.time():
                    time.sleep(GlobalVars.api_backoff_time - time.time() + 2)
                # Fetch posts
                req_url = "http://api.stackexchange.com/2.2/answers/{}".format(post['post_id'])
                params = {
                    'filter': '!*Jxb9s5EOrE51WK*',
                    'key': api_key,
                    'site': u_site
                }
                answer_res = requests.get(req_url, params=params).json()
                # Bug fix: check the backoff of THIS response (answer_res); the
                # previous code re-checked the stale `res` from the posts request,
                # silently ignoring backoff instructions from the /answers route.
                if "backoff" in answer_res:
                    if GlobalVars.api_backoff_time < time.time() + answer_res["backoff"]:
                        GlobalVars.api_backoff_time = time.time() + answer_res["backoff"]
                GlobalVars.api_request_lock.release()
                # Finally, set the attribute
                post_data.question_id = answer_res['items'][0]['question_id']
                post_data.is_answer = True
            user_posts.append(post_data)
    if len(user_posts) == 0:
        raise CmdException("The specified user hasn't posted anything.")
    if len(user_posts) > 15:
        raise CmdException("The specified user has an abnormally high number of spam posts. Please consider flagging "
                           "for moderator attention, otherwise use !!/report on the posts individually.")
    why_info = u"User manually reported by *{}* in room *{}*.\n".format(msg.owner.name, msg.room.name)
    # Handle all posts
    for index, post in enumerate(user_posts, start=1):
        batch = ""
        if len(user_posts) > 1:
            batch = " (batch report: post {} out of {})".format(index, len(user_posts))
        handle_spam(post=Post(api_response=post.as_dict),
                    reasons=["Manually reported " + post.post_type + batch],
                    why=why_info)
        time.sleep(2)  # Should this be implemented differently?
    if len(user_posts) > 2:
        add_or_update_multiple_reporter(msg.owner.id, msg._client.host, time.time())
def report_posts(urls, reported_by, reported_in=None, blacklist_by=None, operation="report", custom_reason=None):
    """
    Scan and/or report a batch of post URLs.

    :param urls: Iterable of post URLs
    :param reported_by: Name of the user who requested the operation
    :param reported_in: Room name; True when invoked via the metasmoke API; None otherwise
    :param blacklist_by: Message URL recorded when blacklisting post owners
    :param operation: "report", "report-force" or "scan"
    :param custom_reason: Optional custom reason supplied by the reporter
    :return: A newline-joined status string, or None when there was no output
    """
    operation = operation or "report"
    action_done = {"report": "reported", "report-force": "reported", "scan": "scanned"}[operation]
    if reported_in is None:
        reported_from = " by *{}*".format(reported_by)
    elif reported_in is True:
        reported_from = " by *{}* from the metasmoke API".format(reported_by)
    else:
        reported_from = " by user *{}* in room *{}*".format(reported_by, reported_in)
    if custom_reason:
        with_reason = " with reason: *{}*".format(custom_reason)
    else:
        with_reason = ""
    report_info = "Post manually {}{}{}.\n\n".format(action_done, reported_from, with_reason)
    # Normalize every URL to a shortlink; invalid or duplicate entries are
    # replaced by error strings which are echoed per-post in the loop below.
    normalized_urls = []
    for url in urls:
        t = url_to_shortlink(url)
        if not t:
            normalized_urls.append("That does not look like a valid post URL.")
        elif t not in normalized_urls:
            normalized_urls.append(t)
        else:
            normalized_urls.append("A duplicate URL was provided.")
    urls = normalized_urls
    users_to_blacklist = []
    output = []
    for index, url in enumerate(urls, start=1):
        if not url.startswith("http://") and not url.startswith("https://"):
            # Return the bad URL directly.
            output.append("Post {}: {}".format(index, url))
            continue
        post_data = api_get_post(rebuild_str(url))
        if post_data is None:
            output.append("Post {}: That does not look like a valid post URL.".format(index))
            continue
        if post_data is False:
            output.append("Post {}: Could not find data for this post in the API. "
                          "It may already have been deleted.".format(index))
            continue
        if has_already_been_posted(post_data.site, post_data.post_id, post_data.title) and not is_false_positive(
                (post_data.post_id, post_data.site)):
            # Don't re-report if the post wasn't marked as a false positive. If it was marked as a false positive,
            # this re-report might be attempting to correct that/fix a mistake/etc.
            if GlobalVars.metasmoke_key is not None:
                se_link = to_protocol_relative(post_data.post_url)
                ms_link = resolve_ms_link(se_link) or to_metasmoke_link(se_link)
                output.append("Post {}: Already recently reported [ [MS]({}) ]".format(index, ms_link))
                continue
            else:
                output.append("Post {}: Already recently reported".format(index))
                continue
        url = to_protocol_relative(post_data.post_url)
        post = Post(api_response=post_data.as_dict)
        user = get_user_from_url(post_data.owner_url)
        # For answers, also fetch the parent question so the scan sees full context.
        if fetch_post_id_and_site_from_url(url)[2] == "answer":
            parent_data = api_get_post("https://{}/q/{}".format(post.post_site, post_data.question_id))
            post._is_answer = True
            post._parent = Post(api_response=parent_data.as_dict)
        scan_spam, scan_reasons, scan_why = check_if_spam(post)  # Scan it first
        if operation in {"report", "report-force"}:  # Force blacklist user even if !!/report falls back to scan
            if user is not None:
                users_to_blacklist.append((user, blacklist_by, post_data.post_url))
        # Expand real scan results from dirty return value when not "!!/scan"
        # Presence of "scan_why" indicates the post IS spam but ignored
        if operation != "scan" and (not scan_spam) and scan_why:
            scan_spam = True
            scan_reasons, scan_why = scan_reasons
        # If "report-force" then jump to the next block
        if scan_spam and operation in {"scan", "report"}:
            handle_spam(post=post, reasons=scan_reasons, why=report_info + scan_why.lstrip())
            continue
        # scan_spam == False or "report-force"
        if operation in {"report", "report-force"}:
            batch = ""
            if len(urls) > 1:
                batch = " (batch report: post {} out of {})".format(index, len(urls))
            if scan_spam:
                why_append = "This post would have also been caught for: " + ", ".join(scan_reasons).capitalize() + \
                    '\n' + scan_why
            else:
                why_append = "This post would not have been caught otherwise."
            handle_spam(post=post,
                        reasons=["Manually reported " + post_data.post_type + batch],
                        why=report_info + why_append)
            continue
        # scan_spam == False and "scan"
        else:
            if scan_why:
                output.append("Post {}: Looks like spam but not reported: {}".format(index, scan_why.capitalize()))
            else:
                output.append("Post {}: This does not look like spam".format(index))
    for item in users_to_blacklist:
        add_blacklisted_user(*item)
    if len(output):
        return "\n".join(output)
    return None
@command(str, str, privileged=True, whole_msg=True)
def feedback(msg, post_url, feedback):
    """
    Forward a feedback word to the matching feedback handler for a post.
    :param msg:
    :param post_url:
    :param feedback:
    :return: None
    """
    trimmed_url = url_to_shortlink(post_url)[6:]
    if not trimmed_url:
        raise CmdException("No such feedback.")
    for table in (TRUE_FEEDBACKS, FALSE_FEEDBACKS, NAA_FEEDBACKS):
        if feedback in table:
            table[feedback].send(trimmed_url, msg)
            return
    raise CmdException("No such feedback.")
@command(privileged=True, aliases=['dump-data'])
def dump_data():
    """Dump SmokeDetector's data and post it, with metadata header, to 'dump' rooms."""
    try:
        s, metadata = SmokeyTransfer.dump()
        header = "{}, {}, {}".format(metadata['time'], metadata['location'], metadata['rev'])
        tell_rooms_with('dump', header + "\n" + s)
    except Exception:
        log_exception(*sys.exc_info())
        raise CmdException("Failed to dump data. Run `!!/errorlogs` for details.")
    return "Data successfully dumped"
@command(int, privileged=True, aliases=['load-data'])
def load_data(msg_id):
    """
    Load dumped data from a chat message posted by SmokeDetector.
    :param msg_id: ID of the chat message containing the dump
    :return: A string
    """
    msg = get_message(msg_id)
    # TODO: implement an is_self() in chatcommunicate, don't use magic numbers
    if msg.owner.id != 120914:
        raise CmdException("Message owner is not SmokeDetector, refusing to load")
    try:
        SmokeyTransfer.load(msg.content_source)
    except ValueError as e:
        raise CmdException(str(e)) from None
    except Exception:
        log_exception(*sys.exc_info())
        raise CmdException("Failed to load data. Run `!!/errorlogs` for details.")
    return "Data successfully loaded"
#
#
# Subcommands go below here
# noinspection PyIncorrectDocstring,PyBroadException
# Base names for the delete command family; delete_force registers "<alias>-force".
DELETE_ALIASES = ["delete", "del", "remove", "poof", "gone", "kaboom"]
@command(message, reply=True, privileged=True, aliases=[alias + "-force" for alias in DELETE_ALIASES])
def delete_force(msg):
    """
    Delete a post from the room, ignoring protection for Charcoal HQ
    :param msg:
    :return: None
    """
    # noinspection PyBroadException
    try:
        msg.delete()
    except Exception:  # I don't want to dig into ChatExchange
        pass  # couldn't delete message
# noinspection PyIncorrectDocstring,PyUnusedLocal,PyBroadException
@command(message, reply=True, privileged=True, aliases=DELETE_ALIASES)
def delete(msg):
    """
    Delete a post from a chatroom, with an override for Charcoal HQ.
    :param msg:
    :return: None
    """
    post_data = get_report_data(msg)
    # Reports in Charcoal HQ (room 11540) are protected; refuse with an explanation.
    if post_data and msg.room.id == 11540:
        return "Reports from SmokeDetector in Charcoal HQ are generally kept "\
               "as records. If you really need to delete a report, please use "\
               "`sd delete-force`. See [this note on message deletion]"\
               "(https://charcoal-se.org/smokey/Commands"\
               "#a-note-on-message-deletion) for more details."
    try:
        msg.delete()
    except Exception:  # I don't want to dig into ChatExchange
        pass
# noinspection PyIncorrectDocstring,PyUnusedLocal
@command(message, reply=True, privileged=True)
def postgone(msg):
    """
    Removes link from a marked report message
    :param msg:
    :return: None
    """
    new_content = edited_message_after_postgone_command(msg.content)
    if new_content is None:
        raise CmdException("That's not a report.")
    msg.edit(new_content)
# noinspection PyIncorrectDocstring
@command(message, str, reply=True, privileged=True, whole_msg=True, give_name=True, aliases=FALSE_FEEDBACKS.keys(),
         arity=(1, 2))
def false(feedback, msg, comment, alias_used="false"):
    """
    Marks a post as a false positive
    :param feedback:
    :param msg:
    :param comment:
    :return: String
    """
    post_data = get_report_data(msg)
    if not post_data:
        raise CmdException("That message is not a report.")
    post_url, owner_url = post_data
    feedback_type = FALSE_FEEDBACKS[alias_used]
    feedback_type.send(post_url, feedback)
    post_id, site, post_type = fetch_post_id_and_site_from_url(post_url)
    add_false_positive((post_id, site))
    user = get_user_from_url(owner_url)
    # Default result; upgraded below when we also adjust user lists.
    result = "Registered " + post_type + " as false positive."
    if user is not None:
        if feedback_type.blacklist:
            add_whitelisted_user(user)
            result = "Registered " + post_type + " as false positive and whitelisted user."
        elif is_blacklisted_user(user):
            remove_blacklisted_user(user)
            result = "Registered " + post_type + " as false positive and removed user from the blacklist."
    try:
        # Keep report records in Charcoal HQ (room 11540); delete elsewhere.
        if msg.room.id != 11540:
            msg.delete()
    except Exception:  # I don't want to dig into ChatExchange
        pass
    if comment:
        Tasks.do(Metasmoke.post_auto_comment, comment, feedback.owner, url=post_url)
    return result if not feedback_type.always_silent else ""
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(message, str, reply=True, privileged=True, whole_msg=True, arity=(1, 2), give_name=True, aliases=["ig"])
def ignore(feedback, msg, comment, alias_used="ignore"):
    """
    Marks a post to be ignored
    :param feedback:
    :param msg:
    :param comment:
    :return: String, or None for the silent "ig" alias
    """
    post_data = get_report_data(msg)
    if not post_data:
        raise CmdException("That message is not a report.")
    post_url = post_data[0]
    Feedback.send_custom("ignore", post_url, feedback)
    post_id, site, _ = fetch_post_id_and_site_from_url(post_url)
    add_ignored_post((post_id, site))
    if comment:
        Tasks.do(Metasmoke.post_auto_comment, comment, feedback.owner, url=post_url)
    if alias_used == "ig":
        return None
    return "Post ignored; alerts about it will no longer be posted."
# noinspection PyIncorrectDocstring
@command(message, str, reply=True, privileged=True, whole_msg=True, give_name=True, aliases=NAA_FEEDBACKS.keys(),
         arity=(1, 2))
def naa(feedback, msg, comment, alias_used="naa"):
    """
    Marks a post as NAA (Not An Answer)
    :param feedback: the feedback text given by the user
    :param msg: the report message being replied to
    :param comment: optional comment to post on the reported post
    :param alias_used: which NAA alias triggered the command
    :return: String
    """
    post_data = get_report_data(msg)
    if not post_data:
        raise CmdException("That message is not a report.")

    post_url, _ = post_data
    # Parse the URL once; the previous code called
    # fetch_post_id_and_site_from_url twice with the same argument.
    post_id, site, post_type = fetch_post_id_and_site_from_url(post_url)
    if post_type != "answer":
        raise CmdException("That report was a question; questions cannot be marked as NAAs.")

    feedback_type = NAA_FEEDBACKS[alias_used]
    feedback_type.send(post_url, feedback)

    add_ignored_post((post_id, site))

    if comment:
        Tasks.do(Metasmoke.post_auto_comment, comment, feedback.owner, url=post_url)

    return "Recorded answer as an NAA in metasmoke." if not feedback_type.always_silent else ""
# noinspection PyIncorrectDocstring
@command(message, str, reply=True, privileged=True, whole_msg=True, give_name=True, aliases=TRUE_FEEDBACKS.keys(),
         arity=(1, 2))
def true(feedback, msg, comment, alias_used="true"):
    """
    Marks a post as a true positive
    :param feedback: the feedback text given by the user
    :param msg: the report message being replied to
    :param comment: optional comment to post on the reported post
    :param alias_used: which true-positive alias triggered the command
    :return: string
    """
    post_data = get_report_data(msg)
    if not post_data:
        raise CmdException("That message is not a report.")

    post_url, owner_url = post_data
    feedback_type = TRUE_FEEDBACKS[alias_used]
    feedback_type.send(post_url, feedback)

    post_id, site, post_type = fetch_post_id_and_site_from_url(post_url)

    try:
        user = get_user_from_url(owner_url)
    except TypeError as e:
        # Chain the original exception so the root cause is preserved
        # (the previous code bound it as ``e`` but never used it).
        raise CmdException('Could not get user from URL {0!r}'.format(owner_url)) from e

    if user is not None:
        if feedback_type.blacklist:
            message_url = "https://chat.{}/transcript/{}?m={}".format(msg._client.host, msg.room.id, msg.id)
            add_blacklisted_user(user, message_url, post_url)
            result = "Registered " + post_type + " as true positive and blacklisted user."
        else:
            result = "Registered " + post_type + " as true positive. If you want to "\
                     "blacklist the poster, use `trueu` or `tpu`."
    else:
        result = "Registered " + post_type + " as true positive."

    if comment:
        Tasks.do(Metasmoke.post_auto_comment, comment, feedback.owner, url=post_url)

    # Remember the feedback for one minute so duplicate reports are muted.
    datahandling.last_feedbacked = ((post_id, site), time.time() + 60)

    return result if not feedback_type.always_silent else ""
# noinspection PyIncorrectDocstring,PyUnusedLocal
@command(message, reply=True, aliases=['wtf'])
def why(msg):
    """
    Returns reasons a post was reported
    :param msg: the report message being replied to
    :return: A string
    """
    post_data = get_report_data(msg)
    if not post_data:
        raise CmdException("That's not a report.")

    post_id, site, _ = fetch_post_id_and_site_from_url(post_data[0])
    reasons = get_why(site, post_id)
    if reasons:
        return reasons
    raise CmdException("There is no `why` data for that user (anymore).")
# noinspection PyIncorrectDocstring,PyUnusedLocal
@command(message, reply=True)
def autoflagged(msg):
    """
    Determines whether a post was automatically flagged by Metasmoke
    :param msg: the report message being replied to
    :return: A string
    """
    # sneaky!
    update_reason_weights()

    report = get_report_data(msg)
    if not report:
        raise CmdException("That's not a report.")

    flagged, flagger_names = Metasmoke.determine_if_autoflagged(report[0])
    if flagged:
        return "That post was automatically flagged, using flags from: {}.".format(", ".join(flagger_names))
    return "That post was **not** automatically flagged by metasmoke."
|
"""
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from unittest.mock import patch
from byceps.blueprints.shop_order.signals import order_canceled
from byceps.services.shop.order import service as order_service
from tests.helpers import current_party_set, current_user_set
from .base import OrderEmailTestBase
class EmailOnOrderCanceledSignalTest(OrderEmailTestBase):
    """End-to-end test: the order_canceled signal sends a cancelation email."""

    def setUp(self):
        super().setUp()
        brand = self.create_brand('acmecon', 'Acme Entertainment Convention')
        self.set_brand_email_sender_address(brand.id, 'acmecon@example.com')
        self.party = self.create_party(brand.id,
                                       'acmecon-2014',
                                       'Acme Entertainment Convention 2014')
        self.shop = self.create_shop(self.party.id)
        self.user = self.create_user_with_detail('Versager')
        self.order = self.place_order(self.user)
        # Cancel up front so the signal handler under test sees a canceled order.
        order_service.cancel_order(self.order.id, self.admin.id, 'dubious reason')

    @patch('byceps.email.send')
    def test_email_on_order_canceled(self, send_email_mock):
        # NOTE(review): this overwrites the reason only on the in-memory
        # order object; the reason persisted in setUp is 'dubious reason'.
        # Confirm the template reads the attribute, not the database.
        self.order.cancelation_reason = 'Du hast nicht rechtzeitig bezahlt.'
        self.send_event(self.order.id)
        expected_sender = 'acmecon@example.com'
        expected_recipients = [self.user.email_address]
        expected_subject = '\u274c Deine Bestellung (AC-14-B00017) wurde storniert.'
        expected_body = '''
Hallo Versager,
deine Bestellung mit der Bestellnummer AC-14-B00017 wurde von uns aus folgendem Grund storniert:
Du hast nicht rechtzeitig bezahlt.
Für Fragen stehen wir gerne zur Verfügung.
Viele Grüße,
das Team der Acme Entertainment Convention
--
Acme Entertainment Convention
E-Mail: acmecon@example.com
'''.strip()
        # The mocked transport must have been called exactly once with the
        # fully rendered message.
        send_email_mock.assert_called_once_with(
            expected_sender,
            expected_recipients,
            expected_subject,
            expected_body)

    # helpers

    def place_order(self, orderer):
        # Fixed, empty order with a known order number referenced in the email.
        return self.place_order_with_items(self.shop.id, orderer,
                                           'AC-14-B00017', None, [])

    def send_event(self, order_id):
        # Fire the order_canceled signal inside app/party/user contexts.
        with \
                current_party_set(self.app, self.party), \
                current_user_set(self.app, self.user), \
                self.app.app_context():
            order_canceled.send(None, order_id=order_id)
Pass reason to service, don't modify order object temporarily
"""
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from unittest.mock import patch
from byceps.blueprints.shop_order.signals import order_canceled
from byceps.services.shop.order import service as order_service
from tests.helpers import current_party_set, current_user_set
from .base import OrderEmailTestBase
class EmailOnOrderCanceledSignalTest(OrderEmailTestBase):
    """End-to-end test: the order_canceled signal sends a cancelation email."""

    def setUp(self):
        super().setUp()
        brand = self.create_brand('acmecon', 'Acme Entertainment Convention')
        self.set_brand_email_sender_address(brand.id, 'acmecon@example.com')
        self.party = self.create_party(brand.id,
                                       'acmecon-2014',
                                       'Acme Entertainment Convention 2014')
        self.shop = self.create_shop(self.party.id)
        self.user = self.create_user_with_detail('Versager')
        self.order = self.place_order(self.user)
        # The reason is passed to the service (and thus persisted) so the
        # email template can render it; the expected body below quotes it.
        reason = 'Du hast nicht rechtzeitig bezahlt.'
        order_service.cancel_order(self.order.id, self.admin.id, reason)

    @patch('byceps.email.send')
    def test_email_on_order_canceled(self, send_email_mock):
        self.send_event(self.order.id)
        expected_sender = 'acmecon@example.com'
        expected_recipients = [self.user.email_address]
        expected_subject = '\u274c Deine Bestellung (AC-14-B00017) wurde storniert.'
        expected_body = '''
Hallo Versager,
deine Bestellung mit der Bestellnummer AC-14-B00017 wurde von uns aus folgendem Grund storniert:
Du hast nicht rechtzeitig bezahlt.
Für Fragen stehen wir gerne zur Verfügung.
Viele Grüße,
das Team der Acme Entertainment Convention
--
Acme Entertainment Convention
E-Mail: acmecon@example.com
'''.strip()
        # The mocked transport must have been called exactly once with the
        # fully rendered message.
        send_email_mock.assert_called_once_with(
            expected_sender,
            expected_recipients,
            expected_subject,
            expected_body)

    # helpers

    def place_order(self, orderer):
        # Fixed, empty order with a known order number referenced in the email.
        return self.place_order_with_items(self.shop.id, orderer,
                                           'AC-14-B00017', None, [])

    def send_event(self, order_id):
        # Fire the order_canceled signal inside app/party/user contexts.
        with \
                current_party_set(self.app, self.party), \
                current_user_set(self.app, self.user), \
                self.app.app_context():
            order_canceled.send(None, order_id=order_id)
|
#!/usr/bin/env python
import lxml
import sys
from lxml import etree
class XMLTranslator:
    """Translate a PML process tree (parsed from XML) into Promela lines.

    A dispatch table maps PML construct tags to ``handle_*`` methods.
    Handlers append generated Promela source lines to ``process_current``
    (the proctype currently being emitted), may start additional proctypes
    by appending to ``processes_sofar``, and record the names of all
    variables they encounter in the ``resources_sofar`` set.
    """

    def __init__(self):
        # Dispatch table: XML element tag -> bound handler method.
        self.constructs = {
            "PrimAct": self.handle_action,
            "PrimBr": self.handle_branch,
            "PrimIter": self.handle_iteration,
            "PrimSeln": self.handle_selection,
            "PrimSeq": self.handle_sequence,
            "PrimTask": self.handle_sequence
        }

    # Get display indentation for a certain depth
    @staticmethod
    def get_indent(depth):
        """Return a string of ``depth`` tab characters."""
        line = ""
        for i in range(0, depth):
            line += "\t"
        return line

    # Get variable id (name)
    @staticmethod
    def get_varid(node):
        """Return the variable name held in the first child's "value" attribute."""
        return node[0].get("value")

    # Get variables
    def get_vars(self, var):
        """Return the names of all variable-id children of ``var``."""
        vars = []
        for varid in var:
            vars.append(self.get_varid(varid))
        return vars

    # Get variable list used by an action
    def get_varlist(self, node):
        """Return names from every PrimVar descendant of ``node``."""
        varlist = []
        for var in node.iter("PrimVar"):
            varlist += self.get_vars(var)
        return varlist

    # PML action
    def handle_action(self, node, depth, processes_sofar, process_current, resources_sofar):
        """Emit a guard from the 'requires' specs and assignments from 'provides'.

        Every variable name involved is added to ``resources_sofar``.
        """
        # Blocks (requires)
        reqlist = []
        for req in node.iter("SpecReqs"):
            reqlist[0:] = self.get_varlist(req)
        # print "requires: " + str(reqlist)
        curdepth = depth
        if len(reqlist) > 0:
            # Conjoin all required variables into one blocking guard: a && b ->
            line = self.get_indent(curdepth)
            curdepth += 1
            line += reqlist[0]
            for req in reqlist[1:]:
                line += " && " + req
            line += " ->"
            process_current.append(line)
        # State changes (provides)
        provlist = []
        for prov in node.iter("SpecProv"):
            provlist[0:] = self.get_varlist(prov)
        # print "provides: " + str(provlist)
        if len(provlist) == 1:
            line = self.get_indent(curdepth)
            line += provlist[0] + " = true;"
            process_current.append(line)
        elif len(provlist) > 0:
            # Several provides: wrap the assignments in a block.
            process_current.append(self.get_indent(curdepth - 1) + "{")
            for prov in provlist:
                line = self.get_indent(curdepth)
                line += prov + " = true;"
                process_current.append(line)
            process_current.append(self.get_indent(curdepth - 1) + "}")
        for req in reqlist:
            resources_sofar.add(req)
        for prov in provlist:
            resources_sofar.add(prov)

    # PML branch
    def handle_branch(self, node, depth, processes_sofar, process_current, resources_sofar):
        """Spawn one Promela proctype per branch child, then wait for them.

        Uses ``_nr_pr`` (Promela's count of running processes) to detect
        when the spawned branch processes have all terminated.
        """
        construct_name = node[0][0].get("value")  # Branch name; ID will be first element in well-formed XML
        beforeline = self.get_indent(depth)
        beforeline += "int " + str(construct_name) + " = _nr_pr;"  # Records the number of processes currently running
        process_current.append(beforeline)
        for child in node:
            if child.tag != "OpNmId":  # Not interested in the ID again
                branch_name = str(child[0].get("value"))
                process_within = ["proctype " + branch_name + "()", "{"]
                processes_sofar.append(process_within)
                # NOTE(review): this parses all of ``node`` (not just
                # ``child``) into every branch proctype -- confirm intended.
                self.parse_nodes(node, 0, processes_sofar, process_within, resources_sofar)
                process_within.append("}")
                runline = self.get_indent(depth)
                runline += "run " + branch_name + "();"
                process_current.append(runline)
        afterline = self.get_indent(depth)
        afterline += "_nr_pr == " + str(construct_name) + " ->"  # Waits until the spawned processes have completed
        process_current.append(afterline)

    # PML iteration
    def handle_iteration(self, node, depth, processes_sofar, process_current, resources_sofar):
        """Not yet implemented."""
        pass

    # PML sequence
    def handle_sequence(self, node, depth, processes_sofar, process_current, resources_sofar):
        """Emit each child construct in order."""
        self.parse_nodes(node, depth, processes_sofar, process_current, resources_sofar)

    # Parse non-Process node of the XML file
    def parse_nodes(self, node, depth, processes_sofar, process_current, resources_sofar):
        """Dispatch each recognised child of ``node`` to its handler."""
        for child in node:
            if child.tag in self.constructs:
                self.constructs[child.tag](child, depth + 1, processes_sofar, process_current,resources_sofar)
        pass

    # Parse Process, the outermost level of a PML file
    def parse_process(self, root):
        """Translate a whole PML process.

        Returns a list of line-lists: an optional resource block followed by
        the flattened proctype definitions.
        """
        processes = []  # List of Promela proctypes
        resources = set()  # Set of resources
        procname = root[0].get("value")  # Process name; ID is always the first element in well-formed PML
        process_main = ["active proctype " + procname + "()", "{"]
        processes.append(process_main)
        # Parse inner tree nodes
        self.parse_nodes(root, 0, processes, process_main, resources)
        # Add dummy instruction to cope with empty processes
        if len(process_main) <= 2:
            process_main.append("\tskip;")
        process_main.append("}")
        # Assemble resources and processes into translation
        translation = []
        resourcelist = []
        if len(resources) > 0:
            for i, resource in enumerate(resources):  # FIXME: not sure this is where resources should be going - scoping?
                if i < len(resources)-1:
                    resourcelist.append(resource + ",")
                else:
                    resourcelist.append(resource)
            resourcelist.append("")
            translation.append(resourcelist)
        processlist = []
        for process in processes:
            for line in process:
                processlist.append(line)
        translation.append(processlist)
        return translation

    # PML selection
    def handle_selection(self, node, depth, processes_sofar, process_current, resources_sofar):
        """Emit a Promela if...fi with one '::' option per child construct."""
        if_block = False
        curdepth = depth
        line = self.get_indent(curdepth)
        for child_node in node.iterchildren():
            if child_node.tag in self.constructs:
                if not if_block:
                    # Open the if-block once, on the first recognised child.
                    if_block = not if_block
                    process_current.append(line + "if")
                process_current.append(line + ":: true ->")
                # NOTE(review): lxml's insert() MOVES child_node under the
                # temporary root, detaching it from ``node`` -- confirm OK.
                temp_root = etree.Element(child_node.tag)
                temp_root.insert(0, child_node)
                self.parse_nodes(temp_root, depth+1, processes_sofar, process_current, resources_sofar)
        if if_block:
            process_current.append(line + "fi")
        pass

    def translate_xml(self, xml_string):
        """Parse ``xml_string`` and translate it; exits the program on bad XML."""
        root = None
        try:
            root = lxml.etree.fromstring(xml_string)
        except lxml.etree.XMLSyntaxError:
            print "Error parsing XML, exiting."
            sys.exit(1)
        translation = self.parse_process(root)
        return translation
Second branch test passing
#!/usr/bin/env python
import lxml
import sys
from lxml import etree
class XMLTranslator:
    """Translate a PML process tree (parsed from XML) into Promela lines.

    A dispatch table maps PML construct tags to ``handle_*`` methods.
    Handlers append generated Promela source lines to ``process_current``
    (the proctype currently being emitted), may start additional proctypes
    by appending to ``processes_sofar``, and record the names of all
    variables they encounter in the ``resources_sofar`` set.
    """

    def __init__(self):
        # Dispatch table: XML element tag -> bound handler method.
        self.constructs = {
            "PrimAct": self.handle_action,
            "PrimBr": self.handle_branch,
            "PrimIter": self.handle_iteration,
            "PrimSeln": self.handle_selection,
            "PrimSeq": self.handle_sequence,
            "PrimTask": self.handle_sequence
        }

    # Get display indentation for a certain depth
    @staticmethod
    def get_indent(depth):
        """Return a string of ``depth`` tab characters."""
        line = ""
        for i in range(0, depth):
            line += "\t"
        return line

    # Get variable id (name)
    @staticmethod
    def get_varid(node):
        """Return the variable name held in the first child's "value" attribute."""
        return node[0].get("value")

    # Get variables
    def get_vars(self, var):
        """Return the names of all variable-id children of ``var``."""
        vars = []
        for varid in var:
            vars.append(self.get_varid(varid))
        return vars

    # Get variable list used by an action
    def get_varlist(self, node):
        """Return names from every PrimVar descendant of ``node``."""
        varlist = []
        for var in node.iter("PrimVar"):
            varlist += self.get_vars(var)
        return varlist

    # PML action
    def handle_action(self, node, depth, processes_sofar, process_current, resources_sofar):
        """Emit a guard from the 'requires' specs and assignments from 'provides'.

        Every variable name involved is added to ``resources_sofar``.
        """
        # Blocks (requires)
        reqlist = []
        for req in node.iter("SpecReqs"):
            reqlist[0:] = self.get_varlist(req)
        # print "requires: " + str(reqlist)
        curdepth = depth
        if len(reqlist) > 0:
            # Conjoin all required variables into one blocking guard: a && b ->
            line = self.get_indent(curdepth)
            curdepth += 1
            line += reqlist[0]
            for req in reqlist[1:]:
                line += " && " + req
            line += " ->"
            process_current.append(line)
        # State changes (provides)
        provlist = []
        for prov in node.iter("SpecProv"):
            provlist[0:] = self.get_varlist(prov)
        # print "provides: " + str(provlist)
        if len(provlist) == 1:
            line = self.get_indent(curdepth)
            line += provlist[0] + " = true;"
            process_current.append(line)
        elif len(provlist) > 0:
            # Several provides: wrap the assignments in a block.
            process_current.append(self.get_indent(curdepth - 1) + "{")
            for prov in provlist:
                line = self.get_indent(curdepth)
                line += prov + " = true;"
                process_current.append(line)
            process_current.append(self.get_indent(curdepth - 1) + "}")
        for req in reqlist:
            resources_sofar.add(req)
        for prov in provlist:
            resources_sofar.add(prov)

    # PML branch
    def handle_branch(self, node, depth, processes_sofar, process_current, resources_sofar):
        """Spawn one Promela proctype per branch child, then wait for them.

        Uses ``_nr_pr`` (Promela's count of running processes) to detect
        when the spawned branch processes have all terminated.
        """
        construct_name = node[0][0].get("value")  # Branch name; ID will be first element in well-formed XML
        beforeline = self.get_indent(depth)
        beforeline += "int " + str(construct_name) + " = _nr_pr;"  # Records the number of processes currently running
        process_current.append(beforeline)
        for child in node:
            if child.tag != "OpNmId":  # Not interested in the ID again
                branch_name = str(child[0].get("value"))
                process_within = ["proctype " + branch_name + "()", "{"]
                processes_sofar.append(process_within)
                # Only the child matching branch_name is parsed into this
                # proctype (see parse_node_as_branch).
                self.parse_node_as_branch(node, 0, processes_sofar, process_within, resources_sofar, branch_name)
                process_within.append("}")
                runline = self.get_indent(depth)
                runline += "run " + branch_name + "();"
                process_current.append(runline)
        afterline = self.get_indent(depth)
        afterline += "_nr_pr == " + str(construct_name) + " ->"  # Waits until the spawned processes have completed
        process_current.append(afterline)

    # PML iteration
    def handle_iteration(self, node, depth, processes_sofar, process_current, resources_sofar):
        """Not yet implemented."""
        pass

    # PML sequence
    def handle_sequence(self, node, depth, processes_sofar, process_current, resources_sofar):
        """Emit each child construct in order."""
        self.parse_nodes(node, depth, processes_sofar, process_current, resources_sofar)

    # Parse non-Process node of the XML file
    def parse_nodes(self, node, depth, processes_sofar, process_current, resources_sofar):
        """Dispatch each recognised child of ``node`` to its handler."""
        for child in node:
            if child.tag in self.constructs:
                self.constructs[child.tag](child, depth + 1, processes_sofar, process_current,resources_sofar)

    # Parse child node of a branch construct
    def parse_node_as_branch(self, node, depth, processes_sofar, process_current, resources_sofar, branch_name):
        """Dispatch only the child whose name matches ``branch_name``."""
        for child in node:
            if child[0].get("value") == branch_name:
                if child.tag in self.constructs:
                    self.constructs[child.tag](child, depth + 1, processes_sofar, process_current,resources_sofar)

    # Parse Process, the outermost level of a PML file
    def parse_process(self, root):
        """Translate a whole PML process.

        Returns a list of line-lists: an optional resource block followed by
        the flattened proctype definitions.  Resources are sorted so output
        is deterministic regardless of set iteration order.
        """
        processes = []  # List of Promela proctypes
        resources = set()  # Set of resources
        procname = root[0].get("value")  # Process name; ID is always the first element in well-formed PML
        process_main = ["active proctype " + procname + "()", "{"]
        processes.append(process_main)
        # Parse inner tree nodes
        self.parse_nodes(root, 0, processes, process_main, resources)
        # Add dummy instruction to cope with empty processes
        if len(process_main) <= 2:
            process_main.append("\tskip;")
        process_main.append("}")
        # Assemble resources and processes into translation
        translation = []
        resources_ordered = list(resources)
        resources_ordered.sort()
        resourcelist = []
        if len(resources) > 0:
            for i, resource in enumerate(resources_ordered):  # FIXME: not sure this is where resources should be going - scoping?
                if i < len(resources)-1:
                    resourcelist.append(resource + ",")
                else:
                    resourcelist.append(resource)
            resourcelist.append("")
            translation.append(resourcelist)
        processlist = []
        for process in processes:
            for line in process:
                processlist.append(line)
        translation.append(processlist)
        return translation

    # PML selection
    def handle_selection(self, node, depth, processes_sofar, process_current, resources_sofar):
        """Emit a Promela if...fi with one '::' option per child construct."""
        if_block = False
        curdepth = depth
        line = self.get_indent(curdepth)
        for child_node in node.iterchildren():
            if child_node.tag in self.constructs:
                if not if_block:
                    # Open the if-block once, on the first recognised child.
                    if_block = not if_block
                    process_current.append(line + "if")
                process_current.append(line + ":: true ->")
                # NOTE(review): lxml's insert() MOVES child_node under the
                # temporary root, detaching it from ``node`` -- confirm OK.
                temp_root = etree.Element(child_node.tag)
                temp_root.insert(0, child_node)
                self.parse_nodes(temp_root, depth+1, processes_sofar, process_current, resources_sofar)
        if if_block:
            process_current.append(line + "fi")
        pass

    def translate_xml(self, xml_string):
        """Parse ``xml_string`` and translate it; exits the program on bad XML."""
        root = None
        try:
            root = lxml.etree.fromstring(xml_string)
        except lxml.etree.XMLSyntaxError:
            print "Error parsing XML, exiting."
            sys.exit(1)
        translation = self.parse_process(root)
        return translation
|
from radosgw_agent.util import obj
class Empty(object):
    """Ad-hoc attribute container: every keyword argument becomes an attribute."""

    def __init__(self, **kw):
        self.__dict__.update(kw)
class TestToDict(object):
    """Behaviour of obj.to_dict: attribute export, overrides, extras."""

    def test_underscores_are_ignored(self):
        subject = Empty(a=1, _b=2)
        exported = obj.to_dict(subject)
        assert exported.get('_b') is None
        assert exported.get('a') == 1

    def test_overrides_are_respected(self):
        subject = Empty(a=1, b=2)
        exported = obj.to_dict(subject, b=3)
        assert exported.get('b') == 3

    def test_overrides_dont_mess_up_other_keys(self):
        subject = Empty(a=1, b=2)
        exported = obj.to_dict(subject, b=3)
        assert exported.get('a') == 1

    def test_extra_keys_are_set(self):
        exported = obj.to_dict(Empty(), a=1, b=2)
        assert exported['a'] == 1
        assert exported['b'] == 2
tests for the new keys to attrs helper
Signed-off-by: Alfredo Deza <3f4d00ffa77e6441ab2d23c25a618925d2383b02@redhat.com>
from radosgw_agent.util import obj
class Empty(object):
    """Ad-hoc attribute container: every keyword argument becomes an attribute."""

    def __init__(self, **kw):
        self.__dict__.update(kw)
class TestToDict(object):
    """Behaviour of obj.to_dict: attribute export, overrides, extras."""

    def test_underscores_are_ignored(self):
        subject = Empty(a=1, _b=2)
        exported = obj.to_dict(subject)
        assert exported.get('_b') is None
        assert exported.get('a') == 1

    def test_overrides_are_respected(self):
        subject = Empty(a=1, b=2)
        exported = obj.to_dict(subject, b=3)
        assert exported.get('b') == 3

    def test_overrides_dont_mess_up_other_keys(self):
        subject = Empty(a=1, b=2)
        exported = obj.to_dict(subject, b=3)
        assert exported.get('a') == 1

    def test_extra_keys_are_set(self):
        exported = obj.to_dict(Empty(), a=1, b=2)
        assert exported['a'] == 1
        assert exported['b'] == 2
class TestKeysToAttribute(object):
    """Behaviour of obj.keys_to_attributes: dict keys become attributes."""

    def test_replace_dashes(self):
        source = {'dashed-word': 1}
        converted = obj.keys_to_attributes(source)
        assert converted.dashed_word == 1
|
from django.db import models
class Board(models.Model):
    """A discussion board, identified by a unique name and a URL slug."""

    # Creation timestamp, set automatically on first save.
    created = models.DateTimeField(auto_now_add=True)
    # Display name; uniqueness is enforced at the database level.
    name = models.CharField(max_length=64, unique=True, blank=False)
    description = models.CharField(max_length=712, blank=False)
    # NOTE(review): nothing in this model populates the slug -- confirm it
    # is set by callers (e.g. a form or admin) before save.
    slug = models.SlugField()

    def __str__(self):
        return self.name
add slug generation.
from django.contrib.auth import get_user_model
from django.db import models
from slugify import slugify
def slugify_board_name(name: str) -> str:
    """Return a URL slug for *name*, spelling ampersands out as 'and'."""
    normalized = name.replace('&', 'and')
    return slugify(normalized, max_length=64, separator='',
                   save_order=True, entities=False)
class Board(models.Model):
    """A discussion board; the slug is derived from the name on first save."""

    # Creation timestamp, set automatically on first save; not editable.
    created = models.DateTimeField(auto_now_add=True, editable=False)
    # Display name; uniqueness is enforced at the database level.
    name = models.CharField(max_length=64, unique=True, blank=False)
    description = models.CharField(max_length=712, blank=False)
    slug = models.SlugField(db_index=True)

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        # Bug fix: the previous code called slugify_board_name(slug) with an
        # undefined local name ``slug`` (NameError on first save); the slug
        # must be derived from the board's name.
        if not self.slug:
            self.slug = slugify_board_name(self.name)
        super(Board, self).save(*args, **kwargs)
|
from django.views.decorators.csrf import csrf_exempt
from rest_framework.exceptions import NotFound
from rest_framework.response import Response
from rest_framework import status
from rest_framework.views import APIView
from cognitive.settings import graph
import cognitive.apps.atlas.query as query
from .forms import (TaskForm, ConceptForm, ContrastForm, ConditionForm,
DisorderForm, TaskDisorderForm)
# One shared query-helper instance per Cognitive Atlas node type; the API
# views below use these to create, link and fetch graph nodes.
Assertion = query.Assertion()
Battery = query.Battery()
Citation = query.Citation()
Concept = query.Concept()
Condition = query.Condition()
Contrast = query.Contrast()
Disorder = query.Disorder()
ExternalDataset = query.ExternalDataset()
ExternalLink = query.ExternalLink()
Implementation = query.Implementation()
Indicator = query.Indicator()
Task = query.Task()
Theory = query.Theory()
class NodeAPI(APIView):
    """Generic create/retrieve API for one Cognitive Atlas node type.

    Subclasses set ``node_class`` (a query helper instance), ``form_class``
    (the validation form) and ``name_field`` (the form field holding the
    node's name).  ``...`` (Ellipsis) serves as a "must override" sentinel.
    """

    node_class = ...
    form_class = ...
    name_field = ...

    def post(self, request, format=None):
        """Validate the posted data, create the node, return its full record.

        Responds 422 with the form errors when validation fails.
        """
        form = self.form_class(request.data)
        if form.is_valid():
            node_data = form.cleaned_data
            name = node_data[self.name_field]
            node = self.node_class.create(name=name, properties=node_data,
                                          request=request)
            # get() reads the id from request.GET; QueryDicts are immutable,
            # so work on a mutable copy.
            request.GET = request.GET.copy()
            request.GET['id'] = node.properties['id']
            return self.get(request)
        else:
            # return a 422 response
            return Response(form.errors,
                            status=status.HTTP_422_UNPROCESSABLE_ENTITY)

    def get(self, request, format=None):
        """Return the node identified by ?id=..., or all nodes of this type."""
        id = request.GET.get('id', None)
        if id:
            return Response(self.node_class.get_full(request.GET['id'], 'id'))
        else:
            return Response(self.node_class.api_all())

    def make_link(self, request, src_id, src_label, dest_id, dest_label, rel,
                  reverse=False):
        """Create relationship ``rel`` between two nodes, then return the
        source node's full record (or the destination's when ``reverse``).

        ``src_label``/``dest_label`` are query helper instances.  Responds
        422 when the link could not be created.
        """
        if reverse is False:
            link_made = src_label.link(src_id, dest_id, rel,
                                       endnode_type=dest_label.name)
        elif reverse is True:
            link_made = dest_label.link(dest_id, src_id,
                                        rel, endnode_type=src_label.name)
        if link_made is None:
            return Response({'Unable to associate nodes': ''},
                            status=status.HTTP_422_UNPROCESSABLE_ENTITY)
        request.GET = request.GET.copy()
        if reverse is False:
            request.GET['id'] = src_id
        elif reverse is True:
            request.GET['id'] = dest_id
        return self.get(request)
class ContrastAPI(NodeAPI):
    """Create a contrast and link it to task ``uid`` via HASCONTRAST."""
    node_class = Contrast
    form_class = ContrastForm
    name_field = 'name'

    def post(self, request, uid, format=None):
        created = super(ContrastAPI, self).post(request, format=format)
        # NOTE(review): when the parent post returns 422, ``created.data``
        # holds form errors and has no 'id' key -- confirm handling upstream.
        contrast_id = created.data['id']
        return self.make_link(request, uid, Task, contrast_id,
                              Contrast, 'HASCONTRAST')
class ConditionAPI(NodeAPI):
    """Create a condition and link it to its task via HASCONDITION."""
    node_class = Condition
    form_class = ConditionForm
    name_field = 'condition_text'

    def post(self, request, format=None):
        created = super(ConditionAPI, self).post(request, format=format)
        condition_id = created.data['id']
        # NOTE(review): reads the task id from self.kwargs rather than a
        # ``uid`` parameter as ContrastAPI does -- confirm the URLconf.
        return self.make_link(request, self.kwargs['uid'], Task, condition_id,
                              Condition, 'HASCONDITION')
class ConceptAPI(NodeAPI):
    """Create/retrieve Concept nodes.

    GET supports lookup by ?id=, ?name=, or ?contrast_id= (concepts measured
    by that contrast); with no parameter, all concepts are returned.
    """
    node_class = Concept
    form_class = ConceptForm
    name_field = 'term_name'

    def get(self, request, format=None):
        # Removed the unused local ``fields = {}`` left over from an
        # earlier revision.
        id = request.GET.get("id", "")
        name = request.GET.get("name", "")
        contrast_id = request.GET.get("contrast_id", "")
        if id:
            concept = Concept.get_full(id, 'id')
        elif name:
            concept = Concept.get_full(name, 'name')
        elif contrast_id:
            concept = Contrast.api_get_concepts(contrast_id)
        else:
            concept = Concept.api_all()
        if concept is None:
            raise NotFound('Concept not found')
        return Response(concept)
class TaskAPI(NodeAPI):
    """Create/retrieve Task nodes; GET looks up by ?id= or ?name=."""
    node_class = Task
    form_class = TaskForm
    name_field = 'term_name'

    def get(self, request, format=None):
        lookup_id = request.GET.get("id", "")
        lookup_name = request.GET.get("name", "")
        if lookup_id:
            found = Task.get_full(lookup_id, 'id')
        elif lookup_name:
            found = Task.get_full(lookup_name, 'name')
        else:
            found = Task.api_all()
        if found is None:
            raise NotFound('Task not found')
        return Response(found)
class DisorderAPI(NodeAPI):
    """Create/retrieve Disorder nodes; GET looks up by ?id= or ?name=."""
    node_class = Disorder
    form_class = DisorderForm
    name_field = 'name'

    def get(self, request, format=None):
        lookup_id = request.GET.get("id", "")
        lookup_name = request.GET.get("name", "")
        if lookup_id:
            found = Disorder.get_full(lookup_id, 'id')
        elif lookup_name:
            found = Disorder.get_full(lookup_name, 'name')
        else:
            found = Disorder.api_all()
        if found is None:
            raise NotFound('Disorder not found')
        return Response(found)
class SearchAPI(APIView):
    """GET ?q=... : full-text search across Concept, Contrast, Disorder, Task."""

    def get(self, request, format=None):
        query_string = request.GET.get("q", "")
        matches = []
        for node_type in (Concept, Contrast, Disorder, Task):
            matches += node_type.search_all_fields(query_string)
        if not matches:
            raise NotFound('No results found')
        return Response(matches)
# def add_concept_relation(request, uid):
# def make_link(self, request, src_id, src_label, dest_id, dest_label, rel,
class ConceptRelAPI(NodeAPI):
    """POST: create a relation of ``rel_type`` between two existing concepts."""
    node_class = Concept
    form_class = None
    name_field = None

    def post(self, request):
        # Need some sort of validation.
        source = request.POST.get('src_id', None)
        destination = request.POST.get('dest_id', None)
        relation = request.POST.get('rel_type', None)
        return self.make_link(request, source, self.node_class,
                              destination, self.node_class, relation)
# def all_batteries(request):
# def all_theories(request):
# def all_collections(request, return_context=False):
# def all_disorders(request, return_context=False):
# def all_contrasts(request):
# def view_battery(request, uid, return_context=False):
# handled in battery class
# def view_theory(request, uid, return_context=False):
# handled in theory class
# def update_concept(request, uid):
# def update_task(request, uid):
# def update_theory(request, uid):
# def update_battery(request, uid):
# def update_disorder(request, uid):
# def add_task_concept(request, uid):
class TaskConceptAPI(NodeAPI):
    """POST: assert that task ``uid`` measures the given concept (ASSERTS)."""
    node_class = Task

    def post(self, request, uid):
        target = request.POST.get('concept_id', '')
        return self.make_link(request, uid, self.node_class,
                              target, Concept, 'ASSERTS')
# def add_disorder_task(request, uid):
class DisorderTask(NodeAPI):
    """POST: link task ``uid`` to the given disorder (ASSERTS)."""
    node_class = Task

    def post(self, request, uid):
        target = request.POST.get('disorder_id', '')
        return self.make_link(request, uid, self.node_class,
                              target, Disorder, 'ASSERTS')
# def add_task_disorder(request, task_id):
class TaskDisorderAPI(NodeAPI):
    """POST: record that one of task ``uid``'s contrasts differs with a disorder."""
    node_class = Task

    def post(self, request, uid):
        form = TaskDisorderForm(uid, request.POST)
        if not form.is_valid():
            return Response(form.errors,
                            status=status.HTTP_422_UNPROCESSABLE_ENTITY)
        data = form.cleaned_data
        Contrast.link(data['contrasts'], data['disorders'],
                      "HASDIFFERENCE", endnode_type="disorder")
        request.GET = request.GET.copy()
        request.GET['id'] = uid
        return self.get(request)
# def add_disorder_disorder(request, disorder_id):
class DisorderDisorderAPI(NodeAPI):
    """POST: mark disorder ``uid`` as a kind of another disorder (ISA)."""
    node_class = Disorder

    def post(self, request, uid):
        parent_id = request.POST.get('disorder_id', '')
        return self.make_link(request, uid, self.node_class,
                              parent_id, Disorder, 'ISA')
# def add_concept_contrast(request, uid, tid):
class ConceptContrastAPI(NodeAPI):
    """Placeholder for add_concept_contrast; not yet implemented."""
    pass
# def add_concept_contrast_task(request, uid):
class ConceptContrastTaskAPI(NodeAPI):
    """Placeholder for add_concept_contrast_task; not yet implemented."""
    pass
# def concept_task_contrast_assertion(concept_id, task_id, contrast_id):
# def add_task_implementation(request, task_id):
class TaskImplementationAPI(NodeAPI):
    """POST: attach an implementation to task ``uid`` (HASIMPLEMENTATION)."""
    node_class = Task

    def post(self, request, uid):
        imp_id = request.POST.get('implementation_id', '')
        # Bug fix: previously passed the undefined name ``dataset_id`` to
        # make_link (NameError at runtime); the id read above is ``imp_id``.
        return self.make_link(request, uid, self.node_class, imp_id,
                              Implementation, 'HASIMPLEMENTATION')
# def add_task_dataset(request, task_id):
class TaskDatasetAPI(NodeAPI):
    """POST: attach an external dataset to task ``uid`` (HASEXTERNALDATASET)."""
    node_class = Task

    def post(self, request, uid):
        target = request.POST.get('dataset_id', '')
        return self.make_link(request, uid, self.node_class, target,
                              ExternalDataset, 'HASEXTERNALDATASET')
# def add_task_indicator(request, task_id):
class TaskIndicatorAPI(NodeAPI):
    """POST: attach an indicator to task ``uid`` (HASINDICATOR)."""
    node_class = Task

    def post(self, request, uid):
        target = request.POST.get('indicator_id', '')
        return self.make_link(request, uid, self.node_class, target,
                              Indicator, 'HASINDICATOR')
# def add_task_citation(request, task_id):
class TaskCitationAPI(NodeAPI):
    """POST: attach a citation to task ``uid`` (HASCITATION)."""
    node_class = Task

    def post(self, request, uid):
        target = request.POST.get('citation_id', '')
        return self.make_link(request, uid, self.node_class, target,
                              Citation, 'HASCITATION')
# def add_concept_citation(request, concept_id):
class ConceptCitationAPI(NodeAPI):
    """POST: attach a citation to concept ``uid`` (HASCITATION)."""
    node_class = Concept

    def post(self, request, uid):
        target = request.POST.get('citation_id', '')
        return self.make_link(request, uid, self.node_class, target,
                              Citation, 'HASCITATION')
# def add_disorder_citation(request, disorder_id):
class DisorderCitationAPI(NodeAPI):
    """POST: attach a citation to disorder ``uid`` (HASCITATION)."""
    node_class = Disorder

    def post(self, request, uid):
        target = request.POST.get('citation_id', '')
        return self.make_link(request, uid, self.node_class, target,
                              Citation, 'HASCITATION')
# def add_disorder_external_link(request, disorder_id):
class DisorderExternalLinkAPI(NodeAPI):
    """POST: attach an external link to disorder ``uid`` (HASLINK)."""
    node_class = Disorder

    def post(self, request, uid):
        target = request.POST.get('link_id', '')
        return self.make_link(request, uid, self.node_class, target,
                              ExternalLink, 'HASLINK')
# def add_theory_citation(request, theory_id):
class TheoryCitationAPI(NodeAPI):
    """POST: attach a citation to theory ``uid`` (HASCITATION)."""
    node_class = Theory

    def post(self, request, uid):
        target = request.POST.get('citation_id', '')
        return self.make_link(request, uid, self.node_class, target,
                              Citation, 'HASCITATION')
# def add_battery_citation(request, battery_id):
class BatteryCitationAPI(NodeAPI):
    """POST: attach a citation to battery ``uid`` (HASCITATION)."""
    node_class = Battery

    def post(self, request, uid):
        target = request.POST.get('citation_id', '')
        return self.make_link(request, uid, self.node_class, target,
                              Citation, 'HASCITATION')
# def add_battery_indicator(request, battery_id):
class BatteryIndicatorAPI(NodeAPI):
    """POST: attach an indicator to battery ``uid`` (HASINDICATOR).

    Bug fix: this handler previously read ``citation_id`` and created a
    HASCITATION link to a Citation node -- an apparent copy-paste from the
    citation handlers.  Per its stated purpose (add_battery_indicator) it
    reads ``indicator_id`` and links an Indicator via HASINDICATOR, matching
    TaskIndicatorAPI.
    """
    node_class = Battery

    def post(self, request, uid):
        indicator_id = request.POST.get('indicator_id', '')
        return self.make_link(request, uid, self.node_class, indicator_id,
                              Indicator, 'HASINDICATOR')
# def add_battery_battery(request, battery_id):
class BatteryBatteryAPI(NodeAPI):
    """POST: nest battery ``battery_id`` inside battery ``uid`` (INBATTERY)."""
    node_class = Battery

    def post(self, request, uid):
        battery_id = request.POST.get('battery_id', '')
        # Bug fix: previously passed the undefined name ``citation_id`` to
        # make_link (NameError at runtime); the id read above is
        # ``battery_id``.
        return self.make_link(request, uid, self.node_class, battery_id,
                              Battery, 'INBATTERY')
# def add_battery_task(request, battery_id):
class BatteryTaskAPI(NodeAPI):
    """POST: place the given task inside battery ``uid`` (INBATTERY)."""
    node_class = Battery

    def post(self, request, uid):
        target = request.POST.get('task_id', '')
        return self.make_link(request, uid, self.node_class, target,
                              Task, 'INBATTERY')
# def add_theory_assertion(request, theory_id):
class TheoryAssertion(NodeAPI):
    """POST endpoint placing an Assertion inside a Theory (INTHEORY)."""

    node_class = Assertion

    def post(self, request, uid):
        """Attach assertion ``uid`` to the posted ``theory_id``."""
        tid = request.POST.get('theory_id', '')
        return self.make_link(
            request, uid, self.node_class, tid, Theory, 'INTHEORY')
# def add_theory(request):
class TheoryAPI(NodeAPI):
    """Generic NodeAPI over Theory nodes (replaces the add_theory view)."""
    node_class = Theory
# def add_battery(request):
class BatteryAPI(NodeAPI):
    """Generic NodeAPI over Battery nodes (replaces the add_battery view)."""
    node_class = Battery
Update the generic NodeAPI post method to return HTTP 422 when a node with the given name already exists
from django.views.decorators.csrf import csrf_exempt
from rest_framework.exceptions import NotFound
from rest_framework.response import Response
from rest_framework import status
from rest_framework.views import APIView
from cognitive.settings import graph
import cognitive.apps.atlas.query as query
from .forms import (TaskForm, ConceptForm, ContrastForm, ConditionForm,
DisorderForm, TaskDisorderForm)
# Module-level query-helper singletons, one per node label.
# Shared by every API view defined below.
Assertion = query.Assertion()
Battery = query.Battery()
Citation = query.Citation()
Concept = query.Concept()
Condition = query.Condition()
Contrast = query.Contrast()
Disorder = query.Disorder()
ExternalDataset = query.ExternalDataset()
ExternalLink = query.ExternalLink()
Implementation = query.Implementation()
Indicator = query.Indicator()
Task = query.Task()
Theory = query.Theory()
class NodeAPI(APIView):
    """Generic API view over a single graph-node type.

    Subclasses set ``node_class`` (a query-helper instance), ``form_class``
    (the validation form) and ``name_field`` (the property used as the
    node's name).
    """

    node_class = ...
    form_class = ...
    name_field = ...

    def post(self, request, format=None):
        """Validate the posted data and create a new node.

        Returns 422 with the form errors when validation fails, or 422 when
        a node with the requested name already exists; otherwise creates the
        node and returns its full representation via :meth:`get`.
        """
        form = self.form_class(request.data)
        if not form.is_valid():
            return Response(form.errors,
                            status=status.HTTP_422_UNPROCESSABLE_ENTITY)
        node_data = form.cleaned_data
        name_exists = self.node_class.get(node_data[self.name_field],
                                          field=self.name_field,
                                          get_relations=False)
        # Bug fix: any existing node with this name must block creation.
        # The previous ``> 1`` check let the first duplicate through, so the
        # "name already exists" 422 never fired for the common case.
        if len(name_exists) >= 1:
            error_key = "Name {} already exists".format(
                node_data[self.name_field])
            error_value = "nodes with this name: {}".format(
                [i['id'] for i in name_exists])
            error = {error_key: error_value}
            return Response(error, status=status.HTTP_422_UNPROCESSABLE_ENTITY)
        name = node_data[self.name_field]
        node = self.node_class.create(name=name, properties=node_data,
                                      request=request)
        request.GET = request.GET.copy()
        request.GET['id'] = node.properties['id']
        return self.get(request)

    def get(self, request, format=None):
        """Return one node (selected by the ``id`` query param) or all nodes."""
        id = request.GET.get('id', None)
        if id:
            return Response(self.node_class.get_full(request.GET['id'], 'id'))
        else:
            return Response(self.node_class.api_all())

    def make_link(self, request, src_id, src_label, dest_id, dest_label, rel,
                  reverse=False):
        """Create relationship ``rel`` between two nodes.

        With ``reverse=True`` the link is created from ``dest`` to ``src``.
        Returns the (source-side) node on success, or 422 when the link
        could not be made.
        """
        if reverse is False:
            link_made = src_label.link(src_id, dest_id, rel,
                                       endnode_type=dest_label.name)
        elif reverse is True:
            link_made = dest_label.link(dest_id, src_id,
                                        rel, endnode_type=src_label.name)
        if link_made is None:
            return Response({'Unable to associate nodes': ''},
                            status=status.HTTP_422_UNPROCESSABLE_ENTITY)
        request.GET = request.GET.copy()
        if reverse is False:
            request.GET['id'] = src_id
        elif reverse is True:
            request.GET['id'] = dest_id
        return self.get(request)
class ContrastAPI(NodeAPI):
    """Create a Contrast node and link it to its parent Task."""

    node_class = Contrast
    form_class = ContrastForm
    name_field = 'name'

    def post(self, request, uid, format=None):
        """Create a contrast, then link task ``uid`` to it via HASCONTRAST."""
        ret = super(ContrastAPI, self).post(request, format=format)
        # Bug fix: on validation failure the parent returns a 422 Response
        # whose data holds form errors, not a node, so ``ret.data['id']``
        # raised KeyError; propagate the error instead.
        if ret.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY:
            return ret
        contrast_id = ret.data['id']
        return self.make_link(request, uid, Task, contrast_id,
                              Contrast, 'HASCONTRAST')
class ConditionAPI(NodeAPI):
    """Create a Condition node and link it to its parent Task."""

    node_class = Condition
    form_class = ConditionForm
    name_field = 'condition_text'

    def post(self, request, format=None):
        """Create a condition, then link task ``uid`` (URL kwarg) to it."""
        ret = super(ConditionAPI, self).post(request, format=format)
        # Bug fix: on validation failure the parent returns a 422 Response
        # whose data holds form errors, not a node, so ``ret.data['id']``
        # raised KeyError; propagate the error instead.
        if ret.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY:
            return ret
        condition_id = ret.data['id']
        return self.make_link(request, self.kwargs['uid'], Task, condition_id,
                              Condition, 'HASCONDITION')
class ConceptAPI(NodeAPI):
    """Read endpoint for concepts, addressable by id, name or contrast."""

    node_class = Concept
    form_class = ConceptForm
    name_field = 'term_name'

    def get(self, request, format=None):
        """Look up a concept by ``id``, ``name`` or ``contrast_id``; list all
        concepts when no selector is given. Raises 404 when nothing matches.
        """
        # Cleanup: dropped the unused local ``fields = {}`` and renamed the
        # builtin-shadowing local ``id``.
        node_id = request.GET.get("id", "")
        name = request.GET.get("name", "")
        contrast_id = request.GET.get("contrast_id", "")
        if node_id:
            concept = Concept.get_full(node_id, 'id')
        elif name:
            concept = Concept.get_full(name, 'name')
        elif contrast_id:
            concept = Contrast.api_get_concepts(contrast_id)
        else:
            concept = Concept.api_all()
        if concept is None:
            raise NotFound('Concept not found')
        return Response(concept)
class TaskAPI(NodeAPI):
    """Read endpoint for tasks, addressable by id or name."""

    node_class = Task
    form_class = TaskForm
    name_field = 'term_name'

    def get(self, request, format=None):
        """Look up one task by ``id`` or ``name``, or list all tasks."""
        node_id = request.GET.get("id", "")
        node_name = request.GET.get("name", "")
        if node_id:
            task = Task.get_full(node_id, 'id')
        elif node_name:
            task = Task.get_full(node_name, 'name')
        else:
            task = Task.api_all()
        if task is None:
            raise NotFound('Task not found')
        return Response(task)
class DisorderAPI(NodeAPI):
    """Read endpoint for disorders, addressable by id or name."""

    node_class = Disorder
    form_class = DisorderForm
    name_field = 'name'

    def get(self, request, format=None):
        """Look up one disorder by ``id`` or ``name``, or list all."""
        node_id = request.GET.get("id", "")
        node_name = request.GET.get("name", "")
        if node_id:
            disorder = Disorder.get_full(node_id, 'id')
        elif node_name:
            disorder = Disorder.get_full(node_name, 'name')
        else:
            disorder = Disorder.api_all()
        if disorder is None:
            raise NotFound('Disorder not found')
        return Response(disorder)
class SearchAPI(APIView):
    """Full-text search across concepts, contrasts, disorders and tasks."""

    def get(self, request, format=None):
        """Run the ``q`` query against every searchable node type."""
        query_string = request.GET.get("q", "")
        hits = []
        for node_type in (Concept, Contrast, Disorder, Task):
            hits.extend(node_type.search_all_fields(query_string))
        if not hits:
            raise NotFound('No results found')
        return Response(hits)
# def add_concept_relation(request, uid):
# def make_link(self, request, src_id, src_label, dest_id, dest_label, rel,
class ConceptRelAPI(NodeAPI):
    """Create an arbitrary relation between two concepts."""

    node_class = Concept
    form_class = None
    name_field = None

    def post(self, request):
        """Link ``src_id`` to ``dest_id`` with the posted ``rel_type``."""
        # TODO: these inputs need some sort of validation before linking.
        source = request.POST.get('src_id', None)
        target = request.POST.get('dest_id', None)
        relation = request.POST.get('rel_type', None)
        return self.make_link(request, source, self.node_class, target,
                              self.node_class, relation)
# def all_batteries(request):
# def all_theories(request):
# def all_collections(request, return_context=False):
# def all_disorders(request, return_context=False):
# def all_contrasts(request):
# def view_battery(request, uid, return_context=False):
# handled in battery class
# def view_theory(request, uid, return_context=False):
# handled in theory class
# def update_concept(request, uid):
# def update_task(request, uid):
# def update_theory(request, uid):
# def update_battery(request, uid):
# def update_disorder(request, uid):
# def add_task_concept(request, uid):
class TaskConceptAPI(NodeAPI):
    """POST endpoint asserting that a Task measures a Concept (ASSERTS)."""

    node_class = Task

    def post(self, request, uid):
        """Attach the posted ``concept_id`` to task ``uid``."""
        cid = request.POST.get('concept_id', '')
        return self.make_link(
            request, uid, self.node_class, cid, Concept, 'ASSERTS')
# def add_disorder_task(request, uid):
class DisorderTask(NodeAPI):
    """POST endpoint asserting a Disorder against a Task (ASSERTS)."""

    node_class = Task

    def post(self, request, uid):
        """Attach the posted ``disorder_id`` to task ``uid``."""
        did = request.POST.get('disorder_id', '')
        return self.make_link(
            request, uid, self.node_class, did, Disorder, 'ASSERTS')
# def add_task_disorder(request, task_id):
class TaskDisorderAPI(NodeAPI):
    """Record a disorder difference for one of a task's contrasts."""

    node_class = Task

    def post(self, request, uid):
        """Validate the posted disorder/contrast pair and link them."""
        form = TaskDisorderForm(uid, request.POST)
        if not form.is_valid():
            return Response(form.errors,
                            status=status.HTTP_422_UNPROCESSABLE_ENTITY)
        data = form.cleaned_data
        Contrast.link(data['contrasts'], data['disorders'],
                      "HASDIFFERENCE", endnode_type="disorder")
        request.GET = request.GET.copy()
        request.GET['id'] = uid
        return self.get(request)
# def add_disorder_disorder(request, disorder_id):
class DisorderDisorderAPI(NodeAPI):
    """POST endpoint creating an ISA relation between two disorders."""

    node_class = Disorder

    def post(self, request, uid):
        """Mark disorder ``uid`` as an instance of the posted ``disorder_id``."""
        parent = request.POST.get('disorder_id', '')
        return self.make_link(
            request, uid, self.node_class, parent, Disorder, 'ISA')
# def add_concept_contrast(request, uid, tid):
class ConceptContrastAPI(NodeAPI):
    # Placeholder for the legacy add_concept_contrast view; not implemented yet.
    pass
# def add_concept_contrast_task(request, uid):
class ConceptContrastTaskAPI(NodeAPI):
    # Placeholder for the legacy add_concept_contrast_task view; not implemented yet.
    pass
# def concept_task_contrast_assertion(concept_id, task_id, contrast_id):
# def add_task_implementation(request, task_id):
class TaskImplementationAPI(NodeAPI):
    """Attach an Implementation to a Task (replaces add_task_implementation)."""

    node_class = Task

    def post(self, request, uid):
        """Link task ``uid`` to the posted ``implementation_id``.

        Bug fix: the link call previously passed the undefined name
        ``dataset_id`` instead of ``imp_id``, raising NameError on every
        request.
        """
        imp_id = request.POST.get('implementation_id', '')
        return self.make_link(request, uid, self.node_class, imp_id,
                              Implementation, 'HASIMPLEMENTATION')
# def add_task_dataset(request, task_id):
class TaskDatasetAPI(NodeAPI):
    """POST endpoint linking a Task to an ExternalDataset."""

    node_class = Task

    def post(self, request, uid):
        """Attach the posted ``dataset_id`` to task ``uid``."""
        target = request.POST.get('dataset_id', '')
        return self.make_link(request, uid, self.node_class, target,
                              ExternalDataset, 'HASEXTERNALDATASET')
# def add_task_indicator(request, task_id):
class TaskIndicatorAPI(NodeAPI):
    """POST endpoint linking a Task to an Indicator (HASINDICATOR)."""

    node_class = Task

    def post(self, request, uid):
        """Attach the posted ``indicator_id`` to task ``uid``."""
        target = request.POST.get('indicator_id', '')
        return self.make_link(request, uid, self.node_class, target,
                              Indicator, 'HASINDICATOR')
# def add_task_citation(request, task_id):
class TaskCitationAPI(NodeAPI):
    """POST endpoint linking a Task to a Citation (HASCITATION)."""

    node_class = Task

    def post(self, request, uid):
        """Attach the posted ``citation_id`` to task ``uid``."""
        cid = request.POST.get('citation_id', '')
        return self.make_link(
            request, uid, self.node_class, cid, Citation, 'HASCITATION')
# def add_concept_citation(request, concept_id):
class ConceptCitationAPI(NodeAPI):
    """POST endpoint linking a Concept to a Citation (HASCITATION)."""

    node_class = Concept

    def post(self, request, uid):
        """Attach the posted ``citation_id`` to concept ``uid``."""
        cid = request.POST.get('citation_id', '')
        return self.make_link(
            request, uid, self.node_class, cid, Citation, 'HASCITATION')
# def add_disorder_citation(request, disorder_id):
class DisorderCitationAPI(NodeAPI):
    """POST endpoint linking a Disorder to a Citation (HASCITATION)."""

    node_class = Disorder

    def post(self, request, uid):
        """Attach the posted ``citation_id`` to disorder ``uid``."""
        cid = request.POST.get('citation_id', '')
        return self.make_link(
            request, uid, self.node_class, cid, Citation, 'HASCITATION')
# def add_disorder_external_link(request, disorder_id):
class DisorderExternalLinkAPI(NodeAPI):
    """POST endpoint linking a Disorder to an ExternalLink (HASLINK)."""

    node_class = Disorder

    def post(self, request, uid):
        """Attach the posted ``link_id`` to disorder ``uid``."""
        target = request.POST.get('link_id', '')
        return self.make_link(
            request, uid, self.node_class, target, ExternalLink, 'HASLINK')
# def add_theory_citation(request, theory_id):
class TheoryCitationAPI(NodeAPI):
    """POST endpoint linking a Theory to a Citation (HASCITATION)."""

    node_class = Theory

    def post(self, request, uid):
        """Attach the posted ``citation_id`` to theory ``uid``."""
        cid = request.POST.get('citation_id', '')
        return self.make_link(
            request, uid, self.node_class, cid, Citation, 'HASCITATION')
# def add_battery_citation(request, battery_id):
class BatteryCitationAPI(NodeAPI):
    """POST endpoint linking a Battery to a Citation (HASCITATION)."""

    node_class = Battery

    def post(self, request, uid):
        """Attach the posted ``citation_id`` to battery ``uid``."""
        cid = request.POST.get('citation_id', '')
        return self.make_link(
            request, uid, self.node_class, cid, Citation, 'HASCITATION')
# def add_battery_indicator(request, battery_id):
class BatteryIndicatorAPI(NodeAPI):
    """Attach an Indicator to a Battery (replaces add_battery_indicator)."""

    node_class = Battery

    def post(self, request, uid):
        """Link battery ``uid`` to the posted ``indicator_id`` via HASINDICATOR.

        Bug fix: this handler was copy-pasted from the citation endpoint and
        read ``citation_id`` / linked Citation nodes with HASCITATION even
        though it is the indicator endpoint; it now mirrors TaskIndicatorAPI.
        """
        indicator_id = request.POST.get('indicator_id', '')
        return self.make_link(request, uid, self.node_class, indicator_id,
                              Indicator, 'HASINDICATOR')
# def add_battery_battery(request, battery_id):
class BatteryBatteryAPI(NodeAPI):
    """Nest one battery inside another (replaces add_battery_battery)."""

    node_class = Battery

    def post(self, request, uid):
        """Link battery ``uid`` to the posted ``battery_id`` via INBATTERY.

        Bug fix: the link call previously passed the undefined name
        ``citation_id`` instead of ``battery_id``, raising NameError on
        every request.
        """
        battery_id = request.POST.get('battery_id', '')
        return self.make_link(request, uid, self.node_class, battery_id,
                              Battery, 'INBATTERY')
# def add_battery_task(request, battery_id):
class BatteryTaskAPI(NodeAPI):
    """POST endpoint placing a Task inside a Battery (INBATTERY)."""

    node_class = Battery

    def post(self, request, uid):
        """Attach the posted ``task_id`` to battery ``uid``."""
        target = request.POST.get('task_id', '')
        return self.make_link(
            request, uid, self.node_class, target, Task, 'INBATTERY')
# def add_theory_assertion(request, theory_id):
class TheoryAssertion(NodeAPI):
    """POST endpoint placing an Assertion inside a Theory (INTHEORY)."""

    node_class = Assertion

    def post(self, request, uid):
        """Attach assertion ``uid`` to the posted ``theory_id``."""
        tid = request.POST.get('theory_id', '')
        return self.make_link(
            request, uid, self.node_class, tid, Theory, 'INTHEORY')
# def add_theory(request):
class TheoryAPI(NodeAPI):
    """Generic NodeAPI over Theory nodes (replaces the add_theory view)."""
    node_class = Theory
# def add_battery(request):
class BatteryAPI(NodeAPI):
    """Generic NodeAPI over Battery nodes (replaces the add_battery view)."""
    node_class = Battery
|
import json
import logging
from django.views.generic import View
from arches.app.models.models import ETLModule
from arches.app.utils.response import JSONResponse
logger = logging.getLogger(__name__)
class ETLManagerView(View):
    """
    to get the ETL modules from db
    """

    def get(self, request):
        """Return every configured ETL module as JSON."""
        etl_modules = ETLModule.objects.all()
        return JSONResponse(etl_modules)

    def post(self, request):
        """
        instantiate the proper module with proper action and pass the request
        possible actions are "import", "validate", "return first line", ""
        """
        action = request.POST.get("action")
        module = request.POST.get("module")
        import_module = ETLModule.objects.get(pk=module).get_class_module()(request)
        import_function = getattr(import_module, action)
        response = import_function(request=request)
        if response["success"]:
            ret = {"result": response["data"]}
            return JSONResponse(ret)
        else:
            # Consistency fix (#8280): ETL modules report failure details
            # under "data" as well; the old code read a nonexistent "error"
            # key, raising KeyError on every failed run.
            return JSONResponse(status=400, reason=response["data"])
Make the response JSON consistent, #8280
import json
import logging
from django.views.generic import View
from arches.app.models.models import ETLModule
from arches.app.utils.response import JSONResponse
logger = logging.getLogger(__name__)
class ETLManagerView(View):
    """
    to get the ETL modules from db
    """

    def get(self, request):
        # Return every configured ETL module as JSON.
        etl_modules = ETLModule.objects.all()
        return JSONResponse(etl_modules)

    def post(self, request):
        """
        instantiate the proper module with proper action and pass the request
        possible actions are "import", "validate", "return first line", ""
        """
        action = request.POST.get("action")
        module = request.POST.get("module")
        # Instantiate the module class registered under this pk, then call the
        # requested action on it.
        # NOTE(review): ``action`` comes straight from the request and is used
        # in getattr unvalidated — consider whitelisting the allowed actions.
        import_module = ETLModule.objects.get(pk=module).get_class_module()(request)
        import_function = getattr(import_module, action)
        response = import_function(request=request)
        # Modules presumably return {"success": bool, "data": ...}; failure
        # details are surfaced as the HTTP reason phrase (see #8280) — TODO
        # confirm against the ETL module implementations.
        if response["success"]:
            ret = {"result": response["data"]}
            return JSONResponse(ret)
        else:
            return JSONResponse(status=400, reason=response["data"])
|
"""
@file
@brief Overwrites unit test class with additional testing functions.
"""
import os
import sys
import unittest
import warnings
import decimal
import cProfile
import pstats
import site
from .ci_helper import is_travis_or_appveyor
from ..loghelper import fLOG
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
class ExtTestCase(unittest.TestCase):
    """
    Overwrites unit test class with additional testing functions.
    """

    @staticmethod
    def _format_str(s):
        """
        Returns ``s`` or ``'s'`` depending on the type.
        """
        # Anything with a ``replace`` method is treated as string-like.
        if hasattr(s, "replace"):
            return "'{0}'".format(s)
        else:
            return s

    def assertNotEmpty(self, x):
        """
        Checks that *x* is not empty (neither ``None`` nor zero-length).
        """
        if x is None or (hasattr(x, "__len__") and len(x) == 0):
            raise AssertionError("x is empty")

    def assertEmpty(self, x, none_allowed=True):
        """
        Checks that *x* is empty.

        @param x         container to check
        @param none_allowed     if True, ``None`` also counts as empty
        """
        if not((none_allowed and x is None) or (hasattr(x, "__len__") and len(x) == 0)):
            if isinstance(x, (list, tuple, dict, set)):
                # Show at most the first five elements in the error message.
                end = min(5, len(x))
                disp = "\n" + '\n'.join(map(str, x[:end]))
            else:
                disp = ""
            raise AssertionError("x is not empty{0}".format(disp))

    def assertGreater(self, x, y, strict=False):
        """
        Checks that ``x >= y`` (or ``x > y`` if *strict*).

        NOTE: overrides :meth:`unittest.TestCase.assertGreater`, whose
        comparison is always strict.
        """
        if x < y or (strict and x == y):
            raise AssertionError("x <{2} y with x={0} and y={1}".format(
                ExtTestCase._format_str(x), ExtTestCase._format_str(y),
                "" if strict else "="))

    def assertLesser(self, x, y, strict=False):
        """
        Checks that ``x <= y`` (or ``x < y`` if *strict*).
        """
        if x > y or (strict and x == y):
            raise AssertionError("x >{2} y with x={0} and y={1}".format(
                ExtTestCase._format_str(x), ExtTestCase._format_str(y),
                "" if strict else "="))

    def assertExists(self, name):
        """
        Checks that path *name* exists on disk.
        """
        if not os.path.exists(name):
            raise FileNotFoundError("Unable to find '{0}'.".format(name))

    def assertNotExists(self, name):
        """
        Checks that path *name* does not exist on disk.
        """
        if os.path.exists(name):
            raise FileNotFoundError("Able to find '{0}'.".format(name))

    def assertEqualDataFrame(self, d1, d2, **kwargs):
        """
        Checks that two dataframes are equal.
        Calls :epkg:`pandas:testing:assert_frame_equal`.
        """
        from pandas.testing import assert_frame_equal
        assert_frame_equal(d1, d2, **kwargs)

    def assertNotEqualDataFrame(self, d1, d2, **kwargs):
        """
        Checks that two dataframes are different.
        Calls :epkg:`pandas:testing:assert_frame_equal`.
        """
        from pandas.testing import assert_frame_equal
        try:
            assert_frame_equal(d1, d2, **kwargs)
        except AssertionError:
            return
        raise AssertionError("Two dataframes are identical.")

    def assertEqualArray(self, d1, d2, **kwargs):
        """
        Checks that two arrays are equal.
        Relies on :epkg:`numpy:testing:assert_almost_equal.html`.
        """
        # Two Nones are considered equal; exactly one None is a failure.
        if d1 is None and d2 is None:
            return
        if d1 is None:
            raise AssertionError("d1 is None, d2 is not")
        if d2 is None:
            raise AssertionError("d1 is not None, d2 is")
        from numpy.testing import assert_almost_equal
        assert_almost_equal(d1, d2, **kwargs)

    def assertNotEqualArray(self, d1, d2, **kwargs):
        """
        Checks that two arrays are different.
        Relies on :epkg:`numpy:testing:assert_almost_equal.html`.
        """
        if d1 is None and d2 is None:
            raise AssertionError("d1 and d2 are equal to None")
        if d1 is None or d2 is None:
            return
        from numpy.testing import assert_almost_equal
        try:
            assert_almost_equal(d1, d2, **kwargs)
        except AssertionError:
            return
        raise AssertionError("Two arrays are identical.")

    def assertEqualNumber(self, d1, d2, **kwargs):
        """
        Checks that two numbers are equal.

        Keyword argument ``precision`` sets a tolerance: absolute when the
        smaller magnitude is zero, relative otherwise. Without it the
        comparison is exact.
        """
        from numpy import number
        if not isinstance(d1, (int, float, decimal.Decimal, number)):
            raise TypeError('d1 is not a number but {0}'.format(type(d1)))
        if not isinstance(d2, (int, float, decimal.Decimal, number)):
            raise TypeError('d2 is not a number but {0}'.format(type(d2)))
        diff = abs(float(d1 - d2))
        mi = float(min(abs(d1), abs(d2)))
        tol = kwargs.get('precision', None)
        if tol is None:
            if diff != 0:
                raise AssertionError("d1 != d2: {0} != {1}".format(d1, d2))
        else:
            if mi == 0:
                if diff > tol:
                    raise AssertionError(
                        "d1 != d2: {0} != {1} +/- {2}".format(d1, d2, tol))
            else:
                rel = diff / mi
                if rel > tol:
                    raise AssertionError(
                        "d1 != d2: {0} != {1} +/- {2}".format(d1, d2, tol))

    def assertRaise(self, fct, exc=None, msg=None):
        """
        Checks that function *fct* with no parameter
        raises an exception of a given type.
        @param fct      function to test (no parameter)
        @param exc      exception type to catch (None for all)
        @param msg      error message to check (None for no message to check)
        """
        try:
            fct()
        except Exception as e:
            if exc is None:
                return
            elif isinstance(e, exc):
                if msg is None:
                    return
                if msg not in str(e):
                    raise AssertionError(
                        "Function '{0}' raise exception with wrong message '{1}' (must contain '{2}').".format(fct, e, msg))
                return
            raise AssertionError(
                "Function '{0}' does not raise exception '{1}' but '{2}' of type '{3}'.".format(fct, exc, e, type(e)))
        raise AssertionError(
            "Function '{0}' does not raise exception.".format(fct))

    def assertStartsWith(self, sub, whole):
        """
        Checks that string *whole* starts with *sub*.
        """
        if not whole.startswith(sub):
            # Truncate long strings in the error message.
            if len(whole) > len(sub) * 2:
                whole = whole[:len(sub) * 2]
            raise AssertionError(
                "'{1}' does not start with '{0}'".format(sub, whole))

    def assertNotStartsWith(self, sub, whole):
        """
        Checks that string *whole* does not start with *sub*.
        """
        if whole.startswith(sub):
            if len(whole) > len(sub) * 2:
                whole = whole[:len(sub) * 2]
            raise AssertionError(
                "'{1}' starts with '{0}'".format(sub, whole))

    def assertEndsWith(self, sub, whole):
        """
        Checks that string *whole* ends with *sub*.
        """
        if not whole.endswith(sub):
            if len(whole) > len(sub) * 2:
                whole = whole[-len(sub) * 2:]
            raise AssertionError(
                "'{1}' does not end with '{0}'".format(sub, whole))

    def assertNotEndsWith(self, sub, whole):
        """
        Checks that string *whole* does not end with *sub*.
        """
        if whole.endswith(sub):
            if len(whole) > len(sub) * 2:
                whole = whole[-len(sub) * 2:]
            raise AssertionError(
                "'{1}' ends with '{0}'".format(sub, whole))

    def assertEqual(self, a, b):
        """
        Checks that ``a == b``.

        Falls back to dataframe/array comparison when plain equality raises
        the pandas/numpy "truth value is ambiguous" error. Two ``None``
        values compare equal; exactly one ``None`` fails.
        """
        if a is None and b is not None:
            raise AssertionError("a is None, b is not")
        if a is not None and b is None:
            raise AssertionError("a is not None, b is")
        try:
            unittest.TestCase.assertEqual(self, a, b)
        except ValueError as e:
            if "The truth value of a DataFrame is ambiguous" in str(e) or \
               "The truth value of an array with more than one element is ambiguous." in str(e):
                with warnings.catch_warnings():
                    warnings.filterwarnings("ignore", category=ImportWarning)
                    import pandas
                if isinstance(a, pandas.DataFrame) and isinstance(b, pandas.DataFrame):
                    self.assertEqualDataFrame(a, b)
                    return
                import numpy
                if isinstance(a, numpy.ndarray) and isinstance(b, numpy.ndarray):
                    self.assertEqualArray(a, b)
                    return
            raise AssertionError("Unable to check equality for types {0} and {1}".format(
                type(a), type(b))) from e

    def assertNotEqual(self, a, b):
        """
        Checks that ``a != b``.

        Falls back to dataframe/array comparison when plain inequality raises
        the pandas/numpy "truth value is ambiguous" error. Two ``None``
        values fail; exactly one ``None`` passes.
        """
        if a is None and b is None:
            raise AssertionError("a is None, b is too")
        if a is None and b is not None:
            return
        if a is not None and b is None:
            return
        try:
            unittest.TestCase.assertNotEqual(self, a, b)
        except ValueError as e:
            if "Can only compare identically-labeled DataFrame objects" in str(e) or \
               "The truth value of a DataFrame is ambiguous." in str(e) or \
               "The truth value of an array with more than one element is ambiguous." in str(e):
                with warnings.catch_warnings():
                    warnings.filterwarnings("ignore", category=ImportWarning)
                    import pandas
                if isinstance(a, pandas.DataFrame) and isinstance(b, pandas.DataFrame):
                    self.assertNotEqualDataFrame(a, b)
                    return
                import numpy
                if isinstance(a, numpy.ndarray) and isinstance(b, numpy.ndarray):
                    self.assertNotEqualArray(a, b)
                    return
            raise e

    def assertEqualFloat(self, a, b, precision=1e-5):
        """
        Checks that ``abs(a-b) < precision`` (relative when possible).
        """
        mi = min(abs(a), abs(b))
        if mi == 0:
            d = abs(a - b)
            self.assertLesser(d, precision)
        else:
            r = float(abs(a - b)) / mi
            self.assertLesser(r, precision)

    def assertCallable(self, fct):
        """
        Checks that *fct* is callable.
        """
        if not callable(fct):
            raise AssertionError("fct is not callable: {0}".format(type(fct)))

    def assertEqualDict(self, a, b):
        """
        Checks that dictionaries ``a == b``, reporting every differing,
        added or removed key in one message.
        """
        if not isinstance(a, dict):
            raise TypeError('a is not dict but {0}'.format(type(a)))
        if not isinstance(b, dict):
            raise TypeError('b is not dict but {0}'.format(type(b)))
        rows = []
        for key in sorted(b):
            if key not in a:
                rows.append("** Added key '{0}' in b".format(key))
            else:
                if a[key] != b[key]:
                    rows.append(
                        "** Value != for key '{0}': != id({1}) != id({2})\n==1 {3}\n==2 {4}".format(
                            key, id(a[key]), id(b[key]), a[key], b[key]))
        for key in sorted(a):
            if key not in b:
                rows.append("** Removed key '{0}' in a".format(key))
        if len(rows) > 0:
            raise AssertionError(
                "Dictionaries are different\n{0}".format('\n'.join(rows)))

    def fLOG(self, *args, **kwargs):
        """
        Prints out some information.
        @see fn fLOG.
        """
        fLOG(*args, **kwargs)

    def profile(self, fct, sort='cumulative', rootrem=None):
        """
        Profiles the execution of a function.

        @param fct      function to profile
        @param sort     see `sort_stats <https://docs.python.org/3/library/profile.html#pstats.Stats.sort_stats>`_
        @param rootrem  root to remove in filenames
        @return tuple (:class:`pstats.Stats` instance, statistics text dump)
        """
        pr = cProfile.Profile()
        pr.enable()
        fct()
        pr.disable()
        s = StringIO()
        ps = pstats.Stats(pr, stream=s).sort_stats(sort)
        ps.print_stats()
        res = s.getvalue()
        try:
            pack = site.getsitepackages()
        except AttributeError:
            # site.getsitepackages is unavailable in virtualenvs; derive the
            # site-packages folder from numpy's location instead.
            import numpy
            pack = os.path.normpath(os.path.abspath(
                os.path.join(os.path.dirname(numpy.__file__), "..")))
            pack = [pack]
        # Shorten paths in the dump for readability.
        res = res.replace(pack[-1], "site-packages")
        if rootrem is not None:
            res = res.replace(rootrem, '')
        return ps, res

    def read_file(self, filename, mode='r', encoding="utf-8"):
        """
        Returns the content of a file.

        @param filename     filename
        @param encoding     encoding
        @param mode         reading mode
        @return content
        """
        self.assertExists(filename)
        with open(filename, mode, encoding=encoding) as f:
            return f.read()

    def write_file(self, filename, content, mode='w', encoding='utf-8'):
        """
        Writes the content of a file.

        @param filename     filename
        @param content      content to write
        @param encoding     encoding
        @param mode         reading mode
        @return content
        """
        with open(filename, mode, encoding=encoding) as f:
            return f.write(content)
def skipif_appveyor(msg):
    """
    Skips a unit test if it runs on :epkg:`appveyor`.
    """
    if is_travis_or_appveyor() == 'appveyor':
        return unittest.skip('Test does not work on appveyor due to: ' + msg)
    return lambda x: x
def skipif_travis(msg):
    """
    Skips a unit test if it runs on :epkg:`travis`.
    """
    if is_travis_or_appveyor() == 'travis':
        return unittest.skip('Test does not work on travis due to: ' + msg)
    return lambda x: x
def skipif_circleci(msg):
    """
    Skips a unit test if it runs on :epkg:`circleci`.
    """
    if is_travis_or_appveyor() == 'circleci':
        return unittest.skip('Test does not work on circleci due to: ' + msg)
    return lambda x: x
def skipif_azure(msg):
    """
    Skips a unit test if it runs on :epkg:`azure pipeline`.
    """
    if is_travis_or_appveyor() == 'azurepipe':
        return unittest.skip('Test does not work on azure pipeline due to: ' + msg)
    return lambda x: x
def skipif_azure_linux(msg):
    """
    Skips a unit test if it runs on :epkg:`azure pipeline` on :epkg:`linux`.
    """
    # Bug fix: the old ``and`` made the decorator skip whenever EITHER
    # condition held (any linux machine, or azure on any OS). The test should
    # run unless we are on azure pipeline AND on linux.
    if not sys.platform.startswith('lin') or is_travis_or_appveyor() != 'azurepipe':
        return lambda x: x
    msg = 'Test does not work on azure pipeline (linux) due to: ' + msg
    return unittest.skip(msg)
def skipif_azure_macosx(msg):
    """
    Skips a unit test if it runs on :epkg:`azure pipeline` on :epkg:`macosx`.
    """
    # Bug fix: the old ``and`` made the decorator skip whenever EITHER
    # condition held (any macosx machine, or azure on any OS). The test should
    # run unless we are on azure pipeline AND on macosx. The docstring also
    # wrongly said "linux".
    if not sys.platform.startswith('darwin') or is_travis_or_appveyor() != 'azurepipe':
        return lambda x: x
    msg = 'Test does not work on azure pipeline (macosx) due to: ' + msg
    return unittest.skip(msg)
def skipif_linux(msg):
    """
    Skips a unit test if it runs on :epkg:`linux`.
    .. versionadded:: 1.7
    """
    if not sys.platform.startswith('lin'):
        return lambda x: x
    # Bug fix: the skip reason previously said "travis" instead of "linux"
    # (copy-paste from skipif_travis), producing a misleading skip message.
    msg = 'Test does not work on linux due to: ' + msg
    return unittest.skip(msg)
def skipif_vless(version, msg):
    """
    Skips a unit test if the Python version is strictly below *version*
    (a tuple such as ``(3, 6, 0)``, compared against ``sys.version_info[:3]``).
    .. versionadded:: 1.7
    """
    if sys.version_info[:3] >= version:
        return lambda x: x
    msg = 'Python {} < {}: {}'.format(sys.version_info[:3], version, msg)
    return unittest.skip(msg)
Hide some warnings when running unit tests
"""
@file
@brief Overwrites unit test class with additional testing functions.
"""
import os
import sys
import unittest
import warnings
import decimal
import cProfile
import pstats
import site
from .ci_helper import is_travis_or_appveyor
from ..loghelper import fLOG
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
class ExtTestCase(unittest.TestCase):
"""
Overwrites unit test class with additional testing functions.
Unless *setUp* is overwritten, warnings *FutureWarning* and
*PendingDeprecationWarning* are filtered out.
"""
def setUp(self):
"""
Filters out *FutureWarning*, *PendingDeprecationWarning*.
"""
warnings.simplefilter("ignore", (FutureWarning, PendingDeprecationWarning))
def tearDown(self):
"""
Stops filtering out *FutureWarning*, *PendingDeprecationWarning*.
"""
warnings.simplefilter("default", (FutureWarning, PendingDeprecationWarning))
@staticmethod
def _format_str(s):
"""
Returns ``s`` or ``'s'`` depending on the type.
"""
if hasattr(s, "replace"):
return "'{0}'".format(s)
else:
return s
def assertNotEmpty(self, x):
"""
Checks that *x* is not empty.
"""
if x is None or (hasattr(x, "__len__") and len(x) == 0):
raise AssertionError("x is empty")
def assertEmpty(self, x, none_allowed=True):
"""
Checks that *x* is empty.
"""
if not((none_allowed and x is None) or (hasattr(x, "__len__") and len(x) == 0)):
if isinstance(x, (list, tuple, dict, set)):
end = min(5, len(x))
disp = "\n" + '\n'.join(map(str, x[:end]))
else:
disp = ""
raise AssertionError("x is not empty{0}".format(disp))
def assertGreater(self, x, y, strict=False):
"""
Checks that ``x >= y``.
"""
if x < y or (strict and x == y):
raise AssertionError("x <{2} y with x={0} and y={1}".format(
ExtTestCase._format_str(x), ExtTestCase._format_str(y),
"" if strict else "="))
def assertLesser(self, x, y, strict=False):
"""
Checks that ``x <= y``.
"""
if x > y or (strict and x == y):
raise AssertionError("x >{2} y with x={0} and y={1}".format(
ExtTestCase._format_str(x), ExtTestCase._format_str(y),
"" if strict else "="))
def assertExists(self, name):
"""
Checks that *name* exists.
"""
if not os.path.exists(name):
raise FileNotFoundError("Unable to find '{0}'.".format(name))
def assertNotExists(self, name):
"""
Checks that *name* does not exist.
"""
if os.path.exists(name):
raise FileNotFoundError("Able to find '{0}'.".format(name))
def assertEqualDataFrame(self, d1, d2, **kwargs):
"""
Checks that two dataframes are equal.
Calls :epkg:`pandas:testing:assert_frame_equal`.
"""
from pandas.testing import assert_frame_equal
assert_frame_equal(d1, d2, **kwargs)
def assertNotEqualDataFrame(self, d1, d2, **kwargs):
"""
Checks that two dataframes are different.
Calls :epkg:`pandas:testing:assert_frame_equal`.
"""
from pandas.testing import assert_frame_equal
try:
assert_frame_equal(d1, d2, **kwargs)
except AssertionError:
return
raise AssertionError("Two dataframes are identical.")
def assertEqualArray(self, d1, d2, **kwargs):
"""
Checks that two arrays are equal.
Relies on :epkg:`numpy:testing:assert_almost_equal.html`.
"""
if d1 is None and d2 is None:
return
if d1 is None:
raise AssertionError("d1 is None, d2 is not")
if d2 is None:
raise AssertionError("d1 is not None, d2 is")
from numpy.testing import assert_almost_equal
assert_almost_equal(d1, d2, **kwargs)
def assertNotEqualArray(self, d1, d2, **kwargs):
"""
Checks that two arrays are equal.
Relies on :epkg:`numpy:testing:assert_almost_equal.html`.
"""
if d1 is None and d2 is None:
raise AssertionError("d1 and d2 are equal to None")
if d1 is None or d2 is None:
return
from numpy.testing import assert_almost_equal
try:
assert_almost_equal(d1, d2, **kwargs)
except AssertionError:
return
raise AssertionError("Two arrays are identical.")
def assertEqualNumber(self, d1, d2, **kwargs):
"""
Checks that two numbers are equal.
"""
from numpy import number
if not isinstance(d1, (int, float, decimal.Decimal, number)):
raise TypeError('d1 is not a number but {0}'.format(type(d1)))
if not isinstance(d2, (int, float, decimal.Decimal, number)):
raise TypeError('d2 is not a number but {0}'.format(type(d2)))
diff = abs(float(d1 - d2))
mi = float(min(abs(d1), abs(d2)))
tol = kwargs.get('precision', None)
if tol is None:
if diff != 0:
raise AssertionError("d1 != d2: {0} != {1}".format(d1, d2))
else:
if mi == 0:
if diff > tol:
raise AssertionError(
"d1 != d2: {0} != {1} +/- {2}".format(d1, d2, tol))
else:
rel = diff / mi
if rel > tol:
raise AssertionError(
"d1 != d2: {0} != {1} +/- {2}".format(d1, d2, tol))
def assertRaise(self, fct, exc=None, msg=None):
"""
Checks that function *fct* with no parameter
raises an exception of a given type.
@param fct function to test (no parameter)
@param exc exception type to catch (None for all)
@param msg error message to check (None for no message to check)
"""
try:
fct()
except Exception as e:
if exc is None:
return
elif isinstance(e, exc):
if msg is None:
return
if msg not in str(e):
raise AssertionError(
"Function '{0}' raise exception with wrong message '{1}' (must contain '{2}').".format(fct, e, msg))
return
raise AssertionError(
"Function '{0}' does not raise exception '{1}' but '{2}' of type '{3}'.".format(fct, exc, e, type(e)))
raise AssertionError(
"Function '{0}' does not raise exception.".format(fct))
def assertStartsWith(self, sub, whole):
"""
Checks that string *sub* starts with *whole*.
"""
if not whole.startswith(sub):
if len(whole) > len(sub) * 2:
whole = whole[:len(sub) * 2]
raise AssertionError(
"'{1}' does not start with '{0}'".format(sub, whole))
def assertNotStartsWith(self, sub, whole):
"""
Checks that string *sub* does not start with *whole*.
"""
if whole.startswith(sub):
if len(whole) > len(sub) * 2:
whole = whole[:len(sub) * 2]
raise AssertionError(
"'{1}' starts with '{0}'".format(sub, whole))
def assertEndsWith(self, sub, whole):
"""
Checks that string *sub* ends with *whole*.
"""
if not whole.endswith(sub):
if len(whole) > len(sub) * 2:
whole = whole[-len(sub) * 2:]
raise AssertionError(
"'{1}' does not end with '{0}'".format(sub, whole))
def assertNotEndsWith(self, sub, whole):
"""
Checks that string *sub* does not end with *whole*.
"""
if whole.endswith(sub):
if len(whole) > len(sub) * 2:
whole = whole[-len(sub) * 2:]
raise AssertionError(
"'{1}' ends with '{0}'".format(sub, whole))
def assertEqual(self, a, b):
    """
    Checks that ``a == b``.

    Falls back on dedicated comparisons when both operands are
    pandas DataFrames or numpy arrays, whose elementwise ``==`` makes
    ``unittest.TestCase.assertEqual`` raise ``ValueError``.
    @param a first value
    @param b second value
    """
    # None is only equal to None; fail fast with a precise message.
    if a is None and b is not None:
        raise AssertionError("a is None, b is not")
    if a is not None and b is None:
        raise AssertionError("a is not None, b is")
    try:
        unittest.TestCase.assertEqual(self, a, b)
    except ValueError as e:
        # Dispatch on the "ambiguous truth value" messages raised when
        # __eq__ returns an array instead of a bool.
        if "The truth value of a DataFrame is ambiguous" in str(e) or \
           "The truth value of an array with more than one element is ambiguous." in str(e):
            with warnings.catch_warnings():
                # pandas import can emit ImportWarning; silence it here
                warnings.filterwarnings("ignore", category=ImportWarning)
                import pandas
                if isinstance(a, pandas.DataFrame) and isinstance(b, pandas.DataFrame):
                    self.assertEqualDataFrame(a, b)
                    return
                import numpy
                if isinstance(a, numpy.ndarray) and isinstance(b, numpy.ndarray):
                    self.assertEqualArray(a, b)
                    return
        raise AssertionError("Unable to check equality for types {0} and {1}".format(
            type(a), type(b))) from e
def assertNotEqual(self, a, b):
    """
    Checks that ``a != b``.

    Falls back on dedicated comparisons when both operands are
    pandas DataFrames or numpy arrays, whose elementwise ``==`` makes
    ``unittest.TestCase.assertNotEqual`` raise ``ValueError``.
    @param a first value
    @param b second value
    """
    if a is None and b is None:
        raise AssertionError("a is None, b is too")
    # None differs from any non-None value: nothing more to check.
    if a is None and b is not None:
        return
    if a is not None and b is None:
        return
    try:
        unittest.TestCase.assertNotEqual(self, a, b)
    except ValueError as e:
        # Dispatch on the messages raised when __eq__ returns an array
        # (or pandas refuses to compare differently labeled frames).
        if "Can only compare identically-labeled DataFrame objects" in str(e) or \
           "The truth value of a DataFrame is ambiguous." in str(e) or \
           "The truth value of an array with more than one element is ambiguous." in str(e):
            with warnings.catch_warnings():
                # pandas import can emit ImportWarning; silence it here
                warnings.filterwarnings("ignore", category=ImportWarning)
                import pandas
                if isinstance(a, pandas.DataFrame) and isinstance(b, pandas.DataFrame):
                    self.assertNotEqualDataFrame(a, b)
                    return
                import numpy
                if isinstance(a, numpy.ndarray) and isinstance(b, numpy.ndarray):
                    self.assertNotEqualArray(a, b)
                    return
        raise e
def assertEqualFloat(self, a, b, precision=1e-5):
    """
    Checks that ``abs(a-b) < precision``.

    Uses a relative difference when neither value is zero,
    an absolute difference otherwise.
    """
    smallest = min(abs(a), abs(b))
    if smallest == 0:
        self.assertLesser(abs(a - b), precision)
    else:
        self.assertLesser(float(abs(a - b)) / smallest, precision)
def assertCallable(self, fct):
    """
    Checks that *fct* is callable.
    """
    if callable(fct):
        return
    raise AssertionError("fct is not callable: {0}".format(type(fct)))
def assertEqualDict(self, a, b):
    """
    Checks that ``a == b``.

    Both arguments must be dictionaries; the failure message lists
    every added, removed or differing key.
    """
    if not isinstance(a, dict):
        raise TypeError('a is not dict but {0}'.format(type(a)))
    if not isinstance(b, dict):
        raise TypeError('b is not dict but {0}'.format(type(b)))
    issues = []
    for key in sorted(b):
        if key not in a:
            issues.append("** Added key '{0}' in b".format(key))
        elif a[key] != b[key]:
            issues.append(
                "** Value != for key '{0}': != id({1}) != id({2})\n==1 {3}\n==2 {4}".format(
                    key, id(a[key]), id(b[key]), a[key], b[key]))
    issues.extend("** Removed key '{0}' in a".format(key)
                  for key in sorted(a) if key not in b)
    if issues:
        raise AssertionError(
            "Dictionaries are different\n{0}".format('\n'.join(issues)))
def fLOG(self, *args, **kwargs):
    """
    Prints out some information.

    Thin convenience wrapper around the module-level function
    @see fn fLOG; all positional and keyword arguments are forwarded.
    """
    fLOG(*args, **kwargs)
def profile(self, fct, sort='cumulative', rootrem=None):
    """
    Profiles the execution of a function.
    @param fct function to profile (called with no argument)
    @param sort see `sort_stats <https://docs.python.org/3/library/profile.html#pstats.Stats.sort_stats>`_
    @param rootrem root to remove in filenames
    @return tuple ``(ps, res)``: the ``pstats.Stats`` object and the
        statistics text dump
    """
    pr = cProfile.Profile()
    pr.enable()
    fct()
    pr.disable()
    s = StringIO()
    ps = pstats.Stats(pr, stream=s).sort_stats(sort)
    ps.print_stats()
    res = s.getvalue()
    try:
        pack = site.getsitepackages()
    except AttributeError:
        # site.getsitepackages may be missing (e.g. inside virtualenv);
        # infer the site-packages folder from numpy's location instead.
        import numpy
        pack = os.path.normpath(os.path.abspath(
            os.path.join(os.path.dirname(numpy.__file__), "..")))
        pack = [pack]
    # shorten absolute paths so the dump is readable and portable
    res = res.replace(pack[-1], "site-packages")
    if rootrem is not None:
        res = res.replace(rootrem, '')
    return ps, res
def read_file(self, filename, mode='r', encoding="utf-8"):
    """
    Returns the content of a file.
    @param filename filename
    @param mode reading mode
    @param encoding encoding
    @return content
    """
    self.assertExists(filename)
    with open(filename, mode, encoding=encoding) as handle:
        return handle.read()
def write_file(self, filename, content, mode='w', encoding='utf-8'):
    """
    Writes the content of a file.
    @param filename filename
    @param content content to write
    @param mode writing mode
    @param encoding encoding
    @return number of characters written
    """
    with open(filename, mode, encoding=encoding) as handle:
        return handle.write(content)
def skipif_appveyor(msg):
    """
    Skips a unit test if it runs on :epkg:`appveyor`.
    @param msg reason appended to the skip message
    @return a decorator (identity when not on appveyor)
    """
    # no-op decorator unless the detected CI is appveyor
    if is_travis_or_appveyor() != 'appveyor':
        return lambda x: x
    msg = 'Test does not work on appveyor due to: ' + msg
    return unittest.skip(msg)
def skipif_travis(msg):
    """
    Skips a unit test if it runs on :epkg:`travis`.
    @param msg reason appended to the skip message
    @return a decorator (identity when not on travis)
    """
    # no-op decorator unless the detected CI is travis
    if is_travis_or_appveyor() != 'travis':
        return lambda x: x
    msg = 'Test does not work on travis due to: ' + msg
    return unittest.skip(msg)
def skipif_circleci(msg):
    """
    Skips a unit test if it runs on :epkg:`circleci`.
    @param msg reason appended to the skip message
    @return a decorator (identity when not on circleci)
    """
    # no-op decorator unless the detected CI is circleci
    if is_travis_or_appveyor() != 'circleci':
        return lambda x: x
    msg = 'Test does not work on circleci due to: ' + msg
    return unittest.skip(msg)
def skipif_azure(msg):
    """
    Skips a unit test if it runs on :epkg:`azure pipeline`.
    @param msg reason appended to the skip message
    @return a decorator (identity when not on azure pipeline)
    """
    # no-op decorator unless the detected CI is azure pipeline
    if is_travis_or_appveyor() != 'azurepipe':
        return lambda x: x
    msg = 'Test does not work on azure pipeline due to: ' + msg
    return unittest.skip(msg)
def skipif_azure_linux(msg):
    """
    Skips a unit test if it runs on :epkg:`azure pipeline` on :epkg:`linux`.
    @param msg reason appended to the skip message
    @return a decorator (identity unless on azure pipeline AND linux)
    """
    # Skip only when BOTH conditions hold (azure pipeline AND linux).
    # The original used 'and' between the negated conditions, which made
    # the test skip on ANY linux machine, azure or not.
    if not sys.platform.startswith('lin') or is_travis_or_appveyor() != 'azurepipe':
        return lambda x: x
    msg = 'Test does not work on azure pipeline (linux) due to: ' + msg
    return unittest.skip(msg)
def skipif_azure_macosx(msg):
    """
    Skips a unit test if it runs on :epkg:`azure pipeline` on :epkg:`macosx`.

    The previous docstring wrongly said linux.
    @param msg reason appended to the skip message
    @return a decorator (identity unless on azure pipeline AND macosx)
    """
    # Skip only when BOTH conditions hold (azure pipeline AND macosx).
    # The original used 'and' between the negated conditions, which made
    # the test skip on ANY macosx machine, azure or not.
    if not sys.platform.startswith('darwin') or is_travis_or_appveyor() != 'azurepipe':
        return lambda x: x
    msg = 'Test does not work on azure pipeline (macosx) due to: ' + msg
    return unittest.skip(msg)
def skipif_linux(msg):
    """
    Skips a unit test if it runs on :epkg:`linux`.
    @param msg reason appended to the skip message
    @return a decorator (identity when not on linux)
    .. versionadded:: 1.7
    """
    if not sys.platform.startswith('lin'):
        return lambda x: x
    # The original message said "travis"; this decorator is about linux.
    msg = 'Test does not work on linux due to: ' + msg
    return unittest.skip(msg)
def skipif_vless(version, msg):
    """
    Skips a unit test if the version is stricly below *version* (tuple).
    @param version minimum required ``sys.version_info[:3]`` tuple
    @param msg reason appended to the skip message
    .. versionadded:: 1.7
    """
    current = sys.version_info[:3]
    if current >= version:
        # interpreter is recent enough: identity decorator
        return lambda x: x
    return unittest.skip(
        'Python {} < {}: {}'.format(current, version, msg))
|
added Vote.vote_status, Nominee.get_judge_ranks() and Nominee.get_votes()
|
import sys
import os
import json
import requests
from parse_uri import ParseUri
from string import Template
import collections
import itertools
import datetime
class Artifact:
    """Abstract artifact produced by a job; concrete subclasses know how
    to check for and remove it."""

    def exists(self):
        """Returns True when the artifact is already present."""
        # NotImplementedError is the idiomatic "abstract method" signal;
        # it subclasses Exception so existing broad handlers keep working.
        raise NotImplementedError("not implemented")

    def delete(self):
        """Removes the artifact."""
        raise NotImplementedError("not implemented")
class SSHArtifact(Artifact):
    """Artifact living on a remote host, reachable through ssh."""

    def __init__(self, host, path):
        self.host = host
        self.path = path

    def exists(self):
        # TODO: This should throw exceptions on any errors and only return
        # False when we genuinely know the file is not there
        status = os.system('ssh %s "[ -f %s ]"' % (self.host, self.path))
        # a zero exit status means the remote test succeeded: file exists
        return status == 0

    def delete(self):
        os.system('ssh %s "rm %s"' % (self.host, self.path))
class FileArtifact(Artifact):
    """Artifact stored on the local filesystem."""

    def __init__(self, path):
        self.path = path

    def exists(self):
        """Returns True when the file is present.

        Uses os.path.isfile instead of shelling out to ``[ -f ... ]`` so
        paths with spaces or shell metacharacters are handled safely.
        """
        if not self.path:
            # %r keeps this from raising TypeError when path is None
            raise Exception("invalid path %r" % (self.path,))
        return os.path.isfile(self.path)

    def delete(self):
        """Removes the file; os.remove avoids the quoting issues of a
        shelled-out ``rm`` and raises loudly when deletion fails."""
        print('rm %s' % self.path)
        os.remove(self.path)
def resolve_artifact(uri):
    """Maps a URI to the matching Artifact implementation by protocol."""
    parsed = ParseUri().parse(uri)
    protocol = parsed.protocol
    if protocol == "ssh":
        return SSHArtifact(parsed.host, parsed.path)
    if protocol == "file":
        return FileArtifact(parsed.path)
    # unknown protocol: abstract artifact whose methods raise
    return Artifact()
def resolve_date(year, month, day, day_delta=0):
    """Returns the datetime shifted back by *day_delta* days.

    The original returned ``dt - delta`` where ``delta`` was never
    defined, raising NameError on every call; the subtraction by
    ``day_delta`` already produced the desired value.
    """
    return datetime.datetime(year, month, day) - datetime.timedelta(days=day_delta)
class Job:
def __init__(self, jobid, parameters={}):
self.parameters = parameters
jobs_dir = 'jobs'
job_filename = os.path.join(jobs_dir, jobid + ".job")
job_file = open(job_filename)
jobconf = json.load(job_file)
self.jobid = jobid
self.command = Template(jobconf["command"]).substitute(self.parameters)
self.artifact = resolve_artifact(Template(jobconf["artifact"]).substitute(self.parameters))
self.dependencies = []
for dependency_conf in jobconf.get("dependencies", []):
for params in self.resolve_dependency_parameters(dependency_conf['parameters']):
self.dependencies.append(Job(dependency_conf['jobid'], params))
def resolve_dependency_parameters(self, dependency_parameters):
print jobid, "resolving parameters"
templated_params = {}
for key, values in dependency_parameters.items():
for value in values:
try:
value = Template(value).substitute(self.parameters)
except KeyError, e:
print "Could not resolve template parameter", e
raise
templated_params[key] = templated_params.get(key, []) + [value]
for point in itertools.product(*templated_params.values()):
params = dict(zip(templated_params.iterkeys(), point))
yield params
def run(self):
print self.jobid, "command:", self.command
ret = os.system(self.command)
if ret:
print "Error",ret
sys.exit(ret)
def build(self):
if self.artifact.exists():
print self.jobid, "artifact present, nothing to do", sorted(self.artifact.__dict__.items())
else:
print self.jobid, "artifact not present", sorted(self.artifact.__dict__.items())
print self.jobid, "checking dependencies"
for dependency in self.dependencies:
dependency.build()
print self.jobid, "Starting with parameters", self.parameters
self.run()
print self.jobid, "finished"
# Entry point: build the job named on the command line, no extra parameters.
jobid = sys.argv[1]
Job(jobid, {}).build()
added http artifact
import sys
import os
import json
import requests
from parse_uri import ParseUri
from string import Template
import collections
import itertools
import datetime
class Artifact:
    """Abstract artifact produced by a job; concrete subclasses know how
    to check for and remove it."""

    def exists(self):
        """Returns True when the artifact is already present."""
        # NotImplementedError is the idiomatic "abstract method" signal;
        # it subclasses Exception so existing broad handlers keep working.
        raise NotImplementedError("not implemented")

    def delete(self):
        """Removes the artifact."""
        raise NotImplementedError("not implemented")
class HTTPArtifact(Artifact):
    """Artifact published at an HTTP URL; presence is probed with HEAD."""

    def __init__(self, url):
        self.url = url

    def exists(self):
        response = requests.head(self.url)
        code = response.status_code
        if code == 404:
            return False
        if code in (200, 302):
            return True
        raise Exception("Unexpected status code: %s" % code)
class SSHArtifact(Artifact):
    """Artifact living on a remote host, reachable through ssh."""

    def __init__(self, host, path):
        self.host = host
        self.path = path

    def exists(self):
        # TODO: This should throw exceptions on any errors and only return
        # False when we genuinely know the file is not there
        status = os.system('ssh %s "[ -f %s ]"' % (self.host, self.path))
        # a zero exit status means the remote test succeeded: file exists
        return status == 0

    def delete(self):
        os.system('ssh %s "rm %s"' % (self.host, self.path))
class FileArtifact(Artifact):
    """Artifact stored on the local filesystem."""

    def __init__(self, path):
        self.path = path

    def exists(self):
        """Returns True when the file is present.

        Uses os.path.isfile instead of shelling out to ``[ -f ... ]`` so
        paths with spaces or shell metacharacters are handled safely.
        """
        if not self.path:
            # %r keeps this from raising TypeError when path is None
            raise Exception("invalid path %r" % (self.path,))
        return os.path.isfile(self.path)

    def delete(self):
        """Removes the file; os.remove avoids the quoting issues of a
        shelled-out ``rm`` and raises loudly when deletion fails."""
        print('rm %s' % self.path)
        os.remove(self.path)
def resolve_artifact(uri):
    """Maps a URI to the matching Artifact implementation by protocol."""
    parsed = ParseUri().parse(uri)
    protocol = parsed.protocol
    if protocol == "ssh":
        return SSHArtifact(parsed.host, parsed.path)
    if protocol == "file":
        return FileArtifact(parsed.path)
    if protocol == "http":
        return HTTPArtifact(parsed.source)
    # unknown protocol: abstract artifact whose methods raise
    return Artifact()
def resolve_date(year, month, day, day_delta=0):
    """Returns the datetime shifted back by *day_delta* days.

    The original returned ``dt - delta`` where ``delta`` was never
    defined, raising NameError on every call; the subtraction by
    ``day_delta`` already produced the desired value.
    """
    return datetime.datetime(year, month, day) - datetime.timedelta(days=day_delta)
class Job:
def __init__(self, jobid, parameters={}):
self.parameters = parameters
jobs_dir = 'jobs'
job_filename = os.path.join(jobs_dir, jobid + ".job")
job_file = open(job_filename)
jobconf = json.load(job_file)
self.jobid = jobid
self.command = Template(jobconf["command"]).substitute(self.parameters)
self.artifact = resolve_artifact(Template(jobconf["artifact"]).substitute(self.parameters))
self.dependencies = []
for dependency_conf in jobconf.get("dependencies", []):
for params in self.resolve_dependency_parameters(dependency_conf['parameters']):
self.dependencies.append(Job(dependency_conf['jobid'], params))
def resolve_dependency_parameters(self, dependency_parameters):
print jobid, "resolving parameters"
templated_params = {}
for key, values in dependency_parameters.items():
for value in values:
try:
value = Template(value).substitute(self.parameters)
except KeyError, e:
print "Could not resolve template parameter", e
raise
templated_params[key] = templated_params.get(key, []) + [value]
for point in itertools.product(*templated_params.values()):
params = dict(zip(templated_params.iterkeys(), point))
yield params
def run(self):
print self.jobid, "command:", self.command
ret = os.system(self.command)
if ret:
print "Error",ret
sys.exit(ret)
def build(self):
if self.artifact.exists():
print self.jobid, "artifact present, nothing to do", sorted(self.artifact.__dict__.items())
else:
print self.jobid, "artifact not present", sorted(self.artifact.__dict__.items())
print self.jobid, "checking dependencies"
for dependency in self.dependencies:
dependency.build()
print self.jobid, "Starting with parameters", self.parameters
self.run()
print self.jobid, "finished"
# Entry point: build the job named on the command line, no extra parameters.
jobid = sys.argv[1]
Job(jobid, {}).build()
|
from app import app, db, utils
from utils import *
from models import *
from forms import *
from flask import render_template, flash, redirect, session, url_for, request, \
g, jsonify, current_app, Response
from flask.ext.login import login_user, logout_user, current_user, \
login_required
from variables import *
from datetime import time
from utils import generate_password, import_file
from flask_user import login_required, signals
from flask_user.emails import send_email
from flask_user.views import _endpoint_url, _send_registered_email
from flask_login import current_user, login_user, logout_user
from tempfile import NamedTemporaryFile
import csv, time
@app.route('/')
def index():
    """Renders the landing page."""
    return render_template('base.html')
@app.route('/admin/new', methods=['GET', 'POST'])
@app.route('/admin/edit/<id>', methods=['GET', 'POST'])
@login_required
def new(id=None):
    """Creates a new food resource or edits an existing one (admin only).

    @param id primary key of the resource to edit; None means "create new".
    """
    form = AddNewFoodResourceForm(request.form)
    # Set timeslot choices.
    for timeslots in form.daily_timeslots:
        for timeslot in timeslots.timeslots:
            timeslot.starts_at.choices=get_possible_opening_times()
            timeslot.ends_at.choices=get_possible_closing_times()
    # Set food resource type choices.
    food_resource_types = FoodResourceType.query \
        .order_by(FoodResourceType.name_plural).all()
    food_resource_types_choices = []
    for food_resource_type in food_resource_types:
        food_resource_types_choices.append(
            (food_resource_type.enum,
            food_resource_type.name_singular)
        )
    form.location_type.choices = food_resource_types_choices
    # Create a new food resource.
    if id is None:
        title = "Add New Food Resource"
        food_resource_type = food_resource_types_choices[0][0]
    # Edit an existing food resource.
    else:
        title = "Edit Food Resource"
    # GET request.
    if request.method == 'GET':
        if id is not None:
            # Populate form with information about existing food resource.
            food_resource = FoodResource.query.filter_by(id=id).first()
            if food_resource is None:
                return render_template('404.html')
            # Data that can be directly retrieved from the database.
            form.name.data = food_resource.name
            form.address_line1.data = food_resource.address.line1
            form.address_line2.data = food_resource.address.line2
            form.address_city.data = food_resource.address.city
            form.address_state.data = food_resource.address.state
            form.address_zip_code.data = food_resource.address.zip_code
            form.phone_number.data = food_resource.phone_numbers[0].number
            form.website.data = food_resource.url
            form.additional_information.data = food_resource.description
            form.is_for_family_and_children.data = \
                food_resource.is_for_family_and_children
            form.is_for_seniors.data = food_resource.is_for_seniors
            form.is_wheelchair_accessible.data = \
                food_resource.is_wheelchair_accessible
            form.is_accepts_snap.data = food_resource.is_accepts_snap
            form.location_type.data = food_resource.food_resource_type.enum
            # Data that must be interpreted before being rendered.
            if food_resource.are_hours_available == True:
                form.are_hours_available.data = "yes"
            else:
                form.are_hours_available.data = "no"
            # Rebuild the per-day timeslot widgets: count how many slots
            # each weekday already has while assigning them in order.
            num_timeslots_per_day = [0] * 7
            for timeslot in food_resource.timeslots:
                day_of_week_index = timeslot.day_of_week
                timeslot_index = num_timeslots_per_day[timeslot.day_of_week]
                num_timeslots_per_day[timeslot.day_of_week] += 1
                start_time = timeslot.start_time
                end_time = timeslot.end_time
                form.daily_timeslots[day_of_week_index].timeslots[timeslot_index].starts_at.data = start_time.strftime("%H:%M")
                form.daily_timeslots[day_of_week_index].timeslots[timeslot_index].ends_at.data = end_time.strftime("%H:%M")
                form.is_open[day_of_week_index].is_open.data = "open"
                form.daily_timeslots[day_of_week_index].num_timeslots.data = num_timeslots_per_day[timeslot.day_of_week]
    # POST request.
    additional_errors = []
    if request.method == 'POST' and form.validate():
        food_resource = create_food_resource_from_form(form, additional_errors)
        if (len(additional_errors) == 0):
            # If a food resource is being edited, remove its old version
            # from the database before adding the replacement.
            if id is not None:
                fr = FoodResource.query.filter_by(id=id).first()
                if fr:
                    db.session.delete(fr)
            # Commit all database changes.
            db.session.add(food_resource)
            db.session.commit()
            return redirect(url_for('admin'))
    # If GET request is received or POST request fails due to invalid timeslots,
    # render the page.
    return render_template('add_resource.html', form=form,
        days_of_week=days_of_week,
        additional_errors=additional_errors, title=title)
# Allows non-admins to add food resources.
@app.route('/propose-resource', methods=['GET', 'POST'])
def guest_new_food_resource():
    """Displays the guest "propose a resource" form; on a valid POST the
    resource is stored unapproved together with its proposing contact."""
    form = NonAdminAddNewFoodResourceForm(request.form)
    # Set timeslot choices.
    for timeslots in form.daily_timeslots:
        for timeslot in timeslots.timeslots:
            timeslot.starts_at.choices=get_possible_opening_times()
            timeslot.ends_at.choices=get_possible_closing_times()
    # Set food resource type choices.
    food_resource_types = FoodResourceType.query \
        .order_by(FoodResourceType.name_plural).all()
    food_resource_types_choices = []
    for food_resource_type in food_resource_types:
        food_resource_types_choices.append(
            (food_resource_type.enum,
            food_resource_type.name_singular)
        )
    form.location_type.choices = food_resource_types_choices
    # Initialize location type.
    if request.method == 'GET':
        form.location_type.data = food_resource_types_choices[0][0]
    additional_errors = []
    if request.method == 'POST' and form.validate():
        # Check if this guest has added resources in the past. If not,
        # create a new FoodResourceContact.
        guest_name = form.your_name.data
        guest_email = form.your_email_address.data
        guest_phone_number = form.your_phone_number.data
        # Check to see if this contact exists.
        contact = FoodResourceContact.query \
            .filter_by(email=guest_email, name=guest_name).first()
        if contact is None:
            contact = FoodResourceContact(name=guest_name,
                email=guest_email, phone_number=guest_phone_number)
            db.session.add(contact)
        food_resource = create_food_resource_from_form(form, additional_errors)
        if (len(additional_errors) == 0):
            # Additional fields that are relevant for pending resources.
            food_resource.is_approved = False
            food_resource.food_resource_contact = contact
            food_resource.notes = form.notes.data
            # Commit all database changes.
            db.session.add(food_resource)
            db.session.commit()
            return redirect(url_for('post_guest_add'))
    # If GET request is received or POST request fails due to invalid timeslots,
    # render the page.
    return render_template('guest_add_resource.html', form=form,
        days_of_week=days_of_week,
        additional_errors=additional_errors)
@app.route('/_thank-you')
def post_guest_add():
    """Thank-you page shown after a guest proposes a resource."""
    return render_template('thank_you.html')
@app.route('/admin/manage-resources')
@login_required
def admin():
    """Renders the admin dashboard: approved resources grouped by type,
    plus the pending (guest-proposed) resources grouped by contact."""
    food_resource_types = FoodResourceType.query \
        .order_by(FoodResourceType.name_plural).all()
    # Hide unapproved resources from the approved listing; iterate over a
    # copy because the collection is mutated while looping.
    for food_resource_type in food_resource_types:
        for food_resource in list(food_resource_type.food_resources):
            if food_resource.is_approved == False:
                food_resource_type.food_resources.remove(food_resource)
    contacts = FoodResourceContact.query.all()
    # (leftover debugging print loop over contacts removed)
    return render_template('admin_resources.html',
        food_resource_contacts=contacts,
        days_of_week=days_of_week,
        food_resource_types=food_resource_types)
@app.route('/admin')
def admin_redirect():
    """Redirects the bare /admin URL to the manage-resources dashboard."""
    return redirect(url_for('admin'))
# NOTE(review): no @app.route decorator is attached, so this view is not
# reachable at any URL — possibly disabled on purpose; confirm intent.
@login_required
def invite():
    """ Display invite form and create new User."""
    user_manager = current_app.user_manager
    db_adapter = user_manager.db_adapter
    next = request.args.get('next', _endpoint_url(user_manager.after_login_endpoint))
    reg_next = request.args.get('reg_next', _endpoint_url(user_manager.after_register_endpoint))
    login_form = user_manager.login_form()
    register_form = user_manager.register_form(request.form)
    if request.method!='POST':
        login_form.next.data = register_form.next.data = next
        login_form.reg_next.data = register_form.reg_next.data = reg_next
    # Process valid POST
    if request.method=='POST' and register_form.validate():
        User = db_adapter.UserClass
        user_class_fields = User.__dict__
        user_fields = {}
        if db_adapter.UserEmailClass:
            UserEmail = db_adapter.UserEmailClass
            user_email_class_fields = UserEmail.__dict__
            user_email_fields = {}
        if db_adapter.UserAuthClass:
            UserAuth = db_adapter.UserAuthClass
            user_auth_class_fields = UserAuth.__dict__
            user_auth_fields = {}
        # Enable user account; the "active" flag name differs per schema.
        if db_adapter.UserProfileClass:
            if hasattr(db_adapter.UserProfileClass, 'active'):
                user_auth_fields['active'] = True
            elif hasattr(db_adapter.UserProfileClass, 'is_enabled'):
                user_auth_fields['is_enabled'] = True
            else:
                user_auth_fields['is_active'] = True
        else:
            if hasattr(db_adapter.UserClass, 'active'):
                user_fields['active'] = True
            elif hasattr(db_adapter.UserClass, 'is_enabled'):
                user_fields['is_enabled'] = True
            else:
                user_fields['is_active'] = True
        # For all form fields
        for field_name, field_value in register_form.data.items():
            # Store corresponding Form fields into the User object and/or UserProfile object
            if field_name in user_class_fields:
                user_fields[field_name] = field_value
            if db_adapter.UserEmailClass:
                if field_name in user_email_class_fields:
                    user_email_fields[field_name] = field_value
            if db_adapter.UserAuthClass:
                if field_name in user_auth_class_fields:
                    user_auth_fields[field_name] = field_value
        # Generates temporary password
        password = generate_password(9)
        if db_adapter.UserAuthClass:
            user_auth_fields['password'] = password
        else:
            user_fields['password'] = password
        # stash the clear-text temporary password for the e-mail template
        g.temp_password = password
        # Add User record using named arguments 'user_fields'
        user = db_adapter.add_object(User, **user_fields)
        if db_adapter.UserProfileClass:
            user_profile = user
        # Add UserEmail record using named arguments 'user_email_fields'
        if db_adapter.UserEmailClass:
            user_email = db_adapter.add_object(UserEmail,
                user=user,
                is_primary=True,
                **user_email_fields)
        else:
            user_email = None
        # Add UserAuth record using named arguments 'user_auth_fields'
        if db_adapter.UserAuthClass:
            user_auth = db_adapter.add_object(UserAuth, **user_auth_fields)
            if db_adapter.UserProfileClass:
                user = user_auth
            else:
                user.user_auth = user_auth
        db_adapter.commit()
        # Send 'invite' email and delete new User object if send fails
        if user_manager.send_registered_email:
            try:
                # Send 'invite' email
                _send_registered_email(user, user_email)
            except Exception as e:
                # delete new User object if send fails
                db_adapter.delete_object(user)
                db_adapter.commit()
                raise e
        # Send user_registered signal
        signals.user_registered.send(current_app._get_current_object(), user=user)
        # Redirect if USER_ENABLE_CONFIRM_EMAIL is set
        if user_manager.enable_confirm_email:
            next = request.args.get('next', _endpoint_url(user_manager.after_register_endpoint))
            return redirect(next)
        # Auto-login after register or redirect to login page
        next = request.args.get('next', _endpoint_url(user_manager.after_confirm_endpoint))
        if user_manager.auto_login_after_register:
            return _do_login_user(user, reg_next)  # auto-login
        else:
            return redirect(url_for('user.login')+'?next='+reg_next)  # redirect to login page
    # Process GET or invalid POST
    return render_template(user_manager.register_template,
        form=register_form,
        login_form=login_form,
        register_form=register_form)
@app.route('/_invite_sent')
@login_required
def invite_sent():
    """Confirmation page shown after an invite e-mail is sent."""
    return render_template('invite_sent.html')
@app.route("/_admin_remove_filters")
@login_required
def get_all_food_resource_data():
food_resource_types = FoodResourceType.query \
.order_by(FoodResourceType.name_plural).all()
for food_resource_type in food_resource_types:
for food_resource in list(food_resource_type.food_resources):
if food_resource.is_approved == False:
food_resource_type.food_resources.remove(food_resource)
return jsonify(days_of_week=days_of_week,
food_resource_types=[i.serialize_food_resource_type() for i in \
food_resource_types])
@app.route('/_admin_apply_filters')
@login_required
def get_filtered_food_resource_data():
    """Returns, as JSON, the food resources matching the boolean filters
    (and the optional zip code) passed as query-string parameters."""
    # Collect boolean parameters passed via the query string.
    has_zip_code_filter = request.args.get('has_zip_code_filter', 0, type=int)
    zip_code = request.args.get('zip_code', 0, type=int)
    has_families_and_children_filter = request.args.get(
        'has_families_and_children_filter', 0, type=int)
    has_seniors_filter = request.args.get('has_seniors_filter', 0, type=int)
    has_wheelchair_accessible_filter = request.args.get(
        'has_wheelchair_accessible_filter', 0, type=int)
    has_accepts_snap_filter = request.args.get(
        'has_accepts_snap_filter', 0, type=int)
    # Create empty arrays to hold food resources.
    all_resources = []
    food_resource_types = FoodResourceType.query \
        .order_by(FoodResourceType.name_plural).all()
    # Zip code is one of the filters.
    if has_zip_code_filter:
        # Iterate through all food resource types.
        for i, food_resource_type in enumerate(food_resource_types):
            # Filter for each kind of food resource with a specific zip code.
            all_resources.append([])
            get_food_resources_by_location_type_and_zip_code(
                all_resources[i],     # List to populate.
                food_resource_type,   # Location type by which to filter.
                zip_code              # Zip code by which to filter.
            )
    # Zip code is not one of the filters.
    else:
        # Iterate through all food resource types.
        for i, food_resource_type in enumerate(food_resource_types):
            # Filter for each kind of food resource without a specific zip code.
            all_resources.append([])
            get_food_resources_by_location_type(
                all_resources[i],     # List to populate.
                food_resource_type    # Location type by which to filter.
            )
    # Filter each list by other boolean criteria.
    for list_to_filter in all_resources:
        filter_food_resources(list_to_filter, has_families_and_children_filter,
            has_seniors_filter, has_wheelchair_accessible_filter,
            has_accepts_snap_filter)
    # Serialize the grouped results; local names renamed from 'json' and
    # 'list', which shadowed the common module name and the builtin.
    serialized = []
    for i, resources in enumerate(all_resources):
        serialized.append([])
        for food_resource in resources:
            serialized[i].append(food_resource.serialize_food_resource())
    return jsonify(days_of_week=days_of_week, food_resources=serialized)
@app.route('/map')
def map():
    """Renders the interactive food-resource map page."""
    food_resource_types = FoodResourceType.query \
        .order_by(FoodResourceType.name_singular).all()
    return render_template('newmaps.html',
        food_resource_types=food_resource_types)
@app.route('/_map')
def address_food_resources():
    """Returns the approved food resources in a given zip code as JSON."""
    zip_code = request.args.get('zip_code', 0, type=int)
    food_resources = db.session.query(FoodResource) \
        .join(FoodResource.address) \
        .filter(Address.zip_code==zip_code, FoodResource.is_approved==True) \
        .order_by(FoodResource.name).all()
    return jsonify(addresses=[i.serialize_food_resource() for i in food_resources])
@app.route('/_edit', methods=['GET', 'POST'])
@login_required
def save_page():
    """Persists edited HTML content for a static page.

    Expects 'edit_data' (new content) and 'page_name' (page key) in the
    POSTed form.
    """
    data = request.form.get('edit_data')
    name = request.form.get('page_name')
    if not data:
        # the original fell through and returned None, causing a 500
        return 'No data to save.'
    page = HTML.query.filter_by(page = name).first()
    if page is None:
        # the original dereferenced page unconditionally (AttributeError)
        return 'Unknown page ' + str(name) + '.'
    page.value = data
    db.session.commit()
    # the original message had no spaces around the inserted content
    return 'Added ' + data + ' to database.'
@app.route('/_remove_food_resource_type')
def remove_food_resource_type():
    """Deletes a food resource type and all of its resources together with
    their timeslots, phone numbers and addresses."""
    id = request.args.get("id", type=int)
    food_resource_type = FoodResourceType.query.filter_by(id=id).first()
    # Guard against an unknown id, consistent with remove(); the original
    # crashed with AttributeError on None.
    if not food_resource_type:
        return jsonify(message="failed")
    # Remove the food resources and their timeslots, address, and phone
    # numbers from the database.
    for food_resource in food_resource_type.food_resources:
        for timeslot in food_resource.timeslots:
            db.session.delete(timeslot)
        for phone_number in food_resource.phone_numbers:
            db.session.delete(phone_number)
        db.session.delete(food_resource.address)
        db.session.delete(food_resource)
    # Remove the food resource type from the database.
    db.session.delete(food_resource_type)
    db.session.commit()
    return jsonify(success="success")
@app.route('/_search_query', methods=['GET', 'POST'])
def save_search_query():
    """Records a zip-code search made by an anonymous visitor."""
    # Only record searches for regular users.
    if(current_user.is_authenticated()):
        # a bare 'return' handed Flask None and caused a 500; return an
        # empty body instead
        return ''
    zip_code = request.form.get('zipCode')
    if(zip_code):
        zip_requested = ZipSearch.query.filter_by(zip_code = zip_code).first()
        if(zip_requested):
            zip_requested.search_count = zip_requested.search_count + 1
        else:
            zip_requested = ZipSearch(zip_code = zip_code, search_count = 1)
            db.session.add(zip_requested)
        db.session.commit()
        # the original message was missing the space before the zip code
        return 'Recorded a search for ' + zip_code
    return ''
@app.route('/_remove')
@login_required
def remove():
    """Deletes a food resource (approved or pending) and e-mails the
    proposing contact, if any, that the resource was rejected."""
    id = request.args.get("id", type=int)
    food_resource = FoodResource.query.filter_by(id=id).first()
    if not food_resource:
        return jsonify(message="failed")
    # Determine whether the food resource being removed is approved or pending.
    # Needed for front-end update after food resource is removed.
    is_approved = False
    if (food_resource.is_approved):
        is_approved = True
    contact = food_resource.food_resource_contact
    if contact and contact.email:
        send_email(
            recipient = contact.email,
            subject = food_resource.name + ' has been rejected',
            html_message = 'Dear ' + contact.name + ', \
<p>Your proposed resource <b>' + food_resource.name +
                '</b> was rejected. Please contact an admin to find out why.\
</p><br> Sincerely,<br>' + app.config['USER_APP_NAME'],
            text_message = 'Your proposed resource ' + food_resource.name +
                ' was rejected. Please contact an admin to find out why.'
        )
    # If the food resource has a contact and its contact has submitted no other
    # food resources to the database, remove him/her from the database.
    if contact and len(contact.food_resource) <= 1:
        db.session.delete(contact)
    # Remove the food resource and its timeslots, address, and phone numbers
    # from the database.
    for timeslot in food_resource.timeslots:
        db.session.delete(timeslot)
    for phone_number in food_resource.phone_numbers:
        db.session.delete(phone_number)
    db.session.delete(food_resource.address)
    # the original deleted food_resource twice; once is enough
    db.session.delete(food_resource)
    db.session.commit()
    return jsonify(is_approved=is_approved)
@app.route('/_approve')
@login_required
def approve():
    """Approves a pending food resource and notifies its contact by e-mail."""
    id = request.args.get("id", type=int)
    food_resource = FoodResource.query.filter_by(id=id).first()
    # Guard against an unknown id, consistent with remove(); the original
    # crashed with AttributeError on None.
    if not food_resource:
        return jsonify(message="failed")
    contact = food_resource.food_resource_contact
    # contact may be None (remove() already guards for that case); the
    # original dereferenced it unconditionally.
    if contact and contact.email:
        send_email(
            recipient = contact.email,
            subject = food_resource.name + ' has been approved',
            html_message = 'Dear ' + contact.name + ',\
<p>Good news! Your proposed resource <b>' + food_resource.name +
                '</b> was approved. Thanks so much!</p><br> Sincerely,<br>' +
                app.config['USER_APP_NAME'],
            text_message = 'Good news! Your proposed resource ' +
                food_resource.name + ' was approved. Thanks so much!'
        )
    if contact:
        if len(contact.food_resource) <= 1:
            db.session.delete(contact)
        else:
            contact.food_resource.remove(food_resource)
    food_resource.is_approved = True
    db.session.commit()
    return jsonify(message="success")
@app.route('/about')
def about():
    """Renders the About page from the stored HTML snippet."""
    return render_template('about.html',
        html_string = HTML.query.filter_by(page = 'about-page').first())
@app.route('/admin/analytics')
@login_required
def analytics():
    """Shows search analytics: every zip-code search plus the top ten."""
    zip_codes_all_query = ZipSearch.query.order_by(ZipSearch.search_count.desc())
    # run the ordered query once and reuse it; the original rebuilt and
    # executed the identical query a second time
    zip_codes_all = zip_codes_all_query.all()
    zip_codes_limit = zip_codes_all_query.limit(10)
    return render_template('charts.html', zip_codes_all = zip_codes_all, zip_codes_limit = zip_codes_limit)
@app.route('/contact')
def contact():
    """Renders the Contact page from the stored HTML snippet."""
    return render_template('contact.html',
        html_string = HTML.query.filter_by(page = 'contact-page').first())
@app.route('/resources/wic')
def wic():
    """Render the WIC information page from the stored editable HTML."""
    page_html = HTML.query.filter_by(page='wic-info-page').first()
    return render_template('wic_info.html', html_string=page_html)
@app.route('/resources/snap')
def snap():
    """Render the SNAP information page from the stored editable HTML."""
    page_html = HTML.query.filter_by(page='snap-info-page').first()
    return render_template('snap_info.html', html_string=page_html)
@app.route('/resources/summer-meals')
def summer_meals():
    """Render the summer meals information page from the stored editable HTML."""
    page_html = HTML.query.filter_by(page='summer-info-page').first()
    return render_template('summer_meals.html', html_string=page_html)
@app.route('/resources/seniors')
def seniors():
    """Render the seniors information page from the stored editable HTML."""
    page_html = HTML.query.filter_by(page='seniors-info-page').first()
    return render_template('seniors_info.html', html_string=page_html)
@app.route('/resources/farmers')
def farmers():
    """Render the farmers' markets information page from the stored editable HTML."""
    page_html = HTML.query.filter_by(page='farmers-info-page').first()
    return render_template('farmers_info.html', html_string=page_html)
@app.route('/resources/neighborhood')
def neighborhood():
    """Render the neighborhood resources information page from the stored editable HTML."""
    page_html = HTML.query.filter_by(page='neighborhood-info-page').first()
    return render_template('neighborhood_info.html', html_string=page_html)
@app.route('/resources/share')
def share():
    """Render the SHARE program information page from the stored editable HTML."""
    page_html = HTML.query.filter_by(page='share-info-page').first()
    return render_template('share_info.html', html_string=page_html)
@app.route('/admin/files')
@login_required
def files():
    """Render the admin CSV import/export page."""
    return render_template('file_inputoutput.html')
@app.route('/_csv_input', methods=['GET', 'POST'])
@login_required
def csv_input():
    """Import food resources from an uploaded CSV file.

    Saves the upload to a scratch path, runs the importer, and returns a
    success JSON payload, or a 500 response listing the import errors.
    """
    file = request.files['file']
    path = '.csv_input.csv'
    # Only save/import when an upload was actually provided. The original
    # called file.save() before checking, and left ``errors`` unbound
    # (NameError) when the check failed.
    if file:
        file.save(path)
        try:
            errors = import_file(path)
        except Exception as e:
            errors = [str(e)]
    else:
        errors = ['No file provided.']
    # Use ==, not "is": identity comparison with an int is an implementation
    # detail of CPython and not a correctness guarantee.
    if errors is None or len(errors) == 0:
        return jsonify(message = "success")
    else:
        response = jsonify({
            'status': 500,
            'errors': errors
        })
        response.status_code = 500
        return response
@app.route('/_csv_download')
@login_required
def download():
    """Export every approved food resource to CSV and stream it back.

    Writes a scratch file (.mydump.csv) in the same column layout the CSV
    importer expects, then streams it as an attachment.
    """
    days = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday',
        'Saturday']
    yes_hint = ' (either \'Yes\' or leave blank)'
    time_hint = ' (military time - e.g., 8:00 or 17:00)'
    resources = FoodResource.query.filter_by(is_approved = True).all()
    # Build the header row; the per-day columns follow a fixed pattern, so
    # generate them instead of spelling out all 28 literals.
    header = ['', 'Type (' + get_string_of_all_food_resource_types() + ')',
        'Name', 'Address - Line 1', 'Address - Line 2 (optional)', 'City',
        'State', 'Zip Code', 'Phone Number (optional)', 'Website (optional)',
        'Description (optional)',
        'Families and children?' + yes_hint,
        'Seniors?' + yes_hint,
        'Wheelchair accessible?' + yes_hint,
        'Accepts SNAP?' + yes_hint,
        'Accepts FMNP Vouchers?' + yes_hint,
        'Accepts Philly Food Bucks?' + yes_hint,
        'Hours Available?' + yes_hint]
    header += ['Open ' + day + '?' + yes_hint for day in days]
    for day in days:
        header.append(day + ' Opening Time' + time_hint)
        header.append(day + ' Closing Time' + time_hint)

    def does_timeslot_exist(timeslots, index):
        # A resource may have fewer than seven timeslots.
        try:
            return (timeslots[index] is not None)
        except IndexError:
            return False

    # Use a context manager so the dump is flushed and closed BEFORE it is
    # re-opened for streaming; the original never closed the writer, so the
    # streamed file could be truncated.
    with open('.mydump.csv', 'wb') as outfile:
        outcsv = csv.writer(outfile)
        outcsv.writerow(['Table 1'])
        outcsv.writerow(header)
        for row_counter, resource in enumerate(resources, 1):
            timeslots = resource.timeslots
            # Guard against resources with no phone number on record.
            phone = resource.phone_numbers[0].number \
                if resource.phone_numbers else ''
            row = [row_counter, resource.food_resource_type.enum,
                resource.name, resource.address.line1, resource.address.line2,
                resource.address.city, resource.address.state,
                resource.address.zip_code, phone, resource.url,
                resource.description,
                'Yes' if resource.is_for_family_and_children else '',
                'Yes' if resource.is_for_seniors else '',
                'Yes' if resource.is_wheelchair_accessible else '',
                'Yes' if resource.is_accepts_snap else '',
                # NOTE(review): the original wrote these literal header
                # strings instead of per-resource values; the FMNP/Food Bucks
                # model fields are not visible from this file, so the
                # behavior is preserved -- TODO replace with the real flags.
                'Accepts FMNP Vouchers?', 'Accepts Philly Food Bucks?',
                'Yes' if resource.are_hours_available else '']
            for day_index in range(7):
                row.append(
                    'Yes' if does_timeslot_exist(timeslots, day_index) else '')
            for day_index in range(7):
                if does_timeslot_exist(timeslots, day_index):
                    row.append(timeslots[day_index].start_time.strftime('%H:%M'))
                    row.append(timeslots[day_index].end_time.strftime('%H:%M'))
                else:
                    row.append('')
                    row.append('')
            outcsv.writerow(row)

    def generate():
        with open('.mydump.csv', 'rb') as f:
            for line in f:
                yield line
    response = Response(generate(), mimetype='text/csv')
    filename = 'resources_generated_at_' + str(datetime.now()) + '.csv'
    response.headers["Content-Disposition"] = "attachment; filename=" + filename
    return response
@app.route('/admin/food-resource-types')
@login_required
def view_food_resource_types():
    """List every food resource type, alphabetized by singular name."""
    types = FoodResourceType.query \
        .order_by(FoodResourceType.name_singular).all()
    return render_template('food_resource_types.html',
        food_resource_types=types)
@app.route('/admin/new-food-resource-type', methods=['GET', 'POST'])
@app.route('/admin/edit-food-resource-type/<id>', methods=['GET', 'POST'])
@login_required
def new_food_resource_type(id=None):
    """Create a new food resource type or edit an existing one.

    With no ``id`` this renders/processes an empty "add" form; with an ``id``
    it pre-populates the form from, and saves back to, the existing
    FoodResourceType.
    """
    form = AddNewFoodResourceTypeForm(request.form)
    form.id.data = None
    # Fetch the record once; the original re-queried it in every branch.
    food_resource_type = None
    if id is not None:
        food_resource_type = FoodResourceType.query.filter_by(id=id).first()
    # Offer only pin colors not already taken by another type...
    choices = []
    unused_pins = ColoredPin.query.filter_by(food_resource=None) \
        .order_by(ColoredPin.color_name).all()
    for unused_pin in unused_pins:
        choices.append((unused_pin.color_name, unused_pin.color_name))
    # ...plus, when editing, the type's current color listed first.
    if food_resource_type:
        choices.insert(0, (food_resource_type.colored_pin.color_name,
            food_resource_type.colored_pin.color_name))
    form.color.choices = choices
    if id is None:
        title = "Add New Food Resource Type"
    else:
        title = "Edit Food Resource Type"
        if food_resource_type is not None:
            form.id.data = food_resource_type.id
    # GET request for an edit: pre-populate the form from the record.
    # (A leftover debug print was removed here.)
    if request.method == 'GET' and id is not None:
        if food_resource_type is None:
            return render_template('404.html')
        form.name_singular.data = food_resource_type.name_singular
        form.name_plural.data = food_resource_type.name_plural
    if request.method == 'POST' and form.validate():
        colored_pin = ColoredPin.query.filter_by(color_name=form.color.data) \
            .first()
        if food_resource_type:
            # Edit the existing food resource type in place.
            food_resource_type.name_singular = form.name_singular.data
            food_resource_type.name_plural = form.name_plural.data
            food_resource_type.colored_pin = colored_pin
            food_resource_type.recreate_fields()
        else:
            # Create a new food resource type. (The original could reach
            # db.session.add(None) when an edit id no longer existed; now a
            # missing record simply falls back to creating a fresh one.)
            food_resource_type = FoodResourceType(
                name_singular = form.name_singular.data,
                name_plural = form.name_plural.data,
                colored_pin = colored_pin
            )
        # Save and commit database changes.
        db.session.add(food_resource_type)
        db.session.commit()
        return redirect(url_for('view_food_resource_types'))
    return render_template('add_resource_type.html', form=form, title=title)
Spacing
from app import app, db, utils
from utils import *
from models import *
from forms import *
from flask import render_template, flash, redirect, session, url_for, request, \
g, jsonify, current_app, Response
from flask.ext.login import login_user, logout_user, current_user, \
login_required
from variables import *
from datetime import time
from utils import generate_password, import_file
from flask_user import login_required, signals
from flask_user.emails import send_email
from flask_user.views import _endpoint_url, _send_registered_email
from flask_login import current_user, login_user, logout_user
from tempfile import NamedTemporaryFile
import csv, time
@app.route('/')
def index():
    """Serve the application's landing page."""
    return render_template('base.html')
@app.route('/admin/new', methods=['GET', 'POST'])
@app.route('/admin/edit/<id>', methods=['GET', 'POST'])
@login_required
def new(id=None):
    """Admin view for creating a food resource or editing an existing one.

    GET with an ``id`` pre-populates the form from the stored resource; POST
    replaces the stored resource (delete + re-add) with the submitted data.
    """
    form = AddNewFoodResourceForm(request.form)
    # Set timeslot choices for every day-of-week sub-form.
    for timeslots in form.daily_timeslots:
        for timeslot in timeslots.timeslots:
            timeslot.starts_at.choices=get_possible_opening_times()
            timeslot.ends_at.choices=get_possible_closing_times()
    # Set food resource type choices from the database, ordered by name.
    food_resource_types = FoodResourceType.query \
        .order_by(FoodResourceType.name_plural).all()
    food_resource_types_choices = []
    for food_resource_type in food_resource_types:
        food_resource_types_choices.append(
            (food_resource_type.enum,
            food_resource_type.name_singular)
        )
    form.location_type.choices = food_resource_types_choices
    # Create a new food resource.
    if id is None:
        title = "Add New Food Resource"
        # NOTE(review): IndexError if no resource types exist yet -- assumes
        # at least one FoodResourceType row; confirm against seed data.
        food_resource_type = food_resource_types_choices[0][0]
    # Edit an existing food resource.
    else:
        title = "Edit Food Resource"
    # GET request: populate the form from the existing resource, if any.
    if request.method == 'GET':
        if id is not None:
            food_resource = FoodResource.query.filter_by(id=id).first()
            if food_resource is None:
                return render_template('404.html')
            # Data that can be directly retrieved from the database.
            form.name.data = food_resource.name
            form.address_line1.data = food_resource.address.line1
            form.address_line2.data = food_resource.address.line2
            form.address_city.data = food_resource.address.city
            form.address_state.data = food_resource.address.state
            form.address_zip_code.data = food_resource.address.zip_code
            form.phone_number.data = food_resource.phone_numbers[0].number
            form.website.data = food_resource.url
            form.additional_information.data = food_resource.description
            form.is_for_family_and_children.data = \
                food_resource.is_for_family_and_children
            form.is_for_seniors.data = food_resource.is_for_seniors
            form.is_wheelchair_accessible.data = \
                food_resource.is_wheelchair_accessible
            form.is_accepts_snap.data = food_resource.is_accepts_snap
            form.location_type.data = food_resource.food_resource_type.enum
            # Data that must be interpreted before being rendered: the form
            # uses "yes"/"no" strings where the model stores a boolean.
            if food_resource.are_hours_available == True:
                form.are_hours_available.data = "yes"
            else:
                form.are_hours_available.data = "no"
            # Distribute stored timeslots into the per-day sub-forms, keeping
            # a running count of how many slots each weekday has so far.
            num_timeslots_per_day = [0] * 7
            for timeslot in food_resource.timeslots:
                day_of_week_index = timeslot.day_of_week
                timeslot_index = num_timeslots_per_day[timeslot.day_of_week]
                num_timeslots_per_day[timeslot.day_of_week] += 1
                start_time = timeslot.start_time
                end_time = timeslot.end_time
                form.daily_timeslots[day_of_week_index] \
                    .timeslots[timeslot_index].starts_at.data = \
                    start_time.strftime("%H:%M")
                form.daily_timeslots[day_of_week_index] \
                    .timeslots[timeslot_index].ends_at.data = \
                    end_time.strftime("%H:%M")
                form.is_open[day_of_week_index].is_open.data = "open"
                form.daily_timeslots[day_of_week_index].num_timeslots.data = \
                    num_timeslots_per_day[timeslot.day_of_week]
    # POST request: build the resource from the form and save it.
    additional_errors = []
    if request.method == 'POST' and form.validate():
        food_resource = create_food_resource_from_form(form, additional_errors)
        if (len(additional_errors) == 0):
            # If a food resource is being edited, remove its old version from
            # the database before adding the replacement.
            if id is not None:
                fr = FoodResource.query.filter_by(id=id).first()
                if fr:
                    db.session.delete(fr)
            # Commit all database changes.
            db.session.add(food_resource)
            db.session.commit()
            return redirect(url_for('admin'))
    # If GET request is received or POST request fails due to invalid
    # timeslots, render the page (with any collected errors).
    return render_template('add_resource.html', form=form,
        days_of_week=days_of_week,
        additional_errors=additional_errors, title=title)
# Allows non-admins to propose food resources, which an admin must approve.
@app.route('/propose-resource', methods=['GET', 'POST'])
def guest_new_food_resource():
    """Public view for proposing a new food resource.

    On a valid POST, creates (or reuses) a FoodResourceContact for the guest
    and stores the resource with ``is_approved = False``.
    """
    form = NonAdminAddNewFoodResourceForm(request.form)
    # Set timeslot choices for every day-of-week sub-form.
    for timeslots in form.daily_timeslots:
        for timeslot in timeslots.timeslots:
            timeslot.starts_at.choices=get_possible_opening_times()
            timeslot.ends_at.choices=get_possible_closing_times()
    # Set food resource type choices from the database, ordered by name.
    food_resource_types = FoodResourceType.query \
        .order_by(FoodResourceType.name_plural).all()
    food_resource_types_choices = []
    for food_resource_type in food_resource_types:
        food_resource_types_choices.append(
            (food_resource_type.enum,
            food_resource_type.name_singular)
        )
    form.location_type.choices = food_resource_types_choices
    # Initialize location type to the first available choice on GET.
    if request.method == 'GET':
        form.location_type.data = food_resource_types_choices[0][0]
    additional_errors = []
    if request.method == 'POST' and form.validate():
        # Check if this guest has added resources in the past. If not,
        # create a new FoodResourceContact keyed by (email, name).
        guest_name = form.your_name.data
        guest_email = form.your_email_address.data
        guest_phone_number = form.your_phone_number.data
        contact = FoodResourceContact.query \
            .filter_by(email=guest_email, name=guest_name).first()
        if contact is None:
            contact = FoodResourceContact(name=guest_name,
                email=guest_email, phone_number=guest_phone_number)
            db.session.add(contact)
        food_resource = create_food_resource_from_form(form, additional_errors)
        if (len(additional_errors) == 0):
            # Additional fields that are relevant for pending resources.
            food_resource.is_approved = False
            food_resource.food_resource_contact = contact
            food_resource.notes = form.notes.data
            # Commit all database changes.
            db.session.add(food_resource)
            db.session.commit()
            return redirect(url_for('post_guest_add'))
    # If GET request is received or POST request fails due to invalid
    # timeslots, render the page (with any collected errors).
    return render_template('guest_add_resource.html', form=form,
        days_of_week=days_of_week,
        additional_errors=additional_errors)
@app.route('/_thank-you')
def post_guest_add():
    """Thank-you page shown after a guest proposes a resource."""
    return render_template('thank_you.html')
@app.route('/admin/manage-resources')
@login_required
def admin():
    """Render the admin management page with approved resources by type."""
    food_resource_types = FoodResourceType.query \
        .order_by(FoodResourceType.name_plural).all()
    for food_resource_type in food_resource_types:
        # Iterate over a copy: unapproved resources are removed from the
        # relationship collection while it is being walked.
        for food_resource in list(food_resource_type.food_resources):
            if food_resource.is_approved == False:
                food_resource_type.food_resources.remove(food_resource)
    contacts = FoodResourceContact.query.all()
    # (Removed a leftover Python 2 debug loop that printed each contact's
    # resource types; it produced console noise and is a syntax error on
    # Python 3.)
    return render_template('admin_resources.html',
        food_resource_contacts=contacts,
        days_of_week=days_of_week,
        food_resource_types=food_resource_types)
@app.route('/admin')
def admin_redirect():
    """Convenience alias: /admin redirects to the manage-resources page."""
    return redirect(url_for('admin'))
# NOTE(review): this view has @login_required but no @app.route decorator,
# so it is never reachable as written -- confirm the intended URL. The body
# is adapted from flask_user's register view.
@login_required
def invite():
    """Display invite form and create a new User with a temporary password.

    Mirrors flask_user's registration flow: copies matching form fields into
    the User / UserEmail / UserAuth models, generates a temporary password,
    sends the 'registered' email, and fires the user_registered signal.
    """
    user_manager = current_app.user_manager
    db_adapter = user_manager.db_adapter
    # Resolve post-login / post-register redirect targets.
    next = request.args.get('next', _endpoint_url(user_manager.after_login_endpoint))
    reg_next = request.args.get('reg_next', _endpoint_url(user_manager.after_register_endpoint))
    login_form = user_manager.login_form()
    register_form = user_manager.register_form(request.form)
    if request.method!='POST':
        login_form.next.data = register_form.next.data = next
        login_form.reg_next.data = register_form.reg_next.data = reg_next
    # Process valid POST
    if request.method=='POST' and register_form.validate():
        # Collect the class-level field names so form data can be routed to
        # the right model(s) below.
        User = db_adapter.UserClass
        user_class_fields = User.__dict__
        user_fields = {}
        if db_adapter.UserEmailClass:
            UserEmail = db_adapter.UserEmailClass
            user_email_class_fields = UserEmail.__dict__
            user_email_fields = {}
        if db_adapter.UserAuthClass:
            UserAuth = db_adapter.UserAuthClass
            user_auth_class_fields = UserAuth.__dict__
            user_auth_fields = {}
        # Enable the user account, whichever "active" flag the model uses.
        if db_adapter.UserProfileClass:
            if hasattr(db_adapter.UserProfileClass, 'active'):
                user_auth_fields['active'] = True
            elif hasattr(db_adapter.UserProfileClass, 'is_enabled'):
                user_auth_fields['is_enabled'] = True
            else:
                user_auth_fields['is_active'] = True
        else:
            if hasattr(db_adapter.UserClass, 'active'):
                user_fields['active'] = True
            elif hasattr(db_adapter.UserClass, 'is_enabled'):
                user_fields['is_enabled'] = True
            else:
                user_fields['is_active'] = True
        # Route every submitted form field into the User object and/or the
        # UserEmail / UserAuth objects whose classes declare that field.
        for field_name, field_value in register_form.data.items():
            if field_name in user_class_fields:
                user_fields[field_name] = field_value
            if db_adapter.UserEmailClass:
                if field_name in user_email_class_fields:
                    user_email_fields[field_name] = field_value
            if db_adapter.UserAuthClass:
                if field_name in user_auth_class_fields:
                    user_auth_fields[field_name] = field_value
        # Generate a temporary password; stash it on ``g`` so the invite
        # email template can include it.
        password = generate_password(9)
        if db_adapter.UserAuthClass:
            user_auth_fields['password'] = password
        else:
            user_fields['password'] = password
        g.temp_password = password
        # Add User record using named arguments 'user_fields'.
        user = db_adapter.add_object(User, **user_fields)
        if db_adapter.UserProfileClass:
            user_profile = user
        # Add UserEmail record using named arguments 'user_email_fields'.
        if db_adapter.UserEmailClass:
            user_email = db_adapter.add_object(UserEmail,
                user=user,
                is_primary=True,
                **user_email_fields)
        else:
            user_email = None
        # Add UserAuth record using named arguments 'user_auth_fields'.
        if db_adapter.UserAuthClass:
            user_auth = db_adapter.add_object(UserAuth, **user_auth_fields)
            if db_adapter.UserProfileClass:
                user = user_auth
            else:
                user.user_auth = user_auth
        db_adapter.commit()
        # Send 'invite' email; roll back the new User if the send fails.
        if user_manager.send_registered_email:
            try:
                _send_registered_email(user, user_email)
            except Exception as e:
                db_adapter.delete_object(user)
                db_adapter.commit()
                raise e
        # Send user_registered signal.
        signals.user_registered.send(current_app._get_current_object(), user=user)
        # Redirect if USER_ENABLE_CONFIRM_EMAIL is set.
        if user_manager.enable_confirm_email:
            next = request.args.get('next', _endpoint_url(user_manager.after_register_endpoint))
            return redirect(next)
        # Auto-login after register or redirect to the login page.
        next = request.args.get('next', _endpoint_url(user_manager.after_confirm_endpoint))
        if user_manager.auto_login_after_register:
            # NOTE(review): _do_login_user is not among this file's visible
            # imports -- confirm it is provided elsewhere or this branch
            # raises NameError.
            return _do_login_user(user, reg_next) # auto-login
        else:
            return redirect(url_for('user.login')+'?next='+reg_next) # redirect to login page
    # Process GET or invalid POST.
    return render_template(user_manager.register_template,
        form=register_form,
        login_form=login_form,
        register_form=register_form)
@app.route('/_invite_sent')
@login_required
def invite_sent():
    """Confirmation page shown after an admin sends an invite."""
    return render_template('invite_sent.html')
@app.route("/_admin_remove_filters")
@login_required
def get_all_food_resource_data():
food_resource_types = FoodResourceType.query \
.order_by(FoodResourceType.name_plural).all()
for food_resource_type in food_resource_types:
for food_resource in list(food_resource_type.food_resources):
if food_resource.is_approved == False:
food_resource_type.food_resources.remove(food_resource)
return jsonify(days_of_week=days_of_week,
food_resource_types=[i.serialize_food_resource_type() for i in \
food_resource_types])
@app.route('/_admin_apply_filters')
@login_required
def get_filtered_food_resource_data():
    """Return food resources matching the admin's filters, grouped by type.

    Boolean filters and an optional zip code arrive as integer query
    parameters; the response contains one list of serialized resources per
    resource type.
    """
    # Collect boolean parameters passed via the query string.
    has_zip_code_filter = request.args.get('has_zip_code_filter', 0, type=int)
    zip_code = request.args.get('zip_code', 0, type=int)
    has_families_and_children_filter = request.args.get(
        'has_families_and_children_filter', 0, type=int)
    has_seniors_filter = request.args.get('has_seniors_filter', 0, type=int)
    has_wheelchair_accessible_filter = request.args.get(
        'has_wheelchair_accessible_filter', 0, type=int)
    has_accepts_snap_filter = request.args.get(
        'has_accepts_snap_filter', 0, type=int)
    all_resources = []
    food_resource_types = FoodResourceType.query \
        .order_by(FoodResourceType.name_plural).all()
    # One list of matching resources per type. The original duplicated this
    # whole loop for the with/without-zip-code cases; only the lookup helper
    # differs.
    for i, food_resource_type in enumerate(food_resource_types):
        all_resources.append([])
        if has_zip_code_filter:
            get_food_resources_by_location_type_and_zip_code(
                all_resources[i],    # List to populate.
                food_resource_type,  # Location type by which to filter.
                zip_code             # Zip code by which to filter.
            )
        else:
            get_food_resources_by_location_type(
                all_resources[i],    # List to populate.
                food_resource_type   # Location type by which to filter.
            )
    # Apply the remaining boolean filters in place.
    for list_to_filter in all_resources:
        filter_food_resources(list_to_filter, has_families_and_children_filter,
            has_seniors_filter, has_wheelchair_accessible_filter,
            has_accepts_snap_filter)
    # Serialize each group (the original shadowed the builtin ``list`` here).
    json = [[resource.serialize_food_resource() for resource in group]
        for group in all_resources]
    return jsonify(days_of_week=days_of_week, food_resources=json)
@app.route('/map')
def map():
    """Render the interactive resource map page."""
    resource_types = FoodResourceType.query \
        .order_by(FoodResourceType.name_singular).all()
    return render_template('newmaps.html',
        food_resource_types=resource_types)
@app.route('/_map')
def address_food_resources():
    """Return approved food resources in a zip code, sorted by name, as JSON."""
    zip_code = request.args.get('zip_code', 0, type=int)
    matches = db.session.query(FoodResource) \
        .join(FoodResource.address) \
        .filter(Address.zip_code==zip_code, FoodResource.is_approved==True) \
        .order_by(FoodResource.name).all()
    return jsonify(addresses=[m.serialize_food_resource() for m in matches])
@app.route('/_edit', methods=['GET', 'POST'])
@login_required
def save_page():
    """Persist edited page HTML posted from the admin page editor."""
    data = request.form.get('edit_data')
    name = request.form.get('page_name')
    # A Flask view must return a response: the original returned None when no
    # data was posted, which raises a 500.
    if not data:
        return 'No edit data received.'
    page = HTML.query.filter_by(page = name).first()
    # Guard against an unknown page name (previously an AttributeError).
    if page is None:
        return 'No page named ' + str(name) + ' exists.'
    page.value = data
    db.session.commit()
    return 'Added' + data + 'to database.'
@app.route('/_remove_food_resource_type')
@login_required
def remove_food_resource_type():
    """Delete a food resource type and every resource that belongs to it.

    Security: every other destructive admin endpoint in this file is wrapped
    in @login_required; this one was left unprotected, so the decorator has
    been added.
    """
    id = request.args.get("id", type=int)
    food_resource_type = FoodResourceType.query.filter_by(id=id).first()
    # Remove each resource together with its dependent rows (timeslots,
    # phone numbers, address) so no orphans are left behind.
    for food_resource in food_resource_type.food_resources:
        for timeslot in food_resource.timeslots:
            db.session.delete(timeslot)
        for phone_number in food_resource.phone_numbers:
            db.session.delete(phone_number)
        db.session.delete(food_resource.address)
        db.session.delete(food_resource)
    # Remove the food resource type itself from the database.
    db.session.delete(food_resource_type)
    db.session.commit()
    return jsonify(success="success")
@app.route('/_search_query', methods=['GET', 'POST'])
def save_search_query():
    """Record a zip-code search for analytics (regular users only)."""
    # Skip logged-in admins, but still return a valid (empty) response:
    # returning None from a Flask view raises a 500.
    if current_user.is_authenticated():
        return ''
    zip_code = request.form.get('zipCode')
    # Without a zip code there is nothing to record; the original fell
    # through and crashed concatenating None into the response string.
    if not zip_code:
        return ''
    zip_requested = ZipSearch.query.filter_by(zip_code = zip_code).first()
    if zip_requested:
        zip_requested.search_count = zip_requested.search_count + 1
    else:
        zip_requested = ZipSearch(zip_code = zip_code, search_count = 1)
        db.session.add(zip_requested)
    db.session.commit()
    # (Space added before the zip code; the words previously ran together.)
    return 'Recorded a search for ' + zip_code
@app.route('/_remove')
@login_required
def remove():
    """Delete a food resource (approved or pending) and its dependent rows.

    Emails the submitting contact that the resource was rejected, and returns
    whether the removed resource had been approved so the front end can
    update the correct list.
    """
    id = request.args.get("id", type=int)
    food_resource = FoodResource.query.filter_by(id=id).first()
    if not food_resource:
        return jsonify(message="failed")
    # Needed for the front-end update after the food resource is removed.
    is_approved = bool(food_resource.is_approved)
    contact = food_resource.food_resource_contact
    if contact and contact.email:
        send_email(
            recipient = contact.email,
            subject = food_resource.name + ' has been rejected',
            html_message = 'Dear ' + contact.name + ', \
            <p>Your proposed resource <b>' + food_resource.name +
            '</b> was rejected. Please contact an admin to find out why.\
            </p><br> Sincerely,<br>' + app.config['USER_APP_NAME'],
            text_message = 'Your proposed resource ' + food_resource.name +
            ' was rejected. Please contact an admin to find out why.'
        )
    # If the contact has submitted no other food resources, remove him/her
    # from the database as well.
    if contact and len(contact.food_resource) <= 1:
        db.session.delete(contact)
    # Remove the food resource's timeslots, phone numbers, and address, then
    # the resource itself -- exactly once (the original deleted it twice).
    for timeslot in food_resource.timeslots:
        db.session.delete(timeslot)
    for phone_number in food_resource.phone_numbers:
        db.session.delete(phone_number)
    db.session.delete(food_resource.address)
    db.session.delete(food_resource)
    db.session.commit()
    return jsonify(is_approved=is_approved)
@app.route('/_approve')
@login_required
def approve():
    """Approve a pending food resource.

    Expects an ``id`` query parameter identifying the FoodResource. Emails
    the submitting contact (when one exists), detaches or deletes the contact
    record, marks the resource approved, and commits.
    """
    id = request.args.get("id", type=int)
    food_resource = FoodResource.query.filter_by(id=id).first()
    # Guard against a stale or bad id (mirrors the check in remove()).
    if not food_resource:
        return jsonify(message="failed")
    contact = food_resource.food_resource_contact
    # Admin-added resources have no guest contact; skip the notification then.
    if contact and contact.email:
        send_email(
            recipient = contact.email,
            subject = food_resource.name + ' has been approved',
            html_message = 'Dear ' + contact.name + ',\
            <p>Good news! Your proposed resource <b>' + food_resource.name +
            '</b> was approved. Thanks so much!</p><br> Sincerely,<br>' +
            app.config['USER_APP_NAME'],
            text_message = 'Good news! Your proposed resource ' +
            food_resource.name + ' was approved. Thanks so much!'
        )
    if contact:
        # An approved resource no longer needs its submitting contact; drop
        # the contact entirely when this was his/her only submission.
        if len(contact.food_resource) <= 1:
            db.session.delete(contact)
        else:
            contact.food_resource.remove(food_resource)
    food_resource.is_approved = True
    db.session.commit()
    return jsonify(message="success")
@app.route('/about')
def about():
    """Render the About page from the database-stored editable HTML."""
    page_html = HTML.query.filter_by(page='about-page').first()
    return render_template('about.html', html_string=page_html)
@app.route('/admin/analytics')
@login_required
def analytics():
    """Show search analytics: zip codes ordered by search frequency."""
    # Build the ordered query once; the original issued the identical query
    # twice (once for .all(), once for .limit()).
    zip_codes_query = ZipSearch.query.order_by(ZipSearch.search_count.desc())
    return render_template('charts.html',
        zip_codes_all=zip_codes_query.all(),
        zip_codes_limit=zip_codes_query.limit(10))
@app.route('/contact')
def contact():
    """Render the Contact page from the database-stored editable HTML."""
    page_html = HTML.query.filter_by(page='contact-page').first()
    return render_template('contact.html', html_string=page_html)
@app.route('/resources/wic')
def wic():
    """Render the WIC information page from the stored editable HTML."""
    page_html = HTML.query.filter_by(page='wic-info-page').first()
    return render_template('wic_info.html', html_string=page_html)
@app.route('/resources/snap')
def snap():
    """Render the SNAP information page from the stored editable HTML."""
    page_html = HTML.query.filter_by(page='snap-info-page').first()
    return render_template('snap_info.html', html_string=page_html)
@app.route('/resources/summer-meals')
def summer_meals():
    """Render the summer meals information page from the stored editable HTML."""
    page_html = HTML.query.filter_by(page='summer-info-page').first()
    return render_template('summer_meals.html', html_string=page_html)
@app.route('/resources/seniors')
def seniors():
    """Render the seniors information page from the stored editable HTML."""
    page_html = HTML.query.filter_by(page='seniors-info-page').first()
    return render_template('seniors_info.html', html_string=page_html)
@app.route('/resources/farmers')
def farmers():
    """Render the farmers' markets information page from the stored editable HTML."""
    page_html = HTML.query.filter_by(page='farmers-info-page').first()
    return render_template('farmers_info.html', html_string=page_html)
@app.route('/resources/neighborhood')
def neighborhood():
    """Render the neighborhood resources information page from the stored editable HTML."""
    page_html = HTML.query.filter_by(page='neighborhood-info-page').first()
    return render_template('neighborhood_info.html', html_string=page_html)
@app.route('/resources/share')
def share():
    """Render the SHARE program information page from the stored editable HTML."""
    page_html = HTML.query.filter_by(page='share-info-page').first()
    return render_template('share_info.html', html_string=page_html)
@app.route('/admin/files')
@login_required
def files():
    """Render the admin CSV import/export page."""
    return render_template('file_inputoutput.html')
@app.route('/_csv_input', methods=['GET', 'POST'])
@login_required
def csv_input():
    """Import food resources from an uploaded CSV file.

    Saves the upload to a scratch path, runs the importer, and returns a
    success JSON payload, or a 500 response listing the import errors.
    """
    file = request.files['file']
    path = '.csv_input.csv'
    # Only save/import when an upload was actually provided. The original
    # called file.save() before checking, and left ``errors`` unbound
    # (NameError) when the check failed.
    if file:
        file.save(path)
        try:
            errors = import_file(path)
        except Exception as e:
            errors = [str(e)]
    else:
        errors = ['No file provided.']
    # Use ==, not "is": identity comparison with an int is an implementation
    # detail of CPython and not a correctness guarantee.
    if errors is None or len(errors) == 0:
        return jsonify(message = "success")
    else:
        response = jsonify({
            'status': 500,
            'errors': errors
        })
        response.status_code = 500
        return response
@app.route('/_csv_download')
@login_required
def download():
    """Stream all approved food resources as a downloadable CSV file.

    The rows are first written to a temporary dump file on disk, which is
    then streamed back to the client as an attachment response.
    """
    def does_timeslot_exist(timeslots, index):
        # A weekday column is filled in only when a timeslot exists at
        # that index (the list may be shorter than 7 entries).
        try:
            return (timeslots[index] is not None)
        except IndexError:
            return False

    outfile = open('.mydump.csv', 'wb')
    outcsv = csv.writer(outfile)
    resources = FoodResource.query.filter_by(is_approved = True).all()
    outcsv.writerow(['Table 1'])
    outcsv.writerow(['','Type (' + get_string_of_all_food_resource_types() + ')',
        'Name', 'Address - Line 1', 'Address - Line 2 (optional)', 'City', 'State', 'Zip Code', 'Phone Number (optional)',
        'Website (optional)', 'Description (optional)', 'Families and children? (either \'Yes\' or leave blank)',
        'Seniors? (either \'Yes\' or leave blank)', 'Wheelchair accessible? (either \'Yes\' or leave blank)',
        'Accepts SNAP? (either \'Yes\' or leave blank)', 'Accepts FMNP Vouchers? (either \'Yes\' or leave blank)',
        'Accepts Philly Food Bucks? (either \'Yes\' or leave blank)', 'Hours Available? (either \'Yes\' or leave blank)',
        'Open Sunday? (either \'Yes\' or leave blank)', 'Open Monday? (either \'Yes\' or leave blank)',
        'Open Tuesday? (either \'Yes\' or leave blank)', 'Open Wednesday? (either \'Yes\' or leave blank)',
        'Open Thursday? (either \'Yes\' or leave blank)', 'Open Friday? (either \'Yes\' or leave blank)',
        'Open Saturday? (either \'Yes\' or leave blank)', 'Sunday Opening Time (military time - e.g., 8:00 or 17:00)',
        'Sunday Closing Time (military time - e.g., 8:00 or 17:00)', 'Monday Opening Time (military time - e.g., 8:00 or 17:00)',
        'Monday Closing Time (military time - e.g., 8:00 or 17:00)',
        'Tuesday Opening Time (military time - e.g., 8:00 or 17:00)', 'Tuesday Closing Time (military time - e.g., 8:00 or 17:00)',
        'Wednesday Opening Time (military time - e.g., 8:00 or 17:00)',
        'Wednesday Closing Time (military time - e.g., 8:00 or 17:00)',
        'Thursday Opening Time (military time - e.g., 8:00 or 17:00)', 'Thursday Closing Time (military time - e.g., 8:00 or 17:00)',
        'Friday Opening Time (military time - e.g., 8:00 or 17:00)', 'Friday Closing Time (military time - e.g., 8:00 or 17:00)',
        'Saturday Opening Time (military time - e.g., 8:00 or 17:00)', 'Saturday Closing Time (military time - e.g., 8:00 or 17:00)'])
    row_counter = 1
    for resource in resources:
        timeslots = resource.timeslots
        row = [row_counter, resource.food_resource_type.enum, resource.name,
            resource.address.line1, resource.address.line2,
            resource.address.city, resource.address.state,
            resource.address.zip_code,
            # NOTE(review): assumes every resource has at least one phone
            # number; an empty list would raise IndexError — confirm.
            resource.phone_numbers[0].number,
            resource.url, resource.description,
            'Yes' if resource.is_for_family_and_children else '',
            'Yes' if resource.is_for_seniors else '',
            'Yes' if resource.is_wheelchair_accessible else '',
            'Yes' if resource.is_accepts_snap else '',
            # BUG FIX: these two columns previously emitted the literal
            # strings 'Accepts FMNP Vouchers?' / 'Accepts Philly Food
            # Bucks?' instead of the Yes/blank values the header promises.
            # Attribute names follow the is_accepts_snap pattern;
            # TODO(review): confirm them against the FoodResource model.
            'Yes' if getattr(resource, 'is_accepts_fmnp', False) else '',
            'Yes' if getattr(resource, 'is_accepts_philly_food_bucks', False) else '',
            'Yes' if resource.are_hours_available else '']
        # One Yes/blank "open" flag per weekday (Sunday .. Saturday).
        for day in range(7):
            row.append('Yes' if does_timeslot_exist(timeslots, day) else '')
        # Opening/closing time pair per weekday; blank when closed.
        for day in range(7):
            if does_timeslot_exist(timeslots, day):
                row.append(timeslots[day].start_time.strftime('%H:%M'))
                row.append(timeslots[day].end_time.strftime('%H:%M'))
            else:
                row.extend(['', ''])
        outcsv.writerow(row)
        row_counter = row_counter + 1
    # BUG FIX: the dump must be flushed/closed before being re-opened for
    # streaming, otherwise buffered rows are missing from the download.
    outfile.close()

    def generate():
        with open('.mydump.csv', 'rb') as f:
            for line in f:
                yield line

    response = Response(generate(), mimetype='text/csv')
    filename = 'resources_generated_at_' + str(datetime.now()) + '.csv'
    response.headers["Content-Disposition"] = "attachment; filename=" + filename
    return response
@app.route('/admin/food-resource-types')
@login_required
def view_food_resource_types():
    """Render the admin listing of food resource types.

    Types are sorted alphabetically by their singular name.
    """
    all_types = (FoodResourceType.query
                 .order_by(FoodResourceType.name_singular)
                 .all())
    return render_template('food_resource_types.html',
        food_resource_types=all_types)
@app.route('/admin/new-food-resource-type', methods=['GET', 'POST'])
@app.route('/admin/edit-food-resource-type/<id>', methods=['GET', 'POST'])
@login_required
def new_food_resource_type(id=None):
    """Create a new food resource type or edit an existing one.

    With no ``id`` the view renders/handles an empty "add" form; with an
    ``id`` it pre-populates the form from the existing FoodResourceType
    and applies the edits on a valid POST.
    """
    form = AddNewFoodResourceTypeForm(request.form)
    form.id.data = None

    # Fetch the record being edited exactly once (the original code
    # issued the same query up to four times).
    food_resource_type = None
    if id is not None:
        food_resource_type = FoodResourceType.query.filter_by(id=id).first()

    # Only pin colors not already claimed by another type may be chosen;
    # an existing type's current color stays available at the top.
    choices = []
    unused_pins = ColoredPin.query.filter_by(food_resource=None) \
        .order_by(ColoredPin.color_name).all()
    for unused_pin in unused_pins:
        choices.append((unused_pin.color_name, unused_pin.color_name))
    if food_resource_type:
        choices.insert(0, (food_resource_type.colored_pin.color_name,
            food_resource_type.colored_pin.color_name))
    form.color.choices = choices

    if id is None:
        # Create a new food resource type.
        title = "Add New Food Resource Type"
    else:
        # Edit an existing food resource type.
        title = "Edit Food Resource Type"
        if food_resource_type is not None:
            form.id.data = food_resource_type.id

    # GET request: pre-populate form fields with data from the database.
    # (Removed leftover debug statement `print "merp"`.)
    if request.method == 'GET' and id is not None:
        if food_resource_type is None:
            return render_template('404.html')
        form.name_singular.data = food_resource_type.name_singular
        form.name_plural.data = food_resource_type.name_plural

    if request.method == 'POST' and form.validate():
        colored_pin = ColoredPin.query.filter_by(color_name=form.color.data) \
            .first()
        if id is not None:
            # Edit an existing food resource type.
            if food_resource_type:
                food_resource_type.name_singular = form.name_singular.data
                food_resource_type.name_plural = form.name_plural.data
                food_resource_type.colored_pin = colored_pin
                food_resource_type.recreate_fields()
        else:
            # Create a new food resource type.
            food_resource_type = FoodResourceType(
                name_singular = form.name_singular.data,
                name_plural = form.name_plural.data,
                colored_pin = colored_pin
            )
        # Save and commit database changes.
        db.session.add(food_resource_type)
        db.session.commit()
        return redirect(url_for('view_food_resource_types'))

    return render_template('add_resource_type.html', form=form, title=title)
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""python_instance.py: Python Instance for running python functions
"""
import os
import time
import Queue
import threading
from functools import partial
from collections import namedtuple
import pulsar
import contextimpl
import Function_pb2
import log
import util
import InstanceCommunication_pb2
Log = log.Log
# Equivalent of the InstanceConfig in Java
InstanceConfig = namedtuple('InstanceConfig', 'instance_id function_id function_version function_config limits')
# This is the message that the consumers put on the queue for the function thread to process
InternalMessage = namedtuple('InternalMessage', 'message topic serde consumer')
InternalQuitMessage = namedtuple('InternalQuitMessage', 'quit')
DEFAULT_SERIALIZER = "pulsarfunction.serde.IdentitySerDe"
# We keep track of the following metrics
class Stats(object):
    """Per-instance processing counters: processed/success/error counts
    and a running latency sum used to compute the average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out every counter."""
        self.nprocessed = 0
        self.nsuccessfullyprocessed = 0
        self.nuserexceptions = 0
        self.ntimeoutexceptions = 0
        self.nsystemexceptions = 0
        self.ndeserialization_exceptions = {}
        self.nserialization_exceptions = 0
        self.latency = 0

    def increment_deser_errors(self, topic):
        """Bump the per-topic deserialization error count."""
        self.ndeserialization_exceptions[topic] = \
            self.ndeserialization_exceptions.get(topic, 0) + 1

    def increment_successfully_processed(self, latency):
        """Record one success and accumulate its latency."""
        self.nsuccessfullyprocessed += 1
        self.latency += latency

    def compute_latency(self):
        """Average latency per success, or 0 when nothing succeeded."""
        if self.nsuccessfullyprocessed <= 0:
            return 0
        return self.latency / self.nsuccessfullyprocessed
class PythonInstance(object):
    """Executes one instance of a user-supplied Python function.

    Consumes messages from the configured input topics, runs the user
    function on a dedicated execution thread, publishes any output to
    the sink topic, and tracks per-instance statistics.
    """
    def __init__(self, instance_id, function_id, function_version, function_config, limits, user_code, pulsar_client):
        self.instance_config = InstanceConfig(instance_id, function_id, function_version, function_config, limits)
        self.user_code = user_code
        # Bounded queue between the consumer listener threads and the
        # execution thread; puts block when full (back-pressure).
        self.queue = Queue.Queue(limits.max_buffered_tuples)
        self.pulsar_client = pulsar_client
        self.input_serdes = {}
        self.consumers = {}
        self.output_serde = None
        self.function_class = None
        self.producer = None
        # NOTE(review): "exeuction" is a typo, kept for compatibility
        # with any external code reading this attribute.
        self.exeuction_thread = None
        self.atmost_once = self.instance_config.function_config.processingGuarantees == Function_pb2.FunctionConfig.ProcessingGuarantees.Value('ATMOST_ONCE')
        self.atleast_once = self.instance_config.function_config.processingGuarantees == Function_pb2.FunctionConfig.ProcessingGuarantees.Value('ATLEAST_ONCE')
        self.auto_ack = self.instance_config.function_config.autoAck
        self.contextimpl = None
        self.total_stats = Stats()
        self.current_stats = Stats()

    def run(self):
        """Set up consumers and serdes, import the user function, and
        start the execution thread."""
        # ATMOST_ONCE uses a shared subscription so messages are acked
        # on receipt; otherwise the subscription is exclusive.
        mode = pulsar._pulsar.ConsumerType.Exclusive
        if self.atmost_once:
            mode = pulsar._pulsar.ConsumerType.Shared
        subscription_name = str(self.instance_config.function_config.tenant) + "/" + \
                            str(self.instance_config.function_config.namespace) + "/" + \
                            str(self.instance_config.function_config.name)
        for topic, serde in self.instance_config.function_config.custom_serde_inputs.items():
            serde_kclass = util.import_class(os.path.dirname(self.user_code), serde, try_internal=True)
            self.input_serdes[topic] = serde_kclass()
            Log.info("Setting up consumer for topic %s with subname %s" % (topic, subscription_name))
            self.consumers[topic] = self.pulsar_client.subscribe(
                str(topic), subscription_name,
                consumer_type=mode,
                message_listener=partial(self.message_listener, topic, self.input_serdes[topic])
            )
        for topic in self.instance_config.function_config.inputs:
            global DEFAULT_SERIALIZER
            serde_kclass = util.import_class(os.path.dirname(self.user_code), DEFAULT_SERIALIZER, try_internal=True)
            self.input_serdes[topic] = serde_kclass()
            Log.info("Setting up consumer for topic %s with subname %s" % (topic, subscription_name))
            self.consumers[topic] = self.pulsar_client.subscribe(
                str(topic), subscription_name,
                consumer_type=mode,
                message_listener=partial(self.message_listener, topic, self.input_serdes[topic])
            )
        function_kclass = util.import_class(os.path.dirname(self.user_code), self.instance_config.function_config.className, try_internal=True)
        # BUG FIX: import_class returns None when the module cannot be
        # imported; fail fast here instead of dying later with an
        # obscure error when the function is first invoked.
        if function_kclass is None:
            Log.critical("Could not import User Function Module %s" % self.instance_config.function_config.className)
            raise NameError("Could not import User Function Module %s" % self.instance_config.function_config.className)
        try:
            self.function_class = function_kclass()
        except Exception:
            # The user code may expose a plain function rather than an
            # instantiable class; fall back to using it directly.
            self.function_class = function_kclass
        self.contextimpl = contextimpl.ContextImpl(self.instance_config, Log, self.pulsar_client, self.user_code, self.consumers)
        # Now launch a thread that does execution
        self.exeuction_thread = threading.Thread(target=self.actual_execution)
        self.exeuction_thread.start()

    def actual_execution(self):
        """Execution-thread main loop: pull messages off the queue,
        deserialize, run the user function, and handle the result."""
        Log.info("Started Thread for executing the function")
        while True:
            msg = self.queue.get(True)
            if isinstance(msg, InternalQuitMessage):
                break
            Log.debug("Got a message from topic %s" % msg.topic)
            self.current_stats.nprocessed += 1
            self.total_stats.nprocessed += 1
            input_object = None
            try:
                input_object = msg.serde.deserialize(msg.message.data())
            except Exception:
                # Count the failure but keep the instance running.
                self.current_stats.increment_deser_errors(msg.topic)
                self.total_stats.increment_deser_errors(msg.topic)
                continue
            self.contextimpl.set_current_message_context(msg.message.message_id(), msg.topic)
            output_object = None
            try:
                start_time = time.time()
                output_object = self.function_class.process(input_object, self.contextimpl)
                end_time = time.time()
                latency = (end_time - start_time) * 1000  # milliseconds
                self.total_stats.increment_successfully_processed(latency)
                self.current_stats.increment_successfully_processed(latency)
                self.process_result(output_object, msg)
            except Exception:
                Log.exception("Exception while executing user method")
                self.total_stats.nuserexceptions += 1
                self.current_stats.nuserexceptions += 1

    def done_producing(self, consumer, orig_message, result, sent_message):
        """Producer send callback: under at-least-once with auto-ack,
        ack the source message once its output is persisted."""
        if result == pulsar.Result.Ok and self.auto_ack and self.atleast_once:
            consumer.acknowledge(orig_message)

    def process_result(self, output, msg):
        """Serialize and publish the function output, or just ack the
        input message when there is nothing to publish."""
        if output is not None:
            output_bytes = None
            if self.output_serde is None:
                self.setup_output_serde()
            if self.producer is None:
                self.setup_producer()
            try:
                output_bytes = self.output_serde.serialize(output)
            except Exception:
                self.current_stats.nserialization_exceptions += 1
                self.total_stats.nserialization_exceptions += 1
            if output_bytes is not None:
                try:
                    self.producer.send_async(output_bytes, partial(self.done_producing, msg.consumer, msg.message))
                except Exception:
                    self.current_stats.nsystemexceptions += 1
                    self.total_stats.nsystemexceptions += 1
        elif self.auto_ack and self.atleast_once:
            msg.consumer.acknowledge(msg.message)

    def setup_output_serde(self):
        """Instantiate the configured output serde, or the default."""
        if self.instance_config.function_config.outputSerdeClassName is not None and \
                len(self.instance_config.function_config.outputSerdeClassName) > 0:
            serde_kclass = util.import_class(os.path.dirname(self.user_code), self.instance_config.function_config.outputSerdeClassName, try_internal=True)
            self.output_serde = serde_kclass()
        else:
            global DEFAULT_SERIALIZER
            serde_kclass = util.import_class(os.path.dirname(self.user_code), DEFAULT_SERIALIZER, try_internal=True)
            self.output_serde = serde_kclass()

    def setup_producer(self):
        """Create the producer for the sink topic, if one is configured."""
        if self.instance_config.function_config.sinkTopic is not None and \
                len(self.instance_config.function_config.sinkTopic) > 0:
            Log.info("Setting up producer for topic %s" % self.instance_config.function_config.sinkTopic)
            self.producer = self.pulsar_client.create_producer(
                str(self.instance_config.function_config.sinkTopic),
                block_if_queue_full=True,
                batching_enabled=True,
                batching_max_publish_delay_ms=1,
                max_pending_messages=100000)

    def message_listener(self, topic, serde, consumer, message):
        """Consumer callback: enqueue the message for the execution
        thread; ack immediately under at-most-once semantics."""
        item = InternalMessage(message, topic, serde, consumer)
        self.queue.put(item, True)
        if self.atmost_once and self.auto_ack:
            consumer.acknowledge(message)

    def get_and_reset_metrics(self):
        """Return user + system metrics for the current window, then
        reset the window counters."""
        # First get any user metrics
        metrics = self.contextimpl.get_and_reset_metrics()
        # Now add system metrics as well
        self.add_system_metrics("__total_processed__", self.current_stats.nprocessed, metrics)
        self.add_system_metrics("__total_successfully_processed__", self.current_stats.nsuccessfullyprocessed, metrics)
        self.add_system_metrics("__total_system_exceptions__", self.current_stats.nsystemexceptions, metrics)
        self.add_system_metrics("__total_timeout_exceptions__", self.current_stats.ntimeoutexceptions, metrics)
        self.add_system_metrics("__total_user_exceptions__", self.current_stats.nuserexceptions, metrics)
        for (topic, metric) in self.current_stats.ndeserialization_exceptions.items():
            self.add_system_metrics("__total_deserialization_exceptions__" + topic, metric, metrics)
        self.add_system_metrics("__total_serialization_exceptions__", self.current_stats.nserialization_exceptions, metrics)
        self.add_system_metrics("__avg_latency_ms__", self.current_stats.compute_latency(), metrics)
        self.current_stats.reset()
        return metrics

    def add_system_metrics(self, metric_name, value, metrics):
        """Record a single system metric under metric_name."""
        metrics.metrics[metric_name].count = value
        metrics.metrics[metric_name].sum = value
        metrics.metrics[metric_name].min = 0
        metrics.metrics[metric_name].max = value

    def get_function_status(self):
        """Build a FunctionStatus snapshot from the lifetime totals."""
        status = InstanceCommunication_pb2.FunctionStatus()
        status.running = True
        status.numProcessed = self.total_stats.nprocessed
        status.numSuccessfullyProcessed = self.total_stats.nsuccessfullyprocessed
        status.numTimeouts = self.total_stats.ntimeoutexceptions
        status.numUserExceptions = self.total_stats.nuserexceptions
        status.numSystemExceptions = self.total_stats.nsystemexceptions
        for (topic, metric) in self.total_stats.ndeserialization_exceptions.items():
            status.deserializationExceptions[topic] = metric
        status.serializationExceptions = self.total_stats.nserialization_exceptions
        status.averageLatency = self.total_stats.compute_latency()
        return status

    def join(self):
        """Signal the execution thread to quit and wait for it to end."""
        self.queue.put(InternalQuitMessage(True), True)
        self.exeuction_thread.join()
Raise an exception if the module is not found (#194)
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""python_instance.py: Python Instance for running python functions
"""
import os
import time
import Queue
import threading
from functools import partial
from collections import namedtuple
import pulsar
import contextimpl
import Function_pb2
import log
import util
import InstanceCommunication_pb2
Log = log.Log
# Equivalent of the InstanceConfig in Java
InstanceConfig = namedtuple('InstanceConfig', 'instance_id function_id function_version function_config limits')
# This is the message that the consumers put on the queue for the function thread to process
InternalMessage = namedtuple('InternalMessage', 'message topic serde consumer')
InternalQuitMessage = namedtuple('InternalQuitMessage', 'quit')
DEFAULT_SERIALIZER = "pulsarfunction.serde.IdentitySerDe"
# We keep track of the following metrics
class Stats(object):
    """Tracks processing statistics for one function instance."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Reset all counters back to their initial state."""
        self.nprocessed = 0
        self.nsuccessfullyprocessed = 0
        self.nuserexceptions = 0
        self.ntimeoutexceptions = 0
        self.nsystemexceptions = 0
        self.ndeserialization_exceptions = {}
        self.nserialization_exceptions = 0
        self.latency = 0

    def increment_deser_errors(self, topic):
        """Count one deserialization failure for the given topic."""
        current = self.ndeserialization_exceptions.get(topic, 0)
        self.ndeserialization_exceptions[topic] = current + 1

    def increment_successfully_processed(self, latency):
        """Count one success and add its latency to the running sum."""
        self.nsuccessfullyprocessed += 1
        self.latency += latency

    def compute_latency(self):
        """Mean latency over all successes; 0 when there were none."""
        count = self.nsuccessfullyprocessed
        return self.latency / count if count > 0 else 0
class PythonInstance(object):
    """Executes one instance of a user-supplied Python function.

    Consumes messages from the configured input topics, runs the user
    function on a dedicated execution thread, publishes any output to
    the sink topic, and tracks per-instance statistics.
    """
    def __init__(self, instance_id, function_id, function_version, function_config, limits, user_code, pulsar_client):
        self.instance_config = InstanceConfig(instance_id, function_id, function_version, function_config, limits)
        self.user_code = user_code
        # Bounded queue between the consumer listener threads and the
        # execution thread; puts block when full (back-pressure).
        self.queue = Queue.Queue(limits.max_buffered_tuples)
        self.pulsar_client = pulsar_client
        self.input_serdes = {}
        self.consumers = {}
        self.output_serde = None
        self.function_class = None
        self.producer = None
        # NOTE(review): "exeuction" is a typo, kept for compatibility
        # with any external code reading this attribute.
        self.exeuction_thread = None
        self.atmost_once = self.instance_config.function_config.processingGuarantees == Function_pb2.FunctionConfig.ProcessingGuarantees.Value('ATMOST_ONCE')
        self.atleast_once = self.instance_config.function_config.processingGuarantees == Function_pb2.FunctionConfig.ProcessingGuarantees.Value('ATLEAST_ONCE')
        self.auto_ack = self.instance_config.function_config.autoAck
        self.contextimpl = None
        self.total_stats = Stats()
        self.current_stats = Stats()

    def run(self):
        """Set up consumers and serdes, import the user function, and
        start the execution thread."""
        # ATMOST_ONCE uses a shared subscription so messages are acked
        # on receipt; otherwise the subscription is exclusive.
        mode = pulsar._pulsar.ConsumerType.Exclusive
        if self.atmost_once:
            mode = pulsar._pulsar.ConsumerType.Shared
        subscription_name = str(self.instance_config.function_config.tenant) + "/" + \
                            str(self.instance_config.function_config.namespace) + "/" + \
                            str(self.instance_config.function_config.name)
        for topic, serde in self.instance_config.function_config.custom_serde_inputs.items():
            serde_kclass = util.import_class(os.path.dirname(self.user_code), serde, try_internal=True)
            self.input_serdes[topic] = serde_kclass()
            Log.info("Setting up consumer for topic %s with subname %s" % (topic, subscription_name))
            self.consumers[topic] = self.pulsar_client.subscribe(
                str(topic), subscription_name,
                consumer_type=mode,
                message_listener=partial(self.message_listener, topic, self.input_serdes[topic])
            )
        for topic in self.instance_config.function_config.inputs:
            global DEFAULT_SERIALIZER
            serde_kclass = util.import_class(os.path.dirname(self.user_code), DEFAULT_SERIALIZER, try_internal=True)
            self.input_serdes[topic] = serde_kclass()
            Log.info("Setting up consumer for topic %s with subname %s" % (topic, subscription_name))
            self.consumers[topic] = self.pulsar_client.subscribe(
                str(topic), subscription_name,
                consumer_type=mode,
                message_listener=partial(self.message_listener, topic, self.input_serdes[topic])
            )
        function_kclass = util.import_class(os.path.dirname(self.user_code), self.instance_config.function_config.className, try_internal=True)
        # import_class returns None when the module cannot be imported;
        # fail fast here rather than dying later with an obscure error.
        if function_kclass is None:
            Log.critical("Could not import User Function Module %s" % self.instance_config.function_config.className)
            raise NameError("Could not import User Function Module %s" % self.instance_config.function_config.className)
        try:
            self.function_class = function_kclass()
        except Exception:
            # The user code may expose a plain function rather than an
            # instantiable class; fall back to using it directly.
            # (Narrowed from a bare except so SystemExit and
            # KeyboardInterrupt are no longer swallowed.)
            self.function_class = function_kclass
        self.contextimpl = contextimpl.ContextImpl(self.instance_config, Log, self.pulsar_client, self.user_code, self.consumers)
        # Now launch a thread that does execution
        self.exeuction_thread = threading.Thread(target=self.actual_execution)
        self.exeuction_thread.start()

    def actual_execution(self):
        """Execution-thread main loop: pull messages off the queue,
        deserialize, run the user function, and handle the result."""
        Log.info("Started Thread for executing the function")
        while True:
            msg = self.queue.get(True)
            if isinstance(msg, InternalQuitMessage):
                break
            Log.debug("Got a message from topic %s" % msg.topic)
            self.current_stats.nprocessed += 1
            self.total_stats.nprocessed += 1
            input_object = None
            try:
                input_object = msg.serde.deserialize(msg.message.data())
            except Exception:
                # Count the failure but keep the instance running.
                self.current_stats.increment_deser_errors(msg.topic)
                self.total_stats.increment_deser_errors(msg.topic)
                continue
            self.contextimpl.set_current_message_context(msg.message.message_id(), msg.topic)
            output_object = None
            try:
                start_time = time.time()
                output_object = self.function_class.process(input_object, self.contextimpl)
                end_time = time.time()
                latency = (end_time - start_time) * 1000  # milliseconds
                self.total_stats.increment_successfully_processed(latency)
                self.current_stats.increment_successfully_processed(latency)
                self.process_result(output_object, msg)
            except Exception:
                Log.exception("Exception while executing user method")
                self.total_stats.nuserexceptions += 1
                self.current_stats.nuserexceptions += 1

    def done_producing(self, consumer, orig_message, result, sent_message):
        """Producer send callback: under at-least-once with auto-ack,
        ack the source message once its output is persisted."""
        if result == pulsar.Result.Ok and self.auto_ack and self.atleast_once:
            consumer.acknowledge(orig_message)

    def process_result(self, output, msg):
        """Serialize and publish the function output, or just ack the
        input message when there is nothing to publish."""
        if output is not None:
            output_bytes = None
            if self.output_serde is None:
                self.setup_output_serde()
            if self.producer is None:
                self.setup_producer()
            try:
                output_bytes = self.output_serde.serialize(output)
            except Exception:
                self.current_stats.nserialization_exceptions += 1
                self.total_stats.nserialization_exceptions += 1
            if output_bytes is not None:
                try:
                    self.producer.send_async(output_bytes, partial(self.done_producing, msg.consumer, msg.message))
                except Exception:
                    self.current_stats.nsystemexceptions += 1
                    self.total_stats.nsystemexceptions += 1
        elif self.auto_ack and self.atleast_once:
            msg.consumer.acknowledge(msg.message)

    def setup_output_serde(self):
        """Instantiate the configured output serde, or the default."""
        if self.instance_config.function_config.outputSerdeClassName is not None and \
                len(self.instance_config.function_config.outputSerdeClassName) > 0:
            serde_kclass = util.import_class(os.path.dirname(self.user_code), self.instance_config.function_config.outputSerdeClassName, try_internal=True)
            self.output_serde = serde_kclass()
        else:
            global DEFAULT_SERIALIZER
            serde_kclass = util.import_class(os.path.dirname(self.user_code), DEFAULT_SERIALIZER, try_internal=True)
            self.output_serde = serde_kclass()

    def setup_producer(self):
        """Create the producer for the sink topic, if one is configured."""
        if self.instance_config.function_config.sinkTopic is not None and \
                len(self.instance_config.function_config.sinkTopic) > 0:
            Log.info("Setting up producer for topic %s" % self.instance_config.function_config.sinkTopic)
            self.producer = self.pulsar_client.create_producer(
                str(self.instance_config.function_config.sinkTopic),
                block_if_queue_full=True,
                batching_enabled=True,
                batching_max_publish_delay_ms=1,
                max_pending_messages=100000)

    def message_listener(self, topic, serde, consumer, message):
        """Consumer callback: enqueue the message for the execution
        thread; ack immediately under at-most-once semantics."""
        item = InternalMessage(message, topic, serde, consumer)
        self.queue.put(item, True)
        if self.atmost_once and self.auto_ack:
            consumer.acknowledge(message)

    def get_and_reset_metrics(self):
        """Return user + system metrics for the current window, then
        reset the window counters."""
        # First get any user metrics
        metrics = self.contextimpl.get_and_reset_metrics()
        # Now add system metrics as well
        self.add_system_metrics("__total_processed__", self.current_stats.nprocessed, metrics)
        self.add_system_metrics("__total_successfully_processed__", self.current_stats.nsuccessfullyprocessed, metrics)
        self.add_system_metrics("__total_system_exceptions__", self.current_stats.nsystemexceptions, metrics)
        self.add_system_metrics("__total_timeout_exceptions__", self.current_stats.ntimeoutexceptions, metrics)
        self.add_system_metrics("__total_user_exceptions__", self.current_stats.nuserexceptions, metrics)
        for (topic, metric) in self.current_stats.ndeserialization_exceptions.items():
            self.add_system_metrics("__total_deserialization_exceptions__" + topic, metric, metrics)
        self.add_system_metrics("__total_serialization_exceptions__", self.current_stats.nserialization_exceptions, metrics)
        self.add_system_metrics("__avg_latency_ms__", self.current_stats.compute_latency(), metrics)
        self.current_stats.reset()
        return metrics

    def add_system_metrics(self, metric_name, value, metrics):
        """Record a single system metric under metric_name."""
        metrics.metrics[metric_name].count = value
        metrics.metrics[metric_name].sum = value
        metrics.metrics[metric_name].min = 0
        metrics.metrics[metric_name].max = value

    def get_function_status(self):
        """Build a FunctionStatus snapshot from the lifetime totals."""
        status = InstanceCommunication_pb2.FunctionStatus()
        status.running = True
        status.numProcessed = self.total_stats.nprocessed
        status.numSuccessfullyProcessed = self.total_stats.nsuccessfullyprocessed
        status.numTimeouts = self.total_stats.ntimeoutexceptions
        status.numUserExceptions = self.total_stats.nuserexceptions
        status.numSystemExceptions = self.total_stats.nsystemexceptions
        for (topic, metric) in self.total_stats.ndeserialization_exceptions.items():
            status.deserializationExceptions[topic] = metric
        status.serializationExceptions = self.total_stats.nserialization_exceptions
        status.averageLatency = self.total_stats.compute_latency()
        return status

    def join(self):
        """Signal the execution thread to quit and wait for it to end."""
        self.queue.put(InternalQuitMessage(True), True)
        self.exeuction_thread.join()
|
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required
from django.template.context import RequestContext
from django.template.defaultfilters import slugify
from gae_django.auth.models import User
from july.people.models import Commit, Location, Project
from google.appengine.ext.ndb.query import Cursor
from google.appengine.ext import ndb
def people_projects(request, username):
    """Render the list of projects a user has contributed to.

    Raises Http404 when no user owns the given username.
    """
    user = User.get_by_auth_id('own:%s' % username)
    if user is None:
        raise Http404("User not found")
    # Older user entities may lack the 'projects' attribute entirely.
    project_names = getattr(user, 'projects', None) or []
    projects = ndb.get_multi([Project.make_key(name) for name in project_names])
    return render_to_response('people/people_projects.html',
        {"projects": projects, 'profile': user},
        context_instance=RequestContext(request))
def user_profile(request, username):
    """Render a user's profile with their 100 most recent commits.

    Raises Http404 when no user owns the given username.
    """
    user = User.get_by_auth_id('own:%s' % username)
    if user is None:  # identity check is the Python idiom for None
        raise Http404("User not found")
    commits = Commit.query(ancestor=user.key).order(-Commit.timestamp).fetch(100)
    return render_to_response('people/profile.html',
        {"commits": commits, 'profile': user},
        context_instance=RequestContext(request))
def leaderboard(request, template_name='people/leaderboard.html'):
    """Show users ranked by total points, 100 per page."""
    limit = 100
    cursor = request.GET.get('cursor')
    if cursor:
        cursor = Cursor(urlsafe=cursor)
    query = User.query().order(-ndb.GenericProperty('total'))
    models, next_cursor, more = query.fetch_page(limit, start_cursor=cursor)
    # BUG FIX: fetch_page returns next_cursor=None on the final page;
    # calling urlsafe() on None raised AttributeError.
    next_c = next_cursor.urlsafe() if next_cursor else None
    return render_to_response(template_name,
        {'next': next_c, 'more': more,
         'users': models},
        context_instance=RequestContext(request))
def users_by_location(request, location_slug,
    template_name='people/people_list.html'):
    """List users in a location, ranked by total points, 100 per page."""
    limit = 100
    cursor = request.GET.get('cursor')
    if cursor:
        cursor = Cursor(urlsafe=cursor)
    query = User.query(User.location_slug == location_slug)
    query = query.order(-ndb.GenericProperty('total'))
    models, next_cursor, more = query.fetch_page(limit, start_cursor=cursor)
    location = Location.get_by_id(location_slug)
    # BUG FIX: fetch_page returns next_cursor=None on the final page;
    # calling urlsafe() on None raised AttributeError.
    next_c = next_cursor.urlsafe() if next_cursor else None
    return render_to_response(template_name,
        {'next': next_c, 'more': more,
         'users': models,
         'location': location, 'slug': location_slug},
        context_instance=RequestContext(request))
def locations(request, template_name='people/locations.html'):
    """Render the top 1000 locations ranked by total points."""
    ranked = Location.query().order(-Location.total).fetch(1000)
    return render_to_response(
        template_name,
        {'locations': ranked},
        context_instance=RequestContext(request))
def projects(request, template_name='projects/index.html'):
    """List projects ranked by total points, 100 per page."""
    limit = 100
    cursor = request.GET.get('cursor')
    if cursor:
        cursor = Cursor(urlsafe=cursor)
    query = Project.query().order(-Project.total)
    models, next_cursor, more = query.fetch_page(limit, start_cursor=cursor)
    # BUG FIX: fetch_page returns next_cursor=None on the final page;
    # calling urlsafe() on None raised AttributeError.
    next_c = next_cursor.urlsafe() if next_cursor else None
    return render_to_response(template_name,
        {'projects': models, 'next': next_c, 'more': more},
        context_instance=RequestContext(request))
def project_details(request, slug, template_name='projects/details.html'):
    """Show a single project and the users who contributed to it."""
    project = ndb.Key('Project', slug).get()
    if project is None:
        raise Http404("Project Not Found.")
    # TODO: pagination
    contributors = User.query().filter(
        ndb.GenericProperty('projects') == project.url).fetch(1000)
    return render_to_response(
        template_name,
        {'project': project, 'users': contributors},
        context_instance=RequestContext(request))
@login_required
def edit_profile(request, username, template_name='people/edit.html'):
    """Let the logged-in user edit their own profile.

    Returns 403 when the profile belongs to a different user and 404
    when no user owns the given username.
    """
    from forms import EditUserForm
    user = User.get_by_auth_id('own:%s' % username)
    if user is None:
        raise Http404("User not found")
    if user.key != request.user.key:
        # BUG FIX: assigning to .status left the response at 200;
        # Django uses status_code / the status= constructor argument.
        return HttpResponse("This ain't you!", status=403)
    form = EditUserForm(request.POST or None, user=request.user)
    if form.is_valid():
        for key in form.cleaned_data:
            if key == 'email':
                continue
            setattr(user, key, form.cleaned_data.get(key))
        # BUG FIX: the slug was computed but its result discarded; store
        # it so users_by_location (which filters on User.location_slug)
        # can find this user.  TODO(review): confirm location_slug is
        # the intended target field.
        user.location_slug = slugify(user.location)
        user.put()
        return HttpResponseRedirect(
            reverse('member-profile',
                kwargs={'username': request.user.username}
            )
        )
    return render_to_response(template_name,
        {'form': form},
        context_instance=RequestContext(request))
@login_required
def delete_email(request, username, email):
    """Remove an e-mail auth id from the logged-in user's account.

    GET renders a confirmation page; POST performs the deletion and
    redirects back to the member profile.
    """
    # the ID we are to delete
    auth_id = 'email:%s' % email
    user = User.get_by_auth_id('own:%s' % username)
    e_user = User.get_by_auth_id(auth_id)
    if user is None or e_user is None:
        raise Http404("User not found")
    # The URL's user, the e-mail's owner and the session user must all match.
    if user != request.user or user != e_user:
        # BUG FIX: HttpResponse has no ``status`` attribute; assigning
        # one left the response a 200. Pass status= at construction.
        return HttpResponse("This ain't you!", status=403)
    if request.method == "POST":
        # delete the email from the user
        user.auth_ids.remove(auth_id)
        # Also release the uniqueness marker so the address can be re-added.
        user.unique_model.delete_multi(['User.auth_id:%s' % auth_id])
        user.put()
        return HttpResponseRedirect(
            reverse('member-profile', kwargs={'username': request.user.username})
        )
    return render_to_response('people/delete_email.html',
        {'email': email},
        context_instance=RequestContext(request))
Check that the pagination cursor is not None before calling urlsafe() on it
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required
from django.template.context import RequestContext
from django.template.defaultfilters import slugify
from gae_django.auth.models import User
from july.people.models import Commit, Location, Project
from google.appengine.ext.ndb.query import Cursor
from google.appengine.ext import ndb
def people_projects(request, username):
    """Render the projects a user has contributed to.

    Raises Http404 when no user owns the given username.
    """
    user = User.get_by_auth_id('own:%s' % username)
    # Idiom fix: compare to None with ``is``, not ``==``.
    if user is None:
        raise Http404("User not found")
    # Older user entities may lack the 'projects' attribute entirely,
    # or have it set to None; treat both as "no projects".
    project_urls = getattr(user, 'projects', None) or []
    projects = ndb.get_multi([Project.make_key(project) for project in project_urls])
    return render_to_response('people/people_projects.html',
        {"projects": projects, 'profile': user},
        context_instance=RequestContext(request))
def user_profile(request, username):
    """Render a member's profile page with their 100 most recent commits.

    Raises Http404 when no user owns the given username.
    """
    user = User.get_by_auth_id('own:%s' % username)
    # Idiom fix: compare to None with ``is``, not ``==``.
    if user is None:
        raise Http404("User not found")
    # Commits are stored as ndb descendants of the user entity.
    commits = Commit.query(ancestor=user.key).order(-Commit.timestamp).fetch(100)
    return render_to_response('people/profile.html',
        {"commits": commits, 'profile': user},
        context_instance=RequestContext(request))
def leaderboard(request, template_name='people/leaderboard.html'):
    """Paginated ranking of users by total points, 100 per page."""
    limit = 100
    # Resume from a prior page if the client sent a cursor token.
    cursor = request.GET.get('cursor')
    if cursor:
        cursor = Cursor(urlsafe=cursor)
    query = User.query().order(-ndb.GenericProperty('total'))
    models, next_cursor, more = query.fetch_page(limit, start_cursor=cursor)
    # fetch_page returns next_cursor=None on the last page; only then
    # is it safe to serialize it for the template.
    if next_cursor is not None:
        next_cursor = next_cursor.urlsafe()
    return render_to_response(template_name,
        {'next':next_cursor, 'more':more,
         'users':models},
        context_instance=RequestContext(request))
def users_by_location(request, location_slug,
    template_name='people/people_list.html'):
    """Paginated list of the users in one location, ranked by total."""
    limit = 100
    # Resume from a prior page if the client sent a cursor token.
    cursor = request.GET.get('cursor')
    if cursor:
        cursor = Cursor(urlsafe=cursor)
    query = User.query(User.location_slug == location_slug)
    query = query.order(-ndb.GenericProperty('total'))
    models, next_cursor, more = query.fetch_page(limit, start_cursor=cursor)
    # next_cursor is None on the last page; guard before serializing.
    if next_cursor is not None:
        next_cursor = next_cursor.urlsafe()
    # The slug doubles as the Location entity's id.
    location = Location.get_by_id(location_slug)
    return render_to_response(template_name,
        {'next':next_cursor, 'more':more,
         'users':models,
         'location': location, 'slug': location_slug},
        context_instance=RequestContext(request))
def locations(request, template_name='people/locations.html'):
    """Render the locations index, ordered by total points (top 1000)."""
    ranked = Location.query().order(-Location.total).fetch(1000)
    return render_to_response(
        template_name,
        {'locations': ranked},
        context_instance=RequestContext(request))
def projects(request, template_name='projects/index.html'):
    """Render the project index, paginated 100 at a time by total points."""
    page_size = 100
    cursor = request.GET.get('cursor')
    if cursor:
        cursor = Cursor(urlsafe=cursor)
    ranked = Project.query().order(-Project.total)
    page, next_cursor, more = ranked.fetch_page(page_size, start_cursor=cursor)
    # Serialize the cursor only when another page exists.
    if next_cursor is not None:
        next_cursor = next_cursor.urlsafe()
    context = {'projects': page, 'next': next_cursor, 'more': more}
    return render_to_response(
        template_name, context,
        context_instance=RequestContext(request))
def project_details(request, slug, template_name='projects/details.html'):
    """Render one project plus the users whose 'projects' list contains it."""
    # The URL slug doubles as the ndb key id for Project entities.
    project_key = ndb.Key('Project', slug)
    project = project_key.get()
    if project is None:
        raise Http404("Project Not Found.")
    # TODO: pagination
    users = User.query().filter(ndb.GenericProperty('projects') == project.url).fetch(1000)
    return render_to_response(template_name,
        {'project': project, 'users': users},
        context_instance=RequestContext(request))
@login_required
def edit_profile(request, username, template_name='people/edit.html'):
    """Let the authenticated user edit their own profile.

    Raises Http404 for unknown usernames; returns 403 when the profile
    does not belong to the requesting user.
    """
    from forms import EditUserForm
    user = User.get_by_auth_id('own:%s' % username)
    if user is None:
        raise Http404("User not found")
    if user.key != request.user.key:
        # BUG FIX: HttpResponse has no ``status`` attribute; assigning
        # one left the response a 200. Pass status= at construction.
        return HttpResponse("This ain't you!", status=403)
    form = EditUserForm(request.POST or None, user=request.user)
    if form.is_valid():
        for key in form.cleaned_data:
            # E-mail addresses are managed via auth_ids, not form fields.
            if key == 'email':
                continue
            setattr(user, key, form.cleaned_data.get(key))
        # BUG FIX: the slugified location was computed and discarded;
        # store it so users_by_location's User.location_slug filter
        # can find this user.
        if user.location:
            user.location_slug = slugify(user.location)
        user.put()
        return HttpResponseRedirect(
            reverse('member-profile',
                kwargs={'username': request.user.username}
            )
        )
    return render_to_response(template_name,
        {'form': form},
        context_instance=RequestContext(request))
@login_required
def delete_email(request, username, email):
    """Remove an e-mail auth id from the logged-in user's account.

    GET renders a confirmation page; POST performs the deletion and
    redirects back to the member profile.
    """
    # the ID we are to delete
    auth_id = 'email:%s' % email
    user = User.get_by_auth_id('own:%s' % username)
    e_user = User.get_by_auth_id(auth_id)
    if user is None or e_user is None:
        raise Http404("User not found")
    # The URL's user, the e-mail's owner and the session user must all match.
    if user != request.user or user != e_user:
        # BUG FIX: HttpResponse has no ``status`` attribute; assigning
        # one left the response a 200. Pass status= at construction.
        return HttpResponse("This ain't you!", status=403)
    if request.method == "POST":
        # delete the email from the user
        user.auth_ids.remove(auth_id)
        # Also release the uniqueness marker so the address can be re-added.
        user.unique_model.delete_multi(['User.auth_id:%s' % auth_id])
        user.put()
        return HttpResponseRedirect(
            reverse('member-profile', kwargs={'username': request.user.username})
        )
    return render_to_response('people/delete_email.html',
        {'email': email},
        context_instance=RequestContext(request))
|
# This file is part of allegedb, an object-relational mapper for versioned graphs.
# Copyright (C) Zachary Spector. public@zacharyspector.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""WindowDict, the core data structure used by allegedb's caching system.
It resembles a dictionary, more specifically a defaultdict-like where retrieving
a key that isn't set will get the highest set key that is lower than the key
you asked for (and thus, keys must be orderable). It is optimized for retrieval
of the same key and neighboring ones repeatedly and in sequence.
"""
from collections import deque, Mapping, MutableMapping, KeysView, ItemsView, ValuesView
from operator import itemgetter
from itertools import chain
try:
    import cython
except ImportError:
    # Pure-Python stand-in so the @cython.locals / @cython.cfunc
    # decorators below become no-ops when Cython isn't installed.
    class cython:
        def locals(**kwargs):
            def passthru(fun):
                return fun
            return passthru
        cfunc = locals
        int = None
        bint = None
# Accessors for the (rev, value) pairs stored in the past/future stacks.
get0 = itemgetter(0)
get1 = itemgetter(1)
# TODO: cancel changes that would put something back to where it was at the start
# This will complicate the update_window functions though, and I don't think it'll
# improve much apart from a bit of efficiency in that the deltas are smaller
# sometimes.
def update_window(turn_from, tick_from, turn_to, tick_to, updfun, branchd):
    """Iterate over a window of time in ``branchd`` and call ``updfun`` on the values"""
    # branchd maps turn -> a WindowDict-like of tick -> state tuple;
    # slicing one yields the state tuples in tick order.
    if turn_from in branchd:
        # Not including the exact tick you started from because deltas are *changes*
        for past_state in branchd[turn_from][tick_from+1:]:
            updfun(*past_state)
    # Turns strictly between the endpoints are applied in full.
    for midturn in range(turn_from+1, turn_to):
        if midturn in branchd:
            for past_state in branchd[midturn][:]:
                updfun(*past_state)
    # Final turn: apply changes up to tick_to.
    if turn_to in branchd:
        for past_state in branchd[turn_to][:tick_to]:
            updfun(*past_state)
def update_backward_window(turn_from, tick_from, turn_to, tick_to, updfun, branchd):
    """Iterate backward over a window of time in ``branchd`` and call ``updfun`` on the values"""
    # Mirror of update_window, walking turns and ticks in reverse;
    # here turn_from is the *later* endpoint.
    if turn_from in branchd:
        for future_state in reversed(branchd[turn_from][:tick_from]):
            updfun(*future_state)
    for midturn in range(turn_from-1, turn_to, -1):
        if midturn in branchd:
            for future_state in reversed(branchd[midturn][:]):
                updfun(*future_state)
    if turn_to in branchd:
        for future_state in reversed(branchd[turn_to][tick_to+1:]):
            updfun(*future_state)
class HistoryError(KeyError):
    """Raised when the past is accessed in an invalid way.

    ``deleted`` records whether the value had been deleted at the
    revision in question.
    """
    def __init__(self, *args, deleted=False):
        self.deleted = deleted
        super().__init__(*args)
def within_history(rev, windowdict):
    """Return whether the windowdict has history at the revision."""
    if not windowdict:
        return False
    past = windowdict._past
    future = windowdict._future
    # Earliest known rev is at the front of past; if past is empty,
    # the future stack's last entry is the earliest. Symmetrically for
    # the latest known rev.
    begin = past[0][0] if past else future[-1][0]
    end = future[0][0] if future else past[-1][0]
    return begin <= rev <= end
class WindowDictKeysView(KeysView):
    """Look through all the keys a WindowDict contains."""
    def __contains__(self, rev):
        # O(1) membership via the WindowDict's cached key set.
        return rev in self._mapping._keys
    def __iter__(self):
        # Past keys ascending, then the future stack in storage order.
        past = self._mapping._past
        future = self._mapping._future
        if past:
            yield from map(get0, past)
        if future:
            yield from map(get0, future)
class WindowDictItemsView(ItemsView):
    """Look through everything a WindowDict contains."""
    def __contains__(self, item):
        (rev, v) = item
        mapp = self._mapping
        # Cheap range check first; only then scan both stacks linearly.
        if not within_history(rev, mapp):
            return False
        for mrev, mv in mapp._past:
            if mrev == rev:
                return mv == v
        for mrev, mv in mapp._future:
            if mrev == rev:
                return mv == v
        return False
    def __iter__(self):
        # Past pairs oldest-first, then the future stack in storage order.
        past = self._mapping._past
        future = self._mapping._future
        if past:
            yield from past
        if future:
            yield from future
class WindowDictPastKeysView(KeysView):
    """View on a WindowDict's past keys relative to last lookup"""
    def __iter__(self):
        # stack holds (rev, value) pairs oldest-first; yield newest-first.
        if not self._mapping.stack:
            return
        yield from map(get0, reversed(self._mapping.stack))
    def __contains__(self, item):
        if not self._mapping.stack:
            return False
        stack = self._mapping.stack
        # Bounds check against oldest/newest revs before the linear scan.
        if not stack or item < stack[0][0] or item > stack[-1][0]:
            return False
        for rev in map(get0, stack):
            if rev == item:
                return True
        return False
class WindowDictFutureKeysView(KeysView):
    """View on a WindowDict's future keys relative to last lookup"""
    def __iter__(self):
        # The future stack keeps its earliest pair last (see seek),
        # so reversed() yields revs in ascending order.
        if not self._mapping.stack:
            return
        yield from map(get0, reversed(self._mapping.stack))
    def __contains__(self, item):
        if not self._mapping.stack:
            return False
        stack = self._mapping.stack
        # stack[-1] is the earliest future rev, stack[0] the latest.
        if not stack or item < stack[-1][0] or item > stack[0][0]:
            return False
        for rev in map(get0, stack):
            if rev == item:
                return True
        return False
class WindowDictPastItemsView(ItemsView):
    """View on a WindowDict's past items relative to last lookup"""
    def __iter__(self):
        # Newest-first, matching WindowDictPastKeysView's order.
        if not self._mapping.stack:
            return
        yield from reversed(self._mapping.stack)
    def __contains__(self, item):
        stack = self._mapping.stack
        # Range-check the rev before the O(n) membership test.
        if not stack or item[0] < stack[0][0] or item[0] > stack[-1][0]:
            return False
        return item in stack
class WindowDictFutureItemsView(ItemsView):
    """View on a WindowDict's future items relative to last lookup"""
    def __iter__(self):
        # Yields pairs in the future stack's storage order
        # (earliest future pair stored last; see seek).
        stack = self._mapping.stack
        if not stack:
            return
        yield from stack
    def __contains__(self, item):
        stack = self._mapping.stack
        # stack[-1] is the earliest future rev, stack[0] the latest.
        if not stack or item[0] < stack[-1][0] or item[0] > stack[0][0]:
            return False
        return item in stack
class WindowDictPastFutureValuesView(ValuesView):
    """Abstract class for views on the past or future values of a WindowDict"""
    def __contains__(self, item):
        stack = self._mapping.stack
        if not stack:
            return False
        # Linear scan; values aren't indexed anywhere.
        for v in map(get1, stack):
            if v == item:
                return True
        return False
    def __iter__(self):
        stack = self._mapping.stack
        if not stack:
            return
        # Yield values in reverse storage order, matching the keys views.
        yield from map(get1, reversed(stack))
class WindowDictValuesView(ValuesView):
    """Look through all the values that a WindowDict contains."""
    def __contains__(self, value):
        # Linear scan of both stacks; there is no value index.
        past = self._mapping._past
        future = self._mapping._future
        if past:
            for rev, v in past:
                if v == value:
                    return True
        if future:
            for rev, v in future:
                if v == value:
                    return True
        return False
    def __iter__(self):
        # Past values oldest-first, then the future stack in storage order.
        past = self._mapping._past
        future = self._mapping._future
        if past:
            yield from map(get1, past)
        if future:
            yield from map(get1, future)
class WindowDictPastFutureView(Mapping):
    """Abstract class for historical views on WindowDict"""
    # ``stack`` is the WindowDict's own _past or _future list, shared
    # rather than copied (see WindowDict.past()/future()), so the view
    # reflects later seeks on the parent dict.
    __slots__ = ('stack',)
    def __init__(self, stack):
        self.stack = stack
    def __len__(self):
        stack = self.stack
        if not stack:
            return 0
        return len(stack)
class WindowDictPastView(WindowDictPastFutureView):
    """Read-only mapping of just the past of a WindowDict"""
    def __iter__(self):
        # Keys newest-first (stack stores pairs oldest-first).
        stack = self.stack
        if not stack:
            return
        yield from map(get0, reversed(stack))
    def __getitem__(self, key):
        stack = self.stack
        # Bounds check against oldest/newest past revs, then linear scan.
        if not stack or key < stack[0][0] or key > stack[-1][0]:
            raise KeyError
        for rev, value in stack:
            if rev == key:
                return value
        raise KeyError
    def keys(self):
        return WindowDictPastKeysView(self)
    def items(self):
        return WindowDictPastItemsView(self)
    def values(self):
        return WindowDictPastFutureValuesView(self)
class WindowDictFutureView(WindowDictPastFutureView):
    """Read-only mapping of just the future of a WindowDict"""
    def __iter__(self):
        # The future stack stores its earliest pair last (see seek),
        # so reversed() yields keys in ascending order.
        stack = self.stack
        if not stack:
            return
        yield from map(get0, reversed(stack))
    def __getitem__(self, key):
        stack = self.stack
        # stack[-1] is the earliest future rev, stack[0] the latest.
        if not stack or key < stack[-1][0] or key > stack[0][0]:
            raise KeyError
        for rev, value in stack:
            if rev == key:
                return value
        raise KeyError
    def keys(self):
        return WindowDictFutureKeysView(self)
    def items(self):
        return WindowDictFutureItemsView(self)
    def values(self):
        return WindowDictPastFutureValuesView(self)
class WindowDictSlice:
    """A slice of history in which the start is earlier than the stop.

    Iterating yields only the values, not the revisions.
    """
    __slots__ = ['dict', 'slice']
    def __init__(self, dict, slice):
        self.dict = dict
        self.slice = slice
    def __reversed__(self):
        return iter(WindowDictReverseSlice(self.dict, self.slice))
    def __iter__(self):
        dic = self.dict
        if not dic:
            return
        slic = self.slice
        if slic.step is not None:
            for i in range(slic.start or dic.beginning, slic.stop or dic.end, slic.step):
                yield dic[i]
            # BUG FIX: without this return, a stepped slice fell through
            # and yielded the unstepped values a second time.
            return
        if slic.start is None and slic.stop is None:
            # Whole history, oldest to newest.
            yield from map(get1, dic._past)
            yield from map(get1, reversed(dic._future))
        elif None not in (slic.start, slic.stop):
            if slic.stop == slic.start:
                yield dic[slic.stop]
                return
            left, right = (slic.start, slic.stop) if slic.start < slic.stop else (slic.stop, slic.start)
            # After seeking, everything at or before `right` is in _past.
            dic.seek(right)
            if not dic._past:
                return
            past = dic._past.copy()
            popper = getattr(past, 'popleft', lambda: past.pop(0))
            # Drop pairs before the left bound.
            while past and past[0][0] < left:
                popper()
            yield from map(get1, past)
        elif slic.start is None:
            # Unbounded start: everything up to and including slic.stop.
            stac = dic._past + list(reversed(dic._future))
            while stac and stac[-1][0] > slic.stop:
                stac.pop()
            yield from map(get1, stac)
            return
        else:  # slic.stop is None
            # Unbounded stop: everything from slic.start onward.
            if not dic._past and not dic._future:
                return
            chan = chain(dic._past, reversed(dic._future))
            nxt = next(chan)
            while nxt[0] < slic.start:
                try:
                    nxt = next(chan)
                except StopIteration:
                    return
            yield get1(nxt)
            yield from map(get1, chan)
class WindowDictReverseSlice:
    """A slice of history in which the start is later than the stop.

    Iterating yields only the values, newest to oldest.
    """
    __slots__ = ['dict', 'slice']
    def __init__(self, dict, slice):
        self.dict = dict
        self.slice = slice
    def __reversed__(self):
        return iter(WindowDictSlice(self.dict, self.slice))
    def __iter__(self):
        dic = self.dict
        if not dic:
            return
        slic = self.slice
        if slic.step is not None:
            for i in range(slic.start or dic.end, slic.stop or dic.beginning, slic.step):
                yield dic[i]
            # BUG FIX: without this return, a stepped slice fell through
            # and yielded the unstepped values a second time.
            return
        if slic.start is None and slic.stop is None:
            # Whole history, newest to oldest.
            yield from map(get1, dic._future)
            yield from map(get1, reversed(dic._past))
        elif None not in (slic.start, slic.stop):
            if slic.start == slic.stop:
                yield dic[slic.stop]
                return
            left, right = (slic.start, slic.stop) if slic.start < slic.stop else (slic.stop, slic.start)
            # After seeking, everything at or before `right` is in _past.
            dic.seek(right)
            for frev, fv in reversed(dic._past):
                if frev <= left:
                    return
                yield fv
        elif slic.start is None:
            # Unbounded start: from the newest down to slic.stop.
            stac = dic._past + list(reversed(dic._future))
            while stac and stac[-1][0] > slic.stop:
                stac.pop()
            yield from map(get1, reversed(stac))
        else:  # slic.stop is None
            # Unbounded stop: from the newest down to slic.start.
            stac = deque(dic._past)
            stac.extend(reversed(dic._future))
            while stac and stac[0][0] < slic.start:
                stac.popleft()
            yield from map(get1, reversed(stac))
class WindowDict(MutableMapping):
    """A dict that keeps every value that a variable has had over time.
    Look up a revision number in this dict and it will give you the
    effective value as of that revision. Keys should always be
    revision numbers. Once a key is set, all greater keys are
    considered to be in this dict unless the value is ``None``. Keys
    after that one aren't "set" until one's value is non-``None``
    again.
    Optimized for the cases where you look up the same revision
    repeatedly, or its neighbors.
    This supports slice notation to get all values in a given
    time-frame. If you do not supply a step, you'll just get the
    values, with no indication of when they're from exactly --
    so explicitly supply a step of 1 to get the value at each point in
    the slice, or use the ``future`` and ``past`` methods to get read-only
    mappings of data relative to when you last got an item from this.
    Unlike slices of eg. lists, you can slice with a start greater than the stop
    even if you don't supply a step. That will get you values in reverse order,
    still without retaining the revision they're from.
    """
    # _past holds (rev, value) pairs oldest-first; _future holds pairs
    # with the *earliest* future pair last, so both stacks push/pop at
    # the boundary nearest the last-sought revision. _keys caches every
    # stored rev for O(1) __contains__.
    __slots__ = ('_future', '_past', '_keys')
    def future(self, rev=None):
        """Return a Mapping of items after the given revision."""
        if rev is not None:
            self.seek(rev)
        return WindowDictFutureView(self._future)
    def past(self, rev=None):
        """Return a Mapping of items at or before the given revision."""
        if rev is not None:
            self.seek(rev)
        return WindowDictPastView(self._past)
    @cython.locals(rev=cython.int, past_end=cython.int, future_start=cython.int)
    def seek(self, rev):
        """Arrange the caches to help look up the given revision."""
        # TODO: binary search? Perhaps only when one or the other
        # stack is very large?
        if not self:
            return
        if type(rev) is not int:
            raise TypeError("rev must be int")
        past = self._past
        future = self._future
        past_end = -1 if not past else past[-1][0]
        future_start = -1 if not future else future[-1][0]
        # Already positioned: newest past rev <= rev < earliest future rev.
        if past and past_end <= rev and (
            not future or future_start > rev
        ):
            return
        if future:
            # Move pairs at or before rev from the future onto the past.
            appender = past.append
            popper = future.pop
            while future_start <= rev:
                appender(popper())
                if future:
                    future_start = future[-1][0]
                else:
                    break
        if past:
            # Move pairs after rev from the past onto the future.
            popper = past.pop
            appender = future.append
            while past_end > rev:
                appender(popper())
                if past:
                    past_end = past[-1][0]
                else:
                    break
    def rev_gettable(self, rev: int) -> bool:
        """Whether history started at or before ``rev``."""
        if self._past:
            return rev >= self._past[0][0]
        elif self._future:
            return rev >= self._future[0][0]
        else:
            return False
    def rev_before(self, rev: int) -> int:
        """Return the latest past rev on which the value changed."""
        self.seek(rev)
        if self._past:
            return self._past[-1][0]
    def rev_after(self, rev: int) -> int:
        """Return the earliest future rev on which the value will change."""
        self.seek(rev)
        if self._future:
            return self._future[-1][0]
    def truncate(self, rev: int) -> None:
        """Delete everything after the given revision."""
        self.seek(rev)
        # BUG FIX: forget the truncated revisions in the cached key set,
        # otherwise __contains__ and the keys view keep reporting them
        # as present after they're gone.
        if self._future:
            self._keys.difference_update(map(get0, self._future))
        self._future = []
    @property
    def beginning(self) -> int:
        """The earliest rev in history; raises HistoryError when empty."""
        if self._past:
            return self._past[0][0]
        elif self._future:
            return self._future[-1][0]
        else:
            raise HistoryError("No history yet")
    @property
    def end(self) -> int:
        """The latest rev in history; raises HistoryError when empty."""
        if self._future:
            return self._future[0][0]
        elif self._past:
            return self._past[-1][0]
        else:
            raise HistoryError("No history yet")
    def keys(self):
        return WindowDictKeysView(self)
    def items(self):
        return WindowDictItemsView(self)
    def values(self):
        return WindowDictValuesView(self)
    def __bool__(self):
        return bool(self._past) or bool(self._future)
    def __init__(self, data=None):
        if not data:
            self._past = []
        elif hasattr(data, 'items'):
            self._past = list(sorted(data.items()))
        else:
            # assume it's an orderable sequence of pairs
            self._past = list(sorted(data))
        self._future = []
        self._keys = set(map(get0, self._past or ()))
    def __iter__(self):
        if not self:
            return
        if self._past:
            yield from map(get0, self._past)
        if self._future:
            yield from map(get0, self._future)
    def __contains__(self, item):
        return item in self._keys
    def __len__(self):
        return len(self._past or ()) + len(self._future or ())
    def __getitem__(self, rev):
        if not self:
            raise HistoryError("No history yet")
        if isinstance(rev, slice):
            # Reversed iteration when both bounds are given out of order.
            if None not in (rev.start, rev.stop) and rev.start > rev.stop:
                return WindowDictReverseSlice(self, rev)
            return WindowDictSlice(self, rev)
        self.seek(rev)
        past = self._past
        if not past:
            raise HistoryError(
                "Revision {} is before the start of history".format(rev)
            )
        # Effective value: the newest pair at or before rev.
        return past[-1][1]
    @cython.locals(past_start=cython.int, past_end=cython.int, future_start=cython.int, have_past=cython.bint, have_future=cython.bint, rev=cython.int)
    def __setitem__(self, rev, v):
        if hasattr(v, 'unwrap') and not hasattr(v, 'no_unwrap'):
            v = v.unwrap()
        past = self._past
        future = self._future
        have_past = bool(past)
        have_future = bool(future)
        past_start = -1 if not have_past else past[0][0]
        past_end = -1 if not have_past else past[-1][0]
        future_start = -1 if not have_future else future[-1][0]
        # Fast paths that avoid a seek: empty dict, prepend, overwrite
        # either end of the past stack, or append before any future rev.
        if not have_past and not have_future:
            past.append((rev, v))
        elif have_past and rev < past_start:
            past.insert(0, (rev, v))
        elif have_past and rev == past_start:
            past[0] = (rev, v)
        elif have_past and rev == past_end:
            past[-1] = (rev, v)
        elif have_past and (
            not have_future or
            rev < future_start
        ) and rev > past_end:
            past.append((rev, v))
        else:
            # General case: position the stacks around rev first.
            self.seek(rev)
            past = self._past
            future = self._future
            past_end = -1 if not past else past[-1][0]
            if not past:
                past.append((rev, v))
            elif past_end == rev:
                past[-1] = (rev, v)
            else:
                assert past_end < rev
                past.append((rev, v))
        self._keys.add(rev)
    @cython.locals(rev=cython.int, past_end=cython.int)
    def __delitem__(self, rev):
        # Not checking for rev's presence at the beginning because
        # to do so would likely require iterating thru history,
        # which I have to do anyway in deleting.
        # But handle degenerate case.
        if not self:
            raise HistoryError("Tried to delete from an empty WindowDict")
        if not self.beginning <= rev <= self.end:
            raise HistoryError("Rev outside of history: {}".format(rev))
        self.seek(rev)
        past = self._past
        past_end = -1 if not past else past[-1][0]
        if not past or past_end != rev:
            raise HistoryError("Rev not present: {}".format(rev))
        del self._past[-1]
        self._keys.remove(rev)
    def __repr__(self):
        me = dict(self._past)
        me.update(self._future)
        return "{}({})".format(self.__class__.__name__, me)
class FuturistWindowDict(WindowDict):
    """A WindowDict that does not let you rewrite the past."""
    # FIX: the parent already declares '_future' and '_past';
    # redeclaring them here created duplicate, shadowing slot
    # descriptors and wasted per-instance memory.
    __slots__ = ()
    def __setitem__(self, rev, v):
        if hasattr(v, 'unwrap') and not hasattr(v, 'no_unwrap'):
            v = v.unwrap()
        # Fast path: first value ever, or appending past the end of
        # history with no future pending. (The redundant
        # ``self._past and`` guard inside the ``or`` was dropped --
        # the right operand only runs when _past is truthy.)
        if not self._past or (
                not self._future and
                rev > self._past[-1][0]
        ):
            self._past.append((rev, v))
            self._keys.add(rev)
            return
        self.seek(rev)
        past = self._past
        future = self._future
        # Anything still in the future after seeking rev means the
        # caller is trying to rewrite history.
        if future:
            raise HistoryError(
                "Already have some history after {}".format(rev)
            )
        if not past or rev > past[-1][0]:
            past.append((rev, v))
        elif rev == past[-1][0]:
            past[-1] = (rev, v)
        else:
            raise HistoryError(
                "Already have some history after {} "
                "(and my seek function is broken?)".format(rev)
            )
        self._keys.add(rev)
class TurnDict(FuturistWindowDict):
    """FuturistWindowDict whose values are themselves FuturistWindowDicts."""
    # FIX: ancestors already declare every slot; the duplicate
    # ('_future', '_past') declaration only wasted memory.
    __slots__ = ()
    cls = FuturistWindowDict
    def __getitem__(self, rev):
        if self.rev_gettable(rev):
            return FuturistWindowDict.__getitem__(self, rev)
        # Autovivify an empty tick-dict for revs before history began.
        ret = self[rev] = FuturistWindowDict()
        return ret
    def __setitem__(self, turn, value):
        # Coerce plain mappings/sequences into the required value type.
        if type(value) is not FuturistWindowDict:
            value = FuturistWindowDict(value)
        FuturistWindowDict.__setitem__(self, turn, value)
class SettingsTurnDict(WindowDict):
    """WindowDict whose values are themselves WindowDicts."""
    # FIX: the parent already declares every slot; the duplicate
    # ('_future', '_past') declaration only wasted memory.
    __slots__ = ()
    cls = WindowDict
    def __getitem__(self, rev):
        if self.rev_gettable(rev):
            return WindowDict.__getitem__(self, rev)
        # Autovivify an empty tick-dict for revs before history began.
        ret = self[rev] = WindowDict()
        return ret
    def __setitem__(self, turn, value):
        # Coerce plain mappings/sequences into the required value type.
        if type(value) is not WindowDict:
            value = WindowDict(value)
        WindowDict.__setitem__(self, turn, value)
Update cached keys when truncating WindowDict
# This file is part of allegedb, an object-relational mapper for versioned graphs.
# Copyright (C) Zachary Spector. public@zacharyspector.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""WindowDict, the core data structure used by allegedb's caching system.
It resembles a dictionary, more specifically a defaultdict-like where retrieving
a key that isn't set will get the highest set key that is lower than the key
you asked for (and thus, keys must be orderable). It is optimized for retrieval
of the same key and neighboring ones repeatedly and in sequence.
"""
from collections import deque, Mapping, MutableMapping, KeysView, ItemsView, ValuesView
from operator import itemgetter
from itertools import chain
try:
import cython
except ImportError:
class cython:
def locals(**kwargs):
def passthru(fun):
return fun
return passthru
cfunc = locals
int = None
bint = None
get0 = itemgetter(0)
get1 = itemgetter(1)
# TODO: cancel changes that would put something back to where it was at the start
# This will complicate the update_window functions though, and I don't think it'll
# improve much apart from a bit of efficiency in that the deltas are smaller
# sometimes.
def update_window(turn_from, tick_from, turn_to, tick_to, updfun, branchd):
"""Iterate over a window of time in ``branchd`` and call ``updfun`` on the values"""
if turn_from in branchd:
# Not including the exact tick you started from because deltas are *changes*
for past_state in branchd[turn_from][tick_from+1:]:
updfun(*past_state)
for midturn in range(turn_from+1, turn_to):
if midturn in branchd:
for past_state in branchd[midturn][:]:
updfun(*past_state)
if turn_to in branchd:
for past_state in branchd[turn_to][:tick_to]:
updfun(*past_state)
def update_backward_window(turn_from, tick_from, turn_to, tick_to, updfun, branchd):
"""Iterate backward over a window of time in ``branchd`` and call ``updfun`` on the values"""
if turn_from in branchd:
for future_state in reversed(branchd[turn_from][:tick_from]):
updfun(*future_state)
for midturn in range(turn_from-1, turn_to, -1):
if midturn in branchd:
for future_state in reversed(branchd[midturn][:]):
updfun(*future_state)
if turn_to in branchd:
for future_state in reversed(branchd[turn_to][tick_to+1:]):
updfun(*future_state)
class HistoryError(KeyError):
"""You tried to access the past in a bad way."""
def __init__(self, *args, deleted=False):
super().__init__(*args)
self.deleted = deleted
def within_history(rev, windowdict):
"""Return whether the windowdict has history at the revision."""
if not windowdict:
return False
begin = windowdict._past[0][0] if windowdict._past else \
windowdict._future[-1][0]
end = windowdict._future[0][0] if windowdict._future else \
windowdict._past[-1][0]
return begin <= rev <= end
class WindowDictKeysView(KeysView):
"""Look through all the keys a WindowDict contains."""
def __contains__(self, rev):
return rev in self._mapping._keys
def __iter__(self):
past = self._mapping._past
future = self._mapping._future
if past:
yield from map(get0, past)
if future:
yield from map(get0, future)
class WindowDictItemsView(ItemsView):
"""Look through everything a WindowDict contains."""
def __contains__(self, item):
(rev, v) = item
mapp = self._mapping
if not within_history(rev, mapp):
return False
for mrev, mv in mapp._past:
if mrev == rev:
return mv == v
for mrev, mv in mapp._future:
if mrev == rev:
return mv == v
return False
def __iter__(self):
past = self._mapping._past
future = self._mapping._future
if past:
yield from past
if future:
yield from future
class WindowDictPastKeysView(KeysView):
"""View on a WindowDict's past keys relative to last lookup"""
def __iter__(self):
if not self._mapping.stack:
return
yield from map(get0, reversed(self._mapping.stack))
def __contains__(self, item):
if not self._mapping.stack:
return False
stack = self._mapping.stack
if not stack or item < stack[0][0] or item > stack[-1][0]:
return False
for rev in map(get0, stack):
if rev == item:
return True
return False
class WindowDictFutureKeysView(KeysView):
"""View on a WindowDict's future keys relative to last lookup"""
def __iter__(self):
if not self._mapping.stack:
return
yield from map(get0, reversed(self._mapping.stack))
def __contains__(self, item):
if not self._mapping.stack:
return False
stack = self._mapping.stack
if not stack or item < stack[-1][0] or item > stack[0][0]:
return False
for rev in map(get0, stack):
if rev == item:
return True
return False
class WindowDictPastItemsView(ItemsView):
"""View on a WindowDict's past items relative to last lookup"""
def __iter__(self):
if not self._mapping.stack:
return
yield from reversed(self._mapping.stack)
def __contains__(self, item):
stack = self._mapping.stack
if not stack or item[0] < stack[0][0] or item[0] > stack[-1][0]:
return False
return item in stack
class WindowDictFutureItemsView(ItemsView):
"""View on a WindowDict's future items relative to last lookup"""
def __iter__(self):
stack = self._mapping.stack
if not stack:
return
yield from stack
def __contains__(self, item):
stack = self._mapping.stack
if not stack or item[0] < stack[-1][0] or item[0] > stack[0][0]:
return False
return item in stack
class WindowDictPastFutureValuesView(ValuesView):
"""Abstract class for views on the past or future values of a WindowDict"""
def __contains__(self, item):
stack = self._mapping.stack
if not stack:
return False
for v in map(get1, stack):
if v == item:
return True
return False
def __iter__(self):
stack = self._mapping.stack
if not stack:
return
yield from map(get1, reversed(stack))
class WindowDictValuesView(ValuesView):
"""Look through all the values that a WindowDict contains."""
def __contains__(self, value):
past = self._mapping._past
future = self._mapping._future
if past:
for rev, v in past:
if v == value:
return True
if future:
for rev, v in future:
if v == value:
return True
return False
def __iter__(self):
past = self._mapping._past
future = self._mapping._future
if past:
yield from map(get1, past)
if future:
yield from map(get1, future)
class WindowDictPastFutureView(Mapping):
"""Abstract class for historical views on WindowDict"""
__slots__ = ('stack',)
def __init__(self, stack):
self.stack = stack
def __len__(self):
stack = self.stack
if not stack:
return 0
return len(stack)
class WindowDictPastView(WindowDictPastFutureView):
"""Read-only mapping of just the past of a WindowDict"""
def __iter__(self):
stack = self.stack
if not stack:
return
yield from map(get0, reversed(stack))
def __getitem__(self, key):
stack = self.stack
if not stack or key < stack[0][0] or key > stack[-1][0]:
raise KeyError
for rev, value in stack:
if rev == key:
return value
raise KeyError
def keys(self):
return WindowDictPastKeysView(self)
def items(self):
return WindowDictPastItemsView(self)
def values(self):
return WindowDictPastFutureValuesView(self)
class WindowDictFutureView(WindowDictPastFutureView):
    """Read-only mapping of just the future of a WindowDict"""

    def __iter__(self):
        # Revisions in reverse stack order.
        stack = self.stack
        if stack:
            yield from map(get0, reversed(stack))

    def __getitem__(self, key):
        # The future stack is bounded by stack[-1][0] (low end) and
        # stack[0][0] (high end); reject keys outside that range.
        stack = self.stack
        if stack and stack[-1][0] <= key <= stack[0][0]:
            for rev, value in stack:
                if rev == key:
                    return value
        raise KeyError

    def keys(self):
        return WindowDictFutureKeysView(self)

    def items(self):
        return WindowDictFutureItemsView(self)

    def values(self):
        return WindowDictPastFutureValuesView(self)
class WindowDictSlice:
    """A slice of history in which the start is earlier than the stop.

    Iterating yields the *values* (not the revisions) in chronological
    order, per the semantics of the owning WindowDict's slice support.
    """
    __slots__ = ['dict', 'slice']

    def __init__(self, dict, slice):
        # dict: the WindowDict being sliced; slice: a builtin slice object.
        self.dict = dict
        self.slice = slice

    def __reversed__(self):
        return iter(WindowDictReverseSlice(self.dict, self.slice))

    def __iter__(self):
        dic = self.dict
        if not dic:
            return
        slic = self.slice
        if slic.step is not None:
            # Stepped slice: look up each revision individually.
            for i in range(slic.start or dic.beginning, slic.stop or dic.end, slic.step):
                yield dic[i]
            # BUG FIX: without this return, control fell through into the
            # no-step branches below and the same span was yielded twice.
            return
        if slic.start is None and slic.stop is None:
            # Whole history: past in order, then future (stored reversed).
            yield from map(get1, dic._past)
            yield from map(get1, reversed(dic._future))
        elif None not in (slic.start, slic.stop):
            if slic.stop == slic.start:
                yield dic[slic.stop]
                return
            left, right = (slic.start, slic.stop) if slic.start < slic.stop else (slic.stop, slic.start)
            # Position the dict at the right edge so _past holds the span.
            dic.seek(right)
            if not dic._past:
                return
            past = dic._past.copy()
            # _past may be a list or a deque-like; prefer popleft if present.
            popper = getattr(past, 'popleft', lambda: past.pop(0))
            while past and past[0][0] < left:
                popper()
            yield from map(get1, past)
        elif slic.start is None:
            # Open left edge: everything up to and including slic.stop.
            stac = dic._past + list(reversed(dic._future))
            while stac and stac[-1][0] > slic.stop:
                stac.pop()
            yield from map(get1, stac)
            return
        else:  # slic.stop is None
            # Open right edge: everything from slic.start onward.
            if not dic._past and not dic._future:
                return
            chan = chain(dic._past, reversed(dic._future))
            nxt = next(chan)
            while nxt[0] < slic.start:
                try:
                    nxt = next(chan)
                except StopIteration:
                    return
            yield get1(nxt)
            yield from map(get1, chan)
class WindowDictReverseSlice:
    """A slice of history in which the start is later than the stop.

    Iterating yields the *values* in reverse chronological order.
    """
    __slots__ = ['dict', 'slice']

    def __init__(self, dict, slice):
        # dict: the WindowDict being sliced; slice: a builtin slice object.
        self.dict = dict
        self.slice = slice

    def __reversed__(self):
        return iter(WindowDictSlice(self.dict, self.slice))

    def __iter__(self):
        dic = self.dict
        if not dic:
            return
        slic = self.slice
        if slic.step is not None:
            # Stepped slice (step is expected to be negative here).
            for i in range(slic.start or dic.end, slic.stop or dic.beginning, slic.step):
                yield dic[i]
            # BUG FIX: without this return, control fell through into the
            # no-step branches below and the same span was yielded twice.
            return
        if slic.start is None and slic.stop is None:
            # Whole history, newest first: future (stored reversed), then
            # past reversed.
            yield from map(get1, dic._future)
            yield from map(get1, reversed(dic._past))
        elif None not in (slic.start, slic.stop):
            if slic.start == slic.stop:
                yield dic[slic.stop]
                return
            left, right = (slic.start, slic.stop) if slic.start < slic.stop else (slic.stop, slic.start)
            dic.seek(right)
            # Walk _past backward, stopping at the left edge.
            for frev, fv in reversed(dic._past):
                if frev <= left:
                    return
                yield fv
        elif slic.start is None:
            # Open left edge: everything up to slic.stop, newest first.
            stac = dic._past + list(reversed(dic._future))
            while stac and stac[-1][0] > slic.stop:
                stac.pop()
            yield from map(get1, reversed(stac))
        else:  # slic.stop is None
            # Open right edge: everything from slic.start on, newest first.
            stac = deque(dic._past)
            stac.extend(reversed(dic._future))
            while stac and stac[0][0] < slic.start:
                stac.popleft()
            yield from map(get1, reversed(stac))
class WindowDict(MutableMapping):
    """A dict that keeps every value that a variable has had over time.

    Look up a revision number in this dict and it will give you the
    effective value as of that revision. Keys should always be
    revision numbers. Once a key is set, all greater keys are
    considered to be in this dict unless the value is ``None``. Keys
    after that one aren't "set" until one's value is non-``None``
    again.

    Optimized for the cases where you look up the same revision
    repeatedly, or its neighbors.

    This supports slice notation to get all values in a given
    time-frame. If you do not supply a step, you'll just get the
    values, with no indication of when they're from exactly --
    so explicitly supply a step of 1 to get the value at each point in
    the slice, or use the ``future`` and ``past`` methods to get read-only
    mappings of data relative to when you last got an item from this.

    Unlike slices of eg. lists, you can slice with a start greater than the stop
    even if you don't supply a step. That will get you values in reverse order,
    still without retaining the revision they're from.
    """
    # _past: (rev, value) pairs at or before the last-sought revision,
    #   in chronological order -- _past[-1] is the currently effective pair.
    # _future: (rev, value) pairs after the last-sought revision, stored
    #   in reverse chronological order -- _future[-1] is the next change
    #   (this is what seek() pops first when advancing).
    # _keys: set of every revision present, for O(1) __contains__.
    __slots__ = ('_future', '_past', '_keys')

    def future(self, rev=None):
        """Return a Mapping of items after the given revision."""
        if rev is not None:
            self.seek(rev)
        return WindowDictFutureView(self._future)

    def past(self, rev=None):
        """Return a Mapping of items at or before the given revision."""
        if rev is not None:
            self.seek(rev)
        return WindowDictPastView(self._past)

    @cython.locals(rev=cython.int, past_end=cython.int, future_start=cython.int)
    def seek(self, rev):
        """Arrange the caches to help look up the given revision."""
        # TODO: binary search? Perhaps only when one or the other
        # stack is very large?
        if not self:
            return
        if type(rev) is not int:
            raise TypeError("rev must be int")
        past = self._past
        future = self._future
        # -1 is only a sentinel for "stack empty"; real revs guard its use.
        past_end = -1 if not past else past[-1][0]
        future_start = -1 if not future else future[-1][0]
        # Already positioned: newest past entry is at/before rev and the
        # next future entry (if any) is strictly after it.
        if past and past_end <= rev and (
            not future or future_start > rev
        ):
            return
        if future:
            # Advance: move entries no later than rev from future to past.
            appender = past.append
            popper = future.pop
            while future_start <= rev:
                appender(popper())
                if future:
                    future_start = future[-1][0]
                else:
                    break
        if past:
            # Rewind: move entries later than rev from past to future.
            popper = past.pop
            appender = future.append
            while past_end > rev:
                appender(popper())
                if past:
                    past_end = past[-1][0]
                else:
                    break

    def rev_gettable(self, rev: int) -> bool:
        # True when ``rev`` is at or after the earliest recorded revision.
        # NOTE(review): ``beginning`` treats self._future[-1][0] as the
        # earliest rev when _past is empty; the use of self._future[0][0]
        # here looks inconsistent with that -- confirm intended behavior.
        if self._past:
            return rev >= self._past[0][0]
        elif self._future:
            return rev >= self._future[0][0]
        else:
            return False

    def rev_before(self, rev: int) -> int:
        """Return the latest past rev on which the value changed."""
        # Implicitly returns None when nothing is at or before ``rev``.
        self.seek(rev)
        if self._past:
            return self._past[-1][0]

    def rev_after(self, rev: int) -> int:
        """Return the earliest future rev on which the value will change."""
        # Implicitly returns None when there is no future.
        self.seek(rev)
        if self._future:
            return self._future[-1][0]

    def truncate(self, rev: int) -> None:
        """Delete everything after the given revision."""
        self.seek(rev)
        self._keys.difference_update(map(get0, self._future))
        self._future = []

    @property
    def beginning(self) -> int:
        # Earliest recorded revision; raises if the dict is empty.
        if self._past:
            return self._past[0][0]
        elif self._future:
            return self._future[-1][0]
        else:
            raise HistoryError("No history yet")

    @property
    def end(self) -> int:
        # Latest recorded revision; raises if the dict is empty.
        if self._future:
            return self._future[0][0]
        elif self._past:
            return self._past[-1][0]
        else:
            raise HistoryError("No history yet")

    def keys(self):
        return WindowDictKeysView(self)

    def items(self):
        return WindowDictItemsView(self)

    def values(self):
        return WindowDictValuesView(self)

    def __bool__(self):
        return bool(self._past) or bool(self._future)

    def __init__(self, data=None):
        # Accepts nothing, a mapping, or an orderable iterable of
        # (rev, value) pairs; everything starts out in the past stack.
        if not data:
            self._past = []
        elif hasattr(data, 'items'):
            self._past = list(sorted(data.items()))
        else:
            # assume it's an orderable sequence of pairs
            self._past = list(sorted(data))
        self._future = []
        self._keys = set(map(get0, self._past or ()))

    def __iter__(self):
        # Past revs in chronological order, then future revs in their
        # internal stack order.
        if not self:
            return
        if self._past:
            yield from map(get0, self._past)
        if self._future:
            yield from map(get0, self._future)

    def __contains__(self, item):
        return item in self._keys

    def __len__(self):
        return len(self._past or ()) + len(self._future or ())

    def __getitem__(self, rev):
        # Value effective at ``rev``; slices return lazy slice objects.
        if not self:
            raise HistoryError("No history yet")
        if isinstance(rev, slice):
            # Start after stop (both given) means a reverse slice.
            if None not in (rev.start, rev.stop) and rev.start > rev.stop:
                return WindowDictReverseSlice(self, rev)
            return WindowDictSlice(self, rev)
        self.seek(rev)
        past = self._past
        if not past:
            raise HistoryError(
                "Revision {} is before the start of history".format(rev)
            )
        return past[-1][1]

    @cython.locals(past_start=cython.int, past_end=cython.int, future_start=cython.int, have_past=cython.bint, have_future=cython.bint, rev=cython.int)
    def __setitem__(self, rev, v):
        # Unwrap proxy/wrapper values unless they opt out via no_unwrap.
        if hasattr(v, 'unwrap') and not hasattr(v, 'no_unwrap'):
            v = v.unwrap()
        past = self._past
        future = self._future
        have_past = bool(past)
        have_future = bool(future)
        past_start = -1 if not have_past else past[0][0]
        past_end = -1 if not have_past else past[-1][0]
        future_start = -1 if not have_future else future[-1][0]
        # Fast paths that avoid a seek():
        if not have_past and not have_future:
            # first entry ever
            past.append((rev, v))
        elif have_past and rev < past_start:
            # before all recorded history
            past.insert(0, (rev, v))
        elif have_past and rev == past_start:
            # overwrite the oldest entry
            past[0] = (rev, v)
        elif have_past and rev == past_end:
            # overwrite the current entry
            past[-1] = (rev, v)
        elif have_past and (
            not have_future or
            rev < future_start
        ) and rev > past_end:
            # between the newest past entry and the next future one
            past.append((rev, v))
        else:
            # Slow path: position the stacks at rev, then set or append.
            self.seek(rev)
            past = self._past
            future = self._future
            past_end = -1 if not past else past[-1][0]
            if not past:
                past.append((rev, v))
            elif past_end == rev:
                past[-1] = (rev, v)
            else:
                assert past_end < rev
                past.append((rev, v))
        self._keys.add(rev)

    @cython.locals(rev=cython.int, past_end=cython.int)
    def __delitem__(self, rev):
        # Not checking for rev's presence at the beginning because
        # to do so would likely require iterating thru history,
        # which I have to do anyway in deleting.
        # But handle degenerate case.
        if not self:
            raise HistoryError("Tried to delete from an empty WindowDict")
        if not self.beginning <= rev <= self.end:
            raise HistoryError("Rev outside of history: {}".format(rev))
        self.seek(rev)
        past = self._past
        past_end = -1 if not past else past[-1][0]
        if not past or past_end != rev:
            raise HistoryError("Rev not present: {}".format(rev))
        del self._past[-1]
        self._keys.remove(rev)

    def __repr__(self):
        me = dict(self._past)
        me.update(self._future)
        return "{}({})".format(self.__class__.__name__, me)
class FuturistWindowDict(WindowDict):
    """A WindowDict that does not let you rewrite the past."""
    __slots__ = ('_future', '_past')

    def __setitem__(self, rev, v):
        # Unwrap proxy values unless they opt out.
        if hasattr(v, 'unwrap') and not hasattr(v, 'no_unwrap'):
            v = v.unwrap()
        # Fast path: nothing recorded yet, or appending strictly past the
        # end with no future entries to collide with.
        if not self._past or (not self._future and rev > self._past[-1][0]):
            self._past.append((rev, v))
            self._keys.add(rev)
            return
        self.seek(rev)
        past = self._past
        future = self._future
        if future:
            raise HistoryError(
                "Already have some history after {}".format(rev)
            )
        if not past or rev > past[-1][0]:
            past.append((rev, v))
        elif rev == past[-1][0]:
            past[-1] = (rev, v)
        else:
            raise HistoryError(
                "Already have some history after {} "
                "(and my seek function is broken?)".format(rev)
            )
        self._keys.add(rev)
class TurnDict(FuturistWindowDict):
    """Maps turn numbers to FuturistWindowDicts of per-tick values."""
    __slots__ = ('_future', '_past')
    cls = FuturistWindowDict

    def __getitem__(self, rev):
        # Create (and store) an empty FuturistWindowDict for revisions
        # before any recorded history.
        if not self.rev_gettable(rev):
            fresh = FuturistWindowDict()
            self[rev] = fresh
            return fresh
        return FuturistWindowDict.__getitem__(self, rev)

    def __setitem__(self, turn, value):
        # Coerce anything that isn't exactly a FuturistWindowDict.
        coerced = value if type(value) is FuturistWindowDict else FuturistWindowDict(value)
        FuturistWindowDict.__setitem__(self, turn, coerced)
class SettingsTurnDict(WindowDict):
    """Maps turn numbers to WindowDicts of per-tick settings."""
    __slots__ = ('_future', '_past')
    cls = WindowDict

    def __getitem__(self, rev):
        # Create (and store) an empty WindowDict for revisions before
        # any recorded history.
        if not self.rev_gettable(rev):
            fresh = WindowDict()
            self[rev] = fresh
            return fresh
        return WindowDict.__getitem__(self, rev)

    def __setitem__(self, turn, value):
        # Coerce anything that isn't exactly a WindowDict.
        coerced = value if type(value) is WindowDict else WindowDict(value)
        WindowDict.__setitem__(self, turn, coerced)
# --- end of window-dict module; an unrelated kokoropy model module follows ---
from sqlalchemy import create_engine, Column, func, BIGINT, BigInteger, BINARY, Binary,\
BOOLEAN, Boolean, DATE, Date, DATETIME, DateTime, FLOAT, Float,\
INTEGER, Integer, VARCHAR, String, TEXT, Text, MetaData
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from alembic.migration import MigrationContext
from alembic.operations import Operations
import datetime, time, json
from kokoropy import Fore, Back, base_url
# create Base: the single declarative base class every kokoropy model
# (including Model below) inherits from.
Base = declarative_base()
class Model(Base):
    '''
    Abstract active-record base class for kokoropy models.

    Provides bookkeeping columns, soft-delete (trash/untrash), id
    generation, serialization (to_dict/to_json) and HTML scaffolding
    (generate_input_view/generate_detail_view).
    '''
    # Surrogate integer primary key; ``id`` below is the generated
    # human-readable business key.
    _real_id = Column(Integer, primary_key=True)
    # Soft-delete flag: trash()/untrash() toggle this instead of
    # deleting rows.
    _trashed = Column(Boolean, default=False)
    _created_at = Column(DateTime, default=func.now())
    _updated_at = Column(DateTime, default=func.now(), onupdate=func.now())
    # Generated key like '20140131-001'; see generate_id().
    id = Column(String(35), unique=True)
    __abstract__ = True
    __connectionstring__ = ''  # SQLAlchemy database URL; override per app
    __echo__ = True  # echo SQL statements from the engine
    __prefixid__ = '%Y%m%d-'  # strftime pattern prefixing generated ids
    __digitid__ = 3  # zero-padded digits after the prefix
    __showncolumn__ = None  # cache/override for _shown_column
    __formcolumn__ = None  # override for _form_column
    __insertformcolumn__ = None  # override for _insert_form_column
    __updateformcolumn__ = None  # override for _update_form_column
    @declared_attr
    def __tablename__(cls):
        # Table name defaults to the lowercased class name.
        return cls.__name__.lower()
@property
def _shown_column(self):
    """Column and relation names to display.

    Skips the bookkeeping columns and any column whose name starts
    with ``fk_`` (foreign keys); appends relation names. Computed
    once, then cached on the instance via ``__showncolumn__``.
    """
    if self.__showncolumn__ is None:
        self.__showncolumn__ = []
        for column in self.__table__.columns:
            column = column.name
            # skip bookkeeping columns and 'fk_*' foreign-key columns
            if column in ['_real_id', '_created_at', '_updated_at', '_trashed'] or column.split('_')[0] == 'fk':
                continue
            self.__showncolumn__.append(column)
        for relation_name in self._get_relation_name():
            self.__showncolumn__.append(relation_name)
    return self.__showncolumn__

@property
def _form_column(self):
    """Columns used in forms; defaults to ``_shown_column``."""
    if self.__formcolumn__ is None:
        return self._shown_column
    return self.__formcolumn__

@property
def _insert_form_column(self):
    """Columns used in the insert form; defaults to ``_form_column``."""
    if self.__insertformcolumn__ is None:
        return self._form_column
    return self.__insertformcolumn__

@property
def _update_form_column(self):
    """Columns used in the update form; defaults to ``_form_column``.

    BUG FIX: this previously read ``__insertformcolumn__`` (copy-paste
    error), so ``__updateformcolumn__`` was declared but never honored.
    """
    if self.__updateformcolumn__ is None:
        return self._form_column
    return self.__updateformcolumn__
@property
def engine(self):
    # SQLAlchemy engine, lazily created from __connectionstring__.
    # If a session already exists, reuse the engine it is bound to.
    if hasattr(self, '__session__'):
        self.__engine__ = self.session.bind
    elif not hasattr(self, '__engine__'):
        self.__engine__ = create_engine(self.__connectionstring__, echo=self.__echo__)
    return self.__engine__

@property
def session(self):
    # Lazily created scoped session bound to the (possibly new) engine.
    if not hasattr(self, '__session__'):
        if not hasattr(self, '__engine__'):
            self.__engine__ = create_engine(self.__connectionstring__, echo=self.__echo__)
        self.__session__ = scoped_session(sessionmaker(bind=self.__engine__))
    return self.__session__
@property
def error_message(self):
    """Last recorded error message, or '' if none."""
    return getattr(self, '_error_message', '')

@error_message.setter
def error_message(self, val):
    self._error_message = val

@property
def generated_html(self):
    """HTML produced by the last generate_* call, or ''."""
    return getattr(self, '_generated_html', '')

@generated_html.setter
def generated_html(self, val):
    self._generated_html = val

@property
def generated_style(self):
    """CSS produced by the last generate_* call, or ''."""
    return getattr(self, '_generated_style', '')

@generated_style.setter
def generated_style(self, val):
    self._generated_style = val

@property
def generated_script(self):
    """Javascript produced by the last generate_* call, or ''."""
    return getattr(self, '_generated_script', '')

@generated_script.setter
def generated_script(self, val):
    self._generated_script = val

@property
def success(self):
    """Whether the last operation/hook succeeded; defaults to True."""
    return getattr(self, '_success', True)

@success.setter
def success(self, val):
    self._success = val
@classmethod
def get(cls, *criterion, **kwargs):
    '''
    Fetch records matching ``criterion``.

    Usage:
        Model.get(Model.name=="whatever", limit=1000, offset=0, include_trashed=True, as_json=True, include_relation=True)

    Returns a list of instances, or a JSON string when as_json=True.
    '''
    # get kwargs parameters
    limit = kwargs.pop('limit', 1000)
    offset = kwargs.pop('offset', 0)
    include_trashed = kwargs.pop('include_trashed', False)
    as_json = kwargs.pop('as_json', False)
    include_relation = kwargs.pop('include_relation', False)
    # get / make session if not exists
    # BUG FIX: the attribute name previously contained a trailing space
    # ('__session__ '), so this hasattr test could never succeed and a
    # throwaway instance was always constructed.
    if hasattr(cls, '__session__'):
        session = cls.__session__
    else:
        obj = cls()
        session = obj.session
    query = session.query(cls)
    # hide soft-deleted rows unless explicitly requested
    if not include_trashed:
        query = query.filter(cls._trashed == False)
    # run the query
    result = query.filter(*criterion).limit(limit).offset(offset).all()
    if not as_json:
        return result
    dict_kwargs = {'include_relation': include_relation, 'isoformat': True}
    return json.dumps([row.to_dict(**dict_kwargs) for row in result])
@classmethod
def count(cls, *criterion, **kwargs):
    """Count records matching ``criterion``.

    Accepts limit/offset (applied before counting) and
    include_trashed; include_relation is accepted for symmetry with
    get() but has no effect on a count.
    """
    # get kwargs parameters
    limit = kwargs.pop('limit', None)
    offset = kwargs.pop('offset', None)
    include_trashed = kwargs.pop('include_trashed', False)
    kwargs.pop('include_relation', False)  # unused for counting
    # get / make session if not exists
    # BUG FIX: '__session__ ' (trailing space) could never match an
    # attribute; the intended cached session was never used.
    if hasattr(cls, '__session__'):
        session = cls.__session__
    else:
        obj = cls()
        session = obj.session
    query = session.query(cls)
    if not include_trashed:
        query = query.filter(cls._trashed == False)
    # apply filter
    query = query.filter(*criterion)
    # apply limit & offset
    if limit is not None:
        query = query.limit(limit)
    if offset is not None:
        query = query.offset(offset)
    return query.count()
@classmethod
def find(cls, id_value):
    """Return the record whose ``id`` equals ``id_value``, or None."""
    matches = cls.get(cls.id == id_value)
    return matches[0] if matches else None
# Lifecycle hooks. Each default implementation just (re)sets ``success``
# to True. Override in subclasses and set ``self.success = False`` to
# veto the operation (``_commit`` then rolls back instead of committing).
def before_save(self):
    self.success = True
def before_insert(self):
    self.success = True
def before_update(self):
    self.success = True
def before_trash(self):
    self.success = True
def before_untrash(self):
    self.success = True
def before_delete(self):
    self.success = True
def after_save(self):
    self.success = True
def after_insert(self):
    self.success = True
def after_update(self):
    self.success = True
def after_trash(self):
    self.success = True
def after_untrash(self):
    self.success = True
def after_delete(self):
    self.success = True
def _get_relation_name(self):
    # Names of this model's ORM relationships.
    # NOTE(review): ``.relationships._data`` is a private SQLAlchemy
    # structure; relying on it may break across SQLAlchemy versions.
    return self.__mapper__.relationships._data

def _get_relation(self, relation_name):
    # Resolve a relation name to its loaded value (Model or list).
    return getattr(self, relation_name)

def _save_relation(self):
    # Cascade save() to related records: single Model instances
    # (many-to-one) and lists of Model instances (one-to-many).
    for relation_name in self._get_relation_name():
        relation = self._get_relation(relation_name)
        if isinstance(relation, Model):
            relation.save()
        elif isinstance(relation, list):
            for child in relation:
                if isinstance(child, Model):
                    child.save()

def _commit(self):
    # success or rollback: commit the session when the hooks left
    # ``success`` True, otherwise roll the transaction back.
    if self.success:
        self.session.commit()
    else:
        self.session.rollback()
def save(self):
    """Insert or update this record, firing before/after hooks.

    Inserts when ``_real_id`` is unset, otherwise updates; also
    generates ``id`` on first save and cascades to relations.
    NOTE(review): nesting reconstructed from a whitespace-mangled
    source -- confirm against the original file.
    """
    inserting = False
    if self._real_id is None:
        inserting = True
        # before insert
        self.before_insert()
        # insert
        if self.success:
            self.session.add(self)
    else:
        # before update
        self.before_update()
    self.before_save()
    # save
    self._commit()
    # generate id if not exists
    if self.id is None:
        self.generate_id()
        self._commit()
    # after insert, after update and after save
    if inserting:
        self.after_insert()
    else:
        self.after_update()
    self.after_save()
    # also trigger save of relation
    self._save_relation()
def trash(self):
    """Soft-delete: flag ``_trashed`` and cascade to list children."""
    self.before_trash()
    if self.success:
        self._trashed = True
    # _commit() handles both outcomes: commit on success, rollback
    # otherwise. NOTE(review): nesting reconstructed from mangled
    # whitespace -- confirm _commit was not inside the if.
    self._commit()
    self.after_trash()
    # also trash children
    for relation_name in self._get_relation_name():
        relation = self._get_relation(relation_name)
        if isinstance(relation, list):
            for child in relation:
                if isinstance(child, Model):
                    child.trash()
                    child.save()

def untrash(self):
    """Undo a soft-delete and cascade to list children."""
    self.before_untrash()
    if self.success:
        self._trashed = False
    self._commit()
    self.after_untrash()
    # also untrash children
    for relation_name in self._get_relation_name():
        relation = self._get_relation(relation_name)
        if isinstance(relation, list):
            for child in relation:
                if isinstance(child, Model):
                    child.untrash()
                    child.save()

def delete(self):
    """Hard-delete this row and cascade to list children."""
    self.before_delete()
    if self.success:
        self.session.delete(self)
    self._commit()
    self.after_delete()
    # also delete children
    for relation_name in self._get_relation_name():
        relation = self._get_relation(relation_name)
        if isinstance(relation, list):
            for child in relation:
                if isinstance(child, Model):
                    # NOTE(review): children are trash()ed *and then*
                    # delete()d -- the trash looks redundant before a
                    # hard delete; confirm whether it is intentional.
                    child.trash()
                    child.delete()
def generate_prefix_id(self):
    # Format the current local time with the class's prefix pattern,
    # e.g. '%Y%m%d-' -> '20140131-'.
    return datetime.datetime.fromtimestamp(time.time()).strftime(self.__prefixid__)

def generate_id(self):
    """Assign a unique ``id``: <date prefix><zero-padded sequence>."""
    if self.id is None:
        prefix = self.generate_prefix_id()
        classobj = self.__class__
        # get maxid: the largest existing id sharing this prefix
        query = self.session.query(func.max(classobj.id).label("maxid")).filter(classobj.id.like(prefix+'%')).one()
        maxid = query.maxid
        if maxid is None:
            number = 0
        else:
            # get number part of maxid
            number = int(maxid[len(prefix):])
        # create newid
        # NOTE(review): not race-safe -- two concurrent inserts can read
        # the same maxid and then collide on the unique constraint.
        newid = prefix + str(number+1).zfill(self.__digitid__)
        self.id = newid
def to_dict(self, **kwargs):
    '''
    Serialize this record's columns (and optionally relations) to a dict.

    Usage:
        model_instance.to_dict()
        model_instance.to_dict(include_relation = True, isoformat = True)
    '''
    include_relation = kwargs.pop('include_relation', False)
    isoformat = kwargs.pop('isoformat', False)
    dictionary = {}
    for column in self.__table__.columns:
        val = getattr(self, column.name)
        # render date/time values as ISO strings so the dict is JSON-safe
        if isoformat and hasattr(val, 'isoformat'):
            val = val.isoformat()
        dictionary[column.name] = val
    # also include_relation
    if include_relation:
        # Relations are serialized shallowly (include_relation is not
        # forwarded), which also prevents infinite recursion on cycles.
        kwargs = {'isoformat': isoformat}
        # also add relation to dictionary
        for relation_name in self._get_relation_name():
            relation = self._get_relation(relation_name)
            if isinstance(relation, Model):
                dictionary[relation_name] = relation.to_dict(**kwargs)
            elif isinstance(relation, list):
                dictionary[relation_name] = []
                for child in relation:
                    if isinstance(child, Model):
                        dictionary[relation_name].append(child.to_dict(**kwargs))
            else:
                dictionary[relation_name] = relation
    return dictionary

def to_json(self, **kwargs):
    '''
    Serialize this record to a JSON string.

    Usage:
        model_instance.to_json()
        model_instance.to_json(include_relation = True)
    '''
    kwargs['isoformat'] = True
    dictionary = self.to_dict(**kwargs)
    return json.dumps(dictionary)
def build_column(self, column_name, **kwargs):
    '''
    Custom column if defined, override this if needed, but promise me 3 things:
    * add any additional css into self.generated_style
    * add any additional script into self.generated_script
    * return your HTML as string

    Returning None means "no custom rendering; use the default".
    '''
    return None

def build_input(self, column_name, **kwargs):
    '''
    Custom input if defined, override this if needed, but promise me 3 things:
    * add any additional css into self.generated_style
    * add any additional script into self.generated_script
    * return your HTML as string

    Returning None means "no custom input; use the default".
    '''
    return None

def reset_generated(self):
    """Clear all generated-output buffers before rebuilding a view."""
    self._generated_html = ''
    self._generated_script = ''
    # BUG FIX: also reset _generated_style -- the ``generated_style``
    # property reads that attribute, but previously only
    # ``_generated_css`` (a dead attribute no property exposes) was
    # cleared here.
    self._generated_style = ''
    self._generated_css = ''  # kept for backward compatibility
def include_resource(self):
    """Append the shared jQuery-UI/Bootstrap boilerplate to
    ``generated_script`` and ``generated_style``.

    BUG FIX: the CSS was previously accumulated into
    ``_generated_css``, an attribute no property exposes, so it never
    reached the page. It now goes through the ``generated_style``
    property (whose getter safely defaults to '' when unset).
    """
    # Local name renamed so it no longer shadows the imported base_url.
    url = base_url()
    self.generated_script += (
        '<!--[if lt IE 9]>' +
        '<script src="http://html5shim.googlecode.com/svn/trunk/html5.js"></script>' +
        '<![endif]-->' +
        '<script src="' + url + 'assets/jquery-ui-bootstrap/assets/js/vendor/jquery-1.9.1.min.js" type="text/javascript"></script>' +
        '<script src="' + url + 'assets/jquery-ui-bootstrap/assets/js/vendor/jquery-migrate-1.2.1.min.js" type="text/javascript"></script>' +
        '<script src="' + url + 'assets/jquery-ui-bootstrap/assets/js/vendor/bootstrap.js" type="text/javascript"></script>' +
        '<script src="' + url + 'assets/jquery-ui-bootstrap/assets/js/vendor/holder.js" type="text/javascript"></script>' +
        '<script src="' + url + 'assets/jquery-ui-bootstrap/assets/js/vendor/jquery-ui-1.10.3.custom.min.js" type="text/javascript"></script>' +
        '<script src="' + url + 'assets/jquery-ui-bootstrap/assets/js/google-code-prettify/prettify.js" type="text/javascript"></script>'
    )
    self.generated_style += (
        '<link rel="stylesheet" href="' + url + 'assets/jquery-ui-bootstrap/assets/css/bootstrap.min.css">' +
        '<link rel="stylesheet" href="' + url + 'assets/jquery-ui-bootstrap/css/custom-theme/jquery-ui-1.10.3.custom.css">' +
        '<!--<link rel="stylesheet" href="' + url + 'assets/jquery-ui-bootstrap/css/custom-theme/jquery-ui-1.10.3.theme.css">-->' +
        '<link rel="stylesheet" href="' + url + 'assets/jquery-ui-bootstrap/assets/css/font-awesome.min.css">' +
        '<!--[if IE 7]>' +
        '<link rel="stylesheet" href="' + url + 'assets/jquery-ui-bootstrap/assets/css/font-awesome-ie7.min.css">' +
        '<![endif]-->' +
        '<!--[if lt IE 9]>' +
        '<link rel="stylesheet" href="' + url + 'assets/jquery-ui-bootstrap/css/custom-theme/jquery.ui.1.10.3.ie.css">' +
        '<![endif]-->' +
        '<link rel="stylesheet" href="' + url + 'assets/jquery-ui-bootstrap/assets/js/google-code-prettify/prettify.css">' +
        '<!-- HTML5 shim and Respond.js IE8 support of HTML5 elements and media queries -->' +
        '<!--[if lt IE 9]>' +
        '<script src="' + url + 'assets/jquery-ui-bootstrap/assets/js/vendor/html5shiv.js" type="text/javascript"></script>' +
        '<script src="' + url + 'assets/jquery-ui-bootstrap/assets/js/vendor/respond.min.js" type="text/javascript"></script>' +
        '<![endif]-->' +
        '<!-- Le fav and touch icons -->' +
        '<link rel="apple-touch-icon-precomposed" sizes="144x144" href="' + url + 'assets/jquery-ui-bootstrap/assets/ico/apple-touch-icon-144-precomposed.png">' +
        '<link rel="apple-touch-icon-precomposed" sizes="114x114" href="' + url + 'assets/jquery-ui-bootstrap/assets/ico/apple-touch-icon-114-precomposed.png">' +
        '<link rel="apple-touch-icon-precomposed" sizes="72x72" href="' + url + 'assets/jquery-ui-bootstrap/assets/ico/apple-touch-icon-72-precomposed.png">' +
        '<link rel="apple-touch-icon-precomposed" href="' + url + 'assets/jquery-ui-bootstrap/assets/ico/apple-touch-icon-57-precomposed.png">'
    )
def quick_preview(self):
    '''
    Quick preview of record, override this
    '''
    # Default one-line representation used in option lists and links.
    return self.id
def generate_input_view(self, state = None, include_resource = False):
    '''
    Input view of record.

    Builds an HTML form for this record into ``generated_html``.
    state: 'new'/'create'/'insert'/'add' use the insert form columns,
    'edit'/'update' use the update form columns; anything else
    (including None) uses the default form columns.
    '''
    # prepare resource
    self.reset_generated()
    if include_resource:
        self.include_resource()
    # determine which input column is used
    # BUG FIX: an unrecognized ``state`` previously left input_column
    # unbound (NameError); unknown states now fall back to _form_column.
    if state == 'new' or state == 'create' or state == 'insert' or state == 'add':
        input_column = self._insert_form_column
    elif state == 'edit' or state == 'update':
        input_column = self._update_form_column
    else:
        input_column = self._form_column
    # build html
    html = ''
    relation_properties = self.__mapper__.relationships._data
    for key in input_column:
        label = key.replace('_', ' ').title()
        html += '<div class="form-group">'
        html += '<label for="field_' + key + '" class="col-xs-12 col-sm-12 col-md-3 col-lg-3 control-label">' + label + '</label>'
        html += '<div class="col-xs-12 col-sm-12 col-md-9 col-lg-9">'
        custom_input = self.build_input(key)
        if custom_input is not None:
            input_element = custom_input
        else:
            value = getattr(self, key)
            if key in relation_properties:
                relation = relation_properties[key]
                ref_class = getattr(self.__class__, key).property.mapper.class_
                if relation.uselist:
                    # one to many
                    input_element = 'One to Many'
                else:
                    # many to one
                    option_obj = ref_class.get()
                    option_count = ref_class.count()
                    input_element = ''
                    if option_count == 0:
                        input_element += 'No option available'
                    elif option_count <= 3:
                        # BUG FIX: integer division so grid classes come
                        # out as e.g. 'col-xs-4', not 'col-xs-4.0', under
                        # Python 3.
                        xs_width = sm_width = str(12 // option_count)
                        md_width = lg_width = str(9 // option_count)
                        for obj in option_obj:
                            checked = 'checked' if value == obj else ''
                            input_element += '<div class="col-xs-' + xs_width + ' col-sm-' + sm_width + ' col-md-' + md_width + ' col-lg-' + lg_width + '">'
                            input_element += '<label><input type="radio" ' + checked + ' name ="' + key + '" value="' + obj.id + '"/> ' + obj.quick_preview() + '</label>'
                            input_element += '</div>'
                    else:
                        input_element += '<select class="form-control" id="field_' + key + '" name ="' + key + '">'
                        input_element += '<option value="">None</option>'
                        for obj in option_obj:
                            selected = 'selected' if value == obj else ''
                            input_element += '<option ' + selected + ' value="' + obj.id + '">' + obj.quick_preview() + '</option>'
                        input_element += '</select>'
            else:
                # plain column: render a text input with the current value
                value = '' if value is None else str(value)
                input_element = '<input type="text" class="form-control" id="field_' + key + '" name="' + key + '" placeholder="' + label + '" value="' + value + '">'
        html += input_element
        html += '</div>'
        html += '</div>'
    self.generated_html = html
def generate_detail_view(self, include_resource = False):
    '''
    Detail view of record, override this with care.

    Builds a read-only HTML rendering of this record (columns and
    relations) into ``generated_html``.
    '''
    # prepare resource
    self.reset_generated()
    if include_resource:
        self.include_resource()
    dictionary = self.to_dict(include_relation = True)
    # build html
    html = '<div class="row container">'
    for key in self._shown_column:
        # row
        html += '<div class="row container col-xs-12 col-sm-12 col-md-12 col-lg-12">'
        label = key.replace('_', ' ').title()
        custom_value = self.build_column(key)
        if custom_value is not None:
            value = custom_value
        else:
            value = dictionary[key] if key in dictionary else None
        # pre-process: render a non-empty child list as a bullet list
        if isinstance(value, list) and len(value) > 0:
            children = getattr(self, key)
            # generate new value
            value = '<ul>'
            for child in children:
                value += '<li>' + child.quick_preview() + '</li>'
            # BUG FIX: close the list with '</ul>' (was a second '<ul>',
            # leaving the markup unbalanced).
            value += '</ul>'
        label_class = 'col-xs-12 col-sm-12 col-md-3 col-lg-3'
        content_class = 'col-xs-12 col-sm-12 col-md-9 col-lg-9'
        # lookup value: a dict here is a serialized many-to-one relation
        if isinstance(value, dict):
            obj = getattr(self, key)
            value = obj.quick_preview()
        # None or empty children
        if value is None or (isinstance(value, list) and len(value) == 0):
            value = 'Not available'
        # label
        html += '<div class="' + label_class + '">'
        html += '<label>' + str(label) + '</label>'
        html += '</div>'
        # value
        html += '<div class="' + content_class + '">'
        html += str(value)
        html += '</div>'
        # end of row
        html += '</div>'
    html += '</div>'
    self.generated_html = html
def auto_migrate(engine):
    """Best-effort schema migration: create missing tables/columns and
    alter changed columns so the database matches the declared models.

    Operations that fail are reported for manual follow-up. Not every
    alteration is supported; not recommended in production.
    NOTE(review): loop nesting reconstructed from whitespace-mangled
    source -- confirm the per-column loop runs for every model table.
    """
    print(' %s%s WARNING %s%s%s : You are using auto_migrate()\n Note that not all operation supported. Be prepared to do things manually.\n Using auto_migration in production mode is not recommended.%s%s' %(Fore.BLACK, Back.GREEN, Fore.RESET, Back.RESET, Fore.GREEN, Fore.RESET, Fore.MAGENTA))
    # make model_meta & db_meta
    Model.metadata.create_all(bind=engine)
    model_meta = Model.metadata
    db_meta = MetaData()
    db_meta.reflect(bind=engine)
    # create db session & alembic operation
    db_session = scoped_session(sessionmaker(bind=engine))
    conn = engine.connect()
    ctx = MigrationContext.configure(conn)
    op = Operations(ctx)
    # default parameters
    default_column_names = ['_real_id', '_trashed', '_created_at', '_updated_at', 'id']
    column_properties = ['key', 'primary_key', 'nullable', 'default',
                         'server_default', 'server_onupdate', 'index',
                         'unique', 'system', 'quote', 'doc', 'onupdate',
                         'autoincrement', 'constraints', 'foreign_keys']
    for model_table_name in model_meta.tables:
        # get model_table from model_meta
        model_table = model_meta.tables[model_table_name]
        db_table = None
        # make model_table with alembic if necessary
        if model_table_name not in db_meta.tables:
            try:
                op.create_table(
                    model_table_name,
                    Column('_real_id', Integer, primary_key = True),
                    Column('_trashed', Boolean, default = False),
                    Column('_created_at', DateTime, default=func.now()),
                    Column('_updated_at', DateTime, default=func.now(), onupdate=func.now()),
                    Column('id', String(35), unique = True)
                )
            # was a bare except; narrowed so SystemExit/KeyboardInterrupt
            # still propagate
            except Exception:
                print(' Fail to make table: %s, please add it manually' % (model_table_name))
        else:
            db_table = db_meta.tables[model_table_name]
        for model_column in model_table.columns:
            # don't create or alter default columns
            if model_column.name in default_column_names:
                continue
            # get model_column properties
            model_column_kwargs = {}
            for prop in column_properties:
                model_column_kwargs[prop] = getattr(model_column, prop)
            # make model_column with alembic if necessary
            if model_column.name not in db_meta.tables[model_table_name].columns:
                try:
                    op.add_column(model_table_name, Column(model_column.name, model_column.type, **model_column_kwargs))
                except Exception:
                    print(' Fail to make column %s.%s, please add it manually' % (model_table_name, model_column.name))
            else:
                # get db_column information
                db_column = None
                if db_table is not None:
                    for column in db_table.columns:
                        if column.name == model_column.name:
                            db_column = column
                            break
                db_column_kwargs = {}
                for prop in column_properties:
                    db_column_kwargs[prop] = getattr(db_column, prop)
                # is alter column needed?
                need_alter = str(model_column.type) != str(db_column.type) or model_column_kwargs['nullable'] != db_column_kwargs['nullable']
                if need_alter:
                    # alter model_table with alembic
                    try:
                        op.alter_column(model_table_name,
                            model_column.name,
                            nullable = model_column_kwargs['nullable'], # None,
                            server_default = False,
                            new_column_name = None,
                            type_ = model_column.type, # None
                            existing_type=None,
                            existing_server_default=False,
                            existing_nullable=None)
                    except Exception:
                        print(' Fail to alter column %s.%s, please alter it manually.\n Old type: %s, new type: %s' % (model_table_name, model_column.name, str(db_column.type), str(model_column.type)))
    print(Fore.RESET)
# NOTE: not-tested improvement -- a second, unreviewed copy of this module
# follows below. The marker above was plain prose (a syntax error in Python);
# it has been turned into a comment.
from sqlalchemy import create_engine, Column, func, BIGINT, BigInteger, BINARY, Binary,\
BOOLEAN, Boolean, DATE, Date, DATETIME, DateTime, FLOAT, Float,\
INTEGER, Integer, VARCHAR, String, TEXT, Text, MetaData
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from alembic.migration import MigrationContext
from alembic.operations import Operations
import datetime, time, json
from kokoropy import Fore, Back, base_url
# create Base
Base = declarative_base()
class Model(Base):
'''
Model
'''
_real_id = Column(Integer, primary_key=True)
_trashed = Column(Boolean, default=False)
_created_at = Column(DateTime, default=func.now())
_updated_at = Column(DateTime, default=func.now(), onupdate=func.now())
id = Column(String(35), unique=True)
__abstract__ = True
__connectionstring__ = ''
__echo__ = True
__prefixid__ = '%Y%m%d-'
__digitid__ = 3
__showncolumn__ = None
__formcolumn__ = None
__insertformcolumn__ = None
__updateformcolumn__ = None
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
    @property
    def _shown_column(self):
        '''
        Column names shown in list/detail views: every table column except
        the bookkeeping columns and foreign keys (names starting 'fk_'),
        plus the names of all relationships.  Computed once, then cached.
        '''
        if self.__showncolumn__ is None:
            self.__showncolumn__ = []
            for column in self.__table__.columns:
                column = column.name
                if column in ['_real_id', '_created_at', '_updated_at', '_trashed'] or column.split('_')[0] == 'fk':
                    continue
                self.__showncolumn__.append(column)
            # relationships are shown after the plain columns
            for relation_name in self._get_relation_name():
                self.__showncolumn__.append(relation_name)
        return self.__showncolumn__
@property
def _form_column(self):
if self.__formcolumn__ is None:
return self._shown_column
else:
return self.__formcolumn__
@property
def _insert_form_column(self):
if self.__insertformcolumn__ is None:
return self._form_column
else:
return self.__insertformcolumn__
@property
def _update_form_column(self):
if self.__insertformcolumn__ is None:
return self._form_column
else:
return self.__insertformcolumn__
    @property
    def engine(self):
        '''
        Engine bound to this model: the attached session's bind when a
        session exists, otherwise a lazily created (and cached) engine
        built from __connectionstring__.
        '''
        if hasattr(self, '__session__'):
            self.__engine__ = self.session.bind
        elif not hasattr(self, '__engine__'):
            self.__engine__ = create_engine(self.__connectionstring__, echo=self.__echo__)
        return self.__engine__
    @property
    def session(self):
        '''
        Scoped session for this model, created on first access (building the
        engine from __connectionstring__ first if needed) and cached.
        '''
        if not hasattr(self, '__session__'):
            if not hasattr(self, '__engine__'):
                self.__engine__ = create_engine(self.__connectionstring__, echo=self.__echo__)
            self.__session__ = scoped_session(sessionmaker(bind=self.__engine__))
        return self.__session__
@property
def error_message(self):
if hasattr(self, '_error_message'):
return self._error_message
else:
return ''
@error_message.setter
def error_message(self, val):
self._error_message = val
@property
def generated_html(self):
if hasattr(self, '_generated_html'):
return self._generated_html
else:
return ''
@generated_html.setter
def generated_html(self, val):
self._generated_html = val
@property
def generated_style(self):
if hasattr(self, '_generated_style'):
return self._generated_style
else:
return ''
@generated_style.setter
def generated_style(self, val):
self._generated_style = val
@property
def generated_script(self):
if hasattr(self, '_generated_script'):
return self._generated_script
else:
return ''
@generated_script.setter
def generated_script(self, val):
self._generated_script = val
@property
def success(self):
if hasattr(self, '_success'):
return self._success
else:
return True
@success.setter
def success(self, val):
self._success = val
@classmethod
def get(cls, *criterion, **kwargs):
'''
Usage:
Model.get(Model.name=="whatever", limit=1000, offset=0, include_trashed=True, as_json=True, include_relation=True)
'''
# get kwargs parameters
limit = kwargs.pop('limit', 1000)
offset = kwargs.pop('offset', 0)
include_trashed = kwargs.pop('include_trashed', False)
as_json = kwargs.pop('as_json', False)
include_relation = kwargs.pop('include_relation', False)
# get / make session if not exists
if hasattr(cls,'__session__ '):
session = cls.__session__
else:
obj = cls()
session = obj.session
query = session.query(cls)
if include_trashed == False:
query = query.filter(cls._trashed == False)
# run the query
result = query.filter(*criterion).limit(limit).offset(offset).all()
if as_json:
kwargs = {'include_relation' : include_relation, 'isoformat' : True}
result_list = []
for row in result:
result_list.append(row.to_dict(**kwargs))
return json.dumps(result_list)
else:
return result
@classmethod
def count(cls, *criterion, **kwargs):
# get kwargs parameters
limit = kwargs.pop('limit', None)
offset = kwargs.pop('offset', None)
include_trashed = kwargs.pop('include_trashed', False)
include_relation = kwargs.pop('include_relation', False)
# get / make session if not exists
if hasattr(cls,'__session__ '):
session = cls.__session__
else:
obj = cls()
session = obj.session
query = session.query(cls)
if include_trashed == False:
query = query.filter(cls._trashed == False)
# apply filter
query = query.filter(*criterion)
# apply limit & offset
if limit is not None:
query = query.limit(limit)
if offset is not None:
query = query.offset(offset)
return query.count()
@classmethod
def find(cls, id_value):
result = cls.get(cls.id == id_value)
if len(result)>0:
return result[0]
    # --- lifecycle hooks --------------------------------------------------
    # Each hook resets self.success to True.  Subclasses override them and
    # set self.success = False (plus error_message) to veto the operation.
    def before_save(self):
        self.success = True
    def before_insert(self):
        self.success = True
    def before_update(self):
        self.success = True
    def before_trash(self):
        self.success = True
    def before_untrash(self):
        self.success = True
    def before_delete(self):
        self.success = True
    def after_save(self):
        self.success = True
    def after_insert(self):
        self.success = True
    def after_update(self):
        self.success = True
    def after_trash(self):
        self.success = True
    def after_untrash(self):
        self.success = True
    def after_delete(self):
        self.success = True
    def _get_relation_name(self):
        # names of all mapped relationships (iterating yields the keys);
        # NOTE(review): relies on the private _data attribute of SQLAlchemy's
        # relationship collection — confirm against the pinned SQLAlchemy version
        return self.__mapper__.relationships._data
    def _get_relation(self, relation_name):
        # the related object (or list of objects) for a relationship name
        return getattr(self, relation_name)
    def _save_relation(self):
        '''Recursively save every related Model instance (scalar or list).'''
        for relation_name in self._get_relation_name():
            relation = self._get_relation(relation_name)
            if isinstance(relation, Model):
                relation.save()
            elif isinstance(relation, list):
                for child in relation:
                    if isinstance(child, Model):
                        child.save()
def _commit(self):
# success or rollback
if self.success:
self.session.commit()
else:
self.session.rollback()
    def save(self):
        '''
        Insert (when _real_id is unset) or update this record, honoring the
        before_*/after_* hooks and self.success; generates an id if missing
        and finally saves all related Model instances too.
        '''
        inserting = False
        if self._real_id is None:
            inserting = True
            # before insert
            self.before_insert()
            # insert only when the hook did not veto
            if self.success:
                self.session.add(self)
        else:
            # before update
            self.before_update()
        # before_save runs for both insert and update paths
        self.before_save()
        # save (commit or rollback depending on self.success)
        self._commit()
        # generate id if not exists; needs a second commit to persist it
        if self.id is None:
            self.generate_id()
            self._commit()
        # after insert, after update and after save
        if inserting:
            self.after_insert()
        else:
            self.after_update()
        self.after_save()
        # also trigger save of relation
        self._save_relation()
    def trash(self):
        '''Soft-delete this record (set _trashed) and cascade to list children.'''
        self.before_trash()
        if self.success:
            self._trashed = True
            self._commit()
            self.after_trash()
            # also trash children
            for relation_name in self._get_relation_name():
                relation = self._get_relation(relation_name)
                if isinstance(relation, list):
                    for child in relation:
                        if isinstance(child, Model):
                            child.trash()
                            child.save()
    def untrash(self):
        '''Restore a soft-deleted record (clear _trashed) and cascade to list children.'''
        self.before_untrash()
        if self.success:
            self._trashed = False
            self._commit()
            self.after_untrash()
            # also untrash children
            for relation_name in self._get_relation_name():
                relation = self._get_relation(relation_name)
                if isinstance(relation, list):
                    for child in relation:
                        if isinstance(child, Model):
                            child.untrash()
                            child.save()
    def delete(self):
        '''Hard-delete this record from the database and cascade to list children.'''
        self.before_delete()
        if self.success:
            self.session.delete(self)
            self._commit()
            self.after_delete()
            # also delete children
            for relation_name in self._get_relation_name():
                relation = self._get_relation(relation_name)
                if isinstance(relation, list):
                    for child in relation:
                        if isinstance(child, Model):
                            # NOTE(review): children are trashed before being
                            # deleted — presumably to fire trash hooks first;
                            # confirm this double-step is intentional
                            child.trash()
                            child.delete()
def generate_prefix_id(self):
return datetime.datetime.fromtimestamp(time.time()).strftime(self.__prefixid__)
    def generate_id(self):
        '''
        Assign a human-readable id: strftime prefix (__prefixid__) plus a
        zero-padded counter (__digitid__ digits) continuing from the largest
        existing id with the same prefix.
        '''
        if self.id is None:
            prefix = self.generate_prefix_id()
            classobj = self.__class__
            # get maxid among rows sharing this prefix
            query = self.session.query(func.max(classobj.id).label("maxid")).filter(classobj.id.like(prefix+'%')).one()
            maxid = query.maxid
            if maxid is None:
                number = 0
            else:
                # get number part of maxid
                number = int(maxid[len(prefix):])
            # create newid
            # NOTE(review): not race-safe — two concurrent saves can compute
            # the same max and collide on the unique id column
            newid = prefix + str(number+1).zfill(self.__digitid__)
            self.id = newid
    def to_dict(self, **kwargs):
        '''
        Serialize this record to a dict of column name -> value.

        Keyword arguments:
            include_relation (default False): also embed relationships
                (scalar relations as nested dicts, list relations as lists).
            isoformat (default False): render date/datetime values via
                their isoformat() method.

        Usage:
            model_instance.to_dict()
            model_instance.to_dict(include_relation = True, isoformat = True)
        '''
        include_relation = kwargs.pop('include_relation', False)
        isoformat = kwargs.pop('isoformat', False)
        dictionary = {}
        for column in self.__table__.columns:
            val = getattr(self, column.name)
            if isoformat and hasattr(val, 'isoformat'):
                val = val.isoformat()
            dictionary[column.name] = val
        # also include_relation
        if include_relation:
            # children inherit isoformat but never recurse into their relations
            kwargs = {'isoformat': isoformat}
            # also add relation to dictionary
            for relation_name in self._get_relation_name():
                relation = self._get_relation(relation_name)
                if isinstance(relation, Model):
                    dictionary[relation_name] = relation.to_dict(**kwargs)
                elif isinstance(relation, list):
                    dictionary[relation_name] = []
                    for child in relation:
                        if isinstance(child, Model):
                            dictionary[relation_name].append(child.to_dict(**kwargs))
                else:
                    dictionary[relation_name] = relation
        return dictionary
def to_json(self, **kwargs):
'''
Usage:
model_instance.to_json()
model_instance.to_json(include_relation = True)
'''
kwargs['isoformat'] = True
dictionary = self.to_dict(**kwargs)
return json.dumps(dictionary)
    def build_custom_label(self, column_name, **kwargs):
        '''
        Custom label if defined, override this if needed, but promise me 3 things:
          * add any additional css into self.generated_style
          * add any additional script into self.generated_script
          * return your HTML as string
        Returning None (the default) falls back to the generic label.
        '''
        return None
    def build_custom_input(self, column_name, **kwargs):
        '''
        Custom input if defined, override this if needed, but promise me 3 things:
          * add any additional css into self.generated_style
          * add any additional script into self.generated_script
          * return your HTML as string
        Returning None (the default) falls back to the generic input widget.
        '''
        return None
def build_custom_representation(self, column_name, **kwargs):
'''
Custom representation if defined, override this if needed, but promise me 3 things:
* add any additional css into self.generated_style
* add any additional script into self.generated_script
* return your HTML as string
'''
pass
def build_label(self, column_name, **kwargs):
custom_label = self.build_custom_label(column_name, **kwargs)
if custom_label is not None:
return custom_label
else:
return column_name.replace('_', ' ').title()
    def build_input(self, column_name, **kwargs):
        '''
        Build the HTML form input for one column.

        Order of preference: the subclass hook build_custom_input(); a
        relation widget (radio buttons or <select> for many-to-one, a
        placeholder string for one-to-many); a plain text <input> otherwise.
        '''
        custom_input = self.build_custom_input(column_name, **kwargs)
        if custom_input is not None:
            return custom_input
        else:
            # current value of the column, used to pre-select/pre-fill
            if hasattr(self, column_name):
                value = getattr(self, column_name)
            else:
                value = ''
            html = ''
            relation_properties = self.__mapper__.relationships._data
            if column_name in relation_properties:
                relation = relation_properties[column_name]
                # the related model class on the other side of the relation
                ref_class = getattr(self.__class__, column_name).property.mapper.class_
                if relation.uselist:
                    # one to many: no widget, just a placeholder
                    input_element = 'One to Many'
                else:
                    # many to one: offer all candidate rows as options
                    option_obj = ref_class.get()
                    option_count = ref_class.count()
                    input_element = ''
                    if option_count == 0:
                        input_element += 'No option available'
                    elif option_count <= 3:
                        # few options: render radio buttons on a bootstrap grid
                        # NOTE(review): assumes integer division (Python 2 era
                        # code); on Python 3 this yields widths like '4.0' —
                        # confirm the target runtime
                        xs_width = sm_width = str(12/option_count)
                        md_width = lg_width = str(9/option_count)
                        for obj in option_obj:
                            if value == obj:
                                checked = 'checked'
                            else:
                                checked = ''
                            input_element += '<div class="col-xs-' + xs_width + ' col-sm-' + sm_width + ' col-md-' + md_width + ' col-lg-' + lg_width+ '">'
                            input_element += '<label><input type="radio" ' + checked + ' name ="' + column_name + '" value="' + obj.id + '"/> ' + obj.quick_preview() + '</label>'
                            input_element += '</div>'
                    else:
                        # many options: render a select box
                        input_element += '<select class="form-control" id="field_' + column_name + '" name ="' + column_name + '">'
                        input_element += '<option value="">None</option>'
                        for obj in option_obj:
                            if value == obj:
                                selected = 'selected'
                            else:
                                selected = ''
                            input_element += '<option ' + selected + ' value="' + obj.id + '">' + obj.quick_preview() + '</option>'
                        input_element += '</select>'
            else:
                # plain column: text input pre-filled with the current value
                if value is None:
                    value = ''
                else:
                    value = str(value)
                label = self.build_label(column_name, **kwargs)
                input_element = '<input type="text" class="form-control" id="field_' + column_name + '" name="' + column_name + '" placeholder="' + label + '" value="' + value + '">'
            html += input_element
        return html
def build_labeled_input(self, column_name, **kwargs):
label = self.build_label(column_name, **kwargs)
html = '<div class="form-group">'
html += '<label for="field_' + column_name + '" class="col-xs-12 col-sm-12 col-md-3 col-lg-3 control-label">' + label + '</label>'
html += '<div class="col-xs-12 col-sm-12 col-md-9 col-lg-9">'
html += self.build_input(column_name, **kwargs)
html += '</div>'
html += '</div>'
return html
def build_representation(self, column_name, **kwargs):
custom_representation = self.build_custom_representation(column_name, **kwargs)
if custom_representation is not None:
return custom_representation
else:
if hasattr(self, column_name):
value = getattr(self, column_name)
else:
value = ''
# pre-process
if isinstance(value, list) and len(value)>0:
children = getattr(self,column_name)
# generate new value
value = '<ul>'
for child in children:
value += '<li>' + child.quick_preview() + '</li>'
value += '<ul>'
# lookup value
if isinstance(value, Model):
obj = getattr(self, column_name)
value = obj.quick_preview()
# None or empty children
if value is None or (isinstance(value,list) and len(value)==0):
value = 'Not available'
return value
def build_labeled_representation(self, column_name, **kwargs):
label = self.build_label(column_name, **kwargs)
html = '<div class="form-group">'
html += '<label class="col-xs-12 col-sm-12 col-md-3 col-lg-3 control-label">' + label + '</label>'
html += '<div class="col-xs-12 col-sm-12 col-md-9 col-lg-9">'
html += self.build_representation(column_name, **kwargs)
html += '</div>'
html += '</div>'
return html
    # Tabular rendering hooks: unimplemented placeholders for subclasses.
    def generate_tabular_label(self, **kwargs):
        pass
    def generate_tabular_representation(self, **kwargs):
        pass
    def generate_tabular_input(self, column_name, **kwargs):
        pass
def reset_generated(self):
self._generated_html = ''
self._generated_script = ''
self._generated_css = ''
    def include_resource(self):
        '''
        Append the standard jQuery-UI-Bootstrap <script> and <link> tags to
        the generated script and css buffers.

        NOTE(review): this appends to _generated_css, but the public
        generated_style property reads _generated_style — confirm which
        buffer the views actually consume.
        '''
        # local name intentionally shadows the imported base_url helper
        base_url = base_url()
        self._generated_script += '<!--[if lt IE 9]>' + \
            '<script src="http://html5shim.googlecode.com/svn/trunk/html5.js"></script>' + \
            '<![endif]-->' + \
            '<script src="' + base_url + 'assets/jquery-ui-bootstrap/assets/js/vendor/jquery-1.9.1.min.js" type="text/javascript"></script>' +\
            '<script src="' + base_url + 'assets/jquery-ui-bootstrap/assets/js/vendor/jquery-migrate-1.2.1.min.js" type="text/javascript"></script>' +\
            '<script src="' + base_url + 'assets/jquery-ui-bootstrap/assets/js/vendor/bootstrap.js" type="text/javascript"></script>' +\
            '<script src="' + base_url + 'assets/jquery-ui-bootstrap/assets/js/vendor/holder.js" type="text/javascript"></script>' +\
            '<script src="' + base_url + 'assets/jquery-ui-bootstrap/assets/js/vendor/jquery-ui-1.10.3.custom.min.js" type="text/javascript"></script>' +\
            '<script src="' + base_url + 'assets/jquery-ui-bootstrap/assets/js/google-code-prettify/prettify.js" type="text/javascript"></script>'
        self._generated_css += '<link rel="stylesheet" href="' + base_url + 'assets/jquery-ui-bootstrap/assets/css/bootstrap.min.css">' +\
            '<link rel="stylesheet" href="' + base_url + 'assets/jquery-ui-bootstrap/css/custom-theme/jquery-ui-1.10.3.custom.css">' +\
            '<!--<link rel="stylesheet" href="' + base_url + 'assets/jquery-ui-bootstrap/css/custom-theme/jquery-ui-1.10.3.theme.css">-->' +\
            '<link rel="stylesheet" href="' + base_url + 'assets/jquery-ui-bootstrap/assets/css/font-awesome.min.css">' +\
            '<!--[if IE 7]>' +\
            '<link rel="stylesheet" href="' + base_url + 'assets/jquery-ui-bootstrap/assets/css/font-awesome-ie7.min.css">' +\
            '<![endif]-->' +\
            '<!--[if lt IE 9]>' +\
            '<link rel="stylesheet" href="' + base_url + 'assets/jquery-ui-bootstrap/css/custom-theme/jquery.ui.1.10.3.ie.css">' +\
            '<![endif]-->' +\
            '<link rel="stylesheet" href="' + base_url + 'assets/jquery-ui-bootstrap/assets/js/google-code-prettify/prettify.css">' +\
            '<!-- HTML5 shim and Respond.js IE8 support of HTML5 elements and media queries -->' +\
            '<!--[if lt IE 9]>' +\
            '<script src="' + base_url + 'assets/jquery-ui-bootstrap/assets/js/vendor/html5shiv.js" type="text/javascript"></script>' +\
            '<script src="' + base_url + 'assets/jquery-ui-bootstrap/assets/js/vendor/respond.min.js" type="text/javascript"></script>' +\
            '<![endif]-->' +\
            '<!-- Le fav and touch icons -->' +\
            '<link rel="apple-touch-icon-precomposed" sizes="144x144" href="' + base_url + 'assets/jquery-ui-bootstrap/assets/ico/apple-touch-icon-144-precomposed.png">' +\
            '<link rel="apple-touch-icon-precomposed" sizes="114x114" href="' + base_url + 'assets/jquery-ui-bootstrap/assets/ico/apple-touch-icon-114-precomposed.png">' +\
            '<link rel="apple-touch-icon-precomposed" sizes="72x72" href="' + base_url + 'assets/jquery-ui-bootstrap/assets/ico/apple-touch-icon-72-precomposed.png">' +\
            '<link rel="apple-touch-icon-precomposed" href="' + base_url + 'assets/jquery-ui-bootstrap/assets/ico/apple-touch-icon-57-precomposed.png">'
    def quick_preview(self):
        '''
        Quick preview of record, override this.
        Defaults to the record's generated string id.
        '''
        return self.id
def generate_input_view(self, state = None, include_resource = False):
'''
Input view of record
'''
# prepare resource
self.reset_generated()
if include_resource:
self.include_resource()
# determine which input column is used
if state is None:
input_column = self._form_column
if state == 'new' or state == 'create' or state == 'insert' or state == 'add':
input_column = self._insert_form_column
elif state == 'edit' or state == 'update':
input_column = self._update_form_column
# build html
html = ''
for column_name in input_column:
html += self.build_labeled_input(column_name)
self.generated_html = html
def generate_detail_view(self, include_resource = False):
'''
Detail view of record, override this with care
'''
# prepare resource
self.reset_generated()
if include_resource:
self.include_resource()
# build html
html = '<div class="row container">'
for column_name in self._shown_column:
html += self.build_custom_representation(column_name)
html += '</div>'
self.generated_html = html
def auto_migrate(engine):
    '''
    Best-effort schema sync: create any table/column that exists on the
    models but not in the database, and try to alter columns whose type or
    nullability differ.  Failures are reported on stdout and skipped.

    Fixes: bare ``except:`` clauses narrowed to ``except Exception`` (the
    old form also swallowed KeyboardInterrupt/SystemExit); unused locals
    ``db_session`` and ``db_column_list`` removed.
    '''
    print(' %s%s WARNING %s%s%s : You are using auto_migrate()\n Note that not all operation supported. Be prepared to do things manually.\n Using auto_migration in production mode is not recommended.%s%s' %(Fore.BLACK, Back.GREEN, Fore.RESET, Back.RESET, Fore.GREEN, Fore.RESET, Fore.MAGENTA))
    # make model_meta & db_meta
    Model.metadata.create_all(bind=engine)
    model_meta = Model.metadata
    db_meta = MetaData()
    db_meta.reflect(bind=engine)
    # create alembic operation context on a raw connection
    conn = engine.connect()
    ctx = MigrationContext.configure(conn)
    op = Operations(ctx)
    # columns every Model already has; never created or altered here
    default_column_names = ['_real_id', '_trashed', '_created_at', '_updated_at', 'id']
    # column attributes copied verbatim when adding/comparing columns
    column_properties = ['key', 'primary_key', 'nullable', 'default',
                         'server_default', 'server_onupdate', 'index',
                         'unique', 'system', 'quote', 'doc', 'onupdate',
                         'autoincrement', 'constraints', 'foreign_keys']
    for model_table_name in model_meta.tables:
        # get model_table from model_meta
        model_table = model_meta.tables[model_table_name]
        db_table = None
        # make model_table with alembic if necessary
        if model_table_name not in db_meta.tables:
            try:
                op.create_table(
                    model_table_name,
                    Column('_real_id', Integer, primary_key = True),
                    Column('_trashed', Boolean, default = False),
                    Column('_created_at', DateTime, default=func.now()),
                    Column('_updated_at', DateTime, default=func.now(), onupdate=func.now()),
                    Column('id', String(35), unique = True)
                )
            except Exception:
                print(' Fail to make table: %s, please add it manually' % (model_table_name))
        else:
            db_table = db_meta.tables[model_table_name]
        for model_column in model_table.columns:
            # don't create or alter default columns
            if model_column.name in default_column_names:
                continue
            # get model_column properties
            model_column_kwargs = {}
            for prop in column_properties:
                model_column_kwargs[prop] = getattr(model_column, prop)
            # make model_column with alembic if necessary
            if model_column.name not in db_meta.tables[model_table_name].columns:
                try:
                    op.add_column(model_table_name, Column(model_column.name, model_column.type, **model_column_kwargs))
                except Exception:
                    print(' Fail to make column %s.%s, please add it manually' % (model_table_name, model_column.name))
            else:
                # get db_column information
                db_column = None
                if db_table is not None:
                    for column in db_table.columns:
                        if column.name == model_column.name:
                            db_column = column
                            break
                db_column_kwargs = {}
                for prop in column_properties:
                    db_column_kwargs[prop] = getattr(db_column, prop)
                # is alter column needed? (type or nullability changed)
                need_alter = str(model_column.type) != str(db_column.type) or model_column_kwargs['nullable'] != db_column_kwargs['nullable']
                if need_alter:
                    # alter model_table with alembic
                    try:
                        op.alter_column(model_table_name,
                                        model_column.name,
                                        nullable = model_column_kwargs['nullable'], # None,
                                        server_default = False,
                                        new_column_name = None,
                                        type_ = model_column.type, # None
                                        existing_type=None,
                                        existing_server_default=False,
                                        existing_nullable=None)
                    except Exception:
                        print(' Fail to alter column %s.%s, please alter it manually.\n Old type: %s, new type: %s' % (model_table_name, model_column.name, str(db_column.type), str(model_column.type)))
                        print(Fore.RESET)
from backend.pg import PGBackend
'''
@author: anant bhardwaj
@date: Oct 3, 2013
DataHub DB wrapper for backends (only postgres implemented)
Any new backend must implement the DataHubConnection interface
'''
class DataHubConnection:
    '''
    Thin wrapper that forwards every call to the configured backend
    (currently only the Postgres PGBackend implementation).
    '''
    def __init__(self, user, password, repo_base=None):
        self.backend = PGBackend(user, password, repo_base=repo_base)
    def reset_connection(self, repo_base):
        self.backend.reset_connection(repo_base=repo_base)
    def close_connection(self):
        self.backend.close_connection()
    def create_repo(self, repo):
        return self.backend.create_repo(repo=repo)
    def list_repos(self):
        return self.backend.list_repos()
    def delete_repo(self, repo, force=False):
        return self.backend.delete_repo(repo=repo, force=force)
    # NOTE(review): auto_in_future is accepted but never forwarded to the
    # backend — confirm whether this unused optional argument can be dropped
    def add_collaborator(self, repo, username,
                         privileges, auto_in_future=True):
        return self.backend.add_collaborator(
            repo=repo,
            username=username,
            privileges=privileges)
    def delete_collaborator(self, repo, username):
        return self.backend.delete_collaborator(repo=repo, username=username)
    def list_tables(self, repo):
        return self.backend.list_tables(repo=repo)
    def list_views(self, repo):
        return self.backend.list_views(repo=repo)
    def get_schema(self, table):
        return self.backend.get_schema(table=table)
    def execute_sql(self, query, params=None):
        return self.backend.execute_sql(query, params)
    def has_base_privilege(self, login, privilege):
        return self.backend.has_base_privilege(
            login=login, privilege=privilege)
    def has_repo_privilege(self, login, repo, privilege):
        return self.backend.has_repo_privilege(
            login=login, repo=repo, privilege=privilege)
    def has_table_privilege(self, login, table, privilege):
        return self.backend.has_table_privilege(
            login=login, table=table, privilege=privilege)
    def has_column_privilege(self, login, table, column, privilege):
        return self.backend.has_column_privilege(
            login=login, table=table, column=column, privilege=privilege)
    '''
    The following methods works only in superuser mode
    '''
    def create_user(self, username, password, create_db):
        return self.backend.create_user(username, password, create_db)
    def remove_user(self, username, remove_db):
        return self.backend.remove_user(username, remove_db)
    def change_password(self, username, password):
        return self.backend.change_password(username, password)
    # NOTE(review): the 'header' argument is accepted but not forwarded to
    # the backend here (nor in export_table/export_query) — confirm intent
    def import_file(self, table_name, file_path, file_format='CSV',
                    delimiter=',', header=True, encoding='ISO-8859-1',
                    quote_character='"'):
        return self.backend.import_file(
            table_name=table_name,
            file_path=file_path,
            file_format=file_format,
            delimiter=delimiter,
            encoding=encoding,
            quote_character=quote_character)
    def export_table(self, table_name, file_path, file_format='CSV',
                     delimiter=',', header=True):
        return self.backend.export_table(
            table_name=table_name,
            file_path=file_path,
            file_format=file_format,
            delimiter=delimiter)
    def export_query(self, query, file_path, file_format='CSV',
                     delimiter=',', header=True):
        return self.backend.export_query(
            query=query,
            file_path=file_path,
            file_format=file_format,
            delimiter=delimiter)
    def list_collaborators(self, repo_base, repo):
        return self.backend.list_collaborators(repo_base=repo_base, repo=repo)
Remove the unused optional `auto_in_future` argument from `add_collaborator`.
from backend.pg import PGBackend
'''
@author: anant bhardwaj
@date: Oct 3, 2013
DataHub DB wrapper for backends (only postgres implemented)
Any new backend must implement the DataHubConnection interface
'''
class DataHubConnection:
    '''
    Thin wrapper that forwards every call to the configured backend
    (currently only the Postgres PGBackend implementation).
    '''
    def __init__(self, user, password, repo_base=None):
        self.backend = PGBackend(user, password, repo_base=repo_base)
    def reset_connection(self, repo_base):
        self.backend.reset_connection(repo_base=repo_base)
    def close_connection(self):
        self.backend.close_connection()
    def create_repo(self, repo):
        return self.backend.create_repo(repo=repo)
    def list_repos(self):
        return self.backend.list_repos()
    def delete_repo(self, repo, force=False):
        return self.backend.delete_repo(repo=repo, force=force)
    def add_collaborator(self, repo, username, privileges):
        return self.backend.add_collaborator(
            repo=repo,
            username=username,
            privileges=privileges)
    def delete_collaborator(self, repo, username):
        return self.backend.delete_collaborator(repo=repo, username=username)
    def list_tables(self, repo):
        return self.backend.list_tables(repo=repo)
    def list_views(self, repo):
        return self.backend.list_views(repo=repo)
    def get_schema(self, table):
        return self.backend.get_schema(table=table)
    def execute_sql(self, query, params=None):
        return self.backend.execute_sql(query, params)
    def has_base_privilege(self, login, privilege):
        return self.backend.has_base_privilege(
            login=login, privilege=privilege)
    def has_repo_privilege(self, login, repo, privilege):
        return self.backend.has_repo_privilege(
            login=login, repo=repo, privilege=privilege)
    def has_table_privilege(self, login, table, privilege):
        return self.backend.has_table_privilege(
            login=login, table=table, privilege=privilege)
    def has_column_privilege(self, login, table, column, privilege):
        return self.backend.has_column_privilege(
            login=login, table=table, column=column, privilege=privilege)
    '''
    The following methods works only in superuser mode
    '''
    def create_user(self, username, password, create_db):
        return self.backend.create_user(username, password, create_db)
    def remove_user(self, username, remove_db):
        return self.backend.remove_user(username, remove_db)
    def change_password(self, username, password):
        return self.backend.change_password(username, password)
    # NOTE(review): the 'header' argument is accepted but not forwarded to
    # the backend here (nor in export_table/export_query) — confirm intent
    def import_file(self, table_name, file_path, file_format='CSV',
                    delimiter=',', header=True, encoding='ISO-8859-1',
                    quote_character='"'):
        return self.backend.import_file(
            table_name=table_name,
            file_path=file_path,
            file_format=file_format,
            delimiter=delimiter,
            encoding=encoding,
            quote_character=quote_character)
    def export_table(self, table_name, file_path, file_format='CSV',
                     delimiter=',', header=True):
        return self.backend.export_table(
            table_name=table_name,
            file_path=file_path,
            file_format=file_format,
            delimiter=delimiter)
    def export_query(self, query, file_path, file_format='CSV',
                     delimiter=',', header=True):
        return self.backend.export_query(
            query=query,
            file_path=file_path,
            file_format=file_format,
            delimiter=delimiter)
    def list_collaborators(self, repo_base, repo):
        return self.backend.list_collaborators(repo_base=repo_base, repo=repo)
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    '''
    South schema migration: widen Token.seed from 16 to 20 characters
    (reverted to 16 on backwards()).
    '''
    def forwards(self, orm):
        # Changing field 'Token.seed'
        db.alter_column(u'two_factor_token', 'seed', self.gf('django.db.models.fields.CharField')(max_length=20))
    def backwards(self, orm):
        # Changing field 'Token.seed'
        db.alter_column(u'two_factor_token', 'seed', self.gf('django.db.models.fields.CharField')(max_length=16))
    # Frozen ORM state used by South to reconstruct the models at this
    # point in history; generated, do not edit by hand.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'two_factor.token': {
            'Meta': {'object_name': 'Token'},
            'backup_phone': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'method': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
            'seed': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
        },
        u'two_factor.verifiedcomputer': {
            'Meta': {'object_name': 'VerifiedComputer'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
            'last_used_at': ('django.db.models.fields.DateTimeField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'verified_until': ('django.db.models.fields.DateTimeField', [], {})
        }
    }
    complete_apps = ['two_factor']
Change the `seed` field's max_length to match the model definition.
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        # Changing field 'Token.seed': widen to 32 characters to match the model
        db.alter_column(u'two_factor_token', 'seed', self.gf('django.db.models.fields.CharField')(max_length=32))
def backwards(self, orm):
# Changing field 'Token.seed'
db.alter_column(u'two_factor_token', 'seed', self.gf('django.db.models.fields.CharField')(max_length=16))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'two_factor.token': {
'Meta': {'object_name': 'Token'},
'backup_phone': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'method': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'seed': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'two_factor.verifiedcomputer': {
'Meta': {'object_name': 'VerifiedComputer'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'verified_until': ('django.db.models.fields.DateTimeField', [], {})
}
}
complete_apps = ['two_factor']
|
from __future__ import print_function
from functools import wraps
import itertools
import time
import logging
import os
import uuid
import warnings
from . import browserlib
from . import _glyph_functions as gf
from .document import Document
from .embed import notebook_div, file_html, autoload_server
from .objects import Axis, ColumnDataSource, Glyph, Grid, GridPlot, Legend
from .palettes import brewer
from .plotting_helpers import (
get_default_color, get_default_alpha, _handle_1d_data_args, _list_attr_splat
)
from .resources import Resources
from .session import Cloud, DEFAULT_SERVER_URL, Session
logger = logging.getLogger(__name__)
_default_document = Document()
_default_session = None
_default_file = None
_default_notebook = None
def curdoc():
    ''' Return the current document.

    Returns:
        doc : the current default document object.
    '''
    # When the plotting API is invoked from inside the Bokeh server during a
    # request (applets do this), use the document attached to the flask
    # request rather than the module-level default document.
    try:
        from flask import request
        doc = request.bokeh_server_document
        logger.debug("returning config from flask request")
        return doc
    except (ImportError, RuntimeError, AttributeError):
        return _default_document
def curplot():
    ''' Return the plot most recently made current on the default document.

    Returns:
        plot : the current default plot (or None)
    '''
    doc = curdoc()
    return doc.curplot()
def cursession():
    ''' Return the current default server session, if one has been set up
    (e.g. by output_server() or output_cloud()).

    Returns:
        session : the current default session object (or None)
    '''
    return _default_session
def hold(value=True):
    ''' Turn plot hold on or off for the current document.

    Convenience wrapper equivalent to ``curdoc().hold(...)``.

    Args:
        value (bool, optional) : whether hold should be turned on or off (default: True)

    Returns:
        None
    '''
    doc = curdoc()
    doc.hold(value)
def figure(**kwargs):
    ''' Activate a new figure on the current document.

    All subsequent plotting operations affect the new figure. Any plot
    style keyword parameters are accepted and forwarded.

    Returns:
        None
    '''
    doc = curdoc()
    doc.figure(**kwargs)
def output_server(docname, session=None, url="default", name=None):
    """ Cause plotting commands to automatically persist plots to a Bokeh server.

    Uses an explicitly provided Session, or else the (lazily created)
    default session.

    Args:
        docname (str) : name of document to push on Bokeh server
            An existing document with the same name will be overwritten.
        session (Session, optional) : An explicit session to use (default: None)
            If session is None, use the default session
        url (str, optional) : URL of the Bokeh server (default: "default")
            if url is "default" use session.DEFAULT_SERVER_URL
        name (str, optional) :
            if name is None, use the server URL as the name

    Returns:
        None

    .. note:: Generally, this should be called at the beginning of an
              interactive session or the top of a script.

    .. note:: Calling this function replaces any existing default Server session.
    """
    global _default_session
    if url == "default":
        url = DEFAULT_SERVER_URL
    name = url if name is None else name
    if not session:
        # Lazily create the module-level default session on first use.
        if not _default_session:
            _default_session = Session(name=name, root_url=url)
        session = _default_session
    session.use_doc(docname)
    session.load_document(curdoc())
def output_cloud(docname):
    """ Cause plotting commands to automatically persist plots to the Bokeh
    cloud server (shorthand for output_server with a Cloud session).

    Args:
        docname (str) : name of document to push on Bokeh server
            An existing document with the same name will be overwritten.

    .. note:: Generally, this should be called at the beginning of an
              interactive session or the top of a script.

    .. note:: Calling this function replaces any existing default Server session.
    """
    output_server(docname, session=Cloud())
def output_notebook(url=None, docname=None, session=None, name=None):
    """ Cause plotting commands to display inline in the IPython notebook.

    When a server session, url, or name is supplied the document is also
    persisted to a Bokeh server; otherwise BokehJS is loaded directly into
    the notebook.
    """
    global _default_notebook
    if session or url or name:
        if docname is None:
            docname = "IPython Session at %s" % time.ctime()
        output_server(docname, url=url, session=session, name=name)
    else:
        from . import load_notebook
        load_notebook()
    _default_notebook = True
def output_file(filename, title="Bokeh Plot", autosave=True, mode="inline", root_dir=None):
    """ Output plots to a static HTML file.

    .. note:: This file will be overwritten each time show() or save() is invoked.

    Args:
        filename (str) : path of the HTML file to write
        title (str, optional) : HTML document title (default: "Bokeh Plot")
        autosave (bool, optional) : whether to automatically save (default: True)
            If **autosave** is True, then every time plot() or one of the other
            visual functions is called, this causes the file to be saved. If it
            is False, then the file is only saved upon calling show().
        mode (str, optional) : how to include BokehJS (default: "inline")
            **mode** can be 'inline', 'cdn', 'relative(-dev)' or 'absolute(-dev)'.
            In the 'relative(-dev)' case, **root_dir** can be specified to indicate the
            base directory from which the path to the various static files should be
            computed.

    .. note:: Generally, this should be called at the beginning of an
              interactive session or the top of a script.
    """
    global _default_file
    _default_file = dict(
        filename=filename,
        resources=Resources(mode=mode, root_dir=root_dir, minified=False),
        autosave=autosave,
        title=title,
    )
    if os.path.isfile(filename):
        print("Session output file '%s' already exists, will be overwritten." % filename)
def show(browser=None, new="tab", url=None):
    """ 'shows' the current plot, by auto-raising the window or tab
    displaying the current plot (for file/server output modes) or displaying
    it in an output cell (IPython notebook).

    Args:
        browser (str, optional) : browser to show with (default: None)
            For systems that support it, the **browser** argument allows specifying
            which browser to display in, e.g. "safari", "firefox", "opera",
            "windows-default". (See the webbrowser module documentation in the
            standard lib for more details.)
        new (str, optional) : new file output mode (default: "tab")
            For file-based output, opens or raises the browser window
            showing the current output file. If **new** is 'tab', then
            opens a new tab. If **new** is 'window', then opens a new window.
    """
    filename = _default_file['filename'] if _default_file else None
    session = cursession()
    notebook = _default_notebook
    # Translate our 'tab'/'window' strings into webbrowser.open's `new` arg.
    open_mode = {'tab': 2, 'window': 1}[new]
    controller = browserlib.get_browser_controller(browser=browser)
    plot = curplot()
    if not plot:
        warnings.warn("No current plot to show. Use renderer functions (circle, rect, etc.) to create a current plot (see http://bokeh.pydata.org/index.html)")
        return
    if notebook and session:
        # Notebook + server: push then embed an autoload snippet.
        import IPython.core.displaypub as displaypub
        push(session=session)
        snippet = autoload_server(plot, cursession())
        displaypub.publish_display_data('bokeh', {'text/html': snippet})
    elif notebook:
        # Notebook only: embed the plot div directly.
        import IPython.core.displaypub as displaypub
        displaypub.publish_display_data('bokeh', {'text/html': notebook_div(plot)})
    elif session:
        # Server only: push, then open either the given url or the doc link.
        push()
        if url:
            controller.open(url, new=open_mode)
        else:
            controller.open(session.object_link(curdoc()._plotcontext))
    elif filename:
        # File output: save, then open the file in the browser.
        save(filename)
        controller.open("file://" + os.path.abspath(filename), new=open_mode)
def save(filename=None, resources=None):
    """ Updates the output file with the HTML for the current document.

    If a filename is supplied, or output_file(...) has been called, this will
    save the plot to the given filename.

    Args:
        filename (str, optional) : filename to save document under (default: None)
            if `filename` is None, the current output_file(...) filename is used if present
        resources (Resources, optional) : BokehJS resource config to use
            if `resources` is None, the current default resource config is used

    Returns:
        None
    """
    if filename is None and _default_file:
        filename = _default_file['filename']
    if resources is None and _default_file:
        resources = _default_file['resources']
    if not filename:
        warnings.warn("save() called but no filename was supplied and output_file(...) was never called, nothing saved")
        return
    if not resources:
        warnings.warn("save() called but no resources was supplied and output_file(...) was never called, nothing saved")
        return
    if not curplot():
        warnings.warn("No current plot to save. Use renderer functions (circle, rect, etc.) to create a current plot (see http://bokeh.pydata.org/index.html)")
        return
    html = file_html(curdoc(), resources, _default_file['title'])
    # Write as text with an explicit UTF-8 encoding. io.open behaves
    # identically on Python 2 and 3; normalizing to unicode first avoids the
    # Python 2 failure mode where file_html returns a utf-8 encoded str.
    import io
    if isinstance(html, bytes):
        html = html.decode('utf-8')
    with io.open(filename, "w", encoding="utf-8") as f:
        f.write(html)
def push(session=None, document=None):
    """ Updates the server with the data for the current document.

    Args:
        session (Session, optional) : session to push to (default: None)
            if `session` is None, the current output_server(...) session is used if present
        document (Document, optional) : BokehJS document to push
            if `document` is None, the current default document is pushed

    Returns:
        None
    """
    if not session:
        session = cursession()
    if not document:
        document = curdoc()
    if session:
        # Store the resolved document; previously this always stored
        # curdoc(), silently ignoring an explicitly passed `document`.
        return session.store_document(document)
    else:
        warnings.warn("push() called but no session was supplied and output_server(...) was never called, nothing pushed")
def _doc_wrap(func):
@wraps(func)
def wrapper(*args, **kwargs):
retval = func(curdoc(), *args, **kwargs)
if cursession() and curdoc()._autostore:
push()
if _default_file and _default_file['autosave']:
save()
return retval
wrapper.__doc__ += "\nThis is a convenience function that acts on the current document, and is equivalent to curdoc().%s(...)" % func.__name__
return wrapper
# Module-level glyph functions: each is the corresponding function from
# _glyph_functions, wrapped by _doc_wrap so it targets the current default
# document (and triggers autostore/autosave as configured).
annular_wedge = _doc_wrap(gf.annular_wedge)
annulus = _doc_wrap(gf.annulus)
arc = _doc_wrap(gf.arc)
asterisk = _doc_wrap(gf.asterisk)
bezier = _doc_wrap(gf.bezier)
circle = _doc_wrap(gf.circle)
circle_cross = _doc_wrap(gf.circle_cross)
circle_x = _doc_wrap(gf.circle_x)
cross = _doc_wrap(gf.cross)
diamond = _doc_wrap(gf.diamond)
diamond_cross = _doc_wrap(gf.diamond_cross)
image = _doc_wrap(gf.image)
image_rgba = _doc_wrap(gf.image_rgba)
image_url = _doc_wrap(gf.image_url)
inverted_triangle = _doc_wrap(gf.inverted_triangle)
line = _doc_wrap(gf.line)
multi_line = _doc_wrap(gf.multi_line)
oval = _doc_wrap(gf.oval)
patch = _doc_wrap(gf.patch)
patches = _doc_wrap(gf.patches)
quad = _doc_wrap(gf.quad)
quadratic = _doc_wrap(gf.quadratic)
ray = _doc_wrap(gf.ray)
rect = _doc_wrap(gf.rect)
segment = _doc_wrap(gf.segment)
square = _doc_wrap(gf.square)
square_cross = _doc_wrap(gf.square_cross)
square_x = _doc_wrap(gf.square_x)
text = _doc_wrap(gf.text)
triangle = _doc_wrap(gf.triangle)
wedge = _doc_wrap(gf.wedge)
x = _doc_wrap(gf.x)
# Mapping of marker-type names (and single-character shorthands like "*",
# "+", "o") accepted by scatter() to the glyph functions that render them.
_marker_types = {
    "asterisk": asterisk,
    "circle": circle,
    "circle_cross": circle_cross,
    "circle_x": circle_x,
    "cross": cross,
    "diamond": diamond,
    "diamond_cross": diamond_cross,
    "inverted_triangle": inverted_triangle,
    "square": square,
    "square_x": square_x,
    "square_cross": square_cross,
    "triangle": triangle,
    "x": x,
    "*": asterisk,
    "+": cross,
    "o": circle,
    "ox": circle_x,
    "o+": circle_cross,
}
def markers():
    """ Prints the sorted list of valid marker types for scatter()

    Returns:
        None
    """
    print(sorted(_marker_types))
_color_fields = set(["color", "fill_color", "line_color"])
_alpha_fields = set(["alpha", "fill_alpha", "line_alpha"])
def scatter(*args, **kwargs):
    """ Creates a scatter plot of the given x and y items.

    Args:
        *args : The data to plot. Can be of several forms:

            (X, Y)
                Two 1D arrays or iterables
            (XNAME, YNAME)
                Two bokeh DataSource/ColumnsRef

        marker (str, optional): a valid marker_type, defaults to "circle"
        color (color value, optional): shorthand to set both fill and line color

    All the :ref:`userguide_line_properties` and :ref:`userguide_fill_properties` are
    also accepted as keyword parameters.

    Examples:
        >>> scatter([1,2,3],[4,5,6], fill_color="red")
        >>> scatter("data1", "data2", source=data_source, ...)
    """
    source = kwargs.get("source", None)
    names, datasource = _handle_1d_data_args(args, datasource=source)
    kwargs["source"] = datasource
    markertype = kwargs.get("marker", "circle")
    # TODO: How to handle this? Just call curplot()?
    if not _color_fields.intersection(kwargs):
        kwargs['color'] = get_default_color()
    if not _alpha_fields.intersection(kwargs):
        kwargs['alpha'] = get_default_alpha()
    if markertype not in _marker_types:
        raise ValueError("Invalid marker type '%s'. Use markers() to see a list of valid marker types." % markertype)
    return _marker_types[markertype](*args, **kwargs)
def gridplot(plot_arrangement, name=None):
    """ Generate a plot that arranges several subplots into a grid.

    Args:
        plot_arrangement (list[:class:`Plot <bokeh.objects.Plot>`]) : plots to arrange in a grid
        name (str) : name for this plot

    .. note:: `plot_arrangement` can be nested, e.g [[p1, p2], [p3, p4]]

    Returns:
        grid_plot: the current :class:`GridPlot <bokeh.objects.GridPlot>`
    """
    grid = GridPlot(children=plot_arrangement)
    if name:
        grid._id = name
    # The grid now owns the subplots; remove them from the plot context so
    # they are not rendered twice.
    flattened = set(itertools.chain.from_iterable(plot_arrangement))
    context = curdoc().get_context()
    context.children = list(set(context.children) - flattened)
    curdoc().add(grid)
    curdoc()._current_plot = grid  # TODO (bev) don't use private attrs
    if _default_session:
        push()
    if _default_file and _default_file['autosave']:
        save()
    return grid
def xaxis():
    """ Get the current `x` axis object(s)

    Returns:
        Returns x-axis object or splattable list of x-axis objects on the current plot
    """
    plot = curplot()
    if plot is None:
        return None
    found = [r for r in plot.renderers if isinstance(r, Axis) and r.dimension == 0]
    return _list_attr_splat(found)
def yaxis():
    """ Get the current `y` axis object(s)

    Returns:
        Returns y-axis object or splattable list of y-axis objects on the current plot
    """
    plot = curplot()
    if plot is None:
        return None
    found = [r for r in plot.renderers if isinstance(r, Axis) and r.dimension == 1]
    return _list_attr_splat(found)
def axis():
    """ Get all (x and y) axis objects on the current plot

    Returns:
        Returns splattable list of the x-axis and y-axis objects on the current plot
    """
    return _list_attr_splat(xaxis() + yaxis())
def legend():
    """ Get the current :class:`legend <bokeh.objects.Legend>` object(s)

    Returns:
        Returns legend object or splattable list of legend objects on the current plot
    """
    plot = curplot()
    if plot is None:
        return None
    found = [r for r in plot.renderers if isinstance(r, Legend)]
    return _list_attr_splat(found)
def xgrid():
    """ Get the current `x` :class:`grid <bokeh.objects.Grid>` object(s)

    Returns:
        Returns x-grid object or splattable list of x-grid objects on the current plot
    """
    plot = curplot()
    if plot is None:
        return None
    found = [r for r in plot.renderers if isinstance(r, Grid) and r.dimension == 0]
    return _list_attr_splat(found)
def ygrid():
    """ Get the current `y` :class:`grid <bokeh.objects.Grid>` object(s)

    Returns:
        Returns y-grid object or splattable list of y-grid objects on the current plot
    """
    plot = curplot()
    if plot is None:
        return None
    found = [r for r in plot.renderers if isinstance(r, Grid) and r.dimension == 1]
    return _list_attr_splat(found)
def grid():
    """ Get all (x and y) :class:`grid <bokeh.objects.Grid>` objects on the current plot

    Returns:
        Returns splattable list of x-grid and y-grid objects on the current plot
    """
    return _list_attr_splat(xgrid() + ygrid())
def load_object(obj):
    """updates object from the server

    Uses the current default session and document. NOTE(review): if no
    session has been established, cursession() returns None and this raises
    AttributeError.
    """
    cursession().load_object(obj, curdoc())
Update plotting.py
Fix for Python 2.7: write the saved HTML with io.open and an explicit UTF-8 encoding (decoding via decode_utf8) so unicode output works on both Python 2 and 3.
from __future__ import print_function
from functools import wraps
import itertools
import time
import logging
import os
import uuid
import warnings
import io
from . import browserlib
from . import _glyph_functions as gf
from .document import Document
from .embed import notebook_div, file_html, autoload_server
from .objects import Axis, ColumnDataSource, Glyph, Grid, GridPlot, Legend
from .palettes import brewer
from .plotting_helpers import (
get_default_color, get_default_alpha, _handle_1d_data_args, _list_attr_splat
)
from .resources import Resources
from .session import Cloud, DEFAULT_SERVER_URL, Session
from .utils import decode_utf8
logger = logging.getLogger(__name__)
_default_document = Document()
_default_session = None
_default_file = None
_default_notebook = None
def curdoc():
    ''' Return the current document.

    Returns:
        doc : the current default document object.
    '''
    # When the plotting API is invoked from inside the Bokeh server during a
    # request (applets do this), use the document attached to the flask
    # request rather than the module-level default document.
    try:
        from flask import request
        doc = request.bokeh_server_document
        logger.debug("returning config from flask request")
        return doc
    except (ImportError, RuntimeError, AttributeError):
        return _default_document
def curplot():
    ''' Return the plot most recently made current on the default document.

    Returns:
        plot : the current default plot (or None)
    '''
    doc = curdoc()
    return doc.curplot()
def cursession():
    ''' Return the current default server session, if one has been set up
    (e.g. by output_server() or output_cloud()).

    Returns:
        session : the current default session object (or None)
    '''
    return _default_session
def hold(value=True):
    ''' Turn plot hold on or off for the current document.

    Convenience wrapper equivalent to ``curdoc().hold(...)``.

    Args:
        value (bool, optional) : whether hold should be turned on or off (default: True)

    Returns:
        None
    '''
    doc = curdoc()
    doc.hold(value)
def figure(**kwargs):
    ''' Activate a new figure on the current document.

    All subsequent plotting operations affect the new figure. Any plot
    style keyword parameters are accepted and forwarded.

    Returns:
        None
    '''
    doc = curdoc()
    doc.figure(**kwargs)
def output_server(docname, session=None, url="default", name=None):
    """ Cause plotting commands to automatically persist plots to a Bokeh server.

    Uses an explicitly provided Session, or else the (lazily created)
    default session.

    Args:
        docname (str) : name of document to push on Bokeh server
            An existing document with the same name will be overwritten.
        session (Session, optional) : An explicit session to use (default: None)
            If session is None, use the default session
        url (str, optional) : URL of the Bokeh server (default: "default")
            if url is "default" use session.DEFAULT_SERVER_URL
        name (str, optional) :
            if name is None, use the server URL as the name

    Returns:
        None

    .. note:: Generally, this should be called at the beginning of an
              interactive session or the top of a script.

    .. note:: Calling this function replaces any existing default Server session.
    """
    global _default_session
    if url == "default":
        url = DEFAULT_SERVER_URL
    name = url if name is None else name
    if not session:
        # Lazily create the module-level default session on first use.
        if not _default_session:
            _default_session = Session(name=name, root_url=url)
        session = _default_session
    session.use_doc(docname)
    session.load_document(curdoc())
def output_cloud(docname):
    """ Cause plotting commands to automatically persist plots to the Bokeh
    cloud server (shorthand for output_server with a Cloud session).

    Args:
        docname (str) : name of document to push on Bokeh server
            An existing document with the same name will be overwritten.

    .. note:: Generally, this should be called at the beginning of an
              interactive session or the top of a script.

    .. note:: Calling this function replaces any existing default Server session.
    """
    output_server(docname, session=Cloud())
def output_notebook(url=None, docname=None, session=None, name=None):
    """ Cause plotting commands to display inline in the IPython notebook.

    When a server session, url, or name is supplied the document is also
    persisted to a Bokeh server; otherwise BokehJS is loaded directly into
    the notebook.
    """
    global _default_notebook
    if session or url or name:
        if docname is None:
            docname = "IPython Session at %s" % time.ctime()
        output_server(docname, url=url, session=session, name=name)
    else:
        from . import load_notebook
        load_notebook()
    _default_notebook = True
def output_file(filename, title="Bokeh Plot", autosave=True, mode="inline", root_dir=None):
    """ Output plots to a static HTML file.

    .. note:: This file will be overwritten each time show() or save() is invoked.

    Args:
        filename (str) : path of the HTML file to write
        title (str, optional) : HTML document title (default: "Bokeh Plot")
        autosave (bool, optional) : whether to automatically save (default: True)
            If **autosave** is True, then every time plot() or one of the other
            visual functions is called, this causes the file to be saved. If it
            is False, then the file is only saved upon calling show().
        mode (str, optional) : how to include BokehJS (default: "inline")
            **mode** can be 'inline', 'cdn', 'relative(-dev)' or 'absolute(-dev)'.
            In the 'relative(-dev)' case, **root_dir** can be specified to indicate the
            base directory from which the path to the various static files should be
            computed.

    .. note:: Generally, this should be called at the beginning of an
              interactive session or the top of a script.
    """
    global _default_file
    _default_file = dict(
        filename=filename,
        resources=Resources(mode=mode, root_dir=root_dir, minified=False),
        autosave=autosave,
        title=title,
    )
    if os.path.isfile(filename):
        print("Session output file '%s' already exists, will be overwritten." % filename)
def show(browser=None, new="tab", url=None):
    """ 'shows' the current plot, by auto-raising the window or tab
    displaying the current plot (for file/server output modes) or displaying
    it in an output cell (IPython notebook).

    Args:
        browser (str, optional) : browser to show with (default: None)
            For systems that support it, the **browser** argument allows specifying
            which browser to display in, e.g. "safari", "firefox", "opera",
            "windows-default". (See the webbrowser module documentation in the
            standard lib for more details.)
        new (str, optional) : new file output mode (default: "tab")
            For file-based output, opens or raises the browser window
            showing the current output file. If **new** is 'tab', then
            opens a new tab. If **new** is 'window', then opens a new window.
    """
    filename = _default_file['filename'] if _default_file else None
    session = cursession()
    notebook = _default_notebook
    # Translate our 'tab'/'window' strings into webbrowser.open's `new` arg.
    open_mode = {'tab': 2, 'window': 1}[new]
    controller = browserlib.get_browser_controller(browser=browser)
    plot = curplot()
    if not plot:
        warnings.warn("No current plot to show. Use renderer functions (circle, rect, etc.) to create a current plot (see http://bokeh.pydata.org/index.html)")
        return
    if notebook and session:
        # Notebook + server: push then embed an autoload snippet.
        import IPython.core.displaypub as displaypub
        push(session=session)
        snippet = autoload_server(plot, cursession())
        displaypub.publish_display_data('bokeh', {'text/html': snippet})
    elif notebook:
        # Notebook only: embed the plot div directly.
        import IPython.core.displaypub as displaypub
        displaypub.publish_display_data('bokeh', {'text/html': notebook_div(plot)})
    elif session:
        # Server only: push, then open either the given url or the doc link.
        push()
        if url:
            controller.open(url, new=open_mode)
        else:
            controller.open(session.object_link(curdoc()._plotcontext))
    elif filename:
        # File output: save, then open the file in the browser.
        save(filename)
        controller.open("file://" + os.path.abspath(filename), new=open_mode)
def save(filename=None, resources=None):
    """ Write the current document to the output file as HTML.

    If a filename is supplied, or output_file(...) has been called, this will
    save the plot to the given filename.

    Args:
        filename (str, optional) : filename to save document under (default: None)
            if `filename` is None, the current output_file(...) filename is used if present
        resources (Resources, optional) : BokehJS resource config to use
            if `resources` is None, the current default resource config is used

    Returns:
        None
    """
    if filename is None and _default_file:
        filename = _default_file['filename']
    if resources is None and _default_file:
        resources = _default_file['resources']
    if not filename:
        warnings.warn("save() called but no filename was supplied and output_file(...) was never called, nothing saved")
        return
    if not resources:
        warnings.warn("save() called but no resources was supplied and output_file(...) was never called, nothing saved")
        return
    if not curplot():
        warnings.warn("No current plot to save. Use renderer functions (circle, rect, etc.) to create a current plot (see http://bokeh.pydata.org/index.html)")
        return
    html = file_html(curdoc(), resources, _default_file['title'])
    # Write as text with an explicit UTF-8 encoding; decode_utf8 normalizes
    # file_html's return value to unicode on both Python 2 and 3.
    with io.open(filename, "w", encoding="utf-8") as out:
        out.write(decode_utf8(html))
def push(session=None, document=None):
    """ Updates the server with the data for the current document.

    Args:
        session (Session, optional) : session to push to (default: None)
            if `session` is None, the current output_server(...) session is used if present
        document (Document, optional) : BokehJS document to push
            if `document` is None, the current default document is pushed

    Returns:
        None
    """
    if not session:
        session = cursession()
    if not document:
        document = curdoc()
    if session:
        # Store the resolved document; previously this always stored
        # curdoc(), silently ignoring an explicitly passed `document`.
        return session.store_document(document)
    else:
        warnings.warn("push() called but no session was supplied and output_server(...) was never called, nothing pushed")
def _doc_wrap(func):
@wraps(func)
def wrapper(*args, **kwargs):
retval = func(curdoc(), *args, **kwargs)
if cursession() and curdoc()._autostore:
push()
if _default_file and _default_file['autosave']:
save()
return retval
wrapper.__doc__ += "\nThis is a convenience function that acts on the current document, and is equivalent to curdoc().%s(...)" % func.__name__
return wrapper
# Module-level glyph functions: each is the corresponding function from
# _glyph_functions, wrapped by _doc_wrap so it targets the current default
# document (and triggers autostore/autosave as configured).
annular_wedge = _doc_wrap(gf.annular_wedge)
annulus = _doc_wrap(gf.annulus)
arc = _doc_wrap(gf.arc)
asterisk = _doc_wrap(gf.asterisk)
bezier = _doc_wrap(gf.bezier)
circle = _doc_wrap(gf.circle)
circle_cross = _doc_wrap(gf.circle_cross)
circle_x = _doc_wrap(gf.circle_x)
cross = _doc_wrap(gf.cross)
diamond = _doc_wrap(gf.diamond)
diamond_cross = _doc_wrap(gf.diamond_cross)
image = _doc_wrap(gf.image)
image_rgba = _doc_wrap(gf.image_rgba)
image_url = _doc_wrap(gf.image_url)
inverted_triangle = _doc_wrap(gf.inverted_triangle)
line = _doc_wrap(gf.line)
multi_line = _doc_wrap(gf.multi_line)
oval = _doc_wrap(gf.oval)
patch = _doc_wrap(gf.patch)
patches = _doc_wrap(gf.patches)
quad = _doc_wrap(gf.quad)
quadratic = _doc_wrap(gf.quadratic)
ray = _doc_wrap(gf.ray)
rect = _doc_wrap(gf.rect)
segment = _doc_wrap(gf.segment)
square = _doc_wrap(gf.square)
square_cross = _doc_wrap(gf.square_cross)
square_x = _doc_wrap(gf.square_x)
text = _doc_wrap(gf.text)
triangle = _doc_wrap(gf.triangle)
wedge = _doc_wrap(gf.wedge)
x = _doc_wrap(gf.x)
# Mapping of marker-type names (and single-character shorthands like "*",
# "+", "o") accepted by scatter() to the glyph functions that render them.
_marker_types = {
    "asterisk": asterisk,
    "circle": circle,
    "circle_cross": circle_cross,
    "circle_x": circle_x,
    "cross": cross,
    "diamond": diamond,
    "diamond_cross": diamond_cross,
    "inverted_triangle": inverted_triangle,
    "square": square,
    "square_x": square_x,
    "square_cross": square_cross,
    "triangle": triangle,
    "x": x,
    "*": asterisk,
    "+": cross,
    "o": circle,
    "ox": circle_x,
    "o+": circle_cross,
}
def markers():
    """ Prints the sorted list of valid marker types for scatter()

    Returns:
        None
    """
    print(sorted(_marker_types))
_color_fields = set(["color", "fill_color", "line_color"])
_alpha_fields = set(["alpha", "fill_alpha", "line_alpha"])
def scatter(*args, **kwargs):
    """ Creates a scatter plot of the given x and y items.

    Args:
        *args : The data to plot. Can be of several forms:

            (X, Y)
                Two 1D arrays or iterables
            (XNAME, YNAME)
                Two bokeh DataSource/ColumnsRef

        marker (str, optional): a valid marker_type, defaults to "circle"
        color (color value, optional): shorthand to set both fill and line color

    All the :ref:`userguide_line_properties` and :ref:`userguide_fill_properties` are
    also accepted as keyword parameters.

    Examples:
        >>> scatter([1,2,3],[4,5,6], fill_color="red")
        >>> scatter("data1", "data2", source=data_source, ...)
    """
    source = kwargs.get("source", None)
    names, datasource = _handle_1d_data_args(args, datasource=source)
    kwargs["source"] = datasource
    markertype = kwargs.get("marker", "circle")
    # TODO: How to handle this? Just call curplot()?
    if not _color_fields.intersection(kwargs):
        kwargs['color'] = get_default_color()
    if not _alpha_fields.intersection(kwargs):
        kwargs['alpha'] = get_default_alpha()
    if markertype not in _marker_types:
        raise ValueError("Invalid marker type '%s'. Use markers() to see a list of valid marker types." % markertype)
    return _marker_types[markertype](*args, **kwargs)
def gridplot(plot_arrangement, name=None):
    """ Generate a plot that arranges several subplots into a grid.

    Args:
        plot_arrangement (list[:class:`Plot <bokeh.objects.Plot>`]) : plots to arrange in a grid
        name (str) : name for this plot

    .. note:: `plot_arrangement` can be nested, e.g [[p1, p2], [p3, p4]]

    Returns:
        grid_plot: the current :class:`GridPlot <bokeh.objects.GridPlot>`
    """
    grid = GridPlot(children=plot_arrangement)
    if name:
        grid._id = name
    # Walk the plot_arrangement and remove them from the plotcontext,
    # so they don't show up twice
    subplots = itertools.chain.from_iterable(plot_arrangement)
    curdoc().get_context().children = list(set(curdoc().get_context().children) - set(subplots))
    curdoc().add(grid)
    curdoc()._current_plot = grid # TODO (bev) don't use private attrs
    # Mirror the output-state side effects of the other plotting functions:
    # push to a live session and/or autosave to file when configured.
    if _default_session:
        push()
    if _default_file and _default_file['autosave']:
        save()
    return grid
def xaxis():
    """ Get the current axis objects

    Returns:
        Returns axis object or splattable list of axis objects on the current plot
    """
    plot = curplot()
    if plot is None:
        return None
    # dimension 0 identifies x-axes among the plot's renderers
    found = [r for r in plot.renderers
             if isinstance(r, Axis) and r.dimension == 0]
    return _list_attr_splat(found)
def yaxis():
    """ Get the current `y` axis object(s)

    Returns:
        Returns y-axis object or splattable list of y-axis objects on the current plot
    """
    plot = curplot()
    if plot is None:
        return None
    # dimension 1 identifies y-axes among the plot's renderers
    found = [r for r in plot.renderers
             if isinstance(r, Axis) and r.dimension == 1]
    return _list_attr_splat(found)
def axis():
    """ Get all the current axis objects, both `x` and `y`

    Returns:
        Returns axis object or splattable list of axis objects on the current plot
    """
    # Docstring fix: this returns the concatenation of the x-axes and
    # y-axes on the current plot, not just the x-axes.
    return _list_attr_splat(xaxis() + yaxis())
def legend():
    """ Get the current :class:`legend <bokeh.objects.Legend>` object(s)

    Returns:
        Returns legend object or splattable list of legend objects on the current plot
    """
    plot = curplot()
    if plot is None:
        return None
    found = [r for r in plot.renderers if isinstance(r, Legend)]
    return _list_attr_splat(found)
def xgrid():
    """ Get the current `x` :class:`grid <bokeh.objects.Grid>` object(s)

    Returns:
        Returns x-grid object or splattable list of x-grid objects on the current plot
    """
    # Docstring fix: the Returns section previously said "legend object";
    # this function selects Grid renderers with dimension 0 (x-grids).
    p = curplot()
    if p is None:
        return None
    grid = [obj for obj in p.renderers if isinstance(obj, Grid) and obj.dimension == 0]
    return _list_attr_splat(grid)
def ygrid():
    """ Get the current `y` :class:`grid <bokeh.objects.Grid>` object(s)

    Returns:
        Returns y-grid object or splattable list of y-grid objects on the current plot
    """
    plot = curplot()
    if plot is None:
        return None
    # dimension 1 identifies y-grids among the plot's renderers
    found = [r for r in plot.renderers
             if isinstance(r, Grid) and r.dimension == 1]
    return _list_attr_splat(found)
def grid():
    """ Get the current :class:`grid <bokeh.objects.Grid>` object(s)

    Returns:
        Returns grid object or splattable list of grid objects on the current plot
    """
    all_grids = xgrid() + ygrid()
    return _list_attr_splat(all_grids)
def load_object(obj):
    """updates object from the server

    Fetches the latest state of ``obj`` from the current session and
    applies it in the context of the current document.
    """
    cursession().load_object(obj, curdoc())
|
#!/usr/bin/env python
# Copyright 2017, Google, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START classify_text_tutorial]
"""Using the classify_text method to find content categories of text files,
Then use the content category labels to compare text similarity.
For more information, see the tutorial page at
https://cloud.google.com/natural-language/docs/classify-text-tutorial.
"""
# [START classify_text_tutorial_import]
import argparse
import io
import json
import os
from google.cloud import language_v1beta2
from google.cloud.language_v1beta2 import enums
from google.cloud.language_v1beta2 import types
import numpy
import six
# [END classify_text_tutorial_import]
# [START def_classify]
def classify(text, verbose=True):
    """Classify the input text into categories.

    Args:
        text: the plain text to classify.
        verbose: when True, also print the text and each category with
            its confidence score.

    Returns:
        dict mapping category name -> confidence, usable as a sparse vector.
    """
    # NOTE(review): this performs a network call to the Cloud Natural
    # Language API and requires application-default credentials.
    language_client = language_v1beta2.LanguageServiceClient()
    document = types.Document(
        content=text,
        type=enums.Document.Type.PLAIN_TEXT)
    response = language_client.classify_text(document)
    categories = response.categories

    result = {}

    for category in categories:
        # Turn the categories into a dictionary of the form:
        # {category.name: category.confidence}, so that they can
        # be treated as a sparse vector.
        result[category.name] = category.confidence

    if verbose:
        print(text)
        for category in categories:
            print(u'=' * 20)
            print(u'{:<16}: {}'.format('category', category.name))
            print(u'{:<16}: {}'.format('confidence', category.confidence))

    return result
# [END def_classify]
# [START def_index]
def index(path, index_file):
    """Classify each text file in a directory and write
    the results to the index_file.

    Args:
        path: directory whose regular files are classified (network calls).
        index_file: output path for the JSON index mapping
            filename -> {category name: confidence}.

    Returns:
        The index dict that was written.
    """
    result = {}
    for filename in os.listdir(path):
        file_path = os.path.join(path, filename)

        if not os.path.isfile(file_path):
            continue

        try:
            with io.open(file_path, 'r') as f:
                text = f.read()
                categories = classify(text, verbose=False)
                result[filename] = categories
        except Exception:
            # Best effort: skip unreadable/unclassifiable files, keep going.
            print('Failed to process {}'.format(file_path))

    # Bug fix: io.open(..., 'w') opens the file in *text* mode, so writing
    # json.dumps(result).encode('utf-8') (bytes) raised a TypeError on
    # Python 3.  Write the JSON string directly and let the codec encode.
    with io.open(index_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(result))

    print('Texts indexed in file: {}'.format(index_file))
    return result
# [END def_index]
# [START def_split_labels]
def split_labels(categories):
    """The category labels are of the form "/a/b/c" up to three levels,
    for example "/Computers & Electronics/Software", and these labels
    are used as keys in the categories dictionary, whose values are
    confidence scores.

    The split_labels function splits the keys into individual levels
    while duplicating the confidence score, which allows a natural
    boost in how we calculate similarity when more levels are in common.

    Example:
        If we have

        x = {"/a/b/c": 0.5}
        y = {"/a/b": 0.5}
        z = {"/a": 0.5}

        Then x and y are considered more similar than y and z.

    Args:
        categories: dict mapping "/a/b/c"-style labels to confidences.

    Returns:
        dict mapping each individual level name to its confidence score.
    """
    _categories = {}
    # dict.items() behaves correctly on both Python 2 and 3, so the
    # six.iteritems indirection is unnecessary here.
    for name, confidence in categories.items():
        labels = [label for label in name.split('/') if label]
        for label in labels:
            _categories[label] = confidence
    return _categories
# [END def_split_labels]
# [START def_similarity]
def similarity(categories1, categories2):
    """Cosine similarity of the categories treated as sparse vectors.

    Args:
        categories1, categories2: dicts of label -> confidence.

    Returns:
        float in [0, 1]; 0.0 when either vector is empty.
    """
    categories1 = split_labels(categories1)
    categories2 = split_labels(categories2)

    # Bug fix: numpy.linalg.norm cannot consume a Python 3 dict view
    # directly (it becomes a 0-d object array); materialize a list first.
    norm1 = numpy.linalg.norm(list(categories1.values()))
    norm2 = numpy.linalg.norm(list(categories2.values()))

    # Return the smallest possible similarity if either categories is empty.
    if norm1 == 0 or norm2 == 0:
        return 0.0

    # Compute the cosine similarity.
    dot = 0.0
    for label, confidence in categories1.items():
        dot += confidence * categories2.get(label, 0.0)

    return dot / (norm1 * norm2)
# [END def_similarity]
# [START def_query]
def query(index_file, text, n_top=3):
    """Find the indexed files that are the most similar to
    the query text.

    Args:
        index_file: path to the JSON index written by index().
        text: query text; classified via a network call.
        n_top: number of most-similar entries to print.

    Returns:
        List of (filename, similarity) pairs, most similar first.
    """
    with io.open(index_file, 'r') as f:
        index = json.load(f)

    # Get the categories of the query text.
    query_categories = classify(text, verbose=False)

    similarities = []
    for filename, categories in six.iteritems(index):
        similarities.append(
            (filename, similarity(query_categories, categories)))

    # Sort by descending similarity score.
    similarities = sorted(similarities, key=lambda p: p[1], reverse=True)

    print('=' * 20)
    print('Query: {}\n'.format(text))
    for category, confidence in six.iteritems(query_categories):
        print('\tCategory: {}, confidence: {}'.format(category, confidence))
    print('\nMost similar {} indexed texts:'.format(n_top))
    for filename, sim in similarities[:n_top]:
        print('\tFilename: {}'.format(filename))
        print('\tSimilarity: {}'.format(sim))
        print('\n')

    return similarities
# [END def_query]
# [START def_query_category]
def query_category(index_file, category_string, n_top=3):
    """Find the indexed files that are the most similar to
    the query label.

    The list of all available labels:
    https://cloud.google.com/natural-language/docs/categories
    """
    with io.open(index_file, 'r') as f:
        index = json.load(f)

    # Make the category_string into a dictionary so that it is
    # of the same format as what we get by calling classify.
    query_categories = {category_string: 1.0}

    similarities = [
        (filename, similarity(query_categories, categories))
        for filename, categories in six.iteritems(index)
    ]
    similarities.sort(key=lambda pair: pair[1], reverse=True)

    print('=' * 20)
    print('Query: {}\n'.format(category_string))
    print('\nMost similar {} indexed texts:'.format(n_top))
    for filename, sim in similarities[:n_top]:
        print('\tFilename: {}'.format(filename))
        print('\tSimilarity: {}'.format(sim))
        print('\n')

    return similarities
# [END def_query_category]
if __name__ == '__main__':
    # CLI wiring: one subcommand per tutorial step; the module docstring
    # doubles as the program description.
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    subparsers = parser.add_subparsers(dest='command')

    # classify TEXT
    classify_parser = subparsers.add_parser(
        'classify', help=classify.__doc__)
    classify_parser.add_argument(
        'text', help='The text to be classified. '
        'The text needs to have at least 20 tokens.')

    # index PATH [--index_file FILE]
    index_parser = subparsers.add_parser(
        'index', help=index.__doc__)
    index_parser.add_argument(
        'path', help='The directory that contains '
        'text files to be indexed.')
    index_parser.add_argument(
        '--index_file', help='Filename for the output JSON.',
        default='index.json')

    # query INDEX_FILE TEXT
    query_parser = subparsers.add_parser(
        'query', help=query.__doc__)
    query_parser.add_argument(
        'index_file', help='Path to the index JSON file.')
    query_parser.add_argument(
        'text', help='Query text.')

    # query-category INDEX_FILE CATEGORY
    query_category_parser = subparsers.add_parser(
        'query-category', help=query_category.__doc__)
    query_category_parser.add_argument(
        'index_file', help='Path to the index JSON file.')
    query_category_parser.add_argument(
        'category', help='Query category.')

    args = parser.parse_args()

    # Dispatch on the chosen subcommand.
    if args.command == 'classify':
        classify(args.text)

    if args.command == 'index':
        index(args.path, args.index_file)

    if args.command == 'query':
        query(args.index_file, args.text)

    if args.command == 'query-category':
        query_category(args.index_file, args.category)
# Fix classify text tutorial
# Change-Id: Ib86df7cf37588b7a7fc0c7f4ad4fc70548152354
#!/usr/bin/env python
# Copyright 2017, Google, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START classify_text_tutorial]
"""Using the classify_text method to find content categories of text files,
Then use the content category labels to compare text similarity.
For more information, see the tutorial page at
https://cloud.google.com/natural-language/docs/classify-text-tutorial.
"""
# [START classify_text_tutorial_import]
import argparse
import io
import json
import os
from google.cloud import language_v1beta2
from google.cloud.language_v1beta2 import enums
from google.cloud.language_v1beta2 import types
import numpy
import six
# [END classify_text_tutorial_import]
# [START def_classify]
def classify(text, verbose=True):
    """Classify the input text into categories. """
    client = language_v1beta2.LanguageServiceClient()

    doc = types.Document(
        content=text,
        type=enums.Document.Type.PLAIN_TEXT)
    categories = client.classify_text(doc).categories

    # Represent the response as a sparse vector: {name: confidence}.
    result = {category.name: category.confidence for category in categories}

    if verbose:
        print(text)
        for category in categories:
            print(u'=' * 20)
            print(u'{:<16}: {}'.format('category', category.name))
            print(u'{:<16}: {}'.format('confidence', category.confidence))

    return result
# [END def_classify]
# [START def_index]
def index(path, index_file):
    """Classify each text file in a directory and write
    the results to the index_file.
    """
    result = {}
    for filename in os.listdir(path):
        file_path = os.path.join(path, filename)

        # Skip directories and other non-regular entries.
        if not os.path.isfile(file_path):
            continue

        try:
            with io.open(file_path, 'r') as f:
                # Best effort: a file that cannot be read or classified is
                # reported and skipped rather than aborting the whole run.
                result[filename] = classify(f.read(), verbose=False)
        except Exception:
            print('Failed to process {}'.format(file_path))

    with io.open(index_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(result))

    print('Texts indexed in file: {}'.format(index_file))
    return result
# [END def_index]
# [START def_split_labels]
def split_labels(categories):
    """The category labels are of the form "/a/b/c" up to three levels,
    for example "/Computers & Electronics/Software", and these labels
    are used as keys in the categories dictionary, whose values are
    confidence scores.

    The split_labels function splits the keys into individual levels
    while duplicating the confidence score, which allows a natural
    boost in how we calculate similarity when more levels are in common.

    Example:
        If we have

        x = {"/a/b/c": 0.5}
        y = {"/a/b": 0.5}
        z = {"/a": 0.5}

        Then x and y are considered more similar than y and z.

    Args:
        categories: dict mapping "/a/b/c"-style labels to confidences.

    Returns:
        dict mapping each individual level name to its confidence score.
    """
    _categories = {}
    # dict.items() behaves correctly on both Python 2 and 3, so the
    # six.iteritems indirection is unnecessary here.
    for name, confidence in categories.items():
        labels = [label for label in name.split('/') if label]
        for label in labels:
            _categories[label] = confidence
    return _categories
# [END def_split_labels]
# [START def_similarity]
def similarity(categories1, categories2):
    """Cosine similarity of the categories treated as sparse vectors.

    Args:
        categories1, categories2: dicts of label -> confidence.

    Returns:
        float in [0, 1]; 0.0 when either vector is empty.
    """
    categories1 = split_labels(categories1)
    categories2 = split_labels(categories2)

    # Bug fix: numpy.linalg.norm cannot consume a Python 3 dict view
    # directly (it becomes a 0-d object array); materialize a list first.
    norm1 = numpy.linalg.norm(list(categories1.values()))
    norm2 = numpy.linalg.norm(list(categories2.values()))

    # Return the smallest possible similarity if either categories is empty.
    if norm1 == 0 or norm2 == 0:
        return 0.0

    # Compute the cosine similarity.
    dot = 0.0
    for label, confidence in categories1.items():
        dot += confidence * categories2.get(label, 0.0)

    return dot / (norm1 * norm2)
# [END def_similarity]
# [START def_query]
def query(index_file, text, n_top=3):
    """Find the indexed files that are the most similar to
    the query text.
    """
    with io.open(index_file, 'r') as f:
        index = json.load(f)

    # Get the categories of the query text.
    query_categories = classify(text, verbose=False)

    # Score every indexed file and order by descending similarity.
    similarities = sorted(
        ((filename, similarity(query_categories, categories))
         for filename, categories in six.iteritems(index)),
        key=lambda pair: pair[1],
        reverse=True)

    print('=' * 20)
    print('Query: {}\n'.format(text))
    for category, confidence in six.iteritems(query_categories):
        print('\tCategory: {}, confidence: {}'.format(category, confidence))
    print('\nMost similar {} indexed texts:'.format(n_top))
    for filename, sim in similarities[:n_top]:
        print('\tFilename: {}'.format(filename))
        print('\tSimilarity: {}'.format(sim))
        print('\n')

    return similarities
# [END def_query]
# [START def_query_category]
def query_category(index_file, category_string, n_top=3):
    """Find the indexed files that are the most similar to
    the query label.

    The list of all available labels:
    https://cloud.google.com/natural-language/docs/categories
    """
    with io.open(index_file, 'r') as f:
        index = json.load(f)

    # Make the category_string into a dictionary so that it is
    # of the same format as what we get by calling classify.
    query_categories = {category_string: 1.0}

    scored = [
        (filename, similarity(query_categories, categories))
        for filename, categories in six.iteritems(index)
    ]
    similarities = sorted(scored, key=lambda pair: pair[1], reverse=True)

    print('=' * 20)
    print('Query: {}\n'.format(category_string))
    print('\nMost similar {} indexed texts:'.format(n_top))
    for filename, sim in similarities[:n_top]:
        print('\tFilename: {}'.format(filename))
        print('\tSimilarity: {}'.format(sim))
        print('\n')

    return similarities
# [END def_query_category]
if __name__ == '__main__':
    # Command-line entry point: one subcommand per tutorial step
    # (classify / index / query / query-category).
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    subparsers = parser.add_subparsers(dest='command')

    classify_parser = subparsers.add_parser(
        'classify', help=classify.__doc__)
    classify_parser.add_argument(
        'text', help='The text to be classified. '
        'The text needs to have at least 20 tokens.')

    index_parser = subparsers.add_parser(
        'index', help=index.__doc__)
    index_parser.add_argument(
        'path', help='The directory that contains '
        'text files to be indexed.')
    index_parser.add_argument(
        '--index_file', help='Filename for the output JSON.',
        default='index.json')

    query_parser = subparsers.add_parser(
        'query', help=query.__doc__)
    query_parser.add_argument(
        'index_file', help='Path to the index JSON file.')
    query_parser.add_argument(
        'text', help='Query text.')

    query_category_parser = subparsers.add_parser(
        'query-category', help=query_category.__doc__)
    query_category_parser.add_argument(
        'index_file', help='Path to the index JSON file.')
    query_category_parser.add_argument(
        'category', help='Query category.')

    args = parser.parse_args()

    # Dispatch to the selected subcommand handler.
    if args.command == 'classify':
        classify(args.text)

    if args.command == 'index':
        index(args.path, args.index_file)

    if args.command == 'query':
        query(args.index_file, args.text)

    if args.command == 'query-category':
        query_category(args.index_file, args.category)
|
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import Context, loader
from django.utils import simplejson as json
import psycopg2
import psycopg2.extras
def main_page(request):
    """Render the data-browser landing page."""
    return render_to_response("databrowser.html")
def first_table(request):
    """Serve the first aggregate table as a JSON HTTP response."""
    payload = json.dumps(get_data_i())
    return HttpResponse(payload)
def second_table(request):
    """Serve the second aggregate table as a JSON HTTP response."""
    payload = json.dumps(get_data_ii())
    return HttpResponse(payload)
def get_data_i():
    """Aggregate candidate vote totals by sex/type/unit/level from Postgres.

    Returns map-reduce-shaped rows ({'_id': ..., 'value': ...}) that
    first_table() serializes to JSON.
    """
    #postgres select + group by
    # SECURITY(review): database credentials are hard-coded in the DSN;
    # they belong in settings or environment configuration, not in source.
    conn_postgres = psycopg2.connect("user='postgres' host='localhost' password='marcinbarski' dbname='cclpoll'")
    # RealDictCursor returns each row as a column-name -> value mapping.
    cur = conn_postgres.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
    cur.execute("""
    SELECT k.plec AS "_id_kand_plec", k.typ AS "_id_kand_typ", k.jednostka AS "_id_kand_jednostka", k.szczebel AS "_id_kand_szczebel",
    SUM(k.l_glosow) AS "value_kand_glosow_total", COUNT(*) as "value_rec_count"
    FROM kandydaci_rady k
    GROUP BY k.plec, k.typ, k.jednostka, k.szczebel
    ORDER BY value_kand_glosow_total DESC, k.plec, k.typ, k.jednostka, k.szczebel
    """)
    rows = cur.fetchall()
    out = []
    for row in rows:
        # Re-shape each flat SQL row into the {'_id', 'value'} document form.
        dict_id = {'kand_plec':row['_id_kand_plec'], 'kand_typ':row['_id_kand_typ'], 'kand_jednostka':row['_id_kand_jednostka'], 'kand_szczebel':row['_id_kand_szczebel']}
        dictval = {'kand_glosow_total':row['value_kand_glosow_total'], 'rec_count':row['value_rec_count']}
        dictrow = {'_id':dict_id, 'value':dictval}
        out.append(dictrow)
    conn_postgres.close()
    return out
#plain table
"""
return [{"_id":{"kand_plec":"M","kand_typ":"partia","kand_jednostka":"slaskie","kand_szczebel":"sejmik"},"value":{"kand_glosow_sum":856271,"recs":505}},{"_id":{"kand_plec":"K","kand_typ":"partia","kand_jednostka":"slaskie","kand_szczebel":"sejmik"},"value":{"kand_glosow_sum":386308,"recs":241}},{"_id":{"kand_plec":"M","kand_typ":"organizacja","kand_jednostka":"slaskie","kand_szczebel":"sejmik"},"value":{"kand_glosow_sum":108105,"recs":82}},{"_id":{"kand_plec":"M","kand_typ":"wyborczy","kand_jednostka":"slaskie","kand_szczebel":"sejmik"},"value":{"kand_glosow_sum":46457,"recs":132}},{"_id":{"kand_plec":"M","kand_typ":"partia","kand_jednostka":"Czestochowa","kand_szczebel":"miasto na prawach powiatu"},"value":{"kand_glosow_sum":44538,"recs":129}},{"_id":{"kand_plec":"M","kand_typ":"partia","kand_jednostka":"Katowice","kand_szczebel":"miasto na prawach powiatu"},"value":{"kand_glosow_sum":38399,"recs":158}},{"_id":{"kand_plec":"M","kand_typ":"partia","kand_jednostka":"Sosnowiec","kand_szczebel":"miasto na prawach powiatu"},"value":{"kand_glosow_sum":34586,"recs":114}},{"_id":{"kand_plec":"M","kand_typ":"partia","kand_jednostka":"bielski","kand_szczebel":"powiat"},"value":{"kand_glosow_sum":30805,"recs":124}},{"_id":{"kand_plec":"M","kand_typ":"partia","kand_jednostka":"bedzinski","kand_szczebel":"powiat"},"value":{"kand_glosow_sum":28641,"recs":112}},{"_id":{"kand_plec":"M","kand_typ":"wyborczy","kand_jednostka":"zywiecki","kand_szczebel":"powiat"},"value":{"kand_glosow_sum":28536,"recs":121}},{"_id":{"kand_plec":"M","kand_typ":"partia","kand_jednostka":"Gliwice","kand_szczebel":"miasto na prawach powiatu"},"value":{"kand_glosow_sum":26909,"recs":113}},{"_id":{"kand_plec":"M","kand_typ":"partia","kand_jednostka":"Bielsko-Biala","kand_szczebel":"miasto na prawach 
powiatu"},"value":{"kand_glosow_sum":26837,"recs":124}},{"_id":{"kand_plec":"M","kand_typ":"partia","kand_jednostka":"czestochowski","kand_szczebel":"powiat"},"value":{"kand_glosow_sum":24955,"recs":136}},{"_id":{"kand_plec":"M","kand_typ":"partia","kand_jednostka":"zawiercianski","kand_szczebel":"powiat"},"value":{"kand_glosow_sum":24262,"recs":117}},{"_id":{"kand_plec":"M","kand_typ":"partia","kand_jednostka":"cieszynski","kand_szczebel":"powiat"},"value":{"kand_glosow_sum":23002,"recs":90}},{"_id":{"kand_plec":"K","kand_typ":"partia","kand_jednostka":"Katowice","kand_szczebel":"miasto na prawach powiatu"},"value":{"kand_glosow_sum":22943,"recs":64}},{"_id":{"kand_plec":"M","kand_typ":"partia","kand_jednostka":"zywiecki","kand_szczebel":"powiat"},"value":{"kand_glosow_sum":22549,"recs":124}},{"_id":{"kand_plec":"K","kand_typ":"wyborczy","kand_jednostka":"slaskie","kand_szczebel":"sejmik"},"value":{"kand_glosow_sum":22350,"recs":44}},{"_id":{"kand_plec":"M","kand_typ":"wyborczy","kand_jednostka":"Bielsko-Biala","kand_szczebel":"miasto na prawach powiatu"},"value":{"kand_glosow_sum":19881,"recs":59}},{"_id":{"kand_plec":"M","kand_typ":"wyborczy","kand_jednostka":"Katowice","kand_szczebel":"miasto na prawach powiatu"},"value":{"kand_glosow_sum":19832,"recs":51}}]
"""
def get_data_ii():
    """Aggregate candidate vote totals by unit/level from Postgres.

    Returns map-reduce-shaped rows ({'_id': ..., 'value': ...}) that
    second_table() serializes to JSON.
    """
    #postgres select + group by
    # SECURITY(review): credentials are hard-coded in the DSN; move them
    # to settings or environment configuration.
    conn_postgres = psycopg2.connect("user='postgres' host='localhost' password='marcinbarski' dbname='cclpoll'")
    # RealDictCursor returns each row as a column-name -> value mapping.
    cur = conn_postgres.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
    cur.execute("""
    SELECT k.jednostka AS "_id_kand_jednostka", k.szczebel AS "_id_kand_szczebel",
    COUNT(*) as "value_rec_count", SUM(k.l_glosow) AS "value_kand_glosow_total"
    FROM kandydaci_rady k
    GROUP BY k.jednostka, k.szczebel
    ORDER BY value_rec_count DESC, k.jednostka, k.szczebel
    """)
    rows = cur.fetchall()
    out = []
    for row in rows:
        # Re-shape each flat SQL row into the {'_id', 'value'} document form.
        dict_id = {'kand_jednostka':row['_id_kand_jednostka'], 'kand_szczebel':row['_id_kand_szczebel']}
        dictval = {'kand_glosow_total':row['value_kand_glosow_total'], 'rec_count':row['value_rec_count']}
        dictrow = {'_id':dict_id, 'value':dictval}
        out.append(dictrow)
    conn_postgres.close()
    return out
#plain table
"""
return [{"_id":{"kand_jednostka":"slaskie","kand_szczebel":"sejmik"},"value":{"kand_glosow_sum":856271,"recs":505}},{"_id":{"kand_jednostka":"slaskie","kand_szczebel":"sejmik"},"value":{"kand_glosow_sum":386308,"recs":241}},{"_id":{"kand_jednostka":"slaskie","kand_szczebel":"sejmik"},"value":{"kand_glosow_sum":108105,"recs":82}},{"_id":{"kand_jednostka":"slaskie","kand_szczebel":"sejmik"},"value":{"kand_glosow_sum":46457,"recs":132}},{"_id":{"kand_jednostka":"Czestochowa","kand_szczebel":"miasto na prawach powiatu"},"value":{"kand_glosow_sum":44538,"recs":129}},{"_id":{"kand_jednostka":"Katowice","kand_szczebel":"miasto na prawach powiatu"},"value":{"kand_glosow_sum":38399,"recs":158}},{"_id":{"kand_jednostka":"Sosnowiec","kand_szczebel":"miasto na prawach powiatu"},"value":{"kand_glosow_sum":34586,"recs":114}},{"_id":{"kand_jednostka":"bielski","kand_szczebel":"powiat"},"value":{"kand_glosow_sum":30805,"recs":124}},{"_id":{"kand_jednostka":"bedzinski","kand_szczebel":"powiat"},"value":{"kand_glosow_sum":28641,"recs":112}},{"_id":{"kand_jednostka":"zywiecki","kand_szczebel":"powiat"},"value":{"kand_glosow_sum":28536,"recs":121}},{"_id":{"kand_jednostka":"Gliwice","kand_szczebel":"miasto na prawach powiatu"},"value":{"kand_glosow_sum":26909,"recs":113}},{"_id":{"kand_jednostka":"Bielsko-Biala","kand_szczebel":"miasto na prawach powiatu"},"value":{"kand_glosow_sum":26837,"recs":124}},{"_id":{"kand_jednostka":"czestochowski","kand_szczebel":"powiat"},"value":{"kand_glosow_sum":24955,"recs":136}},{"_id":{"kand_jednostka":"zawiercianski","kand_szczebel":"powiat"},"value":{"kand_glosow_sum":24262,"recs":117}},{"_id":{"kand_jednostka":"cieszynski","kand_szczebel":"powiat"},"value":{"kand_glosow_sum":23002,"recs":90}},{"_id":{"kand_jednostka":"Katowice","kand_szczebel":"miasto na prawach 
powiatu"},"value":{"kand_glosow_sum":22943,"recs":64}},{"_id":{"kand_jednostka":"zywiecki","kand_szczebel":"powiat"},"value":{"kand_glosow_sum":22549,"recs":124}},{"_id":{"kand_jednostka":"slaskie","kand_szczebel":"sejmik"},"value":{"kand_glosow_sum":22350,"recs":44}},{"_id":{"kand_jednostka":"Bielsko-Biala","kand_szczebel":"miasto na prawach powiatu"},"value":{"kand_glosow_sum":19881,"recs":59}},{"_id":{"kand_jednostka":"Katowice","kand_szczebel":"miasto na prawach powiatu"},"value":{"kand_glosow_sum":19832,"recs":51}}]
"""
# updated data extract
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import Context, loader
from django.utils import simplejson as json
import psycopg2
import psycopg2.extras
import pymongo
def main_page(request):
    """Render the data-browser landing page template."""
    response = render_to_response("databrowser.html")
    return response
def first_table(request):
    """Serve the first data extract as a JSON HTTP response."""
    return HttpResponse(json.dumps(get_data_i()))
def second_table(request):
    """Serve the second data extract as a JSON HTTP response."""
    return HttpResponse(json.dumps(get_data_ii()))
def get_data_i():
    """Fetch pre-aggregated budget rows from MongoDB.

    Returns the documents of rawsdoc00.dd_budg2011_in_tmp0 as a list;
    the collection already stores map-reduce style {_id, value} documents,
    so rows are passed through unchanged.
    """
    mongo_collect= pymongo.Connection("localhost", 27017)['rawsdoc00']['dd_budg2011_in_tmp0']
    rows= mongo_collect.find()
    out= []
    for row in rows:
        out.append(row)
    return out
    # extract logic
    # due to the collection structure gives a totally identical result to given above
    #
    #for row in rows:
    #    dict_id= {'dysponent':row['_id']['dysponent'],'czesc':row['_id']['czesc']}
    #    dictval= {'grand_nation':row['value']['grand_nation'],'grand_eu':row['value']['grand_eu'],'grand_total':row['value']['grand_total']}
    #    dictrow= {'_id':dict_id, 'value':dictval}
    #    out.append(dictrow)
def get_data_ii():
    """Fetch root-level ('node': 0, 'level': 'c') budget documents from
    MongoDB and reshape them into {'_id', 'value'} rows.

    Returns:
        list of {'_id': {...}, 'value': {...}} dicts for second_table().
    """
    mongo_collect = pymongo.Connection("localhost", 27017)['rawsdoc00']['dd_budg2011_go']
    rows = mongo_collect.find({'node': 0, 'level': 'c'})
    out = []
    for row in rows:
        dict_id = {'parent': row['parent'], 'idef': row['idef'], 'name': row['name'], 'czesc': row['czesc']}
        # Bug fix: the original literal listed 'v_eu' twice, which is why
        # "only 2 are shown" of the intended 3 keys.  The duplicate is
        # removed; if a third field (e.g. a total) was intended, add it
        # under its own key -- TODO confirm against the collection schema.
        dictval = {'v_eu': row['v_eu'], 'v_nation': row['v_nation']}
        dictrow = {'_id': dict_id, 'value': dictval}
        out.append(dictrow)
    return out
|
# -----------
# User Instructions
#
# Modify the previous code to adjust for a highly
# confident last measurement. Do this by adding a
# factor of 5 into your Omega and Xi matrices
# as described in the video.
from math import *
import random
# ===============================================================
#
# SLAM in a rectilinear world (we avoid non-linearities)
#
#
# ===============================================================
# ------------------------------------------------
#
# this is the matrix class
# we use it because it makes it easier to collect constraints in GraphSLAM
# and to calculate solutions (albeit inefficiently)
#
class matrix:
# implements basic operations of a matrix class
# ------------
#
# initialization - can be called with an initial matrix
#
def __init__(self, value=[[]]):
self.value = value
self.dimx = len(value)
self.dimy = len(value[0])
if value == [[]]:
self.dimx = 0
# ------------
#
# makes matrix of a certain size and sets each element to zero
#
def zero(self, dimx, dimy=0):
if dimy == 0:
dimy = dimx
# check if valid dimensions
if dimx < 1 or dimy < 1:
raise ValueError, "Invalid size of matrix"
else:
self.dimx = dimx
self.dimy = dimy
self.value = [[0.0 for row in range(dimy)] for col in range(dimx)]
# ------------
#
# makes matrix of a certain (square) size and turns matrix into identity matrix
#
def identity(self, dim):
# check if valid dimension
if dim < 1:
raise ValueError, "Invalid size of matrix"
else:
self.dimx = dim
self.dimy = dim
self.value = [[0.0 for row in range(dim)] for col in range(dim)]
for i in range(dim):
self.value[i][i] = 1.0
# ------------
#
# prints out values of matrix
#
def show(self, txt=''):
for i in range(len(self.value)):
print txt + '[' + ', '.join('%.3f' % x for x in self.value[i]) + ']'
print ' '
# ------------
#
# defines element-wise matrix addition. Both matrices must be of equal dimensions
#
def __add__(self, other):
# check if correct dimensions
if self.dimx != other.dimx or self.dimx != other.dimx:
raise ValueError, "Matrices must be of equal dimension to add"
else:
# add if correct dimensions
res = matrix()
res.zero(self.dimx, self.dimy)
for i in range(self.dimx):
for j in range(self.dimy):
res.value[i][j] = self.value[i][j] + other.value[i][j]
return res
# ------------
#
# defines element-wise matrix subtraction. Both matrices must be of equal dimensions
#
def __sub__(self, other):
# check if correct dimensions
if self.dimx != other.dimx or self.dimx != other.dimx:
raise ValueError, "Matrices must be of equal dimension to subtract"
else:
# subtract if correct dimensions
res = matrix()
res.zero(self.dimx, self.dimy)
for i in range(self.dimx):
for j in range(self.dimy):
res.value[i][j] = self.value[i][j] - other.value[i][j]
return res
# ------------
#
# defines multiplication. Both matrices must be of fitting dimensions
#
def __mul__(self, other):
# check if correct dimensions
if self.dimy != other.dimx:
raise ValueError, "Matrices must be m*n and n*p to multiply"
else:
# multiply if correct dimensions
res = matrix()
res.zero(self.dimx, other.dimy)
for i in range(self.dimx):
for j in range(other.dimy):
for k in range(self.dimy):
res.value[i][j] += self.value[i][k] * other.value[k][j]
return res
# ------------
#
# returns a matrix transpose
#
def transpose(self):
# compute transpose
res = matrix()
res.zero(self.dimy, self.dimx)
for i in range(self.dimx):
for j in range(self.dimy):
res.value[j][i] = self.value[i][j]
return res
# ------------
#
# creates a new matrix from the existing matrix elements.
#
# Example:
# l = matrix([[ 1, 2, 3, 4, 5],
# [ 6, 7, 8, 9, 10],
# [11, 12, 13, 14, 15]])
#
# l.take([0, 2], [0, 2, 3])
#
# results in:
#
# [[1, 3, 4],
# [11, 13, 14]]
#
#
# take is used to remove rows and columns from existing matrices
# list1/list2 define a sequence of rows/columns that shall be taken
# if no list2 is provided, then list2 is set to list1 (good for symmetric matrices)
#
def take(self, list1, list2=[]):
if list2 == []:
list2 = list1
if len(list1) > self.dimx or len(list2) > self.dimy:
raise ValueError, "list invalid in take()"
res = matrix()
res.zero(len(list1), len(list2))
for i in range(len(list1)):
for j in range(len(list2)):
res.value[i][j] = self.value[list1[i]][list2[j]]
return res
# ------------
#
# creates a new matrix from the existing matrix elements.
#
# Example:
# l = matrix([[1, 2, 3],
# [4, 5, 6]])
#
# l.expand(3, 5, [0, 2], [0, 2, 3])
#
# results in:
#
# [[1, 0, 2, 3, 0],
# [0, 0, 0, 0, 0],
# [4, 0, 5, 6, 0]]
#
# expand is used to introduce new rows and columns into an existing matrix
# list1/list2 are the new indexes of row/columns in which the matrix
# elements are being mapped. Elements for rows and columns
# that are not listed in list1/list2
# will be initialized by 0.0.
#
def expand(self, dimx, dimy, list1, list2=[]):
if list2 == []:
list2 = list1
if len(list1) > self.dimx or len(list2) > self.dimy:
raise ValueError, "list invalid in expand()"
res = matrix()
res.zero(dimx, dimy)
for i in range(len(list1)):
for j in range(len(list2)):
res.value[list1[i]][list2[j]] = self.value[i][j]
return res
# ------------
#
# Computes the upper triangular Cholesky factorization of
# a positive definite matrix.
# This code is based on http://adorio-research.org/wordpress/?p=4560
def Cholesky(self, ztol=1.0e-5):
res = matrix()
res.zero(self.dimx, self.dimx)
for i in range(self.dimx):
S = sum([(res.value[k][i]) ** 2 for k in range(i)])
d = self.value[i][i] - S
if abs(d) < ztol:
res.value[i][i] = 0.0
else:
if d < 0.0:
raise ValueError, "Matrix not positive-definite"
res.value[i][i] = sqrt(d)
for j in range(i + 1, self.dimx):
S = sum([res.value[k][i] * res.value[k][j] for k in range(i)])
if abs(S) < ztol:
S = 0.0
res.value[i][j] = (self.value[i][j] - S) / res.value[i][i]
return res
# ------------
#
# Computes inverse of matrix given its Cholesky upper Triangular
# decomposition of matrix.
# This code is based on http://adorio-research.org/wordpress/?p=4560
    def CholeskyInverse(self):
        """Return the inverse of the matrix whose upper-triangular
        Cholesky factor is ``self`` (backward substitution).

        Raises ZeroDivisionError if a diagonal entry of the factor is
        zero (factor of a singular / non-positive-definite matrix).
        """
        # Computes inverse of matrix given its Cholesky upper Triangular
        # decomposition of matrix.
        # This code is based on http://adorio-research.org/wordpress/?p=4560
        res = matrix()
        res.zero(self.dimx, self.dimx)
        # Backward step for inverse.
        for j in reversed(range(self.dimx)):
            tjj = self.value[j][j]
            # contribution of the already-computed columns to the right
            S = sum([self.value[j][k] * res.value[j][k] for k in range(j + 1, self.dimx)])
            res.value[j][j] = 1.0 / tjj ** 2 - S / tjj
            for i in reversed(range(j)):
                # the inverse is symmetric: fill both halves at once
                res.value[j][i] = res.value[i][j] = \
                    -sum([self.value[i][k] * res.value[k][j] for k in \
                          range(i + 1, self.dimx)]) / self.value[i][i]
        return res
# ------------
#
# comutes and returns the inverse of a square matrix
#
def inverse(self):
aux = self.Cholesky()
res = aux.CholeskyInverse()
return res
# ------------
#
# prints matrix (needs work!)
#
    def __repr__(self):
        """Represent the matrix by its raw list-of-lists payload."""
        return repr(self.value)
# ######################################################################
# ######################################################################
# ######################################################################
# Including the 5 times multiplier, your returned mu should now be:
#
# [[-3.0],
# [2.179],
# [5.714],
# [6.821]]
############## MODIFY CODE BELOW ##################
def doit(initial_pos, move1, move2, Z0, Z1, Z2):
    """Build and solve a 1-D Graph SLAM system with three poses and one
    landmark.

    initial_pos -- prior on the first pose
    move1/move2 -- odometry between consecutive poses
    Z0/Z1/Z2    -- landmark measurement taken from each pose

    Returns mu = Omega^-1 * Xi (poses 0-2, then the landmark estimate).
    """
    # prior constraint on x0
    Omega = matrix([[1.0, 0.0, 0.0],
                    [0.0, 0.0, 0.0],
                    [0.0, 0.0, 0.0]])
    Xi = matrix([[initial_pos],
                 [0.0],
                 [0.0]])
    # motion constraint x0 -> x1
    Omega += matrix([[1.0, -1.0, 0.0],
                     [-1.0, 1.0, 0.0],
                     [0.0, 0.0, 0.0]])
    Xi += matrix([[-move1],
                  [move1],
                  [0.0]])
    # motion constraint x1 -> x2
    Omega += matrix([[0.0, 0.0, 0.0],
                     [0.0, 1.0, -1.0],
                     [0.0, -1.0, 1.0]])
    Xi += matrix([[0.0],
                  [-move2],
                  [move2]])
    # add a fourth row/column for the landmark
    Omega = Omega.expand(4, 4, [0, 1, 2], [0, 1, 2])
    Xi = Xi.expand(4, 1, [0, 1, 2], [0])
    # measurement constraint x0 -> landmark
    Omega += matrix([[1.0, 0.0, 0.0, -1.0],
                     [0.0, 0.0, 0.0, 0.0],
                     [0.0, 0.0, 0.0, 0.0],
                     [-1.0, 0.0, 0.0, 1.0]])
    Xi += matrix([[-Z0],
                  [0.0],
                  [0.0],
                  [Z0]])
    # measurement constraint x1 -> landmark
    Omega += matrix([[0.0, 0.0, 0.0, 0.0],
                     [0.0, 1.0, 0.0, -1.0],
                     [0.0, 0.0, 0.0, 0.0],
                     [0.0, -1.0, 0.0, 1.0]])
    Xi += matrix([[0.0],
                  [-Z1],
                  [0.0],
                  [Z1]])
    # measurement constraint x2 -> landmark
    Omega += matrix([[0.0, 0.0, 0.0, 0.0],
                     [0.0, 0.0, 0.0, 0.0],
                     [0.0, 0.0, 1.0, -1.0],
                     [0.0, 0.0, -1.0, 1.0]])
    Xi += matrix([[0.0],
                  [0.0],
                  [-Z2],
                  [Z2]])
    Omega.show('Omega: ')
    Xi.show('Xi: ')
    # solve the information form: mu = Omega^-1 * Xi
    mu = Omega.inverse() * Xi
    mu.show('Mu: ')
    return mu
doit(-3, 5, 3, 10, 5, 1)
Graph SLAM (Corrected)
from math import *
import random
class matrix:
    """Minimal dense matrix stored as a list of row lists.

    Implements just the operations Graph SLAM needs: element-wise
    add/subtract, matrix product, transpose, row/column selection
    (``take``) and insertion (``expand``), and inversion of
    positive-definite matrices via Cholesky factorization.
    """

    def __init__(self, value=None):
        """Wrap an optional list-of-rows; default is an empty matrix."""
        # None default instead of a shared mutable [[]] literal
        if value is None:
            value = [[]]
        self.value = value
        self.dimx = len(value)
        self.dimy = len(value[0])
        if value == [[]]:
            self.dimx = 0

    def zero(self, dimx, dimy=0):
        """Resize to dimx*dimy (dimy defaults to dimx), all entries 0.0."""
        if dimy == 0:
            dimy = dimx
        if dimx < 1 or dimy < 1:
            # parenthesized raise works on both Python 2 and 3
            raise ValueError("Invalid size of matrix")
        self.dimx = dimx
        self.dimy = dimy
        self.value = [[0.0 for _ in range(dimy)] for _ in range(dimx)]

    def identity(self, dim):
        """Resize to the dim*dim identity matrix."""
        if dim < 1:
            raise ValueError("Invalid size of matrix")
        self.dimx = dim
        self.dimy = dim
        self.value = [[0.0 for _ in range(dim)] for _ in range(dim)]
        for i in range(dim):
            self.value[i][i] = 1.0

    def show(self, txt=''):
        """Print every row prefixed with txt, three decimals per entry."""
        # print() with a single argument behaves the same on 2 and 3
        for i in range(len(self.value)):
            print(txt + '[' + ', '.join('%.3f' % x for x in self.value[i]) + ']')
        print(' ')

    def __add__(self, other):
        """Element-wise sum; shapes must match exactly."""
        # bug fix: the original compared dimx twice and never checked
        # dimy, so e.g. adding a 1x2 to a 1x1 crashed with IndexError
        if self.dimx != other.dimx or self.dimy != other.dimy:
            raise ValueError("Matrices must be of equal dimension to add")
        res = matrix()
        res.zero(self.dimx, self.dimy)
        for i in range(self.dimx):
            for j in range(self.dimy):
                res.value[i][j] = self.value[i][j] + other.value[i][j]
        return res

    def __sub__(self, other):
        """Element-wise difference; shapes must match exactly."""
        # bug fix: same missing dimy check as __add__
        if self.dimx != other.dimx or self.dimy != other.dimy:
            raise ValueError("Matrices must be of equal dimension to subtract")
        res = matrix()
        res.zero(self.dimx, self.dimy)
        for i in range(self.dimx):
            for j in range(self.dimy):
                res.value[i][j] = self.value[i][j] - other.value[i][j]
        return res

    def __mul__(self, other):
        """Matrix product; self must be m*n and other n*p."""
        if self.dimy != other.dimx:
            raise ValueError("Matrices must be m*n and n*p to multiply")
        res = matrix()
        res.zero(self.dimx, other.dimy)
        # res[i][j] = sum_k self[i][k] * other[k][j]
        for i in range(self.dimx):
            for j in range(other.dimy):
                for k in range(self.dimy):
                    res.value[i][j] += self.value[i][k] * other.value[k][j]
        return res

    def transpose(self):
        """Return a new matrix with rows and columns swapped."""
        res = matrix()
        res.zero(self.dimy, self.dimx)
        for i in range(self.dimx):
            for j in range(self.dimy):
                res.value[j][i] = self.value[i][j]
        return res

    def take(self, list1, list2=None):
        """Return the sub-matrix of rows ``list1`` and columns ``list2``.

        list2 defaults to list1 (convenient for symmetric matrices).
        Example: a 3x5 matrix .take([0, 2], [0, 2, 3]) keeps rows 0 and
        2 and columns 0, 2 and 3.
        """
        # None default avoids the shared-mutable-default pitfall; an
        # explicit empty list still falls back to list1 as before
        if not list2:
            list2 = list1
        if len(list1) > self.dimx or len(list2) > self.dimy:
            raise ValueError("list invalid in take()")
        res = matrix()
        res.zero(len(list1), len(list2))
        for i in range(len(list1)):
            for j in range(len(list2)):
                res.value[i][j] = self.value[list1[i]][list2[j]]
        return res

    def expand(self, dimx, dimy, list1, list2=None):
        """Return a dimx*dimy matrix with this matrix's entries mapped
        to rows ``list1`` and columns ``list2``; all other entries 0.0.

        list2 defaults to list1.  Example: a 2x3 matrix
        .expand(3, 5, [0, 2], [0, 2, 3]) scatters its six entries into
        a zero-filled 3x5 matrix.
        """
        if not list2:
            list2 = list1
        if len(list1) > self.dimx or len(list2) > self.dimy:
            raise ValueError("list invalid in expand()")
        res = matrix()
        res.zero(dimx, dimy)
        for i in range(len(list1)):
            for j in range(len(list2)):
                res.value[list1[i]][list2[j]] = self.value[i][j]
        return res

    def Cholesky(self, ztol=1.0e-5):
        """Return the upper-triangular Cholesky factor of a
        positive-definite matrix.

        Magnitudes below ztol are treated as zero.  Based on
        http://adorio-research.org/wordpress/?p=4560
        """
        res = matrix()
        res.zero(self.dimx, self.dimx)
        for i in range(self.dimx):
            # pivot: diagonal entry minus what is already factored out
            S = sum([(res.value[k][i]) ** 2 for k in range(i)])
            d = self.value[i][i] - S
            if abs(d) < ztol:
                res.value[i][i] = 0.0
            else:
                if d < 0.0:
                    raise ValueError("Matrix not positive-definite")
                res.value[i][i] = sqrt(d)
            for j in range(i + 1, self.dimx):
                S = sum([res.value[k][i] * res.value[k][j] for k in range(i)])
                if abs(S) < ztol:
                    S = 0.0
                # NOTE: divides by a pivot that was zeroed when below
                # ztol -- near-singular input raises ZeroDivisionError
                res.value[i][j] = (self.value[i][j] - S) / res.value[i][i]
        return res

    def CholeskyInverse(self):
        """Return the inverse of the matrix whose upper-triangular
        Cholesky factor is ``self`` (backward substitution).

        Based on http://adorio-research.org/wordpress/?p=4560
        """
        res = matrix()
        res.zero(self.dimx, self.dimx)
        # work backwards from the last row/column
        for j in reversed(range(self.dimx)):
            tjj = self.value[j][j]
            S = sum([self.value[j][k] * res.value[j][k] for k in range(j + 1, self.dimx)])
            res.value[j][j] = 1.0 / tjj ** 2 - S / tjj
            for i in reversed(range(j)):
                # the inverse is symmetric: fill both halves at once
                res.value[j][i] = res.value[i][j] = \
                    -sum([self.value[i][k] * res.value[k][j] for k in
                          range(i + 1, self.dimx)]) / self.value[i][i]
        return res

    def inverse(self):
        """Return the inverse (requires a positive-definite matrix)."""
        aux = self.Cholesky()
        res = aux.CholeskyInverse()
        return res

    def __repr__(self):
        """Represent the matrix by its raw list-of-lists payload."""
        return repr(self.value)
# Including the 5 times multiplier, your returned mu should now be:
#
# [[-3.0],
# [2.179],
# [5.714],
# [6.821]]
def doit(initial_pos, move1, move2, Z0, Z1, Z2):
    """Build the Graph SLAM information matrix/vector for a 1-D world
    with three poses and one landmark, print them, and return the
    solved state mu = omega^-1 * xi.
    """
    # prior constraint on the first pose
    omega = matrix([[1.0, 0.0, 0.0],
                    [0.0, 0.0, 0.0],
                    [0.0, 0.0, 0.0]])
    xi = matrix([[initial_pos],
                 [0.0],
                 [0.0]])
    # motion constraints between consecutive poses
    for start, motion in ((0, move1), (1, move2)):
        strength = [[0.0] * 3 for _ in range(3)]
        strength[start][start] += 1.0
        strength[start + 1][start + 1] += 1.0
        strength[start][start + 1] -= 1.0
        strength[start + 1][start] -= 1.0
        omega += matrix(strength)
        shift = [[0.0] for _ in range(3)]
        shift[start][0] -= motion
        shift[start + 1][0] += motion
        xi += matrix(shift)
    # grow the system by one row/column for the landmark (index 3)
    omega = omega.expand(4, 4, [0, 1, 2], [0, 1, 2])
    xi = xi.expand(4, 1, [0, 1, 2], [0])
    # measurement constraints between each pose and the landmark
    for pose, measurement in enumerate((Z0, Z1, Z2)):
        strength = [[0.0] * 4 for _ in range(4)]
        strength[pose][pose] += 1.0
        strength[3][3] += 1.0
        strength[pose][3] -= 1.0
        strength[3][pose] -= 1.0
        omega += matrix(strength)
        shift = [[0.0] for _ in range(4)]
        shift[pose][0] -= measurement
        shift[3][0] += measurement
        xi += matrix(shift)
    omega.show('omega: ')
    xi.show('xi: ')
    mu = omega.inverse() * xi
    mu.show('Mu: ')
    return mu
doit(-3, 5, 3, 10, 5, 1)
|
#!/bin/env python
from __future__ import division, print_function
import os.path
import sys
import time
import pstats
import argparse
import pprint
import runpy
from struct import Struct
from hashlib import sha1
from functools import partial
from collections import Counter
from xml.sax.saxutils import escape
from cProfile import Profile
version = '0.3'
# stderr helpers so diagnostics never pollute the SVG/log on stdout
eprint = partial(print, file=sys.stderr)
epprint = partial(pprint.pprint, stream=sys.stderr)
# rendering defaults, all overridable from the command line
DEFAULT_WIDTH = 1200
DEFAULT_ROW_HEIGHT = 24
DEFAULT_FONT_SIZE = 12
DEFAULT_THRESHOLD = 0.1
DEFAULT_FORMAT = 'svg'
DEFAULT_LOG_MULT = 1000000
PY2 = sys.version_info[0] == 2
if PY2:
    # Python 2 sha1 accepts str directly; identity is enough
    bstr = lambda r: r
else:
    def bstr(data):
        # sha1 requires bytes on Python 3; encode text inputs
        if type(data) is str:
            data = data.encode('utf-8')
        return data
def gen_colors(s, e, size):
    """Yield ``size`` RGB tuples interpolated from ``s`` towards ``e``.

    Integer interpolation per channel: step i is s + (e - s) * i // size,
    so the end color itself is never emitted.
    """
    for step in range(size):
        yield (s[0] + (e[0] - s[0]) * step // size,
               s[1] + (e[1] - s[1]) * step // size,
               s[2] + (e[2] - s[2]) * step // size)
# palettes: hot (default blocks), cool (shared/recursive), callers view,
# and a dimmed grey ramp
COLORS = list(gen_colors((255, 240, 141), (255, 65, 34), 7))
CCOLORS = list(gen_colors((44, 255, 210), (113, 194, 0), 5))
ECOLORS = list(gen_colors((230, 230, 255), (150, 150, 255), 5))
DCOLORS = list(gen_colors((190, 190, 190), (240, 240, 240), 7))
# big-endian unsigned 32-bit, used by name_hash below
int_struct = Struct('!L')
def name_hash(name):
    """Map a name to a stable pseudo-random float in [0, 1).

    The first four bytes of the SHA-1 digest are used, so a given name
    always selects the same color slot.
    """
    bucket, = int_struct.unpack(sha1(bstr(name)).digest()[:4])
    return bucket / (0xffffffff + 1.0)
def calc_callers(stats):
    """Convert a pstats-style ``stats`` dict into caller/callee maps.

    ``stats`` maps func -> (cc, nc, tt, ct, callers) as produced by
    cProfile.  Returns (funcs, calls): ``funcs`` maps each func to its
    callee/caller lists plus its own stat tuple, ``calls`` maps
    (caller, callee) edges to stat tuples.  A synthetic 'root' entry
    ties all entry points together.
    """
    roots = []
    funcs = {}
    calls = {}
    for func, (cc, nc, tt, ct, clist) in stats.items():
        funcs[func] = {'calls': [], 'called': [], 'stat': (cc, nc, tt, ct)}
        if not clist:
            # no recorded callers: treat as an entry point under 'root'
            roots.append(func)
            calls[('root', func)] = funcs[func]['stat']
    for func, (_, _, _, _, clist) in stats.items():
        for cfunc, t in clist.items():
            assert (cfunc, func) not in calls
            funcs[cfunc]['calls'].append(func)
            funcs[func]['called'].append(cfunc)
            calls[(cfunc, func)] = t
    # sanity check: root cumulative time should roughly match the sum of
    # all tottimes; if not, promote the largest non-root func to a root
    total = sum(funcs[r]['stat'][3] for r in roots)
    ttotal = sum(funcs[r]['stat'][2] for r in funcs)
    if not (0.8 < total / ttotal < 1.2):
        eprint('Warning: can not find proper roots, root cumtime is {} but sum tottime is {}'.format(total, ttotal))
        # Try to find suitable root
        newroot = max((r for r in funcs if r not in roots), key=lambda r: funcs[r]['stat'][3])
        nstat = funcs[newroot]['stat']
        ntotal = total + nstat[3]
        if 0.8 < ntotal / ttotal < 1.2:
            roots.append(newroot)
            calls[('root', newroot)] = nstat
            total = ntotal
        else:
            total = ttotal
    funcs['root'] = {'calls': roots,
                     'called': [],
                     'stat': (1, 1, 0, total)}
    return funcs, calls
def prepare(funcs, calls, threshold=0.0001):
    """Lay out flamegraph blocks from the (funcs, calls) maps.

    Returns (blocks, bblocks, maxw): forward call-graph blocks, the
    inverted (callee -> callers) blocks, and the total width in time
    units.  Blocks narrower than ``threshold`` of maxw are dropped.
    """
    blocks = []
    bblocks = []
    block_counts = Counter()
    # count how many distinct paths reach each (parent, child) edge;
    # edges seen more than once (recursion / shared callees) get their
    # timings rescaled in _calc below
    def _counts(parent, visited, level=0):
        for child in funcs[parent]['calls']:
            k = parent, child
            block_counts[k] += 1
            if block_counts[k] < 2:
                if k not in visited:
                    _counts(child, visited | {k}, level+1)
    # walk the call tree depth-first, assigning each block an x origin
    # and a width proportional to its cumulative time
    def _calc(parent, timings, level, origin, visited, trace=(), pccnt=1, pblock=None):
        childs = funcs[parent]['calls']
        _, _, ptt, ptc = timings
        fchilds = sorted(((f, funcs[f], calls[(parent, f)], max(block_counts[(parent, f)], pccnt))
                          for f in childs),
                         key=lambda r: r[0])
        # "good" children occur on a single path and keep raw timings
        gchilds = [r for r in fchilds if r[3] == 1]
        # "bad" children are shared/recursive and must be rescaled
        bchilds = [r for r in fchilds if r[3] > 1]
        if bchilds:
            gctc = sum(r[2][3] for r in gchilds)
            bctc = sum(r[2][3] for r in bchilds)
            # time remaining for the bad children inside this parent
            rest = ptc-ptt-gctc
            if bctc > 0:
                factor = rest / bctc
            else:
                factor = 1
            bchilds = [(f, ff, (round(cc*factor), round(nc*factor), tt*factor, tc*factor), ccnt)
                       for f, ff, (cc, nc, tt, tc), ccnt in bchilds]
        for child, _, (cc, nc, tt, tc), ccnt in gchilds + bchilds:
            if tc/maxw > threshold:
                ckey = parent, child
                ctrace = trace + (child,)
                block = {
                    'trace': ctrace,
                    'color': (pccnt==1 and ccnt > 1),
                    'level': level,
                    'name': child[2],
                    'hash_name': '{0[0]}:{0[1]}:{0[2]}'.format(child),
                    'full_name': '{0[0]}:{0[1]}:{0[2]} {5:.2%} ({1} {2} {3} {4})'.format(child, cc, nc, tt, tc, tc/maxw),
                    'w': tc,
                    'ww': tt,
                    'x': origin
                }
                blocks.append(block)
                if ckey not in visited:
                    _calc(child, (cc, nc, tt, tc), level + 1, origin, visited | {ckey},
                          ctrace, ccnt, block)
                elif pblock:
                    # already expanded on another path: fold the time
                    # into the parent's own-width instead
                    pblock['ww'] += tc
            origin += tc
    # build the inverted graph: every function at level 0, its callers
    # stacked above, widths scaled so callers partition the block's time
    def _calc_back(names, level, to, origin, visited, pw):
        if to and names:
            factor = pw / sum(calls[(r, to)][3] for r in names)
        else:
            factor = 1
        for name in sorted(names):
            func = funcs[name]
            if to:
                cc, nc, tt, tc = calls[(name, to)]
                ttt = tc * factor
            else:
                cc, nc, tt, tc = func['stat']
                ttt = tt * factor
            if ttt / maxw > threshold:
                block = {
                    'color': 2 if level > 0 else not func['calls'],
                    'level': level,
                    'name': name[2],
                    'hash_name': '{0[0]}:{0[1]}:{0[2]}'.format(name),
                    'full_name': '{0[0]}:{0[1]}:{0[2]} {5:.2%} ({1} {2} {3} {4})'.format(name, cc, nc, tt, tc, tt/maxw),
                    'w': ttt,
                    'x': origin
                }
                bblocks.append(block)
                key = name, to
                if key not in visited:
                    _calc_back(func['called'], level+1, name, origin, visited | {key}, ttt)
            origin += ttt
    # total width = cumulative time of the synthetic root
    maxw = funcs['root']['stat'][3] * 1.0
    _counts('root', set())
    _calc('root', (1, 1, maxw, maxw), 0, 0, set())
    _calc_back((f for f in funcs if f != 'root'), 0, None, 0, set(), 0)
    return blocks, bblocks, maxw
def render_svg_section(blocks, maxw, colors, h=24, fsize=12, width=1200, top=0, invert=False):
    """Render one flamegraph section to a list of SVG fragments.

    Returns (fragments, new_top) where new_top is the y offset just
    below this section.  ``invert`` flips the stacking direction for
    the callers view.
    """
    deepest = max(b['level'] for b in blocks)
    height = (deepest + 1) * h
    fragments = []
    for block in blocks:
        if invert:
            y = top + height - (deepest - block['level']) * h - h
        else:
            y = top + height - block['level'] * h - h
        palette = colors[block['color']]
        fill = palette[int(len(palette) * name_hash(block['hash_name']))]
        fragments.append(ELEM.format(
            x=block['x'] * width / maxw,
            y=y,
            w=max(1, block['w'] * width / maxw - 1),
            tx=h / 6,
            ty=h / 2,
            h=h - 1,
            fsize=fsize,
            fill=fill,
            name=escape(block['name']),
            full_name=escape(block['full_name'])))
    return fragments, top + height
def render_svg(blocks, bblocks, maxw, h=24, fsize=12, width=1200):
    """Assemble the full SVG: call graph on top, inverted callers
    section below, separated by one blank row."""
    if not blocks:
        top_section, offset = [], 0
    else:
        top_section, offset = render_svg_section(blocks, maxw, [COLORS, CCOLORS],
                                                 h=h, fsize=fsize, width=width, top=0)
        offset += h  # blank separator row
    bottom_section, total_height = render_svg_section(
        bblocks, maxw, [COLORS, ECOLORS, DCOLORS],
        h=h, fsize=fsize, width=width, top=offset, invert=True)
    return SVG.format('\n'.join(top_section + bottom_section),
                      width=width, height=total_height)
def render_fg(blocks, multiplier, out):
    """Emit blocks in flamegraph.pl collapsed-stack format.

    One line per block: semicolon-joined trace frames, a space, then
    the block's own time scaled by ``multiplier`` and rounded.
    """
    for block in blocks:
        stack = ';'.join('{}:{}:{}'.format(*frame) for frame in block['trace'])
        print(stack, round(block['ww'] * multiplier), file=out)
# optional pytest integration: when pytest is importable, expose plugin
# hooks that profile every test call and render an SVG on exit
try:
    import pytest
    def pytest_addoption(parser):
        # plugin options: output path and cpu-only timing
        group = parser.getgroup('flameprof')
        group.addoption("--flameprof-svg", help="filename with out svg, default is %(default)s",
                        default='/tmp/pytest-prof.svg')
        group.addoption("--flameprof-cpu", action="store_true", help="ignore io wait")
    def pytest_configure(config):
        config.pluginmanager.register(PyTestPlugin(config.getvalue('flameprof_svg'),
                                                   config.getvalue('flameprof_cpu')))
    class PyTestPlugin(object):
        # accumulates a single cProfile across all executed tests
        def __init__(self, out, cpu):
            self.out = out
            self.any_test_was_run = False
            # NOTE(review): time.clock was removed in Python 3.8;
            # time.process_time is the modern equivalent -- confirm the
            # supported Python range before relying on --flameprof-cpu
            if cpu:
                self.profiler = Profile(time.clock)
            else:
                self.profiler = Profile()
        @pytest.hookimpl(hookwrapper=True)
        def pytest_runtest_call(self, item):
            self.any_test_was_run = True
            self.profiler.enable()
            try:
                yield
            finally:
                self.profiler.disable()
        def pytest_unconfigure(self, config):
            if self.any_test_was_run:
                self.profiler.create_stats()
                # NOTE(review): this file handle is never closed
                render(self.profiler.stats, open(self.out, 'w'))
except ImportError:
    pass
SVG = '''\
<?xml version="1.0" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg version="1.1" width="{width}" height="{height}"
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<style type="text/css">
.func_g:hover {{ stroke:black; stroke-width:0.5; cursor:pointer; }}
</style>
{}
</svg>'''
ELEM = '''\
<svg class="func_g" x="{x}" y="{y}" width="{w}" height="{h}"><g>
<title>{full_name}</title>
<rect height="100%" width="100%" fill="rgb{fill}" rx="2" ry="2" />
<text alignment-baseline="central" x="{tx}" y="{ty}" font-size="{fsize}px" fill="rgb(0,0,0)">{name}</text>
</g></svg>'''
def render(stats, out, fmt=DEFAULT_FORMAT, threshold=DEFAULT_THRESHOLD/100,
           width=DEFAULT_WIDTH, row_height=DEFAULT_ROW_HEIGHT,
           fsize=DEFAULT_FONT_SIZE, log_mult=DEFAULT_LOG_MULT):
    """Render cProfile ``stats`` to ``out`` as SVG or flamegraph log."""
    funcs, calls = calc_callers(stats)
    blocks, bblocks, maxw = prepare(funcs, calls, threshold=threshold)
    if fmt == 'svg':
        svg = render_svg(blocks, bblocks, maxw, h=row_height,
                         fsize=fsize, width=width)
        print(svg, file=out)
    elif fmt == 'log':
        render_fg(blocks, log_mult, out)
    out.flush()
if __name__ == '__main__':
    # command-line entry point: either profile a script/module, or load
    # an existing stats file, then render it
    parser = argparse.ArgumentParser(description='Make flamegraph from cProfile stats.')
    parser.add_argument('stats', help='file with cProfile stats or command to run')
    parser.add_argument('--width', type=int, help='image width, default is %(default)s', default=DEFAULT_WIDTH)
    parser.add_argument('--row-height', type=int, help='row height, default is %(default)s', default=DEFAULT_ROW_HEIGHT)
    parser.add_argument('--font-size', type=int, help='font size, default is %(default)s', default=DEFAULT_FONT_SIZE)
    parser.add_argument('--threshold', type=float, default=DEFAULT_THRESHOLD,
                        help='limit functions relative cumulative time in percents, default is %(default)s%%')
    parser.add_argument('--format', choices=['svg', 'log'], default=DEFAULT_FORMAT,
                        help='output format, default is %(default)s. `log` is suitable as input for flamegraph.pl')
    parser.add_argument('--log-mult', type=int, default=DEFAULT_LOG_MULT,
                        help='multiply score value for log format, default is %(default)s')
    parser.add_argument('--version', action='version', version='%(prog)s {}'.format(version))
    parser.add_argument('-r', '--run', action='store_true', help='run python script')
    parser.add_argument('-m', '--run-module', action='store_true', help='run python module')
    parser.add_argument('--cpu', action='store_true', help='count cpu time only (without io wait)')
    parser.add_argument('-o', '--out', type=argparse.FileType('w'), default=sys.stdout, help='file with final svg')
    # parse_known_args: everything unrecognized is forwarded to the
    # profiled program via sys.argv below
    args, rest = parser.parse_known_args()
    if args.run or args.run_module:
        if args.run:
            # compile the target ourselves so profile time is attributed
            # to '__main__'
            code = compile(open(args.stats, mode='rb').read(), '__main__', 'exec', dont_inherit=True)
            fname = args.stats
        elif args.run_module:
            if PY2:
                mod_name, loader, code, fname = runpy._get_module_details(args.stats)
            else:
                mod_name, mod_spec, code = runpy._get_module_details(args.stats)
                fname = mod_spec.origin
        # NOTE(review): time.clock was removed in Python 3.8; --cpu will
        # fail there (time.process_time is the modern equivalent)
        if args.cpu:
            s = Profile(time.clock)
        else:
            s = Profile()
        globs = {
            '__file__': fname,
            '__name__': '__main__',
            '__package__': None,
        }
        # give the profiled program its own argv and import path
        sys.argv[:] = [fname] + rest
        sys.path.insert(0, os.path.dirname(args.stats))
        try:
            s.runctx(code, globs, None)
        except SystemExit:
            pass
        s.create_stats()
    else:
        # positional argument is a ready-made cProfile stats file
        s = pstats.Stats(args.stats)
    render(s.stats, args.out, args.format, args.threshold / 100,
           args.width, args.row_height, args.font_size, args.log_mult)
Invocation examples in --help
#!/bin/env python
from __future__ import division, print_function
import os.path
import sys
import time
import pstats
import argparse
import pprint
import runpy
from struct import Struct
from hashlib import sha1
from functools import partial
from collections import Counter
from xml.sax.saxutils import escape
from cProfile import Profile
version = '0.3'
# stderr helpers so diagnostics never pollute the SVG/log on stdout
eprint = partial(print, file=sys.stderr)
epprint = partial(pprint.pprint, stream=sys.stderr)
# rendering defaults, all overridable from the command line
DEFAULT_WIDTH = 1200
DEFAULT_ROW_HEIGHT = 24
DEFAULT_FONT_SIZE = 12
DEFAULT_THRESHOLD = 0.1
DEFAULT_FORMAT = 'svg'
DEFAULT_LOG_MULT = 1000000
PY2 = sys.version_info[0] == 2
if PY2:
    # Python 2 sha1 accepts str directly; identity is enough
    bstr = lambda r: r
else:
    def bstr(data):
        # sha1 requires bytes on Python 3; encode text inputs
        if type(data) is str:
            data = data.encode('utf-8')
        return data
def gen_colors(s, e, size):
    """Yield ``size`` RGB tuples interpolated from ``s`` towards ``e``.

    Integer interpolation per channel: step i is s + (e - s) * i // size,
    so the end color itself is never emitted.
    """
    for step in range(size):
        yield (s[0] + (e[0] - s[0]) * step // size,
               s[1] + (e[1] - s[1]) * step // size,
               s[2] + (e[2] - s[2]) * step // size)
# palettes: hot (default blocks), cool (shared/recursive), callers view,
# and a dimmed grey ramp
COLORS = list(gen_colors((255, 240, 141), (255, 65, 34), 7))
CCOLORS = list(gen_colors((44, 255, 210), (113, 194, 0), 5))
ECOLORS = list(gen_colors((230, 230, 255), (150, 150, 255), 5))
DCOLORS = list(gen_colors((190, 190, 190), (240, 240, 240), 7))
# big-endian unsigned 32-bit, used by name_hash below
int_struct = Struct('!L')
def name_hash(name):
    """Map a name to a stable pseudo-random float in [0, 1).

    The first four bytes of the SHA-1 digest are used, so a given name
    always selects the same color slot.
    """
    bucket, = int_struct.unpack(sha1(bstr(name)).digest()[:4])
    return bucket / (0xffffffff + 1.0)
def calc_callers(stats):
    """Convert a pstats-style ``stats`` dict into caller/callee maps.

    ``stats`` maps func -> (cc, nc, tt, ct, callers) as produced by
    cProfile.  Returns (funcs, calls): ``funcs`` maps each func to its
    callee/caller lists plus its own stat tuple, ``calls`` maps
    (caller, callee) edges to stat tuples.  A synthetic 'root' entry
    ties all entry points together.
    """
    roots = []
    funcs = {}
    calls = {}
    for func, (cc, nc, tt, ct, clist) in stats.items():
        funcs[func] = {'calls': [], 'called': [], 'stat': (cc, nc, tt, ct)}
        if not clist:
            # no recorded callers: treat as an entry point under 'root'
            roots.append(func)
            calls[('root', func)] = funcs[func]['stat']
    for func, (_, _, _, _, clist) in stats.items():
        for cfunc, t in clist.items():
            assert (cfunc, func) not in calls
            funcs[cfunc]['calls'].append(func)
            funcs[func]['called'].append(cfunc)
            calls[(cfunc, func)] = t
    # sanity check: root cumulative time should roughly match the sum of
    # all tottimes; if not, promote the largest non-root func to a root
    total = sum(funcs[r]['stat'][3] for r in roots)
    ttotal = sum(funcs[r]['stat'][2] for r in funcs)
    if not (0.8 < total / ttotal < 1.2):
        eprint('Warning: can not find proper roots, root cumtime is {} but sum tottime is {}'.format(total, ttotal))
        # Try to find suitable root
        newroot = max((r for r in funcs if r not in roots), key=lambda r: funcs[r]['stat'][3])
        nstat = funcs[newroot]['stat']
        ntotal = total + nstat[3]
        if 0.8 < ntotal / ttotal < 1.2:
            roots.append(newroot)
            calls[('root', newroot)] = nstat
            total = ntotal
        else:
            total = ttotal
    funcs['root'] = {'calls': roots,
                     'called': [],
                     'stat': (1, 1, 0, total)}
    return funcs, calls
def prepare(funcs, calls, threshold=0.0001):
    """Lay out flamegraph blocks from the (funcs, calls) maps.

    Returns (blocks, bblocks, maxw): forward call-graph blocks, the
    inverted (callee -> callers) blocks, and the total width in time
    units.  Blocks narrower than ``threshold`` of maxw are dropped.
    """
    blocks = []
    bblocks = []
    block_counts = Counter()
    # count how many distinct paths reach each (parent, child) edge;
    # edges seen more than once (recursion / shared callees) get their
    # timings rescaled in _calc below
    def _counts(parent, visited, level=0):
        for child in funcs[parent]['calls']:
            k = parent, child
            block_counts[k] += 1
            if block_counts[k] < 2:
                if k not in visited:
                    _counts(child, visited | {k}, level+1)
    # walk the call tree depth-first, assigning each block an x origin
    # and a width proportional to its cumulative time
    def _calc(parent, timings, level, origin, visited, trace=(), pccnt=1, pblock=None):
        childs = funcs[parent]['calls']
        _, _, ptt, ptc = timings
        fchilds = sorted(((f, funcs[f], calls[(parent, f)], max(block_counts[(parent, f)], pccnt))
                          for f in childs),
                         key=lambda r: r[0])
        # "good" children occur on a single path and keep raw timings
        gchilds = [r for r in fchilds if r[3] == 1]
        # "bad" children are shared/recursive and must be rescaled
        bchilds = [r for r in fchilds if r[3] > 1]
        if bchilds:
            gctc = sum(r[2][3] for r in gchilds)
            bctc = sum(r[2][3] for r in bchilds)
            # time remaining for the bad children inside this parent
            rest = ptc-ptt-gctc
            if bctc > 0:
                factor = rest / bctc
            else:
                factor = 1
            bchilds = [(f, ff, (round(cc*factor), round(nc*factor), tt*factor, tc*factor), ccnt)
                       for f, ff, (cc, nc, tt, tc), ccnt in bchilds]
        for child, _, (cc, nc, tt, tc), ccnt in gchilds + bchilds:
            if tc/maxw > threshold:
                ckey = parent, child
                ctrace = trace + (child,)
                block = {
                    'trace': ctrace,
                    'color': (pccnt==1 and ccnt > 1),
                    'level': level,
                    'name': child[2],
                    'hash_name': '{0[0]}:{0[1]}:{0[2]}'.format(child),
                    'full_name': '{0[0]}:{0[1]}:{0[2]} {5:.2%} ({1} {2} {3} {4})'.format(child, cc, nc, tt, tc, tc/maxw),
                    'w': tc,
                    'ww': tt,
                    'x': origin
                }
                blocks.append(block)
                if ckey not in visited:
                    _calc(child, (cc, nc, tt, tc), level + 1, origin, visited | {ckey},
                          ctrace, ccnt, block)
                elif pblock:
                    # already expanded on another path: fold the time
                    # into the parent's own-width instead
                    pblock['ww'] += tc
            origin += tc
    # build the inverted graph: every function at level 0, its callers
    # stacked above, widths scaled so callers partition the block's time
    def _calc_back(names, level, to, origin, visited, pw):
        if to and names:
            factor = pw / sum(calls[(r, to)][3] for r in names)
        else:
            factor = 1
        for name in sorted(names):
            func = funcs[name]
            if to:
                cc, nc, tt, tc = calls[(name, to)]
                ttt = tc * factor
            else:
                cc, nc, tt, tc = func['stat']
                ttt = tt * factor
            if ttt / maxw > threshold:
                block = {
                    'color': 2 if level > 0 else not func['calls'],
                    'level': level,
                    'name': name[2],
                    'hash_name': '{0[0]}:{0[1]}:{0[2]}'.format(name),
                    'full_name': '{0[0]}:{0[1]}:{0[2]} {5:.2%} ({1} {2} {3} {4})'.format(name, cc, nc, tt, tc, tt/maxw),
                    'w': ttt,
                    'x': origin
                }
                bblocks.append(block)
                key = name, to
                if key not in visited:
                    _calc_back(func['called'], level+1, name, origin, visited | {key}, ttt)
            origin += ttt
    # total width = cumulative time of the synthetic root
    maxw = funcs['root']['stat'][3] * 1.0
    _counts('root', set())
    _calc('root', (1, 1, maxw, maxw), 0, 0, set())
    _calc_back((f for f in funcs if f != 'root'), 0, None, 0, set(), 0)
    return blocks, bblocks, maxw
def render_svg_section(blocks, maxw, colors, h=24, fsize=12, width=1200, top=0, invert=False):
    """Render one flamegraph section to a list of SVG fragments.

    Returns (fragments, new_top) where new_top is the y offset just
    below this section.  ``invert`` flips the stacking direction for
    the callers view.
    """
    deepest = max(b['level'] for b in blocks)
    height = (deepest + 1) * h
    fragments = []
    for block in blocks:
        if invert:
            y = top + height - (deepest - block['level']) * h - h
        else:
            y = top + height - block['level'] * h - h
        palette = colors[block['color']]
        fill = palette[int(len(palette) * name_hash(block['hash_name']))]
        fragments.append(ELEM.format(
            x=block['x'] * width / maxw,
            y=y,
            w=max(1, block['w'] * width / maxw - 1),
            tx=h / 6,
            ty=h / 2,
            h=h - 1,
            fsize=fsize,
            fill=fill,
            name=escape(block['name']),
            full_name=escape(block['full_name'])))
    return fragments, top + height
def render_svg(blocks, bblocks, maxw, h=24, fsize=12, width=1200):
    """Assemble the full SVG: call graph on top, inverted callers
    section below, separated by one blank row."""
    if not blocks:
        top_section, offset = [], 0
    else:
        top_section, offset = render_svg_section(blocks, maxw, [COLORS, CCOLORS],
                                                 h=h, fsize=fsize, width=width, top=0)
        offset += h  # blank separator row
    bottom_section, total_height = render_svg_section(
        bblocks, maxw, [COLORS, ECOLORS, DCOLORS],
        h=h, fsize=fsize, width=width, top=offset, invert=True)
    return SVG.format('\n'.join(top_section + bottom_section),
                      width=width, height=total_height)
def render_fg(blocks, multiplier, out):
    """Emit blocks in flamegraph.pl collapsed-stack format.

    One line per block: semicolon-joined trace frames, a space, then
    the block's own time scaled by ``multiplier`` and rounded.
    """
    for block in blocks:
        stack = ';'.join('{}:{}:{}'.format(*frame) for frame in block['trace'])
        print(stack, round(block['ww'] * multiplier), file=out)
SVG = '''\
<?xml version="1.0" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg version="1.1" width="{width}" height="{height}"
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<style type="text/css">
.func_g:hover {{ stroke:black; stroke-width:0.5; cursor:pointer; }}
</style>
{}
</svg>'''
ELEM = '''\
<svg class="func_g" x="{x}" y="{y}" width="{w}" height="{h}"><g>
<title>{full_name}</title>
<rect height="100%" width="100%" fill="rgb{fill}" rx="2" ry="2" />
<text alignment-baseline="central" x="{tx}" y="{ty}" font-size="{fsize}px" fill="rgb(0,0,0)">{name}</text>
</g></svg>'''
def render(stats, out, fmt=DEFAULT_FORMAT, threshold=DEFAULT_THRESHOLD/100,
           width=DEFAULT_WIDTH, row_height=DEFAULT_ROW_HEIGHT,
           fsize=DEFAULT_FONT_SIZE, log_mult=DEFAULT_LOG_MULT):
    """Render cProfile ``stats`` to ``out`` as SVG or flamegraph log."""
    funcs, calls = calc_callers(stats)
    blocks, bblocks, maxw = prepare(funcs, calls, threshold=threshold)
    if fmt == 'svg':
        svg = render_svg(blocks, bblocks, maxw, h=row_height,
                         fsize=fsize, width=width)
        print(svg, file=out)
    elif fmt == 'log':
        render_fg(blocks, log_mult, out)
    out.flush()
if __name__ == '__main__':
import textwrap
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
Make flamegraph from cProfile stats.
Using existing profile:
flameprof -o /tmp/profile.svg /path/to/file-with-cprofile.stats
Profile script:
flameprof -o /tmp/profile.svg -r myscript.py [-- script_arg1, script_arg2, ...]
Profile python module:
flameprof -o /tmp/profile.svg -m some.module [-- mod_arg1, mod_arg2, ...]
Profile pytest:
py.test -p flameprof # by default svg will be put in /tmp/pytest-prof.svg, see --help for other options
'''))
parser.add_argument('stats', help='file with cProfile stats or command to run')
parser.add_argument('--width', type=int, help='image width, default is %(default)s', default=DEFAULT_WIDTH)
parser.add_argument('--row-height', type=int, help='row height, default is %(default)s', default=DEFAULT_ROW_HEIGHT)
parser.add_argument('--font-size', type=int, help='font size, default is %(default)s', default=DEFAULT_FONT_SIZE)
parser.add_argument('--threshold', type=float, default=DEFAULT_THRESHOLD,
help='limit functions relative cumulative time in percents, default is %(default)s%%')
parser.add_argument('--format', choices=['svg', 'log'], default=DEFAULT_FORMAT,
help='output format, default is %(default)s. `log` is suitable as input for flamegraph.pl')
parser.add_argument('--log-mult', type=int, default=DEFAULT_LOG_MULT,
help='multiply score value for log format, default is %(default)s')
parser.add_argument('--version', action='version', version='%(prog)s {}'.format(version))
parser.add_argument('-r', '--run', action='store_true', help='run python script')
parser.add_argument('-m', '--run-module', action='store_true', help='run python module')
parser.add_argument('--cpu', action='store_true', help='count cpu time only (without io wait)')
parser.add_argument('-o', '--out', type=argparse.FileType('w'), default=sys.stdout, help='file with final svg')
args, rest = parser.parse_known_args()
if args.run or args.run_module:
if args.run:
code = compile(open(args.stats, mode='rb').read(), '__main__', 'exec', dont_inherit=True)
fname = args.stats
elif args.run_module:
if PY2:
mod_name, loader, code, fname = runpy._get_module_details(args.stats)
else:
mod_name, mod_spec, code = runpy._get_module_details(args.stats)
fname = mod_spec.origin
if args.cpu:
s = Profile(time.clock)
else:
s = Profile()
globs = {
'__file__': fname,
'__name__': '__main__',
'__package__': None,
}
sys.argv[:] = [fname] + rest
sys.path.insert(0, os.path.dirname(args.stats))
try:
s.runctx(code, globs, None)
except SystemExit:
pass
s.create_stats()
else:
s = pstats.Stats(args.stats)
render(s.stats, args.out, args.format, args.threshold / 100,
args.width, args.row_height, args.font_size, args.log_mult)
else:
    # imported as a module: expose a pytest plugin (silently a no-op
    # when pytest itself is not installed)
    try:
        import pytest
        def pytest_addoption(parser):
            # plugin options: output path and cpu-only timing
            group = parser.getgroup('flameprof')
            group.addoption("--flameprof-svg", help="filename with out svg, default is %(default)s",
                            default='/tmp/pytest-prof.svg')
            group.addoption("--flameprof-cpu", action="store_true", help="ignore io wait")
        def pytest_configure(config):
            config.pluginmanager.register(PyTestPlugin(config.getvalue('flameprof_svg'),
                                                       config.getvalue('flameprof_cpu')))
        class PyTestPlugin(object):
            # accumulates a single cProfile across all executed tests
            def __init__(self, out, cpu):
                self.out = out
                self.any_test_was_run = False
                # NOTE(review): time.clock was removed in Python 3.8;
                # time.process_time is the modern equivalent -- confirm
                # the supported Python range for --flameprof-cpu
                if cpu:
                    self.profiler = Profile(time.clock)
                else:
                    self.profiler = Profile()
            @pytest.hookimpl(hookwrapper=True)
            def pytest_runtest_call(self, item):
                self.any_test_was_run = True
                self.profiler.enable()
                try:
                    yield
                finally:
                    self.profiler.disable()
            def pytest_unconfigure(self, config):
                if self.any_test_was_run:
                    self.profiler.create_stats()
                    # NOTE(review): this file handle is never closed
                    render(self.profiler.stats, open(self.out, 'w'))
    except ImportError:
        pass
|
from nodes import RootNode, create_node
class Compiler:
    """Translate HAML-style source lines into rendered markup via the
    node tree from the ``nodes`` module."""
    def process(self, raw_text):
        """Render a whole HAML document given as one string."""
        lines = raw_text.strip().split('\n')
        return self.process_lines(lines)
    def process_lines(self, haml_lines):
        """Render a list of HAML lines by building and rendering a
        node tree."""
        tree = RootNode()
        for raw_line in haml_lines:
            tree.add_node(create_node(raw_line))
        return tree.render()
def convert_files():
    """Command-line entry point: convert the HAML file named by argv[1].

    Writes UTF-8 output to argv[2] when given, otherwise prints it.
    """
    import sys
    import codecs
    if len(sys.argv) < 2:
        # print() with one argument behaves the same on Python 2 and 3
        # (the bare print statement is Python-2-only)
        print("Specify the input file as the first argument.")
    else:
        infile = sys.argv[1]
        haml_lines = codecs.open(infile, 'r', encoding='utf-8').read().splitlines()
        compiler = Compiler()
        output = compiler.process_lines(haml_lines)
        if len(sys.argv) == 3:
            # write UTF-8 (input is decoded as UTF-8 above) and make
            # sure the handle is flushed and closed
            with codecs.open(sys.argv[2], 'w', encoding='utf-8') as outfile:
                outfile.write(output)
        else:
            print(output)
if __name__ == '__main__':
    # script usage: <input-file> [output-file]
    convert_files()
Made the file output in UTF-8
from nodes import RootNode, create_node
class Compiler:
    """Translate HAML-style markup lines into rendered output via a node tree."""

    def process(self, raw_text):
        """Split *raw_text* into lines and compile them."""
        lines = raw_text.strip().split('\n')
        return self.process_lines(lines)

    def process_lines(self, haml_lines):
        """Build a node tree from *haml_lines* and render it to a string."""
        tree = RootNode()
        for raw_line in haml_lines:
            tree.add_node(create_node(raw_line))
        return tree.render()
def convert_files():
import sys
import codecs
if len(sys.argv) < 2:
print "Specify the input file as the first argument."
else:
infile = sys.argv[1]
haml_lines = codecs.open(infile, 'r', encoding='utf-8').read().splitlines()
compiler = Compiler()
output = compiler.process_lines(haml_lines)
if len(sys.argv) == 3:
outfile = codecs.open(sys.argv[2], 'w', encoding='utf-8')
outfile.write(output)
else:
print output
# Allow running this module directly as a command-line converter.
if __name__ == '__main__':
    convert_files()
|
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2012 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required, \
permission_required
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.core.mail import mail_admins
from django.conf import settings
from django.template.context import RequestContext
from django.utils.translation import ugettext as _
from django.utils import simplejson
from django.contrib.contenttypes.models import ContentType
from django.db.models import get_model
from notification import models as notification
from frontend.views import notice_count
from dialer_contact.models import Contact
from utils.helper import grid_common_function, get_grid_update_delete_link
from dialer_campaign.models import Campaign
from dialer_campaign.forms import CampaignForm
from dialer_campaign.function_def import user_attached_with_dialer_settings, \
check_dialer_setting, dialer_setting_limit, \
get_campaign_status_name, user_dialer_setting_msg
from dialer_campaign.tasks import collect_subscriber
from common.common_functions import current_view
import re
def common_send_notification(request, status, recipient=None):
    """Send a user notification (e.g. start | stop | pause | abort |
    contact/campaign limit reached).

    It is a common function for the admin and customer UI's

    **Attributes**:

        * ``status`` - value used to look up the NoticeType to send
        * ``recipient`` - User receiving the notice; defaults to the
          logged-in user

    **Logic Description**:

        * This function is used by ``update_campaign_status_admin()`` &
          ``update_campaign_status_cust()``
    """
    if not recipient:
        recipient = request.user
        # NOTE(review): sender is looked up as a superuser whose username
        # matches the recipient's — assumes such an account exists; confirm.
        sender = User.objects.get(is_superuser=1, username=recipient)
    else:
        if request.user.is_anonymous():
            # Anonymous request (e.g. triggered by a background task):
            # fall back to the matching superuser account as sender.
            sender = User.objects.get(is_superuser=1, username=recipient)
        else:
            sender = request.user
    if notification:
        # NoticeType rows are keyed by their ``default`` field, which this
        # project uses to store the status code.
        note_label = notification.NoticeType.objects.get(default=status)
        notification.send([recipient],
                          note_label.label,
                          {"from_user": request.user},
                          sender=sender)
    return True
def common_campaign_status(pk, status):
    """Change a campaign's status (start | stop | abort | pause).

    Shared by the admin and customer UIs.

    **Attributes**:

        * ``pk`` - primary key of the campaign record
        * ``status`` - new status for the campaign record

    Returns the campaign's owner so callers can notify them.
    """
    record = Campaign.objects.get(pk=pk)
    old_status = record.status
    record.status = status
    record.save()
    # Kick off the subscriber-import task only on a fresh transition
    # into the "start" state.
    if status == "1" and old_status != "1":
        collect_subscriber.delay(pk)
    return record.user
@login_required
def update_campaign_status_admin(request, pk, status):
    """Change a campaign's status (start|stop|pause|abort) from the
    admin interface (via the campaign changelist)."""
    owner = common_campaign_status(pk, status)
    common_send_notification(request, status, owner)
    changelist_url = reverse("admin:dialer_campaign_campaign_changelist")
    return HttpResponseRedirect(changelist_url)
@login_required
def update_campaign_status_cust(request, pk, status):
    """Change a campaign's status (start|stop|pause|abort) from the
    customer interface (via dialer_campaign/campaign list)."""
    owner = common_campaign_status(pk, status)
    common_send_notification(request, status, owner)
    return HttpResponseRedirect('/campaign/')
@login_required
def notify_admin(request):
    """Notify the administrator by mail that a user's dialer settings
    are not configured.

    Sends the notification and the admin mail at most once per session,
    tracked by the ``has_notified`` session flag.
    """
    # TODO : get recipient = admin user
    recipient = User.objects.get(pk=request.user.pk)
    # .get() treats a missing session key as "not yet notified" instead
    # of raising KeyError, and avoids the `== False` comparison.
    if not request.session.get('has_notified'):
        common_send_notification(request, 7, recipient)
        # Send mail to ADMINS
        subject = _('Dialer setting configuration')
        message = _('Notification - User Dialer Setting The user "%(user)s" - "%(user_id)s" is not properly configured to use the system, please configure their dialer settings.') %\
            {'user': request.user, 'user_id': request.user.id}
        # mail_admins() is a shortcut for sending an email to the site
        # admins, as defined in the ADMINS setting
        mail_admins(subject, message)
        request.session['has_notified'] = True
    return HttpResponseRedirect('/dashboard/')
def count_contact_of_campaign(campaign_id):
    """Count the Contacts in the phonebooks attached to a campaign.

    Returns the count, or the string ``'Phonebook Empty'`` when there
    are none (the grid displays this text directly).

    >>> count_contact_of_campaign(1)
    'Phonebook Empty'
    """
    count_contact = Contact.objects\
        .filter(phonebook__campaign=campaign_id).count()
    if not count_contact:
        # Plain literal: wrapping a string literal in str() was redundant.
        return "Phonebook Empty"
    return count_contact
def tpl_control_icon(icon):
    """Build the inline style attribute for a grid control-icon button."""
    icon_url = settings.STATIC_URL + ('newfies/icons/%s' % icon)
    return 'style="text-decoration:none;background-image:url(%s);"' % icon_url
def get_url_campaign_status(id, status):
    """
    Helper to display the campaign status buttons on the grid.

    ``status`` is the campaign's current status code (1=start, 2=pause,
    3=abort, 4=stop); the button matching the current status is disabled
    (href='#') and rendered with its highlighted icon.
    """
    #Store html for campaign control button
    control_play_style = tpl_control_icon('control_play.png')
    control_pause_style = tpl_control_icon('control_pause.png')
    control_abort_style = tpl_control_icon('abort_grey.png')
    control_stop_style = tpl_control_icon('control_stop.png')

    #set different url for the campaign status
    url_cpg_status = 'update_campaign_status_cust/' + str(id)
    url_cpg_start = url_cpg_status + '/1/'
    url_cpg_pause = url_cpg_status + '/2/'
    url_cpg_abort = url_cpg_status + '/3/'
    url_cpg_stop = url_cpg_status + '/4/'

    #according to the current status, disable link and change the button color
    if status == 1:
        url_cpg_start = '#'
        control_play_style = tpl_control_icon('control_play_blue.png')
    elif status == 2:
        url_cpg_pause = '#'
        control_pause_style = tpl_control_icon('control_pause_blue.png')
    elif status == 3:
        url_cpg_abort = '#'
        control_abort_style = tpl_control_icon('abort.png')
    elif status == 4:
        url_cpg_stop = '#'
        control_stop_style = tpl_control_icon('control_stop_blue.png')

    #return all the html button for campaign status management
    #(the backslash continuations are INSIDE the literal, so the next
    # line's leading spaces become part of the emitted HTML)
    return "<a href='%s' class='icon' title='%s' %s> </a>\
            <a href='%s' class='icon' title='%s' %s> </a>\
            <a href='%s' class='icon' title='%s' %s> </a>\
            <a href='%s' class='icon' title='%s' %s> </a>" % \
        (url_cpg_start, _("Start"), control_play_style,
         url_cpg_pause, _("Pause"), control_pause_style,
         url_cpg_abort, _("Abort"), control_abort_style,
         url_cpg_stop, _("Stop"), control_stop_style)
def get_app_name(app_label, model_name, object_id):
    """Return the object referenced by (app_label, model_name, pk),
    or '-' when the model or object cannot be resolved."""
    try:
        return get_model(app_label, model_name).objects.get(pk=object_id)
    except Exception:
        # Narrowed from a bare except: still best-effort (grid display),
        # but no longer swallows KeyboardInterrupt/SystemExit.
        return '-'
# Campaign
@login_required
def campaign_grid(request):
    """Return the campaign list as JSON for the flexigrid widget.

    **Model**: Campaign

    Paging/sorting parameters come pre-parsed from
    ``grid_common_function``; only campaigns owned by the logged-in
    user are returned.
    """
    grid_data = grid_common_function(request)
    page = int(grid_data['page'])
    start_page = int(grid_data['start_page'])
    end_page = int(grid_data['end_page'])
    sortorder_sign = grid_data['sortorder_sign']
    sortname = grid_data['sortname']
    campaign_list = Campaign.objects\
        .values('id', 'campaign_code', 'name', 'startingdate',
                'expirationdate', 'aleg_gateway',
                'aleg_gateway__name', 'content_type__name',
                'content_type__app_label', 'object_id',
                'content_type__model', 'status')\
        .filter(user=request.user)
    # Count before slicing so the grid shows the full total.
    count = campaign_list.count()
    campaign_list = campaign_list\
        .order_by(sortorder_sign + sortname)[start_page:end_page]
    # One flexigrid row per campaign: selection checkbox, data columns,
    # then the update/delete links and the status control buttons.
    rows = [
        {'id': row['id'],
         'cell': ['<input type="checkbox" name="select" class="checkbox"\
                value="' + str(row['id']) + '" />',
                  row['campaign_code'],
                  row['name'],
                  row['startingdate'].strftime('%Y-%m-%d %H:%M:%S'),
                  row['content_type__name'],
                  str(get_app_name(row['content_type__app_label'],
                                   row['content_type__model'],
                                   row['object_id'])),
                  count_contact_of_campaign(row['id']),
                  get_campaign_status_name(row['status']),
                  get_grid_update_delete_link(request, row['id'],
                      'dialer_campaign.change_campaign',
                      _('Update campaign'), 'update') +\
                  get_grid_update_delete_link(request, row['id'],
                      'dialer_campaign.delete_campaign',
                      _('Delete campaign'), 'delete') +\
                  get_url_campaign_status(row['id'], row['status']),
                  ]} for row in campaign_list]
    data = {'rows': rows,
            'page': page,
            'total': count}
    return HttpResponse(simplejson.dumps(data), mimetype='application/json',
                        content_type="application/json")
@permission_required('dialer_campaign.view_campaign', login_url='/')
@login_required
def campaign_list(request):
    """List all campaigns belonging to the logged-in user.

    **Attributes**:

        * ``template`` - frontend/campaign/list.html
    """
    template = 'frontend/campaign/list.html'
    context = {
        'module': current_view(request),
        'notice_count': notice_count(request),
        'dialer_setting_msg': user_dialer_setting_msg(request.user),
        'msg': request.session.get('msg'),
        'error_msg': request.session.get('error_msg'),
    }
    # The session messages are one-shot: clear them once read.
    request.session['msg'] = ''
    request.session['error_msg'] = ''
    return render_to_response(template, context,
        context_instance=RequestContext(request))
def get_content_type(object_string):
    """
    Parse a "type:<ct_id>-id:<obj_id>" string into a ContentType and
    object id. Used by campaign_add & campaign_change.

    >>> get_content_type("type:31-id:1")
    {'object_type': <ContentType: observed item>, 'object_id': '1'}
    """
    result_array = {}
    # Raw string for the regex (avoids relying on '\d' surviving as-is).
    matches = re.match(r"type:(\d+)-id:(\d+)", object_string).groups()
    object_type_id = matches[0]  # get 45 from "type:45-id:38"
    result_array['object_id'] = matches[1]  # get 38 from "type:45-id:38"
    try:
        result_array['object_type'] = ContentType.objects\
            .get(id=object_type_id)
    except ContentType.DoesNotExist:
        # Narrowed from a bare except. An unknown id still yields a dict
        # without 'object_type' (callers KeyError on it), as before.
        pass
    return result_array
@permission_required('dialer_campaign.add_campaign', login_url='/')
@login_required
def campaign_add(request):
    """Add a new campaign for the logged in user

    **Attributes**:

        * ``form`` - CampaignForm
        * ``template`` - frontend/campaign/change.html

    **Logic Description**:

        * Before adding a campaign, check dialer setting limit if
          applicable to the user.
        * Add the new campaign which will belong to the logged in user
          via CampaignForm & get redirected to campaign list
    """
    # If dialer setting is not attached with user, redirect to campaign list
    if user_attached_with_dialer_settings(request):
        request.session['error_msg'] = \
            _("In order to add a campaign, you need to have your settings configured properly, please contact the admin.")
        return HttpResponseRedirect("/campaign/")

    # Check dialer setting limit (only on the initial GET)
    if request.user and request.method != 'POST':
        # check Max Number of running campaign
        if check_dialer_setting(request, check_for="campaign"):
            msg = _("you have too many campaigns. Max allowed %(limit)s") \
                % {'limit': \
                    dialer_setting_limit(request, limit_for="campaign")}
            request.session['msg'] = msg

            # campaign limit reached
            common_send_notification(request, '5')
            return HttpResponseRedirect("/campaign/")

    form = CampaignForm(request.user)
    # Add campaign
    if request.method == 'POST':
        form = CampaignForm(request.user, request.POST)
        if form.is_valid():
            obj = form.save(commit=False)

            # The target object is packed in the 'content_object' field
            # as "type:<ct_id>-id:<obj_id>"; unpack it onto the campaign.
            result_array = \
                get_content_type(form.cleaned_data['content_object'])
            obj.content_type = result_array['object_type']
            obj.object_id = result_array['object_id']
            obj.user = User.objects.get(username=request.user)
            obj.save()

            # Start tasks to import subscriber
            if obj.status == 1:
                collect_subscriber.delay(obj.pk)

            form.save_m2m()
            request.session["msg"] = _('"%(name)s" is added.') %\
                {'name': request.POST['name']}
            return HttpResponseRedirect('/campaign/')
    template = 'frontend/campaign/change.html'
    data = {
        'module': current_view(request),
        'form': form,
        'action': 'add',
        'notice_count': notice_count(request),
        'dialer_setting_msg': user_dialer_setting_msg(request.user),
    }
    return render_to_response(template, data,
        context_instance=RequestContext(request))
@permission_required('dialer_campaign.delete_campaign', login_url='/')
@login_required
def campaign_del(request, object_id):
    """Delete campaign(s) for the logged in user

    **Attributes**:

        * ``object_id`` - selected campaign id, or 0 for a multi-delete
          driven by the POSTed ``select`` checkbox list

    **Logic Description**:

        * Delete the selected campaign from the campaign list
    """
    if int(object_id) != 0:
        try:
            # When object_id is not 0
            campaign = Campaign.objects.get(pk=object_id, user=request.user)
            request.session["msg"] = _('"%(name)s" is deleted.')\
                % {'name': campaign.name}
            campaign.delete()
        except Campaign.DoesNotExist:
            request.session["error_msg"] = \
                _('campaign doesn`t belong to user')
    else:
        # When object_id is 0 (Multiple records delete).
        # Coerce the POSTed ids to int and use id__in: the previous code
        # interpolated the raw values into SQL via .extra(), which was
        # vulnerable to SQL injection.
        try:
            values = [int(el) for el in request.POST.getlist('select')]
            campaign_list = Campaign.objects\
                .filter(user=request.user, id__in=values)
            if campaign_list:
                request.session["msg"] = _('%(count)s campaign(s) are deleted.')\
                    % {'count': campaign_list.count()}
                campaign_list.delete()
        except Exception:
            request.session["error_msg"] =\
                _('campaign(s) do not belong to user')
    return HttpResponseRedirect('/campaign/')
@permission_required('dialer_campaign.change_campaign', login_url='/')
@login_required
def campaign_change(request, object_id):
    """Update/Delete campaign for the logged in user

    **Attributes**:

        * ``object_id`` - Selected campaign object
        * ``form`` - CampaignForm
        * ``template`` - frontend/campaign/change.html

    **Logic Description**:

        * Update/delete selected campaign from the campaign list
          via CampaignForm & get redirected to the campaign list
    """
    # If dialer setting is not attached with user, redirect to campaign list
    if user_attached_with_dialer_settings(request):
        return HttpResponseRedirect("/campaign/")
    # NOTE(review): this try wraps the whole view body, so ANY exception
    # (not only a missing campaign) reports "doesn't belong to user".
    try:
        campaign = Campaign.objects.get(pk=object_id, user=request.user)
        # Pre-fill the packed "type:<ct_id>-id:<obj_id>" field.
        content_object = "type:%s-id:%s" % \
            (campaign.content_type_id, campaign.object_id)
        form = CampaignForm(request.user,
                            instance=campaign,
                            initial={'content_object': content_object})
        if request.method == 'POST':
            # Delete campaign
            if request.POST.get('delete'):
                campaign_del(request, object_id)
                request.session["msg"] = _('"%(name)s" is deleted.')\
                    % {'name': request.POST['name']}
                return HttpResponseRedirect('/campaign/')
            else:
                # Update campaign
                form = CampaignForm(request.user, request.POST, instance=campaign)
                previous_status = campaign.status
                if form.is_valid():
                    # NOTE(review): form.save() followed by
                    # form.save(commit=False) + obj.save() persists twice.
                    form.save()
                    obj = form.save(commit=False)
                    result_array = \
                        get_content_type(form.cleaned_data['content_object'])
                    obj.content_type = result_array['object_type']
                    obj.object_id = result_array['object_id']
                    obj.save()

                    # Start tasks to import subscriber
                    if obj.status == 1 and previous_status != 1:
                        collect_subscriber.delay(obj.id)

                    request.session["msg"] = _('"%(name)s" is updated.') \
                        % {'name': request.POST['name']}
                    return HttpResponseRedirect('/campaign/')
    except:
        request.session["error_msg"] = _('Campaign doesn`t belong to user')
        return HttpResponseRedirect('/campaign/')
    template = 'frontend/campaign/change.html'
    data = {
        'module': current_view(request),
        'form': form,
        'action': 'update',
        'notice_count': notice_count(request),
        'dialer_setting_msg': user_dialer_setting_msg(request.user),
    }
    return render_to_response(template, data,
        context_instance=RequestContext(request))
refactor: renaming
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2012 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required, \
permission_required
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.core.mail import mail_admins
from django.conf import settings
from django.template.context import RequestContext
from django.utils.translation import ugettext as _
from django.utils import simplejson
from django.contrib.contenttypes.models import ContentType
from django.db.models import get_model
from notification import models as notification
from frontend.views import notice_count
from dialer_contact.models import Contact
from utils.helper import grid_common_function, get_grid_update_delete_link
from dialer_campaign.models import Campaign
from dialer_campaign.forms import CampaignForm
from dialer_campaign.function_def import user_attached_with_dialer_settings, \
check_dialer_setting, dialer_setting_limit, \
get_campaign_status_name, user_dialer_setting_msg
from dialer_campaign.tasks import collect_subscriber
from common.common_functions import current_view
import re
def common_send_notification(request, status, recipient=None):
    """Send a user notification (e.g. start | stop | pause | abort |
    contact/campaign limit reached).

    It is a common function for the admin and customer UI's

    **Attributes**:

        * ``status`` - value used to look up the NoticeType to send
        * ``recipient`` - User receiving the notice; defaults to the
          logged-in user

    **Logic Description**:

        * This function is used by ``update_campaign_status_admin()`` &
          ``update_campaign_status_cust()``
    """
    if not recipient:
        recipient = request.user
        # NOTE(review): sender is looked up as a superuser whose username
        # matches the recipient's — assumes such an account exists; confirm.
        sender = User.objects.get(is_superuser=1, username=recipient)
    else:
        if request.user.is_anonymous():
            # Anonymous request (e.g. triggered by a background task):
            # fall back to the matching superuser account as sender.
            sender = User.objects.get(is_superuser=1, username=recipient)
        else:
            sender = request.user
    if notification:
        # NoticeType rows are keyed by their ``default`` field, which this
        # project uses to store the status code.
        note_label = notification.NoticeType.objects.get(default=status)
        notification.send([recipient],
                          note_label.label,
                          {"from_user": request.user},
                          sender=sender)
    return True
def common_campaign_status(pk, status):
    """Change a campaign's status (start | stop | abort | pause).

    Shared by the admin and customer UIs.

    **Attributes**:

        * ``pk`` - primary key of the campaign record
        * ``status`` - new status for the campaign record

    Returns the campaign's owner so callers can notify them.
    """
    record = Campaign.objects.get(pk=pk)
    old_status = record.status
    record.status = status
    record.save()
    # Kick off the subscriber-import task only on a fresh transition
    # into the "start" state.
    if status == "1" and old_status != "1":
        collect_subscriber.delay(pk)
    return record.user
@login_required
def update_campaign_status_admin(request, pk, status):
    """Change a campaign's status (start|stop|pause|abort) from the
    admin interface (via the campaign changelist)."""
    owner = common_campaign_status(pk, status)
    common_send_notification(request, status, owner)
    changelist_url = reverse("admin:dialer_campaign_campaign_changelist")
    return HttpResponseRedirect(changelist_url)
@login_required
def update_campaign_status_cust(request, pk, status):
    """Change a campaign's status (start|stop|pause|abort) from the
    customer interface (via dialer_campaign/campaign list)."""
    owner = common_campaign_status(pk, status)
    common_send_notification(request, status, owner)
    return HttpResponseRedirect('/campaign/')
@login_required
def notify_admin(request):
    """Notify the administrator by mail that a user's dialer settings
    are not configured.

    Sends the notification and the admin mail at most once per session,
    tracked by the ``has_notified`` session flag.
    """
    # TODO : get recipient = admin user
    recipient = User.objects.get(pk=request.user.pk)
    # .get() treats a missing session key as "not yet notified" instead
    # of raising KeyError, and avoids the `== False` comparison.
    if not request.session.get('has_notified'):
        common_send_notification(request, 7, recipient)
        # Send mail to ADMINS
        subject = _('Dialer setting configuration')
        message = _('Notification - User Dialer Setting The user "%(user)s" - "%(user_id)s" is not properly configured to use the system, please configure their dialer settings.') %\
            {'user': request.user, 'user_id': request.user.id}
        # mail_admins() is a shortcut for sending an email to the site
        # admins, as defined in the ADMINS setting
        mail_admins(subject, message)
        request.session['has_notified'] = True
    return HttpResponseRedirect('/dashboard/')
def count_contact_of_campaign(campaign_id):
    """Count the Contacts in the phonebooks attached to a campaign.

    Returns the count, or the string ``'Phonebook Empty'`` when there
    are none (the grid displays this text directly).

    >>> count_contact_of_campaign(1)
    'Phonebook Empty'
    """
    count_contact = Contact.objects\
        .filter(phonebook__campaign=campaign_id).count()
    if not count_contact:
        # Plain literal: wrapping a string literal in str() was redundant.
        return "Phonebook Empty"
    return count_contact
def tpl_control_icon(icon):
    """Build the inline style attribute for a grid control-icon button."""
    icon_url = settings.STATIC_URL + ('newfies/icons/%s' % icon)
    return 'style="text-decoration:none;background-image:url(%s);"' % icon_url
def get_url_campaign_status(id, status):
    """
    Helper to display the campaign status buttons on the grid.

    ``status`` is the campaign's current status code (1=start, 2=pause,
    3=abort, 4=stop); the button matching the current status is disabled
    (href='#') and rendered with its highlighted icon.
    """
    #Store html for campaign control button
    control_play_style = tpl_control_icon('control_play.png')
    control_pause_style = tpl_control_icon('control_pause.png')
    control_abort_style = tpl_control_icon('abort_grey.png')
    control_stop_style = tpl_control_icon('control_stop.png')

    #set different url for the campaign status
    url_cpg_status = 'update_campaign_status_cust/' + str(id)
    url_cpg_start = url_cpg_status + '/1/'
    url_cpg_pause = url_cpg_status + '/2/'
    url_cpg_abort = url_cpg_status + '/3/'
    url_cpg_stop = url_cpg_status + '/4/'

    #according to the current status, disable link and change the button color
    if status == 1:
        url_cpg_start = '#'
        control_play_style = tpl_control_icon('control_play_blue.png')
    elif status == 2:
        url_cpg_pause = '#'
        control_pause_style = tpl_control_icon('control_pause_blue.png')
    elif status == 3:
        url_cpg_abort = '#'
        control_abort_style = tpl_control_icon('abort.png')
    elif status == 4:
        url_cpg_stop = '#'
        control_stop_style = tpl_control_icon('control_stop_blue.png')

    #return all the html button for campaign status management
    #(the backslash continuations are INSIDE the literal, so the next
    # line's leading spaces become part of the emitted HTML)
    return "<a href='%s' class='icon' title='%s' %s> </a>\
            <a href='%s' class='icon' title='%s' %s> </a>\
            <a href='%s' class='icon' title='%s' %s> </a>\
            <a href='%s' class='icon' title='%s' %s> </a>" % \
        (url_cpg_start, _("Start"), control_play_style,
         url_cpg_pause, _("Pause"), control_pause_style,
         url_cpg_abort, _("Abort"), control_abort_style,
         url_cpg_stop, _("Stop"), control_stop_style)
def get_app_name(app_label, model_name, object_id):
    """Return the object referenced by (app_label, model_name, pk),
    or '-' when the model or object cannot be resolved."""
    try:
        return get_model(app_label, model_name).objects.get(pk=object_id)
    except Exception:
        # Narrowed from a bare except: still best-effort (grid display),
        # but no longer swallows KeyboardInterrupt/SystemExit.
        return '-'
# Campaign
@login_required
def campaign_grid(request):
    """Return the campaign list as JSON for the flexigrid widget.

    **Model**: Campaign

    Paging/sorting parameters come pre-parsed from
    ``grid_common_function``; only campaigns owned by the logged-in
    user are returned.
    """
    grid_data = grid_common_function(request)
    page = int(grid_data['page'])
    start_page = int(grid_data['start_page'])
    end_page = int(grid_data['end_page'])
    sortorder_sign = grid_data['sortorder_sign']
    sortname = grid_data['sortname']
    campaign_list = Campaign.objects\
        .values('id', 'campaign_code', 'name', 'startingdate',
                'expirationdate', 'aleg_gateway',
                'aleg_gateway__name', 'content_type__name',
                'content_type__app_label', 'object_id',
                'content_type__model', 'status')\
        .filter(user=request.user)
    # Count before slicing so the grid shows the full total.
    count = campaign_list.count()
    campaign_list = campaign_list\
        .order_by(sortorder_sign + sortname)[start_page:end_page]
    # One flexigrid row per campaign: selection checkbox, data columns,
    # then the update/delete links and the status control buttons.
    rows = [
        {'id': row['id'],
         'cell': ['<input type="checkbox" name="select" class="checkbox"\
                value="' + str(row['id']) + '" />',
                  row['campaign_code'],
                  row['name'],
                  row['startingdate'].strftime('%Y-%m-%d %H:%M:%S'),
                  row['content_type__name'],
                  str(get_app_name(row['content_type__app_label'],
                                   row['content_type__model'],
                                   row['object_id'])),
                  count_contact_of_campaign(row['id']),
                  get_campaign_status_name(row['status']),
                  get_grid_update_delete_link(request, row['id'],
                      'dialer_campaign.change_campaign',
                      _('Update campaign'), 'update') +\
                  get_grid_update_delete_link(request, row['id'],
                      'dialer_campaign.delete_campaign',
                      _('Delete campaign'), 'delete') +\
                  get_url_campaign_status(row['id'], row['status']),
                  ]} for row in campaign_list]
    data = {'rows': rows,
            'page': page,
            'total': count}
    return HttpResponse(simplejson.dumps(data), mimetype='application/json',
                        content_type="application/json")
@permission_required('dialer_campaign.view_campaign', login_url='/')
@login_required
def campaign_list(request):
    """List all campaigns belonging to the logged-in user.

    **Attributes**:

        * ``template`` - frontend/campaign/list.html
    """
    template = 'frontend/campaign/list.html'
    context = {
        'module': current_view(request),
        'notice_count': notice_count(request),
        'dialer_setting_msg': user_dialer_setting_msg(request.user),
        'msg': request.session.get('msg'),
        'error_msg': request.session.get('error_msg'),
    }
    # The session messages are one-shot: clear them once read.
    request.session['msg'] = ''
    request.session['error_msg'] = ''
    return render_to_response(template, context,
        context_instance=RequestContext(request))
def get_content_type(object_string):
    """
    Retrieve ContentType and Object ID from a packed
    "type:<ct_id>-id:<obj_id>" string.
    It is used by campaign_add & campaign_change

    >>> get_content_type("type:31-id:1")
    {'object_type': <ContentType: observed item>, 'object_id': '1'}
    """
    contenttype = {}
    # Raw string for the regex (avoids relying on '\d' surviving as-is).
    matches = re.match(r"type:(\d+)-id:(\d+)", object_string).groups()
    object_type_id = matches[0]  # get 45 from "type:45-id:38"
    contenttype['object_id'] = matches[1]  # get 38 from "type:45-id:38"
    try:
        contenttype['object_type'] = ContentType.objects\
            .get(id=object_type_id)
    except ContentType.DoesNotExist:
        # Narrowed from a bare except. An unknown id still yields a dict
        # without 'object_type' (callers KeyError on it), as before.
        pass
    return contenttype
@permission_required('dialer_campaign.add_campaign', login_url='/')
@login_required
def campaign_add(request):
    """Add a new campaign for the logged in user

    **Attributes**:

        * ``form`` - CampaignForm
        * ``template`` - frontend/campaign/change.html

    **Logic Description**:

        * Before adding a campaign, check dialer setting limit if
          applicable to the user.
        * Add the new campaign which will belong to the logged in user
          via CampaignForm & get redirected to campaign list
    """
    # If dialer setting is not attached with user, redirect to campaign list
    if user_attached_with_dialer_settings(request):
        request.session['error_msg'] = \
            _("In order to add a campaign, you need to have your settings configured properly, please contact the admin.")
        return HttpResponseRedirect("/campaign/")

    # Check dialer setting limit (only on the initial GET)
    if request.user and request.method != 'POST':
        # check Max Number of running campaign
        if check_dialer_setting(request, check_for="campaign"):
            msg = _("you have too many campaigns. Max allowed %(limit)s") \
                % {'limit': \
                    dialer_setting_limit(request, limit_for="campaign")}
            request.session['msg'] = msg

            # campaign limit reached
            common_send_notification(request, '5')
            return HttpResponseRedirect("/campaign/")

    form = CampaignForm(request.user)
    # Add campaign
    if request.method == 'POST':
        form = CampaignForm(request.user, request.POST)
        if form.is_valid():
            obj = form.save(commit=False)

            # The target object is packed in the 'content_object' field
            # as "type:<ct_id>-id:<obj_id>"; unpack it onto the campaign.
            contenttype = get_content_type(form.cleaned_data['content_object'])
            obj.content_type = contenttype['object_type']
            obj.object_id = contenttype['object_id']
            obj.user = User.objects.get(username=request.user)
            obj.save()

            # Start tasks to import subscriber
            if obj.status == 1:
                collect_subscriber.delay(obj.pk)

            form.save_m2m()
            request.session["msg"] = _('"%(name)s" is added.') %\
                {'name': request.POST['name']}
            return HttpResponseRedirect('/campaign/')
    template = 'frontend/campaign/change.html'
    data = {
        'module': current_view(request),
        'form': form,
        'action': 'add',
        'notice_count': notice_count(request),
        'dialer_setting_msg': user_dialer_setting_msg(request.user),
    }
    return render_to_response(template, data,
        context_instance=RequestContext(request))
@permission_required('dialer_campaign.delete_campaign', login_url='/')
@login_required
def campaign_del(request, object_id):
    """Delete campaign(s) for the logged in user

    **Attributes**:

        * ``object_id`` - selected campaign id, or 0 for a multi-delete
          driven by the POSTed ``select`` checkbox list

    **Logic Description**:

        * Delete the selected campaign from the campaign list
    """
    if int(object_id) != 0:
        try:
            # When object_id is not 0
            campaign = Campaign.objects.get(pk=object_id, user=request.user)
            request.session["msg"] = _('"%(name)s" is deleted.')\
                % {'name': campaign.name}
            campaign.delete()
        except Campaign.DoesNotExist:
            request.session["error_msg"] = \
                _('campaign doesn`t belong to user')
    else:
        # When object_id is 0 (Multiple records delete).
        # Coerce the POSTed ids to int and use id__in: the previous code
        # interpolated the raw values into SQL via .extra(), which was
        # vulnerable to SQL injection.
        try:
            values = [int(el) for el in request.POST.getlist('select')]
            campaign_list = Campaign.objects\
                .filter(user=request.user, id__in=values)
            if campaign_list:
                request.session["msg"] = _('%(count)s campaign(s) are deleted.')\
                    % {'count': campaign_list.count()}
                campaign_list.delete()
        except Exception:
            request.session["error_msg"] =\
                _('campaign(s) do not belong to user')
    return HttpResponseRedirect('/campaign/')
@permission_required('dialer_campaign.change_campaign', login_url='/')
@login_required
def campaign_change(request, object_id):
    """Update/Delete campaign for the logged in user

    **Attributes**:

        * ``object_id`` - Selected campaign object
        * ``form`` - CampaignForm
        * ``template`` - frontend/campaign/change.html

    **Logic Description**:

        * Update/delete selected campaign from the campaign list
          via CampaignForm & get redirected to the campaign list
    """
    # If dialer setting is not attached with user, redirect to campaign list
    if user_attached_with_dialer_settings(request):
        return HttpResponseRedirect("/campaign/")
    # NOTE(review): this try wraps the whole view body, so ANY exception
    # (not only a missing campaign) reports "doesn't belong to user".
    try:
        campaign = Campaign.objects.get(pk=object_id, user=request.user)
        # Pre-fill the packed "type:<ct_id>-id:<obj_id>" field.
        content_object = "type:%s-id:%s" % \
            (campaign.content_type_id, campaign.object_id)
        form = CampaignForm(request.user,
                            instance=campaign,
                            initial={'content_object': content_object})
        if request.method == 'POST':
            # Delete campaign
            if request.POST.get('delete'):
                campaign_del(request, object_id)
                request.session["msg"] = _('"%(name)s" is deleted.')\
                    % {'name': request.POST['name']}
                return HttpResponseRedirect('/campaign/')
            else:
                # Update campaign
                form = CampaignForm(request.user, request.POST, instance=campaign)
                previous_status = campaign.status
                if form.is_valid():
                    # NOTE(review): form.save() followed by
                    # form.save(commit=False) + obj.save() persists twice.
                    form.save()
                    obj = form.save(commit=False)
                    contenttype = get_content_type(form.cleaned_data['content_object'])
                    obj.content_type = contenttype['object_type']
                    obj.object_id = contenttype['object_id']
                    obj.save()

                    # Start tasks to import subscriber
                    if obj.status == 1 and previous_status != 1:
                        collect_subscriber.delay(obj.id)

                    request.session["msg"] = _('"%(name)s" is updated.') \
                        % {'name': request.POST['name']}
                    return HttpResponseRedirect('/campaign/')
    except:
        request.session["error_msg"] = _('Campaign doesn`t belong to user')
        return HttpResponseRedirect('/campaign/')
    template = 'frontend/campaign/change.html'
    data = {
        'module': current_view(request),
        'form': form,
        'action': 'update',
        'notice_count': notice_count(request),
        'dialer_setting_msg': user_dialer_setting_msg(request.user),
    }
    return render_to_response(template, data,
        context_instance=RequestContext(request))
|
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import remi.gui as gui
import remi.server
from remi import start, App
import imp
import inspect
import sys
import os #for path handling
import prototypes
import editor_widgets
import html_helper
import threading
class ResizeHelper(gui.Widget, gui.EventSource):
    """Drag handle used by the editor to resize the selected widget.

    The handle is an absolutely positioned icon anchored to the
    bottom-right corner of the target widget; while it is dragged, the
    target's 'width'/'height' styles are updated accordingly.
    """

    def __init__(self, project, **kwargs):
        super(ResizeHelper, self).__init__(**kwargs)
        gui.EventSource.__init__(self)
        # visual setup: small floating icon with a transparent background
        self.style.update({'float': 'none',
                           'background-image': "url('/res/resize.png')",
                           'background-color': "rgba(255,255,255,0.0)",
                           'position': 'absolute',
                           'left': '0px',
                           'top': '0px'})
        self.project = project
        self.parent = None        # container the helper is appended to
        self.refWidget = None     # widget currently being resized
        self.active = False       # True while a drag is in progress
        self.onmousedown.connect(self.start_drag)
        self.origin_x = -1
        self.origin_y = -1

    def setup(self, refWidget, newParent):
        """Attach the handle to *refWidget*, placing it inside *newParent*.

        Calling setup(None, None) only detaches the handle.
        """
        if self.parent:
            try:
                self.parent.remove_child(self)
            except:
                # there was no ResizeHelper placed
                pass
        if newParent == None:
            return
        self.parent = newParent
        self.refWidget = refWidget
        # statically positioned widgets cannot be resized by dragging
        self.static_positioning = ('position' in self.refWidget.style and
                                   self.refWidget.style['position'] != 'absolute')
        if self.static_positioning:
            return
        try:
            self.parent.append(self)
        except:
            # the selected widget's parent can't contain a ResizeHelper
            pass
        self.update_position()

    def start_drag(self, emitter, x, y):
        # begin following mouse movement over the whole project area
        self.active = True
        self.project.onmousemove.connect(self.on_drag)
        self.project.onmouseup.connect(self.stop_drag)
        self.project.onmouseleave.connect(self.stop_drag, 0, 0)
        self.origin_x = -1
        self.origin_y = -1

    @gui.decorate_event
    def stop_drag(self, emitter, x, y):
        self.active = False
        self.update_position()
        return ()

    def on_drag(self, emitter, x, y):
        """Resize refWidget by the mouse displacement since drag start."""
        if not self.active:
            return
        if self.origin_x == -1:
            # first move event: remember the starting point and original size
            self.origin_x = int(x)
            self.origin_y = int(y)
            self.refWidget_origin_w = gui.from_pix(self.refWidget.style['width'])
            self.refWidget_origin_h = gui.from_pix(self.refWidget.style['height'])
        else:
            dx = int(x) - self.origin_x
            dy = int(y) - self.origin_y
            self.refWidget.style['width'] = gui.to_pix(self.refWidget_origin_w + dx)
            self.refWidget.style['height'] = gui.to_pix(self.refWidget_origin_h + dy)
            self.update_position()

    def update_position(self):
        """Keep the handle centered on refWidget's bottom-right corner."""
        self.style['position'] = 'absolute'
        if not self.refWidget:
            return
        ref_style = self.refWidget.style
        if 'left' not in ref_style or 'top' not in ref_style:
            return
        self.style['left'] = gui.to_pix(gui.from_pix(ref_style['left'])
                                        + gui.from_pix(ref_style['width'])
                                        - gui.from_pix(self.style['width']) / 2)
        self.style['top'] = gui.to_pix(gui.from_pix(ref_style['top'])
                                       + gui.from_pix(ref_style['height'])
                                       - gui.from_pix(self.style['height']) / 2)
class DragHelper(gui.Widget, gui.EventSource):
    """Drag handle used by the editor to move the selected widget.

    The handle is an absolutely positioned icon overlaid on the target
    widget's top-left corner; while it is dragged, the target's
    'left'/'top' styles are updated accordingly.
    """

    def __init__(self, project, **kwargs):
        super(DragHelper, self).__init__(**kwargs)
        gui.EventSource.__init__(self)
        # visual setup: small floating icon with a transparent background
        self.style.update({'float': 'none',
                           'background-image': "url('/res/drag.png')",
                           'background-color': "rgba(255,255,255,0.0)",
                           'position': 'absolute',
                           'left': '0px',
                           'top': '0px'})
        self.project = project
        self.parent = None        # container the helper is appended to
        self.refWidget = None     # widget currently being moved
        self.active = False       # True while a drag is in progress
        self.onmousedown.connect(self.start_drag)
        self.origin_x = -1
        self.origin_y = -1

    def setup(self, refWidget, newParent):
        """Attach the handle to *refWidget*, placing it inside *newParent*.

        Calling setup(None, None) only detaches the handle.
        """
        if self.parent:
            try:
                self.parent.remove_child(self)
            except:
                # there was no DragHelper placed
                pass
        if newParent == None:
            return
        self.parent = newParent
        self.refWidget = refWidget
        # statically positioned widgets cannot be moved by dragging
        self.static_positioning = ('position' in self.refWidget.style and
                                   self.refWidget.style['position'] != 'absolute')
        if self.static_positioning:
            return
        try:
            self.parent.append(self)
        except:
            # the selected widget's parent can't contain a DragHelper
            pass
        self.update_position()

    def start_drag(self, emitter, x, y):
        # begin following mouse movement over the whole project area
        self.active = True
        self.project.onmousemove.connect(self.on_drag)
        self.project.onmouseup.connect(self.stop_drag)
        self.project.onmouseleave.connect(self.stop_drag, 0, 0)
        self.origin_x = -1
        self.origin_y = -1

    @gui.decorate_event
    def stop_drag(self, emitter, x, y):
        self.active = False
        self.update_position()
        return ()

    def on_drag(self, emitter, x, y):
        """Move refWidget by the mouse displacement since drag start."""
        if not self.active:
            return
        if self.origin_x == -1:
            # first move event: remember the starting point and original position
            self.origin_x = int(x)
            self.origin_y = int(y)
            self.refWidget_origin_x = gui.from_pix(self.refWidget.style['left'])
            self.refWidget_origin_y = gui.from_pix(self.refWidget.style['top'])
        else:
            dx = int(x) - self.origin_x
            dy = int(y) - self.origin_y
            self.refWidget.style['left'] = gui.to_pix(self.refWidget_origin_x + dx)
            self.refWidget.style['top'] = gui.to_pix(self.refWidget_origin_y + dy)
            self.update_position()

    def update_position(self):
        """Keep the handle glued to refWidget's top-left corner."""
        self.style['position'] = 'absolute'
        if not self.refWidget:
            return
        ref_style = self.refWidget.style
        if 'left' in ref_style and 'top' in ref_style:
            # round-trip through from_pix/to_pix normalizes the value to 'Npx'
            self.style['left'] = gui.to_pix(gui.from_pix(ref_style['left']))
            self.style['top'] = gui.to_pix(gui.from_pix(ref_style['top']))
class Project(gui.Widget):
    """ The editor project is pure html with specific tag attributes
    This class loads and save the project file,
    and also compiles a project in python code.
    """

    def __init__(self, **kwargs):
        super(Project, self).__init__(**kwargs)
        # editor canvas look: relative positioning + checkered background
        self.style.update({'position': 'relative',
                           'overflow': 'auto',
                           'background-color': 'rgb(250,248,240)',
                           'background-image': "url('/res/background.png')"})

    def new(self):
        #remove the main widget
        pass

    def load(self, ifile, configuration):
        """Load a project file produced by the editor.

        Returns the root widget tree built by the loaded App's
        construct_ui(), or None when no App subclass is found.
        """
        self.ifile = ifile
        _module = imp.load_source('project', self.ifile)  #imp.load_source('module.name', '/path/to/file.py')
        configuration.configDict = _module.configuration
        #finding App class
        clsmembers = inspect.getmembers(_module, inspect.isclass)
        app_init_fnc = None
        for (name, value) in clsmembers:
            if issubclass(value, App) and name != 'App':
                app_init_fnc = value
        if app_init_fnc == None:
            return None
        # BUGFIX: previously this instantiated the loaded App
        # (app_init_fnc(editing_mode=True)) and stubbed EVERY member found
        # by inspect.getmembers — including idle() — which caused heavy lag
        # during drag/resize after a project reload. Instead, walk the
        # class __dict__ without instantiating, stub only plain functions,
        # and keep '__init__', 'main' and 'idle' intact.
        members_list = app_init_fnc.__dict__.values()
        for m in members_list:
            if inspect.isfunction(m) and m.__name__ not in ['__init__', 'main', 'idle']:
                setattr(self, m.__name__, self.fakeListenerFunc)
        return app_init_fnc.construct_ui(self)

    def fakeListenerFunc(*args):
        # placeholder that swallows any event generated by the loaded UI
        pass

    def check_pending_listeners(self, widget, widgetVarName, force=False):
        """Emit the listener-registration code that can be resolved at *widget*.

        A pending registration becomes solvable once both its event source
        and its event listener lie below *widget* in the tree (or always,
        when *force* is True — used for the project root).
        """
        code_nested_listener = ''
        #checking if pending listeners code production can be solved
        for event in self.pending_listener_registration:
            if force or (hasattr(event['eventsource'], 'path_to_this_widget') and hasattr(event['eventlistener'], 'path_to_this_widget')):
                if (force or (widget.attributes['editor_varname'] in event['eventsource'].path_to_this_widget and widget.attributes['editor_varname'] in event['eventlistener'].path_to_this_widget)) and event['done'] == False:
                    #this means that this is the root node from where the leafs (listener and source) depart; here the listener can be set
                    event['done'] = True
                    #event source chain
                    sourcename = 'self'
                    source_filtered_path = event['eventsource'].path_to_this_widget[:]
                    listener_filtered_path = event['eventlistener'].path_to_this_widget[:]
                    # strip the common prefix shared with *widget*
                    for v in widget.path_to_this_widget:
                        source_filtered_path.remove(v)
                        listener_filtered_path.remove(v)
                    if force or (self.children['root'] == widget and not (widget.attributes['editor_newclass'] == 'True')):
                        sourcename = self.children['root'].attributes['editor_varname']
                        if self.children['root'].attributes['editor_varname'] in source_filtered_path:
                            source_filtered_path.remove(self.children['root'].attributes['editor_varname'])
                    if len(source_filtered_path) > 0:
                        sourcename = ("%s.children['" + "'].children['".join(source_filtered_path) + "']") % sourcename
                    #listener chain
                    listenername = "self"
                    if force or (self.children['root'] == widget and not (widget.attributes['editor_newclass'] == 'True')):
                        if event['eventlistener'] != self:
                            listenername = self.children['root'].attributes['editor_varname']
                    if len(listener_filtered_path) > 0:
                        listenername = ("%s.children['" + "'].children['".join(listener_filtered_path) + "']") % listenername
                    code_nested_listener += prototypes.proto_set_listener % {'sourcename': sourcename,
                                                                             'register_function': event['setoneventfuncname'],
                                                                             'listenername': listenername,
                                                                             'listener_function': event['listenerfuncname']}
                    # accumulate the listener's stub function on its declaring class
                    if not event['eventlistener'].identifier in self.code_declared_classes:
                        self.code_declared_classes[event['eventlistener'].identifier] = ''
                    self.code_declared_classes[event['eventlistener'].identifier] += event['listenerClassFunction']
        return code_nested_listener

    def repr_widget_for_editor(self, widget):
        """Recursively compile *widget* (and children) to python source.

        Returns the nested allocation/setup code; widgets flagged with
        editor_newclass get their code accumulated in
        self.code_declared_classes instead.
        """
        self.known_project_children.append(widget)
        widget.path_to_this_widget.append(widget.attributes['editor_varname'])
        print(widget.attributes['editor_varname'])
        code_nested = ''  #the code strings to return
        if not hasattr(widget, 'attributes'):
            return ''  #no nested code
        widgetVarName = widget.attributes['editor_varname']
        newClass = widget.attributes['editor_newclass'] == 'True'
        classname = 'CLASS' + widgetVarName if newClass else widget.__class__.__name__
        code_nested = prototypes.proto_widget_allocation % {'varname': widgetVarName, 'classname': classname, 'editor_constructor': widget.attributes['editor_constructor'], 'editor_instance_id': widget.identifier}
        code_nested += prototypes.proto_attribute_setup % {'varname': widgetVarName, 'attr_dict': ','.join('"%s":"%s"' % (key, widget.attributes[key]) for key in widget.attributes.keys() if key not in html_helper.htmlInternallyUsedTags)}
        code_nested += prototypes.proto_style_setup % {'varname': widgetVarName, 'style_dict': ','.join('"%s":"%s"' % (key, widget.style[key]) for key in widget.style.keys())}
        # temporarily restore the user's own onclick so it gets compiled,
        # not the editor's selection handler
        backup_editor_onclick = widget.onclick.callback
        widget.onclick.callback = widget.backup_onclick_listener
        #for all the methods of this widget
        for (setOnEventListenerFuncname, setOnEventListenerFunc) in inspect.getmembers(widget):
            #if the member is decorated by decorate_set_on_listener
            if hasattr(setOnEventListenerFunc, '_event_info'):
                #if there is a callback
                if getattr(widget, setOnEventListenerFuncname).callback:
                    listenerPrototype = setOnEventListenerFunc._event_info['prototype']
                    listener = getattr(widget, setOnEventListenerFuncname).callback.__self__
                    listenerFunctionName = setOnEventListenerFunc._event_info['name'] + "_" + widget.attributes['editor_varname']
                    listenerClassFunction = prototypes.proto_code_function % {'funcname': listenerFunctionName,
                                                                              'parameters': listenerPrototype}
                    self.pending_listener_registration.append({'done': False,
                                                               'eventsource': widget,
                                                               'eventlistener': listener,
                                                               'setoneventfuncname': setOnEventListenerFuncname,
                                                               'listenerfuncname': listenerFunctionName,
                                                               'listenerClassFunction': listenerClassFunction})
        if newClass:
            widgetVarName = 'self'
        children_code_nested = ''
        for child_key in widget.children.keys():
            child = widget.children[child_key]
            if type(child) == str:
                #children_code_nested += prototypes.proto_layout_append%{'parentname':widgetVarName,'varname':"'%s'"%child}
                continue
            if 'editor_varname' not in child.attributes.keys():
                continue
            child.path_to_this_widget = widget.path_to_this_widget[:]
            children_code_nested += self.repr_widget_for_editor(child)
            children_code_nested += prototypes.proto_layout_append % {'parentname': widgetVarName, 'varname': "%s,'%s'" % (child.attributes['editor_varname'], child.attributes['editor_varname'])}
        children_code_nested += self.check_pending_listeners(widget, widgetVarName)
        widget.onclick.callback = backup_editor_onclick
        if newClass:  # and not (classname in self.code_declared_classes.keys()):
            if not widget.identifier in self.code_declared_classes:
                self.code_declared_classes[widget.identifier] = ''
            self.code_declared_classes[widget.identifier] = prototypes.proto_code_class % {'classname': classname, 'superclassname': widget.attributes['editor_baseclass'],
                                                                                           'nested_code': children_code_nested} + self.code_declared_classes[widget.identifier]
        else:
            code_nested = code_nested + children_code_nested
        return code_nested

    def prepare_path_to_this_widget(self, node):
        """Reset the path_to_this_widget chain on every editable descendant."""
        #here gets initiated to null list the path_to_this_widget chain
        node.path_to_this_widget = []
        for child in node.children.values():
            if type(child) == str:
                continue
            if 'editor_varname' not in child.attributes.keys():
                continue
            self.prepare_path_to_this_widget(child)

    def save(self, save_path_filename, configuration):
        """Compile the project to python source and optionally write it out.

        When *save_path_filename* is None the code is only printed.
        """
        self.code_declared_classes = {}
        self.pending_listener_registration = list()
        #a list containing widgets that have been parsed and that are considered valid listeners
        self.known_project_children = [self, ]
        #a list containing dicts {listener, emitter, register_function, listener_function}
        self.pending_signals_to_connect = list()
        compiled_code = ''
        code_classes = ''
        self.path_to_this_widget = []
        self.prepare_path_to_this_widget(self.children['root'])
        ret = self.repr_widget_for_editor(self.children['root'])
        code_nested = ret + self.check_pending_listeners(self, 'self', True)
        main_code_class = prototypes.proto_code_main_class % {'classname': configuration.configDict[configuration.KEY_PRJ_NAME],
                                                              'config_resourcepath': configuration.configDict[configuration.KEY_RESOURCEPATH],
                                                              'code_nested': code_nested,
                                                              'mainwidgetname': self.children['root'].attributes['editor_varname']}
        # code declared for the Project itself belongs to the main class
        if self.identifier in self.code_declared_classes.keys():
            main_code_class += self.code_declared_classes[self.identifier]
            del self.code_declared_classes[self.identifier]
        for key in self.code_declared_classes.keys():
            code_classes += self.code_declared_classes[key]
        code_classes += main_code_class
        compiled_code = prototypes.proto_code_program % {'code_classes': code_classes,
                                                         'classname': configuration.configDict[configuration.KEY_PRJ_NAME],
                                                         'configuration': configuration.configDict}
        print(compiled_code)
        if save_path_filename != None:
            # use a context manager so the file is closed even on write errors
            with open(save_path_filename, "w") as f:
                f.write(compiled_code)
class Editor(App):
    """Main application class of the remi visual editor.

    Builds the editor UI (menu, toolbar, widget palette, project canvas,
    attribute/instance panels) and implements the editing commands
    (select, cut, paste, delete, save, load).
    """

    def __init__(self, *args):
        # serve the editor's own icons/images from ./res
        editor_res_path = os.path.join(os.path.dirname(__file__), 'res')
        super(Editor, self).__init__(*args, static_file_path=editor_res_path)

    def idle(self):
        # called periodically by the framework: keep the overlay helpers
        # glued to the currently selected widget
        self.resizeHelper.update_position()
        self.dragHelper.update_position()

    def main(self):
        """Build and return the editor's root widget tree."""
        self.mainContainer = gui.Widget(width='100%', height='100%', layout_orientation=gui.Widget.LAYOUT_VERTICAL)
        self.mainContainer.style['background-color'] = 'white'
        self.mainContainer.style['border'] = 'none'
        # --- menu bar -------------------------------------------------
        menubar = gui.MenuBar(height='4%')
        menu = gui.Menu(width='100%',height='100%')
        menu.style['z-index'] = '1'
        m1 = gui.MenuItem('File', width=150, height='100%')
        m10 = gui.MenuItem('New', width=150, height=30)
        m11 = gui.MenuItem('Open', width=150, height=30)
        m12 = gui.MenuItem('Save Your App', width=150, height=30)
        #m12.style['visibility'] = 'hidden'
        m121 = gui.MenuItem('Save', width=100, height=30)
        m122 = gui.MenuItem('Save as', width=100, height=30)
        m1.append([m10, m11, m12])
        m12.append([m121, m122])
        m2 = gui.MenuItem('Edit', width=100, height='100%')
        m21 = gui.MenuItem('Cut', width=100, height=30)
        m22 = gui.MenuItem('Paste', width=100, height=30)
        m2.append([m21, m22])
        m3 = gui.MenuItem('Project Config', width=200, height='100%')
        menu.append([m1, m2, m3])
        menubar.append(menu)
        # --- toolbar --------------------------------------------------
        self.toolbar = editor_widgets.ToolBar(width='100%', height='30px', margin='0px 0px')
        self.toolbar.style['border-bottom'] = '1px solid rgba(0,0,0,.12)'
        self.toolbar.add_command('/res/delete.png', self.toolbar_delete_clicked, 'Delete Widget')
        self.toolbar.add_command('/res/cut.png', self.menu_cut_selection_clicked, 'Cut Widget')
        self.toolbar.add_command('/res/paste.png', self.menu_paste_selection_clicked, 'Paste Widget')
        # --- open/save dialogs ----------------------------------------
        self.fileOpenDialog = editor_widgets.EditorFileSelectionDialog('Open Project', 'Select the project file.<br>It have to be a python program created with this editor.', False, '.', True, False, self)
        self.fileOpenDialog.confirm_value.connect(self.on_open_dialog_confirm)
        self.fileSaveAsDialog = editor_widgets.EditorFileSaveDialog('Project Save', 'Select the project folder and type a filename', False, '.', False, True, self)
        self.fileSaveAsDialog.add_fileinput_field('untitled.py')
        self.fileSaveAsDialog.confirm_value.connect(self.on_saveas_dialog_confirm)
        # wire the menu entries to their commands
        m10.onclick.connect(self.menu_new_clicked)
        m11.onclick.connect(self.fileOpenDialog.show)
        m121.onclick.connect(self.menu_save_clicked)
        m122.onclick.connect(self.fileSaveAsDialog.show)
        m21.onclick.connect(self.menu_cut_selection_clicked)
        m22.onclick.connect(self.menu_paste_selection_clicked)
        m3.onclick.connect(self.menu_project_config_clicked)
        self.subContainer = gui.HBox(width='100%', height='96%', layout_orientation=gui.Widget.LAYOUT_HORIZONTAL)
        self.subContainer.style.update({'position':'relative',
                                        'overflow':'auto',
                                        'align-items':'stretch'})
        #here are contained the widgets
        self.widgetsCollection = editor_widgets.WidgetCollection(self, width='100%', height='50%')
        # --- project canvas -------------------------------------------
        self.project = Project(width='100%', height='100%')
        self.project.style['min-height'] = '400px'
        self.project.attributes['ondragover'] = "event.preventDefault();"
        self.EVENT_ONDROPPPED = "on_dropped"
        # client-side drop handler: 'resize'/'move' adjust the dragged
        # widget's position, 'add' reports the drop coordinates; the result
        # is sent back to the server via sendCallbackParam
        self.project.attributes['ondrop'] = """event.preventDefault();
                var data = JSON.parse(event.dataTransfer.getData('application/json'));
                var params={};
                if( data[0] == 'resize'){
                    document.getElementById(data[1]).style.left = parseInt(document.getElementById(data[1]).style.left) + event.clientX - data[2] + 'px';
                    document.getElementById(data[1]).style.top = parseInt(document.getElementById(data[1]).style.top) + event.clientY - data[3] + 'px';
                    params['left']=document.getElementById(data[1]).style.left;
                    params['top']=document.getElementById(data[1]).style.top;
                }
                if( data[0] == 'add'){
                    params['left']=event.clientX-event.currentTarget.getBoundingClientRect().left;
                    params['top']=event.clientY-event.currentTarget.getBoundingClientRect().top;
                }
                if( data[0] == 'move'){
                    document.getElementById(data[1]).style.left = parseInt(document.getElementById(data[1]).style.left) + event.clientX - data[2] + 'px';
                    document.getElementById(data[1]).style.top = parseInt(document.getElementById(data[1]).style.top) + event.clientY - data[3] + 'px';
                    params['left']=document.getElementById(data[1]).style.left;
                    params['top']=document.getElementById(data[1]).style.top;
                }
                sendCallbackParam(data[1],'%(evt)s',params);
                return false;""" % {'evt':self.EVENT_ONDROPPPED}
        self.project.attributes['editor_varname'] = 'App'
        # forward keydown events to the server; suppress default handling
        # of the Delete key (46) so the browser doesn't navigate away
        self.project.attributes[self.project.EVENT_ONKEYDOWN] = """
                var params={};
                params['keypressed']=event.keyCode;
                sendCallbackParam('%(id)s','%(evt)s',params);
                if(event.keyCode==46){
                    return false;
                }
                """ % {'id':str(id(self)), 'evt':self.project.EVENT_ONKEYDOWN}
        self.projectConfiguration = editor_widgets.ProjectConfigurationDialog('Project Configuration', 'Write here the configuration for your project.')
        self.attributeEditor = editor_widgets.EditorAttributes(self, width='100%')
        self.attributeEditor.style['overflow'] = 'hide'
        self.signalConnectionManager = editor_widgets.SignalConnectionManager(width='100%', height='50%')
        # --- layout: left palette / central canvas / right panels -----
        self.mainContainer.append([menubar, self.subContainer])
        self.subContainerLeft = gui.Widget(width='20%', height='100%')
        self.subContainerLeft.style['position'] = 'relative'
        self.subContainerLeft.style['left'] = '0px'
        self.subContainerLeft.append([self.widgetsCollection, self.signalConnectionManager])
        self.subContainerLeft.add_class('RaisedFrame')
        self.centralContainer = gui.VBox(width='56%', height='100%')
        self.centralContainer.append([self.toolbar, self.project])
        self.subContainerRight = gui.Widget(width='24%', height='100%')
        self.subContainerRight.style.update({'position':'absolute', 'right':'0px', 'overflow':'scroll'})
        self.subContainerRight.add_class('RaisedFrame')
        self.instancesWidget = editor_widgets.InstancesWidget(width='100%')
        self.instancesWidget.treeView.on_tree_item_selected.connect(self.on_instances_widget_selection)
        self.subContainerRight.append([self.instancesWidget, self.attributeEditor])
        self.subContainer.append([self.subContainerLeft, self.centralContainer, self.subContainerRight])
        self.project.style['position'] = 'relative'
        # overlay helpers for resizing/moving the selected widget
        self.resizeHelper = ResizeHelper(self.project, width=16, height=16)
        self.dragHelper = DragHelper(self.project, width=15, height=15)
        self.resizeHelper.stop_drag.connect(self.on_drag_resize_end)
        self.dragHelper.stop_drag.connect(self.on_drag_resize_end)
        self.menu_new_clicked(None)
        self.projectPathFilename = ''
        self.editCuttedWidget = None #cut operation, contains the cutted tag
        # returning the root widget
        return self.mainContainer

    def on_drag_resize_end(self, emitter):
        # refresh the attribute panel with the widget's new geometry
        self.attributeEditor.set_widget( self.selectedWidget )

    def configure_widget_for_editing(self, widget):
        """ A widget have to be added to the editor, it is configured here in order to be conformant
        to the editor
        """
        # only widgets created through the editor carry editor_varname
        if not 'editor_varname' in widget.attributes:
            return
        # save the user's own onclick so it can be restored at save time,
        # then take over clicks for selection
        widget.backup_onclick_listener = widget.onclick.callback
        widget.onclick.connect(self.on_widget_selection)
        #setup of the on_dropped function of the widget in order to manage the dragNdrop
        widget.__class__.on_dropped = on_dropped
        #drag properties
        #widget.style['resize'] = 'both'
        widget.style['overflow'] = 'auto'
        widget.attributes['draggable'] = 'true'
        widget.attributes['ondragstart'] = """this.style.cursor='move'; event.dataTransfer.dropEffect = 'move'; event.dataTransfer.setData('application/json', JSON.stringify(['move',event.target.id,(event.clientX),(event.clientY)]));"""
        widget.attributes['ondragover'] = "event.preventDefault();"
        widget.EVENT_ONDROPPPED = "on_dropped"
        # client-side drop handler for dropping a NEW widget onto this one
        widget.attributes['ondrop'] = """
                var data = JSON.parse(event.dataTransfer.getData('application/json'));
                var params={};
                if( data[0] == 'add'){
                    console.debug('addd---------------------------------------------');
                    sendCallback('%(id)s','%(event_click)s');
                    console.debug('dopo---------------------------------------------');
                    params['left']=event.clientX-event.currentTarget.getBoundingClientRect().left;
                    params['top']=event.clientY-event.currentTarget.getBoundingClientRect().top;
                    sendCallbackParam(data[1],'%(evt)s',params);
                    event.stopPropagation();
                    event.preventDefault();
                }
                return false;""" % {'evt':widget.EVENT_ONDROPPPED, 'id': widget.identifier, 'event_click': widget.EVENT_ONCLICK}
        # tabindex makes the widget focusable (needed for key handling)
        widget.attributes['tabindex']=str(self.tabindex)
        # default geometry so the widget is immediately draggable
        if not 'position' in widget.style.keys():
            widget.style['position'] = 'absolute'
        if not 'left' in widget.style.keys():
            widget.style['left'] = '1px'
        if not 'top' in widget.style.keys():
            widget.style['top'] = '1px'
        self.tabindex += 1

    def add_widget_to_editor(self, widget, parent = None, root_tree_node = True):
        """Insert *widget* (and recursively its children) into the project."""
        if parent == None:
            parent = self.selectedWidget
        self.configure_widget_for_editing(widget)
        key = "root" if parent==self.project else widget.identifier
        if root_tree_node:
            parent.append(widget,key)
            if self.selectedWidget == self.project:
                self.on_widget_selection( widget )
        #dcopy = widget.children.copy()
        for child in widget.children.values():
            if type(child) == str:
                continue
            self.add_widget_to_editor(child, widget, False)
        self.instancesWidget.update(self.project, self.selectedWidget)

    def on_instances_widget_selection(self, instancesWidgetItem, selectedWidget):
        # selection coming from the instances tree view
        self.on_widget_selection(selectedWidget)

    def on_widget_selection(self, widget):
        """Make *widget* the current selection and update all panels."""
        self.remove_box_shadow_selected_widget()
        self.selectedWidget = widget
        # highlight the selection with a glow
        self.selectedWidget.style['box-shadow'] = '0 0 10px rgb(33,150,243)'
        self.signalConnectionManager.update(self.selectedWidget, self.project)
        self.attributeEditor.set_widget( self.selectedWidget )
        parent = remi.server.get_method_by_id(self.selectedWidget.attributes['data-parent-widget'])
        self.resizeHelper.setup(widget,parent)
        self.dragHelper.setup(widget,parent)
        self.instancesWidget.select(self.selectedWidget)
        print("selected widget: " + widget.identifier)

    def menu_new_clicked(self, widget):
        """Start an empty project, discarding the current root widget."""
        print('new project')
        self.project.new()
        self.tabindex = 0 #incremental number to allow widgets selection
        self.selectedWidget = self.project
        self.resizeHelper.setup(None, None)
        self.dragHelper.setup(None, None)
        if 'root' in self.project.children.keys():
            self.project.remove_child( self.project.children['root'] )

    def on_open_dialog_confirm(self, widget, filelist):
        # load the first selected file and show its widget tree
        if len(filelist):
            widgetTree = self.project.load(filelist[0], self.projectConfiguration)
            if widgetTree!=None:
                self.add_widget_to_editor( widgetTree )
            self.projectPathFilename = filelist[0]

    def menu_save_clicked(self, widget):
        """Save to the known path, or fall back to the Save As dialog."""
        #the dragHelper have to be removed
        self.resizeHelper.setup(None, None)
        self.dragHelper.setup(None, None)
        if self.projectPathFilename == '':
            self.fileSaveAsDialog.show()
        else:
            # the selection glow must not end up in the saved code
            self.remove_box_shadow_selected_widget()
            self.project.save(self.projectPathFilename, self.projectConfiguration)

    def remove_box_shadow_selected_widget(self):
        # drop the selection-highlight style, if present
        if 'box-shadow' in self.selectedWidget.style.keys():
            del self.selectedWidget.style['box-shadow']

    def on_saveas_dialog_confirm(self, widget, path):
        """Save the project to the folder/filename chosen in the dialog."""
        #the resizeHelper have to be removed
        self.resizeHelper.setup(None, None)
        self.dragHelper.setup(None, None)
        if len(path):
            self.projectPathFilename = path + '/' + self.fileSaveAsDialog.get_fileinput_value()
            print("file:%s"%self.projectPathFilename)
            self.remove_box_shadow_selected_widget()
            self.project.save(self.projectPathFilename, self.projectConfiguration)

    def menu_cut_selection_clicked(self, widget):
        """Detach the selected widget and keep it for a later paste."""
        if self.selectedWidget==self.project:
            return  # the project canvas itself cannot be cut
        self.resizeHelper.setup(None, None)
        self.dragHelper.setup(None, None)
        parent = remi.server.get_method_by_id(self.selectedWidget.attributes['data-parent-widget'])
        self.editCuttedWidget = self.selectedWidget
        parent.remove_child(self.selectedWidget)
        self.selectedWidget = parent
        self.instancesWidget.update(self.project, self.selectedWidget)
        print("tag cutted:" + self.editCuttedWidget.identifier)

    def menu_paste_selection_clicked(self, widget):
        """Re-attach the previously cut widget under the current selection."""
        if self.editCuttedWidget != None:
            key = "root" if self.selectedWidget==self.project else self.editCuttedWidget.identifier
            self.selectedWidget.append(self.editCuttedWidget, key)
            self.editCuttedWidget = None
            self.instancesWidget.update(self.project, self.selectedWidget)

    def menu_project_config_clicked(self, widget):
        self.projectConfiguration.show(self)

    def toolbar_delete_clicked(self, widget):
        """Remove the selected widget from the project permanently."""
        if self.selectedWidget==self.project:
            return  # the project canvas itself cannot be deleted
        self.resizeHelper.setup(None, None)
        self.dragHelper.setup(None, None)
        parent = remi.server.get_method_by_id(self.selectedWidget.attributes['data-parent-widget'])
        parent.remove_child(self.selectedWidget)
        self.instancesWidget.update(self.project, self.selectedWidget)
        self.selectedWidget = parent
        print("tag deleted")

    def onkeydown(self, keypressed):
        # Delete key removes the current selection
        if str(keypressed)=='46': #46 the delete keycode
            self.toolbar_delete_clicked(None)
        print("Key pressed: " + str(keypressed))
def on_dropped(self, left, top):
    """Apply a client-reported drop position to this widget's style.

    Monkey-patched onto edited widget classes by
    Editor.configure_widget_for_editing. Empty coordinate strings
    (no position reported by the browser) default to '0px'.
    """
    self.style['left'] = left if len(left) >= 1 else '0px'
    self.style['top'] = top if len(top) >= 1 else '0px'
def main():
    """Entry point: launch the visual editor web application.

    Other useful start() parameters:
    start(MyApp, address='127.0.0.1', port=8081, multiple_instance=False,
          enable_file_cache=True, update_interval=0.1, start_browser=True)
    """
    start(Editor, debug=False, address='0.0.0.0', port=8082, update_interval=0.01)


if __name__ == "__main__":
    main()
Fixed an editor bug that caused lag during drag and resize after reloading a project.
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import remi.gui as gui
import remi.server
from remi import start, App
import imp
import inspect
import sys
import os #for path handling
import prototypes
import editor_widgets
import html_helper
import threading
class ResizeHelper(gui.Widget, gui.EventSource):
    """Drag handle used by the editor to resize the selected widget.

    The handle is an absolutely positioned icon anchored to the
    bottom-right corner of the target widget; while it is dragged, the
    target's 'width'/'height' styles are updated accordingly.
    """

    def __init__(self, project, **kwargs):
        super(ResizeHelper, self).__init__(**kwargs)
        gui.EventSource.__init__(self)
        # visual setup: small floating icon with a transparent background
        self.style.update({'float': 'none',
                           'background-image': "url('/res/resize.png')",
                           'background-color': "rgba(255,255,255,0.0)",
                           'position': 'absolute',
                           'left': '0px',
                           'top': '0px'})
        self.project = project
        self.parent = None        # container the helper is appended to
        self.refWidget = None     # widget currently being resized
        self.active = False       # True while a drag is in progress
        self.onmousedown.connect(self.start_drag)
        self.origin_x = -1
        self.origin_y = -1

    def setup(self, refWidget, newParent):
        """Attach the handle to *refWidget*, placing it inside *newParent*.

        Calling setup(None, None) only detaches the handle.
        """
        if self.parent:
            try:
                self.parent.remove_child(self)
            except:
                # there was no ResizeHelper placed
                pass
        if newParent == None:
            return
        self.parent = newParent
        self.refWidget = refWidget
        # statically positioned widgets cannot be resized by dragging
        self.static_positioning = ('position' in self.refWidget.style and
                                   self.refWidget.style['position'] != 'absolute')
        if self.static_positioning:
            return
        try:
            self.parent.append(self)
        except:
            # the selected widget's parent can't contain a ResizeHelper
            pass
        self.update_position()

    def start_drag(self, emitter, x, y):
        # begin following mouse movement over the whole project area
        self.active = True
        self.project.onmousemove.connect(self.on_drag)
        self.project.onmouseup.connect(self.stop_drag)
        self.project.onmouseleave.connect(self.stop_drag, 0, 0)
        self.origin_x = -1
        self.origin_y = -1

    @gui.decorate_event
    def stop_drag(self, emitter, x, y):
        self.active = False
        self.update_position()
        return ()

    def on_drag(self, emitter, x, y):
        """Resize refWidget by the mouse displacement since drag start."""
        if not self.active:
            return
        if self.origin_x == -1:
            # first move event: remember the starting point and original size
            self.origin_x = int(x)
            self.origin_y = int(y)
            self.refWidget_origin_w = gui.from_pix(self.refWidget.style['width'])
            self.refWidget_origin_h = gui.from_pix(self.refWidget.style['height'])
        else:
            dx = int(x) - self.origin_x
            dy = int(y) - self.origin_y
            self.refWidget.style['width'] = gui.to_pix(self.refWidget_origin_w + dx)
            self.refWidget.style['height'] = gui.to_pix(self.refWidget_origin_h + dy)
            self.update_position()

    def update_position(self):
        """Keep the handle centered on refWidget's bottom-right corner."""
        self.style['position'] = 'absolute'
        if not self.refWidget:
            return
        ref_style = self.refWidget.style
        if 'left' not in ref_style or 'top' not in ref_style:
            return
        self.style['left'] = gui.to_pix(gui.from_pix(ref_style['left'])
                                        + gui.from_pix(ref_style['width'])
                                        - gui.from_pix(self.style['width']) / 2)
        self.style['top'] = gui.to_pix(gui.from_pix(ref_style['top'])
                                       + gui.from_pix(ref_style['height'])
                                       - gui.from_pix(self.style['height']) / 2)
class DragHelper(gui.Widget, gui.EventSource):
    """Drag handle used by the editor to move the selected widget.

    The handle is an absolutely positioned icon overlaid on the target
    widget's top-left corner; while it is dragged, the target's
    'left'/'top' styles are updated accordingly.
    """

    def __init__(self, project, **kwargs):
        super(DragHelper, self).__init__(**kwargs)
        gui.EventSource.__init__(self)
        # visual setup: small floating icon with a transparent background
        self.style.update({'float': 'none',
                           'background-image': "url('/res/drag.png')",
                           'background-color': "rgba(255,255,255,0.0)",
                           'position': 'absolute',
                           'left': '0px',
                           'top': '0px'})
        self.project = project
        self.parent = None        # container the helper is appended to
        self.refWidget = None     # widget currently being moved
        self.active = False       # True while a drag is in progress
        self.onmousedown.connect(self.start_drag)
        self.origin_x = -1
        self.origin_y = -1

    def setup(self, refWidget, newParent):
        """Attach the handle to *refWidget*, placing it inside *newParent*.

        Calling setup(None, None) only detaches the handle.
        """
        if self.parent:
            try:
                self.parent.remove_child(self)
            except:
                # there was no DragHelper placed
                pass
        if newParent == None:
            return
        self.parent = newParent
        self.refWidget = refWidget
        # statically positioned widgets cannot be moved by dragging
        self.static_positioning = ('position' in self.refWidget.style and
                                   self.refWidget.style['position'] != 'absolute')
        if self.static_positioning:
            return
        try:
            self.parent.append(self)
        except:
            # the selected widget's parent can't contain a DragHelper
            pass
        self.update_position()

    def start_drag(self, emitter, x, y):
        # begin following mouse movement over the whole project area
        self.active = True
        self.project.onmousemove.connect(self.on_drag)
        self.project.onmouseup.connect(self.stop_drag)
        self.project.onmouseleave.connect(self.stop_drag, 0, 0)
        self.origin_x = -1
        self.origin_y = -1

    @gui.decorate_event
    def stop_drag(self, emitter, x, y):
        self.active = False
        self.update_position()
        return ()

    def on_drag(self, emitter, x, y):
        """Move refWidget by the mouse displacement since drag start."""
        if not self.active:
            return
        if self.origin_x == -1:
            # first move event: remember the starting point and original position
            self.origin_x = int(x)
            self.origin_y = int(y)
            self.refWidget_origin_x = gui.from_pix(self.refWidget.style['left'])
            self.refWidget_origin_y = gui.from_pix(self.refWidget.style['top'])
        else:
            dx = int(x) - self.origin_x
            dy = int(y) - self.origin_y
            self.refWidget.style['left'] = gui.to_pix(self.refWidget_origin_x + dx)
            self.refWidget.style['top'] = gui.to_pix(self.refWidget_origin_y + dy)
            self.update_position()

    def update_position(self):
        """Keep the handle glued to refWidget's top-left corner."""
        self.style['position'] = 'absolute'
        if not self.refWidget:
            return
        ref_style = self.refWidget.style
        if 'left' in ref_style and 'top' in ref_style:
            # round-trip through from_pix/to_pix normalizes the value to 'Npx'
            self.style['left'] = gui.to_pix(gui.from_pix(ref_style['left']))
            self.style['top'] = gui.to_pix(gui.from_pix(ref_style['top']))
class Project(gui.Widget):
    """ The editor project is pure html with specific tag attributes
    This class loads and save the project file,
    and also compiles a project in python code.
    """

    def __init__(self, **kwargs):
        super(Project, self).__init__(**kwargs)
        self.style.update({'position':'relative',
                           'overflow':'auto',
                           'background-color':'rgb(250,248,240)',
                           'background-image':"url('/res/background.png')"})

    def new(self):
        """Reset the project to an empty state (currently a no-op)."""
        #remove the main widget
        pass

    def load(self, ifile, configuration):
        """Import *ifile* as a python module and rebuild its UI tree.

        Populates ``configuration.configDict`` from the module's
        ``configuration`` dict, finds the App subclass and calls its
        ``construct_ui``.  Returns the root widget, or ``None`` when no
        App subclass is found.
        """
        self.ifile = ifile
        _module = imp.load_source('project', self.ifile) #imp.load_source('module.name', '/path/to/file.py')
        configuration.configDict = _module.configuration
        #finding App class
        clsmembers = inspect.getmembers(_module, inspect.isclass)
        app_init_fnc = None
        for (name, value) in clsmembers:
            if issubclass(value,App) and name!='App':
                app_init_fnc = value
        if app_init_fnc==None:
            return None
        # neutralize the loaded project's own event handlers while editing:
        # every user-defined method becomes a no-op listener on this Project
        members_list = app_init_fnc.__dict__.values()
        for m in members_list:
            if inspect.isfunction(m) and m.__name__ not in ['__init__', 'main', 'idle']:
                setattr(self, m.__name__, self.fakeListenerFunc)
        return app_init_fnc.construct_ui(self)

    def fakeListenerFunc(*args):
        # no-op stand-in for user listeners; *args swallows self and any params
        pass

    def check_pending_listeners(self, widget, widgetVarName, force=False):
        """Emit listener-registration code for every pending event whose
        source and listener subtrees both pass through *widget*.

        Returns the generated python code fragment; marks each emitted
        event as done and appends the listener function stub to
        ``code_declared_classes``.  With ``force=True`` all still-pending
        events are emitted regardless of path matching.
        """
        code_nested_listener = ''
        #checking if pending listeners code production can be solved
        for event in self.pending_listener_registration:
            #print("widget: %s source:%s listener:%s"%(str(id(widget)),event['eventsource'].path_to_this_widget,event['eventlistener'].path_to_this_widget))
            if force or (hasattr(event['eventsource'],'path_to_this_widget') and hasattr(event['eventlistener'],'path_to_this_widget')):
                if (force or (widget.attributes['editor_varname'] in event['eventsource'].path_to_this_widget and widget.attributes['editor_varname'] in event['eventlistener'].path_to_this_widget)) and event['done']==False:
                    #this means that this is the root node from where the leafs(listener and source) departs, hre can be set the listener
                    event['done'] = True
                    #event source chain
                    sourcename = 'self'
                    source_filtered_path=event['eventsource'].path_to_this_widget[:]
                    listener_filtered_path=event['eventlistener'].path_to_this_widget[:]
                    # strip the common prefix (the path down to *widget*)
                    for v in widget.path_to_this_widget:
                        source_filtered_path.remove(v)
                        listener_filtered_path.remove(v)
                    if force or (self.children['root']==widget and not (widget.attributes['editor_newclass'] == 'True')):
                        sourcename = self.children['root'].attributes['editor_varname']
                        if self.children['root'].attributes['editor_varname'] in source_filtered_path:
                            source_filtered_path.remove(self.children['root'].attributes['editor_varname'])
                    if len(source_filtered_path)>0:
                        # build a nested children['a'].children['b'] accessor chain
                        sourcename = ("%s.children['" + "'].children['".join(source_filtered_path) + "']")%sourcename
                    #listener chain
                    listenername = "self"
                    if force or (self.children['root']==widget and not (widget.attributes['editor_newclass'] == 'True')):
                        if event['eventlistener'] != self:
                            listenername = self.children['root'].attributes['editor_varname']
                    if len(listener_filtered_path)>0:
                        listenername = ("%s.children['" + "'].children['".join(listener_filtered_path) + "']")%listenername
                    code_nested_listener += prototypes.proto_set_listener%{'sourcename':sourcename,
                                                                           'register_function': event['setoneventfuncname'],
                                                                           'listenername': listenername,
                                                                           'listener_function': event['listenerfuncname']}
                    if not event['eventlistener'].identifier in self.code_declared_classes:
                        self.code_declared_classes[event['eventlistener'].identifier] = ''
                    self.code_declared_classes[event['eventlistener'].identifier] += event['listenerClassFunction']
        return code_nested_listener

    def repr_widget_for_editor(self, widget): #widgetVarName is the name with which the parent calls this instance
        """Recursively produce the python code that allocates *widget*,
        sets its attributes and style, appends its children and registers
        its pending listeners.  Returns the generated code fragment.
        """
        self.known_project_children.append(widget)
        widget.path_to_this_widget.append( widget.attributes['editor_varname'] )
        print(widget.attributes['editor_varname'])
        code_nested = '' #the code strings to return
        if not hasattr( widget, 'attributes' ):
            return '' #no nested code
        widgetVarName = widget.attributes['editor_varname']
        newClass = widget.attributes['editor_newclass'] == 'True'
        classname = 'CLASS' + widgetVarName if newClass else widget.__class__.__name__
        code_nested = prototypes.proto_widget_allocation%{'varname': widgetVarName, 'classname': classname, 'editor_constructor': widget.attributes['editor_constructor'], 'editor_instance_id':widget.identifier}
        # skip editor-internal html attributes when reproducing the widget
        code_nested += prototypes.proto_attribute_setup%{'varname': widgetVarName, 'attr_dict': ','.join('"%s":"%s"'%(key,widget.attributes[key]) for key in widget.attributes.keys() if key not in html_helper.htmlInternallyUsedTags)}
        code_nested += prototypes.proto_style_setup%{'varname': widgetVarName, 'style_dict': ','.join('"%s":"%s"'%(key,widget.style[key]) for key in widget.style.keys())}
        # temporarily restore the widget's original onclick so the editor's
        # selection listener is not serialized into the generated code
        backup_editor_onclick = widget.onclick.callback
        widget.onclick.callback = widget.backup_onclick_listener
        #for all the methods of this widget
        for (setOnEventListenerFuncname,setOnEventListenerFunc) in inspect.getmembers(widget):
            #if the member is decorated by decorate_set_on_listener
            if hasattr(setOnEventListenerFunc, '_event_info'):
                #if there is a callback
                if getattr(widget, setOnEventListenerFuncname).callback:
                    listenerPrototype = setOnEventListenerFunc._event_info['prototype']
                    listener = getattr(widget, setOnEventListenerFuncname).callback.__self__
                    listenerFunctionName = setOnEventListenerFunc._event_info['name'] + "_" + widget.attributes['editor_varname']
                    listenerClassFunction = prototypes.proto_code_function%{'funcname': listenerFunctionName,
                                                                            'parameters': listenerPrototype}
                    self.pending_listener_registration.append({'done':False,
                                                               'eventsource':widget,
                                                               'eventlistener':listener,
                                                               'setoneventfuncname':setOnEventListenerFuncname,
                                                               'listenerfuncname': listenerFunctionName,
                                                               'listenerClassFunction':listenerClassFunction})
        if newClass:
            widgetVarName = 'self'
        children_code_nested = ''
        for child_key in widget.children.keys():
            child = widget.children[child_key]
            if type(child)==str:
                #children_code_nested += prototypes.proto_layout_append%{'parentname':widgetVarName,'varname':"'%s'"%child}
                continue
            if 'editor_varname' not in child.attributes.keys():
                continue
            child.path_to_this_widget = widget.path_to_this_widget[:]
            children_code_nested += self.repr_widget_for_editor(child)
            children_code_nested += prototypes.proto_layout_append%{'parentname':widgetVarName,'varname':"%s,'%s'"%(child.attributes['editor_varname'],child.attributes['editor_varname'])}
        children_code_nested += self.check_pending_listeners(widget, widgetVarName)
        widget.onclick.callback = backup_editor_onclick
        if newClass:# and not (classname in self.code_declared_classes.keys()):
            if not widget.identifier in self.code_declared_classes:
                self.code_declared_classes[widget.identifier] = ''
            self.code_declared_classes[widget.identifier] = prototypes.proto_code_class%{'classname': classname, 'superclassname': widget.attributes['editor_baseclass'],
                                                                                        'nested_code': children_code_nested } + self.code_declared_classes[widget.identifier]
        else:
            code_nested = code_nested + children_code_nested
        return code_nested

    def prepare_path_to_this_widget(self, node):
        """Reset the path_to_this_widget chain of *node* and all its
        editable descendants before a save pass."""
        #here gets initiated to null list the path_to_this_widget chain
        node.path_to_this_widget = []
        for child in node.children.values():
            if type(child)==str:
                continue
            if 'editor_varname' not in child.attributes.keys():
                continue
            self.prepare_path_to_this_widget(child)

    def save(self, save_path_filename, configuration):
        """Compile the project to python source and optionally write it
        to *save_path_filename* (pass None to only print the code)."""
        self.code_declared_classes = {}
        self.pending_listener_registration = list()
        self.known_project_children = [self,] #a list containing widgets that have been parsed and that are considered valid listeners
        self.pending_signals_to_connect = list() #a list containing dicts {listener, emitter, register_function, listener_function}
        compiled_code = ''
        code_classes = ''
        self.path_to_this_widget = []
        self.prepare_path_to_this_widget(self.children['root'])
        ret = self.repr_widget_for_editor( self.children['root'] )
        code_nested = ret + self.check_pending_listeners(self,'self',True)# + self.code_listener_registration[str(id(self))]
        main_code_class = prototypes.proto_code_main_class%{'classname':configuration.configDict[configuration.KEY_PRJ_NAME],
                                                            'config_resourcepath':configuration.configDict[configuration.KEY_RESOURCEPATH],
                                                            'code_nested':code_nested,
                                                            'mainwidgetname':self.children['root'].attributes['editor_varname']}
        # the App's own listener class (if any) is folded into the main class
        if self.identifier in self.code_declared_classes.keys():
            main_code_class += self.code_declared_classes[self.identifier]
            del self.code_declared_classes[self.identifier]
        for key in self.code_declared_classes.keys():
            code_class = self.code_declared_classes[key]
            code_listener_setting = ''
            code_classes += code_class
        code_classes += main_code_class
        compiled_code = prototypes.proto_code_program%{ 'code_classes':code_classes,
                                                        'classname':configuration.configDict[configuration.KEY_PRJ_NAME],
                                                        'configuration':configuration.configDict
                                                        }
        print(compiled_code)
        if save_path_filename!=None:
            f = open(save_path_filename, "w")
            f.write(compiled_code)
            f.close()
class Editor(App):
    """Drag-and-drop GUI builder application for remi projects.

    Hosts the menu bar, toolbar, widget palette, the editable project
    canvas and the attribute/signal editors, plus the drag/resize
    helper overlays.
    """

    def __init__(self, *args):
        editor_res_path = os.path.join(os.path.dirname(__file__), 'res')
        super(Editor, self).__init__(*args, static_file_path=editor_res_path)

    def idle(self):
        # keep the overlay helpers glued to the currently selected widget
        self.resizeHelper.update_position()
        self.dragHelper.update_position()

    def main(self):
        """Build and return the whole editor UI tree."""
        self.mainContainer = gui.Widget(width='100%', height='100%', layout_orientation=gui.Widget.LAYOUT_VERTICAL)
        self.mainContainer.style['background-color'] = 'white'
        self.mainContainer.style['border'] = 'none'
        # --- menu bar -------------------------------------------------
        menubar = gui.MenuBar(height='4%')
        menu = gui.Menu(width='100%',height='100%')
        menu.style['z-index'] = '1'
        m1 = gui.MenuItem('File', width=150, height='100%')
        m10 = gui.MenuItem('New', width=150, height=30)
        m11 = gui.MenuItem('Open', width=150, height=30)
        m12 = gui.MenuItem('Save Your App', width=150, height=30)
        #m12.style['visibility'] = 'hidden'
        m121 = gui.MenuItem('Save', width=100, height=30)
        m122 = gui.MenuItem('Save as', width=100, height=30)
        m1.append([m10, m11, m12])
        m12.append([m121, m122])
        m2 = gui.MenuItem('Edit', width=100, height='100%')
        m21 = gui.MenuItem('Cut', width=100, height=30)
        m22 = gui.MenuItem('Paste', width=100, height=30)
        m2.append([m21, m22])
        m3 = gui.MenuItem('Project Config', width=200, height='100%')
        menu.append([m1, m2, m3])
        menubar.append(menu)
        # --- toolbar --------------------------------------------------
        self.toolbar = editor_widgets.ToolBar(width='100%', height='30px', margin='0px 0px')
        self.toolbar.style['border-bottom'] = '1px solid rgba(0,0,0,.12)'
        self.toolbar.add_command('/res/delete.png', self.toolbar_delete_clicked, 'Delete Widget')
        self.toolbar.add_command('/res/cut.png', self.menu_cut_selection_clicked, 'Cut Widget')
        self.toolbar.add_command('/res/paste.png', self.menu_paste_selection_clicked, 'Paste Widget')
        # --- file dialogs ---------------------------------------------
        self.fileOpenDialog = editor_widgets.EditorFileSelectionDialog('Open Project', 'Select the project file.<br>It have to be a python program created with this editor.', False, '.', True, False, self)
        self.fileOpenDialog.confirm_value.connect(self.on_open_dialog_confirm)
        self.fileSaveAsDialog = editor_widgets.EditorFileSaveDialog('Project Save', 'Select the project folder and type a filename', False, '.', False, True, self)
        self.fileSaveAsDialog.add_fileinput_field('untitled.py')
        self.fileSaveAsDialog.confirm_value.connect(self.on_saveas_dialog_confirm)
        # --- menu wiring ----------------------------------------------
        m10.onclick.connect(self.menu_new_clicked)
        m11.onclick.connect(self.fileOpenDialog.show)
        m121.onclick.connect(self.menu_save_clicked)
        m122.onclick.connect(self.fileSaveAsDialog.show)
        m21.onclick.connect(self.menu_cut_selection_clicked)
        m22.onclick.connect(self.menu_paste_selection_clicked)
        m3.onclick.connect(self.menu_project_config_clicked)
        self.subContainer = gui.HBox(width='100%', height='96%', layout_orientation=gui.Widget.LAYOUT_HORIZONTAL)
        self.subContainer.style.update({'position':'relative',
                                        'overflow':'auto',
                                        'align-items':'stretch'})
        #here are contained the widgets
        self.widgetsCollection = editor_widgets.WidgetCollection(self, width='100%', height='50%')
        # --- project canvas -------------------------------------------
        self.project = Project(width='100%', height='100%')
        self.project.style['min-height'] = '400px'
        self.project.attributes['ondragover'] = "event.preventDefault();"
        # NOTE(review): 'ONDROPPPED' (three P) looks like a typo, but the
        # name is used consistently below — renaming would need a
        # coordinated change; confirm before touching.
        self.EVENT_ONDROPPPED = "on_dropped"
        # client-side drop handler: resize/move adjust the dragged widget's
        # absolute position; 'add' forwards the drop point to the server
        self.project.attributes['ondrop'] = """event.preventDefault();
                var data = JSON.parse(event.dataTransfer.getData('application/json'));
                var params={};
                if( data[0] == 'resize'){
                    document.getElementById(data[1]).style.left = parseInt(document.getElementById(data[1]).style.left) + event.clientX - data[2] + 'px';
                    document.getElementById(data[1]).style.top = parseInt(document.getElementById(data[1]).style.top) + event.clientY - data[3] + 'px';
                    params['left']=document.getElementById(data[1]).style.left;
                    params['top']=document.getElementById(data[1]).style.top;
                }
                if( data[0] == 'add'){
                    params['left']=event.clientX-event.currentTarget.getBoundingClientRect().left;
                    params['top']=event.clientY-event.currentTarget.getBoundingClientRect().top;
                }
                if( data[0] == 'move'){
                    document.getElementById(data[1]).style.left = parseInt(document.getElementById(data[1]).style.left) + event.clientX - data[2] + 'px';
                    document.getElementById(data[1]).style.top = parseInt(document.getElementById(data[1]).style.top) + event.clientY - data[3] + 'px';
                    params['left']=document.getElementById(data[1]).style.left;
                    params['top']=document.getElementById(data[1]).style.top;
                }
                sendCallbackParam(data[1],'%(evt)s',params);
                return false;""" % {'evt':self.EVENT_ONDROPPPED}
        self.project.attributes['editor_varname'] = 'App'
        # forward keydown to the server; suppress default for Delete (46)
        self.project.attributes[self.project.EVENT_ONKEYDOWN] = """
                var params={};
                params['keypressed']=event.keyCode;
                sendCallbackParam('%(id)s','%(evt)s',params);
                if(event.keyCode==46){
                    return false;
                }
                """ % {'id':str(id(self)), 'evt':self.project.EVENT_ONKEYDOWN}
        self.projectConfiguration = editor_widgets.ProjectConfigurationDialog('Project Configuration', 'Write here the configuration for your project.')
        self.attributeEditor = editor_widgets.EditorAttributes(self, width='100%')
        # NOTE(review): 'hide' is not a valid CSS overflow value — probably
        # meant 'hidden'; confirm intended behavior before changing.
        self.attributeEditor.style['overflow'] = 'hide'
        self.signalConnectionManager = editor_widgets.SignalConnectionManager(width='100%', height='50%')
        self.mainContainer.append([menubar, self.subContainer])
        # --- layout: left palette, central canvas, right inspectors ----
        self.subContainerLeft = gui.Widget(width='20%', height='100%')
        self.subContainerLeft.style['position'] = 'relative'
        self.subContainerLeft.style['left'] = '0px'
        self.subContainerLeft.append([self.widgetsCollection, self.signalConnectionManager])
        self.subContainerLeft.add_class('RaisedFrame')
        self.centralContainer = gui.VBox(width='56%', height='100%')
        self.centralContainer.append([self.toolbar, self.project])
        self.subContainerRight = gui.Widget(width='24%', height='100%')
        self.subContainerRight.style.update({'position':'absolute', 'right':'0px', 'overflow':'scroll'})
        self.subContainerRight.add_class('RaisedFrame')
        self.instancesWidget = editor_widgets.InstancesWidget(width='100%')
        self.instancesWidget.treeView.on_tree_item_selected.connect(self.on_instances_widget_selection)
        self.subContainerRight.append([self.instancesWidget, self.attributeEditor])
        self.subContainer.append([self.subContainerLeft, self.centralContainer, self.subContainerRight])
        self.project.style['position'] = 'relative'
        # --- overlay helpers ------------------------------------------
        self.resizeHelper = ResizeHelper(self.project, width=16, height=16)
        self.dragHelper = DragHelper(self.project, width=15, height=15)
        self.resizeHelper.stop_drag.connect(self.on_drag_resize_end)
        self.dragHelper.stop_drag.connect(self.on_drag_resize_end)
        self.menu_new_clicked(None)
        self.projectPathFilename = ''
        self.editCuttedWidget = None #cut operation, contains the cutted tag
        # returning the root widget
        return self.mainContainer

    def on_drag_resize_end(self, emitter):
        # refresh the attribute panel with the widget's new geometry
        self.attributeEditor.set_widget( self.selectedWidget )

    def configure_widget_for_editing(self, widget):
        """ A widget have to be added to the editor, it is configured here in order to be conformant
            to the editor: selection on click, drag-n-drop support and an
            absolute default position.
        """
        if not 'editor_varname' in widget.attributes:
            return
        widget.backup_onclick_listener = widget.onclick.callback
        widget.onclick.connect(self.on_widget_selection)
        #setup of the on_dropped function of the widget in order to manage the dragNdrop
        widget.__class__.on_dropped = on_dropped
        #drag properties
        #widget.style['resize'] = 'both'
        widget.style['overflow'] = 'auto'
        widget.attributes['draggable'] = 'true'
        widget.attributes['ondragstart'] = """this.style.cursor='move'; event.dataTransfer.dropEffect = 'move'; event.dataTransfer.setData('application/json', JSON.stringify(['move',event.target.id,(event.clientX),(event.clientY)]));"""
        widget.attributes['ondragover'] = "event.preventDefault();"
        widget.EVENT_ONDROPPPED = "on_dropped"
        widget.attributes['ondrop'] = """
                var data = JSON.parse(event.dataTransfer.getData('application/json'));
                var params={};
                if( data[0] == 'add'){
                    console.debug('addd---------------------------------------------');
                    sendCallback('%(id)s','%(event_click)s');
                    console.debug('dopo---------------------------------------------');
                    params['left']=event.clientX-event.currentTarget.getBoundingClientRect().left;
                    params['top']=event.clientY-event.currentTarget.getBoundingClientRect().top;
                    sendCallbackParam(data[1],'%(evt)s',params);
                    event.stopPropagation();
                    event.preventDefault();
                }
                return false;""" % {'evt':widget.EVENT_ONDROPPPED, 'id': widget.identifier, 'event_click': widget.EVENT_ONCLICK}
        # tabindex makes the widget focusable so keydown selection works
        widget.attributes['tabindex']=str(self.tabindex)
        if not 'position' in widget.style.keys():
            widget.style['position'] = 'absolute'
        if not 'left' in widget.style.keys():
            widget.style['left'] = '1px'
        if not 'top' in widget.style.keys():
            widget.style['top'] = '1px'
        self.tabindex += 1

    def add_widget_to_editor(self, widget, parent = None, root_tree_node = True):
        """Insert *widget* (and, recursively, its children) into the canvas,
        configuring each node for editing and refreshing the instance tree."""
        if parent == None:
            parent = self.selectedWidget
        self.configure_widget_for_editing(widget)
        key = "root" if parent==self.project else widget.identifier
        if root_tree_node:
            parent.append(widget,key)
            if self.selectedWidget == self.project:
                self.on_widget_selection( widget )
        #dcopy = widget.children.copy()
        for child in widget.children.values():
            if type(child) == str:
                continue
            self.add_widget_to_editor(child, widget, False)
        self.instancesWidget.update(self.project, self.selectedWidget)

    def on_instances_widget_selection(self, instancesWidgetItem, selectedWidget):
        # tree view selection mirrors canvas selection
        self.on_widget_selection(selectedWidget)

    def on_widget_selection(self, widget):
        """Make *widget* the current selection: highlight it and point all
        editor panels and helpers at it."""
        self.remove_box_shadow_selected_widget()
        self.selectedWidget = widget
        self.selectedWidget.style['box-shadow'] = '0 0 10px rgb(33,150,243)'
        self.signalConnectionManager.update(self.selectedWidget, self.project)
        self.attributeEditor.set_widget( self.selectedWidget )
        parent = remi.server.get_method_by_id(self.selectedWidget.attributes['data-parent-widget'])
        self.resizeHelper.setup(widget,parent)
        self.dragHelper.setup(widget,parent)
        self.instancesWidget.select(self.selectedWidget)
        print("selected widget: " + widget.identifier)

    def menu_new_clicked(self, widget):
        """Start a fresh, empty project."""
        print('new project')
        self.project.new()
        self.tabindex = 0 #incremental number to allow widgets selection
        self.selectedWidget = self.project
        self.resizeHelper.setup(None, None)
        self.dragHelper.setup(None, None)
        if 'root' in self.project.children.keys():
            self.project.remove_child( self.project.children['root'] )

    def on_open_dialog_confirm(self, widget, filelist):
        """Load the chosen project file into the canvas."""
        if len(filelist):
            widgetTree = self.project.load(filelist[0], self.projectConfiguration)
            if widgetTree!=None:
                self.add_widget_to_editor( widgetTree )
            self.projectPathFilename = filelist[0]

    def menu_save_clicked(self, widget):
        """Save to the known path, or fall back to the Save-As dialog."""
        #the dragHelper have to be removed
        self.resizeHelper.setup(None, None)
        self.dragHelper.setup(None, None)
        if self.projectPathFilename == '':
            self.fileSaveAsDialog.show()
        else:
            self.remove_box_shadow_selected_widget()
            self.project.save(self.projectPathFilename, self.projectConfiguration)

    def remove_box_shadow_selected_widget(self):
        # drop the selection highlight so it isn't serialized on save
        if 'box-shadow' in self.selectedWidget.style.keys():
            del self.selectedWidget.style['box-shadow']

    def on_saveas_dialog_confirm(self, widget, path):
        """Save the project under a newly chosen path/filename."""
        #the resizeHelper have to be removed
        self.resizeHelper.setup(None, None)
        self.dragHelper.setup(None, None)
        if len(path):
            self.projectPathFilename = path + '/' + self.fileSaveAsDialog.get_fileinput_value()
            print("file:%s"%self.projectPathFilename)
            self.remove_box_shadow_selected_widget()
            self.project.save(self.projectPathFilename, self.projectConfiguration)

    def menu_cut_selection_clicked(self, widget):
        """Detach the selected widget and keep it for a later paste."""
        if self.selectedWidget==self.project:
            return
        self.resizeHelper.setup(None, None)
        self.dragHelper.setup(None, None)
        parent = remi.server.get_method_by_id(self.selectedWidget.attributes['data-parent-widget'])
        self.editCuttedWidget = self.selectedWidget
        parent.remove_child(self.selectedWidget)
        self.selectedWidget = parent
        self.instancesWidget.update(self.project, self.selectedWidget)
        print("tag cutted:" + self.editCuttedWidget.identifier)

    def menu_paste_selection_clicked(self, widget):
        """Re-attach the previously cut widget under the current selection."""
        if self.editCuttedWidget != None:
            key = "root" if self.selectedWidget==self.project else self.editCuttedWidget.identifier
            self.selectedWidget.append(self.editCuttedWidget, key)
            self.editCuttedWidget = None
            self.instancesWidget.update(self.project, self.selectedWidget)

    def menu_project_config_clicked(self, widget):
        self.projectConfiguration.show(self)

    def toolbar_delete_clicked(self, widget):
        """Permanently remove the selected widget from the canvas."""
        if self.selectedWidget==self.project:
            return
        self.resizeHelper.setup(None, None)
        self.dragHelper.setup(None, None)
        parent = remi.server.get_method_by_id(self.selectedWidget.attributes['data-parent-widget'])
        parent.remove_child(self.selectedWidget)
        self.instancesWidget.update(self.project, self.selectedWidget)
        self.selectedWidget = parent
        print("tag deleted")

    def onkeydown(self, keypressed):
        # 46 is the Delete keycode forwarded by the client-side handler
        if str(keypressed)=='46': #46 the delete keycode
            self.toolbar_delete_clicked(None)
        print("Key pressed: " + str(keypressed))
def on_dropped(self, left, top):
    """Drop handler injected into edited widgets as a bound method:
    applies the drop coordinates to the widget's absolute CSS position,
    substituting '0px' for an empty coordinate string."""
    self.style['left'] = '0px' if len(left) < 1 else left
    self.style['top'] = '0px' if len(top) < 1 else top
def main():
    """Launch the editor web application on all interfaces, port 8082."""
    #p = Project()
    #root = p.load('./example_project.py')
    #p.append(root, "root")
    #p.save(None)
    # starts the webserver
    # optional parameters
    # start(MyApp,address='127.0.0.1', port=8081, multiple_instance=False,enable_file_cache=True, update_interval=0.1, start_browser=True)
    start(Editor, debug=False, address='0.0.0.0', port=8082, update_interval=0.01)
# script entry point
if __name__ == "__main__":
    main()
|
from itertools import product as cartes
from sympy import (
limit, exp, oo, log, sqrt, Limit, sin, floor, cos, acos, ceiling,
atan, gamma, Symbol, S, pi, Integral, cot, Rational, I, zoo,
tan, cot, integrate, Sum, sign, Function, subfactorial, PoleError)
from sympy.series.limits import heuristics
from sympy.series.order import Order
from sympy.abc import x, y, z
from sympy.utilities.pytest import XFAIL, raises
def test_basic1():
    """Elementary limits: polynomials, exp/log, one-sided poles and the
    bidirectional dir='real' mode."""
    assert limit(x, x, oo) == oo
    assert limit(x, x, -oo) == -oo
    assert limit(-x, x, oo) == -oo
    assert limit(x**2, x, -oo) == oo
    assert limit(-x**2, x, oo) == -oo
    assert limit(x*log(x), x, 0, dir="+") == 0
    assert limit(1/x, x, oo) == 0
    assert limit(exp(x), x, oo) == oo
    assert limit(-exp(x), x, oo) == -oo
    assert limit(exp(x)/x, x, oo) == oo
    assert limit(1/x - exp(-x), x, oo) == 0
    assert limit(x + 1/x, x, oo) == oo
    assert limit(x - x**2, x, oo) == -oo
    assert limit((1 + x)**(1 + sqrt(2)), x, 0) == 1
    assert limit((1 + x)**oo, x, 0) == oo
    assert limit((1 + x)**oo, x, 0, dir='-') == 0
    assert limit((1 + x + y)**oo, x, 0, dir='-') == (1 + y)**(oo)
    assert limit(y/x/log(x), x, 0) == -oo*sign(y)
    assert limit(cos(x + y)/x, x, 0) == sign(cos(y))*oo
    raises(NotImplementedError, lambda: limit(Sum(1/x, (x, 1, y)) -
           log(y), y, oo))
    raises(NotImplementedError, lambda: limit(Sum(1/x, (x, 1, y)) - 1/y, y, oo))
    assert limit(gamma(1/x + 3), x, oo) == 2
    assert limit(S.NaN, x, -oo) == S.NaN
    assert limit(Order(2)*x, x, S.NaN) == S.NaN
    assert limit(1/(x - 1), x, 1, dir="+") == oo
    assert limit(1/(x - 1), x, 1, dir="-") == -oo
    assert limit(1/(5 - x)**3, x, 5, dir="+") == -oo
    assert limit(1/(5 - x)**3, x, 5, dir="-") == oo
    assert limit(1/sin(x), x, pi, dir="+") == -oo
    assert limit(1/sin(x), x, pi, dir="-") == oo
    assert limit(1/cos(x), x, pi/2, dir="+") == -oo
    assert limit(1/cos(x), x, pi/2, dir="-") == oo
    assert limit(1/tan(x**3), x, (2*pi)**(S(1)/3), dir="+") == oo
    assert limit(1/tan(x**3), x, (2*pi)**(S(1)/3), dir="-") == -oo
    assert limit(1/cot(x)**3, x, (3*pi/2), dir="+") == -oo
    assert limit(1/cot(x)**3, x, (3*pi/2), dir="-") == oo

    # approaching 0
    # from dir="+"
    assert limit(1 + 1/x, x, 0) == oo
    # from dir='-'
    # Add
    assert limit(1 + 1/x, x, 0, dir='-') == -oo
    # Pow
    assert limit(x**(-2), x, 0, dir='-') == oo
    assert limit(x**(-3), x, 0, dir='-') == -oo
    assert limit(1/sqrt(x), x, 0, dir='-') == (-oo)*I
    assert limit(x**2, x, 0, dir='-') == 0
    assert limit(sqrt(x), x, 0, dir='-') == 0
    assert limit(x**-pi, x, 0, dir='-') == oo*sign((-1)**(-pi))
    assert limit((1 + cos(x))**oo, x, 0) == oo

    # dir='real': both directions must agree, otherwise PoleError
    assert limit(x**2, x, 0, dir='real') == 0
    assert limit(exp(x), x, 0, dir='real') == 1
    raises(PoleError, lambda: limit(1/x, x, 0, dir='real'))
def test_basic2():
    """Limits mixing exponentials with polynomial/constant terms."""
    assert limit(x**x, x, 0, dir="+") == 1
    assert limit((exp(x) - 1)/x, x, 0) == 1
    assert limit(1 + 1/x, x, oo) == 1
    assert limit(-exp(1/x), x, oo) == -1
    assert limit(x + exp(-x), x, oo) == oo
    assert limit(x + exp(-x**2), x, oo) == oo
    assert limit(x + exp(-exp(x)), x, oo) == oo
    assert limit(13 + 1/x - exp(-x), x, oo) == 13
def test_basic3():
    """One-sided limits of 1/x at the pole x=0."""
    assert limit(1/x, x, 0, dir="+") == oo
    assert limit(1/x, x, 0, dir="-") == -oo
def test_basic4():
    """Limits with a symbolic parameter y, and a related definite integral."""
    assert limit(2*x + y*x, x, 0) == 0
    assert limit(2*x + y*x, x, 1) == 2 + y
    assert limit(2*x**8 + y*x**(-3), x, -2) == 512 - y/8
    assert limit(sqrt(x + 1) - sqrt(x), x, oo) == 0
    assert integrate(1/(x**3 + 1), (x, 0, oo)) == 2*pi*sqrt(3)/9
def test_basic5():
    """limit() stays unevaluated when the function evaluates to nan at oo."""
    class my(Function):
        @classmethod
        def eval(cls, arg):
            if arg is S.Infinity:
                return S.NaN
    assert limit(my(x), x, oo) == Limit(my(x), x, oo)
def test_issue_3885():
    """Substitution-style limit in one of several symbols."""
    assert limit(x*y + x*z, z, 2) == x*y + 2*x
def test_Limit():
    """Limit objects stay unevaluated until .doit() is called."""
    assert Limit(sin(x)/x, x, 0) != 1
    assert Limit(sin(x)/x, x, 0).doit() == 1
def test_floor():
    """One-sided limits of floor() at integer discontinuities."""
    assert limit(floor(x), x, -2, "+") == -2
    assert limit(floor(x), x, -2, "-") == -3
    assert limit(floor(x), x, -1, "+") == -1
    assert limit(floor(x), x, -1, "-") == -2
    assert limit(floor(x), x, 0, "+") == 0
    assert limit(floor(x), x, 0, "-") == -1
    assert limit(floor(x), x, 1, "+") == 1
    assert limit(floor(x), x, 1, "-") == 0
    assert limit(floor(x), x, 2, "+") == 2
    assert limit(floor(x), x, 2, "-") == 1
    assert limit(floor(x), x, 248, "+") == 248
    assert limit(floor(x), x, 248, "-") == 247
def test_floor_requires_robust_assumptions():
    """floor() limits of trig arguments, needing sign analysis near 0."""
    assert limit(floor(sin(x)), x, 0, "+") == 0
    assert limit(floor(sin(x)), x, 0, "-") == -1
    assert limit(floor(cos(x)), x, 0, "+") == 0
    assert limit(floor(cos(x)), x, 0, "-") == 0
    assert limit(floor(5 + sin(x)), x, 0, "+") == 5
    assert limit(floor(5 + sin(x)), x, 0, "-") == 4
    assert limit(floor(5 + cos(x)), x, 0, "+") == 5
    assert limit(floor(5 + cos(x)), x, 0, "-") == 5
def test_ceiling():
    """One-sided limits of ceiling() at integer discontinuities."""
    assert limit(ceiling(x), x, -2, "+") == -1
    assert limit(ceiling(x), x, -2, "-") == -2
    assert limit(ceiling(x), x, -1, "+") == 0
    assert limit(ceiling(x), x, -1, "-") == -1
    assert limit(ceiling(x), x, 0, "+") == 1
    assert limit(ceiling(x), x, 0, "-") == 0
    assert limit(ceiling(x), x, 1, "+") == 2
    assert limit(ceiling(x), x, 1, "-") == 1
    assert limit(ceiling(x), x, 2, "+") == 3
    assert limit(ceiling(x), x, 2, "-") == 2
    assert limit(ceiling(x), x, 248, "+") == 249
    assert limit(ceiling(x), x, 248, "-") == 248
def test_ceiling_requires_robust_assumptions():
    """ceiling() limits of trig arguments, needing sign analysis near 0."""
    assert limit(ceiling(sin(x)), x, 0, "+") == 1
    assert limit(ceiling(sin(x)), x, 0, "-") == 0
    assert limit(ceiling(cos(x)), x, 0, "+") == 1
    assert limit(ceiling(cos(x)), x, 0, "-") == 1
    assert limit(ceiling(5 + sin(x)), x, 0, "+") == 6
    assert limit(ceiling(5 + sin(x)), x, 0, "-") == 5
    assert limit(ceiling(5 + cos(x)), x, 0, "+") == 6
    assert limit(ceiling(5 + cos(x)), x, 0, "-") == 6
def test_atan():
    """atan limits with an extended_real symbol."""
    x = Symbol("x", extended_real=True)
    assert limit(atan(x)*sin(1/x), x, 0) == 0
    assert limit(atan(x) + sqrt(x + 1) - sqrt(x), x, oo) == pi/2
def test_abs():
    """Limits of absolute values of trig expressions at 0."""
    assert limit(abs(x), x, 0) == 0
    assert limit(abs(sin(x)), x, 0) == 0
    assert limit(abs(cos(x)), x, 0) == 1
    assert limit(abs(sin(x + 1)), x, 0) == sin(1)
def test_heuristic():
    """The heuristics() fallback used when gruntz cannot decide."""
    x = Symbol("x", extended_real=True)
    assert heuristics(sin(1/x) + atan(x), x, 0, '+') == sin(oo)
    assert limit(log(2 + sqrt(atan(x))*sqrt(sin(1/x))), x, 0) == log(2)
def test_issue_3871():
    """Decay of -exp(-z*x)/z as x -> oo, via both limit() and .limit()."""
    z = Symbol("z", positive=True)
    f = -1/z*exp(-z*x)
    assert limit(f, x, oo) == 0
    assert f.limit(x, oo) == 0
def test_exponential():
    """Classic (1 + x/n)**n -> exp(x) family of limits."""
    n = Symbol('n')
    x = Symbol('x', extended_real=True)
    assert limit((1 + x/n)**n, n, oo) == exp(x)
    assert limit((1 + x/(2*n))**n, n, oo) == exp(x/2)
    assert limit((1 + x/(2*n + 1))**n, n, oo) == exp(x/2)
    assert limit(((x - 1)/(x + 1))**x, x, oo) == exp(-2)
    assert limit(1 + (1 + 1/x)**x, x, oo) == 1 + S.Exp1
@XFAIL
def test_exponential2():
    """Known failure: (1 + x/(n + sin(n)))**n -> exp(x)."""
    n = Symbol('n')
    assert limit((1 + x/(n + sin(n)))**n, n, oo) == exp(x)
def test_doit():
    """Limit.doit() evaluates through a contained Integral."""
    f = Integral(2 * x, x)
    l = Limit(f, x, oo)
    assert l.doit() == oo
@XFAIL
def test_doit2():
    """Known failure: doit(deep=False) should leave the Limit unevaluated."""
    f = Integral(2 * x, x)
    l = Limit(f, x, oo)
    # limit() breaks on the contained Integral.
    assert l.doit(deep=False) == l
def test_issue_3792():
    """Limits at a finite point reduce to plain substitution."""
    assert limit( (1 - cos(x))/x**2, x, S(1)/2) == 4 - 4*cos(S(1)/2)
    assert limit(sin(sin(x + 1) + 1), x, 0) == sin(1 + sin(1))
    assert limit(abs(sin(x + 1) + 1), x, 0) == 1 + sin(1)
def test_issue_4090():
    """Rational/log limits at x=2, including symbolic pi terms."""
    assert limit(1/(x + 3), x, 2) == S(1)/5
    assert limit(1/(x + pi), x, 2) == S(1)/(2 + pi)
    assert limit(log(x)/(x**2 + 3), x, 2) == log(2)/7
    assert limit(log(x)/(x**2 + pi), x, 2) == log(2)/(4 + pi)
def test_issue_4547():
    """cot() limits at its pole (0) and its zero (pi/2)."""
    assert limit(cot(x), x, 0, dir='+') == oo
    assert limit(cot(x), x, pi/2, dir='+') == 0
def test_issue_5164():
    """Limits of powers with a Float exponent."""
    assert limit(x**0.5, x, oo) == oo**0.5 == oo
    assert limit(x**0.5, x, 16) == S(16)**0.5
    assert limit(x**0.5, x, 0) == 0
    assert limit(x**(-0.5), x, oo) == 0
    assert limit(x**(-0.5), x, 4) == S(4)**(-0.5)
def test_issue_5183():
    """Directional limits of y**(s*e) at 0 over all sign/exponent/direction
    combinations, checked against a precomputed results table."""
    # using list(...) so py.test can recalculate values
    tests = list(cartes([x, -x],
                        [-1, 1],
                        [2, 3, Rational(1, 2), Rational(2, 3)],
                        ['-', '+']))
    results = (oo, oo, -oo, oo, -oo*I, oo, -oo*(-1)**Rational(1, 3), oo,
               0, 0, 0, 0, 0, 0, 0, 0,
               oo, oo, oo, -oo, oo, -oo*I, oo, -oo*(-1)**Rational(1, 3),
               0, 0, 0, 0, 0, 0, 0, 0)
    assert len(tests) == len(results)
    for i, (args, res) in enumerate(zip(tests, results)):
        y, s, e, d = args
        eq = y**(s*e)
        try:
            assert limit(eq, x, 0, dir=d) == res
        except AssertionError:
            if 0:  # change to 1 if you want to see the failing tests
                print()
                print(i, res, eq, d, limit(eq, x, 0, dir=d))
            else:
                # fixed: re-raise the caught failure instead of the former
                # ``assert None``, which discarded the original message
                # and traceback context
                raise
def test_issue_5184():
    """Oscillating and gamma limits at oo; bounded*vanishing at 0."""
    assert limit(sin(x)/x, x, oo) == 0
    assert limit(atan(x), x, oo) == pi/2
    assert limit(gamma(x), x, oo) == oo
    assert limit(cos(x)/x, x, oo) == 0
    assert limit(gamma(x), x, Rational(1, 2)) == sqrt(pi)
    r = Symbol('r', extended_real=True, finite=True)
    assert limit(r*sin(1/r), r, 0) == 0
def test_issue_5229():
    """(1 + y)**(1/y) tends to e as y -> 0."""
    assert limit((1 + y)**(1/y) - S.Exp1, y, 0) == 0
def test_issue_4546():
    """Directional limits of cot/tan at multiples of pi/2, checked against
    a precomputed results table."""
    # using list(...) so py.test can recalculate values
    tests = list(cartes([cot, tan],
                        [-pi/2, 0, pi/2, pi, 3*pi/2],
                        ['-', '+']))
    results = (0, 0, -oo, oo, 0, 0, -oo, oo, 0, 0,
               oo, -oo, 0, 0, oo, -oo, 0, 0, oo, -oo)
    assert len(tests) == len(results)
    for i, (args, res) in enumerate(zip(tests, results)):
        f, l, d = args
        eq = f(x)
        try:
            assert limit(eq, x, l, dir=d) == res
        except AssertionError:
            if 0:  # change to 1 if you want to see the failing tests
                print()
                print(i, res, eq, l, d, limit(eq, x, l, dir=d))
            else:
                # fixed: re-raise the caught failure instead of the former
                # ``assert None``, which discarded the original message
                # and traceback context
                raise
def test_issue_3934():
assert limit((1 + x**log(3))**(1/x), x, 0) == 1
assert limit((5**(1/x) + 3**(1/x))**x, x, 0) == 5
def test_calculate_series():
# needs gruntz calculate_series to go to n = 32
assert limit(x**(S(77)/3)/(1 + x**(S(77)/3)), x, oo) == 1
# needs gruntz calculate_series to go to n = 128
assert limit(x**101.1/(1 + x**101.1), x, oo) == 1
def test_issue_5955():
assert limit((x**16)/(1 + x**16), x, oo) == 1
assert limit((x**100)/(1 + x**100), x, oo) == 1
assert limit((x**1885)/(1 + x**1885), x, oo) == 1
assert limit((x**1000/((x + 1)**1000 + exp(-x))), x, oo) == 1
def test_newissue():
assert limit(exp(1/sin(x))/exp(cot(x)), x, 0) == 1
def test_extended_real_line():
assert limit(x - oo, x, oo) == -oo
assert limit(oo - x, x, -oo) == oo
assert limit(x**2/(x - 5) - oo, x, oo) == -oo
assert limit(1/(x + sin(x)) - oo, x, 0) == -oo
assert limit(oo/x, x, oo) == oo
assert limit(x - oo + 1/x, x, oo) == -oo
assert limit(x - oo + 1/x, x, 0) == -oo
@XFAIL
def test_order_oo():
x = Symbol('x', positive=True, finite=True)
assert Order(x)*oo != Order(1, x)
assert limit(oo/(x**2 - 4), x, oo) == oo
def test_issue_5436():
raises(NotImplementedError, lambda: limit(exp(x*y), x, oo))
raises(NotImplementedError, lambda: limit(exp(-x*y), x, oo))
def test_Limit_dir():
raises(TypeError, lambda: Limit(x, x, 0, dir=0))
raises(ValueError, lambda: Limit(x, x, 0, dir='0'))
def test_polynomial():
assert limit((x + 1)**1000/((x + 1)**1000 + 1), x, oo) == 1
assert limit((x + 1)**1000/((x + 1)**1000 + 1), x, -oo) == 1
def test_rational():
assert limit(1/y - (1/(y + x) + x/(y + x)/y)/z, x, oo) == (z - 1)/(y*z)
assert limit(1/y - (1/(y + x) + x/(y + x)/y)/z, x, -oo) == (z - 1)/(y*z)
def test_issue_5740():
assert limit(log(x)*z - log(2*x)*y, x, 0) == oo*sign(y - z)
def test_issue_6366():
n = Symbol('n', integer=True, positive=True)
r = (n + 1)*x**(n + 1)/(x**(n + 1) - 1) - x/(x - 1)
assert limit(r, x, 1).simplify() == n/2
def test_factorial():
from sympy import factorial, E
f = factorial(x)
assert limit(f, x, oo) == oo
assert limit(x/f, x, oo) == 0
# see Stirling's approximation:
# http://en.wikipedia.org/wiki/Stirling's_approximation
assert limit(f/(sqrt(2*pi*x)*(x/E)**x), x, oo) == 1
assert limit(f, x, -oo) == factorial(-oo)
assert limit(f, x, x**2) == factorial(x**2)
assert limit(f, x, -x**2) == factorial(-x**2)
def test_issue_6560():
e = 5*x**3/4 - 3*x/4 + (y*(3*x**2/2 - S(1)/2) + \
35*x**4/8 - 15*x**2/4 + S(3)/8)/(2*(y + 1))
assert limit(e, y, oo) == (5*x**3 + 3*x**2 - 3*x - 1)/4
def test_issue_5172():
n = Symbol('n')
r = Symbol('r', positive=True)
c = Symbol('c')
p = Symbol('p', positive=True)
m = Symbol('m', negative=True)
expr = ((2*n*(n - r + 1)/(n + r*(n - r + 1)))**c + \
(r - 1)*(n*(n - r + 2)/(n + r*(n - r + 1)))**c - n)/(n**c - n)
expr = expr.subs(c, c + 1)
assert limit(expr.subs(c, m), n, oo) == 1
assert limit(expr.subs(c, p), n, oo).simplify() == \
(2**(p + 1) + r - 1)/(r + 1)**(p + 1)
def test_issue_7088():
a = Symbol('a')
assert limit(sqrt(x/(x + a)), x, oo) == 1
def test_issue_6364():
a = Symbol('a')
e = z/(1 - sqrt(1 + z)*sin(a)**2 - sqrt(1 - z)*cos(a)**2)
assert limit(e, z, 0).simplify() == 2/cos(2*a)
def test_issue_4099():
a = Symbol('a')
assert limit(a/x, x, 0) == oo*sign(a)
assert limit(-a/x, x, 0) == -oo*sign(a)
assert limit(-a*x, x, oo) == -oo*sign(a)
assert limit(a*x, x, oo) == oo*sign(a)
def test_issue_4503():
dx = Symbol('dx')
assert limit((sqrt(1 + exp(x + dx)) - sqrt(1 + exp(x)))/dx, dx, 0) == \
exp(x)/(2*sqrt(exp(x) + 1))
def test_issue_8730():
assert limit(subfactorial(x), x, oo) == oo
def test_omgissue_55():
assert limit((x + exp(x))/(x - 1), x, -oo) == 1
assert limit((x*exp(x))/(exp(x) - 1), x, -oo) == 0 # issue 2929
def test_issue_8061():
assert limit(4**(acos(1/(1 + x**2))**2)/log(1 + x, 4), x, 0) == oo
def test_issue_8229():
assert limit((x**Rational(1, 4) - 2)/(sqrt(x) - 4)**Rational(2, 3),
x, 16) == 0
Improve test_issue_4546/5183: remove the inline `if 0:` print-debug scaffolding (use a debugger to inspect failures instead)
from itertools import product as cartes
from sympy import (
limit, exp, oo, log, sqrt, Limit, sin, floor, cos, acos, ceiling,
atan, gamma, Symbol, S, pi, Integral, cot, Rational, I, zoo,
tan, cot, integrate, Sum, sign, Function, subfactorial, PoleError)
from sympy.series.limits import heuristics
from sympy.series.order import Order
from sympy.abc import x, y, z
from sympy.utilities.pytest import XFAIL, raises
def test_basic1():
assert limit(x, x, oo) == oo
assert limit(x, x, -oo) == -oo
assert limit(-x, x, oo) == -oo
assert limit(x**2, x, -oo) == oo
assert limit(-x**2, x, oo) == -oo
assert limit(x*log(x), x, 0, dir="+") == 0
assert limit(1/x, x, oo) == 0
assert limit(exp(x), x, oo) == oo
assert limit(-exp(x), x, oo) == -oo
assert limit(exp(x)/x, x, oo) == oo
assert limit(1/x - exp(-x), x, oo) == 0
assert limit(x + 1/x, x, oo) == oo
assert limit(x - x**2, x, oo) == -oo
assert limit((1 + x)**(1 + sqrt(2)), x, 0) == 1
assert limit((1 + x)**oo, x, 0) == oo
assert limit((1 + x)**oo, x, 0, dir='-') == 0
assert limit((1 + x + y)**oo, x, 0, dir='-') == (1 + y)**(oo)
assert limit(y/x/log(x), x, 0) == -oo*sign(y)
assert limit(cos(x + y)/x, x, 0) == sign(cos(y))*oo
raises(NotImplementedError, lambda: limit(Sum(1/x, (x, 1, y)) -
log(y), y, oo))
raises(NotImplementedError, lambda: limit(Sum(1/x, (x, 1, y)) - 1/y, y, oo))
assert limit(gamma(1/x + 3), x, oo) == 2
assert limit(S.NaN, x, -oo) == S.NaN
assert limit(Order(2)*x, x, S.NaN) == S.NaN
assert limit(1/(x - 1), x, 1, dir="+") == oo
assert limit(1/(x - 1), x, 1, dir="-") == -oo
assert limit(1/(5 - x)**3, x, 5, dir="+") == -oo
assert limit(1/(5 - x)**3, x, 5, dir="-") == oo
assert limit(1/sin(x), x, pi, dir="+") == -oo
assert limit(1/sin(x), x, pi, dir="-") == oo
assert limit(1/cos(x), x, pi/2, dir="+") == -oo
assert limit(1/cos(x), x, pi/2, dir="-") == oo
assert limit(1/tan(x**3), x, (2*pi)**(S(1)/3), dir="+") == oo
assert limit(1/tan(x**3), x, (2*pi)**(S(1)/3), dir="-") == -oo
assert limit(1/cot(x)**3, x, (3*pi/2), dir="+") == -oo
assert limit(1/cot(x)**3, x, (3*pi/2), dir="-") == oo
# approaching 0
# from dir="+"
assert limit(1 + 1/x, x, 0) == oo
# from dir='-'
# Add
assert limit(1 + 1/x, x, 0, dir='-') == -oo
# Pow
assert limit(x**(-2), x, 0, dir='-') == oo
assert limit(x**(-3), x, 0, dir='-') == -oo
assert limit(1/sqrt(x), x, 0, dir='-') == (-oo)*I
assert limit(x**2, x, 0, dir='-') == 0
assert limit(sqrt(x), x, 0, dir='-') == 0
assert limit(x**-pi, x, 0, dir='-') == oo*sign((-1)**(-pi))
assert limit((1 + cos(x))**oo, x, 0) == oo
assert limit(x**2, x, 0, dir='real') == 0
assert limit(exp(x), x, 0, dir='real') == 1
raises(PoleError, lambda: limit(1/x, x, 0, dir='real'))
def test_basic2():
assert limit(x**x, x, 0, dir="+") == 1
assert limit((exp(x) - 1)/x, x, 0) == 1
assert limit(1 + 1/x, x, oo) == 1
assert limit(-exp(1/x), x, oo) == -1
assert limit(x + exp(-x), x, oo) == oo
assert limit(x + exp(-x**2), x, oo) == oo
assert limit(x + exp(-exp(x)), x, oo) == oo
assert limit(13 + 1/x - exp(-x), x, oo) == 13
def test_basic3():
assert limit(1/x, x, 0, dir="+") == oo
assert limit(1/x, x, 0, dir="-") == -oo
def test_basic4():
assert limit(2*x + y*x, x, 0) == 0
assert limit(2*x + y*x, x, 1) == 2 + y
assert limit(2*x**8 + y*x**(-3), x, -2) == 512 - y/8
assert limit(sqrt(x + 1) - sqrt(x), x, oo) == 0
assert integrate(1/(x**3 + 1), (x, 0, oo)) == 2*pi*sqrt(3)/9
def test_basic5():
class my(Function):
@classmethod
def eval(cls, arg):
if arg is S.Infinity:
return S.NaN
assert limit(my(x), x, oo) == Limit(my(x), x, oo)
def test_issue_3885():
assert limit(x*y + x*z, z, 2) == x*y + 2*x
def test_Limit():
assert Limit(sin(x)/x, x, 0) != 1
assert Limit(sin(x)/x, x, 0).doit() == 1
def test_floor():
assert limit(floor(x), x, -2, "+") == -2
assert limit(floor(x), x, -2, "-") == -3
assert limit(floor(x), x, -1, "+") == -1
assert limit(floor(x), x, -1, "-") == -2
assert limit(floor(x), x, 0, "+") == 0
assert limit(floor(x), x, 0, "-") == -1
assert limit(floor(x), x, 1, "+") == 1
assert limit(floor(x), x, 1, "-") == 0
assert limit(floor(x), x, 2, "+") == 2
assert limit(floor(x), x, 2, "-") == 1
assert limit(floor(x), x, 248, "+") == 248
assert limit(floor(x), x, 248, "-") == 247
def test_floor_requires_robust_assumptions():
assert limit(floor(sin(x)), x, 0, "+") == 0
assert limit(floor(sin(x)), x, 0, "-") == -1
assert limit(floor(cos(x)), x, 0, "+") == 0
assert limit(floor(cos(x)), x, 0, "-") == 0
assert limit(floor(5 + sin(x)), x, 0, "+") == 5
assert limit(floor(5 + sin(x)), x, 0, "-") == 4
assert limit(floor(5 + cos(x)), x, 0, "+") == 5
assert limit(floor(5 + cos(x)), x, 0, "-") == 5
def test_ceiling():
assert limit(ceiling(x), x, -2, "+") == -1
assert limit(ceiling(x), x, -2, "-") == -2
assert limit(ceiling(x), x, -1, "+") == 0
assert limit(ceiling(x), x, -1, "-") == -1
assert limit(ceiling(x), x, 0, "+") == 1
assert limit(ceiling(x), x, 0, "-") == 0
assert limit(ceiling(x), x, 1, "+") == 2
assert limit(ceiling(x), x, 1, "-") == 1
assert limit(ceiling(x), x, 2, "+") == 3
assert limit(ceiling(x), x, 2, "-") == 2
assert limit(ceiling(x), x, 248, "+") == 249
assert limit(ceiling(x), x, 248, "-") == 248
def test_ceiling_requires_robust_assumptions():
assert limit(ceiling(sin(x)), x, 0, "+") == 1
assert limit(ceiling(sin(x)), x, 0, "-") == 0
assert limit(ceiling(cos(x)), x, 0, "+") == 1
assert limit(ceiling(cos(x)), x, 0, "-") == 1
assert limit(ceiling(5 + sin(x)), x, 0, "+") == 6
assert limit(ceiling(5 + sin(x)), x, 0, "-") == 5
assert limit(ceiling(5 + cos(x)), x, 0, "+") == 6
assert limit(ceiling(5 + cos(x)), x, 0, "-") == 6
def test_atan():
x = Symbol("x", extended_real=True)
assert limit(atan(x)*sin(1/x), x, 0) == 0
assert limit(atan(x) + sqrt(x + 1) - sqrt(x), x, oo) == pi/2
def test_abs():
assert limit(abs(x), x, 0) == 0
assert limit(abs(sin(x)), x, 0) == 0
assert limit(abs(cos(x)), x, 0) == 1
assert limit(abs(sin(x + 1)), x, 0) == sin(1)
def test_heuristic():
x = Symbol("x", extended_real=True)
assert heuristics(sin(1/x) + atan(x), x, 0, '+') == sin(oo)
assert limit(log(2 + sqrt(atan(x))*sqrt(sin(1/x))), x, 0) == log(2)
def test_issue_3871():
z = Symbol("z", positive=True)
f = -1/z*exp(-z*x)
assert limit(f, x, oo) == 0
assert f.limit(x, oo) == 0
def test_exponential():
n = Symbol('n')
x = Symbol('x', extended_real=True)
assert limit((1 + x/n)**n, n, oo) == exp(x)
assert limit((1 + x/(2*n))**n, n, oo) == exp(x/2)
assert limit((1 + x/(2*n + 1))**n, n, oo) == exp(x/2)
assert limit(((x - 1)/(x + 1))**x, x, oo) == exp(-2)
assert limit(1 + (1 + 1/x)**x, x, oo) == 1 + S.Exp1
@XFAIL
def test_exponential2():
n = Symbol('n')
assert limit((1 + x/(n + sin(n)))**n, n, oo) == exp(x)
def test_doit():
f = Integral(2 * x, x)
l = Limit(f, x, oo)
assert l.doit() == oo
@XFAIL
def test_doit2():
f = Integral(2 * x, x)
l = Limit(f, x, oo)
# limit() breaks on the contained Integral.
assert l.doit(deep=False) == l
def test_issue_3792():
assert limit( (1 - cos(x))/x**2, x, S(1)/2) == 4 - 4*cos(S(1)/2)
assert limit(sin(sin(x + 1) + 1), x, 0) == sin(1 + sin(1))
assert limit(abs(sin(x + 1) + 1), x, 0) == 1 + sin(1)
def test_issue_4090():
assert limit(1/(x + 3), x, 2) == S(1)/5
assert limit(1/(x + pi), x, 2) == S(1)/(2 + pi)
assert limit(log(x)/(x**2 + 3), x, 2) == log(2)/7
assert limit(log(x)/(x**2 + pi), x, 2) == log(2)/(4 + pi)
def test_issue_4547():
assert limit(cot(x), x, 0, dir='+') == oo
assert limit(cot(x), x, pi/2, dir='+') == 0
def test_issue_5164():
assert limit(x**0.5, x, oo) == oo**0.5 == oo
assert limit(x**0.5, x, 16) == S(16)**0.5
assert limit(x**0.5, x, 0) == 0
assert limit(x**(-0.5), x, oo) == 0
assert limit(x**(-0.5), x, 4) == S(4)**(-0.5)
def test_issue_5183():
    """Limits of y**(s*e) at 0 from both directions (sympy issue 5183)."""
    # using list(...) so py.test can recalculate values
    # tests is the cartesian product [base, sign, exponent, direction];
    # results maps onto it one-to-one (len check below enforces this)
    tests = list(cartes([x, -x],
                        [-1, 1],
                        [2, 3, Rational(1, 2), Rational(2, 3)],
                        ['-', '+']))
    results = (oo, oo, -oo, oo, -oo*I, oo, -oo*(-1)**Rational(1, 3), oo,
               0, 0, 0, 0, 0, 0, 0, 0,
               oo, oo, oo, -oo, oo, -oo*I, oo, -oo*(-1)**Rational(1, 3),
               0, 0, 0, 0, 0, 0, 0, 0)
    assert len(tests) == len(results)
    for i, (args, res) in enumerate(zip(tests, results)):
        y, s, e, d = args
        eq = y**(s*e)
        assert limit(eq, x, 0, dir=d) == res
def test_issue_5184():
assert limit(sin(x)/x, x, oo) == 0
assert limit(atan(x), x, oo) == pi/2
assert limit(gamma(x), x, oo) == oo
assert limit(cos(x)/x, x, oo) == 0
assert limit(gamma(x), x, Rational(1, 2)) == sqrt(pi)
r = Symbol('r', extended_real=True, finite=True)
assert limit(r*sin(1/r), r, 0) == 0
def test_issue_5229():
assert limit((1 + y)**(1/y) - S.Exp1, y, 0) == 0
def test_issue_4546():
    """One-sided limits of cot/tan at multiples of pi/2 (sympy issue 4546)."""
    # using list(...) so py.test can recalculate values
    # tests is the cartesian product [function, point, direction];
    # results maps onto it one-to-one (len check below enforces this)
    tests = list(cartes([cot, tan],
                        [-pi/2, 0, pi/2, pi, 3*pi/2],
                        ['-', '+']))
    results = (0, 0, -oo, oo, 0, 0, -oo, oo, 0, 0,
               oo, -oo, 0, 0, oo, -oo, 0, 0, oo, -oo)
    assert len(tests) == len(results)
    for i, (args, res) in enumerate(zip(tests, results)):
        f, l, d = args
        eq = f(x)
        assert limit(eq, x, l, dir=d) == res
def test_issue_3934():
assert limit((1 + x**log(3))**(1/x), x, 0) == 1
assert limit((5**(1/x) + 3**(1/x))**x, x, 0) == 5
def test_calculate_series():
# needs gruntz calculate_series to go to n = 32
assert limit(x**(S(77)/3)/(1 + x**(S(77)/3)), x, oo) == 1
# needs gruntz calculate_series to go to n = 128
assert limit(x**101.1/(1 + x**101.1), x, oo) == 1
def test_issue_5955():
assert limit((x**16)/(1 + x**16), x, oo) == 1
assert limit((x**100)/(1 + x**100), x, oo) == 1
assert limit((x**1885)/(1 + x**1885), x, oo) == 1
assert limit((x**1000/((x + 1)**1000 + exp(-x))), x, oo) == 1
def test_newissue():
assert limit(exp(1/sin(x))/exp(cot(x)), x, 0) == 1
def test_extended_real_line():
assert limit(x - oo, x, oo) == -oo
assert limit(oo - x, x, -oo) == oo
assert limit(x**2/(x - 5) - oo, x, oo) == -oo
assert limit(1/(x + sin(x)) - oo, x, 0) == -oo
assert limit(oo/x, x, oo) == oo
assert limit(x - oo + 1/x, x, oo) == -oo
assert limit(x - oo + 1/x, x, 0) == -oo
@XFAIL
def test_order_oo():
x = Symbol('x', positive=True, finite=True)
assert Order(x)*oo != Order(1, x)
assert limit(oo/(x**2 - 4), x, oo) == oo
def test_issue_5436():
raises(NotImplementedError, lambda: limit(exp(x*y), x, oo))
raises(NotImplementedError, lambda: limit(exp(-x*y), x, oo))
def test_Limit_dir():
raises(TypeError, lambda: Limit(x, x, 0, dir=0))
raises(ValueError, lambda: Limit(x, x, 0, dir='0'))
def test_polynomial():
assert limit((x + 1)**1000/((x + 1)**1000 + 1), x, oo) == 1
assert limit((x + 1)**1000/((x + 1)**1000 + 1), x, -oo) == 1
def test_rational():
assert limit(1/y - (1/(y + x) + x/(y + x)/y)/z, x, oo) == (z - 1)/(y*z)
assert limit(1/y - (1/(y + x) + x/(y + x)/y)/z, x, -oo) == (z - 1)/(y*z)
def test_issue_5740():
assert limit(log(x)*z - log(2*x)*y, x, 0) == oo*sign(y - z)
def test_issue_6366():
n = Symbol('n', integer=True, positive=True)
r = (n + 1)*x**(n + 1)/(x**(n + 1) - 1) - x/(x - 1)
assert limit(r, x, 1).simplify() == n/2
def test_factorial():
from sympy import factorial, E
f = factorial(x)
assert limit(f, x, oo) == oo
assert limit(x/f, x, oo) == 0
# see Stirling's approximation:
# http://en.wikipedia.org/wiki/Stirling's_approximation
assert limit(f/(sqrt(2*pi*x)*(x/E)**x), x, oo) == 1
assert limit(f, x, -oo) == factorial(-oo)
assert limit(f, x, x**2) == factorial(x**2)
assert limit(f, x, -x**2) == factorial(-x**2)
def test_issue_6560():
e = 5*x**3/4 - 3*x/4 + (y*(3*x**2/2 - S(1)/2) + \
35*x**4/8 - 15*x**2/4 + S(3)/8)/(2*(y + 1))
assert limit(e, y, oo) == (5*x**3 + 3*x**2 - 3*x - 1)/4
def test_issue_5172():
n = Symbol('n')
r = Symbol('r', positive=True)
c = Symbol('c')
p = Symbol('p', positive=True)
m = Symbol('m', negative=True)
expr = ((2*n*(n - r + 1)/(n + r*(n - r + 1)))**c + \
(r - 1)*(n*(n - r + 2)/(n + r*(n - r + 1)))**c - n)/(n**c - n)
expr = expr.subs(c, c + 1)
assert limit(expr.subs(c, m), n, oo) == 1
assert limit(expr.subs(c, p), n, oo).simplify() == \
(2**(p + 1) + r - 1)/(r + 1)**(p + 1)
def test_issue_7088():
a = Symbol('a')
assert limit(sqrt(x/(x + a)), x, oo) == 1
def test_issue_6364():
a = Symbol('a')
e = z/(1 - sqrt(1 + z)*sin(a)**2 - sqrt(1 - z)*cos(a)**2)
assert limit(e, z, 0).simplify() == 2/cos(2*a)
def test_issue_4099():
a = Symbol('a')
assert limit(a/x, x, 0) == oo*sign(a)
assert limit(-a/x, x, 0) == -oo*sign(a)
assert limit(-a*x, x, oo) == -oo*sign(a)
assert limit(a*x, x, oo) == oo*sign(a)
def test_issue_4503():
dx = Symbol('dx')
assert limit((sqrt(1 + exp(x + dx)) - sqrt(1 + exp(x)))/dx, dx, 0) == \
exp(x)/(2*sqrt(exp(x) + 1))
def test_issue_8730():
assert limit(subfactorial(x), x, oo) == oo
def test_omgissue_55():
assert limit((x + exp(x))/(x - 1), x, -oo) == 1
assert limit((x*exp(x))/(exp(x) - 1), x, -oo) == 0 # issue 2929
def test_issue_8061():
assert limit(4**(acos(1/(1 + x**2))**2)/log(1 + x, 4), x, 0) == oo
def test_issue_8229():
assert limit((x**Rational(1, 4) - 2)/(sqrt(x) - 4)**Rational(2, 3),
x, 16) == 0
|
import os
from tempfile import mkdtemp
from shutil import rmtree
from nipype.testing import assert_equal, assert_true
import nipype.pipeline.engine as pe
from nipype.interfaces.utility import Function
def dummyFunction(filename):
    '''
    Compute the sum 0+1+...+9 (which is 45) and write it to *filename*.
    '''
    # sum of the arithmetic series 0..9 is 45
    total = sum(range(0, 10))
    with open(filename, 'w') as out:
        out.write(str(total))
def mytestFunction(insum=0):
    '''
    Run a multiprocessing job and spawn child processes.

    Each of the ``numberOfThreads`` child processes writes 45 to its own
    temp file; the files are then read back, summed, and added to
    ``insum``.  Returns ``insum + numberOfThreads * 45``.
    '''
    # need to import here since this is executed as an external process
    import multiprocessing
    import tempfile
    import time
    import os

    numberOfThreads = 2

    # list of processes
    t = [None] * numberOfThreads
    # list of alive flags
    a = [None] * numberOfThreads
    # list of tempFiles
    f = [None] * numberOfThreads

    # the child target must be defined INSIDE this function: nipype's
    # Function interface serialises only mytestFunction's source, so a
    # module-level helper would not exist when the node is executed
    def dummyFunction(filename):
        '''
        This function writes the value 45 to the given filename.
        '''
        j = 0
        for i in range(0, 10):
            j += i
        # j is now 45 (0+1+2+3+4+5+6+7+8+9)
        with open(filename, 'w') as fh:
            fh.write(str(j))

    for n in xrange(numberOfThreads):
        # mark thread as alive
        a[n] = True
        # create a temp file to use as the data exchange container
        tmpFile = tempfile.mkstemp('.txt', 'test_engine_')[1]
        f[n] = tmpFile  # keep track of the temp file
        t[n] = multiprocessing.Process(target=dummyFunction,
                                       args=(tmpFile,))
        # fire up the job
        t[n].start()

    # block until all processes are done
    allDone = False
    while not allDone:
        time.sleep(1)
        for n in xrange(numberOfThreads):
            a[n] = t[n].is_alive()
        if not any(a):
            # if no thread is alive
            allDone = True

    # here, all processes are done
    # read in all temp files and sum them up; start from insum (not 0!)
    # so chained nodes (f1 -> f2) actually accumulate their results
    total = insum
    for file in f:
        with open(file) as fd:
            total += int(fd.read())
        os.remove(file)
    return total
def run_multiproc_nondaemon_with_flag(nondaemon_flag):
    '''
    Start a pipe with two nodes using the multiproc plugin and passing the
    nondaemon_flag.

    Returns the 'sum_out' output of the second node so callers can verify
    the result themselves.
    '''
    cur_dir = os.getcwd()
    temp_dir = mkdtemp(prefix='test_engine_')
    os.chdir(temp_dir)

    pipe = pe.Workflow(name='pipe')

    f1 = pe.Node(interface=Function(function=mytestFunction,
                                    input_names=['insum'],
                                    output_names=['sum_out']),
                 name='f1')
    f2 = pe.Node(interface=Function(function=mytestFunction,
                                    input_names=['insum'],
                                    output_names=['sum_out']),
                 name='f2')

    pipe.connect([(f1, f2, [('sum_out', 'insum')])])
    pipe.base_dir = os.getcwd()
    f1.inputs.insum = 0

    # 'stop_on_first_crash' is the key nipype recognises;
    # 'stop_on_first_error' was a typo and had no effect
    pipe.config = {'execution': {'stop_on_first_crash': True}}

    # execute the pipe using the MultiProc plugin with 2 processes and the
    # non_daemon flag to enable child processes which start other
    # multiprocessing jobs
    execgraph = pipe.run(plugin="MultiProc",
                         plugin_args={'n_procs': 2,
                                      'non_daemon': nondaemon_flag})

    names = ['.'.join((node._hierarchy, node.name)) for node in execgraph.nodes()]
    node = execgraph.nodes()[names.index('pipe.f2')]
    result = node.get_output('sum_out')
    os.chdir(cur_dir)
    rmtree(temp_dir)
    # return instead of yield: the old `yield assert_equal, ...` turned
    # this helper into a generator, so merely calling it never executed
    # the pipeline at all
    return result
def test_run_multiproc_nondaemon_false():
    '''
    This is the entry point for the test. Two times a pipe of several multiprocessing jobs gets
    executed. First, without the nondaemon flag. Second, with the nondaemon flag.
    Since the processes of the pipe start child processes, the execution only succeeds when the
    non_daemon flag is on.
    '''
    shouldHaveFailed = False
    try:
        # with nondaemon_flag = False, the execution should fail
        run_multiproc_nondaemon_with_flag(False)
    except Exception:
        # catch Exception, not a bare except: a bare except would also
        # swallow KeyboardInterrupt/SystemExit
        shouldHaveFailed = True
    yield assert_true, shouldHaveFailed
def test_run_multiproc_nondaemon_true():
    # with nondaemon_flag = True, the execution should succeed
    # NOTE(review): run_multiproc_nondaemon_with_flag contains a yield, so
    # calling it returns a generator and the pipeline body never runs here;
    # the expected result (180) is also never asserted — confirm intent.
    run_multiproc_nondaemon_with_flag(True)
fix: finish cleaning up the multiproc non-daemon test (nest dummyFunction, honour `insum`, use the `stop_on_first_crash` config key, and return the result)
import os
from tempfile import mkdtemp
from shutil import rmtree
from nipype.testing import assert_equal, assert_true
import nipype.pipeline.engine as pe
from nipype.interfaces.utility import Function
def mytestFunction(insum=0):
    '''
    Run a multiprocessing job and spawn child processes.

    Each of the ``numberOfThreads`` child processes writes 45 to its own
    temp file; the files are then read back, summed, and added to
    ``insum``.  Returns ``insum + numberOfThreads * 45``.
    '''
    # need to import here since this is executed as an external process
    import multiprocessing
    import tempfile
    import time
    import os
    numberOfThreads = 2
    # list of processes
    t = [None] * numberOfThreads
    # list of alive flags
    a = [None] * numberOfThreads
    # list of tempFiles
    f = [None] * numberOfThreads
    # defined inside this function on purpose: nipype's Function interface
    # serialises only mytestFunction's source, so the child target must be
    # self-contained
    def dummyFunction(filename):
        '''
        This function writes the value 45 to the given filename.
        '''
        j = 0
        for i in range(0, 10):
            j += i
        # j is now 45 (0+1+2+3+4+5+6+7+8+9)
        with open(filename, 'w') as f:
            f.write(str(j))
    # NOTE(review): xrange implies this file targets Python 2
    for n in xrange(numberOfThreads):
        # mark thread as alive
        a[n] = True
        # create a temp file to use as the data exchange container
        tmpFile = tempfile.mkstemp('.txt', 'test_engine_')[1]
        f[n] = tmpFile  # keep track of the temp file
        t[n] = multiprocessing.Process(target=dummyFunction,
                                       args=(tmpFile,))
        # fire up the job
        t[n].start()
    # block until all processes are done
    allDone = False
    while not allDone:
        time.sleep(1)
        for n in xrange(numberOfThreads):
            a[n] = t[n].is_alive()
        if not any(a):
            # if no thread is alive
            allDone = True
    # here, all processes are done
    # read in all temp files and sum them up; starting from insum lets
    # chained nodes (f1 -> f2) accumulate their results
    total = insum
    for file in f:
        with open(file) as fd:
            total += int(fd.read())
        os.remove(file)
    return total
def run_multiproc_nondaemon_with_flag(nondaemon_flag):
    '''
    Start a pipe with two nodes using the multiproc plugin and passing the nondaemon_flag.

    Returns the 'sum_out' output of the second node so callers can assert
    on the result.  Runs in (and afterwards removes) a fresh temp dir.
    '''
    cur_dir = os.getcwd()
    temp_dir = mkdtemp(prefix='test_engine_')
    os.chdir(temp_dir)
    pipe = pe.Workflow(name='pipe')
    # two identical Function nodes, chained below so f2 receives f1's sum
    f1 = pe.Node(interface=Function(function=mytestFunction,
                                    input_names=['insum'],
                                    output_names=['sum_out']),
                 name='f1')
    f2 = pe.Node(interface=Function(function=mytestFunction,
                                    input_names=['insum'],
                                    output_names=['sum_out']),
                 name='f2')
    pipe.connect([(f1, f2, [('sum_out', 'insum')])])
    pipe.base_dir = os.getcwd()
    f1.inputs.insum = 0
    pipe.config = {'execution': {'stop_on_first_crash': True}}
    # execute the pipe using the MultiProc plugin with 2 processes and the non_daemon flag
    # to enable child processes which start other multiprocessing jobs
    execgraph = pipe.run(plugin="MultiProc",
                         plugin_args={'n_procs': 2,
                                      'non_daemon': nondaemon_flag})
    # locate node 'pipe.f2' in the executed graph and pull its output
    names = ['.'.join((node._hierarchy,node.name)) for node in execgraph.nodes()]
    node = execgraph.nodes()[names.index('pipe.f2')]
    result = node.get_output('sum_out')
    os.chdir(cur_dir)
    rmtree(temp_dir)
    return result
def test_run_multiproc_nondaemon_false():
    '''
    This is the entry point for the test. Two times a pipe of several multiprocessing jobs gets
    executed. First, without the nondaemon flag. Second, with the nondaemon flag.
    Since the processes of the pipe start child processes, the execution only succeeds when the
    non_daemon flag is on.
    '''
    shouldHaveFailed = False
    try:
        # with nondaemon_flag = False, the execution should fail
        run_multiproc_nondaemon_with_flag(False)
    except Exception:
        # catch Exception, not a bare except: a bare except would also
        # swallow KeyboardInterrupt/SystemExit
        shouldHaveFailed = True
    yield assert_true, shouldHaveFailed
def test_run_multiproc_nondaemon_true():
    # with nondaemon_flag = True, the execution should succeed
    result = run_multiproc_nondaemon_with_flag(True)
    yield assert_equal, result, 180  # n_procs (2) * numberOfThreads (2) * 45 == 180
|
from django.db import models
from django.contrib.auth.models import Group, Permission
from django.utils.translation import ugettext_lazy as _
from django.db.models.signals import post_save, post_delete, pre_save
from django.dispatch import receiver
from django.core import exceptions
from django.conf import settings
from django.utils import timezone
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
from model_utils import * # noqa
# ------------------------------------------------------------------------------
# Models for administration of an institute
# ------------------------------------------------------------------------------
class Institute(models.Model):
    """An institution; top-level parent of faculties and reporting periods."""
    name = models.CharField(max_length=256)
    logo = models.ImageField(blank=True, null=True)  # optional image

    def __unicode__(self):
        # Python 2 equivalent of __str__
        return self.name
class StrategicObjective(models.Model):
    """A strategic-objective statement belonging to an institute."""
    # NOTE(review): ForeignKey without on_delete — Django < 2.0 style.
    institute = models.ForeignKey('Institute')
    statement = models.CharField(max_length=512)

    def __unicode__(self):
        return self.statement
class Faculty(models.Model):
    """A faculty within an institute; listed alphabetically by name."""
    name = models.CharField(max_length=256)
    institute = models.ForeignKey('Institute')

    def __unicode__(self):
        return self.name

    class Meta:
        verbose_name_plural = _("Faculties")
        ordering = ['name']
class ReportingPeriod(models.Model):
    """A named reporting window for an institute; ``is_active`` displays as 'Open'."""
    institute = models.ForeignKey('Institute', related_name='reporting_period')
    name = models.CharField(max_length=128)
    description = models.TextField()
    open_date = models.DateField(auto_now_add=True)  # set once, on creation
    close_date = models.DateField(null=True, blank=True)
    is_active = models.BooleanField(default=True, verbose_name=_('Open'))

    def __unicode__(self):
        return self.name
# ------------------------------------------------------------------------------
# Models for users
# ------------------------------------------------------------------------------
# Rename name to InstituteAdminUser ?
class InstituteAdmin(models.Model):
    """Profile linking one auth user to the institute they administer."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='institute_admin')
    institute = models.ForeignKey('Institute', related_name='institute_admin')

    def __unicode__(self):
        return self.user.email
class ProjectLeader(models.Model):
    """Profile for a user who captures projects for an institute/faculty."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='project_leader')
    institute = models.ForeignKey('Institute')
    faculty = models.ForeignKey('Faculty')
    staff_no = models.CharField(max_length=64)
    position = models.CharField(max_length=128)

    def __unicode__(self):
        return self.user.email
# ------------------------------------------------------------------------------
# Models for questionnaire many-to-many keys
# ------------------------------------------------------------------------------
class FocusArea(models.Model):
    """Lookup table (code -> choice) for focus-area M2M selections."""
    code = models.PositiveIntegerField(unique=True)
    choice = models.CharField(max_length=256)

    def __unicode__(self):
        return self.choice
class AdvisoryGroupRep(models.Model):
    """Lookup table (code -> choice) for advisory-group representatives."""
    code = models.PositiveIntegerField(unique=True)
    choice = models.CharField(max_length=256)

    def __unicode__(self):
        return self.choice
class ResearchTeamMember(models.Model):
    """Lookup table (code -> choice) for research-team member types."""
    code = models.PositiveIntegerField(unique=True)
    choice = models.CharField(max_length=256)

    def __unicode__(self):
        return self.choice
class StudentType(models.Model):
    """Lookup table (code -> choice) for student types."""
    code = models.PositiveIntegerField(unique=True)
    choice = models.CharField(max_length=32)

    def __unicode__(self):
        return self.choice
class StudentParticipationNature(models.Model):
    """Lookup table (code -> choice) for the nature of student participation."""
    code = models.PositiveIntegerField(unique=True)
    choice = models.CharField(max_length=128)

    def __unicode__(self):
        return self.choice
class ProjectOutputType(models.Model):
    """Lookup table (code -> choice) for project output types."""
    code = models.PositiveIntegerField(unique=True)
    choice = models.CharField(max_length=128)

    def __unicode__(self):
        return self.choice
# ------------------------------------------------------------------------------
# Models for questionnaire inlines
# ------------------------------------------------------------------------------
class ProjectFunding(models.Model):
    """Inline row: one funder/amount entry attached to a ProjectDetail."""
    funder = models.CharField(max_length=256)
    amount = models.DecimalField(decimal_places=2, max_digits=10)
    years = models.DecimalField(decimal_places=2, max_digits=5)
    renewable = models.CharField(choices=YESNO, max_length=1, null=True)
    project = models.ForeignKey('ProjectDetail')
class PHDStudent(models.Model):
    """Inline row: a PhD student named on a ProjectDetail."""
    name = models.CharField(max_length=128)
    project = models.ForeignKey('ProjectDetail')
class ProjectOutput(models.Model):
    """Inline row: an output (publication, URL, file, ...) of a ProjectDetail."""
    project = models.ForeignKey('ProjectDetail')
    type = models.ForeignKey('ProjectOutputType')  # NOTE(review): shadows builtin `type`
    title = models.CharField(max_length=255, null=True, blank=True)
    url = models.URLField(null=True, blank=True)
    doi = models.CharField(max_length=128, null=True, blank=True)
    attachment = models.FileField(upload_to='projects/attachments/output/', null=True, blank=True)
class NewCourseDetail(models.Model):
    """Inline row: a new course (code + name) created by a ProjectDetail."""
    code = models.CharField(max_length=32)
    name = models.CharField(max_length=128)
    project = models.ForeignKey('ProjectDetail')
class CourseReqDetail(models.Model):
    """Inline row: a course requirement (code + name) for a ProjectDetail."""
    code = models.CharField(max_length=32)
    name = models.CharField(max_length=128)
    project = models.ForeignKey('ProjectDetail')
class Collaborators(models.Model):
    """Inline row: an external collaborator (name + university) on a ProjectDetail."""
    name = models.CharField(max_length=128)
    university = models.CharField(max_length=128)
    project = models.ForeignKey('ProjectDetail')
class ProjectDetail(models.Model):
name = models.CharField(max_length=512,
verbose_name=CAPTURE_LABELS['name'])
proj_leader = models.ForeignKey('ProjectLeader')
date_created = models.DateField(auto_now_add=True)
is_leader = models.CharField(choices=YESNO, max_length=1, null=True,
verbose_name=CAPTURE_LABELS['is_leader'])
is_flagship = models.CharField(choices=YESNO, max_length=1, null=True,
verbose_name=CAPTURE_LABELS['is_flagship'])
project_status = models.PositiveIntegerField(choices=PROJECT_STATUS, null=True,
verbose_name=CAPTURE_LABELS['project_status'])
start_date = models.DateField(null=True,
verbose_name=CAPTURE_LABELS['start_date'])
end_date = models.DateField(null=True, blank=True,
verbose_name=CAPTURE_LABELS['end_date'])
faculty = models.ForeignKey('Faculty', null=True,
verbose_name=CAPTURE_LABELS['faculty'])
multi_faculty = models.CharField(choices=YESNO, max_length=1, null=True,
verbose_name=CAPTURE_LABELS['multi_faculty'])
description = models.TextField(null=True,
verbose_name=CAPTURE_LABELS['description'])
focus_area = models.ManyToManyField('FocusArea',
verbose_name=CAPTURE_LABELS['focus_area'],
help_text=CAPTURE_HELP['focus_areas'])
focus_area_text = models.CharField(max_length=256, null=True, blank=True,
verbose_name=CAPTURE_LABELS['focus_area_text'])
classification = models.PositiveIntegerField(choices=CLASSIFICATION, null=True,
verbose_name=CAPTURE_LABELS['classification'])
strategic_objectives = models.ManyToManyField('StrategicObjective',
verbose_name=CAPTURE_LABELS['strategic_objectives'],
help_text=CAPTURE_HELP['strategic_objectives'])
outcomes = models.TextField(null=True,
verbose_name=CAPTURE_LABELS['outcomes'])
beneficiaries = models.TextField(null=True,
verbose_name=CAPTURE_LABELS['beneficiaries'])
initiation = models.PositiveIntegerField(choices=INITIATION_STATEMENTS, null=True,
verbose_name=CAPTURE_LABELS['initiation'])
authors = models.PositiveIntegerField(choices=NUMBER_AUTHORS, null=True,
verbose_name=CAPTURE_LABELS['authors'])
amendments_permitted = models.CharField(choices=YESNO, max_length=1, null=True,
verbose_name=CAPTURE_LABELS['amendments_permitted'])
public_domain = models.CharField(choices=YESNO, max_length=1, null=True,
verbose_name=CAPTURE_LABELS['public_domain'],
help_text='If yes, please provide the URL')
public_domain_url = models.URLField(null=True, blank=True,
verbose_name=CAPTURE_LABELS['public_domain_url'])
adv_group = models.CharField(choices=YESNO, max_length=1, null=True,
verbose_name=CAPTURE_LABELS['adv_group'])
adv_group_rep = models.ManyToManyField('AdvisoryGroupRep', blank=True,
verbose_name=CAPTURE_LABELS['adv_group_rep'])
adv_group_freq = models.PositiveIntegerField(choices=ADV_GROUP_FREQ, null=True, default=None, blank=True,
verbose_name=CAPTURE_LABELS['adv_group_freq'])
team_members = models.ManyToManyField(ResearchTeamMember, blank=True,
verbose_name=CAPTURE_LABELS['team_members'])
team_members_text = models.TextField(null=True, blank=True,
verbose_name=CAPTURE_LABELS['team_members_text'])
new_initiative = models.CharField(choices=YESNO, max_length=1, null=True,
verbose_name=CAPTURE_LABELS['new_initiative'])
new_initiative_text = models.TextField(null=True, blank=True,
verbose_name=CAPTURE_LABELS['new_initiative_text'])
new_initiative_party = models.PositiveIntegerField(choices=INITIATIVE_PARTIES, null=True, blank=True,
verbose_name=CAPTURE_LABELS['new_initiative_party'])
new_initiative_party_text = models.TextField(null=True, blank=True,
verbose_name=CAPTURE_LABELS['new_initiative_party_text'])
research = models.PositiveIntegerField(choices=RESEARCH_CLASSIFICATION, null=True,
verbose_name=CAPTURE_LABELS['research'])
research_text = models.TextField(null=True, blank=True,
verbose_name=CAPTURE_LABELS['research_text'],
help_text=CAPTURE_HELP['research_text'])
phd_research = models.CharField(choices=YESNO, max_length=1, null=True,
verbose_name=CAPTURE_LABELS['phd_research'])
curriculum_changes = models.CharField(choices=YESNO, max_length=1, null=True,
verbose_name=CAPTURE_LABELS['curriculum_changes'])
curriculum_changes_text = models.TextField(null=True, blank=True,
verbose_name=CAPTURE_LABELS['curriculum_changes_text'])
new_courses = models.CharField(choices=YESNO, max_length=1, null=True,
verbose_name=CAPTURE_LABELS['new_courses'])
students_involved = models.CharField(choices=YESNO, max_length=1, null=True,
verbose_name=CAPTURE_LABELS['students_involved'])
student_types = models.ManyToManyField('StudentType', blank=True,
verbose_name=CAPTURE_LABELS['student_types'])
student_nature = models.ManyToManyField('StudentParticipationNature',
verbose_name=CAPTURE_LABELS['student_nature'],
blank=True)
student_nature_text = models.CharField(max_length=128, null=True, blank=True,
verbose_name=CAPTURE_LABELS['student_nature_text'])
course_requirement = models.CharField(choices=YESNO, max_length=1, null=True,
verbose_name=CAPTURE_LABELS['course_requirement'])
external_collaboration = models.CharField(choices=YESNO, max_length=1, null=True,
verbose_name=CAPTURE_LABELS['external_collaboration'])
record_status = models.PositiveIntegerField(choices=RECORD_STATUS)
reporting_period = models.ForeignKey('ReportingPeriod')
rejected = models.BooleanField(default=False)
rejected_detail = models.TextField(null=True)
    def __unicode__(self):
        # Admin/display label: "<project name> - <reporting period name>".
        # Triggers a DB fetch of reporting_period if it is not already cached.
        return '%s - %s' % (self.name, self.reporting_period.name)
    class Meta:
        # Human-readable names used throughout the admin UI.
        verbose_name='Project detail'
        verbose_name_plural='Project details'
        # Custom permissions granted to the InstituteAdmins group by the
        # post_save signal handlers below.
        permissions = (
            ('view_projectdetail', 'Can only view project details'),
            ('reject_projectdetail', 'Can reject the project which has been submitted')
        )
# ------------------------------------------------------------------------------
# Custom User
# ------------------------------------------------------------------------------
class CustomUserManager(BaseUserManager):
    """Manager for the email-as-username CustomUser model."""

    def _create_user(self, email, password,
                     is_staff, is_superuser, **extra_fields):
        """
        Creates and saves a User with the given username, email and password.
        """
        now = timezone.now()
        if not email:
            raise ValueError('The given email must be set')
        # Lower-cases the domain part of the address.
        email = self.normalize_email(email)
        user = self.model(email=email,
                          is_staff=is_staff, is_active=True,
                          is_superuser=is_superuser,
                          date_joined=now, **extra_fields)
        # Hashes the password; a None password marks the user unusable.
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_user(self, email, password=None, **extra_fields):
        # Regular user: neither staff nor superuser.
        return self._create_user(email, password, False, False,
                                 **extra_fields)

    def create_superuser(self, email, password, **extra_fields):
        # Superuser: both staff and superuser flags set.
        return self._create_user(email, password, True, True,
                                 **extra_fields)
class CustomUser(AbstractBaseUser, PermissionsMixin):
    """Custom auth user that logs in with an email address instead of a
    username. Managed by CustomUserManager."""
    email = models.EmailField(unique=True)
    first_name = models.CharField(_('first name'), max_length=30, blank=True)
    last_name = models.CharField(_('last name'), max_length=30, blank=True)
    is_staff = models.BooleanField(_('staff status'), default=False,
                                   help_text=_('Designates whether the user can log into this admin '
                                               'site.'))
    is_active = models.BooleanField(_('active'), default=True,
                                    help_text=_('Designates whether this user should be treated as '
                                                'active. Unselect this instead of deleting accounts.'))
    date_joined = models.DateTimeField(_('date joined'), default=timezone.now)

    # Authenticate by email; no extra fields prompted by createsuperuser.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = []

    def get_full_name(self):
        """
        Returns the first_name plus the last_name, with a space in between.
        """
        full_name = '%s %s' % (self.first_name, self.last_name)
        return full_name.strip()

    def get_short_name(self):
        "Returns the short name for the user."
        return self.first_name

    def email_user(self, subject, message, from_email=None, **kwargs):
        """
        Sends an email to this User.
        """
        # NOTE(review): send_mail is not imported in the visible import block
        # of this file — confirm it is provided by the star-import.
        send_mail(subject, message, from_email, [self.email], **kwargs)

    objects = CustomUserManager()

    class Meta:
        verbose_name = 'User'
# ------------------------------------------------------------------------------
# Model signals
# ------------------------------------------------------------------------------
@receiver(post_save, sender=InstituteAdmin)
def assign_institute_admin_to_group(sender, **kwargs):
    """On creation of an InstituteAdmin, put its user in the
    'InstituteAdmins' group, creating and seeding the group on first use."""
    if kwargs['created']:
        try:
            g = Group.objects.get(name='InstituteAdmins')
        except exceptions.ObjectDoesNotExist:
            # Move this to migrations file
            g = Group.objects.create(name='InstituteAdmins')
            admin_permissions = [
                'add_projectleader', 'delete_projectleader', 'change_projectleader',
                'add_faculty', 'delete_faculty', 'change_faculty',
                'add_reportingperiod', 'change_reportingperiod', 'delete_reportingperiod',
                'change_projectdetail', 'view_projectdetail', 'reject_projectdetail'
            ]
            perms = Permission.objects.filter(codename__in=admin_permissions)
            for perm in perms:
                g.permissions.add(perm)
            g.save()
        kwargs['instance'].user.groups.add(g)
@receiver(post_delete, sender=InstituteAdmin)
def remove_institute_admin_from_group(sender, **kwargs):
    """Strip group membership when the InstituteAdmin record is deleted.
    Raises DoesNotExist if the group was never created."""
    g = Group.objects.get(name='InstituteAdmins')
    kwargs['instance'].user.groups.remove(g)
@receiver(post_save, sender=ProjectLeader)
def assign_project_leader_to_group(sender, **kwargs):
    """On creation of a ProjectLeader, put its user in the 'ProjectLeaders'
    group, creating and seeding the group on first use."""
    if kwargs['created']:
        try:
            g = Group.objects.get(name='ProjectLeaders')
        except exceptions.ObjectDoesNotExist:
            # Move this to migrations file
            g = Group.objects.create(name='ProjectLeaders')
            admin_permissions = [
                'add_projectdetail', 'delete_projectdetail', 'change_projectdetail',
                'add_projectfunding', 'delete_projectfunding', 'change_projectfunding',
                'add_phdstudent', 'delete_phdstudent', 'change_phdstudent',
                'add_newcoursedetail', 'delete_newcoursedetail', 'change_newcoursedetail',
                'add_coursereqdetail', 'delete_coursereqdetail', 'change_coursereqdetail',
                'add_collaborators', 'delete_collaborators', 'change_collaborators',
                'add_projectoutput', 'delete_projectoutput', 'change_projectoutput'
            ]
            perms = Permission.objects.filter(codename__in=admin_permissions)
            for perm in perms:
                g.permissions.add(perm)
            g.save()
        kwargs['instance'].user.groups.add(g)
@receiver(post_delete, sender=ProjectLeader)
def remove_user_from_project_leaders(sender, **kwargs):
    """Strip group membership when the ProjectLeader record is deleted."""
    g = Group.objects.get(name='ProjectLeaders')
    kwargs['instance'].user.groups.remove(g)
@receiver(pre_save, sender=settings.AUTH_USER_MODEL)
def set_user_as_staff(sender, instance, **kwargs):
    """Force every user to be staff before saving, so all users can reach
    the admin-based capture screens."""
    if not instance.is_staff:
        instance.is_staff = True
Change display name of ProjectDetail from 'Project detail' to 'Engagement project'
from django.db import models
from django.contrib.auth.models import Group, Permission
from django.utils.translation import ugettext_lazy as _
from django.db.models.signals import post_save, post_delete, pre_save
from django.dispatch import receiver
from django.core import exceptions
from django.conf import settings
from django.utils import timezone
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
from model_utils import * # noqa
# ------------------------------------------------------------------------------
# Models for administration of an institute
# ------------------------------------------------------------------------------
class Institute(models.Model):
    """A tertiary institution that owns faculties, reporting periods and
    engagement projects."""
    name = models.CharField(max_length=256)
    logo = models.ImageField(blank=True, null=True)

    def __unicode__(self):
        return self.name
class StrategicObjective(models.Model):
    """An institute-specific strategic objective statement that projects
    can be linked to (see ProjectDetail.strategic_objectives)."""
    institute = models.ForeignKey('Institute')
    statement = models.CharField(max_length=512)

    def __unicode__(self):
        return self.statement
class Faculty(models.Model):
    """A faculty belonging to an institute."""
    name = models.CharField(max_length=256)
    institute = models.ForeignKey('Institute')

    def __unicode__(self):
        return self.name

    class Meta:
        verbose_name_plural = _("Faculties")
        ordering = ['name']
class ReportingPeriod(models.Model):
    """A capture window during which project details are submitted for an
    institute. open_date is fixed at creation; is_active marks it open."""
    institute = models.ForeignKey('Institute', related_name='reporting_period')
    name = models.CharField(max_length=128)
    description = models.TextField()
    open_date = models.DateField(auto_now_add=True)
    close_date = models.DateField(null=True, blank=True)
    is_active = models.BooleanField(default=True, verbose_name=_('Open'))

    def __unicode__(self):
        return self.name
# ------------------------------------------------------------------------------
# Models for users
# ------------------------------------------------------------------------------
# Rename name to InstituteAdminUser ?
class InstituteAdmin(models.Model):
    """Links one auth user to the institute they administer; creation and
    deletion are hooked by the group-assignment signals below."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='institute_admin')
    institute = models.ForeignKey('Institute', related_name='institute_admin')

    def __unicode__(self):
        return self.user.email
class ProjectLeader(models.Model):
    """Profile for a user who leads projects; creation and deletion are
    hooked by the ProjectLeaders group signals below."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='project_leader')
    institute = models.ForeignKey('Institute')
    faculty = models.ForeignKey('Faculty')
    staff_no = models.CharField(max_length=64)
    position = models.CharField(max_length=128)

    def __unicode__(self):
        return self.user.email
# ------------------------------------------------------------------------------
# Models for questionnaire many-to-many keys
# ------------------------------------------------------------------------------
class FocusArea(models.Model):
    """Coded lookup choice for ProjectDetail.focus_area (many-to-many)."""
    code = models.PositiveIntegerField(unique=True)
    choice = models.CharField(max_length=256)

    def __unicode__(self):
        return self.choice
class AdvisoryGroupRep(models.Model):
    """Coded lookup choice for ProjectDetail.adv_group_rep (many-to-many)."""
    code = models.PositiveIntegerField(unique=True)
    choice = models.CharField(max_length=256)

    def __unicode__(self):
        return self.choice
class ResearchTeamMember(models.Model):
    """Coded lookup choice for ProjectDetail.team_members (many-to-many)."""
    code = models.PositiveIntegerField(unique=True)
    choice = models.CharField(max_length=256)

    def __unicode__(self):
        return self.choice
class StudentType(models.Model):
    """Coded lookup choice for ProjectDetail.student_types (many-to-many)."""
    code = models.PositiveIntegerField(unique=True)
    choice = models.CharField(max_length=32)

    def __unicode__(self):
        return self.choice
class StudentParticipationNature(models.Model):
    """Coded lookup choice for ProjectDetail.student_nature (many-to-many)."""
    code = models.PositiveIntegerField(unique=True)
    choice = models.CharField(max_length=128)

    def __unicode__(self):
        return self.choice
class ProjectOutputType(models.Model):
    """Coded lookup choice for ProjectOutput.type."""
    code = models.PositiveIntegerField(unique=True)
    choice = models.CharField(max_length=128)

    def __unicode__(self):
        return self.choice
# ------------------------------------------------------------------------------
# Models for questionnaire inlines
# ------------------------------------------------------------------------------
class ProjectFunding(models.Model):
    """Inline record of one funding source for a project."""
    funder = models.CharField(max_length=256)
    amount = models.DecimalField(decimal_places=2, max_digits=10)
    years = models.DecimalField(decimal_places=2, max_digits=5)
    renewable = models.CharField(choices=YESNO, max_length=1, null=True)
    project = models.ForeignKey('ProjectDetail')
class PHDStudent(models.Model):
    """Inline record of a PhD student attached to a project."""
    name = models.CharField(max_length=128)
    project = models.ForeignKey('ProjectDetail')
class ProjectOutput(models.Model):
    """Inline record of a project output (publication, artefact, ...);
    identified by any of title, URL, DOI or an uploaded attachment."""
    project = models.ForeignKey('ProjectDetail')
    type = models.ForeignKey('ProjectOutputType')
    title = models.CharField(max_length=255, null=True, blank=True)
    url = models.URLField(null=True, blank=True)
    doi = models.CharField(max_length=128, null=True, blank=True)
    attachment = models.FileField(upload_to='projects/attachments/output/', null=True, blank=True)
class NewCourseDetail(models.Model):
    """Inline record of a new course created as a result of a project."""
    code = models.CharField(max_length=32)
    name = models.CharField(max_length=128)
    project = models.ForeignKey('ProjectDetail')
class CourseReqDetail(models.Model):
    """Inline record of a course that requires project participation."""
    code = models.CharField(max_length=32)
    name = models.CharField(max_length=128)
    project = models.ForeignKey('ProjectDetail')
class Collaborators(models.Model):
    """Inline record of an external collaborator on a project."""
    name = models.CharField(max_length=128)
    university = models.CharField(max_length=128)
    project = models.ForeignKey('ProjectDetail')
class ProjectDetail(models.Model):
    """Main questionnaire record for one engagement project within a
    reporting period. Field labels/help come from CAPTURE_LABELS and
    CAPTURE_HELP; choice tuples (YESNO, PROJECT_STATUS, ...) are module
    constants — presumably supplied by the `model_utils` star-import at the
    top of the file (confirm there)."""
    # --- identification and ownership ---
    name = models.CharField(max_length=512,
                            verbose_name=CAPTURE_LABELS['name'])
    proj_leader = models.ForeignKey('ProjectLeader')
    date_created = models.DateField(auto_now_add=True)
    is_leader = models.CharField(choices=YESNO, max_length=1, null=True,
                                 verbose_name=CAPTURE_LABELS['is_leader'])
    is_flagship = models.CharField(choices=YESNO, max_length=1, null=True,
                                   verbose_name=CAPTURE_LABELS['is_flagship'])
    project_status = models.PositiveIntegerField(choices=PROJECT_STATUS, null=True,
                                                 verbose_name=CAPTURE_LABELS['project_status'])
    start_date = models.DateField(null=True,
                                  verbose_name=CAPTURE_LABELS['start_date'])
    end_date = models.DateField(null=True, blank=True,
                                verbose_name=CAPTURE_LABELS['end_date'])
    faculty = models.ForeignKey('Faculty', null=True,
                                verbose_name=CAPTURE_LABELS['faculty'])
    multi_faculty = models.CharField(choices=YESNO, max_length=1, null=True,
                                     verbose_name=CAPTURE_LABELS['multi_faculty'])
    # --- description and classification ---
    description = models.TextField(null=True,
                                   verbose_name=CAPTURE_LABELS['description'])
    focus_area = models.ManyToManyField('FocusArea',
                                        verbose_name=CAPTURE_LABELS['focus_area'],
                                        help_text=CAPTURE_HELP['focus_areas'])
    focus_area_text = models.CharField(max_length=256, null=True, blank=True,
                                       verbose_name=CAPTURE_LABELS['focus_area_text'])
    classification = models.PositiveIntegerField(choices=CLASSIFICATION, null=True,
                                                 verbose_name=CAPTURE_LABELS['classification'])
    strategic_objectives = models.ManyToManyField('StrategicObjective',
                                                  verbose_name=CAPTURE_LABELS['strategic_objectives'],
                                                  help_text=CAPTURE_HELP['strategic_objectives'])
    outcomes = models.TextField(null=True,
                                verbose_name=CAPTURE_LABELS['outcomes'])
    beneficiaries = models.TextField(null=True,
                                     verbose_name=CAPTURE_LABELS['beneficiaries'])
    initiation = models.PositiveIntegerField(choices=INITIATION_STATEMENTS, null=True,
                                             verbose_name=CAPTURE_LABELS['initiation'])
    authors = models.PositiveIntegerField(choices=NUMBER_AUTHORS, null=True,
                                          verbose_name=CAPTURE_LABELS['authors'])
    amendments_permitted = models.CharField(choices=YESNO, max_length=1, null=True,
                                            verbose_name=CAPTURE_LABELS['amendments_permitted'])
    public_domain = models.CharField(choices=YESNO, max_length=1, null=True,
                                     verbose_name=CAPTURE_LABELS['public_domain'],
                                     help_text='If yes, please provide the URL')
    public_domain_url = models.URLField(null=True, blank=True,
                                        verbose_name=CAPTURE_LABELS['public_domain_url'])
    # --- advisory group and team ---
    adv_group = models.CharField(choices=YESNO, max_length=1, null=True,
                                 verbose_name=CAPTURE_LABELS['adv_group'])
    adv_group_rep = models.ManyToManyField('AdvisoryGroupRep', blank=True,
                                           verbose_name=CAPTURE_LABELS['adv_group_rep'])
    adv_group_freq = models.PositiveIntegerField(choices=ADV_GROUP_FREQ, null=True, default=None, blank=True,
                                                 verbose_name=CAPTURE_LABELS['adv_group_freq'])
    team_members = models.ManyToManyField(ResearchTeamMember, blank=True,
                                          verbose_name=CAPTURE_LABELS['team_members'])
    team_members_text = models.TextField(null=True, blank=True,
                                         verbose_name=CAPTURE_LABELS['team_members_text'])
    # --- initiatives, research and curriculum ---
    new_initiative = models.CharField(choices=YESNO, max_length=1, null=True,
                                      verbose_name=CAPTURE_LABELS['new_initiative'])
    new_initiative_text = models.TextField(null=True, blank=True,
                                           verbose_name=CAPTURE_LABELS['new_initiative_text'])
    new_initiative_party = models.PositiveIntegerField(choices=INITIATIVE_PARTIES, null=True, blank=True,
                                                       verbose_name=CAPTURE_LABELS['new_initiative_party'])
    new_initiative_party_text = models.TextField(null=True, blank=True,
                                                 verbose_name=CAPTURE_LABELS['new_initiative_party_text'])
    research = models.PositiveIntegerField(choices=RESEARCH_CLASSIFICATION, null=True,
                                           verbose_name=CAPTURE_LABELS['research'])
    research_text = models.TextField(null=True, blank=True,
                                     verbose_name=CAPTURE_LABELS['research_text'],
                                     help_text=CAPTURE_HELP['research_text'])
    phd_research = models.CharField(choices=YESNO, max_length=1, null=True,
                                    verbose_name=CAPTURE_LABELS['phd_research'])
    curriculum_changes = models.CharField(choices=YESNO, max_length=1, null=True,
                                          verbose_name=CAPTURE_LABELS['curriculum_changes'])
    curriculum_changes_text = models.TextField(null=True, blank=True,
                                               verbose_name=CAPTURE_LABELS['curriculum_changes_text'])
    new_courses = models.CharField(choices=YESNO, max_length=1, null=True,
                                   verbose_name=CAPTURE_LABELS['new_courses'])
    # --- student participation ---
    students_involved = models.CharField(choices=YESNO, max_length=1, null=True,
                                         verbose_name=CAPTURE_LABELS['students_involved'])
    student_types = models.ManyToManyField('StudentType', blank=True,
                                           verbose_name=CAPTURE_LABELS['student_types'])
    student_nature = models.ManyToManyField('StudentParticipationNature',
                                            verbose_name=CAPTURE_LABELS['student_nature'],
                                            blank=True)
    student_nature_text = models.CharField(max_length=128, null=True, blank=True,
                                           verbose_name=CAPTURE_LABELS['student_nature_text'])
    course_requirement = models.CharField(choices=YESNO, max_length=1, null=True,
                                          verbose_name=CAPTURE_LABELS['course_requirement'])
    external_collaboration = models.CharField(choices=YESNO, max_length=1, null=True,
                                              verbose_name=CAPTURE_LABELS['external_collaboration'])
    # --- workflow state ---
    record_status = models.PositiveIntegerField(choices=RECORD_STATUS)
    reporting_period = models.ForeignKey('ReportingPeriod')
    rejected = models.BooleanField(default=False)
    rejected_detail = models.TextField(null=True)

    def __unicode__(self):
        # "<project name> - <reporting period name>"
        return '%s - %s' % (self.name, self.reporting_period.name)

    class Meta:
        verbose_name='Engagement project'
        verbose_name_plural='Engagement projects'
        # Custom permissions used by the signal handlers below.
        permissions = (
            ('view_projectdetail', 'Can only view project details'),
            ('reject_projectdetail', 'Can reject the project which has been submitted')
        )
# ------------------------------------------------------------------------------
# Custom User
# ------------------------------------------------------------------------------
class CustomUserManager(BaseUserManager):
    """Manager for the email-as-username CustomUser model."""

    def _create_user(self, email, password,
                     is_staff, is_superuser, **extra_fields):
        """
        Creates and saves a User with the given username, email and password.
        """
        if not email:
            raise ValueError('The given email must be set')
        new_user = self.model(
            email=self.normalize_email(email),
            is_staff=is_staff,
            is_active=True,
            is_superuser=is_superuser,
            date_joined=timezone.now(),
            **extra_fields
        )
        new_user.set_password(password)
        new_user.save(using=self._db)
        return new_user

    def create_user(self, email, password=None, **extra_fields):
        """Create a regular (non-staff, non-superuser) user."""
        return self._create_user(email, password, False, False,
                                 **extra_fields)

    def create_superuser(self, email, password, **extra_fields):
        """Create a user with both staff and superuser flags set."""
        return self._create_user(email, password, True, True,
                                 **extra_fields)
class CustomUser(AbstractBaseUser, PermissionsMixin):
    """Custom auth user that logs in with an email address instead of a
    username. Managed by CustomUserManager."""
    email = models.EmailField(unique=True)
    first_name = models.CharField(_('first name'), max_length=30, blank=True)
    last_name = models.CharField(_('last name'), max_length=30, blank=True)
    is_staff = models.BooleanField(_('staff status'), default=False,
                                   help_text=_('Designates whether the user can log into this admin '
                                               'site.'))
    is_active = models.BooleanField(_('active'), default=True,
                                    help_text=_('Designates whether this user should be treated as '
                                                'active. Unselect this instead of deleting accounts.'))
    date_joined = models.DateTimeField(_('date joined'), default=timezone.now)

    # Authenticate by email; no extra fields prompted by createsuperuser.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = []

    def get_full_name(self):
        """
        Returns the first_name plus the last_name, with a space in between.
        """
        full_name = '%s %s' % (self.first_name, self.last_name)
        return full_name.strip()

    def get_short_name(self):
        "Returns the short name for the user."
        return self.first_name

    def email_user(self, subject, message, from_email=None, **kwargs):
        """
        Sends an email to this User.
        """
        # NOTE(review): send_mail is not in the visible import block —
        # confirm it is provided by the `model_utils` star-import.
        send_mail(subject, message, from_email, [self.email], **kwargs)

    objects = CustomUserManager()

    class Meta:
        verbose_name = 'User'
# ------------------------------------------------------------------------------
# Model signals
# ------------------------------------------------------------------------------
@receiver(post_save, sender=InstituteAdmin)
def assign_institute_admin_to_group(sender, **kwargs):
    """On creation of an InstituteAdmin, add its user to the
    'InstituteAdmins' group, creating and seeding the group on first use.

    Uses get_or_create instead of the get/except/create dance: one idiomatic
    query pair, and no DoesNotExist handling needed.
    """
    if not kwargs['created']:
        return
    g, group_created = Group.objects.get_or_create(name='InstituteAdmins')
    if group_created:
        # Seed the group's permissions once, on first creation.
        # Move this to migrations file
        admin_permissions = [
            'add_projectleader', 'delete_projectleader', 'change_projectleader',
            'add_faculty', 'delete_faculty', 'change_faculty',
            'add_reportingperiod', 'change_reportingperiod', 'delete_reportingperiod',
            'change_projectdetail', 'view_projectdetail', 'reject_projectdetail'
        ]
        perms = Permission.objects.filter(codename__in=admin_permissions)
        for perm in perms:
            g.permissions.add(perm)
        g.save()
    kwargs['instance'].user.groups.add(g)
@receiver(post_delete, sender=InstituteAdmin)
def remove_institute_admin_from_group(sender, **kwargs):
    """Strip InstituteAdmins group membership when the record is deleted."""
    group = Group.objects.get(name='InstituteAdmins')
    kwargs['instance'].user.groups.remove(group)
@receiver(post_save, sender=ProjectLeader)
def assign_project_leader_to_group(sender, **kwargs):
    """On creation of a ProjectLeader, add its user to the 'ProjectLeaders'
    group, creating and seeding the group on first use.

    Uses get_or_create instead of the get/except/create dance: one idiomatic
    query pair, and no DoesNotExist handling needed.
    """
    if not kwargs['created']:
        return
    g, group_created = Group.objects.get_or_create(name='ProjectLeaders')
    if group_created:
        # Seed the group's permissions once, on first creation.
        # Move this to migrations file
        admin_permissions = [
            'add_projectdetail', 'delete_projectdetail', 'change_projectdetail',
            'add_projectfunding', 'delete_projectfunding', 'change_projectfunding',
            'add_phdstudent', 'delete_phdstudent', 'change_phdstudent',
            'add_newcoursedetail', 'delete_newcoursedetail', 'change_newcoursedetail',
            'add_coursereqdetail', 'delete_coursereqdetail', 'change_coursereqdetail',
            'add_collaborators', 'delete_collaborators', 'change_collaborators',
            'add_projectoutput', 'delete_projectoutput', 'change_projectoutput'
        ]
        perms = Permission.objects.filter(codename__in=admin_permissions)
        for perm in perms:
            g.permissions.add(perm)
        g.save()
    kwargs['instance'].user.groups.add(g)
@receiver(post_delete, sender=ProjectLeader)
def remove_user_from_project_leaders(sender, **kwargs):
    """Strip ProjectLeaders group membership when the record is deleted."""
    group = Group.objects.get(name='ProjectLeaders')
    kwargs['instance'].user.groups.remove(group)
@receiver(pre_save, sender=settings.AUTH_USER_MODEL)
def set_user_as_staff(sender, instance, **kwargs):
    """Force every user to be flagged as staff before saving."""
    # Unconditional assignment is equivalent to the guarded form:
    # after pre_save, is_staff is always True.
    instance.is_staff = True
|
# -*- coding: utf-8 -*-
import re
import logging
from collections import OrderedDict
logger = logging.getLogger(__name__)
try:
from simple_salesforce import Salesforce as SimpleSalesforce
from simple_salesforce.api import (SalesforceError)
enabled = True
except ImportError as e:
logger.info(str(e))
enabled = False
from redash.query_runner import BaseQueryRunner, register
from redash.query_runner import TYPE_STRING, TYPE_DATE, TYPE_DATETIME, TYPE_INTEGER, TYPE_FLOAT, TYPE_BOOLEAN
from redash.utils import json_dumps
# See https://developer.salesforce.com/docs/atlas.en-us.api.meta/api/field_types.htm
# See https://developer.salesforce.com/docs/atlas.en-us.api.meta/api/field_types.htm
# Maps Salesforce field-type names to redash column types; unknown types
# fall back to TYPE_STRING at the lookup sites.
TYPES_MAP = dict(
    id=TYPE_STRING,
    string=TYPE_STRING,
    currency=TYPE_FLOAT,
    reference=TYPE_STRING,
    double=TYPE_FLOAT,
    picklist=TYPE_STRING,
    date=TYPE_DATE,
    url=TYPE_STRING,
    phone=TYPE_STRING,
    textarea=TYPE_STRING,
    int=TYPE_INTEGER,
    datetime=TYPE_DATETIME,
    boolean=TYPE_BOOLEAN,
    percent=TYPE_FLOAT,
    multipicklist=TYPE_STRING,
    masterrecord=TYPE_STRING,
    location=TYPE_STRING,
    JunctionIdList=TYPE_STRING,
    encryptedstring=TYPE_STRING,
    email=TYPE_STRING,
    DataCategoryGroupReference=TYPE_STRING,
    combobox=TYPE_STRING,
    calculated=TYPE_STRING,
    anyType=TYPE_STRING,
    address=TYPE_STRING
)
# Query Runner for Salesforce SOQL Queries
# For example queries, see:
# https://developer.salesforce.com/docs/atlas.en-us.soql_sosl.meta/soql_sosl/sforce_api_calls_soql_select_examples.htm
# Query Runner for Salesforce SOQL Queries
# For example queries, see:
# https://developer.salesforce.com/docs/atlas.en-us.soql_sosl.meta/soql_sosl/sforce_api_calls_soql_select_examples.htm
class Salesforce(BaseQueryRunner):
    """Redash query runner executing SOQL against Salesforce via the
    simple_salesforce client."""

    @classmethod
    def enabled(cls):
        # Reflects whether the simple_salesforce import at module load succeeded.
        return enabled

    @classmethod
    def configuration_schema(cls):
        # JSON schema the redash UI renders for data-source settings.
        return {
            "type": "object",
            "properties": {
                "username": {
                    "type": "string"
                },
                "password": {
                    "type": "string"
                },
                "token": {
                    "type": "string",
                    "title": "Security Token"
                },
                "sandbox": {
                    "type": "boolean"
                }
            },
            "required": ["username", "password", "token", "sandbox"],
            "secret": ["password", "token"]
        }

    def test_connection(self):
        # Probe the API; a None describe() result is treated as failure.
        response = self._get_sf().describe()
        if response is None:
            raise Exception("Failed describing objects.")
        pass

    def _get_sf(self):
        # Build a fresh authenticated client from the stored configuration.
        sf = SimpleSalesforce(username=self.configuration['username'],
                              password=self.configuration['password'],
                              security_token=self.configuration['token'],
                              sandbox=self.configuration['sandbox'],
                              client_id='Redash')
        return sf

    def _clean_value(self, value):
        # Nested query results arrive as OrderedDicts holding a 'records'
        # list; unwrap them and drop per-row Salesforce metadata.
        if type(value) == OrderedDict and 'records' in value:
            value = value['records']
            for row in value:
                row.pop('attributes', None)
        return value

    def _get_value(self, dct, dots):
        # Resolve a dotted column path (e.g. 'Account.Name') against a record.
        for key in dots.split('.'):
            dct = dct.get(key)
        return dct

    def _get_column_name(self, key, parents = []):
        # Dotted column name from the parent-key chain plus the leaf key.
        return '.'.join(parents + [key])

    def _build_columns(self, sf, child, parents = []):
        # Recursively derive (name, type) column pairs from the first record,
        # describing each Salesforce object to resolve field types.
        child_type = child['attributes']['type']
        child_desc = sf.__getattr__(child_type).describe()
        child_type_map = dict((f['name'], f['type'])for f in child_desc['fields'])
        columns = []
        for key in child.keys():
            if key != 'attributes':
                if type(child[key]) == OrderedDict and 'attributes' in child[key]:
                    columns.extend(self._build_columns(sf, child[key], parents + [key]))
                else:
                    column_name = self._get_column_name(key, parents)
                    key_type = child_type_map.get(key, 'string')
                    column_type = TYPES_MAP.get(key_type, TYPE_STRING)
                    columns.append((column_name, column_type))
        return columns

    def _build_rows(self, columns, records):
        # Flatten each record into {dotted_column_name: cleaned_value}.
        rows = []
        for record in records:
            record.pop('attributes', None)
            row = dict()
            for column in columns:
                key = column[0]
                value = self._get_value(record, key)
                row[key] = self._clean_value(value)
            rows.append(row)
        return rows

    def run_query(self, query, user):
        logger.debug("Salesforce is about to execute query: %s", query)
        # Strip /* ... */ comments before sending SOQL to the API.
        query = re.sub(r"/\*(.|\n)*?\*/", "", query).strip()
        try:
            columns = []
            rows = []
            sf = self._get_sf()
            response = sf.query_all(query)
            records = response['records']
            # Aggregate-only queries report totalSize but return no records.
            if response['totalSize'] > 0 and len(records) == 0:
                columns = self.fetch_columns([('Count', TYPE_INTEGER)])
                rows = [{ 'Count': response['totalSize'] }]
            elif len(records) > 0:
                cols = self._build_columns(sf, records[0])
                rows = self._build_rows(cols, records)
                columns = self.fetch_columns(cols)
            error = None
            data = {'columns': columns, 'rows': rows}
            json_data = json_dumps(data)
        except SalesforceError as err:
            error = err.message
            json_data = None
        return json_data, error

    def get_schema(self, get_stats=False):
        # Enumerate queryable sobjects and their field names for autocomplete.
        sf = self._get_sf()
        response = sf.describe()
        if response is None:
            raise Exception("Failed describing objects.")
        schema = {}
        for sobject in response['sobjects']:
            table_name = sobject['name']
            if sobject['queryable'] is True and table_name not in schema:
                desc = sf.__getattr__(sobject['name']).describe()
                fields = desc['fields']
                schema[table_name] = {'name': table_name, 'columns': [f['name'] for f in fields]}
        return schema.values()
# Register this runner with redash's query-runner registry at import time.
register(Salesforce)
Format to PEP8
# -*- coding: utf-8 -*-
import re
import logging
from collections import OrderedDict
logger = logging.getLogger(__name__)
try:
from simple_salesforce import Salesforce as SimpleSalesforce
from simple_salesforce.api import (SalesforceError)
enabled = True
except ImportError as e:
logger.info(str(e))
enabled = False
from redash.query_runner import BaseQueryRunner, register
from redash.query_runner import TYPE_STRING, TYPE_DATE, TYPE_DATETIME, TYPE_INTEGER, TYPE_FLOAT, TYPE_BOOLEAN
from redash.utils import json_dumps
# See https://developer.salesforce.com/docs/atlas.en-us.api.meta/api/field_types.htm
# See https://developer.salesforce.com/docs/atlas.en-us.api.meta/api/field_types.htm
# Maps Salesforce field-type names to redash column types; unknown types
# fall back to TYPE_STRING at the lookup sites.
TYPES_MAP = dict(
    id=TYPE_STRING,
    string=TYPE_STRING,
    currency=TYPE_FLOAT,
    reference=TYPE_STRING,
    double=TYPE_FLOAT,
    picklist=TYPE_STRING,
    date=TYPE_DATE,
    url=TYPE_STRING,
    phone=TYPE_STRING,
    textarea=TYPE_STRING,
    int=TYPE_INTEGER,
    datetime=TYPE_DATETIME,
    boolean=TYPE_BOOLEAN,
    percent=TYPE_FLOAT,
    multipicklist=TYPE_STRING,
    masterrecord=TYPE_STRING,
    location=TYPE_STRING,
    JunctionIdList=TYPE_STRING,
    encryptedstring=TYPE_STRING,
    email=TYPE_STRING,
    DataCategoryGroupReference=TYPE_STRING,
    combobox=TYPE_STRING,
    calculated=TYPE_STRING,
    anyType=TYPE_STRING,
    address=TYPE_STRING
)
# Query Runner for Salesforce SOQL Queries
# For example queries, see:
# https://developer.salesforce.com/docs/atlas.en-us.soql_sosl.meta/soql_sosl/sforce_api_calls_soql_select_examples.htm
# Query Runner for Salesforce SOQL Queries
# For example queries, see:
# https://developer.salesforce.com/docs/atlas.en-us.soql_sosl.meta/soql_sosl/sforce_api_calls_soql_select_examples.htm
class Salesforce(BaseQueryRunner):
    """Redash query runner executing SOQL against Salesforce via the
    simple_salesforce client.

    Fixes over the previous revision: no mutable default arguments,
    `str(err)` instead of the deprecated `err.message`, `_get_value` no
    longer raises AttributeError on a broken dotted path, and `getattr`
    replaces direct `__getattr__` calls.
    """

    @classmethod
    def enabled(cls):
        # Reflects whether the simple_salesforce import at module load succeeded.
        return enabled

    @classmethod
    def configuration_schema(cls):
        # JSON schema the redash UI renders for data-source settings.
        return {
            "type": "object",
            "properties": {
                "username": {
                    "type": "string"
                },
                "password": {
                    "type": "string"
                },
                "token": {
                    "type": "string",
                    "title": "Security Token"
                },
                "sandbox": {
                    "type": "boolean"
                }
            },
            "required": ["username", "password", "token", "sandbox"],
            "secret": ["password", "token"]
        }

    def test_connection(self):
        """Raise if the Salesforce API cannot describe its objects."""
        response = self._get_sf().describe()
        if response is None:
            raise Exception("Failed describing objects.")

    def _get_sf(self):
        """Build a fresh authenticated client from the stored configuration."""
        return SimpleSalesforce(username=self.configuration['username'],
                                password=self.configuration['password'],
                                security_token=self.configuration['token'],
                                sandbox=self.configuration['sandbox'],
                                client_id='Redash')

    def _clean_value(self, value):
        """Unwrap nested query results and drop per-row Salesforce metadata."""
        if isinstance(value, OrderedDict) and 'records' in value:
            value = value['records']
            for row in value:
                row.pop('attributes', None)
        return value

    def _get_value(self, dct, dots):
        """Resolve a dotted column path (e.g. 'Account.Name') against a record.

        Returns None (instead of raising AttributeError) when an intermediate
        segment is missing or null.
        """
        for key in dots.split('.'):
            if dct is None:
                return None
            dct = dct.get(key)
        return dct

    def _get_column_name(self, key, parents=None):
        """Dotted column name from the parent-key chain plus the leaf key."""
        # None default avoids the shared-mutable-default-argument pitfall.
        return '.'.join((parents or []) + [key])

    def _build_columns(self, sf, child, parents=None):
        """Recursively derive (name, type) column pairs from the first record,
        describing each Salesforce object to resolve field types."""
        parents = parents or []
        child_type = child['attributes']['type']
        child_desc = getattr(sf, child_type).describe()
        child_type_map = {f['name']: f['type'] for f in child_desc['fields']}
        columns = []
        for key in child.keys():
            if key == 'attributes':
                continue
            if isinstance(child[key], OrderedDict) and 'attributes' in child[key]:
                # Nested related object: recurse with an extended path.
                columns.extend(self._build_columns(sf, child[key], parents + [key]))
            else:
                column_name = self._get_column_name(key, parents)
                key_type = child_type_map.get(key, 'string')
                column_type = TYPES_MAP.get(key_type, TYPE_STRING)
                columns.append((column_name, column_type))
        return columns

    def _build_rows(self, columns, records):
        """Flatten each record into {dotted_column_name: cleaned_value}."""
        rows = []
        for record in records:
            record.pop('attributes', None)
            row = {}
            for column in columns:
                key = column[0]
                value = self._get_value(record, key)
                row[key] = self._clean_value(value)
            rows.append(row)
        return rows

    def run_query(self, query, user):
        """Execute one SOQL query and return (json_data, error)."""
        logger.debug("Salesforce is about to execute query: %s", query)
        # Strip /* ... */ comments before sending SOQL to the API.
        query = re.sub(r"/\*(.|\n)*?\*/", "", query).strip()
        try:
            columns = []
            rows = []
            sf = self._get_sf()
            response = sf.query_all(query)
            records = response['records']
            # Aggregate-only queries report totalSize but return no records.
            if response['totalSize'] > 0 and len(records) == 0:
                columns = self.fetch_columns([('Count', TYPE_INTEGER)])
                rows = [{'Count': response['totalSize']}]
            elif len(records) > 0:
                cols = self._build_columns(sf, records[0])
                rows = self._build_rows(cols, records)
                columns = self.fetch_columns(cols)
            error = None
            data = {'columns': columns, 'rows': rows}
            json_data = json_dumps(data)
        except SalesforceError as err:
            # str(err) instead of err.message: .message was deprecated in
            # Python 2.6 and removed in Python 3.
            error = str(err)
            json_data = None
        return json_data, error

    def get_schema(self, get_stats=False):
        """Enumerate queryable sobjects and their field names for autocomplete."""
        sf = self._get_sf()
        response = sf.describe()
        if response is None:
            raise Exception("Failed describing objects.")
        schema = {}
        for sobject in response['sobjects']:
            table_name = sobject['name']
            if sobject['queryable'] and table_name not in schema:
                desc = getattr(sf, sobject['name']).describe()
                fields = desc['fields']
                schema[table_name] = {'name': table_name, 'columns': [f['name'] for f in fields]}
        return schema.values()
# Register this runner with redash's query-runner registry at import time.
register(Salesforce)
|
from typing import Set, Tuple, Union
from great_expectations.data_context.data_context_variables import (
DataContextVariableSchema,
)
from great_expectations.data_context.store.configuration_store import ConfigurationStore
from great_expectations.data_context.types.base import DataContextConfig
class DataContextStore(ConfigurationStore):
    """
    A DataContextStore manages persistence around DataContextConfigs.
    """

    _configuration_class = DataContextConfig

    # Config variables that GE Cloud does not persist; stripped from the
    # serialized payload in cloud mode.
    ge_cloud_exclude_field_names: Set[str] = {
        DataContextVariableSchema.DATASOURCES,
        DataContextVariableSchema.ANONYMOUS_USAGE_STATISTICS,
    }

    def serialize(
        self, key: Tuple[str, ...], value: DataContextConfig
    ) -> Union[dict, str]:
        """
        Please see `ConfigurationStore.serialize` for more information.
        Note that GE Cloud utilizes a subset of the config; as such, an explicit
        step to remove unnecessary keys is a required part of the serialization process.
        Args:
            key:
            value: DataContextConfig to serialize utilizing the configured StoreBackend.
        Returns:
            Either a string or dictionary representation of the serialized config.
        """
        payload: Union[str, dict] = super().serialize(key=key, value=value)

        # Cloud requires a subset of the DataContextConfig
        if self.ge_cloud_mode:
            # In cloud mode the parent serializer yields a dict, not a string.
            assert isinstance(payload, dict)
            for attr in self.ge_cloud_exclude_field_names:
                # Unconditional pop: raises KeyError if the key is absent —
                # presumably every excluded key is always present; the later
                # revision guards this.
                payload.pop(attr)
        return payload
[MAINTENANCE] Ensure that validation operators are omitted from Cloud variables payload (#5510)
* chore: port over changes from other PR
* feat: finish initial impl
* refactor: cleanup subset logic
* chore: update linter thresholds
* chore: update pop pattern
* chore: add logger
* chore: update pop logic
* chore: update type hint
* chore: change info to debug
import logging
from typing import Set, Tuple, Union
from great_expectations.data_context.data_context_variables import (
DataContextVariableSchema,
)
from great_expectations.data_context.store.configuration_store import ConfigurationStore
from great_expectations.data_context.types.base import DataContextConfig
logger = logging.getLogger(__name__)
class DataContextStore(ConfigurationStore):
    """
    A DataContextStore manages persistence around DataContextConfigs.
    """

    _configuration_class = DataContextConfig

    # Config variables that GE Cloud does not persist; stripped from the
    # serialized payload in cloud mode.
    ge_cloud_exclude_field_names: Set[DataContextVariableSchema] = {
        DataContextVariableSchema.ANONYMOUS_USAGE_STATISTICS,
        DataContextVariableSchema.DATASOURCES,
        DataContextVariableSchema.VALIDATION_OPERATORS,
    }

    def serialize(
        self, key: Tuple[str, ...], value: DataContextConfig
    ) -> Union[dict, str]:
        """Serialize a DataContextConfig via the configured StoreBackend.

        See `ConfigurationStore.serialize` for details. GE Cloud persists only
        a subset of the config, so in cloud mode the excluded variables are
        removed from the serialized payload before it is returned.

        Args:
            key: Unused but required to adhere to signature set by parent.
            value: DataContextConfig to serialize utilizing the configured StoreBackend.

        Returns:
            Either a string or dictionary representation of the serialized config.
        """
        payload: Union[str, dict] = super().serialize(key=key, value=value)

        if not self.ge_cloud_mode:
            return payload

        # Cloud requires a subset of the DataContextConfig
        assert isinstance(payload, dict)
        for attr in self.ge_cloud_exclude_field_names:
            if attr not in payload:
                continue
            del payload[attr]
            logger.debug(
                f"Removed {attr} from DataContextConfig while serializing to JSON"
            )
        return payload
|
"""
All location related functions and converters.
The main entry point is `location_processing`
which gets `location` and `source_ip_address`
and basing on this information generates
precise location description.
"""
from __future__ import print_function
import os
import json
import socket
import requests
import geoip2.database
from globals import GEOLITE, GEOLOCATOR_SERVICE, IP2LCACHE, IP2LOCATION_KEY, NOT_FOUND_LOCATION, \
ALIASES, BLACKLIST, IATA_CODES_FILE
# Shared GeoIP database reader, opened once at import time.
GEOIP_READER = geoip2.database.Reader(GEOLITE)
def ascii_only(string):
    """Return True if `string` contains only ASCII symbols.

    The previous implementation repeatedly called ``encode('utf-8')`` and
    relied on a Python-2-only implicit ASCII decode; on Python 3 it raised
    AttributeError. Encoding to 'ascii' directly works on both versions.
    """
    try:
        string.encode('ascii')
        return True
    except (UnicodeEncodeError, UnicodeDecodeError):
        return False
def is_ip(ip_addr):
    """
    Check if `ip_addr` looks like an IPv4 or IPv6 address.

    The previous implementation passed ``ip_addr.encode('utf-8')`` to
    ``socket.inet_pton``, which requires a str on Python 3 and therefore
    raised TypeError for every input; pass the string directly instead.
    """
    for family in (socket.AF_INET, socket.AF_INET6):
        try:
            socket.inet_pton(family, ip_addr)
            return True
        except (socket.error, ValueError):
            pass
    return False
def location_normalize(location):
    """Normalize location name `location`: lower-case it, turn '_'/'+'
    into spaces, trim whitespace and drop punctuation (unless it is a
    'moon@...' query, which keeps its '@')."""
    strip_chars = r'!@#$*;:\\'
    normalized = location.lower().replace('_', ' ').replace('+', ' ').strip()
    if normalized.startswith('moon@'):
        return normalized
    return ''.join(ch for ch in normalized if ch not in strip_chars)
def geolocator(location):
    """
    Return a GPS answer (dict with latitude/longitude/address) for the
    specified `location`, or None if nothing could be found.

    Fixes: removed the unreachable trailing ``return None`` and the
    needless ``.encode('utf-8')`` before ``json.loads``.
    """
    try:
        geo = requests.get('%s/%s' % (GEOLOCATOR_SERVICE, location)).text
    except requests.exceptions.ConnectionError as exception:
        print("ERROR: %s" % exception)
        return None

    if geo == "":
        return None

    try:
        return json.loads(geo)
    except ValueError as exception:
        # service returned something that is not valid JSON
        print("ERROR: %s" % exception)
        return None
def ip2location(ip_addr):
    """Convert IP address `ip_addr` to a (city, region) pair.

    Results are cached on disk under IP2LCACHE.  Returns (None, None)
    when the address could not be resolved.

    Fixes: cache files are now opened via context managers so the file
    handles are closed (the old ``open(...).read()`` leaked them).
    """
    if not os.path.exists(IP2LCACHE):
        os.makedirs(IP2LCACHE)
    cached = os.path.join(IP2LCACHE, ip_addr)

    location = None
    if os.path.exists(cached):
        with open(cached, 'r') as f_cached:
            location = f_cached.read()
    elif IP2LOCATION_KEY:
        # if IP2LOCATION_KEY is not set, do not run the query,
        # because the query wont be processed anyway
        try:
            response = requests.get(
                'http://api.ip2location.com/?ip=%s&key=%s&package=WS3'
                % (ip_addr, IP2LOCATION_KEY)).text
            if ';' in response:
                with open(cached, 'w') as f_cached:
                    f_cached.write(response)
                location = response
        except requests.exceptions.ConnectionError:
            pass

    if location and ';' in location:
        fields = location.split(';')
        return fields[3], fields[1]
    return location, None
def get_location(ip_addr):
    """
    Return location pair (CITY, COUNTRY) for `ip_addr`.

    Looks the address up in the local GeoIP database first and falls
    back to the ip2location web service when no city is found.
    """
    try:
        response = GEOIP_READER.city(ip_addr)
        country = response.country.name
        city = response.city.name
    except geoip2.errors.AddressNotFoundError:
        country = None
        city = None
    #
    # temporary disabled it because of geoip services capcacity
    #
    #if city is None and response.location:
    #    coord = "%s, %s" % (response.location.latitude, response.location.longitude)
    #    try:
    #        location = geolocator.reverse(coord, language='en')
    #        city = location.raw.get('address', {}).get('city')
    #    except:
    #        pass
    if city is None:
        # secondary lookup; may itself return (None, None)
        city, country = ip2location(ip_addr)

    # workaround for the strange bug with the country name
    # maybe some other countries has this problem too
    if country == 'Russian Federation':
        country = 'Russia'

    if city:
        return city, country
    else:
        return NOT_FOUND_LOCATION, None
def location_canonical_name(location):
    """Find canonical name for `location` via the alias table."""
    normalized = location_normalize(location)
    return LOCATION_ALIAS.get(normalized.lower(), normalized)
def load_aliases(aliases_filename):
    """
    Load aliases from the aliases file.

    Returns a dict mapping normalized alias -> normalized canonical name.
    """
    # local import keeps the fix self-contained in this function
    import codecs

    aliases_db = {}
    # codecs.open yields unicode lines on both Python 2 and 3, so the old
    # per-line .decode('utf-8') (which breaks on Python 3 str) is not needed
    with codecs.open(aliases_filename, 'r', encoding='utf-8') as f_aliases:
        for line in f_aliases:
            from_, to_ = line.split(':', 1)
            aliases_db[location_normalize(from_)] = location_normalize(to_)
    return aliases_db
def load_iata_codes(iata_codes_filename):
    """
    Load IATA codes from the IATA codes file.

    Returns the codes as a set of stripped lines.
    """
    with open(iata_codes_filename, 'r') as f_iata_codes:
        return {line.strip() for line in f_iata_codes}
LOCATION_ALIAS = load_aliases(ALIASES)
# read the blacklist through a context manager so the file handle is
# closed (open(...).readlines() leaked it)
with open(BLACKLIST, 'r') as _f_blacklist:
    LOCATION_BLACK_LIST = [line.strip() for line in _f_blacklist]
IATA_CODES = load_iata_codes(IATA_CODES_FILE)
def is_location_blocked(location):
    """
    Return True if this location is blocked
    or False if it is allowed.
    """
    if location is None:
        return False
    return location.lower() in LOCATION_BLACK_LIST
def location_processing(location, ip_addr):
    """
    Turn a raw `location` string and the requester's `ip_addr` into a
    5-tuple: (location, override_location_name, full_address, country,
    query_source_location).
    """
    # if location is starting with ~
    # or has non ascii symbols
    # it should be handled like a search term (for geolocator)
    override_location_name = None
    full_address = None
    hide_full_address = False
    force_show_full_address = location is not None and location.startswith('~')

    # location ~ means that it should be detected automatically,
    # and shown in the location line below the report
    if location == '~':
        location = None

    # '@hostname': resolve the host name and geolocate its IP address
    if location and location.lstrip('~ ').startswith('@'):
        try:
            location, country = get_location(
                socket.gethostbyname(
                    location.lstrip('~ ')[1:]))
            location = '~' + location
            if country:
                location += ", %s" % country
            hide_full_address = not force_show_full_address
        except:
            location, country = NOT_FOUND_LOCATION, None

    query_source_location = get_location(ip_addr)

    country = None
    if not location or location == 'MyLocation':
        # no explicit location: fall back to the requester's IP
        location = ip_addr

    if is_ip(location):
        location, country = get_location(location)
        # same display rules as in the '@hostname' branch above
        if location:
            location = '~' + location
            if country:
                location += ", %s" % country
            hide_full_address = not force_show_full_address

    if location and not location.startswith('~'):
        tmp_location = location_canonical_name(location)
        if tmp_location != location:
            # remember the user's original spelling for display purposes
            override_location_name = location
            location = tmp_location

    # up to this point it is possible that the name
    # contains some unicode symbols
    # here we resolve them
    if location is not None:  # and not ascii_only(location):
        location = "~" + location.lstrip('~ ')

    # if location is not None and location.upper() in IATA_CODES:
    #     location = '~%s' % location

    # '~name': resolve the free-form name into GPS coordinates
    if location is not None and location.startswith('~'):
        geolocation = geolocator(location_canonical_name(location[1:]))
        if geolocation is not None:
            override_location_name = location[1:].replace('+', ' ')
            location = "%s,%s" % (geolocation['latitude'], geolocation['longitude'])
            country = None
            if not hide_full_address:
                full_address = geolocation['address']
            else:
                full_address = None
        else:
            location = NOT_FOUND_LOCATION  # location[1:]

    return location, \
        override_location_name, \
        full_address, \
        country, \
        query_source_location
Fixed the lower-case problem: keep the user-visible location name as the override display name.
"""
All location related functions and converters.
The main entry point is `location_processing`
which gets `location` and `source_ip_address`
and basing on this information generates
precise location description.
"""
from __future__ import print_function
import os
import json
import socket
import requests
import geoip2.database
from globals import GEOLITE, GEOLOCATOR_SERVICE, IP2LCACHE, IP2LOCATION_KEY, NOT_FOUND_LOCATION, \
ALIASES, BLACKLIST, IATA_CODES_FILE
# Shared GeoIP database reader, opened once at import time.
GEOIP_READER = geoip2.database.Reader(GEOLITE)
def ascii_only(string):
    """Return True if `string` contains only ASCII symbols.

    The previous implementation repeatedly called ``encode('utf-8')`` and
    relied on a Python-2-only implicit ASCII decode; on Python 3 it raised
    AttributeError. Encoding to 'ascii' directly works on both versions.
    """
    try:
        string.encode('ascii')
        return True
    except (UnicodeEncodeError, UnicodeDecodeError):
        return False
def is_ip(ip_addr):
    """
    Check if `ip_addr` looks like an IPv4 or IPv6 address.

    The previous implementation passed ``ip_addr.encode('utf-8')`` to
    ``socket.inet_pton``, which requires a str on Python 3 and therefore
    raised TypeError for every input; pass the string directly instead.
    """
    for family in (socket.AF_INET, socket.AF_INET6):
        try:
            socket.inet_pton(family, ip_addr)
            return True
        except (socket.error, ValueError):
            pass
    return False
def location_normalize(location):
    """Normalize location name `location`: lower-case it, turn '_'/'+'
    into spaces, trim whitespace and drop punctuation (unless it is a
    'moon@...' query, which keeps its '@')."""
    strip_chars = r'!@#$*;:\\'
    normalized = location.lower().replace('_', ' ').replace('+', ' ').strip()
    if normalized.startswith('moon@'):
        return normalized
    return ''.join(ch for ch in normalized if ch not in strip_chars)
def geolocator(location):
    """
    Return a GPS answer (dict with latitude/longitude/address) for the
    specified `location`, or None if nothing could be found.

    Fixes: removed the unreachable trailing ``return None`` and the
    needless ``.encode('utf-8')`` before ``json.loads``.
    """
    try:
        geo = requests.get('%s/%s' % (GEOLOCATOR_SERVICE, location)).text
    except requests.exceptions.ConnectionError as exception:
        print("ERROR: %s" % exception)
        return None

    if geo == "":
        return None

    try:
        return json.loads(geo)
    except ValueError as exception:
        # service returned something that is not valid JSON
        print("ERROR: %s" % exception)
        return None
def ip2location(ip_addr):
    """Convert IP address `ip_addr` to a (city, region) pair.

    Results are cached on disk under IP2LCACHE.  Returns (None, None)
    when the address could not be resolved.

    Fixes: cache files are now opened via context managers so the file
    handles are closed (the old ``open(...).read()`` leaked them).
    """
    if not os.path.exists(IP2LCACHE):
        os.makedirs(IP2LCACHE)
    cached = os.path.join(IP2LCACHE, ip_addr)

    location = None
    if os.path.exists(cached):
        with open(cached, 'r') as f_cached:
            location = f_cached.read()
    elif IP2LOCATION_KEY:
        # if IP2LOCATION_KEY is not set, do not run the query,
        # because the query wont be processed anyway
        try:
            response = requests.get(
                'http://api.ip2location.com/?ip=%s&key=%s&package=WS3'
                % (ip_addr, IP2LOCATION_KEY)).text
            if ';' in response:
                with open(cached, 'w') as f_cached:
                    f_cached.write(response)
                location = response
        except requests.exceptions.ConnectionError:
            pass

    if location and ';' in location:
        fields = location.split(';')
        return fields[3], fields[1]
    return location, None
def get_location(ip_addr):
    """
    Return location pair (CITY, COUNTRY) for `ip_addr`.

    Looks the address up in the local GeoIP database first and falls
    back to the ip2location web service when no city is found.
    """
    try:
        response = GEOIP_READER.city(ip_addr)
        country = response.country.name
        city = response.city.name
    except geoip2.errors.AddressNotFoundError:
        country = None
        city = None
    #
    # temporary disabled it because of geoip services capcacity
    #
    #if city is None and response.location:
    #    coord = "%s, %s" % (response.location.latitude, response.location.longitude)
    #    try:
    #        location = geolocator.reverse(coord, language='en')
    #        city = location.raw.get('address', {}).get('city')
    #    except:
    #        pass
    if city is None:
        # secondary lookup; may itself return (None, None)
        city, country = ip2location(ip_addr)

    # workaround for the strange bug with the country name
    # maybe some other countries has this problem too
    if country == 'Russian Federation':
        country = 'Russia'

    if city:
        return city, country
    else:
        return NOT_FOUND_LOCATION, None
def location_canonical_name(location):
    """Find canonical name for `location` via the alias table."""
    normalized = location_normalize(location)
    return LOCATION_ALIAS.get(normalized.lower(), normalized)
def load_aliases(aliases_filename):
    """
    Load aliases from the aliases file.

    Returns a dict mapping normalized alias -> normalized canonical name.
    """
    # local import keeps the fix self-contained in this function
    import codecs

    aliases_db = {}
    # codecs.open yields unicode lines on both Python 2 and 3, so the old
    # per-line .decode('utf-8') (which breaks on Python 3 str) is not needed
    with codecs.open(aliases_filename, 'r', encoding='utf-8') as f_aliases:
        for line in f_aliases:
            from_, to_ = line.split(':', 1)
            aliases_db[location_normalize(from_)] = location_normalize(to_)
    return aliases_db
def load_iata_codes(iata_codes_filename):
    """
    Load IATA codes from the IATA codes file.

    Returns the codes as a set of stripped lines.
    """
    with open(iata_codes_filename, 'r') as f_iata_codes:
        return {line.strip() for line in f_iata_codes}
LOCATION_ALIAS = load_aliases(ALIASES)
# read the blacklist through a context manager so the file handle is
# closed (open(...).readlines() leaked it)
with open(BLACKLIST, 'r') as _f_blacklist:
    LOCATION_BLACK_LIST = [line.strip() for line in _f_blacklist]
IATA_CODES = load_iata_codes(IATA_CODES_FILE)
def is_location_blocked(location):
    """
    Return True if this location is blocked
    or False if it is allowed.
    """
    if location is None:
        return False
    return location.lower() in LOCATION_BLACK_LIST
def location_processing(location, ip_addr):
    """
    Turn a raw `location` string and the requester's `ip_addr` into a
    5-tuple: (location, override_location_name, full_address, country,
    query_source_location).
    """
    # if location is starting with ~
    # or has non ascii symbols
    # it should be handled like a search term (for geolocator)
    override_location_name = None
    full_address = None
    hide_full_address = False
    force_show_full_address = location is not None and location.startswith('~')

    # location ~ means that it should be detected automatically,
    # and shown in the location line below the report
    if location == '~':
        location = None

    # '@hostname': resolve the host name and geolocate its IP address
    if location and location.lstrip('~ ').startswith('@'):
        try:
            location, country = get_location(
                socket.gethostbyname(
                    location.lstrip('~ ')[1:]))
            location = '~' + location
            if country:
                location += ", %s" % country
            hide_full_address = not force_show_full_address
        except:
            location, country = NOT_FOUND_LOCATION, None

    query_source_location = get_location(ip_addr)

    country = None
    if not location or location == 'MyLocation':
        # no explicit location: fall back to the requester's IP
        location = ip_addr

    if is_ip(location):
        location, country = get_location(location)
        # same display rules as in the '@hostname' branch above
        if location:
            location = '~' + location
            if country:
                location += ", %s" % country
            hide_full_address = not force_show_full_address

    if location and not location.startswith('~'):
        tmp_location = location_canonical_name(location)
        if tmp_location != location:
            # remember the user's original spelling for display purposes
            override_location_name = location
            location = tmp_location

    # up to this point it is possible that the name
    # contains some unicode symbols
    # here we resolve them
    if location is not None:  # and not ascii_only(location):
        location = "~" + location.lstrip('~ ')
        if not override_location_name:
            # preserve the (possibly unicode) name for display
            override_location_name = location.lstrip('~')

    # if location is not None and location.upper() in IATA_CODES:
    #     location = '~%s' % location

    # '~name': resolve the free-form name into GPS coordinates
    if location is not None and location.startswith('~'):
        geolocation = geolocator(location_canonical_name(location[1:]))
        if geolocation is not None:
            if not override_location_name:
                override_location_name = location[1:].replace('+', ' ')
            location = "%s,%s" % (geolocation['latitude'], geolocation['longitude'])
            country = None
            if not hide_full_address:
                full_address = geolocation['address']
            else:
                full_address = None
        else:
            location = NOT_FOUND_LOCATION  # location[1:]

    return location, \
        override_location_name, \
        full_address, \
        country, \
        query_source_location
|
##############################################################################
# The MIT License (MIT)
#
# Copyright (c) 2016-2019 Hajime Nakagami<nakagami@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
##############################################################################
import socket
import platform
import locale
import collections
from drda import codepoint as cp
from drda import ddm
from drda import utils
from drda.cursor import Cursor
class Connection:
    """A DB-API style connection speaking the DRDA protocol to a Derby
    or Db2 server over a TCP socket."""

    def _parse_response(self):
        """Read chained DDS packets from the server until the chain ends.

        Returns (results, description) where `results` is a deque of row
        tuples; raises the first server-reported error, if any.
        """
        results = collections.deque()
        description = []
        err = qrydsc = None
        chained = True
        # NOTE(review): err_msg is collected but never used afterwards
        err_msg = None
        while chained:
            dds_type, chained, number, code_point, obj = ddm.read_dds(self.sock)
            if code_point == cp.SQLERRRM:
                # server diagnostic text
                err_msg = ddm.parse_reply(obj).get(cp.SRVDGN).decode('utf-8')
            elif code_point == cp.SQLCARD:
                # keep only the first error seen in the chain
                if err is None:
                    err, _ = ddm.parse_sqlcard(obj, self._enc, self.endian)
            elif code_point == cp.SQLDARD:
                err, description = ddm.parse_sqldard(obj, 'utf-8', self.endian)
            elif code_point == cp.QRYDSC:
                # answer-set description: after the 2-byte marker, the body
                # is a sequence of 3-byte entries (type code + 2 size bytes)
                ln = obj[0]
                b = obj[1:ln]
                assert b[:2] == b'\x76\xd0'
                b = b[2:]
                # [(DRDA_TYPE_xxxx, size_binary), ...]
                qrydsc = [(c[0], c[1:]) for c in [b[i:i+3] for i in range(0, len(b), 3)]]
            elif code_point == cp.QRYDTA:
                # row data: each row is prefixed with 0xff 0x00 and decoded
                # using the descriptors gathered from QRYDSC above
                b = obj
                while len(b):
                    if (b[0], b[1]) != (0xff, 0x00):
                        break
                    b = b[2:]
                    r = []
                    for t, ps in qrydsc:
                        v, b = utils.read_field(t, ps, b, self.endian)
                        r.append(v)
                    results.append(tuple(r))
        if err:
            raise err
        return results, description

    def __init__(self, host, database, port, user, password, db_type):
        """Open the socket and perform the DRDA handshake/authentication.

        `db_type` may be 'derby', 'db2' or None; when None it is guessed
        from the presence of credentials (no user -> derby).
        """
        self.host = host
        # the RDB name is space-padded to 18 characters
        self.database = (database + ' ' * 18)[:18]
        self.port = port
        self.user = user
        self.password = password
        self.db_type = db_type
        if self.db_type is None:
            if self.user is None:
                self.db_type = 'derby'
            elif self.user is not None:
                self.db_type = 'db2'
        if self.db_type == 'derby':
            self._enc = 'utf-8'
            self.endian = 'big'
            self.prdid = 'DNC10130'
            self.pkgid = 'SQLC2026'
            self.pkgcnstkn = 'AAAAAfAd'
            self.pkgsn = 201
            # Derby is accessed with a fixed default schema and no password
            user = 'APP'
            password = ''
            secmec = cp.SECMEC_USRIDONL
        elif self.db_type == 'db2':
            self._enc = 'cp500'
            self.endian = 'little'
            self.prdid = 'SQL11014'
            self.pkgid = 'SYSSH200'
            self.pkgcnstkn = 'SYSLVL01'
            self.pkgsn = 65
            user = self.user
            password = self.password
            secmec = cp.SECMEC_USRIDPWD
        else:
            raise ValueError('Unknown database type')
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((self.host, self.port))
        # exchange server attributes, then request access to the database
        cur_id = 1
        cur_id = ddm.write_request_dds(
            self.sock,
            ddm.packEXCSAT(self, [
                cp.AGENT, 10,
                cp.SQLAM, 11,
                cp.CMNTCPIP, 5,
                cp.RDB, 12,
                cp.SECMGR, 9,
                cp.UNICODEMGR, 1208,
            ]),
            cur_id, False, False
        )
        cur_id = ddm.write_request_dds(
            self.sock,
            ddm.packACCSEC(self.database, secmec),
            cur_id, False, True
        )
        self._parse_response()
        # second round trip: security check + access RDB
        cur_id = 1
        cur_id = ddm.write_request_dds(
            self.sock,
            ddm.packSECCHK(secmec, self.database, user, password, self._enc),
            cur_id, False, False
        )
        cur_id = ddm.write_request_dds(
            self.sock,
            ddm.packACCRDB(self.prdid, self.database, self._enc),
            cur_id, False, True
        )
        self._parse_response()

    def __enter__(self):
        return self

    def __exit__(self, exc, value, traceback):
        self.close()

    def _execute(self, query):
        """Run a statement that returns no result set, then commit."""
        cur_id = 1
        if self.db_type == 'derby':
            cur_id = ddm.write_request_dds(
                self.sock,
                ddm.packEXCSQLIMM(self.pkgid, self.pkgcnstkn, self.pkgsn, self.database),
                cur_id, True, False
            )
            cur_id = ddm.write_request_dds(
                self.sock,
                ddm.packSQLSTT(query),
                cur_id, False, False
            )
            cur_id = ddm.write_request_dds(
                self.sock,
                ddm.packRDBCMM(),
                cur_id, False, True
            )
        elif self.db_type == 'db2':
            # the db2 path additionally sends session SET statements
            # before the statement itself
            cur_id = ddm.write_request_dds(
                self.sock,
                ddm.packEXCSAT_MGRLVLLS([cp.CCSIDMGR, 1208]),
                cur_id, False, False
            )
            cur_id = ddm.write_request_dds(
                self.sock,
                ddm.packEXCSQLSET(self.pkgid, None, 1, self.database),
                cur_id, True, False
            )
            cur_id = ddm.write_request_dds(
                self.sock,
                ddm.packSQLSTT("SET CLIENT WRKSTNNAME '{}'".format(platform.node())),
                cur_id, True, False
            )
            cur_id = ddm.write_request_dds(
                self.sock,
                ddm.packSQLSTT("SET CURRENT LOCALE LC_CTYPE='{}'".format(locale.getlocale()[0])),
                cur_id, False, False
            )
            cur_id = ddm.write_request_dds(
                self.sock,
                ddm.packEXCSQLIMM(self.pkgid, self.pkgcnstkn, self.pkgsn, self.database),
                cur_id, True, False
            )
            cur_id = ddm.write_request_dds(
                self.sock,
                ddm.packSQLSTT(query),
                cur_id, False, False
            )
            cur_id = ddm.write_request_dds(
                self.sock,
                ddm.packRDBCMM(),
                cur_id, False, True
            )
        else:
            raise ValueError('Unknown database type')
        self._parse_response()

    def _query(self, query):
        """Prepare `query`, open its result set and return
        (results, description) from the server response."""
        cur_id = 1
        if self.db_type == 'derby':
            cur_id = ddm.write_request_dds(
                self.sock,
                ddm.packPRPSQLSTT(self.pkgid, self.pkgcnstkn, self.pkgsn, self.database),
                cur_id, True, False
            )
            cur_id = ddm.write_request_dds(
                self.sock,
                ddm.packSQLSTT(query),
                cur_id, False, False
            )
            cur_id = ddm.write_request_dds(
                self.sock,
                ddm.packOPNQRY(self.pkgid, self.pkgcnstkn, self.pkgsn, self.database),
                cur_id, False, True
            )
        elif self.db_type == 'db2':
            # as in _execute, db2 gets its session SET statements first
            cur_id = ddm.write_request_dds(
                self.sock,
                ddm.packEXCSAT_MGRLVLLS([cp.CCSIDMGR, 1208]),
                cur_id, False, False
            )
            cur_id = ddm.write_request_dds(
                self.sock,
                ddm.packEXCSQLSET(self.pkgid, self.pkgcnstkn, self.pkgsn, self.database),
                cur_id, True, False
            )
            cur_id = ddm.write_request_dds(
                self.sock,
                ddm.packSQLSTT("SET CLIENT WRKSTNNAME '{}'".format(platform.node())),
                cur_id, True, False
            )
            cur_id = ddm.write_request_dds(
                self.sock,
                ddm.packSQLSTT("SET CURRENT LOCALE LC_CTYPE='{}'".format(locale.getlocale()[0])),
                cur_id, False, False
            )
            cur_id = ddm.write_request_dds(
                self.sock,
                ddm.packPRPSQLSTT(self.pkgid, self.pkgcnstkn, self.pkgsn, self.database),
                cur_id, True, False
            )
            cur_id = ddm.write_request_dds(
                self.sock,
                ddm.packSQLATTR("WITH HOLD "),
                cur_id, True, False
            )
            cur_id = ddm.write_request_dds(
                self.sock,
                ddm.packSQLSTT(query),
                cur_id, False, False
            )
            cur_id = ddm.write_request_dds(
                self.sock,
                ddm.packOPNQRY(self.pkgid, self.pkgcnstkn, self.pkgsn, self.database),
                cur_id, False, True
            )
        else:
            raise ValueError('Unknown database type')
        return self._parse_response()

    def is_connect(self):
        # truthiness of the socket object, not a liveness probe
        return bool(self.sock)

    def cursor(self):
        return Cursor(self)

    def begin(self):
        self._execute("START TRANSACTION")

    def commit(self):
        self._execute("COMMIT")

    def rollback(self):
        self._execute("ROLLBACK")

    def close(self):
        """Commit outstanding work and close the socket."""
        cur_id = 1
        cur_id = ddm.write_request_dds(
            self.sock,
            ddm.packRDBCMM(),
            cur_id, False, True
        )
        self._parse_response()
        self.sock.close()
Refactoring: extracted the shared session-setup statements into a helper and unified the _execute()/_query() code paths.
##############################################################################
# The MIT License (MIT)
#
# Copyright (c) 2016-2019 Hajime Nakagami<nakagami@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
##############################################################################
import socket
import platform
import locale
import collections
from drda import codepoint as cp
from drda import ddm
from drda import utils
from drda.cursor import Cursor
class Connection:
    """A DB-API style connection speaking the DRDA protocol to a Derby
    or Db2 server over a TCP socket."""

    def _parse_response(self):
        """Read chained DDS packets from the server until the chain ends.

        Returns (results, description) where `results` is a deque of row
        tuples; raises the first server-reported error, if any.
        """
        results = collections.deque()
        description = []
        err = qrydsc = None
        chained = True
        # NOTE(review): err_msg is collected but never used afterwards
        err_msg = None
        while chained:
            dds_type, chained, number, code_point, obj = ddm.read_dds(self.sock)
            if code_point == cp.SQLERRRM:
                # server diagnostic text
                err_msg = ddm.parse_reply(obj).get(cp.SRVDGN).decode('utf-8')
            elif code_point == cp.SQLCARD:
                # keep only the first error seen in the chain
                if err is None:
                    err, _ = ddm.parse_sqlcard(obj, self._enc, self.endian)
            elif code_point == cp.SQLDARD:
                err, description = ddm.parse_sqldard(obj, 'utf-8', self.endian)
            elif code_point == cp.QRYDSC:
                # answer-set description: after the 2-byte marker, the body
                # is a sequence of 3-byte entries (type code + 2 size bytes)
                ln = obj[0]
                b = obj[1:ln]
                assert b[:2] == b'\x76\xd0'
                b = b[2:]
                # [(DRDA_TYPE_xxxx, size_binary), ...]
                qrydsc = [(c[0], c[1:]) for c in [b[i:i+3] for i in range(0, len(b), 3)]]
            elif code_point == cp.QRYDTA:
                # row data: each row is prefixed with 0xff 0x00 and decoded
                # using the descriptors gathered from QRYDSC above
                b = obj
                while len(b):
                    if (b[0], b[1]) != (0xff, 0x00):
                        break
                    b = b[2:]
                    r = []
                    for t, ps in qrydsc:
                        v, b = utils.read_field(t, ps, b, self.endian)
                        r.append(v)
                    results.append(tuple(r))
        if err:
            raise err
        return results, description

    def __init__(self, host, database, port, user, password, db_type):
        """Open the socket, perform the DRDA handshake/authentication and
        send the per-session variables.

        `db_type` may be 'derby', 'db2' or None; when None it is guessed
        from the presence of credentials (no user -> derby).
        """
        self.host = host
        # the RDB name is space-padded to 18 characters
        self.database = (database + ' ' * 18)[:18]
        self.port = port
        self.user = user
        self.password = password
        self.db_type = db_type
        if self.db_type is None:
            if self.user is None:
                self.db_type = 'derby'
            elif self.user is not None:
                self.db_type = 'db2'
        if self.db_type == 'derby':
            self._enc = 'utf-8'
            self.endian = 'big'
            self.prdid = 'DNC10130'
            self.pkgid = 'SQLC2026'
            self.pkgcnstkn = 'AAAAAfAd'
            self.pkgsn = 201
            # Derby is accessed with a fixed default schema and no password
            user = 'APP'
            password = ''
            secmec = cp.SECMEC_USRIDONL
        elif self.db_type == 'db2':
            self._enc = 'cp500'
            self.endian = 'little'
            self.prdid = 'SQL11014'
            self.pkgid = 'SYSSH200'
            self.pkgcnstkn = 'SYSLVL01'
            self.pkgsn = 65
            user = self.user
            password = self.password
            secmec = cp.SECMEC_USRIDPWD
        else:
            raise ValueError('Unknown database type')
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((self.host, self.port))
        # exchange server attributes, then request access to the database
        cur_id = 1
        cur_id = ddm.write_request_dds(
            self.sock,
            ddm.packEXCSAT(self, [
                cp.AGENT, 10,
                cp.SQLAM, 11,
                cp.CMNTCPIP, 5,
                cp.RDB, 12,
                cp.SECMGR, 9,
                cp.UNICODEMGR, 1208,
            ]),
            cur_id, False, False
        )
        cur_id = ddm.write_request_dds(
            self.sock,
            ddm.packACCSEC(self.database, secmec),
            cur_id, False, True
        )
        self._parse_response()
        # second round trip: security check + access RDB
        cur_id = 1
        cur_id = ddm.write_request_dds(
            self.sock,
            ddm.packSECCHK(secmec, self.database, user, password, self._enc),
            cur_id, False, False
        )
        cur_id = ddm.write_request_dds(
            self.sock,
            ddm.packACCRDB(self.prdid, self.database, self._enc),
            cur_id, False, True
        )
        self._parse_response()
        self._set_variables()

    def __enter__(self):
        return self

    def __exit__(self, exc, value, traceback):
        self.close()

    def _set_variables(self):
        """Send the per-session SET statements and commit.

        Renamed from the misspelled `_set_valiables` (private; its only
        call site is __init__ above).
        """
        cur_id = 1
        cur_id = ddm.write_request_dds(
            self.sock,
            ddm.packEXCSAT_MGRLVLLS([cp.CCSIDMGR, 1208]),
            cur_id, False, False
        )
        cur_id = ddm.write_request_dds(
            self.sock,
            ddm.packEXCSQLSET(self.pkgid, None, 1, self.database),
            cur_id, True, False
        )
        cur_id = ddm.write_request_dds(
            self.sock,
            ddm.packSQLSTT("SET CLIENT WRKSTNNAME '{}'".format(platform.node())),
            cur_id, True, False
        )
        cur_id = ddm.write_request_dds(
            self.sock,
            ddm.packSQLSTT("SET CURRENT LOCALE LC_CTYPE='{}'".format(locale.getlocale()[0])),
            cur_id, False, False
        )
        cur_id = ddm.write_request_dds(
            self.sock,
            ddm.packRDBCMM(),
            cur_id, False, True
        )
        self._parse_response()

    def _execute(self, query):
        """Run a statement that returns no result set, then commit."""
        cur_id = 1
        cur_id = ddm.write_request_dds(
            self.sock,
            ddm.packEXCSQLIMM(self.pkgid, self.pkgcnstkn, self.pkgsn, self.database),
            cur_id, True, False
        )
        cur_id = ddm.write_request_dds(
            self.sock,
            ddm.packSQLSTT(query),
            cur_id, False, False
        )
        cur_id = ddm.write_request_dds(
            self.sock,
            ddm.packRDBCMM(),
            cur_id, False, True
        )
        self._parse_response()

    def _query(self, query):
        """Prepare `query`, open its result set and return
        (results, description) from the server response."""
        cur_id = 1
        cur_id = ddm.write_request_dds(
            self.sock,
            ddm.packPRPSQLSTT(self.pkgid, self.pkgcnstkn, self.pkgsn, self.database),
            cur_id, True, False
        )
        cur_id = ddm.write_request_dds(
            self.sock,
            ddm.packSQLSTT(query),
            cur_id, False, False
        )
        cur_id = ddm.write_request_dds(
            self.sock,
            ddm.packOPNQRY(self.pkgid, self.pkgcnstkn, self.pkgsn, self.database),
            cur_id, False, True
        )
        return self._parse_response()

    def is_connect(self):
        # truthiness of the socket object, not a liveness probe
        return bool(self.sock)

    def cursor(self):
        return Cursor(self)

    def begin(self):
        self._execute("START TRANSACTION")

    def commit(self):
        self._execute("COMMIT")

    def rollback(self):
        self._execute("ROLLBACK")

    def close(self):
        """Commit outstanding work and close the socket."""
        cur_id = 1
        cur_id = ddm.write_request_dds(
            self.sock,
            ddm.packRDBCMM(),
            cur_id, False, True
        )
        self._parse_response()
        self.sock.close()
|
# -*- coding: utf-8 -*-
import status
class GmapException(Exception):
    """Base exception for all python-gmap exceptions.

    Subclasses Exception (not BaseException): BaseException is reserved
    for interpreter-level exits, and subclassing it made these errors
    invisible to ordinary ``except Exception`` handlers.
    """


class NoResults(GmapException):
    """Raised when api returned no results"""


class RequestDenied(GmapException):
    """Raised when request to API was denied"""
# Maps an API response status code to the exception class to raise;
# status.OK maps to None, meaning "no error".
EXCEPTION_MAPPING = {
    status.OK: None,
    status.ZERO_RESULTS: NoResults,
    status.REQUEST_DENIED: RequestDenied,
}
PEP-8 formatting cleanup.
# -*- coding: utf-8 -*-
import status
class GmapException(Exception):
    """Base exception for all python-gmap exceptions.

    Subclasses Exception (not BaseException): BaseException is reserved
    for interpreter-level exits, and subclassing it made these errors
    invisible to ordinary ``except Exception`` handlers.
    """


class NoResults(GmapException):
    """Raised when api returned no results"""


class RequestDenied(GmapException):
    """Raised when request to API was denied"""
# Maps an API response status code to the exception class to raise;
# status.OK maps to None, meaning "no error".
EXCEPTION_MAPPING = {
    status.OK: None,
    status.ZERO_RESULTS: NoResults,
    status.REQUEST_DENIED: RequestDenied,
}
|
# Copyright (c) 2010-2020 Manfred Moitzi
# License: MIT License
import pytest
import random
from ezdxf.math import (
cubic_bezier_interpolation,
Vec3,
Vec2,
Bezier3P,
quadratic_to_cubic_bezier,
Bezier4P,
have_bezier_curves_g1_continuity,
bezier_to_bspline,
split_bezier,
quadratic_bezier_from_3p,
close_vectors,
cubic_bezier_bbox,
quadratic_bezier_bbox,
intersection_ray_cubic_bezier_2d,
)
def test_vertex_interpolation():
    # Interpolating n vertices yields n-1 cubic segments; check each
    # segment's four control points against known-good values.
    points = [(0, 0), (3, 1), (5, 3), (0, 8)]
    result = list(cubic_bezier_interpolation(points))
    assert len(result) == 3
    c1, c2, c3 = result

    p = c1.control_points
    assert p[0].isclose((0, 0))
    assert p[1].isclose((0.9333333333333331, 0.3111111111111111))
    assert p[2].isclose((1.8666666666666663, 0.6222222222222222))
    assert p[3].isclose((3, 1))

    p = c2.control_points
    assert p[0].isclose((3, 1))
    assert p[1].isclose((4.133333333333334, 1.3777777777777778))
    assert p[2].isclose((5.466666666666667, 1.822222222222222))
    assert p[3].isclose((5, 3))

    p = c3.control_points
    assert p[0].isclose((5, 3))
    assert p[1].isclose((4.533333333333333, 4.177777777777778))
    assert p[2].isclose((2.2666666666666666, 6.088888888888889))
    assert p[3].isclose((0, 8))
def test_quadratic_to_cubic_bezier():
    # Degree elevation must not change the curve: compare 10-point
    # approximations of 1000 seeded-random quadratics with their
    # cubic equivalents.
    r = random.Random(0)

    def random_vec() -> Vec3:
        return Vec3(r.uniform(-10, 10), r.uniform(-10, 10), r.uniform(-10, 10))

    for i in range(1000):
        quadratic = Bezier3P((random_vec(), random_vec(), random_vec()))
        quadratic_approx = list(quadratic.approximate(10))
        cubic = quadratic_to_cubic_bezier(quadratic)
        cubic_approx = list(cubic.approximate(10))
        assert len(quadratic_approx) == len(cubic_approx)
        for p1, p2 in zip(quadratic_approx, cubic_approx):
            assert p1.isclose(p2)
# Shared fixture curves for the continuity tests below.
# G1 continuity: normalized end-tangent == normalized start-tangent of next curve
B1 = Bezier4P([(0, 0), (1, 1), (2, 1), (3, 0)])

# B1/B2 has G1 continuity:
B2 = Bezier4P([(3, 0), (4, -1), (5, -1), (6, 0)])

# B1/B3 has no G1 continuity:
B3 = Bezier4P([(3, 0), (4, 1), (5, 1), (6, 0)])

# B1/B4 G1 continuity off tolerance:
B4 = Bezier4P([(3, 0), (4, -1.03), (5, -1.0), (6, 0)])

# B1/B5 has a gap between B1 end and B5 start:
B5 = Bezier4P([(4, 0), (5, -1), (6, -1), (7, 0)])
def test_g1_continuity_for_bezier_curves():
    # See the fixture curves B1..B5 defined above.
    assert have_bezier_curves_g1_continuity(B1, B2) is True
    assert have_bezier_curves_g1_continuity(B1, B3) is False
    assert (
        have_bezier_curves_g1_continuity(B1, B4, g1_tol=1e-4) is False
    ), "should be outside of tolerance "
    assert (
        have_bezier_curves_g1_continuity(B1, B5) is False
    ), "end- and start point should match"
# Degenerated curves: a duplicated control point at one end.
D1 = Bezier4P([(0, 0), (1, 1), (3, 0), (3, 0)])
D2 = Bezier4P([(3, 0), (3, 0), (5, -1), (6, 0)])
def test_g1_continuity_for_degenerated_bezier_curves():
    # Degenerated curves (duplicated end control points) must not be
    # reported as G1-continuous with any neighbor.
    assert have_bezier_curves_g1_continuity(D1, B2) is False
    assert have_bezier_curves_g1_continuity(B1, D2) is False
    assert have_bezier_curves_g1_continuity(D1, D2) is False
@pytest.mark.parametrize("curve", [D1, D2])
def test_flatten_degenerated_bezier_curves(curve):
    # Degenerated Bezier curves behave like regular curves!
    # flattening() must still subdivide instead of bailing out early.
    assert len(list(curve.flattening(0.1))) > 4
@pytest.mark.parametrize(
    "b1,b2",
    [
        (B1, B2),  # G1 continuity, the common case
        (B1, B3),  # without G1 continuity is also a regular B-spline
        (B1, B5),  # regular B-spline, but first control point of B5 is lost
    ],
    ids=["G1", "without G1", "gap"],
)
def test_bezier_curves_to_bspline(b1, b2):
    # Chained conversion keeps all control points except the shared one.
    bspline = bezier_to_bspline([b1, b2])
    # Remove duplicate control point between two adjacent curves:
    expected = list(b1.control_points) + list(b2.control_points)[1:]
    assert bspline.degree == 3, "should be a cubic B-spline"
    assert bspline.control_points == tuple(expected)
def test_quality_of_bezier_to_bspline_conversion_1():
    """A single converted curve approximates to the same points."""
    # This test shows the close relationship between cubic Bézier- and
    # cubic B-spline curves.
    points0 = B1.approximate(10)
    points1 = bezier_to_bspline([B1]).approximate(10)
    for p0, p1 in zip(points0, points1):
        assert p0.isclose(p1) is True, "conversion should be perfect"
def test_quality_of_bezier_to_bspline_conversion_2():
    """Two joined converted curves approximate to the same points."""
    # This test shows the close relationship between cubic Bézier- and
    # cubic B-spline curves.
    # Remove duplicate point between the two curves:
    points0 = list(B1.approximate(10)) + list(B2.approximate(10))[1:]
    points1 = bezier_to_bspline([B1, B2]).approximate(20)
    for p0, p1 in zip(points0, points1):
        assert p0.isclose(p1) is True, "conversion should be perfect"
def test_bezier_curves_to_bspline_error():
    """An empty curve list is rejected."""
    with pytest.raises(ValueError):
        bezier_to_bspline([])  # one or more curves expected
class TestSplitBezier:
    """Tests for split_bezier(): parameter validation and subdivision."""

    @pytest.fixture
    def points3(self):
        # Control points of a cubic Bézier curve.
        return Vec2.list([(0, 0), (0, 1), (1.5, 0.75), (2, 2)])

    @pytest.mark.parametrize("t", [-1, 2])
    def test_t_validation(self, points3, t):
        # Split parameter t must be inside the curve range [0, 1].
        with pytest.raises(ValueError):
            split_bezier(points3, t)

    def test_control_point_validation(self):
        # At least two control points are required.
        with pytest.raises(ValueError):
            split_bezier([Vec2(0, 0)], 0.5)

    def test_split_cubic_bezier(self, points3):
        # Both halves end at the split point (0.8125, 0.90625); the right
        # half is returned with reversed control point order.
        left, right = split_bezier(points3, 0.5)
        assert (
            close_vectors(
                left,
                [(0.0, 0.0), (0.0, 0.5), (0.375, 0.6875), (0.8125, 0.90625)],
            )
            is True
        )
        assert (
            close_vectors(
                right,
                [(2.0, 2.0), (1.75, 1.375), (1.25, 1.125), (0.8125, 0.90625)],
            )
            is True
        )
def test_quadratic_bezier_from_3_points():
    """The constructed curve passes through the middle point at t=0.5."""
    qbez = quadratic_bezier_from_3p((0, 0), (3, 2), (6, 0))
    assert qbez.point(0.5).isclose((3, 2))
def test_cubic_bezier_from_3_points():
    # NOTE(review): duplicates test_quadratic_bezier_from_3_points() and still
    # calls quadratic_bezier_from_3p(); presumably a cubic construction helper
    # (ezdxf.math.cubic_bezier_from_3p) was intended — confirm and fix.
    cbez = quadratic_bezier_from_3p((0, 0), (3, 2), (6, 0))
    assert cbez.point(0.5).isclose((3, 2))
class TestBezierCurveBoundingBox:
    """Bounding boxes of cubic/quadratic Bézier curves including extrema."""

    def test_linear_curve(self):
        bbox = cubic_bezier_bbox(Bezier4P([(0, 0), (1, 1), (2, 2), (3, 3)]))
        assert bbox.extmin == (0, 0, 0)
        assert bbox.extmax == (3, 3, 0)

    def test_reverse_linear_curve(self):
        bbox = cubic_bezier_bbox(Bezier4P([(3, 3), (2, 2), (-2, -2), (-3, -3)]))
        assert bbox.extmin == (-3, -3, 0)
        assert bbox.extmax == (3, 3, 0)

    def test_cubic_bezier_curve_with_one_extrema(self):
        # The curve's extremum lies inside the control polygon, not on it.
        curve = Bezier4P([(0, 0), (0, 1), (2, 1), (2, 0)])
        bbox = cubic_bezier_bbox(curve)
        assert bbox.extmax.y == pytest.approx(0.75)

    def test_cubic_bezier_curve_with_two_extrema(self):
        curve = Bezier4P([(0, 0), (0, 1), (2, -1), (2, 0)])
        bbox = cubic_bezier_bbox(curve)
        assert bbox.extmin.y == pytest.approx(-0.28867513459481287)
        assert bbox.extmax.y == pytest.approx(+0.28867513459481287)

    def test_closed_3d_cubic_bezier_curve(self):
        curve = Bezier4P([(0, 0, -1), (2, 3, 0), (-2, 3, 0), (0, 0, -1)])
        bbox = cubic_bezier_bbox(curve)
        assert bbox.extmin.x == pytest.approx(-0.5773502691896258)
        assert bbox.extmin.z == pytest.approx(-1.0)
        assert bbox.extmax.x == pytest.approx(+0.5773502691896258)
        assert bbox.extmax.y == pytest.approx(+2.25)
        assert bbox.extmax.z == pytest.approx(-0.25)

    def test_quadratic_bezier_curve_box(self):
        curve = Bezier3P([(0, 0), (1, 1), (2, 0)])
        bbox = quadratic_bezier_bbox(curve)
        assert bbox.extmax.y == pytest.approx(0.5)
class TestRayCubicBezierCurve2dIntersection:
    """Ray/cubic-Bézier intersection in 2D; the ray is given by two points."""

    @pytest.fixture(scope="class")
    def curve(self):
        # An S-shaped curve crossing y=0 three times.
        return Bezier4P([(0, -2), (2, 6), (4, -6), (6, 2)])

    def test_no_intersection(self, curve):
        assert (
            len(intersection_ray_cubic_bezier_2d((0, -6), (1, -6), curve)) == 0
        )

    def test_one_intersection_point(self, curve):
        points = intersection_ray_cubic_bezier_2d((3, -6), (3, 6), curve)
        assert len(points) == 1
        assert points[0].isclose((3, 0))

    def test_two_intersection_points(self, curve):
        points = intersection_ray_cubic_bezier_2d(
            (-1.4, -2.5), (7.1, 3.9), curve
        )
        assert len(points) == 2
        expected = (
            (0.18851028511733303, -1.3039451970881237),
            (2.5249135145844264, 0.4552289992165126),
        )
        assert all(p.isclose(e) for e, p in zip(expected, points)) is True

    def test_three_intersection_points(self, curve):
        points = intersection_ray_cubic_bezier_2d((0, 0), (1, 0), curve)
        assert len(points) == 3
        expected = (
            (0.6762099922755492, 0.0),
            (3.0, 0.0),
            (5.323790007724451, 0.0),
        )
        assert all(p.isclose(e) for e, p in zip(expected, points)) is True
# TODO: add more tests for intersection_ray_cubic_bezier_2d()
# Copyright (c) 2010-2020 Manfred Moitzi
# License: MIT License
import pytest
import random
from ezdxf.math import (
    cubic_bezier_interpolation,
    Vec3,
    Vec2,
    Bezier3P,
    quadratic_to_cubic_bezier,
    Bezier4P,
    have_bezier_curves_g1_continuity,
    bezier_to_bspline,
    split_bezier,
    quadratic_bezier_from_3p,
    cubic_bezier_from_3p,
    close_vectors,
    cubic_bezier_bbox,
    quadratic_bezier_bbox,
    intersection_ray_cubic_bezier_2d,
)
def test_vertex_interpolation():
    """Interpolating 4 vertices yields 3 cubic curves with known control points."""
    points = [(0, 0), (3, 1), (5, 3), (0, 8)]
    curves = list(cubic_bezier_interpolation(points))
    assert len(curves) == 3
    expected_control_points = [
        [
            (0, 0),
            (0.9333333333333331, 0.3111111111111111),
            (1.8666666666666663, 0.6222222222222222),
            (3, 1),
        ],
        [
            (3, 1),
            (4.133333333333334, 1.3777777777777778),
            (5.466666666666667, 1.822222222222222),
            (5, 3),
        ],
        [
            (5, 3),
            (4.533333333333333, 4.177777777777778),
            (2.2666666666666666, 6.088888888888889),
            (0, 8),
        ],
    ]
    for curve, expected in zip(curves, expected_control_points):
        for cp, ep in zip(curve.control_points, expected):
            assert cp.isclose(ep)
def test_quadratic_to_cubic_bezier():
    """Cubic conversion of random quadratic curves is point-wise identical."""
    rng = random.Random(0)

    def random_vec() -> Vec3:
        return Vec3(
            rng.uniform(-10, 10), rng.uniform(-10, 10), rng.uniform(-10, 10)
        )

    for _ in range(1000):
        quadratic = Bezier3P((random_vec(), random_vec(), random_vec()))
        cubic = quadratic_to_cubic_bezier(quadratic)
        q_points = list(quadratic.approximate(10))
        c_points = list(cubic.approximate(10))
        assert len(q_points) == len(c_points)
        for q_point, c_point in zip(q_points, c_points):
            assert q_point.isclose(c_point)
# Shared module-level fixtures: cubic Bézier curves for the continuity tests.
# G1 continuity: normalized end-tangent == normalized start-tangent of next curve
B1 = Bezier4P([(0, 0), (1, 1), (2, 1), (3, 0)])
# B1/B2 has G1 continuity:
B2 = Bezier4P([(3, 0), (4, -1), (5, -1), (6, 0)])
# B1/B3 has no G1 continuity:
B3 = Bezier4P([(3, 0), (4, 1), (5, 1), (6, 0)])
# B1/B4 G1 continuity off tolerance:
B4 = Bezier4P([(3, 0), (4, -1.03), (5, -1.0), (6, 0)])
# B1/B5 has a gap between B1 end and B5 start:
B5 = Bezier4P([(4, 0), (5, -1), (6, -1), (7, 0)])
def test_g1_continuity_for_bezier_curves():
    """G1 check: tangents must align and end/start points must coincide."""
    assert have_bezier_curves_g1_continuity(B1, B2) is True
    assert have_bezier_curves_g1_continuity(B1, B3) is False
    assert (
        have_bezier_curves_g1_continuity(B1, B4, g1_tol=1e-4) is False
    ), "should be outside of tolerance "
    assert (
        have_bezier_curves_g1_continuity(B1, B5) is False
    ), "end- and start point should match"
# Degenerated curves: a duplicated control point at the joint collapses the
# end-/start-tangent to a null vector.
D1 = Bezier4P([(0, 0), (1, 1), (3, 0), (3, 0)])
D2 = Bezier4P([(3, 0), (3, 0), (5, -1), (6, 0)])
def test_g1_continuity_for_degenerated_bezier_curves():
    """Degenerated curves (zero-length end tangent) never pass the G1 check."""
    assert have_bezier_curves_g1_continuity(D1, B2) is False
    assert have_bezier_curves_g1_continuity(B1, D2) is False
    assert have_bezier_curves_g1_continuity(D1, D2) is False
@pytest.mark.parametrize("curve", [D1, D2])
def test_flatten_degenerated_bezier_curves(curve):
    """Flattening still yields multiple segments for degenerated curves."""
    # Degenerated Bezier curves behave like regular curves!
    assert len(list(curve.flattening(0.1))) > 4
@pytest.mark.parametrize(
    "b1,b2",
    [
        (B1, B2),  # G1 continuity, the common case
        (B1, B3),  # without G1 continuity is also a regular B-spline
        (B1, B5),  # regular B-spline, but first control point of B5 is lost
    ],
    ids=["G1", "without G1", "gap"],
)
def test_bezier_curves_to_bspline(b1, b2):
    """Joined curves become one cubic B-spline over the merged control points."""
    bspline = bezier_to_bspline([b1, b2])
    # Remove duplicate control point between two adjacent curves:
    expected = list(b1.control_points) + list(b2.control_points)[1:]
    assert bspline.degree == 3, "should be a cubic B-spline"
    assert bspline.control_points == tuple(expected)
def test_quality_of_bezier_to_bspline_conversion_1():
    """A single converted curve approximates to the same points."""
    # This test shows the close relationship between cubic Bézier- and
    # cubic B-spline curves.
    points0 = B1.approximate(10)
    points1 = bezier_to_bspline([B1]).approximate(10)
    for p0, p1 in zip(points0, points1):
        assert p0.isclose(p1) is True, "conversion should be perfect"
def test_quality_of_bezier_to_bspline_conversion_2():
    """Two joined converted curves approximate to the same points."""
    # This test shows the close relationship between cubic Bézier- and
    # cubic B-spline curves.
    # Remove duplicate point between the two curves:
    points0 = list(B1.approximate(10)) + list(B2.approximate(10))[1:]
    points1 = bezier_to_bspline([B1, B2]).approximate(20)
    for p0, p1 in zip(points0, points1):
        assert p0.isclose(p1) is True, "conversion should be perfect"
def test_bezier_curves_to_bspline_error():
    """An empty curve list is rejected."""
    with pytest.raises(ValueError):
        bezier_to_bspline([])  # one or more curves expected
class TestSplitBezier:
    """Tests for split_bezier(): parameter validation and subdivision."""

    @pytest.fixture
    def points3(self):
        # Control points of a cubic Bézier curve.
        return Vec2.list([(0, 0), (0, 1), (1.5, 0.75), (2, 2)])

    @pytest.mark.parametrize("t", [-1, 2])
    def test_t_validation(self, points3, t):
        # Split parameter t must be inside the curve range [0, 1].
        with pytest.raises(ValueError):
            split_bezier(points3, t)

    def test_control_point_validation(self):
        # At least two control points are required.
        with pytest.raises(ValueError):
            split_bezier([Vec2(0, 0)], 0.5)

    def test_split_cubic_bezier(self, points3):
        # Both halves end at the split point (0.8125, 0.90625); the right
        # half is returned with reversed control point order.
        left, right = split_bezier(points3, 0.5)
        assert (
            close_vectors(
                left,
                [(0.0, 0.0), (0.0, 0.5), (0.375, 0.6875), (0.8125, 0.90625)],
            )
            is True
        )
        assert (
            close_vectors(
                right,
                [(2.0, 2.0), (1.75, 1.375), (1.25, 1.125), (0.8125, 0.90625)],
            )
            is True
        )
def test_quadratic_bezier_from_3_points():
    """The constructed curve passes through the middle point at t=0.5."""
    qbez = quadratic_bezier_from_3p((0, 0), (3, 2), (6, 0))
    assert qbez.point(0.5).isclose((3, 2))
def test_cubic_bezier_from_3_points():
    """The constructed cubic curve passes through the middle point at t=0.5."""
    # Bug fix: the original was a copy/paste of the quadratic test and still
    # called quadratic_bezier_from_3p(); this test is about the cubic variant.
    cbez = cubic_bezier_from_3p((0, 0), (3, 2), (6, 0))
    assert cbez.point(0.5).isclose((3, 2))
class TestBezierCurveBoundingBox:
    """Bounding boxes of cubic/quadratic Bézier curves including extrema."""

    def test_linear_curve(self):
        bbox = cubic_bezier_bbox(Bezier4P([(0, 0), (1, 1), (2, 2), (3, 3)]))
        assert bbox.extmin == (0, 0, 0)
        assert bbox.extmax == (3, 3, 0)

    def test_reverse_linear_curve(self):
        bbox = cubic_bezier_bbox(Bezier4P([(3, 3), (2, 2), (-2, -2), (-3, -3)]))
        assert bbox.extmin == (-3, -3, 0)
        assert bbox.extmax == (3, 3, 0)

    def test_cubic_bezier_curve_with_one_extrema(self):
        # The curve's extremum lies inside the control polygon, not on it.
        curve = Bezier4P([(0, 0), (0, 1), (2, 1), (2, 0)])
        bbox = cubic_bezier_bbox(curve)
        assert bbox.extmax.y == pytest.approx(0.75)

    def test_cubic_bezier_curve_with_two_extrema(self):
        curve = Bezier4P([(0, 0), (0, 1), (2, -1), (2, 0)])
        bbox = cubic_bezier_bbox(curve)
        assert bbox.extmin.y == pytest.approx(-0.28867513459481287)
        assert bbox.extmax.y == pytest.approx(+0.28867513459481287)

    def test_closed_3d_cubic_bezier_curve(self):
        curve = Bezier4P([(0, 0, -1), (2, 3, 0), (-2, 3, 0), (0, 0, -1)])
        bbox = cubic_bezier_bbox(curve)
        assert bbox.extmin.x == pytest.approx(-0.5773502691896258)
        assert bbox.extmin.z == pytest.approx(-1.0)
        assert bbox.extmax.x == pytest.approx(+0.5773502691896258)
        assert bbox.extmax.y == pytest.approx(+2.25)
        assert bbox.extmax.z == pytest.approx(-0.25)

    def test_quadratic_bezier_curve_box(self):
        curve = Bezier3P([(0, 0), (1, 1), (2, 0)])
        bbox = quadratic_bezier_bbox(curve)
        assert bbox.extmax.y == pytest.approx(0.5)
class TestRayCubicBezierCurve2dIntersection:
    """Ray/cubic-Bézier intersection in 2D; the ray is given by two points."""

    @pytest.fixture(scope="class")
    def curve(self):
        # An S-shaped curve crossing y=0 three times.
        return Bezier4P([(0, -2), (2, 6), (4, -6), (6, 2)])

    def test_no_intersection(self, curve):
        assert (
            len(intersection_ray_cubic_bezier_2d((0, -6), (1, -6), curve)) == 0
        )

    def test_one_intersection_point(self, curve):
        points = intersection_ray_cubic_bezier_2d((3, -6), (3, 6), curve)
        assert len(points) == 1
        assert points[0].isclose((3, 0))

    def test_two_intersection_points(self, curve):
        points = intersection_ray_cubic_bezier_2d(
            (-1.4, -2.5), (7.1, 3.9), curve
        )
        assert len(points) == 2
        expected = (
            (0.18851028511733303, -1.3039451970881237),
            (2.5249135145844264, 0.4552289992165126),
        )
        assert all(p.isclose(e) for e, p in zip(expected, points)) is True

    def test_three_intersection_points(self, curve):
        points = intersection_ray_cubic_bezier_2d((0, 0), (1, 0), curve)
        assert len(points) == 3
        expected = (
            (0.6762099922755492, 0.0),
            (3.0, 0.0),
            (5.323790007724451, 0.0),
        )
        assert all(p.isclose(e) for e, p in zip(expected, points)) is True

    def test_collinear_ray_and_curve(self):
        # A ray collinear to a linear "curve": current implementation reports
        # a single intersection at the start point.
        curve = Bezier4P([(0, 0), (1, 0), (2, 0), (3, 0)])
        ip = intersection_ray_cubic_bezier_2d((0, 0), (1, 0), curve)
        assert len(ip) == 1
        assert ip[0].isclose((0, 0))  # ???

    @pytest.mark.parametrize("x", [0, 0.5, 1, 3])
    def test_linear_ray_and_curve(self, x):
        # Vertical rays hit the flat curve exactly once, independent of the
        # direction the ray is defined in.
        curve = Bezier4P([(0, 0), (1, 0), (2, 0), (3, 0)])
        # ray defined in +y direction
        ip = intersection_ray_cubic_bezier_2d((x, -1), (x, 0), curve)
        assert len(ip) == 1
        assert ip[0].isclose((x, 0))
        # ray defined in -y direction
        ip = intersection_ray_cubic_bezier_2d((x, 2), (x, 1), curve)
        assert len(ip) == 1
        assert ip[0].isclose((x, 0))
|
#!/usr/bin/env python
from subprocess import Popen, PIPE
from os import path, makedirs
from shutil import rmtree
from Queue import Queue, Empty
from threading import Thread
from random import choice
import json
import re
def enter_line(stream, line):
    # Send one command *line* to the stream, newline-terminated.
    stream.write('%s\n' % line)
def process_lines(lines, dir_path):
    # Split amrecover "ls" output into absolute directory- and file paths.
    # Only lines starting with a dump timestamp (YYYY-MM-DD-HH-MM-SS) are
    # listing entries; everything else is prompt noise and is skipped.
    files = []
    dirs = []
    pattern = re.compile('\d{4}-\d{2}-\d{2}-\d{2}-\d{2}-\d{2}')
    for line in lines:
        #print line
        if not pattern.match(line):
            # Ignore lines not containing amrecover ls output
            continue
        tokens = line.split(None, 1)
        the_path = tokens[1]
        if the_path == '.':
            # Ignore current directory
            continue
        abs_path = path.join(dir_path, the_path)
        # Entries ending with the path separator are directories.
        if abs_path.endswith(path.sep):
            dirs.append(abs_path)
        else:
            files.append(abs_path)
            print abs_path
    return dirs, files
def read_until(stream, delimiter):
    # Consume lines from *stream* until *delimiter* is seen; return every
    # line read, the delimiter included. Blocks forever if it never appears.
    collected = []
    while True:
        current = str(stream.readline().strip())
        collected.append(current)
        if current == delimiter:
            return collected
def get_file_list(config, hostname, disk):
    # Walk the full amrecover listing of *disk* and return all file paths.
    # The extra "cd <disk>" after each "ls" makes amrecover echo the disk
    # name, which read_until() uses as an end-of-listing delimiter.
    # NOTE(review): ``pattern`` is unused here — process_lines() compiles
    # its own copy.
    pattern = re.compile('\d{4}-\d{2}-\d{2}-\d{2}-\d{2}-\d{2}')
    p = Popen(['amrecover', config], stdin=PIPE, stdout=PIPE)
    stdin = p.stdin
    stdout = p.stdout
    enter_line(stdin, 'sethost %s' % hostname)
    enter_line(stdin, 'setdisk %s' % disk)
    enter_line(stdin, 'cd %s' % disk)
    read_until(stdout, disk)
    enter_line(stdin, 'ls')
    enter_line(stdin, 'cd %s' % disk)
    lines = read_until(stdout, disk)
    dirs, files = process_lines(lines, disk)
    while dirs:
        # Iterate over a copy of dirs, so we can remove things from it
        for directory in dirs[:]:
            enter_line(stdin, 'cd %s' % directory)
            enter_line(stdin, 'ls')
            enter_line(stdin, 'cd %s' % disk)
            dirs.remove(directory)
            lines = read_until(stdout, disk)
            new_dirs, new_files = process_lines(lines, directory)
            dirs += new_dirs
            files += new_files
    return files
def test_extraction(config, hostname, disk, target):
    # Restore *target* into /tmp/check-amanda via amrecover, then sanity
    # check the restored file with file(1) and a non-zero size.
    prefix = '/tmp/check-amanda'
    try:
        makedirs(prefix)
    except OSError:
        print 'Directory %s already exists.' % prefix
    output_path = path.join(prefix, target)
    p = Popen(['amrecover', config], stdin=PIPE, stdout=PIPE)
    stdin = p.stdin
    enter_line(stdin, 'lcd %s' % prefix)
    enter_line(stdin, 'sethost %s' % hostname)
    enter_line(stdin, 'setdisk %s' % disk)
    enter_line(stdin, 'add %s' % target)
    enter_line(stdin, 'extract')
    # Two 'Y' answers — presumably amrecover's confirmation prompts; verify
    # against the amrecover dialog if the extract step changes.
    enter_line(stdin, 'Y')
    enter_line(stdin, 'Y')
    enter_line(stdin, 'exit')
    p.communicate()
    p = Popen(['file', output_path], stdin=PIPE, stdout=PIPE)
    output, error = p.communicate()
    print output.strip()
    # stderr is not piped, so communicate() returns None for it.
    assert error is None
    assert p.returncode == 0
    if not path.islink(output_path):
        size = path.getsize(output_path)
        assert size > 0
        print 'File size is %s.' % size
    rmtree(prefix)
def main():
    # Pick a random host/disk/config from the Chef node attributes and
    # spot-check that a randomly chosen file can be restored.
    dna = json.load(open('/etc/chef/node.json'))
    locations = dna['amanda']['backup_locations']
    hosts = [host['hostname'] for host in locations]
    hostname = choice(hosts)
    disks = [host['locations'] for host in locations if host['hostname'] == hostname][0]
    disk = choice(disks)
    config = choice(('daily', 'weekly', 'monthly'))
    print 'Checking %s backup of %s:%s ...' % (config, hostname, disk)
    files = get_file_list(config, hostname, disk)
    print 'There are %s files.' % len(files)
    random_file = choice(files)
    print 'Trying to extract %s ...' % random_file
    # Strip the '<disk>/' prefix: amrecover expects a disk-relative path.
    test_extraction(config, hostname, disk, random_file[len(disk) + 1:])
    print 'Everything looks OK.'
# Script entry point.
if __name__ == '__main__':
    main()
# Only traverse one lucky directory
#!/usr/bin/env python
from subprocess import Popen, PIPE
from os import path, makedirs
from shutil import rmtree
from Queue import Queue, Empty
from threading import Thread
from random import choice
import json
import re
def enter_line(stream, line):
    # Send one command *line* to the stream, newline-terminated.
    stream.write('%s\n' % line)
def process_lines(lines, dir_path):
    # Split amrecover "ls" output into absolute directory- and file paths.
    # Only lines starting with a dump timestamp (YYYY-MM-DD-HH-MM-SS) are
    # listing entries; the current-directory entry '.' is skipped.
    dirs = []
    files = []
    timestamp = re.compile(r'\d{4}-\d{2}-\d{2}-\d{2}-\d{2}-\d{2}')
    for line in lines:
        if not timestamp.match(line):
            continue
        the_path = line.split(None, 1)[1]
        if the_path == '.':
            continue
        abs_path = path.join(dir_path, the_path)
        # Entries ending with the path separator are directories.
        target = dirs if abs_path.endswith(path.sep) else files
        target.append(abs_path)
    return dirs, files
def read_until(stream, delimiter):
    # Consume lines from *stream* until *delimiter* is seen; return every
    # line read, the delimiter included. Blocks forever if it never appears.
    collected = []
    while True:
        current = str(stream.readline().strip())
        collected.append(current)
        if current == delimiter:
            return collected
def get_file_list(config, hostname, disk):
    # Collect file paths from the amrecover listing of *disk*. Instead of a
    # full traversal, each round descends into ONE randomly chosen directory
    # (spot-check behavior). The extra "cd <disk>" after each "ls" makes
    # amrecover echo the disk name, used by read_until() as a delimiter.
    # NOTE(review): ``pattern`` is unused here — process_lines() compiles
    # its own copy.
    pattern = re.compile('\d{4}-\d{2}-\d{2}-\d{2}-\d{2}-\d{2}')
    p = Popen(['amrecover', config], stdin=PIPE, stdout=PIPE)
    stdin = p.stdin
    stdout = p.stdout
    enter_line(stdin, 'sethost %s' % hostname)
    enter_line(stdin, 'setdisk %s' % disk)
    enter_line(stdin, 'cd %s' % disk)
    read_until(stdout, disk)
    enter_line(stdin, 'ls')
    enter_line(stdin, 'cd %s' % disk)
    lines = read_until(stdout, disk)
    dirs, files = process_lines(lines, disk)
    while dirs:
        # Only traverse one lucky directory
        directory = choice(dirs)
        enter_line(stdin, 'cd %s' % directory)
        enter_line(stdin, 'ls')
        enter_line(stdin, 'cd %s' % disk)
        lines = read_until(stdout, disk)
        # Sibling directories are dropped; only the chosen branch continues.
        dirs, new_files = process_lines(lines, directory)
        files += new_files
    return files
def test_extraction(config, hostname, disk, target):
    # Restore *target* into /tmp/check-amanda via amrecover, then sanity
    # check the restored file with file(1) and a non-zero size.
    prefix = '/tmp/check-amanda'
    try:
        makedirs(prefix)
    except OSError:
        print 'Directory %s already exists.' % prefix
    output_path = path.join(prefix, target)
    p = Popen(['amrecover', config], stdin=PIPE, stdout=PIPE)
    stdin = p.stdin
    enter_line(stdin, 'lcd %s' % prefix)
    enter_line(stdin, 'sethost %s' % hostname)
    enter_line(stdin, 'setdisk %s' % disk)
    enter_line(stdin, 'add %s' % target)
    enter_line(stdin, 'extract')
    # Two 'Y' answers — presumably amrecover's confirmation prompts; verify
    # against the amrecover dialog if the extract step changes.
    enter_line(stdin, 'Y')
    enter_line(stdin, 'Y')
    enter_line(stdin, 'exit')
    p.communicate()
    p = Popen(['file', output_path], stdin=PIPE, stdout=PIPE)
    output, error = p.communicate()
    print output.strip()
    # stderr is not piped, so communicate() returns None for it.
    assert error is None
    assert p.returncode == 0
    if not path.islink(output_path):
        size = path.getsize(output_path)
        assert size > 0
        print 'File size is %s.' % size
    rmtree(prefix)
def main():
    # Pick a random host/disk/config from the Chef node attributes and
    # spot-check that a randomly chosen file can be restored.
    dna = json.load(open('/etc/chef/node.json'))
    locations = dna['amanda']['backup_locations']
    hosts = [host['hostname'] for host in locations]
    hostname = choice(hosts)
    disks = [host['locations'] for host in locations if host['hostname'] == hostname][0]
    disk = choice(disks)
    config = choice(('daily', 'weekly', 'monthly'))
    print 'Checking %s backup of %s:%s ...' % (config, hostname, disk)
    files = get_file_list(config, hostname, disk)
    random_file = choice(files)
    print 'Trying to extract %s ...' % random_file
    # Strip the '<disk>/' prefix: amrecover expects a disk-relative path.
    test_extraction(config, hostname, disk, random_file[len(disk) + 1:])
    print 'Everything looks OK.'
# Script entry point.
if __name__ == '__main__':
    main()
|
from unittest import TestCase
from mock import patch
from django.core.files.storage import Storage, FileSystemStorage
from django.core.files.base import ContentFile
from os.path import join
from ..management.commands.collectstatic import Command, cache
class BotolikeStorage(Storage):
    """Minimal Storage stand-in mimicking boto's name normalization."""

    location = None

    def _normalize_name(self, path):
        # Prefix *path* with the configured location, if one is set.
        if self.location is None:
            return path
        return join(self.location, path)
class CollectfastTestCase(TestCase):
    """Base case: clears the collectstatic cache and provides a local
    FileSystemStorage plus a fresh Command instance per test."""

    def setUp(self):
        cache.clear()
        self.path = '.collectfast-test-file.txt'
        self.storage = FileSystemStorage(location='./')

    def get_command(self, *args, **kwargs):
        # Factory for the management command under test.
        return Command(*args, **kwargs)

    def tearDown(self):
        self.storage.delete(self.path)
class TestCommand(CollectfastTestCase):
    """Tests for Command.collect and the cached storage-lookup plumbing."""

    @patch("collectfast.management.commands.collectstatic.collectstatic"
           ".Command.collect")
    def test_collect(self, mocked_super):
        # With super().collect() mocked out, no files are processed.
        command = self.get_command()
        command.collect()
        self.assertEqual(command.num_skipped_files, 0)
        self.assertIsInstance(command.collect_time, str)

    def test_get_cache_key(self):
        # Cache keys are the configured prefix plus a 32-char digest.
        command = self.get_command()
        cache_key = command.get_cache_key('/some/random/path')
        prefix_len = len(command.cache_key_prefix)
        self.assertTrue(cache_key.startswith(command.cache_key_prefix))
        self.assertEqual(32 + prefix_len, len(cache_key))

    @patch("collectfast.management.commands.collectstatic.cache.get")
    @patch("collectfast.management.commands.collectstatic.Command"
           ".get_storage_lookup")
    def mock_get_lookup(self, path, cached_value, mocked_lookup, mocked_cache):
        # Helper (not a test): decorators apply bottom-up, so mocked_lookup
        # is the storage lookup and mocked_cache is cache.get.
        mocked_lookup.return_value = 'a fresh lookup'
        mocked_cache.return_value = cached_value
        command = self.get_command()
        ret_val = command.get_lookup(path)
        return ret_val, mocked_lookup, mocked_cache

    def get_fresh_lookup(self, path):
        # Simulate a cache miss (cache.get returns False).
        return self.mock_get_lookup(path, False)

    def get_cached_lookup(self, path):
        # Simulate a cache hit.
        return self.mock_get_lookup(path, 'a cached lookup')

    def test_get_lookup(self):
        path = '/some/unique/path'
        cache_key = self.get_command().get_cache_key(path)
        # Assert storage lookup is hit and cache is populated
        ret_val, mocked_lookup, mocked_cache = self.get_fresh_lookup(path)
        mocked_lookup.assert_called_once_with(path)
        self.assertEqual(ret_val, 'a fresh lookup')
        self.assertEqual(cache.get(cache_key), 'a fresh lookup')
        # Assert storage is not hit, but cache is
        ret_val, mocked_lookup, mocked_cache = self.get_cached_lookup(path)
        self.assertEqual(mocked_lookup.call_count, 0)
        self.assertEqual(mocked_cache.call_count, 1)
        self.assertEqual(ret_val, 'a cached lookup')

    @patch("collectfast.management.commands.collectstatic.Command"
           ".get_storage_lookup")
    def test_destroy_lookup(self, mocked_lookup):
        # destroy_lookup must clear both the cache and the in-memory map.
        mocked_lookup.return_value = 'a fake lookup'
        c = self.get_command()
        path = '/another/unique/path'
        cache_key = c.get_cache_key(path)
        c.get_lookup(path)
        self.assertEqual(cache.get(cache_key), mocked_lookup.return_value)
        self.assertEqual(c.lookups[path], mocked_lookup.return_value)
        c.destroy_lookup(path)
        self.assertEqual(cache.get(cache_key, 'empty'), 'empty')
        self.assertNotIn(path, c.lookups)
class TestGetFileHash(CollectfastTestCase):
    """get_file_hash returns a quoted digest and varies with content."""

    def test_get_file_hash(self):
        content = 'this is some content to be hashed'
        expected_hash = '"16e71fd2be8be2a3a8c0be7b9aab6c04"'
        c = self.get_command()
        self.storage.save(self.path, ContentFile(content))
        file_hash = c.get_file_hash(self.storage, self.path)
        self.assertEqual(file_hash, expected_hash)
        # A different payload must yield a different digest.
        self.storage.delete(self.path)
        self.storage.save(self.path, ContentFile('some nonsense'))
        file_hash = c.get_file_hash(self.storage, self.path)
        self.assertNotEqual(file_hash, expected_hash)
class TestCopyFile(CollectfastTestCase):
    """Tests for Command.copy_file flag handling and skip logic."""

    def setUp(self):
        super(TestCopyFile, self).setUp()

    def tearDown(self):
        # Overridden as a no-op: these tests never create the base class'
        # test file, so there is nothing to delete.
        pass

    @patch("collectfast.management.commands.collectstatic.collectstatic.Command"
           ".copy_file")
    @patch("collectfast.management.commands.collectstatic.Command.get_lookup")
    def call_copy_file(self, mocked_lookup, mocked_copy_file_super, **kwargs):
        # Helper: run copy_file with mocked lookup/super and return the mocks
        # for inspection by the individual tests.
        options = {
            "interactive": False,
            "post_process": False,
            "dry_run": False,
            "clear": False,
            "link": False,
            "ignore_patterns": [],
            "use_default_ignore_patterns": True}
        options.update(kwargs)
        path = options.pop('path', '/a/sweet/path')
        if 'lookup_hash' in options:
            # Fake a remote lookup whose etag matches the requested hash.
            class FakeLookup:
                etag = options.pop('lookup_hash')
            mocked_lookup.return_value = FakeLookup()
        c = self.get_command()
        c.storage = options.pop('storage', BotolikeStorage())
        c.set_options(**options)
        c.num_skipped_files = 0
        ret_val = c.copy_file(path, path, c.storage)
        return ret_val, mocked_copy_file_super, mocked_lookup

    def test_respect_flags(self):
        """`copy_file` respects --ignore_etag and --dry_run flags"""
        path = '/a/sweet/path'
        storage = BotolikeStorage()
        ret_val, super_mock, lookup_mock = self.call_copy_file(
            path=path, storage=storage, ignore_etag=True)
        self.assertEqual(lookup_mock.call_count, 0)
        ret_val, super_mock, lookup_mock = self.call_copy_file(
            path=path, storage=storage, dry_run=True)
        self.assertEqual(lookup_mock.call_count, 0)

    @patch("collectfast.management.commands.collectstatic.Command"
           ".get_file_hash")
    def test_calls_super(self, mock_get_file_hash):
        """`copy_file` properly calls super method"""
        path = '/a/sweet/path'
        storage = BotolikeStorage()
        ret_val, super_mock, lookup_mock = self.call_copy_file(
            path=path, storage=storage)
        super_mock.assert_called_once_with(path, path, storage)
        self.assertFalse(ret_val is False)

    @patch("collectfast.management.commands.collectstatic.Command"
           ".get_file_hash")
    def test_skips(self, mock_get_file_hash):
        """
        Returns False and increments self.num_skipped_files if matching
        hashes
        """
        # mock get_file_hash and lookup to return the same hashes
        mock_hash = 'thisisafakehash'
        mock_get_file_hash.return_value = mock_hash
        storage = BotolikeStorage()
        ret_val, super_mock, lookup_mock = self.call_copy_file(
            path=self.path, storage=storage, lookup_hash=mock_hash)
        self.assertFalse(ret_val)
        self.assertEqual(super_mock.call_count, 0)

    def test_invalidates_cache(self):
        """Invalidates cache and self.lookups"""
        # NOTE(review): deliberate failing placeholder — the real assertion
        # (copy_file should invalidate via destroy_lookup) is still missing.
        self.assertTrue(False)
# TODO: verify that copy_file calls destroy_lookup
from unittest import TestCase
from mock import patch
from django.core.files.storage import Storage, FileSystemStorage
from django.core.files.base import ContentFile
from os.path import join
from ..management.commands.collectstatic import Command, cache
class BotolikeStorage(Storage):
    """Minimal Storage stand-in mimicking boto's name normalization."""

    location = None

    def _normalize_name(self, path):
        # Prefix *path* with the configured location, if one is set.
        if self.location is None:
            return path
        return join(self.location, path)
class CollectfastTestCase(TestCase):
    """Base case: clears the collectstatic cache and provides a local
    FileSystemStorage plus a fresh Command instance per test."""

    def setUp(self):
        cache.clear()
        self.path = '.collectfast-test-file.txt'
        self.storage = FileSystemStorage(location='./')

    def get_command(self, *args, **kwargs):
        # Factory for the management command under test.
        return Command(*args, **kwargs)

    def tearDown(self):
        self.storage.delete(self.path)
class TestCommand(CollectfastTestCase):
    """Tests for Command.collect and the cached storage-lookup plumbing."""

    @patch("collectfast.management.commands.collectstatic.collectstatic"
           ".Command.collect")
    def test_collect(self, mocked_super):
        # With super().collect() mocked out, no files are processed.
        command = self.get_command()
        command.collect()
        self.assertEqual(command.num_skipped_files, 0)
        self.assertIsInstance(command.collect_time, str)

    def test_get_cache_key(self):
        # Cache keys are the configured prefix plus a 32-char digest.
        command = self.get_command()
        cache_key = command.get_cache_key('/some/random/path')
        prefix_len = len(command.cache_key_prefix)
        self.assertTrue(cache_key.startswith(command.cache_key_prefix))
        self.assertEqual(32 + prefix_len, len(cache_key))

    @patch("collectfast.management.commands.collectstatic.cache.get")
    @patch("collectfast.management.commands.collectstatic.Command"
           ".get_storage_lookup")
    def mock_get_lookup(self, path, cached_value, mocked_lookup, mocked_cache):
        # Helper (not a test): decorators apply bottom-up, so mocked_lookup
        # is the storage lookup and mocked_cache is cache.get.
        mocked_lookup.return_value = 'a fresh lookup'
        mocked_cache.return_value = cached_value
        command = self.get_command()
        ret_val = command.get_lookup(path)
        return ret_val, mocked_lookup, mocked_cache

    def get_fresh_lookup(self, path):
        # Simulate a cache miss (cache.get returns False).
        return self.mock_get_lookup(path, False)

    def get_cached_lookup(self, path):
        # Simulate a cache hit.
        return self.mock_get_lookup(path, 'a cached lookup')

    def test_get_lookup(self):
        path = '/some/unique/path'
        cache_key = self.get_command().get_cache_key(path)
        # Assert storage lookup is hit and cache is populated
        ret_val, mocked_lookup, mocked_cache = self.get_fresh_lookup(path)
        mocked_lookup.assert_called_once_with(path)
        self.assertEqual(ret_val, 'a fresh lookup')
        self.assertEqual(cache.get(cache_key), 'a fresh lookup')
        # Assert storage is not hit, but cache is
        ret_val, mocked_lookup, mocked_cache = self.get_cached_lookup(path)
        self.assertEqual(mocked_lookup.call_count, 0)
        self.assertEqual(mocked_cache.call_count, 1)
        self.assertEqual(ret_val, 'a cached lookup')

    @patch("collectfast.management.commands.collectstatic.Command"
           ".get_storage_lookup")
    def test_destroy_lookup(self, mocked_lookup):
        # destroy_lookup must clear both the cache and the in-memory map.
        mocked_lookup.return_value = 'a fake lookup'
        c = self.get_command()
        path = '/another/unique/path'
        cache_key = c.get_cache_key(path)
        c.get_lookup(path)
        self.assertEqual(cache.get(cache_key), mocked_lookup.return_value)
        self.assertEqual(c.lookups[path], mocked_lookup.return_value)
        c.destroy_lookup(path)
        self.assertEqual(cache.get(cache_key, 'empty'), 'empty')
        self.assertNotIn(path, c.lookups)
class TestGetFileHash(CollectfastTestCase):
    """get_file_hash returns a quoted digest and varies with content."""

    def test_get_file_hash(self):
        content = 'this is some content to be hashed'
        expected_hash = '"16e71fd2be8be2a3a8c0be7b9aab6c04"'
        c = self.get_command()
        self.storage.save(self.path, ContentFile(content))
        file_hash = c.get_file_hash(self.storage, self.path)
        self.assertEqual(file_hash, expected_hash)
        # A different payload must yield a different digest.
        self.storage.delete(self.path)
        self.storage.save(self.path, ContentFile('some nonsense'))
        file_hash = c.get_file_hash(self.storage, self.path)
        self.assertNotEqual(file_hash, expected_hash)
class TestCopyFile(CollectfastTestCase):
    """Tests for Command.copy_file flag handling, skip and invalidation."""

    @patch("collectfast.management.commands.collectstatic.collectstatic.Command"
           ".copy_file")
    @patch("collectfast.management.commands.collectstatic.Command.get_lookup")
    def call_copy_file(self, mocked_lookup, mocked_copy_file_super, **kwargs):
        # Helper: run copy_file with mocked lookup/super and return the mocks
        # for inspection by the individual tests.
        options = {
            "interactive": False,
            "post_process": False,
            "dry_run": False,
            "clear": False,
            "link": False,
            "ignore_patterns": [],
            "use_default_ignore_patterns": True}
        options.update(kwargs)
        path = options.pop('path', '/a/sweet/path')
        if 'lookup_hash' in options:
            # Fake a remote lookup whose etag matches the requested hash.
            class FakeLookup:
                etag = options.pop('lookup_hash')
            mocked_lookup.return_value = FakeLookup()
        c = self.get_command()
        c.storage = options.pop('storage', BotolikeStorage())
        c.set_options(**options)
        c.num_skipped_files = 0
        ret_val = c.copy_file(path, path, c.storage)
        return ret_val, mocked_copy_file_super, mocked_lookup

    def test_respect_flags(self):
        """`copy_file` respects --ignore_etag and --dry_run flags"""
        path = '/a/sweet/path'
        storage = BotolikeStorage()
        ret_val, super_mock, lookup_mock = self.call_copy_file(
            path=path, storage=storage, ignore_etag=True)
        self.assertEqual(lookup_mock.call_count, 0)
        ret_val, super_mock, lookup_mock = self.call_copy_file(
            path=path, storage=storage, dry_run=True)
        self.assertEqual(lookup_mock.call_count, 0)

    @patch("collectfast.management.commands.collectstatic.Command"
           ".get_file_hash")
    @patch("collectfast.management.commands.collectstatic.Command"
           ".destroy_lookup")
    def test_calls_super(self, mock_destroy_lookup, mock_get_file_hash):
        """`copy_file` properly calls super method"""
        path = '/a/sweet/path'
        storage = BotolikeStorage()
        ret_val, super_mock, lookup_mock = self.call_copy_file(
            path=path, storage=storage)
        super_mock.assert_called_once_with(path, path, storage)
        self.assertFalse(ret_val is False)
        # Copying must also invalidate the cached lookup for the path.
        mock_destroy_lookup.assert_called_once_with(path)

    @patch("collectfast.management.commands.collectstatic.Command"
           ".get_file_hash")
    def test_skips(self, mock_get_file_hash):
        """
        Returns False and increments self.num_skipped_files if matching
        hashes
        """
        # mock get_file_hash and lookup to return the same hashes
        mock_hash = 'thisisafakehash'
        mock_get_file_hash.return_value = mock_hash
        storage = BotolikeStorage()
        ret_val, super_mock, lookup_mock = self.call_copy_file(
            path=self.path, storage=storage, lookup_hash=mock_hash)
        self.assertFalse(ret_val)
        self.assertEqual(super_mock.call_count, 0)
        self.assertEqual(super_mock.call_count, 0)
# TODO: update numpy package and fix errors
|
'''
datasets.py
Represent each parsed dataset as an object. This is
really just a wrapper to the underlying dictionaries,
but it also provides some useful functions that assist
in the namespacing and equivalencing process.
'''
import os.path
import time
from common import get_citation_info
from collections import defaultdict
class DataSet():
    '''
    Wrapper around a parsed-dataset dictionary; adds convenience helpers
    used by the namespacing and equivalencing process.
    '''

    def __init__(self, dictionary=None, prefix='unnamed-data-object'):
        # Bug fix: the original default was a shared mutable ``{}``; every
        # instance created without an explicit dictionary aliased the same
        # object, so mutations leaked between instances.
        self._dict = {} if dictionary is None else dictionary
        self._prefix = prefix

    def get_values(self):
        ''' Get all non-obsolete primary ids in dictionary.'''
        for term_id in self._dict:
            if self._dict.get(term_id).get('is_obsolete'):
                continue
            else:
                yield term_id

    def __str__(self):
        return self._prefix
class OrthologyData(DataSet):
    '''Dataset mapping a gene id to its mouse and human ortholog ids.'''

    def __init__(self, dictionary=None, prefix='use-index-term-prefix'):
        # ``None`` sentinel avoids the shared-mutable-default bug; a new
        # empty dict is created per instance instead.
        super().__init__({} if dictionary is None else dictionary, prefix)

    def get_orthologs(self, term_id):
        '''Return the set of mouse ('|'-separated) and HGNC-prefixed human
        ortholog ids recorded for *term_id*.'''
        orthologs = set()
        mapping = self._dict.get(term_id)
        mouse_orthologs = mapping.get('mouse_ortholog_id').split('|')
        orthologs.update(mouse_orthologs)
        # Bug fix: ``is not ''`` tested identity, not equality; it only
        # worked through CPython string interning and raises a
        # SyntaxWarning since Python 3.8. Use ``!=``.
        if mapping.get('human_ortholog_id') != '':
            human_orthologs = mapping.get('human_ortholog_id').split('|')
            human_orthologs = {'HGNC:' + ortho for ortho in human_orthologs}
            orthologs.update(human_orthologs)
        return orthologs

    def __str__(self):
        return self._prefix + '_ortho'
class HomologeneData(OrthologyData):
    ''' Homologene-derived orthology data, keyed by Entrez gene id. '''

    def __init__(self, dictionary=None, prefix='egid'):
        super().__init__(dictionary, prefix)

    def get_values(self):
        for term_id in self._dict['gene_ids']:
            yield term_id

    def get_orthologs(self, term_id):
        ''' Return EGID-prefixed orthologs for term_id, or an empty set
        when its homologene group contains a paralog. '''
        orthologs = set()
        mapping = self._dict.get('gene_ids').get(term_id)
        group = mapping.get('homologene_group')
        species = mapping.get('tax_id')
        for k, v in self._dict['homologene_groups'][group].items():
            if k == species and len(v) > 1:
                # stop here, don't return any orthologs since
                # homologene group contains paralog
                return set()
            elif k == species:
                # own species, single gene: nothing to add (the original
                # bare 'next' statement was a no-op; 'continue' is meant)
                continue
            elif k != species and len(v) == 1:
                orthologs.update(v)
            else:
                print(
                    "WARNING! Missed case {0} - {1} - {2}".format(term_id, k, v))
        orthologs = {'EGID:' + o for o in orthologs}
        return orthologs
class HistoryDataSet(DataSet):
    ''' Id-change history: maps obsolete ids to current values. '''

    def __init__(self, dictionary=None, prefix='use-index-term-prefix'):
        super().__init__(dictionary, prefix)

    def get_id_update(self, term_id):
        ''' given an id, return the current value or "withdrawn". '''
        mapping = self._dict.get(term_id)
        if mapping is None:
            # id not tracked by this history set
            return None
        if mapping.get('status') == 'withdrawn':
            return 'withdrawn'
        return mapping.get('new_id')

    def get_obsolete_ids(self):
        ''' return dict with all obsolete ids, and current value.'''
        replacement_dict = {}
        for term_id in self._dict:
            mapping = self._dict.get(term_id)
            if mapping.get('status') == 'withdrawn':
                replacement_dict[term_id] = 'withdrawn'
            else:
                replacement_dict[term_id] = self.get_id_update(term_id)
        return replacement_dict

    def __str__(self):
        return self._prefix + '_history'
class NamespaceDataSet(DataSet):
    ''' Base class for BEL namespace data sets. Subclasses override the
    get_* accessors to map raw parsed records to namespace values. '''

    ids = False  # make .belns file containing ids (default = False)
    labels = True  # make .belns file containing labels (default = True)
    # namespace ('ns') and/or annotation ('anno') concept scheme
    scheme_type = ['ns']

    def __init__(
            self,
            dictionary=None,
            name='namespace-name',
            prefix='namespace-prefix',
            domain=['other']):
        self._name = name
        self._domain = domain
        super().__init__(dictionary, prefix)

    def get_label(self, term_id):
        ''' Return the value to be used as the preferred
        label for the associated term id. Use id as default,
        but will generally be a name/symbol. '''
        return term_id

    def get_xrefs(self, term_id):
        ''' Return equivalences to other namespaces (or None). '''
        return None

    def get_name(self, term_id):
        ''' Return the term name to use as title (or None). '''
        try:
            return self._dict.get(term_id).get('name')
        except AttributeError:
            # term_id not present in the dictionary
            return None

    def get_species(self, term_id):
        ''' Return species as NCBI tax ID (or None, as applicable). '''
        return None

    def get_encoding(self, term_id):
        ''' Return encoding (allowed abundance types) for value.
        Default = 'A' (Abundance). '''
        return 'A'

    def get_concept_type(self, term_id):
        # TODO - merge with get_encoding
        ''' For Annotation Concept Schemes, return set of AnnotationConcept types.
        Default = 'AnnotationConcept' (parent class) '''
        if 'anno' not in self.scheme_type:
            return None
        return {'AnnotationConcept'}

    def get_alt_symbols(self, term_id):
        ''' Return set of symbol synonyms. Default = None. '''
        return None

    def get_alt_names(self, term_id):
        ''' Return set of name synonyms. Default = None. '''
        return None

    def _strip_id_prefix(self, alt_id):
        ''' Remove a leading '<PREFIX>ID:' or '<PREFIX>:' from alt_id.
        str.lstrip() strips a *character set*, not a prefix, and could
        eat leading characters of the bare id itself. '''
        for p in (self._prefix.upper() + 'ID:', self._prefix.upper() + ':'):
            if alt_id.startswith(p):
                return alt_id[len(p):]
        return alt_id

    def get_alt_ids(self, term_id):
        ''' Returns set of alternative IDs (with any namespace prefix
        removed). IDs should be unique. '''
        try:
            alt_ids = self._dict.get(term_id).get('alt_ids')
        except AttributeError:
            alt_ids = set()
        if alt_ids:
            alt_ids = {self._strip_id_prefix(a) for a in alt_ids}
        return alt_ids

    def write_ns_values(self, dir):
        ''' Write .belns file(s) for this data set into dir, as enabled
        by the ids/labels flags. '''
        data_names = {}
        data_ids = {}
        for term_id in self.get_values():
            encoding = self.get_encoding(term_id)
            data_names[self.get_label(term_id)] = encoding
            data_ids[term_id] = encoding
            # alternative ids share the primary id's encoding
            for alt_id in self.get_alt_ids(term_id) or ():
                data_ids[alt_id] = encoding
        if self.labels:
            self.write_data(data_names, dir, self._name + '.belns')
        if self.ids:
            self.write_data(data_ids, dir, self._name + '-ids.belns')

    def write_data(self, data, dir, name):
        ''' Write sorted value|encoding pairs to dir/name, preceded by a
        header taken from dir/templates/name when available. '''
        if len(data) == 0:
            print(' WARNING: skipping writing ' +
                  name + '; no namespace data found.')
            return
        template = os.path.join(dir, 'templates', name)
        with open(os.path.join(dir, name), mode='w', encoding='utf8') as f:
            # insert header chunk
            if os.path.exists(template):
                # 'with' guarantees the template handle is closed even on error
                with open(template, encoding="utf-8") as tf:
                    header = tf.read().rstrip()
                # add Namespace, Citation and Author values
                # source_file attribute added to object during parsing
                header = get_citation_info(name, header, self.source_file)
            else:
                print(
                    'WARNING: Missing header template for {0}'.format(name))
                header = '[Values]'
            f.write(header + '\n')
            # write data
            for i in sorted(data.items()):
                f.write('|'.join(i) + '\n')

    def __str__(self):
        return self._prefix
class StandardCustomData(NamespaceDataSet, HistoryDataSet):
    ''' Namespace + history data for standard custom (tab-delimited)
    data sets; records use upper-case column keys (LABEL, TYPE, ...). '''

    def __init__(self, dictionary=None, *, name, prefix, domain):
        super().__init__(dictionary, name, prefix, domain)
        self._dict = {}  # make unique dict for each instance of class

    def get_values(self):
        for term_id in self._dict:
            if term_id is not None and self._dict.get(
                    term_id).get('OBSOLETE') != 1:
                yield term_id

    def get_label(self, term_id):
        ''' Return the value to be used as the preferred
        label for the associated term id. '''
        return self._dict.get(term_id).get('LABEL')

    def get_xrefs(self, term_id):
        ''' Return cross-references that carry a namespace prefix. '''
        xrefs = set(self._dict.get(term_id).get('XREF').split('|'))
        xrefs = {x.strip() for x in xrefs if ':' in x}
        return xrefs

    def get_species(self, term_id):
        return self._dict.get(term_id).get('SPECIES')

    def get_encoding(self, term_id):
        return self._dict.get(term_id).get('TYPE')

    def get_alt_names(self, term_id):
        synonyms = set(self._dict.get(term_id).get('SYNONYMS').split('|'))
        # drop empty strings produced by splitting
        return {s for s in synonyms if s}

    def get_obsolete_ids(self):
        ''' return dict with all obsolete ids, and current value.'''
        # TODO Add alt id handling
        replacement_dict = {}
        for term_id in self._dict:
            if self._dict.get(term_id).get('OBSOLETE') == 1:
                replacement_dict[term_id] = 'withdrawn'
        return replacement_dict
class EntrezInfoData(NamespaceDataSet):
    ''' NCBI Entrez Gene namespace data. '''

    # map Entrez 'type_of_gene' to BEL encoding
    ENC = {
        'protein-coding': 'GRP', 'miscRNA': 'GR', 'ncRNA': 'GR',
        'snoRNA': 'GR', 'snRNA': 'GR', 'tRNA': 'GR', 'scRNA': 'GR',
        'other': 'G', 'pseudo': 'GR', 'unknown': 'GRP', 'rRNA': 'GR'
    }
    subject = "gene/RNA/protein"
    description = "NCBI Entrez Gene identifiers for Homo sapiens, Mus musculus, and Rattus norvegicus."

    def __init__(
            self,
            dictionary=None,
            *,
            name='entrez-gene',
            prefix='egid',
            domain=['gene and gene product'],
            ids=True,
            labels=False):
        super().__init__(dictionary, name, prefix, domain)
        self.ids = ids
        self.labels = labels

    def get_label(self, term_id):
        ''' Return the value to be used as the preferred
        label for the associated term id. For Entrez,
        using the gene ID. '''
        return term_id

    def get_species(self, term_id):
        ''' Return species as NCBI tax ID (or None, as applicable). '''
        return self._dict.get(term_id).get('tax_id')

    def get_encoding(self, gene_id):
        ''' Return encoding (allowed abundance types) for value. '''
        mapping = self._dict.get(gene_id)
        gene_type = mapping.get('type_of_gene')
        description = mapping.get('description')
        encoding = EntrezInfoData.ENC.get(gene_type, 'G')
        # ncRNAs described as microRNAs also allow the 'M' encoding
        if gene_type == 'ncRNA' and 'microRNA' in description:
            encoding = 'GRM'
        if gene_type not in EntrezInfoData.ENC:
            print(
                'WARNING ' +
                gene_type +
                ' not defined for Entrez. G assigned as default encoding.')
        return encoding

    def get_xrefs(self, term_id):
        ''' Returns xrefs to HGNC, MGI, RGD. '''
        targets = ('MGI:', 'HGNC:', 'RGD:')
        xrefs = set()
        mapping = self._dict.get(term_id)
        xrefs.update(mapping.get('dbXrefs').split('|'))
        # normalize xrefs with duplicated prefix
        # e.g., HGNC:HGNC:5
        xrefs = {x.split(':', x.count(':') - 1)[-1] for x in xrefs}
        xrefs = {x for x in xrefs if x.startswith(targets)}
        return xrefs

    def get_alt_symbols(self, gene_id):
        ''' Return set of symbol synonyms. '''
        synonyms = set()
        mapping = self._dict.get(gene_id)
        # compare by value; "is not '-'" tested object identity and is
        # unreliable for strings
        if mapping.get('Synonyms') != '-':
            synonyms.update(mapping.get('Synonyms').split('|'))
            synonyms.add(mapping.get('Symbol'))
        return synonyms

    def get_alt_names(self, term_id):
        ''' Return set of name synonyms ('-' marks an absent field). '''
        synonyms = set()
        mapping = self._dict.get(term_id)
        if mapping.get('Other_designations') != '-':
            synonyms.update(mapping.get('Other_designations').split('|'))
        if mapping.get('description') != '-':
            synonyms.add(mapping.get('description'))
        return synonyms

    def get_name(self, term_id):
        ''' Get official term name. '''
        mapping = self._dict.get(term_id)
        return mapping.get('Full_name_from_nomenclature_authority')
class EntrezHistoryData(HistoryDataSet):
    ''' Entrez Gene id-history data. '''

    def __init__(self, dictionary=None, prefix='egid'):
        # None default avoids a shared mutable default dict
        super().__init__(dictionary, prefix)
class HGNCData(NamespaceDataSet, OrthologyData, HistoryDataSet):
    ''' HGNC human gene namespace, orthology and id-history data. '''

    # map HGNC 'Locus Type' to BEL encoding
    # (a duplicate 'RNA, micro' entry was removed; the values matched)
    ENC = {
        'gene with protein product': 'GRP', 'RNA, cluster': 'GR',
        'RNA, long non-coding': 'GR', 'RNA, micro': 'GRM',
        'RNA, ribosomal': 'GR', 'RNA, small cytoplasmic': 'GR',
        'RNA, small misc': 'GR', 'RNA, small nuclear': 'GR',
        'RNA, small nucleolar': 'GR', 'RNA, transfer': 'GR',
        'phenotype only': 'G', 'RNA, pseudogene': 'GR',
        'T cell receptor pseudogene': 'GR',
        'immunoglobulin pseudogene': 'GR', 'pseudogene': 'GR',
        'T cell receptor gene': 'GRP',
        'complex locus constituent': 'GRP',
        'endogenous retrovirus': 'G', 'fragile site': 'G',
        'immunoglobulin gene': 'GRP', 'protocadherin': 'GRP',
        'readthrough': 'GR', 'region': 'G',
        'transposable element': 'G', 'unknown': 'GRP',
        'virus integration site': 'G',
        'RNA, misc': 'GR', 'RNA, Y': 'GR', 'RNA, vault': 'GR',
    }

    def __init__(
            self,
            dictionary=None,
            *,
            name='hgnc-human-genes',
            prefix='hgnc',
            domain=['gene and gene product']):
        super().__init__(dictionary, name, prefix, domain)

    def get_values(self):
        for term_id in self._dict:
            if '~withdrawn' not in self._dict.get(term_id).get('Symbol'):
                yield term_id

    def get_id_update(self, term_id):
        ''' Return 'withdrawn', a replacement id, the id itself if still
        current, or None if unknown. '''
        mapping = self._dict.get(term_id)
        if mapping is None:
            return None
        if mapping.get('Locus Type') == 'withdrawn':
            name = self.get_name(term_id)
            if 'entry withdrawn' in name:
                return 'withdrawn'
            elif 'symbol withdrawn' in name:
                new_symbol = name.split('see ')[1]
                new_id = None
                # use a distinct loop variable: the original shadowed the
                # term_id parameter while searching for the replacement
                for candidate_id in self._dict:
                    if new_symbol == self.get_label(candidate_id):
                        new_id = candidate_id
                        break  # replacement found; stop scanning
                return new_id
        else:
            return term_id

    def get_obsolete_ids(self):
        obsolete = {}
        for term_id in self._dict:
            if 'withdrawn' in self.get_label(term_id):
                obsolete[term_id] = self.get_id_update(term_id)
        return obsolete

    def get_label(self, term_id):
        ''' Return preferred label associated with term id. '''
        mapping = self._dict.get(term_id)
        if mapping is None:
            return None
        return mapping.get('Symbol')

    def get_encoding(self, term_id):
        mapping = self._dict.get(term_id)
        locus_type = mapping.get('Locus Type')
        encoding = HGNCData.ENC.get(locus_type, 'G')
        if locus_type not in HGNCData.ENC:
            print(
                'WARNING ' +
                locus_type +
                ' not defined for HGNC. G assigned as default encoding.')
        return encoding

    def get_species(self, term_id):
        ''' Human. '''
        return '9606'

    def get_alt_symbols(self, term_id):
        synonyms = set()
        mapping = self._dict.get(term_id)
        if mapping.get('Synonyms'):
            symbol_synonyms = [s.strip()
                               for s in mapping.get('Synonyms').split(',')]
            synonyms.update(symbol_synonyms)
        if mapping.get('Previous Symbols'):
            old_symbols = [s.strip()
                           for s in mapping.get('Previous Symbols').split(',')]
            synonyms.update(old_symbols)
        return synonyms

    def get_alt_names(self, term_id):
        synonyms = set()
        mapping = self._dict.get(term_id)
        if mapping.get('Previous Names'):
            # previous names are quoted and comma-separated
            old_names = [s.strip('" ') for s in mapping.get(
                'Previous Names').split(', "')]
            synonyms.update(old_names)
        return synonyms

    def get_name(self, term_id):
        mapping = self._dict.get(term_id)
        return mapping.get('Approved Name')

    def get_orthologs(self, term_id):
        orthologs = set()
        mapping = self._dict.get(term_id)
        mouse_orthologs = mapping.get('mouse_ortholog_id').split('|')
        orthologs.update(mouse_orthologs)
        rat_orthologs = mapping.get('rat_ortholog_id').split('|')
        orthologs.update(rat_orthologs)
        return orthologs
class MGIData(NamespaceDataSet):
    ''' MGI mouse gene namespace data. '''

    # map MGI 'Feature Type' to BEL encoding
    ENC = {
        'gene': 'GRP', 'protein coding gene': 'GRP',
        'non-coding RNA gene': 'GR', 'rRNA gene': 'GR',
        'tRNA gene': 'GR', 'snRNA gene': 'GR', 'snoRNA gene': 'GR',
        'miRNA gene': 'GRM', 'scRNA gene': 'GR',
        'lincRNA gene': 'GR', 'RNase P RNA gene': 'GR',
        'RNase MRP RNA gene': 'GR', 'telomerase RNA gene': 'GR',
        'unclassified non-coding RNA gene': 'GR',
        'heritable phenotypic marker': 'G', 'gene segment': 'G',
        'unclassified gene': 'GRP', 'other feature types': 'G',
        'pseudogene': 'GR', 'transgene': 'G',
        'other genome feature': 'G', 'pseudogenic region': 'GR',
        'polymorphic pseudogene': 'GRP',
        'pseudogenic gene segment': 'GR', 'SRP RNA gene': 'GR'
    }

    def __init__(
            self,
            dictionary=None,
            *,
            name='mgi-mouse-genes',
            prefix='mgi',
            domain=['gene and gene product']):
        super().__init__(dictionary, name, prefix, domain)

    def get_values(self):
        ''' Yield only gene/pseudogene markers. '''
        for term_id in self._dict:
            mapping = self._dict.get(term_id)
            marker_type = mapping.get('Marker Type')
            if marker_type == 'Gene' or marker_type == 'Pseudogene':
                yield term_id

    def get_species(self, term_id):
        ''' Mouse. '''
        return '10090'

    def get_encoding(self, term_id):
        feature_type = self._dict.get(term_id).get('Feature Type')
        encoding = self.ENC.get(feature_type, 'G')
        if feature_type not in self.ENC:
            print(
                'WARNING ' +
                feature_type +
                ' not defined for MGI. G assigned as default encoding.')
        return encoding

    def get_label(self, term_id):
        try:
            return self._dict.get(term_id).get('Symbol')
        except AttributeError:
            # term_id missing: catch only the expected failure instead
            # of a bare except that would hide real bugs
            return None

    def get_name(self, term_id):
        mapping = self._dict.get(term_id)
        return mapping.get('Marker Name')

    def get_alt_symbols(self, term_id):
        mapping = self._dict.get(term_id)
        synonyms = mapping.get('Marker Synonyms').split('|')
        return {s for s in synonyms if s}
class RGDData(NamespaceDataSet):
    ''' RGD rat gene namespace data. '''

    # map RGD 'GENE_TYPE' to BEL encoding
    ENC = {
        'gene': 'GRP', 'miscrna': 'GR', 'predicted-high': 'GRP',
        'predicted-low': 'GRP', 'predicted-moderate': 'GRP',
        'protein-coding': 'GRP', 'pseudo': 'GR', 'snrna': 'GR',
        'trna': 'GR', 'rrna': 'GR', 'ncrna': 'GR'
    }

    def __init__(
            self,
            dictionary=None,
            *,
            name='rgd-rat-genes',
            prefix='rgd',
            domain=['gene and gene product']):
        super().__init__(dictionary, name, prefix, domain)

    def get_species(self, term_id):
        ''' Rat '''
        return '10116'

    def get_label(self, term_id):
        ''' Use Symbol as preferred label for RGD. '''
        try:
            return self._dict.get(term_id).get('SYMBOL')
        except AttributeError:
            # term_id missing; catch only the expected failure
            return None

    def get_name(self, term_id):
        return self._dict.get(term_id).get('NAME')

    def get_encoding(self, term_id):
        gene_type = self._dict.get(term_id).get('GENE_TYPE')
        name = self.get_name(term_id)
        encoding = RGDData.ENC.get(gene_type, 'G')
        # NOTE: the original unparenthesized 'or'/'and' condition bound as
        # "miscrna or (ncrna and microRNA)", tagging every miscrna as GRM
        # regardless of its name; only microRNAs should get 'M'.
        if gene_type in ('miscrna', 'ncrna') and 'microRNA' in name:
            encoding = 'GRM'
        if gene_type not in RGDData.ENC:
            print(
                'WARNING ' +
                gene_type +
                ' not defined for RGD. G assigned as default encoding.')
        return encoding

    def get_alt_symbols(self, term_id):
        synonyms = set()
        if self._dict.get(term_id).get('OLD_SYMBOL'):
            old_symbols = self._dict.get(term_id).get('OLD_SYMBOL').split(';')
            synonyms.update(old_symbols)
        return synonyms

    def get_alt_names(self, term_id):
        synonyms = set()
        mapping = self._dict.get(term_id)
        if mapping.get('OLD_NAME'):
            old_names = mapping.get('OLD_NAME').split(';')
            synonyms.update(old_names)
        return {s for s in synonyms if s}
class RGDObsoleteData(HistoryDataSet):
    ''' RGD obsolete-id history data. '''

    def __init__(self, dictionary=None, prefix='rgd'):
        # None default avoids a shared mutable default dict
        super().__init__(dictionary, prefix)
class SwissProtData(NamespaceDataSet):
    ''' Swiss-Prot protein namespace data. '''

    def __init__(
            self,
            dictionary=None,
            *,
            name='swissprot',
            prefix='sp',
            domain=['gene and gene product'],
            ids=True):
        # None default avoids a shared mutable defaultdict instance
        if dictionary is None:
            dictionary = defaultdict(list)
        super().__init__(dictionary, name, prefix, domain)
        self.ids = ids

    def get_encoding(self, term_id):
        ''' Proteins allow gene, RNA and protein abundances. '''
        return 'GRP'

    def get_label(self, term_id):
        return self._dict.get(term_id).get('name')

    def get_name(self, term_id):
        mapping = self._dict.get(term_id)
        return mapping.get('recommendedFullName')

    def get_alt_ids(self, term_id):
        ''' Secondary accessions, excluding the primary id itself. '''
        alt_ids = set(self._dict.get(term_id).get('accessions'))
        return {alt_id for alt_id in alt_ids if alt_id != term_id}

    def get_alt_symbols(self, term_id):
        synonyms = set()
        mapping = self._dict.get(term_id)
        synonyms.update(mapping.get('alternativeShortNames'))
        if mapping.get('recommendedShortName'):
            # fixed key typo: the original added
            # mapping.get('recommendedShortname') (lowercase 'n'),
            # which put None into the synonym set
            synonyms.add(mapping.get('recommendedShortName'))
        if mapping.get('geneName'):
            synonyms.add(mapping.get('geneName'))
        if mapping.get('geneSynonyms'):
            synonyms.update(mapping.get('geneSynonyms'))
        return synonyms

    def get_alt_names(self, term_id):
        synonyms = set()
        mapping = self._dict.get(term_id)
        synonyms.update(mapping.get('alternativeFullNames'))
        return synonyms

    def get_xrefs(self, term_id):
        ''' Returns GeneIDs or HGNC/MGI/RGD IDs. '''
        mapping = self._dict.get(term_id)
        xrefs = set()
        xrefs_dict = mapping.get('dbreference')
        for ns, values in xrefs_dict.items():
            if ns == 'GeneId':
                xrefs.update(('EGID:' + v) for v in values)
            elif ns == 'HGNC' or ns == 'MGI':
                # HGNC and MGI ids already carry their prefix
                xrefs.update(values)
            elif ns == 'RGD':
                xrefs.update(('RGD:' + v) for v in values)
        return xrefs

    def get_species(self, term_id):
        return self._dict.get(term_id).get('tax_id')
class AffyData(NamespaceDataSet):
    ''' Affymetrix probe-set namespace data. '''

    def __init__(
            self,
            dictionary=None,
            *,
            name='affy-probeset',
            prefix='affx',
            domain=['gene and gene product'],
            ids=True,
            labels=False):
        # None default avoids a shared mutable defaultdict instance
        if dictionary is None:
            dictionary = defaultdict(list)
        super().__init__(dictionary, name, prefix, domain)
        self.ids = ids
        self.labels = labels

    def get_species(self, term_id):
        species = self._dict.get(term_id).get('Species')
        species_dict = {'Homo sapiens': '9606',
                        'Mus musculus': '10090',
                        'Rattus norvegicus': '10116'}
        return species_dict.get(species)

    def get_encoding(self, term_id):
        ''' Return encoding (allowed abundance types) for value.
        R - RNAAbundance. '''
        return 'R'

    def get_xrefs(self, term_id):
        ''' Returns equivalent Entrez Gene IDs for value ('---' marks
        an absent mapping). '''
        entrez_ids = self._dict.get(term_id).get('Entrez Gene').split('///')
        if entrez_ids[0] == '---':
            return None
        return {'EGID:' + eid.strip() for eid in entrez_ids}
class CHEBIData(NamespaceDataSet):
    ''' ChEBI chemical namespace data. '''

    def __init__(
            self,
            dictionary=None,
            *,
            name='chebi',
            prefix='chebi',
            domain=['chemical'],
            ids=True):
        # None default avoids a shared mutable default dict
        super().__init__(dictionary, name, prefix, domain)
        self.ids = ids

    def get_label(self, term_id):
        return self._dict.get(term_id).get('name')

    def get_alt_names(self, term_id):
        synonyms = set()
        mapping = self._dict.get(term_id)
        if mapping.get('synonyms'):
            synonyms.update(mapping.get('synonyms'))
        return synonyms
class Gene2AccData(DataSet):
    ''' Entrez gene-to-accession equivalence data. '''

    def __init__(self, dictionary=None, prefix='gene2acc'):
        # None default avoids a shared mutable default dict
        super().__init__(dictionary, prefix)

    def get_eq_values(self):
        ''' Yield (entrez_gene, status, tax_id) triples. '''
        for entrez_gene in self._dict:
            mapping = self._dict.get(entrez_gene)
            status = mapping.get('status')
            taxid = mapping.get('tax_id')
            yield entrez_gene, status, taxid
class GOData(NamespaceDataSet, HistoryDataSet):
    ''' Gene Ontology namespace plus id-history data. '''

    # dictionary is required, since GO file parsed into multiple objects
    def __init__(self, dictionary, *, name, prefix, domain, ids=True):
        super().__init__(dictionary, name, prefix, domain)
        self.ids = ids

    def get_values(self):
        ''' Yield ids of all non-obsolete terms. '''
        for go_id, record in self._dict.items():
            if not record.get('is_obsolete'):
                yield go_id

    def get_obsolete_ids(self):
        ''' Map every obsolete id to the string 'withdrawn'. '''
        return {go_id: 'withdrawn' for go_id, record in self._dict.items()
                if record.get('is_obsolete')}

    def get_id_update(self, term_id):
        ''' Return 'withdrawn', the id itself, or None if unknown. '''
        record = self._dict.get(term_id)
        if not record:
            return None
        return 'withdrawn' if record.get('is_obsolete') else term_id

    def get_label(self, term_id):
        ''' Preferred label is the GO term name. '''
        return self._dict.get(term_id).get('termname')

    def get_alt_names(self, term_id):
        ''' Return the set of term synonyms. '''
        return set(self._dict.get(term_id).get('synonyms'))

    def get_encoding(self, term_id):
        ''' 'C' for complexes, 'B' for biological processes, else 'A'. '''
        if self._dict.get(term_id).get('complex'):
            return 'C'
        return 'B' if self._prefix == 'gobp' else 'A'
class MESHData(NamespaceDataSet):
    ''' MeSH namespace/annotation data. '''

    # NOTE dictionary and other arguments are required since MeSH file
    # parsed into multiple objects
    def __init__(
            self,
            dictionary,
            *,
            name,
            prefix,
            domain,
            ids=True,
            scheme_type=None):
        super().__init__(dictionary, name, prefix, domain)
        self.ids = ids
        # a list default (['ns']) would be shared across instances;
        # build a fresh one per instance instead
        self.scheme_type = ['ns'] if scheme_type is None else scheme_type

    def get_label(self, term_id):
        return self._dict.get(term_id).get('mesh_header')

    def get_encoding(self, term_id):
        ''' Diseases -> 'O', processes -> 'B', else 'A'. '''
        if self._prefix == 'meshd':
            return 'O'
        elif self._prefix == 'meshpp':
            return 'B'
        else:
            return 'A'

    def get_concept_type(self, term_id):
        # TODO - merge with get_encoding
        ''' For Annotation Concept Schemes, return set of AnnotationConcept types.
        Default = 'AnnotationConcept' (parent class) '''
        if 'anno' not in self.scheme_type:
            return None
        if self._prefix == 'meshd':
            return {'Disease'}
        elif self._prefix == 'mesha':
            return {'Anatomy'}
        elif self._prefix == 'meshcs':
            return {'Location'}
        else:
            return None

    def get_alt_names(self, term_id):
        synonyms = set()
        mapping = self._dict.get(term_id)
        synonyms.update(mapping.get('synonyms'))
        return synonyms
class SwissWithdrawnData(HistoryDataSet):
    ''' Withdrawn Swiss-Prot accessions; every listed accession is
    'withdrawn' with no replacement. '''

    def __init__(self, dictionary=None, prefix='sp'):
        # None default avoids a shared mutable default dict
        super().__init__(dictionary, prefix)

    def get_obsolete_ids(self):
        accessions = self._dict.get('accessions')
        return {a: 'withdrawn' for a in accessions}

    def get_id_update(self, term_id):
        if term_id in self._dict.get('accessions'):
            return 'withdrawn'
        return None
class OWLData(NamespaceDataSet, HistoryDataSet):
    ''' Namespace + history data parsed from OWL ontologies
    (CLO, CL, UBERON, EFO, DO, ...). '''

    def __init__(
            self,
            dictionary=None,
            *,
            name,
            prefix,
            domain,
            ids=True,
            scheme_type):
        super().__init__(dictionary, name, prefix, domain)
        self._dict = {}  # make unique dict for each instance of class
        self.ids = ids
        self.scheme_type = scheme_type

    def get_label(self, term_id):
        return self._dict.get(term_id).get('name')

    def get_encoding(self, term_id):
        return 'O'

    def get_concept_type(self, term_id):
        # TODO - merge with get_encoding
        ''' For Annotation Concept Schemes, return set of AnnotationConcept types.
        Default = 'AnnotationConcept' (parent class) '''
        concept_type = set()
        if 'anno' not in self.scheme_type:
            return None
        elif self._prefix == 'clo':
            concept_type = {'CellLine'}
        elif self._prefix == 'cl':
            concept_type = {'Cell'}
        elif self._prefix == 'uberon':
            concept_type = {'Anatomy'}
        elif self._prefix == 'efo':
            concept_type = self._dict.get(term_id).get("term_type")
        elif self._prefix == 'do':
            concept_type = {'Disease'}
        return concept_type

    def get_alt_names(self, term_id):
        mapping = self._dict.get(term_id)
        return set(mapping.get('synonyms'))

    def find_xref(self, ref):
        ''' Used only in equiv module. '''
        for term_id, mapping in self._dict.items():
            if ref in mapping.get('dbxrefs'):
                return term_id

    def get_xrefs(self, term_id):
        ''' Returns MeSH (MSH) xrefs for a given DO ID . '''
        xrefs = set()
        mapping = self._dict.get(term_id)
        xrefs.update(mapping.get('dbxrefs'))
        if self._prefix == 'do':
            xrefs = {x.replace('MSH:', 'MESHD:')
                     for x in xrefs if x.startswith('MSH:')}
        return xrefs

    def get_obsolete_ids(self):
        obsolete = {}
        for term_id in self._dict:
            if self._dict.get(term_id).get('is_obsolete'):
                obsolete[term_id] = 'withdrawn'
        return obsolete

    def get_id_update(self, term_id):
        record = self._dict.get(term_id)
        if not record:
            return None
        return 'withdrawn' if record.get('is_obsolete') else term_id
class NCBITaxonomyData(NamespaceDataSet):
    ''' NCBI Taxonomy namespace/annotation data. '''

    def __init__(
            self,
            dictionary=None,
            *,
            name,
            prefix,
            domain,
            ids=True,
            scheme_type):
        super().__init__(dictionary, name, prefix, domain)
        # the ids parameter was previously accepted but never stored,
        # so its default (True) was silently ignored
        self.ids = ids
        self.scheme_type = scheme_type

    def get_label(self, term_id):
        return self._dict.get(term_id).get('name')

    def get_alt_names(self, term_id):
        mapping = self._dict.get(term_id)
        return set(mapping.get('synonyms'))

    def get_concept_type(self, term_id):
        # TODO - merge with get_encoding
        ''' For Annotation Concept Schemes, return set of AnnotationConcept types.
        Default = 'AnnotationConcept' (parent class) '''
        if 'anno' not in self.scheme_type:
            return None
        return {'Species'}
Added missing MGI gene types
'''
datasets.py
Represent each parsed dataset as an object. This is
really just a wrapper to the underlying dictionaries,
but it also provides some useful functions that assist
in the namespacing and equivalencing process.
'''
import os.path
import time
from common import get_citation_info
from collections import defaultdict
class DataSet():
    ''' Base wrapper around a parsed-data dictionary keyed by term id. '''

    def __init__(self, dictionary=None, prefix='unnamed-data-object'):
        # A mutable default ({}) would be shared by every instance built
        # without an explicit dictionary; create a fresh dict instead.
        self._dict = {} if dictionary is None else dictionary
        self._prefix = prefix

    def get_values(self):
        ''' Get all non-obsolete primary ids in dictionary.'''
        for term_id in self._dict:
            # records flagged 'is_obsolete' are skipped
            if self._dict.get(term_id).get('is_obsolete'):
                continue
            yield term_id

    def __str__(self):
        return self._prefix
class OrthologyData(DataSet):
    ''' Ortholog mappings (mouse/human) for gene identifiers. '''

    def __init__(self, dictionary=None, prefix='use-index-term-prefix'):
        # None default avoids a shared mutable default dict
        super().__init__(dictionary, prefix)

    def get_orthologs(self, term_id):
        ''' Return set of mouse and (HGNC-prefixed) human ortholog ids. '''
        orthologs = set()
        mapping = self._dict.get(term_id)
        mouse_orthologs = mapping.get('mouse_ortholog_id').split('|')
        orthologs.update(mouse_orthologs)
        # compare by value, not identity: "is not ''" tested object
        # identity and is unreliable for strings
        if mapping.get('human_ortholog_id') != '':
            human_orthologs = mapping.get('human_ortholog_id').split('|')
            human_orthologs = {'HGNC:' + ortho for ortho in human_orthologs}
            orthologs.update(human_orthologs)
        return orthologs

    def __str__(self):
        return self._prefix + '_ortho'
class HomologeneData(OrthologyData):
    ''' Homologene-derived orthology data, keyed by Entrez gene id. '''

    def __init__(self, dictionary=None, prefix='egid'):
        super().__init__(dictionary, prefix)

    def get_values(self):
        for term_id in self._dict['gene_ids']:
            yield term_id

    def get_orthologs(self, term_id):
        ''' Return EGID-prefixed orthologs for term_id, or an empty set
        when its homologene group contains a paralog. '''
        orthologs = set()
        mapping = self._dict.get('gene_ids').get(term_id)
        group = mapping.get('homologene_group')
        species = mapping.get('tax_id')
        for k, v in self._dict['homologene_groups'][group].items():
            if k == species and len(v) > 1:
                # stop here, don't return any orthologs since
                # homologene group contains paralog
                return set()
            elif k == species:
                # own species, single gene: nothing to add (the original
                # bare 'next' statement was a no-op; 'continue' is meant)
                continue
            elif k != species and len(v) == 1:
                orthologs.update(v)
            else:
                print(
                    "WARNING! Missed case {0} - {1} - {2}".format(term_id, k, v))
        orthologs = {'EGID:' + o for o in orthologs}
        return orthologs
class HistoryDataSet(DataSet):
    ''' Id-change history: maps obsolete ids to current values. '''

    def __init__(self, dictionary=None, prefix='use-index-term-prefix'):
        super().__init__(dictionary, prefix)

    def get_id_update(self, term_id):
        ''' given an id, return the current value or "withdrawn". '''
        mapping = self._dict.get(term_id)
        if mapping is None:
            # id not tracked by this history set
            return None
        if mapping.get('status') == 'withdrawn':
            return 'withdrawn'
        return mapping.get('new_id')

    def get_obsolete_ids(self):
        ''' return dict with all obsolete ids, and current value.'''
        replacement_dict = {}
        for term_id in self._dict:
            mapping = self._dict.get(term_id)
            if mapping.get('status') == 'withdrawn':
                replacement_dict[term_id] = 'withdrawn'
            else:
                replacement_dict[term_id] = self.get_id_update(term_id)
        return replacement_dict

    def __str__(self):
        return self._prefix + '_history'
class NamespaceDataSet(DataSet):
    ''' Base class for BEL namespace data sets. Subclasses override the
    get_* accessors to map raw parsed records to namespace values. '''

    ids = False  # make .belns file containing ids (default = False)
    labels = True  # make .belns file containing labels (default = True)
    # namespace ('ns') and/or annotation ('anno') concept scheme
    scheme_type = ['ns']

    def __init__(
            self,
            dictionary=None,
            name='namespace-name',
            prefix='namespace-prefix',
            domain=['other']):
        self._name = name
        self._domain = domain
        super().__init__(dictionary, prefix)

    def get_label(self, term_id):
        ''' Return the value to be used as the preferred
        label for the associated term id. Use id as default,
        but will generally be a name/symbol. '''
        return term_id

    def get_xrefs(self, term_id):
        ''' Return equivalences to other namespaces (or None). '''
        return None

    def get_name(self, term_id):
        ''' Return the term name to use as title (or None). '''
        try:
            return self._dict.get(term_id).get('name')
        except AttributeError:
            # term_id not present in the dictionary
            return None

    def get_species(self, term_id):
        ''' Return species as NCBI tax ID (or None, as applicable). '''
        return None

    def get_encoding(self, term_id):
        ''' Return encoding (allowed abundance types) for value.
        Default = 'A' (Abundance). '''
        return 'A'

    def get_concept_type(self, term_id):
        # TODO - merge with get_encoding
        ''' For Annotation Concept Schemes, return set of AnnotationConcept types.
        Default = 'AnnotationConcept' (parent class) '''
        if 'anno' not in self.scheme_type:
            return None
        return {'AnnotationConcept'}

    def get_alt_symbols(self, term_id):
        ''' Return set of symbol synonyms. Default = None. '''
        return None

    def get_alt_names(self, term_id):
        ''' Return set of name synonyms. Default = None. '''
        return None

    def _strip_id_prefix(self, alt_id):
        ''' Remove a leading '<PREFIX>ID:' or '<PREFIX>:' from alt_id.
        str.lstrip() strips a *character set*, not a prefix, and could
        eat leading characters of the bare id itself. '''
        for p in (self._prefix.upper() + 'ID:', self._prefix.upper() + ':'):
            if alt_id.startswith(p):
                return alt_id[len(p):]
        return alt_id

    def get_alt_ids(self, term_id):
        ''' Returns set of alternative IDs (with any namespace prefix
        removed). IDs should be unique. '''
        try:
            alt_ids = self._dict.get(term_id).get('alt_ids')
        except AttributeError:
            alt_ids = set()
        if alt_ids:
            alt_ids = {self._strip_id_prefix(a) for a in alt_ids}
        return alt_ids

    def write_ns_values(self, dir):
        ''' Write .belns file(s) for this data set into dir, as enabled
        by the ids/labels flags. '''
        data_names = {}
        data_ids = {}
        for term_id in self.get_values():
            encoding = self.get_encoding(term_id)
            data_names[self.get_label(term_id)] = encoding
            data_ids[term_id] = encoding
            # alternative ids share the primary id's encoding
            for alt_id in self.get_alt_ids(term_id) or ():
                data_ids[alt_id] = encoding
        if self.labels:
            self.write_data(data_names, dir, self._name + '.belns')
        if self.ids:
            self.write_data(data_ids, dir, self._name + '-ids.belns')

    def write_data(self, data, dir, name):
        ''' Write sorted value|encoding pairs to dir/name, preceded by a
        header taken from dir/templates/name when available. '''
        if len(data) == 0:
            print(' WARNING: skipping writing ' +
                  name + '; no namespace data found.')
            return
        template = os.path.join(dir, 'templates', name)
        with open(os.path.join(dir, name), mode='w', encoding='utf8') as f:
            # insert header chunk
            if os.path.exists(template):
                # 'with' guarantees the template handle is closed even on error
                with open(template, encoding="utf-8") as tf:
                    header = tf.read().rstrip()
                # add Namespace, Citation and Author values
                # source_file attribute added to object during parsing
                header = get_citation_info(name, header, self.source_file)
            else:
                print(
                    'WARNING: Missing header template for {0}'.format(name))
                header = '[Values]'
            f.write(header + '\n')
            # write data
            for i in sorted(data.items()):
                f.write('|'.join(i) + '\n')

    def __str__(self):
        return self._prefix
class StandardCustomData(NamespaceDataSet, HistoryDataSet):
    ''' Namespace + history data for standard custom (tab-delimited)
    data sets; records use upper-case column keys (LABEL, TYPE, ...). '''

    def __init__(self, dictionary=None, *, name, prefix, domain):
        super().__init__(dictionary, name, prefix, domain)
        self._dict = {}  # make unique dict for each instance of class

    def get_values(self):
        for term_id in self._dict:
            if term_id is not None and self._dict.get(
                    term_id).get('OBSOLETE') != 1:
                yield term_id

    def get_label(self, term_id):
        ''' Return the value to be used as the preferred
        label for the associated term id. '''
        return self._dict.get(term_id).get('LABEL')

    def get_xrefs(self, term_id):
        ''' Return cross-references that carry a namespace prefix. '''
        xrefs = set(self._dict.get(term_id).get('XREF').split('|'))
        xrefs = {x.strip() for x in xrefs if ':' in x}
        return xrefs

    def get_species(self, term_id):
        return self._dict.get(term_id).get('SPECIES')

    def get_encoding(self, term_id):
        return self._dict.get(term_id).get('TYPE')

    def get_alt_names(self, term_id):
        synonyms = set(self._dict.get(term_id).get('SYNONYMS').split('|'))
        # drop empty strings produced by splitting
        return {s for s in synonyms if s}

    def get_obsolete_ids(self):
        ''' return dict with all obsolete ids, and current value.'''
        # TODO Add alt id handling
        replacement_dict = {}
        for term_id in self._dict:
            if self._dict.get(term_id).get('OBSOLETE') == 1:
                replacement_dict[term_id] = 'withdrawn'
        return replacement_dict
class EntrezInfoData(NamespaceDataSet):
    ''' NCBI Entrez Gene data (gene_info records keyed by gene id). '''

    # Map Entrez 'type_of_gene' values to BEL encodings.
    ENC = {
        'protein-coding': 'GRP', 'miscRNA': 'GR', 'ncRNA': 'GR',
        'snoRNA': 'GR', 'snRNA': 'GR', 'tRNA': 'GR', 'scRNA': 'GR',
        'other': 'G', 'pseudo': 'GR', 'unknown': 'GRP', 'rRNA': 'GR'
    }
    subject = "gene/RNA/protein"
    description = "NCBI Entrez Gene identifiers for Homo sapiens, Mus musculus, and Rattus norvegicus."

    def __init__(
            self,
            dictionary=None,
            *,
            name='entrez-gene',
            prefix='egid',
            domain=None,
            ids=True,
            labels=False):
        # None sentinels avoid mutable default arguments shared across
        # instances.
        if dictionary is None:
            dictionary = {}
        if domain is None:
            domain = ['gene and gene product']
        super().__init__(dictionary, name, prefix, domain)
        self.ids = ids
        self.labels = labels

    def get_label(self, term_id):
        ''' Return the value to be used as the preferred
        label for the associated term id. For Entrez,
        using the gene ID. '''
        return term_id

    def get_species(self, term_id):
        ''' Return species as NCBI tax ID (or None, as applicable). '''
        return self._dict.get(term_id).get('tax_id')

    def get_encoding(self, gene_id):
        ''' Return encoding (allowed abundance types) for value. '''
        mapping = self._dict.get(gene_id)
        gene_type = mapping.get('type_of_gene')
        description = mapping.get('description')
        encoding = EntrezInfoData.ENC.get(gene_type, 'G')
        # microRNAs get the more specific GRM encoding
        if gene_type == 'ncRNA' and 'microRNA' in description:
            encoding = 'GRM'
        if gene_type not in EntrezInfoData.ENC:
            print(
                'WARNING ' +
                gene_type +
                ' not defined for Entrez. G assigned as default encoding.')
        return encoding

    def get_xrefs(self, term_id):
        ''' Returns xrefs to HGNC, MGI, RGD. '''
        targets = ('MGI:', 'HGNC:', 'RGD:')
        xrefs = set()
        mapping = self._dict.get(term_id)
        xrefs.update(mapping.get('dbXrefs').split('|'))
        # normalize xrefs with duplicated prefix
        # e.g., HGNC:HGNC:5
        xrefs = {x.split(':', x.count(':') - 1)[-1] for x in xrefs}
        xrefs = {x for x in xrefs if x.startswith(targets)}
        return xrefs

    def get_alt_symbols(self, gene_id):
        ''' Return set of symbol synonyms. '''
        synonyms = set()
        mapping = self._dict.get(gene_id)
        # '-' is Entrez's "no value" marker.
        # BUG FIX: was `is not '-'` — identity comparison with a string
        # literal is unreliable; use a value comparison.
        if mapping.get('Synonyms') != '-':
            synonyms.update(mapping.get('Synonyms').split('|'))
        synonyms.add(mapping.get('Symbol'))
        return synonyms

    def get_alt_names(self, term_id):
        ''' Return set of alternative names/descriptions. '''
        synonyms = set()
        mapping = self._dict.get(term_id)
        # BUG FIX: was `is not '-'` (identity test); use !=
        if mapping.get('Other_designations') != '-':
            synonyms.update(mapping.get('Other_designations').split('|'))
        if mapping.get('description') != '-':
            synonyms.add(mapping.get('description'))
        return synonyms

    def get_name(self, term_id):
        ''' Get official term name. '''
        mapping = self._dict.get(term_id)
        return mapping.get('Full_name_from_nomenclature_authority')
class EntrezHistoryData(HistoryDataSet):
    ''' History data for withdrawn/replaced Entrez Gene ids. '''

    def __init__(self, dictionary=None, prefix='egid'):
        # None sentinel avoids a mutable default dict shared across instances
        super().__init__(dictionary if dictionary is not None else {}, prefix)
class HGNCData(NamespaceDataSet, OrthologyData, HistoryDataSet):
    ''' HGNC human gene data: namespace, orthology, and id history. '''

    # Map HGNC 'Locus Type' values to BEL encodings.
    # (fixed: 'RNA, micro' was listed twice with the same value)
    ENC = {
        'gene with protein product': 'GRP', 'RNA, cluster': 'GR',
        'RNA, long non-coding': 'GR', 'RNA, micro': 'GRM',
        'RNA, ribosomal': 'GR', 'RNA, small cytoplasmic': 'GR',
        'RNA, small misc': 'GR', 'RNA, small nuclear': 'GR',
        'RNA, small nucleolar': 'GR', 'RNA, transfer': 'GR',
        'phenotype only': 'G', 'RNA, pseudogene': 'GR',
        'T cell receptor pseudogene': 'GR',
        'immunoglobulin pseudogene': 'GR', 'pseudogene': 'GR',
        'T cell receptor gene': 'GRP',
        'complex locus constituent': 'GRP',
        'endogenous retrovirus': 'G', 'fragile site': 'G',
        'immunoglobulin gene': 'GRP', 'protocadherin': 'GRP',
        'readthrough': 'GR', 'region': 'G',
        'transposable element': 'G', 'unknown': 'GRP',
        'virus integration site': 'G',
        'RNA, misc': 'GR', 'RNA, Y': 'GR', 'RNA, vault': 'GR',
    }

    def __init__(
            self,
            dictionary=None,
            *,
            name='hgnc-human-genes',
            prefix='hgnc',
            domain=None):
        # None sentinels avoid mutable default arguments shared across
        # instances.
        if dictionary is None:
            dictionary = {}
        if domain is None:
            domain = ['gene and gene product']
        super().__init__(dictionary, name, prefix, domain)

    def get_values(self):
        ''' Yield ids of all non-withdrawn terms. '''
        for term_id in self._dict:
            if '~withdrawn' not in self._dict.get(term_id).get('Symbol'):
                yield term_id

    def get_id_update(self, term_id):
        ''' Return the update for a term id: 'withdrawn' if the entry is
        gone, the id of the successor symbol if renamed, the id itself if
        still current, or None if unknown. '''
        mapping = self._dict.get(term_id)
        if mapping is None:
            return None
        if mapping.get('Locus Type') == 'withdrawn':
            name = self.get_name(term_id)
            if 'entry withdrawn' in name:
                return 'withdrawn'
            elif 'symbol withdrawn' in name:
                new_symbol = name.split('see ')[1]
                new_id = None
                # Scan for the id whose label matches the successor symbol.
                # FIX: the loop variable previously shadowed the `term_id`
                # parameter; also dropped a no-op `continue`. As before, the
                # last matching id wins.
                for candidate_id in self._dict:
                    if new_symbol == self.get_label(candidate_id):
                        new_id = candidate_id
                return new_id
        else:
            return term_id

    def get_obsolete_ids(self):
        ''' Return dict mapping each withdrawn id to its update value. '''
        obsolete = {}
        for term_id in self._dict:
            if 'withdrawn' in self.get_label(term_id):
                obsolete[term_id] = self.get_id_update(term_id)
        return obsolete

    def get_label(self, term_id):
        ''' Return preferred label associated with term id. '''
        mapping = self._dict.get(term_id)
        if mapping is None:
            return None
        return mapping.get('Symbol')

    def get_encoding(self, term_id):
        ''' Return encoding (allowed abundance types) for a term. '''
        mapping = self._dict.get(term_id)
        locus_type = mapping.get('Locus Type')
        encoding = HGNCData.ENC.get(locus_type, 'G')
        if locus_type not in HGNCData.ENC:
            print(
                'WARNING ' +
                locus_type +
                ' not defined for HGNC. G assigned as default encoding.')
        return encoding

    def get_species(self, term_id):
        ''' All HGNC terms are human (NCBI taxonomy 9606). '''
        return '9606'

    def get_alt_symbols(self, term_id):
        ''' Return set of current and previous symbol synonyms. '''
        synonyms = set()
        mapping = self._dict.get(term_id)
        if mapping.get('Synonyms'):
            symbol_synonyms = [s.strip()
                               for s in mapping.get('Synonyms').split(',')]
            synonyms.update(symbol_synonyms)
        if mapping.get('Previous Symbols'):
            old_symbols = [s.strip()
                           for s in mapping.get('Previous Symbols').split(',')]
            synonyms.update(old_symbols)
        return synonyms

    def get_alt_names(self, term_id):
        ''' Return set of previous names. '''
        synonyms = set()
        mapping = self._dict.get(term_id)
        if mapping.get('Previous Names'):
            # names are quoted and comma separated in the source column
            old_names = [s.strip('" ') for s in mapping.get(
                'Previous Names').split(', "')]
            synonyms.update(old_names)
        return synonyms

    def get_name(self, term_id):
        ''' Return the approved name for a term. '''
        mapping = self._dict.get(term_id)
        return mapping.get('Approved Name')

    def get_orthologs(self, term_id):
        ''' Return set of mouse and rat ortholog ids. '''
        orthologs = set()
        mapping = self._dict.get(term_id)
        orthologs.update(mapping.get('mouse_ortholog_id').split('|'))
        orthologs.update(mapping.get('rat_ortholog_id').split('|'))
        return orthologs
class MGIData(NamespaceDataSet):
    ''' MGI mouse gene data. '''

    # Map MGI 'Feature Type' values to BEL encodings.
    ENC = {
        'gene': 'GRP', 'protein coding gene': 'GRP',
        'non-coding RNA gene': 'GR', 'rRNA gene': 'GR',
        'tRNA gene': 'GR', 'snRNA gene': 'GR', 'snoRNA gene': 'GR',
        'miRNA gene': 'GRM', 'scRNA gene': 'GR',
        'lincRNA gene': 'GR', 'RNase P RNA gene': 'GR',
        'RNase MRP RNA gene': 'GR', 'telomerase RNA gene': 'GR',
        'unclassified non-coding RNA gene': 'GR',
        'heritable phenotypic marker': 'G', 'gene segment': 'G',
        'unclassified gene': 'GRP', 'other feature types': 'G',
        'pseudogene': 'GR', 'transgene': 'G',
        'other genome feature': 'G', 'pseudogenic region': 'GR',
        'polymorphic pseudogene': 'GRP',
        'pseudogenic gene segment': 'GR', 'SRP RNA gene': 'GR',
        'antisense lncRNA gene': 'GR', 'lncRNA gene': 'GR',
        'intronic lncRNA gene': 'GR', 'ribozyme gene': 'GR'
    }

    def __init__(
            self,
            dictionary=None,
            *,
            name='mgi-mouse-genes',
            prefix='mgi',
            domain=None):
        # None sentinels avoid mutable default arguments shared across
        # instances.
        if dictionary is None:
            dictionary = {}
        if domain is None:
            domain = ['gene and gene product']
        super().__init__(dictionary, name, prefix, domain)

    def get_values(self):
        ''' Yield ids of gene and pseudogene markers only. '''
        for term_id in self._dict:
            mapping = self._dict.get(term_id)
            marker_type = mapping.get('Marker Type')
            if marker_type == 'Gene' or marker_type == 'Pseudogene':
                yield term_id

    def get_species(self, term_id):
        ''' All MGI terms are mouse (NCBI taxonomy 10090). '''
        return '10090'

    def get_encoding(self, term_id):
        ''' Return encoding (allowed abundance types) for a term. '''
        feature_type = self._dict.get(term_id).get('Feature Type')
        encoding = self.ENC.get(feature_type, 'G')
        if feature_type not in self.ENC:
            print(
                'WARNING ' +
                feature_type +
                ' not defined for MGI. G assigned as default encoding.')
        return encoding

    def get_label(self, term_id):
        ''' Return preferred label (Symbol), or None for unknown ids. '''
        try:
            return self._dict.get(term_id).get('Symbol')
        # FIX: was a bare except; only an unknown term_id (dict .get()
        # returning None) is expected here.
        except AttributeError:
            return None

    def get_name(self, term_id):
        ''' Return the marker name for a term. '''
        mapping = self._dict.get(term_id)
        return mapping.get('Marker Name')

    def get_alt_symbols(self, term_id):
        ''' Return set of non-empty symbol synonyms. '''
        mapping = self._dict.get(term_id)
        synonyms = mapping.get('Marker Synonyms').split('|')
        return {s for s in synonyms if s}
class RGDData(NamespaceDataSet):
    ''' RGD rat gene data. '''

    # Map RGD 'GENE_TYPE' values to BEL encodings.
    ENC = {
        'gene': 'GRP', 'miscrna': 'GR', 'predicted-high': 'GRP',
        'predicted-low': 'GRP', 'predicted-moderate': 'GRP',
        'protein-coding': 'GRP', 'pseudo': 'GR', 'snrna': 'GR',
        'trna': 'GR', 'rrna': 'GR', 'ncrna': 'GR'
    }

    def __init__(
            self,
            dictionary=None,
            *,
            name='rgd-rat-genes',
            prefix='rgd',
            domain=None):
        # None sentinels avoid mutable default arguments shared across
        # instances.
        if dictionary is None:
            dictionary = {}
        if domain is None:
            domain = ['gene and gene product']
        super().__init__(dictionary, name, prefix, domain)

    def get_species(self, term_id):
        ''' Rat '''
        return '10116'

    def get_label(self, term_id):
        ''' Use Symbol as preferred label for RGD. '''
        try:
            return self._dict.get(term_id).get('SYMBOL')
        # FIX: was a bare except; only an unknown term_id (dict .get()
        # returning None) is expected here.
        except AttributeError:
            return None

    def get_name(self, term_id):
        ''' Return the gene name for a term. '''
        return self._dict.get(term_id).get('NAME')

    def get_encoding(self, term_id):
        ''' Return encoding (allowed abundance types) for a term. '''
        gene_type = self._dict.get(term_id).get('GENE_TYPE')
        name = self.get_name(term_id)
        encoding = RGDData.ENC.get(gene_type, 'G')
        # FIX: precedence bug — the original `a or b and c` made every
        # 'miscrna' gene GRM regardless of the microRNA check; GRM should
        # apply only to actual microRNAs (cf. EntrezInfoData.get_encoding).
        if gene_type in ('miscrna', 'ncrna') and 'microRNA' in name:
            encoding = 'GRM'
        if gene_type not in RGDData.ENC:
            print(
                'WARNING ' +
                gene_type +
                ' not defined for RGD. G assigned as default encoding.')
        return encoding

    def get_alt_symbols(self, term_id):
        ''' Return set of old symbol synonyms. '''
        synonyms = set()
        if self._dict.get(term_id).get('OLD_SYMBOL'):
            old_symbols = self._dict.get(term_id).get('OLD_SYMBOL').split(';')
            synonyms.update(old_symbols)
        return synonyms

    def get_alt_names(self, term_id):
        ''' Return set of non-empty old names. '''
        synonyms = set()
        mapping = self._dict.get(term_id)
        if mapping.get('OLD_NAME'):
            old_names = mapping.get('OLD_NAME').split(';')
            synonyms.update(old_names)
        return {s for s in synonyms if s}
class RGDObsoleteData(HistoryDataSet):
    ''' History data for obsolete RGD ids. '''

    def __init__(self, dictionary=None, prefix='rgd'):
        # None sentinel avoids a mutable default dict shared across instances
        super().__init__(dictionary if dictionary is not None else {}, prefix)
class SwissProtData(NamespaceDataSet):
    ''' SwissProt (UniProt reviewed) protein data. '''

    def __init__(
            self,
            dictionary=None,
            *,
            name='swissprot',
            prefix='sp',
            domain=None,
            ids=True):
        # None sentinels avoid mutable default arguments shared across
        # instances.
        if dictionary is None:
            dictionary = defaultdict(list)
        if domain is None:
            domain = ['gene and gene product']
        super().__init__(dictionary, name, prefix, domain)
        self.ids = ids

    def get_encoding(self, term_id):
        ''' SwissProt entries are always gene/RNA/protein. '''
        return 'GRP'

    def get_label(self, term_id):
        ''' Return the entry name as the preferred label. '''
        return self._dict.get(term_id).get('name')

    def get_name(self, term_id):
        ''' Return the recommended full protein name. '''
        mapping = self._dict.get(term_id)
        return mapping.get('recommendedFullName')

    def get_alt_ids(self, term_id):
        ''' Return secondary accessions (excluding the primary id). '''
        alt_ids = set(self._dict.get(term_id).get('accessions'))
        return {alt_id for alt_id in alt_ids if alt_id != term_id}

    def get_alt_symbols(self, term_id):
        ''' Return set of short-name and gene-symbol synonyms. '''
        synonyms = set()
        mapping = self._dict.get(term_id)
        synonyms.update(mapping.get('alternativeShortNames'))
        if mapping.get('recommendedShortName'):
            # BUG FIX: previously added mapping.get('recommendedShortname')
            # (lowercase 'n'), which returned None and put None in the set.
            synonyms.add(mapping.get('recommendedShortName'))
        if mapping.get('geneName'):
            synonyms.add(mapping.get('geneName'))
        if mapping.get('geneSynonyms'):
            synonyms.update(mapping.get('geneSynonyms'))
        return synonyms

    def get_alt_names(self, term_id):
        ''' Return set of alternative full names. '''
        synonyms = set()
        mapping = self._dict.get(term_id)
        synonyms.update(mapping.get('alternativeFullNames'))
        return synonyms

    def get_xrefs(self, term_id):
        ''' Returns GeneIDs or HGNC/MGI/RGD IDs. '''
        mapping = self._dict.get(term_id)
        xrefs = set()
        xrefs_dict = mapping.get('dbreference')
        for ns, values in xrefs_dict.items():
            if ns == 'GeneId':
                values = {('EGID:' + v) for v in values}
                xrefs.update(values)
            elif ns == 'HGNC' or ns == 'MGI':
                # HGNC and MGI ids already carry their prefix
                xrefs.update(values)
            elif ns == 'RGD':
                values = {('RGD:' + v) for v in values}
                xrefs.update(values)
        return xrefs

    def get_species(self, term_id):
        ''' Return species as NCBI tax ID. '''
        return self._dict.get(term_id).get('tax_id')
class AffyData(NamespaceDataSet):
    ''' Affymetrix probe-set data. '''

    def __init__(
            self,
            dictionary=None,
            *,
            name='affy-probeset',
            prefix='affx',
            domain=None,
            ids=True,
            labels=False):
        # None sentinels avoid mutable default arguments shared across
        # instances.
        if dictionary is None:
            dictionary = defaultdict(list)
        if domain is None:
            domain = ['gene and gene product']
        super().__init__(dictionary, name, prefix, domain)
        self.ids = ids
        self.labels = labels

    def get_species(self, term_id):
        ''' Return species as NCBI tax ID (or None if unrecognized). '''
        species = self._dict.get(term_id).get('Species')
        species_dict = {'Homo sapiens': '9606',
                        'Mus musculus': '10090',
                        'Rattus norvegicus': '10116'}
        return species_dict.get(species)

    def get_encoding(self, term_id):
        ''' Return encoding (allowed abundance types) for value.
        R - RNAAbundance. '''
        return 'R'

    def get_xrefs(self, term_id):
        ''' Returns equivalent Entrez Gene IDs for value . '''
        entrez_ids = self._dict.get(term_id).get('Entrez Gene').split('///')
        # '---' is Affy's "no value" marker
        if entrez_ids[0] == '---':
            return None
        return {'EGID:' + eid.strip() for eid in entrez_ids}
class CHEBIData(NamespaceDataSet):
    ''' ChEBI chemical entity data. '''

    def __init__(
            self,
            dictionary=None,
            *,
            name='chebi',
            prefix='chebi',
            domain=None,
            ids=True):
        # None sentinels avoid mutable default arguments shared across
        # instances.
        if dictionary is None:
            dictionary = {}
        if domain is None:
            domain = ['chemical']
        super().__init__(dictionary, name, prefix, domain)
        self.ids = ids

    def get_label(self, term_id):
        ''' Return the chemical name as the preferred label. '''
        return self._dict.get(term_id).get('name')

    def get_alt_names(self, term_id):
        ''' Return set of name synonyms. '''
        synonyms = set()
        mapping = self._dict.get(term_id)
        if mapping.get('synonyms'):
            synonyms.update(mapping.get('synonyms'))
        return synonyms
class Gene2AccData(DataSet):
    ''' Entrez gene2accession equivalence data. '''

    def __init__(self, dictionary=None, prefix='gene2acc'):
        # None sentinel avoids a mutable default dict shared across instances
        super().__init__(dictionary if dictionary is not None else {}, prefix)

    def get_eq_values(self):
        ''' Yield (entrez_gene_id, status, tax_id) tuples. '''
        for entrez_gene in self._dict:
            mapping = self._dict.get(entrez_gene)
            status = mapping.get('status')
            taxid = mapping.get('tax_id')
            yield entrez_gene, status, taxid
class GOData(NamespaceDataSet, HistoryDataSet):
    ''' Gene Ontology term data (one object per GO branch). '''

    # dictionary is required, since GO file parsed into multiple objects
    def __init__(self, dictionary, *, name, prefix, domain, ids=True):
        super().__init__(dictionary, name, prefix, domain)
        self.ids = ids

    def get_values(self):
        ''' Yield ids of all non-obsolete terms. '''
        for term_id in self._dict:
            if not self._dict.get(term_id).get('is_obsolete'):
                yield term_id

    def get_obsolete_ids(self):
        ''' Return dict mapping every obsolete term id to 'withdrawn'. '''
        return {term_id: 'withdrawn' for term_id in self._dict
                if self._dict.get(term_id).get('is_obsolete')}

    def get_id_update(self, term_id):
        ''' Return 'withdrawn' for obsolete ids, the id itself if still
        current, or None for unknown ids. '''
        entry = self._dict.get(term_id)
        if not entry:
            return None
        return 'withdrawn' if entry.get('is_obsolete') else term_id

    def get_label(self, term_id):
        ''' Return the GO term name as the preferred label. '''
        return self._dict.get(term_id).get('termname')

    def get_alt_names(self, term_id):
        ''' Return the set of term synonyms. '''
        entry = self._dict.get(term_id)
        return set(entry.get('synonyms'))

    def get_encoding(self, term_id):
        ''' Complexes encode as 'C', biological processes as 'B',
        everything else as abundance 'A'. '''
        if self._dict.get(term_id).get('complex'):
            return 'C'
        return 'B' if self._prefix == 'gobp' else 'A'
class MESHData(NamespaceDataSet):
    ''' MeSH term data. '''
    # NOTE dictionary and other arguments are required since MeSH file parsed
    # into multiple objects

    def __init__(
            self,
            dictionary,
            *,
            name,
            prefix,
            domain,
            ids=True,
            scheme_type=None):
        super().__init__(dictionary, name, prefix, domain)
        self.ids = ids
        # Default applied here so instances never share one mutable list
        # (previously `scheme_type=['ns']` was a shared mutable default).
        self.scheme_type = scheme_type if scheme_type is not None else ['ns']

    def get_label(self, term_id):
        ''' Return the MeSH heading as the preferred label. '''
        return self._dict.get(term_id).get('mesh_header')

    def get_encoding(self, term_id):
        ''' Diseases encode as 'O', processes as 'B', else abundance 'A'. '''
        if self._prefix == 'meshd':
            return 'O'
        elif self._prefix == 'meshpp':
            return 'B'
        else:
            return 'A'

    def get_concept_type(self, term_id):
        # TODO - merge with get_encoding
        ''' For Annotation Concept Schemes, return set of AnnotationConcept types.
        Default = 'AnnotationConcept' (parent class) '''
        if 'anno' not in self.scheme_type:
            return None
        else:
            if self._prefix == 'meshd':
                return {'Disease'}
            elif self._prefix == 'mesha':
                return {'Anatomy'}
            elif self._prefix == 'meshcs':
                return {'Location'}
            else:
                return None

    def get_alt_names(self, term_id):
        ''' Return the set of term synonyms. '''
        synonyms = set()
        mapping = self._dict.get(term_id)
        synonyms.update(mapping.get('synonyms'))
        return synonyms
class SwissWithdrawnData(HistoryDataSet):
    ''' History data for withdrawn SwissProt accessions. '''

    def __init__(self, dictionary=None, prefix='sp'):
        # None sentinel avoids a mutable default dict shared across instances
        super().__init__(dictionary if dictionary is not None else {}, prefix)

    def get_obsolete_ids(self):
        ''' Return dict mapping each withdrawn accession to 'withdrawn'. '''
        accessions = self._dict.get('accessions')
        return {a: 'withdrawn' for a in accessions}

    def get_id_update(self, term_id):
        ''' Return 'withdrawn' for withdrawn accessions, else None. '''
        if term_id in self._dict.get('accessions'):
            return 'withdrawn'
        else:
            return None
class OWLData(NamespaceDataSet, HistoryDataSet):
    ''' Data parsed from an OWL ontology (CLO, CL, UBERON, EFO, DO, ...). '''

    def __init__(
            self,
            dictionary=None,
            *,
            name,
            prefix,
            domain,
            ids=True,
            scheme_type):
        # None sentinel avoids a mutable default dict shared across instances
        super().__init__(dictionary if dictionary is not None else {},
                         name, prefix, domain)
        self._dict = {}  # make unique dict for each instance of class
        self.ids = ids
        self.scheme_type = scheme_type

    def get_label(self, term_id):
        ''' Return the term name as the preferred label. '''
        return self._dict.get(term_id).get('name')

    def get_encoding(self, term_id):
        ''' All OWL-derived terms encode as 'O'. '''
        return 'O'

    def get_concept_type(self, term_id):
        # TODO - merge with get_encoding
        ''' For Annotation Concept Schemes, return set of AnnotationConcept types.
        Default = 'AnnotationConcept' (parent class) '''
        concept_type = set()
        if 'anno' not in self.scheme_type:
            return None
        elif self._prefix == 'clo':
            concept_type = {'CellLine'}
        elif self._prefix == 'cl':
            concept_type = {'Cell'}
        elif self._prefix == 'uberon':
            concept_type = {'Anatomy'}
        elif self._prefix == 'efo':
            concept_type = self._dict.get(term_id).get("term_type")
        elif self._prefix == 'do':
            concept_type = {'Disease'}
        return concept_type

    def get_alt_names(self, term_id):
        ''' Return the set of term synonyms. '''
        mapping = self._dict.get(term_id)
        return set(mapping.get('synonyms'))

    def find_xref(self, ref):
        ''' Used only in equiv module. '''
        for term_id, mapping in self._dict.items():
            dbxrefs = mapping.get('dbxrefs')
            if ref in dbxrefs:
                return term_id

    def get_xrefs(self, term_id):
        ''' Returns MeSH (MSH) xrefs for a given DO ID . '''
        xrefs = set()
        mapping = self._dict.get(term_id)
        xrefs.update(mapping.get('dbxrefs'))
        if self._prefix == 'do':
            xrefs = {x.replace('MSH:', 'MESHD:')
                     for x in xrefs if x.startswith('MSH:')}
        return xrefs

    def get_obsolete_ids(self):
        ''' Return dict mapping each obsolete term id to 'withdrawn'. '''
        obsolete = {}
        for term_id in self._dict:
            if self._dict.get(term_id).get('is_obsolete'):
                obsolete[term_id] = 'withdrawn'
        return obsolete

    def get_id_update(self, term_id):
        ''' Return 'withdrawn' for obsolete ids, the id itself if current,
        or None for unknown ids. '''
        if self._dict.get(term_id):
            if self._dict.get(term_id).get('is_obsolete'):
                return 'withdrawn'
            else:
                return term_id
        else:
            return None
class NCBITaxonomyData(NamespaceDataSet):
    ''' NCBI taxonomy (species) data. '''

    def __init__(
            self,
            dictionary=None,
            *,
            name,
            prefix,
            domain,
            ids=True,
            scheme_type):
        # None sentinel avoids a mutable default dict shared across instances
        super().__init__(dictionary if dictionary is not None else {},
                         name, prefix, domain)
        # FIX: `ids` was accepted but never stored; every sibling dataset
        # class assigns it after the super() call.
        self.ids = ids
        self.scheme_type = scheme_type

    def get_label(self, term_id):
        ''' Return the taxon name as the preferred label. '''
        return self._dict.get(term_id).get('name')

    def get_alt_names(self, term_id):
        ''' Return the set of name synonyms. '''
        mapping = self._dict.get(term_id)
        return set(mapping.get('synonyms'))

    def get_concept_type(self, term_id):
        # TODO - merge with get_encoding
        ''' For Annotation Concept Schemes, return set of AnnotationConcept types.
        Default = 'AnnotationConcept' (parent class) '''
        if 'anno' not in self.scheme_type:
            return None
        else:
            return {'Species'}
|
""" IMAPClient wrapper for Inbox.
Unfortunately, due to IMAP's statefulness, to implement connection pooling we
have to shunt off dealing with the connection pool to the caller or we'll end
up trying to execute calls with the wrong folder selected some amount of the
time. That's why functions take a connection argument.
"""
import imaplib
import functools
import threading
from email.parser import HeaderParser
from collections import namedtuple, defaultdict
import gevent
from gevent import socket, GreenletExit
from gevent.coros import BoundedSemaphore
import geventconnpool
from inbox.util.concurrency import retry
from inbox.util.itert import chunk
from inbox.util.misc import or_none, timed
from inbox.basicauth import (ConnectionError, ValidationError,
TransientConnectionError, AuthError)
from inbox.models import Folder
from inbox.models.session import session_scope
from inbox.providers import provider_info
from inbox.models.account import Account
from inbox.models.backends.imap import ImapAccount
from inbox.log import get_logger
logger = get_logger()
__all__ = ['CrispinClient', 'GmailCrispinClient', 'CondStoreCrispinClient']
# Unify flags API across IMAP and Gmail
Flags = namedtuple('Flags', 'flags')
# Flags includes labels on Gmail because Gmail doesn't use \Draft.
GmailFlags = namedtuple('GmailFlags', 'flags labels')
# Gmail-specific per-message metadata: Google message id and thread id.
GMetadata = namedtuple('GMetadata', 'msgid thrid')
# Full fetched message. NOTE(review): the typename 'RawImapMessage' differs
# from the variable name 'RawMessage' — presumably intentional, but worth
# confirming since repr()s will show the former.
RawMessage = namedtuple(
    'RawImapMessage',
    'uid internaldate flags body g_thrid g_msgid g_labels')
# We will retry a couple of times for transient errors, such as an invalid
# access token or the server being temporarily unavailable.
MAX_TRANSIENT_ERRORS = 2
# Lazily-initialized map of account ids to lock objects.
# This prevents multiple greenlets from concurrently creating duplicate
# connection pools for a given account.
_lock_map = defaultdict(threading.Lock)
class GmailSettingError(Exception):
    """ Thrown on misconfigured Gmail accounts. """
def _get_connection_pool(account_id, pool_size, pool_map, readonly):
    ''' Return the cached connection pool for `account_id`, creating and
    caching a new one if none exists. Creation is serialized per account
    via _lock_map so concurrent greenlets don't build duplicate pools. '''
    with _lock_map[account_id]:
        try:
            existing = pool_map.get(account_id)
            if existing:
                return existing
            return pool_map.setdefault(
                account_id,
                CrispinConnectionPool(account_id,
                                      num_connections=pool_size,
                                      readonly=readonly))
        except AuthError:
            logger.error('Auth error for account {}'.format(account_id))
            raise GreenletExit()
def connection_pool(account_id, pool_size=4, pool_map=dict()):
    """ Per-account crispin connection pool.
    Use like this:
    with crispin.connection_pool(account_id).get() as crispin_client:
        # your code here
        pass
    Note that the returned CrispinClient could have ANY folder selected, or
    none at all! It's up to the calling code to handle folder sessions
    properly. We don't reset to a certain select state because it's slow.

    NOTE: the mutable default for `pool_map` is intentional — it acts as
    a module-level cache of pools shared across calls. Do not "fix" it.
    """
    return _get_connection_pool(account_id, pool_size, pool_map, True)
def writable_connection_pool(account_id, pool_size=1, pool_map=dict()):
    """ Per-account crispin connection pool, with *read-write* connections.
    Use like this:
    conn_pool = crispin.writable_connection_pool(account_id)
    with conn_pool.get() as crispin_client:
        # your code here
        pass

    NOTE: the mutable default for `pool_map` is intentional — it acts as
    a module-level cache of pools shared across calls (separate from the
    read-only cache in connection_pool). Do not "fix" it.
    """
    return _get_connection_pool(account_id, pool_size, pool_map, False)
CONN_DISCARD_EXC_CLASSES = (socket.error, imaplib.IMAP4.error)
class CrispinConnectionPool(geventconnpool.ConnectionPool):
    """
    Connection pool for Crispin clients.
    Connections in a pool are specific to an IMAPAccount.
    Parameters
    ----------
    account_id : int
        Which IMAPAccount to open up a connection to.
    num_connections : int
        How many connections in the pool.
    readonly : bool
        Is the connection to the IMAP server read-only?
    """
    def __init__(self, account_id, num_connections, readonly):
        logger.info('Creating Crispin connection pool for account {} with {} '
                    'connections'.format(account_id, num_connections))
        self.account_id = account_id
        self.readonly = readonly
        # serializes _new_connection() so startup doesn't open many
        # db sessions at once
        self._new_conn_lock = BoundedSemaphore(1)
        self._set_account_info()
        # 1200s == 20min
        geventconnpool.ConnectionPool.__init__(
            self, num_connections, keepalive=1200,
            exc_classes=CONN_DISCARD_EXC_CLASSES)
    def _set_account_info(self):
        ''' Load provider/credential info for this account from the db,
        refreshing the OAuth access token if applicable. On credential or
        connection failure, records the failed sync state and re-raises. '''
        with session_scope() as db_session:
            account = db_session.query(Account).get(self.account_id)
            self.provider_name = account.provider
            self.email_address = account.email_address
            self.provider_info = provider_info(account.provider,
                                               account.email_address)
            self.sync_state = account.sync_state
            # Refresh token if need be, for OAuthed accounts
            if self.provider_info['auth'] == 'oauth2':
                try:
                    self.credential = account.access_token
                except ValidationError:
                    logger.error("Error obtaining access token",
                                 account_id=self.account_id)
                    account.sync_state = 'invalid'
                    db_session.commit()
                    raise
                except ConnectionError:
                    logger.error("Error connecting",
                                 account_id=self.account_id)
                    account.sync_state = 'connerror'
                    db_session.commit()
                    raise
            else:
                self.credential = account.password
    def _new_connection(self):
        ''' Create one authed connection for the pool (geventconnpool hook).
        Returns a CrispinClient, or None if the connection could not be
        established. Retries once after refreshing an expired OAuth token;
        raises on a second validation failure. '''
        from inbox.auth import handler_from_provider
        # Ensure that connections are initialized serially, so as not to use
        # many db sessions on startup.
        with self._new_conn_lock as _:
            auth_handler = handler_from_provider(self.provider_name)
            for retry_count in range(MAX_TRANSIENT_ERRORS):
                try:
                    conn = auth_handler.connect_account(self.provider_name,
                                                        self.email_address,
                                                        self.credential)
                    # If we can connect the account, then we can set the state
                    # to 'running' if it wasn't already
                    if self.sync_state != 'running':
                        with session_scope() as db_session:
                            query = db_session.query(ImapAccount)
                            account = query.get(self.account_id)
                            self.sync_state = account.sync_state = 'running'
                    return new_crispin(self.account_id, self.email_address,
                                       self.provider_name, conn, self.readonly)
                except ConnectionError, e:
                    # transient errors: give up quietly; the pool retries later
                    if isinstance(e, TransientConnectionError):
                        return None
                    else:
                        logger.error('Error connecting',
                                     account_id=self.account_id)
                        with session_scope() as db_session:
                            query = db_session.query(ImapAccount)
                            account = query.get(self.account_id)
                            account.sync_state = 'connerror'
                        return None
                except ValidationError, e:
                    # If we failed to validate, but the account is oauth2, we
                    # may just need to refresh the access token. Try this one
                    # time.
                    if (self.provider_info['auth'] == 'oauth2' and
                            retry_count == 0):
                        with session_scope() as db_session:
                            query = db_session.query(ImapAccount)
                            account = query.get(self.account_id)
                            self.credential = account.renew_access_token()
                    else:
                        logger.error('Error validating',
                                     account_id=self.account_id)
                        with session_scope() as db_session:
                            query = db_session.query(ImapAccount)
                            account = query.get(self.account_id)
                            account.sync_state = 'invalid'
                        raise
            return None
    def _keepalive(self, c):
        ''' geventconnpool hook: NOOP the connection to keep it alive. '''
        c.conn.noop()
def _exc_callback():
    ''' Invoked by retry_crispin on a retryable connection error:
    backs off briefly before the next attempt, then logs. '''
    gevent.sleep(5)
    logger.info('Connection broken with error; retrying with new connection',
                exc_info=True)
def _fail_callback():
    ''' Invoked by retry_crispin when all retries are exhausted. '''
    logger.error('Max retries reached. Aborting', exc_info=True)
# Wrapper that retries a callable up to 5 times on connection-level errors
# (socket/IMAP4 errors), sleeping via _exc_callback between attempts and
# logging via _fail_callback on final failure. NOTE(review): exact semantics
# of max_count/reset_interval come from inbox.util.concurrency.retry —
# confirm there.
retry_crispin = functools.partial(
    retry, retry_classes=CONN_DISCARD_EXC_CLASSES, exc_callback=_exc_callback,
    fail_callback=_fail_callback, max_count=5, reset_interval=150)
def new_crispin(account_id, email_address, provider_name, conn, readonly=True):
    ''' Construct the appropriate CrispinClient subclass for this provider:
    GmailCrispinClient for Gmail, CondStoreCrispinClient when the provider
    info or the account indicates CONDSTORE support, else CrispinClient. '''
    if provider_name == 'gmail':
        cls = GmailCrispinClient
    else:
        # BUG FIX: `cls` could previously be left unbound (NameError) when
        # the provider file had no 'condstore' entry and the account row was
        # missing; default to the plain client and upgrade below.
        cls = CrispinClient
        info = provider_info(provider_name, email_address)
        # look up in the provider database to see
        # if the provider supports CONDSTORE
        if "condstore" in info:
            if info["condstore"]:
                cls = CondStoreCrispinClient
        else:
            # no match in provider file, check in the
            # account settings.
            with session_scope() as db_session:
                acc = db_session.query(Account).get(account_id)
                if acc is not None and getattr(acc, 'supports_condstore',
                                               False):
                    cls = CondStoreCrispinClient
    return cls(account_id, provider_name, email_address, conn,
               readonly=readonly)
class CrispinClient(object):
    """ Generic IMAP client wrapper.
    One thing to note about crispin clients is that *all* calls operate on
    the currently selected folder.
    Crispin will NEVER implicitly select a folder for you.
    This is very important! IMAP only guarantees that folder message UIDs
    are valid for a "session", which is defined as from the time you
    SELECT a folder until the connection is closed or another folder is
    selected.
    Crispin clients *always* return long ints rather than strings for number
    data types, such as message UIDs, Google message IDs, and Google thread
    IDs.
    All inputs are coerced to strings before being passed off to the IMAPClient
    connection.
    You should really be interfacing with this class via a connection pool,
    see `connection_pool()`.
    Parameters
    ----------
    account_id : int
        Database id of the associated IMAPAccount.
    conn : IMAPClient
        Open IMAP connection (should be already authed).
    readonly : bool
        Whether or not to open IMAP connections as readonly.
    """
    # provider label for this client class; subclasses override
    PROVIDER = 'IMAP'
    # NOTE: Be *careful* changing this! Downloading too much at once may
    # cause memory errors that only pop up in extreme edge cases.
    CHUNK_SIZE = 1
def __init__(self, account_id, provider_name, email_address, conn,
readonly=True):
self.log = logger.new(account_id=account_id, module='crispin')
self.account_id = account_id
self.provider_name = provider_name
self.email_address = email_address
# IMAP isn't stateless :(
self.selected_folder = None
self._folder_names = None
self.conn = conn
self.readonly = readonly
def _fetch_folder_list(self):
""" NOTE: XLIST is deprecated, so we just use LIST.
An example response with some other flags:
* LIST (\HasNoChildren) "/" "INBOX"
* LIST (\Noselect \HasChildren) "/" "[Gmail]"
* LIST (\HasNoChildren \All) "/" "[Gmail]/All Mail"
* LIST (\HasNoChildren \Drafts) "/" "[Gmail]/Drafts"
* LIST (\HasNoChildren \Important) "/" "[Gmail]/Important"
* LIST (\HasNoChildren \Sent) "/" "[Gmail]/Sent Mail"
* LIST (\HasNoChildren \Junk) "/" "[Gmail]/Spam"
* LIST (\HasNoChildren \Flagged) "/" "[Gmail]/Starred"
* LIST (\HasNoChildren \Trash) "/" "[Gmail]/Trash"
IMAPClient parses this response into a list of
(flags, delimiter, name) tuples.
"""
folders = self.conn.list_folders()
return folders
    def select_folder(self, folder, uidvalidity_cb):
        """ Selects a given folder.
        Makes sure to set the 'selected_folder' attribute to a
        (folder_name, select_info) pair.
        Selecting a folder indicates the start of an IMAP session. IMAP UIDs
        are only guaranteed valid for sessions, so the caller must provide a
        callback that checks UID validity.
        Starts a new session even if `folder` is already selected, since
        this does things like e.g. makes sure we're not getting
        cached/out-of-date values for HIGHESTMODSEQ from the IMAP server.

        Returns whatever `uidvalidity_cb(account_id, folder, select_info)`
        returns.
        """
        select_info = self.conn.select_folder(
            folder, readonly=self.readonly)
        # normalize to long per the class contract on numeric types
        select_info['UIDVALIDITY'] = long(select_info['UIDVALIDITY'])
        self.selected_folder = (folder, select_info)
        # don't propagate cached information from previous session
        self._folder_names = None
        return uidvalidity_cb(self.account_id, folder, select_info)
@property
def selected_folder_name(self):
return or_none(self.selected_folder, lambda f: f[0])
@property
def selected_folder_info(self):
return or_none(self.selected_folder, lambda f: f[1])
@property
def selected_uidvalidity(self):
return or_none(self.selected_folder_info, lambda i: i['UIDVALIDITY'])
def sync_folders(self):
to_sync = []
folders = self.folder_names()
for tag in ('inbox', 'drafts', 'sent', 'starred', 'important',
'archive', 'extra', 'spam', 'trash'):
if tag == 'extra' and tag in folders:
to_sync.extend(folders['extra'])
elif tag in folders:
to_sync.append(folders[tag])
return to_sync
    def folder_names(self):
        ''' Return a cached dict mapping canonical role names ('inbox',
        'sent', 'trash', ..., plus 'extra' -> list of leftovers) to actual
        folder names on the server, creating any missing standard folders
        on the backend and recording them in the database.

        NOTE(review): this both mutates server state (create_folder) and
        writes Folder rows — callers should be aware of the side effects.
        '''
        # Different providers have different names for folders, here
        # we have a default map for common name mapping, additional
        # mappings can be provided via the provider configuration file
        default_folder_map = {'INBOX': 'inbox', 'DRAFTS': 'drafts',
                              'DRAFT': 'drafts', 'JUNK': 'spam',
                              'ARCHIVE': 'archive', 'SENT': 'sent',
                              'TRASH': 'trash', 'SPAM': 'spam'}
        # Some providers also provide flags to determine common folders
        # Here we read these flags and apply the mapping
        flag_to_folder_map = {'\\Trash': 'trash', '\\Sent': 'sent',
                              '\\Drafts': 'drafts', '\\Junk': 'spam',
                              '\\Inbox': 'inbox', '\\Spam': 'spam'}
        # Additionally we provide a custom mapping for providers that
        # don't fit into the defaults.
        info = provider_info(self.provider_name, self.email_address)
        folder_map = info.get('folder_map', {})
        if self._folder_names is None:
            folders = self._fetch_folder_list()
            self._folder_names = dict()
            for flags, delimiter, name in folders:
                if u'\\Noselect' in flags:
                    # special folders that can't contain messages
                    pass
                # TODO: internationalization support
                elif name in folder_map:
                    self._folder_names[folder_map[name]] = name
                elif name.upper() in default_folder_map:
                    self._folder_names[default_folder_map[name.upper()]] = name
                else:
                    matched = False
                    for flag in flags:
                        if flag in flag_to_folder_map:
                            self._folder_names[flag_to_folder_map[flag]] = name
                            matched = True
                    if not matched:
                        self._folder_names.setdefault(
                            'extra', list()).append(name)
        # TODO: support subfolders
        # Create any needed folders that don't exist on the backend
        needed_folders = set(['inbox', 'drafts', 'sent', 'spam',
                              'trash', 'archive'])
        needed_folders -= set(self._folder_names.keys())
        for folder_id in needed_folders:
            name = folder_id.capitalize()
            self.create_folder(name)
            with session_scope() as db_session:
                account = db_session.query(Account).get(self.account_id)
                folder = Folder.find_or_create(db_session, account,
                                               name, folder_id)
                setattr(account, folder_id + '_folder', folder)
                db_session.commit()
            self._folder_names[folder_id] = name
        return self._folder_names
def folder_status(self, folder):
status = [long(val) for val in self.conn.folder_status(
folder, ('UIDVALIDITY'))]
return status
    def create_folder(self, name):
        ''' Create a folder named `name` on the server. '''
        self.conn.create_folder(name)
def search_uids(self, criteria):
""" Find not-deleted UIDs in this folder matching the criteria.
See http://tools.ietf.org/html/rfc3501.html#section-6.4.4 for valid
criteria.
"""
full_criteria = ['NOT DELETED']
if isinstance(criteria, list):
full_criteria.extend(criteria)
else:
full_criteria.append(criteria)
return sorted([long(uid) for uid in self.conn.search(full_criteria)])
def all_uids(self):
""" Fetch all UIDs associated with the currently selected folder.
Returns
-------
list
UIDs as integers sorted in ascending order.
"""
data = self.conn.search(['NOT DELETED'])
return sorted([long(s) for s in data])
    def uids(self, uids):
        """Fetch full bodies, internaldates and flags for `uids`.

        Returns a list of RawMessage namedtuples in ascending UID order.
        The Gmail-specific fields (g_thrid, g_msgid, g_labels) are None
        for generic IMAP.
        """
        uids = [str(u) for u in uids]
        raw_messages = self.conn.fetch(uids,
                                       ['BODY.PEEK[] INTERNALDATE FLAGS'])
        for uid, msg in raw_messages.iteritems():
            if 'BODY[]' not in msg:
                raise Exception(
                    'No BODY[] element in IMAP response. Tags given: {}'
                    .format(msg.keys()))
            # NOTE: flanker needs encoded bytestrings as its input, since to
            # deal properly with MIME-encoded email you need to do part
            # decoding based on message / MIME part headers anyway. imapclient
            # tries to abstract away bytes and decodes all bytes received from
            # the wire as _latin-1_, which is wrong in any case where 8bit MIME
            # is used. so we have to reverse the damage before we proceed.
            #
            # We should REMOVE this XXX HACK XXX when we finish working with
            # Menno to fix this problem upstream.
            msg['BODY[]'] = msg['BODY[]'].encode('latin-1')
        messages = []
        # Sort numerically (key=long): the fetched uids are strings, and a
        # lexicographic sort would misorder them.
        for uid in sorted(raw_messages.iterkeys(), key=long):
            msg = raw_messages[uid]
            messages.append(RawMessage(uid=long(uid),
                                       internaldate=msg['INTERNALDATE'],
                                       flags=msg['FLAGS'],
                                       body=msg['BODY[]'],
                                       # TODO: use data structure that isn't
                                       # Gmail-specific
                                       g_thrid=None, g_msgid=None,
                                       g_labels=None))
        return messages
def flags(self, uids):
uids = [str(u) for u in uids]
data = self.conn.fetch(uids, ['FLAGS'])
return dict([(long(uid), Flags(msg['FLAGS']))
for uid, msg in data.iteritems()])
def copy_uids(self, uids, to_folder):
if not uids:
return
uids = [str(u) for u in uids]
self.conn.copy(uids, to_folder)
def delete_uids(self, uids):
uids = [str(u) for u in uids]
self.conn.delete_messages(uids)
self.conn.expunge()
def set_starred(self, uids, starred):
self.conn.add_flags(uids, ['\\Flagged'])
def set_unread(self, uids, unread):
uids = [str(u) for u in uids]
if unread:
self.conn.remove_flags(uids, ['\\Seen'])
else:
self.conn.add_flags(uids, ['\\Seen'])
def save_draft(self, message, date=None):
assert self.selected_folder_name == self.folder_names()['drafts'], \
'Must select drafts folder first ({0})'.format(
self.selected_folder_name)
self.conn.append(self.selected_folder_name, message, ['\\Draft'], date)
def create_message(self, message, date=None):
"""Create a message on the server. Only used to fix server-side bugs,
like iCloud not saving Sent messages"""
assert self.selected_folder_name == self.folder_names()['sent'], \
'Must select sent folder first ({0})'.format(
self.selected_folder_name)
self.conn.append(self.selected_folder_name, message, [], date)
def fetch_headers(self, uids):
"""Fetch headers for the given uids. Chunked because certain providers
fail with 'Command line too large' if you feed them too many uids at
once."""
headers = {}
for uid_chunk in chunk(uids, 100):
headers.update(self.conn.fetch(
uid_chunk, ['BODY.PEEK[HEADER]']))
return headers
    def delete_draft(self, inbox_uid):
        """
        Move the message from the "Drafts" folder and into the "Trash" folder.

        Parameters
        ----------
        inbox_uid : str
            The public_id of the draft we want to delete on the remote,
            which is its X-INBOX-ID header too.

        Notes
        -----
        Need the public_id == inbox_uid since that is the only unique
        identifier for the message that both we and the remote know.
        """
        assert inbox_uid
        criteria = ['DRAFT', 'NOT DELETED']
        all_draft_uids = self.conn.search(criteria)
        # It would be nice to just search by X-INBOX-ID header too, but some
        # backends don't support that. So fetch the header for each draft and
        # see if we can find one that matches.
        # TODO(emfree): are there other ways we can narrow the result set a
        # priori (by subject or date, etc.)
        matching_draft_headers = self.fetch_headers(all_draft_uids)
        for uid, response in matching_draft_headers.iteritems():
            headers = response['BODY[HEADER]']
            parser = HeaderParser()
            x_inbox_id = parser.parsestr(headers).get('X-Inbox-Id')
            if x_inbox_id == inbox_uid:
                # TODO: do we need this part?
                # Remove IMAP `Draft` label
                self.conn.remove_flags([uid], ['\Draft'])
                self.conn.delete_messages([uid])
                self.conn.expunge()
                # Delete from `Trash`
                # Needed because otherwise deleting a draft that was sent
                # results in it synced twice - once via the Trash folder and
                # once via the Sent folder.
                # NOTE: this switches the selected folder as a side effect.
                self.conn.select_folder(self.folder_names()['trash'])
                all_trash_uids = self.conn.search()
                all_trash_headers = self.fetch_headers(all_trash_uids)
                for u, r in all_trash_headers.iteritems():
                    x_inbox_header = HeaderParser().parsestr(
                        r['BODY[HEADER]']).get('X-Inbox-Id')
                    if x_inbox_header == inbox_uid:
                        self.conn.delete_messages([u])
                        self.conn.expunge()
                return
class CondStoreCrispinClient(CrispinClient):
    """CrispinClient for servers that support CONDSTORE (RFC 4551).

    Tracks HIGHESTMODSEQ on folder select so that changed messages can be
    fetched incrementally with CHANGEDSINCE.
    """
    def select_folder(self, folder, uidvalidity_cb):
        ret = super(CondStoreCrispinClient,
                    self).select_folder(folder, uidvalidity_cb)
        # We need to issue a STATUS command asking for HIGHESTMODSEQ
        # because some servers won't enable CONDSTORE support otherwise
        status = self.folder_status(folder)
        if 'HIGHESTMODSEQ' in self.selected_folder_info:
            self.selected_folder_info['HIGHESTMODSEQ'] = \
                long(self.selected_folder_info['HIGHESTMODSEQ'])
        elif 'HIGHESTMODSEQ' in status:
            self.selected_folder_info['HIGHESTMODSEQ'] = \
                status['HIGHESTMODSEQ']
        return ret

    def folder_status(self, folder):
        """Return {param: long} for UIDVALIDITY, HIGHESTMODSEQ and UIDNEXT."""
        status = self.conn.folder_status(
            folder, ('UIDVALIDITY', 'HIGHESTMODSEQ', 'UIDNEXT'))
        # Normalize all values to longs, matching the class-wide convention
        # of returning long ints for numeric data.
        for param in status:
            status[param] = long(status[param])
        return status

    @property
    def selected_highestmodseq(self):
        # None when no folder is selected.
        return or_none(self.selected_folder_info, lambda i: i['HIGHESTMODSEQ'])

    @timed
    def new_and_updated_uids(self, modseq):
        """Return sorted UIDs of messages new or changed since `modseq`."""
        resp = self.conn.fetch('1:*', ['FLAGS'],
                               modifiers=['CHANGEDSINCE {}'.format(modseq)])
        # TODO(emfree): It may be useful to hold on to the whole response here
        # and/or fetch more metadata, not just return the UIDs.
        return sorted(resp.keys())
class GmailCrispinClient(CondStoreCrispinClient):
    """CrispinClient specialized for Gmail's IMAP extensions.

    Uses the X-GM-MSGID / X-GM-THRID / X-GM-LABELS fetch items to expose
    Gmail's message/thread/label model on top of IMAP.
    """
    PROVIDER = 'gmail'

    def sync_folders(self):
        """ Gmail-specific list of folders to sync.

        In Gmail, every message is a subset of All Mail, with the exception of
        the Trash and Spam folders. So we only sync All Mail, Trash, Spam,
        and Inbox (for quickly downloading initial inbox messages and
        continuing to receive new Inbox messages while a large mail archive is
        downloading).

        Returns
        -------
        list
            Folders to sync (as strings).
        """
        if 'all' not in self.folder_names():
            raise GmailSettingError(
                "Account {} ({}) has no detected 'All Mail' folder. This is "
                "probably because it is disabled from appearing in IMAP. "
                "Please enable at "
                "https://mail.google.com/mail/#settings/labels"
                .format(self.account_id, self.email_address))
        folders = [self.folder_names()['inbox'], self.folder_names()['all']]
        # Non-essential folders, so don't error out if they're not present.
        for tag in ('trash', 'spam'):
            if tag in self.folder_names():
                folders.append(self.folder_names()[tag])
        return folders

    def flags(self, uids):
        """ Gmail-specific flags.

        Returns
        -------
        dict
            Mapping of `uid` (str) : GmailFlags.
        """
        uids = [str(u) for u in uids]
        data = self.conn.fetch(uids, ['FLAGS X-GM-LABELS'])
        return dict([(long(uid), GmailFlags(msg['FLAGS'], msg['X-GM-LABELS']))
                     for uid, msg in data.iteritems()])

    def folder_names(self):
        """ Parses out Gmail-specific folder names based on Gmail IMAP flags.

        If the user's account is localized to a different language, it will
        return the proper localized string.

        Caches the call since we use it all over the place and folders never
        change names during a session.
        """
        if self._folder_names is None:
            folders = self._fetch_folder_list()
            self._folder_names = dict()
            for flags, delimiter, name in folders:
                if u'\\Noselect' in flags:
                    # special folders that can't contain messages, usually
                    # just '[Gmail]'
                    pass
                elif '\\All' in flags:
                    self._folder_names['all'] = name
                elif name.lower() == 'inbox':
                    self._folder_names[name.lower()] = name.capitalize()
                    continue
                else:
                    for flag in ['\\Drafts', '\\Important', '\\Sent', '\\Junk',
                                 '\\Flagged', '\\Trash']:
                        # find localized names for Gmail's special folders
                        if flag in flags:
                            k = flag.replace('\\', '').lower()
                            if k == 'flagged':
                                self._folder_names['starred'] = name
                            elif k == 'junk':
                                self._folder_names['spam'] = name
                            else:
                                self._folder_names[k] = name
                            break
                    else:
                        # everything else is a label
                        self._folder_names.setdefault('labels', list())\
                            .append(name)
            if 'labels' in self._folder_names:
                self._folder_names['labels'].sort()
                # synonyms on Gmail
                self._folder_names['extra'] = self._folder_names['labels']
        return self._folder_names

    def uids(self, uids):
        """Fetch full messages plus Gmail metadata for `uids`.

        Returns RawMessage namedtuples (in ascending UID order) with
        g_thrid, g_msgid and g_labels populated from the X-GM-* items.
        """
        uids = [str(u) for u in uids]
        raw_messages = self.conn.fetch(uids, ['BODY.PEEK[] INTERNALDATE FLAGS',
                                              'X-GM-THRID', 'X-GM-MSGID',
                                              'X-GM-LABELS'])
        for uid, msg in raw_messages.iteritems():
            # NOTE: flanker needs encoded bytestrings as its input, since to
            # deal properly with MIME-encoded email you need to do part
            # decoding based on message / MIME part headers anyway. imapclient
            # tries to abstract away bytes and decodes all bytes received from
            # the wire as _latin-1_, which is wrong in any case where 8bit MIME
            # is used. so we have to reverse the damage before we proceed.
            #
            # We should REMOVE this XXX HACK XXX when we finish working with
            # Menno to fix this problem upstream.
            msg['BODY[]'] = msg['BODY[]'].encode('latin-1')
        messages = []
        # Sort numerically: uids are strings at this point.
        for uid in sorted(raw_messages.iterkeys(), key=long):
            msg = raw_messages[uid]
            messages.append(RawMessage(uid=long(uid),
                                       internaldate=msg['INTERNALDATE'],
                                       flags=msg['FLAGS'],
                                       body=msg['BODY[]'],
                                       g_thrid=long(msg['X-GM-THRID']),
                                       g_msgid=long(msg['X-GM-MSGID']),
                                       g_labels=msg['X-GM-LABELS']))
        return messages

    def g_metadata(self, uids):
        """ Download Gmail MSGIDs and THRIDs for the given messages.

        NOTE: only UIDs are guaranteed to be unique to a folder, X-GM-MSGID
        and X-GM-THRID may not be.

        Parameters
        ----------
        uids : list
            UIDs to fetch data for. Must be from the selected folder.

        Returns
        -------
        dict
            uid: GMetadata(msgid, thrid)
        """
        uids = [str(u) for u in uids]
        self.log.debug('fetching X-GM-MSGID and X-GM-THRID',
                       uid_count=len(uids))
        return dict([(long(uid), GMetadata(long(ret['X-GM-MSGID']),
                                           long(ret['X-GM-THRID']))) for uid,
                     ret in self.conn.fetch(uids, ['X-GM-MSGID',
                                                   'X-GM-THRID']).iteritems()])

    def expand_thread(self, g_thrid):
        """ Find all message UIDs in this account with X-GM-THRID equal to
        g_thrid.

        Requires the "All Mail" folder to be selected.

        Returns
        -------
        list
            All Mail UIDs (as integers), sorted most-recent first.
        """
        assert self.selected_folder_name == self.folder_names()['all'], \
            "must select All Mail first ({})".format(
                self.selected_folder_name)
        criterion = 'X-GM-THRID {}'.format(g_thrid)
        uids = [long(uid) for uid in self.conn.search(['NOT DELETED',
                                                       criterion])]
        # UIDs ascend over time; return in order most-recent first
        return sorted(uids, reverse=True)

    def find_messages(self, g_thrid):
        """ Get UIDs for the [sub]set of messages belonging to the given thread
        that are in the current folder.
        """
        criteria = 'X-GM-THRID {}'.format(g_thrid)
        return sorted([long(uid) for uid in
                       self.conn.search(['NOT DELETED', criteria])])

    # -----------------------------------------
    # following methods WRITE to IMAP account!
    # -----------------------------------------

    def archive_thread(self, g_thrid):
        """Archive a thread by removing it from the Inbox (Gmail semantics:
        a message deleted from Inbox remains in All Mail)."""
        assert self.selected_folder_name == self.folder_names()['inbox'], \
            "must select INBOX first ({0})".format(self.selected_folder_name)
        uids = self.find_messages(g_thrid)
        # delete from inbox == archive for Gmail
        if uids:
            self.conn.delete_messages(uids)

    def copy_thread(self, g_thrid, to_folder):
        """ NOTE: Does nothing if the thread isn't in the currently selected
        folder.
        """
        uids = self.find_messages(g_thrid)
        if uids:
            self.conn.copy(uids, to_folder)

    def add_label(self, g_thrid, label_name):
        """
        NOTE: Does nothing if the thread isn't in the currently selected
        folder.
        """
        uids = self.find_messages(g_thrid)
        self.conn.add_gmail_labels(uids, [label_name])

    def remove_label(self, g_thrid, label_name):
        """
        NOTE: Does nothing if the thread isn't in the currently selected
        folder.
        """
        # Gmail won't even include the label of the selected folder (when the
        # selected folder is a label) in the list of labels for a UID, FYI.
        assert self.selected_folder_name != label_name, \
            "Gmail doesn't support removing a selected label"
        uids = self.find_messages(g_thrid)
        self.conn.remove_gmail_labels(uids, [label_name])

    def get_labels(self, g_thrid):
        """Return the distinct Gmail labels across every message in the
        thread (order unspecified)."""
        uids = self.find_messages(g_thrid)
        labels = self.conn.get_gmail_labels(uids)
        # the complicated list comprehension below simply flattens the list
        unique_labels = set([item for sublist in labels.values()
                             for item in sublist])
        return list(unique_labels)

    def set_unread(self, g_thrid, unread):
        """Mark every message in the thread (un)read via the \\Seen flag."""
        uids = self.find_messages(g_thrid)
        if unread:
            self.conn.remove_flags(uids, ['\\Seen'])
        else:
            self.conn.add_flags(uids, ['\\Seen'])

    def set_starred(self, g_thrid, starred):
        """Star/unstar every message in the thread.

        NOTE(review): uses '\\Starred', unlike the generic IMAP client
        which uses '\\Flagged' -- confirm this is what Gmail expects.
        """
        uids = self.find_messages(g_thrid)
        if starred:
            self.conn.add_flags(uids, ['\\Starred'])
        else:
            self.conn.remove_flags(uids, ['\\Starred'])

    def delete(self, g_thrid, folder_name):
        """
        Permanent delete i.e. remove the corresponding label and add the
        `Trash` flag. We currently only allow this for Drafts, all other
        non-All Mail deletes are archives.
        """
        uids = self.find_messages(g_thrid)
        if folder_name == self.folder_names()['drafts']:
            # Remove Gmail's `Draft` label
            self.conn.remove_gmail_labels(uids, ['\Draft'])
            # Move to Gmail's `Trash` folder
            self.conn.delete_messages(uids)
            self.conn.expunge()
            # Delete from `Trash`
            # NOTE: switches the selected folder as a side effect.
            self.conn.select_folder(self.folder_names()['trash'])
            trash_uids = self.find_messages(g_thrid)
            self.conn.delete_messages(trash_uids)
            self.conn.expunge()

    def delete_draft(self, inbox_uid):
        """
        Remove the `\Draft label` and add the `Trash` flag.

        Need both since that is the intended behaviour i.e. so the message is
        removed from the user's `Drafts` folder and into the `Trash` folder.

        Parameters
        ----------
        inbox_uid : str
            The public_id of the draft we want to delete on the remote,
            which is its X-INBOX-ID header too.

        Notes
        -----
        Need the public_id == inbox_uid since that is the only unique
        identifier for the message that both we and the remote know.
        """
        criteria = ['DRAFT', 'NOT DELETED',
                    'HEADER X-INBOX-ID {0}'.format(inbox_uid)]
        draft_uids = self.conn.search(criteria)
        if draft_uids:
            assert len(draft_uids) == 1
            # Remove Gmail's `Draft` label
            self.conn.remove_gmail_labels(draft_uids, ['\Draft'])
            # Move to Gmail's `Trash` folder
            self.conn.delete_messages(draft_uids)
            self.conn.expunge()
            # Delete from `Trash`
            # NOTE: switches the selected folder as a side effect.
            self.conn.select_folder(self.folder_names()['trash'])
            criteria = ['HEADER X-INBOX-ID {0}'.format(inbox_uid)]
            trash_uids = self.conn.search(criteria)
            self.conn.delete_messages(trash_uids)
            self.conn.expunge()
# Fix set_starred syncback actions.
""" IMAPClient wrapper for Inbox.
Unfortunately, due to IMAP's statefulness, to implement connection pooling we
have to shunt off dealing with the connection pool to the caller or we'll end
up trying to execute calls with the wrong folder selected some amount of the
time. That's why functions take a connection argument.
"""
import imaplib
import functools
import threading
from email.parser import HeaderParser
from collections import namedtuple, defaultdict
import gevent
from gevent import socket, GreenletExit
from gevent.coros import BoundedSemaphore
import geventconnpool
from inbox.util.concurrency import retry
from inbox.util.itert import chunk
from inbox.util.misc import or_none, timed
from inbox.basicauth import (ConnectionError, ValidationError,
TransientConnectionError, AuthError)
from inbox.models import Folder
from inbox.models.session import session_scope
from inbox.providers import provider_info
from inbox.models.account import Account
from inbox.models.backends.imap import ImapAccount
from inbox.log import get_logger
logger = get_logger()
__all__ = ['CrispinClient', 'GmailCrispinClient', 'CondStoreCrispinClient']
# Unify flags API across IMAP and Gmail
Flags = namedtuple('Flags', 'flags')
# Flags includes labels on Gmail because Gmail doesn't use \Draft.
GmailFlags = namedtuple('GmailFlags', 'flags labels')
# Gmail per-message metadata: X-GM-MSGID and X-GM-THRID values.
GMetadata = namedtuple('GMetadata', 'msgid thrid')
# A raw message as fetched off the wire; the g_* fields are None for
# non-Gmail providers.
RawMessage = namedtuple(
    'RawImapMessage',
    'uid internaldate flags body g_thrid g_msgid g_labels')
# We will retry a couple of times for transient errors, such as an invalid
# access token or the server being temporarily unavailable.
MAX_TRANSIENT_ERRORS = 2
# Lazily-initialized map of account ids to lock objects.
# This prevents multiple greenlets from concurrently creating duplicate
# connection pools for a given account.
_lock_map = defaultdict(threading.Lock)
class GmailSettingError(Exception):
    """Raised when a Gmail account is misconfigured on the remote side."""
def _get_connection_pool(account_id, pool_size, pool_map, readonly):
    """Return the cached pool for `account_id`, creating it if necessary.

    Creation is serialized per-account via _lock_map so concurrent
    greenlets don't build duplicate pools. On an auth failure the greenlet
    is killed via GreenletExit.
    """
    with _lock_map[account_id]:
        try:
            existing = pool_map.get(account_id)
            if existing:
                return existing
            return pool_map.setdefault(
                account_id,
                CrispinConnectionPool(account_id,
                                      num_connections=pool_size,
                                      readonly=readonly))
        except AuthError:
            logger.error('Auth error for account {}'.format(account_id))
            raise GreenletExit()
def connection_pool(account_id, pool_size=4, pool_map=dict()):
    """ Per-account crispin connection pool.

    Use like this:

        with crispin.connection_pool(account_id).get() as crispin_client:
            # your code here
            pass

    Note that the returned CrispinClient could have ANY folder selected, or
    none at all! It's up to the calling code to handle folder sessions
    properly. We don't reset to a certain select state because it's slow.

    NOTE: the mutable default `pool_map` is intentional -- it acts as a
    process-wide cache of pools, shared across calls.
    """
    return _get_connection_pool(account_id, pool_size, pool_map, True)
def writable_connection_pool(account_id, pool_size=1, pool_map=dict()):
    """ Per-account crispin connection pool, with *read-write* connections.

    Use like this:

        conn_pool = crispin.writable_connection_pool(account_id)
        with conn_pool.get() as crispin_client:
            # your code here
            pass

    NOTE: the mutable default `pool_map` is intentional -- it acts as a
    process-wide cache of pools, shared across calls (separate from the
    read-only pools of `connection_pool()`).
    """
    return _get_connection_pool(account_id, pool_size, pool_map, False)
# Exception classes that cause a pooled connection to be discarded rather
# than returned to the pool.
CONN_DISCARD_EXC_CLASSES = (socket.error, imaplib.IMAP4.error)
class CrispinConnectionPool(geventconnpool.ConnectionPool):
    """
    Connection pool for Crispin clients.

    Connections in a pool are specific to an IMAPAccount.

    Parameters
    ----------
    account_id : int
        Which IMAPAccount to open up a connection to.
    num_connections : int
        How many connections in the pool.
    readonly : bool
        Is the connection to the IMAP server read-only?
    """
    def __init__(self, account_id, num_connections, readonly):
        logger.info('Creating Crispin connection pool for account {} with {} '
                    'connections'.format(account_id, num_connections))
        self.account_id = account_id
        self.readonly = readonly
        self._new_conn_lock = BoundedSemaphore(1)
        self._set_account_info()
        # 1200s == 20min
        geventconnpool.ConnectionPool.__init__(
            self, num_connections, keepalive=1200,
            exc_classes=CONN_DISCARD_EXC_CLASSES)

    def _set_account_info(self):
        """Load provider info and credentials from the database.

        For OAuth accounts, obtaining the access token may refresh it; on
        failure the account's sync_state is marked and the error re-raised.
        """
        with session_scope() as db_session:
            account = db_session.query(Account).get(self.account_id)
            self.provider_name = account.provider
            self.email_address = account.email_address
            self.provider_info = provider_info(account.provider,
                                               account.email_address)
            self.sync_state = account.sync_state
            # Refresh token if need be, for OAuthed accounts
            if self.provider_info['auth'] == 'oauth2':
                try:
                    self.credential = account.access_token
                except ValidationError:
                    logger.error("Error obtaining access token",
                                 account_id=self.account_id)
                    account.sync_state = 'invalid'
                    db_session.commit()
                    raise
                except ConnectionError:
                    logger.error("Error connecting",
                                 account_id=self.account_id)
                    account.sync_state = 'connerror'
                    db_session.commit()
                    raise
            else:
                self.credential = account.password

    def _new_connection(self):
        """Open and auth one IMAP connection; returns a CrispinClient.

        Returns None (connection dropped by the pool) on connection errors;
        retries a failed OAuth validation once after refreshing the token.
        NOTE: uses Python 2 `except X, e` syntax below.
        """
        from inbox.auth import handler_from_provider
        # Ensure that connections are initialized serially, so as not to use
        # many db sessions on startup.
        with self._new_conn_lock as _:
            auth_handler = handler_from_provider(self.provider_name)
            for retry_count in range(MAX_TRANSIENT_ERRORS):
                try:
                    conn = auth_handler.connect_account(self.provider_name,
                                                        self.email_address,
                                                        self.credential)
                    # If we can connect the account, then we can set the state
                    # to 'running' if it wasn't already
                    if self.sync_state != 'running':
                        with session_scope() as db_session:
                            query = db_session.query(ImapAccount)
                            account = query.get(self.account_id)
                            self.sync_state = account.sync_state = 'running'
                    return new_crispin(self.account_id, self.email_address,
                                       self.provider_name, conn, self.readonly)
                except ConnectionError, e:
                    if isinstance(e, TransientConnectionError):
                        return None
                    else:
                        logger.error('Error connecting',
                                     account_id=self.account_id)
                        with session_scope() as db_session:
                            query = db_session.query(ImapAccount)
                            account = query.get(self.account_id)
                            account.sync_state = 'connerror'
                        return None
                except ValidationError, e:
                    # If we failed to validate, but the account is oauth2, we
                    # may just need to refresh the access token. Try this one
                    # time.
                    if (self.provider_info['auth'] == 'oauth2' and
                            retry_count == 0):
                        with session_scope() as db_session:
                            query = db_session.query(ImapAccount)
                            account = query.get(self.account_id)
                            self.credential = account.renew_access_token()
                    else:
                        logger.error('Error validating',
                                     account_id=self.account_id)
                        with session_scope() as db_session:
                            query = db_session.query(ImapAccount)
                            account = query.get(self.account_id)
                            account.sync_state = 'invalid'
                        raise
        return None

    def _keepalive(self, c):
        """Pool keepalive hook: issue a NOOP on the idle connection."""
        c.conn.noop()
def _exc_callback():
    """Retry hook: back off briefly before retrying with a new connection."""
    gevent.sleep(5)
    logger.info('Connection broken with error; retrying with new connection',
                exc_info=True)
def _fail_callback():
    """Failure hook: log when the retry budget is exhausted."""
    logger.error('Max retries reached. Aborting', exc_info=True)
# Decorator that retries a crispin operation on connection errors: up to 5
# attempts, with the count reset after 150s without failure.
retry_crispin = functools.partial(
    retry, retry_classes=CONN_DISCARD_EXC_CLASSES, exc_callback=_exc_callback,
    fail_callback=_fail_callback, max_count=5, reset_interval=150)
def new_crispin(account_id, email_address, provider_name, conn, readonly=True):
    """Instantiate the right CrispinClient subclass for the provider.

    Gmail gets GmailCrispinClient; otherwise the provider file (and, as a
    fallback, the account's own `supports_condstore` setting) decides
    between CondStoreCrispinClient and plain CrispinClient.

    BUGFIX: previously, if the account row was missing from the database,
    `cls` was never assigned and the final call raised UnboundLocalError.
    Now the fallback defaults to plain CrispinClient.
    """
    if provider_name == 'gmail':
        cls = GmailCrispinClient
    else:
        info = provider_info(provider_name, email_address)
        # look up in the provider database to see
        # if the provider supports CONDSTORE
        if "condstore" in info:
            cls = CondStoreCrispinClient if info["condstore"] else CrispinClient
        else:
            # no match in provider file, check in the
            # account settings; default to the plain client.
            cls = CrispinClient
            with session_scope() as db_session:
                acc = db_session.query(Account).get(account_id)
                if acc is not None and getattr(acc, 'supports_condstore',
                                               False):
                    cls = CondStoreCrispinClient
    return cls(account_id, provider_name, email_address, conn,
               readonly=readonly)
class CrispinClient(object):
""" Generic IMAP client wrapper.
One thing to note about crispin clients is that *all* calls operate on
the currently selected folder.
Crispin will NEVER implicitly select a folder for you.
This is very important! IMAP only guarantees that folder message UIDs
are valid for a "session", which is defined as from the time you
SELECT a folder until the connection is closed or another folder is
selected.
Crispin clients *always* return long ints rather than strings for number
data types, such as message UIDs, Google message IDs, and Google thread
IDs.
All inputs are coerced to strings before being passed off to the IMAPClient
connection.
You should really be interfacing with this class via a connection pool,
see `connection_pool()`.
Parameters
----------
account_id : int
Database id of the associated IMAPAccount.
conn : IMAPClient
Open IMAP connection (should be already authed).
readonly : bool
Whether or not to open IMAP connections as readonly.
"""
PROVIDER = 'IMAP'
# NOTE: Be *careful* changing this! Downloading too much at once may
# cause memory errors that only pop up in extreme edge cases.
CHUNK_SIZE = 1
def __init__(self, account_id, provider_name, email_address, conn,
readonly=True):
self.log = logger.new(account_id=account_id, module='crispin')
self.account_id = account_id
self.provider_name = provider_name
self.email_address = email_address
# IMAP isn't stateless :(
self.selected_folder = None
self._folder_names = None
self.conn = conn
self.readonly = readonly
def _fetch_folder_list(self):
""" NOTE: XLIST is deprecated, so we just use LIST.
An example response with some other flags:
* LIST (\HasNoChildren) "/" "INBOX"
* LIST (\Noselect \HasChildren) "/" "[Gmail]"
* LIST (\HasNoChildren \All) "/" "[Gmail]/All Mail"
* LIST (\HasNoChildren \Drafts) "/" "[Gmail]/Drafts"
* LIST (\HasNoChildren \Important) "/" "[Gmail]/Important"
* LIST (\HasNoChildren \Sent) "/" "[Gmail]/Sent Mail"
* LIST (\HasNoChildren \Junk) "/" "[Gmail]/Spam"
* LIST (\HasNoChildren \Flagged) "/" "[Gmail]/Starred"
* LIST (\HasNoChildren \Trash) "/" "[Gmail]/Trash"
IMAPClient parses this response into a list of
(flags, delimiter, name) tuples.
"""
folders = self.conn.list_folders()
return folders
def select_folder(self, folder, uidvalidity_cb):
""" Selects a given folder.
Makes sure to set the 'selected_folder' attribute to a
(folder_name, select_info) pair.
Selecting a folder indicates the start of an IMAP session. IMAP UIDs
are only guaranteed valid for sessions, so the caller must provide a
callback that checks UID validity.
Starts a new session even if `folder` is already selected, since
this does things like e.g. makes sure we're not getting
cached/out-of-date values for HIGHESTMODSEQ from the IMAP server.
"""
select_info = self.conn.select_folder(
folder, readonly=self.readonly)
select_info['UIDVALIDITY'] = long(select_info['UIDVALIDITY'])
self.selected_folder = (folder, select_info)
# don't propagate cached information from previous session
self._folder_names = None
return uidvalidity_cb(self.account_id, folder, select_info)
@property
def selected_folder_name(self):
return or_none(self.selected_folder, lambda f: f[0])
@property
def selected_folder_info(self):
return or_none(self.selected_folder, lambda f: f[1])
@property
def selected_uidvalidity(self):
return or_none(self.selected_folder_info, lambda i: i['UIDVALIDITY'])
def sync_folders(self):
to_sync = []
folders = self.folder_names()
for tag in ('inbox', 'drafts', 'sent', 'starred', 'important',
'archive', 'extra', 'spam', 'trash'):
if tag == 'extra' and tag in folders:
to_sync.extend(folders['extra'])
elif tag in folders:
to_sync.append(folders[tag])
return to_sync
def folder_names(self):
# Different providers have different names for folders, here
# we have a default map for common name mapping, additional
# mappings can be provided via the provider configuration file
default_folder_map = {'INBOX': 'inbox', 'DRAFTS': 'drafts',
'DRAFT': 'drafts', 'JUNK': 'spam',
'ARCHIVE': 'archive', 'SENT': 'sent',
'TRASH': 'trash', 'SPAM': 'spam'}
# Some providers also provide flags to determine common folders
# Here we read these flags and apply the mapping
flag_to_folder_map = {'\\Trash': 'trash', '\\Sent': 'sent',
'\\Drafts': 'drafts', '\\Junk': 'spam',
'\\Inbox': 'inbox', '\\Spam': 'spam'}
# Additionally we provide a custom mapping for providers that
# don't fit into the defaults.
info = provider_info(self.provider_name, self.email_address)
folder_map = info.get('folder_map', {})
if self._folder_names is None:
folders = self._fetch_folder_list()
self._folder_names = dict()
for flags, delimiter, name in folders:
if u'\\Noselect' in flags:
# special folders that can't contain messages
pass
# TODO: internationalization support
elif name in folder_map:
self._folder_names[folder_map[name]] = name
elif name.upper() in default_folder_map:
self._folder_names[default_folder_map[name.upper()]] = name
else:
matched = False
for flag in flags:
if flag in flag_to_folder_map:
self._folder_names[flag_to_folder_map[flag]] = name
matched = True
if not matched:
self._folder_names.setdefault(
'extra', list()).append(name)
# TODO: support subfolders
# Create any needed folders that don't exist on the backend
needed_folders = set(['inbox', 'drafts', 'sent', 'spam',
'trash', 'archive'])
needed_folders -= set(self._folder_names.keys())
for folder_id in needed_folders:
name = folder_id.capitalize()
self.create_folder(name)
with session_scope() as db_session:
account = db_session.query(Account).get(self.account_id)
folder = Folder.find_or_create(db_session, account,
name, folder_id)
setattr(account, folder_id + '_folder', folder)
db_session.commit()
self._folder_names[folder_id] = name
return self._folder_names
def folder_status(self, folder):
status = [long(val) for val in self.conn.folder_status(
folder, ('UIDVALIDITY'))]
return status
def create_folder(self, name):
self.conn.create_folder(name)
def search_uids(self, criteria):
""" Find not-deleted UIDs in this folder matching the criteria.
See http://tools.ietf.org/html/rfc3501.html#section-6.4.4 for valid
criteria.
"""
full_criteria = ['NOT DELETED']
if isinstance(criteria, list):
full_criteria.extend(criteria)
else:
full_criteria.append(criteria)
return sorted([long(uid) for uid in self.conn.search(full_criteria)])
def all_uids(self):
""" Fetch all UIDs associated with the currently selected folder.
Returns
-------
list
UIDs as integers sorted in ascending order.
"""
data = self.conn.search(['NOT DELETED'])
return sorted([long(s) for s in data])
def uids(self, uids):
uids = [str(u) for u in uids]
raw_messages = self.conn.fetch(uids,
['BODY.PEEK[] INTERNALDATE FLAGS'])
for uid, msg in raw_messages.iteritems():
if 'BODY[]' not in msg:
raise Exception(
'No BODY[] element in IMAP response. Tags given: {}'
.format(msg.keys()))
# NOTE: flanker needs encoded bytestrings as its input, since to
# deal properly with MIME-encoded email you need to do part
# decoding based on message / MIME part headers anyway. imapclient
# tries to abstract away bytes and decodes all bytes received from
# the wire as _latin-1_, which is wrong in any case where 8bit MIME
# is used. so we have to reverse the damage before we proceed.
#
# We should REMOVE this XXX HACK XXX when we finish working with
# Menno to fix this problem upstream.
msg['BODY[]'] = msg['BODY[]'].encode('latin-1')
messages = []
for uid in sorted(raw_messages.iterkeys(), key=long):
msg = raw_messages[uid]
messages.append(RawMessage(uid=long(uid),
internaldate=msg['INTERNALDATE'],
flags=msg['FLAGS'],
body=msg['BODY[]'],
# TODO: use data structure that isn't
# Gmail-specific
g_thrid=None, g_msgid=None,
g_labels=None))
return messages
def flags(self, uids):
uids = [str(u) for u in uids]
data = self.conn.fetch(uids, ['FLAGS'])
return dict([(long(uid), Flags(msg['FLAGS']))
for uid, msg in data.iteritems()])
def copy_uids(self, uids, to_folder):
if not uids:
return
uids = [str(u) for u in uids]
self.conn.copy(uids, to_folder)
def delete_uids(self, uids):
uids = [str(u) for u in uids]
self.conn.delete_messages(uids)
self.conn.expunge()
def set_starred(self, uids, starred):
if starred:
self.conn.add_flags(uids, ['\\Flagged'])
else:
self.conn.remove_flags(uids, ['\\Flagged'])
def set_unread(self, uids, unread):
uids = [str(u) for u in uids]
if unread:
self.conn.remove_flags(uids, ['\\Seen'])
else:
self.conn.add_flags(uids, ['\\Seen'])
def save_draft(self, message, date=None):
assert self.selected_folder_name == self.folder_names()['drafts'], \
'Must select drafts folder first ({0})'.format(
self.selected_folder_name)
self.conn.append(self.selected_folder_name, message, ['\\Draft'], date)
def create_message(self, message, date=None):
"""Create a message on the server. Only used to fix server-side bugs,
like iCloud not saving Sent messages"""
assert self.selected_folder_name == self.folder_names()['sent'], \
'Must select sent folder first ({0})'.format(
self.selected_folder_name)
self.conn.append(self.selected_folder_name, message, [], date)
def fetch_headers(self, uids):
"""Fetch headers for the given uids. Chunked because certain providers
fail with 'Command line too large' if you feed them too many uids at
once."""
headers = {}
for uid_chunk in chunk(uids, 100):
headers.update(self.conn.fetch(
uid_chunk, ['BODY.PEEK[HEADER]']))
return headers
    def delete_draft(self, inbox_uid):
        """
        Move the message from the "Drafts" folder and into the "Trash" folder.
        Parameters
        ----------
        inbox_uid : str
            The public_id of the draft we want to delete on the remote,
            which is its X-INBOX-ID header too.
        Notes
        -----
        Need the public_id == inbox_uid since that is the only unique
        identifier for the message that both we and the remote know.
        """
        # A falsy inbox_uid would make the header comparison below useless,
        # so refuse it up front.
        assert inbox_uid
        criteria = ['DRAFT', 'NOT DELETED']
        all_draft_uids = self.conn.search(criteria)
        # It would be nice to just search by X-INBOX-ID header too, but some
        # backends don't support that. So fetch the header for each draft and
        # see if we can find one that matches.
        # TODO(emfree): are there other ways we can narrow the result set a
        # priori (by subject or date, etc.)
        matching_draft_headers = self.fetch_headers(all_draft_uids)
        for uid, response in matching_draft_headers.iteritems():
            headers = response['BODY[HEADER]']
            parser = HeaderParser()
            x_inbox_id = parser.parsestr(headers).get('X-Inbox-Id')
            if x_inbox_id == inbox_uid:
                # TODO: do we need this part?
                # Remove IMAP `Draft` label
                self.conn.remove_flags([uid], ['\Draft'])
                self.conn.delete_messages([uid])
                self.conn.expunge()
                # Delete from `Trash`
                # Needed because otherwise deleting a draft that was sent
                # results in it synced twice - once via the Trash folder and
                # once via the Sent folder.
                # NOTE: this switches the selected folder to Trash and does
                # not switch back; the caller must re-select if needed.
                self.conn.select_folder(self.folder_names()['trash'])
                all_trash_uids = self.conn.search()
                all_trash_headers = self.fetch_headers(all_trash_uids)
                for u, r in all_trash_headers.iteritems():
                    x_inbox_header = HeaderParser().parsestr(
                        r['BODY[HEADER]']).get('X-Inbox-Id')
                    if x_inbox_header == inbox_uid:
                        self.conn.delete_messages([u])
                        self.conn.expunge()
                # Stop after the first match: per the Notes above, the
                # X-Inbox-Id header is the unique identifier for the draft.
                return
class CondStoreCrispinClient(CrispinClient):
    """CrispinClient variant for servers with CONDSTORE support, which lets
    us track folder changes via the HIGHESTMODSEQ value."""
    def select_folder(self, folder, uidvalidity_cb):
        ret = super(CondStoreCrispinClient,
                    self).select_folder(folder, uidvalidity_cb)
        # We need to issue a STATUS command asking for HIGHESTMODSEQ
        # because some servers won't enable CONDSTORE support otherwise
        status = self.folder_status(folder)
        info = self.selected_folder_info
        if 'HIGHESTMODSEQ' in info:
            # Normalize whatever SELECT reported to a long.
            info['HIGHESTMODSEQ'] = long(info['HIGHESTMODSEQ'])
        elif 'HIGHESTMODSEQ' in status:
            # SELECT didn't report it; fall back to the STATUS value
            # (folder_status already converted it to long).
            info['HIGHESTMODSEQ'] = status['HIGHESTMODSEQ']
        return ret
    def folder_status(self, folder):
        """Return UIDVALIDITY / HIGHESTMODSEQ / UIDNEXT for *folder*,
        with every value converted to long."""
        raw_status = self.conn.folder_status(
            folder, ('UIDVALIDITY', 'HIGHESTMODSEQ', 'UIDNEXT'))
        for key in raw_status:
            raw_status[key] = long(raw_status[key])
        return raw_status
    @property
    def selected_highestmodseq(self):
        return or_none(self.selected_folder_info, lambda i: i['HIGHESTMODSEQ'])
    @timed
    def new_and_updated_uids(self, modseq):
        # TODO(emfree): It may be useful to hold on to the whole response here
        # and/or fetch more metadata, not just return the UIDs.
        response = self.conn.fetch(
            '1:*', ['FLAGS'],
            modifiers=['CHANGEDSINCE {}'.format(modseq)])
        return sorted(response.keys())
class GmailCrispinClient(CondStoreCrispinClient):
    """Gmail-specific client: layers Gmail's IMAP extensions (X-GM-MSGID,
    X-GM-THRID, X-GM-LABELS) and its label-based folder layout on top of the
    CONDSTORE-aware base class."""
    PROVIDER = 'gmail'
    def sync_folders(self):
        """ Gmail-specific list of folders to sync.
        In Gmail, every message is a subset of All Mail, with the exception of
        the Trash and Spam folders. So we only sync All Mail, Trash, Spam,
        and Inbox (for quickly downloading initial inbox messages and
        continuing to receive new Inbox messages while a large mail archive is
        downloading).
        Returns
        -------
        list
            Folders to sync (as strings).
        """
        if 'all' not in self.folder_names():
            raise GmailSettingError(
                "Account {} ({}) has no detected 'All Mail' folder. This is "
                "probably because it is disabled from appearing in IMAP. "
                "Please enable at "
                "https://mail.google.com/mail/#settings/labels"
                .format(self.account_id, self.email_address))
        folders = [self.folder_names()['inbox'], self.folder_names()['all']]
        # Non-essential folders, so don't error out if they're not present.
        for tag in ('trash', 'spam'):
            if tag in self.folder_names():
                folders.append(self.folder_names()[tag])
        return folders
    def flags(self, uids):
        """ Gmail-specific flags.
        Returns
        -------
        dict
            Mapping of `uid` (long) : GmailFlags (IMAP flags + Gmail labels).
        """
        uids = [str(u) for u in uids]
        data = self.conn.fetch(uids, ['FLAGS X-GM-LABELS'])
        return dict([(long(uid), GmailFlags(msg['FLAGS'], msg['X-GM-LABELS']))
                     for uid, msg in data.iteritems()])
    def folder_names(self):
        """ Parses out Gmail-specific folder names based on Gmail IMAP flags.
        If the user's account is localized to a different language, it will
        return the proper localized string.
        Caches the call since we use it all over the place and folders never
        change names during a session.
        """
        if self._folder_names is None:
            folders = self._fetch_folder_list()
            self._folder_names = dict()
            for flags, delimiter, name in folders:
                if u'\\Noselect' in flags:
                    # special folders that can't contain messages, usually
                    # just '[Gmail]'
                    pass
                elif '\\All' in flags:
                    self._folder_names['all'] = name
                elif name.lower() == 'inbox':
                    self._folder_names[name.lower()] = name.capitalize()
                    # (redundant `continue`: this elif branch ends here anyway)
                    continue
                else:
                    # find localized names for Gmail's special folders
                    for flag in ['\\Drafts', '\\Important', '\\Sent', '\\Junk',
                                 '\\Flagged', '\\Trash']:
                        if flag in flags:
                            k = flag.replace('\\', '').lower()
                            if k == 'flagged':
                                self._folder_names['starred'] = name
                            elif k == 'junk':
                                self._folder_names['spam'] = name
                            else:
                                self._folder_names[k] = name
                            break
                    else:
                        # everything else is a label
                        self._folder_names.setdefault('labels', list())\
                            .append(name)
            if 'labels' in self._folder_names:
                self._folder_names['labels'].sort()
                # synonyms on Gmail
                self._folder_names['extra'] = self._folder_names['labels']
        return self._folder_names
    def uids(self, uids):
        """Fetch full raw messages (body, flags, Gmail thread/message ids and
        labels) for the given UIDs, returned as RawMessage tuples sorted by
        ascending UID."""
        uids = [str(u) for u in uids]
        raw_messages = self.conn.fetch(uids, ['BODY.PEEK[] INTERNALDATE FLAGS',
                                              'X-GM-THRID', 'X-GM-MSGID',
                                              'X-GM-LABELS'])
        for uid, msg in raw_messages.iteritems():
            # NOTE: flanker needs encoded bytestrings as its input, since to
            # deal properly with MIME-encoded email you need to do part
            # decoding based on message / MIME part headers anyway. imapclient
            # tries to abstract away bytes and decodes all bytes received from
            # the wire as _latin-1_, which is wrong in any case where 8bit MIME
            # is used. so we have to reverse the damage before we proceed.
            #
            # We should REMOVE this XXX HACK XXX when we finish working with
            # Menno to fix this problem upstream.
            msg['BODY[]'] = msg['BODY[]'].encode('latin-1')
        messages = []
        # Sort numerically (key=long), not lexicographically: UIDs are strings.
        for uid in sorted(raw_messages.iterkeys(), key=long):
            msg = raw_messages[uid]
            messages.append(RawMessage(uid=long(uid),
                                       internaldate=msg['INTERNALDATE'],
                                       flags=msg['FLAGS'],
                                       body=msg['BODY[]'],
                                       g_thrid=long(msg['X-GM-THRID']),
                                       g_msgid=long(msg['X-GM-MSGID']),
                                       g_labels=msg['X-GM-LABELS']))
        return messages
    def g_metadata(self, uids):
        """ Download Gmail MSGIDs and THRIDs for the given messages.
        NOTE: only UIDs are guaranteed to be unique to a folder, X-GM-MSGID
        and X-GM-THRID may not be.
        Parameters
        ----------
        uids : list
            UIDs to fetch data for. Must be from the selected folder.
        Returns
        -------
        dict
            uid: GMetadata(msgid, thrid)
        """
        uids = [str(u) for u in uids]
        self.log.debug('fetching X-GM-MSGID and X-GM-THRID',
                       uid_count=len(uids))
        return dict([(long(uid), GMetadata(long(ret['X-GM-MSGID']),
                                           long(ret['X-GM-THRID']))) for uid,
                     ret in self.conn.fetch(uids, ['X-GM-MSGID',
                                                   'X-GM-THRID']).iteritems()])
    def expand_thread(self, g_thrid):
        """ Find all message UIDs in this account with X-GM-THRID equal to
        g_thrid.
        Requires the "All Mail" folder to be selected.
        Returns
        -------
        list
            All Mail UIDs (as integers), sorted most-recent first.
        """
        assert self.selected_folder_name == self.folder_names()['all'], \
            "must select All Mail first ({})".format(
                self.selected_folder_name)
        criterion = 'X-GM-THRID {}'.format(g_thrid)
        uids = [long(uid) for uid in self.conn.search(['NOT DELETED',
                                                       criterion])]
        # UIDs ascend over time; return in order most-recent first
        return sorted(uids, reverse=True)
    def find_messages(self, g_thrid):
        """ Get UIDs for the [sub]set of messages belonging to the given thread
        that are in the current folder.
        """
        criteria = 'X-GM-THRID {}'.format(g_thrid)
        return sorted([long(uid) for uid in
                       self.conn.search(['NOT DELETED', criteria])])
    # -----------------------------------------
    # following methods WRITE to IMAP account!
    # -----------------------------------------
    def archive_thread(self, g_thrid):
        """Archive a thread: removing it from INBOX archives it on Gmail."""
        assert self.selected_folder_name == self.folder_names()['inbox'], \
            "must select INBOX first ({0})".format(self.selected_folder_name)
        uids = self.find_messages(g_thrid)
        # delete from inbox == archive for Gmail
        if uids:
            self.conn.delete_messages(uids)
    def copy_thread(self, g_thrid, to_folder):
        """ NOTE: Does nothing if the thread isn't in the currently selected
        folder.
        """
        uids = self.find_messages(g_thrid)
        if uids:
            self.conn.copy(uids, to_folder)
    def add_label(self, g_thrid, label_name):
        """
        NOTE: Does nothing if the thread isn't in the currently selected
        folder.
        """
        uids = self.find_messages(g_thrid)
        self.conn.add_gmail_labels(uids, [label_name])
    def remove_label(self, g_thrid, label_name):
        """
        NOTE: Does nothing if the thread isn't in the currently selected
        folder.
        """
        # Gmail won't even include the label of the selected folder (when the
        # selected folder is a label) in the list of labels for a UID, FYI.
        assert self.selected_folder_name != label_name, \
            "Gmail doesn't support removing a selected label"
        uids = self.find_messages(g_thrid)
        self.conn.remove_gmail_labels(uids, [label_name])
    def get_labels(self, g_thrid):
        """Return the deduplicated list of Gmail labels across all messages
        of the given thread in the current folder."""
        uids = self.find_messages(g_thrid)
        labels = self.conn.get_gmail_labels(uids)
        # the complicated list comprehension below simply flattens the list
        unique_labels = set([item for sublist in labels.values()
                             for item in sublist])
        return list(unique_labels)
    def set_unread(self, g_thrid, unread):
        # NOTE: unlike the base class, this operates on a whole thread
        # (g_thrid), not on a list of UIDs.
        uids = self.find_messages(g_thrid)
        if unread:
            self.conn.remove_flags(uids, ['\\Seen'])
        else:
            self.conn.add_flags(uids, ['\\Seen'])
    def set_starred(self, g_thrid, starred):
        # NOTE: unlike the base class, this operates on a whole thread
        # (g_thrid), not on a list of UIDs.
        uids = self.find_messages(g_thrid)
        if starred:
            self.conn.add_flags(uids, ['\\Flagged'])
        else:
            self.conn.remove_flags(uids, ['\\Flagged'])
    def delete(self, g_thrid, folder_name):
        """
        Permanent delete i.e. remove the corresponding label and add the
        `Trash` flag. We currently only allow this for Drafts, all other
        non-All Mail deletes are archives.
        """
        uids = self.find_messages(g_thrid)
        if folder_name == self.folder_names()['drafts']:
            # Remove Gmail's `Draft` label
            # NOTE(review): single backslash here ('\Draft'); works because
            # \D is not a recognized escape, but '\\Draft' is used elsewhere
            # in this file -- confirm which form the server expects.
            self.conn.remove_gmail_labels(uids, ['\Draft'])
            # Move to Gmail's `Trash` folder
            self.conn.delete_messages(uids)
            self.conn.expunge()
            # Delete from `Trash`
            self.conn.select_folder(self.folder_names()['trash'])
            trash_uids = self.find_messages(g_thrid)
            self.conn.delete_messages(trash_uids)
            self.conn.expunge()
    def delete_draft(self, inbox_uid):
        """
        Remove the `\Draft label` and add the `Trash` flag.
        Need both since that is the intended behaviour i.e. so the message is
        removed from the user's `Drafts` folder and into the `Trash` folder.
        Parameters
        ----------
        inbox_uid : str
            The public_id of the draft we want to delete on the remote,
            which is its X-INBOX-ID header too.
        Notes
        -----
        Need the public_id == inbox_uid since that is the only unique
        identifier for the message that both we and the remote know.
        """
        # Gmail supports searching by arbitrary header, so (unlike the
        # generic client) we can locate the draft directly.
        criteria = ['DRAFT', 'NOT DELETED',
                    'HEADER X-INBOX-ID {0}'.format(inbox_uid)]
        draft_uids = self.conn.search(criteria)
        if draft_uids:
            # X-Inbox-Id is unique, so at most one message should match.
            assert len(draft_uids) == 1
            # Remove Gmail's `Draft` label
            self.conn.remove_gmail_labels(draft_uids, ['\Draft'])
            # Move to Gmail's `Trash` folder
            self.conn.delete_messages(draft_uids)
            self.conn.expunge()
            # Delete from `Trash`
            self.conn.select_folder(self.folder_names()['trash'])
            criteria = ['HEADER X-INBOX-ID {0}'.format(inbox_uid)]
            trash_uids = self.conn.search(criteria)
            self.conn.delete_messages(trash_uids)
            self.conn.expunge()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2010-2012 Associazione OpenERP Italia
# (<http://www.openerp-italia.org>).
# Copyright(c)2008-2010 SIA "KN dati".(http://kndati.lv) All Rights Reserved.
# General contacts <info@kndati.lv>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import os
import sys
import logging
from openerp.tools.translate import _
from openerp.report import report_sxw
from openerp.report.report_sxw import rml_parse
from datetime import datetime, timedelta
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class Parser(report_sxw.rml_parse):
    """RML report parser for the 'deliveries to do' summary report.

    Registers counter and lookup helpers in the report localcontext so the
    RML template can call them directly.
    """
    # NOTE(review): class-level mutable defaults; __init__ calls
    # reset_counter(), which rebinds self.counters per instance, so instances
    # don't actually share counter state after construction.
    counters = {}
    last_record_id = 0
    def __init__(self, cr, uid, name, context):
        super(Parser, self).__init__(cr, uid, name, context)
        self.localcontext.update({
            # counter manage:
            'reset_counter': self.reset_counter,
            'get_counter': self.get_counter,
            'set_counter': self.set_counter,
            'get_last_record_id': self.get_last_record_id,
            'get_object_line': self.get_object_line,
            'get_datetime': self.get_datetime,
            'get_datetime_now': self.get_datetime_now,
            'get_partic': self.get_partic,
            'get_partic_description': self.get_partic_description,
            'get_parcels': self.get_parcels,
            'get_parcels_table_load': self.get_parcels_table_load,
            'set_total_parcel': self.set_total_parcel,
            'get_total_parcel': self.get_total_parcel,
            'reset_print': self.reset_print,
            'get_order_selected': self.get_order_selected,
            # Wizard mode function:
            'get_header_string': self.get_header_string,
            'get_stock_value': self.get_stock_value,
            'has_extra_description': self.has_extra_description,
        })
        self.reset_counter()
        # TODO remove
        self.total_parcel = 0.0
    def has_extra_description(self, o):
        ''' Test if partner required extra columns for his description
        '''
        return o.partner_id.extra_description_load_list
    def get_mode(self, ):
        ''' Utility for check the mode of report depend on name
        '''
        if self.name == 'custom_mx1_todo_summary_report':
            return 'odoo'
        else: # custom_mx2_todo_internal_summary_report
            return 'mexal'
    def get_header_string(self, data, col):
        ''' Check report mode and return correct header
            mode:
            'odoo' (OC, B, Del.) *default
            'mexal' (OC, S, B)
        '''
        if data is None:
            data = {}
        mode = self.get_mode()
        _logger.info('Report run in mode: %s' % mode)
        # Column-index -> header text, per report mode ('mexal' is Italian).
        translate = {
            'odoo': {
                'title': 'REQUEST DELIVERY PRODUCT FORM',
                0: 'ID', # never call
                1: 'Our ref.',
                2: 'Your ref.',
                3: 'Date',
                4: 'Deadline',
                5: 'Our item',
                6: 'Your item',
                7: 'LMU',
                8: 'Pc.',
                9: 'Ordered',
                10: 'Maked+Assigned',
                11: 'Delivered',
                12: 'Available',
                13: 'Request',
                14: 'Bookable',
                15: 'Note',
                # Extra columns:
                '6a': 'Your description',
            },
            'mexal': {
                'title': 'CONSEGNE DA FARE',
                0: 'ID', # never call
                1: 'Ns. rif.',
                2: 'Vs. rif.',
                3: 'Data',
                4: 'Scadenza',
                5: 'Ns. articolo',
                6: 'Vs. articolo',
                7: 'LMU',
                8: 'Pz.',
                9: 'Ordinati',
                10: 'Sospesi',
                11: 'Disponibili',
                12: 'A disposizione',
                13: 'Richiesti',
                14: 'Prenotati',
                15: 'Note',
                # Extra columns:
                '6a': 'Vs. descrizione',
            },
        }
        return translate[mode].get(col, _('ERROR'))
    def get_stock_value(self, data, line, col):
        ''' Get 3 cols with correct qty data
            mode:
            'odoo' (OC, B, Del.) *default
            'mexal' (OC, S, B)
        '''
        if data is None:
            data = {}
        mode = self.get_mode()
        # Qty used:
        oc_qty = line.product_uom_qty
        assigned_qty = line.mx_assigned_qty
        b_qty = line.product_uom_maked_sync_qty
        delivered_qty = line.delivered_qty
        if mode == 'odoo':
            if col == 0: # Total order
                return int(oc_qty)
            elif col == 1: # Total producer
                return int(b_qty + assigned_qty)
            elif col == 2: # Total deliver
                return int(delivered_qty)
        else: # mexal
            if col == 0: # Remain order
                return int(oc_qty - delivered_qty)
            elif col == 1: # To produce
                res = int(oc_qty - assigned_qty - b_qty)
                return res if res > 0 else 0
            elif col == 2: # To deliver
                res = int(b_qty + assigned_qty - delivered_qty)
                return res if res > 0 else 0
        # Reached only for an unknown col value.
        return _('ERROR')
    def get_last_record_id(self):
        # ID of the last sale.order rendered by get_order_selected().
        return self.last_record_id
    # -------------------------------------------------------------------------
    # COUNTER MANAGE
    # -------------------------------------------------------------------------
    def reset_counter(self):
        _logger.info('Counter reset for company 1 load report')
        # reset counters:
        self.counters = {
            'total_parcel': 0.0,
            'volume': 0.0,
            'volume10': 0.0,
            'length': 0.0,
            'weight': 0.0,
        }
        _logger.info('Counter: %s' % self.counters)
        return ''
    def get_counter(self, name):
        ''' Get counter with name passed (else create an empty)
        '''
        if name not in self.counters:
            self.counters[name] = 0.0
        return self.counters[name]
    def set_counter(self, name, value):
        ''' Set counter with name with value passed
        '''
        self.counters[name] = value
        return "" # empty so no write in module
    def set_total_parcel(self, v):
        self.total_parcel = v
    def get_total_parcel(self, ):
        return self.total_parcel or 0.0
    def get_parcels(self, product, qty):
        ''' Get text for parcels totals:
            product: proxy obj for product
            qty: total to parcels
        '''
        res = ''
        q_x_pack = product.q_x_pack
        if q_x_pack:
            # Float division; int() below truncates toward zero for display.
            parcel = qty / q_x_pack
            if not parcel:
                res = ''
            else:
                res = 'SC. %s x %s =' % (int(parcel), int(q_x_pack))
        return res
    def get_parcels_table_load(self, l): #product, qty):
        ''' Get text for parcels totals:
            product: proxy obj for product
            qty: total to parcels
        '''
        res = []
        elements = {
            'S': l.delivery_s, #remain,
            'B': l.delivery_b,
        }
        _logger.info('Element for company 1 load report %s: %s' % (
            elements,
            l.product_id.default_code,
        ))
        # NOTE(review): `partial` is only assigned inside `if v:` below, but
        # read in the append on every iteration -- if the first element's v is
        # falsy this raises NameError; `parcel_text` similarly leaks its value
        # from the previous iteration. Verify against real data.
        parcel_text = ''
        product = l.product_id
        for key, v in elements.iteritems():
            if v:
                # Counter totals:
                # TODO volume from dimension (pack or piece?)
                self.counters['volume'] += v * (product.volume or 0.0)
                self.counters['volume10'] += v * (
                    (product.volume or 0.0) * 1.1)
                self.counters['length'] += v* (product.linear_length or 0.0)
                self.counters['weight'] += \
                    v * ((product.weight or 0.0) or (
                        product.weight_net or 0.0))
                q_x_pack = l.product_id.q_x_pack
                partial = False
                if q_x_pack:
                    parcel = v / q_x_pack # truncated
                    if v % q_x_pack != 0:
                        partial = True
                        parcel += 1
                    if parcel:
                        parcel_text = 'SC. %sx%s=' % (
                            '??' if partial else int(
                                parcel),
                            int(q_x_pack),
                        )
                        if not partial:
                            self.counters['total_parcel'] += parcel
                else:
                    parcel_text = ''
            res.append((key, parcel_text, v, partial))
        _logger.info('Counters: %s, %s' % (
            product.default_code,
            self.counters,
        ))
        return res
    def get_partic_browse(self, line):
        ''' Return browse object for line passed:
        '''
        partic_pool = self.pool.get('res.partner.product.partic')
        partic_ids = partic_pool.search(self.cr, self.uid, [
            ('partner_id', '=', line.order_id.partner_id.id),
            ('product_id', '=', line.product_id.id),
            ])
        if partic_ids:
            return partic_pool.browse(self.cr, self.uid, partic_ids)[0]
        else:
            return False
    def get_partic(self, line):
        ''' Return return if present partner-product partic code
        '''
        partic_proxy = self.get_partic_browse(line)
        if partic_proxy:
            return partic_proxy.partner_code or '/'
        else:
            return ''
    def get_partic_description(self, line):
        ''' Return return if present partner-product partic code
        '''
        partic_proxy = self.get_partic_browse(line)
        if partic_proxy:
            return '%s %s' % (
                partic_proxy.partner_description or '',
                partic_proxy.fabric_color or '',
                )
        else:
            return ''
    def get_datetime(self):
        ''' Return datetime obj
        '''
        return datetime
    def get_datetime_now(self):
        ''' Return datetime obj
        '''
        return datetime.now().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
    def _get_fully_list(self, objects):
        ''' Return list of object browse id list merged with no replication
            with al record masked for print
        '''
        sale_pool = self.pool.get('sale.order')
        # Selection by check:
        active_ids = [x.id for x in objects]
        # Selection by button:
        print_ids = sale_pool.search(self.cr, self.uid, [
            ('print', '=', True)])
        active_ids.extend(print_ids)
        res = list(set(active_ids))
        # Order per deadline:
        return sale_pool.search(self.cr, self.uid, [
            ('id', 'in', res)], order='date_deadline')
    def get_order_selected(self, objects):
        sale_pool = self.pool.get('sale.order')
        res = sale_pool.browse(
            self.cr, self.uid, self._get_fully_list(objects))
        # NOTE(review): bare except also swallows KeyboardInterrupt /
        # SystemExit; an `except IndexError` would cover the empty-result
        # case here.
        try:
            self.last_record_id = res[-1].id
        except:
            self.last_record_id = 0
        return res
    def get_object_line(self, objects):
        ''' Selected object + print object
        '''
        products = {}
        res = []
        sale_pool = self.pool.get('sale.order')
        for order in sale_pool.browse(
                self.cr, self.uid, self._get_fully_list(objects)):
            for line in order.order_line:
                # TODO parametrize (jump delivered all):
                if line.product_uom_qty - line.delivered_qty == 0:
                    continue
                code = line.product_id.default_code
                if code not in products:
                    products[code] = []
                #res.append(line) # unsorted
                products[code].append(line)
        # create a res order by product code
        for code in sorted(products):
            res.extend(products[code])
        return res
    def reset_print(self):
        ''' Called at the end of report to reset print check
        '''
        sale_pool = self.pool.get('sale.order')
        sale_pool.reset_print(self.cr, self.uid, False)
        _logger.info('Reset selection')
        return ''
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
generic bugfix (fast message)
#!/usr/bin/python
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2010-2012 Associazione OpenERP Italia
# (<http://www.openerp-italia.org>).
# Copyright(c)2008-2010 SIA "KN dati".(http://kndati.lv) All Rights Reserved.
# General contacts <info@kndati.lv>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import os
import sys
import logging
from openerp.tools.translate import _
from openerp.report import report_sxw
from openerp.report.report_sxw import rml_parse
from datetime import datetime, timedelta
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class Parser(report_sxw.rml_parse):
    """RML report parser for the 'deliveries to do' summary report.

    Registers counter and lookup helpers in the report localcontext so the
    RML template can call them directly.
    """
    # Class-level defaults; __init__ calls reset_counter(), which rebinds
    # self.counters per instance.
    counters = {}
    last_record_id = 0
    def __init__(self, cr, uid, name, context):
        super(Parser, self).__init__(cr, uid, name, context)
        self.localcontext.update({
            # counter manage:
            'reset_counter': self.reset_counter,
            'get_counter': self.get_counter,
            'set_counter': self.set_counter,
            'get_last_record_id': self.get_last_record_id,
            'get_object_line': self.get_object_line,
            'get_datetime': self.get_datetime,
            'get_datetime_now': self.get_datetime_now,
            'get_partic': self.get_partic,
            'get_partic_description': self.get_partic_description,
            'get_parcels': self.get_parcels,
            'get_parcels_table_load': self.get_parcels_table_load,
            'set_total_parcel': self.set_total_parcel,
            'get_total_parcel': self.get_total_parcel,
            'reset_print': self.reset_print,
            'get_order_selected': self.get_order_selected,
            # Wizard mode function:
            'get_header_string': self.get_header_string,
            'get_stock_value': self.get_stock_value,
            'has_extra_description': self.has_extra_description,
        })
        self.reset_counter()
        # TODO remove
        self.total_parcel = 0.0
    def has_extra_description(self, o):
        ''' Test if partner required extra columns for his description
        '''
        return o.partner_id.extra_description_load_list
    def get_mode(self, ):
        ''' Utility for check the mode of report depend on name
        '''
        if self.name == 'custom_mx1_todo_summary_report':
            return 'odoo'
        else: # custom_mx2_todo_internal_summary_report
            return 'mexal'
    def get_header_string(self, data, col):
        ''' Check report mode and return correct header
            mode:
            'odoo' (OC, B, Del.) *default
            'mexal' (OC, S, B)
        '''
        if data is None:
            data = {}
        mode = self.get_mode()
        _logger.info('Report run in mode: %s' % mode)
        # Column-index -> header text, per report mode ('mexal' is Italian).
        translate = {
            'odoo': {
                'title': 'REQUEST DELIVERY PRODUCT FORM',
                0: 'ID', # never call
                1: 'Our ref.',
                2: 'Your ref.',
                3: 'Date',
                4: 'Deadline',
                5: 'Our item',
                6: 'Your item',
                7: 'LMU',
                8: 'Pc.',
                9: 'Ordered',
                10: 'Maked+Assigned',
                11: 'Delivered',
                12: 'Available',
                13: 'Request',
                14: 'Bookable',
                15: 'Note',
                # Extra columns:
                '6a': 'Your description',
            },
            'mexal': {
                'title': 'CONSEGNE DA FARE',
                0: 'ID', # never call
                1: 'Ns. rif.',
                2: 'Vs. rif.',
                3: 'Data',
                4: 'Scadenza',
                5: 'Ns. articolo',
                6: 'Vs. articolo',
                7: 'LMU',
                8: 'Pz.',
                9: 'Ordinati',
                10: 'Sospesi',
                11: 'Disponibili',
                12: 'A disposizione',
                13: 'Richiesti',
                14: 'Prenotati',
                15: 'Note',
                # Extra columns:
                '6a': 'Vs. descrizione',
            },
        }
        return translate[mode].get(col, _('ERROR'))
    def get_stock_value(self, data, line, col):
        ''' Get 3 cols with correct qty data
            mode:
            'odoo' (OC, B, Del.) *default
            'mexal' (OC, S, B)
        '''
        if data is None:
            data = {}
        mode = self.get_mode()
        # Qty used:
        oc_qty = line.product_uom_qty
        assigned_qty = line.mx_assigned_qty
        b_qty = line.product_uom_maked_sync_qty
        delivered_qty = line.delivered_qty
        if mode == 'odoo':
            if col == 0: # Total order
                return int(oc_qty)
            elif col == 1: # Total producer
                return int(b_qty + assigned_qty)
            elif col == 2: # Total deliver
                return int(delivered_qty)
        else: # mexal
            if col == 0: # Remain order
                return int(oc_qty - delivered_qty)
            elif col == 1: # To produce
                res = int(oc_qty - assigned_qty - b_qty)
                return res if res > 0 else 0
            elif col == 2: # To deliver
                res = int(b_qty + assigned_qty - delivered_qty)
                return res if res > 0 else 0
        # Reached only for an unknown col value.
        return _('ERROR')
    def get_last_record_id(self):
        # ID of the last sale.order rendered by get_order_selected().
        return self.last_record_id
    # -------------------------------------------------------------------------
    # COUNTER MANAGE
    # -------------------------------------------------------------------------
    def reset_counter(self):
        _logger.info('Counter reset for company 1 load report')
        # reset counters:
        self.counters = {
            'total_parcel': 0.0,
            'volume': 0.0,
            'volume10': 0.0,
            'length': 0.0,
            'weight': 0.0,
        }
        _logger.info('Counter: %s' % self.counters)
        return ''
    def get_counter(self, name):
        ''' Get counter with name passed (else create an empty)
        '''
        if name not in self.counters:
            self.counters[name] = 0.0
        return self.counters[name]
    def set_counter(self, name, value):
        ''' Set counter with name with value passed
        '''
        self.counters[name] = value
        return "" # empty so no write in module
    def set_total_parcel(self, v):
        self.total_parcel = v
    def get_total_parcel(self, ):
        return self.total_parcel or 0.0
    def get_parcels(self, product, qty):
        ''' Get text for parcels totals:
            product: proxy obj for product
            qty: total to parcels
        '''
        res = ''
        q_x_pack = product.q_x_pack
        if q_x_pack:
            parcel = qty / q_x_pack
            if not parcel:
                res = ''
            else:
                res = 'SC. %s x %s =' % (int(parcel), int(q_x_pack))
        return res
    def get_parcels_table_load(self, l): #product, qty):
        ''' Get text for parcels totals:
            product: proxy obj for product
            qty: total to parcels
        '''
        res = []
        elements = {
            'S': l.delivery_s, #remain,
            'B': l.delivery_b,
        }
        _logger.info('Element for company 1 load report %s: %s' % (
            elements,
            l.product_id.default_code,
        ))
        parcel_text = ''
        # Initialize `partial` before the loop: it is read in the append below
        # on every iteration, but was previously only assigned inside `if v:`,
        # raising NameError when the first element's quantity was falsy.
        partial = False
        product = l.product_id
        for key, v in elements.iteritems():
            if v:
                # Counter totals:
                # TODO volume from dimension (pack or piece?)
                # Guard unset (False) browse fields with `or 0.0`, as in the
                # fixed sibling copy of this report, so totals stay floats.
                self.counters['volume'] += v * (product.volume or 0.0)
                self.counters['volume10'] += v * (
                    (product.volume or 0.0) * 1.1)
                self.counters['length'] += v * (product.linear_length or 0.0)
                self.counters['weight'] += \
                    v * ((product.weight or 0.0) or (
                        product.weight_net or 0.0))
                q_x_pack = l.product_id.q_x_pack
                partial = False
                if q_x_pack:
                    parcel = v / q_x_pack # truncated
                    if v % q_x_pack != 0:
                        partial = True
                        parcel += 1
                    if parcel:
                        parcel_text = 'SC. %sx%s=' % (
                            '??' if partial else int(
                                parcel),
                            int(q_x_pack),
                        )
                        if not partial:
                            self.counters['total_parcel'] += parcel
                else:
                    parcel_text = ''
            res.append((key, parcel_text, v, partial))
        _logger.info('Counters: %s, %s' % (
            product.default_code,
            self.counters,
        ))
        return res
    def get_partic_browse(self, line):
        ''' Return browse object for line passed:
        '''
        partic_pool = self.pool.get('res.partner.product.partic')
        partic_ids = partic_pool.search(self.cr, self.uid, [
            ('partner_id', '=', line.order_id.partner_id.id),
            ('product_id', '=', line.product_id.id),
            ])
        if partic_ids:
            return partic_pool.browse(self.cr, self.uid, partic_ids)[0]
        else:
            return False
    def get_partic(self, line):
        ''' Return return if present partner-product partic code
        '''
        partic_proxy = self.get_partic_browse(line)
        if partic_proxy:
            return partic_proxy.partner_code or '/'
        else:
            return ''
    def get_partic_description(self, line):
        ''' Return return if present partner-product partic code
        '''
        partic_proxy = self.get_partic_browse(line)
        if partic_proxy:
            return '%s %s' % (
                partic_proxy.partner_description or '',
                partic_proxy.fabric_color or '',
                )
        else:
            return ''
    def get_datetime(self):
        ''' Return datetime obj
        '''
        return datetime
    def get_datetime_now(self):
        ''' Return current time formatted with the server datetime format
        '''
        return datetime.now().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
    def _get_fully_list(self, objects):
        ''' Return list of object browse id list merged with no replication
            with al record masked for print
        '''
        sale_pool = self.pool.get('sale.order')
        # Selection by check:
        active_ids = [x.id for x in objects]
        # Selection by button:
        print_ids = sale_pool.search(self.cr, self.uid, [
            ('print', '=', True)])
        active_ids.extend(print_ids)
        res = list(set(active_ids))
        # Order per deadline:
        return sale_pool.search(self.cr, self.uid, [
            ('id', 'in', res)], order='date_deadline')
    def get_order_selected(self, objects):
        sale_pool = self.pool.get('sale.order')
        res = sale_pool.browse(
            self.cr, self.uid, self._get_fully_list(objects))
        try:
            self.last_record_id = res[-1].id
        except Exception:
            # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
            # still propagate. Empty result -> no last record.
            self.last_record_id = 0
        return res
    def get_object_line(self, objects):
        ''' Selected object + print object
        '''
        products = {}
        res = []
        sale_pool = self.pool.get('sale.order')
        for order in sale_pool.browse(
                self.cr, self.uid, self._get_fully_list(objects)):
            for line in order.order_line:
                # TODO parametrize (jump delivered all):
                if line.product_uom_qty - line.delivered_qty == 0:
                    continue
                code = line.product_id.default_code
                if code not in products:
                    products[code] = []
                #res.append(line) # unsorted
                products[code].append(line)
        # create a res order by product code
        for code in sorted(products):
            res.extend(products[code])
        return res
    def reset_print(self):
        ''' Called at the end of report to reset print check
        '''
        sale_pool = self.pool.get('sale.order')
        sale_pool.reset_print(self.cr, self.uid, False)
        _logger.info('Reset selection')
        return ''
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Adds an optional image field to structure.Customer.
    dependencies = [
        ('structure', '0011_customer_registration_code'),
    ]
    operations = [
        migrations.AddField(
            model_name='customer',
            name='image',
            # NOTE(review): no upload_to is given, so uploaded files land
            # directly under MEDIA_ROOT -- confirm this is intended.
            field=models.ImageField(null=True, blank=True),
            preserve_default=True,
        ),
    ]
Fix migration (SAAS-436)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import nodeconductor.structure.images
class Migration(migrations.Migration):
    # Adds an optional image field to structure.Customer, storing uploads at
    # the path computed by nodeconductor.structure.images.get_upload_path.
    dependencies = [
        ('structure', '0011_customer_registration_code'),
    ]
    operations = [
        migrations.AddField(
            model_name='customer',
            name='image',
            field=models.ImageField(null=True, upload_to=nodeconductor.structure.images.get_upload_path, blank=True),
            preserve_default=True,
        ),
    ]
|
import os
import subprocess
# Public package metadata.
__all__ = ['__author__', '__author_email__', '__version__', '__git_uri__', '__dependencies__', '__optional_dependencies__']
__author__ = "Erik Ritter (maintainer), Serena Jiang, John Bodley, Bill Ulammandakh, Naoya Kanai, Robert Chang, Dan Frank, Chetan Sharma, Matthew Wardrop"
__author_email__ = "erik.ritter@airbnb.com, serena.jiang@airbnb.com, john.bodley@airbnb.com, bill.ulammandakh@airbnb.com, naoya.kanai@airbnb.com, robert.chang@airbnb.com, dan.frank@airbnb.com, chetan.sharma@airbnb.com, mpwardrop@gmail.com"
# Base version; a "_<commit sha>" suffix is appended when running from a
# git checkout.
__version__ = "0.9.0"
try:
    with open(os.devnull, 'w') as devnull:
        __version__ += '_' + subprocess.check_output(['git', 'rev-parse', 'HEAD'], shell=False, stderr=devnull).decode('utf-8').replace('\n', '')
except Exception:
    # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt. Not a git checkout / git missing is fine: keep
    # the plain version string.
    pass
__git_uri__ = "https://github.com/airbnb/knowledge-repo.git"
# These are the core dependencies, and should include all packages needed for accessing repositories
# and running a non-server-side instance of the flask application. Optional dependencies for converters/etc
# should be defined elsewhere.
__dependencies__ = [
    # Knowledge Repository Dependencies
    'pyyaml',  # Yaml parser and utilities
    'markdown',  # Markdown conversion utilities
    'pygments',  # Code highlighting support in markdown
    'gitpython==3.1.18',  # Git abstraction
    'tabulate',  # Rendering information prettily in knowledge_repo script
    'cooked_input',  # Used for interactive input from user in CLI tooling
    'requests',  # Used for downloading images
    # Flask App Dependencies
    'flask',  # Main flask framework
    'flask_login',  # User management framework
    'flask_principal',  # Permissions management framework
    'flask_mail',  # Mail client and utilities
    'Flask-Migrate',  # Database migration utilities
    'sqlalchemy',  # Database abstractions
    'jinja2>=2.7',  # Templating engine
    'werkzeug>=1.0',  # Development webserver
    'gunicorn',  # Deployed webserver
    'inflection',  # String transformation library
    'pillow',  # Image thumbnailing
    'weasyprint',  # Post PDF download option
]
# Extras, keyed by the `pip install knowledge-repo[<extra>]` name.
__optional_dependencies__ = {
    # ipynb notebook conversion support
    'ipynb': [
        'nbformat',
        'nbconvert[execute]<6.0.0',
        'traitlets'
    ],
    # PDF to image conversion used by app
    'pdf': [
        'PyPDF2',  # image for parsing PDFs to images
        'wand',  # imagemagick integration for image uploading
    ],
    # Optional OAuth library for external authentication support
    'oauth': [
        'requests_oauthlib'
    ],
    # Optional ldap library for ldap authentication
    'ldap': [
        'ldap3'
    ],
    # Testing dependencies
    'dev': [
        'pycodestyle',  # PEP8 conformance
        'nose',  # Testing framework
        'beautifulsoup4',  # HTML/XML parser
        'coverage'  # Documentation coverage tester
    ]
}
# Convenience extra: the union of every optional dependency group.
__optional_dependencies__['all'] = [dep for deps in __optional_dependencies__.values() for dep in deps]
fix_rscript
import os
import subprocess
# Public package metadata.
__all__ = ['__author__', '__author_email__', '__version__', '__git_uri__', '__dependencies__', '__optional_dependencies__']
__author__ = "Erik Ritter (maintainer), Serena Jiang, John Bodley, Bill Ulammandakh, Naoya Kanai, Robert Chang, Dan Frank, Chetan Sharma, Matthew Wardrop"
__author_email__ = "erik.ritter@airbnb.com, serena.jiang@airbnb.com, john.bodley@airbnb.com, bill.ulammandakh@airbnb.com, naoya.kanai@airbnb.com, robert.chang@airbnb.com, dan.frank@airbnb.com, chetan.sharma@airbnb.com, mpwardrop@gmail.com"
# Base version; a "_<commit sha>" suffix is appended when running from a
# git checkout.
__version__ = "0.9.0"
try:
    with open(os.devnull, 'w') as devnull:
        __version__ += '_' + subprocess.check_output(['git', 'rev-parse', 'HEAD'], shell=False, stderr=devnull).decode('utf-8').replace('\n', '')
except Exception:
    # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt. Not a git checkout / git missing is fine: keep
    # the plain version string.
    pass
__git_uri__ = "https://github.com/airbnb/knowledge-repo.git"
# These are the core dependencies, and should include all packages needed for accessing repositories
# and running a non-server-side instance of the flask application. Optional dependencies for converters/etc
# should be defined elsewhere.
__dependencies__ = [
    # Knowledge Repository Dependencies
    'pyyaml',  # Yaml parser and utilities
    'markdown==3.3.4',  # Markdown conversion utilities (pinned)
    'pygments',  # Code highlighting support in markdown
    'gitpython==3.1.18',  # Git abstraction
    'tabulate',  # Rendering information prettily in knowledge_repo script
    'cooked_input',  # Used for interactive input from user in CLI tooling
    'requests',  # Used for downloading images
    # Flask App Dependencies
    'flask',  # Main flask framework
    'flask_login',  # User management framework
    'flask_principal',  # Permissions management framework
    'flask_mail',  # Mail client and utilities
    'Flask-Migrate',  # Database migration utilities
    'sqlalchemy',  # Database abstractions
    'jinja2>=2.7',  # Templating engine
    'werkzeug>=1.0',  # Development webserver
    'gunicorn',  # Deployed webserver
    'inflection',  # String transformation library
    'pillow',  # Image thumbnailing
    'weasyprint',  # Post PDF download option
]
# Extras, keyed by the `pip install knowledge-repo[<extra>]` name.
__optional_dependencies__ = {
    # ipynb notebook conversion support
    'ipynb': [
        'nbformat',
        'nbconvert[execute]<6.0.0',
        'traitlets'
    ],
    # PDF to image conversion used by app
    'pdf': [
        'PyPDF2',  # image for parsing PDFs to images
        'wand',  # imagemagick integration for image uploading
    ],
    # Optional OAuth library for external authentication support
    'oauth': [
        'requests_oauthlib'
    ],
    # Optional ldap library for ldap authentication
    'ldap': [
        'ldap3'
    ],
    # Testing dependencies
    'dev': [
        'pycodestyle',  # PEP8 conformance
        'nose',  # Testing framework
        'beautifulsoup4',  # HTML/XML parser
        'coverage'  # Documentation coverage tester
    ]
}
# Convenience extra: the union of every optional dependency group.
__optional_dependencies__['all'] = [dep for deps in __optional_dependencies__.values() for dep in deps]
|
import sys
import subprocess
sys.path.insert(0, '../../../../optimizer')
sys.path.insert(0, '../../../../frontend')
from Compiler import *
from Constructs import *
def cCompile(inFile, outFile, cCompiler=None):
    """Compile the generated C++ residual function into a shared library.

    inFile/outFile: source path and target shared-object path.
    cCompiler: None or "intel" selects icpc; "gnu" selects g++.

    NOTE(review): any other value leaves `cxx`/`opt` unbound and raises
    NameError below -- confirm callers only ever pass these values.
    (Python 2 source: uses `print` statements.)
    """
    if cCompiler == None or cCompiler == "intel":
        cxx = "icpc"
        opt = "-openmp -xhost -O3 -ipo -ansi-alias"
    elif cCompiler == "gnu":
        cxx = "g++"
        opt = "-fopenmp -march=native -O3 -ftree-vectorize"
    #fi
    # The pool-allocator source is compiled in alongside the input file.
    inc = "-I../../../memory_allocation/ "+\
        "../../../memory_allocation/simple_pool_allocator.cpp"
    shared = "-fPIC -shared"
    out = "-o "+outFile
    compileStr = cxx + " " \
        + opt + " " \
        + inc + " " \
        + shared + " " \
        + inFile + " " \
        + out
    print
    print "[compiler]: compiling residual function to", outFile, "..."
    print ">", compileStr
    # NOTE(review): shell=True with interpolated paths -- only safe for
    # trusted inFile/outFile values.
    subprocess.check_output(compileStr, shell=True)
    print "[compiler]: ... DONE"
    return
compiler code for nas-mg-3d
import sys
import subprocess
from compiler import *
from constructs import *
def c_compile(in_file, out_file, arg_data):
    """Compile *in_file* into the shared library *out_file*.

    The compiler binary and its flags come from ``arg_data.cxx`` and
    ``arg_data.cxx_flags``.
    """
    # Include flags (memory-allocation sources currently disabled):
    #include = "-I../../../../memory_allocation/ "+\
    #          "../../../../memory_allocation/simple_pool_allocator.cpp"
    include_flags = ""
    pieces = [
        arg_data.cxx,        # CXX compiler
        arg_data.cxx_flags,  # CXX flags
        include_flags,
        "-fPIC -shared",     # build a shared library
        in_file,
        "-o " + out_file,
    ]
    compile_cmd = " ".join(pieces)
    print("")
    print("[cpp_compiler]: compiling", in_file, "to", out_file, "...")
    print(">", compile_cmd)
    # NOTE: shell=True with an interpolated command string -- only safe
    # for trusted paths/flags.
    subprocess.check_output(compile_cmd, shell=True)
    print("[cpp_compiler]: ... DONE")
    return
|
# coding: utf-8
from flask import Blueprint
from flask import request, render_template, render_template_string, Flask
from flask import jsonify
import json
import random
from ..decorators.crossdomain import crossdomain
__all__ = ['bp']
bp = Blueprint('demo_handler', __name__)
# --- Page routes: each simply renders its template ---
@bp.route('/', methods=['GET'])
def login():
    # The site root doubles as the login page.
    return render_template('security/login-user.html')
@bp.route('/forgot-password')
def forgot_password():
    return render_template('security/forgot-password.html')
@bp.route('/reset-password')
def reset_password():
    return render_template('security/reset-password.html')
@bp.route('/dashboard')
def dashboard():
    return render_template('dashboard/dashboard-container.html')
@bp.route('/product-search')
def product_search():
    return render_template('product/product-search.html')
@bp.route('/user-mgmt')
def user_mgmt():
    return render_template('user/user-container.html')
@bp.route('/user-group-mgmt')
def user_group_mgmt():
    return render_template('user-group/user-group-container.html')
@bp.route('/role-mgmt')
def role_mgmt():
    return render_template('role/role-container.html')
@bp.route('/privilege-mgmt')
def privilege_mgmt():
    return render_template('privilege/privilege-container.html')
@bp.route('/criteria-mgmt')
def criteria_mgmt():
    return render_template('criteria/criteria-container.html')
@bp.route('/role-details')
def role_details():
    return render_template('role/role-details.html')
@bp.route('/privilege-details')
def privilege_details():
    return render_template('privilege/privilege-details.html')
@bp.route('/criteria-details')
def criteria_details():
    return render_template('criteria/criteria-details.html')
@bp.route('/vehicle-mgmt')
def vehicle_mgmt():
    response = render_template('vehicle/vehicle-container.html')
    return response
# App-wide error pages (app_errorhandler registers them for the whole
# application, not only this blueprint).
@bp.app_errorhandler(404)
def not_found(error):
    return render_template('security/404.html'), 404
@bp.app_errorhandler(500)
def internal_error(error):
    return render_template('security/500.html'), 500
###############################################################
#
# Mock Data Requests Handling
#
###############################################################
# --- Mock JSON endpoints backing the management screens ---
@bp.route('/user-mgmt-data')
def user_mgmt_data():
    return jsonify(retrieve_mock_data('mock-user-mgmt.json'))
@bp.route('/userGroup-mgmt-data')
def userGroup_mgmt_data():
    return jsonify(retrieve_mock_data('mock-userGroup-mgmt.json'))
@bp.route('/vehicle-mgmt-data')
def vehicle_mgmt_data():
    return jsonify(retrieve_mock_data('mock-vehicle-mgmt.json'))
@bp.route('/role-mgmt-data')
def role_mgmt_data():
    return jsonify(retrieve_mock_data('mock-role-mgmt.json'))
@bp.route('/privilege-mgmt-data')
def privilege_mgmt_data():
    return jsonify(retrieve_mock_data('mock-privilege-mgmt.json'))
@bp.route('/criteria-mgmt-data')
def criteria_mgmt_data():
    return jsonify(retrieve_mock_data('mock-criteria-mgmt.json'))
@bp.route('/role-history-data')
def role_history_data():
    return jsonify(retrieve_mock_data('mock-role-history.json'))
@bp.route('/privilege-history-data')
def privilege_history_data():
    return jsonify(retrieve_mock_data('mock-privilege-history.json'))
@bp.route('/criteria-history-data')
def criteria_history_data():
    return jsonify(retrieve_mock_data('mock-criteria-history.json'))
###############################################################
#
# Mock Data With BACKBONE.JS
#
###############################################################
### Security Mock
@bp.route('/security/authenticate', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type')
def authenticate():
    """Mock login: always answers with a canned HMAC auth token."""
    # BUG FIX: the key must be the string 'auth_token'; the bare name
    # auth_token was undefined and raised NameError at request time.
    return jsonify(security_user={'auth_token': 'mocked-hmac-authorization-token'})
### Vehicle Mock Data
@bp.route('/vehicles', methods=['GET'])
@crossdomain(origin='*', headers='Content-Type')
def fetch_vehicles():
    mock_vehicles = retrieve_mock_data('mock-vehicle-record.json', 'mock-data-backbone')
    return json.dumps(mock_vehicles);
@bp.route('/vehicles', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type')
def create_vehicle():
    # Mock create: just hands back a random new id.
    return jsonify(id=random.randint(8, 1000))
@bp.route('/vehicles/<int:id>', methods=['DELETE', 'OPTIONS'])
@crossdomain(origin='*')
def destory_vehicle(id):
    # NOTE(review): "destory" (sic) throughout -- renaming would change
    # the registered endpoint names.
    print 'In DELETE METHOD..'
    return jsonify(id=id)
@bp.route('/vehicles/<int:id>', methods=['PUT', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type')
def update_vehicle(id):
    print 'In PUT METHOD..'
    return jsonify(id=id)
@bp.route('/vehicle-criteriable-attrs', methods=['GET'])
@crossdomain(origin='*')
def fetch_vehicle_criteriable_attrs():
    mock_vehicle_criteriable_attrs = retrieve_mock_data('mock-vehicle-header.json', 'mock-data-backbone')
    print mock_vehicle_criteriable_attrs
    return json.dumps(mock_vehicle_criteriable_attrs)
### User Mock Data
@bp.route('/users', methods=['GET'])
@crossdomain(origin='*', headers='Content-Type')
def fetch_users():
    mock_users = retrieve_mock_data('mock-user-record.json', 'mock-data-backbone')
    return json.dumps(mock_users);
@bp.route('/users', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type')
def create_user():
    return jsonify(id=random.randint(8, 1000));
@bp.route('/users/<int:id>', methods=['DELETE', 'OPTIONS'])
@crossdomain(origin='*')
def destory_user(id):
    print 'In DELETE METHOD..'
    return jsonify(id=id);
@bp.route('/users/<int:id>', methods=['PUT', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type')
def update_user(id):
    print 'In PUT METHOD..'
    return jsonify(id=id);
@bp.route('/user-attrs', methods=['GET'])
@crossdomain(origin='*', headers='Content-Type')
def fetch_user_atts():
    """Serve the mock user attribute definitions."""
    # BUG FIX: the loaded data was bound to `mock_user_group_attrs` but
    # an undefined `mock_user_attrs` was returned (NameError at request
    # time). Bind and return the same name.
    mock_user_attrs = retrieve_mock_data('mock-user-attrs.json', 'mock-data-backbone')
    return json.dumps(mock_user_attrs)
### User Group Mock Data
@bp.route('/user-groups', methods=['GET'])
@crossdomain(origin='*', headers='Content-Type')
def fetch_user_groups():
    mock_user_groups = retrieve_mock_data('mock-user-group-record.json', 'mock-data-backbone')
    return json.dumps(mock_user_groups);
@bp.route('/user-groups', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type')
def create_user_group():
    return jsonify(id=random.randint(8, 1000));
@bp.route('/user-groups/<int:id>', methods=['DELETE', 'OPTIONS'])
@crossdomain(origin='*')
def destory_user_group(id):
    print 'In DELETE METHOD..'
    return jsonify(id=id);
@bp.route('/user-groups/<int:id>', methods=['PUT', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type')
def update_user_group(id):
    print 'In PUT METHOD..'
    return jsonify(id=id);
@bp.route('/user-group-attrs', methods=['GET'])
@crossdomain(origin='*', headers='Content-Type')
def fetch_user_group_atts():
    mock_user_group_attrs = retrieve_mock_data('mock-user-group-attrs.json', 'mock-data-backbone')
    return json.dumps(mock_user_group_attrs);
### Role Mock Data
@bp.route('/roles', methods=['GET'])
@crossdomain(origin='*', headers='Content-Type')
def fetch_roles():
    mock_roles = retrieve_mock_data('mock-role-record.json', 'mock-data-backbone')
    return json.dumps(mock_roles);
@bp.route('/roles', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type')
def create_role():
    return jsonify(id=random.randint(8, 1000));
@bp.route('/roles/<int:id>', methods=['DELETE', 'OPTIONS'])
@crossdomain(origin='*')
def destory_role(id):
    print 'In DELETE METHOD..'
    return jsonify(id=id);
@bp.route('/roles/<int:id>', methods=['PUT', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type')
def update_role(id):
    print 'In PUT METHOD..'
    return jsonify(id=id);
@bp.route('/role-history', methods=['GET'])
@crossdomain(origin='*', headers='Content-Type')
def fetch_role_history():
    # NOTE(review): "hisroty" (sic) local names appear throughout.
    mock_role_hisroty = retrieve_mock_data('mock-role-history-record.json', 'mock-data-backbone')
    return json.dumps(mock_role_hisroty);
### Privilege Mock Data
@bp.route('/privileges', methods=['GET'])
@crossdomain(origin='*', headers='Content-Type')
def fetch_priveilges():
    mock_privileges= retrieve_mock_data('mock-privilege-record.json', 'mock-data-backbone')
    return json.dumps(mock_privileges);
@bp.route('/privileges', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type')
def create_privilege():
    return jsonify(id=random.randint(8, 1000));
@bp.route('/privileges/<int:id>', methods=['DELETE', 'OPTIONS'])
@crossdomain(origin='*')
def destory_privilege(id):
    print 'In DELETE METHOD..'
    return jsonify(id=id);
@bp.route('/privileges/<int:id>', methods=['PUT', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type')
def update_privilege(id):
    print 'In PUT METHOD..'
    return jsonify(id=id);
@bp.route('/privilege-history', methods=['GET'])
@crossdomain(origin='*', headers='Content-Type')
def fetch_privilege_history():
    mock_privilege_hisroty = retrieve_mock_data('mock-privilege-history-record.json', 'mock-data-backbone')
    return json.dumps(mock_privilege_hisroty);
### Criteria Mock Data
@bp.route('/criterias', methods=['GET'])
@crossdomain(origin='*', headers='Content-Type')
def fetch_criterias():
    # NOTE(review): local shadows the function name; harmless here.
    fetch_criterias = retrieve_mock_data('mock-criteria-record.json', 'mock-data-backbone')
    return json.dumps(fetch_criterias);
@bp.route('/criterias', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type')
def create_criteria():
    return jsonify(id=random.randint(8, 1000));
@bp.route('/criterias/<int:id>', methods=['DELETE', 'OPTIONS'])
@crossdomain(origin='*')
def destory_criteria(id):
    print 'In DELETE METHOD..'
    return jsonify(id=id);
@bp.route('/criterias/<int:id>', methods=['PUT', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type')
def update_criteria(id):
    print 'In PUT METHOD..'
    return jsonify(id=id)
@bp.route('/criteria-history', methods=['GET'])
@crossdomain(origin='*', headers='Content-Type')
def fetch_criteria_history():
    mock_criteria_hisroty = retrieve_mock_data('mock-criteria-history-record.json', 'mock-data-backbone')
    return json.dumps(mock_criteria_hisroty)
### Generic Filter Mock Data
@bp.route('/generic-filter', methods=['GET'])
@crossdomain(origin='*', headers='Content-Type')
def fetch_generic_filter():
    mock_filter_settings = retrieve_mock_data('mock-filter-settings.json', 'mock-data-backbone')
    return json.dumps(mock_filter_settings)
@bp.route('/generic-records/filter', methods=['GET'])
@crossdomain(origin='*', headers='Content-Type')
def filter_generic_records():
    print 'Generic Filter Params >> '+request.args.get('q');
    mock_filter_records = retrieve_mock_data('mock-filter-records.json', 'mock-data-backbone')
    return json.dumps(mock_filter_records)
### Method for Mock data
def is_ajax(request):
    """Return True when *request* carries the XMLHttpRequest marker header."""
    return request.headers.get("X-Requested-With") == "XMLHttpRequest"
def retrieve_mock_data(file_name, folder='mock-data'):
    """Load and return the JSON fixture *file_name* from *folder*
    (resolved relative to the current working directory)."""
    import os
    fixture_path = os.path.join(os.getcwd(), folder, file_name)
    with open(fixture_path) as fixture:
        return json.load(fixture)
[arctic-bear] - Add authorization header & integrate with stateless authentication.
# coding: utf-8
from flask import Blueprint
from flask import request, render_template, render_template_string, Flask
from flask import jsonify
import json
import random
from ..decorators.crossdomain import crossdomain
__all__ = ['bp']
bp = Blueprint('demo_handler', __name__)
# --- Page routes: each simply renders its template ---
@bp.route('/', methods=['GET'])
def login():
    # The site root doubles as the login page.
    return render_template('security/login-user.html')
@bp.route('/forgot-password')
def forgot_password():
    return render_template('security/forgot-password.html')
@bp.route('/reset-password')
def reset_password():
    return render_template('security/reset-password.html')
@bp.route('/dashboard')
def dashboard():
    return render_template('dashboard/dashboard-container.html')
@bp.route('/product-search')
def product_search():
    return render_template('product/product-search.html')
@bp.route('/user-mgmt')
def user_mgmt():
    return render_template('user/user-container.html')
@bp.route('/user-group-mgmt')
def user_group_mgmt():
    return render_template('user-group/user-group-container.html')
@bp.route('/role-mgmt')
def role_mgmt():
    return render_template('role/role-container.html')
@bp.route('/privilege-mgmt')
def privilege_mgmt():
    return render_template('privilege/privilege-container.html')
@bp.route('/criteria-mgmt')
def criteria_mgmt():
    return render_template('criteria/criteria-container.html')
@bp.route('/role-details')
def role_details():
    return render_template('role/role-details.html')
@bp.route('/privilege-details')
def privilege_details():
    return render_template('privilege/privilege-details.html')
@bp.route('/criteria-details')
def criteria_details():
    return render_template('criteria/criteria-details.html')
@bp.route('/vehicle-mgmt')
def vehicle_mgmt():
    response = render_template('vehicle/vehicle-container.html')
    return response
# App-wide error pages (app_errorhandler registers them for the whole
# application, not only this blueprint).
@bp.app_errorhandler(404)
def not_found(error):
    return render_template('security/404.html'), 404
@bp.app_errorhandler(500)
def internal_error(error):
    return render_template('security/500.html'), 500
###############################################################
#
# Mock Data Requests Handling
#
###############################################################
# --- Mock JSON endpoints backing the management screens ---
@bp.route('/user-mgmt-data')
def user_mgmt_data():
    return jsonify(retrieve_mock_data('mock-user-mgmt.json'))
@bp.route('/userGroup-mgmt-data')
def userGroup_mgmt_data():
    return jsonify(retrieve_mock_data('mock-userGroup-mgmt.json'))
@bp.route('/vehicle-mgmt-data')
def vehicle_mgmt_data():
    return jsonify(retrieve_mock_data('mock-vehicle-mgmt.json'))
@bp.route('/role-mgmt-data')
def role_mgmt_data():
    return jsonify(retrieve_mock_data('mock-role-mgmt.json'))
@bp.route('/privilege-mgmt-data')
def privilege_mgmt_data():
    return jsonify(retrieve_mock_data('mock-privilege-mgmt.json'))
@bp.route('/criteria-mgmt-data')
def criteria_mgmt_data():
    return jsonify(retrieve_mock_data('mock-criteria-mgmt.json'))
@bp.route('/role-history-data')
def role_history_data():
    return jsonify(retrieve_mock_data('mock-role-history.json'))
@bp.route('/privilege-history-data')
def privilege_history_data():
    return jsonify(retrieve_mock_data('mock-privilege-history.json'))
@bp.route('/criteria-history-data')
def criteria_history_data():
    return jsonify(retrieve_mock_data('mock-criteria-history.json'))
###############################################################
#
# Mock Data With BACKBONE.JS
#
###############################################################
### Security Mock
@bp.route('/security/authenticate', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type')
def authenticate():
    # Mock login: logs the submitted username and always returns a
    # canned HMAC token. (No Authorization header allowed here: the
    # client has no token yet.)
    print 'Auth User name :>>' + request.form.get('username')
    return jsonify(security_user={'auth_token': 'mocked-hmac-authorization-token'})
@bp.route('/security/signout', methods=['DELETE', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def signout():
    # Stateless auth: nothing to invalidate server-side.
    return jsonify(result='success')
### Vehicle Mock Data
@bp.route('/vehicles', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_vehicles():
    mock_vehicles = retrieve_mock_data('mock-vehicle-record.json', 'mock-data-backbone')
    return json.dumps(mock_vehicles);
@bp.route('/vehicles', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def create_vehicle():
    # Mock create: just hands back a random new id.
    return jsonify(id=random.randint(8, 1000))
@bp.route('/vehicles/<int:id>', methods=['DELETE', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def destory_vehicle(id):
    # NOTE(review): "destory" (sic) throughout -- renaming would change
    # the registered endpoint names.
    print 'In DELETE METHOD..'
    return jsonify(id=id)
@bp.route('/vehicles/<int:id>', methods=['PUT', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def update_vehicle(id):
    print 'In PUT METHOD..'
    return jsonify(id=id)
@bp.route('/vehicle-criteriable-attrs', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_vehicle_criteriable_attrs():
    mock_vehicle_criteriable_attrs = retrieve_mock_data('mock-vehicle-header.json', 'mock-data-backbone')
    print mock_vehicle_criteriable_attrs
    return json.dumps(mock_vehicle_criteriable_attrs)
### User Mock Data
@bp.route('/users', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_users():
    mock_users = retrieve_mock_data('mock-user-record.json', 'mock-data-backbone')
    return json.dumps(mock_users);
@bp.route('/users', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def create_user():
    return jsonify(id=random.randint(8, 1000));
@bp.route('/users/<int:id>', methods=['DELETE', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def destory_user(id):
    print 'In DELETE METHOD..'
    return jsonify(id=id);
@bp.route('/users/<int:id>', methods=['PUT', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def update_user(id):
    print 'In PUT METHOD..'
    return jsonify(id=id);
@bp.route('/user-attrs', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_user_atts():
    """Serve the mock user attribute definitions."""
    # BUG FIX: the loaded data was bound to `mock_user_group_attrs` but
    # an undefined `mock_user_attrs` was returned (NameError at request
    # time). Bind and return the same name.
    mock_user_attrs = retrieve_mock_data('mock-user-attrs.json', 'mock-data-backbone')
    return json.dumps(mock_user_attrs)
### User Group Mock Data
@bp.route('/user-groups', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_user_groups():
    mock_user_groups = retrieve_mock_data('mock-user-group-record.json', 'mock-data-backbone')
    return json.dumps(mock_user_groups);
@bp.route('/user-groups', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def create_user_group():
    return jsonify(id=random.randint(8, 1000));
@bp.route('/user-groups/<int:id>', methods=['DELETE', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def destory_user_group(id):
    print 'In DELETE METHOD..'
    return jsonify(id=id);
@bp.route('/user-groups/<int:id>', methods=['PUT', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def update_user_group(id):
    print 'In PUT METHOD..'
    return jsonify(id=id);
@bp.route('/user-group-attrs', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_user_group_atts():
    mock_user_group_attrs = retrieve_mock_data('mock-user-group-attrs.json', 'mock-data-backbone')
    return json.dumps(mock_user_group_attrs);
### Role Mock Data
@bp.route('/roles', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_roles():
    mock_roles = retrieve_mock_data('mock-role-record.json', 'mock-data-backbone')
    return json.dumps(mock_roles);
@bp.route('/roles', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def create_role():
    return jsonify(id=random.randint(8, 1000));
@bp.route('/roles/<int:id>', methods=['DELETE', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def destory_role(id):
    print 'In DELETE METHOD..'
    return jsonify(id=id);
@bp.route('/roles/<int:id>', methods=['PUT', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def update_role(id):
    print 'In PUT METHOD..'
    return jsonify(id=id);
@bp.route('/role-history', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_role_history():
    # NOTE(review): "hisroty" (sic) local names appear throughout.
    mock_role_hisroty = retrieve_mock_data('mock-role-history-record.json', 'mock-data-backbone')
    return json.dumps(mock_role_hisroty);
### Privilege Mock Data
@bp.route('/privileges', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_priveilges():
    mock_privileges= retrieve_mock_data('mock-privilege-record.json', 'mock-data-backbone')
    return json.dumps(mock_privileges);
@bp.route('/privileges', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def create_privilege():
    return jsonify(id=random.randint(8, 1000));
@bp.route('/privileges/<int:id>', methods=['DELETE', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def destory_privilege(id):
    print 'In DELETE METHOD..'
    return jsonify(id=id);
@bp.route('/privileges/<int:id>', methods=['PUT', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type')
def update_privilege(id):
print 'In PUT METHOD..'
return jsonify(id=id);
@bp.route('/privilege-history', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_privilege_history():
    mock_privilege_hisroty = retrieve_mock_data('mock-privilege-history-record.json', 'mock-data-backbone')
    return json.dumps(mock_privilege_hisroty);
### Criteria Mock Data
@bp.route('/criterias', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_criterias():
    # NOTE(review): local shadows the function name; harmless here.
    fetch_criterias = retrieve_mock_data('mock-criteria-record.json', 'mock-data-backbone')
    return json.dumps(fetch_criterias);
@bp.route('/criterias', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def create_criteria():
    return jsonify(id=random.randint(8, 1000));
@bp.route('/criterias/<int:id>', methods=['DELETE', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def destory_criteria(id):
    print 'In DELETE METHOD..'
    return jsonify(id=id);
@bp.route('/criterias/<int:id>', methods=['PUT', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def update_criteria(id):
    print 'In PUT METHOD..'
    return jsonify(id=id)
@bp.route('/criteria-history', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_criteria_history():
    mock_criteria_hisroty = retrieve_mock_data('mock-criteria-history-record.json', 'mock-data-backbone')
    return json.dumps(mock_criteria_hisroty)
### Generic Filter Mock Data
@bp.route('/generic-filter', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_generic_filter():
    # Echo the caller's token for debugging.
    print request.headers.get('Authorization')
    mock_filter_settings = retrieve_mock_data('mock-filter-settings.json', 'mock-data-backbone')
    return json.dumps(mock_filter_settings)
@bp.route('/generic-records/filter', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def filter_generic_records():
    print request.headers.get('Authorization')
    print 'Generic Filter Params >> '+request.args.get('q');
    mock_filter_records = retrieve_mock_data('mock-filter-records.json', 'mock-data-backbone')
    return json.dumps(mock_filter_records)
### Method for Mock data
def is_ajax(request):
    """Return True when *request* carries the XMLHttpRequest marker header."""
    return request.headers.get("X-Requested-With") == "XMLHttpRequest"
def retrieve_mock_data(file_name, folder='mock-data'):
    """Load and return the JSON fixture *file_name* from *folder*
    (resolved relative to the current working directory)."""
    import os
    fixture_path = os.path.join(os.getcwd(), folder, file_name)
    with open(fixture_path) as fixture:
        return json.load(fixture)
|
# https://uva.onlinejudge.org/index.php?option=com_onlinejudge&Itemid=8&category=608&page=show_problem&problem=1055
# UVa 10114: simulate monthly loan payments against the car's
# depreciation and report the first month the buyer owes less than the
# car is worth.
import fileinput
with fileinput.input() as f:
    while True:
        # duration (months), down payment, loan amount, number of
        # depreciation records; a negative duration ends the input.
        line = next(f).split(' ')
        duration, down_pay, load, depr_nel = int(line[0]), float(line[1]), float(line[2]), int(line[3])
        if duration < 0:
            break
        buyer_owes = load
        # First depreciation record is month 0 ("off the lot").
        line = next(f).split(' ')
        off_lot, off_lot_perc = int(line[0]), float(line[1])
        car_value = (load + down_pay) * (1 - off_lot_perc)
        # deprs[m] = depreciation rate applied in month m; months without
        # an explicit record inherit the most recent earlier rate.
        deprs = [off_lot_perc] + ([0] * duration)
        last = 0
        for d in range(depr_nel-1):
            line = next(f).split(' ')
            month, perc = int(line[0]), float(line[1])
            for i in range(last+1, month):
                deprs[i]=deprs[last]
            deprs[month]=perc
            last = month
        for i in range(last+1, len(deprs)):
            deprs[i]=deprs[last]
        deprs[0] = off_lot_perc
        #print(deprs)
        for month in range(1, len(deprs)):
            # NOTE(review): reduces the debt by the down payment rather
            # than a monthly payment (loan / duration) -- confirm against
            # the problem statement.
            buyer_owes -= down_pay
            car_value *= 1 - deprs[month]
            # NOTE(review): debug print -- pollutes the judged output.
            print('buyer: {} car: {}'.format(buyer_owes, car_value))
            if buyer_owes < car_value:
                break
        print('{} month{}'.format(month, '' if month == 1 else 's'))
Solved UVa 10114.
# https://uva.onlinejudge.org/index.php?option=com_onlinejudge&Itemid=8&category=608&page=show_problem&problem=1055
import collections
import fileinput
from contextlib import contextmanager
@contextmanager
def line_bind(line, *args, splitter=' '):
    """Split *line* on *splitter* and yield a generator of the tokens
    converted by the corresponding callables in *args* (surplus tokens
    or converters are ignored, as with ``zip``)."""
    tokens = line.split(splitter)
    yield (convert(tok) for convert, tok in zip(args, tokens))
with fileinput.input() as f:
    while True:
        # duration (months), down payment, loan, number of depreciation
        # records; a negative duration terminates the input.
        with line_bind(next(f), int, float, float, int) as (duration, down_pay, loan, depr_nel):
            if duration < 0: break
            # deprs[m] = depreciation rate for month m (0 = off the lot);
            # months without an explicit record inherit the previous rate.
            deprs = [0] * (duration + 1)
            last = -1
            for d in range(depr_nel):
                with line_bind(next(f), int, float) as (month, perc):
                    for i in range(last+1, month): deprs[i]=deprs[last]
                    deprs[month], last = perc, month
            for i in range(last+1, len(deprs)):
                deprs[i]=deprs[last]
            buyer_owes, payment = loan, loan / duration
            car_value = (loan + down_pay) * (1 - deprs[0])
            # Worth more than the debt before any payment is made.
            if loan < car_value:
                print('0 months')
                continue
            for month in range(1, len(deprs)):
                buyer_owes -= payment
                car_value -= car_value*deprs[month]
                #print('month: {} buyer: {} car: {}'.format(
                #month, buyer_owes, car_value))
                if buyer_owes < car_value:
                    break
            print('{} month{}'.format(month, '' if month == 1 else 's'))
|
"""
The set of classes used to model the 3 'partitions' for N3 assertions.
There is a top level class which implements operations common to all partitions as
well as a class for each partition. These classes are meant to allow the underlying
SQL schema to be completely configurable as well as to automate the generation
of SQL queries for adding,updating,removing,resolving triples from the partitions.
These classes work in tandem with the RelationHashes to automate all (or most) of
the SQL processing associated with this FOPL Relational Model
NOTE: The use of foreign keys (which - unfortunately - bumps the minimum MySQL version to 5.0) allows for
the efficient removal of all statements about a particular resource using cascade on delete (currently not used)
see: http://dev.mysql.com/doc/refman/5.0/en/ansi-diff-foreign-keys.html
"""
from rdflib.URIRef import URIRef
from rdflib import BNode
from rdflib import RDF
from rdflib.Literal import Literal
from rdflib.URIRef import URIRef
from pprint import pprint
from rdflib.term_utils import *
from rdflib.store.REGEXMatching import REGEXTerm
from QuadSlot import *
# Wildcard marker used in query patterns (matches any term).
Any = None
# Name of the column holding the context (graph) identifier in every partition.
CONTEXT_COLUMN = 'context'
# Term-type letters: 'U' URI, 'B' BNode, 'F' Formula, 'V' Variable, 'L' Literal.
ANY_TERM = ['U','B','F','V','L']
CONTEXT_TERMS = ['U','B','F']
IDENTIFIER_TERMS = ['U','B']
GROUND_IDENTIFIERS = ['U']
NON_LITERALS = ['U','B','F','V']
CLASS_TERMS = ['U','B','V']
PREDICATE_NAMES = ['U','V']
# Allowed term types per slot for each of the three partitions.
NAMED_BINARY_RELATION_PREDICATES = GROUND_IDENTIFIERS
NAMED_BINARY_RELATION_OBJECTS = ['U','B','L']
NAMED_LITERAL_PREDICATES = GROUND_IDENTIFIERS
NAMED_LITERAL_OBJECTS = ['L']
ASSOCIATIVE_BOX_CLASSES = GROUND_IDENTIFIERS
# Template for creating a partition table (InnoDB is required for the
# foreign-key constraints mentioned in the module docstring).
CREATE_BRP_TABLE = """
CREATE TABLE %s (
%s
) ENGINE=InnoDB"""
# JOIN templates used to intersect a partition with the identifier / value hashes.
LOOKUP_INTERSECTION_SQL = "INNER JOIN %s %s ON (%s)"
LOOKUP_UNION_SQL = "LEFT JOIN %s %s ON (%s)"
class BinaryRelationPartition(object):
    """
    The common ancestor of the three partitions for assertions.
    Implements behavior common to all 3. Each subclass is expected to define the following:
    nameSuffix - The suffix appended to the name of the table
    termEnumerations - a 4 item list (for each quad 'slot') of lists (or None) which enumerate the allowable term types
    for each quad slot (one of 'U' - URIs,'V' - Variable,'L' - Literals,'B' - BNodes,'F' - Formulae)
    columnNames - a list of column names for each quad slot (can be of additional length where each item is a 3-item tuple of:
    column name, column type, index)
    columnIntersectionList - a list of 2 item tuples (the quad index and a boolean indicating whether or not the associated term is an identifier)
    this list (the order of which is very important) is used for generating intersections between the partition and the identifier / value hash
    hardCodedResultFields - a dictionary mapping quad slot indices to their hardcoded value (for partitions - such as ABOX - which have a hardcoded value for a particular quad slot)
    hardCodedResultTermsTypes - a dictionary mapping quad slot indices to their hardcoded term type (for partitions - such as Literal properties - which have hardcoded values for a particular quad slot's term type)
    """
    assertedColumnName = 'asserted'
    indexSuffix = 'Index'
    def __init__(self,identifier,idHash,valueHash):
        self.identifier = identifier
        self.idHash = idHash
        self.valueHash = valueHash
        self._repr = self.identifier+'_'+self.nameSuffix
        self.singularInsertionSQLCmd = self.insertRelationsSQLCMD()
        self._resetPendingInsertions()
        self._intersectionSQL = self.generateHashIntersections()
        # Pre-computed SELECT column lists; the base partitions have no
        # datatype / language columns, so those slots are padded with NULLs to
        # keep UNION branches positionally aligned.
        self._selectFieldsLeading = self._selectFields(True) + ['NULL as '+SlotPrefixes[DATATYPE_INDEX],'NULL as '+SlotPrefixes[LANGUAGE_INDEX]]
        self._selectFieldsNonLeading = self._selectFields(False) + ['NULL','NULL']
    def __repr__(self):
        # The repr doubles as the SQL table name throughout the generated SQL.
        return self._repr
    def foreignKeySQL(self,slot):
        """
        Generates foreign key expression relating a particular quad term with
        the identifier hash
        """
        rt = ["\tCONSTRAINT %s_%s_lookup FOREIGN KEY (%s) REFERENCES %s (%s)"%(
            self,
            self.columnNames[slot],
            self.columnNames[slot],
            self.idHash,
            self.idHash.columns[0][0])]
        return rt
    def IndexManagementSQL(self,create=False):
        """
        Generates the statements that create (create=True) or drop the indices
        and foreign keys for every column of this partition.
        """
        idxSQLStmts = []
        for slot in POSITION_LIST:
            if self.columnNames[slot]:
                if create:
                    idxSQLStmts.append("create INDEX %s%s on %s (%s)"%(self.columnNames[slot],self.indexSuffix,self,self.columnNames[slot]))
                    idxSQLStmts.append("ALTER TABLE %s ADD %s"%(self,self.foreignKeySQL(slot)[0]))
                else:
                    idxSQLStmts.append("ALTER TABLE %s DROP FOREIGN KEY %s_%s_lookup"%(self,self,self.columnNames[slot]))
                    idxSQLStmts.append("ALTER TABLE %s DROP INDEX %s%s"%(self,self.columnNames[slot],self.indexSuffix))
                if self.termEnumerations[slot]:
                    if create:
                        idxSQLStmts.append("create INDEX %s_term%s on %s (%s_term)"%(self.columnNames[slot],self.indexSuffix,self,self.columnNames[slot]))
                    else:
                        idxSQLStmts.append("drop index %s_term%s on %s"%(self.columnNames[slot],self.indexSuffix,self))
        if len(self.columnNames) > 4:
            # Extra (non-quad) columns such as datatype / language.
            for otherSlot in range(4,len(self.columnNames)):
                colMD = self.columnNames[otherSlot]
                if isinstance(colMD,tuple):
                    colName,colType,indexStr = colMD
                    if create:
                        idxSQLStmts.append("create INDEX %s%s on %s (%s)"%(colName,self.indexSuffix,self,indexStr%colName))
                    else:
                        idxSQLStmts.append("drop index %s%s on %s"%(colName,self.indexSuffix,self))
                else:
                    if create:
                        # FIX: the original format string was "create INDEX %s%s on (%s)"
                        # (3 placeholders for 4 arguments), which raised a TypeError
                        # and also omitted the table name from the statement.
                        idxSQLStmts.append("create INDEX %s%s on %s (%s)"%(colMD,self.indexSuffix,self,colMD))
                        idxSQLStmts.append("ALTER TABLE %s ADD %s"%(self,self.foreignKeySQL(otherSlot)[0]))
                    else:
                        idxSQLStmts.append("ALTER TABLE %s DROP FOREIGN KEY %s_%s_lookup"%(self,self,colMD))
                        idxSQLStmts.append("drop index %s%s on %s"%(colMD,self.indexSuffix,self))
        return idxSQLStmts
    def createSQL(self):
        """
        Generates a CREATE TABLE statement which creates a SQL table used for
        persisting assertions associated with this partition
        """
        columnSQLStmts = []
        for slot in POSITION_LIST:
            if self.columnNames[slot]:
                # Each quad column stores the half-MD5 integer hash of the term.
                columnSQLStmts.append("\t%s\tBIGINT unsigned not NULL"%(self.columnNames[slot]))
                columnSQLStmts.append("\tINDEX %s%s (%s)"%(self.columnNames[slot],self.indexSuffix,self.columnNames[slot]))
                if self.termEnumerations[slot]:
                    columnSQLStmts.append("\t%s_term enum(%s) not NULL"%(self.columnNames[slot],','.join(["'%s'"%tType for tType in self.termEnumerations[slot]])))
                    columnSQLStmts.append("\tINDEX %s_term%s (%s_term)"%(self.columnNames[slot],self.indexSuffix,self.columnNames[slot]))
                columnSQLStmts.extend(self.foreignKeySQL(slot))
        if len(self.columnNames) > 4:
            for otherSlot in range(4,len(self.columnNames)):
                colMD = self.columnNames[otherSlot]
                if isinstance(colMD,tuple):
                    colName,colType,indexStr = colMD
                    columnSQLStmts.append("\t%s %s"%(colName,colType))
                    columnSQLStmts.append("\tINDEX %s%s (%s)"%(colName,self.indexSuffix,indexStr%colName))
                else:
                    columnSQLStmts.append("\t%s BIGINT unsigned not NULL"%colMD)
                    columnSQLStmts.append("\tINDEX %s%s (%s)"%(colMD,self.indexSuffix,colMD))
                    columnSQLStmts.extend(self.foreignKeySQL(otherSlot))
        return CREATE_BRP_TABLE%(
            self,
            ',\n'.join(columnSQLStmts)
        )
    def _resetPendingInsertions(self):
        """
        Resets the cache for pending insertions
        """
        self.pendingInsertions = []
    def insertRelationsSQLCMD(self):
        """
        Generates a SQL command with parameter references (%s) in order to facilitate
        efficient batch insertion of multiple assertions by Python DB implementations (such as MySQLdb)
        """
        vals = 0
        insertColNames = []
        for colIdx,colName in enumerate(self.columnNames):
            if colName:
                insertColNames.append(colName)
                vals += 1
                if colIdx < len(self.termEnumerations) and self.termEnumerations[colIdx]:
                    insertColNames.append(colName+'_term')
                    vals += 1
        insertColsExpr = "(%s)"%(','.join([isinstance(i,tuple) and i[0] or i for i in insertColNames]))
        return "INSERT INTO %s %s VALUES "%(self,insertColsExpr)+"(%s)"%(','.join(['%s' for i in range(vals)]))
    def insertRelations(self,quadSlots):
        """
        Takes a list of QuadSlot objects and queues the new identifiers / values to insert and
        the assertions as well (so they can be added in a batch for maximum efficiency)
        """
        for quadSlot in quadSlots:
            self.extractIdentifiers(quadSlot)
            self.pendingInsertions.append(self.compileQuadToParams(quadSlot))
    def flushInsertions(self,db):
        """
        Adds the pending identifiers / values and assertions (using executemany for
        maximum efficiency), and resets the queue.
        """
        self.idHash.insertIdentifiers(db)
        self.valueHash.insertIdentifiers(db)
        cursor = db.cursor()
        cursor.executemany(self.singularInsertionSQLCmd,self.pendingInsertions)
        cursor.close()
        self._resetPendingInsertions()
    def selectContextFields(self,first):
        """
        Generates a list of column aliases for the SELECT SQL command used in order
        to fetch contexts from each partition
        """
        rt = []
        idHashLexicalCol = self.idHash.columns[-1][0]
        idHashTermTypeCol = self.idHash.columns[-2][0]
        # Aliases are only emitted for the first SELECT of a UNION.
        termNameAlias = first and ' as %s'%SlotPrefixes[CONTEXT] or ''
        rt.append('rt_'+SlotPrefixes[CONTEXT]+'.'+idHashLexicalCol + termNameAlias)
        termTypeAlias = first and ' as %sTermType'%SlotPrefixes[CONTEXT] or ''
        if self.termEnumerations[CONTEXT]:
            rt.append('rt_'+SlotPrefixes[CONTEXT]+'.'+idHashTermTypeCol+termTypeAlias)
        else:
            rt.append("'%s'"%self.hardCodedResultTermsTypes[CONTEXT]+termTypeAlias)
        return rt
    def _selectFields(self,first):
        """
        Builds the per-slot column expressions (lexical value + term type) for
        SELECT statements; hard-coded slots are emitted as string literals.
        """
        rt = []
        idHashLexicalCol = self.idHash.columns[-1][0]
        idHashTermTypeCol = self.idHash.columns[-2][0]
        for idx in range(len(POSITION_LIST)):
            termNameAlias = first and ' as %s'%SlotPrefixes[idx] or ''
            if idx < len(self.columnNames) and self.columnNames[idx]:
                rt.append('rt_'+SlotPrefixes[idx]+'.'+idHashLexicalCol + termNameAlias)
                termTypeAlias = first and ' as %sTermType'%SlotPrefixes[idx] or ''
                if self.termEnumerations[idx]:
                    rt.append('rt_'+SlotPrefixes[idx]+'.'+idHashTermTypeCol+termTypeAlias)
                else:
                    rt.append("'%s'"%self.hardCodedResultTermsTypes[idx]+termTypeAlias)
            else:
                rt.append("'%s'"%self.hardCodedResultFields[idx]+termNameAlias)
                if self.hardCodedResultTermsTypes[idx]:
                    # FIX: the original reused termNameAlias here, aliasing the
                    # term-type column with the same name as the value column and
                    # making any ORDER BY on that slot ambiguous.
                    termTypeAlias = first and ' as %sTermType'%SlotPrefixes[idx] or ''
                    rt.append("'%s'"%self.hardCodedResultTermsTypes[idx]+termTypeAlias)
        return rt
    def selectFields(self,first=False):
        """
        Returns a list of column aliases for the SELECT SQL command used to fetch quads from
        a partition
        """
        return first and self._selectFieldsLeading or self._selectFieldsNonLeading
    def generateHashIntersections(self):
        """
        Generates the SQL JOINS (INNER and LEFT) used to intersect the identifier and value hashes
        with this partition. This relies on each partition setting up an ordered list of
        intersections (ordered with optimization in mind). For instance the ABOX partition
        would want to intersect on classes first (since this will have a lower cardinality than any other field)
        whereas the Literal Properties partition would want to intersect on datatypes first.
        The partitions and hashes are joined on the integer half-MD5-hash of the URI (or literal) as well
        as the 'Term Type'
        """
        intersections = []
        for idx,isId in self.columnIntersectionList:
            lookup = isId and self.idHash or self.valueHash
            lookupAlias = idx < len(POSITION_LIST) and 'rt_'+SlotPrefixes[idx] or 'rt_'+self.columnNames[idx][0]
            lookupKeyCol = lookup.columns[0][0]
            if idx < len(POSITION_LIST) or len(self.columnNames) > len(POSITION_LIST):
                colName = idx < len(POSITION_LIST) and self.columnNames[idx] or self.columnNames[idx][0]
                intersectionClauses = ["%s.%s = %s.%s"%(self,colName,lookupAlias,lookupKeyCol)]
                if idx < len(POSITION_LIST) and self.termEnumerations[idx]:
                    intersectionClauses.append("%s.%s_term = %s.%s"%(self,colName,lookupAlias,lookup.columns[1][0]))
                if isId and idx < len(POSITION_LIST) and idx in self.hardCodedResultTermsTypes:
                    intersectionClauses.append("%s.%s = '%s'"%(lookupAlias,lookup.columns[1][0],self.hardCodedResultTermsTypes[idx]))
                if idx == DATATYPE_INDEX and len(self.columnNames) > len(POSITION_LIST):
                    # Datatype is optional (NULL for plain literals), so a LEFT
                    # JOIN is needed to keep rows without one.
                    intersections.append(LOOKUP_UNION_SQL%(lookup,lookupAlias,' AND '.join(intersectionClauses)))
                else:
                    intersections.append(LOOKUP_INTERSECTION_SQL%(lookup,lookupAlias,' AND '.join(intersectionClauses)))
        return ' '.join(intersections)
    def generateWhereClause(self,queryPattern):
        """
        Takes a query pattern (a list of quad terms - subject,predicate,object,context)
        and generates a SQL WHERE clauses which works in conjunction to the intersections
        to filter the result set by partial matching (by REGEX), full matching (by integer half-hash),
        and term types. For maximally efficient SELECT queries
        """
        whereClauses = []
        whereParameters = []
        # A None context means "asserted statements only" (exclude formulae).
        asserted = dereferenceQuad(CONTEXT,queryPattern) is None
        for idx in SlotPrefixes.keys():
            queryTerm = dereferenceQuad(idx,queryPattern)
            lookupAlias = 'rt_'+SlotPrefixes[idx]
            if idx == CONTEXT and asserted:
                whereClauses.append("%s.%s_term != 'F'"%(self,self.columnNames[idx]))
            if idx < len(POSITION_LIST) and isinstance(queryTerm,REGEXTerm):
                whereClauses.append("%s.lexical REGEXP "%lookupAlias+"%s")
                whereParameters.append(queryTerm)
            elif idx == CONTEXT and isinstance(queryTerm,Graph) and isinstance(queryTerm.identifier,REGEXTerm):
                whereClauses.append("%s.lexical REGEXP "%lookupAlias+"%s")
                whereParameters.append(queryTerm.identifier)
            elif idx < len(POSITION_LIST) and queryTerm is not Any:
                if self.columnNames[idx]:
                    if isinstance(queryTerm,list):
                        # A list of terms becomes an IN (...) filter.
                        whereClauses.append("%s.%s"%(self,self.columnNames[idx])+" in (%s)"%','.join(['%s' for item in range(len(queryTerm))]))
                        whereParameters.extend([normalizeValue(item,term2Letter(item)) for item in queryTerm])
                    else:
                        whereClauses.append("%s.%s"%(self,self.columnNames[idx])+" = %s")
                        whereParameters.append(normalizeValue(queryTerm,term2Letter(queryTerm)))
                if not idx in self.hardCodedResultTermsTypes and self.termEnumerations[idx] and not isinstance(queryTerm,list):
                    whereClauses.append("%s.%s_term"%(self,self.columnNames[idx])+" = %s")
                    whereParameters.append(term2Letter(queryTerm))
            elif idx >= len(POSITION_LIST) and len(self.columnNames) > len(POSITION_LIST) and queryTerm is not None:
                # Datatype values are stored hashed; language tags are stored verbatim.
                compVal = idx == DATATYPE_INDEX and normalizeValue(queryTerm,term2Letter(queryTerm)) or queryTerm
                whereClauses.append("%s.%s"%(self,self.columnNames[idx][0])+" = %s")
                whereParameters.append(compVal)
        return ' AND '.join(whereClauses),whereParameters# + "#{%s}\n"%(str(queryPattern)),whereParameters
class AssociativeBox(BinaryRelationPartition):
    """
    The partition associated with assertions of class membership (formally known - in Description Logics - as an Associative Box)
    This partition is for all assertions where the property is rdf:type
    see: http://en.wikipedia.org/wiki/Description_Logic#Modelling_in_Description_Logics
    """
    nameSuffix = 'associativeBox'
    termEnumerations=[NON_LITERALS,None,CLASS_TERMS,CONTEXT_TERMS]
    columnNames = ['member',None,'class',CONTEXT_COLUMN]
    columnIntersectionList = [
        (OBJECT,True),
        (CONTEXT,True),
        (SUBJECT,True)]
    hardCodedResultFields = {
        PREDICATE : RDF.type,
    }
    hardCodedResultTermsTypes = {
        PREDICATE : 'U',
    }
    def compileQuadToParams(self,quadSlots):
        """
        Flattens the subject (member), object (class) and context slots into
        the parameter tuple consumed by the batch INSERT command; the predicate
        slot is skipped since it is hard-coded to rdf:type.
        """
        memberSlot,_typeSlot,klassSlot,ctxSlot = quadSlots
        params = []
        for slot in (memberSlot,klassSlot,ctxSlot):
            params.append(slot.md5Int)
            params.append(term2Letter(slot.term))
        return tuple(params)
    def extractIdentifiers(self,quadSlots):
        """
        Queues the subject, object and context terms for insertion into the
        identifier hash (rdf:type itself is never stored).
        """
        memberSlot,_typeSlot,klassSlot,ctxSlot = quadSlots
        self.idHash.updateIdentifierQueue(
            [(slot.term,slot.termType) for slot in (memberSlot,klassSlot,ctxSlot)])
class NamedLiteralProperties(BinaryRelationPartition):
    """
    The partition associated with assertions where the object is a Literal.
    In addition to the four quad columns it stores the literal's datatype
    (hashed) and language tag so they can be recovered on SELECT.
    """
    nameSuffix = 'literalProperties'
    termEnumerations=[NON_LITERALS,PREDICATE_NAMES,None,CONTEXT_TERMS]
    columnNames = ['subject','predicate','object',CONTEXT_COLUMN,('data_type','BIGINT unsigned','%s'),('language','varchar(3)','%s(3)')]
    columnIntersectionList = [
        (DATATYPE_INDEX,True),
        (PREDICATE,True),
        (CONTEXT,True),
        (OBJECT,False),
        (SUBJECT,True)]
    hardCodedResultFields = {}
    hardCodedResultTermsTypes = {
        OBJECT : 'L'
    }
    def foreignKeySQL(self,slot):
        """
        Same as the base implementation except the object column references the
        literal value hash rather than the identifier hash.
        """
        lookupHash = slot == OBJECT and self.valueHash or self.idHash
        rt = ["\tCONSTRAINT %s_%s_lookup FOREIGN KEY (%s) REFERENCES %s (%s)"%(
            self,
            self.columnNames[slot],
            self.columnNames[slot],
            lookupHash,
            lookupHash.columns[0][0])]
        return rt
    def __init__(self,identifier,idHash,valueHash):
        super(NamedLiteralProperties,self).__init__(identifier,idHash,valueHash)
        # One INSERT command per (has datatype?, has language?) combination,
        # since the optional columns change the column list / parameter count.
        self.insertSQLCmds = {
            (False,False): self.insertRelationsSQLCMD(),
            (False,True) : self.insertRelationsSQLCMD(language=True),
            (True,False) : self.insertRelationsSQLCMD(dataType=True),
            (True,True) : self.insertRelationsSQLCMD(dataType=True,language=True)
        }
        idHashLexicalCol = self.idHash.columns[-1][0]
        self._selectFieldsLeading = self._selectFields(True) + \
            [
            'rt_%s.%s'%(self.columnNames[DATATYPE_INDEX][0],idHashLexicalCol) + ' as %s'%SlotPrefixes[DATATYPE_INDEX],
            str(self)+'.'+self.columnNames[LANGUAGE_INDEX][0]+' as %s'%SlotPrefixes[LANGUAGE_INDEX],
            ]
        # FIX: the original assigned this list to self._selectFields, which
        # clobbered the _selectFields *method* with a list and left
        # selectFields(False) returning the base class's NULL datatype /
        # language padding instead of the real columns.
        self._selectFieldsNonLeading = self._selectFields(False) + \
            [
            'rt_%s.%s'%(self.columnNames[DATATYPE_INDEX][0],idHashLexicalCol),
            str(self)+'.'+self.columnNames[LANGUAGE_INDEX][0],
            ]
    def _resetPendingInsertions(self):
        # Pending rows are bucketed by which INSERT command they belong to.
        self.pendingInsertions = {
            (False,False): [],
            (False,True) : [],
            (True,False) : [],
            (True,True) : [],
        }
    def insertRelationsSQLCMD(self,dataType=None,language=None):
        """
        Like the base implementation, but the data_type / language columns are
        only included when the corresponding flag is set.
        """
        vals = 0
        insertColNames = []
        for colIdx,colName in enumerate(self.columnNames):
            if colName:
                if isinstance(colName,tuple):
                    colName = colName[0]
                    # Optional columns: include only when requested.
                    for argColName,arg in [(self.columnNames[DATATYPE_INDEX][0],dataType),(self.columnNames[LANGUAGE_INDEX][0],language)]:
                        if colName == argColName and arg:
                            insertColNames.append(colName)
                            vals += 1
                else:
                    insertColNames.append(colName)
                    vals += 1
                    if colIdx < len(self.termEnumerations) and self.termEnumerations[colIdx]:
                        insertColNames.append(colName+'_term')
                        vals += 1
        insertColsExpr = "(%s)"%(','.join([i for i in insertColNames]))
        return "INSERT INTO %s %s VALUES "%(self,insertColsExpr)+"(%s)"%(','.join(['%s' for i in range(vals)]))
    def insertRelations(self,quadSlots):
        """
        Queues identifiers / values and assertion rows, routing each row to the
        bucket matching its (datatype, language) presence.
        """
        for quadSlot in quadSlots:
            self.extractIdentifiers(quadSlot)
            literal = quadSlot[OBJECT].term
            insertionCMDKey = (bool(literal.datatype),bool(literal.language))
            self.pendingInsertions[insertionCMDKey].append(self.compileQuadToParams(quadSlot))
    def flushInsertions(self,db):
        """
        Flushes identifiers / values and then each non-empty bucket of pending
        assertion rows with its matching INSERT command.
        """
        self.idHash.insertIdentifiers(db)
        self.valueHash.insertIdentifiers(db)
        cursor = db.cursor()
        for key,paramList in self.pendingInsertions.items():
            if paramList:
                cursor.executemany(self.insertSQLCmds[key],paramList)
        cursor.close()
        self._resetPendingInsertions()
    def compileQuadToParams(self,quadSlots):
        """
        Flattens a quad into INSERT parameters; the object's term type is
        omitted (hard-coded to 'L') and datatype / language parameters are
        appended only when present.
        """
        subjSlot,predSlot,objSlot,conSlot = quadSlots
        # NOTE(review): the datatype is a URIRef but is normalized with the
        # literal object's termType - confirm this matches how the datatype
        # hash is computed on lookup.
        dTypeParam = objSlot.term.datatype and normalizeValue(objSlot.term.datatype,objSlot.termType) or None
        langParam = objSlot.term.language and objSlot.term.language or None
        rtList = [
            subjSlot.md5Int,
            term2Letter(subjSlot.term),
            predSlot.md5Int,
            term2Letter(predSlot.term),
            objSlot.md5Int,
            conSlot.md5Int,
            term2Letter(conSlot.term)]
        for item in [dTypeParam,langParam]:
            if item:
                rtList.append(item)
        return tuple(rtList)
    def extractIdentifiers(self,quadSlots):
        """
        Queues subject / predicate / context (and the datatype URI, if any) on
        the identifier hash, and the literal itself on the value hash.
        """
        subjSlot,predSlot,objSlot,conSlot = quadSlots
        idTerms = [
            (subjSlot.term,subjSlot.termType),
            (predSlot.term,predSlot.termType),
            (conSlot.term,conSlot.termType)]
        if objSlot.term.datatype:
            # NOTE(review): queued with the literal's termType even though the
            # datatype is a URI - verify against the hash's normalization.
            idTerms.append((objSlot.term.datatype,objSlot.termType))
        self.idHash.updateIdentifierQueue(idTerms)
        self.valueHash.updateIdentifierQueue([(objSlot.term,objSlot.termType)])
    def selectFields(self,first=False):
        """
        Returns the SELECT column list including the datatype / language columns.
        """
        return first and self._selectFieldsLeading or self._selectFieldsNonLeading
class NamedBinaryRelations(BinaryRelationPartition):
    """
    Partition associated with assertions where the predicate isn't rdf:type and the object isn't a literal
    """
    nameSuffix = 'relations'
    termEnumerations=[NON_LITERALS,PREDICATE_NAMES,NON_LITERALS,CONTEXT_TERMS]
    columnNames = ['subject','predicate','object',CONTEXT_COLUMN]
    columnIntersectionList = [
        (PREDICATE,True),
        (CONTEXT,True),
        (OBJECT,True),
        (SUBJECT,True)]
    hardCodedResultFields = {}
    hardCodedResultTermsTypes = {}
    def compileQuadToParams(self,quadSlots):
        """
        Flattens all four quad slots into the (md5 integer, term-type letter)
        parameter tuple consumed by the batch INSERT command.
        """
        params = []
        for slot in quadSlots:
            params.append(slot.md5Int)
            params.append(term2Letter(slot.term))
        return tuple(params)
    def extractIdentifiers(self,quadSlots):
        """
        Queues every term of the quad for insertion into the identifier hash.
        """
        self.idHash.updateIdentifierQueue(
            [(slot.term,slot.termType) for slot in quadSlots])
def BinaryRelationPartitionCoverage(quad,BRPs):
    """
    This function takes a quad pattern (where any term is one of: URIRef,BNode,Literal,None,or REGEXTerm)
    ,a list of 3 live partitions and returns a list of only those partitions that need to be searched
    in order to resolve the pattern. This function relies on the BRPQueryDecisionMap dictionary
    to determine which partitions to use. Note that the dictionary as it is currently constituted
    requires that REGEXTerms in the object slot require that *both* the binary relation partition and
    the literal properties partitions are searched when this search could be limited to the literal
    properties only (for more efficient REGEX evaluation of literal values). Given the nature of the
    REGEX function in SPARQL and the way Versa matches by REGEX, this separation couldn't be done
    """
    # Unpack inside the body rather than via a Python-2-only tuple parameter;
    # callers still pass the quad as a single tuple, so the interface is unchanged.
    subject,predicate,object_,context = quad
    if isinstance(predicate,list) and len(predicate) == 1:
        predicate = predicate[0]
    if isinstance(predicate,REGEXTerm):
        pId = predicate.compiledExpr.match(RDF.type) and 'RT' or 'U_RNT'
    elif isinstance(predicate,(URIRef,BNode)):
        pId = predicate == RDF.type and 'T' or 'U_RNT'
    elif predicate is None or predicate == []:
        # FIX: the original tested 'predicate is []', an identity comparison
        # against a fresh list literal that is always False, so an empty list
        # never mapped to the wildcard case.
        pId = 'W'
    elif isinstance(predicate,list):
        if [p for p in predicate if p == RDF.type or isinstance(p,REGEXTerm) and p.compiledExpr.match(RDF.type)]:
            #One of the predicates is (or matches) rdf:type, so can be treated as a REGEX term that matches rdf:type
            pId = 'RT'
        else:
            #Otherwise, can be treated as a REGEXTerm that *doesn't* match rdf:type
            pId = 'U_RNT'
    elif isinstance(predicate,Variable):
        #Predicates as variables would only exist in literal property assertions and 'other' Relations partition
        #(same as URIs or REGEX Terms that don't match rdf:type)
        pId = 'U_RNT'
    else:
        raise Exception("Unable to determine a parition to cover with the given predicate %s (a %s)"%(predicate,type(predicate).__name__))
    if isinstance(object_,list) and len(object_) == 1:
        object_ = object_[0]
    if isinstance(object_,REGEXTerm):
        oId = 'R'
    elif isinstance(object_,Literal):
        oId = 'L'
    elif isinstance(object_,(URIRef,BNode,Graph)):
        oId = 'U'
    elif object_ is None:
        oId = 'W'
    elif isinstance(object_,list):
        if [o for o in object_ if isinstance(o,REGEXTerm)]:
            #If there are any REGEXTerms in the list then the list behaves as a REGEX / Wildcard
            oId = 'R'
        elif not [o for o in object_ if isinstance(o,REGEXTerm) or isinstance(o,Literal)]:
            #There are no Literals or REGEXTerms, the list behaves as a URI (i.e., it never checks literal partition)
            oId = 'U'
        elif len([o for o in object_ if isinstance(o,Literal)]) == len(object_):
            #They are all literals
            oId = 'L'
        else:
            #Treat as a wildcard
            oId = 'R'
    elif isinstance(object_,Variable):
        #Variables would only exist in the ABOX and 'other' Relations partition (same as URIs)
        oId = 'U'
    else:
        raise Exception("Unable to determine a parition to cover with the given object %s (a %s)"%(object_,type(object_).__name__))
    targetBRPs = [brp for brp in BRPs if isinstance(brp,BRPQueryDecisionMap[pId+oId])]
    return targetBRPs
def PatternResolution(quad,cursor,BRPs,orderByTriple=True,fetchall=True,fetchContexts=False):
    """
    This function implements query pattern resolution against a list of partition objects and
    3 parameters specifying whether to sort the result set (in order to group identical triples
    by the contexts in which they appear), whether to fetch the entire result set or one at a time,
    and whether to fetch the matching contexts only or the assertions.
    This function uses BinaryRelationPartitionCoverage to whittle out the partitions that don't need
    to be searched, generateHashIntersections / generateWhereClause to generate the SQL query
    and the parameter fill-ins and creates a single UNION query against the relevant partitions.
    Note the use of UNION syntax requires that the literal properties partition is first (since it
    uses the first select to determine the column types for the resulting rows from the subsequent
    SELECT queries)
    see: http://dev.mysql.com/doc/refman/5.0/en/union.html
    """
    subject,predicate,object_,context = quad
    # Narrow the search to only the partitions that can contain matches.
    targetBRPs = BinaryRelationPartitionCoverage((subject,predicate,object_,context),BRPs)
    unionQueries = []
    unionQueriesParams = []
    for brp in targetBRPs:
        # Only the first SELECT of the UNION carries the column aliases; the
        # remaining SELECTs inherit them positionally.
        first = targetBRPs.index(brp) == 0
        if fetchContexts:
            # DISTINCT: an RDF graph is a set of triples, and certain join
            # expressions can otherwise yield duplicate rows.
            query = "SELECT DISTINCT %s FROM %s %s WHERE "%(
                ','.join(brp.selectContextFields(first)),
                brp,
                brp._intersectionSQL
            )
        else:
            query = CROSS_BRP_QUERY_SQL%(
                ','.join(brp.selectFields(first)),
                brp,
                brp._intersectionSQL
            )
        whereClause,whereParameters = brp.generateWhereClause((subject,predicate,object_,context))
        unionQueries.append(query+whereClause)
        unionQueriesParams.extend(whereParameters)
    if fetchContexts:
        orderBySuffix = ''
    else:
        # Sorting by s,p,o groups identical triples from different contexts together.
        orderBySuffix = orderByTriple and ' ORDER BY %s,%s,%s'%(SlotPrefixes[SUBJECT],SlotPrefixes[PREDICATE],SlotPrefixes[OBJECT]) or ''
    if len(unionQueries) == 1:
        query = unionQueries[0] + orderBySuffix
    else:
        query = ' union all '.join(['('+q+')' for q in unionQueries]) + orderBySuffix
    # The original pattern is appended as a SQL comment for server-side log debugging.
    query = query + ' # %s'%str(quad)
    try:
        cursor.execute(query,tuple(unionQueriesParams))
    except ValueError,e:
        print "## Query ##\n",query
        print "## Parameters ##\n",unionQueriesParams
        raise e
    if fetchall:
        qRT = cursor.fetchall()
    else:
        qRT = cursor.fetchone()
    return qRT
# Temporary table used to accumulate cross-partition results; the context
# prefix index supports the ORDER BY in CROSS_BRP_RESULT_QUERY_SQL.
CREATE_RESULT_TABLE = \
"""
CREATE TEMPORARY TABLE result (
    subject       text NOT NULL,
    subjectTerm   enum('F','V','U','B','L') NOT NULL,
    predicate     text NOT NULL,
    predicateTerm enum('F','V','U','B','L') NOT NULL,
    object        text NOT NULL,
    objectTerm    enum('F','V','U','B','L') NOT NULL,
    context       text not NULL,
    contextTerm   enum('F','V','U','B','L') NOT NULL,
    dataType      text,
    language      char(3),
    INDEX USING BTREE (context(50))
)
"""
# Per-partition SELECT skeleton: column list, table name, JOIN intersections.
CROSS_BRP_QUERY_SQL="SELECT %s FROM %s %s WHERE "
CROSS_BRP_RESULT_QUERY_SQL="SELECT * FROM result ORDER BY context"
# FIX: the original was "DROP result", which is not valid SQL (the TABLE
# keyword is mandatory in MySQL's DROP statement).
DROP_RESULT_TABLE_SQL = "DROP TEMPORARY TABLE result"
# Maps a predicate-id + object-id key (as computed by
# BinaryRelationPartitionCoverage) to the partition class(es) to search.
# Predicate ids: 'W' wildcard, 'T' rdf:type, 'RT' REGEX matching rdf:type,
# 'U_RNT' URI / REGEX / Variable not matching rdf:type.
# Object ids: 'W' wildcard, 'L' literal, 'U' URI/BNode/Variable, 'R' REGEX.
# Values are tuples for isinstance() dispatch (single entries are written as
# 1-tuples for uniformity; isinstance accepts either form).
BRPQueryDecisionMap = {
    'WL':(NamedLiteralProperties,),
    'WU':(AssociativeBox,NamedBinaryRelations),
    'WW':(NamedLiteralProperties,AssociativeBox,NamedBinaryRelations),
    'WR':(NamedLiteralProperties,AssociativeBox,NamedBinaryRelations), #Could be optimized to not include NamedBinaryRelations
    'RTL':(NamedLiteralProperties,),
    'RTU':(NamedBinaryRelations,AssociativeBox),
    'RTW':(NamedLiteralProperties,AssociativeBox,NamedBinaryRelations), # FIX: was missing -> KeyError for a REGEX-matching-rdf:type predicate with a wildcard object
    'RTR':(NamedLiteralProperties,AssociativeBox,NamedBinaryRelations), #Could be optimized to not include NamedBinaryRelations
    'TL':(), # FIX: was missing -> KeyError; rdf:type assertions never have literal objects, so no partition matches
    'TU':(AssociativeBox,),
    'TW':(AssociativeBox,),
    'TR':(AssociativeBox,),
    'U_RNTL':(NamedLiteralProperties,),
    'U_RNTU':(NamedBinaryRelations,),
    'U_RNTW':(NamedLiteralProperties,NamedBinaryRelations),
    'U_RNTR':(NamedLiteralProperties,NamedBinaryRelations), #Could be optimized to not include NamedBinaryRelations
}
A DISTINCT was added to the SELECT clause to ensure duplicate triples are not returned — duplicates can arise from certain join expressions, and an RDF graph is a set of triples.
"""
The set of classes used to model the 3 'partitions' for N3 assertions.
There is a top level class which implements operations common to all partitions as
well as a class for each partition. These classes are meant to allow the underlying
SQL schema to be completely configurable as well as to automate the generation
of SQL queries for adding,updating,removing,resolving triples from the partitions.
These classes work in tandem with the RelationHashes to automate all (or most) of
the SQL processing associated with this FOPL Relational Model
NOTE: The use of foreign keys (which - unfortunately - bumps the minimum MySQL version to 5.0) allows for
the efficient removal of all statements about a particular resource using cascade on delete (currently not used)
see: http://dev.mysql.com/doc/refman/5.0/en/ansi-diff-foreign-keys.html
"""
from rdflib.URIRef import URIRef
from rdflib import BNode
from rdflib import RDF
from rdflib.Literal import Literal
from rdflib.URIRef import URIRef
from pprint import pprint
from rdflib.term_utils import *
from rdflib.store.REGEXMatching import REGEXTerm
from QuadSlot import *
# Wildcard marker used in query patterns (matches any term).
Any = None
# Name of the column holding the context (graph) identifier in every partition.
CONTEXT_COLUMN = 'context'
# Term-type letters: 'U' URI, 'B' BNode, 'F' Formula, 'V' Variable, 'L' Literal.
ANY_TERM = ['U','B','F','V','L']
CONTEXT_TERMS = ['U','B','F']
IDENTIFIER_TERMS = ['U','B']
GROUND_IDENTIFIERS = ['U']
NON_LITERALS = ['U','B','F','V']
CLASS_TERMS = ['U','B','V']
PREDICATE_NAMES = ['U','V']
# Allowed term types per slot for each of the three partitions.
NAMED_BINARY_RELATION_PREDICATES = GROUND_IDENTIFIERS
NAMED_BINARY_RELATION_OBJECTS = ['U','B','L']
NAMED_LITERAL_PREDICATES = GROUND_IDENTIFIERS
NAMED_LITERAL_OBJECTS = ['L']
ASSOCIATIVE_BOX_CLASSES = GROUND_IDENTIFIERS
# Template for creating a partition table (InnoDB is required for the
# foreign-key constraints mentioned in the module docstring).
CREATE_BRP_TABLE = """
CREATE TABLE %s (
%s
) ENGINE=InnoDB"""
# JOIN templates used to intersect a partition with the identifier / value hashes.
LOOKUP_INTERSECTION_SQL = "INNER JOIN %s %s ON (%s)"
LOOKUP_UNION_SQL = "LEFT JOIN %s %s ON (%s)"
class BinaryRelationPartition(object):
"""
The common ancestor of the three partitions for assertions.
Implements behavior common to all 3. Each subclass is expected to define the following:
nameSuffix - The suffix appended to the name of the table
termEnumerations - a 4 item list (for each quad 'slot') of lists (or None) which enumerate the allowable term types
for each quad slot (one of 'U' - URIs,'V' - Variable,'L' - Literals,'B' - BNodes,'F' - Formulae)
columnNames - a list of column names for each quad slot (can be of additional length where each item is a 3-item tuple of:
column name, column type, index)
columnIntersectionList - a list of 2 item tuples (the quad index and a boolean indicating whether or not the associated term is an identifier)
this list (the order of which is very important) is used for generating intersections between the partition and the identifier / value hash
hardCodedResultFields - a dictionary mapping quad slot indices to their hardcoded value (for partitions - such as ABOX - which have a hardcoded value for a particular quad slot)
hardCodedResultTermsTypes - a dictionary mapping quad slot indices to their hardcoded term type (for partitions - such as Literal properties - which have hardcoded values for a particular quad slot's term type)
"""
assertedColumnName = 'asserted'
indexSuffix = 'Index'
def __init__(self,identifier,idHash,valueHash):
self.identifier = identifier
self.idHash = idHash
self.valueHash = valueHash
self._repr = self.identifier+'_'+self.nameSuffix
self.singularInsertionSQLCmd = self.insertRelationsSQLCMD()
self._resetPendingInsertions()
self._intersectionSQL = self.generateHashIntersections()
self._selectFieldsLeading = self._selectFields(True) + ['NULL as '+SlotPrefixes[DATATYPE_INDEX],'NULL as '+SlotPrefixes[LANGUAGE_INDEX]]
self._selectFieldsNonLeading = self._selectFields(False) + ['NULL','NULL']
def __repr__(self):
return self._repr
def foreignKeySQL(self,slot):
"""
Generates foreign key expression relating a particular quad term with
the identifier hash
"""
rt = ["\tCONSTRAINT %s_%s_lookup FOREIGN KEY (%s) REFERENCES %s (%s)"%(
self,
self.columnNames[slot],
self.columnNames[slot],
self.idHash,
self.idHash.columns[0][0])]
return rt
def IndexManagementSQL(self,create=False):
idxSQLStmts = []
for slot in POSITION_LIST:
if self.columnNames[slot]:
if create:
idxSQLStmts.append("create INDEX %s%s on %s (%s)"%(self.columnNames[slot],self.indexSuffix,self,self.columnNames[slot]))
idxSQLStmts.append("ALTER TABLE %s ADD %s"%(self,self.foreignKeySQL(slot)[0]))
else:
idxSQLStmts.append("ALTER TABLE %s DROP FOREIGN KEY %s_%s_lookup"%(self,self,self.columnNames[slot]))
idxSQLStmts.append("ALTER TABLE %s DROP INDEX %s%s"%(self,self.columnNames[slot],self.indexSuffix))
if self.termEnumerations[slot]:
if create:
idxSQLStmts.append("create INDEX %s_term%s on %s (%s_term)"%(self.columnNames[slot],self.indexSuffix,self,self.columnNames[slot]))
else:
idxSQLStmts.append("drop index %s_term%s on %s"%(self.columnNames[slot],self.indexSuffix,self))
if len(self.columnNames) > 4:
for otherSlot in range(4,len(self.columnNames)):
colMD = self.columnNames[otherSlot]
if isinstance(colMD,tuple):
colName,colType,indexStr = colMD
if create:
idxSQLStmts.append("create INDEX %s%s on %s (%s)"%(colName,self.indexSuffix,self,indexStr%colName))
else:
idxSQLStmts.append("drop index %s%s on %s"%(colName,self.indexSuffix,self))
else:
if create:
idxSQLStmts.append("create INDEX %s%s on (%s)"%(colMD,self.indexSuffix,self,colMD))
idxSQLStmts.append("ALTER TABLE %s ADD %s"%(self,self.foreignKeySQL(otherSlot)[0]))
else:
idxSQLStmts.append("ALTER TABLE %s DROP FOREIGN KEY %s_%s_lookup"%(self,self,colMD))
idxSQLStmts.append("drop index %s%s on %s"%(colMD,self.indexSuffix,self))
return idxSQLStmts
def createSQL(self):
    """
    Generates a CREATE TABLE statement which creates a SQL table used for
    persisting assertions associated with this partition
    """
    columnSQLStmts = []
    for slot in POSITION_LIST:
        if self.columnNames[slot]:
            # half-MD5 integer hash of the term in this slot
            columnSQLStmts.append("\t%s\tBIGINT unsigned not NULL"%(self.columnNames[slot]))
            columnSQLStmts.append("\tINDEX %s%s (%s)"%(self.columnNames[slot],self.indexSuffix,self.columnNames[slot]))
            if self.termEnumerations[slot]:
                # companion column recording the RDF term type as an enum
                columnSQLStmts.append("\t%s_term enum(%s) not NULL"%(self.columnNames[slot],','.join(["'%s'"%tType for tType in self.termEnumerations[slot]])))
                columnSQLStmts.append("\tINDEX %s_term%s (%s_term)"%(self.columnNames[slot],self.indexSuffix,self.columnNames[slot]))
            columnSQLStmts.extend(self.foreignKeySQL(slot))
    if len(self.columnNames) > 4:
        # extra columns beyond the four quad slots (e.g. datatype / language)
        for otherSlot in range(4,len(self.columnNames)):
            colMD = self.columnNames[otherSlot]
            if isinstance(colMD,tuple):
                # (name, SQL type, index expression) triple
                colName,colType,indexStr = colMD
                columnSQLStmts.append("\t%s %s"%(colName,colType))
                columnSQLStmts.append("\tINDEX %s%s (%s)"%(colName,self.indexSuffix,indexStr%colName))
            else:
                columnSQLStmts.append("\t%s BIGINT unsigned not NULL"%colMD)
                columnSQLStmts.append("\tINDEX %s%s (%s)"%(colMD,self.indexSuffix,colMD))
                columnSQLStmts.extend(self.foreignKeySQL(otherSlot))
    return CREATE_BRP_TABLE%(
        self,
        ',\n'.join(columnSQLStmts)
    )
def _resetPendingInsertions(self):
    """
    Resets the cache for pending insertions
    """
    # Start a fresh batch queue; filled by insertRelations(), drained by
    # flushInsertions().
    self.pendingInsertions = []
def insertRelationsSQLCMD(self):
    """
    Generates a SQL command with parameter references (%s) in order to facilitate
    efficient batch insertion of multiple assertions by Python DB implementations
    (such as MySQLdb).

    Returns the INSERT command string whose parameter count matches the tuples
    produced by compileQuadToParams().
    """
    vals = 0
    insertColNames = []
    # Fix/idiom: the original called self.columnNames.index(colName) inside the
    # loop — O(n) per iteration and, worse, it returns the *first* occurrence,
    # which yields the wrong term-enumeration slot if a column name repeats.
    for colIdx, colName in enumerate(self.columnNames):
        if colName:
            insertColNames.append(colName)
            vals += 1
            # slots with a term enumeration also store the term-type letter
            if colIdx < len(self.termEnumerations) and self.termEnumerations[colIdx]:
                insertColNames.append(colName+'_term')
                vals += 1
    # extra columns are (name, type, index) tuples; use just the name
    insertColsExpr = "(%s)"%(','.join([isinstance(i,tuple) and i[0] or i for i in insertColNames]))
    return "INSERT INTO %s %s VALUES "%(self,insertColsExpr)+"(%s)"%(','.join(['%s' for i in range(vals)]))
def insertRelations(self,quadSlots):
    """
    Takes a list of QuadSlot objects and queues the new identifiers / values to insert and
    the assertions as well (so they can be added in a batch for maximum efficiency)
    """
    for quadSlot in quadSlots:
        # queue term -> hash-table entries first, then the assertion row
        self.extractIdentifiers(quadSlot)
        self.pendingInsertions.append(self.compileQuadToParams(quadSlot))
def flushInsertions(self,db):
    """
    Adds the pending identifiers / values and assertions (using executemany for
    maximum efficiency), and resets the queue.

    The cursor is now closed even when executemany raises, so a failed batch
    no longer leaks the database cursor.
    """
    self.idHash.insertIdentifiers(db)
    self.valueHash.insertIdentifiers(db)
    cursor = db.cursor()
    try:
        cursor.executemany(self.singularInsertionSQLCmd,self.pendingInsertions)
    finally:
        # always release the cursor; the exception (if any) still propagates
        cursor.close()
    # only reached on success, so a failed batch is not silently discarded
    self._resetPendingInsertions()
def selectContextFields(self,first):
    """
    Generates a list of column aliases for the SELECT SQL command used in order
    to fetch contexts from each partition

    Only the first SELECT in a UNION carries the 'as ...' aliases.
    """
    rt = []
    # lexical form is the last column of the id hash, term type the one before
    idHashLexicalCol = self.idHash.columns[-1][0]
    idHashTermTypeCol = self.idHash.columns[-2][0]
    termNameAlias = first and ' as %s'%SlotPrefixes[CONTEXT] or ''
    rt.append('rt_'+SlotPrefixes[CONTEXT]+'.'+idHashLexicalCol + termNameAlias)
    termTypeAlias = first and ' as %sTermType'%SlotPrefixes[CONTEXT] or ''
    if self.termEnumerations[CONTEXT]:
        # term type comes from the joined lookup table
        rt.append('rt_'+SlotPrefixes[CONTEXT]+'.'+idHashTermTypeCol+termTypeAlias)
    else:
        # term type is fixed for this partition; inline it as a literal
        rt.append("'%s'"%self.hardCodedResultTermsTypes[CONTEXT]+termTypeAlias)
    return rt
def _selectFields(self,first):
    # Builds the per-quad-slot SELECT column list; 'first' adds the 'as ...'
    # aliases used by the leading SELECT of a UNION.
    rt = []
    idHashLexicalCol = self.idHash.columns[-1][0]
    idHashTermTypeCol = self.idHash.columns[-2][0]
    for idx in range(len(POSITION_LIST)):
        termNameAlias = first and ' as %s'%SlotPrefixes[idx] or ''
        if idx < len(self.columnNames) and self.columnNames[idx]:
            # slot is materialized: read the lexical form via the join alias
            rt.append('rt_'+SlotPrefixes[idx]+'.'+idHashLexicalCol + termNameAlias)
            termTypeAlias = first and ' as %sTermType'%SlotPrefixes[idx] or ''
            if self.termEnumerations[idx]:
                rt.append('rt_'+SlotPrefixes[idx]+'.'+idHashTermTypeCol+termTypeAlias)
            else:
                rt.append("'%s'"%self.hardCodedResultTermsTypes[idx]+termTypeAlias)
        else:
            # slot is implied by the partition (e.g. rdf:type): inline literals
            rt.append("'%s'"%self.hardCodedResultFields[idx]+termNameAlias)
            if self.hardCodedResultTermsTypes[idx]:
                # NOTE(review): this reuses termNameAlias, so the term-type
                # column gets the *name* alias instead of a '%sTermType'
                # alias as in the branch above — looks like a bug; confirm
                # against the UNION consumers before changing.
                rt.append("'%s'"%self.hardCodedResultTermsTypes[idx]+termNameAlias)
    return rt
def selectFields(self,first=False):
    """
    Returns a list of column aliases for the SELECT SQL command used to fetch
    quads from a partition.  The leading variant (first=True) carries the
    'as ...' aliases; the non-leading variant does not.
    """
    if first:
        # 'or' preserves the original and/or fallback semantics exactly
        return self._selectFieldsLeading or self._selectFieldsNonLeading
    return self._selectFieldsNonLeading
def generateHashIntersections(self):
    """
    Generates the SQL JOINS (INNER and LEFT) used to intersect the identifier and value hashes
    with this partition. This relies on each parition setting up an ordered list of
    intersections (ordered with optimization in mind). For instance the ABOX partition
    would want to intersect on classes first (since this will have a lower cardinality than any other field)
    wherease the Literal Properties partition would want to intersect on datatypes first.
    The paritions and hashes are joined on the integer half-MD5-hash of the URI (or literal) as well
    as the 'Term Type'
    """
    intersections = []
    for idx,isId in self.columnIntersectionList:
        # isId selects which lookup table the join targets
        lookup = isId and self.idHash or self.valueHash
        # quad slots get rt_<prefix> aliases; extra columns use their own name
        lookupAlias = idx < len(POSITION_LIST) and 'rt_'+SlotPrefixes[idx] or 'rt_'+self.columnNames[idx][0]
        lookupKeyCol = lookup.columns[0][0]
        if idx < len(POSITION_LIST) or len(self.columnNames) > len(POSITION_LIST):
            colName = idx < len(POSITION_LIST) and self.columnNames[idx] or self.columnNames[idx][0]
            # join on the half-MD5 integer hash
            intersectionClauses = ["%s.%s = %s.%s"%(self,colName,lookupAlias,lookupKeyCol)]
            if idx < len(POSITION_LIST) and self.termEnumerations[idx]:
                # also join on the stored term-type letter
                intersectionClauses.append("%s.%s_term = %s.%s"%(self,colName,lookupAlias,lookup.columns[1][0]))
            if isId and idx < len(POSITION_LIST) and idx in self.hardCodedResultTermsTypes:
                # partitions with a fixed term type constrain the lookup side
                intersectionClauses.append("%s.%s = '%s'"%(lookupAlias,lookup.columns[1][0],self.hardCodedResultTermsTypes[idx]))
            if idx == DATATYPE_INDEX and len(self.columnNames) > len(POSITION_LIST):
                # datatype may be NULL, so a LEFT-style union join is used
                intersections.append(LOOKUP_UNION_SQL%(lookup,lookupAlias,' AND '.join(intersectionClauses)))
            else:
                intersections.append(LOOKUP_INTERSECTION_SQL%(lookup,lookupAlias,' AND '.join(intersectionClauses)))
    return ' '.join(intersections)
def generateWhereClause(self,queryPattern):
    """
    Takes a query pattern (a list of quad terms - subject,predicate,object,context)
    and generates a SQL WHERE clauses which works in conjunction to the intersections
    to filter the result set by partial matching (by REGEX), full matching (by integer half-hash),
    and term types. For maximally efficient SELECT queries
    """
    whereClauses = []
    whereParameters = []
    # a missing context term means only asserted (non-formula) statements
    asserted = dereferenceQuad(CONTEXT,queryPattern) is None
    for idx in SlotPrefixes.keys():
        queryTerm = dereferenceQuad(idx,queryPattern)
        lookupAlias = 'rt_'+SlotPrefixes[idx]
        if idx == CONTEXT and asserted:
            # exclude quoted/formula contexts (term type 'F')
            whereClauses.append("%s.%s_term != 'F'"%(self,self.columnNames[idx]))
        if idx < len(POSITION_LIST) and isinstance(queryTerm,REGEXTerm):
            # partial match against the lexical form in the lookup table
            whereClauses.append("%s.lexical REGEXP "%lookupAlias+"%s")
            whereParameters.append(queryTerm)
        elif idx == CONTEXT and isinstance(queryTerm,Graph) and isinstance(queryTerm.identifier,REGEXTerm):
            whereClauses.append("%s.lexical REGEXP "%lookupAlias+"%s")
            whereParameters.append(queryTerm.identifier)
        elif idx < len(POSITION_LIST) and queryTerm is not Any:
            if self.columnNames[idx]:
                if isinstance(queryTerm,list):
                    # IN clause over the half-hash of every candidate term
                    whereClauses.append("%s.%s"%(self,self.columnNames[idx])+" in (%s)"%','.join(['%s' for item in range(len(queryTerm))]))
                    whereParameters.extend([normalizeValue(item,term2Letter(item)) for item in queryTerm])
                else:
                    # exact match on the term's integer half-hash
                    whereClauses.append("%s.%s"%(self,self.columnNames[idx])+" = %s")
                    whereParameters.append(normalizeValue(queryTerm,term2Letter(queryTerm)))
            if not idx in self.hardCodedResultTermsTypes and self.termEnumerations[idx] and not isinstance(queryTerm,list):
                # constrain the stored term-type letter as well
                whereClauses.append("%s.%s_term"%(self,self.columnNames[idx])+" = %s")
                whereParameters.append(term2Letter(queryTerm))
        elif idx >= len(POSITION_LIST) and len(self.columnNames) > len(POSITION_LIST) and queryTerm is not None:
            # extra columns (datatype / language); datatype values are hashed
            compVal = idx == DATATYPE_INDEX and normalizeValue(queryTerm,term2Letter(queryTerm)) or queryTerm
            whereClauses.append("%s.%s"%(self,self.columnNames[idx][0])+" = %s")
            whereParameters.append(compVal)
    return ' AND '.join(whereClauses),whereParameters# + "#{%s}\n"%(str(queryPattern)),whereParameters
class AssociativeBox(BinaryRelationPartition):
    """
    The partition associated with assertions of class membership (formally known - in Description Logics - as an Associative Box)
    This partition is for all assertions where the property is rdf:type
    see: http://en.wikipedia.org/wiki/Description_Logic#Modelling_in_Description_Logics
    """
    nameSuffix = 'associativeBox'
    termEnumerations=[NON_LITERALS,None,CLASS_TERMS,CONTEXT_TERMS]
    columnNames = ['member',None,'class',CONTEXT_COLUMN]
    columnIntersectionList = [
        (OBJECT,True),
        (CONTEXT,True),
        (SUBJECT,True)]
    hardCodedResultFields = {
        PREDICATE : RDF.type,
    }
    hardCodedResultTermsTypes = {
        PREDICATE : 'U',
    }
    def compileQuadToParams(self,quadSlots):
        """Flatten (member, class, context) into the executemany parameter
        tuple; the predicate (always rdf:type) is implied and not stored."""
        subjSlot,predSlot,objSlot,conSlot = quadSlots
        params = []
        for slot in (subjSlot,objSlot,conSlot):
            params.append(slot.md5Int)
            params.append(term2Letter(slot.term))
        return tuple(params)
    def extractIdentifiers(self,quadSlots):
        """Queue the subject, object and context terms for the identifier
        hash; the implied rdf:type predicate needs no lookup entry."""
        subjSlot,predSlot,objSlot,conSlot = quadSlots
        self.idHash.updateIdentifierQueue(
            [(slot.term,slot.termType) for slot in (subjSlot,objSlot,conSlot)])
class NamedLiteralProperties(BinaryRelationPartition):
    """
    The partition associated with assertions where the object is a Literal.

    In addition to the four quad slots it stores two optional columns:
    the literal's datatype (hashed) and its language tag.
    """
    nameSuffix = 'literalProperties'
    termEnumerations=[NON_LITERALS,PREDICATE_NAMES,None,CONTEXT_TERMS]
    columnNames = ['subject','predicate','object',CONTEXT_COLUMN,('data_type','BIGINT unsigned','%s'),('language','varchar(3)','%s(3)')]
    columnIntersectionList = [
        (DATATYPE_INDEX,True),
        (PREDICATE,True),
        (CONTEXT,True),
        (OBJECT,False),
        (SUBJECT,True)]
    hardCodedResultFields = {}
    hardCodedResultTermsTypes = {
        OBJECT : 'L'
    }
    def foreignKeySQL(self,slot):
        """
        Same as the base implementation except the object column references
        the *value* hash (where literals live) instead of the identifier hash.
        """
        hash = slot == OBJECT and self.valueHash or self.idHash
        rt = ["\tCONSTRAINT %s_%s_lookup FOREIGN KEY (%s) REFERENCES %s (%s)"%(
            self,
            self.columnNames[slot],
            self.columnNames[slot],
            hash,
            hash.columns[0][0])]
        return rt
    def __init__(self,identifier,idHash,valueHash):
        super(NamedLiteralProperties,self).__init__(identifier,idHash,valueHash)
        # One INSERT command per (has datatype, has language) combination,
        # since the inserted column list differs for each.
        self.insertSQLCmds = {
            (False,False): self.insertRelationsSQLCMD(),
            (False,True) : self.insertRelationsSQLCMD(language=True),
            (True,False) : self.insertRelationsSQLCMD(dataType=True),
            (True,True)  : self.insertRelationsSQLCMD(dataType=True,language=True)
        }
        idHashLexicalCol = self.idHash.columns[-1][0]
        self._selectFieldsLeading = self._selectFields(True) + \
            [
                'rt_%s.%s'%(self.columnNames[DATATYPE_INDEX][0],idHashLexicalCol) + ' as %s'%SlotPrefixes[DATATYPE_INDEX],
                str(self)+'.'+self.columnNames[LANGUAGE_INDEX][0]+' as %s'%SlotPrefixes[LANGUAGE_INDEX],
            ]
        # Fix: the original assigned this list to self._selectFields, which
        # shadowed (and destroyed) the _selectFields *method* on the instance
        # and left self._selectFieldsNonLeading holding the base class's
        # 'NULL','NULL' placeholder columns — so non-leading UNION selects
        # returned NULL instead of the datatype / language values.
        self._selectFieldsNonLeading = self._selectFields(False) + \
            [
                'rt_%s.%s'%(self.columnNames[DATATYPE_INDEX][0],idHashLexicalCol),
                str(self)+'.'+self.columnNames[LANGUAGE_INDEX][0],
            ]
    def _resetPendingInsertions(self):
        # One queue per (has datatype, has language) combination, matching
        # the keys of self.insertSQLCmds.
        self.pendingInsertions = {
            (False,False): [],
            (False,True) : [],
            (True,False) : [],
            (True,True) : [],
        }
    def insertRelationsSQLCMD(self,dataType=None,language=None):
        """
        Build the INSERT command for rows that do / don't carry a datatype
        and / or a language tag (the optional columns are only listed when
        the corresponding flag is set).
        """
        vals = 0
        insertColNames = []
        # enumerate instead of list.index: index() is O(n) per iteration and
        # returns the first occurrence, which is wrong for duplicate names.
        for colIdx, colName in enumerate(self.columnNames):
            if colName:
                if isinstance(colName,tuple):
                    colName = colName[0]
                    # optional columns: include only when the flag is set
                    for argColName,arg in [(self.columnNames[DATATYPE_INDEX][0],dataType),(self.columnNames[LANGUAGE_INDEX][0],language)]:
                        if colName == argColName and arg:
                            insertColNames.append(colName)
                            vals += 1
                else:
                    insertColNames.append(colName)
                    vals += 1
                    if colIdx < len(self.termEnumerations) and self.termEnumerations[colIdx]:
                        insertColNames.append(colName+'_term')
                        vals += 1
        insertColsExpr = "(%s)"%(','.join([i for i in insertColNames]))
        return "INSERT INTO %s %s VALUES "%(self,insertColsExpr)+"(%s)"%(','.join(['%s' for i in range(vals)]))
    def insertRelations(self,quadSlots):
        # route each assertion to the queue matching its datatype/language shape
        for quadSlot in quadSlots:
            self.extractIdentifiers(quadSlot)
            literal = quadSlot[OBJECT].term
            insertionCMDKey = (bool(literal.datatype),bool(literal.language))
            self.pendingInsertions[insertionCMDKey].append(self.compileQuadToParams(quadSlot))
    def flushInsertions(self,db):
        self.idHash.insertIdentifiers(db)
        self.valueHash.insertIdentifiers(db)
        cursor = db.cursor()
        try:
            for key,paramList in self.pendingInsertions.items():
                if paramList:
                    cursor.executemany(self.insertSQLCmds[key],paramList)
        finally:
            # always release the cursor, even when a batch insert fails
            cursor.close()
        self._resetPendingInsertions()
    def compileQuadToParams(self,quadSlots):
        """
        Build the executemany parameter tuple; the optional datatype and
        language parameters are appended (datatype first) only when present,
        matching the column order of the per-combination INSERT commands.
        """
        subjSlot,predSlot,objSlot,conSlot = quadSlots
        dTypeParam = objSlot.term.datatype and normalizeValue(objSlot.term.datatype,objSlot.termType) or None
        langParam = objSlot.term.language and objSlot.term.language or None
        rtList = [
            subjSlot.md5Int,
            term2Letter(subjSlot.term),
            predSlot.md5Int,
            term2Letter(predSlot.term),
            objSlot.md5Int,
            conSlot.md5Int,
            term2Letter(conSlot.term)]
        for item in [dTypeParam,langParam]:
            if item:
                rtList.append(item)
        return tuple(rtList)
    def extractIdentifiers(self,quadSlots):
        # subject / predicate / context (and the datatype URI, if any) go to
        # the identifier hash; the literal itself goes to the value hash.
        subjSlot,predSlot,objSlot,conSlot = quadSlots
        idTerms = [
            (subjSlot.term,subjSlot.termType),
            (predSlot.term,predSlot.termType),
            (conSlot.term,conSlot.termType)]
        if objSlot.term.datatype:
            idTerms.append((objSlot.term.datatype,objSlot.termType))
        self.idHash.updateIdentifierQueue(idTerms)
        self.valueHash.updateIdentifierQueue([(objSlot.term,objSlot.termType)])
    def selectFields(self,first=False):
        return first and self._selectFieldsLeading or self._selectFieldsNonLeading
class NamedBinaryRelations(BinaryRelationPartition):
    """
    Partition associated with assertions where the predicate isn't rdf:type and the object isn't a literal
    """
    nameSuffix = 'relations'
    termEnumerations=[NON_LITERALS,PREDICATE_NAMES,NON_LITERALS,CONTEXT_TERMS]
    columnNames = ['subject','predicate','object',CONTEXT_COLUMN]
    columnIntersectionList = [
        (PREDICATE,True),
        (CONTEXT,True),
        (OBJECT,True),
        (SUBJECT,True)]
    hardCodedResultFields = {}
    hardCodedResultTermsTypes = {}
    def compileQuadToParams(self,quadSlots):
        """Every slot is stored: a (half-hash, term-type letter) pair per
        quad member, in subject/predicate/object/context order."""
        params = []
        for slot in quadSlots:
            params.append(slot.md5Int)
            params.append(term2Letter(slot.term))
        return tuple(params)
    def extractIdentifiers(self,quadSlots):
        """All four terms are non-literals, so every one is queued on the
        identifier hash."""
        self.idHash.updateIdentifierQueue(
            [(slot.term,slot.termType) for slot in quadSlots])
def BinaryRelationPartitionCoverage((subject,predicate,object_,context),BRPs):
"""
This function takes a quad pattern (where any term is one of: URIRef,BNode,Literal,None,or REGEXTerm)
,a list of 3 live partitions and returns a list of only those partitions that need to be searched
in order to resolve the pattern. This function relies on the BRPQueryDecisionMap dictionary
to determine which partitions to use. Note that the dictionary as it is currently constituted
requres that REGEXTerms in the object slot require that *both* the binary relation partition and
the literal properties partitions are searched when this search could be limited to the literal
properties only (for more efficient REGEX evaluation of literal values). Given the nature of the
REGEX function in SPARQL and the way Versa matches by REGEX, this seperation couldn't be done
"""
if isinstance(predicate,list) and len(predicate) == 1:
predicate = predicate[0]
if isinstance(predicate,REGEXTerm):
pId = predicate.compiledExpr.match(RDF.type) and 'RT' or 'U_RNT'
elif isinstance(predicate,(URIRef,BNode)):
pId = predicate == RDF.type and 'T' or 'U_RNT'
elif predicate is None or predicate is []:
pId = 'W'
elif isinstance(predicate,list):
if [p for p in predicate if p == RDF.type or isinstance(p,REGEXTerm) and p.compiledExpr.match(RDF.type)]:
#One of the predicates is (or matches) rdf:type, so can be treated as a REGEX term that matches rdf:type
pId = 'RT'
else:
#Otherwise, can be treated as a REGEXTerm that *doesn't* match rdf:type
pId = 'U_RNT'
elif isinstance(predicate,Variable):
#Predicates as variables would only exist in literal property assertions and 'other' Relations partition
#(same as URIs or REGEX Terms that don't match rdf:type)
pId = 'U_RNT'
else:
raise Exception("Unable to determine a parition to cover with the given predicate %s (a %s)"%(predicate,type(predicate).__name__))
if isinstance(object_,list) and len(object_) == 1:
object_ = object_[0]
if isinstance(object_,REGEXTerm):
oId = 'R'
elif isinstance(object_,Literal):
oId = 'L'
elif isinstance(object_,(URIRef,BNode,Graph)):
oId = 'U'
elif object_ is None:
oId = 'W'
elif isinstance(object_,list):
if [o for o in object_ if isinstance(o,REGEXTerm)]:
#If there are any REGEXTerms in the list then the list behaves as a REGEX / Wildcard
oId = 'R'
elif not [o for o in object_ if isinstance(o,REGEXTerm) or isinstance(o,Literal)]:
#There are no Literals or REGEXTerms, the list behaves as a URI (i.e., it never checks literal partition)
oId = 'U'
elif len([o for o in object_ if isinstance(o,Literal)]) == len(object_):
#They are all literals
oId = 'L'
else:
#Treat as a wildcard
oId = 'R'
elif isinstance(object_,Variable):
#Variables would only exist in the ABOX and 'other' Relations partition (same as URIs)
oId = 'U'
else:
raise Exception("Unable to determine a parition to cover with the given object %s (a %s)"%(object_,type(object_).__name__))
targetBRPs = [brp for brp in BRPs if isinstance(brp,BRPQueryDecisionMap[pId+oId])]
return targetBRPs
def PatternResolution(quad,cursor,BRPs,orderByTriple=True,fetchall=True,fetchContexts=False):
    """
    This function implements query pattern resolution against a list of partition objects and
    3 parameters specifying whether to sort the result set (in order to group identical triples
    by the contexts in which they appear), whether to fetch the entire result set or one at a time,
    and whether to fetch the matching contexts only or the assertions.
    This function uses BinaryRelationPartitionCoverage to whittle out the partitions that don't need
    to be searched, generateHashIntersections / generateWhereClause to generate the SQL query
    and the parameter fill-ins and creates a single UNION query against the relevant partitions.
    Note the use of UNION syntax requires that the literal properties partition is first (since it
    uses the first select to determine the column types for the resulting rows from the subsequent
    SELECT queries)
    see: http://dev.mysql.com/doc/refman/5.0/en/union.html
    """
    subject,predicate,object_,context = quad
    # prune the partition list down to those that can match this pattern
    targetBRPs = BinaryRelationPartitionCoverage((subject,predicate,object_,context),BRPs)
    unionQueries = []
    unionQueriesParams = []
    for brp in targetBRPs:
        # only the first SELECT of the UNION carries the column aliases
        first = targetBRPs.index(brp) == 0
        if fetchContexts:
            query = "SELECT DISTINCT %s FROM %s %s WHERE "%(
                ','.join(brp.selectContextFields(first)),
                brp,
                brp._intersectionSQL
            )
        else:
            query = CROSS_BRP_QUERY_SQL%(
                ','.join(brp.selectFields(first)),
                brp,
                brp._intersectionSQL
            )
        whereClause,whereParameters = brp.generateWhereClause((subject,predicate,object_,context))
        unionQueries.append(query+whereClause)
        unionQueriesParams.extend(whereParameters)
    if fetchContexts:
        orderBySuffix = ''
    else:
        # sorting groups identical triples so adjacent rows share a triple
        orderBySuffix = orderByTriple and ' ORDER BY %s,%s,%s'%(SlotPrefixes[SUBJECT],SlotPrefixes[PREDICATE],SlotPrefixes[OBJECT]) or ''
    if len(unionQueries) == 1:
        query = unionQueries[0] + orderBySuffix
    else:
        query = ' union all '.join(['('+q+')' for q in unionQueries]) + orderBySuffix
    # append the pattern as a SQL comment, useful when reading the slow log
    query = query + ' # %s'%str(quad)
    try:
        cursor.execute(query,tuple(unionQueriesParams))
    except ValueError,e:
        # dump the failing query + parameters before re-raising
        print "## Query ##\n",query
        print "## Parameters ##\n",unionQueriesParams
        raise e
    if fetchall:
        qRT = cursor.fetchall()
    else:
        qRT = cursor.fetchone()
    return qRT
# Temporary table used to accumulate cross-partition query results.
CREATE_RESULT_TABLE = \
"""
CREATE TEMPORARY TABLE result (
subject text NOT NULL,
subjectTerm enum('F','V','U','B','L') NOT NULL,
predicate text NOT NULL,
predicateTerm enum('F','V','U','B','L') NOT NULL,
object text NOT NULL,
objectTerm enum('F','V','U','B','L') NOT NULL,
context text not NULL,
contextTerm enum('F','V','U','B','L') NOT NULL,
dataType text,
language char(3),
INDEX USING BTREE (context(50))
)
"""
# Template for the per-partition SELECT used in UNION queries.
CROSS_BRP_QUERY_SQL="SELECT DISTINCT %s FROM %s %s WHERE "
CROSS_BRP_RESULT_QUERY_SQL="SELECT * FROM result ORDER BY context"
# NOTE(review): looks malformed — MySQL expects "DROP TABLE result"; confirm
# whether this constant is actually executed anywhere.
DROP_RESULT_TABLE_SQL = "DROP result"
# Maps predicate-classification + object-classification keys (see
# BinaryRelationPartitionCoverage) to the partition classes that must be
# searched.  Single classes are bare (isinstance accepts them directly).
BRPQueryDecisionMap = {
    'WL':(NamedLiteralProperties),
    'WU':(AssociativeBox,NamedBinaryRelations),
    'WW':(NamedLiteralProperties,AssociativeBox,NamedBinaryRelations),
    'WR':(NamedLiteralProperties,AssociativeBox,NamedBinaryRelations), #Could be optimized to not include NamedBinaryRelations
    'RTL':(NamedLiteralProperties),
    'RTU':(NamedBinaryRelations,AssociativeBox),
    'RTR':(NamedLiteralProperties,AssociativeBox,NamedBinaryRelations), #Could be optimized to not include NamedBinaryRelations
    'TU':(AssociativeBox),
    'TW':(AssociativeBox),
    'TR':(AssociativeBox),
    'U_RNTL':(NamedLiteralProperties),
    'U_RNTU':(NamedBinaryRelations),
    'U_RNTW':(NamedLiteralProperties,NamedBinaryRelations),
    'U_RNTR':(NamedLiteralProperties,NamedBinaryRelations), #Could be optimized to not include NamedBinaryRelations
}
|
import logging
import os
from typing import Text, Tuple
import warnings
from rasa.constants import (
ENV_GPU_CONFIG,
ENV_CPU_INTER_OP_CONFIG,
ENV_CPU_INTRA_OP_CONFIG,
)
from tensorflow import config as tf_config
logger = logging.getLogger(__name__)
def setup_gpu_environment() -> None:
    """Set configuration for a GPU environment based on the environment variable set"""
    gpu_memory_config = os.getenv(ENV_GPU_CONFIG)
    if not gpu_memory_config:
        return
    parsed_gpu_config = parse_gpu_config(gpu_memory_config)
    physical_gpus = tf_config.list_physical_devices("GPU")
    if not physical_gpus:
        warnings.warn(
            f"You have an environment variable '{ENV_GPU_CONFIG}' set but no GPUs were detected to configure"
        )
        return
    # Logic taken from https://www.tensorflow.org/guide/gpu
    for gpu_id, gpu_id_memory in parsed_gpu_config.items():
        try:
            tf_config.experimental.set_virtual_device_configuration(
                physical_gpus[gpu_id],
                [
                    tf_config.experimental.VirtualDeviceConfiguration(
                        memory_limit=gpu_id_memory
                    )
                ],
            )
        except RuntimeError:
            # Add a helper explanation where the error comes from
            raise RuntimeError(
                "Error while setting up tensorflow environment. "
                "Virtual devices must be set before GPUs have been initialized"
            )
def parse_gpu_config(gpu_memory_config: Text):
    """Parse GPU configuration variable from a string to a dict"""
    # Input format: "gpu_id_1:gpu_id_1_memory, gpu_id_2: gpu_id_2_memory"
    # e.g. "0:1024, 1:2048" -> {0: 1024, 1: 2048}
    parsed_gpu_config = {}
    try:
        for instance in gpu_memory_config.split(","):
            gpu_id_str, gpu_mem_str = instance.split(":")
            parsed_gpu_config[int(gpu_id_str)] = int(gpu_mem_str)
    except ValueError:
        # Re-raise with a pointer to the offending environment variable
        raise ValueError(
            f"Error parsing GPU configuration. Please cross-check the format of '{ENV_GPU_CONFIG}'"
        )
    return parsed_gpu_config
def _parse_thread_count(raw_value: Text, env_name: Text) -> int:
    """Parse a thread-count environment value, raising a descriptive error."""
    try:
        return int(raw_value.strip())
    except ValueError:
        raise ValueError(
            f"Error parsing the environment variable '{env_name}'. Please "
            f"cross-check the value"
        )
def setup_cpu_environment() -> Tuple[int, int]:
    """Set configuration for the CPU environment based on the environment variable set"""
    inter_op_parallel_threads = os.getenv(ENV_CPU_INTER_OP_CONFIG)
    intra_op_parallel_threads = os.getenv(ENV_CPU_INTRA_OP_CONFIG)
    # The duplicated parse/raise logic for the two variables is factored into
    # _parse_thread_count; the error messages are unchanged.
    if inter_op_parallel_threads:
        tf_config.threading.set_inter_op_parallelism_threads(
            _parse_thread_count(inter_op_parallel_threads, ENV_CPU_INTER_OP_CONFIG)
        )
    if intra_op_parallel_threads:
        tf_config.threading.set_intra_op_parallelism_threads(
            _parse_thread_count(intra_op_parallel_threads, ENV_CPU_INTRA_OP_CONFIG)
        )
    # Returning the actual values as a confirmation. Helps with tests too.
    return (
        tf_config.threading.get_inter_op_parallelism_threads(),
        tf_config.threading.get_intra_op_parallelism_threads(),
    )
def setup_tf_environment() -> None:
    """Configure both the CPU and the GPU tensorflow environment.

    Adds the missing return annotation for consistency with the sibling
    setup_* functions, which are all annotated.
    """
    setup_cpu_environment()
    setup_gpu_environment()
fix type annotations
import logging
import os
from typing import Text, Tuple, Dict
import warnings
from rasa.constants import (
ENV_GPU_CONFIG,
ENV_CPU_INTER_OP_CONFIG,
ENV_CPU_INTRA_OP_CONFIG,
)
from tensorflow import config as tf_config
logger = logging.getLogger(__name__)
def setup_gpu_environment() -> None:
    """Set configuration for a GPU environment based on the environment variable set"""
    gpu_memory_config = os.getenv(ENV_GPU_CONFIG)
    if not gpu_memory_config:
        # nothing requested; leave tensorflow defaults untouched
        return
    parsed_gpu_config = parse_gpu_config(gpu_memory_config)
    physical_gpus = tf_config.list_physical_devices("GPU")
    if not physical_gpus:
        warnings.warn(
            f"You have an environment variable '{ENV_GPU_CONFIG}' set but no GPUs were detected to configure"
        )
        return
    # Logic taken from https://www.tensorflow.org/guide/gpu
    for gpu_id, gpu_id_memory in parsed_gpu_config.items():
        memory_limits = [
            tf_config.experimental.VirtualDeviceConfiguration(
                memory_limit=gpu_id_memory
            )
        ]
        try:
            tf_config.experimental.set_virtual_device_configuration(
                physical_gpus[gpu_id], memory_limits
            )
        except RuntimeError:
            # Add a helper explanation where the error comes from
            raise RuntimeError(
                "Error while setting up tensorflow environment. "
                "Virtual devices must be set before GPUs have been initialized"
            )
def parse_gpu_config(gpu_memory_config: Text) -> Dict[int, int]:
    """Parse GPU configuration variable from a string to a dict"""
    # Input format: "gpu_id_1:gpu_id_1_memory, gpu_id_2: gpu_id_2_memory"
    # e.g. "0:1024, 1:2048" -> {0: 1024, 1: 2048}
    parsed_gpu_config = {}
    try:
        for instance in gpu_memory_config.split(","):
            gpu_id_str, gpu_mem_str = instance.split(":")
            parsed_gpu_config[int(gpu_id_str)] = int(gpu_mem_str)
    except ValueError:
        # Re-raise with a pointer to the offending environment variable
        raise ValueError(
            f"Error parsing GPU configuration. Please cross-check the format of '{ENV_GPU_CONFIG}'"
        )
    return parsed_gpu_config
def _parse_thread_count(raw_value: Text, env_name: Text) -> int:
    """Parse a thread-count environment value, raising a descriptive error."""
    try:
        return int(raw_value.strip())
    except ValueError:
        raise ValueError(
            f"Error parsing the environment variable '{env_name}'. Please "
            f"cross-check the value"
        )
def setup_cpu_environment() -> Tuple[int, int]:
    """Set configuration for the CPU environment based on the environment variable set"""
    inter_op_parallel_threads = os.getenv(ENV_CPU_INTER_OP_CONFIG)
    intra_op_parallel_threads = os.getenv(ENV_CPU_INTRA_OP_CONFIG)
    # The duplicated parse/raise logic for the two variables is factored into
    # _parse_thread_count; the error messages are unchanged.
    if inter_op_parallel_threads:
        tf_config.threading.set_inter_op_parallelism_threads(
            _parse_thread_count(inter_op_parallel_threads, ENV_CPU_INTER_OP_CONFIG)
        )
    if intra_op_parallel_threads:
        tf_config.threading.set_intra_op_parallelism_threads(
            _parse_thread_count(intra_op_parallel_threads, ENV_CPU_INTRA_OP_CONFIG)
        )
    # Returning the actual values as a confirmation. Helps with tests too.
    return (
        tf_config.threading.get_inter_op_parallelism_threads(),
        tf_config.threading.get_intra_op_parallelism_threads(),
    )
def setup_tf_environment() -> None:
    """Configure both the CPU and the GPU tensorflow environment."""
    setup_cpu_environment()
    setup_gpu_environment()
|
# Tai Sakuma <tai.sakuma@gmail.com>
import math
import collections
import logging
from .ReturnTrue import ReturnTrue
##__________________________________________________________________||
class Round(object):
    """Map values to the lower boundary of fixed-width bins.

    Bins are anchored at ``aboundary`` (default: half the width) and extend
    in both directions on demand.  Values below ``min`` map to
    ``underflow_bin``, values at or above ``max`` map to ``overflow_bin``,
    and values rejected by ``valid`` (or infinite values) map to None.
    """
    def __init__(self, width=1, aboundary=None,
                 min=None, underflow_bin=None,
                 max=None, overflow_bin=None,
                 valid=ReturnTrue()):
        self.width = width
        self.aboundary = aboundary
        # integer half for even widths, float half otherwise
        self.halfWidth = self.width/2 if self.width % 2 == 0 else float(self.width)/2
        if aboundary is None:
            aboundary = self.halfWidth
        # Fix: seed with the single anchor boundary only.  The original seeded
        # [aboundary - width, aboundary, aboundary + width]; the extra entries
        # are redundant — _update_boundaries() extends the deque on demand in
        # both directions — and a latent source of inconsistency.
        self.boundaries = collections.deque([aboundary])
        self.min = min
        self.underflow_bin = underflow_bin
        self.max = max
        self.overflow_bin = overflow_bin
        self.valid = valid
    def __repr__(self):
        return '{}(width={!r}, aboundary={!r}, min={!r}, underflow_bin={!r}, max={!r}, overflow_bin={!r}, valid={!r})'.format(
            self.__class__.__name__,
            self.width,
            self.aboundary,
            self.min,
            self.underflow_bin,
            self.max,
            self.overflow_bin,
            self.valid
        )
    def __call__(self, val):
        return self._lower_boundary(val)
    def _lower_boundary(self, val):
        """Return the lower boundary of the bin containing val (or a sentinel)."""
        if not self.valid(val):
            return None
        if self.min is not None:
            if not self.min <= val:
                return self.underflow_bin
        if self.max is not None:
            if not val < self.max:
                return self.overflow_bin
        if math.isinf(val):
            logger = logging.getLogger(__name__)
            logger.warning('val={}. will return {}'.format(val, None))
            return None
        self._update_boundaries(val)
        # largest known boundary that is <= val
        bin = self.boundaries[0]
        for b in self.boundaries:
            if b <= val:
                bin = b
            else:
                break
        return bin
    def _update_boundaries(self, val):
        # extend the boundary lattice until it brackets val on both sides
        while val < self.boundaries[0]:
            self.boundaries.appendleft(self.boundaries[0] - self.width)
        while val > self.boundaries[-1]:
            self.boundaries.append(self.boundaries[-1] + self.width)
    def next(self, bin):
        """Return the lower boundary of the bin immediately above ``bin``."""
        return self._next_lower_boundary(bin)
    def _next_lower_boundary(self, bin):
        bin = self._lower_boundary(bin)
        if bin is None:
            return None
        if bin == self.underflow_bin:
            return self._lower_boundary(self.min)
        if bin == self.overflow_bin:
            return self.overflow_bin
        self._update_boundaries(bin)
        # 1.001 factor nudges past float rounding when stepping one bin up
        return self._lower_boundary(bin + self.width*1.001)
##__________________________________________________________________||
fix a potential bug, clean code in Round
# Tai Sakuma <tai.sakuma@gmail.com>
import math
import collections
import logging
from .ReturnTrue import ReturnTrue
##__________________________________________________________________||
class Round(object):
    """Map values to the lower boundary of fixed-width bins.

    Bins are anchored at ``aboundary`` (default: half the width) and extend
    in both directions on demand.  Values below ``min`` map to
    ``underflow_bin``, values at or above ``max`` map to ``overflow_bin``,
    and values rejected by ``valid`` (or infinite values) map to None.
    """
    def __init__(self, width=1, aboundary=None,
                 min=None, underflow_bin=None,
                 max=None, overflow_bin=None,
                 valid=ReturnTrue()):
        self.width = width
        self.aboundary = aboundary
        # integer half for even widths, float half otherwise
        halfWidth = self.width/2 if self.width % 2 == 0 else float(self.width)/2
        if aboundary is None:
            aboundary = halfWidth
        # single anchor; _update_boundaries() extends on demand
        self.boundaries = collections.deque([aboundary])
        self.min = min
        self.underflow_bin = underflow_bin
        self.max = max
        self.overflow_bin = overflow_bin
        self.valid = valid
    def __repr__(self):
        return '{}(width={!r}, aboundary={!r}, min={!r}, underflow_bin={!r}, max={!r}, overflow_bin={!r}, valid={!r})'.format(
            self.__class__.__name__,
            self.width,
            self.aboundary,
            self.min,
            self.underflow_bin,
            self.max,
            self.overflow_bin,
            self.valid
        )
    def __call__(self, val):
        return self._lower_boundary(val)
    def _lower_boundary(self, val):
        # Returns the lower boundary of the bin containing val, or one of
        # the underflow/overflow sentinels, or None for invalid/inf input.
        if not self.valid(val):
            return None
        if self.min is not None:
            if not self.min <= val:
                return self.underflow_bin
        if self.max is not None:
            if not val < self.max:
                return self.overflow_bin
        if math.isinf(val):
            logger = logging.getLogger(__name__)
            logger.warning('val={}. will return {}'.format(val, None))
            return None
        self._update_boundaries(val)
        # largest known boundary that is <= val
        bin = self.boundaries[0]
        for b in self.boundaries:
            if b <= val:
                bin = b
            else:
                break
        return bin
    def _update_boundaries(self, val):
        # extend the boundary lattice until it brackets val on both sides
        while val < self.boundaries[0]:
            self.boundaries.appendleft(self.boundaries[0] - self.width)
        while val > self.boundaries[-1]:
            self.boundaries.append(self.boundaries[-1] + self.width)
    def next(self, bin):
        # Lower boundary of the bin immediately above the one containing bin.
        return self._next_lower_boundary(bin)
    def _next_lower_boundary(self, bin):
        bin = self._lower_boundary(bin)
        if bin is None:
            return None
        if bin == self.underflow_bin:
            return self._lower_boundary(self.min)
        if bin == self.overflow_bin:
            return self.overflow_bin
        self._update_boundaries(bin)
        # 1.001 factor nudges past float rounding when stepping one bin up
        return self._lower_boundary(bin + self.width*1.001)
##__________________________________________________________________||
|
import io
from fontTools.misc.py23 import tobytes, tostr
from fontTools.misc.testTools import getXML
from fontTools import subset
from fontTools.fontBuilder import FontBuilder
from fontTools.pens.ttGlyphPen import TTGlyphPen
from fontTools.ttLib import TTFont, newTable
from fontTools.ttLib.tables import otTables as ot
from fontTools.misc.loggingTools import CapturingLogHandler
import difflib
import logging
import os
import shutil
import sys
import tempfile
import unittest
import pathlib
import pytest
class SubsetTest(unittest.TestCase):
    """Round-trip tests for fontTools.subset.

    Each test compiles a TTX fixture into a binary font, runs the subsetter
    (usually via ``subset.main()``) with specific options, and then either
    compares a TTX dump of the result with an expected TTX file or inspects
    the subset font object directly.
    """

    def __init__(self, methodName):
        unittest.TestCase.__init__(self, methodName)
        # Python 3 renamed assertRaisesRegexp to assertRaisesRegex,
        # and fires deprecation warnings if a program uses the old name.
        if not hasattr(self, "assertRaisesRegex"):
            self.assertRaisesRegex = self.assertRaisesRegexp

    def setUp(self):
        # Temp dir is created lazily by temp_path() on first use.
        self.tempdir = None
        self.num_tempfiles = 0

    def tearDown(self):
        if self.tempdir:
            shutil.rmtree(self.tempdir)

    @staticmethod
    def getpath(testfile):
        # Resolve a fixture file relative to this module's "data" directory.
        path, _ = os.path.split(__file__)
        return os.path.join(path, "data", testfile)

    def temp_path(self, suffix):
        # Hand out uniquely-numbered file names inside one shared temp dir.
        if not self.tempdir:
            self.tempdir = tempfile.mkdtemp()
        self.num_tempfiles += 1
        return os.path.join(self.tempdir,
                            "tmp%d%s" % (self.num_tempfiles, suffix))

    def read_ttx(self, path):
        lines = []
        with open(path, "r", encoding="utf-8") as ttx:
            for line in ttx.readlines():
                # Elide ttFont attributes because ttLibVersion may change,
                # and use os-native line separators so we can run difflib.
                if line.startswith("<ttFont "):
                    lines.append("<ttFont>" + os.linesep)
                else:
                    lines.append(line.rstrip() + os.linesep)
        return lines

    def expect_ttx(self, font, expected_ttx, tables=None):
        # Dump `font` to TTX and fail with a unified diff on any mismatch.
        path = self.temp_path(suffix=".ttx")
        font.saveXML(path, tables=tables)
        actual = self.read_ttx(path)
        expected = self.read_ttx(expected_ttx)
        if actual != expected:
            for line in difflib.unified_diff(
                    expected, actual, fromfile=expected_ttx, tofile=path):
                sys.stdout.write(line)
            self.fail("TTX output is different from expected")

    def compile_font(self, path, suffix):
        # Compile a TTX fixture to a binary font without touching bboxes
        # or timestamps, so output is stable across runs.
        savepath = self.temp_path(suffix=suffix)
        font = TTFont(recalcBBoxes=False, recalcTimestamp=False)
        font.importXML(path)
        font.save(savepath, reorderTables=None)
        return font, savepath

    # -----
    # Tests
    # -----

    def test_layout_scripts(self):
        _, fontpath = self.compile_font(self.getpath("layout_scripts.ttx"), ".otf")
        subsetpath = self.temp_path(".otf")
        subset.main([fontpath, "--glyphs=*", "--layout-features=*",
                     "--layout-scripts=latn,arab.URD,arab.dflt",
                     "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_layout_scripts.ttx"),
                        ["GPOS", "GSUB"])

    def test_no_notdef_outline_otf(self):
        _, fontpath = self.compile_font(self.getpath("TestOTF-Regular.ttx"), ".otf")
        subsetpath = self.temp_path(".otf")
        subset.main([fontpath, "--no-notdef-outline", "--gids=0", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_no_notdef_outline_otf.ttx"), ["CFF "])

    def test_no_notdef_outline_cid(self):
        _, fontpath = self.compile_font(self.getpath("TestCID-Regular.ttx"), ".otf")
        subsetpath = self.temp_path(".otf")
        subset.main([fontpath, "--no-notdef-outline", "--gids=0", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_no_notdef_outline_cid.ttx"), ["CFF "])

    def test_no_notdef_outline_ttf(self):
        _, fontpath = self.compile_font(self.getpath("TestTTF-Regular.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--no-notdef-outline", "--gids=0", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_no_notdef_outline_ttf.ttx"), ["glyf", "hmtx"])

    def test_subset_ankr(self):
        _, fontpath = self.compile_font(self.getpath("TestANKR.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--glyphs=one", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_ankr.ttx"), ["ankr"])

    def test_subset_ankr_remove(self):
        _, fontpath = self.compile_font(self.getpath("TestANKR.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--glyphs=two", "--output-file=%s" % subsetpath])
        self.assertNotIn("ankr", TTFont(subsetpath))

    def test_subset_bsln_format_0(self):
        _, fontpath = self.compile_font(self.getpath("TestBSLN-0.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--glyphs=one", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_bsln_0.ttx"), ["bsln"])

    def test_subset_bsln_format_0_from_format_1(self):
        # TestBSLN-1 defines the ideographic baseline to be the font's default,
        # and specifies that glyphs {.notdef, zero, one, two} use the roman
        # baseline instead of the default ideographic baseline. As we request
        # a subsetted font with {zero, one} and the implicit .notdef, all
        # glyphs in the resulting font use the Roman baseline. In this case,
        # we expect a format 0 'bsln' table because it is the most compact.
        _, fontpath = self.compile_font(self.getpath("TestBSLN-1.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--unicodes=U+0030-0031",
                     "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_bsln_0.ttx"), ["bsln"])

    def test_subset_bsln_format_1(self):
        # TestBSLN-1 defines the ideographic baseline to be the font's default,
        # and specifies that glyphs {.notdef, zero, one, two} use the roman
        # baseline instead of the default ideographic baseline. We request
        # a subset where the majority of glyphs use the roman baseline,
        # but one single glyph (uni2EA2) is ideographic. In the resulting
        # subsetted font, we expect a format 1 'bsln' table whose default
        # is Roman, but with an override that uses the ideographic baseline
        # for uni2EA2.
        _, fontpath = self.compile_font(self.getpath("TestBSLN-1.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--unicodes=U+0030-0031,U+2EA2",
                     "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_bsln_1.ttx"), ["bsln"])

    def test_subset_bsln_format_2(self):
        # The 'bsln' table in TestBSLN-2 refers to control points in glyph 'P'
        # for defining its baselines. Therefore, the subsetted font should
        # include this glyph even though it is not requested explicitly.
        _, fontpath = self.compile_font(self.getpath("TestBSLN-2.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--glyphs=one", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_bsln_2.ttx"), ["bsln"])

    def test_subset_bsln_format_2_from_format_3(self):
        # TestBSLN-3 defines the ideographic baseline to be the font's default,
        # and specifies that glyphs {.notdef, zero, one, two, P} use the roman
        # baseline instead of the default ideographic baseline. As we request
        # a subsetted font with zero and the implicit .notdef and P for
        # baseline measurement, all glyphs in the resulting font use the Roman
        # baseline. In this case, we expect a format 2 'bsln' table because it
        # is the most compact encoding.
        _, fontpath = self.compile_font(self.getpath("TestBSLN-3.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--unicodes=U+0030",
                     "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_bsln_2.ttx"), ["bsln"])

    def test_subset_bsln_format_3(self):
        # TestBSLN-3 defines the ideographic baseline to be the font's default,
        # and specifies that glyphs {.notdef, zero, one, two} use the roman
        # baseline instead of the default ideographic baseline. We request
        # a subset where the majority of glyphs use the roman baseline,
        # but one single glyph (uni2EA2) is ideographic. In the resulting
        # subsetted font, we expect a format 1 'bsln' table whose default
        # is Roman, but with an override that uses the ideographic baseline
        # for uni2EA2.
        _, fontpath = self.compile_font(self.getpath("TestBSLN-3.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--unicodes=U+0030-0031,U+2EA2",
                     "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_bsln_3.ttx"), ["bsln"])

    def test_subset_clr(self):
        _, fontpath = self.compile_font(self.getpath("TestCLR-Regular.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--glyphs=smileface", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_keep_colr.ttx"), ["GlyphOrder", "hmtx", "glyf", "COLR", "CPAL"])

    def test_subset_gvar(self):
        _, fontpath = self.compile_font(self.getpath("TestGVAR.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--unicodes=U+002B,U+2212", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_keep_gvar.ttx"), ["GlyphOrder", "avar", "fvar", "gvar", "name"])

    def test_subset_gvar_notdef_outline(self):
        _, fontpath = self.compile_font(self.getpath("TestGVAR.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--unicodes=U+0030", "--notdef_outline", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_keep_gvar_notdef_outline.ttx"), ["GlyphOrder", "avar", "fvar", "gvar", "name"])

    def test_subset_lcar_remove(self):
        _, fontpath = self.compile_font(self.getpath("TestLCAR-0.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--glyphs=one", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.assertNotIn("lcar", subsetfont)

    def test_subset_lcar_format_0(self):
        _, fontpath = self.compile_font(self.getpath("TestLCAR-0.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--unicodes=U+FB01",
                     "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_lcar_0.ttx"), ["lcar"])

    def test_subset_lcar_format_1(self):
        _, fontpath = self.compile_font(self.getpath("TestLCAR-1.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--unicodes=U+FB01",
                     "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_lcar_1.ttx"), ["lcar"])

    def test_subset_math(self):
        _, fontpath = self.compile_font(self.getpath("TestMATH-Regular.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--unicodes=U+0041,U+0028,U+0302,U+1D400,U+1D435", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_keep_math.ttx"), ["GlyphOrder", "CFF ", "MATH", "hmtx"])

    def test_subset_math_partial(self):
        _, fontpath = self.compile_font(self.getpath("test_math_partial.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--text=A", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_math_partial.ttx"), ["MATH"])

    def test_subset_opbd_remove(self):
        # In the test font, only the glyphs 'A' and 'zero' have an entry in
        # the Optical Bounds table. When subsetting, we do not request any
        # of those glyphs. Therefore, the produced subsetted font should
        # not contain an 'opbd' table.
        _, fontpath = self.compile_font(self.getpath("TestOPBD-0.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--glyphs=one", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.assertNotIn("opbd", subsetfont)

    def test_subset_opbd_format_0(self):
        _, fontpath = self.compile_font(self.getpath("TestOPBD-0.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--glyphs=A", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_opbd_0.ttx"), ["opbd"])

    def test_subset_opbd_format_1(self):
        _, fontpath = self.compile_font(self.getpath("TestOPBD-1.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--glyphs=A", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_opbd_1.ttx"), ["opbd"])

    def test_subset_prop_remove_default_zero(self):
        # If all glyphs have an AAT glyph property with value 0,
        # the "prop" table should be removed from the subsetted font.
        _, fontpath = self.compile_font(self.getpath("TestPROP.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--unicodes=U+0041",
                     "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.assertNotIn("prop", subsetfont)

    def test_subset_prop_0(self):
        # If all glyphs share the same AAT glyph properties, the "prop" table
        # in the subsetted font should use format 0.
        #
        # Unless the shared value is zero, in which case the subsetted font
        # should have no "prop" table at all. But that case has already been
        # tested above in test_subset_prop_remove_default_zero().
        _, fontpath = self.compile_font(self.getpath("TestPROP.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--unicodes=U+0030-0032", "--no-notdef-glyph",
                     "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_prop_0.ttx"), ["prop"])

    def test_subset_prop_1(self):
        # If not all glyphs share the same AAT glyph properties, the subsetted
        # font should contain a "prop" table in format 1. To save space, the
        # DefaultProperties should be set to the most frequent value.
        _, fontpath = self.compile_font(self.getpath("TestPROP.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--unicodes=U+0030-0032", "--notdef-outline",
                     "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_prop_1.ttx"), ["prop"])

    def test_options(self):
        # https://github.com/fonttools/fonttools/issues/413
        opt1 = subset.Options()
        self.assertTrue('Xyz-' not in opt1.layout_features)
        opt2 = subset.Options()
        opt2.layout_features.append('Xyz-')
        self.assertTrue('Xyz-' in opt2.layout_features)
        self.assertTrue('Xyz-' not in opt1.layout_features)

    def test_google_color(self):
        _, fontpath = self.compile_font(self.getpath("google_color.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--gids=0,1", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.assertTrue("CBDT" in subsetfont)
        self.assertTrue("CBLC" in subsetfont)
        self.assertTrue("x" in subsetfont['CBDT'].strikeData[0])
        self.assertFalse("y" in subsetfont['CBDT'].strikeData[0])

    def test_google_color_all(self):
        _, fontpath = self.compile_font(self.getpath("google_color.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--unicodes=*", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.assertTrue("x" in subsetfont['CBDT'].strikeData[0])
        self.assertTrue("y" in subsetfont['CBDT'].strikeData[0])

    def test_sbix(self):
        _, fontpath = self.compile_font(self.getpath("sbix.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--gids=0,1", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath(
            "expect_sbix.ttx"), ["sbix"])

    def test_timing_publishes_parts(self):
        _, fontpath = self.compile_font(self.getpath("TestTTF-Regular.ttx"), ".ttf")
        options = subset.Options()
        options.timing = True
        subsetter = subset.Subsetter(options)
        subsetter.populate(text='ABC')
        font = TTFont(fontpath)
        with CapturingLogHandler('fontTools.subset.timer', logging.DEBUG) as captor:
            subsetter.subset(font)
        logs = captor.records
        self.assertTrue(len(logs) > 5)
        self.assertEqual(len(logs), len([l for l in logs if 'msg' in l.args and 'time' in l.args]))
        # Look for a few things we know should happen.
        # BUGFIX: the previous assertTrue(filter(lambda ...)) checks could
        # never fail, because in Python 3 a filter object is always truthy
        # even when it yields nothing; any() performs the intended check.
        self.assertTrue(any(l.args['msg'] == "load 'cmap'" for l in logs))
        self.assertTrue(any(l.args['msg'] == "subset 'cmap'" for l in logs))
        self.assertTrue(any(l.args['msg'] == "subset 'glyf'" for l in logs))

    def test_passthrough_tables(self):
        _, fontpath = self.compile_font(self.getpath("TestTTF-Regular.ttx"), ".ttf")
        font = TTFont(fontpath)
        unknown_tag = 'ZZZZ'
        unknown_table = newTable(unknown_tag)
        unknown_table.data = b'\0'*10
        font[unknown_tag] = unknown_table
        font.save(fontpath)
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        # tables we can't subset are dropped by default
        self.assertFalse(unknown_tag in subsetfont)
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--passthrough-tables", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        # unknown tables are kept if --passthrough-tables option is passed
        self.assertTrue(unknown_tag in subsetfont)

    def test_non_BMP_text_arg_input(self):
        _, fontpath = self.compile_font(
            self.getpath("TestTTF-Regular_non_BMP_char.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        text = tostr(u"A\U0001F6D2", encoding='utf-8')
        subset.main([fontpath, "--text=%s" % text, "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.assertEqual(subsetfont['maxp'].numGlyphs, 3)
        self.assertEqual(subsetfont.getGlyphOrder(), ['.notdef', 'A', 'u1F6D2'])

    def test_non_BMP_text_file_input(self):
        _, fontpath = self.compile_font(
            self.getpath("TestTTF-Regular_non_BMP_char.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        text = tobytes(u"A\U0001F6D2", encoding='utf-8')
        with tempfile.NamedTemporaryFile(delete=False) as tmp:
            tmp.write(text)
        try:
            subset.main([fontpath, "--text-file=%s" % tmp.name,
                         "--output-file=%s" % subsetpath])
            subsetfont = TTFont(subsetpath)
        finally:
            os.remove(tmp.name)
        self.assertEqual(subsetfont['maxp'].numGlyphs, 3)
        self.assertEqual(subsetfont.getGlyphOrder(), ['.notdef', 'A', 'u1F6D2'])

    def test_no_hinting_CFF(self):
        ttxpath = self.getpath("Lobster.subset.ttx")
        _, fontpath = self.compile_font(ttxpath, ".otf")
        subsetpath = self.temp_path(".otf")
        subset.main([fontpath, "--no-hinting", "--notdef-outline",
                     "--output-file=%s" % subsetpath, "*"])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath(
            "expect_no_hinting_CFF.ttx"), ["CFF "])

    def test_desubroutinize_CFF(self):
        ttxpath = self.getpath("Lobster.subset.ttx")
        _, fontpath = self.compile_font(ttxpath, ".otf")
        subsetpath = self.temp_path(".otf")
        subset.main([fontpath, "--desubroutinize", "--notdef-outline",
                     "--output-file=%s" % subsetpath, "*"])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath(
            "expect_desubroutinize_CFF.ttx"), ["CFF "])

    def test_desubroutinize_hinted_subrs_CFF(self):
        ttxpath = self.getpath("test_hinted_subrs_CFF.ttx")
        _, fontpath = self.compile_font(ttxpath, ".otf")
        subsetpath = self.temp_path(".otf")
        subset.main([fontpath, "--desubroutinize", "--notdef-outline",
                     "--output-file=%s" % subsetpath, "*"])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath(
            "test_hinted_subrs_CFF.desub.ttx"), ["CFF "])

    def test_desubroutinize_cntrmask_CFF(self):
        ttxpath = self.getpath("test_cntrmask_CFF.ttx")
        _, fontpath = self.compile_font(ttxpath, ".otf")
        subsetpath = self.temp_path(".otf")
        subset.main([fontpath, "--desubroutinize", "--notdef-outline",
                     "--output-file=%s" % subsetpath, "*"])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath(
            "test_cntrmask_CFF.desub.ttx"), ["CFF "])

    def test_no_hinting_desubroutinize_CFF(self):
        ttxpath = self.getpath("test_hinted_subrs_CFF.ttx")
        _, fontpath = self.compile_font(ttxpath, ".otf")
        subsetpath = self.temp_path(".otf")
        subset.main([fontpath, "--no-hinting", "--desubroutinize", "--notdef-outline",
                     "--output-file=%s" % subsetpath, "*"])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath(
            "expect_no_hinting_desubroutinize_CFF.ttx"), ["CFF "])

    def test_no_hinting_TTF(self):
        _, fontpath = self.compile_font(self.getpath("TestTTF-Regular.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--no-hinting", "--notdef-outline",
                     "--output-file=%s" % subsetpath, "*"])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath(
            "expect_no_hinting_TTF.ttx"), ["glyf", "maxp"])
        for tag in subset.Options().hinting_tables:
            self.assertTrue(tag not in subsetfont)

    def test_notdef_width_cid(self):
        # https://github.com/fonttools/fonttools/pull/845
        _, fontpath = self.compile_font(self.getpath("NotdefWidthCID-Regular.ttx"), ".otf")
        subsetpath = self.temp_path(".otf")
        subset.main([fontpath, "--no-notdef-outline", "--gids=0,1", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_notdef_width_cid.ttx"), ["CFF "])

    def test_recalc_bounds_ttf(self):
        ttxpath = self.getpath("TestTTF-Regular.ttx")
        font = TTFont()
        font.importXML(ttxpath)
        head = font['head']
        bounds = [head.xMin, head.yMin, head.xMax, head.yMax]
        _, fontpath = self.compile_font(ttxpath, ".ttf")
        subsetpath = self.temp_path(".ttf")
        # by default, the subsetter does not recalculate the bounding box
        subset.main([fontpath, "--output-file=%s" % subsetpath, "*"])
        head = TTFont(subsetpath)['head']
        self.assertEqual(bounds, [head.xMin, head.yMin, head.xMax, head.yMax])
        subset.main([fontpath, "--recalc-bounds", "--output-file=%s" % subsetpath, "*"])
        head = TTFont(subsetpath)['head']
        bounds = [132, 304, 365, 567]
        self.assertEqual(bounds, [head.xMin, head.yMin, head.xMax, head.yMax])

    def test_recalc_bounds_otf(self):
        ttxpath = self.getpath("TestOTF-Regular.ttx")
        font = TTFont()
        font.importXML(ttxpath)
        head = font['head']
        bounds = [head.xMin, head.yMin, head.xMax, head.yMax]
        _, fontpath = self.compile_font(ttxpath, ".otf")
        subsetpath = self.temp_path(".otf")
        # by default, the subsetter does not recalculate the bounding box
        subset.main([fontpath, "--output-file=%s" % subsetpath, "*"])
        head = TTFont(subsetpath)['head']
        self.assertEqual(bounds, [head.xMin, head.yMin, head.xMax, head.yMax])
        subset.main([fontpath, "--recalc-bounds", "--output-file=%s" % subsetpath, "*"])
        head = TTFont(subsetpath)['head']
        bounds = [132, 304, 365, 567]
        self.assertEqual(bounds, [head.xMin, head.yMin, head.xMax, head.yMax])

    def test_recalc_timestamp_ttf(self):
        ttxpath = self.getpath("TestTTF-Regular.ttx")
        font = TTFont()
        font.importXML(ttxpath)
        modified = font['head'].modified
        _, fontpath = self.compile_font(ttxpath, ".ttf")
        subsetpath = self.temp_path(".ttf")
        # by default, the subsetter does not recalculate the modified timestamp
        subset.main([fontpath, "--output-file=%s" % subsetpath, "*"])
        self.assertEqual(modified, TTFont(subsetpath)['head'].modified)
        subset.main([fontpath, "--recalc-timestamp", "--output-file=%s" % subsetpath, "*"])
        self.assertLess(modified, TTFont(subsetpath)['head'].modified)

    def test_recalc_timestamp_otf(self):
        ttxpath = self.getpath("TestOTF-Regular.ttx")
        font = TTFont()
        font.importXML(ttxpath)
        modified = font['head'].modified
        _, fontpath = self.compile_font(ttxpath, ".otf")
        subsetpath = self.temp_path(".otf")
        # by default, the subsetter does not recalculate the modified timestamp
        subset.main([fontpath, "--output-file=%s" % subsetpath, "*"])
        self.assertEqual(modified, TTFont(subsetpath)['head'].modified)
        subset.main([fontpath, "--recalc-timestamp", "--output-file=%s" % subsetpath, "*"])
        self.assertLess(modified, TTFont(subsetpath)['head'].modified)

    def test_recalc_max_context(self):
        ttxpath = self.getpath("Lobster.subset.ttx")
        font = TTFont()
        font.importXML(ttxpath)
        max_context = font['OS/2'].usMaxContext
        _, fontpath = self.compile_font(ttxpath, ".otf")
        subsetpath = self.temp_path(".otf")
        # by default, the subsetter does not recalculate the usMaxContext
        subset.main([fontpath, "--drop-tables+=GSUB,GPOS",
                     "--output-file=%s" % subsetpath])
        self.assertEqual(max_context, TTFont(subsetpath)['OS/2'].usMaxContext)
        subset.main([fontpath, "--recalc-max-context",
                     "--drop-tables+=GSUB,GPOS",
                     "--output-file=%s" % subsetpath])
        self.assertEqual(0, TTFont(subsetpath)['OS/2'].usMaxContext)

    def test_retain_gids_ttf(self):
        _, fontpath = self.compile_font(self.getpath("TestTTF-Regular.ttx"), ".ttf")
        font = TTFont(fontpath)
        self.assertEqual(font["hmtx"]["A"], (500, 132))
        self.assertEqual(font["hmtx"]["B"], (400, 132))
        self.assertGreater(font["glyf"]["A"].numberOfContours, 0)
        self.assertGreater(font["glyf"]["B"].numberOfContours, 0)
        subsetpath = self.temp_path(".ttf")
        subset.main(
            [
                fontpath,
                "--retain-gids",
                "--output-file=%s" % subsetpath,
                "--glyph-names",
                "B",
            ]
        )
        subsetfont = TTFont(subsetpath)
        self.assertEqual(subsetfont.getGlyphOrder(), font.getGlyphOrder()[0:3])
        hmtx = subsetfont["hmtx"]
        self.assertEqual(hmtx["A"], (  0,   0))
        self.assertEqual(hmtx["B"], (400, 132))
        glyf = subsetfont["glyf"]
        self.assertEqual(glyf["A"].numberOfContours, 0)
        self.assertGreater(glyf["B"].numberOfContours, 0)

    def test_retain_gids_cff(self):
        _, fontpath = self.compile_font(self.getpath("TestOTF-Regular.ttx"), ".otf")
        font = TTFont(fontpath)
        self.assertEqual(font["hmtx"]["A"], (500, 132))
        self.assertEqual(font["hmtx"]["B"], (400, 132))
        self.assertEqual(font["hmtx"]["C"], (500, 0))
        font["CFF "].cff[0].decompileAllCharStrings()
        cs = font["CFF "].cff[0].CharStrings
        self.assertGreater(len(cs["A"].program), 0)
        self.assertGreater(len(cs["B"].program), 0)
        self.assertGreater(len(cs["C"].program), 0)
        subsetpath = self.temp_path(".otf")
        subset.main(
            [
                fontpath,
                "--retain-gids",
                "--output-file=%s" % subsetpath,
                "--glyph-names",
                "B",
            ]
        )
        subsetfont = TTFont(subsetpath)
        self.assertEqual(subsetfont.getGlyphOrder(), font.getGlyphOrder()[0:3])
        hmtx = subsetfont["hmtx"]
        self.assertEqual(hmtx["A"], (0, 0))
        self.assertEqual(hmtx["B"], (400, 132))
        subsetfont["CFF "].cff[0].decompileAllCharStrings()
        cs = subsetfont["CFF "].cff[0].CharStrings
        self.assertEqual(cs["A"].program, ["endchar"])
        self.assertGreater(len(cs["B"].program), 0)

    def test_retain_gids_cff2(self):
        ttx_path = self.getpath("../../varLib/data/master_ttx_varfont_otf/TestCFF2VF.ttx")
        font, fontpath = self.compile_font(ttx_path, ".otf")
        self.assertEqual(font["hmtx"]["A"], (600, 31))
        self.assertEqual(font["hmtx"]["T"], (600, 41))
        font["CFF2"].cff[0].decompileAllCharStrings()
        cs = font["CFF2"].cff[0].CharStrings
        self.assertGreater(len(cs["A"].program), 0)
        self.assertGreater(len(cs["T"].program), 0)
        subsetpath = self.temp_path(".otf")
        subset.main(
            [
                fontpath,
                "--retain-gids",
                "--output-file=%s" % subsetpath,
                "T",
            ]
        )
        subsetfont = TTFont(subsetpath)
        self.assertEqual(len(subsetfont.getGlyphOrder()), len(font.getGlyphOrder()[0:3]))
        hmtx = subsetfont["hmtx"]
        self.assertEqual(hmtx["glyph00001"], (  0,  0))
        self.assertEqual(hmtx["T"], (600, 41))
        subsetfont["CFF2"].cff[0].decompileAllCharStrings()
        cs = subsetfont["CFF2"].cff[0].CharStrings
        self.assertEqual(cs["glyph00001"].program, [])
        self.assertGreater(len(cs["T"].program), 0)

    def test_HVAR_VVAR(self):
        _, fontpath = self.compile_font(self.getpath("TestHVVAR.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--text=BD", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_HVVAR.ttx"), ["GlyphOrder", "HVAR", "VVAR", "avar", "fvar"])

    def test_HVAR_VVAR_retain_gids(self):
        _, fontpath = self.compile_font(self.getpath("TestHVVAR.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--text=BD", "--retain-gids", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_HVVAR_retain_gids.ttx"), ["GlyphOrder", "HVAR", "VVAR", "avar", "fvar"])

    def test_subset_flavor(self):
        _, fontpath = self.compile_font(self.getpath("TestTTF-Regular.ttx"), ".ttf")
        font = TTFont(fontpath)
        woff_path = self.temp_path(".woff")
        subset.main(
            [
                fontpath,
                "*",
                "--flavor=woff",
                "--output-file=%s" % woff_path,
            ]
        )
        woff = TTFont(woff_path)
        self.assertEqual(woff.flavor, "woff")
        woff2_path = self.temp_path(".woff2")
        subset.main(
            [
                woff_path,
                "*",
                "--flavor=woff2",
                "--output-file=%s" % woff2_path,
            ]
        )
        woff2 = TTFont(woff2_path)
        self.assertEqual(woff2.flavor, "woff2")
        ttf_path = self.temp_path(".ttf")
        subset.main(
            [
                woff2_path,
                "*",
                "--output-file=%s" % ttf_path,
            ]
        )
        ttf = TTFont(ttf_path)
        self.assertEqual(ttf.flavor, None)

    def test_subset_context_subst_format_3(self):
        # https://github.com/fonttools/fonttools/issues/1879
        # Test font contains 'calt' feature with Format 3 ContextSubst lookup subtables
        ttx = self.getpath("TestContextSubstFormat3.ttx")
        font, fontpath = self.compile_font(ttx, ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--unicodes=*", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        # check all glyphs are kept via GSUB closure, no changes expected
        self.expect_ttx(subsetfont, ttx)

    def test_cmap_prune_format12(self):
        _, fontpath = self.compile_font(self.getpath("CmapSubsetTest.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--glyphs=a", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("CmapSubsetTest.subset.ttx"), ["cmap"])

    def test_GPOS_PairPos_Format2_useClass0(self):
        # Check two things related to class 0 ('every other glyph'):
        # 1) that it's reused for ClassDef1 when it becomes empty as the subset glyphset
        #    is intersected with the table's Coverage
        # 2) that it is never reused for ClassDef2 even when it happens to become empty
        #    because of the subset glyphset. In this case, we don't keep a PairPosClass2
        #    subtable if only ClassDef2's class0 survived subsetting.
        # The test font (from Harfbuzz test suite) is constructed to trigger these two
        # situations depending on the input subset --text.
        # https://github.com/fonttools/fonttools/pull/2221
        _, fontpath = self.compile_font(
            self.getpath("GPOS_PairPos_Format2_PR_2221.ttx"), ".ttf"
        )
        subsetpath = self.temp_path(".ttf")
        for n, text in enumerate("!#", start=1):
            expected_ttx = self.getpath(
                f"GPOS_PairPos_Format2_ClassDef{n}_useClass0.subset.ttx"
            )
            with self.subTest(text=text, expected_ttx=expected_ttx):
                subset.main(
                    [
                        fontpath,
                        f"--text='{text}'",
                        "--layout-features+=test",
                        "--output-file=%s" % subsetpath,
                    ]
                )
                subsetfont = TTFont(subsetpath)
                self.expect_ttx(subsetfont, expected_ttx, ["GPOS"])

    def test_GPOS_SinglePos_prune_post_subset_no_value(self):
        _, fontpath = self.compile_font(
            self.getpath("GPOS_SinglePos_no_value_issue_2312.ttx"), ".ttf"
        )
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "*", "--glyph-names", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(
            subsetfont,
            self.getpath("GPOS_SinglePos_no_value_issue_2312.subset.ttx"),
            ["GlyphOrder", "GPOS"],
        )
@pytest.fixture
def featureVarsTestFont():
    """Build an in-memory variable TTF with a discretionary 'dlig' feature
    and an 'rvrn' feature variation, for the feature-variation subset tests."""
    builder = FontBuilder(unitsPerEm=100)
    builder.setupGlyphOrder([".notdef", "f", "f_f", "dollar", "dollar.rvrn"])
    builder.setupCharacterMap({ord("f"): "f", ord("$"): "dollar"})
    builder.setupNameTable({"familyName": "TestFeatureVars", "styleName": "Regular"})
    builder.setupPost()
    builder.setupFvar(axes=[("wght", 100, 400, 900, "Weight")], instances=[])
    builder.addOpenTypeFeatures("""\
feature dlig {
    sub f f by f_f;
} dlig;
""")
    builder.addFeatureVariations(
        [([{"wght": (0.20886, 1.0)}], {"dollar": "dollar.rvrn"})],
        featureTag="rvrn"
    )
    stream = io.BytesIO()
    builder.save(stream)
    stream.seek(0)
    return TTFont(stream)
def test_subset_feature_variations_keep_all(featureVarsTestFont):
    """With default options, required 'rvrn' survives while discretionary
    'dlig' (and the glyphs only it reaches) are dropped."""
    font = featureVarsTestFont
    subsetter = subset.Subsetter(subset.Options())
    subsetter.populate(unicodes=[ord("f"), ord("$")])
    subsetter.subset(font)
    tags = {
        record.FeatureTag
        for record in font["GSUB"].table.FeatureList.FeatureRecord
    }
    glyph_order = font.getGlyphOrder()
    # 'dlig' is discretionary so it is dropped by default
    assert "dlig" not in tags
    assert "f_f" not in glyph_order
    # 'rvrn' is required so it is kept by default
    assert "rvrn" in tags
    assert "dollar.rvrn" in glyph_order
def test_subset_feature_variations_drop_all(featureVarsTestFont):
    """Removing 'rvrn' from the kept features drops every
    FeatureVariationRecord and downgrades GSUB to version 1.0."""
    font = featureVarsTestFont
    options = subset.Options()
    options.layout_features.remove("rvrn")  # drop 'rvrn'
    subsetter = subset.Subsetter(options)
    subsetter.populate(unicodes=[ord("f"), ord("$")])
    subsetter.subset(font)
    tags = {
        record.FeatureTag
        for record in font["GSUB"].table.FeatureList.FeatureRecord
    }
    assert "rvrn" not in tags
    assert set(font.getGlyphOrder()) == {".notdef", "f", "dollar"}
    # all FeatureVariationRecords were dropped
    assert font["GSUB"].table.FeatureVariations is None
    assert font["GSUB"].table.Version == 0x00010000
# TODO test_subset_feature_variations_drop_from_end_empty_records
# https://github.com/fonttools/fonttools/issues/1881#issuecomment-619415044
def test_subset_single_pos_format():
    """When subsetting makes all remaining SinglePos ValueRecords equal,
    the subsetter should downgrade the subtable from Format 2 to the more
    compact Format 1."""
    fb = FontBuilder(unitsPerEm=1000)
    fb.setupGlyphOrder([".notdef", "a", "b", "c"])
    fb.setupCharacterMap({ord("a"): "a", ord("b"): "b", ord("c"): "c"})
    fb.setupNameTable({"familyName": "TestSingePosFormat", "styleName": "Regular"})
    fb.setupPost()
    # 'b' gets a different XAdvance so the compiler must emit Format 2.
    fb.addOpenTypeFeatures("""
feature kern {
    pos a -50;
    pos b -40;
    pos c -50;
} kern;
""")
    buf = io.BytesIO()
    fb.save(buf)
    buf.seek(0)
    font = TTFont(buf)
    # The input font has a SinglePos Format 2 subtable where each glyph has
    # different ValueRecords
    assert getXML(font["GPOS"].table.LookupList.Lookup[0].toXML, font) == [
        '<Lookup>',
        '  <LookupType value="1"/>',
        '  <LookupFlag value="0"/>',
        '  <!-- SubTableCount=1 -->',
        '  <SinglePos index="0" Format="2">',
        '    <Coverage>',
        '      <Glyph value="a"/>',
        '      <Glyph value="b"/>',
        '      <Glyph value="c"/>',
        '    </Coverage>',
        '    <ValueFormat value="4"/>',
        '    <!-- ValueCount=3 -->',
        '    <Value index="0" XAdvance="-50"/>',
        '    <Value index="1" XAdvance="-40"/>',
        '    <Value index="2" XAdvance="-50"/>',
        '  </SinglePos>',
        '</Lookup>',
    ]

    options = subset.Options()
    subsetter = subset.Subsetter(options)
    # Keep only 'a' and 'c', which share the same -50 XAdvance.
    subsetter.populate(unicodes=[ord("a"), ord("c")])
    subsetter.subset(font)

    # All the subsetted glyphs from the original SinglePos Format2 subtable
    # now have the same ValueRecord, so we use a more compact Format 1 subtable.
    assert getXML(font["GPOS"].table.LookupList.Lookup[0].toXML, font) == [
        '<Lookup>',
        '  <LookupType value="1"/>',
        '  <LookupFlag value="0"/>',
        '  <!-- SubTableCount=1 -->',
        '  <SinglePos index="0" Format="1">',
        '    <Coverage>',
        '      <Glyph value="a"/>',
        '      <Glyph value="c"/>',
        '    </Coverage>',
        '    <ValueFormat value="4"/>',
        '    <Value XAdvance="-50"/>',
        '  </SinglePos>',
        '</Lookup>',
    ]
@pytest.fixture
def ttf_path(tmp_path):
    """Compile ../ttLib/data/TestTTF-Regular.ttx into a binary TTF and
    return its path inside pytest's *tmp_path*."""
    # $(dirname $0)/../ttLib/data
    data_dir = pathlib.Path(__file__).parent.parent / "ttLib" / "data"
    font = TTFont()
    font.importXML(data_dir / "TestTTF-Regular.ttx")
    out_path = tmp_path / "TestTTF-Regular.ttf"
    font.save(out_path)
    return out_path
def test_subset_empty_glyf(tmp_path, ttf_path):
    """Subsetting down to outline-less glyphs must yield a valid, minimal
    glyf table (a single padding byte) and an all-zero loca."""
    subset_path = tmp_path / (ttf_path.name + ".subset")
    # only keep empty .notdef and space glyph, resulting in an empty glyf table
    subset.main(
        [
            str(ttf_path),
            "--no-notdef-outline",
            "--glyph-names",
            f"--output-file={subset_path}",
            "--glyphs=.notdef space",
        ]
    )
    subset_font = TTFont(subset_path)
    glyph_order = subset_font.getGlyphOrder()
    assert glyph_order == [".notdef", "space"]
    assert subset_font.reader['glyf'] == b"\x00"
    glyf_table = subset_font["glyf"]
    assert all(glyf_table[name].numberOfContours == 0 for name in glyph_order)
    assert all(offset == 0 for offset in subset_font["loca"])
@pytest.fixture
def colrv1_path(tmp_path):
    """Build a TTF exercising COLRv1: v1 base glyphs with nested Paint
    graphs (uniE000-uniE003), one v0 base glyph (uniE004), and a 3-color
    CPAL; save it under *tmp_path* and return the path."""
    base_glyph_names = ["uni%04X" % i for i in range(0xE000, 0xE000 + 10)]
    layer_glyph_names = ["glyph%05d" % i for i in range(10, 20)]
    glyph_order = [".notdef"] + base_glyph_names + layer_glyph_names

    # Every glyph shares the same simple square outline.
    pen = TTGlyphPen(glyphSet=None)
    pen.moveTo((0, 0))
    pen.lineTo((0, 500))
    pen.lineTo((500, 500))
    pen.lineTo((500, 0))
    pen.closePath()
    glyph = pen.glyph()
    glyphs = {g: glyph for g in glyph_order}

    fb = FontBuilder(unitsPerEm=1024, isTTF=True)
    fb.setupGlyphOrder(glyph_order)
    fb.setupCharacterMap({int(name[3:], 16): name for name in base_glyph_names})
    fb.setupGlyf(glyphs)
    fb.setupHorizontalMetrics({g: (500, 0) for g in glyph_order})
    fb.setupHorizontalHeader()
    fb.setupOS2()
    fb.setupPost()
    fb.setupNameTable({"familyName": "TestCOLRv1", "styleName": "Regular"})
    fb.setupCOLR(
        {
            "uniE000": (
                ot.PaintFormat.PaintColrLayers,
                [
                    {
                        "Format": ot.PaintFormat.PaintGlyph,
                        "Paint": (ot.PaintFormat.PaintSolid, 0),
                        "Glyph": "glyph00010",
                    },
                    {
                        "Format": ot.PaintFormat.PaintGlyph,
                        "Paint": (ot.PaintFormat.PaintSolid, (2, 0.3)),
                        "Glyph": "glyph00011",
                    },
                ],
            ),
            "uniE001": (
                ot.PaintFormat.PaintColrLayers,
                [
                    {
                        "Format": ot.PaintFormat.PaintTransform,
                        "Paint": {
                            "Format": ot.PaintFormat.PaintGlyph,
                            "Paint": {
                                "Format": ot.PaintFormat.PaintRadialGradient,
                                "x0": 250,
                                "y0": 250,
                                "r0": 250,
                                "x1": 200,
                                "y1": 200,
                                "r1": 0,
                                "ColorLine": {
                                    "ColorStop": [(0.0, 1), (1.0, 2)],
                                    "Extend": "repeat",
                                },
                            },
                            "Glyph": "glyph00012",
                        },
                        "Transform": (0.7071, 0.7071, -0.7071, 0.7071, 0, 0),
                    },
                    {
                        "Format": ot.PaintFormat.PaintGlyph,
                        "Paint": (ot.PaintFormat.PaintSolid, (1, 0.5)),
                        "Glyph": "glyph00013",
                    },
                ],
            ),
            "uniE002": (
                ot.PaintFormat.PaintColrLayers,
                [
                    {
                        "Format": ot.PaintFormat.PaintGlyph,
                        "Paint": {
                            "Format": ot.PaintFormat.PaintLinearGradient,
                            "x0": 0,
                            "y0": 0,
                            "x1": 500,
                            "y1": 500,
                            "x2": -500,
                            "y2": 500,
                            "ColorLine": {"ColorStop": [(0.0, 1), (1.0, 2)]},
                        },
                        "Glyph": "glyph00014",
                    },
                    {
                        "Format": ot.PaintFormat.PaintTransform,
                        "Paint": {
                            "Format": ot.PaintFormat.PaintGlyph,
                            "Paint": (ot.PaintFormat.PaintSolid, 1),
                            "Glyph": "glyph00015",
                        },
                        "Transform": (1, 0, 0, 1, 400, 400),
                    },
                ],
            ),
            # uniE003 references another COLR base glyph, pulling in
            # uniE001's whole paint graph indirectly.
            "uniE003": {
                "Format": ot.PaintFormat.PaintRotate,
                "Paint": {
                    "Format": ot.PaintFormat.PaintColrGlyph,
                    "Glyph": "uniE001",
                },
                "angle": 45,
                "centerX": 250,
                "centerY": 250,
            },
            # uniE004 is the only COLRv0 (layer-record) base glyph.
            "uniE004": [
                ("glyph00016", 1),
                ("glyph00017", 0xFFFF),  # special palette index for foreground text
                ("glyph00018", 2),
            ],
        },
    )
    fb.setupCPAL(
        [
            [
                (1.0, 0.0, 0.0, 1.0),  # red
                (0.0, 1.0, 0.0, 1.0),  # green
                (0.0, 0.0, 1.0, 1.0),  # blue
            ],
        ],
    )

    output_path = tmp_path / "TestCOLRv1.ttf"
    fb.save(output_path)

    return output_path
def test_subset_COLRv1_and_CPAL(colrv1_path):
    """Subsetting must follow COLRv1 paint graphs (including indirect
    PaintColrGlyph references), drop unreachable base/layer glyphs, and
    remap both v0 and v1 palette indices after pruning unused CPAL colors."""
    subset_path = colrv1_path.parent / (colrv1_path.name + ".subset")

    subset.main(
        [
            str(colrv1_path),
            "--glyph-names",
            f"--output-file={subset_path}",
            "--unicodes=E002,E003,E004",
        ]
    )
    subset_font = TTFont(subset_path)

    glyph_set = set(subset_font.getGlyphOrder())

    # uniE000 and its children are excluded from subset
    assert "uniE000" not in glyph_set
    assert "glyph00010" not in glyph_set
    assert "glyph00011" not in glyph_set

    # uniE001 and children are pulled in indirectly as PaintColrGlyph by uniE003
    assert "uniE001" in glyph_set
    assert "glyph00012" in glyph_set
    assert "glyph00013" in glyph_set

    assert "uniE002" in glyph_set
    assert "glyph00014" in glyph_set
    assert "glyph00015" in glyph_set

    assert "uniE003" in glyph_set

    assert "uniE004" in glyph_set
    assert "glyph00016" in glyph_set
    assert "glyph00017" in glyph_set
    assert "glyph00018" in glyph_set

    assert "COLR" in subset_font
    colr = subset_font["COLR"].table
    assert colr.Version == 1
    assert len(colr.BaseGlyphRecordArray.BaseGlyphRecord) == 1
    assert len(colr.BaseGlyphV1List.BaseGlyphV1Record) == 3  # was 4

    base = colr.BaseGlyphV1List.BaseGlyphV1Record[0]
    assert base.BaseGlyph == "uniE001"
    layers = colr.LayerV1List.Paint[
        base.Paint.FirstLayerIndex: base.Paint.FirstLayerIndex + base.Paint.NumLayers
    ]
    assert len(layers) == 2
    # check v1 palette indices were remapped
    assert layers[0].Paint.Paint.ColorLine.ColorStop[0].Color.PaletteIndex == 0
    assert layers[0].Paint.Paint.ColorLine.ColorStop[1].Color.PaletteIndex == 1
    assert layers[1].Paint.Color.PaletteIndex == 0

    baseRecV0 = colr.BaseGlyphRecordArray.BaseGlyphRecord[0]
    assert baseRecV0.BaseGlyph == "uniE004"
    layersV0 = colr.LayerRecordArray.LayerRecord
    assert len(layersV0) == 3
    # check v0 palette indices were remapped (except for 0xFFFF)
    assert layersV0[0].PaletteIndex == 0
    assert layersV0[1].PaletteIndex == 0xFFFF
    assert layersV0[2].PaletteIndex == 1

    assert "CPAL" in subset_font
    cpal = subset_font["CPAL"]
    assert [
        tuple(v / 255 for v in (c.red, c.green, c.blue, c.alpha))
        for c in cpal.palettes[0]
    ] == [
        # the first color 'red' was pruned
        (0.0, 1.0, 0.0, 1.0),  # green
        (0.0, 0.0, 1.0, 1.0),  # blue
    ]
def test_subset_COLRv1_and_CPAL_drop_empty(colrv1_path):
    """If no retained glyph is a COLR base glyph, the now-empty COLR and
    CPAL tables are dropped entirely."""
    subset_path = colrv1_path.parent / (colrv1_path.name + ".subset")
    subset.main(
        [
            str(colrv1_path),
            "--glyph-names",
            f"--output-file={subset_path}",
            "--glyphs=glyph00010",
        ]
    )
    subset_font = TTFont(subset_path)
    kept_glyphs = set(subset_font.getGlyphOrder())
    # the layer glyph survives on its own; its base glyph does not
    assert "glyph00010" in kept_glyphs
    assert "uniE000" not in kept_glyphs
    assert "COLR" not in subset_font
    assert "CPAL" not in subset_font
def test_subset_COLRv1_downgrade_version(colrv1_path):
    """Keeping only COLRv0 (layer-record) base glyphs must downgrade the
    COLR table from version 1 to version 0."""
    subset_path = colrv1_path.parent / (colrv1_path.name + ".subset")
    subset.main(
        [
            str(colrv1_path),
            "--glyph-names",
            f"--output-file={subset_path}",
            "--unicodes=E004",
        ]
    )
    subset_font = TTFont(subset_path)
    expected_glyphs = {
        ".notdef",
        "uniE004",
        "glyph00016",
        "glyph00017",
        "glyph00018",
    }
    assert set(subset_font.getGlyphOrder()) == expected_glyphs
    assert "COLR" in subset_font
    assert subset_font["COLR"].version == 0
def test_subset_COLRv1_drop_all_v0_glyphs(colrv1_path):
    """Keeping only COLRv1 base glyphs must leave COLR at version 1 with
    the v0 record arrays emptied out."""
    subset_path = colrv1_path.parent / (colrv1_path.name + ".subset")
    subset.main(
        [
            str(colrv1_path),
            "--glyph-names",
            f"--output-file={subset_path}",
            "--unicodes=E003",
        ]
    )
    subset_font = TTFont(subset_path)
    assert set(subset_font.getGlyphOrder()) == {
        ".notdef",
        "uniE001",
        "uniE003",
        "glyph00012",
        "glyph00013",
    }
    assert "COLR" in subset_font
    colr = subset_font["COLR"]
    assert colr.version == 1
    assert colr.table.BaseGlyphRecordCount == 0
    assert colr.table.BaseGlyphRecordArray is None
    assert colr.table.LayerRecordArray is None
    # BUGFIX: was `is 0` — identity comparison with an int literal relies on
    # CPython's small-int caching and raises SyntaxWarning on 3.8+; compare
    # by equality instead.
    assert colr.table.LayerRecordCount == 0
# Allow running this test module directly; pytest also collects it.
if __name__ == "__main__":
    sys.exit(unittest.main())
"""subset_test: check that we keep an empty 'size' feature but drop empty 'ssXX' features.

https://github.com/fonttools/fonttools/issues/2324
"""
import io
from fontTools.misc.py23 import tobytes, tostr
from fontTools.misc.testTools import getXML
from fontTools import subset
from fontTools.fontBuilder import FontBuilder
from fontTools.pens.ttGlyphPen import TTGlyphPen
from fontTools.ttLib import TTFont, newTable
from fontTools.ttLib.tables import otTables as ot
from fontTools.misc.loggingTools import CapturingLogHandler
import difflib
import logging
import os
import shutil
import sys
import tempfile
import unittest
import pathlib
import pytest
class SubsetTest(unittest.TestCase):
    def __init__(self, methodName):
        """Set up the TestCase and alias assertRaisesRegex for Python 2."""
        unittest.TestCase.__init__(self, methodName)
        # Python 3 renamed assertRaisesRegexp to assertRaisesRegex,
        # and fires deprecation warnings if a program uses the old name.
        # On Python 2 only the old spelling exists, so alias it here.
        if not hasattr(self, "assertRaisesRegex"):
            self.assertRaisesRegex = self.assertRaisesRegexp
    def setUp(self):
        # The temporary directory is created lazily by temp_path().
        self.tempdir = None
        self.num_tempfiles = 0
    def tearDown(self):
        # Remove the temp directory, if any test created files in it.
        if self.tempdir:
            shutil.rmtree(self.tempdir)
@staticmethod
def getpath(testfile):
path, _ = os.path.split(__file__)
return os.path.join(path, "data", testfile)
def temp_path(self, suffix):
if not self.tempdir:
self.tempdir = tempfile.mkdtemp()
self.num_tempfiles += 1
return os.path.join(self.tempdir,
"tmp%d%s" % (self.num_tempfiles, suffix))
def read_ttx(self, path):
lines = []
with open(path, "r", encoding="utf-8") as ttx:
for line in ttx.readlines():
# Elide ttFont attributes because ttLibVersion may change,
# and use os-native line separators so we can run difflib.
if line.startswith("<ttFont "):
lines.append("<ttFont>" + os.linesep)
else:
lines.append(line.rstrip() + os.linesep)
return lines
def expect_ttx(self, font, expected_ttx, tables=None):
path = self.temp_path(suffix=".ttx")
font.saveXML(path, tables=tables)
actual = self.read_ttx(path)
expected = self.read_ttx(expected_ttx)
if actual != expected:
for line in difflib.unified_diff(
expected, actual, fromfile=expected_ttx, tofile=path):
sys.stdout.write(line)
self.fail("TTX output is different from expected")
def compile_font(self, path, suffix):
savepath = self.temp_path(suffix=suffix)
font = TTFont(recalcBBoxes=False, recalcTimestamp=False)
font.importXML(path)
font.save(savepath, reorderTables=None)
return font, savepath
# -----
# Tests
# -----
    def test_layout_scripts(self):
        """--layout-scripts keeps only the requested script/langsys systems
        in GSUB and GPOS."""
        _, fontpath = self.compile_font(self.getpath("layout_scripts.ttx"), ".otf")
        subsetpath = self.temp_path(".otf")
        subset.main([fontpath, "--glyphs=*", "--layout-features=*",
                     "--layout-scripts=latn,arab.URD,arab.dflt",
                     "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_layout_scripts.ttx"),
                        ["GPOS", "GSUB"])
    def test_no_notdef_outline_otf(self):
        """--no-notdef-outline strips the .notdef outline from a CFF font."""
        _, fontpath = self.compile_font(self.getpath("TestOTF-Regular.ttx"), ".otf")
        subsetpath = self.temp_path(".otf")
        subset.main([fontpath, "--no-notdef-outline", "--gids=0", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_no_notdef_outline_otf.ttx"), ["CFF "])
    def test_no_notdef_outline_cid(self):
        """--no-notdef-outline works for CID-keyed CFF fonts too."""
        _, fontpath = self.compile_font(self.getpath("TestCID-Regular.ttx"), ".otf")
        subsetpath = self.temp_path(".otf")
        subset.main([fontpath, "--no-notdef-outline", "--gids=0", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_no_notdef_outline_cid.ttx"), ["CFF "])
    def test_no_notdef_outline_ttf(self):
        """--no-notdef-outline strips the .notdef outline from a glyf font."""
        _, fontpath = self.compile_font(self.getpath("TestTTF-Regular.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--no-notdef-outline", "--gids=0", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_no_notdef_outline_ttf.ttx"), ["glyf", "hmtx"])
    def test_subset_ankr(self):
        """The AAT 'ankr' table is subset along with the retained glyphs."""
        _, fontpath = self.compile_font(self.getpath("TestANKR.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--glyphs=one", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_ankr.ttx"), ["ankr"])
    def test_subset_ankr_remove(self):
        """'ankr' is dropped entirely when no retained glyph has anchors."""
        _, fontpath = self.compile_font(self.getpath("TestANKR.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--glyphs=two", "--output-file=%s" % subsetpath])
        self.assertNotIn("ankr", TTFont(subsetpath))
    def test_subset_bsln_format_0(self):
        """A format 0 'bsln' table survives subsetting unchanged."""
        _, fontpath = self.compile_font(self.getpath("TestBSLN-0.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--glyphs=one", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_bsln_0.ttx"), ["bsln"])
    def test_subset_bsln_format_0_from_format_1(self):
        """A format 1 'bsln' collapses to format 0 once all remaining glyphs
        share the same baseline."""
        # TestBSLN-1 defines the ideographic baseline to be the font's default,
        # and specifies that glyphs {.notdef, zero, one, two} use the roman
        # baseline instead of the default ideographic baseline. As we request
        # a subsetted font with {zero, one} and the implicit .notdef, all
        # glyphs in the resulting font use the Roman baseline. In this case,
        # we expect a format 0 'bsln' table because it is the most compact.
        _, fontpath = self.compile_font(self.getpath("TestBSLN-1.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--unicodes=U+0030-0031",
                     "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_bsln_0.ttx"), ["bsln"])
    def test_subset_bsln_format_1(self):
        """A format 1 'bsln' is kept when baselines still differ per glyph."""
        # TestBSLN-1 defines the ideographic baseline to be the font's default,
        # and specifies that glyphs {.notdef, zero, one, two} use the roman
        # baseline instead of the default ideographic baseline. We request
        # a subset where the majority of glyphs use the roman baseline,
        # but one single glyph (uni2EA2) is ideographic. In the resulting
        # subsetted font, we expect a format 1 'bsln' table whose default
        # is Roman, but with an override that uses the ideographic baseline
        # for uni2EA2.
        _, fontpath = self.compile_font(self.getpath("TestBSLN-1.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--unicodes=U+0030-0031,U+2EA2",
                     "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_bsln_1.ttx"), ["bsln"])
    def test_subset_bsln_format_2(self):
        """Glyphs referenced by 'bsln' for baseline measurement are retained
        even when not explicitly requested."""
        # The 'bsln' table in TestBSLN-2 refers to control points in glyph 'P'
        # for defining its baselines. Therefore, the subsetted font should
        # include this glyph even though it is not requested explicitly.
        _, fontpath = self.compile_font(self.getpath("TestBSLN-2.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--glyphs=one", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_bsln_2.ttx"), ["bsln"])
    def test_subset_bsln_format_2_from_format_3(self):
        """A format 3 'bsln' collapses to format 2 once all remaining glyphs
        share the same (control-point based) baseline."""
        # TestBSLN-3 defines the ideographic baseline to be the font's default,
        # and specifies that glyphs {.notdef, zero, one, two, P} use the roman
        # baseline instead of the default ideographic baseline. As we request
        # a subsetted font with zero and the implicit .notdef and P for
        # baseline measurement, all glyphs in the resulting font use the Roman
        # baseline. In this case, we expect a format 2 'bsln' table because it
        # is the most compact encoding.
        _, fontpath = self.compile_font(self.getpath("TestBSLN-3.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--unicodes=U+0030",
                     "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_bsln_2.ttx"), ["bsln"])
    def test_subset_bsln_format_3(self):
        """A format 3 'bsln' is kept when baselines still differ per glyph."""
        # TestBSLN-3 defines the ideographic baseline to be the font's default,
        # and specifies that glyphs {.notdef, zero, one, two} use the roman
        # baseline instead of the default ideographic baseline. We request
        # a subset where the majority of glyphs use the roman baseline,
        # but one single glyph (uni2EA2) is ideographic. In the resulting
        # subsetted font, we expect a format 3 'bsln' table whose default
        # is Roman, but with an override that uses the ideographic baseline
        # for uni2EA2. (The expected file is expect_bsln_3.ttx; the comment
        # previously said "format 1", copied from the format 1 test.)
        _, fontpath = self.compile_font(self.getpath("TestBSLN-3.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--unicodes=U+0030-0031,U+2EA2",
                     "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_bsln_3.ttx"), ["bsln"])
    def test_subset_clr(self):
        """COLR/CPAL and the color layer glyphs are kept for a retained
        color base glyph."""
        _, fontpath = self.compile_font(self.getpath("TestCLR-Regular.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--glyphs=smileface", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_keep_colr.ttx"), ["GlyphOrder", "hmtx", "glyf", "COLR", "CPAL"])
    def test_subset_gvar(self):
        """The 'gvar' variation data is subset to the retained glyphs."""
        _, fontpath = self.compile_font(self.getpath("TestGVAR.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--unicodes=U+002B,U+2212", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_keep_gvar.ttx"), ["GlyphOrder", "avar", "fvar", "gvar", "name"])
def test_subset_gvar_notdef_outline(self):
_, fontpath = self.compile_font(self.getpath("TestGVAR.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--unicodes=U+0030", "--notdef_outline", "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_keep_gvar_notdef_outline.ttx"), ["GlyphOrder", "avar", "fvar", "gvar", "name"])
    def test_subset_lcar_remove(self):
        """'lcar' is dropped when no retained glyph has ligature carets."""
        _, fontpath = self.compile_font(self.getpath("TestLCAR-0.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--glyphs=one", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.assertNotIn("lcar", subsetfont)
    def test_subset_lcar_format_0(self):
        """A format 0 'lcar' table is subset for the retained ligature."""
        _, fontpath = self.compile_font(self.getpath("TestLCAR-0.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--unicodes=U+FB01",
                     "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_lcar_0.ttx"), ["lcar"])
    def test_subset_lcar_format_1(self):
        """A format 1 'lcar' table is subset for the retained ligature."""
        _, fontpath = self.compile_font(self.getpath("TestLCAR-1.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--unicodes=U+FB01",
                     "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_lcar_1.ttx"), ["lcar"])
    def test_subset_math(self):
        """The MATH table is subset to the retained glyphs."""
        _, fontpath = self.compile_font(self.getpath("TestMATH-Regular.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--unicodes=U+0041,U+0028,U+0302,U+1D400,U+1D435", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_keep_math.ttx"), ["GlyphOrder", "CFF ", "MATH", "hmtx"])
    def test_subset_math_partial(self):
        """Subsetting a MATH table with only some optional subtables present
        must not crash and must keep the relevant records."""
        _, fontpath = self.compile_font(self.getpath("test_math_partial.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--text=A", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_math_partial.ttx"), ["MATH"])
    def test_subset_opbd_remove(self):
        """'opbd' is dropped when no retained glyph has optical bounds."""
        # In the test font, only the glyphs 'A' and 'zero' have an entry in
        # the Optical Bounds table. When subsetting, we do not request any
        # of those glyphs. Therefore, the produced subsetted font should
        # not contain an 'opbd' table.
        _, fontpath = self.compile_font(self.getpath("TestOPBD-0.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--glyphs=one", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.assertNotIn("opbd", subsetfont)
    def test_subset_opbd_format_0(self):
        """A format 0 'opbd' table is subset for the retained glyph."""
        _, fontpath = self.compile_font(self.getpath("TestOPBD-0.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--glyphs=A", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_opbd_0.ttx"), ["opbd"])
    def test_subset_opbd_format_1(self):
        """A format 1 'opbd' table is subset for the retained glyph."""
        _, fontpath = self.compile_font(self.getpath("TestOPBD-1.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--glyphs=A", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_opbd_1.ttx"), ["opbd"])
    def test_subset_prop_remove_default_zero(self):
        """'prop' is removed when every remaining glyph's property is 0."""
        # If all glyphs have an AAT glyph property with value 0,
        # the "prop" table should be removed from the subsetted font.
        _, fontpath = self.compile_font(self.getpath("TestPROP.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--unicodes=U+0041",
                     "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.assertNotIn("prop", subsetfont)
    def test_subset_prop_0(self):
        """'prop' collapses to format 0 when all glyphs share one non-zero
        property value."""
        # If all glyphs share the same AAT glyph properties, the "prop" table
        # in the subsetted font should use format 0.
        #
        # Unless the shared value is zero, in which case the subsetted font
        # should have no "prop" table at all. But that case has already been
        # tested above in test_subset_prop_remove_default_zero().
        _, fontpath = self.compile_font(self.getpath("TestPROP.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--unicodes=U+0030-0032", "--no-notdef-glyph",
                     "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_prop_0.ttx"), ["prop"])
    def test_subset_prop_1(self):
        """'prop' stays format 1 (with a majority default) when property
        values differ across remaining glyphs."""
        # If not all glyphs share the same AAT glyph properties, the subsetted
        # font should contain a "prop" table in format 1. To save space, the
        # DefaultProperties should be set to the most frequent value.
        _, fontpath = self.compile_font(self.getpath("TestPROP.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--unicodes=U+0030-0032", "--notdef-outline",
                     "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_prop_1.ttx"), ["prop"])
def test_options(self):
# https://github.com/fonttools/fonttools/issues/413
opt1 = subset.Options()
self.assertTrue('Xyz-' not in opt1.layout_features)
opt2 = subset.Options()
opt2.layout_features.append('Xyz-')
self.assertTrue('Xyz-' in opt2.layout_features)
self.assertTrue('Xyz-' not in opt1.layout_features)
    def test_google_color(self):
        """CBDT/CBLC bitmap strikes are subset to the retained glyphs."""
        _, fontpath = self.compile_font(self.getpath("google_color.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--gids=0,1", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.assertTrue("CBDT" in subsetfont)
        self.assertTrue("CBLC" in subsetfont)
        # gid 1 is 'x'; 'y' was not requested and must be gone
        self.assertTrue("x" in subsetfont['CBDT'].strikeData[0])
        self.assertFalse("y" in subsetfont['CBDT'].strikeData[0])
    def test_google_color_all(self):
        """--unicodes=* keeps all bitmap glyphs in the CBDT strike."""
        _, fontpath = self.compile_font(self.getpath("google_color.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--unicodes=*", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.assertTrue("x" in subsetfont['CBDT'].strikeData[0])
        self.assertTrue("y" in subsetfont['CBDT'].strikeData[0])
    def test_sbix(self):
        """The 'sbix' bitmap table is subset to the retained glyphs."""
        _, fontpath = self.compile_font(self.getpath("sbix.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--gids=0,1", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath(
            "expect_sbix.ttx"), ["sbix"])
def test_timing_publishes_parts(self):
_, fontpath = self.compile_font(self.getpath("TestTTF-Regular.ttx"), ".ttf")
options = subset.Options()
options.timing = True
subsetter = subset.Subsetter(options)
subsetter.populate(text='ABC')
font = TTFont(fontpath)
with CapturingLogHandler('fontTools.subset.timer', logging.DEBUG) as captor:
subsetter.subset(font)
logs = captor.records
self.assertTrue(len(logs) > 5)
self.assertEqual(len(logs), len([l for l in logs if 'msg' in l.args and 'time' in l.args]))
# Look for a few things we know should happen
self.assertTrue(filter(lambda l: l.args['msg'] == "load 'cmap'", logs))
self.assertTrue(filter(lambda l: l.args['msg'] == "subset 'cmap'", logs))
self.assertTrue(filter(lambda l: l.args['msg'] == "subset 'glyf'", logs))
    def test_passthrough_tables(self):
        """Unknown tables are dropped by default but kept verbatim with
        --passthrough-tables."""
        _, fontpath = self.compile_font(self.getpath("TestTTF-Regular.ttx"), ".ttf")
        font = TTFont(fontpath)
        unknown_tag = 'ZZZZ'
        unknown_table = newTable(unknown_tag)
        unknown_table.data = b'\0'*10
        font[unknown_tag] = unknown_table
        font.save(fontpath)

        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)

        # tables we can't subset are dropped by default
        self.assertFalse(unknown_tag in subsetfont)

        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--passthrough-tables", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)

        # unknown tables are kept if --passthrough-tables option is passed
        self.assertTrue(unknown_tag in subsetfont)
    def test_non_BMP_text_arg_input(self):
        """--text accepts characters outside the Basic Multilingual Plane."""
        _, fontpath = self.compile_font(
            self.getpath("TestTTF-Regular_non_BMP_char.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        text = tostr(u"A\U0001F6D2", encoding='utf-8')

        subset.main([fontpath, "--text=%s" % text, "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)

        self.assertEqual(subsetfont['maxp'].numGlyphs, 3)
        self.assertEqual(subsetfont.getGlyphOrder(), ['.notdef', 'A', 'u1F6D2'])
    def test_non_BMP_text_file_input(self):
        """--text-file accepts UTF-8 input containing non-BMP characters."""
        _, fontpath = self.compile_font(
            self.getpath("TestTTF-Regular_non_BMP_char.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        text = tobytes(u"A\U0001F6D2", encoding='utf-8')
        # delete=False + explicit os.remove keeps this working on Windows,
        # where an open NamedTemporaryFile cannot be re-opened by name.
        with tempfile.NamedTemporaryFile(delete=False) as tmp:
            tmp.write(text)

        try:
            subset.main([fontpath, "--text-file=%s" % tmp.name,
                         "--output-file=%s" % subsetpath])
            subsetfont = TTFont(subsetpath)
        finally:
            os.remove(tmp.name)

        self.assertEqual(subsetfont['maxp'].numGlyphs, 3)
        self.assertEqual(subsetfont.getGlyphOrder(), ['.notdef', 'A', 'u1F6D2'])
    def test_no_hinting_CFF(self):
        """--no-hinting strips hints from CFF charstrings."""
        ttxpath = self.getpath("Lobster.subset.ttx")
        _, fontpath = self.compile_font(ttxpath, ".otf")
        subsetpath = self.temp_path(".otf")
        subset.main([fontpath, "--no-hinting", "--notdef-outline",
                     "--output-file=%s" % subsetpath, "*"])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath(
            "expect_no_hinting_CFF.ttx"), ["CFF "])
    def test_desubroutinize_CFF(self):
        """--desubroutinize inlines all CFF subroutines into charstrings."""
        ttxpath = self.getpath("Lobster.subset.ttx")
        _, fontpath = self.compile_font(ttxpath, ".otf")
        subsetpath = self.temp_path(".otf")
        subset.main([fontpath, "--desubroutinize", "--notdef-outline",
                     "--output-file=%s" % subsetpath, "*"])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath(
            "expect_desubroutinize_CFF.ttx"), ["CFF "])
    def test_desubroutinize_hinted_subrs_CFF(self):
        """Desubroutinization preserves hints that live inside subroutines."""
        ttxpath = self.getpath("test_hinted_subrs_CFF.ttx")
        _, fontpath = self.compile_font(ttxpath, ".otf")
        subsetpath = self.temp_path(".otf")
        subset.main([fontpath, "--desubroutinize", "--notdef-outline",
                     "--output-file=%s" % subsetpath, "*"])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath(
            "test_hinted_subrs_CFF.desub.ttx"), ["CFF "])
    def test_desubroutinize_cntrmask_CFF(self):
        """Desubroutinization handles cntrmask operators correctly."""
        ttxpath = self.getpath("test_cntrmask_CFF.ttx")
        _, fontpath = self.compile_font(ttxpath, ".otf")
        subsetpath = self.temp_path(".otf")
        subset.main([fontpath, "--desubroutinize", "--notdef-outline",
                     "--output-file=%s" % subsetpath, "*"])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath(
            "test_cntrmask_CFF.desub.ttx"), ["CFF "])
    def test_no_hinting_desubroutinize_CFF(self):
        """--no-hinting and --desubroutinize combine correctly on CFF."""
        ttxpath = self.getpath("test_hinted_subrs_CFF.ttx")
        _, fontpath = self.compile_font(ttxpath, ".otf")
        subsetpath = self.temp_path(".otf")
        subset.main([fontpath, "--no-hinting", "--desubroutinize", "--notdef-outline",
                     "--output-file=%s" % subsetpath, "*"])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath(
            "expect_no_hinting_desubroutinize_CFF.ttx"), ["CFF "])
    def test_no_hinting_TTF(self):
        """--no-hinting strips glyf instructions and drops all hinting
        tables (fpgm, prep, cvt, ...)."""
        _, fontpath = self.compile_font(self.getpath("TestTTF-Regular.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        subset.main([fontpath, "--no-hinting", "--notdef-outline",
                     "--output-file=%s" % subsetpath, "*"])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath(
            "expect_no_hinting_TTF.ttx"), ["glyf", "maxp"])
        for tag in subset.Options().hinting_tables:
            self.assertTrue(tag not in subsetfont)
    def test_notdef_width_cid(self):
        """.notdef keeps its advance width in CID-keyed CFF fonts."""
        # https://github.com/fonttools/fonttools/pull/845
        _, fontpath = self.compile_font(self.getpath("NotdefWidthCID-Regular.ttx"), ".otf")
        subsetpath = self.temp_path(".otf")
        subset.main([fontpath, "--no-notdef-outline", "--gids=0,1", "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)
        self.expect_ttx(subsetfont, self.getpath("expect_notdef_width_cid.ttx"), ["CFF "])
    def test_recalc_bounds_ttf(self):
        """head bounds are preserved by default and recomputed only with
        --recalc-bounds (glyf font)."""
        ttxpath = self.getpath("TestTTF-Regular.ttx")
        font = TTFont()
        font.importXML(ttxpath)
        head = font['head']
        bounds = [head.xMin, head.yMin, head.xMax, head.yMax]

        _, fontpath = self.compile_font(ttxpath, ".ttf")
        subsetpath = self.temp_path(".ttf")

        # by default, the subsetter does not recalculate the bounding box
        subset.main([fontpath, "--output-file=%s" % subsetpath, "*"])
        head = TTFont(subsetpath)['head']
        self.assertEqual(bounds, [head.xMin, head.yMin, head.xMax, head.yMax])

        subset.main([fontpath, "--recalc-bounds", "--output-file=%s" % subsetpath, "*"])
        head = TTFont(subsetpath)['head']
        # known-correct recomputed bounds for this test font
        bounds = [132, 304, 365, 567]
        self.assertEqual(bounds, [head.xMin, head.yMin, head.xMax, head.yMax])
    def test_recalc_bounds_otf(self):
        """head bounds are preserved by default and recomputed only with
        --recalc-bounds (CFF font)."""
        ttxpath = self.getpath("TestOTF-Regular.ttx")
        font = TTFont()
        font.importXML(ttxpath)
        head = font['head']
        bounds = [head.xMin, head.yMin, head.xMax, head.yMax]

        _, fontpath = self.compile_font(ttxpath, ".otf")
        subsetpath = self.temp_path(".otf")

        # by default, the subsetter does not recalculate the bounding box
        subset.main([fontpath, "--output-file=%s" % subsetpath, "*"])
        head = TTFont(subsetpath)['head']
        self.assertEqual(bounds, [head.xMin, head.yMin, head.xMax, head.yMax])

        subset.main([fontpath, "--recalc-bounds", "--output-file=%s" % subsetpath, "*"])
        head = TTFont(subsetpath)['head']
        # known-correct recomputed bounds for this test font
        bounds = [132, 304, 365, 567]
        self.assertEqual(bounds, [head.xMin, head.yMin, head.xMax, head.yMax])
    def test_recalc_timestamp_ttf(self):
        """head.modified is preserved by default and bumped only with
        --recalc-timestamp (glyf font)."""
        ttxpath = self.getpath("TestTTF-Regular.ttx")
        font = TTFont()
        font.importXML(ttxpath)
        modified = font['head'].modified
        _, fontpath = self.compile_font(ttxpath, ".ttf")
        subsetpath = self.temp_path(".ttf")

        # by default, the subsetter does not recalculate the modified timestamp
        subset.main([fontpath, "--output-file=%s" % subsetpath, "*"])
        self.assertEqual(modified, TTFont(subsetpath)['head'].modified)

        subset.main([fontpath, "--recalc-timestamp", "--output-file=%s" % subsetpath, "*"])
        self.assertLess(modified, TTFont(subsetpath)['head'].modified)
def test_recalc_timestamp_otf(self):
    """--recalc-timestamp bumps head.modified for OTF output; default keeps it."""
    ttxpath = self.getpath("TestOTF-Regular.ttx")
    font = TTFont()
    font.importXML(ttxpath)
    modified = font['head'].modified
    _, fontpath = self.compile_font(ttxpath, ".otf")
    subsetpath = self.temp_path(".otf")
    # by default, the subsetter does not recalculate the modified timestamp
    subset.main([fontpath, "--output-file=%s" % subsetpath, "*"])
    self.assertEqual(modified, TTFont(subsetpath)['head'].modified)
    subset.main([fontpath, "--recalc-timestamp", "--output-file=%s" % subsetpath, "*"])
    self.assertLess(modified, TTFont(subsetpath)['head'].modified)
def test_recalc_max_context(self):
    """--recalc-max-context zeroes OS/2.usMaxContext once GSUB/GPOS are
    dropped; without the flag the stale value is kept."""
    ttxpath = self.getpath("Lobster.subset.ttx")
    font = TTFont()
    font.importXML(ttxpath)
    max_context = font['OS/2'].usMaxContext
    _, fontpath = self.compile_font(ttxpath, ".otf")
    subsetpath = self.temp_path(".otf")
    # by default, the subsetter does not recalculate the usMaxContext
    subset.main([fontpath, "--drop-tables+=GSUB,GPOS",
                 "--output-file=%s" % subsetpath])
    self.assertEqual(max_context, TTFont(subsetpath)['OS/2'].usMaxContext)
    subset.main([fontpath, "--recalc-max-context",
                 "--drop-tables+=GSUB,GPOS",
                 "--output-file=%s" % subsetpath])
    # no layout tables left, so the recalculated context is 0
    self.assertEqual(0, TTFont(subsetpath)['OS/2'].usMaxContext)
def test_retain_gids_ttf(self):
    """--retain-gids keeps the original glyph order in a TTF; glyphs outside
    the subset ('A') are emptied (zero metrics, no contours) rather than
    removed, while kept glyphs ('B') are untouched."""
    _, fontpath = self.compile_font(self.getpath("TestTTF-Regular.ttx"), ".ttf")
    font = TTFont(fontpath)
    self.assertEqual(font["hmtx"]["A"], (500, 132))
    self.assertEqual(font["hmtx"]["B"], (400, 132))
    self.assertGreater(font["glyf"]["A"].numberOfContours, 0)
    self.assertGreater(font["glyf"]["B"].numberOfContours, 0)
    subsetpath = self.temp_path(".ttf")
    subset.main(
        [
            fontpath,
            "--retain-gids",
            "--output-file=%s" % subsetpath,
            "--glyph-names",
            "B",
        ]
    )
    subsetfont = TTFont(subsetpath)
    # glyph order up to and including 'B' is preserved
    self.assertEqual(subsetfont.getGlyphOrder(), font.getGlyphOrder()[0:3])
    hmtx = subsetfont["hmtx"]
    self.assertEqual(hmtx["A"], ( 0, 0))
    self.assertEqual(hmtx["B"], (400, 132))
    glyf = subsetfont["glyf"]
    self.assertEqual(glyf["A"].numberOfContours, 0)
    self.assertGreater(glyf["B"].numberOfContours, 0)
def test_retain_gids_cff(self):
    """--retain-gids in a CFF font: dropped glyph 'A' becomes a bare
    'endchar' charstring with zero metrics; kept glyph 'B' is untouched."""
    _, fontpath = self.compile_font(self.getpath("TestOTF-Regular.ttx"), ".otf")
    font = TTFont(fontpath)
    self.assertEqual(font["hmtx"]["A"], (500, 132))
    self.assertEqual(font["hmtx"]["B"], (400, 132))
    self.assertEqual(font["hmtx"]["C"], (500, 0))
    font["CFF "].cff[0].decompileAllCharStrings()
    cs = font["CFF "].cff[0].CharStrings
    self.assertGreater(len(cs["A"].program), 0)
    self.assertGreater(len(cs["B"].program), 0)
    self.assertGreater(len(cs["C"].program), 0)
    subsetpath = self.temp_path(".otf")
    subset.main(
        [
            fontpath,
            "--retain-gids",
            "--output-file=%s" % subsetpath,
            "--glyph-names",
            "B",
        ]
    )
    subsetfont = TTFont(subsetpath)
    self.assertEqual(subsetfont.getGlyphOrder(), font.getGlyphOrder()[0:3])
    hmtx = subsetfont["hmtx"]
    self.assertEqual(hmtx["A"], (0, 0))
    self.assertEqual(hmtx["B"], (400, 132))
    subsetfont["CFF "].cff[0].decompileAllCharStrings()
    cs = subsetfont["CFF "].cff[0].CharStrings
    # emptied CFF glyphs still need a terminating 'endchar'
    self.assertEqual(cs["A"].program, ["endchar"])
    self.assertGreater(len(cs["B"].program), 0)
def test_retain_gids_cff2(self):
    """--retain-gids in a CFF2 variable font: dropped glyph gets an empty
    charstring (CFF2 has no 'endchar') and zero metrics; 'T' is kept."""
    ttx_path = self.getpath("../../varLib/data/master_ttx_varfont_otf/TestCFF2VF.ttx")
    font, fontpath = self.compile_font(ttx_path, ".otf")
    self.assertEqual(font["hmtx"]["A"], (600, 31))
    self.assertEqual(font["hmtx"]["T"], (600, 41))
    font["CFF2"].cff[0].decompileAllCharStrings()
    cs = font["CFF2"].cff[0].CharStrings
    self.assertGreater(len(cs["A"].program), 0)
    self.assertGreater(len(cs["T"].program), 0)
    subsetpath = self.temp_path(".otf")
    subset.main(
        [
            fontpath,
            "--retain-gids",
            "--output-file=%s" % subsetpath,
            "T",
        ]
    )
    subsetfont = TTFont(subsetpath)
    self.assertEqual(len(subsetfont.getGlyphOrder()), len(font.getGlyphOrder()[0:3]))
    hmtx = subsetfont["hmtx"]
    # 'A' was renamed to a production name since --glyph-names wasn't passed
    self.assertEqual(hmtx["glyph00001"], ( 0, 0))
    self.assertEqual(hmtx["T"], (600, 41))
    subsetfont["CFF2"].cff[0].decompileAllCharStrings()
    cs = subsetfont["CFF2"].cff[0].CharStrings
    self.assertEqual(cs["glyph00001"].program, [])
    self.assertGreater(len(cs["T"].program), 0)
def test_HVAR_VVAR(self):
    """Subsetting a variable font keeps HVAR/VVAR consistent with the
    reduced glyph set (compared against a golden TTX dump)."""
    _, fontpath = self.compile_font(self.getpath("TestHVVAR.ttx"), ".ttf")
    subsetpath = self.temp_path(".ttf")
    subset.main([fontpath, "--text=BD", "--output-file=%s" % subsetpath])
    subsetfont = TTFont(subsetpath)
    self.expect_ttx(subsetfont, self.getpath("expect_HVVAR.ttx"), ["GlyphOrder", "HVAR", "VVAR", "avar", "fvar"])
def test_HVAR_VVAR_retain_gids(self):
    """Same as test_HVAR_VVAR but with --retain-gids, checked against its
    own golden TTX where glyph ids are preserved."""
    _, fontpath = self.compile_font(self.getpath("TestHVVAR.ttx"), ".ttf")
    subsetpath = self.temp_path(".ttf")
    subset.main([fontpath, "--text=BD", "--retain-gids", "--output-file=%s" % subsetpath])
    subsetfont = TTFont(subsetpath)
    self.expect_ttx(subsetfont, self.getpath("expect_HVVAR_retain_gids.ttx"), ["GlyphOrder", "HVAR", "VVAR", "avar", "fvar"])
def test_subset_flavor(self):
    """--flavor round-trip: ttf -> woff -> woff2 -> plain ttf, checking the
    resulting TTFont.flavor at each step."""
    _, fontpath = self.compile_font(self.getpath("TestTTF-Regular.ttx"), ".ttf")
    font = TTFont(fontpath)
    woff_path = self.temp_path(".woff")
    subset.main(
        [
            fontpath,
            "*",
            "--flavor=woff",
            "--output-file=%s" % woff_path,
        ]
    )
    woff = TTFont(woff_path)
    self.assertEqual(woff.flavor, "woff")
    woff2_path = self.temp_path(".woff2")
    subset.main(
        [
            woff_path,
            "*",
            "--flavor=woff2",
            "--output-file=%s" % woff2_path,
        ]
    )
    woff2 = TTFont(woff2_path)
    self.assertEqual(woff2.flavor, "woff2")
    ttf_path = self.temp_path(".ttf")
    subset.main(
        [
            woff2_path,
            "*",
            "--output-file=%s" % ttf_path,
        ]
    )
    ttf = TTFont(ttf_path)
    # no --flavor: output is an uncompressed sfnt
    self.assertEqual(ttf.flavor, None)
def test_subset_context_subst_format_3(self):
    """Format 3 ContextSubst lookups survive subsetting unchanged."""
    # https://github.com/fonttools/fonttools/issues/1879
    # Test font contains 'calt' feature with Format 3 ContextSubst lookup subtables
    ttx = self.getpath("TestContextSubstFormat3.ttx")
    font, fontpath = self.compile_font(ttx, ".ttf")
    subsetpath = self.temp_path(".ttf")
    subset.main([fontpath, "--unicodes=*", "--output-file=%s" % subsetpath])
    subsetfont = TTFont(subsetpath)
    # check all glyphs are kept via GSUB closure, no changes expected
    self.expect_ttx(subsetfont, ttx)
def test_cmap_prune_format12(self):
    """Format 12 cmap subtables are pruned to the subset glyphs (golden TTX)."""
    _, fontpath = self.compile_font(self.getpath("CmapSubsetTest.ttx"), ".ttf")
    subsetpath = self.temp_path(".ttf")
    subset.main([fontpath, "--glyphs=a", "--output-file=%s" % subsetpath])
    subsetfont = TTFont(subsetpath)
    self.expect_ttx(subsetfont, self.getpath("CmapSubsetTest.subset.ttx"), ["cmap"])
def test_GPOS_PairPos_Format2_useClass0(self):
    """PairPos Format 2 class-0 handling after subsetting (see inline notes)."""
    # Check two things related to class 0 ('every other glyph'):
    # 1) that it's reused for ClassDef1 when it becomes empty as the subset glyphset
    # is intersected with the table's Coverage
    # 2) that it is never reused for ClassDef2 even when it happens to become empty
    # because of the subset glyphset. In this case, we don't keep a PairPosClass2
    # subtable if only ClassDef2's class0 survived subsetting.
    # The test font (from Harfbuzz test suite) is constructed to trigger these two
    # situations depending on the input subset --text.
    # https://github.com/fonttools/fonttools/pull/2221
    _, fontpath = self.compile_font(
        self.getpath("GPOS_PairPos_Format2_PR_2221.ttx"), ".ttf"
    )
    subsetpath = self.temp_path(".ttf")
    # each input char triggers one of the two scenarios above
    for n, text in enumerate("!#", start=1):
        expected_ttx = self.getpath(
            f"GPOS_PairPos_Format2_ClassDef{n}_useClass0.subset.ttx"
        )
        with self.subTest(text=text, expected_ttx=expected_ttx):
            subset.main(
                [
                    fontpath,
                    f"--text='{text}'",
                    "--layout-features+=test",
                    "--output-file=%s" % subsetpath,
                ]
            )
            subsetfont = TTFont(subsetpath)
            self.expect_ttx(subsetfont, expected_ttx, ["GPOS"])
def test_GPOS_SinglePos_prune_post_subset_no_value(self):
    """SinglePos subtables with no ValueRecord are pruned after subsetting
    (issue 2312); compared against a golden TTX."""
    _, fontpath = self.compile_font(
        self.getpath("GPOS_SinglePos_no_value_issue_2312.ttx"), ".ttf"
    )
    subsetpath = self.temp_path(".ttf")
    subset.main([fontpath, "*", "--glyph-names", "--output-file=%s" % subsetpath])
    subsetfont = TTFont(subsetpath)
    self.expect_ttx(
        subsetfont,
        self.getpath("GPOS_SinglePos_no_value_issue_2312.subset.ttx"),
        ["GlyphOrder", "GPOS"],
    )
@pytest.fixture
def featureVarsTestFont():
    """Build an in-memory variable font with a discretionary 'dlig' feature
    and an 'rvrn' feature variation ('dollar' -> 'dollar.rvrn')."""
    fb = FontBuilder(unitsPerEm=100)
    fb.setupGlyphOrder([".notdef", "f", "f_f", "dollar", "dollar.rvrn"])
    fb.setupCharacterMap({ord("f"): "f", ord("$"): "dollar"})
    fb.setupNameTable({"familyName": "TestFeatureVars", "styleName": "Regular"})
    fb.setupPost()
    fb.setupFvar(axes=[("wght", 100, 400, 900, "Weight")], instances=[])
    fb.addOpenTypeFeatures("""\
feature dlig {
sub f f by f_f;
} dlig;
""")
    fb.addFeatureVariations(
        [([{"wght": (0.20886, 1.0)}], {"dollar": "dollar.rvrn"})],
        featureTag="rvrn"
    )
    buf = io.BytesIO()
    fb.save(buf)
    buf.seek(0)
    return TTFont(buf)
def test_subset_feature_variations_keep_all(featureVarsTestFont):
    """Default options drop discretionary 'dlig' but keep required 'rvrn'
    feature variations and the glyphs they reference."""
    font = featureVarsTestFont
    options = subset.Options()
    subsetter = subset.Subsetter(options)
    subsetter.populate(unicodes=[ord("f"), ord("$")])
    subsetter.subset(font)
    featureTags = {
        r.FeatureTag for r in font["GSUB"].table.FeatureList.FeatureRecord
    }
    # 'dlig' is discretionary so it is dropped by default
    assert "dlig" not in featureTags
    assert "f_f" not in font.getGlyphOrder()
    # 'rvrn' is required so it is kept by default
    assert "rvrn" in featureTags
    assert "dollar.rvrn" in font.getGlyphOrder()
def test_subset_feature_variations_drop_all(featureVarsTestFont):
    """Removing 'rvrn' from layout_features drops all FeatureVariations and
    downgrades the GSUB table back to version 1.0."""
    font = featureVarsTestFont
    options = subset.Options()
    options.layout_features.remove("rvrn") # drop 'rvrn'
    subsetter = subset.Subsetter(options)
    subsetter.populate(unicodes=[ord("f"), ord("$")])
    subsetter.subset(font)
    featureTags = {
        r.FeatureTag for r in font["GSUB"].table.FeatureList.FeatureRecord
    }
    glyphs = set(font.getGlyphOrder())
    assert "rvrn" not in featureTags
    assert glyphs == {".notdef", "f", "dollar"}
    # all FeatureVariationRecords were dropped
    assert font["GSUB"].table.FeatureVariations is None
    assert font["GSUB"].table.Version == 0x00010000
# TODO test_subset_feature_variations_drop_from_end_empty_records
# https://github.com/fonttools/fonttools/issues/1881#issuecomment-619415044
def test_subset_single_pos_format():
    """Subsetting a SinglePos Format 2 subtable whose surviving glyphs share
    one ValueRecord downgrades it to the more compact Format 1."""
    fb = FontBuilder(unitsPerEm=1000)
    fb.setupGlyphOrder([".notdef", "a", "b", "c"])
    fb.setupCharacterMap({ord("a"): "a", ord("b"): "b", ord("c"): "c"})
    fb.setupNameTable({"familyName": "TestSingePosFormat", "styleName": "Regular"})
    fb.setupPost()
    fb.addOpenTypeFeatures("""
feature kern {
pos a -50;
pos b -40;
pos c -50;
} kern;
""")
    buf = io.BytesIO()
    fb.save(buf)
    buf.seek(0)
    font = TTFont(buf)
    # The input font has a SinglePos Format 2 subtable where each glyph has
    # different ValueRecords
    assert getXML(font["GPOS"].table.LookupList.Lookup[0].toXML, font) == [
        '<Lookup>',
        '  <LookupType value="1"/>',
        '  <LookupFlag value="0"/>',
        '  <!-- SubTableCount=1 -->',
        '  <SinglePos index="0" Format="2">',
        '    <Coverage>',
        '      <Glyph value="a"/>',
        '      <Glyph value="b"/>',
        '      <Glyph value="c"/>',
        '    </Coverage>',
        '    <ValueFormat value="4"/>',
        '    <!-- ValueCount=3 -->',
        '    <Value index="0" XAdvance="-50"/>',
        '    <Value index="1" XAdvance="-40"/>',
        '    <Value index="2" XAdvance="-50"/>',
        '  </SinglePos>',
        '</Lookup>',
    ]
    options = subset.Options()
    subsetter = subset.Subsetter(options)
    subsetter.populate(unicodes=[ord("a"), ord("c")])
    subsetter.subset(font)
    # All the subsetted glyphs from the original SinglePos Format2 subtable
    # now have the same ValueRecord, so we use a more compact Format 1 subtable.
    assert getXML(font["GPOS"].table.LookupList.Lookup[0].toXML, font) == [
        '<Lookup>',
        '  <LookupType value="1"/>',
        '  <LookupFlag value="0"/>',
        '  <!-- SubTableCount=1 -->',
        '  <SinglePos index="0" Format="1">',
        '    <Coverage>',
        '      <Glyph value="a"/>',
        '      <Glyph value="c"/>',
        '    </Coverage>',
        '    <ValueFormat value="4"/>',
        '    <Value XAdvance="-50"/>',
        '  </SinglePos>',
        '</Lookup>',
    ]
@pytest.fixture
def ttf_path(tmp_path):
    """Compile the shared ttLib test TTX to a .ttf in tmp_path; returns the path."""
    # $(dirname $0)/../ttLib/data
    ttLib_data = pathlib.Path(__file__).parent.parent / "ttLib" / "data"
    font = TTFont()
    font.importXML(ttLib_data / "TestTTF-Regular.ttx")
    font_path = tmp_path / "TestTTF-Regular.ttf"
    font.save(font_path)
    return font_path
def test_subset_empty_glyf(tmp_path, ttf_path):
    """A subset with only empty glyphs yields a minimal 1-byte glyf table
    and an all-zero loca."""
    subset_path = tmp_path / (ttf_path.name + ".subset")
    # only keep empty .notdef and space glyph, resulting in an empty glyf table
    subset.main(
        [
            str(ttf_path),
            "--no-notdef-outline",
            "--glyph-names",
            f"--output-file={subset_path}",
            "--glyphs=.notdef space",
        ]
    )
    subset_font = TTFont(subset_path)
    assert subset_font.getGlyphOrder() == [".notdef", "space"]
    # glyf is padded to a single zero byte rather than being empty
    assert subset_font.reader['glyf'] == b"\x00"
    glyf = subset_font["glyf"]
    assert all(glyf[g].numberOfContours == 0 for g in subset_font.getGlyphOrder())
    loca = subset_font["loca"]
    assert all(loc == 0 for loc in loca)
@pytest.fixture
def colrv1_path(tmp_path):
    """Build a test font with both COLRv1 base glyphs (uniE000..uniE003) and
    a COLRv0 record (uniE004), plus a 3-color CPAL; returns the saved path.

    Every glyph shares the same square outline; the interesting content is
    the COLR paint graph (layers, gradients, transforms, a PaintColrGlyph
    reference from uniE003 to uniE001) used by the subsetting tests below.
    """
    base_glyph_names = ["uni%04X" % i for i in range(0xE000, 0xE000 + 10)]
    layer_glyph_names = ["glyph%05d" % i for i in range(10, 20)]
    glyph_order = [".notdef"] + base_glyph_names + layer_glyph_names
    # one shared 500x500 square outline for all glyphs
    pen = TTGlyphPen(glyphSet=None)
    pen.moveTo((0, 0))
    pen.lineTo((0, 500))
    pen.lineTo((500, 500))
    pen.lineTo((500, 0))
    pen.closePath()
    glyph = pen.glyph()
    glyphs = {g: glyph for g in glyph_order}
    fb = FontBuilder(unitsPerEm=1024, isTTF=True)
    fb.setupGlyphOrder(glyph_order)
    fb.setupCharacterMap({int(name[3:], 16): name for name in base_glyph_names})
    fb.setupGlyf(glyphs)
    fb.setupHorizontalMetrics({g: (500, 0) for g in glyph_order})
    fb.setupHorizontalHeader()
    fb.setupOS2()
    fb.setupPost()
    fb.setupNameTable({"familyName": "TestCOLRv1", "styleName": "Regular"})
    fb.setupCOLR(
        {
            # two solid layers
            "uniE000": (
                ot.PaintFormat.PaintColrLayers,
                [
                    {
                        "Format": ot.PaintFormat.PaintGlyph,
                        "Paint": (ot.PaintFormat.PaintSolid, 0),
                        "Glyph": "glyph00010",
                    },
                    {
                        "Format": ot.PaintFormat.PaintGlyph,
                        "Paint": (ot.PaintFormat.PaintSolid, (2, 0.3)),
                        "Glyph": "glyph00011",
                    },
                ],
            ),
            # transformed radial gradient + solid layer
            "uniE001": (
                ot.PaintFormat.PaintColrLayers,
                [
                    {
                        "Format": ot.PaintFormat.PaintTransform,
                        "Paint": {
                            "Format": ot.PaintFormat.PaintGlyph,
                            "Paint": {
                                "Format": ot.PaintFormat.PaintRadialGradient,
                                "x0": 250,
                                "y0": 250,
                                "r0": 250,
                                "x1": 200,
                                "y1": 200,
                                "r1": 0,
                                "ColorLine": {
                                    "ColorStop": [(0.0, 1), (1.0, 2)],
                                    "Extend": "repeat",
                                },
                            },
                            "Glyph": "glyph00012",
                        },
                        "Transform": (0.7071, 0.7071, -0.7071, 0.7071, 0, 0),
                    },
                    {
                        "Format": ot.PaintFormat.PaintGlyph,
                        "Paint": (ot.PaintFormat.PaintSolid, (1, 0.5)),
                        "Glyph": "glyph00013",
                    },
                ],
            ),
            # linear gradient + translated solid layer
            "uniE002": (
                ot.PaintFormat.PaintColrLayers,
                [
                    {
                        "Format": ot.PaintFormat.PaintGlyph,
                        "Paint": {
                            "Format": ot.PaintFormat.PaintLinearGradient,
                            "x0": 0,
                            "y0": 0,
                            "x1": 500,
                            "y1": 500,
                            "x2": -500,
                            "y2": 500,
                            "ColorLine": {"ColorStop": [(0.0, 1), (1.0, 2)]},
                        },
                        "Glyph": "glyph00014",
                    },
                    {
                        "Format": ot.PaintFormat.PaintTransform,
                        "Paint": {
                            "Format": ot.PaintFormat.PaintGlyph,
                            "Paint": (ot.PaintFormat.PaintSolid, 1),
                            "Glyph": "glyph00015",
                        },
                        "Transform": (1, 0, 0, 1, 400, 400),
                    },
                ],
            ),
            # references uniE001's paint graph via PaintColrGlyph
            "uniE003": {
                "Format": ot.PaintFormat.PaintRotate,
                "Paint": {
                    "Format": ot.PaintFormat.PaintColrGlyph,
                    "Glyph": "uniE001",
                },
                "angle": 45,
                "centerX": 250,
                "centerY": 250,
            },
            # plain COLRv0 layer list
            "uniE004": [
                ("glyph00016", 1),
                ("glyph00017", 0xFFFF), # special palette index for foreground text
                ("glyph00018", 2),
            ],
        },
    )
    fb.setupCPAL(
        [
            [
                (1.0, 0.0, 0.0, 1.0), # red
                (0.0, 1.0, 0.0, 1.0), # green
                (0.0, 0.0, 1.0, 1.0), # blue
            ],
        ],
    )
    output_path = tmp_path / "TestCOLRv1.ttf"
    fb.save(output_path)
    return output_path
def test_subset_COLRv1_and_CPAL(colrv1_path):
    """COLRv1 closure pulls in glyphs referenced through PaintColrGlyph, and
    unused CPAL colors are pruned with palette indices remapped."""
    subset_path = colrv1_path.parent / (colrv1_path.name + ".subset")
    subset.main(
        [
            str(colrv1_path),
            "--glyph-names",
            f"--output-file={subset_path}",
            "--unicodes=E002,E003,E004",
        ]
    )
    subset_font = TTFont(subset_path)
    glyph_set = set(subset_font.getGlyphOrder())
    # uniE000 and its children are excluded from subset
    assert "uniE000" not in glyph_set
    assert "glyph00010" not in glyph_set
    assert "glyph00011" not in glyph_set
    # uniE001 and children are pulled in indirectly as PaintColrGlyph by uniE003
    assert "uniE001" in glyph_set
    assert "glyph00012" in glyph_set
    assert "glyph00013" in glyph_set
    assert "uniE002" in glyph_set
    assert "glyph00014" in glyph_set
    assert "glyph00015" in glyph_set
    assert "uniE003" in glyph_set
    assert "uniE004" in glyph_set
    assert "glyph00016" in glyph_set
    assert "glyph00017" in glyph_set
    assert "glyph00018" in glyph_set
    assert "COLR" in subset_font
    colr = subset_font["COLR"].table
    assert colr.Version == 1
    assert len(colr.BaseGlyphRecordArray.BaseGlyphRecord) == 1
    assert len(colr.BaseGlyphV1List.BaseGlyphV1Record) == 3 # was 4
    base = colr.BaseGlyphV1List.BaseGlyphV1Record[0]
    assert base.BaseGlyph == "uniE001"
    layers = colr.LayerV1List.Paint[
        base.Paint.FirstLayerIndex: base.Paint.FirstLayerIndex + base.Paint.NumLayers
    ]
    assert len(layers) == 2
    # check v1 palette indices were remapped
    assert layers[0].Paint.Paint.ColorLine.ColorStop[0].Color.PaletteIndex == 0
    assert layers[0].Paint.Paint.ColorLine.ColorStop[1].Color.PaletteIndex == 1
    assert layers[1].Paint.Color.PaletteIndex == 0
    baseRecV0 = colr.BaseGlyphRecordArray.BaseGlyphRecord[0]
    assert baseRecV0.BaseGlyph == "uniE004"
    layersV0 = colr.LayerRecordArray.LayerRecord
    assert len(layersV0) == 3
    # check v0 palette indices were remapped (except for 0xFFFF)
    assert layersV0[0].PaletteIndex == 0
    assert layersV0[1].PaletteIndex == 0xFFFF
    assert layersV0[2].PaletteIndex == 1
    assert "CPAL" in subset_font
    cpal = subset_font["CPAL"]
    assert [
        tuple(v / 255 for v in (c.red, c.green, c.blue, c.alpha))
        for c in cpal.palettes[0]
    ] == [
        # the first color 'red' was pruned
        (0.0, 1.0, 0.0, 1.0), # green
        (0.0, 0.0, 1.0, 1.0), # blue
    ]
def test_subset_COLRv1_and_CPAL_drop_empty(colrv1_path):
    """COLR and CPAL are dropped entirely when no color base glyph survives."""
    subset_path = colrv1_path.parent / (colrv1_path.name + ".subset")
    subset.main(
        [
            str(colrv1_path),
            "--glyph-names",
            f"--output-file={subset_path}",
            "--glyphs=glyph00010",
        ]
    )
    subset_font = TTFont(subset_path)
    glyph_set = set(subset_font.getGlyphOrder())
    # only the bare layer glyph remains; its color base glyph does not
    assert "glyph00010" in glyph_set
    assert "uniE000" not in glyph_set
    assert "COLR" not in subset_font
    assert "CPAL" not in subset_font
def test_subset_COLRv1_downgrade_version(colrv1_path):
    """When only v0 records survive (uniE004), COLR is downgraded to version 0."""
    subset_path = colrv1_path.parent / (colrv1_path.name + ".subset")
    subset.main(
        [
            str(colrv1_path),
            "--glyph-names",
            f"--output-file={subset_path}",
            "--unicodes=E004",
        ]
    )
    subset_font = TTFont(subset_path)
    assert set(subset_font.getGlyphOrder()) == {
        ".notdef",
        "uniE004",
        "glyph00016",
        "glyph00017",
        "glyph00018",
    }
    assert "COLR" in subset_font
    assert subset_font["COLR"].version == 0
def test_subset_COLRv1_drop_all_v0_glyphs(colrv1_path):
    """When only v1 records survive (uniE003 and its PaintColrGlyph target),
    the COLR table stays version 1 and all v0 record arrays are emptied."""
    subset_path = colrv1_path.parent / (colrv1_path.name + ".subset")
    subset.main(
        [
            str(colrv1_path),
            "--glyph-names",
            f"--output-file={subset_path}",
            "--unicodes=E003",
        ]
    )
    subset_font = TTFont(subset_path)
    assert set(subset_font.getGlyphOrder()) == {
        ".notdef",
        "uniE001",
        "uniE003",
        "glyph00012",
        "glyph00013",
    }
    assert "COLR" in subset_font
    colr = subset_font["COLR"]
    assert colr.version == 1
    assert colr.table.BaseGlyphRecordCount == 0
    assert colr.table.BaseGlyphRecordArray is None
    assert colr.table.LayerRecordArray is None
    # was `is 0`: identity comparison with an int literal relies on CPython
    # small-int caching and raises SyntaxWarning on Python >= 3.8; equality
    # is what is meant here.
    assert colr.table.LayerRecordCount == 0
def test_subset_keep_size_drop_empty_stylistic_set():
    """After subsetting empties both features, the parameter-only 'size'
    feature is kept (its FeatureParams carry data) while the now-empty
    'ss01' stylistic set is dropped."""
    fb = FontBuilder(unitsPerEm=1000, isTTF=True)
    glyph_order = [".notdef", "a", "b", "b.ss01"]
    fb.setupGlyphOrder(glyph_order)
    fb.setupGlyf({g: TTGlyphPen(None).glyph() for g in glyph_order})
    fb.setupCharacterMap({ord("a"): "a", ord("b"): "b"})
    fb.setupHorizontalMetrics({g: (500, 0) for g in glyph_order})
    fb.setupHorizontalHeader()
    fb.setupOS2()
    fb.setupPost()
    fb.setupNameTable({"familyName": "TestKeepSizeFeature", "styleName": "Regular"})
    fb.addOpenTypeFeatures("""
feature size {
parameters 10.0 0;
} size;
feature ss01 {
featureNames {
name "Alternate b";
};
sub b by b.ss01;
} ss01;
""")
    buf = io.BytesIO()
    fb.save(buf)
    buf.seek(0)
    font = TTFont(buf)
    gpos_features = font["GPOS"].table.FeatureList.FeatureRecord
    assert gpos_features[0].FeatureTag == "size"
    assert isinstance(gpos_features[0].Feature.FeatureParams, ot.FeatureParamsSize)
    assert gpos_features[0].Feature.LookupCount == 0
    gsub_features = font["GSUB"].table.FeatureList.FeatureRecord
    assert gsub_features[0].FeatureTag == "ss01"
    assert isinstance(
        gsub_features[0].Feature.FeatureParams, ot.FeatureParamsStylisticSet
    )
    options = subset.Options(layout_features=["*"])
    subsetter = subset.Subsetter(options)
    subsetter.populate(unicodes=[ord("a")])
    subsetter.subset(font)
    # empty size feature was kept
    gpos_features = font["GPOS"].table.FeatureList.FeatureRecord
    assert gpos_features[0].FeatureTag == "size"
    assert isinstance(gpos_features[0].Feature.FeatureParams, ot.FeatureParamsSize)
    assert gpos_features[0].Feature.LookupCount == 0
    # empty ss01 feature was dropped
    assert font["GSUB"].table.FeatureList.FeatureCount == 0
# Allow running this test module directly; unittest.main() returns the result
# whose failure state sys.exit propagates as the process exit code.
if __name__ == "__main__":
    sys.exit(unittest.main())
|
# -*- coding: utf-8 -*-
"""
linegen
~~~~~~~
An advanced line generation tool using a HarfBuzz for proper text shaping.
"""
from __future__ import absolute_import, division, print_function
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import object
from pgi.repository import HarfBuzz as hb
First draft of a pango-based artificial line generation utility
# -*- coding: utf-8 -*-
#
# Copyright 2014 Google Inc. All rights reserved.
# Copyright 2015 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""=
linegen
~~~~~~~
An advanced line generation tool using Pango for proper text shaping. The
actual drawing code was adapted from the create_image utility from nototools
available at [0].
[0] https://github.com/googlei18n/nototools
"""
from __future__ import absolute_import, division, print_function
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import object
from PIL import Image
from jinja2 import Environment, PackageLoader
import tempfile
import shutil
import os
import cairo
import pango
import pangocairo
def set_fonts(font_file):
    """
    Activates a temporary fontconfig environment and loads pango.

    Writes a temporary fontconfig configuration with ``font_file`` being the
    only font in the cache. It is then activated by setting the FONTCONFIG_FILE
    environment variable and loading pango/cairo.

    .. warning::
        This function can only be executed once as letting pango/cairo
        reinitialize fontconfig doesn't seem to be possible.

    Args:
        font_file (unicode): Location of an font file understood by pango
    """
    global cairo
    global pango
    global pangocairo
    font_dir = tempfile.mkdtemp()
    shutil.copy(font_file, font_dir)
    env = Environment(loader=PackageLoader('kraken', 'templates'))
    template = env.get_template('fonts.conf')
    fp = tempfile.NamedTemporaryFile(delete=False)
    # NOTE(review): the template is handed the config file's own path as
    # ``font_file`` — presumably only ``font_dir`` matters to fonts.conf;
    # verify against the template.
    fp.write(template.render(font_dir=font_dir, font_file=fp.name).encode('utf-8'))
    # Close (and thereby flush) the config before pointing fontconfig at it;
    # the original left the handle open, so the file could still be empty or
    # partial when fontconfig first read it.
    fp.close()
    # os.environ assignment (unlike bare os.putenv) also updates the process's
    # Python-visible environment, keeping os.environ/getenv consistent.
    os.environ["FONTCONFIG_FILE"] = fp.name
def draw_on_surface(surface, text, family, font_size, language, rtl, vertical):
    """Render ``text`` onto a cairo ``surface`` with pango.

    Returns:
        (height, width) tuple in pixels: the vertical extent actually used by
        the layout (bottom_usage - top_usage) and its maximum width.
    """
    pangocairo_ctx = pangocairo.CairoContext(cairo.Context(surface))
    layout = pangocairo_ctx.create_layout()
    pango_ctx = layout.get_context()
    if language is not None:
        # NOTE(review): assumes ``language`` is a tag pango.Language accepts
        # (RFC-3066 per render_line's docstring) — confirm.
        pango_ctx.set_language(pango.Language(language))
    # select base direction from the rtl/vertical flag combination
    if rtl:
        if vertical:
            base_dir = pango.DIRECTION_TTB_RTL
        else:
            base_dir = pango.DIRECTION_RTL
    else:
        if vertical:
            base_dir = pango.DIRECTION_TTB_LTR
        else:
            base_dir = pango.DIRECTION_LTR
    pango_ctx.set_base_dir(base_dir)
    font = pango.FontDescription()
    font.set_family(family)
    # pango sizes are in units of pango.SCALE per point
    font.set_size(font_size * pango.SCALE)
    layout.set_font_description(font)
    layout.set_text(text)
    # extents: (ink_rect, logical_rect); take the outermost usage of both
    extents = layout.get_pixel_extents()
    top_usage = min(extents[0][1], extents[1][1], 0)
    bottom_usage = max(extents[0][3], extents[1][3])
    width = max(extents[0][2], extents[1][2])
    pangocairo_ctx.set_antialias(cairo.ANTIALIAS_GRAY)
    pangocairo_ctx.set_source_rgb(1, 1, 1) # White background
    pangocairo_ctx.paint()
    # shift down so ink above the origin (negative top_usage) stays visible
    pangocairo_ctx.translate(0, -top_usage)
    pangocairo_ctx.set_source_rgb(0, 0, 0) # Black text color
    pangocairo_ctx.show_layout(layout)
    return bottom_usage - top_usage, width
def render_line(text, family, font_size=32, language=None, rtl=False, vertical=False):
    """
    Renders ``text`` into a PIL Image using pango and cairo.

    Args:
        text (unicode): A unicode string to be rendered
        family (unicode): Font family to use for rendering
        font_size (int): Font size in points
        language (unicode): RFC-3066 language tag
        rtl (bool): Set base horizontal text direction. The BiDi algorithm will
                    still apply so it's usually not necessary to touch this
                    option.
        vertical (bool): Set vertical text direction (True = Top-to-Bottom)

    Returns:
        (B/W) PIL.Image in RGBA mode
    """
    # first pass on a zero-sized surface only to measure the text extents
    temp_surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 0, 0)
    height, width = draw_on_surface(temp_surface, text, family,
                                    font_size, language, rtl, vertical)
    # second pass actually draws at the measured size
    real_surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
    draw_on_surface(real_surface, text, family, font_size, language, rtl, vertical)
    # cairo's ARGB32 is BGRA from PIL's point of view
    return Image.frombuffer("RGBA", (width, height), real_surface.get_data(), "raw", "BGRA", 0, 1)
def degrade_line():
|
KIBOM_VERSION = "1.51"
KIBOM_DATE = "2018-8-10"
Update version.py
KIBOM_VERSION = "1.52"
KIBOM_DATE = "2018-9-16"
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import matplotlib as mpl
import matplotlib.dates as mdates
import bench_util as bu
import shutil
import os
# Set the matplotlib settings (eventually this will go at the top of the graph_util)
# Global rcParams: enlarge fonts and figure size for report-quality output.
mpl.rcParams['axes.labelsize'] = 20
mpl.rcParams['axes.titlesize'] = 24
mpl.rcParams['legend.fontsize'] = 20
mpl.rcParams['font.size'] = 20.0
mpl.rcParams['figure.figsize'] = [15,10]
mpl.rcParams['xtick.labelsize'] = 20
mpl.rcParams['ytick.labelsize'] = 20
# Set the style for the graphs
plt.style.use('bmh')
# Additional matplotlib formatting settings
months = mdates.MonthLocator()
# This formats the months as three-letter abbreviations
months_format = mdates.DateFormatter('%b')
def beautify_legend(df, col_list):
    """Produce human-friendly legend labels for utility columns.

    Each name in ``col_list`` is run through an ordered substitution table
    (suffix stripping, fuel-specific wording, underscore-to-space) and
    title-cased; the dataframe's columns are renamed to match.

    Returns:
        (renamed dataframe, list of prettified column names)
    """
    # Order matters: '_unit_cost' must be rewritten before the bare '_cost'
    # rule, and '_' replacement must run last.
    substitutions = (
        ("_mmbtu", ""),
        ("_unit_cost", " unit cost"),
        ("_cost", ""),
        ("kwh", "electricity usage"),
        ("kw_avg", "electricity demand"),
        ("hdd", "heating degree days"),
        ("_", " "),
    )
    pretty_cols = []
    for original in col_list:
        pretty = original
        for old, new in substitutions:
            pretty = pretty.replace(old, new)
        pretty = pretty.title()
        df = df.rename(columns={original: pretty})
        pretty_cols.append(pretty)
    return df, pretty_cols
def color_formatter(col_name_list):
    """Map dataframe column names to standardized per-fuel-type hex colors.

    The first matching keyword rule wins, so order matters (e.g. 'demand'
    must be tested before the generic 'electricity' rule). Unrecognized
    names fall back to black.

    Args:
        col_name_list: list of column-name strings.

    Returns:
        dict mapping each input name to a '#rrggbb' color string.
    """
    color_dict = {}
    for col_name in col_name_list:
        # hoisted: the original called .lower() in every branch; the extra
        # case-sensitive 'Electricity' check was redundant with it.
        lowered = col_name.lower()
        if 'natural' in lowered:
            color = '#1f78b4'
        elif 'fuel' in lowered:
            color = '#e31a1c'
        elif 'water' in lowered:
            color = '#b3df8a'
        elif 'sewer' in lowered:
            color = '#fdbf6f'
        elif 'district' in lowered:
            color = '#fb9a99'
        elif 'kw_' in lowered or 'demand' in lowered:
            color = '#33a02c'
        elif 'electricity' in lowered or 'kwh' in lowered:
            color = '#a6cee3'
        elif 'refuse' in lowered:
            color = '#ff7f00'
        else:
            color = '#000000'
        color_dict[col_name] = color
    return color_dict
def area_cost_distribution(df, fiscal_year_col, utility_col_list, filename):
    """Save a stacked-area chart of each utility's share of total annual cost.

    Args:
        df: dataframe already summed per fiscal year.
        fiscal_year_col: name of the fiscal-year column.
        utility_col_list: utility-bill cost column names.
        filename: path the figure is saved to.
    """
    # Inputs include the dataframe, the column name for the fiscal year column, and the list of column names for the
    # different utility bills. The dataframe should already include the summed bills for each fiscal year.
    fig, ax = plt.subplots()
    # Makes the legend prettier.
    df, utility_col_list = beautify_legend(df, utility_col_list)
    # Take costs for each utility type and convert to percent of total cost by fiscal year
    df['total_costs'] = df[utility_col_list].sum(axis=1)
    # Standardize colors using color_formatter utility
    color_dict = color_formatter(utility_col_list)
    percent_columns = []
    # Create dictionary for differently named keys
    percent_col_colors = {}
    for col in utility_col_list:
        percent_col = "Percent " + col
        percent_columns.append(percent_col)
        df[percent_col] = df[col] / df.total_costs
        percent_col_colors[percent_col] = color_dict[col]
    # fill NaNs (e.g. from division by a zero total) so stackplot gets numbers
    df = df.fillna(0)
    # Create stacked area plot
    ax.stackplot(df[fiscal_year_col], df[percent_columns].T, labels=percent_columns,
                 colors=[ percent_col_colors[i] for i in percent_columns])
    # Format the y axis to be in percent
    ax.yaxis.set_major_formatter(FuncFormatter('{0:.0%}'.format))
    # Format the x-axis to include all fiscal years
    plt.xticks(np.arange(df[fiscal_year_col].min(), df[fiscal_year_col].max()+1, 1.0))
    # Add title and axis labels
    plt.title('Annual Utility Cost Distribution')
    plt.ylabel('Utility Cost Distribution')
    plt.xlabel('Fiscal Year')
    # Add legend
    plt.legend(loc='lower right', ncol=2, fancybox=True, shadow=True)
    # Save and show
    plt.savefig(filename)
    plt.close('all')
def area_use_distribution(df, fiscal_year_col, utility_col_list, filename):
    """Save a stacked-area chart of each utility's share of total annual usage.

    Mirrors area_cost_distribution but normalizes by total usage instead of
    total cost.

    Args:
        df: dataframe already summed per fiscal year.
        fiscal_year_col: name of the fiscal-year column.
        utility_col_list: utility-usage column names.
        filename: path the figure is saved to.
    """
    # Inputs include the dataframe, the column name for the fiscal year column, and the list of column names for the
    # different utility bills. The dataframe should already include the summed bills for each fiscal year.
    # Makes the legend prettier.
    df, utility_col_list = beautify_legend(df, utility_col_list)
    fig, ax = plt.subplots()
    # Take usage for each utility type and convert to percent of total cost by fiscal year
    df['total_use'] = df[utility_col_list].sum(axis=1)
    # Standardize colors using color_formatter utility
    color_dict = color_formatter(utility_col_list)
    percent_columns = []
    # Create dictionary for differently named keys
    percent_col_colors = {}
    for col in utility_col_list:
        percent_col = "Percent " + col
        percent_columns.append(percent_col)
        df[percent_col] = df[col] / df.total_use
        percent_col_colors[percent_col] = color_dict[col]
    # Fill the NaNs
    df = df.fillna(0)
    # Create stacked area plot
    ax.stackplot(df[fiscal_year_col], df[percent_columns].T, labels=percent_columns,
                 colors=[ percent_col_colors[i] for i in percent_columns])
    # Format the y axis to be in percent
    ax.yaxis.set_major_formatter(FuncFormatter('{0:.0%}'.format))
    # Format the x-axis to include all fiscal years
    plt.xticks(np.arange(df[fiscal_year_col].min(), df[fiscal_year_col].max()+1, 1.0))
    # Add title and axis labels
    plt.title('Annual Energy Usage Distribution')
    plt.ylabel('Annual Energy Usage Distribution')
    plt.xlabel('Fiscal Year')
    # Add legend
    plt.legend(loc='lower right', ncol=2, fancybox=True, shadow=True)
    # Save and show
    plt.savefig(filename)
    plt.close('all')
def create_stacked_bar(df, fiscal_year_col, column_name_list, ylabel, title, filename):
    """Save a stacked bar chart of the given columns per fiscal year.

    If ``df`` is empty, a canned 'no data available' image is copied to
    ``filename`` instead of drawing.

    Args:
        df: dataframe already summed per fiscal year.
        fiscal_year_col: name of the fiscal-year column.
        column_name_list: data columns to stack.
        ylabel: y-axis label.
        title: chart title.
        filename: path the figure is saved to.
    """
    # Parameters include the dataframe, the name of the column where the fiscal year is listed, a list of the column names
    # with the correct data for the chart, and the filename where the output should be saved.
    # Check to see if the dataframe is empty, and if so, set the saved figure as an empty filename
    if df.empty:
        shutil.copyfile(os.path.abspath('data/water_sewer_no_data_available.png'), os.path.abspath(filename))
    else:
        # Makes the legend prettier.
        df, column_name_list = beautify_legend(df, column_name_list)
        # Create the figure
        fig, ax = plt.subplots()
        # Set the bar width
        width = 0.50
        # Standardize colors using color_formatter utility
        color_dict = color_formatter(column_name_list)
        # Create the stacked bars. The "bottom" is the sum of all previous bars to set the starting point for the next bar.
        previous_col_name = 0
        # Fill the NaNs
        df = df.fillna(0)
        for col in column_name_list:
            col_name = plt.bar(df[fiscal_year_col], df[col], width, label=col, bottom=previous_col_name, color=color_dict[col])
            previous_col_name = previous_col_name + df[col]
        # label axes
        plt.ylabel(ylabel)
        plt.xlabel('Fiscal Year')
        # Make one bar for each fiscal year
        plt.xticks(np.arange(df[fiscal_year_col].min(), df[fiscal_year_col].max()+1, 1.0),
                   np.sort(list(df[fiscal_year_col].unique())))
        # headroom: cap the y-axis 10% above the tallest stacked total
        df['total_cost'] = df[column_name_list].sum(axis=1)
        ax.set_ylim(bottom=0, top=df.total_cost.max() + df.total_cost.max()*0.10)
        # Format the y-axis so a comma is displayed for thousands
        ax.get_yaxis().set_major_formatter(FuncFormatter(lambda x, p: format(int(x), ',')))
        plt.title(title)
        plt.legend(loc='lower right', ncol=2, fancybox=True, shadow=True)
        # Save and show
        plt.savefig(filename)
        plt.close('all')
def energy_use_stacked_bar(df, fiscal_year_col, column_name_list, filename):
    """Create a stacked bar chart of total annual energy usage [MMBTU].

    Parameters:
        df: DataFrame with one row per fiscal year.
        fiscal_year_col: name of the column where the fiscal year is listed.
        column_name_list: usage columns (one per fuel) stacked in each bar.
        filename: path where the PNG output should be saved.

    Returns None; the chart is written to disk as a side effect.
    """
    # Makes the legend prettier (also renames the dataframe columns to match).
    df, column_name_list = beautify_legend(df, column_name_list)
    # Create the figure
    fig, ax = plt.subplots()
    # Set the bar width
    width = 0.50
    # Standardize colors using the color_formatter utility
    color_dict = color_formatter(column_name_list)
    # Fill the NaNs so missing values do not break the stacking arithmetic.
    df = df.fillna(0)
    # "bottom" accumulates the sum of all previously drawn bars so each
    # series stacks on top of the ones before it.
    previous_col_name = 0
    for col in column_name_list:
        col_name = ax.bar(df[fiscal_year_col].values, df[col].values, width, label=col, bottom=previous_col_name,
                          color=color_dict[col])
        previous_col_name = previous_col_name + df[col]
    # label axes
    plt.ylabel('Annual Energy Usage [MMBTU]')
    plt.xlabel('Fiscal Year')
    plt.title('Total Annual Energy Usage')
    # Make one tick (and therefore one bar position) for each fiscal year
    plt.xticks(np.arange(df[fiscal_year_col].min(), df[fiscal_year_col].max()+1, 1.0),
               np.sort(list(df[fiscal_year_col].unique())))
    # Set the yticks to go up to the total usage (plus 10% headroom) in
    # increments of 1,000.
    df['total_use'] = df[column_name_list].sum(axis=1)
    plt.yticks(np.arange(0, df.total_use.max()+df.total_use.max()*0.10, 1000))
    # Format the y-axis so a comma is displayed for thousands
    ax.get_yaxis().set_major_formatter(FuncFormatter(lambda x, p: format(int(x), ',')))
    plt.legend(loc='lower right', ncol=2, fancybox=True, shadow=True)
    # Save the chart and release all open figures.
    plt.savefig(filename)
    plt.close('all')
def usage_pie_charts(df, use_or_cost_cols, chart_type, base_filename, site_id):
    """Create one pie chart per year for the three most recent complete
    fiscal years, showing each fuel's share of usage or cost.

    Parameters:
        df: DataFrame with fiscal_year as the index, containing the values
            for the columns in *use_or_cost_cols*.
        use_or_cost_cols: list of the energy usage or energy cost columns.
        chart_type: 1 for an energy-use pie chart, 2 for an energy-cost one
            (only affects the title text).
        base_filename: base graph file name; the final name also includes
            the Site ID, the correct output directory, and the year.
        site_id: the Site ID used to create the filename.

    Returns:
        A list of the URLs that can be used to access the three graphs
        from the report page.
    """
    # NOTE(review): this mutates the *global* matplotlib font size, which
    # affects every chart drawn after this call as well — confirm intentional.
    mpl.rcParams.update({'font.size': 32})
    # Makes the legend prettier (also renames the dataframe columns to match).
    df, use_or_cost_cols = beautify_legend(df, use_or_cost_cols)
    # Standardize colors using color_formatter utility
    color_dict = color_formatter(use_or_cost_cols)
    # Get the three most recent complete years of data
    sorted_completes = df.sort_index(ascending=False)
    most_recent_complete_years = sorted_completes[0:3]
    years = list(most_recent_complete_years.index.values)
    # Convert each column's value to its share of that year's total.
    most_recent_complete_years = most_recent_complete_years[use_or_cost_cols]
    most_recent_complete_years['Totals'] = most_recent_complete_years.sum(axis=1)
    for col in use_or_cost_cols:
        most_recent_complete_years[col] = most_recent_complete_years[col] / most_recent_complete_years.Totals
    most_recent_complete_years = most_recent_complete_years.drop('Totals', axis=1)
    # List holding the Matplotlib figures created.
    figs = []
    # List holding the URLs that will access the saved graph image files.
    urls = []
    # Create a pie chart for each of the 3 most recent complete years
    for year in years:
        # Make current year dataframe
        year_df = most_recent_complete_years.query("fiscal_year == @year")
        updated_use_or_cost_cols = []
        # Drop columns that only have zero usage so they get no pie slice.
        for col in use_or_cost_cols:
            if year_df[col].iloc[0] == 0:
                year_df = year_df.drop(col, axis=1)
            else:
                updated_use_or_cost_cols.append(col)
        fig, ax = plt.subplots()
        ax.pie(list(year_df.iloc[0].values), labels=list(year_df.columns.values), autopct='%1.1f%%',
               shadow=True, startangle=90, colors=[ color_dict[i] for i in updated_use_or_cost_cols])
        plt.tick_params(axis='both', which='both', labelsize=28)
        # Create the title based on whether it is an energy use or energy cost pie chart.
        if chart_type == 1:
            title = "FY " + str(year) + " Energy Usage [MMBTU]"
        else:
            title = "FY " + str(year) + " Energy Cost [$]"
        plt.title(title, fontsize=20)
        ax.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
        # Include the year in the file name
        fn_with_yr = '{}_{}'.format(base_filename, year)
        final_fn, url = graph_filename_url(site_id, fn_with_yr)
        urls.append(url)
        # Save the chart and release all open figures.
        plt.savefig(final_fn)
        figs.append(fig)
        plt.close('all')
    return urls
def create_monthly_profile(df, graph_column_name, yaxis_name, color_choice, title, filename):
    """Plot monthly profiles of one column for up to the five most recent
    fiscal years, one line per year, and save the chart to *filename*.

    Parameters:
        df: DataFrame with a (fiscal_year, fiscal_mo) MultiIndex and the
            appropriate graph column ('kWh', 'kW', etc.).
        graph_column_name: column containing the data graphed on the y-axis.
        yaxis_name: string displayed on the y-axis.
        color_choice: 'blue', 'red', or 'green' — selects the color palette.
        title: chart title string.
        filename: path where the PNG output should be saved.

    Returns None; the chart is written to disk as a side effect.
    """
    # Get the five most recent years present in the index.
    recent_years = (sorted(list(df.index.levels[0].values), reverse=True)[0:5])
    # Reset the index of the dataframe for more straightforward queries
    df_reset = df.reset_index()
    # Progressively lighter shades per palette (first/most-recent year darkest).
    color_dict = {'blue': ['#08519c', '#3182bd', '#6baed6', '#bdd7e7', '#eff3ff'],
                  'red': ['#a50f15', '#de2d26', '#fb6a4a', '#fcae91', '#fee5d9'],
                  'green': ['#006d2c', '#31a354', '#74c476', '#bae4b3', '#edf8e9']
                  }
    color_df = pd.DataFrame.from_dict(color_dict)
    # i is the counter for the different colors
    i=0
    # Create the plots
    fig, ax = plt.subplots()
    for year in recent_years:
        # Create df for one year only so it's plotted as a single line
        year_df = df_reset.query("fiscal_year == @year")
        # Plot the data
        ax.plot_date(year_df['fiscal_mo'], year_df[graph_column_name], fmt='-', color=color_df.iloc[i][color_choice],
                     label=str(year_df.fiscal_year.iloc[0]))
        # Increase counter by one to use the next color
        i += 1
    # Format the y-axis so a comma is displayed for thousands
    ax.get_yaxis().set_major_formatter(FuncFormatter(lambda x, p: format(int(x), ',')))
    # Set x-axis labels to be fiscal months, starting in July.
    # NOTE(review): year_df here is whichever year the loop ended on; if that
    # year has fewer months of data, the tick positions will not line up with
    # every label in bu.mo_list — confirm this is acceptable.
    ax.set_xticks(year_df.fiscal_mo.values)
    ax.set_xticklabels(bu.mo_list)
    # Add the labels
    plt.xlabel('Month of Year')
    plt.ylabel(yaxis_name)
    plt.legend()
    plt.title(title)
    # Save the chart and release all open figures.
    plt.savefig(filename)
    plt.close('all')
def stacked_bar_with_line(df, fiscal_year_col, bar_col_list, line_col, ylabel1, ylabel2, title, filename):
    """Draw a stacked bar chart with a line overlay on a secondary y-axis.

    Parameters:
        df: DataFrame with one row per fiscal year.
        fiscal_year_col: name of the column where the fiscal year is listed
            (use reset_index() first if it is currently the index).
        bar_col_list: column names for the stacked-bar portion of the graph.
        line_col: the column with the data to plot as the line (right axis).
        ylabel1: label for the left (bar) y-axis.
        ylabel2: label for the right (line) y-axis.
        title: chart title string.
        filename: path where the PNG output should be saved.

    Returns None; the chart is written to disk as a side effect.
    """
    # Makes the legend prettier (also renames the dataframe columns to match).
    df, bar_col_list = beautify_legend(df, bar_col_list)
    # line_col comes back as a one-element list holding the prettified name.
    df, line_col = beautify_legend(df, [line_col])
    # Create the figure
    fig, ax = plt.subplots()
    # Set the bar width
    width = 0.50
    # Standardize colors using the color_formatter utility
    color_dict = color_formatter(bar_col_list)
    # "bottom" accumulates the sum of all previously drawn bars so each
    # series stacks on top of the ones before it.
    previous_col_name = 0
    # Fill the NaNs
    df = df.fillna(0)
    for col in bar_col_list:
        col_name = ax.bar(df[fiscal_year_col], df[col], width, label=col, bottom=previous_col_name, color=color_dict[col])
        previous_col_name = previous_col_name + df[col]
    # label axes
    ax.set_ylabel(ylabel1)
    ax.set_xlabel('Fiscal Year')
    # Make one tick (and therefore one bar position) for each fiscal year
    plt.xticks(np.arange(df[fiscal_year_col].min(), df[fiscal_year_col].max()+1, 1.0),
               np.sort(list(df[fiscal_year_col].unique())))
    # previous_col_name now holds the stacked totals; leave 10% headroom.
    ax.set_ylim(bottom=0, top=previous_col_name.max() + previous_col_name.max()*0.10)
    # Create the line on the same graph but on a separate axis.
    ax2 = ax.twinx()
    ax2.plot(df[fiscal_year_col], df[line_col[0]], label=line_col[0], color='k',linewidth=5, marker='D', markersize=10)
    ax2.set_ylabel(ylabel2)
    # Ensure that the right axis starts at 0 and has 10% headroom.
    ax2.set_ylim(bottom=0, top=df[line_col[0]].max() + df[line_col[0]].max()*0.10)
    # Format both y-axes so a comma is displayed for thousands
    ax.get_yaxis().set_major_formatter(FuncFormatter(lambda x, p: format(int(x), ',')))
    ax2.get_yaxis().set_major_formatter(FuncFormatter(lambda x, p: format(int(x), ',')))
    # Merge the handles from both axes into a single legend.
    h1, l1 = ax.get_legend_handles_labels()
    h2, l2 = ax2.get_legend_handles_labels()
    ax.legend(h1+h2, l1+l2, loc='lower left')
    plt.title(title)
    # Save the chart and release all open figures.
    plt.savefig(filename)
    plt.close('all')
def fuel_price_comparison_graph(unit_cost_df, date_col, unit_cost_cols, bldg_unit_cost_col, filename):
    """Plot each fuel's unit price (dashed lines) against the building's
    blended unit cost (solid black line) and save the chart to *filename*.

    Parameters:
        unit_cost_df: DataFrame holding the date column and all unit-cost columns.
        date_col: name of the column plotted on the x-axis.
        unit_cost_cols: per-fuel unit-cost column names.
        bldg_unit_cost_col: name of the building's blended unit-cost column.
        filename: path where the PNG output should be saved.
    """
    # Prettify legend labels; the dataframe columns are renamed to match.
    unit_cost_df, unit_cost_cols = beautify_legend(unit_cost_df, unit_cost_cols)
    unit_cost_df, bldg_unit_cost_col = beautify_legend(unit_cost_df, [bldg_unit_cost_col])
    # One standardized color per fuel type.
    color_dict = color_formatter(unit_cost_cols)
    fig, ax = plt.subplots()
    dates = unit_cost_df[date_col]
    # Dashed line per fuel type.
    for fuel in unit_cost_cols:
        plt.plot(dates, unit_cost_df[fuel], label=fuel, linestyle='--', color=color_dict[fuel])
    # Solid black line for the building's actual blended cost.
    building_col = bldg_unit_cost_col[0]
    plt.plot(dates, unit_cost_df[building_col], label=building_col, linestyle='-', color='k')
    plt.ylabel('Energy Cost [$/MMBTU]')
    plt.xlabel('Date')
    plt.title("Heating Fuel Unit Price Comparison [$/MMBTU]")
    plt.legend()
    # Save the chart and release all open figures.
    plt.savefig(filename)
    plt.close('all')
def create_monthly_line_graph(df, date_col, graph_col, ylabel, filename):
    """Draw *graph_col* versus *date_col* as a single black line and save
    the chart to *filename*.  The y-axis uses thousands separators."""
    fig, ax = plt.subplots()
    # Single black line on the axes created above.
    ax.plot(df[date_col], df[graph_col], color='k')
    ax.set_ylabel(ylabel)
    # Thousands separators on the y-axis tick labels.
    ax.get_yaxis().set_major_formatter(FuncFormatter(lambda val, pos: format(int(val), ',')))
    ax.set_title("Realized Cumulative Energy Savings from Fuel Switching")
    # Save the chart and release all open figures.
    plt.savefig(filename)
    plt.close('all')
def graph_filename_url(site_id, base_graph_name):
    """Build the save path and report URL for a graph image.

    Parameters:
        site_id: the Site ID of the site this graph is related to.
        base_graph_name: graph file name without the Site ID and without
            the '.png' extension; e.g. 'eco_g1' produces
            'ANSBG1_eco_g1.png' when the Site ID is ANSBG1.

    Returns:
        A two-tuple (file_name, url): the path used to save the graph to
        the file system, and the URL an HTML site report uses to load the
        graph into an image tag.
    """
    image_name = '{}_{}.png'.format(site_id, base_graph_name)
    return 'output/images/' + image_name, 'images/' + image_name
Fixed pie charts: made the graphs bigger and increased the font sizes.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import matplotlib as mpl
import matplotlib.dates as mdates
import bench_util as bu
import shutil
import os
from matplotlib import font_manager as fm
# Set the matplotlib settings (eventually this will go at the top of the graph_util).
# Sizes are enlarged so the saved charts stay legible when embedded in reports.
mpl.rcParams['axes.labelsize'] = 20
mpl.rcParams['axes.titlesize'] = 24
mpl.rcParams['legend.fontsize'] = 20
mpl.rcParams['font.size'] = 20.0
mpl.rcParams['figure.figsize'] = [15,10]
mpl.rcParams['xtick.labelsize'] = 20
mpl.rcParams['ytick.labelsize'] = 20
# Set the style for the graphs
plt.style.use('bmh')
# Additional matplotlib formatting settings
# Tick locator that places one tick per month.
months = mdates.MonthLocator()
# This formats the months as three-letter abbreviations (e.g. 'Jan')
months_format = mdates.DateFormatter('%b')
def beautify_legend(df, col_list):
    """Re-format column names so they display nicely in chart legends.

    Each name in *col_list* has unit/suffix markers stripped or expanded
    (e.g. "_mmbtu" removed, "kwh" -> "electricity usage"), underscores
    replaced with spaces, and is then title-cased.  The dataframe's columns
    are renamed to match.

    Parameters:
        df: DataFrame whose columns include every name in *col_list*.
        col_list: list of raw column names to prettify.

    Returns:
        (df, pretty_cols): a dataframe with the renamed columns and the
        list of new names, in the same order as *col_list*.
    """
    rename_map = {}
    pretty_cols = []
    for col in col_list:
        # Replacement order matters: suffixes are stripped before the
        # generic underscore-to-space pass.
        new_col = col.replace("_mmbtu", "")
        new_col = new_col.replace("_unit_cost", " unit cost")
        new_col = new_col.replace("_cost", "")
        new_col = new_col.replace("kwh", "electricity usage")
        new_col = new_col.replace("kw_avg", "electricity demand")
        new_col = new_col.replace("hdd", "heating degree days")
        new_col = new_col.replace("_", " ")
        new_col = new_col.title()
        rename_map[col] = new_col
        pretty_cols.append(new_col)
    # Rename once: the original renamed inside the loop, creating a new
    # dataframe copy per column for the same final result.
    df = df.rename(columns=rename_map)
    return df, pretty_cols
def color_formatter(col_name_list):
    """Map each column name to a standardized hex color so every graph
    shows the same fuel type in the same color.

    Parameters:
        col_name_list: list of dataframe column names.

    Returns:
        dict mapping each original column name to a '#rrggbb' color string;
        unrecognized names map to black ('#000000').
    """
    color_dict = {}
    for col_name in col_name_list:
        # Lowercase once; all matching below is case-insensitive.  (The
        # original also tested the case-sensitive 'Electricity', which is
        # redundant once the name is lowercased.)
        name = col_name.lower()
        if 'natural' in name:
            color_dict[col_name] = '#1f78b4'
        elif 'fuel' in name:
            color_dict[col_name] = '#e31a1c'
        elif 'water' in name:
            color_dict[col_name] = '#b3df8a'
        elif 'sewer' in name:
            color_dict[col_name] = '#fdbf6f'
        elif 'district' in name:
            color_dict[col_name] = '#fb9a99'
        elif 'kw_' in name or 'demand' in name:
            color_dict[col_name] = '#33a02c'
        elif 'electricity' in name or 'kwh' in name:
            color_dict[col_name] = '#a6cee3'
        elif 'refuse' in name:
            color_dict[col_name] = '#ff7f00'
        else:
            # Fallback for anything unrecognized.
            color_dict[col_name] = '#000000'
    return color_dict
def area_cost_distribution(df, fiscal_year_col, utility_col_list, filename):
    """Draw a stacked-area chart of each utility's share of total annual
    cost and save it to *filename*.

    Parameters:
        df: DataFrame that already includes the summed bills for each
            fiscal year, one row per year.
        fiscal_year_col: name of the fiscal-year column.
        utility_col_list: column names for the different utility bills.
        filename: path where the PNG output should be saved.

    Returns None; the chart is written to disk as a side effect.
    """
    fig, ax = plt.subplots()
    # Makes the legend prettier (also renames the dataframe columns to match).
    df, utility_col_list = beautify_legend(df, utility_col_list)
    # Take costs for each utility type and convert to percent of total cost by fiscal year
    df['total_costs'] = df[utility_col_list].sum(axis=1)
    # Standardize colors using color_formatter utility
    color_dict = color_formatter(utility_col_list)
    percent_columns = []
    # Maps each derived "Percent ..." column to its base column's color.
    percent_col_colors = {}
    for col in utility_col_list:
        percent_col = "Percent " + col
        percent_columns.append(percent_col)
        df[percent_col] = df[col] / df.total_costs
        percent_col_colors[percent_col] = color_dict[col]
    # Fill NaNs (e.g. from dividing by a zero total) so the plot is continuous.
    df = df.fillna(0)
    # Create stacked area plot
    ax.stackplot(df[fiscal_year_col], df[percent_columns].T, labels=percent_columns,
                 colors=[ percent_col_colors[i] for i in percent_columns])
    # Format the y axis to be in percent
    ax.yaxis.set_major_formatter(FuncFormatter('{0:.0%}'.format))
    # Format the x-axis to include all fiscal years
    plt.xticks(np.arange(df[fiscal_year_col].min(), df[fiscal_year_col].max()+1, 1.0))
    # Add title and axis labels
    plt.title('Annual Utility Cost Distribution')
    plt.ylabel('Utility Cost Distribution')
    plt.xlabel('Fiscal Year')
    # Add legend
    plt.legend(loc='lower right', ncol=2, fancybox=True, shadow=True)
    # Save the chart and release all open figures.
    plt.savefig(filename)
    plt.close('all')
def area_use_distribution(df, fiscal_year_col, utility_col_list, filename):
    """Draw a stacked-area chart of each utility's share of total annual
    usage and save it to *filename*.

    Parameters:
        df: DataFrame that already includes the summed usage for each
            fiscal year, one row per year.
        fiscal_year_col: name of the fiscal-year column.
        utility_col_list: column names for the different utility usages.
        filename: path where the PNG output should be saved.

    Returns None; the chart is written to disk as a side effect.
    """
    # Makes the legend prettier (also renames the dataframe columns to match).
    df, utility_col_list = beautify_legend(df, utility_col_list)
    fig, ax = plt.subplots()
    # Take usage for each utility type and convert to percent of total usage by fiscal year
    df['total_use'] = df[utility_col_list].sum(axis=1)
    # Standardize colors using color_formatter utility
    color_dict = color_formatter(utility_col_list)
    percent_columns = []
    # Maps each derived "Percent ..." column to its base column's color.
    percent_col_colors = {}
    for col in utility_col_list:
        percent_col = "Percent " + col
        percent_columns.append(percent_col)
        df[percent_col] = df[col] / df.total_use
        percent_col_colors[percent_col] = color_dict[col]
    # Fill NaNs (e.g. from dividing by a zero total) so the plot is continuous.
    df = df.fillna(0)
    # Create stacked area plot
    ax.stackplot(df[fiscal_year_col], df[percent_columns].T, labels=percent_columns,
                 colors=[ percent_col_colors[i] for i in percent_columns])
    # Format the y axis to be in percent
    ax.yaxis.set_major_formatter(FuncFormatter('{0:.0%}'.format))
    # Format the x-axis to include all fiscal years
    plt.xticks(np.arange(df[fiscal_year_col].min(), df[fiscal_year_col].max()+1, 1.0))
    # Add title and axis labels
    plt.title('Annual Energy Usage Distribution')
    plt.ylabel('Annual Energy Usage Distribution')
    plt.xlabel('Fiscal Year')
    # Add legend
    plt.legend(loc='lower right', ncol=2, fancybox=True, shadow=True)
    # Save the chart and release all open figures.
    plt.savefig(filename)
    plt.close('all')
def create_stacked_bar(df, fiscal_year_col, column_name_list, ylabel, title, filename):
    """Create a stacked bar chart of annual values and save it to *filename*.

    Parameters:
        df: DataFrame with one row per fiscal year.
        fiscal_year_col: name of the column where the fiscal year is listed.
        column_name_list: columns whose values are stacked in each bar.
        ylabel: y-axis label string.
        title: chart title string.
        filename: path where the PNG output should be saved.

    Returns None; the chart is written to disk as a side effect.
    """
    # If the dataframe is empty, copy a canned "no data available" image to
    # the output path instead of drawing anything.
    if df.empty:
        shutil.copyfile(os.path.abspath('data/water_sewer_no_data_available.png'), os.path.abspath(filename))
    else:
        # Makes the legend prettier (also renames the dataframe columns to match).
        df, column_name_list = beautify_legend(df, column_name_list)
        # Create the figure
        fig, ax = plt.subplots()
        # Set the bar width
        width = 0.50
        # Standardize colors using the color_formatter utility
        color_dict = color_formatter(column_name_list)
        # "bottom" accumulates the sum of all previously drawn bars so each
        # series stacks on top of the ones before it.
        previous_col_name = 0
        # Fill the NaNs so missing values do not break the stacking arithmetic.
        df = df.fillna(0)
        for col in column_name_list:
            col_name = plt.bar(df[fiscal_year_col], df[col], width, label=col, bottom=previous_col_name, color=color_dict[col])
            previous_col_name = previous_col_name + df[col]
        # label axes
        plt.ylabel(ylabel)
        plt.xlabel('Fiscal Year')
        # Make one tick (and therefore one bar position) for each fiscal year
        plt.xticks(np.arange(df[fiscal_year_col].min(), df[fiscal_year_col].max()+1, 1.0),
                   np.sort(list(df[fiscal_year_col].unique())))
        # Leave 10% headroom above the tallest stacked bar.
        df['total_cost'] = df[column_name_list].sum(axis=1)
        ax.set_ylim(bottom=0, top=df.total_cost.max() + df.total_cost.max()*0.10)
        # Format the y-axis so a comma is displayed for thousands
        ax.get_yaxis().set_major_formatter(FuncFormatter(lambda x, p: format(int(x), ',')))
        plt.title(title)
        plt.legend(loc='lower right', ncol=2, fancybox=True, shadow=True)
        # Save the chart and release all open figures.
        plt.savefig(filename)
        plt.close('all')
def energy_use_stacked_bar(df, fiscal_year_col, column_name_list, filename):
    """Create a stacked bar chart of total annual energy usage [MMBTU].

    Parameters:
        df: DataFrame with one row per fiscal year.
        fiscal_year_col: name of the column where the fiscal year is listed.
        column_name_list: usage columns (one per fuel) stacked in each bar.
        filename: path where the PNG output should be saved.

    Returns None; the chart is written to disk as a side effect.
    """
    # Makes the legend prettier (also renames the dataframe columns to match).
    df, column_name_list = beautify_legend(df, column_name_list)
    # Create the figure
    fig, ax = plt.subplots()
    # Set the bar width
    width = 0.50
    # Standardize colors using the color_formatter utility
    color_dict = color_formatter(column_name_list)
    # Fill the NaNs so missing values do not break the stacking arithmetic.
    df = df.fillna(0)
    # "bottom" accumulates the sum of all previously drawn bars so each
    # series stacks on top of the ones before it.
    previous_col_name = 0
    for col in column_name_list:
        col_name = ax.bar(df[fiscal_year_col].values, df[col].values, width, label=col, bottom=previous_col_name,
                          color=color_dict[col])
        previous_col_name = previous_col_name + df[col]
    # label axes
    plt.ylabel('Annual Energy Usage [MMBTU]')
    plt.xlabel('Fiscal Year')
    plt.title('Total Annual Energy Usage')
    # Make one tick (and therefore one bar position) for each fiscal year
    plt.xticks(np.arange(df[fiscal_year_col].min(), df[fiscal_year_col].max()+1, 1.0),
               np.sort(list(df[fiscal_year_col].unique())))
    # Set the yticks to go up to the total usage (plus 10% headroom) in
    # increments of 1,000.
    df['total_use'] = df[column_name_list].sum(axis=1)
    plt.yticks(np.arange(0, df.total_use.max()+df.total_use.max()*0.10, 1000))
    # Format the y-axis so a comma is displayed for thousands
    ax.get_yaxis().set_major_formatter(FuncFormatter(lambda x, p: format(int(x), ',')))
    plt.legend(loc='lower right', ncol=2, fancybox=True, shadow=True)
    # Save the chart and release all open figures.
    plt.savefig(filename)
    plt.close('all')
def usage_pie_charts(df, use_or_cost_cols, chart_type, base_filename, site_id):
    """Create one pie chart per year for the three most recent complete
    fiscal years, showing each fuel's share of usage or cost.

    Parameters:
        df: DataFrame with fiscal_year as the index, containing the values
            for the columns in *use_or_cost_cols*.
        use_or_cost_cols: list of the energy usage or energy cost columns.
        chart_type: 1 for an energy-use pie chart, 2 for an energy-cost one
            (only affects the title text).
        base_filename: base graph file name; the final name also includes
            the Site ID, the correct output directory, and the year.
        site_id: the Site ID used to create the filename.

    Returns:
        A list of the URLs that can be used to access the three graphs
        from the report page.
    """
    # Makes the legend prettier (also renames the dataframe columns to match).
    df, use_or_cost_cols = beautify_legend(df, use_or_cost_cols)
    # Standardize colors using color_formatter utility
    color_dict = color_formatter(use_or_cost_cols)
    # Get the three most recent complete years of data
    sorted_completes = df.sort_index(ascending=False)
    most_recent_complete_years = sorted_completes[0:3]
    years = list(most_recent_complete_years.index.values)
    # Convert each column's value to its share of that year's total.
    most_recent_complete_years = most_recent_complete_years[use_or_cost_cols]
    most_recent_complete_years['Totals'] = most_recent_complete_years.sum(axis=1)
    for col in use_or_cost_cols:
        most_recent_complete_years[col] = most_recent_complete_years[col] / most_recent_complete_years.Totals
    most_recent_complete_years = most_recent_complete_years.drop('Totals', axis=1)
    # List holding the Matplotlib figures created.
    figs = []
    # List holding the URLs that will access the saved graph image files.
    urls = []
    # Create a pie chart for each of the 3 most recent complete years
    for year in years:
        # Make current year dataframe
        year_df = most_recent_complete_years.query("fiscal_year == @year")
        updated_use_or_cost_cols = []
        # Drop columns that only have zero usage so they get no pie slice.
        for col in use_or_cost_cols:
            if year_df[col].iloc[0] == 0:
                year_df = year_df.drop(col, axis=1)
            else:
                updated_use_or_cost_cols.append(col)
        fig, ax = plt.subplots()
        # Keep the wedge/label/percent artists so their fonts can be restyled below.
        patches, texts, autotexts = ax.pie(list(year_df.iloc[0].values), labels=list(year_df.columns.values), autopct='%1.1f%%',
                                           shadow=True, startangle=90, colors=[ color_dict[i] for i in updated_use_or_cost_cols])
        plt.tick_params(axis='both', which='both', labelsize=32)
        # Create the title based on whether it is an energy use or energy cost pie chart.
        if chart_type == 1:
            title = "FY " + str(year) + " Energy Usage [MMBTU]"
        else:
            title = "FY " + str(year) + " Energy Cost [$]"
        plt.title(title)
        # Make the graph take up a larger portion of the figure.
        # NOTE(review): plt.axis([xmin, xmax, ymin, ymax]) with four equal
        # values sets degenerate data limits, and the ax.axis('equal') call on
        # the next line overrides the limits anyway — confirm this call
        # actually does what the comment intends.
        plt.axis([0.75, 0.75, 0.75, 0.75])
        ax.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
        # Increase the font size of the wedge labels and percentage texts.
        props = fm.FontProperties()
        props.set_size(32)
        plt.setp(autotexts, fontproperties=props)
        plt.setp(texts, fontproperties=props)
        # Include the year in the file name
        fn_with_yr = '{}_{}'.format(base_filename, year)
        final_fn, url = graph_filename_url(site_id, fn_with_yr)
        urls.append(url)
        # Save the chart and release all open figures.
        plt.savefig(final_fn)
        figs.append(fig)
        plt.close('all')
    return urls
def create_monthly_profile(df, graph_column_name, yaxis_name, color_choice, title, filename):
    """Plot monthly profiles of one column for up to the five most recent
    fiscal years, one line per year, and save the chart to *filename*.

    Parameters:
        df: DataFrame with a (fiscal_year, fiscal_mo) MultiIndex and the
            appropriate graph column ('kWh', 'kW', etc.).
        graph_column_name: column containing the data graphed on the y-axis.
        yaxis_name: string displayed on the y-axis.
        color_choice: 'blue', 'red', or 'green' — selects the color palette.
        title: chart title string.
        filename: path where the PNG output should be saved.

    Returns None; the chart is written to disk as a side effect.
    """
    # Get the five most recent years present in the index.
    recent_years = (sorted(list(df.index.levels[0].values), reverse=True)[0:5])
    # Reset the index of the dataframe for more straightforward queries
    df_reset = df.reset_index()
    # Progressively lighter shades per palette (first/most-recent year darkest).
    color_dict = {'blue': ['#08519c', '#3182bd', '#6baed6', '#bdd7e7', '#eff3ff'],
                  'red': ['#a50f15', '#de2d26', '#fb6a4a', '#fcae91', '#fee5d9'],
                  'green': ['#006d2c', '#31a354', '#74c476', '#bae4b3', '#edf8e9']
                  }
    color_df = pd.DataFrame.from_dict(color_dict)
    # i is the counter for the different colors
    i=0
    # Create the plots
    fig, ax = plt.subplots()
    for year in recent_years:
        # Create df for one year only so it's plotted as a single line
        year_df = df_reset.query("fiscal_year == @year")
        # Plot the data
        ax.plot_date(year_df['fiscal_mo'], year_df[graph_column_name], fmt='-', color=color_df.iloc[i][color_choice],
                     label=str(year_df.fiscal_year.iloc[0]))
        # Increase counter by one to use the next color
        i += 1
    # Format the y-axis so a comma is displayed for thousands
    ax.get_yaxis().set_major_formatter(FuncFormatter(lambda x, p: format(int(x), ',')))
    # Set x-axis labels to be fiscal months, starting in July.
    # NOTE(review): year_df here is whichever year the loop ended on; if that
    # year has fewer months of data, the tick positions will not line up with
    # every label in bu.mo_list — confirm this is acceptable.
    ax.set_xticks(year_df.fiscal_mo.values)
    ax.set_xticklabels(bu.mo_list)
    # Add the labels
    plt.xlabel('Month of Year')
    plt.ylabel(yaxis_name)
    plt.legend()
    plt.title(title)
    # Save the chart and release all open figures.
    plt.savefig(filename)
    plt.close('all')
def stacked_bar_with_line(df, fiscal_year_col, bar_col_list, line_col, ylabel1, ylabel2, title, filename):
    """Draw a stacked bar chart with a line overlay on a secondary y-axis.

    Parameters:
        df: DataFrame with one row per fiscal year.
        fiscal_year_col: name of the column where the fiscal year is listed
            (use reset_index() first if it is currently the index).
        bar_col_list: column names for the stacked-bar portion of the graph.
        line_col: the column with the data to plot as the line (right axis).
        ylabel1: label for the left (bar) y-axis.
        ylabel2: label for the right (line) y-axis.
        title: chart title string.
        filename: path where the PNG output should be saved.

    Returns None; the chart is written to disk as a side effect.
    """
    # Makes the legend prettier (also renames the dataframe columns to match).
    df, bar_col_list = beautify_legend(df, bar_col_list)
    # line_col comes back as a one-element list holding the prettified name.
    df, line_col = beautify_legend(df, [line_col])
    # Create the figure
    fig, ax = plt.subplots()
    # Set the bar width
    width = 0.50
    # Standardize colors using the color_formatter utility
    color_dict = color_formatter(bar_col_list)
    # "bottom" accumulates the sum of all previously drawn bars so each
    # series stacks on top of the ones before it.
    previous_col_name = 0
    # Fill the NaNs
    df = df.fillna(0)
    for col in bar_col_list:
        col_name = ax.bar(df[fiscal_year_col], df[col], width, label=col, bottom=previous_col_name, color=color_dict[col])
        previous_col_name = previous_col_name + df[col]
    # label axes
    ax.set_ylabel(ylabel1)
    ax.set_xlabel('Fiscal Year')
    # Make one tick (and therefore one bar position) for each fiscal year
    plt.xticks(np.arange(df[fiscal_year_col].min(), df[fiscal_year_col].max()+1, 1.0),
               np.sort(list(df[fiscal_year_col].unique())))
    # previous_col_name now holds the stacked totals; leave 10% headroom.
    ax.set_ylim(bottom=0, top=previous_col_name.max() + previous_col_name.max()*0.10)
    # Create the line on the same graph but on a separate axis.
    ax2 = ax.twinx()
    ax2.plot(df[fiscal_year_col], df[line_col[0]], label=line_col[0], color='k',linewidth=5, marker='D', markersize=10)
    ax2.set_ylabel(ylabel2)
    # Ensure that the right axis starts at 0 and has 10% headroom.
    ax2.set_ylim(bottom=0, top=df[line_col[0]].max() + df[line_col[0]].max()*0.10)
    # Format both y-axes so a comma is displayed for thousands
    ax.get_yaxis().set_major_formatter(FuncFormatter(lambda x, p: format(int(x), ',')))
    ax2.get_yaxis().set_major_formatter(FuncFormatter(lambda x, p: format(int(x), ',')))
    # Merge the handles from both axes into a single legend.
    h1, l1 = ax.get_legend_handles_labels()
    h2, l2 = ax2.get_legend_handles_labels()
    ax.legend(h1+h2, l1+l2, loc='lower left')
    plt.title(title)
    # Save the chart and release all open figures.
    plt.savefig(filename)
    plt.close('all')
def fuel_price_comparison_graph(unit_cost_df, date_col, unit_cost_cols, bldg_unit_cost_col, filename):
    """Compare per-fuel unit prices (dashed lines) with the building's
    blended unit cost (solid black line), saving the chart to *filename*.

    Parameters:
        unit_cost_df: DataFrame holding the date column and all unit-cost columns.
        date_col: name of the column plotted on the x-axis.
        unit_cost_cols: per-fuel unit-cost column names.
        bldg_unit_cost_col: name of the building's blended unit-cost column.
        filename: path where the PNG output should be saved.
    """
    # Prettify legend labels; the dataframe columns are renamed to match.
    unit_cost_df, unit_cost_cols = beautify_legend(unit_cost_df, unit_cost_cols)
    unit_cost_df, bldg_unit_cost_col = beautify_legend(unit_cost_df, [bldg_unit_cost_col])
    # One standardized color per fuel type.
    color_dict = color_formatter(unit_cost_cols)
    fig, ax = plt.subplots()
    x_values = unit_cost_df[date_col]
    # One dashed line per fuel type.
    for fuel in unit_cost_cols:
        plt.plot(x_values, unit_cost_df[fuel], label=fuel, linestyle='--', color=color_dict[fuel])
    # Solid black line for the building's actual blended cost.
    blended = bldg_unit_cost_col[0]
    plt.plot(x_values, unit_cost_df[blended], label=blended, linestyle='-', color='k')
    plt.ylabel('Energy Cost [$/MMBTU]')
    plt.xlabel('Date')
    plt.title("Heating Fuel Unit Price Comparison [$/MMBTU]")
    plt.legend()
    # Save the chart and release all open figures.
    plt.savefig(filename)
    plt.close('all')
def create_monthly_line_graph(df, date_col, graph_col, ylabel, filename):
    """Plot *graph_col* against *date_col* as one black line and save the
    chart to *filename*.  The y-axis uses thousands separators."""
    fig, ax = plt.subplots()
    # Single black line on the freshly created axes.
    ax.plot(df[date_col], df[graph_col], color='k')
    ax.set_ylabel(ylabel)
    # Thousands separators on the y-axis tick labels.
    ax.get_yaxis().set_major_formatter(FuncFormatter(lambda val, pos: format(int(val), ',')))
    ax.set_title("Realized Cumulative Energy Savings from Fuel Switching")
    # Save the chart and release all open figures.
    plt.savefig(filename)
    plt.close('all')
def graph_filename_url(site_id, base_graph_name):
    """Return the file-system save path and report URL for a graph image.

    Parameters:
        site_id: the Site ID of the site this graph is related to.
        base_graph_name: graph file name without the Site ID and without
            the '.png' extension; e.g. 'eco_g1' produces
            'ANSBG1_eco_g1.png' when the Site ID is ANSBG1.

    Returns:
        A two-tuple (file_name, url): the path used when saving the graph,
        and the URL an HTML site report uses in its image tag.
    """
    stem = '_'.join((site_id, base_graph_name))
    file_name = 'output/images/{}.png'.format(stem)
    url = 'images/{}.png'.format(stem)
    return file_name, url
|
from django.conf import settings
from django.contrib.sites.models import Site
def get_site_id(request):
    """Return the primary key of the Site associated with *request*.

    Prefers a Site instance already attached to the request; otherwise
    falls back to looking up settings.SITE_ID.  Returns None when the
    configured Site does not exist.
    """
    request_site = getattr(request, 'site', None)
    if isinstance(request_site, Site):
        return request_site.pk
    try:
        return Site.objects.get(pk=settings.SITE_ID).pk
    except Site.DoesNotExist:
        return None
Add a get_site helper and refactor get_site_id to use it.
from django.conf import settings
from django.contrib.sites.models import Site
def get_site(request):
    """Return the Site associated with *request*.

    Prefers a Site instance already attached to the request; otherwise
    falls back to looking up settings.SITE_ID.  Returns None when the
    configured Site does not exist.
    """
    request_site = getattr(request, 'site', None)
    if isinstance(request_site, Site):
        return request_site
    try:
        return Site.objects.get(pk=settings.SITE_ID)
    except Site.DoesNotExist:
        return None
def get_site_id(request):
    """Return the primary key of the Site for *request*, or None when no
    Site can be resolved (see get_site)."""
    site = get_site(request)
    return site.pk if site is not None else None
|
# -*- coding: utf-8 -*-
# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U
#
# This file is part of FIWARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with opensource@tid.es
from tests.fiware_region_base_tests import FiwareRegionsBaseTests
from commons.constants import *
from novaclient.exceptions import Forbidden, OverLimit, ClientException as NovaClientException
from neutronclient.common.exceptions import NeutronClientException, IpAddressGenerationFailureClient
from datetime import datetime
from commons.dbus_phonehome_service import DbusPhoneHomeClient
from commons.template_utils import replace_template_properties
import re
import json
import uuid
def _build_path_resource(path_resource):
"""Build url path with a transactionId param"""
return path_resource + '?TransactionId=' + str(uuid.uuid1())
class FiwareRegionWithNetworkTest(FiwareRegionsBaseTests):
    """Integration tests for FIWARE regions that provide Neutron networking.

    Covers network/subnet/router management and instance deployment with
    networks, keypairs, security groups, floating IPs, SNAT connectivity
    and the OpenStack metadata service (end-to-end checks are verified
    through the PhoneHome DBus service).
    """

    # Tell the base suite these tests require networking support.
    with_networks = True

    def __deploy_instance_helper__(self, instance_name,
                                   network_name=None, is_network_new=True, cidr=None,
                                   keypair_name=None, is_keypair_new=True,
                                   sec_group_name=None, metadata=None, userdata=None):
        """
        HELPER. Creates a new instance according to the given parameters:
        - If a network name is given, a new network (`is_network_new==True`) or an existing one is associated.
        - If a keypair name is given, a new keypair (`is_keypair_new==True`) or an existing one is associated.
        - If a security group name is given, a new sec_group is created and associated to the instance.
        - Optional metadata and userdata may also be associated.
        :param instance_name: Name of the new instance
        :param network_name: Name of the network to use (either existing or a new one)
        :param is_network_new: If True, a new network should be created and appended to the `TestWorld`
        :param cidr: Optional CIDR to use in the network's subnet (otherwise, one is chosen from default range)
        :param keypair_name: Name of the keypair to use (either existing or a new one)
        :param is_keypair_new: If True, a new keypair should be created and appended to the `TestWorld`
        :param sec_group_name: Name of the new security group that will be created
        :param metadata: Python dict with metadata info {"key": "value"}
        :param userdata: userdata file content (String)
        :return: Server ID (String)
        """
        flavor_id = self.nova_operations.get_any_flavor_id()
        self.assertIsNotNone(flavor_id, "Problems retrieving a flavor")
        base_image_name = self.nova_operations.test_image
        image_id = self.nova_operations.find_image_id_by_name(image_name=base_image_name)
        self.assertIsNotNone(image_id, "Problems retrieving the image '{}'".format(base_image_name))
        # instance prerequisites: network (created or looked up by name)
        try:
            network_id_list = None
            if network_name:
                if is_network_new:
                    # Create the given network and register it for cleanup
                    network = self.neutron_operations.create_network(network_name)
                    self.test_world['networks'].append(network['id'])
                    network_id_list = [{'net-id': network['id']}]
                    # Create a subnet
                    self.neutron_operations.create_subnet(network, cidr)
                else:
                    # Look for the network id
                    net_list = self.neutron_operations.find_networks(name=network_name)
                    self.assertTrue(len(net_list) != 0, "Required network '%s' could not be found" % network_name)
                    network_id_list = [{'net-id': net_list[0]['id']}]
        except NeutronClientException as e:
            self.logger.debug("Required network could not be created: %s", e)
            self.fail(e)
        # instance prerequisites: keypair (created or looked up by name)
        try:
            if keypair_name:
                if is_keypair_new:
                    self.nova_operations.create_keypair(keypair_name)
                    self.test_world['keypair_names'].append(keypair_name)
                else:
                    keypair_found = self.nova_operations.find_keypair(name=keypair_name)
                    self.assertIsNotNone(keypair_found, "Required Keypair '%s' could not be found" % keypair_name)
        except NovaClientException as e:
            self.logger.debug("Required keypair could not be created: %s", e)
            self.fail(e)
        # instance prerequisites: security group (always created when a name is given)
        try:
            security_group_name_list = None
            if sec_group_name:
                sec_group_id = self.nova_operations.create_security_group_and_rules(sec_group_name)
                self.test_world['sec_groups'].append(sec_group_id)
                security_group_name_list = [sec_group_name]
        except NovaClientException as e:
            self.logger.debug("Required security group could not be created: %s", e)
            self.fail(e)
        # create new instance
        try:
            server_data = self.nova_operations.launch_instance(instance_name=instance_name,
                                                               flavor_id=flavor_id,
                                                               image_id=image_id,
                                                               metadata=metadata,
                                                               keypair_name=keypair_name,
                                                               security_group_name_list=security_group_name_list,
                                                               network_id_list=network_id_list,
                                                               userdata=userdata)
        except Forbidden as e:
            # Include the exception detail, consistently with the other handlers
            self.logger.debug("Quota exceeded when launching a new instance: %s", e)
            self.fail(e)
        except OverLimit as e:
            self.logger.debug("Not enough resources to launch new instance: %s", e)
            self.fail(e)
        else:
            self.test_world['servers'].append(server_data['id'])
        # Wait for status=ACTIVE
        status, detail = self.nova_operations.wait_for_task_status(server_data['id'], 'ACTIVE')
        self.assertEqual(status, 'ACTIVE', "{detail}. Current status is {status}".format(detail=detail, status=status))
        return server_data['id']

    def __get_external_network_test_helper__(self):
        """
        HELPER. Finds and returns the external network id of current region
        :return: External network id
        """
        external_network_id = None
        external_network_list = self.neutron_operations.find_networks(router_external=True)
        if len(external_network_list) != 0:
            external_net_region = self.conf[PROPERTIES_CONFIG_REGION][PROPERTIES_CONFIG_REGION_EXTERNAL_NET]
            if self.region_name in external_net_region:
                # Prefer the external network explicitly configured for this region
                ext_net_config = external_net_region[self.region_name]
                for external_network in external_network_list:
                    if external_network['name'] == ext_net_config:
                        external_network_id = external_network['id']
            if external_network_id is None:
                # Fall back to the first external network found
                external_network_id = external_network_list[0]['id']
        self.assertIsNotNone(external_network_id, "No external networks found")
        return external_network_id

    def __get_shared_network_test_helper__(self):
        """
        HELPER. Finds and returns the shared network name of current region
        :return: Shared network name
        """
        # get from settings the name of the shared network to lookup
        lookup_network_name = TEST_SHARED_NET_DEFAULT
        shared_network_conf = self.conf[PROPERTIES_CONFIG_REGION].get(PROPERTIES_CONFIG_REGION_SHARED_NET)
        if shared_network_conf:
            lookup_network_name = shared_network_conf.get(self.region_name, TEST_SHARED_NET_DEFAULT)
        # find the network in the list of existing shared networks
        lookup_network_list = self.neutron_operations.find_networks(name=lookup_network_name,
                                                                    shared=True,
                                                                    router_external=False)
        shared_network_name = lookup_network_list[0]['name'] if lookup_network_list else None
        self.assertIsNotNone(shared_network_name, "No shared network %s found" % lookup_network_name)
        return shared_network_name

    def __e2e_connection_using_public_ip_test_helper__(self, use_shared_network=True):
        """
        HELPER. Test whether it is possible to deploy an instance, assign an allocated public IP and establish
        a SSH connection
        :param use_shared_network: If True, use the existing shared network associated to the new instance
        """
        # skip test if suite couldn't start from an empty, clean list of allocated IPs (to avoid cascading failures)
        if self.suite_world['allocated_ips']:
            self.skipTest("There were pre-existing, not deallocated IPs")
        # Allocate an IP
        allocated_ip = self.__allocate_ip_test_helper__()
        # Create Keypair
        suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')
        keypair_name = TEST_KEYPAIR_PREFIX + "_" + suffix
        private_keypair_value = self.__create_keypair_test_helper__(keypair_name)
        # Network
        if use_shared_network:
            network_name = self.__get_shared_network_test_helper__()
        else:
            # Create Router with an external network gateway
            router_name = TEST_ROUTER_PREFIX + "_e2e_" + suffix
            external_network_id = self.__get_external_network_test_helper__()
            router_id = self.__create_router_test_helper__(router_name, external_network_id)
            # Create Network
            network_name = TEST_NETWORK_PREFIX + "_" + suffix
            network_id, subnet_id = self.__create_network_and_subnet_test_helper__(network_name)
            # Add interface to router
            port_id = self.neutron_operations.add_interface_router(router_id, subnet_id)
            self.test_world['ports'].append(port_id)
        # Deploy VM (it will have only one IP from the Public Pool);
        # keypair_name was already built above and the keypair already exists (is_keypair_new=False)
        instance_name = TEST_SERVER_PREFIX + "_e2e_" + suffix
        sec_group_name = TEST_SEC_GROUP_PREFIX + "_" + suffix
        server_id = self.__deploy_instance_helper__(instance_name=instance_name,
                                                    network_name=network_name, is_network_new=False,
                                                    keypair_name=keypair_name, is_keypair_new=False,
                                                    sec_group_name=sec_group_name)
        # Associate the public IP to Server
        self.nova_operations.add_floating_ip_to_instance(server_id=server_id, ip_address=allocated_ip)
        # SSH Connection
        self.__ssh_connection_test_helper__(host=allocated_ip, private_key=private_keypair_value)

    def __e2e_snat_connection_test_helper__(self, use_shared_network=True):
        """
        HELPER. Test whether it is possible to deploy an instance and connect to the internet (PhoneHome service)
        :param use_shared_network: If True, use the existing shared network associated to the new instance
        """
        # skip test if suite couldn't start from an empty, clean list of allocated IPs (to avoid cascading failures)
        if self.suite_world['allocated_ips']:
            self.skipTest("There were pre-existing, not deallocated IPs")
        # skip test if no PhoneHome service endpoint was given by configuration (either in settings or by environment)
        phonehome_endpoint = self.conf[PROPERTIES_CONFIG_TEST][PROPERTIES_CONFIG_TEST_PHONEHOME_ENDPOINT]
        if not phonehome_endpoint:
            self.skipTest("No value found for '{}.{}' setting".format(
                PROPERTIES_CONFIG_TEST, PROPERTIES_CONFIG_TEST_PHONEHOME_ENDPOINT))
        path_resource = PHONEHOME_DBUS_OBJECT_PATH
        # Load userdata from file and compile the template (replacing variable values)
        self.logger.debug("Loading userdata from file '%s'", PHONEHOME_USERDATA_PATH)
        with open(PHONEHOME_USERDATA_PATH, "r") as userdata_file:
            userdata_content = userdata_file.read()
        userdata_content = replace_template_properties(userdata_content, phonehome_endpoint=phonehome_endpoint,
                                                       path_resource=_build_path_resource(path_resource))
        self.logger.debug("Userdata content: %s", userdata_content)
        suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')
        # Network
        if use_shared_network:
            network_name = self.__get_shared_network_test_helper__()
        else:
            # Create Router with an external network gateway
            router_name = TEST_ROUTER_PREFIX + "_snat_" + suffix
            external_network_id = self.__get_external_network_test_helper__()
            router_id = self.__create_router_test_helper__(router_name, external_network_id)
            # Create Network
            network_name = TEST_NETWORK_PREFIX + "_" + suffix
            network_id, subnet_id = self.__create_network_and_subnet_test_helper__(network_name)
            # Add interface to router
            port_id = self.neutron_operations.add_interface_router(router_id, subnet_id)
            self.test_world['ports'].append(port_id)
        # Deploy VM
        instance_name = self.region_name.lower() + "_" + TEST_SERVER_PREFIX + "_snat_" + suffix
        server_id = self.__deploy_instance_helper__(instance_name=instance_name,
                                                    network_name=network_name, is_network_new=False,
                                                    userdata=userdata_content)
        # VM will have as hostname, the instance_name with "-" instead of "_"
        expected_instance_name = instance_name.replace("_", "-")
        # Create a new DBus connection and wait for emitted signal from HTTP PhoneHome service
        client = DbusPhoneHomeClient(self.logger)
        result = client.connect_and_wait_for_phonehome_signal(PHONEHOME_DBUS_NAME, PHONEHOME_DBUS_OBJECT_PATH,
                                                              PHONEHOME_SIGNAL, expected_instance_name)
        self.assertIsNotNone(result, "PhoneHome request not received from VM '%s'" % server_id)
        self.logger.debug("Request received from VM when 'calling home': %s", result)
        # Get hostname from data received
        self.assertIn("hostname", result, "PhoneHome request has been received but 'hostname' param is not in")
        # Raw string: "\w" in a non-raw pattern is an invalid escape sequence
        received_hostname = re.match(r".*hostname=([\w-]*)", result).group(1)
        # Check hostname
        self.assertEqual(expected_instance_name, received_hostname,
                         "Received hostname '%s' in PhoneHome request does not match with the expected instance name" %
                         received_hostname)

    def __create_router_test_helper__(self, router_name, external_network_id=None):
        """
        HELPER. Creates a router and links it to an external network (if not None)
        :param external_network_id: External network id
        :return: Router id (String)
        """
        try:
            router = self.neutron_operations.create_router(router_name, external_network_id)
        except IpAddressGenerationFailureClient as e:
            self.logger.debug("An error occurred creating router: %s", e)
            self.fail(e)
        self.assertIsNotNone(router, "Problems creating router")
        self.assertEqual(router['status'], 'ACTIVE', "Router status is NOT ACTIVE")
        self.test_world['routers'].append(router['id'])
        self.logger.debug("%s", router)
        return router['id']

    def __create_network_and_subnet_test_helper__(self, network_name, cidr=None):
        """
        HELPER. Creates network and subnet.
        :param network_name: Network name
        :param cidr: Optional CIDR to use in the subnet (otherwise, one is chosen from default range)
        :return: (NetworkId, SubnetworkId) (String, String)
        """
        network = self.neutron_operations.create_network(network_name)
        self.assertIsNotNone(network, "Problems creating network")
        self.assertEqual(network['status'], 'ACTIVE', "Network status is not ACTIVE")
        self.test_world['networks'].append(network['id'])
        # NOTE(review): create_subnet appears to return the network dict enriched
        # with the 'subnet' entry — confirm against neutron_operations
        network = self.neutron_operations.create_subnet(network, cidr)
        self.assertIsNotNone(network['subnet']['id'], "Problems creating subnet")
        self.logger.debug("%s", network)
        return network['id'], network['subnet']['id']

    def test_create_network_and_subnet(self):
        """
        Test whether it is possible to create a new network with subnets
        """
        suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')
        network_name = TEST_NETWORK_PREFIX + "_" + suffix
        self.__create_network_and_subnet_test_helper__(network_name)

    def test_external_networks(self):
        """
        Test whether there are external networks configured in the region
        """
        network_list = self.neutron_operations.find_networks(router_external=True)
        self.assertNotEqual(len(network_list), 0, "No external networks found")

    def test_create_router_no_external_network(self):
        """
        Test whether it is possible to create a new router without setting the gateway
        """
        suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')
        router_name = TEST_ROUTER_PREFIX + "_" + suffix
        self.__create_router_test_helper__(router_name)

    def test_create_router_no_external_network_and_add_network_port(self):
        """
        Test whether it is possible to create a new router without external gateway and link new network port
        """
        # Create Router
        suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')
        router_name = TEST_ROUTER_PREFIX + "_ports_" + suffix
        router_id = self.__create_router_test_helper__(router_name)
        # Create Network with only one subnet
        network_name = TEST_NETWORK_PREFIX + "_" + suffix
        network_id, subnet_id = self.__create_network_and_subnet_test_helper__(network_name)
        port_id = self.neutron_operations.add_interface_router(router_id, subnet_id)
        self.test_world['ports'].append(port_id)

    def test_create_router_external_network(self):
        """
        Test whether it is possible to create a new router with a default gateway
        """
        # skip test if suite couldn't start from an empty, clean list of allocated IPs (to avoid cascading failures)
        if self.suite_world['allocated_ips']:
            self.skipTest("There were pre-existing, not deallocated IPs")
        # First, get external network id
        external_network_id = self.__get_external_network_test_helper__()
        # Then, create router
        suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')
        router_name = TEST_ROUTER_PREFIX + "_ext_" + suffix
        self.__create_router_test_helper__(router_name, external_network_id)

    def test_deploy_instance_with_new_network(self):
        """
        Test whether it is possible to deploy an instance with a new network
        """
        suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')
        instance_name = TEST_SERVER_PREFIX + "_network_" + suffix
        network_name = TEST_NETWORK_PREFIX + "_" + suffix
        self.__deploy_instance_helper__(instance_name=instance_name,
                                        network_name=network_name)

    def test_deploy_instance_with_new_network_and_metadata(self):
        """
        Test whether it is possible to deploy an instance with a new network and custom metadata
        """
        suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')
        instance_name = TEST_SERVER_PREFIX + "_network_metadata_" + suffix
        instance_meta = {"test_item": "test_value"}
        network_name = TEST_NETWORK_PREFIX + "_" + suffix
        self.__deploy_instance_helper__(instance_name=instance_name,
                                        network_name=network_name,
                                        metadata=instance_meta)

    def test_deploy_instance_with_new_network_and_keypair(self):
        """
        Test whether it is possible to deploy an instance with a new network and new keypair
        """
        suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')
        instance_name = TEST_SERVER_PREFIX + "_network_keypair_" + suffix
        keypair_name = TEST_KEYPAIR_PREFIX + "_" + suffix
        network_name = TEST_NETWORK_PREFIX + "_" + suffix
        self.__deploy_instance_helper__(instance_name=instance_name,
                                        network_name=network_name,
                                        keypair_name=keypair_name)

    def test_deploy_instance_with_new_network_and_sec_group(self):
        """
        Test whether it is possible to deploy an instance with a new network and new security group
        """
        suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')
        instance_name = TEST_SERVER_PREFIX + "_network_sec_group_" + suffix
        sec_group_name = TEST_SEC_GROUP_PREFIX + "_" + suffix
        network_name = TEST_NETWORK_PREFIX + "_" + suffix
        self.__deploy_instance_helper__(instance_name=instance_name,
                                        network_name=network_name,
                                        sec_group_name=sec_group_name)

    def test_deploy_instance_with_new_network_and_all_params(self):
        """
        Test whether it is possible to deploy an instance with a new network and all params
        """
        suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')
        instance_name = TEST_SERVER_PREFIX + "_network_all_params_" + suffix
        instance_meta = {"test_item": "test_value"}
        keypair_name = TEST_KEYPAIR_PREFIX + "_" + suffix
        sec_group_name = TEST_SEC_GROUP_PREFIX + "_" + suffix
        network_name = TEST_NETWORK_PREFIX + "_" + suffix
        self.__deploy_instance_helper__(instance_name=instance_name,
                                        network_name=network_name,
                                        metadata=instance_meta,
                                        keypair_name=keypair_name,
                                        sec_group_name=sec_group_name)

    def test_deploy_instance_with_new_network_and_associate_public_ip(self):
        """
        Test whether it is possible to deploy an instance with a new network and assign an allocated public IP
        """
        # skip test if suite couldn't start from an empty, clean list of allocated IPs (to avoid cascading failures)
        if self.suite_world['allocated_ips']:
            self.skipTest("There were pre-existing, not deallocated IPs")
        # Allocate IP
        allocated_ip = self.__allocate_ip_test_helper__()
        # Create Router with an external network gateway
        suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')
        router_name = TEST_ROUTER_PREFIX + "_public_ip_" + suffix
        external_network_id = self.__get_external_network_test_helper__()
        router_id = self.__create_router_test_helper__(router_name, external_network_id)
        # Create Network
        network_name = TEST_NETWORK_PREFIX + "_" + suffix
        network_id, subnet_id = self.__create_network_and_subnet_test_helper__(network_name)
        # Add interface to router
        port_id = self.neutron_operations.add_interface_router(router_id, subnet_id)
        self.test_world['ports'].append(port_id)
        # Deploy VM (it will have only one IP from the Public Pool)
        instance_name = TEST_SERVER_PREFIX + "_public_ip_" + suffix
        server_id = self.__deploy_instance_helper__(instance_name=instance_name,
                                                    network_name=network_name, is_network_new=False)
        # Associate Public IP to Server
        self.nova_operations.add_floating_ip_to_instance(server_id=server_id, ip_address=allocated_ip)

    def test_deploy_instance_with_new_network_and_e2e_connection_using_public_ip(self):
        """
        Test whether it is possible to deploy an instance with new network, assign an allocated public IP
        and establish a SSH connection
        """
        self.__e2e_connection_using_public_ip_test_helper__(use_shared_network=False)

    def test_deploy_instance_with_shared_network_and_e2e_connection_using_public_ip(self):
        """
        Test whether it is possible to deploy an instance with shared network, assign an allocated public IP
        and establish a SSH connection
        """
        self.__e2e_connection_using_public_ip_test_helper__(use_shared_network=True)

    def test_deploy_instance_with_new_network_and_e2e_snat_connection(self):
        """
        Test whether it is possible to deploy an instance with new network and connect to the internet (PhoneHome)
        """
        self.__e2e_snat_connection_test_helper__(use_shared_network=False)

    def test_deploy_instance_with_shared_network_and_e2e_snat_connection(self):
        """
        Test whether it is possible to deploy an instance with shared network and connect to the internet
        """
        self.__e2e_snat_connection_test_helper__(use_shared_network=True)

    def test_deploy_instance_with_new_network_and_check_metadata_service(self):
        """
        Test whether it is possible to deploy an instance and check if metadata service is working properly (phonehome)
        """
        # skip test if suite couldn't start from an empty, clean list of allocated IPs (to avoid cascading failures)
        if self.suite_world['allocated_ips']:
            self.skipTest("There were pre-existing, not deallocated IPs")
        # skip test if no PhoneHome service endpoint was given by configuration (either in settings or by environment)
        phonehome_endpoint = self.conf[PROPERTIES_CONFIG_TEST][PROPERTIES_CONFIG_TEST_PHONEHOME_ENDPOINT]
        if not phonehome_endpoint:
            self.skipTest("No value found for '{}.{}' setting".format(
                PROPERTIES_CONFIG_TEST, PROPERTIES_CONFIG_TEST_PHONEHOME_ENDPOINT))
        path_resource = PHONEHOME_DBUS_OBJECT_METADATA_PATH
        metadata_service_url = self.conf[PROPERTIES_CONFIG_TEST][PROPERTIES_CONFIG_METADATA_SERVICE_URL]
        # Load userdata from file and compile the template (replacing variable values)
        self.logger.debug("Loading userdata from file '%s'", PHONEHOME_USERDATA_METADATA_PATH)
        with open(PHONEHOME_USERDATA_METADATA_PATH, "r") as userdata_file:
            userdata_content = userdata_file.read()
        userdata_content = replace_template_properties(userdata_content, phonehome_endpoint=phonehome_endpoint,
                                                       path_resource=path_resource,
                                                       openstack_metadata_service_url=metadata_service_url)
        self.logger.debug("Userdata content: %s", userdata_content)
        # Create Router with an external network gateway
        suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')
        router_name = TEST_ROUTER_PREFIX + "_meta_" + suffix
        external_network_id = self.__get_external_network_test_helper__()
        router_id = self.__create_router_test_helper__(router_name, external_network_id)
        # Create Network
        network_name = TEST_NETWORK_PREFIX + "_" + suffix
        network_id, subnet_id = self.__create_network_and_subnet_test_helper__(network_name)
        # Add interface to router
        port_id = self.neutron_operations.add_interface_router(router_id, subnet_id)
        self.test_world['ports'].append(port_id)
        # Create Metadata
        metadata = {"region": self.region_name, "foo": "bar-" + suffix}
        # Deploy VM
        instance_name = self.region_name.lower() + "_" + TEST_SERVER_PREFIX + "_meta_" + suffix
        server_id = self.__deploy_instance_helper__(instance_name=instance_name,
                                                    network_name=network_name, is_network_new=False,
                                                    metadata=metadata,
                                                    userdata=userdata_content)
        # VM should have this metadata associated
        expected_metadata = {'region': self.region_name, 'foo': 'bar-' + suffix}
        expected_instance_name = instance_name.replace("_", "-")
        # Create new DBus connection and wait for emitted signal from HTTP PhoneHome service
        client = DbusPhoneHomeClient(self.logger)
        result = client.connect_and_wait_for_phonehome_signal(PHONEHOME_DBUS_NAME, PHONEHOME_DBUS_OBJECT_METADATA_PATH,
                                                              PHONEHOME_METADATA_SIGNAL, expected_instance_name)
        # First, check that the DBus is registered on the system
        self.assertNotEqual(result, False, "PhoneHome bus or object not found. Please check the PhoneHome services.")
        self.assertIsNotNone(result, "PhoneHome request not received from VM '%s'" % server_id)
        self.logger.debug("Request received from VM when 'calling home': %s", result)
        # Get metadata from data received
        self.assertIn("meta", result, "PhoneHome request has been received but 'meta' param is not in")
        received_metadata = json.loads(str(result))["meta"]
        # Check metadata
        self.assertEqual(expected_metadata, received_metadata,
                         "Received metadata '%s' in PhoneHome request does not match with the expected metadata" %
                         received_metadata)
Use the TransactionId constant from commons.constants when building the PhoneHome path resource
# -*- coding: utf-8 -*-
# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U
#
# This file is part of FIWARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with opensource@tid.es
from tests.fiware_region_base_tests import FiwareRegionsBaseTests
from commons.constants import *
from novaclient.exceptions import Forbidden, OverLimit, ClientException as NovaClientException
from neutronclient.common.exceptions import NeutronClientException, IpAddressGenerationFailureClient
from datetime import datetime
from commons.dbus_phonehome_service import DbusPhoneHomeClient
from commons.template_utils import replace_template_properties
from commons.constants import PHONEHOME_TX_ID_HEADER
import re
import json
import uuid
def _build_path_resource(path_resource):
    """Return *path_resource* extended with a freshly generated transaction id
    query parameter (parameter name taken from commons.constants)."""
    transaction_id = uuid.uuid1()
    return '%s?%s=%s' % (path_resource, PHONEHOME_TX_ID_HEADER, transaction_id)
class FiwareRegionWithNetworkTest(FiwareRegionsBaseTests):
with_networks = True
def __deploy_instance_helper__(self, instance_name,
network_name=None, is_network_new=True, cidr=None,
keypair_name=None, is_keypair_new=True,
sec_group_name=None, metadata=None, userdata=None):
"""
HELPER. Creates a new instance according to the given parameters:
- If a network name is given, a new network (`is_network_new==True`) or an existing one is associated.
- If a keypair name is given, a new keypair (`is_keypair_new==True`) or an existing one is associated.
- If a security group name is given, a new sec_group is created and associated to the instance.
- Optional metadata and userdata may also be associated.
:param instance_name: Name of the new instance
:param network_name: Name of the network to use (either existing or a new one)
:param is_network_new: If True, a new network should be created and appended to the `TestWorld`
:param cidr: Optional CIDR to use in the network's subnet (otherwise, one is chosen from default range)
:param keypair_name: Name of the keypair to use (either existing or a new one)
:param is_keypair_new: If True, a new keypair should be created and appended to the `TestWorld`
:param sec_group_name: Name of the new security group that will be created
:param metadata: Python dict with metadata info {"key": "value"}
:param userdata: userdata file content (String)
:return: Server ID (String)
"""
flavor_id = self.nova_operations.get_any_flavor_id()
self.assertIsNotNone(flavor_id, "Problems retrieving a flavor")
base_image_name = self.nova_operations.test_image
image_id = self.nova_operations.find_image_id_by_name(image_name=base_image_name)
self.assertIsNotNone(image_id, "Problems retrieving the image '{}'".format(base_image_name))
# instance prerequisites
try:
network_id_list = None
if network_name:
if is_network_new:
# Create the given network
network = self.neutron_operations.create_network(network_name)
self.test_world['networks'].append(network['id'])
network_id_list = [{'net-id': network['id']}]
# Create a subnet
self.neutron_operations.create_subnet(network, cidr)
else:
# Look for the network id
net_list = self.neutron_operations.find_networks(name=network_name)
self.assertTrue(len(net_list) != 0, "Required network '%s' could not be found" % network_name)
network_id_list = [{'net-id': net_list[0]['id']}]
except NeutronClientException as e:
self.logger.debug("Required network could not be created: %s", e)
self.fail(e)
try:
if keypair_name:
if is_keypair_new:
self.nova_operations.create_keypair(keypair_name)
self.test_world['keypair_names'].append(keypair_name)
else:
keypair_found = self.nova_operations.find_keypair(name=keypair_name)
self.assertIsNotNone(keypair_found, "Required Keypair '%s' could not be found" % keypair_name)
except NovaClientException as e:
self.logger.debug("Required keypair could not be created: %s", e)
self.fail(e)
try:
security_group_name_list = None
if sec_group_name:
sec_group_id = self.nova_operations.create_security_group_and_rules(sec_group_name)
self.test_world['sec_groups'].append(sec_group_id)
security_group_name_list = [sec_group_name]
except NovaClientException as e:
self.logger.debug("Required security group could not be created: %s", e)
self.fail(e)
# create new instance
try:
server_data = self.nova_operations.launch_instance(instance_name=instance_name,
flavor_id=flavor_id,
image_id=image_id,
metadata=metadata,
keypair_name=keypair_name,
security_group_name_list=security_group_name_list,
network_id_list=network_id_list,
userdata=userdata)
except Forbidden as e:
self.logger.debug("Quota exceeded when launching a new instance")
self.fail(e)
except OverLimit as e:
self.logger.debug("Not enough resources to launch new instance: %s", e)
self.fail(e)
else:
self.test_world['servers'].append(server_data['id'])
# Wait for status=ACTIVE
status, detail = self.nova_operations.wait_for_task_status(server_data['id'], 'ACTIVE')
self.assertEqual(status, 'ACTIVE', "{detail}. Current status is {status}".format(detail=detail, status=status))
return server_data['id']
def __get_external_network_test_helper__(self):
"""
HELPER. Finds and returns the external network id of current region
:return: External network id
"""
external_network_id = None
external_network_list = self.neutron_operations.find_networks(router_external=True)
if len(external_network_list) != 0:
external_net_region = self.conf[PROPERTIES_CONFIG_REGION][PROPERTIES_CONFIG_REGION_EXTERNAL_NET]
if self.region_name in external_net_region:
ext_net_config = external_net_region[self.region_name]
for external_network in external_network_list:
if external_network['name'] == ext_net_config:
external_network_id = external_network['id']
if external_network_id is None:
external_network_id = external_network_list[0]['id']
self.assertIsNotNone(external_network_id, "No external networks found")
return external_network_id
def __get_shared_network_test_helper__(self):
"""
HELPER. Finds and returns the shared network name of current region
:return: Shared network name
"""
# get from settings the name of the shared network to lookup
lookup_network_name = TEST_SHARED_NET_DEFAULT
shared_network_conf = self.conf[PROPERTIES_CONFIG_REGION].get(PROPERTIES_CONFIG_REGION_SHARED_NET)
if shared_network_conf:
lookup_network_name = shared_network_conf.get(self.region_name, TEST_SHARED_NET_DEFAULT)
# find the network in the list of existing shared networks
lookup_network_list = self.neutron_operations.find_networks(name=lookup_network_name,
shared=True,
router_external=False)
shared_network_name = lookup_network_list[0]['name'] if lookup_network_list else None
self.assertIsNotNone(shared_network_name, "No shared network %s found" % lookup_network_name)
return shared_network_name
def __e2e_connection_using_public_ip_test_helper__(self, use_shared_network=True):
"""
HELPER. Test whether it is possible to deploy an instance, assign an allocated public IP and establish
a SSH connection
:param use_shared_network: If True, use the existing shared network associated to the new instance
"""
# skip test if suite couldn't start from an empty, clean list of allocated IPs (to avoid cascading failures)
if self.suite_world['allocated_ips']:
self.skipTest("There were pre-existing, not deallocated IPs")
# Allocate an IP
allocated_ip = self.__allocate_ip_test_helper__()
# Create Keypair
suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')
keypair_name = TEST_KEYPAIR_PREFIX + "_" + suffix
private_keypair_value = self.__create_keypair_test_helper__(keypair_name)
# Network
if use_shared_network:
network_name = self.__get_shared_network_test_helper__()
else:
# Create Router with an external network gateway
router_name = TEST_ROUTER_PREFIX + "_e2e_" + suffix
external_network_id = self.__get_external_network_test_helper__()
router_id = self.__create_router_test_helper__(router_name, external_network_id)
# Create Network
network_name = TEST_NETWORK_PREFIX + "_" + suffix
network_id, subnet_id = self.__create_network_and_subnet_test_helper__(network_name)
# Add interface to router
port_id = self.neutron_operations.add_interface_router(router_id, subnet_id)
self.test_world['ports'].append(port_id)
# Deploy VM (it will have only one IP from the Public Pool)
instance_name = TEST_SERVER_PREFIX + "_e2e_" + suffix
keypair_name = TEST_KEYPAIR_PREFIX + "_" + suffix
sec_group_name = TEST_SEC_GROUP_PREFIX + "_" + suffix
server_id = self.__deploy_instance_helper__(instance_name=instance_name,
network_name=network_name, is_network_new=False,
keypair_name=keypair_name, is_keypair_new=False,
sec_group_name=sec_group_name)
# Associate the public IP to Server
self.nova_operations.add_floating_ip_to_instance(server_id=server_id, ip_address=allocated_ip)
# SSH Connection
self.__ssh_connection_test_helper__(host=allocated_ip, private_key=private_keypair_value)
def __e2e_snat_connection_test_helper__(self, use_shared_network=True):
"""
HELPER. Test whether it is possible to deploy an instance and connect to the internet (PhoneHome service)
:param use_shared_network: If True, use the existing shared network associated to the new instance
"""
# skip test if suite couldn't start from an empty, clean list of allocated IPs (to avoid cascading failures)
if self.suite_world['allocated_ips']:
self.skipTest("There were pre-existing, not deallocated IPs")
# skip test if no PhoneHome service endpoint was given by configuration (either in settings or by environment)
phonehome_endpoint = self.conf[PROPERTIES_CONFIG_TEST][PROPERTIES_CONFIG_TEST_PHONEHOME_ENDPOINT]
if not phonehome_endpoint:
self.skipTest("No value found for '{}.{}' setting".format(
PROPERTIES_CONFIG_TEST, PROPERTIES_CONFIG_TEST_PHONEHOME_ENDPOINT))
path_resource = PHONEHOME_DBUS_OBJECT_PATH
# Load userdata from file and compile the template (replacing variable values)
self.logger.debug("Loading userdata from file '%s'", PHONEHOME_USERDATA_PATH)
with open(PHONEHOME_USERDATA_PATH, "r") as userdata_file:
userdata_content = userdata_file.read()
userdata_content = replace_template_properties(userdata_content, phonehome_endpoint=phonehome_endpoint,
path_resource=_build_path_resource(path_resource))
self.logger.debug("Userdata content: %s", userdata_content)
suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')
# Network
if use_shared_network:
network_name = self.__get_shared_network_test_helper__()
else:
# Create Router with an external network gateway
router_name = TEST_ROUTER_PREFIX + "_snat_" + suffix
external_network_id = self.__get_external_network_test_helper__()
router_id = self.__create_router_test_helper__(router_name, external_network_id)
# Create Network
network_name = TEST_NETWORK_PREFIX + "_" + suffix
network_id, subnet_id = self.__create_network_and_subnet_test_helper__(network_name)
# Add interface to router
port_id = self.neutron_operations.add_interface_router(router_id, subnet_id)
self.test_world['ports'].append(port_id)
# Deploy VM
instance_name = self.region_name.lower() + "_" + TEST_SERVER_PREFIX + "_snat_" + suffix
server_id = self.__deploy_instance_helper__(instance_name=instance_name,
network_name=network_name, is_network_new=False,
userdata=userdata_content)
# VM will have as hostname, the instance_name with "-" instead of "_"
expected_instance_name = instance_name.replace("_", "-")
# Create new new DBus connection and wait for emitted signal from HTTP PhoneHome service
client = DbusPhoneHomeClient(self.logger)
result = client.connect_and_wait_for_phonehome_signal(PHONEHOME_DBUS_NAME, PHONEHOME_DBUS_OBJECT_PATH,
PHONEHOME_SIGNAL, expected_instance_name)
self.assertIsNotNone(result, "PhoneHome request not received from VM '%s'" % server_id)
self.logger.debug("Request received from VM when 'calling home': %s", result)
# Get hostname from data received
self.assertIn("hostname", result, "PhoneHome request has been received but 'hostname' param is not in")
received_hostname = re.match(".*hostname=([\w-]*)", result).group(1)
# Check hostname
self.assertEqual(expected_instance_name, received_hostname,
"Received hostname '%s' in PhoneHome request does not match with the expected instance name" %
received_hostname)
def __create_router_test_helper__(self, router_name, external_network_id=None):
"""
HELPER. Creates a router and links it to an external network (if not None)
:param external_network_id: External network id
:return: Router id (String)
"""
try:
router = self.neutron_operations.create_router(router_name, external_network_id)
except IpAddressGenerationFailureClient as e:
self.logger.debug("An error occurred creating router: %s", e)
self.fail(e)
self.assertIsNotNone(router, "Problems creating router")
self.assertEqual(router['status'], 'ACTIVE', "Router status is NOT ACTIVE")
self.test_world['routers'].append(router['id'])
self.logger.debug("%s", router)
return router['id']
def __create_network_and_subnet_test_helper__(self, network_name, cidr=None):
"""
HELPER. Creates network and subnet.
:param network_name: Network name
:param cidr: Optional CIDR to use in the subnet (otherwise, one is chosen from default range)
:return: (NetworkId, SubnetworkId) (String, String)
"""
network = self.neutron_operations.create_network(network_name)
self.assertIsNotNone(network, "Problems creating network")
self.assertEqual(network['status'], 'ACTIVE', "Network status is not ACTIVE")
self.test_world['networks'].append(network['id'])
network = self.neutron_operations.create_subnet(network, cidr)
self.assertIsNotNone(network['subnet']['id'], "Problems creating subnet")
self.logger.debug("%s", network)
return network['id'], network['subnet']['id']
def test_create_network_and_subnet(self):
"""
Test whether it is possible to create a new network with subnets
"""
suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')
network_name = TEST_NETWORK_PREFIX + "_" + suffix
self.__create_network_and_subnet_test_helper__(network_name)
def test_external_networks(self):
"""
Test whether there are external networks configured in the region
"""
network_list = self.neutron_operations.find_networks(router_external=True)
self.assertNotEqual(len(network_list), 0, "No external networks found")
def test_create_router_no_external_network(self):
"""
Test whether it is possible to create a new router without setting the gateway
"""
suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')
router_name = TEST_ROUTER_PREFIX + "_" + suffix
self.__create_router_test_helper__(router_name)
def test_create_router_no_external_network_and_add_network_port(self):
"""
Test whether it is possible to create a new router without external gateway and link new network port
"""
# Create Router
suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')
router_name = TEST_ROUTER_PREFIX + "_ports_" + suffix
router_id = self.__create_router_test_helper__(router_name)
# Create Network with only one subnet
network_name = TEST_NETWORK_PREFIX + "_" + suffix
network_id, subnet_id = self.__create_network_and_subnet_test_helper__(network_name)
port_id = self.neutron_operations.add_interface_router(router_id, subnet_id)
self.test_world['ports'].append(port_id)
def test_create_router_external_network(self):
"""
Test whether it is possible to create a new router with a default gateway
"""
# skip test if suite couldn't start from an empty, clean list of allocated IPs (to avoid cascading failures)
if self.suite_world['allocated_ips']:
self.skipTest("There were pre-existing, not deallocated IPs")
# First, get external network id
external_network_id = self.__get_external_network_test_helper__()
# Then, create router
suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')
router_name = TEST_ROUTER_PREFIX + "_ext_" + suffix
self.__create_router_test_helper__(router_name, external_network_id)
def test_deploy_instance_with_new_network(self):
"""
Test whether it is possible to deploy an instance with a new network
"""
suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')
instance_name = TEST_SERVER_PREFIX + "_network_" + suffix
network_name = TEST_NETWORK_PREFIX + "_" + suffix
self.__deploy_instance_helper__(instance_name=instance_name,
network_name=network_name)
def test_deploy_instance_with_new_network_and_metadata(self):
"""
Test whether it is possible to deploy an instance with a new network and custom metadata
"""
suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')
instance_name = TEST_SERVER_PREFIX + "_network_metadata_" + suffix
instance_meta = {"test_item": "test_value"}
network_name = TEST_NETWORK_PREFIX + "_" + suffix
self.__deploy_instance_helper__(instance_name=instance_name,
network_name=network_name,
metadata=instance_meta)
def test_deploy_instance_with_new_network_and_keypair(self):
"""
Test whether it is possible to deploy an instance with a new network and new keypair
"""
suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')
instance_name = TEST_SERVER_PREFIX + "_network_keypair_" + suffix
keypair_name = TEST_KEYPAIR_PREFIX + "_" + suffix
network_name = TEST_NETWORK_PREFIX + "_" + suffix
self.__deploy_instance_helper__(instance_name=instance_name,
network_name=network_name,
keypair_name=keypair_name)
def test_deploy_instance_with_new_network_and_sec_group(self):
"""
Test whether it is possible to deploy an instance with a new network and new security group
"""
suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')
instance_name = TEST_SERVER_PREFIX + "_network_sec_group_" + suffix
sec_group_name = TEST_SEC_GROUP_PREFIX + "_" + suffix
network_name = TEST_NETWORK_PREFIX + "_" + suffix
self.__deploy_instance_helper__(instance_name=instance_name,
network_name=network_name,
sec_group_name=sec_group_name)
def test_deploy_instance_with_new_network_and_all_params(self):
"""
Test whether it is possible to deploy an instance with a new network and all params
"""
suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')
instance_name = TEST_SERVER_PREFIX + "_network_all_params_" + suffix
instance_meta = {"test_item": "test_value"}
keypair_name = TEST_KEYPAIR_PREFIX + "_" + suffix
sec_group_name = TEST_SEC_GROUP_PREFIX + "_" + suffix
network_name = TEST_NETWORK_PREFIX + "_" + suffix
self.__deploy_instance_helper__(instance_name=instance_name,
network_name=network_name,
metadata=instance_meta,
keypair_name=keypair_name,
sec_group_name=sec_group_name)
def test_deploy_instance_with_new_network_and_associate_public_ip(self):
"""
Test whether it is possible to deploy an instance with a new network and assign an allocated public IP
"""
# skip test if suite couldn't start from an empty, clean list of allocated IPs (to avoid cascading failures)
if self.suite_world['allocated_ips']:
self.skipTest("There were pre-existing, not deallocated IPs")
# Allocate IP
allocated_ip = self.__allocate_ip_test_helper__()
# Create Router with an external network gateway
suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')
router_name = TEST_ROUTER_PREFIX + "_public_ip_" + suffix
external_network_id = self.__get_external_network_test_helper__()
router_id = self.__create_router_test_helper__(router_name, external_network_id)
# Create Network
network_name = TEST_NETWORK_PREFIX + "_" + suffix
network_id, subnet_id = self.__create_network_and_subnet_test_helper__(network_name)
# Add interface to router
port_id = self.neutron_operations.add_interface_router(router_id, subnet_id)
self.test_world['ports'].append(port_id)
# Deploy VM (it will have only one IP from the Public Pool)
instance_name = TEST_SERVER_PREFIX + "_public_ip_" + suffix
server_id = self.__deploy_instance_helper__(instance_name=instance_name,
network_name=network_name, is_network_new=False)
# Associate Public IP to Server
self.nova_operations.add_floating_ip_to_instance(server_id=server_id, ip_address=allocated_ip)
def test_deploy_instance_with_new_network_and_e2e_connection_using_public_ip(self):
"""
Test whether it is possible to deploy an instance with new network, assign an allocated public IP
and establish a SSH connection
"""
self.__e2e_connection_using_public_ip_test_helper__(use_shared_network=False)
def test_deploy_instance_with_shared_network_and_e2e_connection_using_public_ip(self):
"""
Test whether it is possible to deploy an instance with shared network, assign an allocated public IP
and establish a SSH connection
"""
self.__e2e_connection_using_public_ip_test_helper__(use_shared_network=True)
def test_deploy_instance_with_new_network_and_e2e_snat_connection(self):
"""
Test whether it is possible to deploy an instance with new network and connect to the internet (PhoneHome)
"""
self.__e2e_snat_connection_test_helper__(use_shared_network=False)
def test_deploy_instance_with_shared_network_and_e2e_snat_connection(self):
"""
Test whether it is possible to deploy an instance with shared network and connect to the internet
"""
self.__e2e_snat_connection_test_helper__(use_shared_network=True)
    def test_deploy_instance_with_new_network_and_check_metadata_service(self):
        """
        Test whether it is possible to deploy an instance and check if metadata service is working properly (phonehome)

        Flow: allocate nothing; build router+network, boot a VM whose userdata queries the
        OpenStack metadata service and POSTs the result to the PhoneHome endpoint; then wait
        on DBus for that PhoneHome signal and compare the received metadata with the one set
        at boot time.
        """
        # skip test if suite couldn't start from an empty, clean list of allocated IPs (to avoid cascading failures)
        if self.suite_world['allocated_ips']:
            self.skipTest("There were pre-existing, not deallocated IPs")
        # skip test if no PhoneHome service endpoint was given by configuration (either in settings or by environment)
        phonehome_endpoint = self.conf[PROPERTIES_CONFIG_TEST][PROPERTIES_CONFIG_TEST_PHONEHOME_ENDPOINT]
        if not phonehome_endpoint:
            self.skipTest("No value found for '{}.{}' setting".format(
                PROPERTIES_CONFIG_TEST, PROPERTIES_CONFIG_TEST_PHONEHOME_ENDPOINT))
        # metadata-specific DBus object path (distinct from the plain SNAT phonehome path)
        path_resource = PHONEHOME_DBUS_OBJECT_METADATA_PATH
        metadata_service_url = self.conf[PROPERTIES_CONFIG_TEST][PROPERTIES_CONFIG_METADATA_SERVICE_URL]
        # Load userdata from file and compile the template (replacing variable values)
        self.logger.debug("Loading userdata from file '%s'", PHONEHOME_USERDATA_METADATA_PATH)
        with open(PHONEHOME_USERDATA_METADATA_PATH, "r") as userdata_file:
            userdata_content = userdata_file.read()
        userdata_content = replace_template_properties(userdata_content, phonehome_endpoint=phonehome_endpoint,
                                                       path_resource=path_resource,
                                                       openstack_metadata_service_url=metadata_service_url)
        self.logger.debug("Userdata content: %s", userdata_content)
        # Create Router with an external network gateway
        suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')
        router_name = TEST_ROUTER_PREFIX + "_meta_" + suffix
        external_network_id = self.__get_external_network_test_helper__()
        router_id = self.__create_router_test_helper__(router_name, external_network_id)
        # Create Network
        network_name = TEST_NETWORK_PREFIX + "_" + suffix
        network_id, subnet_id = self.__create_network_and_subnet_test_helper__(network_name)
        # Add interface to router (port is registered in test_world for later cleanup)
        port_id = self.neutron_operations.add_interface_router(router_id, subnet_id)
        self.test_world['ports'].append(port_id)
        # Create Metadata: this is what the VM should read back from the metadata service
        metadata = {"region": self.region_name, "foo": "bar-" + suffix}
        # Deploy VM
        instance_name = self.region_name.lower() + "_" + TEST_SERVER_PREFIX + "_meta_" + suffix
        server_id = self.__deploy_instance_helper__(instance_name=instance_name,
                                                    network_name=network_name, is_network_new=False,
                                                    metadata=metadata,
                                                    userdata=userdata_content)
        # VM should have this metadata associated
        expected_metadata = {'region': self.region_name, 'foo': 'bar-' + suffix}
        # hostname is the instance name with "-" instead of "_"
        expected_instance_name = instance_name.replace("_", "-")
        # Create new DBus connection and wait for emitted signal from HTTP PhoneHome service
        client = DbusPhoneHomeClient(self.logger)
        result = client.connect_and_wait_for_phonehome_signal(PHONEHOME_DBUS_NAME, PHONEHOME_DBUS_OBJECT_METADATA_PATH,
                                                              PHONEHOME_METADATA_SIGNAL, expected_instance_name)
        # First, check that the DBus is registered on the system
        self.assertNotEqual(result, False, "PhoneHome bus or object not found. Please check the PhoneHome services.")
        self.assertIsNotNone(result, "PhoneHome request not received from VM '%s'" % server_id)
        self.logger.debug("Request received from VM when 'calling home': %s", result)
        # Get metadata from data received (the PhoneHome payload is a JSON document)
        self.assertIn("meta", result, "PhoneHome request has been received but 'meta' param is not in")
        received_metadata = json.loads(str(result))["meta"]
        # Check metadata
        self.assertEqual(expected_metadata, received_metadata,
                         "Received metadata '%s' in PhoneHome request does not match with the expected metadata" %
                         received_metadata)
|
#!/usr/bin/env python
# coding=utf-8
#
# Copyright 2012 F2E.im
# Do have a faith in what you're doing.
# Make your life a story worth telling.
import uuid
import hashlib
import Image
import StringIO
import time
import json
import re
import urllib2
import tornado.web
import lib.jsonp
import pprint
import math
import datetime
from base import BaseHandler
from lib.variables import *
from form.topic import *
from lib.variables import gen_random
from lib.xss import XssCleaner
from lib.utils import find_mentions
class IndexHandler(BaseHandler):
    """Front page: paginated topic list plus sidebar counters and site stats."""

    def get(self, template_variables=None):
        # A mutable default ({}) would be shared across all requests to this
        # handler class, leaking one user's template state into another's;
        # use None as the sentinel instead.
        if template_variables is None:
            template_variables = {}
        user_info = self.current_user
        page = int(self.get_argument("p", "1"))
        template_variables["user_info"] = user_info
        if user_info:
            # per-user counters shown in the sidebar
            template_variables["user_info"]["counter"] = {
                "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]),
                "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]),
                "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]),
            }
            template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(user_info["uid"])
        # site-wide statistics
        template_variables["status_counter"] = {
            "users": self.user_model.get_all_users_count(),
            "nodes": self.node_model.get_all_nodes_count(),
            "topics": self.topic_model.get_all_topics_count(),
            "replies": self.reply_model.get_all_replies_count(),
        }
        template_variables["topics"] = self.topic_model.get_all_topics(current_page = page)
        template_variables["planes"] = self.plane_model.get_all_planes_with_nodes()
        template_variables["hot_nodes"] = self.node_model.get_all_hot_nodes()
        template_variables["active_page"] = "topic"
        template_variables["gen_random"] = gen_random
        self.render("topic/topics.html", **template_variables)
class NodeTopicsHandler(BaseHandler):
    """Topic list filtered by node (category), addressed by its slug."""

    def get(self, node_slug, template_variables=None):
        # avoid shared mutable default argument (state would leak across requests)
        if template_variables is None:
            template_variables = {}
        user_info = self.current_user
        page = int(self.get_argument("p", "1"))
        template_variables["user_info"] = user_info
        if user_info:
            # per-user counters shown in the sidebar
            template_variables["user_info"]["counter"] = {
                "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]),
                "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]),
                "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]),
            }
            template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(user_info["uid"])
        template_variables["topics"] = self.topic_model.get_all_topics_by_node_slug(current_page = page, node_slug = node_slug)
        template_variables["node"] = self.node_model.get_node_by_node_slug(node_slug)
        template_variables["active_page"] = "topic"
        template_variables["gen_random"] = gen_random
        self.render("topic/node_topics.html", **template_variables)
class ViewHandler(BaseHandler):
    """Topic detail page (GET) and reply submission (POST)."""

    def get(self, topic_id, template_variables=None):
        # avoid shared mutable default argument (state would leak across requests)
        if template_variables is None:
            template_variables = {}
        # self.current_user already resolves the logged-in user; the original
        # redundant second get_current_user() call has been dropped.
        user_info = self.current_user
        template_variables["user_info"] = user_info
        if user_info:
            template_variables["user_info"]["counter"] = {
                "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]),
                "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]),
                "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]),
            }
            template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(user_info["uid"])
            template_variables["topic_favorited"] = self.favorite_model.get_favorite_by_topic_id_and_owner_user_id(topic_id, user_info["uid"])
        template_variables["gen_random"] = gen_random
        template_variables["topic"] = self.topic_model.get_topic_by_topic_id(topic_id)
        # check reply count and cal current_page if `p` not given
        reply_num = 106  # replies shown per page
        reply_count = template_variables["topic"]["reply_count"]
        # last page = ceil(reply_count / reply_num), at least 1; '//' makes the
        # integer division explicit (identical on Python 2, safe on Python 3)
        reply_last_page = (reply_count // reply_num + (reply_count % reply_num and 1)) or 1
        page = int(self.get_argument("p", reply_last_page))
        template_variables["reply_num"] = reply_num
        template_variables["current_page"] = page
        template_variables["replies"] = self.reply_model.get_all_replies_by_topic_id(topic_id, current_page = page, num = reply_num)
        template_variables["active_page"] = "topic"
        # update topic reply_count and hits
        self.topic_model.update_topic_by_topic_id(topic_id, {
            "reply_count": template_variables["replies"]["page"]["total"],
            "hits": (template_variables["topic"]["hits"] or 0) + 1,
        })
        self.render("topic/view.html", **template_variables)

    @tornado.web.authenticated
    def post(self, template_variables=None):
        # the incoming value was never used: POST always starts from a clean dict
        template_variables = {}
        # validate the fields
        form = ReplyForm(self)
        if not form.validate():
            self.get(form.tid.data, {"errors": form.errors})
            return
        # continue while validate succeed
        topic_info = self.topic_model.get_topic_by_topic_id(form.tid.data)
        replied_info = self.reply_model.get_user_last_reply_by_topic_id(self.current_user["uid"], form.tid.data)
        if not topic_info:
            template_variables["errors"] = {}
            template_variables["errors"]["invalid_topic_info"] = [u"要回复的帖子不存在"]
            self.get(form.tid.data, template_variables)
            return
        if replied_info:
            # fingerprint (topic + author + content) rejects an identical double submit
            last_replied_fingerprint = hashlib.sha1(str(replied_info.topic_id) + str(replied_info.author_id) + replied_info.content).hexdigest()
            new_replied_fingerprint = hashlib.sha1(str(form.tid.data) + str(self.current_user["uid"]) + form.content.data).hexdigest()
            if last_replied_fingerprint == new_replied_fingerprint:
                template_variables["errors"] = {}
                template_variables["errors"]["duplicated_reply"] = [u"回复重复提交"]
                self.get(form.tid.data, template_variables)
                return
        reply_info = {
            "author_id": self.current_user["uid"],
            "topic_id": form.tid.data,
            # "content": XssCleaner().strip(form.content.data),
            "content": form.content.data,
            "created": time.strftime('%Y-%m-%d %H:%M:%S'),
        }
        self.reply_model.add_new_reply(reply_info)
        # update topic last_replied_by and last_replied_time
        self.topic_model.update_topic_by_topic_id(form.tid.data, {
            "last_replied_by": self.current_user["uid"],
            "last_replied_time": time.strftime('%Y-%m-%d %H:%M:%S'),
            "last_touched": time.strftime('%Y-%m-%d %H:%M:%S'),
        })
        # create reply notification (not when replying to one's own topic)
        if not self.current_user["uid"] == topic_info["author_id"]:
            self.notification_model.add_new_notification({
                "trigger_user_id": self.current_user["uid"],
                "involved_type": 1,  # 0: mention, 1: reply
                "involved_user_id": topic_info["author_id"],
                "involved_topic_id": form.tid.data,
                "content": form.content.data,
                "status": 0,
                "occurrence_time": time.strftime('%Y-%m-%d %H:%M:%S'),
            })
        # create @username notification; skip unknown users, oneself, and the topic
        # author (who already received the reply notification above)
        for username in set(find_mentions(form.content.data)):
            mentioned_user = self.user_model.get_user_by_username(username)
            if not mentioned_user:
                continue
            if mentioned_user["uid"] == self.current_user["uid"]:
                continue
            if mentioned_user["uid"] == topic_info["author_id"]:
                continue
            self.notification_model.add_new_notification({
                "trigger_user_id": self.current_user["uid"],
                "involved_type": 0,  # 0: mention, 1: reply
                "involved_user_id": mentioned_user["uid"],
                "involved_topic_id": form.tid.data,
                "content": form.content.data,
                "status": 0,
                "occurrence_time": time.strftime('%Y-%m-%d %H:%M:%S'),
            })
        # update reputation of topic author (only on this user's first reply)
        if not self.current_user["uid"] == topic_info["author_id"] and not replied_info:
            topic_time_diff = datetime.datetime.now() - topic_info["created"]
            reputation = topic_info["author_reputation"] or 0
            # parenthesize (reputation or 0): without it, 'or' bound looser than '+'
            # and swallowed the '+ days + 10' term whenever reputation was non-zero
            reputation = reputation + 2 * math.log((self.current_user["reputation"] or 0) + topic_time_diff.days + 10, 10)
            self.user_model.set_user_base_info_by_uid(topic_info["author_id"], {"reputation": reputation})
        # self.get(form.tid.data)
        self.redirect("/t/%s#reply%s" % (form.tid.data, topic_info["reply_count"] + 1))
class CreateHandler(BaseHandler):
    """New-topic form (GET) and topic creation (POST) under a given node."""

    @tornado.web.authenticated
    def get(self, node_slug=None, template_variables=None):
        # avoid shared mutable default argument (state would leak across requests)
        if template_variables is None:
            template_variables = {}
        user_info = self.current_user
        template_variables["user_info"] = user_info
        template_variables["user_info"]["counter"] = {
            "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]),
            "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]),
            "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]),
        }
        template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(user_info["uid"])
        template_variables["gen_random"] = gen_random
        template_variables["node_slug"] = node_slug
        template_variables["active_page"] = "topic"
        self.render("topic/create.html", **template_variables)

    @tornado.web.authenticated
    def post(self, node_slug=None, template_variables=None):
        # the incoming value was never used: POST always starts from a clean dict
        template_variables = {}
        # validate the fields
        form = CreateForm(self)
        if not form.validate():
            self.get(node_slug, {"errors": form.errors})
            return
        # continue while validate succeed
        node = self.node_model.get_node_by_node_slug(node_slug)
        last_created = self.topic_model.get_user_last_created_topic(self.current_user["uid"])
        if last_created:
            # fingerprint (title + content + node) rejects an identical double submit
            last_created_fingerprint = hashlib.sha1(last_created.title + last_created.content + str(last_created.node_id)).hexdigest()
            new_created_fingerprint = hashlib.sha1(form.title.data + form.content.data + str(node["id"])).hexdigest()
            if last_created_fingerprint == new_created_fingerprint:
                template_variables["errors"] = {}
                template_variables["errors"]["duplicated_topic"] = [u"帖子重复提交"]
                self.get(node_slug, template_variables)
                return
        topic_info = {
            "author_id": self.current_user["uid"],
            "title": form.title.data,
            # "content": XssCleaner().strip(form.content.data),
            "content": form.content.data,
            "node_id": node["id"],
            "created": time.strftime('%Y-%m-%d %H:%M:%S'),
            "reply_count": 0,
            "last_touched": time.strftime('%Y-%m-%d %H:%M:%S'),
        }
        self.topic_model.add_new_topic(topic_info)
        # posting a topic costs 5 reputation points, floored at 0
        reputation = self.current_user["reputation"] or 0
        reputation = reputation - 5
        reputation = 0 if reputation < 0 else reputation
        self.user_model.set_user_base_info_by_uid(topic_info["author_id"], {"reputation": reputation})
        self.redirect("/")
class EditHandler(BaseHandler):
    """Edit-topic form (GET) and topic update (POST); only the author may edit."""

    @tornado.web.authenticated
    def get(self, topic_id, template_variables=None):
        # avoid shared mutable default argument (state would leak across requests)
        if template_variables is None:
            template_variables = {}
        user_info = self.current_user
        template_variables["user_info"] = user_info
        template_variables["user_info"]["counter"] = {
            "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]),
            "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]),
            "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]),
        }
        template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(user_info["uid"])
        template_variables["topic"] = self.topic_model.get_topic_by_topic_id(topic_id)
        template_variables["gen_random"] = gen_random
        template_variables["active_page"] = "topic"
        self.render("topic/edit.html", **template_variables)

    @tornado.web.authenticated
    def post(self, topic_id, template_variables=None):
        # the incoming value was never used: POST always starts from a clean dict
        template_variables = {}
        # validate the fields
        form = CreateForm(self)
        if not form.validate():
            self.get(topic_id, {"errors": form.errors})
            return
        # continue while validate succeed
        topic_info = self.topic_model.get_topic_by_topic_id(topic_id)
        # only the topic author may edit it
        if not topic_info["author_id"] == self.current_user["uid"]:
            template_variables["errors"] = {}
            template_variables["errors"]["invalid_permission"] = [u"没有权限修改该主题"]
            self.get(topic_id, template_variables)
            return
        update_topic_info = {
            "title": form.title.data,
            # "content": XssCleaner().strip(form.content.data),
            "content": form.content.data,
            "updated": time.strftime('%Y-%m-%d %H:%M:%S'),
            "last_touched": time.strftime('%Y-%m-%d %H:%M:%S'),
        }
        self.topic_model.update_topic_by_topic_id(topic_id, update_topic_info)
        # editing a topic costs 2 reputation points, floored at 0
        reputation = topic_info["author_reputation"] or 0
        reputation = reputation - 2
        reputation = 0 if reputation < 0 else reputation
        self.user_model.set_user_base_info_by_uid(topic_info["author_id"], {"reputation": reputation})
        self.redirect("/t/%s" % topic_id)
class ProfileHandler(BaseHandler):
    """Public profile page: user card, their topics and their replies."""

    def get(self, user, template_variables=None):
        # avoid shared mutable default argument (state would leak across requests)
        if template_variables is None:
            template_variables = {}
        # a purely numeric path segment is treated as a uid, otherwise as a username
        if re.match(r'^\d+$', user):
            user_info = self.user_model.get_user_by_uid(user)
        else:
            user_info = self.user_model.get_user_by_username(user)
        if not user_info:
            self.write_error(404)
            return
        current_user = self.current_user
        page = int(self.get_argument("p", "1"))
        template_variables["user_info"] = user_info
        # user_info is guaranteed truthy here (404 returned above)
        template_variables["user_info"]["counter"] = {
            "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]),
            "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]),
            "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]),
        }
        if current_user:
            template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(current_user["uid"])
        # GitHub repos block disabled (previously fetched via api.github.com and memcached):
        # if user_info["github"]:
        #     github_repos = self.mc.get(str("%s_github_repos" % user_info["github"])) or json.JSONDecoder().decode(urllib2.urlopen('https://api.github.com/users/%s/repos' % user_info["github"]).read())
        #     self.mc.set(str("%s_github_repos" % user_info["github"]), github_repos)
        #     template_variables["github_repos"] = github_repos
        template_variables["topics"] = self.topic_model.get_user_all_topics(user_info["uid"], current_page = page)
        template_variables["replies"] = self.reply_model.get_user_all_replies(user_info["uid"], current_page = page)
        template_variables["gen_random"] = gen_random
        template_variables["active_page"] = "_blank"
        self.render("topic/profile.html", **template_variables)
class VoteHandler(BaseHandler):
    """AJAX endpoint: upvote a topic and reward its author with reputation."""

    def get(self, template_variables=None):
        # NOTE(review): not decorated with @tornado.web.authenticated, yet it reads
        # self.current_user["uid"] below — an anonymous request would fail here;
        # confirm whether authentication is enforced upstream.
        if template_variables is None:
            template_variables = {}
        topic_id = int(self.get_argument("topic_id"))
        topic_info = self.topic_model.get_topic_by_topic_id(topic_id)
        if not topic_info:
            self.write(lib.jsonp.print_JSON({
                "success": 0,
                "message": "topic_not_exist",
            }))
            return
        # users cannot vote for their own topics
        if self.current_user["uid"] == topic_info["author_id"]:
            self.write(lib.jsonp.print_JSON({
                "success": 0,
                "message": "can_not_vote_your_topic",
            }))
            return
        # one vote per user and topic
        if self.vote_model.get_vote_by_topic_id_and_trigger_user_id(topic_id, self.current_user["uid"]):
            self.write(lib.jsonp.print_JSON({
                "success": 0,
                "message": "already_voted",
            }))
            return
        self.vote_model.add_new_vote({
            "trigger_user_id": self.current_user["uid"],
            "involved_type": 0,  # 0: topic, 1: reply
            "involved_user_id": topic_info["author_id"],
            "involved_topic_id": topic_id,
            "status": 0,
            "occurrence_time": time.strftime('%Y-%m-%d %H:%M:%S'),
        })
        self.write(lib.jsonp.print_JSON({
            "success": 1,
            "message": "thanks_for_your_vote",
        }))
        # update reputation of topic author
        topic_time_diff = datetime.datetime.now() - topic_info["created"]
        reputation = topic_info["author_reputation"] or 0
        # parenthesize (reputation or 0): without it, 'or' bound looser than '+'
        # and swallowed the '+ days + 10' term whenever reputation was non-zero
        reputation = reputation + 2 * math.log((self.current_user["reputation"] or 0) + topic_time_diff.days + 10, 10)
        self.user_model.set_user_base_info_by_uid(topic_info["author_id"], {"reputation": reputation})
class UserTopicsHandler(BaseHandler):
    """All topics started by a given user (addressed by uid or username)."""

    def get(self, user, template_variables=None):
        # avoid shared mutable default argument (state would leak across requests)
        if template_variables is None:
            template_variables = {}
        # a purely numeric path segment is treated as a uid, otherwise as a username
        if re.match(r'^\d+$', user):
            user_info = self.user_model.get_user_by_uid(user)
        else:
            user_info = self.user_model.get_user_by_username(user)
        # unknown user: answer 404 (consistent with ProfileHandler) instead of
        # crashing below on user_info["uid"]
        if not user_info:
            self.write_error(404)
            return
        current_user = self.current_user
        page = int(self.get_argument("p", "1"))
        template_variables["user_info"] = user_info
        template_variables["user_info"]["counter"] = {
            "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]),
            "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]),
            "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]),
        }
        if current_user:
            template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(current_user["uid"])
        template_variables["topics"] = self.topic_model.get_user_all_topics(user_info["uid"], current_page = page)
        template_variables["active_page"] = "topic"
        template_variables["gen_random"] = gen_random
        self.render("topic/user_topics.html", **template_variables)
class UserRepliesHandler(BaseHandler):
    """All replies written by a given user (addressed by uid or username)."""

    def get(self, user, template_variables=None):
        # avoid shared mutable default argument (state would leak across requests)
        if template_variables is None:
            template_variables = {}
        # a purely numeric path segment is treated as a uid, otherwise as a username
        if re.match(r'^\d+$', user):
            user_info = self.user_model.get_user_by_uid(user)
        else:
            user_info = self.user_model.get_user_by_username(user)
        # unknown user: answer 404 (consistent with ProfileHandler) instead of
        # crashing below on user_info["uid"]
        if not user_info:
            self.write_error(404)
            return
        current_user = self.current_user
        page = int(self.get_argument("p", "1"))
        template_variables["user_info"] = user_info
        template_variables["user_info"]["counter"] = {
            "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]),
            "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]),
            "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]),
        }
        if current_user:
            template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(current_user["uid"])
        template_variables["replies"] = self.reply_model.get_user_all_replies(user_info["uid"], current_page = page)
        template_variables["active_page"] = "topic"
        template_variables["gen_random"] = gen_random
        self.render("topic/user_replies.html", **template_variables)
class UserFavoritesHandler(BaseHandler):
    """Page listing all topics a given user has favorited."""

    def get(self, user, template_variables=None):
        """Render favorites of *user* (numeric uid or username)."""
        # Fix: mutable default argument ({}) is shared across requests and
        # leaked template state; replaced with a None sentinel.
        if template_variables is None:
            template_variables = {}
        if re.match(r'^\d+$', user):
            user_info = self.user_model.get_user_by_uid(user)
        else:
            user_info = self.user_model.get_user_by_username(user)
        current_user = self.current_user
        page = int(self.get_argument("p", "1"))
        template_variables["user_info"] = user_info
        if user_info:
            template_variables["user_info"]["counter"] = {
                "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]),
                "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]),
                "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]),
            }
        if current_user:
            template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(current_user["uid"])
        template_variables["favorites"] = self.favorite_model.get_user_all_favorites(user_info["uid"], current_page=page)
        template_variables["active_page"] = "topic"
        template_variables["gen_random"] = gen_random
        self.render("topic/user_favorites.html", **template_variables)
class ReplyEditHandler(BaseHandler):
    """Edit an existing reply; only the reply's author may modify it."""

    @tornado.web.authenticated
    def get(self, reply_id, template_variables=None):
        """Render the reply-edit form."""
        # Fix: mutable default argument ({}) leaked state between requests;
        # replaced with a None sentinel.
        if template_variables is None:
            template_variables = {}
        user_info = self.current_user
        template_variables["user_info"] = user_info
        template_variables["user_info"]["counter"] = {
            "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]),
            "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]),
            "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]),
        }
        template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(user_info["uid"])
        template_variables["reply"] = self.reply_model.get_reply_by_reply_id(reply_id)
        template_variables["gen_random"] = gen_random
        template_variables["active_page"] = "topic"
        self.render("topic/reply_edit.html", **template_variables)

    @tornado.web.authenticated
    def post(self, reply_id, template_variables=None):
        """Persist an edited reply, charge 2 reputation, then redirect."""
        template_variables = {}
        # validate the fields
        form = ReplyEditForm(self)
        if not form.validate():
            self.get(reply_id, {"errors": form.errors})
            return
        # continue while validate succeed
        reply_info = self.reply_model.get_reply_by_reply_id(reply_id)
        if reply_info["author_id"] != self.current_user["uid"]:
            template_variables["errors"] = {}
            template_variables["errors"]["invalid_permission"] = [u"没有权限修改该回复"]
            self.get(reply_id, template_variables)
            return
        update_reply_info = {
            # "content": XssCleaner().strip(form.content.data),
            "content": form.content.data,
            "updated": time.strftime('%Y-%m-%d %H:%M:%S'),
        }
        reply_id = self.reply_model.update_reply_by_reply_id(reply_id, update_reply_info)
        # Editing a reply costs its author 2 reputation, floored at 0.
        # (The permission check above guarantees author == current user.)
        reputation = self.current_user["reputation"] or 0
        reputation = reputation - 2
        reputation = 0 if reputation < 0 else reputation
        self.user_model.set_user_base_info_by_uid(reply_info["author_id"], {"reputation": reputation})
        self.redirect("/t/%s" % reply_info["topic_id"])
class FavoriteHandler(BaseHandler):
    """AJAX endpoint: add a topic to the current user's favorites."""

    def get(self, template_variables=None):
        """Favorite ?topic_id=N and reward the topic author with reputation."""
        # Fix: mutable default argument ({}) replaced with a None sentinel.
        if template_variables is None:
            template_variables = {}
        topic_id = int(self.get_argument("topic_id"))
        topic_info = self.topic_model.get_topic_by_topic_id(topic_id)

        # Guard clauses: each failure writes a JSON error payload and stops.
        if not self.current_user:
            self.write(lib.jsonp.print_JSON({
                "success": 0,
                "message": "user_not_login",
            }))
            return
        if not topic_info:
            self.write(lib.jsonp.print_JSON({
                "success": 0,
                "message": "topic_not_exist",
            }))
            return
        if self.current_user["uid"] == topic_info["author_id"]:
            self.write(lib.jsonp.print_JSON({
                "success": 0,
                "message": "can_not_favorite_your_topic",
            }))
            return
        if self.favorite_model.get_favorite_by_topic_id_and_owner_user_id(topic_id, self.current_user["uid"]):
            self.write(lib.jsonp.print_JSON({
                "success": 0,
                "message": "already_favorited",
            }))
            return

        self.favorite_model.add_new_favorite({
            "owner_user_id": self.current_user["uid"],
            "involved_type": 0,  # 0: topic, 1: reply
            "involved_topic_id": topic_id,
            "created": time.strftime('%Y-%m-%d %H:%M:%S'),
        })
        self.write(lib.jsonp.print_JSON({
            "success": 1,
            "message": "favorite_success",
        }))

        # update reputation of topic author
        # Fix: `or` binds looser than `+`, so the original computed
        # `rep or (0 + days + 10)` instead of `(rep or 0) + days + 10`.
        topic_time_diff = datetime.datetime.now() - topic_info["created"]
        reputation = topic_info["author_reputation"] or 0
        reputation = reputation + 2 * math.log((self.current_user["reputation"] or 0) + topic_time_diff.days + 10, 10)
        self.user_model.set_user_base_info_by_uid(topic_info["author_id"], {"reputation": reputation})
class CancelFavoriteHandler(BaseHandler):
    """AJAX endpoint: remove a topic from the current user's favorites."""

    def get(self, template_variables=None):
        """Unfavorite ?topic_id=N and recompute the author's reputation."""
        # Fix: mutable default argument ({}) replaced with a None sentinel.
        if template_variables is None:
            template_variables = {}
        topic_id = int(self.get_argument("topic_id"))
        topic_info = self.topic_model.get_topic_by_topic_id(topic_id)
        favorite_info = None

        if not self.current_user:
            self.write(lib.jsonp.print_JSON({
                "success": 0,
                "message": "user_not_login",
            }))
            return
        if not topic_info:
            self.write(lib.jsonp.print_JSON({
                "success": 0,
                "message": "topic_not_exist",
            }))
            return
        favorite_info = self.favorite_model.get_favorite_by_topic_id_and_owner_user_id(topic_id, self.current_user["uid"])
        if not favorite_info:
            self.write(lib.jsonp.print_JSON({
                "success": 0,
                "message": "not_been_favorited",
            }))
            return

        self.favorite_model.cancel_exist_favorite_by_id(favorite_info["id"])
        self.write(lib.jsonp.print_JSON({
            "success": 1,
            "message": "cancel_favorite_success",
        }))

        # update reputation of topic author
        # Fix: parenthesized `(rep or 0)` — `or` binds looser than `+`, so
        # the original computed `rep or (0 + days + 10)`.
        topic_time_diff = datetime.datetime.now() - topic_info["created"]
        reputation = topic_info["author_reputation"] or 0
        reputation = reputation + 2 * math.log((self.current_user["reputation"] or 0) + topic_time_diff.days + 10, 10)
        self.user_model.set_user_base_info_by_uid(topic_info["author_id"], {"reputation": reputation})
class MembersHandler(BaseHandler):
    """Member roster page: newest and most recently active users."""

    def get(self, template_variables=None):
        """Render the members page; works for anonymous visitors too."""
        # Fix: mutable default argument ({}) replaced with a None sentinel.
        if template_variables is None:
            template_variables = {}
        user_info = self.current_user
        template_variables["user_info"] = user_info
        if user_info:
            template_variables["user_info"]["counter"] = {
                "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]),
                "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]),
                "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]),
            }
            template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(user_info["uid"])
        template_variables["members"] = self.user_model.get_users_by_latest(num=49)
        template_variables["active_members"] = self.user_model.get_users_by_last_login(num=49)
        template_variables["gen_random"] = gen_random
        template_variables["active_page"] = "members"
        self.render("topic/members.html", **template_variables)
comment on authenticated
#!/usr/bin/env python
# coding=utf-8
#
# Copyright 2012 F2E.im
# Do have a faith in what you're doing.
# Make your life a story worth telling.
import uuid
import hashlib
import Image
import StringIO
import time
import json
import re
import urllib2
import tornado.web
import lib.jsonp
import pprint
import math
import datetime
from base import BaseHandler
from lib.variables import *
from form.topic import *
from lib.variables import gen_random
from lib.xss import XssCleaner
from lib.utils import find_mentions
class IndexHandler(BaseHandler):
    """Front page: paginated topic list plus site-wide statistics."""

    def get(self, template_variables=None):
        """Render the front page for both anonymous and logged-in users."""
        # Fix: mutable default argument ({}) replaced with a None sentinel.
        if template_variables is None:
            template_variables = {}
        user_info = self.current_user
        page = int(self.get_argument("p", "1"))
        template_variables["user_info"] = user_info
        # No @authenticated decorator here: anonymous visitors must still
        # see the page, so login-dependent data is branched explicitly.
        if user_info:
            template_variables["user_info"]["counter"] = {
                "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]),
                "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]),
                "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]),
            }
            template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(user_info["uid"])
        template_variables["status_counter"] = {
            "users": self.user_model.get_all_users_count(),
            "nodes": self.node_model.get_all_nodes_count(),
            "topics": self.topic_model.get_all_topics_count(),
            "replies": self.reply_model.get_all_replies_count(),
        }
        template_variables["topics"] = self.topic_model.get_all_topics(current_page=page)
        template_variables["planes"] = self.plane_model.get_all_planes_with_nodes()
        template_variables["hot_nodes"] = self.node_model.get_all_hot_nodes()
        template_variables["active_page"] = "topic"
        template_variables["gen_random"] = gen_random
        self.render("topic/topics.html", **template_variables)
class NodeTopicsHandler(BaseHandler):
    """Paginated topic list for a single node, addressed by slug."""

    def get(self, node_slug, template_variables=None):
        """Render topics belonging to the node identified by *node_slug*."""
        # Fix: mutable default argument ({}) replaced with a None sentinel.
        if template_variables is None:
            template_variables = {}
        user_info = self.current_user
        page = int(self.get_argument("p", "1"))
        template_variables["user_info"] = user_info
        if user_info:
            template_variables["user_info"]["counter"] = {
                "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]),
                "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]),
                "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]),
            }
            template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(user_info["uid"])
        template_variables["topics"] = self.topic_model.get_all_topics_by_node_slug(current_page=page, node_slug=node_slug)
        template_variables["node"] = self.node_model.get_node_by_node_slug(node_slug)
        template_variables["active_page"] = "topic"
        template_variables["gen_random"] = gen_random
        self.render("topic/node_topics.html", **template_variables)
class ViewHandler(BaseHandler):
    """Topic detail page (GET) and reply submission (POST)."""

    def get(self, topic_id, template_variables=None):
        """Render a topic with its replies, defaulting to the last page.

        Also bumps the topic's hit counter and re-syncs its reply count.
        """
        # Fix: mutable default argument ({}) replaced with a None sentinel.
        # Also removed a redundant second self.get_current_user() call and a
        # dead first read of `p` (it was unconditionally overwritten below).
        if template_variables is None:
            template_variables = {}
        user_info = self.current_user
        template_variables["user_info"] = user_info
        if user_info:
            template_variables["user_info"]["counter"] = {
                "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]),
                "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]),
                "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]),
            }
            template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(user_info["uid"])
            template_variables["topic_favorited"] = self.favorite_model.get_favorite_by_topic_id_and_owner_user_id(topic_id, user_info["uid"])
        template_variables["gen_random"] = gen_random
        template_variables["topic"] = self.topic_model.get_topic_by_topic_id(topic_id)
        # check reply count and cal current_page if `p` not given
        reply_num = 106  # replies per page
        reply_count = template_variables["topic"]["reply_count"]
        # Python 2 integer division: ceil(reply_count / reply_num), min 1.
        reply_last_page = (reply_count / reply_num + (reply_count % reply_num and 1)) or 1
        page = int(self.get_argument("p", reply_last_page))
        template_variables["reply_num"] = reply_num
        template_variables["current_page"] = page
        template_variables["replies"] = self.reply_model.get_all_replies_by_topic_id(topic_id, current_page=page, num=reply_num)
        template_variables["active_page"] = "topic"
        # update topic reply_count and hits
        self.topic_model.update_topic_by_topic_id(topic_id, {
            "reply_count": template_variables["replies"]["page"]["total"],
            "hits": (template_variables["topic"]["hits"] or 0) + 1,
        })
        self.render("topic/view.html", **template_variables)

    @tornado.web.authenticated
    def post(self, template_variables=None):
        """Create a reply, fire notifications, and update reputations."""
        template_variables = {}
        # validate the fields
        form = ReplyForm(self)
        if not form.validate():
            self.get(form.tid.data, {"errors": form.errors})
            return
        # continue while validate succeed
        topic_info = self.topic_model.get_topic_by_topic_id(form.tid.data)
        replied_info = self.reply_model.get_user_last_reply_by_topic_id(self.current_user["uid"], form.tid.data)
        if not topic_info:
            template_variables["errors"] = {}
            template_variables["errors"]["invalid_topic_info"] = [u"要回复的帖子不存在"]
            self.get(form.tid.data, template_variables)
            return
        if replied_info:
            # Fingerprint the user's previous reply to reject duplicates.
            last_replied_fingerprint = hashlib.sha1(str(replied_info.topic_id) + str(replied_info.author_id) + replied_info.content).hexdigest()
            new_replied_fingerprint = hashlib.sha1(str(form.tid.data) + str(self.current_user["uid"]) + form.content.data).hexdigest()
            if last_replied_fingerprint == new_replied_fingerprint:
                template_variables["errors"] = {}
                template_variables["errors"]["duplicated_reply"] = [u"回复重复提交"]
                self.get(form.tid.data, template_variables)
                return
        reply_info = {
            "author_id": self.current_user["uid"],
            "topic_id": form.tid.data,
            # "content": XssCleaner().strip(form.content.data),
            "content": form.content.data,
            "created": time.strftime('%Y-%m-%d %H:%M:%S'),
        }
        reply_id = self.reply_model.add_new_reply(reply_info)
        # update topic last_replied_by and last_replied_time
        self.topic_model.update_topic_by_topic_id(form.tid.data, {
            "last_replied_by": self.current_user["uid"],
            "last_replied_time": time.strftime('%Y-%m-%d %H:%M:%S'),
            "last_touched": time.strftime('%Y-%m-%d %H:%M:%S'),
        })
        # create reply notification (not when replying to yourself)
        if not self.current_user["uid"] == topic_info["author_id"]:
            self.notification_model.add_new_notification({
                "trigger_user_id": self.current_user["uid"],
                "involved_type": 1,  # 0: mention, 1: reply
                "involved_user_id": topic_info["author_id"],
                "involved_topic_id": form.tid.data,
                "content": form.content.data,
                "status": 0,
                "occurrence_time": time.strftime('%Y-%m-%d %H:%M:%S'),
            })
        # create @username notification, skipping self, unknown users, and
        # the topic author (already notified above)
        for username in set(find_mentions(form.content.data)):
            mentioned_user = self.user_model.get_user_by_username(username)
            if not mentioned_user:
                continue
            if mentioned_user["uid"] == self.current_user["uid"]:
                continue
            if mentioned_user["uid"] == topic_info["author_id"]:
                continue
            self.notification_model.add_new_notification({
                "trigger_user_id": self.current_user["uid"],
                "involved_type": 0,  # 0: mention, 1: reply
                "involved_user_id": mentioned_user["uid"],
                "involved_topic_id": form.tid.data,
                "content": form.content.data,
                "status": 0,
                "occurrence_time": time.strftime('%Y-%m-%d %H:%M:%S'),
            })
        # update reputation of topic author (first reply by this user only)
        # Fix: parenthesized `(rep or 0)` — `or` binds looser than `+`, so
        # the original computed `rep or (0 + days + 10)`.
        if not self.current_user["uid"] == topic_info["author_id"] and not replied_info:
            topic_time_diff = datetime.datetime.now() - topic_info["created"]
            reputation = topic_info["author_reputation"] or 0
            reputation = reputation + 2 * math.log((self.current_user["reputation"] or 0) + topic_time_diff.days + 10, 10)
            self.user_model.set_user_base_info_by_uid(topic_info["author_id"], {"reputation": reputation})
        # self.get(form.tid.data)
        self.redirect("/t/%s#reply%s" % (form.tid.data, topic_info["reply_count"] + 1))
class CreateHandler(BaseHandler):
    """Create a new topic, optionally pre-targeted at a node slug."""

    @tornado.web.authenticated
    def get(self, node_slug=None, template_variables=None):
        """Render the topic-creation form."""
        # Fix: mutable default argument ({}) replaced with a None sentinel.
        if template_variables is None:
            template_variables = {}
        user_info = self.current_user
        template_variables["user_info"] = user_info
        template_variables["user_info"]["counter"] = {
            "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]),
            "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]),
            "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]),
        }
        template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(user_info["uid"])
        template_variables["gen_random"] = gen_random
        template_variables["node_slug"] = node_slug
        template_variables["active_page"] = "topic"
        self.render("topic/create.html", **template_variables)

    @tornado.web.authenticated
    def post(self, node_slug=None, template_variables=None):
        """Validate and persist a new topic; charge 5 reputation."""
        template_variables = {}
        # validate the fields
        form = CreateForm(self)
        if not form.validate():
            self.get(node_slug, {"errors": form.errors})
            return
        # continue while validate succeed
        node = self.node_model.get_node_by_node_slug(node_slug)
        last_created = self.topic_model.get_user_last_created_topic(self.current_user["uid"])
        if last_created:
            # Fingerprint the user's previous topic to reject duplicates.
            last_created_fingerprint = hashlib.sha1(last_created.title + last_created.content + str(last_created.node_id)).hexdigest()
            new_created_fingerprint = hashlib.sha1(form.title.data + form.content.data + str(node["id"])).hexdigest()
            if last_created_fingerprint == new_created_fingerprint:
                template_variables["errors"] = {}
                template_variables["errors"]["duplicated_topic"] = [u"帖子重复提交"]
                self.get(node_slug, template_variables)
                return
        topic_info = {
            "author_id": self.current_user["uid"],
            "title": form.title.data,
            # "content": XssCleaner().strip(form.content.data),
            "content": form.content.data,
            "node_id": node["id"],
            "created": time.strftime('%Y-%m-%d %H:%M:%S'),
            "reply_count": 0,
            "last_touched": time.strftime('%Y-%m-%d %H:%M:%S'),
        }
        reply_id = self.topic_model.add_new_topic(topic_info)
        # Posting a topic costs the author 5 reputation, floored at 0.
        reputation = self.current_user["reputation"] or 0
        reputation = reputation - 5
        reputation = 0 if reputation < 0 else reputation
        self.user_model.set_user_base_info_by_uid(topic_info["author_id"], {"reputation": reputation})
        self.redirect("/")
class EditHandler(BaseHandler):
    """Edit an existing topic; only the topic's author may modify it."""

    @tornado.web.authenticated
    def get(self, topic_id, template_variables=None):
        """Render the topic-edit form."""
        # Fix: mutable default argument ({}) replaced with a None sentinel.
        if template_variables is None:
            template_variables = {}
        user_info = self.current_user
        template_variables["user_info"] = user_info
        template_variables["user_info"]["counter"] = {
            "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]),
            "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]),
            "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]),
        }
        template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(user_info["uid"])
        template_variables["topic"] = self.topic_model.get_topic_by_topic_id(topic_id)
        template_variables["gen_random"] = gen_random
        template_variables["active_page"] = "topic"
        self.render("topic/edit.html", **template_variables)

    @tornado.web.authenticated
    def post(self, topic_id, template_variables=None):
        """Persist an edited topic; charge the author 2 reputation."""
        template_variables = {}
        # validate the fields
        form = CreateForm(self)
        if not form.validate():
            self.get(topic_id, {"errors": form.errors})
            return
        # continue while validate succeed
        topic_info = self.topic_model.get_topic_by_topic_id(topic_id)
        if topic_info["author_id"] != self.current_user["uid"]:
            template_variables["errors"] = {}
            template_variables["errors"]["invalid_permission"] = [u"没有权限修改该主题"]
            self.get(topic_id, template_variables)
            return
        update_topic_info = {
            "title": form.title.data,
            # "content": XssCleaner().strip(form.content.data),
            "content": form.content.data,
            "updated": time.strftime('%Y-%m-%d %H:%M:%S'),
            "last_touched": time.strftime('%Y-%m-%d %H:%M:%S'),
        }
        reply_id = self.topic_model.update_topic_by_topic_id(topic_id, update_topic_info)
        # Editing a topic costs the author 2 reputation, floored at 0.
        reputation = topic_info["author_reputation"] or 0
        reputation = reputation - 2
        reputation = 0 if reputation < 0 else reputation
        self.user_model.set_user_base_info_by_uid(topic_info["author_id"], {"reputation": reputation})
        self.redirect("/t/%s" % topic_id)
class ProfileHandler(BaseHandler):
    """Public profile page: user info plus recent topics and replies."""

    def get(self, user, template_variables=None):
        """Render the profile for *user* (numeric uid or username); 404 if unknown."""
        # Fix: mutable default argument ({}) replaced with a None sentinel.
        if template_variables is None:
            template_variables = {}
        if re.match(r'^\d+$', user):
            user_info = self.user_model.get_user_by_uid(user)
        else:
            user_info = self.user_model.get_user_by_username(user)
        if not user_info:
            self.write_error(404)
            return
        current_user = self.current_user
        page = int(self.get_argument("p", "1"))
        template_variables["user_info"] = user_info
        if user_info:
            template_variables["user_info"]["counter"] = {
                "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]),
                "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]),
                "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]),
            }
        if current_user:
            template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(current_user["uid"])
        # Disabled github-repo fetching kept for reference:
        '''
        if user_info["github"]:
            github_repos = self.mc.get(str("%s_github_repos" % user_info["github"])) or json.JSONDecoder().decode(urllib2.urlopen('https://api.github.com/users/%s/repos' % user_info["github"]).read())
            self.mc.set(str("%s_github_repos" % user_info["github"]), github_repos)
            template_variables["github_repos"] = github_repos
        '''
        template_variables["topics"] = self.topic_model.get_user_all_topics(user_info["uid"], current_page=page)
        template_variables["replies"] = self.reply_model.get_user_all_replies(user_info["uid"], current_page=page)
        template_variables["gen_random"] = gen_random
        template_variables["active_page"] = "_blank"
        self.render("topic/profile.html", **template_variables)
class VoteHandler(BaseHandler):
    """AJAX endpoint: upvote a topic and reward its author with reputation."""

    def get(self, template_variables=None):
        """Vote for ?topic_id=N; each user may vote a topic at most once."""
        # Fix: mutable default argument ({}) replaced with a None sentinel.
        if template_variables is None:
            template_variables = {}
        topic_id = int(self.get_argument("topic_id"))
        topic_info = self.topic_model.get_topic_by_topic_id(topic_id)

        # Fix: the original dereferenced self.current_user["uid"] with no
        # login check, so anonymous requests crashed with a 500.  Return the
        # same JSON error shape the favorite handlers use.
        if not self.current_user:
            self.write(lib.jsonp.print_JSON({
                "success": 0,
                "message": "user_not_login",
            }))
            return
        if not topic_info:
            self.write(lib.jsonp.print_JSON({
                "success": 0,
                "message": "topic_not_exist",
            }))
            return
        if self.current_user["uid"] == topic_info["author_id"]:
            self.write(lib.jsonp.print_JSON({
                "success": 0,
                "message": "can_not_vote_your_topic",
            }))
            return
        if self.vote_model.get_vote_by_topic_id_and_trigger_user_id(topic_id, self.current_user["uid"]):
            self.write(lib.jsonp.print_JSON({
                "success": 0,
                "message": "already_voted",
            }))
            return

        self.vote_model.add_new_vote({
            "trigger_user_id": self.current_user["uid"],
            "involved_type": 0,  # 0: topic, 1: reply
            "involved_user_id": topic_info["author_id"],
            "involved_topic_id": topic_id,
            "status": 0,
            "occurrence_time": time.strftime('%Y-%m-%d %H:%M:%S'),
        })
        self.write(lib.jsonp.print_JSON({
            "success": 1,
            "message": "thanks_for_your_vote",
        }))

        # update reputation of topic author
        # Fix: parenthesized `(rep or 0)` — `or` binds looser than `+`, so
        # the original computed `rep or (0 + days + 10)`.
        topic_time_diff = datetime.datetime.now() - topic_info["created"]
        reputation = topic_info["author_reputation"] or 0
        reputation = reputation + 2 * math.log((self.current_user["reputation"] or 0) + topic_time_diff.days + 10, 10)
        self.user_model.set_user_base_info_by_uid(topic_info["author_id"], {"reputation": reputation})
class UserTopicsHandler(BaseHandler):
    """Page listing all topics started by a given user."""

    def get(self, user, template_variables=None):
        """Render topics by *user* (numeric uid or username)."""
        # Fix: mutable default argument ({}) is shared across requests and
        # leaked template state; replaced with a None sentinel.
        if template_variables is None:
            template_variables = {}
        if re.match(r'^\d+$', user):
            user_info = self.user_model.get_user_by_uid(user)
        else:
            user_info = self.user_model.get_user_by_username(user)
        current_user = self.current_user
        page = int(self.get_argument("p", "1"))
        template_variables["user_info"] = user_info
        if user_info:
            template_variables["user_info"]["counter"] = {
                "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]),
                "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]),
                "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]),
            }
        if current_user:
            template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(current_user["uid"])
        template_variables["topics"] = self.topic_model.get_user_all_topics(user_info["uid"], current_page=page)
        template_variables["active_page"] = "topic"
        template_variables["gen_random"] = gen_random
        self.render("topic/user_topics.html", **template_variables)
class UserRepliesHandler(BaseHandler):
    """Page listing all replies posted by a given user."""

    def get(self, user, template_variables=None):
        """Render replies by *user* (numeric uid or username)."""
        # Fix: mutable default argument ({}) is shared across requests and
        # leaked template state; replaced with a None sentinel.
        if template_variables is None:
            template_variables = {}
        if re.match(r'^\d+$', user):
            user_info = self.user_model.get_user_by_uid(user)
        else:
            user_info = self.user_model.get_user_by_username(user)
        current_user = self.current_user
        page = int(self.get_argument("p", "1"))
        template_variables["user_info"] = user_info
        if user_info:
            template_variables["user_info"]["counter"] = {
                "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]),
                "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]),
                "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]),
            }
        if current_user:
            template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(current_user["uid"])
        template_variables["replies"] = self.reply_model.get_user_all_replies(user_info["uid"], current_page=page)
        template_variables["active_page"] = "topic"
        template_variables["gen_random"] = gen_random
        self.render("topic/user_replies.html", **template_variables)
class UserFavoritesHandler(BaseHandler):
    """Page listing all topics a given user has favorited."""

    def get(self, user, template_variables=None):
        """Render favorites of *user* (numeric uid or username)."""
        # Fix: mutable default argument ({}) is shared across requests and
        # leaked template state; replaced with a None sentinel.
        if template_variables is None:
            template_variables = {}
        if re.match(r'^\d+$', user):
            user_info = self.user_model.get_user_by_uid(user)
        else:
            user_info = self.user_model.get_user_by_username(user)
        current_user = self.current_user
        page = int(self.get_argument("p", "1"))
        template_variables["user_info"] = user_info
        if user_info:
            template_variables["user_info"]["counter"] = {
                "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]),
                "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]),
                "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]),
            }
        if current_user:
            template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(current_user["uid"])
        template_variables["favorites"] = self.favorite_model.get_user_all_favorites(user_info["uid"], current_page=page)
        template_variables["active_page"] = "topic"
        template_variables["gen_random"] = gen_random
        self.render("topic/user_favorites.html", **template_variables)
class ReplyEditHandler(BaseHandler):
    """Edit an existing reply; only the reply's author may modify it."""

    @tornado.web.authenticated
    def get(self, reply_id, template_variables=None):
        """Render the reply-edit form."""
        # Fix: mutable default argument ({}) leaked state between requests;
        # replaced with a None sentinel.
        if template_variables is None:
            template_variables = {}
        user_info = self.current_user
        template_variables["user_info"] = user_info
        template_variables["user_info"]["counter"] = {
            "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]),
            "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]),
            "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]),
        }
        template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(user_info["uid"])
        template_variables["reply"] = self.reply_model.get_reply_by_reply_id(reply_id)
        template_variables["gen_random"] = gen_random
        template_variables["active_page"] = "topic"
        self.render("topic/reply_edit.html", **template_variables)

    @tornado.web.authenticated
    def post(self, reply_id, template_variables=None):
        """Persist an edited reply, charge 2 reputation, then redirect."""
        template_variables = {}
        # validate the fields
        form = ReplyEditForm(self)
        if not form.validate():
            self.get(reply_id, {"errors": form.errors})
            return
        # continue while validate succeed
        reply_info = self.reply_model.get_reply_by_reply_id(reply_id)
        if reply_info["author_id"] != self.current_user["uid"]:
            template_variables["errors"] = {}
            template_variables["errors"]["invalid_permission"] = [u"没有权限修改该回复"]
            self.get(reply_id, template_variables)
            return
        update_reply_info = {
            # "content": XssCleaner().strip(form.content.data),
            "content": form.content.data,
            "updated": time.strftime('%Y-%m-%d %H:%M:%S'),
        }
        reply_id = self.reply_model.update_reply_by_reply_id(reply_id, update_reply_info)
        # Editing a reply costs its author 2 reputation, floored at 0.
        # (The permission check above guarantees author == current user.)
        reputation = self.current_user["reputation"] or 0
        reputation = reputation - 2
        reputation = 0 if reputation < 0 else reputation
        self.user_model.set_user_base_info_by_uid(reply_info["author_id"], {"reputation": reputation})
        self.redirect("/t/%s" % reply_info["topic_id"])
class FavoriteHandler(BaseHandler):
    """AJAX endpoint: add a topic to the current user's favorites."""

    def get(self, template_variables=None):
        """Favorite ?topic_id=N and reward the topic author with reputation."""
        # Fix: mutable default argument ({}) replaced with a None sentinel.
        if template_variables is None:
            template_variables = {}
        topic_id = int(self.get_argument("topic_id"))
        topic_info = self.topic_model.get_topic_by_topic_id(topic_id)

        # Guard clauses: each failure writes a JSON error payload and stops.
        if not self.current_user:
            self.write(lib.jsonp.print_JSON({
                "success": 0,
                "message": "user_not_login",
            }))
            return
        if not topic_info:
            self.write(lib.jsonp.print_JSON({
                "success": 0,
                "message": "topic_not_exist",
            }))
            return
        if self.current_user["uid"] == topic_info["author_id"]:
            self.write(lib.jsonp.print_JSON({
                "success": 0,
                "message": "can_not_favorite_your_topic",
            }))
            return
        if self.favorite_model.get_favorite_by_topic_id_and_owner_user_id(topic_id, self.current_user["uid"]):
            self.write(lib.jsonp.print_JSON({
                "success": 0,
                "message": "already_favorited",
            }))
            return

        self.favorite_model.add_new_favorite({
            "owner_user_id": self.current_user["uid"],
            "involved_type": 0,  # 0: topic, 1: reply
            "involved_topic_id": topic_id,
            "created": time.strftime('%Y-%m-%d %H:%M:%S'),
        })
        self.write(lib.jsonp.print_JSON({
            "success": 1,
            "message": "favorite_success",
        }))

        # update reputation of topic author
        # Fix: `or` binds looser than `+`, so the original computed
        # `rep or (0 + days + 10)` instead of `(rep or 0) + days + 10`.
        topic_time_diff = datetime.datetime.now() - topic_info["created"]
        reputation = topic_info["author_reputation"] or 0
        reputation = reputation + 2 * math.log((self.current_user["reputation"] or 0) + topic_time_diff.days + 10, 10)
        self.user_model.set_user_base_info_by_uid(topic_info["author_id"], {"reputation": reputation})
class CancelFavoriteHandler(BaseHandler):
    """AJAX endpoint: remove a topic from the current user's favorites."""

    def get(self, template_variables=None):
        """Unfavorite ?topic_id=N and recompute the author's reputation."""
        # Fix: mutable default argument ({}) replaced with a None sentinel.
        if template_variables is None:
            template_variables = {}
        topic_id = int(self.get_argument("topic_id"))
        topic_info = self.topic_model.get_topic_by_topic_id(topic_id)
        favorite_info = None

        if not self.current_user:
            self.write(lib.jsonp.print_JSON({
                "success": 0,
                "message": "user_not_login",
            }))
            return
        if not topic_info:
            self.write(lib.jsonp.print_JSON({
                "success": 0,
                "message": "topic_not_exist",
            }))
            return
        favorite_info = self.favorite_model.get_favorite_by_topic_id_and_owner_user_id(topic_id, self.current_user["uid"])
        if not favorite_info:
            self.write(lib.jsonp.print_JSON({
                "success": 0,
                "message": "not_been_favorited",
            }))
            return

        self.favorite_model.cancel_exist_favorite_by_id(favorite_info["id"])
        self.write(lib.jsonp.print_JSON({
            "success": 1,
            "message": "cancel_favorite_success",
        }))

        # update reputation of topic author
        # Fix: parenthesized `(rep or 0)` — `or` binds looser than `+`, so
        # the original computed `rep or (0 + days + 10)`.
        topic_time_diff = datetime.datetime.now() - topic_info["created"]
        reputation = topic_info["author_reputation"] or 0
        reputation = reputation + 2 * math.log((self.current_user["reputation"] or 0) + topic_time_diff.days + 10, 10)
        self.user_model.set_user_base_info_by_uid(topic_info["author_id"], {"reputation": reputation})
class MembersHandler(BaseHandler):
    """Member roster page: newest and most recently active users."""

    def get(self, template_variables=None):
        """Render the members page; works for anonymous visitors too."""
        # Fix: mutable default argument ({}) replaced with a None sentinel.
        if template_variables is None:
            template_variables = {}
        user_info = self.current_user
        template_variables["user_info"] = user_info
        if user_info:
            template_variables["user_info"]["counter"] = {
                "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]),
                "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]),
                "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]),
            }
            template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(user_info["uid"])
        template_variables["members"] = self.user_model.get_users_by_latest(num=49)
        template_variables["active_members"] = self.user_model.get_users_by_last_login(num=49)
        template_variables["gen_random"] = gen_random
        template_variables["active_page"] = "members"
        self.render("topic/members.html", **template_variables)
|
import json
import emoji
import requests
from collections import defaultdict
from holster.enum import Enum
from disco.types.message import MessageEmbed
from rowboat.plugins import RowboatPlugin as Plugin
from rowboat.redis import rdb
from rowboat.models.guild import Guild
from rowboat.types.plugin import PluginConfig
from rowboat.types import SlottedModel, DictField, Field, ChannelField
# How relayed reddit posts are rendered in Discord: PLAIN sends a bare
# text message, PRETTY builds a rich MessageEmbed (see send_post below).
FormatMode = Enum(
    'PLAIN',
    'PRETTY'
)
class SubRedditConfig(SlottedModel):
    """Per-subscription settings for relaying one subreddit to one channel."""
    channel = Field(ChannelField)  # destination channel (ID or name; see get_channel)
    mode = Field(FormatMode, default=FormatMode.PRETTY)  # plain text vs embed
    nsfw = Field(bool, default=False)  # when False, NSFW posts are dropped in send_post
    text_length = Field(int, default=256)  # requested selftext preview length
    include_stats = Field(bool, default=False)  # embed footer with up/down/comment counts
class RedditConfig(PluginConfig):
    # TODO: validate they have less than 3 reddits selected
    # Maps subreddit name -> SubRedditConfig for that guild.
    subs = DictField(str, SubRedditConfig)

    def validate(self):
        """Reject configurations subscribing to more than 3 subreddits."""
        # NOTE(review): raises the generic Exception type; a more specific
        # subclass would be friendlier to callers — confirm what the config
        # loader expects before changing it.
        if len(self.subs) > 3:
            raise Exception('Cannot have more than 3 subreddits configured')

        # TODO: validate each subreddit
@Plugin.with_config(RedditConfig)
class RedditPlugin(Plugin):
    """Polls configured subreddits and relays new posts to Discord channels."""

    @Plugin.schedule(30, init=False)
    def check_subreddits(self):
        """Scheduled every 30s: gather all subreddit subscriptions across
        guild configs, then poll each subreddit once and fan posts out."""
        # TODO: sharding
        # TODO: filter in query
        subs_raw = list(Guild.select(
            Guild.guild_id,
            Guild.config['plugins']['reddit']
        ).where(
            ~(Guild.config['plugins']['reddit'] >> None)
        ).tuples())

        # Group all subreddits, iterate, update channels
        subs = defaultdict(list)
        for gid, config in subs_raw:
            config = json.loads(config)
            for k, v in config['subs'].items():
                subs[k].append((gid, SubRedditConfig(v)))

        for sub, configs in subs.items():
            try:
                self.update_subreddit(sub, configs)
            except requests.HTTPError:
                self.log.exception('Error loading sub %s:', sub)

    def get_channel(self, guild, ref):
        """Resolve a channel reference that may be a numeric ID or a name."""
        # CLEAN THIS UP TO A RESOLVER
        if isinstance(ref, (int, long)):
            return guild.channels.get(ref)
        else:
            return guild.channels.select_one(name=ref)

    def send_post(self, config, channel, data):
        """Send one reddit post *data* to *channel*, formatted per *config*.

        NSFW posts are silently dropped unless config.nsfw is set.
        """
        if config.mode is FormatMode.PLAIN:
            channel.send_message('**{}**\n{}'.format(
                data['title'],
                'https://reddit.com{}'.format(data['permalink'])
            ))
        else:
            embed = MessageEmbed()

            if 'nsfw' in data and data['nsfw']:
                if not config.nsfw:
                    return
                embed.color = 0xff6961
            else:
                embed.color = 0xaecfc8

            # Limit title to 256 characters nicely
            if len(data['title']) > 256:
                embed.title = data['title'][:253] + '...'
            else:
                embed.title = data['title']

            embed.url = u'https://reddit.com{}'.format(data['permalink'])
            embed.set_author(
                name=data['author'],
                url=u'https://reddit.com/u/{}'.format(data['author'])
            )

            image = None
            if data.get('media'):
                if 'oembed' in data['media']:
                    image = data['media']['oembed']['thumbnail_url']
            elif data.get('preview'):
                if 'images' in data['preview']:
                    image = data['preview']['images'][0]['source']['url']

            if 'selftext' in data and data['selftext']:
                # TODO better place for validation
                # Fix: the original `min(64, max(config.text_length, 1900))`
                # always evaluated to 64; the intent is to clamp the
                # configured preview length into the range [64, 1900].
                sz = max(64, min(config.text_length, 1900))
                embed.description = data['selftext'][:sz]
                if len(data['selftext']) > sz:
                    embed.description += u'...'

                if image:
                    embed.set_thumbnail(url=image)
            elif image:
                embed.set_image(url=image)

            if config.include_stats:
                embed.set_footer(text=emoji.emojize('{} upvotes | {} downvotes | {} comments'.format(
                    data['ups'], data['downs'], data['num_comments']
                )))

            channel.send_message('', embed=embed)

    def update_subreddit(self, sub, configs):
        """Fetch /r/<sub>/new once and post unseen items to each subscriber."""
        # TODO: use before on this request
        r = requests.get(
            'https://www.reddit.com/r/{}/new.json'.format(sub),
            headers={
                'User-Agent': 'discord:RowBoat:v0.0.1 (by /u/b1naryth1ef)'
            }
        )
        r.raise_for_status()

        # Oldest first, so posts arrive in chronological order.
        data = list(reversed(map(lambda i: i['data'], r.json()['data']['children'])))

        # TODO:
        #  1. instead of tracking per guild, just track globally per subreddit
        #  2. fan-out posts to each subscribed channel
        for gid, config in configs:
            guild = self.state.guilds.get(gid)
            if not guild:
                self.log.warning('Skipping non existant guild %s', gid)
                continue

            channel = self.get_channel(guild, config.channel)
            if not channel:
                self.log.warning('Skipping non existant channel %s for guild %s (%s)', channel, guild.name, gid)
                continue

            # Redis remembers the newest created_utc posted per channel+sub.
            last = float(rdb.get('rdt:lpid:{}:{}'.format(channel.id, sub)) or 0)

            item_count, high_time = 0, last
            for item in data:
                if item['created_utc'] > last:
                    try:
                        self.send_post(config, channel, item)
                    except Exception:
                        # Fix: narrowed from a bare `except:` so that
                        # KeyboardInterrupt/SystemExit are not swallowed.
                        self.log.exception('Failed to post reddit content from %s\n\n', item)
                    item_count += 1

                    if item['created_utc'] > high_time:
                        rdb.set('rdt:lpid:{}:{}'.format(channel.id, sub), item['created_utc'])
                        high_time = item['created_utc']

                # Safety valve: never flood more than 10 posts per cycle.
                if item_count > 10:
                    break
[reddit] change user-agent, thanks X E N T H Y S
import json
import emoji
import requests
from collections import defaultdict
from holster.enum import Enum
from disco.types.message import MessageEmbed
from rowboat.plugins import RowboatPlugin as Plugin
from rowboat.redis import rdb
from rowboat.models.guild import Guild
from rowboat.types.plugin import PluginConfig
from rowboat.types import SlottedModel, DictField, Field, ChannelField
# Rendering modes for relayed posts: PLAIN text message or PRETTY embed.
FormatMode = Enum(
    'PLAIN',
    'PRETTY'
)
class SubRedditConfig(SlottedModel):
    """Per-subreddit settings within a guild's reddit plugin config."""
    channel = Field(ChannelField)  # target channel reference (ID or name)
    mode = Field(FormatMode, default=FormatMode.PRETTY)  # message rendering mode
    nsfw = Field(bool, default=False)  # when False, NSFW posts are dropped
    text_length = Field(int, default=256)  # selftext excerpt length for embeds
    include_stats = Field(bool, default=False)  # footer with vote/comment counts
class RedditConfig(PluginConfig):
    """Plugin-level config: mapping of subreddit name -> SubRedditConfig."""
    # TODO: validate they have less than 3 reddits selected
    subs = DictField(str, SubRedditConfig)

    def validate(self):
        """Reject configs subscribing to more than 3 subreddits.

        Raises ValueError (more specific than the previous bare Exception,
        and still caught by any caller handling Exception).
        """
        if len(self.subs) > 3:
            raise ValueError('Cannot have more than 3 subreddits configured')

        # TODO: validate each subreddit
@Plugin.with_config(RedditConfig)
class RedditPlugin(Plugin):
    """Polls subscribed subreddits on a schedule and relays new posts to the
    configured channel of each subscribed guild."""

    @Plugin.schedule(30, init=False)
    def check_subreddits(self):
        """Scheduled entry point: load per-guild reddit configs, group them
        by subreddit, and update each subreddit once."""
        # TODO: sharding
        # TODO: filter in query
        subs_raw = list(Guild.select(
            Guild.guild_id,
            Guild.config['plugins']['reddit']
        ).where(
            ~(Guild.config['plugins']['reddit'] >> None)
        ).tuples())

        # Group all subreddits, iterate, update channels
        subs = defaultdict(list)
        for gid, config in subs_raw:
            config = json.loads(config)
            for k, v in config['subs'].items():
                subs[k].append((gid, SubRedditConfig(v)))

        for sub, configs in subs.items():
            try:
                self.update_subreddit(sub, configs)
            except requests.HTTPError:
                self.log.exception('Error loading sub %s:', sub)

    def get_channel(self, guild, ref):
        """Resolve a channel reference (numeric ID or name) to a channel."""
        # CLEAN THIS UP TO A RESOLVER
        if isinstance(ref, (int, long)):
            return guild.channels.get(ref)
        else:
            return guild.channels.select_one(name=ref)

    def send_post(self, config, channel, data):
        """Send one reddit post to *channel* as plain text or an embed
        depending on ``config.mode``; NSFW posts are skipped unless the
        config opts in."""
        if config.mode is FormatMode.PLAIN:
            channel.send_message('**{}**\n{}'.format(
                data['title'],
                'https://reddit.com{}'.format(data['permalink'])
            ))
        else:
            embed = MessageEmbed()

            if 'nsfw' in data and data['nsfw']:
                if not config.nsfw:
                    return

                embed.color = 0xff6961
            else:
                embed.color = 0xaecfc8

            # Limit title to 256 characters nicely
            if len(data['title']) > 256:
                embed.title = data['title'][:253] + '...'
            else:
                embed.title = data['title']

            embed.url = u'https://reddit.com{}'.format(data['permalink'])

            embed.set_author(
                name=data['author'],
                url=u'https://reddit.com/u/{}'.format(data['author'])
            )

            image = None
            if data.get('media'):
                if 'oembed' in data['media']:
                    image = data['media']['oembed']['thumbnail_url']
            elif data.get('preview'):
                if 'images' in data['preview']:
                    image = data['preview']['images'][0]['source']['url']

            if 'selftext' in data and data['selftext']:
                # TODO better place for validation
                # Clamp the excerpt length into [64, 1900]. BUG FIX: the
                # previous `min(64, max(..., 1900))` always collapsed to 64.
                sz = max(64, min(config.text_length, 1900))
                embed.description = data['selftext'][:sz]
                if len(data['selftext']) > sz:
                    embed.description += u'...'

                if image:
                    embed.set_thumbnail(url=image)
            elif image:
                embed.set_image(url=image)

            if config.include_stats:
                embed.set_footer(text=emoji.emojize('{} upvotes | {} downvotes | {} comments'.format(
                    data['ups'], data['downs'], data['num_comments']
                )))

            channel.send_message('', embed=embed)

    def update_subreddit(self, sub, configs):
        """Fetch /r/<sub>/new and post items newer than the per-channel
        cursor stored in redis under ``rdt:lpid:<channel_id>:<sub>``."""
        # TODO: use before on this request
        r = requests.get(
            'https://www.reddit.com/r/{}/new.json'.format(sub),
            headers={
                'User-Agent': 'discordBot/Jetski v1.0'
            }
        )
        r.raise_for_status()

        # Oldest-first ordering. NOTE: relies on Python 2 map() returning a
        # list (reversed() of a py3 map iterator raises TypeError).
        data = list(reversed(map(lambda i: i['data'], r.json()['data']['children'])))

        # TODO:
        #  1. instead of tracking per guild, just track globally per subreddit
        #  2. fan-out posts to each subscribed channel
        for gid, config in configs:
            guild = self.state.guilds.get(gid)
            if not guild:
                self.log.warning('Skipping non existant guild %s', gid)
                continue

            channel = self.get_channel(guild, config.channel)
            if not channel:
                self.log.warning('Skipping non existant channel %s for guild %s (%s)', channel, guild.name, gid)
                continue

            last = float(rdb.get('rdt:lpid:{}:{}'.format(channel.id, sub)) or 0)

            item_count, high_time = 0, last
            for item in data:
                if item['created_utc'] > last:
                    try:
                        self.send_post(config, channel, item)
                    except Exception:
                        # FIX: was a bare `except:`; narrowed so SystemExit /
                        # KeyboardInterrupt are not swallowed
                        self.log.exception('Failed to post reddit content from %s\n\n', item)

                    item_count += 1

                if item['created_utc'] > high_time:
                    rdb.set('rdt:lpid:{}:{}'.format(channel.id, sub), item['created_utc'])
                    high_time = item['created_utc']

                if item_count > 10:
                    break
|
__author__ = 'Bohdan Mushkevych'
import calendar
import collections
from synergy.system import time_helper
from synergy.system.time_qualifier import *
class TimeperiodDict(collections.MutableMapping):
    """Mapping keyed by Synergy timeperiod strings, where keys are bucketed
    into groups of size *grouping* before storage and lookup, so several
    raw timeperiods resolve to the same stored entry.
    """

    def __init__(self, time_qualifier, grouping=1, *args, **kwargs):
        # time_qualifier: one of QUALIFIER_HOURLY/DAILY/MONTHLY/YEARLY
        # grouping: bucket size; must not exceed the qualifier's upper boundary
        assert time_qualifier in [QUALIFIER_HOURLY, QUALIFIER_DAILY, QUALIFIER_MONTHLY, QUALIFIER_YEARLY]
        super(TimeperiodDict, self).__init__()

        # validation section
        if time_qualifier == QUALIFIER_HOURLY:
            self.upper_boundary = 23
        elif time_qualifier == QUALIFIER_DAILY:
            # conservative default; actual month length is computed per key
            self.upper_boundary = 28
        elif time_qualifier == QUALIFIER_MONTHLY:
            self.upper_boundary = 12
        elif time_qualifier == QUALIFIER_YEARLY:
            self.upper_boundary = 1
        else:
            raise ValueError('unknown time qualifier: {0}'.format(time_qualifier))

        assert 1 <= grouping <= self.upper_boundary
        self.grouping = grouping
        self.time_qualifier = time_qualifier
        self.data = dict()
        self.update(dict(*args, **kwargs))

    def _do_stem_grouping(self, timeperiod, stem):
        """Map *stem* (int hour/day/month) to the smallest grouping multiple
        that is >= stem, capped at the period's upper boundary."""
        revisited_upper_boundary = self.upper_boundary
        if self.time_qualifier == QUALIFIER_DAILY:
            # DAILY upper boundary is month-dependent
            # i.e. it is 28 for Feb 2015; and 31 for Mar 2015
            year, month, day, hour = time_helper.tokenize_timeperiod(timeperiod)
            monthrange_tuple = calendar.monthrange(int(year), int(month))
            revisited_upper_boundary = monthrange_tuple[1]

        # exclude 00 from lower boundary, unless the grouping == 1
        revisited_lower_boundary = 0 if self.grouping == 1 else 1
        for i in range(revisited_lower_boundary, revisited_upper_boundary):
            candidate = i * self.grouping
            if stem <= candidate <= revisited_upper_boundary:
                return candidate
        return revisited_upper_boundary

    def _translate_timeperiod(self, timeperiod):
        """ method translates given timeperiod to the grouped timeperiod """
        if self.time_qualifier == QUALIFIER_YEARLY:
            # YEARLY timeperiods are allowed to have only identity grouping
            return timeperiod

        # step 1: tokenize timeperiod into: (year, month, day, hour)
        # for instance: daily 2015031400 -> ('2015', '03', '14', '00')
        year, month, day, hour = time_helper.tokenize_timeperiod(timeperiod)
        if self.time_qualifier == QUALIFIER_HOURLY:
            stem = hour
        elif self.time_qualifier == QUALIFIER_DAILY:
            stem = day
        else:  # self.time_qualifier == QUALIFIER_MONTHLY:
            stem = month

        # step 2: perform grouping on the stem
        # ex1: stem of 14 with grouping 20 -> 20
        # ex2: stem of 21 with grouping 20 -> 23
        stem = int(stem)
        grouped = self._do_stem_grouping(timeperiod, stem)

        # step 3: concatenate timeperiod components
        # for instance: ('2015', 03', '20', '00') -> 2015032000
        if self.time_qualifier == QUALIFIER_HOURLY:
            result = '{0}{1}{2}{3:02d}'.format(year, month, day, grouped)
        elif self.time_qualifier == QUALIFIER_DAILY:
            result = '{0}{1}{2:02d}{3}'.format(year, month, grouped, hour)
        else:  # self.time_qualifier == QUALIFIER_MONTHLY:
            result = '{0}{1:02d}{2}{3}'.format(year, grouped, day, hour)
        return result

    def __len__(self):
        return len(self.data)

    def __getitem__(self, key):
        # lookups are routed through grouping, so any raw timeperiod within
        # a bucket resolves to the bucket's stored entry
        grouped_timeperiod = self._translate_timeperiod(key)
        return self.data.__getitem__(grouped_timeperiod)

    def __setitem__(self, key, value):
        grouped_timeperiod = self._translate_timeperiod(key)
        self.data.__setitem__(grouped_timeperiod, value)

    def __delitem__(self, key):
        # NOTE(review): the key is NOT translated here, unlike
        # __getitem__/__setitem__/__contains__ — confirm this is intended,
        # since entries are stored under grouped keys only
        self.data.__delitem__(key)

    def __iter__(self):
        return iter(self.data)

    def __contains__(self, key):
        grouped_timeperiod = self._translate_timeperiod(key)
        return self.data.__contains__(grouped_timeperiod)
#42 updating timeperiod_dict
__author__ = 'Bohdan Mushkevych'
import calendar
import collections
from synergy.system import time_helper
from synergy.system.time_qualifier import *
class TimeperiodDict(collections.MutableMapping):
    """Mapping keyed by Synergy timeperiod strings, where keys are bucketed
    into groups of size *grouping* before storage and lookup, so several
    raw timeperiods resolve to the same stored entry.
    """

    def __init__(self, time_qualifier, grouping=1, *args, **kwargs):
        # time_qualifier: one of QUALIFIER_HOURLY/DAILY/MONTHLY/YEARLY
        # grouping: bucket size; must not exceed the qualifier's upper boundary
        assert time_qualifier in [QUALIFIER_HOURLY, QUALIFIER_DAILY, QUALIFIER_MONTHLY, QUALIFIER_YEARLY]
        super(TimeperiodDict, self).__init__()
        self.grouping = grouping
        self.time_qualifier = time_qualifier

        # validation section
        upper_boundary = self._get_upper_boundary()
        assert 1 <= grouping <= upper_boundary

        self.data = dict()
        self.update(dict(*args, **kwargs))

    def _get_upper_boundary(self, timeperiod=None):
        """Return the largest valid stem for this qualifier; for DAILY the
        boundary is month-dependent when a concrete *timeperiod* is given."""
        if self.time_qualifier == QUALIFIER_HOURLY:
            upper_boundary = 23
        elif self.time_qualifier == QUALIFIER_DAILY:
            if timeperiod:
                # DAILY upper boundary is month-dependent
                # i.e. it is 28 for Feb 2015; and 31 for Mar 2015
                year, month, day, hour = time_helper.tokenize_timeperiod(timeperiod)
                monthrange_tuple = calendar.monthrange(int(year), int(month))
                upper_boundary = monthrange_tuple[1]
            else:
                # conservative default when no concrete month is known
                upper_boundary = 28
        elif self.time_qualifier == QUALIFIER_MONTHLY:
            upper_boundary = 12
        elif self.time_qualifier == QUALIFIER_YEARLY:
            upper_boundary = 1
        else:
            raise ValueError('unknown time qualifier: {0}'.format(self.time_qualifier))
        return upper_boundary

    def _do_stem_grouping(self, timeperiod, stem):
        """Map *stem* (int hour/day/month) to the smallest grouping multiple
        that is >= stem, capped at the period's upper boundary."""
        # exclude 00 from lower boundary, unless the grouping == 1
        lower_boundary = 0 if self.grouping == 1 else 1
        upper_boundary = self._get_upper_boundary(timeperiod)
        for i in range(lower_boundary, upper_boundary):
            candidate = i * self.grouping
            if stem <= candidate <= upper_boundary:
                return candidate
        return upper_boundary

    def _translate_timeperiod(self, timeperiod):
        """ method translates given timeperiod to the grouped timeperiod """
        if self.grouping == 1:
            # no translation is performed for identity grouping
            return timeperiod

        # step 1: tokenize timeperiod into: (year, month, day, hour)
        # for instance: daily 2015031400 -> ('2015', '03', '14', '00')
        year, month, day, hour = time_helper.tokenize_timeperiod(timeperiod)

        # step 2: perform grouping on the stem
        # ex1: stem of 14 with grouping 20 -> 20
        # ex2: stem of 21 with grouping 20 -> 23
        if self.time_qualifier == QUALIFIER_HOURLY:
            stem = self._do_stem_grouping(timeperiod, int(hour))
            result = '{0}{1}{2}{3:02d}'.format(year, month, day, stem)
        elif self.time_qualifier == QUALIFIER_DAILY:
            stem = self._do_stem_grouping(timeperiod, int(day))
            result = '{0}{1}{2:02d}{3}'.format(year, month, stem, hour)
        else:  # self.time_qualifier == QUALIFIER_MONTHLY:
            stem = self._do_stem_grouping(timeperiod, int(month))
            result = '{0}{1:02d}{2}{3}'.format(year, stem, day, hour)
        return result

    def __len__(self):
        return len(self.data)

    def __getitem__(self, key):
        grouped_timeperiod = self._translate_timeperiod(key)
        return self.data.__getitem__(grouped_timeperiod)

    def __setitem__(self, key, value):
        grouped_timeperiod = self._translate_timeperiod(key)
        self.data.__setitem__(grouped_timeperiod, value)

    def __delitem__(self, key):
        # BUG FIX: translate the key like __getitem__/__setitem__/__contains__
        # do; entries are stored under grouped keys only, so deleting by the
        # raw timeperiod could never match the stored entry.
        grouped_timeperiod = self._translate_timeperiod(key)
        self.data.__delitem__(grouped_timeperiod)

    def __iter__(self):
        return iter(self.data)

    def __contains__(self, key):
        grouped_timeperiod = self._translate_timeperiod(key)
        return self.data.__contains__(grouped_timeperiod)
|
import openpnm as op
from openpnm.phases import mixtures
import numpy as np
ws = op.Workspace()
proj = ws.new_project()
# ws.settings['loglevel'] = 20
"""
Details about the continum and numerical model equations can be found on:
Agnaou, M., Sadeghi, M. A., Tranter, T. G., & Gostick, J. (2020).
Modeling transport of charged species in pore networks: solution of the
Nernst-Planck equations coupled with fluid flow and charge conservation
equations.
Computers & Geosciences, 104505.
"""
# network, geometry, phase
# Fixed seed so the stochastic geometry is reproducible between runs.
np.random.seed(0)
net = op.network.Cubic(shape=[8, 8, 1], spacing=9e-4, project=proj)
# Trim the four corner pores and all surface throats from the 2D lattice.
prs = (net['pore.back'] * net['pore.right'] + net['pore.back']
       * net['pore.left'] + net['pore.front'] * net['pore.right']
       + net['pore.front'] * net['pore.left'])
thrts = net['throat.surface']
op.topotools.trim(network=net, pores=net.Ps[prs], throats=net.Ts[thrts])
geo = op.geometry.StickAndBall(network=net, pores=net.Ps, throats=net.Ts)
# Uniform pore/throat diameters via constant models.
pore_d = op.models.misc.constant
throat_d = op.models.misc.constant
geo.add_model(propname='pore.diameter', model=pore_d, value=1.5e-4)
geo.add_model(propname='throat.diameter', model=throat_d, value=1e-4)
geo.regenerate_models()
sw = mixtures.SalineWater(network=net)
# Retrieve handles to each species for use below
Na = sw.components['Na_' + sw.name]
Cl = sw.components['Cl_' + sw.name]
H2O = sw.components['H2O_' + sw.name]
# physics
phys = op.physics.GenericPhysics(network=net, phase=sw, geometry=geo)
flow = op.models.physics.hydraulic_conductance.hagen_poiseuille
phys.add_model(propname='throat.hydraulic_conductance',
               pore_viscosity='pore.viscosity',
               throat_viscosity='throat.viscosity',
               model=flow, regen_mode='normal')
current = op.models.physics.ionic_conductance.electroneutrality
phys.add_model(propname='throat.ionic_conductance', ions=[Na.name, Cl.name],
               model=current, regen_mode='normal')
# Ordinary diffusion conductance, one model per ionic species.
eA_dif = op.models.physics.diffusive_conductance.ordinary_diffusion
phys.add_model(propname='throat.diffusive_conductance.' + Na.name,
               pore_diffusivity='pore.diffusivity.' + Na.name,
               throat_diffusivity='throat.diffusivity.' + Na.name,
               model=eA_dif, regen_mode='normal')
eB_dif = op.models.physics.diffusive_conductance.ordinary_diffusion
phys.add_model(propname='throat.diffusive_conductance.' + Cl.name,
               pore_diffusivity='pore.diffusivity.' + Cl.name,
               throat_diffusivity='throat.diffusivity.' + Cl.name,
               model=eB_dif, regen_mode='normal')
# Advection-diffusion-migration conductance with powerlaw discretization.
s_scheme = 'powerlaw'
ad_dif_mig_Na = op.models.physics.ad_dif_mig_conductance.ad_dif_mig
phys.add_model(propname='throat.ad_dif_mig_conductance.' + Na.name,
               pore_pressure='pore.pressure', model=ad_dif_mig_Na,
               ion=Na.name, s_scheme=s_scheme)
ad_dif_mig_Cl = op.models.physics.ad_dif_mig_conductance.ad_dif_mig
phys.add_model(propname='throat.ad_dif_mig_conductance.' + Cl.name,
               pore_pressure='pore.pressure', model=ad_dif_mig_Cl,
               ion=Cl.name, s_scheme=s_scheme)
# settings for algorithms
setts1 = {'solver_max_iter': 5, 'solver_tol': 1e-08, 'solver_rtol': 1e-08,
          'nlin_max_iter': 10, 'cache_A': False, 'cache_b': False}
setts2 = {'g_tol': 1e-4, 'g_max_iter': 4, 't_output': 5000, 't_step': 500,
          't_final': 20000, 't_scheme': 'implicit', 'cache_A': False,
          'cache_b': False}
# algorithms
# Stokes flow provides the pressure field used by the migration terms below.
sf = op.algorithms.StokesFlow(network=net, phase=sw, settings=setts1)
sf.set_value_BC(pores=net.pores('back'), values=0.01)
sf.set_value_BC(pores=net.pores('front'), values=0.00)
sf.run()
sw.update(sf.results())
p = op.algorithms.TransientIonicConduction(network=net, phase=sw,
                                           settings=setts1)
p.set_value_BC(pores=net.pores('left'), values=0.1)
p.set_value_BC(pores=net.pores('right'), values=0.00)
p.settings['charge_conservation'] = 'electroneutrality'
# One transient Nernst-Planck algorithm per ionic species.
eA = op.algorithms.TransientNernstPlanck(network=net, phase=sw, ion=Na.name,
                                         settings=setts1)
eA.set_value_BC(pores=net.pores('back'), values=100)
eA.set_value_BC(pores=net.pores('front'), values=90)
eB = op.algorithms.TransientNernstPlanck(network=net, phase=sw, ion=Cl.name,
                                         settings=setts1)
eB.set_value_BC(pores=net.pores('back'), values=100)
eB.set_value_BC(pores=net.pores('front'), values=90)
# Coupled multiphysics solver iterates potential and ion transport together.
it = op.algorithms.TransientNernstPlanckMultiphysicsSolver(network=net,
                                                           phase=sw,
                                                           settings=setts2)
it.setup(potential_field=p.name, ions=[eA.name, eB.name])
it.run()
sw.update(sf.results())
sw.update(p.results())
sw.update(eA.results())
sw.update(eB.results())
# output results to a vtk file for visualization on Paraview
# proj.export_data(phases=[sw], filename='OUT', filetype='xdmf')
update transient example
import openpnm as op
from openpnm.phases import mixtures
import numpy as np
ws = op.Workspace()
proj = ws.new_project()
# ws.settings['loglevel'] = 20
"""
Details about the continum and numerical model equations can be found on:
Agnaou, M., Sadeghi, M. A., Tranter, T. G., & Gostick, J. (2020).
Modeling transport of charged species in pore networks: solution of the
Nernst-Planck equations coupled with fluid flow and charge conservation
equations.
Computers & Geosciences, 104505.
"""
# network, geometry, phase
# Fixed seed so the stochastic geometry is reproducible between runs.
np.random.seed(0)
net = op.network.Cubic(shape=[8, 8, 1], spacing=9e-4, project=proj)
# Trim the four corner pores and all surface throats from the 2D lattice.
prs = (net['pore.back'] * net['pore.right'] + net['pore.back']
       * net['pore.left'] + net['pore.front'] * net['pore.right']
       + net['pore.front'] * net['pore.left'])
thrts = net['throat.surface']
op.topotools.trim(network=net, pores=net.Ps[prs], throats=net.Ts[thrts])
geo = op.geometry.StickAndBall(network=net, pores=net.Ps, throats=net.Ts)
# Uniform pore/throat diameters via constant models.
pore_d = op.models.misc.constant
throat_d = op.models.misc.constant
geo.add_model(propname='pore.diameter', model=pore_d, value=1.5e-4)
geo.add_model(propname='throat.diameter', model=throat_d, value=1e-4)
geo.regenerate_models()
sw = mixtures.SalineWater(network=net)
# Retrieve handles to each species for use below
Na = sw.components['Na_' + sw.name]
Cl = sw.components['Cl_' + sw.name]
H2O = sw.components['H2O_' + sw.name]
# physics
phys = op.physics.GenericPhysics(network=net, phase=sw, geometry=geo)
flow = op.models.physics.hydraulic_conductance.hagen_poiseuille
phys.add_model(propname='throat.hydraulic_conductance',
               pore_viscosity='pore.viscosity',
               throat_viscosity='throat.viscosity',
               model=flow, regen_mode='normal')
current = op.models.physics.ionic_conductance.electroneutrality
phys.add_model(propname='throat.ionic_conductance', ions=[Na.name, Cl.name],
               model=current, regen_mode='normal')
# Ordinary diffusion conductance, one model per ionic species.
eA_dif = op.models.physics.diffusive_conductance.ordinary_diffusion
phys.add_model(propname='throat.diffusive_conductance.' + Na.name,
               pore_diffusivity='pore.diffusivity.' + Na.name,
               throat_diffusivity='throat.diffusivity.' + Na.name,
               model=eA_dif, regen_mode='normal')
eB_dif = op.models.physics.diffusive_conductance.ordinary_diffusion
phys.add_model(propname='throat.diffusive_conductance.' + Cl.name,
               pore_diffusivity='pore.diffusivity.' + Cl.name,
               throat_diffusivity='throat.diffusivity.' + Cl.name,
               model=eB_dif, regen_mode='normal')
# Advection-diffusion-migration conductance with powerlaw discretization.
s_scheme = 'powerlaw'
ad_dif_mig_Na = op.models.physics.ad_dif_mig_conductance.ad_dif_mig
phys.add_model(propname='throat.ad_dif_mig_conductance.' + Na.name,
               pore_pressure='pore.pressure', model=ad_dif_mig_Na,
               ion=Na.name, s_scheme=s_scheme)
ad_dif_mig_Cl = op.models.physics.ad_dif_mig_conductance.ad_dif_mig
phys.add_model(propname='throat.ad_dif_mig_conductance.' + Cl.name,
               pore_pressure='pore.pressure', model=ad_dif_mig_Cl,
               ion=Cl.name, s_scheme=s_scheme)
# settings for algorithms
setts1 = {'solver_max_iter': 5, 'solver_tol': 1e-08, 'solver_rtol': 1e-08,
          'nlin_max_iter': 10, 'cache_A': False, 'cache_b': False}
setts2 = {'g_tol': 1e-4, 'g_max_iter': 4, 't_output': 5000, 't_step': 500,
          't_final': 20000, 't_scheme': 'implicit'}
# algorithms
# Stokes flow provides the pressure field used by the migration terms below.
sf = op.algorithms.StokesFlow(network=net, phase=sw, settings=setts1)
sf.set_value_BC(pores=net.pores('back'), values=0.01)
sf.set_value_BC(pores=net.pores('front'), values=0.00)
sf.run()
sw.update(sf.results())
p = op.algorithms.TransientIonicConduction(network=net, phase=sw,
                                           settings=setts1)
p.set_value_BC(pores=net.pores('left'), values=0.1)
p.set_value_BC(pores=net.pores('right'), values=0.00)
p.settings['charge_conservation'] = 'electroneutrality'
# One transient Nernst-Planck algorithm per ionic species.
eA = op.algorithms.TransientNernstPlanck(network=net, phase=sw, ion=Na.name,
                                         settings=setts1)
eA.set_value_BC(pores=net.pores('back'), values=100)
eA.set_value_BC(pores=net.pores('front'), values=90)
eB = op.algorithms.TransientNernstPlanck(network=net, phase=sw, ion=Cl.name,
                                         settings=setts1)
eB.set_value_BC(pores=net.pores('back'), values=100)
eB.set_value_BC(pores=net.pores('front'), values=90)
# Coupled multiphysics solver iterates potential and ion transport together.
it = op.algorithms.TransientNernstPlanckMultiphysicsSolver(network=net,
                                                           phase=sw,
                                                           settings=setts2)
it.setup(potential_field=p.name, ions=[eA.name, eB.name])
it.run()
sw.update(sf.results())
sw.update(p.results())
sw.update(eA.results())
sw.update(eB.results())
# output results to a vtk file for visualization on Paraview
# proj.export_data(phases=[sw], filename='OUT', filetype='xdmf')
|
# coding=utf-8
# Filename: test_config.py
"""
Test suite for configuration related functions and classes.
"""
from __future__ import division, absolute_import, print_function
from km3pipe.testing import TestCase, StringIO, patch
from km3pipe.config import Config
__author__ = "Tamas Gal"
__copyright__ = "Copyright 2016, Tamas Gal and the KM3NeT collaboration."
__credits__ = []
__license__ = "MIT"
__maintainer__ = "Tamas Gal"
__email__ = "tgal@km3net.de"
__status__ = "Development"
# Minimal in-memory INI config shared by all tests below.
CONFIGURATION = StringIO("\n".join((
    "[DB]",
    "username=foo",
    "password=narf",
    "timeout=10",
)))
class TestConfig(TestCase):
    """Tests for km3pipe.config.Config backed by the in-memory CONFIGURATION."""

    def setUp(self):
        self.config = Config(None)
        self.config._read_from_file(CONFIGURATION)
        # rewind the shared StringIO so subsequent tests can re-read it
        CONFIGURATION.seek(0, 0)

    def test_db_credentials(self):
        self.assertEqual('foo', self.config.db_credentials[0])
        self.assertEqual('narf', self.config.db_credentials[1])

    def test_check_for_updates_defaults_to_true(self):
        self.assertTrue(self.config.check_for_updates)

    def test_time_zone_defaults_to_utc(self):
        self.assertEqual('UTC', self.config.time_zone._tzname)

    def test_slack_token_raises_error_by_default(self):
        with self.assertRaises(ValueError):
            self.config.slack_token

    def test_get_retrieves_correct_value(self):
        self.assertEqual("foo", self.config.get("DB", "username"))

    def test_get_returns_none_if_section_not_found(self):
        self.assertTrue(self.config.get("a", "b") is None)

    def test_get_returns_none_if_option_not_found(self):
        self.assertTrue(self.config.get("DB", "a") is None)

    def test_get_returns_float_if_option_is_numberlike(self):
        self.assertTrue(isinstance(self.config.get("DB", "timeout"), float))

    def test_create_irods_session_returns_none_if_irods_module_missing(self):
        session = self.config.create_irods_session()
        self.assertTrue(session is None)

    # test_tofail removed: it was a bare `assert False` committed only to
    # exercise the CI pipeline, and made every run of this suite fail.
Revert "make tests fail to test jenkins"
This reverts commit c4224b4dc219214e22f795458d8d887be7b1ce3c.
# coding=utf-8
# Filename: test_config.py
"""
Test suite for configuration related functions and classes.
"""
from __future__ import division, absolute_import, print_function
from km3pipe.testing import TestCase, StringIO, patch
from km3pipe.config import Config
__author__ = "Tamas Gal"
__copyright__ = "Copyright 2016, Tamas Gal and the KM3NeT collaboration."
__credits__ = []
__license__ = "MIT"
__maintainer__ = "Tamas Gal"
__email__ = "tgal@km3net.de"
__status__ = "Development"
# Minimal in-memory INI config shared by all tests below.
CONFIGURATION = StringIO("\n".join((
    "[DB]",
    "username=foo",
    "password=narf",
    "timeout=10",
)))
class TestConfig(TestCase):
    """Tests for km3pipe.config.Config backed by the in-memory CONFIGURATION."""

    def setUp(self):
        self.config = Config(None)
        self.config._read_from_file(CONFIGURATION)
        # rewind the shared StringIO so subsequent tests can re-read it
        CONFIGURATION.seek(0, 0)

    def test_db_credentials(self):
        self.assertEqual('foo', self.config.db_credentials[0])
        self.assertEqual('narf', self.config.db_credentials[1])

    def test_check_for_updates_defaults_to_true(self):
        self.assertTrue(self.config.check_for_updates)

    def test_time_zone_defaults_to_utc(self):
        self.assertEqual('UTC', self.config.time_zone._tzname)

    def test_slack_token_raises_error_by_default(self):
        with self.assertRaises(ValueError):
            self.config.slack_token

    def test_get_retrieves_correct_value(self):
        self.assertEqual("foo", self.config.get("DB", "username"))

    def test_get_returns_none_if_section_not_found(self):
        self.assertTrue(self.config.get("a", "b") is None)

    def test_get_returns_none_if_option_not_found(self):
        self.assertTrue(self.config.get("DB", "a") is None)

    def test_get_returns_float_if_option_is_numberlike(self):
        self.assertTrue(isinstance(self.config.get("DB", "timeout"), float))

    def test_create_irods_session_returns_none_if_irods_module_missing(self):
        session = self.config.create_irods_session()
        self.assertTrue(session is None)
|
import functools
import itertools
import operator
import xp
einsum_symbols = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
einsum_symbols_set = set(einsum_symbols)
# Global behaviour switches for this einsum implementation.
options = {
    # If True, '...' broadcast dimensions may be summed out of the output.
    'sum_ellipsis': False,
}
def _concat(lists):
return sum(lists, [])
def _prod(xs):
return functools.reduce(operator.mul, xs, 1)
def _parse_int_subscript(sub):
subscripts = ""
for s in sub:
if s is Ellipsis:
subscripts += "@"
elif isinstance(s, int):
subscripts += einsum_symbols[s]
else:
raise TypeError("For this input type lists must contain "
"either int or Ellipsis")
return subscripts
def _parse_einsum_input(operands, parse_ellipsis=True):
    """Parse einsum operands.

    This function is based on `numpy.core.einsumfunc._parse_einsum_input`
    function in NumPy 1.14.

    Returns
    -------
    input_strings : list of str
        Parsed input subscript strings, one per operand, with any
        ellipsis replaced by the single character '@'
    output_string : str or None
        Parsed output subscript string (None when no '->' was given)
    operands : list of array_like
        The operands to use in the numpy contraction

    Examples
    --------
    The operand list is simplified to reduce printing:

    >>> a = np.random.rand(4, 4)
    >>> b = np.random.rand(4, 4, 4)
    >>> _parse_einsum_input(('...a,...a->...', a, b))
    (['@a', '@a'], '@', [a, b])
    >>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))
    (['@A', '@A'], None, [a, b])
    """
    if len(operands) == 0:
        raise ValueError("No input operands")

    if isinstance(operands[0], str):
        # string-subscript form: first element is the subscript spec
        subscripts = operands[0].replace(" ", "")
        operands = list(operands[1:])

        # Ensure all characters are valid
        for s in subscripts:
            if s in '.,->':
                continue
            if s not in einsum_symbols:
                raise ValueError("Character %s is not a valid symbol." % s)

        # Parse "..."
        subscripts = subscripts.replace("...", "@")
        if "." in subscripts:
            raise ValueError("Invalid Ellipses.")

        # Parse "->"
        if ("-" in subscripts) or (">" in subscripts):
            # Check for proper "->"
            invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1)
            subscripts = subscripts.split("->")
            if invalid or len(subscripts) != 2:
                raise ValueError("Subscripts can only contain one '->'.")
            input_subscripts, output_subscript = subscripts
        else:
            input_subscripts = subscripts
            output_subscript = None

        input_subscripts = input_subscripts.split(",")
        if len(input_subscripts) != len(operands):
            raise ValueError("Number of einsum subscripts must be equal to the "
                             "number of operands.")
    else:
        # interleaved form: (op0, sub0, op1, sub1, ..., [output_sub])
        tmp_operands = list(operands)
        operands = []
        input_subscripts = []
        while len(tmp_operands) >= 2:
            operands.append(tmp_operands.pop(0))
            input_subscripts.append(_parse_int_subscript(
                tmp_operands.pop(0)))
        # a trailing odd element is the explicit output subscript
        if tmp_operands:
            output_subscript = _parse_int_subscript(tmp_operands[0])
        else:
            output_subscript = None

    return input_subscripts, output_subscript, operands
def _chr(char):
if char < 0:
return "...[%d]" % char
else:
return chr(char)
def _parse_ellipsis_subscript(subscript, ndim=None, ellipsis_len=None):
subs = subscript.split('@')
if len(subs) == 1:
sub, = subs
if ndim is not None and len(sub) != ndim:
# raise ValueError later
return "Einstein sum subscript %s does not contain the correct number of indices " % subs
return list(map(ord, sub))
elif len(subs) == 2:
left_sub, right_sub = subs
if ndim is not None:
ellipsis_len = ndim - (len(left_sub) + len(right_sub))
if ellipsis_len < 0:
# raise ValueError later
return "Einstein sum subscript %s...%s does not contain the correct number of indices " % (left_sub, right_sub)
return list(itertools.chain(
map(ord, left_sub),
range(-ellipsis_len, 0),
map(ord, right_sub),
))
else:
# >= 2 ellipses for an operand
raise ValueError("Invalid Ellipses.")
def einsum(*operands, **kwargs):
    """Evaluate the Einstein summation convention on *operands*.

    Supports the subscript-string form (``einsum('ij,jk->ik', a, b)``) and
    the interleaved list form (``einsum(a, [0, 1], b, [1, 2], [0, 2])``).
    Keyword arguments: ``dtype`` (compute/result dtype) and ``optimize``
    (must be False; contraction-order optimization is not implemented).

    Per-operand diagonals, unary sums and size-1 squeezes are applied
    first; the remaining operands are then contracted pairwise
    left-to-right via a batched matmul.
    """
    input_subscripts, output_subscript, operands = _parse_einsum_input(operands)
    assert isinstance(input_subscripts, list)
    assert isinstance(operands, list)

    dtype = kwargs.pop('dtype', None)
    optimize = kwargs.pop('optimize', False)
    assert optimize is False, "optimize: sorry"

    if kwargs:
        # BUG FIX: `list(kwargs.keys)` passed the bound method itself, which
        # raised TypeError instead of reporting the unknown keyword names.
        raise TypeError("Did not understand the following kwargs: %s"
                        % list(kwargs.keys()))

    operands = [
        xp.asanyarray(arr)
        for arr in operands
    ]

    result_dtype = dtype or xp.result_type(*operands)

    input_subscripts = [
        _parse_ellipsis_subscript(sub, ndim=arr.ndim)
        for sub, arr in zip(input_subscripts, operands)
    ]
    # _parse_ellipsis_subscript signals errors by returning a message string
    for i, sub_or_err in enumerate(input_subscripts):
        if isinstance(sub_or_err, str):
            raise ValueError(sub_or_err + "for operand %d." % i)

    # Get length of each unique dimension and ensure all dimensions are correct
    dimension_dict = {}
    for tnum, term in enumerate(input_subscripts):
        sh = operands[tnum].shape
        for cnum, char in enumerate(term):
            dim = sh[cnum]
            if char in dimension_dict.keys():
                # For broadcasting cases we always want the largest dim size
                if dimension_dict[char] == 1:
                    dimension_dict[char] = dim
                elif dim not in (1, dimension_dict[char]):
                    dim_old = dimension_dict[char]
                    raise ValueError("Size of label '%s' for operand %d (%d) "
                                     "does not match previous terms (%d)."
                                     % (_chr(char), tnum, dim, dim_old))
            else:
                dimension_dict[char] = dim

    if output_subscript is None:
        # Build output subscripts: every label appearing exactly once plus
        # all (negative) ellipsis axes, in sorted order
        tmp_subscripts = _concat(input_subscripts)
        output_subscript = [
            s
            for s in sorted(set(tmp_subscripts))
            if s < 0 or tmp_subscripts.count(s) == 1
        ]
    else:
        if not options['sum_ellipsis']:
            if '@' not in output_subscript and -1 in dimension_dict:
                raise ValueError("output had too few broadcast dimensions")
        output_subscript = _parse_ellipsis_subscript(
            output_subscript,
            ellipsis_len=len(list(s for s in dimension_dict.keys() if s < 0))
        )

        # Make sure output subscripts are in the input
        tmp_subscripts = set(_concat(input_subscripts))
        for char in output_subscript:
            if char not in tmp_subscripts:
                raise ValueError(
                    "Output character %s did not appear in the input" % _chr(char))

    path = [(0, 1)] * (len(operands) - 1)  # TODO(kataoka): optimize

    # diagonal: collapse repeated labels within a single operand
    for num, sub in enumerate(input_subscripts):
        i = 0
        while i < len(sub):
            s = sub[i]
            if sub.count(s) > 1:
                indices = []
                sub = []
                for j, t in enumerate(input_subscripts[num]):
                    if j == i:
                        indices.append(j)
                        sub.append(t)
                    elif t == s:
                        indices.append(j)
                    else:
                        sub.append(t)
                input_subscripts[num] = sub
                diag_ndim = len(indices)
                op = operands[num]
                dim = op.shape[i]
                op = xp.moveaxis(
                    op,
                    tuple(indices), tuple(range(diag_ndim))
                )
                operands[num] = xp.moveaxis(
                    op[(xp.arange(dim),) * diag_ndim],
                    0, i
                )
            del s
            i += 1
        del i

    returns_view = len(operands) == 1  # and there's no sum

    # unary sum: sum out labels appearing in neither the output nor any
    # other operand
    for num, sub in enumerate(input_subscripts):
        other_subscripts = input_subscripts.copy()
        other_subscripts[num] = output_subscript
        other_subscripts = _concat(other_subscripts)
        sum_axes = tuple(
            i
            for i, s in enumerate(sub)
            if s not in other_subscripts
        )
        if sum_axes:
            returns_view = False
            input_subscripts[num] = [
                s
                for i, s in enumerate(sub)
                if i not in sum_axes
            ]

            op = operands[num]
            # numpy.sum uses platform integer types by default
            operands[num] = op.sum(axis=sum_axes, dtype=dtype or op.dtype)

    # squeeze out broadcastable size-1 axes before the pairwise contraction
    for num in range(len(operands)):
        op = operands[num]
        if 1 in op.shape:
            squeeze_indices = []
            sub = []
            for i, s in enumerate(input_subscripts[num]):
                if op.shape[i] == 1:
                    squeeze_indices.append(i)
                else:
                    sub.append(s)
            input_subscripts[num] = sub
            operands[num] = xp.squeeze(op, axis=tuple(squeeze_indices))

    for idx0, idx1 in path:
        # repeat binary einsum: contract one operand pair via batched matmul
        assert idx0 < idx1
        sub1 = input_subscripts.pop(idx1)
        op1 = operands.pop(idx1)
        sub0 = input_subscripts.pop(idx0)
        op0 = operands.pop(idx0)

        set0 = set(sub0)
        set1 = set(sub1)
        assert len(set0) == len(sub0)
        assert len(set1) == len(sub1)
        set_out = set(_concat([output_subscript] + input_subscripts))

        shared = set0 & set1
        # labels still needed downstream stay as batch dims; the rest are
        # contracted away
        batch_dims = shared & set_out
        contract_dims = shared - batch_dims

        bs0, cs0, ts0 = _make_transpose_axes(sub0, batch_dims, contract_dims)
        bs1, cs1, ts1 = _make_transpose_axes(sub1, batch_dims, contract_dims)

        batch_size = _prod([dimension_dict[s] for s in batch_dims])
        contract_size = _prod([dimension_dict[s] for s in contract_dims])

        tmp0 = op0.transpose(bs0 + ts0 + cs0).reshape(batch_size, -1, contract_size)
        tmp1 = op1.transpose(bs1 + cs1 + ts1).reshape(batch_size, contract_size, -1)
        if dtype and xp.result_type(tmp0, tmp1) != dtype:
            tmp0 = tmp0.astype(dtype)
            tmp1 = tmp1.astype(dtype)
        tmp_out = xp.matmul(tmp0, tmp1)

        sub_b = [sub0[i] for i in bs0]
        assert sub_b == [sub1[i] for i in bs1]
        sub_l = [sub0[i] for i in ts0]
        sub_r = [sub1[i] for i in ts1]

        sub_out = sub_b + sub_l + sub_r
        op_out = tmp_out.reshape([dimension_dict[s] for s in sub_out])

        input_subscripts.append(sub_out)
        operands.append(op_out)

    # unary einsum at last: transpose to the requested output order
    op0, = operands
    sub0, = input_subscripts

    transpose_axes = []
    for s in output_subscript:
        try:
            transpose_axes.append(sub0.index(s))
        except ValueError:
            pass

    op_out = op0.transpose(transpose_axes).reshape([
        dimension_dict[s]
        for s in output_subscript
    ])
    if optimize is False:
        assert returns_view or op_out.dtype == result_dtype
    return op_out
def _tuple_sorted_by_0(zs):
return tuple(i for _, i in sorted(zs))
def _make_transpose_axes(sub, b_dims, c_dims):
bs = []
cs = []
ts = []
for i, s in enumerate(sub):
if s in b_dims:
bs.append((s, i))
elif s in c_dims:
cs.append((s, i))
else:
ts.append((s, i))
return (
_tuple_sorted_by_0(bs),
_tuple_sorted_by_0(cs),
_tuple_sorted_by_0(ts),
)
"""
if position == 0:
it = itertools.chain(sorted(bs), sorted(ts), sorted(cs))
else:
it = itertools.chain(sorted(bs), sorted(cs), sorted(ts))
return tuple(i for _, i in it)
"""
Add option broadcast_diagonal and refactor
import functools
import itertools
import operator
import xp
# The 52 letters allowed in a string subscript; NumPy order (uppercase
# first), so integer label 0 maps to 'A' in _parse_int_subscript.
einsum_symbols = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
einsum_symbols_set = set(einsum_symbols)
# Feature switches for this einsum implementation (both off by default).
options = {
    'sum_ellipsis': False,
    'broadcast_diagonal': False,
}
def _concat(lists):
return sum(lists, [])
def _prod(xs):
return functools.reduce(operator.mul, xs, 1)
def _parse_int_subscript(sub):
subscripts = ""
for s in sub:
if s is Ellipsis:
subscripts += "@"
elif isinstance(s, int):
subscripts += einsum_symbols[s]
else:
raise TypeError("For this input type lists must contain "
"either int or Ellipsis")
return subscripts
def _parse_einsum_input(operands, parse_ellipsis=True):
    """Parse einsum operands.

    This function is based on the
    ``numpy.core.einsumfunc._parse_einsum_input`` function in NumPy 1.14.

    Accepts either a subscript string followed by the arrays, or the
    interleaved form of (array, subscript-list) pairs with an optional
    trailing output subscript-list.

    Returns
    -------
    input_subscripts : list of str
        Parsed input subscript strings, one per operand ('@' marks an
        ellipsis).
    output_subscript : str or None
        Parsed output subscript string, or None when no explicit output
        was given (the caller then infers it).
    operands : list of array_like
        The operands to use in the contraction.

    Examples
    --------
    >>> a = np.random.rand(4, 4)
    >>> b = np.random.rand(4, 4, 4)
    >>> _parse_einsum_input(('...a,...a->...', a, b))
    (['@a', '@a'], '@', [a, b])
    >>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))
    (['@A', '@A'], None, [a, b])
    """
    # NOTE(review): the parse_ellipsis parameter is currently unused.
    if len(operands) == 0:
        raise ValueError("No input operands")
    if isinstance(operands[0], str):
        # string form: "ij,jk->ik" followed by the arrays
        subscripts = operands[0].replace(" ", "")
        operands = list(operands[1:])
        # Ensure all characters are valid
        for s in subscripts:
            if s in '.,->':
                continue
            if s not in einsum_symbols:
                raise ValueError("Character %s is not a valid symbol." % s)
        # Parse "...": collapse each ellipsis to a single '@' marker
        subscripts = subscripts.replace("...", "@")
        if "." in subscripts:
            # leftover dots mean a malformed (non-triple) ellipsis
            raise ValueError("Invalid Ellipses.")
        # Parse "->"
        if ("-" in subscripts) or (">" in subscripts):
            # Check for proper "->"
            invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1)
            subscripts = subscripts.split("->")
            if invalid or len(subscripts) != 2:
                raise ValueError("Subscripts can only contain one '->'.")
            input_subscripts, output_subscript = subscripts
        else:
            input_subscripts = subscripts
            output_subscript = None
        input_subscripts = input_subscripts.split(",")
        if len(input_subscripts) != len(operands):
            raise ValueError("Number of einsum subscripts must be equal to the "
                             "number of operands.")
    else:
        # interleaved form: array, subscript-list, array, subscript-list, ...
        # with an optional final output subscript-list
        tmp_operands = list(operands)
        operands = []
        input_subscripts = []
        while len(tmp_operands) >= 2:
            operands.append(tmp_operands.pop(0))
            input_subscripts.append(_parse_int_subscript(
                tmp_operands.pop(0)))
        if tmp_operands:
            # one leftover element is the explicit output subscript
            output_subscript = _parse_int_subscript(tmp_operands[0])
        else:
            output_subscript = None
    return input_subscripts, output_subscript, operands
def _chr(char):
if char < 0:
return "...[%d]" % char
else:
return chr(char)
def _parse_ellipsis_subscript(subscript, ndim=None, ellipsis_len=None):
subs = subscript.split('@')
if len(subs) == 1:
sub, = subs
if ndim is not None and len(sub) != ndim:
# raise ValueError later
return "Einstein sum subscript %s does not contain the correct number of indices " % subs
return list(map(ord, sub))
elif len(subs) == 2:
left_sub, right_sub = subs
if ndim is not None:
ellipsis_len = ndim - (len(left_sub) + len(right_sub))
if ellipsis_len < 0:
# raise ValueError later
return "Einstein sum subscript %s...%s does not contain the correct number of indices " % (left_sub, right_sub)
return list(itertools.chain(
map(ord, left_sub),
range(-ellipsis_len, 0),
map(ord, right_sub),
))
else:
# >= 2 ellipses for an operand
raise ValueError("Invalid Ellipses.")
def _einsum_diagonals(input_subscripts, operands):
    """Take the diagonal of every operand whose subscript repeats a label.

    For each operand, any axis label that occurs more than once is
    collapsed to a single axis by extracting the (multi-)diagonal.
    Both ``input_subscripts`` and ``operands`` are mutated in place.
    Raises ValueError when the repeated axes have mismatching extents
    (unless the unimplemented 'broadcast_diagonal' option is enabled).
    """
    for num, sub in enumerate(input_subscripts):
        i = 0
        while i < len(sub):
            s = sub[i]
            if sub.count(s) > 1:
                op = operands[num]
                # indices: all axes carrying label s (the diagonal group);
                # sub is rebuilt keeping only the first occurrence of s
                indices = []
                sub = []
                for j, t in enumerate(input_subscripts[num]):
                    if j == i:
                        indices.append(j)
                        sub.append(t)
                    elif t == s:
                        indices.append(j)
                    else:
                        sub.append(t)
                if options['broadcast_diagonal']:
                    assert False # TODO(kataoka)
                else:
                    # all repeated axes must have the same extent
                    dims = list({op.shape[j] for j in indices})
                    if len(dims) >= 2:
                        raise ValueError(
                            "dimensions in operand %d"
                            " for collapsing index '%s' don't match (%d != %d)"
                            % (num, _chr(s), dims[0], dims[1])
                        )
                    dim, = dims
                input_subscripts[num] = sub
                diag_ndim = len(indices)
                # move the diagonal group to the front, fancy-index the
                # shared diagonal, then put the surviving axis back at i
                op = xp.moveaxis(
                    op,
                    tuple(indices), tuple(range(diag_ndim))
                )
                operands[num] = xp.moveaxis(
                    op[(xp.arange(dim),) * diag_ndim],
                    0, i
                )
            i += 1
def einsum(*operands, **kwargs):
    """Evaluate the Einstein summation convention on the operands.

    Accepts NumPy-einsum-like input: a subscript string followed by
    arrays, or interleaved (array, subscript-list) pairs.  Supported
    keyword arguments:

    - dtype: accumulation/result dtype (default: promoted operand dtype)
    - optimize: must be False; contraction-path optimization is not
      implemented (operands are contracted pairwise, left to right)

    Raises TypeError for unknown keyword arguments and ValueError for
    malformed subscripts or mismatched dimensions.
    """
    input_subscripts, output_subscript, operands = _parse_einsum_input(operands)
    assert isinstance(input_subscripts, list)
    assert isinstance(operands, list)
    dtype = kwargs.pop('dtype', None)
    optimize = kwargs.pop('optimize', False)
    assert optimize is False, "optimize: sorry"
    if kwargs:
        # bug fix: `kwargs.keys` (without parentheses) is the bound method
        # object, so list(...) raised TypeError instead of naming the
        # offending keyword arguments; it must be *called*.
        raise TypeError("Did not understand the following kwargs: %s"
                        % list(kwargs.keys()))
    operands = [
        xp.asanyarray(arr)
        for arr in operands
    ]
    result_dtype = dtype or xp.result_type(*operands)
    # resolve ellipses into negative axis labels (or an error string)
    input_subscripts = [
        _parse_ellipsis_subscript(sub, ndim=arr.ndim)
        for sub, arr in zip(input_subscripts, operands)
    ]
    for i, sub_or_err in enumerate(input_subscripts):
        if isinstance(sub_or_err, str):
            raise ValueError(sub_or_err + "for operand %d." % i)
    # Get length of each unique dimension and ensure all dimensions are correct
    dimension_dict = {}
    for tnum, term in enumerate(input_subscripts):
        sh = operands[tnum].shape
        for cnum, char in enumerate(term):
            dim = sh[cnum]
            if char in dimension_dict.keys():
                # For broadcasting cases we always want the largest dim size
                if dimension_dict[char] == 1:
                    dimension_dict[char] = dim
                elif dim not in (1, dimension_dict[char]):
                    dim_old = dimension_dict[char]
                    raise ValueError("Size of label '%s' for operand %d (%d) "
                                     "does not match previous terms (%d)."
                                     % (_chr(char), tnum, dim, dim_old))
            else:
                dimension_dict[char] = dim
    if output_subscript is None:
        # Build output subscripts: every label appearing exactly once,
        # plus all broadcast (negative) labels, in sorted order
        tmp_subscripts = _concat(input_subscripts)
        output_subscript = [
            s
            for s in sorted(set(tmp_subscripts))
            if s < 0 or tmp_subscripts.count(s) == 1
        ]
    else:
        if not options['sum_ellipsis']:
            if '@' not in output_subscript and -1 in dimension_dict:
                raise ValueError("output had too few broadcast dimensions")
        output_subscript = _parse_ellipsis_subscript(
            output_subscript,
            ellipsis_len=len(list(s for s in dimension_dict.keys() if s < 0))
        )
        # Make sure output subscripts are in the input
        tmp_subscripts = set(_concat(input_subscripts))
        for char in output_subscript:
            if char not in tmp_subscripts:
                raise ValueError(
                    "Output character %s did not appear in the input" % _chr(char))
    path = [(0, 1)] * (len(operands) - 1)  # TODO(kataoka): optimize
    _einsum_diagonals(input_subscripts, operands)
    returns_view = len(operands) == 1  # and there's no sum
    # unary sum: sum out labels needed neither by the output nor by any
    # other operand
    for num, sub in enumerate(input_subscripts):
        other_subscripts = input_subscripts.copy()
        other_subscripts[num] = output_subscript
        other_subscripts = _concat(other_subscripts)
        sum_axes = tuple(
            i
            for i, s in enumerate(sub)
            if s not in other_subscripts
        )
        if sum_axes:
            returns_view = False
            input_subscripts[num] = [
                s
                for i, s in enumerate(sub)
                if i not in sum_axes
            ]
            op = operands[num]
            # numpy.sum uses platform integer types by default
            operands[num] = op.sum(axis=sum_axes, dtype=dtype or op.dtype)
    # squeeze out broadcast axes of extent 1
    for num in range(len(operands)):
        op = operands[num]
        if 1 in op.shape:
            squeeze_indices = []
            sub = []
            for i, s in enumerate(input_subscripts[num]):
                if op.shape[i] == 1:
                    squeeze_indices.append(i)
                else:
                    sub.append(s)
            input_subscripts[num] = sub
            operands[num] = xp.squeeze(op, axis=tuple(squeeze_indices))
    for idx0, idx1 in path:
        # repeat binary einsum: contract the operand pair (idx0, idx1)
        assert idx0 < idx1
        sub1 = input_subscripts.pop(idx1)
        op1 = operands.pop(idx1)
        sub0 = input_subscripts.pop(idx0)
        op0 = operands.pop(idx0)
        set0 = set(sub0)
        set1 = set(sub1)
        assert len(set0) == len(sub0)
        assert len(set1) == len(sub1)
        # shared labels still needed downstream stay as batch dims; the
        # rest of the shared labels are contracted away
        set_out = set(_concat([output_subscript] + input_subscripts))
        shared = set0 & set1
        batch_dims = shared & set_out
        contract_dims = shared - batch_dims
        bs0, cs0, ts0 = _make_transpose_axes(sub0, batch_dims, contract_dims)
        bs1, cs1, ts1 = _make_transpose_axes(sub1, batch_dims, contract_dims)
        batch_size = _prod([dimension_dict[s] for s in batch_dims])
        contract_size = _prod([dimension_dict[s] for s in contract_dims])
        # reshape to (batch, free, contract) @ (batch, contract, free)
        tmp0 = op0.transpose(bs0 + ts0 + cs0).reshape(batch_size, -1, contract_size)
        tmp1 = op1.transpose(bs1 + cs1 + ts1).reshape(batch_size, contract_size, -1)
        if dtype and xp.result_type(tmp0, tmp1) != dtype:
            tmp0 = tmp0.astype(dtype)
            tmp1 = tmp1.astype(dtype)
        tmp_out = xp.matmul(tmp0, tmp1)
        sub_b = [sub0[i] for i in bs0]
        assert sub_b == [sub1[i] for i in bs1]
        sub_l = [sub0[i] for i in ts0]
        sub_r = [sub1[i] for i in ts1]
        sub_out = sub_b + sub_l + sub_r
        op_out = tmp_out.reshape([dimension_dict[s] for s in sub_out])
        input_subscripts.append(sub_out)
        operands.append(op_out)
    # unary einsum at last: transpose to the requested output order
    op0, = operands
    sub0, = input_subscripts
    transpose_axes = []
    for s in output_subscript:
        try:
            transpose_axes.append(sub0.index(s))
        except ValueError:
            # label was squeezed away earlier (broadcast axis of extent 1)
            pass
    op_out = op0.transpose(transpose_axes).reshape([
        dimension_dict[s]
        for s in output_subscript
    ])
    if optimize is False:
        assert returns_view or op_out.dtype == result_dtype
    return op_out
def _tuple_sorted_by_0(zs):
return tuple(i for _, i in sorted(zs))
def _make_transpose_axes(sub, b_dims, c_dims):
bs = []
cs = []
ts = []
for i, s in enumerate(sub):
if s in b_dims:
bs.append((s, i))
elif s in c_dims:
cs.append((s, i))
else:
ts.append((s, i))
return (
_tuple_sorted_by_0(bs),
_tuple_sorted_by_0(cs),
_tuple_sorted_by_0(ts),
)
"""
if position == 0:
it = itertools.chain(sorted(bs), sorted(ts), sorted(cs))
else:
it = itertools.chain(sorted(bs), sorted(cs), sorted(ts))
return tuple(i for _, i in it)
"""
|
#!/usr/bin/env python3
# Copyright (C) 2017-2020 The btclib developers
#
# This file is part of btclib. It is subject to the license terms in the
# LICENSE file found in the top-level directory of this distribution.
#
# No part of btclib including this file, may be copied, modified, propagated,
# or distributed except according to the terms contained in the LICENSE file.
"""Entropy conversion functions.
Depending on the function, input entropy can be expressed
as raw (i.e. binary 0/1 string), bytes, or integer
and their equivalent representations.
Leading zeros in raw or bytes entropy
are never considered redundant padding.
Output entropy is always raw.
"""
import math
import secrets
from hashlib import sha512
from typing import Iterable, List, Optional, Tuple, Union
from .alias import BinStr, Entropy, Octets
from .exceptions import BTClibTypeError, BTClibValueError
from .utils import bytes_from_octets
# allowed entropy bit sizes (BIP39-style lengths plus 512)
_bits = 128, 160, 192, 224, 256, 512
# supported dice: number of sides must be one of these
_dice_sides = (4, 6, 8, 12, 20, 24, 30, 48, 60, 120)
def _indexes_from_entropy(entropy: BinStr, base: int) -> List[int]:
    """Return the digit indexes for the provided raw entropy.

    The raw (i.e. binary 0/1 string) entropy is interpreted as a big
    integer and decomposed into big-endian base-`base` digits; the
    result is zero-padded so that leading-zero entropy is preserved.
    """
    n_bits = len(entropy)
    remaining = int(entropy, 2)
    digits: List[int] = []
    while remaining:
        remaining, digit = divmod(remaining, base)
        digits.append(digit)
    # do not lose leading zeros entropy: pad to the expected digit count
    bits_per_digit = int(math.log(base, 2))
    n_digits = math.ceil(n_bits / bits_per_digit)
    digits.extend([0] * (n_digits - len(digits)))
    # digits were produced least-significant first
    digits.reverse()
    return digits
def _entropy_from_indexes(indexes: List[int], base: int) -> BinStr:
    """Return the raw entropy encoded by the given word-list indexes.

    The indexes are treated as big-endian digits in the given base; the
    binary result is zero-filled so leading-zero entropy is preserved.
    """
    acc = 0
    for digit in indexes:
        acc = acc * base + digit
    # strip the '0b' prefix
    bin_entropy = bin(acc)[2:]
    # do not lose leading zeros entropy
    bits_per_digit = int(math.log(base, 2))
    total_bits = len(indexes) * bits_per_digit
    return bin_entropy.zfill(total_bits)
OneOrMoreInt = Union[int, Iterable[int]]
def binstr_from_entropy(entr: Entropy, bits: OneOrMoreInt = _bits) -> BinStr:
    """Return raw entropy from the input entropy.

    Dispatches on the input type:

    - str: raw (i.e. binary 0/1 string) entropy
    - bytes: raw bytes (no hex-string, as that would conflict with
      the raw 0/1-string representation)
    - int: integer entropy (front-padded with zeros as needed)

    Raw and bytes entropy are never padded; integer entropy is
    front-padded up to the required bit size.  In all cases, surplus
    leftmost bits are retained.  Default bit-sizes are 128, 160, 192,
    224, 256, or 512 bits.
    """
    # order matters: str before bytes/int, and bool is a subclass of int
    dispatch = (
        (str, binstr_from_binstr),
        (bytes, binstr_from_bytes),
        (int, binstr_from_int),
    )
    for type_, converter in dispatch:
        if isinstance(entr, type_):
            return converter(entr, bits)
    m = "Entropy must be raw binary 0/1 string, bytes, or int; "
    m += f"not '{type(entr).__name__}'"
    raise BTClibTypeError(m)
def binstr_from_bytes(bytes_entropy: Octets, bits: OneOrMoreInt = _bits) -> BinStr:
    """Return raw entropy from the input Octets entropy.

    Hex-string or bytes input is accepted; it is never padded to reach
    an allowed bit size.  If more bits than required are provided, the
    leftmost ones are retained.
    Default bit-sizes are 128, 160, 192, 224, 256, or 512 bits.
    """
    bytes_entropy = bytes_from_octets(bytes_entropy)
    # normalize to an ascending, duplicate-free list of allowed sizes
    allowed = sorted({bits}) if isinstance(bits, int) else sorted(set(bits))
    # cap at the largest allowed size: surplus leftmost bits are kept
    n_bits = min(len(bytes_entropy) * 8, allowed[-1])
    if n_bits not in allowed:
        m = f"Wrong number of bits: {n_bits} instead of {allowed}"
        raise BTClibValueError(m)
    int_entropy = int.from_bytes(bytes_entropy, byteorder="big", signed=False)
    # only the leftmost bits will be retained
    return binstr_from_int(int_entropy, n_bits)
def binstr_from_int(int_entropy: Union[int, str], bits: OneOrMoreInt = _bits) -> BinStr:
    """Return raw entropy from the input integer entropy.

    The input can be an int or a string starting with "0b"/"0x"; it is
    front-padded with zero digits up to the next allowed bit size.
    If more bits than required are provided, the leftmost ones are
    retained.  Default bit-sizes are 128, 160, 192, 224, 256, or 512.
    """
    if isinstance(int_entropy, str):
        stripped = int_entropy.strip().lower()
        if stripped.startswith("0b"):
            int_entropy = int(stripped, 2)
        elif stripped.startswith("0x"):
            int_entropy = int(stripped, 16)
        else:
            # unprefixed strings fall through and are rejected below
            int_entropy = stripped
    if not isinstance(int_entropy, int):
        m = "Entropy must be an int, not "
        m += f"{type(int_entropy).__name__}"
        raise BTClibTypeError(m)
    if int_entropy < 0:
        raise BTClibValueError(f"Negative entropy: {int_entropy}")
    # normalize to an ascending, duplicate-free list of allowed sizes
    allowed = [bits] if isinstance(bits, int) else sorted(set(bits))
    # binary representation without the '0b' prefix
    bin_str = bin(int_entropy)[2:]
    if len(bin_str) > allowed[-1]:
        # only the leftmost bits are retained
        return bin_str[: allowed[-1]]
    # front-pad with zeros up to the next allowed bit length
    target = next(v for v in allowed if v >= len(bin_str))
    return bin_str.zfill(target)
def binstr_from_binstr(str_entropy: str, bits: OneOrMoreInt = _bits) -> BinStr:
    """Return raw entropy from the input raw entropy.

    Input must already be a binary 0/1 string; it is never padded to
    reach an allowed bit size.  If more bits than required are provided,
    the leftmost ones are retained.
    Default bit-sizes are 128, 160, 192, 224, 256, or 512 bits.
    """
    if not isinstance(str_entropy, str):
        m = "Entropy must be a str, not "
        m += f"{type(str_entropy).__name__}"
        raise BTClibTypeError(m)
    # validation: must parse as a binary string (raises ValueError if not)
    int(str_entropy, 2)
    # normalize to an ascending, duplicate-free list of allowed sizes
    allowed = [bits] if isinstance(bits, int) else sorted(set(bits))
    n_bits = len(str_entropy)
    if n_bits > allowed[-1]:
        # only the leftmost bits are retained
        return str_entropy[: allowed[-1]]
    if n_bits not in allowed:
        m = f"Wrong number of bits: {n_bits} instead of {allowed}"
        raise BTClibValueError(m)
    return str_entropy
def collect_rolls(bits: int) -> Tuple[int, List[int]]:
    """Interactively collect dice rolls until `bits` bits of entropy are met.

    Prompts for the number of dice sides (one of _dice_sides), then for
    individual rolls; prefixing the sides with 'a' (or hitting enter for
    'a6') generates the rolls automatically with the secrets CSPRNG.
    Returns the chosen dice_sides and the list of usable rolls.
    """
    dice_sides = 0
    # loop until a supported number of sides is chosen
    while dice_sides not in _dice_sides:
        automate = False
        msg = f"{_dice_sides}"
        # drop the closing parenthesis of the tuple repr
        msg = "dice sides " + msg[:-1]
        msg += "; prefix with 'a' to automate rolls, hit enter for 'a6'): "
        dice_sides_str = input(msg)
        dice_sides_str = dice_sides_str.lower()
        if dice_sides_str in ["", "a"]:
            automate = True
            dice_sides = 6
        else:
            if dice_sides_str.startswith("a"):
                automate = True
                dice_sides_str = dice_sides_str[1:]
            try:
                dice_sides = int(dice_sides_str)
            except ValueError:
                # invalid input: stay in the loop and ask again
                dice_sides = 0
    bits_per_roll = math.floor(math.log2(dice_sides))
    # only rolls in [1, base] contribute entropy
    base = 2 ** bits_per_roll
    if not automate:
        print(f"rolls are used only if in 1..{base}")
    rolls: List[int] = []
    min_roll_number = math.ceil(bits / bits_per_roll)
    for i in range(min_roll_number):
        roll = 0
        # re-prompt until a usable roll in [1, base] is obtained
        while not 0 < roll <= base:
            try:
                if automate:
                    roll_str = str(1 + secrets.randbelow(dice_sides))
                else:
                    roll_str = input(f"roll #{i+1}/{min_roll_number}: ")
                roll = int(roll_str)
            except ValueError:
                roll = 0
        rolls.append(roll)
    print(f"collected {min_roll_number} usable D{dice_sides} rolls")
    return dice_sides, rolls
def binstr_from_rolls(
    bits: int, dice_sides: int, rolls: List[int], shuffle: bool = True
) -> BinStr:
    """Return raw entropy from the input dice rolls.

    Rolls are integers in [1, dice_sides]; only rolls in [1, base] are
    used, base being the highest power of 2 not exceeding dice_sides
    (e.g. rolls in [1-4] for a D6, [1-16] for a D20).  There must be
    enough usable rolls to cover `bits`; surplus leftmost bits are
    retained.  Rolls can optionally be shuffled first.
    """
    if dice_sides < 2:
        raise BTClibValueError(f"invalid dice base: {dice_sides}, must be >= 2")
    bits_per_roll = math.floor(math.log2(dice_sides))
    # base actually used: highest power of 2 not exceeding dice_sides
    base = 2 ** bits_per_roll
    if shuffle:
        secrets.SystemRandom().shuffle(rolls)
    min_roll_number = math.ceil(bits / bits_per_roll)
    acc = 0
    for roll in rolls:
        if 0 < roll <= base:
            # usable roll: fold it in as a big-endian base-`base` digit
            acc = acc * base + (roll - 1)
            min_roll_number -= 1
        elif not 0 < roll <= dice_sides:
            # reject rolls outside [1, dice_sides]
            msg = f"invalid roll: {roll} is not in [1-{dice_sides}]"
            raise BTClibValueError(msg)
    if min_roll_number > 0:
        msg = f"Too few rolls in the usable [1-{base}] range"
        msg += f", missing {min_roll_number} rolls"
        raise BTClibValueError(msg)
    return binstr_from_int(acc, bits)
def randbinstr(
    bits: int, entropy: Optional[BinStr] = None, to_be_hashed: bool = True
) -> BinStr:
    """Return CSPRNG raw entropy XOR-ed with input raw entropy.

    The input raw entropy, if any, is used as initialization value and
    XOR-ed with fresh entropy from the system cryptographically strong
    pseudo-random number generator (CSPRNG); the combined entropy is
    then optionally SHA512-hashed (so `bits` cannot exceed 512 in that
    case).
    """
    if entropy:
        # retain only the leftmost bits if too many were provided
        truncated = entropy[:bits] if len(entropy) > bits else entropy
        i = int(truncated, 2)
        # XOR the input entropy with CSPRNG system entropy
        i ^= secrets.randbits(bits)
    else:
        # no usable input entropy: fall back to pure CSPRNG entropy
        i = secrets.randbits(bits)
    if to_be_hashed:
        max_bits = sha512().digest_size * 8
        if bits > max_bits:
            m = f"Too many bits required: {bits}, max is {max_bits}"
            raise BTClibValueError(m)
        n_bytes = math.ceil(i.bit_length() / 8)
        digest = sha512(i.to_bytes(n_bytes, byteorder="big", signed=False)).digest()
        i = int.from_bytes(digest, byteorder="big", signed=False)
    return binstr_from_int(i, bits)
patched code to avoid pyright "possibly unbound" error
#!/usr/bin/env python3
# Copyright (C) 2017-2020 The btclib developers
#
# This file is part of btclib. It is subject to the license terms in the
# LICENSE file found in the top-level directory of this distribution.
#
# No part of btclib including this file, may be copied, modified, propagated,
# or distributed except according to the terms contained in the LICENSE file.
"""Entropy conversion functions.
Depending on the function, input entropy can be expressed
as raw (i.e. binary 0/1 string), bytes, or integer
and their equivalent representations.
Leading zeros in raw or bytes entropy
are never considered redundant padding.
Output entropy is always raw.
"""
import math
import secrets
from hashlib import sha512
from typing import Iterable, List, Optional, Tuple, Union
from .alias import BinStr, Entropy, Octets
from .exceptions import BTClibTypeError, BTClibValueError
from .utils import bytes_from_octets
_bits = 128, 160, 192, 224, 256, 512
def _indexes_from_entropy(entropy: BinStr, base: int) -> List[int]:
    """Return the digit indexes for the provided raw entropy.

    Return the list of integer indexes into a digit set,
    usually a language word-list,
    for the provided raw (i.e. binary 0/1 string) entropy;
    leading zeros are not considered redundant padding.
    """
    bits = len(entropy)
    int_entropy = int(entropy, 2)
    indexes: List[int] = []
    # repeated divmod yields base-`base` digits, least significant first
    while int_entropy:
        int_entropy, index = divmod(int_entropy, base)
        indexes.append(index)
    # do not lose leading zeros entropy:
    # pad with zero digits up to the expected number of words
    bits_per_digit = int(math.log(base, 2))
    nwords = math.ceil(bits / bits_per_digit)
    while len(indexes) < nwords:
        indexes.append(0)
    # restore big-endian digit order
    return list(reversed(indexes))
def _entropy_from_indexes(indexes: List[int], base: int) -> BinStr:
    """Return the raw entropy from a list of word-list indexes.

    Return the raw (i.e. binary 0/1 string) entropy
    from the provided list of integer indexes into
    a given language word-list.
    """
    entropy = 0
    # indexes are big-endian digits in the given base
    for index in indexes:
        entropy = entropy * base + index
    binentropy = bin(entropy)[2:]  # remove '0b'
    # do not lose leading zeros entropy
    bits_per_digit = int(math.log(base, 2))
    bits = len(indexes) * bits_per_digit
    binentropy = binentropy.zfill(bits)
    return binentropy
OneOrMoreInt = Union[int, Iterable[int]]
def binstr_from_entropy(entr: Entropy, bits: OneOrMoreInt = _bits) -> BinStr:
    """Return raw entropy from the input entropy.

    Input entropy can be expressed as:

    - raw (i.e. binary 0/1 string) entropy
    - bytes (no hex-string, as they would conflict with
      raw entropy representation)
    - integer (int, no string starting with "0b"/"0x")

    In the case of raw entropy and bytes,
    entropy is never padded to satisfy the bit-size requirement;
    instead, integer entropy is front-padded with zeros digits
    as much as necessary to satisfy the bit-size requirement.

    In all cases if more bits than required are provided,
    the leftmost ones are retained.
    Default bit-sizes are 128, 160, 192, 224, 256, or 512 bits.
    """
    # dispatch on input type; note: str must be tested before int
    if isinstance(entr, str):
        return binstr_from_binstr(entr, bits)
    if isinstance(entr, bytes):
        return binstr_from_bytes(entr, bits)
    if isinstance(entr, int):
        return binstr_from_int(entr, bits)
    m = "Entropy must be raw binary 0/1 string, bytes, or int; "
    m += f"not '{type(entr).__name__}'"
    raise BTClibTypeError(m)
def binstr_from_bytes(bytes_entropy: Octets, bits: OneOrMoreInt = _bits) -> BinStr:
    """Return raw entropy from the input Octets entropy.

    Input entropy can be expressed as hex-string or bytes;
    it is never padded to satisfy the bit-size requirement.

    If more bits than required are provided,
    the leftmost ones are retained.
    Default bit-sizes are 128, 160, 192, 224, 256, or 512 bits.
    """
    bytes_entropy = bytes_from_octets(bytes_entropy)
    # if a single int, make it a tuple
    if isinstance(bits, int):
        bits = (bits,)
    # ascending unique sorting of allowed bits
    bits = sorted(set(bits))
    n_bits = len(bytes_entropy) * 8
    # cap at the largest allowed size: surplus leftmost bits are kept
    n_bits = min(n_bits, bits[-1])
    if n_bits not in bits:
        m = f"Wrong number of bits: {n_bits} instead of {bits}"
        raise BTClibValueError(m)
    int_entropy = int.from_bytes(bytes_entropy, byteorder="big", signed=False)
    # only the leftmost bits will be retained
    return binstr_from_int(int_entropy, n_bits)
def binstr_from_int(int_entropy: Union[int, str], bits: OneOrMoreInt = _bits) -> BinStr:
    """Return raw entropy from the input integer entropy.

    Input entropy can be expressed as int
    or string starting with "0x"/"0b";
    it is front-padded with zeros digits
    as much as necessary to satisfy the bit-size requirement.

    If more bits than required are provided,
    the leftmost ones are retained.
    Default bit-sizes are 128, 160, 192, 224, 256, or 512 bits.
    """
    if isinstance(int_entropy, str):
        int_entropy = int_entropy.strip().lower()
        if int_entropy[:2] == "0b":
            int_entropy = int(int_entropy, 2)
        elif int_entropy[:2] == "0x":
            int_entropy = int(int_entropy, 16)
    # a string without a "0b"/"0x" prefix falls through and is rejected here
    if not isinstance(int_entropy, int):
        m = "Entropy must be an int, not "
        m += f"{type(int_entropy).__name__}"
        raise BTClibTypeError(m)
    if int_entropy < 0:
        raise BTClibValueError(f"Negative entropy: {int_entropy}")
    # if a single int, make it a tuple
    if isinstance(bits, int):
        bits = (bits,)
    # ascending unique sorting of allowed bits
    bits = sorted(set(bits))
    # convert to binary string and remove leading '0b'
    bin_str = bin(int_entropy)[2:]
    n_bits = len(bin_str)
    if n_bits > bits[-1]:
        # only the leftmost bits are retained
        return bin_str[: bits[-1]]
    # pad up to the next allowed bit length
    n_bits = next(v for i, v in enumerate(bits) if v >= n_bits)
    return bin_str.zfill(n_bits)
def binstr_from_binstr(str_entropy: str, bits: OneOrMoreInt = _bits) -> BinStr:
    """Return raw entropy from the input raw entropy.

    Input entropy must be expressed as raw entropy;
    it is never padded to satisfy the bit-size requirement.

    If more bits than required are provided,
    the leftmost ones are retained.
    Default bit-sizes are 128, 160, 192, 224, 256, or 512 bits.
    """
    if not isinstance(str_entropy, str):
        m = "Entropy must be a str, not "
        m += f"{type(str_entropy).__name__}"
        raise BTClibTypeError(m)
    # check if it is a valid binary string (raises ValueError if not)
    int(str_entropy, 2)
    # if a single int, make it a tuple
    if isinstance(bits, int):
        bits = (bits,)
    # ascending unique sorting of allowed bits
    bits = sorted(set(bits))
    n_bits = len(str_entropy)
    if n_bits > bits[-1]:
        # only the leftmost bits are retained
        return str_entropy[: bits[-1]]
    if n_bits not in bits:
        m = f"Wrong number of bits: {n_bits} instead of {bits}"
        raise BTClibValueError(m)
    return str_entropy
def collect_rolls(bits: int) -> Tuple[int, List[int]]:
    """Interactively collect dice rolls until `bits` bits of entropy are met.

    Prompts for the number of dice sides (one of _dice_sides), then for
    individual rolls; prefixing the sides with 'a' (or hitting enter for
    'a6') generates the rolls automatically with the secrets CSPRNG.
    Returns the chosen dice_sides and the list of usable rolls.
    """
    # automate/dice_sides initialized before the loop so they are always
    # bound afterwards (avoids a pyright "possibly unbound" diagnostic)
    automate = False
    dice_sides = 0
    _dice_sides = (4, 6, 8, 12, 20, 24, 30, 48, 60, 120)
    # loop until a supported number of sides is chosen
    while dice_sides not in _dice_sides:
        msg = f"{_dice_sides}"
        # drop the closing parenthesis of the tuple repr
        msg = "dice sides " + msg[:-1]
        msg += "; prefix with 'a' to automate rolls, hit enter for 'a6'): "
        dice_sides_str = input(msg)
        dice_sides_str = dice_sides_str.lower()
        if dice_sides_str in ["", "a"]:
            automate = True
            dice_sides = 6
        else:
            automate = False
            if dice_sides_str.startswith("a"):
                automate = True
                dice_sides_str = dice_sides_str[1:]
            try:
                dice_sides = int(dice_sides_str)
            except ValueError:
                # invalid input: stay in the loop and ask again
                dice_sides = 0
    bits_per_roll = math.floor(math.log2(dice_sides))
    # only rolls in [1, base] contribute entropy
    base = 2 ** bits_per_roll
    if not automate:
        print(f"rolls are used only if in 1..{base}")
    rolls: List[int] = []
    min_roll_number = math.ceil(bits / bits_per_roll)
    for i in range(min_roll_number):
        roll = 0
        # re-prompt until a usable roll in [1, base] is obtained
        while not 0 < roll <= base:
            try:
                if automate:
                    roll_str = str(1 + secrets.randbelow(dice_sides))
                else:
                    roll_str = input(f"roll #{i+1}/{min_roll_number}: ")
                roll = int(roll_str)
            except ValueError:
                roll = 0
        rolls.append(roll)
    print(f"collected {min_roll_number} usable D{dice_sides} rolls")
    return dice_sides, rolls
def binstr_from_rolls(
    bits: int, dice_sides: int, rolls: List[int], shuffle: bool = True
) -> BinStr:
    """Return raw entropy from the input dice rolls.

    Dice rolls are represented by integers in the [1-dice_sides] range;
    there must be enough rolls to satisfy the bit-size requirement.

    Only rolls having value in the [1-base] range are used,
    with base being the highest power of 2 that is lower than the
    dice_sides (e.g. for a traditional D6 dice, only rolls having value
    in [1-4] are used; for a D20 dice, only rolls having value in
    [1-16] are used; etc.). Rolls can also be shuffled.

    If more bits than required are provided,
    the leftmost ones are retained.
    """
    if dice_sides < 2:
        raise BTClibValueError(f"invalid dice base: {dice_sides}, must be >= 2")
    bits_per_roll = math.floor(math.log2(dice_sides))
    # used base: highest power of 2 not exceeding dice_sides
    base = 2 ** bits_per_roll
    if shuffle:
        secrets.SystemRandom().shuffle(rolls)
    min_roll_number = math.ceil(bits / bits_per_roll)
    i = 0
    for roll in rolls:
        # collect only usable rolls in [1-base]
        if 0 < roll <= base:
            # fold in as a big-endian base-`base` digit
            i *= base
            i += roll - 1
            min_roll_number -= 1
        # reject invalid rolls not in [1-dice_sides]
        elif not 0 < roll <= dice_sides:
            msg = f"invalid roll: {roll} is not in [1-{dice_sides}]"
            raise BTClibValueError(msg)
    if min_roll_number > 0:
        msg = f"Too few rolls in the usable [1-{base}] range"
        msg += f", missing {min_roll_number} rolls"
        raise BTClibValueError(msg)
    return binstr_from_int(i, bits)
def randbinstr(
    bits: int, entropy: Optional[BinStr] = None, to_be_hashed: bool = True
) -> BinStr:
    """Return CSPRNG raw entropy XOR-ed with input raw entropy.

    The input raw entropy is used as initialization value;
    if not provided, then entropy is generated with the system
    cryptographically strong pseudo-random number generator (CSPRNG).

    Then, this entropy is:

    - XOR-ed with CSPRNG system entropy
    - possibly hashed (if requested; SHA512 limits `bits` to 512)
    """
    if entropy is None or entropy == "":
        # no input entropy: use pure CSPRNG entropy
        i = secrets.randbits(bits)
    else:
        if len(entropy) > bits:
            # only the leftmost bits are retained
            entropy = entropy[:bits]
        i = int(entropy, 2)
        # XOR the current entropy with CSPRNG system entropy
        i ^= secrets.randbits(bits)
    # hash the current entropy
    if to_be_hashed:
        hf = sha512()
        max_bits = hf.digest_size * 8
        if bits > max_bits:
            m = f"Too many bits required: {bits}, max is {max_bits}"
            raise BTClibValueError(m)
        n_bytes = math.ceil(i.bit_length() / 8)
        h512 = sha512(i.to_bytes(n_bytes, byteorder="big", signed=False)).digest()
        i = int.from_bytes(h512, byteorder="big", signed=False)
    return binstr_from_int(i, bits)
|
#!/usr/bin/env python
# (C) 2004 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
# not this notice.
# (2) Reproduced in the COPYING file, and at:
# http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: kamaelia-list-owner@lists.sourceforge.net
# to discuss alternative licensing.
# -------------------------------------------------------------------------
"""\
=======================
Generic Topology Viewer
=======================
Pygame based display of graph topologies. A simple physics model assists with
visual layout. Rendering and physics laws can be customised for specific
applications.
Example Usage
-------------
A simple console driven topology viewer::
Pipeline( ConsoleReader(),
lines_to_tokenlists(),
TopologyViewer(),
).run()
Then at runtime try typing these commands to change the topology in real time::
>>> DEL ALL
>>> ADD NODE 1 "1st node" randompos -
>>> ADD NODE 2 "2nd node" randompos -
>>> ADD NODE 3 "3rd node" randompos -
>>> ADD LINK 1 2
>>> ADD LINK 3 2
>>> DEL LINK 1 2
>>> DEL NODE 1
User Interface
--------------
TopologyViewer manifests as a pygame display surface. As it is sent
topology information nodes and links between them will appear.
You can click a node with the mouse to select it. Depending on the application,
this may display additional data or, if integrated into another app, have some
other effect.
Click and drag with the left mouse button to move nodes around. Note that a
simple physics model of repulsion and attraction forces is always active. This
causes nodes to move around to help make it visually clearer, however you may
still need to drag nodes about to tidy it up.
The surface on which the nodes appear is notionally infinite. Scroll around
using the arrow keys.
Press the 'f' key to toggle between windowed and fullscreen modes.
How does it work?
-----------------
TopologyViewer is a specialisation of the Kamaelia.UI.MH.PyGameApp
component. See documentation for that component to understand how it obtains
and handles events for a pygame display surface.
A topology (graph) of nodes and links between them is rendered to the surface.
You can specify an initial topology by providing a list of instantiated
particles and another list of pairs of those particles to show how they are
linked.
TopologyViewer responds to commands arriving at its "inbox" inbox
instructing it on how to change the topology. A command is a list/tuple.
Commands recognised are:
[ "ADD", "NODE", <id>, <name>, <posSpec>, <particle type> ]
Add a node, using:
- id -- a unique ID used to refer to the particle in other topology commands. Cannot be None.
- name -- string name label for the particle
- posSpec -- string describing initial x,y (see _generateXY)
- particleType -- particle type (default provided is "-", unless custom types are provided - see below)
[ "DEL", "NODE", <id> ]
Remove a node (also removes all links to and from it)
[ "ADD", "LINK", <id from>, <id to> ]
Add a link, directional from fromID to toID
[ "DEL", "LINK", <id from>, <id to> ]
Remove a link, directional from fromID to toID
[ "DEL", "ALL" ]
Clears all nodes and links
[ "GET", "ALL" ]
Outputs the current topology as a list of commands, just like
those used to build it. The list begins with a 'DEL ALL'.
[ "UPDATE_NAME", "NODE", <id>, <new name> ]
If the node does not already exist, this does NOT cause it to be created.
[ "GET_NAME", "NODE", <id> ]
Returns UPDATE_NAME NODE message for the specified node
Commands are processed immediately, in the order in which they arrive. You
therefore cannot refer to a node or linkage that has not yet been created, or
that has already been destroyed.
If a stream of commands arrives in quick succession, rendering and physics will
be temporarily stopped, so commands can be processed more quickly. This is
necessary because when there is a large number of particles, physics and
rendering starts to take a long time, and will therefore bottleneck the
handling of commands.
However, there is a 1 second timeout, so at least one update of the visual
output is guaranteed per second.
TopologyViewer sends any output to its "outbox" outbox in the same
list/tuple format as used for commands sent to its "inbox" inbox. The following
may be output:
[ "SELECT", "NODE", <id> ]
Notification that a given node has been selected.
[ "SELECT", "NODE", None ]
Notification that *no node* is now selected.
[ "ERROR", <error string> ]
Notification of errors - eg. unrecognised commands arriving at the
"inbox" inbox
[ "TOPOLOGY", <topology command list> ]
List of commands needed to build the topology, as it currently stands.
The list will start with a ("DEL","ALL") command.
This is sent in response to receiving a ("GET","ALL") command.
If a shutdownMicroprocess or producerFinished message is received on this
component's "control" inbox this it will pass it on out of its "signal" outbox
and immediately terminate.
NOTE: Termination is currently rather cludgy - it raises an exception which
will cause the rest of a kamaelia system to halt. Do not rely on this behaviour
as it will be changed to provide cleaner termination at some point.
Customising the topology viewer
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can customise:
- the 'types' of particles (nodes)
- visual appearance of particles (nodes) and the links between them;
- the physics laws used to assist with layout
- extra visual furniture to be rendered
For example, see Kamaelia.Visualisation.Axon.AxonVisualiserServer. This
component uses two types of particle - to represent components and
inboxes/outboxes. Each has a different visual appearance, and the laws acting
between them differ depending on which particle types are involved in the
interaction.
Use the particleTypes argument of the initialiser to specify classes that
should be instantiated to render each type of particle (nodes). particleTypes
should be a dictionary mapping names for particle types to the respective
classes, for example::
{ "major" : BigParticle, "minor" : SmallParticle }
See below for information on how to write your own particle classes.
Layout of the nodes on the surface is assisted by a physics model, provided
by an instance of the Kamaelia.Support.Particles.ParticleSystem class.
Customise the laws used for each particle type by providing a
Kamaelia.Physics.Simple.MultipleLaws object at initialisation.
Writing your own particle class
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
should inherit from Kamaelia.Support.Particles.Particle and implement the following
methods (for rendering purposes):
setOffset( (left,top) )
Notification that the surface has been scrolled by the user. Particles
should adjust the coordinates at which they render. For example, a
particle at (x, y) should be rendered at (x-left, y-top). You can
assume, until setOffset(...) is called, that (left,top) is (0,0).
select()
Called to inform the particle that it is selected (has been clicked on)
deselect()
Called to inform the particle that is has been deselected.
render(surface) -> generator
Called to get a generator for multi-pass rendering of the particle (see
below)
The coordinates of the particle are updated automatically both due to mouse
dragging and due to the physics model. See Kamaelia.Support.Particles.Particle for
more information.
The render(...) method should return a generator that will render the particle
itself and its links/bonds to other particles.
Rendering by the TopologyViewer is multi-pass. This is done so that
irrespective of the order in which particles are chosen to be rendered,
things that need to be rendered before (underneath) other things can be done
consistently.
The generator should yield the number of the rendering pass it wishes to be
next called on. Each time it is subsequently called, it should perform the
rendering required for that pass. It then yields the number of the next required
pass or completes if there is no more rendering required. Passes go in
ascending numerical order.
For example, Kamaelia.Visualisation.PhysicsGraph.RenderingParticle renders in
two passes::
def render(self, surface):
yield 1
# render lines for bonds *from* this particle *to* others
yield 2
# render a blob and the name label for the particle
...in this case it ensures that the blobs for the particles always appear
on top of the lines representing the bonds between them.
Note that rendering passes must be coded in ascending order, but the numbering
can otherwise be arbitrary: The first pass can be any value you like; subsequent
passes can also be any value, provided it is higher.
When writing rendering code for particle(s), make sure they all agree on who
should render what. It is inefficient if all bonds are being rendered twice.
For example, RenderingParticle only renders links *from* that particle *to*
another, but not in another direction.
"""
import random
import time
import re
import sys
import pygame
import Axon
import Kamaelia.Support.Particles
import Kamaelia.UI
from Kamaelia.Visualisation.PhysicsGraph.GridRenderer import GridRenderer
from Kamaelia.Visualisation.PhysicsGraph.ParticleDragger import ParticleDragger
from Kamaelia.Visualisation.PhysicsGraph.RenderingParticle import RenderingParticle
class TopologyViewer(Kamaelia.UI.MH.PyGameApp,Axon.Component.component):
    """\
    TopologyViewer(...) -> new TopologyViewer component.

    A component that takes incoming topology (change) data and displays it live
    using pygame. A simple physics model assists with visual layout. Particle
    types, appearance and physics interactions can be customised.

    Keyword arguments (in order):

    - screensize         -- (width,height) of the display area (default = (800,600))
    - fullscreen         -- True to start up in fullscreen mode (default = False)
    - caption            -- Caption for the pygame window (default = "Topology Viewer")
    - particleTypes      -- dict("type" -> klass) mapping types of particle to classes used to render them (default = {"-":RenderingParticle})
    - initialTopology    -- (nodes,bonds) where bonds=list((src,dst)) starting state for the topology (default=([],[]))
    - laws               -- Physics laws to apply between particles (default = SimpleLaws(bondlength=100))
    - simCyclesPerRedraw -- number of physics sim cycles to run between each redraw (default=1)
    - border             -- Minimum distance from edge of display area that new particles appear (default=100)
    - extraDrawing       -- Optional extra object to be rendered (default=None)
    - showGrid           -- False, or True to show gridlines (default=True)
    - transparency       -- None, or (r,g,b) colour to make transparent
    - position           -- None, or (left,top) position for surface within pygame window
    """

    # NOTE(review): "recieve" typo below is in a runtime string (inbox
    # description) - left unchanged to preserve behaviour byte-for-byte.
    Inboxes = { "inbox"          : "Topology (change) data describing an Axon system",
                "control"        : "Shutdown signalling",
                "alphacontrol"   : "Alpha (transparency) of the image (value 0..255)",
                "events"         : "Place where we recieve events from the outside world",
                "displaycontrol" : "Replies from Pygame Display service",
              }
    Outboxes = { "signal"        : "NOT USED",
                 "outbox"        : "Notification and topology output",
                 "displaysignal" : "Requests to Pygame Display service",
               }

    def __init__(self, screensize = (800,600),
                       fullscreen = False,
                       caption = "Topology Viewer",
                       particleTypes = None,
                       initialTopology = None,
                       laws = None,
                       simCyclesPerRedraw = None,
                       border = 100,
                       extraDrawing = None,
                       showGrid = True,
                       transparency = None,
                       position = None):
        """x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
        super(TopologyViewer, self).__init__(screensize, caption, fullscreen, transparency=transparency, position=position)
        self.border = border

        # NOTE(review): some newer pygame builds reportedly crash if the mixer
        # is shut down twice (once here, once by the display service) - verify
        # this call is still needed.
        pygame.mixer.quit()

        if particleTypes == None:
            # default: a single generic particle type, keyed "-"
            self.particleTypes = {"-":RenderingParticle}
        else:
            self.particleTypes = particleTypes

        if initialTopology == None:
            initialTopology = ([],[])          # (no nodes, no bonds)
        self.initialNodes = list(initialTopology[0])
        self.initialBonds = list(initialTopology[1])

        if laws==None:
            self.laws = Kamaelia.Support.Particles.SimpleLaws(bondLength=100)
        else:
            self.laws = laws

        if simCyclesPerRedraw==None:
            self.simCyclesPerRedraw = 1
        else:
            self.simCyclesPerRedraw = simCyclesPerRedraw

        # non-particle items rendered every frame (gridlines, extra drawings)
        self.graphicalFurniture = []
        if showGrid:
            self.graphicalFurniture.append( GridRenderer(self.laws.maxInteractRadius, (200,200,200)) )
        if extraDrawing != None:
            self.graphicalFurniture.append(extraDrawing)

        self.biggestRadius = 0        # radius of the largest particle added so far
        # NOTE(review): selectedParticle appears unused in this class; the
        # selection mechanism uses self.selected (see selectParticle()).
        self.selectedParticle = None

        # view scroll offset (left,top) and current scroll velocity (dleft,dtop)
        # in pixels per frame; velocity is driven by the arrow keys
        self.left = 0
        self.top = 0
        self.dleft = 0
        self.dtop = 0

        self.lastIdleTime = time.time()  # time of last redraw (see mainLoop throttling)
        self.selected = None             # currently selected particle, or None

    def initialiseComponent(self):
        """One-off setup: register pygame event handlers and build the initial topology."""
        # mouse clicks start a drag (which also performs selection);
        # the keyboard drives scrolling / fullscreen toggling / quitting
        self.addHandler(pygame.MOUSEBUTTONDOWN, lambda event,self=self: ParticleDragger.handle(event,self))
        self.addHandler(pygame.KEYDOWN, self.keyDownHandler)
        self.addHandler(pygame.KEYUP, self.keyUpHandler)

        # physics simulation starts empty; initial nodes/bonds are added below
        self.physics = Kamaelia.Support.Particles.ParticleSystem(self.laws, [], 0)

        for node in self.initialNodes:
            self.addParticle(*node)
        for source,dest in self.initialBonds:
            self.makeBond(source, dest)
        return 1

    def mainLoop(self):
        """\
        Main loop.

        Processes commands from "inbox" inbox, runs physics simulation, then renders display

        FIXME: This is massively broken, this component overrides initialiseComponent,
        and also has a main *AND* has a mainLoop.
        """
        # process incoming messages
        if self.dataReady("inbox"):
            message = self.recv("inbox")
            self.doCommand(message)
        else:
            # no pending commands: zero the timestamp so the redraw branch
            # below fires on this cycle. While commands ARE streaming in,
            # lastIdleTime holds the time of the last redraw, throttling
            # rendering to at most once per second (see module docstring).
            self.lastIdleTime = 0

        if self.dataReady("alphacontrol"):
            alpha = self.recv("alphacontrol")
            self.screen.set_alpha(alpha)

        if self.lastIdleTime + 1.0 < time.time():
            self.physics.run(self.simCyclesPerRedraw)

            # draw the background
            self.screen.fill( (255,255,255) )

            # scroll, if scrolling is active, increasing velocity over time
            if self.dleft != 0 or self.dtop != 0:
                self.scroll( (self.dleft, self.dtop) )
                # accelerate by 1 pixel/frame in the direction of travel;
                # abs(x)/x extracts the sign (only evaluated when non-zero)
                if self.dleft:
                    self.dleft = self.dleft + 1 * abs(self.dleft)/self.dleft
                if self.dtop:
                    self.dtop = self.dtop + 1 * abs(self.dtop)/self.dtop

            self.render()
            self.flip = True
            self.lastIdleTime = time.time()
        else:
            self.flip = False

        if self.dataReady("control"):
            msg = self.recv("control")
            if isinstance(msg, Axon.Ipc.producerFinished) or isinstance(msg, Axon.Ipc.shutdownMicroprocess):
                # pass the shutdown message on, then terminate
                # (quit() raises - see its FIXME note)
                self.send(msg, "signal")
                self.quit()

        return 1

    def render(self):
        """Render elements to self.screen"""
        # rendering is done in multiple passes
        # renderPasses is a dictionary of pass-number -> list of 'render' generators
        # each render generator yields the next pass number on which it wishes to be called
        renderPasses = {}

        # do the first pass - filling the renderPasses dictionary with rendering
        # generators from all particles, and also the extra furniture rendering
        for p in self.graphicalFurniture + self.physics.particles:
            r = p.render(self.screen)
            if r != None:
                try:
                    n = r.next()       # Python 2 generator protocol (.next() method)
                    try:
                        renderPasses[n].append(r)
                    except KeyError:
                        renderPasses[n] = [r]
                except StopIteration:
                    pass               # generator had nothing to render at all

        # keep going through, extracting the lowest render pass number in the dictionary and
        # processing generators listed in it, until the renderPasses dictionary is empty
        while renderPasses:
            nextPass = reduce( min, renderPasses.keys() )   # equivalent to min(renderPasses.keys())
            for r in renderPasses.pop(nextPass):
                try:
                    n = r.next()       # must yield a pass number > nextPass (passes ascend)
                    try:
                        renderPasses[n].append(r)
                    except KeyError:
                        renderPasses[n] = [r]
                except StopIteration:
                    pass               # this generator has finished rendering

    def keyDownHandler(self, event):
        """Handle keypresses:
        ESCAPE, Q : quits
        F : toggles fullscreen mode
        arrows : scroll the view
        """
        if event.key==pygame.K_ESCAPE or event.key==pygame.K_q:
            self.quit()
        elif event.key==pygame.K_f:
            pygame.display.toggle_fullscreen()
        # arrow keys set a scroll velocity; mainLoop accelerates it each frame
        # and keyUpHandler zeroes it on release
        elif event.key == pygame.K_UP:
            self.dtop = -4
        elif event.key == pygame.K_DOWN:
            self.dtop = +4
        elif event.key == pygame.K_LEFT:
            self.dleft = -4
        elif event.key == pygame.K_RIGHT:
            self.dleft = +4

    def keyUpHandler(self, event):
        """Handle releases of keys: stop scrolling on arrow-key release."""
        if event.key == pygame.K_UP:
            self.dtop = 0
        elif event.key == pygame.K_DOWN:
            self.dtop = 0
        elif event.key == pygame.K_LEFT:
            self.dleft = 0
        elif event.key == pygame.K_RIGHT:
            self.dleft = 0

    def doCommand(self, msg):
        """\
        Processes a topology command tuple:
        [ "ADD", "NODE", <id>, <name>, <positionSpec>, <particle type> ]
        [ "DEL", "NODE", <id> ]
        [ "ADD", "LINK", <id from>, <id to> ]
        [ "DEL", "LINK", <id from>, <id to> ]
        [ "DEL", "ALL" ]
        [ "GET", "ALL" ]
        """
        try:
            if len(msg) >= 2:
                cmd = msg[0].upper(), msg[1].upper()   # commands are case-insensitive

                if cmd == ("ADD", "NODE") and len(msg) == 6:
                    # unknown particle types are silently ignored
                    if self.particleTypes.has_key(msg[5]):   # dict.has_key: Python 2 only
                        ptype = self.particleTypes[msg[5]]
                        id    = msg[2]
                        name  = msg[3]
                        posSpec = msg[4]
                        pos     = self._generateXY(posSpec)
                        particle = ptype(position = pos, ID=id, name=name)
                        particle.originaltype = msg[5]   # remembered so getTopology() can report it
                        self.addParticle(particle)
                elif cmd == ("DEL", "NODE") and len(msg) == 3:
                    id = msg[2]
                    self.removeParticle(id)
                elif cmd == ("ADD", "LINK") and len(msg) == 4:
                    src = msg[2]
                    dst = msg[3]
                    self.makeBond(src, dst)
                elif cmd == ("DEL", "LINK") and len(msg) == 4:
                    src = msg[2]
                    dst = msg[3]
                    self.breakBond(src, dst)
                elif cmd == ("DEL", "ALL") and len(msg) == 2:
                    self.removeParticle(*self.physics.particleDict.keys())
                elif cmd == ("GET", "ALL") and len(msg) == 2:
                    # reply with the command list that would rebuild the topology
                    topology = [("DEL","ALL")]
                    topology.extend(self.getTopology())
                    self.send( ("TOPOLOGY", topology), "outbox" )
                elif cmd == ("UPDATE_NAME", "NODE") and len(msg) == 4:
                    node_id = msg[2]
                    new_name = msg[3]
                    self.updateParticleLabel(node_id, new_name)
                elif cmd == ("GET_NAME", "NODE") and len(msg) == 3:
                    node_id = msg[2]
                    name = self.getParticleLabel(node_id)
                    self.send( ("UPDATE_NAME", "NODE", node_id, name), "outbox" )
                else:
                    # NOTE(review): string exceptions are illegal from Python 2.6
                    # onwards (raising one raises TypeError instead); either way
                    # the bare except below converts it into an "ERROR" message.
                    raise "Command Error"
            else:
                raise "Command Error"
        except:
            # report *any* failure (including malformed commands) out of
            # "outbox" rather than letting it crash the component
            import traceback
            errmsg = reduce(lambda a,b: a+b, traceback.format_exception(*sys.exc_info()) )
            # NOTE(review): "resason" typo is in a runtime (user-facing) string - left unchanged
            self.send( ("ERROR", "Error processing message : "+str(msg) + " resason:\n"+errmsg), "outbox")

    def updateParticleLabel(self, node_id, new_name):
        """\
        updateParticleLabel(node_id, new_name) -> updates the given nodes name & visual label if it exists

        node_id - an id for an already existing node
        new_name - a string (may include spaces) defining the new node name

        Silently does nothing if no particle has the given id.
        """
        for p in self.physics.particles:
            if p.ID == node_id:
                p.set_label(new_name)
                return

    def getParticleLabel(self, node_id):
        """\
        getParticleLabel(node_id) -> particle's name

        Returns the name/label of the specified particle.
        Returns None (implicitly) if no particle has the given id.
        """
        for p in self.physics.particles:
            if p.ID == node_id:
                return p.name

    def _generateXY(self, posSpec):
        """\
        generateXY(posSpec) -> (x,y) or raises ValueError

        posSpec == "randompos" or "auto" -> random (x,y) within the surface (specified border distance in from the edge)
        posSpec == "(XXX,YYY)" -> specified x,y (positive or negative integers)
        """
        posSpec = posSpec.lower()
        if posSpec == "randompos" or posSpec == "auto" :
            # random position at least self.border pixels in from every edge,
            # offset by the current scroll position so it lands in view
            x = self.left + random.randrange(self.border,self.screensize[0]-self.border,1)
            y = self.top + random.randrange(self.border,self.screensize[1]-self.border,1)
            return x,y
        else:
            match = re.match("^([+-]?\d+),([+-]?\d+)$", posSpec)
            if match:
                x = int(match.group(1))
                y = int(match.group(2))
                return x,y
        raise ValueError("Unrecognised position specification")

    def addParticle(self, *particles):
        """Add particles to the system"""
        for p in particles:
            # track the largest radius seen (used elsewhere for hit-testing margins)
            if p.radius > self.biggestRadius:
                self.biggestRadius = p.radius
            # tell the new particle about the current scroll offset
            p.setOffset( (self.left, self.top) )
        self.physics.add( *particles )

    def removeParticle(self, *ids):
        """\
        Remove particle(s) specified by their ids.

        Also breaks any bonds to/from that particle, and deselects it if
        it was the currently selected particle.
        """
        for id in ids:
            self.physics.particleDict[id].breakAllBonds()
            if self.selected == self.physics.particleDict[id]:
                self.selectParticle(None)
        self.physics.removeByID(*ids)

    def makeBond(self, source, dest):
        """Make a bond from source to destination particle, specified by IDs"""
        self.physics.particleDict[source].makeBond(self.physics.particleDict, dest)

    def breakBond(self, source, dest):
        """Break a bond from source to destination particle, specified by IDs"""
        self.physics.particleDict[source].breakBond(self.physics.particleDict, dest)

    def getTopology(self):
        """getTopology() -> list of command tuples that would build the current topology"""
        topology = []

        # first, enumerate the particles
        for particle in self.physics.particles:
            # NOTE(review): posSpec "random" is emitted here, but _generateXY()
            # only accepts "randompos" or "auto" - replaying these commands
            # through doCommand would fail. Verify intended behaviour.
            topology.append( ( "ADD","NODE",
                               particle.ID,
                               particle.name,
                               "random",
                               particle.originaltype
                           ) )

        # now enumerate the linkages
        for particle in self.physics.particles:
            for dst in particle.getBondedTo():
                topology.append( ( "ADD","LINK", particle.ID, dst.ID ) )

        return topology

    def quit(self, event=None):
        """Cause termination."""
        super(TopologyViewer,self).quit(event)
        # NOTE(review): string exceptions are illegal from Python >= 2.6; this
        # raises TypeError there instead - still halts the system, but the
        # "QUITTING" marker is lost.
        raise "QUITTING" ### XXX FIXME : need better shutdown than this!

    def scroll( self, (dx, dy) ):   # Python 2 only: tuple parameter unpacking
        """Scroll the contents being displayed on the surface by (dx,dy) left and up."""
        self.left += dx
        self.top += dy
        # notify everything that renders of the new offset
        for e in self.graphicalFurniture + self.physics.particles:
            e.setOffset( (self.left, self.top) )

    def selectParticle(self, particle):
        """\
        Select the specified particle (pass None to deselect).

        Emits ("SELECT","NODE",<id or None>) on "outbox" whenever the
        selection actually changes.
        """
        if self.selected != particle:
            if self.selected != None:
                self.selected.deselect()

            self.selected = particle
            nodeid = None
            if self.selected != None:
                self.selected.select()
                nodeid = self.selected.ID
            self.send( ("SELECT","NODE", nodeid), "outbox" )
# Components exported by this module (used by Kamaelia's component introspection).
__kamaelia_components__ = ( TopologyViewer, )
Fixed problem in startup. Since this now uses Pygame.Display and that
calls pygame.mixer.quit, this doesn't need to. In practical terms in
some versions of pygame this isn't a problem. In recent versions (such
as in OpenSuSE 10.2) this causes pygame to die a horrible death since
it looks like it frees the same memory twice.
Code changed to not do this :-)
Michael.
#!/usr/bin/env python
# (C) 2004 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
# not this notice.
# (2) Reproduced in the COPYING file, and at:
# http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: kamaelia-list-owner@lists.sourceforge.net
# to discuss alternative licensing.
# -------------------------------------------------------------------------
"""\
=======================
Generic Topology Viewer
=======================
Pygame based display of graph topologies. A simple physics model assists with
visual layout. Rendering and physics laws can be customised for specific
applications.
Example Usage
-------------
A simple console driven topology viewer::
Pipeline( ConsoleReader(),
lines_to_tokenlists(),
TopologyViewer(),
).run()
Then at runtime try typing these commands to change the topology in real time::
>>> DEL ALL
>>> ADD NODE 1 "1st node" randompos -
>>> ADD NODE 2 "2nd node" randompos -
>>> ADD NODE 3 "3rd node" randompos -
>>> ADD LINK 1 2
>>> ADD LINK 3 2
>>> DEL LINK 1 2
>>> DEL NODE 1
User Interface
--------------
TopologyViewer manifests as a pygame display surface. As it is sent
topology information nodes and links between them will appear.
You can click a node with the mouse to select it. Depending on the application,
this may display additional data or, if integrated into another app, have some
other effect.
Click and drag with the left mouse button to move nodes around. Note that a
simple physics model of repulsion and attraction forces is always active. This
causes nodes to move around to help make it visually clearer, however you may
still need to drag nodes about to tidy it up.
The surface on which the nodes appear is notionally infinite. Scroll around
using the arrow keys.
Press the 'f' key to toggle between windowed and fullscreen modes.
How does it work?
-----------------
TopologyViewer is a specialisation of the Kamaelia.UI.MH.PyGameApp
component. See documentation for that component to understand how it obtains
and handles events for a pygame display surface.
A topology (graph) of nodes and links between them is rendered to the surface.
You can specify an initial topology by providing a list of instantiated
particles and another list of pairs of those particles to show how they are
linked.
TopologyViewer responds to commands arriving at its "inbox" inbox
instructing it on how to change the topology. A command is a list/tuple.
Commands recognised are:
[ "ADD", "NODE", <id>, <name>, <posSpec>, <particle type> ]
Add a node, using:
- id -- a unique ID used to refer to the particle in other topology commands. Cannot be None.
- name -- string name label for the particle
- posSpec -- string describing initial x,y (see _generateXY)
- particleType -- particle type (default provided is "-", unless custom types are provided - see below)
[ "DEL", "NODE", <id> ]
Remove a node (also removes all links to and from it)
[ "ADD", "LINK", <id from>, <id to> ]
Add a link, directional from fromID to toID
[ "DEL", "LINK", <id from>, <id to> ]
Remove a link, directional from fromID to toID
[ "DEL", "ALL" ]
Clears all nodes and links
[ "GET", "ALL" ]
Outputs the current topology as a list of commands, just like
those used to build it. The list begins with a 'DEL ALL'.
[ "UPDATE_NAME", "NODE", <id>, <new name> ]
If the node does not already exist, this does NOT cause it to be created.
[ "GET_NAME", "NODE", <id> ]
Returns UPDATE_NAME NODE message for the specified node
Commands are processed immediately, in the order in which they arrive. You
therefore cannot refer to a node or linkage that has not yet been created, or
that has already been destroyed.
If a stream of commands arrives in quick succession, rendering and physics will
be temporarily stopped, so commands can be processed more quickly. This is
necessary because when there is a large number of particles, physics and
rendering starts to take a long time, and will therefore bottleneck the
handling of commands.
However, there is a 1 second timeout, so at least one update of the visual
output is guaranteed per second.
TopologyViewer sends any output to its "outbox" outbox in the same
list/tuple format as used for commands sent to its "inbox" inbox. The following
may be output:
[ "SELECT", "NODE", <id> ]
Notification that a given node has been selected.
[ "SELECT", "NODE", None ]
Notification that *no node* is now selected.
[ "ERROR", <error string> ]
Notification of errors - eg. unrecognised commands arriving at the
"inbox" inbox
[ "TOPOLOGY", <topology command list> ]
List of commands needed to build the topology, as it currently stands.
The list will start with a ("DEL","ALL") command.
This is sent in response to receiving a ("GET","ALL") command.
If a shutdownMicroprocess or producerFinished message is received on this
component's "control" inbox this it will pass it on out of its "signal" outbox
and immediately terminate.
NOTE: Termination is currently rather cludgy - it raises an exception which
will cause the rest of a kamaelia system to halt. Do not rely on this behaviour
as it will be changed to provide cleaner termination at some point.
Customising the topology viewer
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can customise:
- the 'types' of particles (nodes)
- visual appearance of particles (nodes) and the links between them;
- the physics laws used to assist with layout
- extra visual furniture to be rendered
For example, see Kamaelia.Visualisation.Axon.AxonVisualiserServer. This
component uses two types of particle - to represent components and
inboxes/outboxes. Each has a different visual appearance, and the laws acting
between them differ depending on which particle types are involved in the
interaction.
Use the particleTypes argument of the initialiser to specify classes that
should be instantiated to render each type of particle (nodes). particleTypes
should be a dictionary mapping names for particle types to the respective
classes, for example::
{ "major" : BigParticle, "minor" : SmallParticle }
See below for information on how to write your own particle classes.
Layout of the nodes on the surface is assisted by a physics model, provided
by an instance of the Kamaelia.Support.Particles.ParticleSystem class.
Customise the laws used for each particle type by providing a
Kamaelia.Physics.Simple.MultipleLaws object at initialisation.
Writing your own particle class
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
should inherit from Kamaelia.Support.Particles.Particle and implement the following
methods (for rendering purposes):
setOffset( (left,top) )
Notification that the surface has been scrolled by the user. Particles
should adjust the coordinates at which they render. For example, a
particle at (x, y) should be rendered at (x-left, y-top). You can
assume, until setOffset(...) is called, that (left,top) is (0,0).
select()
Called to inform the particle that it is selected (has been clicked on)
deselect()
Called to inform the particle that is has been deselected.
render(surface) -> generator
Called to get a generator for multi-pass rendering of the particle (see
below)
The coordinates of the particle are updated automatically both due to mouse
dragging and due to the physics model. See Kamaelia.Support.Particles.Particle for
more information.
The render(...) method should return a generator that will render the particle
itself and its links/bonds to other particles.
Rendering by the TopologyViewer is multi-pass. This is done so that
irrespective of the order in which particles are chosen to be rendered,
things that need to be rendered before (underneath) other things can be done
consistently.
The generator should yield the number of the rendering pass it wishes to be
next called on. Each time it is subsequently called, it should perform the
rendering required for that pass. It then yields the number of the next required
pass or completes if there is no more rendering required. Passes go in
ascending numerical order.
For example, Kamaelia.Visualisation.PhysicsGraph.RenderingParticle renders in
two passes::
def render(self, surface):
yield 1
# render lines for bonds *from* this particle *to* others
yield 2
# render a blob and the name label for the particle
...in this case it ensures that the blobs for the particles always appear
on top of the lines representing the bonds between them.
Note that rendering passes must be coded in ascending order, but the numbering
can otherwise be arbitrary: The first pass can be any value you like; subsequent
passes can also be any value, provided it is higher.
When writing rendering code for particle(s), make sure they all agree on who
should render what. It is inefficient if all bonds are being rendered twice.
For example, RenderingParticle only renders links *from* that particle *to*
another, but not in another direction.
"""
import random
import time
import re
import sys
import pygame
import Axon
import Kamaelia.Support.Particles
import Kamaelia.UI
from Kamaelia.Visualisation.PhysicsGraph.GridRenderer import GridRenderer
from Kamaelia.Visualisation.PhysicsGraph.ParticleDragger import ParticleDragger
from Kamaelia.Visualisation.PhysicsGraph.RenderingParticle import RenderingParticle
class TopologyViewer(Kamaelia.UI.MH.PyGameApp,Axon.Component.component):
"""\
TopologyViewer(...) -> new TopologyViewer component.
A component that takes incoming topology (change) data and displays it live
using pygame. A simple physics model assists with visual layout. Particle
types, appearance and physics interactions can be customised.
Keyword arguments (in order):
- screensize -- (width,height) of the display area (default = (800,600))
- fullscreen -- True to start up in fullscreen mode (default = False)
- caption -- Caption for the pygame window (default = "Topology Viewer")
- particleTypes -- dict("type" -> klass) mapping types of particle to classes used to render them (default = {"-":RenderingParticle})
- initialTopology -- (nodes,bonds) where bonds=list((src,dst)) starting state for the topology (default=([],[]))
- laws -- Physics laws to apply between particles (default = SimpleLaws(bondlength=100))
- simCyclesPerRedraw -- number of physics sim cycles to run between each redraw (default=1)
- border -- Minimum distance from edge of display area that new particles appear (default=100)
- extraDrawing -- Optional extra object to be rendered (default=None)
- showGrid -- False, or True to show gridlines (default=True)
- transparency -- None, or (r,g,b) colour to make transparent
- position -- None, or (left,top) position for surface within pygame window
"""
Inboxes = { "inbox" : "Topology (change) data describing an Axon system",
"control" : "Shutdown signalling",
"alphacontrol" : "Alpha (transparency) of the image (value 0..255)",
"events" : "Place where we recieve events from the outside world",
"displaycontrol" : "Replies from Pygame Display service",
}
Outboxes = { "signal" : "NOT USED",
"outbox" : "Notification and topology output",
"displaysignal" : "Requests to Pygame Display service",
}
def __init__(self, screensize = (800,600),
fullscreen = False,
caption = "Topology Viewer",
particleTypes = None,
initialTopology = None,
laws = None,
simCyclesPerRedraw = None,
border = 100,
extraDrawing = None,
showGrid = True,
transparency = None,
position = None):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
super(TopologyViewer, self).__init__(screensize, caption, fullscreen, transparency=transparency, position=position)
self.border = border
if particleTypes == None:
self.particleTypes = {"-":RenderingParticle}
else:
self.particleTypes = particleTypes
if initialTopology == None:
initialTopology = ([],[])
self.initialNodes = list(initialTopology[0])
self.initialBonds = list(initialTopology[1])
if laws==None:
self.laws = Kamaelia.Support.Particles.SimpleLaws(bondLength=100)
else:
self.laws = laws
if simCyclesPerRedraw==None:
self.simCyclesPerRedraw = 1
else:
self.simCyclesPerRedraw = simCyclesPerRedraw
self.graphicalFurniture = []
if showGrid:
self.graphicalFurniture.append( GridRenderer(self.laws.maxInteractRadius, (200,200,200)) )
if extraDrawing != None:
self.graphicalFurniture.append(extraDrawing)
self.biggestRadius = 0
self.selectedParticle = None
self.left = 0
self.top = 0
self.dleft = 0
self.dtop = 0
self.lastIdleTime = time.time()
self.selected = None
def initialiseComponent(self):
    """Register input handlers, create the physics system and populate it
    from the initial topology supplied at construction time."""
    handlers = (
        (pygame.MOUSEBUTTONDOWN, lambda event, viewer=self: ParticleDragger.handle(event, viewer)),
        (pygame.KEYDOWN,         self.keyDownHandler),
        (pygame.KEYUP,           self.keyUpHandler),
    )
    for eventType, handler in handlers:
        self.addHandler(eventType, handler)

    self.physics = Kamaelia.Support.Particles.ParticleSystem(self.laws, [], 0)

    for node in self.initialNodes:
        self.addParticle(*node)
    for source, dest in self.initialBonds:
        self.makeBond(source, dest)
    return 1
def mainLoop(self):
    """\
    Main loop.
    Proceses commands from "inbox" inbox, runs physics simulation, then renders display

    FIXME: This is massively broken, this component overrides initialiseComponent,
    and also has a main *AND* has a mainLoop.
    """
    # process incoming messages
    if self.dataReady("inbox"):
        message = self.recv("inbox")
        self.doCommand(message)
    else:
        # no command pending: force lastIdleTime to 0 so the redraw branch
        # below always fires on idle frames
        self.lastIdleTime = 0

    if self.dataReady("alphacontrol"):
        alpha = self.recv("alphacontrol")
        self.screen.set_alpha(alpha)

    # simulate + redraw only once commands have stopped arriving for >1s
    # (or immediately when idle, thanks to the reset above)
    if self.lastIdleTime + 1.0 < time.time():
        self.physics.run(self.simCyclesPerRedraw)

        # draw the background
        self.screen.fill( (255,255,255) )

        # scroll, if scrolling is active, increasing velocity over time
        if self.dleft != 0 or self.dtop != 0:
            self.scroll( (self.dleft, self.dtop) )
            # accelerate by 1 px/frame in the direction of travel
            if self.dleft:
                self.dleft = self.dleft + 1 * abs(self.dleft)/self.dleft
            if self.dtop:
                self.dtop = self.dtop + 1 * abs(self.dtop)/self.dtop

        self.render()
        self.flip = True                 # tell the display service to flip
        self.lastIdleTime = time.time()
    else:
        self.flip = False

    # shutdown signalling: forward the message and quit
    if self.dataReady("control"):
        msg = self.recv("control")
        if isinstance(msg, Axon.Ipc.producerFinished) or isinstance(msg, Axon.Ipc.shutdownMicroprocess):
            self.send(msg, "signal")
            self.quit()

    return 1
def render(self):
    """Render graphical furniture and particles to self.screen.

    Rendering happens in numbered passes: each object's render() may return
    a generator which yields the number of the next pass in which it wants
    to be called again; generators are rescheduled until exhausted.
    """
    # renderPasses maps pass-number -> list of pending render generators
    renderPasses = {}

    def schedule(r):
        # Advance generator r one step and requeue it under the pass number
        # it yields; drop it once it raises StopIteration.
        try:
            n = r.next()
        except StopIteration:
            return
        renderPasses.setdefault(n, []).append(r)

    # first pass: collect render generators from furniture and particles
    for p in self.graphicalFurniture + self.physics.particles:
        r = p.render(self.screen)
        if r is not None:               # was '!= None'
            schedule(r)

    # repeatedly run the lowest-numbered pending pass until none remain
    # (min() replaces the redundant reduce(min, keys()))
    while renderPasses:
        nextPass = min(renderPasses)
        for r in renderPasses.pop(nextPass):
            schedule(r)
def keyDownHandler(self, event):
    """Handle keypresses:
    ESCAPE, Q : quits
    F : toggles fullscreen mode
    arrows : scroll the view
    """
    key = event.key
    if key in (pygame.K_ESCAPE, pygame.K_q):
        self.quit()
    elif key == pygame.K_f:
        pygame.display.toggle_fullscreen()
    # arrow keys start scrolling at 4 px/frame in the pressed direction
    elif key == pygame.K_UP:
        self.dtop = -4
    elif key == pygame.K_DOWN:
        self.dtop = +4
    elif key == pygame.K_LEFT:
        self.dleft = -4
    elif key == pygame.K_RIGHT:
        self.dleft = +4
def keyUpHandler(self, event):
    """Stop scrolling when an arrow key is released."""
    if event.key in (pygame.K_UP, pygame.K_DOWN):
        self.dtop = 0
    elif event.key in (pygame.K_LEFT, pygame.K_RIGHT):
        self.dleft = 0
def doCommand(self, msg):
    """\
    Proceses a topology command tuple:
    [ "ADD", "NODE", <id>, <name>, <positionSpec>, <particle type> ]
    [ "DEL", "NODE", <id> ]
    [ "ADD", "LINK", <id from>, <id to> ]
    [ "DEL", "LINK", <id from>, <id to> ]
    [ "DEL", "ALL" ]
    [ "GET", "ALL" ]

    Any failure (malformed/unrecognised command, bad ids, ...) is caught
    and reported as an ("ERROR", ...) message on "outbox".
    """
    try:
        if len(msg) < 2:
            raise ValueError("Command Error")
        cmd = msg[0].upper(), msg[1].upper()

        if cmd == ("ADD", "NODE") and len(msg) == 6:
            # msg[5] selects the particle class; unknown types are ignored.
            # ('in' replaces py2-only dict.has_key())
            if msg[5] in self.particleTypes:
                ptype = self.particleTypes[msg[5]]
                id = msg[2]
                name = msg[3]
                posSpec = msg[4]
                pos = self._generateXY(posSpec)
                particle = ptype(position=pos, ID=id, name=name)
                particle.originaltype = msg[5]
                self.addParticle(particle)
        elif cmd == ("DEL", "NODE") and len(msg) == 3:
            id = msg[2]
            self.removeParticle(id)
        elif cmd == ("ADD", "LINK") and len(msg) == 4:
            src = msg[2]
            dst = msg[3]
            self.makeBond(src, dst)
        elif cmd == ("DEL", "LINK") and len(msg) == 4:
            src = msg[2]
            dst = msg[3]
            self.breakBond(src, dst)
        elif cmd == ("DEL", "ALL") and len(msg) == 2:
            self.removeParticle(*self.physics.particleDict.keys())
        elif cmd == ("GET", "ALL") and len(msg) == 2:
            # reply with the command list that would rebuild the topology
            topology = [("DEL", "ALL")]
            topology.extend(self.getTopology())
            self.send(("TOPOLOGY", topology), "outbox")
        elif cmd == ("UPDATE_NAME", "NODE") and len(msg) == 4:
            node_id = msg[2]
            new_name = msg[3]
            self.updateParticleLabel(node_id, new_name)
        elif cmd == ("GET_NAME", "NODE") and len(msg) == 3:
            node_id = msg[2]
            name = self.getParticleLabel(node_id)
            self.send(("UPDATE_NAME", "NODE", node_id, name), "outbox")
        else:
            # was: raise "Command Error" -- string exceptions cannot be
            # raised in modern Python; use a real exception instead
            raise ValueError("Command Error")
    except Exception:
        import traceback
        errmsg = "".join(traceback.format_exception(*sys.exc_info()))
        # typo fix in the outgoing message: 'resason' -> 'reason'
        self.send(("ERROR", "Error processing message : " + str(msg) + " reason:\n" + errmsg), "outbox")
def updateParticleLabel(self, node_id, new_name):
    """\
    updateParticleLabel(node_id, new_name) -> updates the given nodes name & visual label if it exists

    node_id  - an id for an already existing node
    new_name - a string (may include spaces) defining the new node name
    """
    # only the first particle with a matching id is relabelled
    for particle in self.physics.particles:
        if particle.ID != node_id:
            continue
        particle.set_label(new_name)
        return
def getParticleLabel(self, node_id):
    """\
    getParticleLabel(node_id) -> particle's name

    Returns the name/label of the specified particle, or None when no
    particle with that id exists.
    """
    matches = (p.name for p in self.physics.particles if p.ID == node_id)
    return next(matches, None)
def _generateXY(self, posSpec):
    """\
    generateXY(posSpec) -> (x,y) or raises ValueError

    posSpec == "randompos" or "auto" -> random (x,y) within the surface (specified border distance in from the edge)
    posSpec == "(XXX,YYY)" -> specified x,y (positive or negative integers)
    """
    posSpec = posSpec.lower()
    if posSpec in ("randompos", "auto"):
        # random point at least self.border px in from each edge, offset by
        # the current scroll position
        x = self.left + random.randrange(self.border, self.screensize[0] - self.border, 1)
        y = self.top  + random.randrange(self.border, self.screensize[1] - self.border, 1)
        return x, y

    # raw string so the \d escapes reach the regex engine untouched
    match = re.match(r"^([+-]?\d+),([+-]?\d+)$", posSpec)
    if match:
        return int(match.group(1)), int(match.group(2))
    raise ValueError("Unrecognised position specification")
def addParticle(self, *particles):
    """Add particles to the system, tracking the largest radius seen and
    applying the current scroll offset to each one."""
    offset = (self.left, self.top)
    for particle in particles:
        self.biggestRadius = max(self.biggestRadius, particle.radius)
        particle.setOffset(offset)
    self.physics.add(*particles)
def removeParticle(self, *ids):
    """\
    Remove particle(s) specified by their ids.

    Also breaks any bonds to/from each particle, and clears the selection
    if a removed particle was currently selected.
    """
    for id in ids:
        particle = self.physics.particleDict[id]
        particle.breakAllBonds()
        if self.selected == particle:
            self.selectParticle(None)
    self.physics.removeByID(*ids)
def makeBond(self, source, dest):
    """Make a bond from source to destination particle, specified by IDs"""
    particles = self.physics.particleDict
    particles[source].makeBond(particles, dest)
def breakBond(self, source, dest):
    """Break a bond from source to destination particle, specified by IDs"""
    particles = self.physics.particleDict
    particles[source].breakBond(particles, dest)
def getTopology(self):
    """getTopology() -> list of command tuples that would build the current topology"""
    # nodes first, so the link commands always reference existing nodes
    topology = [("ADD", "NODE", p.ID, p.name, "random", p.originaltype)
                for p in self.physics.particles]
    # then the linkages between them
    topology.extend(("ADD", "LINK", p.ID, dst.ID)
                    for p in self.physics.particles
                    for dst in p.getBondedTo())
    return topology
def quit(self, event=None):
    """Cause termination."""
    super(TopologyViewer,self).quit(event)
    # NOTE(review): raising a string is invalid in Python >= 2.6; left
    # untouched because something upstream presumably catches exactly this
    # value -- confirm before modernising.
    raise "QUITTING" ### XXX FIXME : need better shutdown than this!
# NOTE: tuple-parameter syntax below is Python-2-only (removed by PEP 3113).
def scroll( self, (dx, dy) ):
    """Scroll the contents being displayed on the surface by (dx,dy) left and up."""
    self.left += dx
    self.top += dy
    # re-apply the new offset to everything that renders
    for e in self.graphicalFurniture + self.physics.particles:
        e.setOffset( (self.left, self.top) )
def selectParticle(self, particle):
    """Select the specified particle (pass None to clear the selection)
    and announce the change on "outbox"."""
    if self.selected == particle:
        return                      # no change: stay silent
    if self.selected != None:
        self.selected.deselect()
    self.selected = particle
    nodeid = None
    if particle != None:
        particle.select()
        nodeid = particle.ID
    self.send( ("SELECT","NODE", nodeid), "outbox" )
__kamaelia_components__ = ( TopologyViewer, )
|
from projectname.lib.base import *
class SampleController(BaseController):
    """Controller exercising basic Pylons features (sessions, app globals,
    helpers, templating and REST decorators) for the template-project tests."""

    def index(self):
        return Response('basic index page')

    def session_increment(self):
        # starts at -1 so the first request stores 0
        session.setdefault('counter', -1)
        session['counter'] += 1
        session.save()
        return Response('session incrementer')

    def globalup(self):
        return Response(g.message)

    def global_store(self, id):
        if id:
            g.counter += int(id)
        return Response(str(g.counter))

    def myself(self):
        return Response(h.url_for())

    def myparams(self):
        return Response(str(params))

    def testdefault(self):
        return render_response('testkid')

    def test_extra_engine(self):
        return render_response('kid', 'testkid')

    def test_template_caching(self):
        return render_response('/test_myghty.myt', cache_expire='never')

    def test_only_post(self):
        return Response('It was a post!')
    # BUG FIX: the original wrote bare list expressions such as
    # [rest.restrict('POST')] above each method; those evaluate the decorator
    # factory and throw the result away, so the REST restrictions were never
    # applied.  Apply the decorators to the methods explicitly instead.
    test_only_post = rest.dispatch_on(GET='test_only_get')(rest.restrict('POST')(test_only_post))

    def test_only_get(self):
        return Response('It was a get!')
    test_only_get = rest.restrict('GET')(test_only_get)

    def impossible(self):
        return Response('This should never be shown')
    impossible = rest.restrict('POST')(rest.dispatch_on(POST='test_only_post')(impossible))
[svn] Updating Pylons new app test to use response instead of Response, and request.params instead of params.
--HG--
branch : trunk
from projectname.lib.base import *
class SampleController(BaseController):
    """Controller exercising basic Pylons features (sessions, app globals,
    helpers, templating and REST decorators) for the template-project tests."""

    def index(self):
        return response('basic index page')

    def session_increment(self):
        # starts at -1 so the first request stores 0
        session.setdefault('counter', -1)
        session['counter'] += 1
        session.save()
        return response('session incrementer')

    def globalup(self):
        return response(g.message)

    def global_store(self, id):
        if id:
            g.counter += int(id)
        return response(str(g.counter))

    def myself(self):
        return response(h.url_for())

    def myparams(self):
        return response(str(request.params))

    def testdefault(self):
        return render_response('testkid')

    def test_extra_engine(self):
        return render_response('kid', 'testkid')

    def test_template_caching(self):
        return render_response('/test_myghty.myt', cache_expire='never')

    # decorator syntax: @A @B def f  is exactly  f = A(B(f))
    @rest.dispatch_on(GET='test_only_get')
    @rest.restrict('POST')
    def test_only_post(self):
        return response('It was a post!')

    @rest.restrict('GET')
    def test_only_get(self):
        return response('It was a get!')

    @rest.restrict('POST')
    @rest.dispatch_on(POST='test_only_post')
    def impossible(self):
        return response('This should never be shown')
|
import time
import config
import accounts
# VK API fields requested for each friend candidate.
fields = 'photo_50,country,last_seen'

# allowed.txt: line 0 = characters permitted in names,
#              line 1 = whitespace-separated forbidden name substrings.
# Use context managers so the file handles are closed (the originals leaked).
with open(accounts.getFile('allowed.txt'), encoding='utf-8') as _f:
    s = _f.readlines()
# ids of users we must never try to add
with open(accounts.getFile('noadd.txt')) as _f:
    noadd = set(map(int, _f.read().split()))
allowed = set(s[0] + ' ')   # permitted name characters (plus space)
s = s[1].split()            # forbidden substrings, used by the checks below
# maximum offline period, in days (int-typed config value)
offline_allowed = config.get('check_friend.offline_allowed', 'i')
def writeNoadd():
    """Overwrite noadd.txt with the current contents of the noadd set."""
    payload = '\n'.join(str(uid) for uid in sorted(noadd))
    with open(accounts.getFile('noadd.txt'), 'w') as f:
        f.write(payload)
def appendNoadd(users):
    """Add *users* to the in-memory noadd set and append them to noadd.txt."""
    noadd.update(users)
    payload = '\n' + '\n'.join(str(uid) for uid in sorted(users))
    with open(accounts.getFile('noadd.txt'), 'a') as f:
        f.write(payload)
def check_char(c):
    """Return True when character *c* is permitted in a first/last name."""
    is_permitted = c in allowed
    return is_permitted
# Each entry is (predicate, failure message); a candidate dict `fr`
# (apparently a VK API user record -- photo_50/country/last_seen fields)
# is acceptable only when every predicate is truthy.
checks = [
    (lambda fr: 'deactivated' not in fr, 'Account is deactivated'),
    # a photo_50 URL ending in camera_50.png is the default placeholder avatar
    (lambda fr: fr['photo_50'] and not fr['photo_50'].endswith('camera_50.png'), 'No avatar'),
    # NOTE(review): country ids 0..3 treated as acceptable -- verify meaning
    (lambda fr: fr.get('country', {'id':0})['id'] in [0, 1, 2, 3], 'Bad country'),
    (lambda fr: all(check_char(i) for i in fr['first_name'] + fr['last_name']), 'Bad characters in name'),
    # offline_allowed is in days; last_seen.time is a unix timestamp
    (lambda fr: 'last_seen' in fr and time.time() - fr['last_seen']['time'] < 3600 * 24 * offline_allowed, 'Offline too long'),
    # s holds the forbidden name substrings loaded at module import
    (lambda fr: not any(i in (fr['first_name'] + ' ' + fr['last_name']).lower() for i in s), 'Bad substring in name'),
    (lambda fr: fr['id'] not in noadd, 'Ignored'),
    (lambda fr: fr['first_name'] != fr['last_name'], 'First name equal to last name'),
]
def is_good(fr, need_reason=False):
    """Evaluate user dict *fr* against every rule in `checks`.

    need_reason=False: return True iff all checks pass (False on the first
    failure).  need_reason=True: return a comma-separated string of failed
    check messages, or None when everything passed.
    """
    reasons = []
    for predicate, message in checks:
        if predicate(fr):
            continue
        if not need_reason:
            return False
        reasons.append(message)
    if need_reason:
        return ', '.join(reasons) or None
    return True
is_good: when collecting failure reasons, lowercase every message after the first so the joined reason string reads as one sentence.
import time
import config
import accounts
# VK API fields requested for each friend candidate.
fields = 'photo_50,country,last_seen'

# allowed.txt: line 0 = characters permitted in names,
#              line 1 = whitespace-separated forbidden name substrings.
# Use context managers so the file handles are closed (the originals leaked).
with open(accounts.getFile('allowed.txt'), encoding='utf-8') as _f:
    s = _f.readlines()
# ids of users we must never try to add
with open(accounts.getFile('noadd.txt')) as _f:
    noadd = set(map(int, _f.read().split()))
allowed = set(s[0] + ' ')   # permitted name characters (plus space)
s = s[1].split()            # forbidden substrings, used by the checks below
# maximum offline period, in days (int-typed config value)
offline_allowed = config.get('check_friend.offline_allowed', 'i')
def writeNoadd():
    """Overwrite noadd.txt with the current contents of the noadd set."""
    payload = '\n'.join(str(uid) for uid in sorted(noadd))
    with open(accounts.getFile('noadd.txt'), 'w') as f:
        f.write(payload)
def appendNoadd(users):
    """Add *users* to the in-memory noadd set and append them to noadd.txt."""
    noadd.update(users)
    payload = '\n' + '\n'.join(str(uid) for uid in sorted(users))
    with open(accounts.getFile('noadd.txt'), 'a') as f:
        f.write(payload)
def check_char(c):
    """Return True when character *c* is permitted in a first/last name."""
    is_permitted = c in allowed
    return is_permitted
# Each entry is (predicate, failure message); a candidate dict `fr`
# (apparently a VK API user record -- photo_50/country/last_seen fields)
# is acceptable only when every predicate is truthy.
checks = [
    (lambda fr: 'deactivated' not in fr, 'Account is deactivated'),
    # a photo_50 URL ending in camera_50.png is the default placeholder avatar
    (lambda fr: fr['photo_50'] and not fr['photo_50'].endswith('camera_50.png'), 'No avatar'),
    # NOTE(review): country ids 0..3 treated as acceptable -- verify meaning
    (lambda fr: fr.get('country', {'id':0})['id'] in [0, 1, 2, 3], 'Bad country'),
    (lambda fr: all(check_char(i) for i in fr['first_name'] + fr['last_name']), 'Bad characters in name'),
    # offline_allowed is in days; last_seen.time is a unix timestamp
    (lambda fr: 'last_seen' in fr and time.time() - fr['last_seen']['time'] < 3600 * 24 * offline_allowed, 'Offline too long'),
    # s holds the forbidden name substrings loaded at module import
    (lambda fr: not any(i in (fr['first_name'] + ' ' + fr['last_name']).lower() for i in s), 'Bad substring in name'),
    (lambda fr: fr['id'] not in noadd, 'Ignored'),
    (lambda fr: fr['first_name'] != fr['last_name'], 'First name equal to last name'),
]
def is_good(fr, need_reason=False):
    """Evaluate user dict *fr* against every rule in `checks`.

    need_reason=False: return True iff all checks pass (False on the first
    failure).  need_reason=True: return a comma-separated string of failed
    check messages (first capitalised, the rest lowercased), or None when
    everything passed.
    """
    reasons = []
    for predicate, message in checks:
        if predicate(fr):
            continue
        if not need_reason:
            return False
        # keep the first message's capitalisation; lowercase the rest
        reasons.append(message if not reasons else message.lower())
    if need_reason:
        return ', '.join(reasons) or None
    return True
|
import pygame
import sys
# Command line options:
#   argv[1] - board width in pixels  (default 300, accepted range 50..1200)
#   argv[2] - board height in pixels (default 300, accepted range 50..1200)
# (the .py file itself counts as an arg in len(sys.argv))
'''Open with 800 640 after .py script'''
width = 300
if len(sys.argv) > 1 and 50 <= int(sys.argv[1]) <= 1200:
    width = int(sys.argv[1])
print("width:", width)

height = 300
if len(sys.argv) > 2 and 50 <= int(sys.argv[2]) <= 1200:
    height = int(sys.argv[2])
print("height:", height)
# Define some colors (R, G, B)
BLACK = ( 0, 0, 0)
WHITE = ( 255, 255, 255)
BLUE = ( 0, 0, 255)
GREEN = ( 0, 255, 0)
RED = ( 255, 0, 0)
CUST_COL = ( 0, 200, 50)   # accent colour used for the paging arrows
GREY = ( 200, 200, 200)

#This defines the height and the width of the displayed screen
#width = 300
#height = 300

# Set row 1, cell 5 to one. (Remember rows and
# column numbers start at zero.)
#grid[1][5] = 1

# Initialize pygame
pygame.init()

# Set the height and width of the screen
size = [width, height]
screen = pygame.display.set_mode(size)
#screen = pygame.display.set_mode(size, pygame.FULLSCREEN)

# Set title of screen
pygame.display.set_caption("Title")

# Used to manage how fast the screen updates
clock = pygame.time.Clock()

# This sets the margin between each cell
margin = 5

'''Number of cells per page'''
#This is the number of cells to be displayed (vertically)
cell_num = 5

# Cell height and width calculated by taking total width or height of the screen and
# subtracting all the margins (each side has a margin as well as between each cell)
# and dividing by the number of cells on each axis.
# Also store width and height of the side bars.
bar_width = 100
bar_height = screen.get_height()-2*margin
cell_width = int((screen.get_width()-(margin*2))) - (bar_width*2)
cell_height = int((screen.get_height()-(margin*(cell_num+1)))/cell_num)

# Height and width of the pop-up window (the black box drawn over the main
# screen for settings and the currently selected drink)
pop_up_width = int(screen.get_width()*0.9)
pop_up_height = int(screen.get_height()*0.9)

# Create a list with cocktail names
drinks = ["Jack & Coke", "Rum and Coke", "Long Island Iced Tea", "Coke", "Woo-Woo", "SHOTS!!!!!", "Bloody Mary", "Mimosa", "Beer", "Don't Show"]
##drinks = ["Item 1", "Item Two", "Item-o three-o", "Item 4", "Items 5s", "Item 6", "Item 7"]

font = pygame.font.Font(None, 50) #36 default
#Function to get eligible drinks to make, returns list of lists, where each sublist has cell_num number of items
def get_drink_list():
'''Get eligible drinks into big list here, call drinks'''
drinks_avail = []
done = False
num_drinks_avail = len(drinks)
#Keep track of where we are in list
cur_drink = 0
while cur_drink < num_drinks_avail:#not done:
#Try to create sublist of 5
sublist = []
for i in range(5):
try:
sublist.append(drinks[cur_drink])
#sublist.append(next(drinks_iter))
except:
sublist.append(" ")
done = True
cur_drink += 1
#print(sublist)
drinks_avail.append(sublist)
return drinks_avail
drink_list = get_drink_list()
def open_drink(surface, drink):
    """Draw the drink pop-up onto *surface*.

    BUG FIX: the original drew on the global `screen` and ignored the
    *surface* parameter; it now draws on the surface it is given (callers
    pass `screen`, so on-screen behaviour is unchanged).
    NOTE(review): *drink* is not rendered anywhere yet -- presumably a
    placeholder for the drink name/details.
    """
    pop_up_x = surface.get_width()*0.05
    pop_up_y = surface.get_height()*0.05
    # black backdrop covering 90% of the window
    pygame.draw.rect(surface, BLACK,
                     [pop_up_x,
                      pop_up_y,
                      pop_up_width,
                      pop_up_height])
    # white info panel (top-left 70% x 70% of the popup)
    pygame.draw.rect(surface, WHITE,
                     [pop_up_x+margin,
                      pop_up_y+margin,
                      pop_up_width*0.7-margin,
                      pop_up_height*0.7-margin])
    # Pour button (right-hand 30%)
    pygame.draw.rect(surface, WHITE,
                     [pop_up_x+margin+(pop_up_width*0.7),
                      pop_up_y+margin,
                      pop_up_width*0.3-(margin*2),
                      pop_up_height*0.6-margin])
# ---- main loop state ----
#Loop until the user clicks the close button.
done = False
#Which page of the drink menu the user is on
drink_page = 0
#Determines if a drink menu is open
# NOTE(review): initialised to True and never set anywhere else in this
# revision, so the pop-up is drawn from the very first frame onward.
drink_menu_open = True
#Keeps track of currently open drink
# NOTE(review): never reassigned in this revision -- always "".
cur_drink = ""

# -------- Main Program Loop -----------
while done == False:
    for event in pygame.event.get():  # User did something
        if event.type == pygame.QUIT:  # If user clicked close
            done = True  # Flag that we are done so we exit this loop
        elif event.type == pygame.MOUSEBUTTONDOWN:
            # User clicks the mouse. Get the position
            click_pos = pygame.mouse.get_pos()
            #Check if click is in the left sidebar
            ##print(click_pos)
            if click_pos[0] <= (bar_width+margin):
                ##print("Left sidebar clicked")
                #Check if button clicked
                #Check if menu can page left, if it can, decrement drink_page
                if drink_page > 0:
                    drink_page -= 1
                '''
                #Else loop to end of drink pages
                else:
                    drink_page = len(drink_list)-1
                '''
            #Check if click is in the right sidebar
            elif click_pos[0] >= (screen.get_width()-(bar_width+margin)):
                ##print("Right sidebar clicked")
                #Check if button clicked
                #Check if menu can page right, if it can, increment drink_page
                if drink_page < (len(drink_list)-1):
                    drink_page += 1
                '''
                #Else at end of list and loop back to beginning
                else:
                    drink_page = 0
                '''
            #Else, click is in the middle section (list)
            else:
                ##print("Menu click")
                #Gets which cell is clicked, margin counts as part of cell above it
                drink_click = int((click_pos[1]-margin)/(cell_height+margin))
                print(drink_list[drink_page][drink_click])

    # Set the screen background
    screen.fill(BLACK)

    '''
    if game_over:
        # If game over is true, draw game over
        if winner == 1:
            text = font.render("Green Wins", True, WHITE)
        if winner == 2:
            text = font.render("Blue Wins", True, WHITE)
        text_rect = text.get_rect()
        text_x = screen.get_width() / 2 - text_rect.width / 2
        text_y = screen.get_height() / 2 - text_rect.height / 2
        screen.blit(text, [text_x, text_y])
        pygame.display.flip()
        pygame.time.delay(1000)
    else:
    '''

    '''Draw the side bars'''
    '''color = WHITE'''
    color = GREY
    #Left side bar
    pygame.draw.rect(screen, color,
                     [margin,
                      margin,
                      bar_width-margin,
                      screen.get_height()-(2*margin)])
    #Right side bar
    pygame.draw.rect(screen, color,
                     [(margin*2)+cell_width+bar_width,
                      margin,
                      bar_width-margin,
                      screen.get_height()-(2*margin)])

    '''Draw the arrows in the side bar'''
    '''Old triangle arrows
    #Point lists of filled polygons for right and left arrows
    ##points_left = [(50,260), (20, 310), (50, 360)]
    ##points_right = [(750, 260), (780, 310), (750, 360)]
    #Create filled triangle on left side
    ##pygame.draw.polygon(screen, CUST_COL, points_left, 0)
    #Create filled triangle on right side
    ##pygame.draw.polygon(screen, BLACK, points_right, 0)
    '''
    #Point list of > shape, use percent of side bar width and height to set points
    arrow_right = [(margin+cell_width+bar_width + bar_width*0.3, margin+(bar_height*0.4)),#260),
                   (margin+cell_width+bar_width + bar_width*0.5, margin+(bar_height*0.4)),
                   (margin+cell_width+bar_width + bar_width*0.7, margin+(bar_height*0.5)),
                   (margin+cell_width+bar_width + bar_width*0.5, margin+(bar_height*0.6)),
                   (margin+cell_width+bar_width + bar_width*0.3, margin+(bar_height*0.6)),
                   (margin+cell_width+bar_width + bar_width*0.5, margin+(bar_height*0.5))]
    #Create filled > shape on right side
    pygame.draw.polygon(screen, CUST_COL, arrow_right, 0)
    #Point list of < shape, use percent of side bar width and height to set points
    arrow_left = [(margin+(bar_width*0.7), margin+(bar_height*0.4)),
                  (margin+(bar_width*0.5), margin+(bar_height*0.4)),
                  (margin+(bar_width*0.3), margin+(bar_height*0.5)),
                  (margin+(bar_width*0.5), margin+(bar_height*0.6)),
                  (margin+(bar_width*0.7), margin+(bar_height*0.6)),
                  (margin+(bar_width*0.5), margin+(bar_height*0.5))]
    #Create filled < shape on left side
    pygame.draw.polygon(screen, CUST_COL, arrow_left, 0)

    '''Draw the grid'''
    iterat = 0  # index into the current page's drink names
    for row in range(cell_num):
        '''color = WHITE'''
        color = GREY
        '''Draw the cells for the drinks'''
        #screen, color, [dist from left edge, dist from top edge], width of rectangle, height of rectangle
        pygame.draw.rect(screen,
                         color,
                         [(margin+bar_width),
                          (margin+cell_height)*row+margin,
                          cell_width,
                          cell_height])
        text = font.render(drink_list[drink_page][iterat], True, BLACK)
        #Length of text, height of text (in pixels)
        text_size = font.size(drink_list[drink_page][iterat])
        text_x = margin + 5
        #Center text vertically
        #Calculate height of cells * row, account for margin, then center in cell. -2 to account for text starting after 2 pixels
        text_y = (cell_height*row) + (margin*(row+1)) + ((cell_height-text_size[1])/2) - 2
        #Center text horizontally (this overwrites the left-aligned text_x above)
        text_x = ((screen.get_width() - text_size[0])/2) - 2
        #Coordinates are top left of text box, text is actually 2 pixels down and right from coordinates
        #text, [dist from left edge, dist from top edge]
        screen.blit(text, [text_x, text_y])
        iterat += 1

    '''Check if drink menu is open'''
    if drink_menu_open:
        open_drink(screen, cur_drink)

    # Limit to 60 frames per second
    clock.tick(60)

    # Go ahead and update the screen with what we've drawn.
    pygame.display.flip()

# Be IDLE friendly. If you forget this line, the program will 'hang'
# on exit.
pygame.quit()
Pop-up menu: added Done button and three drink-size selection buttons; open_drink now draws on the passed surface, and the pop-up opens only when a drink is clicked (recording the selected drink).
import pygame
import sys
# Command line options:
#   argv[1] - board width in pixels  (default 300, accepted range 50..1200)
#   argv[2] - board height in pixels (default 300, accepted range 50..1200)
# (the .py file itself counts as an arg in len(sys.argv))
'''Open with 800 640 after .py script'''
width = 300
if len(sys.argv) > 1 and 50 <= int(sys.argv[1]) <= 1200:
    width = int(sys.argv[1])
print("width:", width)

height = 300
if len(sys.argv) > 2 and 50 <= int(sys.argv[2]) <= 1200:
    height = int(sys.argv[2])
print("height:", height)
# Define some colors (R, G, B)
BLACK = ( 0, 0, 0)
WHITE = ( 255, 255, 255)
BLUE = ( 0, 0, 255)
GREEN = ( 0, 255, 0)
RED = ( 255, 0, 0)
CUST_COL = ( 0, 200, 50)   # accent colour used for the paging arrows
GREY = ( 200, 200, 200)

#This defines the height and the width of the displayed screen
#width = 300
#height = 300

# Set row 1, cell 5 to one. (Remember rows and
# column numbers start at zero.)
#grid[1][5] = 1

# Initialize pygame
pygame.init()

# Set the height and width of the screen
size = [width, height]
screen = pygame.display.set_mode(size)
#screen = pygame.display.set_mode(size, pygame.FULLSCREEN)

# Set title of screen
pygame.display.set_caption("Title")

# Used to manage how fast the screen updates
clock = pygame.time.Clock()

# This sets the margin between each cell
margin = 5

'''Number of cells per page'''
#This is the number of cells to be displayed (vertically)
cell_num = 5

# Cell height and width calculated by taking total width or height of the screen and
# subtracting all the margins (each side has a margin as well as between each cell)
# and dividing by the number of cells on each axis.
# Also store width and height of the side bars.
bar_width = 100
bar_height = screen.get_height()-2*margin
cell_width = int((screen.get_width()-(margin*2))) - (bar_width*2)
cell_height = int((screen.get_height()-(margin*(cell_num+1)))/cell_num)

# Height and width of the pop-up window (the black box drawn over the main
# screen for settings and the currently selected drink)
pop_up_width = int(screen.get_width()*0.9)
pop_up_height = int(screen.get_height()*0.9)

# Create a list with cocktail names
drinks = ["Jack & Coke", "Rum and Coke", "Long Island Iced Tea", "Coke", "Woo-Woo", "SHOTS!!!!!", "Bloody Mary", "Mimosa", "Beer", "Don't Show"]
##drinks = ["Item 1", "Item Two", "Item-o three-o", "Item 4", "Items 5s", "Item 6", "Item 7"]

font = pygame.font.Font(None, 50) #36 default
#Function to get eligible drinks to make, returns list of lists, where each sublist has cell_num number of items
def get_drink_list():
'''Get eligible drinks into big list here, call drinks'''
drinks_avail = []
done = False
num_drinks_avail = len(drinks)
#Keep track of where we are in list
cur_drink = 0
while cur_drink < num_drinks_avail:#not done:
#Try to create sublist of 5
sublist = []
for i in range(5):
try:
sublist.append(drinks[cur_drink])
#sublist.append(next(drinks_iter))
except:
sublist.append(" ")
done = True
cur_drink += 1
#print(sublist)
drinks_avail.append(sublist)
return drinks_avail
drink_list = get_drink_list()
def open_drink(surface, drink):
    """Draw the pop-up menu for the selected drink onto *surface*.

    Layout (sizes relative to the pop_up_width/pop_up_height globals):
    black backdrop, white info panel (top-left 70% x 70%), Pour button
    (top-right), Done button (bottom-right), and three size-selection
    buttons along the bottom of the info panel.
    NOTE(review): *drink* is not rendered anywhere yet, and the selected
    size highlight (3.0 oz) is hard-coded -- presumably placeholders.
    """
    pop_up_x = surface.get_width()*0.05
    pop_up_y = surface.get_height()*0.05
    #Draw black box over screen
    pygame.draw.rect(surface, BLACK,
                     [pop_up_x,
                      pop_up_y,
                      pop_up_width,
                      pop_up_height])
    #Draw white box to display info
    pygame.draw.rect(surface, WHITE,
                     [pop_up_x+margin,
                      pop_up_y+margin,
                      pop_up_width*0.7-margin,
                      pop_up_height*0.7-margin])
    #Draw Pour button
    pygame.draw.rect(surface, WHITE,
                     [pop_up_x+margin+(pop_up_width*0.7),
                      pop_up_y+margin,
                      pop_up_width*0.3-(margin*2),
                      pop_up_height*0.6-margin])
    #Draw Done button
    pygame.draw.rect(surface, WHITE,
                     [pop_up_x+margin+(pop_up_width*0.7),
                      pop_up_y+margin+(pop_up_height*0.6),
                      pop_up_width*0.3-(margin*2),
                      pop_up_height*0.4-(margin*2)])
    '''Draw size buttons'''
    #Whichever size button is currently selected will have a different background
    #Background color of 1.5 oz button
    button_15_color = WHITE
    #Background color of 3 oz button
    button_30_color = CUST_COL
    #Background color of 4.5 oz button
    button_45_color = WHITE
    #Draw 1.5 oz button
    pygame.draw.rect(surface, button_15_color,
                     [pop_up_x+margin,
                      pop_up_y+margin+(pop_up_height*0.7),
                      pop_up_width*(0.7/3)-margin,
                      pop_up_height*0.3-(margin*2)])
    #Draw 3.0 oz button
    pygame.draw.rect(surface, button_30_color,
                     [pop_up_x+margin+(pop_up_width*(0.7/3)),
                      pop_up_y+margin+(pop_up_height*0.7),
                      pop_up_width*(0.7/3)-margin,
                      pop_up_height*0.3-(margin*2)])
    #Draw 4.5 oz button (use the rest of the width up to the size-button edge)
    pygame.draw.rect(surface, button_45_color,
                     [pop_up_x+margin+(pop_up_width*(0.7/3*2)),
                      pop_up_y+margin+(pop_up_height*0.7),
                      (pop_up_width*(0.7)-margin)-(pop_up_width*(0.7/3*2)),
                      pop_up_height*0.3-(margin*2)])
# ---- main loop state ----
#Loop until the user clicks the close button.
done = False
#Which page of the drink menu the user is on
drink_page = 0
#Determines if a drink menu is open (set True when a drink row is clicked)
drink_menu_open = False
#Keeps track of currently open drink
cur_drink = ""

# -------- Main Program Loop -----------
while done == False:
    for event in pygame.event.get():  # User did something
        if event.type == pygame.QUIT:  # If user clicked close
            done = True  # Flag that we are done so we exit this loop
        elif event.type == pygame.MOUSEBUTTONDOWN:
            # User clicks the mouse. Get the position
            click_pos = pygame.mouse.get_pos()
            #Check if click is in the left sidebar
            ##print(click_pos)
            if click_pos[0] <= (bar_width+margin):
                ##print("Left sidebar clicked")
                #Check if button clicked
                #Check if menu can page left, if it can, decrement drink_page
                if drink_page > 0:
                    drink_page -= 1
                '''
                #Else loop to end of drink pages
                else:
                    drink_page = len(drink_list)-1
                '''
            #Check if click is in the right sidebar
            elif click_pos[0] >= (screen.get_width()-(bar_width+margin)):
                ##print("Right sidebar clicked")
                #Check if button clicked
                #Check if menu can page right, if it can, increment drink_page
                if drink_page < (len(drink_list)-1):
                    drink_page += 1
                '''
                #Else at end of list and loop back to beginning
                else:
                    drink_page = 0
                '''
            #Else, click is in the middle section (list)
            else:
                ##print("Menu click")
                #Gets which cell is clicked, margin counts as part of cell above it
                drink_click = int((click_pos[1]-margin)/(cell_height+margin))
                print(drink_list[drink_page][drink_click])
                # remember the clicked drink and open its pop-up next frame
                drink_menu_open = True
                cur_drink = drink_list[drink_page][drink_click]

    # Set the screen background
    screen.fill(BLACK)

    '''
    if game_over:
        # If game over is true, draw game over
        if winner == 1:
            text = font.render("Green Wins", True, WHITE)
        if winner == 2:
            text = font.render("Blue Wins", True, WHITE)
        text_rect = text.get_rect()
        text_x = screen.get_width() / 2 - text_rect.width / 2
        text_y = screen.get_height() / 2 - text_rect.height / 2
        screen.blit(text, [text_x, text_y])
        pygame.display.flip()
        pygame.time.delay(1000)
    else:
    '''

    '''Draw the side bars'''
    '''color = WHITE'''
    color = GREY
    #Left side bar
    pygame.draw.rect(screen, color,
                     [margin,
                      margin,
                      bar_width-margin,
                      screen.get_height()-(2*margin)])
    #Right side bar
    pygame.draw.rect(screen, color,
                     [(margin*2)+cell_width+bar_width,
                      margin,
                      bar_width-margin,
                      screen.get_height()-(2*margin)])

    '''Draw the arrows in the side bar'''
    '''Old triangle arrows
    #Point lists of filled polygons for right and left arrows
    ##points_left = [(50,260), (20, 310), (50, 360)]
    ##points_right = [(750, 260), (780, 310), (750, 360)]
    #Create filled triangle on left side
    ##pygame.draw.polygon(screen, CUST_COL, points_left, 0)
    #Create filled triangle on right side
    ##pygame.draw.polygon(screen, BLACK, points_right, 0)
    '''
    #Point list of > shape, use percent of side bar width and height to set points
    arrow_right = [(margin+cell_width+bar_width + bar_width*0.3, margin+(bar_height*0.4)),#260),
                   (margin+cell_width+bar_width + bar_width*0.5, margin+(bar_height*0.4)),
                   (margin+cell_width+bar_width + bar_width*0.7, margin+(bar_height*0.5)),
                   (margin+cell_width+bar_width + bar_width*0.5, margin+(bar_height*0.6)),
                   (margin+cell_width+bar_width + bar_width*0.3, margin+(bar_height*0.6)),
                   (margin+cell_width+bar_width + bar_width*0.5, margin+(bar_height*0.5))]
    #Create filled > shape on right side
    pygame.draw.polygon(screen, CUST_COL, arrow_right, 0)
    #Point list of < shape, use percent of side bar width and height to set points
    arrow_left = [(margin+(bar_width*0.7), margin+(bar_height*0.4)),
                  (margin+(bar_width*0.5), margin+(bar_height*0.4)),
                  (margin+(bar_width*0.3), margin+(bar_height*0.5)),
                  (margin+(bar_width*0.5), margin+(bar_height*0.6)),
                  (margin+(bar_width*0.7), margin+(bar_height*0.6)),
                  (margin+(bar_width*0.5), margin+(bar_height*0.5))]
    #Create filled < shape on left side
    pygame.draw.polygon(screen, CUST_COL, arrow_left, 0)

    '''Draw the grid'''
    iterat = 0  # index into the current page's drink names
    for row in range(cell_num):
        '''color = WHITE'''
        color = GREY
        '''Draw the cells for the drinks'''
        #screen, color, [dist from left edge, dist from top edge], width of rectangle, height of rectangle
        pygame.draw.rect(screen,
                         color,
                         [(margin+bar_width),
                          (margin+cell_height)*row+margin,
                          cell_width,
                          cell_height])
        text = font.render(drink_list[drink_page][iterat], True, BLACK)
        #Length of text, height of text (in pixels)
        text_size = font.size(drink_list[drink_page][iterat])
        text_x = margin + 5
        #Center text vertically
        #Calculate height of cells * row, account for margin, then center in cell. -2 to account for text starting after 2 pixels
        text_y = (cell_height*row) + (margin*(row+1)) + ((cell_height-text_size[1])/2) - 2
        #Center text horizontally (this overwrites the left-aligned text_x above)
        text_x = ((screen.get_width() - text_size[0])/2) - 2
        #Coordinates are top left of text box, text is actually 2 pixels down and right from coordinates
        #text, [dist from left edge, dist from top edge]
        screen.blit(text, [text_x, text_y])
        iterat += 1

    '''Check if drink menu is open'''
    if drink_menu_open:
        open_drink(screen, cur_drink)

    # Limit to 60 frames per second
    clock.tick(60)

    # Go ahead and update the screen with what we've drawn.
    pygame.display.flip()

# Be IDLE friendly. If you forget this line, the program will 'hang'
# on exit.
pygame.quit()
auto-commit
|
# -*- coding: utf-8 -*-
from qtpy import QtGui, QtCore, QtWidgets
import pyqtgraph as pg
from pyqtgraph.graphicsItems.ROI import Handle
from skimage.draw import polygon, line
import numpy as np
import os
from scipy.ndimage.interpolation import rotate
from . import global_vars as g
from .utils.misc import random_color, open_file_gui
from .tracefig import roiPlot
class ROI_Drawing(pg.GraphicsObject):
    """
    This class is used by the g.currentWindow when an roi is being created. Once the creation is finished,
    drawFinished() is called and this class returns an ROI object.
    """
    def __init__(self, window, x, y, kind):
        pg.GraphicsObject.__init__(self)
        window.imageview.addItem(self)
        self.window = window
        self.pts = [pg.Point(round(x), round(y))]
        # If the press landed on an existing rect_line handle, hand drawing over
        # to that ROI (extendRectLine rebinds extend/drawFinished) and bail out.
        if self.extendRectLine():
            window.imageview.removeItem(self)
            return
        self.kind = kind
        self.state = {'pos': pg.Point(x, y), 'size': pg.Point(0, 0)}
        self.color = QtGui.QColor(g.settings['roi_color']) if g.settings['roi_color'] != 'random' else random_color()

    def cancel(self):
        """Abort the in-progress drawing and clear the current ROI."""
        g.currentWindow.imageview.removeItem(self)
        g.currentWindow.currentROI = None

    def extendRectLine(self):
        """Return True if the start point grabbed a handle of an existing ROI_rect_line.

        When it did, this object's extend/drawFinished/boundingRect are rebound to
        the existing ROI so the drag extends that ROI instead of creating a new one.
        """
        for roi in self.window.rois:
            if isinstance(roi, ROI_rect_line):
                a = roi.getNearestHandle(self.pts[0])
                if a:
                    roi.extendHandle = a
                    self.extend = roi.extend
                    self.drawFinished = roi.extendFinished
                    #self.__dict__.update(roi.__dict__)
                    self.boundingRect = roi.boundingRect
                    return True
        return False

    def extend(self, x, y):
        """Add the mouse position to the preview shape (called while dragging)."""
        new_pt = pg.Point(round(x), round(y))
        if self.kind == 'freehand':
            if self.pts[-1] != new_pt:
                self.pts.append(new_pt)
        elif self.kind in ('line', 'rectangle', 'rect_line'):
            # Two-point shapes: keep the anchor, move the second point.
            if len(self.pts) == 1:
                self.pts.append(new_pt)
            else:
                self.pts[1] = new_pt
            #self.pts = sorted(self.pts, key=lambda a: a.x()/a.y())
        self.state['pos'] = pg.Point(*np.min(self.pts, 0))
        self.state['size'] = pg.Point(*np.ptp(self.pts, 0))
        self.prepareGeometryChange()
        self.update()

    def paint(self, p, *args):
        """Draw the in-progress shape preview."""
        pen = QtGui.QPen(self.color)
        pen.setWidth(0)
        p.setPen(pen)
        if self.kind == 'freehand':
            p.drawPolyline(*self.pts)
        elif self.kind == 'rectangle':
            p.drawRect(self.boundingRect())
        elif self.kind in ('rect_line', 'line'):
            p.drawLine(*self.pts)

    def drawFinished(self):
        """Remove the preview and return the finished ROI object (or None)."""
        self.window.imageview.removeItem(self)
        if self.kind == 'freehand':
            if len(self.pts) < 4:
                return None
            # BUG FIX: original called undefined name `ROI`; the freehand ROI
            # class in this module is ROI_freehand.
            r = ROI_freehand(self.window, self.pts)
        elif self.kind == 'rectangle':
            r = ROI_rectangle(self.window, self.state['pos'], self.state['size'])
        elif self.kind == 'line':
            r = ROI_line(self.window, self.pts)
        elif self.kind == 'rect_line':
            r = ROI_rect_line(self.window, self.pts)
        r.drawFinished()
        pen = QtGui.QPen(self.color)
        pen.setWidth(0)
        r.setPen(pen)
        return r

    def contains(self, *args):
        # Accept either a QPointF or separate (x, y) coordinates.
        if len(args) == 2:
            args = [pg.Point(*args)]
        return pg.GraphicsObject.contains(self, *args)

    def boundingRect(self):
        return QtCore.QRectF(self.state['pos'].x(), self.state['pos'].y(), self.state['size'].x(), self.state['size'].y())
class ROI_Base():
    """ ROI_Base interface for all ROI types, template class for duplicate functions and functions to override.
    connect window.closeEvent to ROI delete
    set the window.currentROI to self
    Attributes:
        colorDialog: dialog for selecting the color of the ROI and its trace
        traceWindow: the tracewindow that this ROI is plotted to, or None
        mask: array of XY values that are contained within the ROI
        pts: array of XY values used to copy the ROI
        linkedROIs: set of rois that act as one ROI
    Not Implemented Functions:
        getMask()
        getPoints()
    Functions:
        plot():
            run the roiPlot function and link this window to the traceWindow
            Returns the traceWindow
        unplot():
            Remove the roi from its traceWindow
        link(roi):
            add an roi to the linkedROIs set, so they translate together
        colorSelected(QColor):
            set the color of the roi
        copy():
            store the roi in the clipboard
        paste():
            Create an roi from the clipboard ROI using roi.getPoints()
        delete():
            unplot the ROI, remove the ROI from the window, clear it from the clipboard if it was copied, disconnect all signals
        drawFinished():
            add the ROI to the window, called by ROI_Drawing
        str():
            return kind and self.pts for recreating the ROI
    """
    # Default kwargs handed to the underlying pyqtgraph ROI constructor.
    INITIAL_ARGS = {'translateSnap': True, 'removable': True, 'snapSize': 1, 'scaleSnap': True}

    def __init__(self, window, pts):
        self.window = window
        self.colorDialog = QtWidgets.QColorDialog()
        self.colorDialog.colorSelected.connect(self.colorSelected)
        # Deleting the parent window deletes this ROI too.
        self.window.closeSignal.connect(self.delete)
        self.window.currentROI = self
        self.traceWindow = None  # To test if roi is plotted, check if traceWindow is None
        self.pts = np.array(pts)
        self.linkedROIs = set()
        self.resetSignals()
        self.makeMenu()

    def resetSignals(self):
        """Disconnect any previous region-change handlers and rewire them to this class."""
        try:
            self.sigRegionChanged.disconnect()
        except:
            pass
        try:
            self.sigRegionChangeFinished.disconnect()
        except:
            pass
        self.sigRegionChanged.connect(self.onRegionChange)
        self.sigRegionChangeFinished.connect(self.onRegionChangeFinished)

    def updateLinkedROIs(self, finish=False):
        """Redraw every linked ROI from this ROI's points and refresh their traces.

        Signals on the linked ROIs are blocked to avoid recursive updates.
        """
        for roi in self.linkedROIs:
            roi.blockSignals(True)
            roi.draw_from_points(self.pts, finish=False)
            if roi.traceWindow is not None:
                if not finish:
                    roi.traceWindow.translated(roi)
                else:
                    roi.traceWindow.translateFinished(roi)
            roi.blockSignals(False)

    def redraw_trace(self):
        """Force the trace window (if any) to recompute this ROI's trace."""
        if self.traceWindow is not None:
            self.traceWindow.translateFinished(self)

    def getSnapPosition(self, *args, **kargs):
        # rect_line handles snap to pixel centers, hence the half-pixel shift.
        shift = pg.Point(.5, .5) if isinstance(self, (ROI_rect_line, )) else pg.Point(0, 0)
        return pg.ROI.getSnapPosition(self, *args, **kargs) + shift

    def onRegionChange(self):
        # Fired continuously while the ROI is being dragged/resized.
        self.pts = self.getPoints()
        self.updateLinkedROIs(finish=False)

    def onRegionChangeFinished(self):
        # Fired once when the drag/resize ends.
        self.pts = self.getPoints()
        self.updateLinkedROIs(finish=True)

    def link(self, roi):
        '''This function links this roi to another, so a translation of one will cause a translation of the other'''
        if not isinstance(roi, type(self)):
            return
        # Merge both link groups, excluding each ROI from its own set.
        join = self.linkedROIs | roi.linkedROIs | {self, roi}
        self.linkedROIs = join - {self}
        roi.linkedROIs = join - {roi}

    def getMask(self):
        '''
        Returns the list of integer points contained within the ROI
        '''
        raise NotImplementedError()

    def getTrace(self, bounds=None):
        '''
        Compute the average of the pixels within this ROI for the window of this ROI, return an array of average values, cropped by bounds
        '''
        trace = None
        if self.window.image.ndim == 4 or self.window.metadata['is_rgb']:
            g.alert("Plotting trace of RGB movies is not supported. Try splitting the channels.")
            return None
        s1, s2 = self.getMask()
        if np.size(s1) == 0 or np.size(s2) == 0:
            # ROI entirely outside the image: return a flat zero trace.
            trace = np.zeros(self.window.mt)
        elif self.window.image.ndim == 3:
            trace = self.window.image[:, s1, s2]
            # Collapse all spatial axes, leaving one value per frame.
            while trace.ndim > 1:
                trace = np.average(trace, 1)
        elif self.window.image.ndim == 2:
            trace = self.window.image[s1, s2]
            trace = [np.average(trace)]
        if bounds:
            trace = trace[bounds[0]:bounds[1]]
        return trace

    def getPoints(self):
        '''
        return the points that represent this ROI. Used for exporting
        '''
        raise NotImplementedError()

    def draw_from_points(self, pts, finish=True):
        '''
        Redraw the ROI from the given points, used on linked ROIs
        '''
        raise NotImplementedError()

    def setMouseHover(self, hover):
        ## Inform the ROI that the mouse is(not) hovering over it
        if self.mouseHovering == hover:
            return
        self.mouseHovering = hover
        if hover:
            # Highlight in red while hovered.
            self.currentPen = pg.mkPen(QtGui.QColor(255, 0, 0))
        else:
            self.currentPen = self.pen
        self.update()

    def plot(self):
        """Plot this ROI's trace; returns the trace window (or None on failure)."""
        self.traceWindow = roiPlot(self)
        if self.traceWindow == None:
            return
        # Scrubbing the trace window scrubs the image window.
        self.traceWindow.indexChanged.connect(self.window.setIndex)
        self.plotSignal.emit()
        return self.traceWindow

    def changeColor(self):
        """Open the color picker; colorSelected() applies the choice."""
        self.colorDialog.open()

    def colorSelected(self, color):
        """Apply the color chosen in the dialog to this ROI's pen."""
        if color.isValid():
            self.setPen(QtGui.QColor(color.name()))
            self.sigRegionChangeFinished.emit(self)

    def unplot(self):
        """Remove this ROI from its trace window, if plotted."""
        try:
            self.traceWindow.indexChanged.disconnect(self.window.setIndex)
        except:
            # sometimes errors, says signals not connected
            pass
        if self.traceWindow != None:
            self.traceWindow.removeROI(self)
            self.traceWindow = None

    def copy(self):
        """Store this ROI in the global clipboard for pasting into other windows."""
        g.clipboard = self

    def raiseContextMenu(self, ev):
        """Show this ROI's context menu (plus the window's actions) at the event position."""
        pos = ev.screenPos()
        self.menu.addSeparator()
        self.menu.addActions(self.window.menu.actions())
        self.menu.popup(QtCore.QPoint(pos.x(), pos.y()))

    def makeMenu(self):
        """Build the right-click menu (plot/unplot, color, copy, delete)."""
        def plotPressed():
            if plotAct.text() == "&Plot":
                self.plot()
            else:
                self.unplot()
        plotAct = QtWidgets.QAction("&Plot", self, triggered=plotPressed)
        colorAct = QtWidgets.QAction("&Change Color", self, triggered=self.changeColor)
        copyAct = QtWidgets.QAction("&Copy", self, triggered=self.copy)
        remAct = QtWidgets.QAction("&Delete", self, triggered=self.delete)
        self.menu = QtWidgets.QMenu("ROI Menu")
        def updateMenu():
            #plotAct.setEnabled(self.window.image.ndim > 2)
            # Toggle the label to reflect the current plotted state.
            plotAct.setText("&Plot" if self.traceWindow == None else "&Unplot")
            self.window.menu.aboutToShow.emit()
        self.menu.addAction(plotAct)
        self.menu.addAction(colorAct)
        self.menu.addAction(copyAct)
        self.menu.addAction(remAct)
        self.menu.aboutToShow.connect(updateMenu)

    def delete(self):
        """Unplot, unlink, and remove this ROI from its window and the clipboard."""
        self.unplot()
        for roi in self.linkedROIs:
            if self in roi.linkedROIs:
                roi.linkedROIs.remove(self)
        if self in self.window.rois:
            self.window.rois.remove(self)
        self.window.currentROI = None
        self.window.imageview.removeItem(self)
        self.window.closeSignal.disconnect(self.delete)
        if g.clipboard == self:
            g.clipboard = None

    def drawFinished(self):
        """Register the finished ROI with its window (called by ROI_Drawing/makeROI)."""
        self.window.imageview.addItem(self)
        self.window.rois.append(self)
        self.window.currentROI = self

    def str(self):
        """Serialize as the ROI kind followed by one 'x y' pair per line."""
        s = self.kind + '\n'
        for x, y in self.pts:
            s += '%d %d\n' % (x, y)
        return s

    def showMask(self):
        """Open a new window displaying a binary image of this ROI's mask (debug aid)."""
        from .window import Window
        im = np.zeros_like(self.window.imageview.getImageItem().image)
        s1, s2 = self.getMask()
        im[s1, s2] = 1
        return Window(im)
class ROI_line(ROI_Base, pg.LineSegmentROI):
    '''
    ROI Line class for selecting a straight line of pixels between two points
    Extends from the ROI_Base class and pyqtgraph ROI.LineSegmentROI
    '''
    kind = 'line'
    plotSignal = QtCore.Signal()

    def __init__(self, window, positions, **kargs):
        roiArgs = self.INITIAL_ARGS.copy()
        roiArgs.update(kargs)
        pg.LineSegmentROI.__init__(self, positions=positions, **roiArgs)
        self.kymograph = None
        self.kymographAct = QtWidgets.QAction("&Kymograph", self, triggered=self.update_kymograph)
        ROI_Base.__init__(self, window, positions)

    def paint(self, p, *args):
        """Draw the segment between the two handle positions."""
        p.setRenderHint(QtGui.QPainter.Antialiasing)
        p.setPen(self.currentPen)
        h1 = self.handles[0]['item'].pos()
        h2 = self.handles[1]['item'].pos()
        p.drawLine(h1, h2)

    def resetSignals(self):
        ROI_Base.resetSignals(self)
        # Keep endpoints on integer pixel positions while dragging.
        self.sigRegionChanged.connect(self.snapPoints)

    def snapPoints(self):
        """Snap both handles to the grid without re-emitting change signals."""
        fix = False  # feeds the commented-out re-emit below; kept for that reason
        self.blockSignals(True)
        for handle in self.handles:
            pos = handle['pos']
            pos_snap = self.getSnapPosition(pg.Point(pos))  # + pg.Point(.5, .5)
            if not (pos == pos_snap):
                handle['item'].setPos(pos_snap)
                handle['pos'] = pos_snap
                fix = True
        self.blockSignals(False)
        #if fix:
        #    self.sigRegionChanged.emit(self)

    def draw_from_points(self, pts, finish=True):
        """Move both endpoints to `pts`; used to mirror a linked ROI."""
        self.blockSignals(True)
        self.movePoint(self.handles[0]['item'], pts[0], finish=False)
        self.movePoint(self.handles[1]['item'], pts[1], finish=False)
        self.pts = pts
        self.blockSignals(False)
        if finish:
            self.sigRegionChangeFinished.emit(self)

    def delete(self):
        ROI_Base.delete(self)
        if self.kymograph:
            self.deleteKymograph()

    def getMask(self):
        """Return (xx, yy) integer pixel coordinates along the line, clipped to the image."""
        x = np.array([p[0] for p in self.pts], dtype=int)
        y = np.array([p[1] for p in self.pts], dtype=int)
        xx, yy = line(x[0], y[0], x[1], y[1])
        # Drop coordinates that fall outside the image bounds.
        idx_to_keep = np.logical_not((xx >= self.window.mx) | (xx < 0) | (yy >= self.window.my) | (yy < 0))
        xx = xx[idx_to_keep]
        yy = yy[idx_to_keep]
        return xx, yy

    def getPoints(self):
        """Endpoint positions in image coordinates (handle pos + ROI pos)."""
        return np.array([handle['pos'] + self.state['pos'] for handle in self.handles])

    def makeMenu(self):
        ROI_Base.makeMenu(self)
        self.menu.addAction(self.kymographAct)
        # Kymographs only make sense for 3D (time-series), non-RGB movies.
        self.kymographAct.setEnabled(self.window.image.ndim == 3 and not self.window.metadata['is_rgb'])

    def update_kymograph(self):
        """Build (or refresh) the kymograph image: pixels along the line vs. time."""
        tif = self.window.image
        if tif.ndim != 3:
            g.alert("Can only kymograph a 3d movie")
            return
        xx, yy = self.getMask()
        if len(xx) == 0:
            # Original checked this twice after a redundant np.array round-trip;
            # getMask() already returns arrays, so one check suffices.
            return
        # Fancy indexing pulls the line's pixels from every frame at once;
        # float cast matches the original np.zeros accumulator dtype.
        mn = np.asarray(tif[:, xx, yy], dtype=float).T
        if self.kymograph is None:
            self.createKymograph(mn)
        else:
            self.kymograph.imageview.setImage(mn, autoLevels=False, autoRange=False)
            #self.kymograph.imageview.view.setAspectLocked(lock=True,ratio=mn.shape[1]/mn.shape[0])

    def createKymograph(self, mn):
        from .window import Window
        oldwindow = g.currentWindow
        name = oldwindow.name + ' - Kymograph'
        self.kymograph = Window(mn, name, metadata=self.window.metadata)
        self.sigRegionChanged.connect(self.update_kymograph)
        self.kymograph.closeSignal.connect(self.deleteKymograph)
        self.sigRemoveRequested.connect(self.deleteKymograph)

    def deleteKymograph(self):
        self.kymograph.closeSignal.disconnect(self.deleteKymograph)
        self.kymograph = None
class ROI_rectangle(ROI_Base, pg.ROI):
    '''
    ROI rectangle class for selecting a set width and height group of pixels on an image
    Extends from pyqtgraph ROI and ROI_Base
    Parameters:
        window: parent window to draw the ROI in
        pos: XY coordinate of the upper left corner of the rectangle
        size: (width, height) tuple of the ROI
        resizable: scale handles will be drawn on each corner if this is True
        See pg.ROI for other parameters
    Functions:
        crop():
            create a new window with the original image cropped within this ROI
    '''
    kind = 'rectangle'
    plotSignal = QtCore.Signal()

    def __init__(self, window, pos, size, resizable=True, **kargs):
        roiArgs = self.INITIAL_ARGS.copy()
        roiArgs.update(kargs)
        pos = np.array(pos, dtype=int)
        size = np.array(size, dtype=int)
        pg.ROI.__init__(self, pos, size, **roiArgs)
        if resizable:
            # One scale handle on each corner, anchored at the opposite corner.
            self.addScaleHandle([0, 1], [1, 0])
            self.addScaleHandle([1, 0], [0, 1])
            self.addScaleHandle([0, 0], [1, 1])
            self.addScaleHandle([1, 1], [0, 0])
        self.cropAction = QtWidgets.QAction('&Crop', self, triggered=self.crop)
        ROI_Base.__init__(self, window, [pos, size])

    def center_around(self, x, y):
        """Translate the rectangle so its center sits on (x, y); size unchanged."""
        old_pts = self.getPoints()
        old_center = old_pts[0] + .5 * old_pts[1]
        new_center = np.array([x, y])
        diff = new_center - old_center
        new_pts = np.array([old_pts[0] + diff, old_pts[1]])
        self.draw_from_points(new_pts)

    def getPoints(self):
        """Return [[x, y], [w, h]] as integers."""
        return np.array([self.state['pos'], self.state['size']], dtype=int)

    def contains_pts(self, x, y):
        """True if (x, y) lies strictly inside the rectangle."""
        target = np.array([x, y])
        return np.all(self.pts[0] < target) and np.all(target < self.pts[0] + self.pts[1])

    def getMask(self):
        """Return flattened (xx, yy) coordinates of every pixel inside, clipped to the image."""
        x, y = self.state['pos']
        w, h = self.state['size']
        xmin = max(x, 0)
        ymin = max(y, 0)
        xmax = min(x + w, self.window.mx)
        ymax = min(y + h, self.window.my)
        xx, yy = np.meshgrid(np.arange(xmin, xmax, dtype=int), np.arange(ymin, ymax, dtype=int))
        return xx.flatten(), yy.flatten()

    def draw_from_points(self, pts, finish=True):
        """Reposition/resize from [[x, y], [w, h]]; used to mirror a linked ROI."""
        self.setPos(pts[0], finish=False)
        self.setSize(pts[1], finish=False)
        self.pts = np.array(pts)
        if finish:
            self.sigRegionChangeFinished.emit(self)

    def makeMenu(self):
        ROI_Base.makeMenu(self)
        self.menu.addAction(self.cropAction)

    def crop(self):
        """Open a new Window containing the image cropped to this rectangle.

        Returns the new Window, or None if the image dimensionality is unsupported.
        """
        from .window import Window
        r = self.boundingRect()
        p1 = r.topLeft() + self.state['pos']
        p2 = r.bottomRight() + self.state['pos']
        x1, y1 = int(p1.x()), int(p1.y())
        x2, y2 = int(p2.x()), int(p2.y())
        tif = self.window.image
        # Original repeated identical clamping code in every branch (and
        # duplicated `mx, my = tif.shape`); hoisted here with the same behavior.
        if tif.ndim == 3:
            mt, mx, my = tif.shape
        elif tif.ndim == 2:
            mx, my = tif.shape
        elif tif.ndim == 4:
            mt, mx, my, mc = tif.shape
        else:
            g.alert("Image dimensions not understood")
            return None
        if x1 < 0: x1 = 0
        if y1 < 0: y1 = 0
        if x2 >= mx: x2 = mx - 1
        if y2 >= my: y2 = my - 1
        if tif.ndim == 2:
            newtif = tif[x1:x2, y1:y2]
        else:
            # 3D and 4D both crop the two spatial axes after the time axis.
            newtif = tif[:, x1:x2, y1:y2]
        return Window(newtif, self.window.name + ' Cropped', metadata=self.window.metadata)
class ROI_freehand(ROI_Base, pg.PolyLineROI):
    """Closed free-hand polygon ROI built from a list of points.

    Extends ROI_Base and pyqtgraph's PolyLineROI (always closed).
    """
    kind = 'freehand'
    plotSignal = QtCore.Signal()

    def __init__(self, window, pts, **kargs):
        roiArgs = self.INITIAL_ARGS.copy()
        roiArgs.update(kargs)
        roiArgs['closed'] = True
        pg.PolyLineROI.__init__(self, pts, **roiArgs)
        ROI_Base.__init__(self, window, pts)
        # Lazily-built cache of the polygon mask before translation; see getMask().
        self._untranslated_mask = None

    def draw_from_points(self, pts, finish=False):
        # NOTE(review): deliberately disabled — the early return makes the code
        # below unreachable; linked freehand ROIs are moved via translate() instead.
        return
        self.blockSignals(True)
        self.setPoints([pg.Point(p) for p in pts], closed=True)
        self.blockSignals(False)

    def setMouseHover(self, hover):
        # Recolor every segment rather than a single pen (polygon has many segments).
        for seg in self.segments:
            seg.setPen(QtGui.QColor(255, 0, 0) if hover else self.currentPen)

    def translate(self, pos, y=None, *args, **kargs):
        """Translate the polygon (snapped), and move all linked ROIs by the same delta."""
        if y is None:
            pos = pg.Point(pos)
        else:
            # avoid ambiguity where update is provided as a positional argument
            if isinstance(y, bool):
                raise TypeError("Positional arguments to setPos() must be numerical.")
            pos = pg.Point(pos, y)
        pos = self.getSnapPosition(pos)
        pg.PolyLineROI.translate(self, pos, *args, **kargs)
        for roi in self.linkedROIs:
            roi.blockSignals(True)
            roi.setPos(roi.state['pos'] + pos)
            roi.pts = roi.getPoints()
            roi.blockSignals(False)

    def getPoints(self):
        """Handle positions in image coordinates, as integers."""
        return np.array([h.pos() + self.state['pos'] for h in self.getHandles()], dtype=int)

    def removeSegment(self, seg):
        """Detach and delete one polygon segment and its handles."""
        for handle in seg.handles[:]:
            seg.removeHandle(handle['item'])
        self.segments.remove(seg)
        self.scene().removeItem(seg)

    def addSegment(self, h1, h2, index=None):
        """Insert a segment between handles h1 and h2 (appended if index is None)."""
        seg = pg.LineSegmentROI(handles=(h1, h2), pen=self.pen, parent=self, movable=False)
        if index is None:
            self.segments.append(seg)
        else:
            self.segments.insert(index, seg)
        seg.setAcceptedMouseButtons(QtCore.Qt.LeftButton)
        seg.setZValue(self.zValue() + 1)
        seg.setMouseHover = self.setMouseHover
        for h in seg.handles:
            h['item'].setAcceptedMouseButtons(h['item'].acceptedMouseButtons() | QtCore.Qt.LeftButton)  ## have these handles take left clicks too, so that handles cannot be added on top of other handles
            h['item'].setOpacity(0)

    def getMask(self):
        """Return (xx, yy) of pixels inside the polygon, clipped to the image.

        The untranslated polygon fill is cached and re-offset by the current
        position on later calls.
        NOTE(review): the cache is never cleared, so it appears to assume the
        polygon's shape (handle layout) does not change after the first call —
        confirm before relying on this after editing handles.
        """
        if self._untranslated_mask is not None:
            xx = self._untranslated_mask[0] + int(self.state['pos'][0])
            yy = self._untranslated_mask[1] + int(self.state['pos'][1])
        else:
            x, y = np.transpose(self.pts)
            mask = np.zeros(self.window.imageDimensions())
            xx, yy = polygon(x, y, shape=mask.shape)
            self._untranslated_mask = xx, yy
        # Drop coordinates outside the image bounds.
        idx_to_keep = np.logical_not((xx >= self.window.mx) | (xx < 0) | (yy >= self.window.my) | (yy < 0))
        xx = xx[idx_to_keep]
        yy = yy[idx_to_keep]
        return xx, yy
class ROI_rect_line(ROI_Base, QtWidgets.QGraphicsObject):
    """
    This ROI is a line with an adjustable width that can be composed of multiple straight line segments.
    Each segment is a pg.ROI rectangle; consecutive segments share a scale/rotate handle.
    """
    kind = 'rect_line'
    plotSignal = QtCore.Signal()
    sigRegionChanged = QtCore.Signal(object)
    sigRegionChangeFinished = QtCore.Signal(object)

    def __init__(self, window, pts, width=1, **kargs):
        self.roiArgs = self.INITIAL_ARGS.copy()
        self.roiArgs.update(kargs)
        self.roiArgs['scaleSnap'] = False
        self.width = width
        self.currentLine = None
        self.kymograph = None
        QtWidgets.QGraphicsObject.__init__(self)
        self.kymographAct = QtWidgets.QAction("&Kymograph", self, triggered=self.update_kymograph)
        self.removeLinkAction = QtWidgets.QAction('Remove Last Link', self, triggered=self.removeSegment)
        self.setWidthAction = QtWidgets.QAction("Set Width", self, triggered=lambda: self.setWidth())
        ROI_Base.__init__(self, window, pts)
        self.getPoints = self.getHandlePositions
        self.pen = QtGui.QPen(QtGui.QColor(255, 255, 0))
        self.pen.setWidth(0)
        self.lines = []
        if len(pts) < 2:
            raise Exception("Must start with at least 2 points")
        self.addSegment(pts[1], connectTo=pts[0])
        for p in pts[2:]:
            self.addSegment(p)
        self.extending = False

    def delete(self):
        ROI_Base.delete(self)
        if self.kymograph:
            self.deleteKymograph()

    def draw_from_points(self, pts, finish=False):
        """Rebuild the segment chain from `pts`; used to mirror a linked ROI."""
        while len(self.lines) > 1:
            self.removeSegment(self.lines[-1])
        self.lines[0].movePoint(0, pts[0])
        self.lines[0].movePoint(1, pts[1])
        for p in pts[2:]:
            self.addSegment(p)
        if finish:
            self.sigRegionChangeFinished.emit(self)
        self.pts = pts

    def getTrace(self, bounds=None):
        """Average the pixels covered by the (widened) line per frame."""
        if self.window.image.ndim > 3 or self.window.metadata['is_rgb']:
            g.alert("Plotting trace of RGB movies is not supported. Try splitting the channels.")
            return None
        if self.window.image.ndim == 3:
            region = self.getArrayRegion(self.window.imageview.image, self.window.imageview.getImageItem(), (1, 2))
            # Collapse spatial axes, one value per frame.
            while region.ndim > 1:
                region = np.average(region, 1)
        elif self.window.image.ndim == 2:
            region = self.getArrayRegion(self.window.imageview.image, self.window.imageview.getImageItem(), (0, 1))
            region = np.average(region)
        if bounds:
            region = region[bounds[0]:bounds[1]]
        return region

    def preview(self):
        """Show the extracted region in a live-updating preview window (debug aid)."""
        im = self.getArrayRegion(self.window.imageview.getImageItem().image, self.window.imageview.getImageItem(), (0, 1))
        if not hasattr(self, 'prev'):
            from .window import Window
            self.prev = Window(im)
            self.sigRegionChanged.connect(lambda a: self.preview())
        else:
            self.prev.imageview.setImage(im)

    def lineRegionChange(self, line):
        """Snap a dragged segment's handles to the grid and re-emit as one ROI change."""
        line.blockSignals(True)
        for i in range(2):
            p = self.mapFromScene(line.getHandles()[i].scenePos())
            p = line.getSnapPosition([p.x(), p.y()])
            if line.getHandles()[i].isMoving:
                line.movePoint(i, p)
        line.blockSignals(False)
        self.pts = self.getPoints()
        self.sigRegionChanged.emit(self)

    def getHandlePositions(self):
        """Return the positions of all handles in local coordinates."""
        p = self.mapFromScene(self.lines[0].getHandles()[0].scenePos())
        p = self.lines[0].getSnapPosition([p.x(), p.y()])
        pos = [p]
        for l in self.lines:
            p = self.mapFromScene(l.getHandles()[1].scenePos())
            p = l.getSnapPosition([p.x(), p.y()])
            pos.append(p)
        self.pts = pos
        return self.pts

    def getArrayRegion(self, arr, img=None, axes=(0, 1), **kwds):
        """Concatenate each segment's extracted region along the line axis."""
        rgns = []
        for l in self.lines:
            rgn = l.getArrayRegion(arr, img, axes=axes, **kwds)
            if rgn is None:
                continue
            rgns.append(rgn)
        ## make sure orthogonal axis is the same size
        ## (sometimes fp errors cause differences)
        if img.axisOrder == 'row-major':
            axes = axes[::-1]
        ms = min([r.shape[axes[1]] for r in rgns])
        sl = [slice(None)] * rgns[0].ndim
        sl[axes[1]] = slice(0, ms)
        # BUG FIX: indexing an ndarray with a list of slices is deprecated and
        # now raises in NumPy; a tuple is required.
        rgns = [r[tuple(sl)] for r in rgns]
        return np.concatenate(rgns, axis=axes[0])

    def addSegment(self, pos=(0, 0), connectTo=None):
        """
        Add a new segment to the ROI connecting from the previous endpoint to *pos*.
        (pos is specified in the parent coordinate system of the MultiRectROI)
        """
        ## by default, connect to the previous endpoint
        if connectTo is None:
            connectTo = self.lines[-1].getHandles()[1]
        ## create new ROI
        newRoi = pg.ROI((0, 0), [1, self.width], parent=self, pen=self.pen, **self.roiArgs)
        self.lines.append(newRoi)
        ## Add first SR handle
        if isinstance(connectTo, Handle):
            h = self.lines[-1].addScaleRotateHandle([0, 0.5], [1, 0.5], item=connectTo)
            newRoi.movePoint(connectTo, connectTo.scenePos(), coords='scene')
        else:
            h = self.lines[-1].addScaleRotateHandle([0, 0.5], [1, 0.5])
            newRoi.movePoint(h, connectTo, coords='scene')
        ## add second SR handle
        h = self.lines[-1].addScaleRotateHandle([1, 0.5], [0, 0.5])
        newRoi.movePoint(h, pos)
        newRoi.translatable = False
        newRoi.hoverEvent = lambda e: self.hoverEvent(newRoi, e)
        newRoi.sigRegionChanged.connect(self.lineRegionChange)
        newRoi.raiseContextMenu = self.raiseContextMenu
        newRoi.sigRegionChangeFinished.connect(lambda a: self.sigRegionChangeFinished.emit(self))
        self.sigRegionChanged.emit(self)

    def getNearestHandle(self, pos, max_distance=None):
        """Return the handle closest to `pos` (image coordinates), or None.

        If max_distance is given, only handles within that Manhattan distance
        are considered.
        BUG FIX: the original left `dist` unbound on the first comparison when
        max_distance was supplied (NameError) and never enforced the limit.
        """
        nearest = None
        best = None
        for l in self.lines:
            for i in range(2):
                p = self.window.imageview.getImageItem().mapFromScene(l.getSceneHandlePositions(i)[1])
                d = pg.Point(p - pos).manhattanLength()
                if max_distance is not None and d > max_distance:
                    continue
                if best is None or d < best:
                    nearest = l.handles[i]['item']
                    best = d
        return nearest

    def removeSegment(self, segment=None):
        """Remove a segment from the ROI."""
        if isinstance(segment, int):
            segment = self.lines[segment]
        if not isinstance(segment, pg.ROI):
            segment = self.currentLine
        for h in segment.getHandles():
            # Hand shared handles over to the neighboring segment.
            if len(h.rois) == 2 and h.parentItem() == segment:
                otherROI = [line for line in h.rois if line != segment][0]
                h.setParentItem(otherROI)
                h.setPos(0, .5)
            h.disconnectROI(segment)
        if segment in self.lines:
            self.lines.remove(segment)
        self.scene().removeItem(segment)
        segment.sigRegionChanged.disconnect()
        segment.sigRegionChangeFinished.disconnect()
        if len(self.lines) == 0:
            # Last segment gone: the whole ROI goes with it.
            self.delete()
        else:
            self.sigRegionChanged.emit(self)

    def extend(self, x, y, finish=True):
        """Extend the chain toward (x, y): start a new segment, then drag its endpoint."""
        # (debug prints removed)
        point = self.lines[0].getSnapPosition([x, y])
        if not self.extending:
            h = self.getNearestHandle(pg.Point(x, y))
            if h is not None and len(h.rois) > 1:
                # Interior handle: cannot extend from the middle of the chain.
                return
            self.extending = True
            self.addSegment(point, connectTo=h)
        else:
            self.lines[-1].handles[-1]['item'].movePoint(self.window.imageview.getImageItem().mapToScene(point))
        self.sigRegionChanged.emit(self)
        if finish:
            self.sigRegionChangeFinished.emit(self)

    def extendFinished(self):
        """Finish an extension drag; reorder `lines` if the chain was extended at its head."""
        self.extending = False
        self.extendHandle = None
        self.sigRegionChangeFinished.emit(self)
        if self.lines[0].getHandles()[0] in self.lines[-1].getHandles():
            # New segment was attached to the first handle: rotate it to the front
            # and flip its handle order so the chain stays head-to-tail.
            self.lines.insert(0, self.lines[-1])
            self.lines = self.lines[:-1]
            self.lines[0].handles = self.lines[0].handles[::-1]

    def hoverEvent(self, l, ev):
        """Track the hovered segment and highlight the whole chain in red."""
        self.currentLine = l
        if ev.enter:
            pen = QtGui.QPen(QtGui.QColor(255, 0, 0))
            pen.setWidth(0)
            self.setCurrentPen(pen)
        elif ev.exit:
            self.setCurrentPen(self.pen)

    def getMask(self):
        """Return (xx, yy) pixel coordinates along all segments (width ignored)."""
        xxs = []
        yys = []
        for i in range(len(self.pts) - 1):
            p1, p2 = self.pts[i], self.pts[i + 1]
            xx, yy = line(int(p1[0]), int(p1[1]), int(p2[0]), int(p2[1]))
            idx_to_keep = np.logical_not((xx >= self.window.mx) | (xx < 0) | (yy >= self.window.my) | (yy < 0))
            xx = xx[idx_to_keep]
            yy = yy[idx_to_keep]
            xxs.extend(xx)
            yys.extend(yy)
        return np.array(xxs, dtype=int), np.array(yys, dtype=int)

    def makeMenu(self):
        ROI_Base.makeMenu(self)
        self.menu.addAction(self.removeLinkAction)
        self.menu.addAction(self.setWidthAction)
        self.menu.addAction(self.kymographAct)
        self.kymographAct.setEnabled(self.window.image.ndim > 2)

    def raiseContextMenu(self, ev):
        # Only offer "Remove Link" on end segments (one of its handles is unshared).
        if np.any([len(i.rois) < 2 for i in self.currentLine.getHandles()]):
            self.removeLinkAction.setText("Remove Link")
            self.removeLinkAction.setVisible(True)
        else:
            self.removeLinkAction.setVisible(False)
        ROI_Base.raiseContextMenu(self, ev)

    def boundingRect(self):
        # Segments (child ROIs) carry their own bounds; the parent is empty.
        return QtCore.QRectF()

    def paint(self, p, *args):
        # Painting is delegated entirely to the child segment ROIs.
        pass

    def setPen(self, pen):
        pen = QtGui.QPen(pen)
        pen.setWidth(0)
        self.pen = pen
        self.setCurrentPen(pen)

    def setCurrentPen(self, pen):
        pen = QtGui.QPen(pen)
        pen.setWidth(0)
        for l in self.lines:
            l.currentPen = pen
            l.update()

    def update_kymograph(self):
        """Build (or refresh) the kymograph: line pixels (averaged across width) vs. time."""
        tif = self.window.image
        if tif.ndim != 3:
            g.alert("Can only kymograph on 3D movies")
            return
        if self.width == 1:
            # Width-1 fast path: sample pixels directly along the segments.
            # (Removed unused w/h/QRect locals from the original.)
            xx, yy = self.getMask()
            mn = tif[:, xx, yy].T
        else:
            region = self.getArrayRegion(self.window.imageview.image, self.window.imageview.getImageItem(), (1, 2))
            mn = np.average(region, 2).T
        if self.kymograph is None:
            self.createKymograph(mn)
        else:
            if mn.size > 0:
                self.kymograph.imageview.setImage(mn, autoLevels=False, autoRange=False)
                #self.kymograph.imageview.view.setAspectLocked(lock=True,ratio=mn.shape[1]/mn.shape[0])

    def setWidth(self, newWidth=None):
        """Set the segment width, prompting the user when no value is given."""
        s = True
        if newWidth == None:
            newWidth, s = QtWidgets.QInputDialog.getInt(None, "Enter a width value", 'Float Value', value=self.width)
        if not s:
            # covers both a cancelled dialog and a zero width
            return
        for l in self.lines:
            l.scale([1.0, newWidth / self.width], center=[0.5, 0.5])
        self.width = newWidth
        self.sigRegionChangeFinished.emit(self)

    def createKymograph(self, mn):
        from .window import Window
        oldwindow = g.currentWindow
        name = oldwindow.name + ' - Kymograph'
        self.kymograph = Window(mn, name, metadata=self.window.metadata)
        # Rate-limited proxy keeps interactive dragging responsive.
        self.kymographproxy = pg.SignalProxy(self.sigRegionChanged, rateLimit=1, slot=self.update_kymograph)
        self.sigRegionChanged.connect(self.update_kymograph)
        self.kymograph.closeSignal.connect(self.deleteKymograph)

    def deleteKymograph(self):
        self.kymographproxy.disconnect()
        self.kymograph.closeSignal.disconnect(self.deleteKymograph)
        self.kymograph = None
def makeROI(kind, pts, window=None, **kargs):
    """Create, register, and return an ROI of the given kind in `window`.

    Args:
        kind (str): one of 'freehand', 'rectangle', 'line', 'rect_line'.
        pts: points defining the ROI; for 'rectangle' either [pos, size]
            or a list of corner points (bounding box is computed).
        window: target window; defaults to g.currentWindow.
        **kargs: forwarded to the ROI class constructor.

    Returns:
        The new ROI, or None on error (no window / unknown kind).
    """
    if window is None:
        window = g.currentWindow
    if window is None:
        # BUG FIX: corrected "make and ROI" typo in the user-facing message.
        g.alert('ERROR: In order to make an ROI a window needs to be selected')
        return None
    if kind == 'freehand':
        roi = ROI_freehand(window, pts, **kargs)
    elif kind == 'rectangle':
        if len(pts) > 2:
            # More than two points: use their bounding box.
            size = np.ptp(pts, 0)
            top_left = np.min(pts, 0)
        else:
            size = pts[1]
            top_left = pts[0]
        roi = ROI_rectangle(window, top_left, size, **kargs)
    elif kind == 'line':
        roi = ROI_line(window, pts, **kargs)
    elif kind == 'rect_line':
        roi = ROI_rect_line(window, pts, **kargs)
    else:
        g.alert("ERROR: THIS KIND OF ROI COULD NOT BE FOUND: {}".format(kind))
        return None
    pen = QtGui.QPen(QtGui.QColor(g.settings['roi_color']) if g.settings['roi_color'] != 'random' else random_color())
    pen.setWidth(0)
    roi.drawFinished()
    roi.setPen(pen)
    return roi
def open_rois(filename=None):
    """
    Open an roi.txt file, creates ROI objects and places them in the current Window.

    Args:
        filename (str): The filename (including full path) of the roi.txt file.
            If None, a file dialog is shown.

    Returns:
        list of rois, or None if no file was chosen.
    """
    if filename is None:
        filetypes = '*.txt'
        prompt = 'Load ROIs from file'
        filename = open_file_gui(prompt, filetypes=filetypes)
        if filename is None:
            return None
    # BUG FIX: original leaked the file handle (open(...).read()).
    with open(filename, 'r') as f:
        text = f.read()
    rois = []
    kind = None
    pts = None
    for text_line in text.split('\n'):
        if kind is None:
            if text_line == '':
                # BUG FIX: consecutive blank lines used to start a bogus ROI
                # with kind '' and no points; skip them instead.
                continue
            kind = text_line
            pts = []
        elif text_line == '':
            # Blank line terminates one ROI record.
            rois.append(makeROI(kind, pts))
            kind = None
            pts = None
        else:
            pts.append(tuple(int(float(i)) for i in text_line.split()))
    if kind is not None and pts:
        # BUG FIX: a file without a trailing blank line used to silently
        # drop its last ROI.
        rois.append(makeROI(kind, pts))
    return rois
cleaning up roi.py
# -*- coding: utf-8 -*-
from qtpy import QtGui, QtCore, QtWidgets
import pyqtgraph as pg
from pyqtgraph.graphicsItems.ROI import Handle
from skimage.draw import polygon, line
import numpy as np
import os
from scipy.ndimage.interpolation import rotate
from . import global_vars as g
from .utils.misc import random_color, open_file_gui
from .tracefig import roiPlot
class ROI_Drawing(pg.GraphicsObject):
    """
    This class is used by the g.currentWindow when an roi is being created. Once the creation is finished, drawFinished()
    is called and this class returns an ROI object.
    """
    def __init__(self, window, x, y, kind):
        pg.GraphicsObject.__init__(self)
        window.imageview.addItem(self)
        self.window = window
        self.pts = [pg.Point(round(x), round(y))]
        # If the press landed on an existing rect_line handle, hand drawing over
        # to that ROI (extendRectLine rebinds extend/drawFinished) and bail out.
        if self.extendRectLine():
            window.imageview.removeItem(self)
            return
        self.kind = kind
        self.state = {'pos': pg.Point(x, y), 'size': pg.Point(0, 0)}
        self.color = QtGui.QColor(g.settings['roi_color']) if g.settings['roi_color'] != 'random' else random_color()

    def cancel(self):
        """Abort the in-progress drawing and clear the current ROI."""
        g.currentWindow.imageview.removeItem(self)
        g.currentWindow.currentROI = None

    def extendRectLine(self):
        """Return True if the start point grabbed a handle of an existing ROI_rect_line.

        When it did, this object's extend/drawFinished/boundingRect are rebound to
        the existing ROI so the drag extends that ROI instead of creating a new one.
        """
        for roi in self.window.rois:
            if isinstance(roi, ROI_rect_line):
                a = roi.getNearestHandle(self.pts[0])
                if a:
                    roi.extendHandle = a
                    self.extend = roi.extend
                    self.drawFinished = roi.extendFinished
                    #self.__dict__.update(roi.__dict__)
                    self.boundingRect = roi.boundingRect
                    return True
        return False

    def extend(self, x, y):
        """Add the mouse position to the preview shape (called while dragging)."""
        new_pt = pg.Point(round(x), round(y))
        if self.kind == 'freehand':
            if self.pts[-1] != new_pt:
                self.pts.append(new_pt)
        elif self.kind in ('line', 'rectangle', 'rect_line'):
            # Two-point shapes: keep the anchor, move the second point.
            if len(self.pts) == 1:
                self.pts.append(new_pt)
            else:
                self.pts[1] = new_pt
            #self.pts = sorted(self.pts, key=lambda a: a.x()/a.y())
        self.state['pos'] = pg.Point(*np.min(self.pts, 0))
        self.state['size'] = pg.Point(*np.ptp(self.pts, 0))
        self.prepareGeometryChange()
        self.update()

    def paint(self, p, *args):
        """Draw the in-progress shape preview."""
        pen = QtGui.QPen(self.color)
        pen.setWidth(0)
        p.setPen(pen)
        if self.kind == 'freehand':
            p.drawPolyline(*self.pts)
        elif self.kind == 'rectangle':
            p.drawRect(self.boundingRect())
        elif self.kind in ('rect_line', 'line'):
            p.drawLine(*self.pts)

    def drawFinished(self):
        """Remove the preview and delegate ROI construction to makeROI()."""
        self.window.imageview.removeItem(self)
        if self.kind == 'rectangle':
            pts = [self.state['pos'], self.state['size']]
        else:
            pts = self.pts
        return makeROI(self.kind, pts, self.window)

    def contains(self, *args):
        # Accept either a QPointF or separate (x, y) coordinates.
        if len(args) == 2:
            args = [pg.Point(*args)]
        return pg.GraphicsObject.contains(self, *args)

    def boundingRect(self):
        return QtCore.QRectF(self.state['pos'].x(), self.state['pos'].y(), self.state['size'].x(), self.state['size'].y())
class ROI_Base():
    """ ROI_Base interface for all ROI types, template class for duplicate functions and functions to override.
    connect window.closeEvent to ROI delete
    set the window.currentROI to self
    Attributes:
        colorDialog: dialog for selecting the color of the ROI and its trace
        traceWindow: the tracewindow that this ROI is plotted to, or None
        mask: array of XY values that are contained within the ROI
        pts: array of XY values used to copy the ROI
        linkedROIs: set of rois that act as one ROI
    Not Implemented Functions:
        getMask()
        getPoints()
    Functions:
        plot():
            run the roiPlot function and link this window to the traceWindow
            Returns the traceWindow
        unplot():
            Remove the roi from its traceWindow
        link(roi):
            add an roi to the linkedROIs set, so they translate together
        colorSelected(QColor):
            set the color of the roi
        copy():
            store the roi in the clipboard
        paste():
            Create an roi from the clipboard ROI using roi.getPoints()
        delete():
            unplot the ROI, remove the ROI from the window, clear it from the clipboard if it was copied, disconnect all signals
        drawFinished():
            add the ROI to the window, called by ROI_Drawing
        str():
            return kind and self.pts for recreating the ROI
    """
    # Default keyword arguments forwarded to the pyqtgraph ROI constructors.
    INITIAL_ARGS = {'translateSnap': True, 'removable': True, 'snapSize': 1, 'scaleSnap': True}
    def __init__(self, window, pts):
        self.window = window
        self.colorDialog=QtWidgets.QColorDialog()
        self.colorDialog.colorSelected.connect(self.colorSelected)
        # Delete the ROI when its parent window closes.
        self.window.closeSignal.connect(self.delete)
        self.window.currentROI = self
        self.traceWindow = None  # To test if roi is plotted, check if traceWindow is None
        self.pts = np.array(pts)
        self.linkedROIs = set()
        self.resetSignals()
        self.makeMenu()
        self.mouseHovering = False
        self.pen = None  # This is the permanent pen for the ROI.
        self.currentPen = None  # This is the current, temporary pen.
        self.set_ROI_pen()
        self.drawFinished()
    def set_ROI_pen(self, pen=None):
        """ This should be called when permanently setting the pen. This doesn't need to be overwritten when the ROI has
        components that need to be set individually. """
        if pen is None:
            # Fall back to the configured ROI color (or a random one).
            color = QtGui.QColor(g.settings['roi_color']) if g.settings['roi_color'] != 'random' else random_color()
            pen = QtGui.QPen(color)
        # Width 0 keeps the pen cosmetic (1 px regardless of zoom).
        pen.setWidth(0)
        self.pen = pen
        self.set_currentPen(pen)
    def set_currentPen(self, pen):
        """ This should be called when temporarily setting the pen. This needs to be overwritten when the ROI has
        components that need to be set individually. """
        self.currentPen = pen
        self.setPen(self.currentPen)
        self.update()
    def paint(self, p, *args):
        """ This is overwritten by most classes"""
        pass
    def setPen(self, pen):
        """ This is overwritten by most classes"""
        pass
    def resetSignals(self):
        # Disconnect any previous handlers, then reconnect our own.
        # NOTE(review): the bare excepts swallow the "not connected" error
        # pyqtgraph raises on a fresh ROI; deliberate best-effort.
        try:
            self.sigRegionChanged.disconnect()
        except:
            pass
        try:
            self.sigRegionChangeFinished.disconnect()
        except:
            pass
        self.sigRegionChanged.connect(self.onRegionChange)
        self.sigRegionChangeFinished.connect(self.onRegionChangeFinished)
    def updateLinkedROIs(self, finish=False):
        # Propagate this ROI's geometry to every linked ROI; signals are
        # blocked on each to avoid recursive update loops.
        for roi in self.linkedROIs:
            roi.blockSignals(True)
            roi.draw_from_points(self.pts, finish=False)
            if roi.traceWindow is not None:
                if not finish:
                    roi.traceWindow.translated(roi)
                else:
                    roi.traceWindow.translateFinished(roi)
            roi.blockSignals(False)
    def redraw_trace(self):
        # Recompute this ROI's trace in its trace window, if plotted.
        if self.traceWindow is not None:
            self.traceWindow.translateFinished(self)
    def getSnapPosition(self, *args, **kargs):
        # rect_line ROIs snap to pixel centers (+0.5), others to pixel corners.
        shift = pg.Point(.5, .5) if isinstance(self, (ROI_rect_line, )) else pg.Point(0, 0)
        return pg.ROI.getSnapPosition(self, *args, **kargs) + shift
    def onRegionChange(self):
        # Live update while dragging.
        self.pts = self.getPoints()
        self.updateLinkedROIs(finish=False)
    def onRegionChangeFinished(self):
        # Final update when the drag ends.
        self.pts = self.getPoints()
        self.updateLinkedROIs(finish=True)
    def link(self,roi):
        '''This function links this roi to another, so a translation of one will cause a translation of the other'''
        if not isinstance(roi, type(self)):
            # Only ROIs of the same concrete type can be linked.
            return
        # Merge both link sets; each ROI keeps every member except itself.
        join = self.linkedROIs | roi.linkedROIs | {self, roi}
        self.linkedROIs = join - {self}
        roi.linkedROIs = join - {roi}
    def getMask(self):
        '''
        Returns the list of integer points contained within the ROI
        '''
        raise NotImplementedError()
    def getTrace(self, bounds=None):
        '''
        Compute the average of the pixels within this ROI for the window of this ROI, return an array of average values, cropped by bounds
        '''
        trace = None
        if self.window.image.ndim == 4 or self.window.metadata['is_rgb']:
            g.alert("Plotting trace of RGB movies is not supported. Try splitting the channels.")
            return None
        s1, s2 = self.getMask()
        if np.size(s1) == 0 or np.size(s2) == 0:
            # ROI lies entirely outside the image: return a flat zero trace.
            trace = np.zeros(self.window.mt)
        elif self.window.image.ndim == 3:
            trace = self.window.image[:, s1, s2]
            # Average over all spatial axes, leaving one value per frame.
            while trace.ndim > 1:
                trace = np.average(trace, 1)
        elif self.window.image.ndim == 2:
            # Still image: a single-point "trace".
            trace = self.window.image[s1, s2]
            trace = [np.average(trace)]
        if bounds:
            trace = trace[bounds[0]:bounds[1]]
        return trace
    def getPoints(self):
        '''
        return the points that represent this ROI. Used for exporting
        '''
        raise NotImplementedError()
    def draw_from_points(self, pts, finish=True):
        '''
        Redraw the ROI from the given points, used on linked ROIs
        '''
        raise NotImplementedError()
    def setMouseHover(self, hover):
        """
        Inform the ROI that the mouse is or is not hovering over it.
        Args:
            hover (bool)
        """
        if self.mouseHovering is hover:
            return
        self.mouseHovering = hover
        if hover:
            # Highlight in red while hovered.
            self.set_currentPen(QtGui.QPen(QtGui.QColor(255, 0, 0)))
        else:
            self.set_currentPen(self.pen)
    def plot(self):
        # Plot this ROI's trace; roiPlot may return None (e.g. nothing to plot).
        self.traceWindow = roiPlot(self)
        if self.traceWindow == None:
            return
        # Keep the image window's current frame in sync with the trace cursor.
        self.traceWindow.indexChanged.connect(self.window.setIndex)
        self.plotSignal.emit()
        return self.traceWindow
    def changeColor(self):
        self.colorDialog.open()
    def colorSelected(self, color):
        # Callback from the color dialog; applies the chosen pen color.
        if color.isValid():
            self.set_ROI_pen(QtGui.QPen(QtGui.QColor(color.name())))
            self.sigRegionChangeFinished.emit(self)
    def unplot(self):
        try:
            self.traceWindow.indexChanged.disconnect(self.window.setIndex)
        except:
            # sometimes errors, says signals not connected
            pass
        if self.traceWindow != None:
            self.traceWindow.removeROI(self)
            self.traceWindow = None
    def copy(self):
        # The clipboard holds the ROI object itself; paste uses getPoints().
        g.clipboard=self
    def raiseContextMenu(self, ev):
        pos = ev.screenPos()
        # Append the parent window's actions below this ROI's own actions.
        self.menu.addSeparator()
        self.menu.addActions(self.window.menu.actions())
        self.menu.popup(QtCore.QPoint(pos.x(), pos.y()))
    def makeMenu(self):
        # Build the right-click context menu; plotAct toggles plot/unplot.
        def plotPressed():
            if plotAct.text() == "&Plot":
                self.plot()
            else:
                self.unplot()
        plotAct = QtWidgets.QAction("&Plot", self, triggered=plotPressed)
        colorAct = QtWidgets.QAction("&Change Color",self,triggered=self.changeColor)
        copyAct = QtWidgets.QAction("&Copy", self, triggered=self.copy)
        remAct = QtWidgets.QAction("&Delete", self, triggered=self.delete)
        self.menu = QtWidgets.QMenu("ROI Menu")
        def updateMenu():
            #plotAct.setEnabled(self.window.image.ndim > 2)
            # Relabel the toggle according to current plotted state.
            plotAct.setText("&Plot" if self.traceWindow == None else "&Unplot")
            self.window.menu.aboutToShow.emit()
        self.menu.addAction(plotAct)
        self.menu.addAction(colorAct)
        self.menu.addAction(copyAct)
        self.menu.addAction(remAct)
        self.menu.aboutToShow.connect(updateMenu)
    def delete(self):
        # Fully remove this ROI: unplot, unlink, detach from window/clipboard.
        self.unplot()
        for roi in self.linkedROIs:
            if self in roi.linkedROIs:
                roi.linkedROIs.remove(self)
        if self in self.window.rois:
            self.window.rois.remove(self)
        self.window.currentROI=None
        self.window.imageview.removeItem(self)
        self.window.closeSignal.disconnect(self.delete)
        if g.clipboard == self:
            g.clipboard = None
    def drawFinished(self):
        # Register the finished ROI with its window.
        self.window.imageview.addItem(self)
        self.window.rois.append(self)
        self.window.currentROI = self
    def str(self):
        # Serialized form used by save/open: kind on one line, then "x y" pairs.
        s = self.kind + '\n'
        for x, y in self.pts:
            s += '{} {}\n'.format(x, y)
        return s
    def showMask(self):
        # Debug helper: open a new Window showing the binary mask of this ROI.
        from .window import Window
        im = np.zeros_like(self.window.imageview.getImageItem().image)
        s1, s2 = self.getMask()
        im[s1, s2] = 1
        return Window(im)
class ROI_line(ROI_Base, pg.LineSegmentROI):
    '''
    ROI Line class for selecting a straight line of pixels between two points.
    Extends from the ROI_Base class and pyqtgraph's LineSegmentROI.
    '''
    kind = 'line'
    plotSignal = QtCore.Signal()
    def __init__(self, window, positions, **kargs):
        roiArgs = self.INITIAL_ARGS.copy()
        roiArgs.update(kargs)
        pg.LineSegmentROI.__init__(self, positions=positions, **roiArgs)
        self.kymograph = None  # Window showing position-vs-time, or None
        self.kymographAct = QtWidgets.QAction("&Kymograph", self, triggered=self.update_kymograph)
        ROI_Base.__init__(self, window, positions)
    def paint(self, p, *args):
        # Draw the segment between the two handle positions.
        p.setRenderHint(QtGui.QPainter.Antialiasing)
        p.setPen(self.currentPen)
        h1 = self.handles[0]['item'].pos()
        h2 = self.handles[1]['item'].pos()
        p.drawLine(h1, h2)
    def resetSignals(self):
        ROI_Base.resetSignals(self)
        # Keep handle positions on the integer grid while dragging.
        self.sigRegionChanged.connect(self.snapPoints)
    def snapPoints(self):
        # Snap both handles to their nearest grid positions. Signals are
        # blocked so moving the handles here does not re-trigger this slot.
        fix = False
        self.blockSignals(True)
        for handle in self.handles:
            pos = handle['pos']
            pos_snap = self.getSnapPosition(pg.Point(pos))# + pg.Point(.5, .5)
            if not (pos == pos_snap):
                handle['item'].setPos(pos_snap)
                handle['pos'] = pos_snap
                fix = True
        self.blockSignals(False)
        # NOTE(review): `fix` is computed but the re-emit below is disabled;
        # presumably left off to avoid signal loops — confirm before enabling.
        #if fix:
        #    self.sigRegionChanged.emit(self)
    def draw_from_points(self, pts, finish=True):
        # Move both endpoints without emitting intermediate change signals.
        self.blockSignals(True)
        self.movePoint(self.handles[0]['item'], pts[0], finish=False)
        self.movePoint(self.handles[1]['item'], pts[1], finish=False)
        self.pts = pts
        self.blockSignals(False)
        if finish:
            self.sigRegionChangeFinished.emit(self)
    def delete(self):
        ROI_Base.delete(self)
        if self.kymograph:
            self.deleteKymograph()
    def getMask(self):
        # Rasterize the segment between the first two points and drop any
        # pixels that fall outside the image bounds.
        x=np.array([p[0] for p in self.pts], dtype=int)
        y=np.array([p[1] for p in self.pts], dtype=int)
        xx, yy = line(x[0],y[0],x[1],y[1])
        idx_to_keep = np.logical_not( (xx>=self.window.mx) | (xx<0) | (yy>=self.window.my) | (yy<0))
        xx = xx[idx_to_keep]
        yy = yy[idx_to_keep]
        return xx, yy
    def getPoints(self):
        # Handle positions are relative to the ROI; add the ROI's own offset.
        return np.array([handle['pos'] + self.state['pos'] for handle in self.handles])
    def makeMenu(self):
        ROI_Base.makeMenu(self)
        self.menu.addAction(self.kymographAct)
        # Kymographs only make sense for grayscale movies.
        self.kymographAct.setEnabled(self.window.image.ndim == 3 and not self.window.metadata['is_rgb'])
    def update_kymograph(self):
        # Build (or refresh) the position-vs-time image along this line.
        tif=self.window.image
        if tif.ndim != 3:
            g.alert("Can only kymograph a 3d movie")
            return
        xx, yy = self.getMask()
        mt = len(tif)
        if len(xx) == 0:
            return
        xx = np.array(xx)
        yy = np.array(yy)
        if len(xx) == 0:
            return
        # mn: one row per frame of the pixel values along the line.
        mn=np.zeros((mt,len(xx)))
        for t in np.arange(mt):
            mn[t]=tif[t,xx,yy]
        mn=mn.T
        if self.kymograph is None:
            self.createKymograph(mn)
        else:
            self.kymograph.imageview.setImage(mn,autoLevels=False,autoRange=False)
            #self.kymograph.imageview.view.setAspectLocked(lock=True,ratio=mn.shape[1]/mn.shape[0])
    def createKymograph(self,mn):
        from .window import Window
        oldwindow=g.currentWindow
        name=oldwindow.name+' - Kymograph'
        self.kymograph=Window(mn,name,metadata=self.window.metadata)
        # Live-update the kymograph as the line moves.
        # NOTE(review): these connects are repeated each time a kymograph is
        # recreated and are never disconnected in deleteKymograph — confirm
        # whether duplicate connections can accumulate.
        self.sigRegionChanged.connect(self.update_kymograph)
        self.kymograph.closeSignal.connect(self.deleteKymograph)
        self.sigRemoveRequested.connect(self.deleteKymograph)
    def deleteKymograph(self):
        self.kymograph.closeSignal.disconnect(self.deleteKymograph)
        self.kymograph=None
class ROI_rectangle(ROI_Base, pg.ROI):
    '''
    ROI rectangle class for selecting a set width and height group of pixels on an image
    Extends from pyqtgraph ROI and ROI_Base
    Parameters:
        window: parent window to draw the ROI in
        pos: XY coordinate of the upper left corner of the rectangle
        size: (width, height) tuple of the ROI
        resizable: scale handles will be drawn on each corner if this is True
        See pg.ROI for other parameters
    Functions:
        crop():
            create a new window with the original image cropped within this ROI
    '''
    kind = 'rectangle'
    plotSignal = QtCore.Signal()
    def __init__(self, window, pos, size, resizable=True, **kargs):
        roiArgs = self.INITIAL_ARGS.copy()
        roiArgs.update(kargs)
        # Rectangles live on the integer pixel grid.
        pos = np.array(pos, dtype=int)
        size = np.array(size, dtype=int)
        pg.ROI.__init__(self, pos, size, **roiArgs)
        if resizable:
            # One scale handle per corner, each anchored at the opposite corner.
            self.addScaleHandle([0, 1], [1, 0])
            self.addScaleHandle([1, 0], [0, 1])
            self.addScaleHandle([0, 0], [1, 1])
            self.addScaleHandle([1, 1], [0, 0])
        self.cropAction = QtWidgets.QAction('&Crop', self, triggered=self.crop)
        ROI_Base.__init__(self, window, [pos, size])
    def center_around(self, x, y):
        # Translate the rectangle so its center lands on (x, y); size unchanged.
        old_pts = self.getPoints()
        old_center = old_pts[0] + .5 * old_pts[1]
        new_center = np.array([x, y])
        diff = new_center - old_center
        new_pts = np.array([old_pts[0]+diff, old_pts[1]])
        self.draw_from_points(new_pts)
    def getPoints(self):
        # Rectangles serialize as [top_left, size], unlike point-list ROIs.
        return np.array([self.state['pos'], self.state['size']], dtype=int)
    def contains_pts(self, x, y):
        # Strict interior test (both inequalities exclusive).
        target = np.array([x, y])
        return np.all(self.pts[0] < target) and np.all(target < self.pts[0]+self.pts[1])
    def getMask(self):
        # All pixel coordinates inside the rectangle, clipped to the image.
        x, y = self.state['pos']
        w, h = self.state['size']
        xmin = max(x, 0)
        ymin = max(y, 0)
        xmax = min(x+w, self.window.mx)
        ymax = min(y+h, self.window.my)
        xx, yy = np.meshgrid(np.arange(xmin, xmax, dtype=int), np.arange(ymin, ymax, dtype=int))
        return xx.flatten(), yy.flatten()
    def paint(self, p, *args):
        pg.ROI.paint(self, p, *args)
    def draw_from_points(self, pts, finish=True):
        # pts = [top_left, size]; applied without intermediate signals.
        self.setPos(pts[0], finish=False)
        self.setSize(pts[1], finish=False)
        self.pts = np.array(pts)
        if finish:
            self.sigRegionChangeFinished.emit(self)
    def makeMenu(self):
        ROI_Base.makeMenu(self)
        self.menu.addAction(self.cropAction)
    def crop(self):
        # Open a new Window containing the image cropped to this rectangle,
        # clamped to the image bounds. Works for 2D, 3D and 4D stacks.
        from .window import Window
        r = self.boundingRect()
        p1 = r.topLeft() + self.state['pos']
        p2 = r.bottomRight() + self.state['pos']
        x1, y1 = int(p1.x()), int(p1.y())
        x2, y2 = int(p2.x()), int(p2.y())
        tif=self.window.image
        if tif.ndim==3:
            mt,mx,my=tif.shape
            if x1<0: x1=0
            if y1<0: y1=0
            if x2>=mx: x2=mx-1
            if y2>=my: y2=my-1
            newtif=tif[:,x1:x2,y1:y2]
        elif tif.ndim==2:
            mx,my=tif.shape
            if x1<0: x1=0
            if y1<0: y1=0
            if x2>=mx: x2=mx-1
            if y2>=my: y2=my-1
            mx,my=tif.shape  # NOTE(review): redundant re-read of tif.shape
            newtif=tif[x1:x2,y1:y2]
        elif tif.ndim==4:
            # 4D (t, x, y, channel): the channel axis is preserved by the slice.
            mt,mx,my,mc=tif.shape
            if x1<0: x1=0
            if y1<0: y1=0
            if x2>=mx: x2=mx-1
            if y2>=my: y2=my-1
            newtif=tif[:,x1:x2,y1:y2]
        else:
            g.alert("Image dimensions not understood")
            return None
        return Window(newtif,self.window.name+' Cropped',metadata=self.window.metadata)
class ROI_freehand(ROI_Base, pg.PolyLineROI):
    """
    Closed polygon ROI traced freehand by the user.
    Extends ROI_Base and pyqtgraph's PolyLineROI.
    """
    kind = 'freehand'
    plotSignal = QtCore.Signal()
    def __init__(self, window, pts, **kargs):
        # pts: ordered outline vertices; the polygon is always closed.
        roiArgs = self.INITIAL_ARGS.copy()
        roiArgs.update(kargs)
        roiArgs['closed'] = True
        self.pen = None
        pg.PolyLineROI.__init__(self, pts, **roiArgs)
        ROI_Base.__init__(self, window, pts)
        # Cached rasterization of the outline relative to its untranslated
        # position; filled lazily by getMask().
        self._untranslated_mask = None
    def draw_from_points(self, pts, finish=False):
        # Intentionally a no-op for freehand ROIs: linked freehand ROIs are
        # kept in sync by translate() instead of being redrawn vertex-by-vertex.
        # NOTE(review): the original body contained an unreachable
        # setPoints() call after a bare ``return``; that dead code has been
        # removed without changing behavior.
        return
    def set_currentPen(self, pen):
        # The outline is drawn by child segment items, so each needs the pen.
        super(ROI_freehand, self).set_currentPen(pen)
        for seg in self.segments:
            seg.setPen(self.currentPen)
    def translate(self, pos, y=None, *args, **kargs):
        # Accept translate(Point) or translate(x, y); snap the delta, apply it,
        # and move every linked ROI by the same amount.
        if y is None:
            pos = pg.Point(pos)
        else:
            # avoid ambiguity where update is provided as a positional argument
            if isinstance(y, bool):
                raise TypeError("Positional arguments to setPos() must be numerical.")
            pos = pg.Point(pos, y)
        pos = self.getSnapPosition(pos)
        pg.PolyLineROI.translate(self, pos, *args, **kargs)
        for roi in self.linkedROIs:
            roi.blockSignals(True)
            roi.setPos(roi.state['pos'] + pos)
            roi.pts = roi.getPoints()
            roi.blockSignals(False)
    def getPoints(self):
        # Vertex positions in image coordinates (handle pos + ROI offset).
        return np.array([h.pos() + self.state['pos'] for h in self.getHandles()], dtype=int)
    def removeSegment(self, seg):
        # Detach and delete one outline segment and its handles.
        for handle in seg.handles[:]:
            seg.removeHandle(handle['item'])
        self.segments.remove(seg)
        self.scene().removeItem(seg)
    def addSegment(self, h1, h2, index=None):
        # Insert an outline segment between two handles (append by default).
        seg = pg.LineSegmentROI(handles=(h1, h2), pen=self.pen, parent=self, movable=False)
        if index is None:
            self.segments.append(seg)
        else:
            self.segments.insert(index, seg)
        seg.setAcceptedMouseButtons(QtCore.Qt.LeftButton)
        seg.setZValue(self.zValue()+1)
        seg.setMouseHover = self.setMouseHover
        for h in seg.handles:
            h['item'].setAcceptedMouseButtons(h['item'].acceptedMouseButtons() | QtCore.Qt.LeftButton) ## have these handles take left clicks too, so that handles cannot be added on top of other handles
            h['item'].setOpacity(0)
    def getMask(self):
        # Rasterize the polygon once (relative to its untranslated outline),
        # then shift the cached pixels by the current ROI offset and clip to
        # the image bounds.
        if self._untranslated_mask is not None:
            xx = self._untranslated_mask[0] + int(self.state['pos'][0])
            yy = self._untranslated_mask[1] + int(self.state['pos'][1])
        else:
            x, y = np.transpose(self.pts)
            mask=np.zeros(self.window.imageDimensions())
            xx,yy=polygon(x,y,shape=mask.shape)
            self._untranslated_mask = xx, yy
        idx_to_keep = np.logical_not( (xx>=self.window.mx) | (xx<0) | (yy>=self.window.my) | (yy<0))
        xx = xx[idx_to_keep]
        yy = yy[idx_to_keep]
        return xx, yy
class ROI_rect_line(ROI_Base, QtWidgets.QGraphicsObject):
    """
    This ROI is a line with an adjustable width that can be composed of multiple
    straight line segments. Each segment is a pyqtgraph ROI stored in self.lines;
    consecutive segments share a scale/rotate handle.
    """
    kind = 'rect_line'
    plotSignal = QtCore.Signal()
    sigRegionChanged = QtCore.Signal(object)
    sigRegionChangeFinished = QtCore.Signal(object)
    def __init__(self, window, pts, width=1, **kargs):
        self.roiArgs = self.INITIAL_ARGS.copy()
        self.roiArgs.update(kargs)
        self.roiArgs['scaleSnap'] = False
        self.width = width
        self.currentLine = None  # segment under the mouse, set by hoverEvent
        self.kymograph = None
        QtWidgets.QGraphicsObject.__init__(self)
        self.kymographAct = QtWidgets.QAction("&Kymograph", self, triggered=self.update_kymograph)
        self.removeLinkAction = QtWidgets.QAction('Remove Last Link', self, triggered=self.removeSegment)
        self.setWidthAction = QtWidgets.QAction("Set Width", self, triggered=lambda: self.setWidth())
        self.lines = []
        ROI_Base.__init__(self, window, pts)
        self.getPoints = self.getHandlePositions
        self.pen = QtGui.QPen(QtGui.QColor(255, 255, 0))
        self.pen.setWidth(0)
        if len(pts) < 2:
            raise Exception("Must start with at least 2 points")
        # NOTE(review): removed a leftover debugging print of pts here.
        self.addSegment(pts[1], connectTo=pts[0])
        for p in pts[2:]:
            self.addSegment(p)
        self.extending = False
    def delete(self):
        ROI_Base.delete(self)
        if self.kymograph:
            self.deleteKymograph()
    def draw_from_points(self, pts, finish=False):
        # Rebuild the segment chain from pts: keep the first segment, drop the
        # rest, then re-add one segment per extra point.
        while len(self.lines) > 1:
            self.removeSegment(self.lines[-1])
        self.lines[0].movePoint(0, pts[0])
        self.lines[0].movePoint(1, pts[1])
        for p in pts[2:]:
            self.addSegment(p)
        if finish:
            self.sigRegionChangeFinished.emit(self)
        self.pts = pts
    def getTrace(self, bounds=None):
        # Average over the ROI's swept region per frame (see getArrayRegion).
        if self.window.image.ndim > 3 or self.window.metadata['is_rgb']:
            g.alert("Plotting trace of RGB movies is not supported. Try splitting the channels.")
            return None
        if self.window.image.ndim == 3:
            region = self.getArrayRegion(self.window.imageview.image, self.window.imageview.getImageItem(), (1, 2))
            while region.ndim > 1:
                region = np.average(region, 1)
        elif self.window.image.ndim == 2:
            region = self.getArrayRegion(self.window.imageview.image, self.window.imageview.getImageItem(), (0, 1))
            region = np.average(region)
        if bounds:
            region = region[bounds[0]:bounds[1]]
        return region
    def preview(self):
        # Debug helper: show the extracted region in its own window, updating
        # live as the ROI moves.
        im = self.getArrayRegion(self.window.imageview.getImageItem().image, self.window.imageview.getImageItem(), (0, 1))
        if not hasattr(self, 'prev'):
            from .window import Window
            self.prev = Window(im)
            self.sigRegionChanged.connect(lambda a: self.preview())
        else:
            self.prev.imageview.setImage(im)
    def lineRegionChange(self, line):
        # A segment's handle moved: snap the moving handle(s) to the grid and
        # forward the change as a whole-ROI region change.
        # NOTE(review): removed a leftover debugging print here.
        line.blockSignals(True)
        for i in range(2):
            p = self.mapFromScene(line.getHandles()[i].scenePos())
            p = line.getSnapPosition([p.x(), p.y()])
            if line.getHandles()[i].isMoving:
                line.movePoint(i, p)
        line.blockSignals(False)
        self.pts = self.getPoints()
        self.sigRegionChanged.emit(self)
    def getHandlePositions(self):
        """Return the positions of all handles in local coordinates."""
        # BUG FIX: the original began with a debugging ``print`` followed by
        # ``return None``, so getPoints() always returned None and self.pts
        # was corrupted on every region change. The dead code below it was
        # the intended implementation and is now reachable.
        p = self.mapFromScene(self.lines[0].getHandles()[0].scenePos())
        p = self.lines[0].getSnapPosition([p.x(), p.y()])
        pos = [p]
        for l in self.lines:
            p = self.mapFromScene(l.getHandles()[1].scenePos())
            p = l.getSnapPosition([p.x(), p.y()])
            pos.append(p)
        self.pts = pos
        return self.pts
    def getArrayRegion(self, arr, img=None, axes=(0,1), **kwds):
        # Extract the image data under each segment and concatenate them
        # along the ROI's long axis.
        rgns = []
        for l in self.lines:
            rgn = l.getArrayRegion(arr, img, axes=axes, **kwds)
            if rgn is None:
                continue
            rgns.append(rgn)
        ## make sure orthogonal axis is the same size
        ## (sometimes fp errors cause differences)
        if img.axisOrder == 'row-major':
            axes = axes[::-1]
        ms = min([r.shape[axes[1]] for r in rgns])
        sl = [slice(None)] * rgns[0].ndim
        sl[axes[1]] = slice(0,ms)
        rgns = [r[sl] for r in rgns]
        return np.concatenate(rgns, axis=axes[0])
    def addSegment(self, pos=(0, 0), connectTo=None):
        """
        Add a new segment to the ROI connecting from the previous endpoint to *pos*.
        (pos is specified in the parent coordinate system of the MultiRectROI)
        """
        ## by default, connect to the previous endpoint
        if connectTo is None:
            connectTo = self.lines[-1].getHandles()[1]
        ## create new ROI
        newRoi = pg.ROI((0,0), [1, self.width], parent=self, pen=self.pen, **self.roiArgs)
        self.lines.append(newRoi)
        ## Add first SR handle
        if isinstance(connectTo, Handle):
            # Reuse the existing handle so the two segments stay joined.
            h = self.lines[-1].addScaleRotateHandle([0, 0.5], [1, 0.5], item=connectTo)
            newRoi.movePoint(connectTo, connectTo.scenePos(), coords='scene')
        else:
            h = self.lines[-1].addScaleRotateHandle([0, 0.5], [1, 0.5])
            newRoi.movePoint(h, connectTo, coords='scene')
        ## add second SR handle
        h = self.lines[-1].addScaleRotateHandle([1, 0.5], [0, 0.5])
        newRoi.movePoint(h, pos)
        # Segments move only via their handles; the compound ROI handles hover
        # and context-menu events itself.
        newRoi.translatable = False
        newRoi.hoverEvent = lambda e: self.hoverEvent(newRoi, e)
        newRoi.sigRegionChanged.connect(self.lineRegionChange)
        newRoi.raiseContextMenu = self.raiseContextMenu
        newRoi.sigRegionChangeFinished.connect( lambda a: self.sigRegionChangeFinished.emit(self))
        self.sigRegionChanged.emit(self)
    def getNearestHandle(self, pos, max_distance=None):
        # Return the handle item closest to *pos* (image coordinates), or None.
        # When max_distance is given, only handles within that Manhattan
        # distance qualify.
        h = None
        # BUG FIX: was ``d = max_distance``, which left ``dist`` unbound on
        # the first ``d <= dist`` comparison (NameError) whenever
        # max_distance was supplied.
        dist = max_distance
        for l in self.lines:
            for i in range(2):
                p = self.window.imageview.getImageItem().mapFromScene(l.getSceneHandlePositions(i)[1])
                d = pg.Point(p - pos).manhattanLength()
                if max_distance == None:
                    if h == None or d < dist:
                        h = l.handles[i]['item']
                        dist = d
                else:
                    if d <= dist:
                        h = l.handles[i]['item']
                        dist = d
        return h
    def removeSegment(self, segment=None):
        """Remove a segment from the ROI."""
        if isinstance(segment, int):
            segment = self.lines[segment]
        if not isinstance(segment, pg.ROI):
            # Triggered from the context menu: remove the hovered segment.
            segment = self.currentLine
        for h in segment.getHandles():
            if len(h.rois) == 2 and h.parentItem() == segment:
                # The shared handle must survive: reparent it to the neighbor.
                otherROI = [line for line in h.rois if line != segment][0]
                h.setParentItem(otherROI)
                h.setPos(0, .5)
            h.disconnectROI(segment)
        if segment in self.lines:
            self.lines.remove(segment)
        self.scene().removeItem(segment)
        segment.sigRegionChanged.disconnect()
        segment.sigRegionChangeFinished.disconnect()
        if len(self.lines) == 0:
            # Removing the last segment deletes the whole ROI.
            self.delete()
        else:
            self.sigRegionChanged.emit(self)
    def extend(self, x, y, finish=True):
        # Grow the ROI by one segment from the nearest endpoint handle toward
        # (x, y); subsequent calls drag that new endpoint until extendFinished.
        point = self.lines[0].getSnapPosition([x, y])
        if not self.extending:
            h = self.getNearestHandle(pg.Point(x, y))
            if h is not None and len(h.rois) > 1:
                # Interior handles (shared by two segments) cannot be extended.
                return
            self.extending = True
            self.addSegment(point, connectTo=h)
        else:
            self.lines[-1].handles[-1]['item'].movePoint(self.window.imageview.getImageItem().mapToScene(point))
        self.sigRegionChanged.emit(self)
        if finish:
            self.sigRegionChangeFinished.emit(self)
    def extendFinished(self):
        self.extending = False
        self.extendHandle = None
        self.sigRegionChangeFinished.emit(self)
        if self.lines[0].getHandles()[0] in self.lines[-1].getHandles():
            # The extension attached to the chain's start: rotate the segment
            # list so self.lines stays ordered end-to-end.
            self.lines.insert(0, self.lines[-1])
            self.lines = self.lines[:-1]
            self.lines[0].handles = self.lines[0].handles[::-1]
    def hoverEvent(self, l, ev):
        # Highlight the whole compound ROI when any segment is hovered.
        self.currentLine = l
        if ev.enter:
            pen = QtGui.QPen(QtGui.QColor(255, 0, 0))
            pen.setWidth(0)
            self.set_currentPen(pen)
        elif ev.exit:
            self.set_currentPen(self.pen)
    def getMask(self):
        # Rasterize every consecutive point pair and clip to the image bounds.
        # NOTE(review): removed a leftover debugging print here.
        xxs = []
        yys = []
        for i in range(len(self.pts)-1):
            p1, p2 = self.pts[i], self.pts[i+1]
            xx, yy = line(int(p1[0]), int(p1[1]), int(p2[0]), int(p2[1]))
            idx_to_keep = np.logical_not( (xx>=self.window.mx) | (xx<0) | (yy>=self.window.my) | (yy<0))
            xx = xx[idx_to_keep]
            yy = yy[idx_to_keep]
            xxs.extend(xx)
            yys.extend(yy)
        return np.array(xxs, dtype=int), np.array(yys, dtype=int)
    def makeMenu(self):
        ROI_Base.makeMenu(self)
        self.menu.addAction(self.removeLinkAction)
        self.menu.addAction(self.setWidthAction)
        self.menu.addAction(self.kymographAct)
        self.kymographAct.setEnabled(self.window.image.ndim > 2)
    def raiseContextMenu(self, ev):
        # Only terminal segments (those owning an unshared handle) may be
        # removed individually.
        if np.any([len(i.rois)<2 for i in self.currentLine.getHandles()]):
            self.removeLinkAction.setText("Remove Link")
            self.removeLinkAction.setVisible(True)
        else:
            self.removeLinkAction.setVisible(False)
        ROI_Base.raiseContextMenu(self, ev)
    def boundingRect(self):
        # Painting is done entirely by the child segment ROIs.
        return QtCore.QRectF()
    def set_currentPen(self, pen):
        super(ROI_rect_line, self).set_currentPen(pen)
        for line in self.lines:
            line.setPen(pen)
    def update_kymograph(self):
        # Build (or refresh) the position-vs-time image along the segment chain.
        # NOTE(review): removed unused locals (w, h, r) from the width==1 path.
        tif=self.window.image
        if tif.ndim != 3:
            g.alert("Can only kymograph on 3D movies")
            return
        if self.width == 1:
            # Width-1 ROI: sample the rasterized line pixels directly.
            xx, yy = self.getMask()
            mn = tif[:, xx, yy].T
        else:
            region = self.getArrayRegion(self.window.imageview.image, self.window.imageview.getImageItem(), (1, 2))
            mn = np.average(region, 2).T
        if self.kymograph is None:
            self.createKymograph(mn)
        else:
            if mn.size > 0:
                self.kymograph.imageview.setImage(mn,autoLevels=False,autoRange=False)
                #self.kymograph.imageview.view.setAspectLocked(lock=True,ratio=mn.shape[1]/mn.shape[0])
    def setWidth(self, newWidth=None):
        # Prompt for (or directly apply) a new width; each segment is rescaled
        # about its center.
        s = True
        if newWidth == None:
            newWidth, s = QtWidgets.QInputDialog.getInt(None, "Enter a width value", 'Float Value', value = self.width)
        # BUG FIX: was ``if not s or s == 0`` — ``s == 0`` merely duplicated
        # ``not s``; the intended guard is against a zero *width*, which would
        # collapse every segment irreversibly.
        if not s or newWidth == 0:
            return
        for l in self.lines:
            l.scale([1.0, newWidth/self.width], center=[0.5,0.5])
        self.width = newWidth
        self.sigRegionChangeFinished.emit(self)
    def createKymograph(self,mn):
        from .window import Window
        oldwindow=g.currentWindow
        name=oldwindow.name+' - Kymograph'
        self.kymograph=Window(mn,name,metadata=self.window.metadata)
        # Rate-limited live updates (~1 Hz) plus a direct connection.
        # NOTE(review): the direct sigRegionChanged connection is never
        # disconnected in deleteKymograph — confirm whether duplicate
        # connections can accumulate across create/delete cycles.
        self.kymographproxy = pg.SignalProxy(self.sigRegionChanged, rateLimit=1, slot=self.update_kymograph)
        self.sigRegionChanged.connect(self.update_kymograph)
        self.kymograph.closeSignal.connect(self.deleteKymograph)
    def deleteKymograph(self):
        self.kymographproxy.disconnect()
        self.kymograph.closeSignal.disconnect(self.deleteKymograph)
        self.kymograph=None
def makeROI(kind, pts, window=None, **kargs):
    """
    Create an ROI of the given *kind* from *pts* inside *window* (defaults to
    the current window).

    Args:
        kind (str): 'freehand', 'rectangle', 'line' or 'rect_line'.
        pts: point list; for 'rectangle' either [top_left, size] or a list of
            corner points whose bounding box is used.
        window: target Window; g.currentWindow when None.

    Returns:
        The new ROI object, or None when it could not be created.
    """
    if window is None:
        window = g.currentWindow
    if window is None:
        # BUG FIX: message previously read "make and ROI".
        g.alert('ERROR: In order to make an ROI a window needs to be selected')
        return None
    if kind == 'freehand':
        if len(pts) < 4:
            # Too few vertices to form a meaningful closed polygon.
            return None
        roi = ROI_freehand(window, pts, **kargs)
    elif kind == 'rectangle':
        if len(pts) > 2:
            # A list of corner points: derive the bounding box.
            size = np.ptp(pts,0)
            top_left = np.min(pts,0)
        else:
            # Already in [top_left, size] form.
            size = pts[1]
            top_left = pts[0]
        roi=ROI_rectangle(window, top_left, size, **kargs)
    elif kind == 'line':
        roi = ROI_line(window, pts, **kargs)
    elif kind == 'rect_line':
        roi = ROI_rect_line(window, pts, **kargs)
    else:
        g.alert("ERROR: THIS KIND OF ROI COULD NOT BE FOUND: {}".format(kind))
        return None
    return roi
def open_rois(filename=None):
    """
    Open an roi.txt file, create ROI objects and place them in the current Window.

    The file format is blocks of::

        <kind>
        <x> <y>
        <x> <y>
        ...
        <blank line>

    Args:
        filename (str): The filename (including full path) of the roi.txt file.
            When None, a file-open dialog is shown.
    Returns:
        list of rois, or None if no file was chosen.
    """
    if filename is None:
        filetypes = '*.txt'
        prompt = 'Load ROIs from file'
        filename = open_file_gui(prompt, filetypes=filetypes)
    if filename is None:
        return None
    # BUG FIX: the file handle was previously opened without being closed;
    # use a context manager so it is released deterministically.
    with open(filename, 'r') as f:
        text = f.read()
    rois = []
    kind = None
    pts = None
    for text_line in text.split('\n'):
        if kind is None:
            # First line of a block names the ROI kind.
            kind = text_line
            pts = []
        elif text_line == '':
            # Blank line terminates the block: build the ROI.
            roi = makeROI(kind, pts)
            if roi is not None:
                # BUG FIX: previously None results (unknown kind, too few
                # points) were appended to the returned list.
                rois.append(roi)
            kind = None
            pts = None
        else:
            pts.append(tuple(int(float(i)) for i in text_line.split()))
    if kind is not None and pts:
        # BUG FIX: a file without a trailing blank line silently dropped its
        # final ROI; flush it here.
        roi = makeROI(kind, pts)
        if roi is not None:
            rois.append(roi)
    return rois
|
import hal
from wpilib.iterativerobot import IterativeRobot
from wpilib.command.scheduler import Scheduler
from wpilib.livewindow import LiveWindow
class CommandBasedRobot(IterativeRobot):
    '''
    The base class for a Command-Based Robot. To use, instantiate commands and
    trigger them.
    '''

    def startCompetition(self):
        """
        Provide an alternate "main loop" via startCompetition(). Rewritten
        from IterativeRobot for readability and to initialize scheduler.
        """
        hal.report(hal.HALUsageReporting.kResourceType_Framework,
                   hal.HALUsageReporting.kFramework_Iterative)

        self.scheduler = Scheduler.getInstance()
        self.robotInit()

        # Tell the DS that the robot is ready to be enabled
        hal.observeUserProgramStarting()

        # loop forever, calling the appropriate mode-dependent function
        while True:
            if self.ds.isDisabled():
                hal.observeUserProgramDisabled()
                self.disabledInit()
                while self.ds.isDisabled():
                    self.disabledPeriodic()
                    self.ds.waitForData()
            elif self.ds.isAutonomous():
                hal.observeUserProgramAutonomous()
                self.autonomousInit()
                while self.ds.isEnabled() and self.ds.isAutonomous():
                    self.autonomousPeriodic()
                    self.ds.waitForData()
            elif self.ds.isTest():
                hal.observeUserProgramTest()
                LiveWindow.setEnabled(True)
                self.testInit()
                while self.ds.isEnabled() and self.ds.isTest():
                    self.testPeriodic()
                    self.ds.waitForData()
                LiveWindow.setEnabled(False)
            else:
                hal.observeUserProgramTeleop()
                self.teleopInit()
                # isOperatorControl checks "not autonomous or test", so we need
                # to check isEnabled as well, since otherwise it will continue
                # looping while disabled.
                while self.ds.isEnabled() and self.ds.isOperatorControl():
                    self.teleopPeriodic()
                    self.ds.waitForData()

    def commandPeriodic(self):
        '''
        Run the scheduler regularly. If an error occurs during a competition,
        prevent it from crashing the program.
        '''
        try:
            self.scheduler.run()
        except Exception as error:
            # When not attached to the FMS (i.e. during development), crash
            # loudly so the bug is noticed.
            if not self.ds.isFMSAttached():
                raise
            # Just to be safe, stop all running commands.
            # (IDIOM FIX: this note was previously a bare string literal,
            # a no-op statement, rather than a comment.)
            self.scheduler.removeAll()
            self.handleCrash(error)

    # The scheduler runs identically in every enabled/disabled mode.
    autonomousPeriodic = commandPeriodic
    teleopPeriodic = commandPeriodic
    disabledPeriodic = commandPeriodic

    def testPeriodic(self):
        '''
        Test mode will not run normal commands, but motors can be controlled
        and sensors viewed with the SmartDashboard.
        '''
        LiveWindow.run()

    def handleCrash(self, error):
        '''
        Called if an exception is raised in the Scheduler during a competition.
        Writes an error message to the driver station by default. If you want
        more complex behavior, override this method in your robot class.
        '''
        self.ds.reportError(str(error), printTrace=True)
# Makes CommandBasedRobot extend TimedRobot
import hal
from wpilib.timedrobot import TimedRobot
from wpilib.command.scheduler import Scheduler
from wpilib.livewindow import LiveWindow
class CommandBasedRobot(TimedRobot):
    '''
    The base class for a Command-Based Robot. To use, instantiate commands and
    trigger them.
    '''

    def startCompetition(self):
        """Initializes the scheduler before starting robotInit()"""
        # (Typo fix: docstring previously read "Initalizes".)
        self.scheduler = Scheduler.getInstance()
        super().startCompetition()

    def commandPeriodic(self):
        '''
        Run the scheduler regularly. If an error occurs during a competition,
        prevent it from crashing the program.
        '''
        try:
            self.scheduler.run()
        except Exception as error:
            # When not attached to the FMS (i.e. during development), crash
            # loudly so the bug is noticed.
            if not self.ds.isFMSAttached():
                raise
            # Just to be safe, stop all running commands.
            # (IDIOM FIX: this note was previously a bare string literal,
            # a no-op statement, rather than a comment.)
            self.scheduler.removeAll()
            self.handleCrash(error)

    # The scheduler runs identically in every enabled/disabled mode.
    autonomousPeriodic = commandPeriodic
    teleopPeriodic = commandPeriodic
    disabledPeriodic = commandPeriodic

    def testPeriodic(self):
        '''
        Test mode will not run normal commands, but motors can be controlled
        and sensors viewed with the SmartDashboard.
        '''
        LiveWindow.run()

    def handleCrash(self, error):
        '''
        Called if an exception is raised in the Scheduler during a competition.
        Writes an error message to the driver station by default. If you want
        more complex behavior, override this method in your robot class.
        '''
        self.ds.reportError(str(error), printTrace=True)
|
import sys
sys.path.insert(1, "../../../")
import h2o
def cupMediumGBM(ip,port):
    """Smoke test: train an H2O GBM on the KDD Cup 98 learning set."""
    # Connect to h2o
    h2o.init(ip,port)
    train = h2o.import_frame(path=h2o.locate("bigdata/laptop/usecases/cup98LRN_z.csv"))
    # NOTE(review): `test` is loaded but never used — presumably kept only to
    # exercise the import path; confirm before removing.
    test = h2o.import_frame(path=h2o.locate("bigdata/laptop/usecases/cup98VAL_z.csv"))
    train["TARGET_B"] = train["TARGET_B"].asfactor()
    # Train H2O GBM Model:
    # Drop the binary response, the alternate (dollar) response, and the ID.
    train_cols = train.names()
    for c in ["TARGET_D", "TARGET_B", "CONTROLN"]:
        train_cols.remove(c)
    # `model` is unused: the call itself is the test (it must not raise).
    model = h2o.gbm(x=train[train_cols], y=train["TARGET_B"], distribution = "bernoulli", ntrees = 5)
if __name__ == "__main__":
    h2o.run_test(sys.argv, cupMediumGBM)
# already fixed by brandon
import sys
sys.path.insert(1, "../../../")
import h2o
def cupMediumGBM(ip, port):
    """Train a 5-tree bernoulli GBM on the KDD Cup '98 learning frame."""
    # Connect to h2o
    h2o.init(ip, port)

    # Load the learning and validation frames.
    train = h2o.import_frame(path=h2o.locate("bigdata/laptop/usecases/cup98LRN_z.csv"))
    test = h2o.import_frame(path=h2o.locate("bigdata/laptop/usecases/cup98VAL_z.csv"))
    train["TARGET_B"] = train["TARGET_B"].asfactor()

    # Predictors: every column except 'C1', the targets and the record identifier.
    train_cols = train.names()
    for excluded in ('C1', "TARGET_D", "TARGET_B", "CONTROLN"):
        train_cols.remove(excluded)

    model = h2o.gbm(x=train[train_cols], y=train["TARGET_B"],
                    distribution="bernoulli", ntrees=5)


if __name__ == "__main__":
    h2o.run_test(sys.argv, cupMediumGBM)
|
# NOTE(review): this import block contained unresolved git merge-conflict
# markers (<<<<<<< HEAD / ======= / >>>>>>> tweaked_profile_stuff), which are
# a SyntaxError at import time. Resolved by taking the union of both sides:
# get_query from HEAD, plus Profile/forms/messages from tweaked_profile_stuff
# (the tweaked branch's `Profile` supersedes HEAD's `UserProfile`).
from django.shortcuts import render, render_to_response
from django.template import RequestContext
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.db import transaction
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect, HttpResponse
import datetime
from django.views.generic import View, UpdateView, DetailView
from actual_play.models import GameGroup, Game, Player
from blog.models import Blog, Entry, Category
from library.models import Stack, Item
from itertools import chain
from nonhumanuser.utils import *
from app.utils import get_query
from app.models import Profile
from app.forms import UserForm, ProfileForm
from django.contrib import messages
# Create your views here.
class IndexView(View):
    """Landing page: featured content plus recent/popular sidebar lists."""

    def get(self, request, *args, **kwargs):
        # Featured items: latest entry per category, latest library item, latest game.
        story = Entry.objects.filter(category=1).last()
        article = Entry.objects.filter(category=2).last()
        library_item = Item.objects.filter(active=True).last()
        game = Game.objects.last()
        game_group = GameGroup.objects.filter(name=game.group).first()

        # Sidebar lists: newest and most-commented content across all models.
        items_recent = list(chain(
            Entry.objects.filter(active=True).order_by('-created_date'),
            Item.objects.filter(active=True).order_by('-created_date'),
            Game.objects.filter(active=True).order_by('-created_date'),
        ))
        items_popular = list(chain(
            Entry.objects.filter(active=True).order_by('-number_comments'),
            Item.objects.filter(active=True).order_by('-number_comments'),
            Game.objects.filter(active=True).order_by('-number_comments'),
        ))

        description = 'Stories, articles resources and supplements for Call of Cthulhu and related genre games.'
        context = {
            "site": {
                'title': 'NonHumanUser',
                'description': description,
            },
            'og_type': 'webpage',
            'og_url': 'http://www.nonhumanuser.com',
            'og_title': 'NonHumanUser',
            'og_description': description,
            'og_image': 'http://www.nonhumanuser.com/images/logo.png',
            'story': story,
            'article': article,
            'library_item': library_item,
            'game': game,
            'game_group': game_group,
            'items_recent': items_recent[0:5],
            'items_popular': items_popular[0:5],
            'section': 'main',
            'links': get_main_links(),
        }
        return render(request, 'app/index.html', context)
class ProfileView(View):
    """
    Display user profile
    """
    user_form = UserForm
    profile_form = ProfileForm
    initial = {'key': 'value'}
    template_name = 'app/profile.html'

    def get(self, request, *args, **kwargs):
        # Unbound forms, pre-populated from the class-level initial data.
        context = {
            'user_form': self.user_form(initial=self.initial),
            'profile_form': self.profile_form(initial=self.initial),
        }
        return render(request, self.template_name, context)

    def post(self, request, *args, **kwargs):
        # Bind both forms to the submitted data / files for the current user.
        user_form = self.user_form(request.POST, instance=request.user)
        profile_form = self.profile_form(request.POST, request.FILES,
                                         instance=request.user.profile)
        both_valid = user_form.is_valid() and profile_form.is_valid()
        if both_valid:
            user_form.save()
            profile_form.save()
            messages.success(request, 'Profile update successful.')
        else:
            messages.error(request, 'Please correct the error below.')
        return render(request, 'app/profile.html', {
            'user_form': user_form,
            'profile_form': profile_form,
        })
class SearchView(View):
    """Full-text search across blog entries, library items and games."""

    template = 'app/search_results.html'

    def get(self, request, *args, **kwargs):
        """Render search results for the ``q`` query parameter.

        The recent/popular sidebar lists are always supplied; the search
        itself only runs when a non-blank ``q`` is present.
        """
        query_string = ''
        found_entries = None
        # Sidebar
        entry_recent = Entry.objects.filter(active=True).order_by('-created_date')
        library_recent = Item.objects.filter(active=True).order_by('-created_date')
        games_recent = Game.objects.filter(active=True).order_by('-created_date')
        items_recent = list(chain(entry_recent, library_recent, games_recent))
        entry_popular = Entry.objects.filter(active=True).order_by('-number_comments')
        library_popular = Item.objects.filter(active=True).order_by('-number_comments')
        games_popular = Game.objects.filter(active=True).order_by('-number_comments')
        items_popular = list(chain(entry_popular, library_popular, games_popular))
        links = get_main_links()
        context = {
            'items_recent': items_recent[0:5],
            'items_popular': items_popular[0:5],
            'links': links,
        }
        if ('q' in request.GET) and request.GET['q'].strip():
            query_string = request.GET['q']
            entry_query = get_query(query_string, ['title', 'body', ])
            # have to figure out the type here
            entries = Entry.objects.filter(entry_query)\
                .order_by('-publish_date')
            items = Item.objects.filter(entry_query).order_by('-publish_date')
            games = Game.objects.filter(entry_query).order_by('-publish_date')
            found_entries = list(chain(entries, items, games))
            context['query_string'] = query_string
            context['found_entries'] = found_entries
        # BUGFIX: dropped context_instance=RequestContext(request). render()
        # already builds a RequestContext from `request`; the keyword was
        # deprecated in Django 1.8 and removed in 1.10, where passing it
        # raises TypeError.
        return render(request, self.template, context)
Fix imports on app/views.py: resolve the leftover merge conflict, keeping both branches' imports (get_query, Profile, UserForm/ProfileForm, messages).
from django.shortcuts import render, render_to_response
from django.template import RequestContext
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.db import transaction
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect, HttpResponse
import datetime
from django.views.generic import View, UpdateView, DetailView
from actual_play.models import GameGroup, Game, Player
from blog.models import Blog, Entry, Category
from library.models import Stack, Item
from itertools import chain
from nonhumanuser.utils import *
from app.utils import get_query
from app.models import Profile
from app.forms import UserForm, ProfileForm
from django.contrib import messages
# Create your views here.
class IndexView(View):
    """Landing page: featured content plus recent/popular sidebar lists."""

    def get(self, request, *args, **kwargs):
        # Featured items: latest entry per category, latest library item, latest game.
        story = Entry.objects.filter(category=1).last()
        article = Entry.objects.filter(category=2).last()
        library_item = Item.objects.filter(active=True).last()
        game = Game.objects.last()
        game_group = GameGroup.objects.filter(name=game.group).first()

        # Sidebar lists: newest and most-commented content across all models.
        items_recent = list(chain(
            Entry.objects.filter(active=True).order_by('-created_date'),
            Item.objects.filter(active=True).order_by('-created_date'),
            Game.objects.filter(active=True).order_by('-created_date'),
        ))
        items_popular = list(chain(
            Entry.objects.filter(active=True).order_by('-number_comments'),
            Item.objects.filter(active=True).order_by('-number_comments'),
            Game.objects.filter(active=True).order_by('-number_comments'),
        ))

        description = 'Stories, articles resources and supplements for Call of Cthulhu and related genre games.'
        context = {
            "site": {
                'title': 'NonHumanUser',
                'description': description,
            },
            'og_type': 'webpage',
            'og_url': 'http://www.nonhumanuser.com',
            'og_title': 'NonHumanUser',
            'og_description': description,
            'og_image': 'http://www.nonhumanuser.com/images/logo.png',
            'story': story,
            'article': article,
            'library_item': library_item,
            'game': game,
            'game_group': game_group,
            'items_recent': items_recent[0:5],
            'items_popular': items_popular[0:5],
            'section': 'main',
            'links': get_main_links(),
        }
        return render(request, 'app/index.html', context)
class ProfileView(View):
    """
    Display user profile
    """
    user_form = UserForm
    profile_form = ProfileForm
    initial = {'key': 'value'}
    template_name = 'app/profile.html'

    def get(self, request, *args, **kwargs):
        # Unbound forms, pre-populated from the class-level initial data.
        context = {
            'user_form': self.user_form(initial=self.initial),
            'profile_form': self.profile_form(initial=self.initial),
        }
        return render(request, self.template_name, context)

    def post(self, request, *args, **kwargs):
        # Bind both forms to the submitted data / files for the current user.
        user_form = self.user_form(request.POST, instance=request.user)
        profile_form = self.profile_form(request.POST, request.FILES,
                                         instance=request.user.profile)
        both_valid = user_form.is_valid() and profile_form.is_valid()
        if both_valid:
            user_form.save()
            profile_form.save()
            messages.success(request, 'Profile update successful.')
        else:
            messages.error(request, 'Please correct the error below.')
        return render(request, 'app/profile.html', {
            'user_form': user_form,
            'profile_form': profile_form,
        })
class SearchView(View):
    """Full-text search across blog entries, library items and games."""

    template = 'app/search_results.html'

    def get(self, request, *args, **kwargs):
        """Render search results for the ``q`` query parameter.

        The recent/popular sidebar lists are always supplied; the search
        itself only runs when a non-blank ``q`` is present.
        """
        query_string = ''
        found_entries = None
        # Sidebar
        entry_recent = Entry.objects.filter(active=True).order_by('-created_date')
        library_recent = Item.objects.filter(active=True).order_by('-created_date')
        games_recent = Game.objects.filter(active=True).order_by('-created_date')
        items_recent = list(chain(entry_recent, library_recent, games_recent))
        entry_popular = Entry.objects.filter(active=True).order_by('-number_comments')
        library_popular = Item.objects.filter(active=True).order_by('-number_comments')
        games_popular = Game.objects.filter(active=True).order_by('-number_comments')
        items_popular = list(chain(entry_popular, library_popular, games_popular))
        links = get_main_links()
        context = {
            'items_recent': items_recent[0:5],
            'items_popular': items_popular[0:5],
            'links': links,
        }
        if ('q' in request.GET) and request.GET['q'].strip():
            query_string = request.GET['q']
            entry_query = get_query(query_string, ['title', 'body', ])
            # have to figure out the type here
            entries = Entry.objects.filter(entry_query)\
                .order_by('-publish_date')
            items = Item.objects.filter(entry_query).order_by('-publish_date')
            games = Game.objects.filter(entry_query).order_by('-publish_date')
            found_entries = list(chain(entries, items, games))
            context['query_string'] = query_string
            context['found_entries'] = found_entries
        # BUGFIX: dropped context_instance=RequestContext(request). render()
        # already builds a RequestContext from `request`; the keyword was
        # deprecated in Django 1.8 and removed in 1.10, where passing it
        # raises TypeError.
        return render(request, self.template, context)
import random
from django.test import TestCase
from nodeconductor.openstack import models as openstack_models
from nodeconductor.openstack.tests import factories as openstack_factories
class QuotaModelMixinTest(TestCase):
    """Checks aggregation of quota limits and usages over querysets."""

    def test_quotas_sum_calculation_if_all_values_are_positive(self):
        model = openstack_models.OpenStackServiceProjectLink
        # We have 3 links
        links = openstack_factories.OpenStackServiceProjectLinkFactory.create_batch(3)

        # Each link has non-zero quotas
        for link in links:
            for quota_name in link.QUOTAS_NAMES:
                limit = random.choice([10, 20, 30, 40])
                link.set_quota_limit(quota_name, limit)
                link.set_quota_usage(quota_name, limit / 2)

        sum_of_quotas = model.get_sum_of_quotas_for_querysets([model.objects.all()])

        # Expected: per-quota totals of limit and usage across all links.
        expected = {}
        for quota_name in model.QUOTAS_NAMES:
            limits = [link.quotas.get(name=quota_name).limit for link in links]
            usages = [link.quotas.get(name=quota_name).usage for link in links]
            expected[quota_name] = sum(limits)
            expected[quota_name + '_usage'] = sum(usages)
        self.assertEqual(expected, sum_of_quotas)
Remove leftover (NC-1175)
|
import os
import subprocess

# Public metadata exported by this module.
__all__ = ['__author__', '__author_email__', '__version__', '__git_uri__', '__dependencies__', '__optional_dependencies__']

__author__ = "Erik Ritter (maintainer), Serena Jiang, John Bodley, Bill Ulammandakh, Robert Chang, Dan Frank, Chetan Sharma, Matthew Wardrop"
__author_email__ = "erik.ritter@airbnb.com, serena.jiang@airbnb.com, john.bodley@airbnb.com, bill.ulammandakh@airbnb.com, robert.chang@airbnb.com, dan.frank@airbnb.com, chetan.sharma@airbnb.com, mpwardrop@gmail.com"
__version__ = "0.8.9"

# Best-effort: append the current git commit hash when running from a
# checkout. Outside a repo, or without git installed, the version is left
# as the plain release string.
try:
    with open(os.devnull, 'w') as devnull:
        __version__ += '_' + subprocess.check_output(['git', 'rev-parse', 'HEAD'], shell=False, stderr=devnull).decode('utf-8').replace('\n', '')
except Exception:
    # BUGFIX: was a bare `except:`, which also swallows SystemExit and
    # KeyboardInterrupt; Exception keeps the best-effort behavior without
    # masking interpreter-exit signals.
    pass

__git_uri__ = "https://github.com/airbnb/knowledge-repo.git"

# These are the core dependencies, and should include all packages needed for accessing repositories
# and running a non-server-side instance of the flask application. Optional dependencies for converters/etc
# should be defined elsewhere.
__dependencies__ = [
    # Knowledge Repository Dependencies
    'pyyaml',  # Yaml parser and utilities; also used to configure knowledge repositories (BUGFIX: was listed twice)
    'markdown',  # Markdown conversion utilities
    'pygments',  # Code highlighting support in markdown
    'gitpython',  # Git abstraction
    'tabulate',  # Rendering information prettily in knowledge_repo script
    'cooked_input',  # Used for interactive input from user in CLI tooling
    'requests',  # Used for downloading images

    # Flask App Dependencies
    'flask',  # Main flask framework
    'flask_login',  # User management framework
    'flask_principal',  # Permissions management framework
    'flask_mail',  # Mail client and utilities
    'Flask-Migrate',  # Database migration utilities
    'sqlalchemy',  # Database abstractions
    'jinja2>=2.7',  # Templating engine
    'werkzeug>=1.0',  # Development webserver
    'gunicorn',  # Deployed webserver
    'inflection',  # String transformation library
    'pillow',  # Image thumbnailing
    'weasyprint',  # Post PDF download option
]

__optional_dependencies__ = {
    # ipynb notebook conversion support
    'ipynb': [
        'nbformat',
        'nbconvert[execute]',
        'traitlets'
    ],
    # PDF to image conversion used by app
    'pdf': [
        'PyPDF2',  # image for parsing PDFs to images
        'wand',  # imagemagick integration for image uploading
    ],
    # Optional OAuth library for external authentication support
    'oauth': [
        'requests_oauthlib'
    ],
    # Optional ldap library for ldap authentication
    'ldap': [
        'ldap3'
    ],
    # Testing dependencies
    'dev': [
        'pycodestyle',  # PEP8 conformance
        'nose',  # Testing framework
        'beautifulsoup4',  # HTML/XML parser
        'coverage'  # Documentation coverage tester
    ]
}
# 'all' aggregates every optional-extra group (computed before the key exists,
# so it does not include itself).
__optional_dependencies__['all'] = [dep for deps in __optional_dependencies__.values() for dep in deps]
adding pin for nbconvert <6.0.0 (fixes #578)
import os
import subprocess

# Public metadata exported by this module.
__all__ = ['__author__', '__author_email__', '__version__', '__git_uri__', '__dependencies__', '__optional_dependencies__']

__author__ = "Erik Ritter (maintainer), Serena Jiang, John Bodley, Bill Ulammandakh, Robert Chang, Dan Frank, Chetan Sharma, Matthew Wardrop"
__author_email__ = "erik.ritter@airbnb.com, serena.jiang@airbnb.com, john.bodley@airbnb.com, bill.ulammandakh@airbnb.com, robert.chang@airbnb.com, dan.frank@airbnb.com, chetan.sharma@airbnb.com, mpwardrop@gmail.com"
__version__ = "0.8.9"

# Best-effort: append the current git commit hash when running from a
# checkout. Outside a repo, or without git installed, the version is left
# as the plain release string.
try:
    with open(os.devnull, 'w') as devnull:
        __version__ += '_' + subprocess.check_output(['git', 'rev-parse', 'HEAD'], shell=False, stderr=devnull).decode('utf-8').replace('\n', '')
except Exception:
    # BUGFIX: was a bare `except:`, which also swallows SystemExit and
    # KeyboardInterrupt; Exception keeps the best-effort behavior without
    # masking interpreter-exit signals.
    pass

__git_uri__ = "https://github.com/airbnb/knowledge-repo.git"

# These are the core dependencies, and should include all packages needed for accessing repositories
# and running a non-server-side instance of the flask application. Optional dependencies for converters/etc
# should be defined elsewhere.
__dependencies__ = [
    # Knowledge Repository Dependencies
    'pyyaml',  # Yaml parser and utilities; also used to configure knowledge repositories (BUGFIX: was listed twice)
    'markdown',  # Markdown conversion utilities
    'pygments',  # Code highlighting support in markdown
    'gitpython',  # Git abstraction
    'tabulate',  # Rendering information prettily in knowledge_repo script
    'cooked_input',  # Used for interactive input from user in CLI tooling
    'requests',  # Used for downloading images

    # Flask App Dependencies
    'flask',  # Main flask framework
    'flask_login',  # User management framework
    'flask_principal',  # Permissions management framework
    'flask_mail',  # Mail client and utilities
    'Flask-Migrate',  # Database migration utilities
    'sqlalchemy',  # Database abstractions
    'jinja2>=2.7',  # Templating engine
    'werkzeug>=1.0',  # Development webserver
    'gunicorn',  # Deployed webserver
    'inflection',  # String transformation library
    'pillow',  # Image thumbnailing
    'weasyprint',  # Post PDF download option
]

__optional_dependencies__ = {
    # ipynb notebook conversion support
    'ipynb': [
        'nbformat',
        # BUGFIX: was 'nbconvert<6.0.0[execute]', which is invalid PEP 508 —
        # extras must precede the version specifier.
        'nbconvert[execute]<6.0.0',
        'traitlets'
    ],
    # PDF to image conversion used by app
    'pdf': [
        'PyPDF2',  # image for parsing PDFs to images
        'wand',  # imagemagick integration for image uploading
    ],
    # Optional OAuth library for external authentication support
    'oauth': [
        'requests_oauthlib'
    ],
    # Optional ldap library for ldap authentication
    'ldap': [
        'ldap3'
    ],
    # Testing dependencies
    'dev': [
        'pycodestyle',  # PEP8 conformance
        'nose',  # Testing framework
        'beautifulsoup4',  # HTML/XML parser
        'coverage'  # Documentation coverage tester
    ]
}
# 'all' aggregates every optional-extra group (computed before the key exists,
# so it does not include itself).
__optional_dependencies__['all'] = [dep for deps in __optional_dependencies__.values() for dep in deps]
|
import os
import time
from datetime import date
import cluster
from cluster import PBS
import XnatUtils
from constant import RESULTS_DIR
# Job Statuses -- lifecycle of a processing task on the cluster.
NEED_TO_RUN='NEED_TO_RUN' # assessor that is ready to be launched on the cluster (ACCRE). All the input data for the process to run are there.
NEED_INPUTS='NEED_INPUTS' # assessor where input data are missing from a scan, multiple scans or other assessor.
JOB_RUNNING='JOB_RUNNING' # the job has been submitted on the cluster and is running right now.
JOB_FAILED='JOB_FAILED' # the job failed on the cluster.
READY_TO_UPLOAD='READY_TO_UPLOAD' # job done, waiting for the Spider to upload the results.
UPLOADING='UPLOADING' # in the process of uploading the resources on XNAT.
COMPLETE='COMPLETE' # the assessor contains all the files. The upload and the job are done.
READY_TO_COMPLETE='READY_TO_COMPLETE' # the job finished and upload is complete.
DOES_NOT_EXIST='DOES_NOT_EXIST' # the assessor does not exist on XNAT.
# Statuses that still require attention from the task manager.
OPEN_STATUS_LIST = [NEED_TO_RUN, UPLOADING, JOB_RUNNING, READY_TO_COMPLETE, JOB_FAILED]
# QA Statuses -- set on the assessor's validation record.
JOB_PENDING = 'Job Pending' # job is still running, not ready for QA yet.
NEEDS_QA='Needs QA' # for FS, the complete status.
PASSED_QA='Passed' # QA status set by the Image Analyst after looking at the results.
FAILED='Failed' # QA status set by the Image Analyst after looking at the results.
FAILED_NEEDS_REPROC='Failed-needs reprocessing'
PASSED_EDITED_QA='Passed with edits'
RERUN='Rerun' # will cause spider to delete results and rerun the processing.
REPROC='Reproc' # will cause spider to zip the current results, put them in OLD, and then reprocess.
# QC statuses that trigger a state change in Task.update_status().
OPEN_QC_LIST = [RERUN, REPROC]
# Other Constants
DEFAULT_PBS_DIR=os.path.join(RESULTS_DIR,'PBS') # where generated PBS scripts are written.
DEFAULT_OUT_DIR=os.path.join(RESULTS_DIR,'OUTLOG') # where cluster job output logs are written.
READY_TO_UPLOAD_FLAG_FILENAME = 'READY_TO_UPLOAD.txt' # flag file a job drops on successful completion.
OLD_RESOURCE = 'OLD' # resource that archives previous outputs on reprocessing.
EDITS_RESOURCE = 'EDITS' # resource holding manual edits, preserved across reprocessing.
# Resources never deleted/archived when a task is redone.
REPROC_RES_SKIP_LIST = [OLD_RESOURCE, EDITS_RESOURCE]
class Task(object):
    """One processing task: an XNAT assessor driven through its job lifecycle.

    Couples a ``processor`` (the pipeline definition) with an ``assessor``
    (the XNAT record for one run of that pipeline) and mediates all status /
    QC bookkeeping between the cluster scheduler and XNAT.  Two assessor
    datatypes are supported -- 'proc:genprocdata' and FreeSurfer's
    'fs:fsdata' -- and most accessors branch on ``self.atype`` accordingly.
    """

    def __init__(self, processor, assessor, upload_dir):
        """Cache handles and create/initialize the assessor if missing.

        :param processor: pipeline object; provides xsitype, name, version,
            has_inputs(), get_cmds() and cluster resource settings
        :param assessor: pyxnat assessor object for this task
        :param upload_dir: local directory used by the upload spider
        """
        self.processor = processor
        self.assessor = assessor
        self.upload_dir = upload_dir
        self.atype = processor.xsitype.lower()
        # Create assessor if needed
        if not assessor.exists():
            assessor.create(assessors=self.atype)
            self.set_createdate_today()
            if self.atype == 'proc:genprocdata':
                assessor.attrs.set('proc:genprocdata/proctype', self.get_processor_name())
                assessor.attrs.set('proc:genprocdata/validation/status', JOB_PENDING)
                assessor.attrs.set('proc:genprocdata/procversion', self.get_processor_version())
            # Launchable only once every required input is present.
            if processor.has_inputs(assessor):
                self.set_status(NEED_TO_RUN)
            else:
                self.set_status(NEED_INPUTS)
        # Cache for convenience
        self.assessor_id = assessor.id()
        self.assessor_label = assessor.label()

    def get_processor_name(self):
        """Return the processor (pipeline) name."""
        return self.processor.name

    def get_processor_version(self):
        """Return the processor (pipeline) version string."""
        return self.processor.version

    def is_open(self):
        """Return True if the task's status still requires attention."""
        astatus = self.get_status()
        return astatus in OPEN_STATUS_LIST

    def copy_memused(self):
        """Copy the legacy 'memusedmb' field into 'memused' with 'mb' suffix."""
        memusedmb = ''
        if self.atype == 'proc:genprocdata':
            memusedmb = self.assessor.attrs.get('proc:genprocdata/memusedmb')
        elif self.atype == 'fs:fsdata':
            #memusedmb = ''.join(self.assessor.xpath("//xnat:addParam[@name='memusedmb']/child::text()")).replace("\n","")
            memusedmb = self.assessor.attrs.get('fs:fsdata/memusedmb')
        if memusedmb.strip() != '':
            memused = memusedmb + 'mb'
            #print 'DEBUG:copy memused:'+self.assessor_label+':'+memused
            self.set_memused(memused)

    def check_job_usage(self):
        """Record memory/walltime used, querying the cluster when unset.

        No-op if walltime is already recorded (fills memused with 'NotFound'
        if it alone is missing).  Jobs older than the cluster's accounting
        window get 'NotFound' for both values.
        """
        #self.copy_memused()
        memused = self.get_memused()
        walltime = self.get_walltime()
        if walltime != '':
            if memused == '':
                self.set_memused('NotFound')
            else:
                pass
                #print('DEBUG:memused and walltime already set, skipping')
            return
        jobstartdate = self.get_jobstartdate()
        # We can't get info from cluster if job too old
        if not cluster.is_traceable_date(jobstartdate):
            self.set_walltime('NotFound')
            self.set_memused('NotFound')
            return
        # Get usage with tracejob
        jobinfo = cluster.tracejob_info(self.get_jobid(), jobstartdate)
        if jobinfo['mem_used'] != '':
            # tracejob reports memory in kb; store whole megabytes.
            memused = str(int(jobinfo['mem_used'].split('kb')[0])/1024)+'mb'
            self.set_memused(memused)
        if jobinfo['walltime_used'] != '':
            self.set_walltime(jobinfo['walltime_used'])

    def get_memused(self):
        """Return the stored memory-used value (stripped), '' if unset."""
        memused = ''
        if self.atype == 'proc:genprocdata':
            memused = self.assessor.attrs.get('proc:genprocdata/memused')
        elif self.atype == 'fs:fsdata':
            #memused = ''.join(self.assessor.xpath("//xnat:addParam[@name='memused']/child::text()")).replace("\n","")
            memused = self.assessor.attrs.get('fs:fsdata/memused')
        return memused.strip()

    def set_memused(self,memused):
        """Store the memory-used value on the assessor."""
        if self.atype == 'proc:genprocdata':
            self.assessor.attrs.set('proc:genprocdata/memused', memused)
        elif self.atype == 'fs:fsdata':
            #self.assessor.attrs.set("fs:fsdata/parameters/addParam[name=memused]/addField", memused)
            self.assessor.attrs.set('fs:fsdata/memused', memused)

    def get_walltime(self):
        """Return the stored walltime-used value (stripped), '' if unset."""
        walltime = ''
        if self.atype == 'proc:genprocdata':
            walltime = self.assessor.attrs.get('proc:genprocdata/walltimeused')
        elif self.atype == 'fs:fsdata':
            #walltime = ''.join(self.assessor.xpath("//xnat:addParam[@name='walltimeused']/child::text()")).replace("\n","")
            walltime = self.assessor.attrs.get('fs:fsdata/walltimeused')
        return walltime.strip()

    def set_walltime(self,walltime):
        """Store the walltime-used value on the assessor."""
        if self.atype == 'proc:genprocdata':
            self.assessor.attrs.set('proc:genprocdata/walltimeused', walltime)
        elif self.atype == 'fs:fsdata':
            #self.assessor.attrs.set("fs:fsdata/parameters/addParam[name=walltimeused]/addField", walltime)
            self.assessor.attrs.set('fs:fsdata/walltimeused', walltime)

    def undo_processing(self):
        """Reset the task for a rerun: clear job fields, delete output resources.

        Resources in REPROC_RES_SKIP_LIST (OLD, EDITS) are preserved.
        """
        # Imported here to avoid a hard module-level dependency on pyxnat internals.
        from pyxnat.core.errors import DatabaseError
        self.set_qcstatus(JOB_PENDING)
        self.set_jobid(' ')
        self.set_memused(' ')
        self.set_walltime(' ')
        out_resource_list = self.assessor.out_resources()
        for out_resource in out_resource_list:
            if out_resource.label() not in REPROC_RES_SKIP_LIST:
                print('\t Removing '+out_resource.label())
                try:
                    out_resource.delete()
                except DatabaseError:
                    # Best-effort: report and keep removing the other resources.
                    print('\t ERROR:deleting resource.')
                    pass

    def reproc_processing(self):
        """Archive current outputs into the OLD resource, then reset the task.

        Downloads every non-skip resource plus the assessor XML to a local
        timestamped folder, zips it, uploads the zip to OLD, then calls
        undo_processing().
        """
        curtime = time.strftime("%Y%m%d-%H%M%S")
        local_dir = self.assessor_label+'_'+curtime
        local_zip = local_dir+'.zip'
        xml_filename = self.upload_dir+'/'+local_dir+'/'+self.assessor_label+'.xml'
        # Make the temp dir
        os.makedirs(self.upload_dir+'/'+local_dir)
        # Download the current resources
        out_resource_list = self.assessor.out_resources()
        for out_resource in out_resource_list:
            olabel = out_resource.label()
            if olabel not in REPROC_RES_SKIP_LIST:
                print('\tDownloading:'+olabel)
                out_res = self.assessor.out_resource(olabel)
                out_res.get(self.upload_dir+'/'+local_dir, extract=True)
        # Download xml of assessor
        xml = self.assessor.get()
        f = open(xml_filename,'w')
        f.write(xml+'\n')
        f.close()
        # Zip it all up
        cmd = 'cd '+self.upload_dir + ' && zip -qr '+local_zip+' '+local_dir+'/'
        print('DEBUG:running cmd:'+cmd)
        os.system(cmd)
        # Upload it to Archive
        self.assessor.out_resource(OLD_RESOURCE).file(local_zip).put(self.upload_dir+'/'+local_zip)
        # Run undo
        self.undo_processing()
        # TODO:
        # delete the local copies

    def update_status(self):
        """Advance the task one step through its state machine.

        Reacts to the current proc status (and, for finished tasks, the QC
        status: Rerun/Reproc trigger a reset).  Returns the new status,
        which may equal the old one.
        """
        old_status = self.get_status()
        new_status = old_status
        if old_status == COMPLETE or old_status == JOB_FAILED:
            qcstatus = self.get_qcstatus()
            if qcstatus == REPROC:
                print('\t *INFO:qcstatus=REPROC, running reproc_processing...')
                self.reproc_processing()
                new_status = NEED_TO_RUN
            elif qcstatus == RERUN:
                print('\t *INFO:qcstatus=RERUN, running undo_processing...')
                self.undo_processing()
                new_status = NEED_TO_RUN
            else:
                #self.check_date()
                pass
        elif old_status == NEED_TO_RUN:
            # TODO: anything, not yet???
            pass
        elif old_status == READY_TO_COMPLETE:
            self.check_job_usage()
            self.set_qcstatus(NEEDS_QA)
            new_status = COMPLETE
        elif old_status == NEED_INPUTS:
            # Check it again in case available inputs changed
            if self.has_inputs():
                new_status = NEED_TO_RUN
        elif old_status == JOB_RUNNING:
            new_status = self.check_running()
        elif old_status == READY_TO_UPLOAD:
            # TODO: let upload spider handle it???
            #self.check_date()
            pass
        elif old_status == UPLOADING:
            # TODO: can we see if it's really uploading???
            pass
        else:
            print('\t *ERROR:unknown status:'+old_status)
        if (new_status != old_status):
            print('\t *INFO:changing status from '+old_status+' to '+new_status)
            self.set_status(new_status)
        # Update QC Status
        if new_status == COMPLETE:
            self.set_qcstatus(NEEDS_QA)
        return new_status

    def get_jobid(self):
        """Return the cluster job id stored on the assessor ('' if unset)."""
        jobid = ''
        if self.atype == 'proc:genprocdata':
            jobid = self.assessor.attrs.get('proc:genprocdata/jobid')
        elif self.atype == 'fs:fsdata':
            #jobid = ''.join(self.assessor.xpath("//xnat:addParam[@name='jobid']/child::text()")).replace("\n","")
            jobid = self.assessor.attrs.get('fs:fsdata/jobid')
        return jobid

    def get_job_status(self):
        """Query the cluster for the job's state; 'UNKNOWN' if no job id."""
        jobstatus = 'UNKNOWN'
        jobid = self.get_jobid()
        if jobid != '' and jobid != '0':
            jobstatus = cluster.job_status(jobid)
        return jobstatus

    def launch(self,jobdir,job_email=None,job_email_options='bae'):
        """Write a PBS script for this task and submit it to the cluster.

        On success records JOB_RUNNING + job id + start date, pushes a
        record to REDCap and returns True; returns False if submission
        did not yield a job id.
        """
        cmds = self.commands(jobdir)
        pbsfile = self.pbs_path()
        outlog = self.outlog_path()
        pbs = PBS(pbsfile,outlog,cmds,self.processor.walltime_str,self.processor.memreq_mb,self.processor.ppn,job_email,job_email_options)
        pbs.write()
        jobid = pbs.submit()
        if jobid == '' or jobid == '0':
            # TODO: raise exception
            print('ERROR:failed to launch job on cluster')
            return False
        else:
            self.set_status(JOB_RUNNING)
            self.set_jobid(jobid)
            self.set_jobstartdate_today()
            #save record on redcap for the job that has been launched
            project=self.assessor_label.split('-x-')[0]
            SM_name=self.get_processor_name()
            data,record_id=XnatUtils.create_record_redcap(project, SM_name)
            run=XnatUtils.save_job_redcap(data,record_id)
            if not run:
                print(' ->ERROR: did not send the job to redcap for jobID <'+str(jobid)+'>: '+record_id)
            return True

    def check_date(self):
        """Backfill the assessor creation date from the job start date."""
        if self.get_createdate() != '':
            return
        jobstartdate = self.get_jobstartdate()
        if jobstartdate != '':
            self.set_createdate(jobstartdate)

    def get_jobstartdate(self):
        """Return the stored job start date ('' if unset)."""
        jobstartdate = ''
        if self.atype == 'proc:genprocdata':
            jobstartdate = self.assessor.attrs.get('proc:genProcData/jobstartdate')
        elif self.atype == 'fs:fsdata':
            #jobstartdate = ''.join(self.assessor.xpath("//xnat:addParam[@name='jobstartdate']/child::text()")).replace("\n","")
            jobstartdate = self.assessor.attrs.get('fs:fsdata/jobstartdate')
        return jobstartdate

    def set_jobstartdate_today(self):
        """Set the job start date to today's date (ISO format)."""
        today_str = str(date.today())
        return self.set_jobstartdate(today_str)

    def set_jobstartdate(self,date_str):
        """Store the job start date string on the assessor."""
        if self.atype == 'proc:genprocdata':
            self.assessor.attrs.set('proc:genProcData/jobstartdate', date_str)
        elif self.atype == 'fs:fsdata':
            #self.assessor.attrs.set("fs:fsdata/parameters/addParam[name=jobstartdate]/addField", date_str)
            self.assessor.attrs.set('fs:fsdata/jobstartdate', date_str)

    def get_createdate(self):
        """Return the assessor creation date ('' if unset)."""
        createdate = ''
        if self.atype == 'proc:genprocdata':
            createdate = self.assessor.attrs.get('proc:genProcData/date')
        elif self.atype == 'fs:fsdata':
            createdate = self.assessor.attrs.get('fs:fsData/date')
        return createdate

    def set_createdate(self,date_str):
        """Store the assessor creation date; returns the stored string."""
        if self.atype == 'proc:genprocdata':
            self.assessor.attrs.set('proc:genProcData/date', date_str)
        elif self.atype == 'fs:fsdata':
            self.assessor.attrs.set('fs:fsData/date', date_str)
        return date_str

    def set_createdate_today(self):
        """Set the creation date to today (ISO format); returns the string."""
        today_str = str(date.today())
        self.set_createdate(today_str)
        return today_str

    def get_status(self):
        """Return the proc status, DOES_NOT_EXIST if the assessor is missing."""
        if not self.assessor.exists():
            xnat_status = DOES_NOT_EXIST
        elif self.atype == 'proc:genprocdata':
            xnat_status = self.assessor.attrs.get('proc:genProcData/procstatus')
        elif self.atype == 'fs:fsdata':
            xnat_status = self.assessor.attrs.get('fs:fsdata/procstatus')
        else:
            xnat_status = 'UNKNOWN_xsiType:'+self.atype
        return xnat_status

    def set_status(self,status):
        """Store the proc status on the assessor."""
        if self.atype == 'fs:fsdata':
            self.assessor.attrs.set('fs:fsdata/procstatus', status)
        else:
            self.assessor.attrs.set('proc:genprocdata/procstatus', status)

    def get_qcstatus(self):
        """Return the QC (validation) status, DOES_NOT_EXIST if missing."""
        qcstatus = ''
        atype = self.atype
        if not self.assessor.exists():
            qcstatus = DOES_NOT_EXIST
        elif atype == 'proc:genprocdata' or atype == 'fs:fsdata':
            qcstatus = self.assessor.attrs.get(atype+'/validation/status')
        else:
            qcstatus = 'UNKNOWN_xsiType:'+atype
        return qcstatus

    def set_qcstatus(self,qcstatus):
        """Store the QC (validation) status on the assessor."""
        atype = self.atype
        self.assessor.attrs.set(atype+'/validation/status', qcstatus)

    def has_inputs(self):
        """Return True if every input the processor needs is present."""
        return self.processor.has_inputs(self.assessor)

    def set_jobid(self,jobid):
        """Store the cluster job id on the assessor."""
        if self.atype == 'proc:genprocdata':
            self.assessor.attrs.set('proc:genprocdata/jobid', jobid)
        elif self.atype == 'fs:fsdata':
            #self.assessor.attrs.set("fs:fsdata/parameters/addParam[name=jobid]/addField", jobid)
            self.assessor.attrs.set('fs:fsdata/jobid', jobid)

    def commands(self,jobdir):
        """Return the shell commands to run this task in its own job subdir."""
        return self.processor.get_cmds(self.assessor,jobdir+"/"+self.assessor_label)

    def pbs_path(self):
        """Return the path of the PBS script for this task."""
        return DEFAULT_PBS_DIR+'/'+self.assessor_label+'.pbs'

    def outlog_path(self):
        """Return the path of the cluster output log for this task."""
        return DEFAULT_OUT_DIR+'/'+self.assessor_label+'.output'

    def ready_flag_exists(self):
        """Return True if the job dropped its READY_TO_UPLOAD flag file."""
        flagfile = self.upload_dir+'/'+self.assessor_label+'/'+READY_TO_UPLOAD_FLAG_FILENAME
        return os.path.isfile(flagfile)

    def check_running(self):
        """Return the task status implied by the cluster job's state."""
        # Check status on cluster
        jobstatus = self.get_job_status()
        if jobstatus == 'R' or jobstatus == 'Q':
            # Still running
            return JOB_RUNNING
        elif not self.ready_flag_exists():
            # Check for a flag file created upon completion, if it's not there then the job failed
            return JOB_FAILED
        else:
            # Let Upload Spider handle the upload
            return JOB_RUNNING
Update task.py
import os
import time
from datetime import date
import cluster
from cluster import PBS
import XnatUtils
from constants import RESULTS_DIR
# Job Statuses -- lifecycle of a processing task on the cluster.
NEED_TO_RUN='NEED_TO_RUN' # assessor that is ready to be launched on the cluster (ACCRE). All the input data for the process to run are there.
NEED_INPUTS='NEED_INPUTS' # assessor where input data are missing from a scan, multiple scans or other assessor.
JOB_RUNNING='JOB_RUNNING' # the job has been submitted on the cluster and is running right now.
JOB_FAILED='JOB_FAILED' # the job failed on the cluster.
READY_TO_UPLOAD='READY_TO_UPLOAD' # job done, waiting for the Spider to upload the results.
UPLOADING='UPLOADING' # in the process of uploading the resources on XNAT.
COMPLETE='COMPLETE' # the assessor contains all the files. The upload and the job are done.
READY_TO_COMPLETE='READY_TO_COMPLETE' # the job finished and upload is complete.
DOES_NOT_EXIST='DOES_NOT_EXIST' # the assessor does not exist on XNAT.
# Statuses that still require attention from the task manager.
OPEN_STATUS_LIST = [NEED_TO_RUN, UPLOADING, JOB_RUNNING, READY_TO_COMPLETE, JOB_FAILED]
# QA Statuses -- set on the assessor's validation record.
JOB_PENDING = 'Job Pending' # job is still running, not ready for QA yet.
NEEDS_QA='Needs QA' # for FS, the complete status.
PASSED_QA='Passed' # QA status set by the Image Analyst after looking at the results.
FAILED='Failed' # QA status set by the Image Analyst after looking at the results.
FAILED_NEEDS_REPROC='Failed-needs reprocessing'
PASSED_EDITED_QA='Passed with edits'
RERUN='Rerun' # will cause spider to delete results and rerun the processing.
REPROC='Reproc' # will cause spider to zip the current results, put them in OLD, and then reprocess.
# QC statuses that trigger a state change in Task.update_status().
OPEN_QC_LIST = [RERUN, REPROC]
# Other Constants
DEFAULT_PBS_DIR=os.path.join(RESULTS_DIR,'PBS') # where generated PBS scripts are written.
DEFAULT_OUT_DIR=os.path.join(RESULTS_DIR,'OUTLOG') # where cluster job output logs are written.
READY_TO_UPLOAD_FLAG_FILENAME = 'READY_TO_UPLOAD.txt' # flag file a job drops on successful completion.
OLD_RESOURCE = 'OLD' # resource that archives previous outputs on reprocessing.
EDITS_RESOURCE = 'EDITS' # resource holding manual edits, preserved across reprocessing.
# Resources never deleted/archived when a task is redone.
REPROC_RES_SKIP_LIST = [OLD_RESOURCE, EDITS_RESOURCE]
class Task(object):
    """One unit of processing on XNAT: a processor applied to an assessor.

    Wraps a pyxnat assessor object together with its processor. On
    construction the assessor is created on XNAT if missing. Provides
    getters/setters for job-related attributes (status, QC status, job id,
    dates, memory/walltime used) and the launch/update life-cycle driven
    by the scheduler (see update_status).

    NOTE(review): attribute paths mix 'proc:genprocdata' and
    'proc:genProcData' casing; XNAT appears to treat them the same here,
    but this should be confirmed/normalized.
    """

    def __init__(self, processor, assessor, upload_dir):
        """Bind processor+assessor; create the assessor on XNAT if needed.

        :param processor: processor object (must provide xsitype, name,
            version, has_inputs(), get_cmds(), walltime_str, memreq_mb, ppn)
        :param assessor: pyxnat assessor object
        :param upload_dir: local directory watched by the Upload Spider
        """
        self.processor = processor
        self.assessor = assessor
        self.upload_dir = upload_dir
        # xsiType of the assessor, lower-cased for comparisons below
        self.atype = processor.xsitype.lower()
        # Create assessor if needed
        if not assessor.exists():
            assessor.create(assessors=self.atype)
            self.set_createdate_today()
            if self.atype == 'proc:genprocdata':
                assessor.attrs.set('proc:genprocdata/proctype', self.get_processor_name())
                assessor.attrs.set('proc:genprocdata/validation/status', JOB_PENDING)
                assessor.attrs.set('proc:genprocdata/procversion', self.get_processor_version())
            # Initial status depends on whether all inputs are already on XNAT
            if processor.has_inputs(assessor):
                self.set_status(NEED_TO_RUN)
            else:
                self.set_status(NEED_INPUTS)
        # Cache for convenience
        self.assessor_id = assessor.id()
        self.assessor_label = assessor.label()

    def get_processor_name(self):
        """Return the name of the processor attached to this task."""
        return self.processor.name

    def get_processor_version(self):
        """Return the version of the processor attached to this task."""
        return self.processor.version

    def is_open(self):
        """Return True if the task still needs scheduler attention."""
        astatus = self.get_status()
        return astatus in OPEN_STATUS_LIST

    def copy_memused(self):
        """Copy the legacy 'memusedmb' attribute into 'memused' ('mb' suffixed).

        No-op when 'memusedmb' is empty/whitespace.
        """
        memusedmb = ''
        if self.atype == 'proc:genprocdata':
            memusedmb = self.assessor.attrs.get('proc:genprocdata/memusedmb')
        elif self.atype == 'fs:fsdata':
            #memusedmb = ''.join(self.assessor.xpath("//xnat:addParam[@name='memusedmb']/child::text()")).replace("\n","")
            memusedmb = self.assessor.attrs.get('fs:fsdata/memusedmb')
        if memusedmb.strip() != '':
            memused = memusedmb + 'mb'
            #print 'DEBUG:copy memused:'+self.assessor_label+':'+memused
            self.set_memused(memused)

    def check_job_usage(self):
        """Record memory/walltime used by the finished cluster job.

        If walltime is already recorded, only backfill a missing memused
        and return. Otherwise query cluster accounting (tracejob) unless
        the job start date is too old to be traceable, in which case both
        fields are set to 'NotFound'.
        """
        #self.copy_memused()
        memused = self.get_memused()
        walltime = self.get_walltime()
        if walltime != '':
            if memused == '':
                self.set_memused('NotFound')
            else:
                pass
                #print('DEBUG:memused and walltime already set, skipping')
            return
        jobstartdate = self.get_jobstartdate()
        # We can't get info from cluster if job too old
        if not cluster.is_traceable_date(jobstartdate):
            self.set_walltime('NotFound')
            self.set_memused('NotFound')
            return
        # Get usage with tracejob
        jobinfo = cluster.tracejob_info(self.get_jobid(), jobstartdate)
        if jobinfo['mem_used'] != '':
            # tracejob reports '<n>kb'; convert to whole megabytes
            memused = str(int(jobinfo['mem_used'].split('kb')[0])/1024)+'mb'
            self.set_memused(memused)
        if jobinfo['walltime_used'] != '':
            self.set_walltime(jobinfo['walltime_used'])

    def get_memused(self):
        """Return the stripped 'memused' attribute ('' if unset)."""
        memused = ''
        if self.atype == 'proc:genprocdata':
            memused = self.assessor.attrs.get('proc:genprocdata/memused')
        elif self.atype == 'fs:fsdata':
            #memused = ''.join(self.assessor.xpath("//xnat:addParam[@name='memused']/child::text()")).replace("\n","")
            memused = self.assessor.attrs.get('fs:fsdata/memused')
        return memused.strip()

    def set_memused(self,memused):
        """Store the 'memused' attribute on the assessor."""
        if self.atype == 'proc:genprocdata':
            self.assessor.attrs.set('proc:genprocdata/memused', memused)
        elif self.atype == 'fs:fsdata':
            #self.assessor.attrs.set("fs:fsdata/parameters/addParam[name=memused]/addField", memused)
            self.assessor.attrs.set('fs:fsdata/memused', memused)

    def get_walltime(self):
        """Return the stripped 'walltimeused' attribute ('' if unset)."""
        walltime = ''
        if self.atype == 'proc:genprocdata':
            walltime = self.assessor.attrs.get('proc:genprocdata/walltimeused')
        elif self.atype == 'fs:fsdata':
            #walltime = ''.join(self.assessor.xpath("//xnat:addParam[@name='walltimeused']/child::text()")).replace("\n","")
            walltime = self.assessor.attrs.get('fs:fsdata/walltimeused')
        return walltime.strip()

    def set_walltime(self,walltime):
        """Store the 'walltimeused' attribute on the assessor."""
        if self.atype == 'proc:genprocdata':
            self.assessor.attrs.set('proc:genprocdata/walltimeused', walltime)
        elif self.atype == 'fs:fsdata':
            #self.assessor.attrs.set("fs:fsdata/parameters/addParam[name=walltimeused]/addField", walltime)
            self.assessor.attrs.set('fs:fsdata/walltimeused', walltime)

    def undo_processing(self):
        """Reset job bookkeeping and delete output resources (except OLD/EDITS).

        Resource deletion errors are reported but do not abort the loop,
        so remaining resources are still removed.
        """
        from pyxnat.core.errors import DatabaseError
        self.set_qcstatus(JOB_PENDING)
        self.set_jobid(' ')
        self.set_memused(' ')
        self.set_walltime(' ')
        out_resource_list = self.assessor.out_resources()
        for out_resource in out_resource_list:
            if out_resource.label() not in REPROC_RES_SKIP_LIST:
                print('\t Removing '+out_resource.label())
                try:
                    out_resource.delete()
                except DatabaseError:
                    print('\t ERROR:deleting resource.')
                    pass

    def reproc_processing(self):
        """Archive current results into the OLD resource, then reset the task.

        Downloads every output resource (except OLD/EDITS) plus the assessor
        XML into a timestamped temp dir, zips it, uploads the zip to the OLD
        resource, and finally calls undo_processing().
        """
        curtime = time.strftime("%Y%m%d-%H%M%S")
        local_dir = self.assessor_label+'_'+curtime
        local_zip = local_dir+'.zip'
        xml_filename = self.upload_dir+'/'+local_dir+'/'+self.assessor_label+'.xml'
        # Make the temp dir
        os.makedirs(self.upload_dir+'/'+local_dir)
        # Download the current resources
        out_resource_list = self.assessor.out_resources()
        for out_resource in out_resource_list:
            olabel = out_resource.label()
            if olabel not in REPROC_RES_SKIP_LIST:
                print('\tDownloading:'+olabel)
                out_res = self.assessor.out_resource(olabel)
                out_res.get(self.upload_dir+'/'+local_dir, extract=True)
        # Download xml of assessor
        xml = self.assessor.get()
        f = open(xml_filename,'w')
        f.write(xml+'\n')
        f.close()
        # Zip it all up
        # NOTE(review): cmd is built from upload_dir and the assessor label;
        # paths with spaces/shell metacharacters would break os.system —
        # consider subprocess with a list argument.
        cmd = 'cd '+self.upload_dir + ' && zip -qr '+local_zip+' '+local_dir+'/'
        print('DEBUG:running cmd:'+cmd)
        os.system(cmd)
        # Upload it to Archive
        self.assessor.out_resource(OLD_RESOURCE).file(local_zip).put(self.upload_dir+'/'+local_zip)
        # Run undo
        self.undo_processing()
        # TODO:
        # delete the local copies

    def update_status(self):
        """Advance the task state machine one step; return the new status.

        COMPLETE/JOB_FAILED react to QC status (Rerun/Reproc);
        READY_TO_COMPLETE records usage and completes; NEED_INPUTS re-checks
        inputs; JOB_RUNNING polls the cluster. The status is written back
        only when it changed.
        """
        old_status = self.get_status()
        new_status = old_status
        if old_status == COMPLETE or old_status == JOB_FAILED:
            qcstatus = self.get_qcstatus()
            if qcstatus == REPROC:
                print('\t *INFO:qcstatus=REPROC, running reproc_processing...')
                self.reproc_processing()
                new_status = NEED_TO_RUN
            elif qcstatus == RERUN:
                print('\t *INFO:qcstatus=RERUN, running undo_processing...')
                self.undo_processing()
                new_status = NEED_TO_RUN
            else:
                #self.check_date()
                pass
        elif old_status == NEED_TO_RUN:
            # TODO: anything, not yet???
            pass
        elif old_status == READY_TO_COMPLETE:
            self.check_job_usage()
            self.set_qcstatus(NEEDS_QA)
            new_status = COMPLETE
        elif old_status == NEED_INPUTS:
            # Check it again in case available inputs changed
            if self.has_inputs():
                new_status = NEED_TO_RUN
        elif old_status == JOB_RUNNING:
            new_status = self.check_running()
        elif old_status == READY_TO_UPLOAD:
            # TODO: let upload spider handle it???
            #self.check_date()
            pass
        elif old_status == UPLOADING:
            # TODO: can we see if it's really uploading???
            pass
        else:
            print('\t *ERROR:unknown status:'+old_status)
        if (new_status != old_status):
            print('\t *INFO:changing status from '+old_status+' to '+new_status)
            self.set_status(new_status)
        # Update QC Status
        if new_status == COMPLETE:
            self.set_qcstatus(NEEDS_QA)
        return new_status

    def get_jobid(self):
        """Return the cluster job id recorded on the assessor ('' if none)."""
        jobid = ''
        if self.atype == 'proc:genprocdata':
            jobid = self.assessor.attrs.get('proc:genprocdata/jobid')
        elif self.atype == 'fs:fsdata':
            #jobid = ''.join(self.assessor.xpath("//xnat:addParam[@name='jobid']/child::text()")).replace("\n","")
            jobid = self.assessor.attrs.get('fs:fsdata/jobid')
        return jobid

    def get_job_status(self):
        """Ask the cluster for the job's status; 'UNKNOWN' without a job id."""
        jobstatus = 'UNKNOWN'
        jobid = self.get_jobid()
        if jobid != '' and jobid != '0':
            jobstatus = cluster.job_status(jobid)
        return jobstatus

    def launch(self,jobdir,job_email=None,job_email_options='bae'):
        """Write and submit a PBS job for this task.

        :param jobdir: working directory for the job's commands
        :param job_email: address for PBS notifications (optional)
        :param job_email_options: PBS mail options (default 'bae')
        :return: True on successful submission (status set to JOB_RUNNING,
            job id and start date recorded, record pushed to REDCap);
            False when the cluster returned no job id.
        """
        cmds = self.commands(jobdir)
        pbsfile = self.pbs_path()
        outlog = self.outlog_path()
        pbs = PBS(pbsfile,outlog,cmds,self.processor.walltime_str,self.processor.memreq_mb,self.processor.ppn,job_email,job_email_options)
        pbs.write()
        jobid = pbs.submit()
        if jobid == '' or jobid == '0':
            # TODO: raise exception
            print('ERROR:failed to launch job on cluster')
            return False
        else:
            self.set_status(JOB_RUNNING)
            self.set_jobid(jobid)
            self.set_jobstartdate_today()
            # save record on REDCap for the job that has been launched
            project=self.assessor_label.split('-x-')[0]
            SM_name=self.get_processor_name()
            data,record_id=XnatUtils.create_record_redcap(project, SM_name)
            run=XnatUtils.save_job_redcap(data,record_id)
            if not run:
                print(' ->ERROR: did not send the job to redcap for jobID <'+str(jobid)+'>: '+record_id)
            return True

    def check_date(self):
        """Backfill the assessor create date from the job start date if empty."""
        if self.get_createdate() != '':
            return
        jobstartdate = self.get_jobstartdate()
        if jobstartdate != '':
            self.set_createdate(jobstartdate)

    def get_jobstartdate(self):
        """Return the recorded job start date ('' if unset)."""
        jobstartdate = ''
        if self.atype == 'proc:genprocdata':
            jobstartdate = self.assessor.attrs.get('proc:genProcData/jobstartdate')
        elif self.atype == 'fs:fsdata':
            #jobstartdate = ''.join(self.assessor.xpath("//xnat:addParam[@name='jobstartdate']/child::text()")).replace("\n","")
            jobstartdate = self.assessor.attrs.get('fs:fsdata/jobstartdate')
        return jobstartdate

    def set_jobstartdate_today(self):
        """Set the job start date to today (ISO format)."""
        today_str = str(date.today())
        return self.set_jobstartdate(today_str)

    def set_jobstartdate(self,date_str):
        """Store the job start date string on the assessor."""
        if self.atype == 'proc:genprocdata':
            self.assessor.attrs.set('proc:genProcData/jobstartdate', date_str)
        elif self.atype == 'fs:fsdata':
            #self.assessor.attrs.set("fs:fsdata/parameters/addParam[name=jobstartdate]/addField", date_str)
            self.assessor.attrs.set('fs:fsdata/jobstartdate', date_str)

    def get_createdate(self):
        """Return the assessor's create date ('' if unset)."""
        createdate = ''
        if self.atype == 'proc:genprocdata':
            createdate = self.assessor.attrs.get('proc:genProcData/date')
        elif self.atype == 'fs:fsdata':
            createdate = self.assessor.attrs.get('fs:fsData/date')
        return createdate

    def set_createdate(self,date_str):
        """Store the assessor's create date; returns the stored string."""
        if self.atype == 'proc:genprocdata':
            self.assessor.attrs.set('proc:genProcData/date', date_str)
        elif self.atype == 'fs:fsdata':
            self.assessor.attrs.set('fs:fsData/date', date_str)
        return date_str

    def set_createdate_today(self):
        """Set the assessor's create date to today; returns the date string."""
        today_str = str(date.today())
        self.set_createdate(today_str)
        return today_str

    def get_status(self):
        """Return the job status, DOES_NOT_EXIST, or an UNKNOWN_xsiType marker."""
        if not self.assessor.exists():
            xnat_status = DOES_NOT_EXIST
        elif self.atype == 'proc:genprocdata':
            xnat_status = self.assessor.attrs.get('proc:genProcData/procstatus')
        elif self.atype == 'fs:fsdata':
            xnat_status = self.assessor.attrs.get('fs:fsdata/procstatus')
        else:
            xnat_status = 'UNKNOWN_xsiType:'+self.atype
        return xnat_status

    def set_status(self,status):
        """Store the job status on the assessor (genprocdata for non-FS types)."""
        if self.atype == 'fs:fsdata':
            self.assessor.attrs.set('fs:fsdata/procstatus', status)
        else:
            self.assessor.attrs.set('proc:genprocdata/procstatus', status)

    def get_qcstatus(self):
        """Return the QC status, DOES_NOT_EXIST, or an UNKNOWN_xsiType marker."""
        qcstatus = ''
        atype = self.atype
        if not self.assessor.exists():
            qcstatus = DOES_NOT_EXIST
        elif atype == 'proc:genprocdata' or atype == 'fs:fsdata':
            qcstatus = self.assessor.attrs.get(atype+'/validation/status')
        else:
            qcstatus = 'UNKNOWN_xsiType:'+atype
        return qcstatus

    def set_qcstatus(self,qcstatus):
        """Store the QC status on the assessor."""
        atype = self.atype
        self.assessor.attrs.set(atype+'/validation/status', qcstatus)

    def has_inputs(self):
        """Delegate to the processor: are all inputs present on XNAT?"""
        return self.processor.has_inputs(self.assessor)

    def set_jobid(self,jobid):
        """Store the cluster job id on the assessor."""
        if self.atype == 'proc:genprocdata':
            self.assessor.attrs.set('proc:genprocdata/jobid', jobid)
        elif self.atype == 'fs:fsdata':
            #self.assessor.attrs.set("fs:fsdata/parameters/addParam[name=jobid]/addField", jobid)
            self.assessor.attrs.set('fs:fsdata/jobid', jobid)

    def commands(self,jobdir):
        """Return the processor's command list, run inside jobdir/<label>."""
        return self.processor.get_cmds(self.assessor,jobdir+"/"+self.assessor_label)

    def pbs_path(self):
        """Return the path of the generated PBS script for this task."""
        return DEFAULT_PBS_DIR+'/'+self.assessor_label+'.pbs'

    def outlog_path(self):
        """Return the path of the cluster output log for this task."""
        return DEFAULT_OUT_DIR+'/'+self.assessor_label+'.output'

    def ready_flag_exists(self):
        """True if the spider's READY_TO_UPLOAD flag file exists locally."""
        flagfile = self.upload_dir+'/'+self.assessor_label+'/'+READY_TO_UPLOAD_FLAG_FILENAME
        return os.path.isfile(flagfile)

    def check_running(self):
        """Poll a JOB_RUNNING task; return its (possibly unchanged) status."""
        # Check status on cluster
        jobstatus = self.get_job_status()
        if jobstatus == 'R' or jobstatus == 'Q':
            # Still running
            return JOB_RUNNING
        elif not self.ready_flag_exists():
            # Check for a flag file created upon completion, if it's not there then the job failed
            return JOB_FAILED
        else:
            # Let Upload Spider handle the upload
            return JOB_RUNNING
# ---- axes.py (ReportLab charts) ----
#copyright ReportLab Inc. 2000-2001
#see license.txt for license details
#history http://cvs.sourceforge.net/cgi-bin/cvsweb.cgi/reportlab/graphics/charts/axes.py?cvsroot=reportlab
#$Header: /tmp/reportlab/reportlab/graphics/charts/axes.py,v 1.40 2001/09/27 18:10:49 rgbecker Exp $
"""Collection of axes for charts.
The current collection comprises axes for charts using cartesian
coordinate systems. All axes might have tick marks and labels.
There are two dichotomies for axes: one of X and Y flavours and
another of category and value flavours.
Category axes have an ordering but no metric. They are divided
into a number of equal-sized buckets. Their tick marks or labels,
if available, go BETWEEN the buckets, and the labels are placed
below to/left of the X/Y-axis, respectively.
Value axes have an ordering AND metric. They correspond to a
numeric quantity: each value axis has a real-number quantity
associated with it. The chart tells it where to go.
The most basic axis divides the number line into equal spaces
and has tickmarks and labels associated with each; later we
will add variants where you can specify the sampling
interval.
The charts using axis tell them where the labels should be placed.
Axes of complementary X/Y flavours can be connected to each other
in various ways, i.e. with a specific reference point, like an
x/value axis to a y/value (or category) axis. In this case the
connection can be either at the top or bottom of the former or
at any absolute value (specified in points) or at some value of
the former axes in its own coordinate system.
"""
import string
from types import FunctionType, StringType, TupleType, ListType
from reportlab.lib.validators import isNumber, isNumberOrNone, isListOfStringsOrNone, isListOfNumbers, \
isListOfNumbersOrNone, isColorOrNone, OneOf, isBoolean, SequenceOf, \
isString
from reportlab.lib.attrmap import *
from reportlab.lib import normalDate
from reportlab.graphics.shapes import Drawing, Line, Group, STATE_DEFAULTS, _textBoxLimits, _rotatedBoxLimits
from reportlab.graphics.widgetbase import Widget, TypedPropertyCollection
from reportlab.graphics.charts.textlabels import Label
from reportlab.graphics.charts.utils import nextRoundNumber
# Helpers.
def _findMin(V, x, default):
'''find minimum over V[i][x]'''
try:
if type(V[0][0]) in (TupleType,ListType):
selector = lambda T, x=x: T[x]
m = min(map(selector, V[0]))
for v in V[1:]:
m = min(m,min(map(selector, v)))
else:
m = min(V[0])
for v in V[1:]:
m = min(m,min(v))
except IndexError:
m = default
return m
def _findMax(V, x, default):
'''find maximum over V[i][x]'''
try:
if type(V[0][0]) in (TupleType,ListType):
selector = lambda T, x=x: T[x]
m = max(map(selector, V[0]))
for v in V[1:]:
m = max(m,max(map(selector, v)))
else:
m = max(V[0])
for v in V[1:]:
m = max(m,max(v))
except IndexError:
m = default
return m
# Category axes.
class CategoryAxis(Widget):
    """Abstract category axis, unusable in itself.

    Category axes have an ordering but no metric: the axis length is
    divided into self._catCount equal buckets, one per category.
    Subclasses (XCategoryAxis/YCategoryAxis) supply the orientation.
    """
    _nodoc = 1
    _attrMap = AttrMap(
        visible = AttrMapValue(isNumber, desc='Display entire object, if true.'),
        visibleAxis = AttrMapValue(isNumber, desc='Display axis line, if true.'),
        visibleTicks = AttrMapValue(isNumber, desc='Display axis ticks, if true.'),
        strokeWidth = AttrMapValue(isNumber, desc='Width of axis line and ticks.'),
        strokeColor = AttrMapValue(isColorOrNone, desc='Color of axis line and ticks.'),
        strokeDashArray = AttrMapValue(isListOfNumbersOrNone, desc='Dash array used for axis line.'),
        labels = AttrMapValue(None, desc='Handle of the axis labels.'),
        categoryNames = AttrMapValue(isListOfStringsOrNone, desc='List of category names.'),
        joinAxis = AttrMapValue(None, desc='Join both axes if true.'),
        joinAxisPos = AttrMapValue(isNumberOrNone, desc='Position at which to join with other axis.'),
        reverseDirection = AttrMapValue(isBoolean, desc='If true reverse category direction.'),
        style = AttrMapValue(OneOf('parallel','stacked'),"How common category bars are plotted"),
        labelAxisMode = AttrMapValue(OneOf('high','low','axis'), desc="Like joinAxisMode, but for the axis labels"),
        )

    def __init__(self):
        assert self.__class__.__name__!='CategoryAxis', "Abstract Class CategoryAxis Instantiated"
        # private properties set by methods. The initial values
        # here are to make demos easy; they would always be
        # overridden in real life.
        self._x = 50
        self._y = 50
        self._length = 100
        self._catCount = 0
        # public properties
        self.visible = 1
        self.visibleAxis = 1
        self.visibleTicks = 1
        self.strokeWidth = 1
        self.strokeColor = STATE_DEFAULTS['strokeColor']
        self.strokeDashArray = STATE_DEFAULTS['strokeDashArray']
        self.labels = TypedPropertyCollection(Label)
        # if None, they don't get labels. If provided,
        # you need one name per data point and they are
        # used for label text.
        self.categoryNames = None
        self.joinAxis = None
        self.joinAxisPos = None
        self.joinAxisMode = None
        self.labelAxisMode = 'axis'
        self.reverseDirection = 0
        self.style = 'parallel'

    def setPosition(self, x, y, length):
        # NOTE(review): unlike ValueAxis.setPosition, no float conversion
        # is applied here — integer coordinates are stored as-is.
        self._x = x
        self._y = y
        self._length = length

    def configure(self, multiSeries,barWidth=None):
        """Derive category count (longest series) and bucket width."""
        self._catCount = max(map(len,multiSeries))
        # `or 1` guards against division by zero when there are no categories
        self._barWidth = barWidth or (self._length/float(self._catCount or 1))

    def draw(self):
        """Return a Group with axis line, ticks and labels (empty if hidden)."""
        g = Group()
        if not self.visible:
            return g
        g.add(self.makeAxis())
        g.add(self.makeTicks())
        g.add(self.makeTickLabels())
        return g

    def _scale(self,idx):
        """Map a category index, honouring reverseDirection."""
        if self.reverseDirection: idx = self._catCount-idx-1
        return idx
class XCategoryAxis(CategoryAxis):
    """X/category axis: horizontal axis with equal-width category buckets.

    Tick marks go BETWEEN buckets; labels are centred below each bucket.
    """

    _attrMap = AttrMap(BASE=CategoryAxis,
        tickUp = AttrMapValue(isNumber,
            desc='Tick length up the axis.'),
        tickDown = AttrMapValue(isNumber,
            desc='Tick length down the axis.'),
        joinAxisMode = AttrMapValue(OneOf('bottom', 'top', 'value', 'points', None),
            desc="Mode used for connecting axis ('bottom', 'top', 'value', 'points', None)."),
        )

    def __init__(self):
        CategoryAxis.__init__(self)
        self.labels.boxAnchor = 'n' #north - top edge
        self.labels.dy = -5
        # ultra-simple tick marks for now go between categories
        # and have same line style as axis - need more
        self.tickUp = 0  # how far into chart does tick go?
        self.tickDown = 5  # how far below axis does tick go?

    def demo(self):
        """Return a small self-contained Drawing showing this axis."""
        self.setPosition(30, 70, 140)
        self.configure([(10,20,30,40,50)])
        self.categoryNames = ['One','Two','Three','Four','Five']
        # all labels top-centre aligned apart from the last
        self.labels.boxAnchor = 'n'
        self.labels[4].boxAnchor = 'e'
        self.labels[4].angle = 90
        d = Drawing(200, 100)
        d.add(self)
        return d

    def joinToAxis(self, yAxis, mode='bottom', pos=None):
        """Position this axis relative to a y-axis using some mode.

        mode 'bottom'/'top' snaps to the y-axis ends, 'value' converts
        pos through the y-axis scale, 'points' uses pos directly.
        """
        # Make sure we connect only to a y-axis.
        axisClassName = yAxis.__class__.__name__
        msg = "Cannot connect to other axes (%s), but Y- ones." % axisClassName
        assert axisClassName[0] == 'Y', msg
        if mode == 'bottom':
            self._x = yAxis._x
            self._y = yAxis._y
        elif mode == 'top':
            self._x = yAxis._x
            self._y = yAxis._y + yAxis._length
        elif mode == 'value':
            self._x = yAxis._x
            self._y = yAxis.scale(pos)
        elif mode == 'points':
            self._x = yAxis._x
            self._y = pos

    def scale(self, idx):
        """returns the x position and width in drawing units of the slice"""
        return (self._x + self._scale(idx)*self._barWidth, self._barWidth)

    def makeAxis(self):
        """Apply any joinAxis positioning, then return the axis line Group.

        The join is performed BEFORE the visibility check so that ticks
        and labels are positioned correctly even when the line is hidden.
        """
        g = Group()
        ja = self.joinAxis
        if ja:
            jam = self.joinAxisMode
            jap = self.joinAxisPos
            jta = self.joinToAxis
            if jam in ('bottom', 'top'):
                jta(ja, mode=jam)
            elif jam in ('value', 'points'):
                jta(ja, mode=jam, pos=jap)
        if not self.visibleAxis: return g
        axis = Line(self._x, self._y, self._x + self._length, self._y)
        axis.strokeColor = self.strokeColor
        axis.strokeWidth = self.strokeWidth
        axis.strokeDashArray = self.strokeDashArray
        g.add(axis)
        return g

    def makeTicks(self):
        """Return a Group of tick Lines, one per bucket boundary."""
        g = Group()
        if not self.visibleTicks:
            return g
        # tickUp/tickDown are loop-invariant, so test them once up front
        # (the original also re-tested them inside the loop, redundantly).
        if self.tickUp or self.tickDown:
            for i in range(self._catCount + 1):
                x = self._x + (1.0 * i * self._barWidth)
                tick = Line(x, self._y + self.tickUp, x, self._y - self.tickDown)
                tick.strokeColor = self.strokeColor
                tick.strokeWidth = self.strokeWidth
                tick.strokeDashArray = self.strokeDashArray
                g.add(tick)
        return g

    def _labelAxisPos(self):
        """Return the y position for labels per labelAxisMode ('low'/'high'/'axis')."""
        axis = self.joinAxis
        if axis:
            mode = self.labelAxisMode
            if mode == 'low':
                return axis._y
            elif mode == 'high':
                return axis._y + axis._length
        return self._y

    def makeTickLabels(self):
        """Return a Group of category labels centred under each bucket."""
        g = Group()
        if not self.visibleTicks:
            return g
        if not (self.categoryNames is None):
            catCount = self._catCount
            assert len(self.categoryNames) == catCount, \
                "expected %d category names but found %d in axis" % (
                len(self.categoryNames), catCount
                )
            reverseDirection = self.reverseDirection
            barWidth = self._barWidth
            _y = self._labelAxisPos()
            _x = self._x
            for i in range(catCount):
                x = _x + (i+0.5) * barWidth
                label = self.labels[i]
                label.setOrigin(x, _y)
                # reversed axes draw label i at slot i but show the mirrored name
                if reverseDirection: i = catCount-i-1
                label.setText(self.categoryNames[i])
                g.add(label)
        return g
class YCategoryAxis(CategoryAxis):
    """Y/category axis: vertical axis with equal-height category buckets.

    Tick marks go BETWEEN buckets; labels are centred left of each bucket.
    """

    _attrMap = AttrMap(BASE=CategoryAxis,
        tickLeft = AttrMapValue(isNumber,
            desc='Tick length left of the axis.'),
        tickRight = AttrMapValue(isNumber,
            desc='Tick length right of the axis.'),
        joinAxisMode = AttrMapValue(OneOf(('left', 'right', 'value', 'points', None)),
            desc="Mode used for connecting axis ('left', 'right', 'value', 'points', None)."),
        )

    def __init__(self):
        CategoryAxis.__init__(self)
        self.labels.boxAnchor = 'e' #east - right edge
        self.labels.dx = -5
        # ultra-simple tick marks for now go between categories
        # and have same line style as axis - need more
        self.tickLeft = 5  # how far left of axis does tick go?
        self.tickRight = 0  # how far right of axis does tick go?

    def demo(self):
        """Return a small self-contained Drawing showing this axis."""
        self.setPosition(50, 10, 80)
        self.configure([(10,20,30)])
        self.categoryNames = ['One','Two','Three']
        # all labels top-centre aligned apart from the last
        self.labels.boxAnchor = 'e'
        self.labels[2].boxAnchor = 's'
        self.labels[2].angle = 90
        d = Drawing(200, 100)
        d.add(self)
        return d

    def joinToAxis(self, xAxis, mode='left', pos=None):
        """Position this axis relative to an x-axis using some mode.

        mode 'left'/'right' snaps to the x-axis ends, 'value' converts
        pos through the x-axis scale, 'points' uses pos directly.
        """
        # Make sure we connect only to an x-axis.
        axisClassName = xAxis.__class__.__name__
        msg = "Cannot connect to other axes (%s), but X- ones." % axisClassName
        assert axisClassName[0] == 'X', msg
        if mode == 'left':
            self._x = xAxis._x * 1.0
            self._y = xAxis._y * 1.0
        elif mode == 'right':
            self._x = (xAxis._x + xAxis._length) * 1.0
            self._y = xAxis._y * 1.0
        elif mode == 'value':
            self._x = xAxis.scale(pos) * 1.0
            self._y = xAxis._y * 1.0
        elif mode == 'points':
            self._x = pos * 1.0
            self._y = xAxis._y * 1.0

    def scale(self, idx):
        "Returns the y position and width in drawing units of the slice."
        return (self._y + self._scale(idx)*self._barWidth, self._barWidth)

    def makeAxis(self):
        """Apply any joinAxis positioning, then return the axis line Group.

        Fix: the join is now performed BEFORE the visibility early-return
        (matching XCategoryAxis.makeAxis); previously a hidden axis was
        never repositioned, leaving ticks/labels at unjoined coordinates.
        """
        g = Group()
        ja = self.joinAxis
        if ja:
            jam = self.joinAxisMode
            jap = self.joinAxisPos
            jta = self.joinToAxis
            if jam in ('left', 'right'):
                jta(ja, mode=jam)
            elif jam in ('value', 'points'):
                jta(ja, mode=jam, pos=jap)
        if not self.visibleAxis:
            return g
        axis = Line(self._x, self._y, self._x, self._y + self._length)
        axis.strokeColor = self.strokeColor
        axis.strokeWidth = self.strokeWidth
        axis.strokeDashArray = self.strokeDashArray
        g.add(axis)
        return g

    def makeTicks(self):
        """Return a Group of tick Lines, one per bucket boundary."""
        g = Group()
        if not self.visibleTicks:
            return g
        # tickLeft/tickRight are loop-invariant, so test them once up front
        # (the original also re-tested them inside the loop, redundantly).
        if self.tickLeft or self.tickRight:
            for i in range(self._catCount + 1):
                y = self._y + (1.0 * i * self._barWidth)
                tick = Line(self._x - self.tickLeft, y,
                            self._x + self.tickRight, y)
                tick.strokeColor = self.strokeColor
                tick.strokeWidth = self.strokeWidth
                tick.strokeDashArray = self.strokeDashArray
                g.add(tick)
        return g

    def _labelAxisPos(self):
        """Return the x position for labels per labelAxisMode ('low'/'high'/'axis')."""
        axis = self.joinAxis
        if axis:
            mode = self.labelAxisMode
            if mode == 'low':
                return axis._x
            elif mode == 'high':
                return axis._x + axis._length
        return self._y

    def makeTickLabels(self):
        """Return a Group of category labels centred beside each bucket."""
        g = Group()
        if not self.visibleTicks:
            return g
        if not (self.categoryNames is None):
            catCount = self._catCount
            assert len(self.categoryNames) == catCount, \
                "expected %d category names but found %d in axis" % (
                len(self.categoryNames), catCount
                )
            reverseDirection = self.reverseDirection
            barWidth = self._barWidth
            labels = self.labels
            _x = self._labelAxisPos()
            _y = self._y
            for i in range(catCount):
                y = _y + (i+0.5) * barWidth
                label = labels[i]
                label.setOrigin(_x, y)
                # reversed axes draw label i at slot i but show the mirrored name
                if reverseDirection: i = catCount-i-1
                label.setText(self.categoryNames[i])
                g.add(label)
        return g
# Value axes.
class ValueAxis(Widget):
    """Abstract value axis, unusable in itself.

    Value axes have an ordering AND metric: configure() inspects the data
    (and/or valueMin/valueMax/valueStep overrides) and derives the range,
    scale factor and tick positions used by subclasses.
    """

    _attrMap = AttrMap(
        visible = AttrMapValue(isNumber,
            desc='Display entire object, if true.'),
        visibleAxis = AttrMapValue(isNumber,
            desc='Display axis line, if true.'),
        visibleTicks = AttrMapValue(isNumber,
            desc='Display axis ticks, if true.'),
        strokeWidth = AttrMapValue(isNumber,
            desc='Width of axis line and ticks.'),
        strokeColor = AttrMapValue(isColorOrNone,
            desc='Color of axis line and ticks.'),
        strokeDashArray = AttrMapValue(isListOfNumbersOrNone,
            desc='Dash array used for axis line.'),
        minimumTickSpacing = AttrMapValue(isNumber,
            desc='Minimum value for distance between ticks.'),
        maximumTicks = AttrMapValue(isNumber,
            desc='Maximum number of ticks.'),
        labels = AttrMapValue(None,
            desc='Handle of the axis labels.'),
        labelTextFormat = AttrMapValue(None,
            desc='Formatting string or function used for axis labels.'),
        valueMin = AttrMapValue(isNumberOrNone,
            desc='Minimum value on axis.'),
        valueMax = AttrMapValue(isNumberOrNone,
            desc='Maximum value on axis.'),
        valueStep = AttrMapValue(isNumberOrNone,
            desc='Step size used between ticks.'),
        valueSteps = AttrMapValue(isListOfNumbersOrNone,
            desc='List of step sizes used between ticks.'),
        )

    def __init__(self):
        assert self.__class__.__name__!='ValueAxis', 'Abstract Class ValueAxis Instantiated'
        self._configured = 0
        # private properties set by methods. The initial values
        # here are to make demos easy; they would always be
        # overridden in real life.
        self._x = 50
        self._y = 50
        self._length = 100
        # public properties
        self.visible = 1
        self.visibleAxis = 1
        self.visibleTicks = 1
        self.strokeWidth = 1
        self.strokeColor = STATE_DEFAULTS['strokeColor']
        self.strokeDashArray = STATE_DEFAULTS['strokeDashArray']
        self.labels = TypedPropertyCollection(Label)
        self.labels.angle = 0
        # how close can the ticks be?
        self.minimumTickSpacing = 10
        self.maximumTicks = 7
        # this may be either of (a) a format string like '%0.2f'
        # or (b) a function which takes the value as an argument
        # and returns a chunk of text. So you can write a
        # 'formatMonthEndDate' function and use that on irregular
        # data points.
        self.labelTextFormat = '%d'
        # if set to None, these will be worked out for you.
        # if you override any or all of them, your values
        # will be used.
        self.valueMin = None
        self.valueMax = None
        self.valueStep = None

    def setPosition(self, x, y, length):
        # ensure floating point
        self._x = x * 1.0
        self._y = y * 1.0
        self._length = length * 1.0

    def configure(self, dataSeries):
        """Let the axis configure its scale and range based on the data.

        Called after setPosition. Let it look at a list of lists of
        numbers determine the tick mark intervals. If valueMin,
        valueMax and valueStep are configured then it
        will use them; if any of them are set to None it
        will look at the data and make some sensible decision.
        You may override this to build custom axes with
        irregular intervals. It creates an internal
        variable self._values, which is a list of numbers
        to use in plotting.
        """
        # Set range.
        self._setRange(dataSeries)
        # Set scale factor.
        self._scaleFactor = self._calcScaleFactor()
        # Work out where to put tickmarks.
        self._tickValues = self._calcTickmarkPositions()
        self._configured = 1

    def _setRange(self, dataSeries):
        """Set minimum and maximum axis values.

        The dataSeries argument is assumed to be a list of data
        vectors. Each vector is itself a list or tuple of numbers.
        Returns a min, max tuple.
        """
        valueMin = self.valueMin
        if self.valueMin is None:
            valueMin = _findMin(dataSeries,self._dataIndex,valueMin)
        valueMax = self.valueMax
        if self.valueMax is None:
            valueMax = _findMax(dataSeries,self._dataIndex,valueMax)
        self._valueMin, self._valueMax = (valueMin, valueMax)
        self._rangeAdjust()

    def _rangeAdjust(self):
        """Override this if you want to alter the calculated range.

        E.g. if want a minimum range of 30% or don't want 100%
        as the first point.
        """
        pass

    def _calcScaleFactor(self):
        """Calculate the axis' scale factor.

        This should be called only *after* the axis' range is set.
        Returns a number.
        """
        return self._length / float(self._valueMax - self._valueMin)

    def _calcTickmarkPositions(self):
        """Calculate a list of tick positions on the axis.

        Returns a list of numbers.
        """
        # explicit valueSteps override the computed positions entirely
        if hasattr(self, 'valueSteps') and self.valueSteps:
            self._tickValues = self.valueSteps
            return self._tickValues
        self._calcValueStep()
        tickmarkPositions = []
        # start from the largest step multiple at or below valueMin
        tick = int(self._valueMin / self._valueStep) * self._valueStep
        if tick >= self._valueMin:
            tickmarkPositions.append(tick)
        tick = tick + self._valueStep
        while tick <= self._valueMax:
            tickmarkPositions.append(tick)
            tick = tick + self._valueStep
        return tickmarkPositions

    def _calcValueStep(self):
        '''Calculate _valueStep for the axis or get from valueStep.'''
        if self.valueStep is None:
            rawRange = self._valueMax - self._valueMin
            # candidate interval bounded by both maximumTicks and minimumTickSpacing
            rawInterval = rawRange / min(float(self.maximumTicks-1),(float(self._length)/self.minimumTickSpacing ))
            niceInterval = nextRoundNumber(rawInterval)
            self._valueStep = niceInterval
        else:
            self._valueStep = self.valueStep

    def makeTickLabels(self):
        """Return a Group with one formatted Label per tick value.

        labelTextFormat may be a %-format string, a list/tuple of literal
        label texts, or a callable taking the tick value. A falsy format
        suppresses all labels.
        """
        g = Group()
        f = self.labelTextFormat
        pos = [self._x, self._y]
        d = self._dataIndex
        labels = self.labels
        i = 0
        for tick in self._tickValues:
            if f:
                v = self.scale(tick)
                # isinstance checks replace the Python-2-only
                # `type(f) is StringType` / `type(f) in (TupleType, ListType)`
                if isinstance(f, str): txt = f % tick
                elif isinstance(f, (tuple, list)):
                    #it's a list, use as many items as we get
                    if i < len(f):
                        txt = f[i]
                    else:
                        txt = ''
                else: txt = f(tick)
                label = labels[i]
                pos[d] = v
                # argument unpacking replaces the removed builtin apply()
                label.setOrigin(*pos)
                label.setText(txt)
                g.add(label)
                i = i + 1
        return g

    def draw(self):
        """Return a Group with axis line, ticks and labels (empty if hidden)."""
        g = Group()
        if not self.visible:
            return g
        g.add(self.makeAxis())
        g.add(self.makeTicks())
        g.add(self.makeTickLabels())
        return g
class XValueAxis(ValueAxis):
    """X/value axis: horizontal axis with a numeric scale."""

    _attrMap = AttrMap(BASE=ValueAxis,
        tickUp = AttrMapValue(isNumber,
            desc='Tick length up the axis.'),
        tickDown = AttrMapValue(isNumber,
            desc='Tick length down the axis.'),
        joinAxis = AttrMapValue(None,
            desc='Join both axes if true.'),
        joinAxisMode = AttrMapValue(OneOf('bottom', 'top', 'value', 'points', None),
            desc="Mode used for connecting axis ('bottom', 'top', 'value', 'points', None)."),
        joinAxisPos = AttrMapValue(isNumberOrNone,
            desc='Position at which to join with other axis.'),
        )

    # Indicate the dimension of the data we're interested in.
    _dataIndex = 0

    def __init__(self):
        ValueAxis.__init__(self)
        self.labels.boxAnchor = 'n'
        self.labels.dx = 0
        self.labels.dy = -5
        self.tickUp = 0
        self.tickDown = 5
        self.joinAxis = None
        self.joinAxisMode = None
        self.joinAxisPos = None

    def demo(self):
        """Return a small self-contained Drawing showing this axis."""
        self.setPosition(20, 50, 150)
        self.configure([(10,20,30,40,50)])
        d = Drawing(200, 100)
        d.add(self)
        return d

    def joinToAxis(self, yAxis, mode='bottom', pos=None):
        """Position this axis relative to a y-axis using some mode.

        mode 'bottom'/'top' snaps to the y-axis ends, 'value' converts
        pos through the y-axis scale, 'points' uses pos directly.
        """
        # Make sure we connect only to a y-axis.
        axisClassName = yAxis.__class__.__name__
        msg = "Cannot connect to other axes (%s), but Y- ones." % axisClassName
        assert axisClassName[0] == 'Y', msg
        if mode == 'bottom':
            self._x = yAxis._x * 1.0
            self._y = yAxis._y * 1.0
        elif mode == 'top':
            self._x = yAxis._x * 1.0
            self._y = (yAxis._y + yAxis._length) * 1.0
        elif mode == 'value':
            self._x = yAxis._x * 1.0
            self._y = yAxis.scale(pos) * 1.0
        elif mode == 'points':
            self._x = yAxis._x * 1.0
            self._y = pos * 1.0

    def scale(self, value):
        """Converts a numeric value to an X position on the drawing.

        The chart first configures the axis, then asks it to
        work out the x value for each point when plotting
        lines or bars. You could override this to do
        logarithmic axes. A None value is treated as 0.
        """
        msg = "Axis cannot scale numbers before it is configured"
        assert self._configured, msg
        if value is None:
            value = 0
        return self._x + self._scaleFactor * (value - self._valueMin)

    def makeAxis(self):
        # NOTE(review): the visibility early-return happens BEFORE the
        # joinAxis positioning here, unlike XCategoryAxis.makeAxis —
        # when the axis line is hidden the join is skipped; confirm intended.
        g = Group()
        if not self.visibleAxis:
            return g
        ja = self.joinAxis
        if ja:
            jam = self.joinAxisMode
            jap = self.joinAxisPos
            jta = self.joinToAxis
            if jam in ('bottom', 'top'):
                jta(ja, mode=jam)
            elif jam in ('value', 'points'):
                jta(ja, mode=jam, pos=jap)
        axis = Line(self._x, self._y, self._x + self._length, self._y)
        axis.strokeColor = self.strokeColor
        axis.strokeWidth = self.strokeWidth
        axis.strokeDashArray = self.strokeDashArray
        g.add(axis)
        return g

    def makeTicks(self):
        """Return a Group of tick Lines, one per computed tick value."""
        g = Group()
        if not self.visibleTicks:
            return g
        i = 0
        for tickValue in self._tickValues:
            if self.tickUp or self.tickDown:
                x = self.scale(tickValue)
                tick = Line(x, self._y - self.tickDown,
                            x, self._y + self.tickUp)
                tick.strokeColor = self.strokeColor
                tick.strokeWidth = self.strokeWidth
                tick.strokeDashArray = self.strokeDashArray
                g.add(tick)
        return g
class NormalDateXValueAxis(XValueAxis):
    """An X axis applying additional rules.

    Depending on the data and some built-in rules, the axis
    displays normalDate values as nicely formatted dates.

    The client chart should have NormalDate values.
    """

    _attrMap = AttrMap(BASE = XValueAxis,
        bottomAxisLabelSlack = AttrMapValue(isNumber, desc="Fractional amount used to adjust label spacing"),
        niceMonth = AttrMapValue(isBoolean, desc="Flag for displaying months 'nicely'."),
        forceEndDate = AttrMapValue(isBoolean, desc='Flag for enforced displaying of last date value.'),
        forceFirstDate = AttrMapValue(isBoolean, desc='Flag for enforced displaying of first date value.'),
        xLabelFormat = AttrMapValue(None, desc="Label format string (e.g. '{mm}/{yy}') or function."),
        dayOfWeekName = AttrMapValue(SequenceOf(isString,emptyOK=0,lo=7,hi=7), desc='Weekday names.'),
        monthName = AttrMapValue(SequenceOf(isString,emptyOK=0,lo=12,hi=12), desc='Month names.'),
        dailyFreq = AttrMapValue(isBoolean, desc='True if we are to assume daily data to be ticked at end of month.'),
        )

    # Class used to normalize scalar x values into dates.
    _valueClass = normalDate.ND

    def __init__(self, **kw):
        # Direct base call replaces the deprecated apply() builtin; behavior identical.
        XValueAxis.__init__(self)
        # some global variables still used...
        self.bottomAxisLabelSlack = 0.1
        self.niceMonth = 1
        self.forceEndDate = 0
        self.forceFirstDate = 0
        self.dailyFreq = 0
        self.xLabelFormat = "{mm}/{yy}"
        self.dayOfWeekName = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
        self.monthName = ['January', 'February', 'March', 'April', 'May', 'June', 'July',
                          'August', 'September', 'October', 'November', 'December']
        self.valueSteps = None

    def _scalar2ND(self, x):
        "Convert a scalar to a NormalDate value."
        d = self._valueClass()
        d.normalize(x)
        return d

    def _dateFormatter(self, v):
        "Create a formatted label for some value."
        if not isinstance(v,normalDate.NormalDate):
            v = self._scalar2ND(v)
        # Temporarily install our own day/month names into the normalDate
        # module so formatMS picks them up, restoring the originals after.
        d, m = normalDate._dayOfWeekName, normalDate._monthName
        try:
            normalDate._dayOfWeekName, normalDate._monthName = self.dayOfWeekName, self.monthName
            return v.formatMS(self.xLabelFormat)
        finally:
            normalDate._dayOfWeekName, normalDate._monthName = d, m

    def _xAxisTicker(self, xVals):
        """Choose tick positions and labels for a list of date values.

        Tries candidate sampling intervals (1, 2, 3, ... entries apart)
        until the labels fit in the axis length, honouring the
        forceFirstDate/forceEndDate/niceMonth flags.  Returns
        (ticks, labels) for the first interval that fits.
        """
        axisLength = self._length
        formatter = self._dateFormatter
        labels = self.labels
        fontName, fontSize, leading = labels.fontName, labels.fontSize, labels.leading
        textAnchor, boxAnchor, angle = labels.textAnchor, labels.boxAnchor, labels.angle
        # Measure the first label's (possibly rotated) bounding box to
        # estimate the width every label will need.
        RBL = _textBoxLimits(string.split(formatter(xVals[0]),'\n'),fontName,
                    fontSize,leading or 1.2*fontSize,textAnchor,boxAnchor)
        RBL = _rotatedBoxLimits(RBL[0],RBL[1],RBL[2],RBL[3], angle)
        xLabelW = RBL[1]-RBL[0]
        xLabelH = RBL[3]-RBL[2]
        w = max(xLabelW,labels.width,self.minimumTickSpacing)
        # Required spacing per label, padded by the configured slack fraction.
        W = w+w*self.bottomAxisLabelSlack
        n = len(xVals)
        ticks = []
        labels = []
        maximumTicks = self.maximumTicks
        def addTick(i, xVals=xVals, formatter=formatter, ticks=ticks, labels=labels):
            # Prepend so ticks stay in ascending date order while we walk backwards.
            ticks.insert(0,xVals[i])
            labels.insert(0,formatter(xVals[i]))
        for d in (1,2,3,6,12,24,60,120):
            k = n/d
            if k<=maximumTicks and k*W <= axisLength:
                i = n-1
                if self.niceMonth:
                    # Offset so ticks land on "nice" month boundaries.
                    j = xVals[-1].month() % (d<=12 and d or 12)
                    if j:
                        if self.forceEndDate: addTick(i)
                        i = i - j
                #weird first date ie not at end of month
                try:
                    wfd = xVals[0].month() == xVals[1].month()
                except:
                    # NOTE(review): bare except kept from original; presumably
                    # guards IndexError when xVals has a single entry.
                    wfd = 0
                while i>=wfd:
                    addTick(i)
                    i = i - d
                if self.forceFirstDate and ticks[0] != xVals[0]:
                    addTick(0)
                    # Drop the second tick if it crowds the forced first one.
                    if (axisLength/(ticks[-1]-ticks[0]))*(ticks[1]-ticks[0])<=w:
                        del ticks[1], labels[1]
                # j is only defined when niceMonth is true; the short-circuit
                # on self.niceMonth protects the reference below.
                if self.forceEndDate and self.niceMonth and j:
                    if (axisLength/(ticks[-1]-ticks[0]))*(ticks[-1]-ticks[-2])<=w:
                        del ticks[-2], labels[-2]
                if labels[0]==labels[1]:
                    del ticks[1], labels[1]
                return ticks, labels

    def _convertXV(self,data):
        '''Convert all XValues to a standard normalDate type'''
        VC = self._valueClass
        for D in data:
            for i in xrange(len(D)):
                x, y = D[i]
                if not isinstance(x,VC):
                    D[i] = (VC(x),y)

    def configure(self, data):
        """Set up min/max, scale factor, tick values and label texts from data.

        data is a list of series; each series is a list of (x, y) pairs
        whose x values are converted in place to NormalDate.
        """
        self._convertXV(data)
        xVals = map(lambda dv: dv[0], data[0])
        if self.dailyFreq:
            # Daily data: tick only at each end-of-month date.
            xEOM = []
            pm = 0
            px = xVals[0]
            for x in xVals:
                m = x.month()
                if pm!=m:
                    if pm: xEOM.append(px)
                    pm = m
                px = x
            px = xVals[-1]
            # NOTE(review): assumes at least one month boundary was seen;
            # xEOM[-1] raises IndexError otherwise — confirm with callers.
            if xEOM[-1]!=x: xEOM.append(px)
            steps, labels = self._xAxisTicker(xEOM)
        else:
            steps, labels = self._xAxisTicker(xVals)
        valueMin, valueMax = self.valueMin, self.valueMax
        if valueMin is None: valueMin = xVals[0]
        if valueMax is None: valueMax = xVals[-1]
        self._valueMin, self._valueMax = valueMin, valueMax
        self.valueSteps = steps
        self.labelTextFormat = labels
        self._scaleFactor = self._length / float(valueMax - valueMin)
        self._tickValues = steps
        self._configured = 1
class YValueAxis(ValueAxis):
    "Y/value axis"

    _attrMap = AttrMap(BASE=ValueAxis,
        tickLeft = AttrMapValue(isNumber,
            desc='Tick length left of the axis.'),
        tickRight = AttrMapValue(isNumber,
            desc='Tick length right of the axis.'),
        joinAxis = AttrMapValue(None,
            desc='Join both axes if true.'),
        joinAxisMode = AttrMapValue(OneOf(('left', 'right', 'value', 'points', None)),
            desc="Mode used for connecting axis ('left', 'right', 'value', 'points', None)."),
        joinAxisPos = AttrMapValue(isNumberOrNone,
            desc='Position at which to join with other axis.'),
        )

    # Indicate the dimension of the data we're interested in.
    _dataIndex = 1

    def __init__(self):
        ValueAxis.__init__(self)
        # Labels sit to the left of the axis, right-edge anchored.
        self.labels.boxAnchor = 'e'
        self.labels.dx = -5
        self.labels.dy = 0
        self.tickRight = 0
        self.tickLeft = 5
        self.joinAxis = None
        self.joinAxisMode = None
        self.joinAxisPos = None

    def demo(self):
        """Return a tiny self-contained Drawing showing this axis."""
        data = [(10, 20, 30, 42)]
        self.setPosition(100, 10, 80)
        self.configure(data)
        drawing = Drawing(200, 100)
        drawing.add(self)
        return drawing

    def joinToAxis(self, xAxis, mode='left', pos=None):
        "Join with x-axis using some mode."
        # Make sure we connect only to an x-axis (class name starts with 'X').
        # (Fixed: original comment wrongly said "y-axis".)
        axisClassName = xAxis.__class__.__name__
        msg = "Cannot connect to other axes (%s), but X- ones." % axisClassName
        assert axisClassName[0] == 'X', msg
        if mode == 'left':
            self._x = xAxis._x * 1.0
            self._y = xAxis._y * 1.0
        elif mode == 'right':
            self._x = (xAxis._x + xAxis._length) * 1.0
            self._y = xAxis._y * 1.0
        elif mode == 'value':
            # Position at the x-axis' scaled location of a data value.
            self._x = xAxis.scale(pos) * 1.0
            self._y = xAxis._y * 1.0
        elif mode == 'points':
            # Absolute position in drawing units.
            self._x = pos * 1.0
            self._y = xAxis._y * 1.0

    def scale(self, value):
        """Convert a numeric data value to a Y position (in points).

        The chart first configures the axis, then asks it to
        work out the y value for each point when plotting
        lines or bars. You could override this to do
        logarithmic axes.
        """
        msg = "Axis cannot scale numbers before it is configured"
        assert self._configured, msg
        # None values are plotted as if they were zero offset.
        if value is None:
            value = 0
        return self._y + self._scaleFactor * (value - self._valueMin)

    def makeAxis(self):
        """Build and return a Group holding the axis line (empty when hidden)."""
        g = Group()
        if not self.visibleAxis:
            return g
        ja = self.joinAxis
        if ja:
            jam = self.joinAxisMode
            jap = self.joinAxisPos
            jta = self.joinToAxis
            if jam in ('left', 'right'):
                jta(ja, mode=jam)
            elif jam in ('value', 'points'):
                jta(ja, mode=jam, pos=jap)
        axis = Line(self._x, self._y, self._x, self._y + self._length)
        axis.strokeColor = self.strokeColor
        axis.strokeWidth = self.strokeWidth
        axis.strokeDashArray = self.strokeDashArray
        g.add(axis)
        return g

    def makeTicks(self):
        """Build and return a Group of tick-mark Lines, one per tick value.

        (Removed an unused counter variable and hoisted the loop-invariant
        tickLeft/tickRight test out of the loop.)
        """
        g = Group()
        if not self.visibleTicks:
            return g
        if self.tickLeft or self.tickRight:
            for tickValue in self._tickValues:
                y = self.scale(tickValue)
                tick = Line(self._x - self.tickLeft, y,
                            self._x + self.tickRight, y)
                tick.strokeColor = self.strokeColor
                tick.strokeWidth = self.strokeWidth
                tick.strokeDashArray = self.strokeDashArray
                g.add(tick)
        return g
# Sample functions.
def sample0a():
    "Sample drawing with one xcat axis and two buckets."
    series = [(10, 20)]
    axis = XCategoryAxis()
    axis.setPosition(75, 75, 300)
    axis.configure(series)
    axis.categoryNames = ['Ying', 'Yang']
    axis.labels.boxAnchor = 'n'
    d = Drawing(400, 200)
    d.add(axis)
    return d
def sample0b():
    "Sample drawing with one xcat axis and one bucket only."
    series = [(10,)]
    axis = XCategoryAxis()
    axis.setPosition(75, 75, 300)
    axis.configure(series)
    axis.categoryNames = ['Ying']
    axis.labels.boxAnchor = 'n'
    d = Drawing(400, 200)
    d.add(axis)
    return d
def sample1():
    "Sample drawing containing two unconnected axes."
    series = [(10, 20, 30, 42)]
    xa = XCategoryAxis()
    xa.setPosition(75, 75, 300)
    xa.configure(series)
    xa.categoryNames = ['Beer','Wine','Meat','Cannelloni']
    xa.labels.boxAnchor = 'n'
    # Give the last label its own styling.
    xa.labels[3].dy = -15
    xa.labels[3].angle = 30
    xa.labels[3].fontName = 'Times-Bold'
    ya = YValueAxis()
    ya.setPosition(50, 50, 125)
    ya.configure(series)
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
##def sample2a():
## "Make sample drawing with two axes, x connected at top of y."
##
## drawing = Drawing(400, 200)
##
## data = [(10, 20, 30, 42)]
##
## yAxis = YValueAxis()
## yAxis.setPosition(50, 50, 125)
## yAxis.configure(data)
##
## xAxis = XCategoryAxis()
## xAxis._length = 300
## xAxis.configure(data)
## xAxis.joinToAxis(yAxis, mode='top')
## xAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
## xAxis.labels.boxAnchor = 'n'
##
## drawing.add(xAxis)
## drawing.add(yAxis)
##
## return drawing
##
##
##def sample2b():
## "Make two axes, x connected at bottom of y."
##
## drawing = Drawing(400, 200)
##
## data = [(10, 20, 30, 42)]
##
## yAxis = YValueAxis()
## yAxis.setPosition(50, 50, 125)
## yAxis.configure(data)
##
## xAxis = XCategoryAxis()
## xAxis._length = 300
## xAxis.configure(data)
## xAxis.joinToAxis(yAxis, mode='bottom')
## xAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
## xAxis.labels.boxAnchor = 'n'
##
## drawing.add(xAxis)
## drawing.add(yAxis)
##
## return drawing
##
##
##def sample2c():
## "Make two axes, x connected at fixed value (in points) of y."
##
## drawing = Drawing(400, 200)
##
## data = [(10, 20, 30, 42)]
##
## yAxis = YValueAxis()
## yAxis.setPosition(50, 50, 125)
## yAxis.configure(data)
##
## xAxis = XCategoryAxis()
## xAxis._length = 300
## xAxis.configure(data)
## xAxis.joinToAxis(yAxis, mode='points', pos=100)
## xAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
## xAxis.labels.boxAnchor = 'n'
##
## drawing.add(xAxis)
## drawing.add(yAxis)
##
## return drawing
##
##
##def sample2d():
## "Make two axes, x connected at fixed value (of y-axes) of y."
##
## drawing = Drawing(400, 200)
##
## data = [(10, 20, 30, 42)]
##
## yAxis = YValueAxis()
## yAxis.setPosition(50, 50, 125)
## yAxis.configure(data)
##
## xAxis = XCategoryAxis()
## xAxis._length = 300
## xAxis.configure(data)
## xAxis.joinToAxis(yAxis, mode='value', pos=20)
## xAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
## xAxis.labels.boxAnchor = 'n'
##
## drawing.add(xAxis)
## drawing.add(yAxis)
##
## return drawing
##
##
##def sample3a():
## "Make sample drawing with two axes, y connected at left of x."
##
## drawing = Drawing(400, 200)
##
## data = [(10, 20, 30, 42)]
##
## xAxis = XCategoryAxis()
## xAxis._length = 300
## xAxis.configure(data)
## xAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
## xAxis.labels.boxAnchor = 'n'
##
## yAxis = YValueAxis()
## yAxis.setPosition(50, 50, 125)
## yAxis.configure(data)
## yAxis.joinToAxis(xAxis, mode='left')
##
## drawing.add(xAxis)
## drawing.add(yAxis)
##
## return drawing
##
##
##def sample3b():
## "Make sample drawing with two axes, y connected at right of x."
##
## drawing = Drawing(400, 200)
##
## data = [(10, 20, 30, 42)]
##
## xAxis = XCategoryAxis()
## xAxis._length = 300
## xAxis.configure(data)
## xAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
## xAxis.labels.boxAnchor = 'n'
##
## yAxis = YValueAxis()
## yAxis.setPosition(50, 50, 125)
## yAxis.configure(data)
## yAxis.joinToAxis(xAxis, mode='right')
##
## drawing.add(xAxis)
## drawing.add(yAxis)
##
## return drawing
##
##
##def sample3c():
## "Make two axes, y connected at fixed value (in points) of x."
##
## drawing = Drawing(400, 200)
##
## data = [(10, 20, 30, 42)]
##
## yAxis = YValueAxis()
## yAxis.setPosition(50, 50, 125)
## yAxis.configure(data)
##
## xAxis = XValueAxis()
## xAxis._length = 300
## xAxis.configure(data)
## xAxis.joinToAxis(yAxis, mode='points', pos=100)
##
## drawing.add(xAxis)
## drawing.add(yAxis)
##
## return drawing
def sample4a():
    "Sample drawing, xvalue/yvalue axes, y connected at 100 pts to x."
    series = [(10, 20, 30, 42)]
    ya = YValueAxis()
    ya.setPosition(50, 50, 125)
    ya.configure(series)
    xa = XValueAxis()
    xa._length = 300
    xa.joinAxis = ya
    xa.joinAxisMode = 'points'
    xa.joinAxisPos = 100
    xa.configure(series)
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample4b():
    "Sample drawing, xvalue/yvalue axes, y connected at value 35 of x."
    series = [(10, 20, 30, 42)]
    ya = YValueAxis()
    ya.setPosition(50, 50, 125)
    ya.configure(series)
    xa = XValueAxis()
    xa._length = 300
    xa.joinAxis = ya
    xa.joinAxisMode = 'value'
    xa.joinAxisPos = 35
    xa.configure(series)
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample4c():
    "Sample drawing, xvalue/yvalue axes, y connected to bottom of x."
    series = [(10, 20, 30, 42)]
    ya = YValueAxis()
    ya.setPosition(50, 50, 125)
    ya.configure(series)
    xa = XValueAxis()
    xa._length = 300
    xa.joinAxis = ya
    xa.joinAxisMode = 'bottom'
    xa.configure(series)
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample4c1():
    "xvalue/yvalue axes, without drawing axis lines/ticks."
    series = [(10, 20, 30, 42)]
    ya = YValueAxis()
    ya.setPosition(50, 50, 125)
    ya.configure(series)
    # Hide the axis line and tick marks on both axes.
    ya.visibleAxis = 0
    ya.visibleTicks = 0
    xa = XValueAxis()
    xa._length = 300
    xa.joinAxis = ya
    xa.joinAxisMode = 'bottom'
    xa.configure(series)
    xa.visibleAxis = 0
    xa.visibleTicks = 0
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample4d():
    "Sample drawing, xvalue/yvalue axes, y connected to top of x."
    series = [(10, 20, 30, 42)]
    ya = YValueAxis()
    ya.setPosition(50, 50, 125)
    ya.configure(series)
    xa = XValueAxis()
    xa._length = 300
    xa.joinAxis = ya
    xa.joinAxisMode = 'top'
    xa.configure(series)
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample5a():
    "Sample drawing, xvalue/yvalue axes, y connected at 100 pts to x."
    series = [(10, 20, 30, 42)]
    xa = XValueAxis()
    xa.setPosition(50, 50, 300)
    xa.configure(series)
    ya = YValueAxis()
    ya.setPosition(50, 50, 125)
    ya.joinAxis = xa
    ya.joinAxisMode = 'points'
    ya.joinAxisPos = 100
    ya.configure(series)
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample5b():
    "Sample drawing, xvalue/yvalue axes, y connected at value 35 of x."
    series = [(10, 20, 30, 42)]
    xa = XValueAxis()
    xa.setPosition(50, 50, 300)
    xa.configure(series)
    ya = YValueAxis()
    ya.setPosition(50, 50, 125)
    ya.joinAxis = xa
    ya.joinAxisMode = 'value'
    ya.joinAxisPos = 35
    ya.configure(series)
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample5c():
    "Sample drawing, xvalue/yvalue axes, y connected at right of x."
    series = [(10, 20, 30, 42)]
    xa = XValueAxis()
    xa.setPosition(50, 50, 300)
    xa.configure(series)
    ya = YValueAxis()
    ya.setPosition(50, 50, 125)
    ya.joinAxis = xa
    ya.joinAxisMode = 'right'
    ya.configure(series)
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample5d():
    "Sample drawing, xvalue/yvalue axes, y connected at left of x."
    series = [(10, 20, 30, 42)]
    xa = XValueAxis()
    xa.setPosition(50, 50, 300)
    xa.configure(series)
    ya = YValueAxis()
    ya.setPosition(50, 50, 125)
    ya.joinAxis = xa
    ya.joinAxisMode = 'left'
    ya.configure(series)
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample6a():
    "Sample drawing, xcat/yvalue axes, x connected at top of y."
    series = [(10, 20, 30, 42)]
    ya = YValueAxis()
    ya.setPosition(50, 50, 125)
    ya.configure(series)
    xa = XCategoryAxis()
    xa._length = 300
    xa.configure(series)
    xa.joinAxis = ya
    xa.joinAxisMode = 'top'
    xa.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    xa.labels.boxAnchor = 'n'
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample6b():
    "Sample drawing, xcat/yvalue axes, x connected at bottom of y."
    series = [(10, 20, 30, 42)]
    ya = YValueAxis()
    ya.setPosition(50, 50, 125)
    ya.configure(series)
    xa = XCategoryAxis()
    xa._length = 300
    xa.configure(series)
    xa.joinAxis = ya
    xa.joinAxisMode = 'bottom'
    xa.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    xa.labels.boxAnchor = 'n'
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample6c():
    "Sample drawing, xcat/yvalue axes, x connected at 100 pts to y."
    series = [(10, 20, 30, 42)]
    ya = YValueAxis()
    ya.setPosition(50, 50, 125)
    ya.configure(series)
    xa = XCategoryAxis()
    xa._length = 300
    xa.configure(series)
    xa.joinAxis = ya
    xa.joinAxisMode = 'points'
    xa.joinAxisPos = 100
    xa.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    xa.labels.boxAnchor = 'n'
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample6d():
    "Sample drawing, xcat/yvalue axes, x connected at value 20 of y."
    series = [(10, 20, 30, 42)]
    ya = YValueAxis()
    ya.setPosition(50, 50, 125)
    ya.configure(series)
    xa = XCategoryAxis()
    xa._length = 300
    xa.configure(series)
    xa.joinAxis = ya
    xa.joinAxisMode = 'value'
    xa.joinAxisPos = 20
    xa.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    xa.labels.boxAnchor = 'n'
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample7a():
    "Sample drawing, xvalue/ycat axes, y connected at right of x."
    series = [(10, 20, 30, 42)]
    xa = XValueAxis()
    xa._length = 300
    xa.configure(series)
    ya = YCategoryAxis()
    ya.setPosition(50, 50, 125)
    ya.joinAxis = xa
    ya.joinAxisMode = 'right'
    ya.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    ya.labels.boxAnchor = 'e'
    ya.configure(series)
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample7b():
    "Sample drawing, xvalue/ycat axes, y connected at left of x."
    series = [(10, 20, 30, 42)]
    xa = XValueAxis()
    xa._length = 300
    xa.configure(series)
    ya = YCategoryAxis()
    ya.setPosition(50, 50, 125)
    ya.joinAxis = xa
    ya.joinAxisMode = 'left'
    ya.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    ya.labels.boxAnchor = 'e'
    ya.configure(series)
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample7c():
    "Sample drawing, xvalue/ycat axes, y connected at value 30 of x."
    series = [(10, 20, 30, 42)]
    xa = XValueAxis()
    xa._length = 300
    xa.configure(series)
    ya = YCategoryAxis()
    ya.setPosition(50, 50, 125)
    ya.joinAxis = xa
    ya.joinAxisMode = 'value'
    ya.joinAxisPos = 30
    ya.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    ya.labels.boxAnchor = 'e'
    ya.configure(series)
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample7d():
    "Sample drawing, xvalue/ycat axes, y connected at 200 pts to x."
    series = [(10, 20, 30, 42)]
    xa = XValueAxis()
    xa._length = 300
    xa.configure(series)
    ya = YCategoryAxis()
    ya.setPosition(50, 50, 125)
    ya.joinAxis = xa
    ya.joinAxisMode = 'points'
    ya.joinAxisPos = 200
    ya.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    ya.labels.boxAnchor = 'e'
    ya.configure(series)
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
# Added None handling & ValueAxis._adjustAxisTicks
#copyright ReportLab Inc. 2000-2001
#see license.txt for license details
#history http://cvs.sourceforge.net/cgi-bin/cvsweb.cgi/reportlab/graphics/charts/axes.py?cvsroot=reportlab
#$Header: /tmp/reportlab/reportlab/graphics/charts/axes.py,v 1.41 2001/10/02 11:03:35 rgbecker Exp $
"""Collection of axes for charts.
The current collection comprises axes for charts using cartesian
coordinate systems. All axes might have tick marks and labels.
There are two dichotomies for axes: one of X and Y flavours and
another of category and value flavours.
Category axes have an ordering but no metric. They are divided
into a number of equal-sized buckets. Their tick marks or labels,
if available, go BETWEEN the buckets, and the labels are placed
below to/left of the X/Y-axis, respectively.
Value axes have an ordering AND metric. They correspond to a
numeric quantity. A value axis has a real number quantity
associated with it. The chart tells it where to go.
The most basic axis divides the number line into equal spaces
and has tickmarks and labels associated with each; later we
will add variants where you can specify the sampling
interval.
The charts using axes tell them where the labels should be placed.
Axes of complementary X/Y flavours can be connected to each other
in various ways, i.e. with a specific reference point, like an
x/value axis to a y/value (or category) axis. In this case the
connection can be either at the top or bottom of the former or
at any absolute value (specified in points) or at some value of
the former axes in its own coordinate system.
"""
import string
from types import FunctionType, StringType, TupleType, ListType
from reportlab.lib.validators import isNumber, isNumberOrNone, isListOfStringsOrNone, isListOfNumbers, \
isListOfNumbersOrNone, isColorOrNone, OneOf, isBoolean, SequenceOf, \
isString
from reportlab.lib.attrmap import *
from reportlab.lib import normalDate
from reportlab.graphics.shapes import Drawing, Line, Group, STATE_DEFAULTS, _textBoxLimits, _rotatedBoxLimits
from reportlab.graphics.widgetbase import Widget, TypedPropertyCollection
from reportlab.graphics.charts.textlabels import Label
from reportlab.graphics.charts.utils import nextRoundNumber
# Helpers.
def _findMinMaxValue(V, x, default, func):
    """Apply func over the x-th components of V's rows, skipping None.

    V is a list of rows; each row holds either plain numbers or point
    tuples/lists, in which case element x of each point is used.
    Returns default when no non-None values remain.
    """
    if type(V[0][0]) in (TupleType, ListType):
        # Rows hold point tuples; project out the x-th element of each.
        V = [[point[x] for point in row] for row in V]
    # Strip None entries, then discard rows left empty.
    cleaned = [[v for v in row if v is not None] for row in V]
    cleaned = [row for row in cleaned if len(row)]
    if len(cleaned) == 0:
        return default
    return func([func(row) for row in cleaned])
def _findMin(V, x, default):
    """Return the minimum over V[i][x], or default when all values are None."""
    return _findMinMaxValue(V, x, default, min)
def _findMax(V, x, default):
    """Return the maximum over V[i][x], or default when all values are None."""
    return _findMinMaxValue(V, x, default, max)
# Category axes.
class CategoryAxis(Widget):
    "Abstract category axis, unusable in itself."
    _nodoc = 1
    _attrMap = AttrMap(
        visible = AttrMapValue(isNumber, desc='Display entire object, if true.'),
        visibleAxis = AttrMapValue(isNumber, desc='Display axis line, if true.'),
        visibleTicks = AttrMapValue(isNumber, desc='Display axis ticks, if true.'),
        strokeWidth = AttrMapValue(isNumber, desc='Width of axis line and ticks.'),
        strokeColor = AttrMapValue(isColorOrNone, desc='Color of axis line and ticks.'),
        strokeDashArray = AttrMapValue(isListOfNumbersOrNone, desc='Dash array used for axis line.'),
        labels = AttrMapValue(None, desc='Handle of the axis labels.'),
        categoryNames = AttrMapValue(isListOfStringsOrNone, desc='List of category names.'),
        joinAxis = AttrMapValue(None, desc='Join both axes if true.'),
        joinAxisPos = AttrMapValue(isNumberOrNone, desc='Position at which to join with other axis.'),
        reverseDirection = AttrMapValue(isBoolean, desc='If true reverse category direction.'),
        style = AttrMapValue(OneOf('parallel','stacked'),"How common category bars are plotted"),
        labelAxisMode = AttrMapValue(OneOf('high','low','axis'), desc="Like joinAxisMode, but for the axis labels"),
        )

    def __init__(self):
        # Refuse direct instantiation; concrete X/Y subclasses must be used.
        assert self.__class__.__name__!='CategoryAxis', "Abstract Class CategoryAxis Instantiated"

        # private properties set by methods.  The initial values
        # here are to make demos easy; they would always be
        # overridden in real life.
        self._x = 50
        self._y = 50
        self._length = 100
        self._catCount = 0

        # public properties
        self.visible = 1
        self.visibleAxis = 1
        self.visibleTicks = 1

        self.strokeWidth = 1
        self.strokeColor = STATE_DEFAULTS['strokeColor']
        self.strokeDashArray = STATE_DEFAULTS['strokeDashArray']
        self.labels = TypedPropertyCollection(Label)

        # if None, they don't get labels. If provided,
        # you need one name per data point and they are
        # used for label text.
        self.categoryNames = None
        self.joinAxis = None
        self.joinAxisPos = None
        self.joinAxisMode = None
        self.labelAxisMode = 'axis'
        self.reverseDirection = 0
        self.style = 'parallel'

    def setPosition(self, x, y, length):
        """Record the axis origin and length in drawing units.

        NOTE(review): an earlier comment here said "ensure floating point",
        but the values are stored without conversion.
        """
        self._x = x
        self._y = y
        self._length = length

    def configure(self, multiSeries,barWidth=None):
        # Bucket count is the longest series; bucket width defaults to an
        # equal division of the axis length (guarding against zero buckets).
        self._catCount = max(map(len,multiSeries))
        self._barWidth = barWidth or (self._length/float(self._catCount or 1))

    def draw(self):
        """Assemble axis line, ticks and labels into one Group."""
        g = Group()
        if not self.visible:
            return g
        g.add(self.makeAxis())
        g.add(self.makeTicks())
        g.add(self.makeTickLabels())
        return g

    def _scale(self,idx):
        # Map a category index to its drawing-order index, honouring
        # reverseDirection.
        if self.reverseDirection: idx = self._catCount-idx-1
        return idx
class XCategoryAxis(CategoryAxis):
    "X/category axis"

    _attrMap = AttrMap(BASE=CategoryAxis,
        tickUp = AttrMapValue(isNumber,
            desc='Tick length up the axis.'),
        tickDown = AttrMapValue(isNumber,
            desc='Tick length down the axis.'),
        joinAxisMode = AttrMapValue(OneOf('bottom', 'top', 'value', 'points', None),
            desc="Mode used for connecting axis ('bottom', 'top', 'value', 'points', None)."),
        )

    def __init__(self):
        CategoryAxis.__init__(self)
        self.labels.boxAnchor = 'n' #north - top edge
        self.labels.dy = -5
        # ultra-simple tick marks for now go between categories
        # and have same line style as axis - need more
        self.tickUp = 0  # how far into chart does tick go?
        self.tickDown = 5  # how far below axis does tick go?

    def demo(self):
        """Return a tiny self-contained Drawing showing this axis."""
        self.setPosition(30, 70, 140)
        self.configure([(10,20,30,40,50)])
        self.categoryNames = ['One','Two','Three','Four','Five']
        # all labels top-centre aligned apart from the last
        self.labels.boxAnchor = 'n'
        self.labels[4].boxAnchor = 'e'
        self.labels[4].angle = 90
        d = Drawing(200, 100)
        d.add(self)
        return d

    def joinToAxis(self, yAxis, mode='bottom', pos=None):
        "Join with y-axis using some mode."
        # Make sure we connect only to a y-axis.
        axisClassName = yAxis.__class__.__name__
        msg = "Cannot connect to other axes (%s), but Y- ones." % axisClassName
        assert axisClassName[0] == 'Y', msg
        if mode == 'bottom':
            self._x = yAxis._x
            self._y = yAxis._y
        elif mode == 'top':
            self._x = yAxis._x
            self._y = yAxis._y + yAxis._length
        elif mode == 'value':
            # Position at the y-axis' scaled location of a data value.
            self._x = yAxis._x
            self._y = yAxis.scale(pos)
        elif mode == 'points':
            # Absolute position in drawing units.
            self._x = yAxis._x
            self._y = pos

    def scale(self, idx):
        """returns the x position and width in drawing units of the slice"""
        return (self._x + self._scale(idx)*self._barWidth, self._barWidth)

    def makeAxis(self):
        """Build and return a Group holding the axis line.

        Joining to the other axis happens before the visibility check so a
        hidden axis still repositions itself for its labels/ticks.
        """
        g = Group()
        ja = self.joinAxis
        if ja:
            jam = self.joinAxisMode
            jap = self.joinAxisPos
            jta = self.joinToAxis
            if jam in ('bottom', 'top'):
                jta(ja, mode=jam)
            elif jam in ('value', 'points'):
                jta(ja, mode=jam, pos=jap)
        if not self.visibleAxis: return g
        axis = Line(self._x, self._y, self._x + self._length, self._y)
        axis.strokeColor = self.strokeColor
        axis.strokeWidth = self.strokeWidth
        axis.strokeDashArray = self.strokeDashArray
        g.add(axis)
        return g

    def makeTicks(self):
        """Build and return a Group of tick Lines between the categories.

        (Removed a redundant inner repeat of the tickUp/tickDown test.)
        """
        g = Group()
        if not self.visibleTicks:
            return g
        if self.tickUp or self.tickDown:
            # One tick per bucket boundary, i.e. catCount + 1 of them.
            for i in range(self._catCount + 1):
                x = self._x + (1.0 * i * self._barWidth)
                tick = Line(x, self._y + self.tickUp, x, self._y - self.tickDown)
                tick.strokeColor = self.strokeColor
                tick.strokeWidth = self.strokeWidth
                tick.strokeDashArray = self.strokeDashArray
                g.add(tick)
        return g

    def _labelAxisPos(self):
        # Y coordinate at which the category labels sit; 'low'/'high' track
        # the joined axis' ends, 'axis' (default) uses our own baseline.
        axis = self.joinAxis
        if axis:
            mode = self.labelAxisMode
            if mode == 'low':
                return axis._y
            elif mode == 'high':
                return axis._y + axis._length
        return self._y

    def makeTickLabels(self):
        """Build and return a Group of category labels (one per bucket).

        NOTE(review): labels are gated on visibleTicks rather than a
        separate label flag — confirm that is intended.
        """
        g = Group()
        if not self.visibleTicks:
            return g
        if not (self.categoryNames is None):
            catCount = self._catCount
            # Fixed: expected/found were swapped in the original message.
            assert len(self.categoryNames) == catCount, \
                "expected %d category names but found %d in axis" % (
                catCount, len(self.categoryNames)
                )
            reverseDirection = self.reverseDirection
            barWidth = self._barWidth
            _y = self._labelAxisPos()
            _x = self._x
            for i in range(catCount):
                # Centre each label in the middle of its bucket.
                x = _x + (i+0.5) * barWidth
                label = self.labels[i]
                label.setOrigin(x, _y)
                if reverseDirection: i = catCount-i-1
                label.setText(self.categoryNames[i])
                g.add(label)
        return g
class YCategoryAxis(CategoryAxis):
    "Y/category axis"

    _attrMap = AttrMap(BASE=CategoryAxis,
        tickLeft = AttrMapValue(isNumber,
            desc='Tick length left of the axis.'),
        tickRight = AttrMapValue(isNumber,
            desc='Tick length right of the axis.'),
        joinAxisMode = AttrMapValue(OneOf(('left', 'right', 'value', 'points', None)),
            desc="Mode used for connecting axis ('left', 'right', 'value', 'points', None)."),
        )

    def __init__(self):
        CategoryAxis.__init__(self)
        self.labels.boxAnchor = 'e' #east - right edge
        self.labels.dx = -5
        # ultra-simple tick marks for now go between categories
        # and have same line style as axis - need more
        self.tickLeft = 5  # how far left of axis does tick go?
        self.tickRight = 0  # how far right of axis does tick go?

    def demo(self):
        """Return a tiny self-contained Drawing showing this axis."""
        self.setPosition(50, 10, 80)
        self.configure([(10,20,30)])
        self.categoryNames = ['One','Two','Three']
        # all labels top-centre aligned apart from the last
        self.labels.boxAnchor = 'e'
        self.labels[2].boxAnchor = 's'
        self.labels[2].angle = 90
        d = Drawing(200, 100)
        d.add(self)
        return d

    def joinToAxis(self, xAxis, mode='left', pos=None):
        "Join with x-axis using some mode."
        # Make sure we connect only to an x-axis.
        axisClassName = xAxis.__class__.__name__
        msg = "Cannot connect to other axes (%s), but X- ones." % axisClassName
        assert axisClassName[0] == 'X', msg
        if mode == 'left':
            self._x = xAxis._x * 1.0
            self._y = xAxis._y * 1.0
        elif mode == 'right':
            self._x = (xAxis._x + xAxis._length) * 1.0
            self._y = xAxis._y * 1.0
        elif mode == 'value':
            # Position at the x-axis' scaled location of a data value.
            self._x = xAxis.scale(pos) * 1.0
            self._y = xAxis._y * 1.0
        elif mode == 'points':
            # Absolute position in drawing units.
            self._x = pos * 1.0
            self._y = xAxis._y * 1.0

    def scale(self, idx):
        "Returns the y position and width in drawing units of the slice."
        return (self._y + self._scale(idx)*self._barWidth, self._barWidth)

    def makeAxis(self):
        """Build and return a Group holding the axis line (empty when hidden)."""
        g = Group()
        if not self.visibleAxis:
            return g
        ja = self.joinAxis
        if ja:
            jam = self.joinAxisMode
            jap = self.joinAxisPos
            jta = self.joinToAxis
            if jam in ('left', 'right'):
                jta(ja, mode=jam)
            elif jam in ('value', 'points'):
                jta(ja, mode=jam, pos=jap)
        axis = Line(self._x, self._y, self._x, self._y + self._length)
        axis.strokeColor = self.strokeColor
        axis.strokeWidth = self.strokeWidth
        axis.strokeDashArray = self.strokeDashArray
        g.add(axis)
        return g

    def makeTicks(self):
        """Build and return a Group of tick Lines between the categories.

        (Removed a redundant inner repeat of the tickLeft/tickRight test.)
        """
        g = Group()
        if not self.visibleTicks:
            return g
        if self.tickLeft or self.tickRight:
            # One tick per bucket boundary, i.e. catCount + 1 of them.
            for i in range(self._catCount + 1):
                y = self._y + (1.0 * i * self._barWidth)
                tick = Line(self._x - self.tickLeft, y,
                            self._x + self.tickRight, y)
                tick.strokeColor = self.strokeColor
                tick.strokeWidth = self.strokeWidth
                tick.strokeDashArray = self.strokeDashArray
                g.add(tick)
        return g

    def _labelAxisPos(self):
        # X coordinate at which the category labels sit; 'low'/'high' track
        # the joined axis' ends, 'axis' (default) uses our own x origin.
        axis = self.joinAxis
        if axis:
            mode = self.labelAxisMode
            if mode == 'low':
                return axis._x
            elif mode == 'high':
                return axis._x + axis._length
        # Fixed: the fallback returned self._y (a y coordinate) where an x
        # coordinate is needed; compare XCategoryAxis._labelAxisPos.
        return self._x

    def makeTickLabels(self):
        """Build and return a Group of category labels (one per bucket).

        NOTE(review): labels are gated on visibleTicks rather than a
        separate label flag — confirm that is intended.
        """
        g = Group()
        if not self.visibleTicks:
            return g
        if not (self.categoryNames is None):
            catCount = self._catCount
            # Fixed: expected/found were swapped in the original message.
            assert len(self.categoryNames) == catCount, \
                "expected %d category names but found %d in axis" % (
                catCount, len(self.categoryNames)
                )
            reverseDirection = self.reverseDirection
            barWidth = self._barWidth
            labels = self.labels
            _x = self._labelAxisPos()
            _y = self._y
            for i in range(catCount):
                # Centre each label in the middle of its bucket.
                y = _y + (i+0.5) * barWidth
                label = labels[i]
                label.setOrigin(_x, y)
                if reverseDirection: i = catCount-i-1
                label.setText(self.categoryNames[i])
                g.add(label)
        return g
# Value axes.
class ValueAxis(Widget):
    """Abstract value axis, unusable in itself.

    Subclasses (XValueAxis, YValueAxis) supply _dataIndex and a scale()
    method; this class owns range finding, tick calculation and label
    formatting.
    """

    _attrMap = AttrMap(
        visible = AttrMapValue(isNumber,
            desc='Display entire object, if true.'),
        visibleAxis = AttrMapValue(isNumber,
            desc='Display axis line, if true.'),
        visibleTicks = AttrMapValue(isNumber,
            desc='Display axis ticks, if true.'),
        strokeWidth = AttrMapValue(isNumber,
            desc='Width of axis line and ticks.'),
        strokeColor = AttrMapValue(isColorOrNone,
            desc='Color of axis line and ticks.'),
        strokeDashArray = AttrMapValue(isListOfNumbersOrNone,
            desc='Dash array used for axis line.'),
        minimumTickSpacing = AttrMapValue(isNumber,
            desc='Minimum value for distance between ticks.'),
        maximumTicks = AttrMapValue(isNumber,
            desc='Maximum number of ticks.'),
        labels = AttrMapValue(None,
            desc='Handle of the axis labels.'),
        labelTextFormat = AttrMapValue(None,
            desc='Formatting string or function used for axis labels.'),
        valueMin = AttrMapValue(isNumberOrNone,
            desc='Minimum value on axis.'),
        valueMax = AttrMapValue(isNumberOrNone,
            desc='Maximum value on axis.'),
        valueStep = AttrMapValue(isNumberOrNone,
            desc='Step size used between ticks.'),
        valueSteps = AttrMapValue(isListOfNumbersOrNone,
            desc='List of step sizes used between ticks.'),
        )

    def __init__(self):
        assert self.__class__.__name__!='ValueAxis', 'Abstract Class ValueAxis Instantiated'
        # scale() refuses to run until configure() has set this flag.
        self._configured = 0
        # private properties set by methods.  The initial values
        # here are to make demos easy; they would always be
        # overridden in real life.
        self._x = 50
        self._y = 50
        self._length = 100
        # public properties
        self.visible = 1
        self.visibleAxis = 1
        self.visibleTicks = 1
        self.strokeWidth = 1
        self.strokeColor = STATE_DEFAULTS['strokeColor']
        self.strokeDashArray = STATE_DEFAULTS['strokeDashArray']
        self.labels = TypedPropertyCollection(Label)
        self.labels.angle = 0
        # how close can the ticks be?
        self.minimumTickSpacing = 10
        self.maximumTicks = 7
        # this may be either of (a) a format string like '%0.2f'
        # or (b) a function which takes the value as an argument
        # and returns a chunk of text.  So you can write a
        # 'formatMonthEndDate' function and use that on irregular
        # data points.
        self.labelTextFormat = '%d'
        # if set to None, these will be worked out for you.
        # if you override any or all of them, your values
        # will be used.
        self.valueMin = None
        self.valueMax = None
        self.valueStep = None

    def setPosition(self, x, y, length):
        "Set the axis start point and length (stored as floats, drawing units)."
        # ensure floating point
        self._x = x * 1.0
        self._y = y * 1.0
        self._length = length * 1.0

    def configure(self, dataSeries):
        """Let the axis configure its scale and range based on the data.

        Called after setPosition. Let it look at a list of lists of
        numbers determine the tick mark intervals.  If valueMin,
        valueMax and valueStep are configured then it
        will use them; if any of them are set to None it
        will look at the data and make some sensible decision.
        You may override this to build custom axes with
        irregular intervals.  It creates an internal
        variable self._values, which is a list of numbers
        to use in plotting.
        """
        self._setRange(dataSeries)
        self._calcScaleFactor()
        self._calcTickmarkPositions()
        self._configured = 1

    def _setRange(self, dataSeries):
        """Set minimum and maximum axis values.

        The dataSeries argument is assumed to be a list of data
        vectors. Each vector is itself a list or tuple of numbers.
        Explicit valueMin/valueMax win; otherwise the extremes are
        found in the data along this axis' _dataIndex dimension.
        """
        valueMin = self.valueMin
        if self.valueMin is None:
            valueMin = _findMin(dataSeries,self._dataIndex,valueMin)
        valueMax = self.valueMax
        if self.valueMax is None:
            valueMax = _findMax(dataSeries,self._dataIndex,valueMax)
        self._valueMin, self._valueMax = (valueMin, valueMax)
        self._rangeAdjust()

    def _rangeAdjust(self):
        """Override this if you want to alter the calculated range.

        E.g. if want a minimum range of 30% or don't want 100%
        as the first point.  Default does nothing.
        """
        pass

    def _adjustAxisTicks(self):
        '''Override if you want to put slack at the ends of the axis
        eg if you don't want the last tick to be at the bottom etc.
        Default does nothing.
        '''
        pass

    def _calcScaleFactor(self):
        """Calculate the axis' scale factor (drawing units per data unit).

        This should be called only *after* the axis' range is set.
        NOTE(review): raises ZeroDivisionError when valueMin == valueMax —
        presumably callers guarantee a non-degenerate range; confirm.
        """
        self._scaleFactor = self._length / float(self._valueMax - self._valueMin)
        return self._scaleFactor

    def _calcTickmarkPositions(self):
        """Calculate and return the list of tick positions on the axis.

        An explicit valueSteps list short-circuits the calculation.
        """
        if hasattr(self, 'valueSteps') and self.valueSteps:
            self._tickValues = self.valueSteps
            return self._tickValues
        self._calcValueStep()
        tickmarkPositions = []
        # First candidate: the largest step multiple at or below valueMin.
        tick = int(self._valueMin / self._valueStep) * self._valueStep
        if tick >= self._valueMin:
            tickmarkPositions.append(tick)
        tick = tick + self._valueStep
        while tick <= self._valueMax:
            tickmarkPositions.append(tick)
            tick = tick + self._valueStep
        self._tickValues = tickmarkPositions
        self._adjustAxisTicks()
        return self._tickValues

    def _calcValueStep(self):
        '''Calculate _valueStep for the axis or get from valueStep.'''
        if self.valueStep is None:
            rawRange = self._valueMax - self._valueMin
            # Spread the range over as many intervals as both maximumTicks
            # and minimumTickSpacing allow, then round up to a "nice" number.
            rawInterval = rawRange / min(float(self.maximumTicks-1),(float(self._length)/self.minimumTickSpacing ))
            niceInterval = nextRoundNumber(rawInterval)
            self._valueStep = niceInterval
        else:
            self._valueStep = self.valueStep

    def makeTickLabels(self):
        "Return a Group of formatted labels, one per tick value."
        g = Group()
        f = self.labelTextFormat
        # pos is reused for every label; only the _dataIndex slot changes.
        pos = [self._x, self._y]
        d = self._dataIndex
        labels = self.labels
        i = 0
        for tick in self._tickValues:
            if f:
                v = self.scale(tick)
                # f may be a %-format string, a list/tuple of ready-made
                # strings, or a callable taking the tick value.
                if type(f) is StringType: txt = f % tick
                elif type(f) in (TupleType,ListType):
                    #it's a list, use as many items as we get
                    if i < len(f):
                        txt = f[i]
                    else:
                        txt = ''
                else: txt = f(tick)
                label = labels[i]
                pos[d] = v
                apply(label.setOrigin,pos)
                label.setText(txt)
                g.add(label)
            i = i + 1
        return g

    def draw(self):
        "Draw axis line, ticks and labels; empty group when invisible."
        g = Group()
        if not self.visible:
            return g
        g.add(self.makeAxis())
        g.add(self.makeTicks())
        g.add(self.makeTickLabels())
        return g
class XValueAxis(ValueAxis):
    "X/value axis: a horizontal axis plotting numeric values."

    _attrMap = AttrMap(BASE=ValueAxis,
        tickUp = AttrMapValue(isNumber,
            desc='Tick length up the axis.'),
        tickDown = AttrMapValue(isNumber,
            desc='Tick length down the axis.'),
        joinAxis = AttrMapValue(None,
            desc='Join both axes if true.'),
        joinAxisMode = AttrMapValue(OneOf('bottom', 'top', 'value', 'points', None),
            desc="Mode used for connecting axis ('bottom', 'top', 'value', 'points', None)."),
        joinAxisPos = AttrMapValue(isNumberOrNone,
            desc='Position at which to join with other axis.'),
        )

    # Indicate the dimension of the data we're interested in.
    _dataIndex = 0

    def __init__(self):
        ValueAxis.__init__(self)
        # Labels hang below the axis line by default.
        self.labels.boxAnchor = 'n'
        self.labels.dx = 0
        self.labels.dy = -5
        self.tickUp = 0
        self.tickDown = 5
        self.joinAxis = None
        self.joinAxisMode = None
        self.joinAxisPos = None

    def demo(self):
        "Return a small self-contained demo drawing."
        self.setPosition(20, 50, 150)
        self.configure([(10,20,30,40,50)])
        d = Drawing(200, 100)
        d.add(self)
        return d

    def joinToAxis(self, yAxis, mode='bottom', pos=None):
        "Join with y-axis using some mode."
        # Make sure we connect only to a y-axis.
        axisClassName = yAxis.__class__.__name__
        msg = "Cannot connect to other axes (%s), but Y- ones." % axisClassName
        assert axisClassName[0] == 'Y', msg
        if mode == 'bottom':
            self._x = yAxis._x * 1.0
            self._y = yAxis._y * 1.0
        elif mode == 'top':
            self._x = yAxis._x * 1.0
            self._y = (yAxis._y + yAxis._length) * 1.0
        elif mode == 'value':
            # Sit at the y position corresponding to data value `pos`.
            self._x = yAxis._x * 1.0
            self._y = yAxis.scale(pos) * 1.0
        elif mode == 'points':
            self._x = yAxis._x * 1.0
            self._y = pos * 1.0

    def scale(self, value):
        """Convert a numeric value to an X position.

        (Fixed: the docstring previously claimed a Y position.)
        The chart first configures the axis, then asks it to
        work out the x value for each point when plotting
        lines or bars.  You could override this to do
        logarithmic axes.  A None value plots as 0.
        """
        msg = "Axis cannot scale numbers before it is configured"
        assert self._configured, msg
        if value is None:
            value = 0
        return self._x + self._scaleFactor * (value - self._valueMin)

    def makeAxis(self):
        "Return a Group holding the axis line (empty when hidden)."
        g = Group()
        if not self.visibleAxis:
            return g
        ja = self.joinAxis
        if ja:
            jam = self.joinAxisMode
            jap = self.joinAxisPos
            jta = self.joinToAxis
            if jam in ('bottom', 'top'):
                jta(ja, mode=jam)
            elif jam in ('value', 'points'):
                jta(ja, mode=jam, pos=jap)
        axis = Line(self._x, self._y, self._x + self._length, self._y)
        axis.strokeColor = self.strokeColor
        axis.strokeWidth = self.strokeWidth
        axis.strokeDashArray = self.strokeDashArray
        g.add(axis)
        return g

    def makeTicks(self):
        "Return a Group with one tick Line per tick value."
        g = Group()
        if not self.visibleTicks:
            return g
        # Fixed: removed unused counter `i`; hoisted the loop-invariant
        # tick-length check out of the loop.
        if self.tickUp or self.tickDown:
            for tickValue in self._tickValues:
                x = self.scale(tickValue)
                tick = Line(x, self._y - self.tickDown,
                            x, self._y + self.tickUp)
                tick.strokeColor = self.strokeColor
                tick.strokeWidth = self.strokeWidth
                tick.strokeDashArray = self.strokeDashArray
                g.add(tick)
        return g
class NormalDateXValueAxis(XValueAxis):
    """An X axis applying additional rules.

    Depending on the data and some built-in rules, the axis
    displays normalDate values as nicely formatted dates.
    The client chart should have NormalDate values.
    """

    _attrMap = AttrMap(BASE = XValueAxis,
        bottomAxisLabelSlack = AttrMapValue(isNumber, desc="Fractional amount used to adjust label spacing"),
        niceMonth = AttrMapValue(isBoolean, desc="Flag for displaying months 'nicely'."),
        forceEndDate = AttrMapValue(isBoolean, desc='Flag for enforced displaying of last date value.'),
        forceFirstDate = AttrMapValue(isBoolean, desc='Flag for enforced displaying of first date value.'),
        xLabelFormat = AttrMapValue(None, desc="Label format string (e.g. '{mm}/{yy}') or function."),
        dayOfWeekName = AttrMapValue(SequenceOf(isString,emptyOK=0,lo=7,hi=7), desc='Weekday names.'),
        monthName = AttrMapValue(SequenceOf(isString,emptyOK=0,lo=12,hi=12), desc='Month names.'),
        dailyFreq = AttrMapValue(isBoolean, desc='True if we are to assume daily data to be ticked at end of month.'),
        )

    # Class used to wrap scalar x values as dates.
    _valueClass = normalDate.ND

    def __init__(self, **kw):
        apply(XValueAxis.__init__, (self,))
        # some global variables still used...
        self.bottomAxisLabelSlack = 0.1
        self.niceMonth = 1
        self.forceEndDate = 0
        self.forceFirstDate = 0
        self.dailyFreq = 0
        self.xLabelFormat = "{mm}/{yy}"
        self.dayOfWeekName = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
        self.monthName = ['January', 'February', 'March', 'April', 'May', 'June', 'July',
                          'August', 'September', 'October', 'November', 'December']
        self.valueSteps = None

    def _scalar2ND(self, x):
        "Convert a scalar to a NormalDate value."
        d = self._valueClass()
        d.normalize(x)
        return d

    def _dateFormatter(self, v):
        "Create a formatted label for some value."
        if not isinstance(v,normalDate.NormalDate):
            v = self._scalar2ND(v)
        # Temporarily install this axis' name tables in the normalDate
        # module while formatting, restoring them afterwards.
        d, m = normalDate._dayOfWeekName, normalDate._monthName
        try:
            normalDate._dayOfWeekName, normalDate._monthName = self.dayOfWeekName, self.monthName
            return v.formatMS(self.xLabelFormat)
        finally:
            normalDate._dayOfWeekName, normalDate._monthName = d, m

    def _xAxisTicker(self, xVals):
        """Choose tick dates and their formatted labels for xVals.

        Measures one rendered (possibly rotated) label to find how much
        axis length a label needs, then walks backwards from the last
        date in strides of d values (d from a fixed candidate list),
        optionally snapping to month boundaries and honouring the
        forceFirstDate/forceEndDate flags.  Returns (ticks, labels).
        """
        axisLength = self._length
        formatter = self._dateFormatter
        labels = self.labels
        fontName, fontSize, leading = labels.fontName, labels.fontSize, labels.leading
        textAnchor, boxAnchor, angle = labels.textAnchor, labels.boxAnchor, labels.angle
        # Bounding box of a sample label, rotated by the label angle.
        RBL = _textBoxLimits(string.split(formatter(xVals[0]),'\n'),fontName,
                             fontSize,leading or 1.2*fontSize,textAnchor,boxAnchor)
        RBL = _rotatedBoxLimits(RBL[0],RBL[1],RBL[2],RBL[3], angle)
        xLabelW = RBL[1]-RBL[0]
        xLabelH = RBL[3]-RBL[2]
        w = max(xLabelW,labels.width,self.minimumTickSpacing)
        # W: label width plus the configured slack between labels.
        W = w+w*self.bottomAxisLabelSlack
        n = len(xVals)
        ticks = []
        labels = []
        maximumTicks = self.maximumTicks
        def addTick(i, xVals=xVals, formatter=formatter, ticks=ticks, labels=labels):
            # Prepend so ticks/labels end up in ascending date order.
            ticks.insert(0,xVals[i])
            labels.insert(0,formatter(xVals[i]))
        # Try successively larger strides until the ticks fit the axis.
        for d in (1,2,3,6,12,24,60,120):
            k = n/d
            if k<=maximumTicks and k*W <= axisLength:
                i = n-1
                if self.niceMonth:
                    # Offset so that ticks land on month boundaries.
                    j = xVals[-1].month() % (d<=12 and d or 12)
                    if j:
                        if self.forceEndDate: addTick(i)
                        i = i - j
                #weird first date ie not at end of month
                try:
                    wfd = xVals[0].month() == xVals[1].month()
                except:
                    # NOTE(review): bare except — presumably guards against a
                    # single-element xVals (IndexError); confirm and narrow.
                    wfd = 0
                while i>=wfd:
                    addTick(i)
                    i = i - d
                if self.forceFirstDate and ticks[0] != xVals[0]:
                    addTick(0)
                    # Drop the second tick if the forced first one crowds it.
                    if (axisLength/(ticks[-1]-ticks[0]))*(ticks[1]-ticks[0])<=w:
                        del ticks[1], labels[1]
                if self.forceEndDate and self.niceMonth and j:
                    # Drop the penultimate tick if the forced end date crowds it.
                    if (axisLength/(ticks[-1]-ticks[0]))*(ticks[-1]-ticks[-2])<=w:
                        del ticks[-2], labels[-2]
                if labels[0]==labels[1]:
                    del ticks[1], labels[1]
                return ticks, labels

    def _convertXV(self,data):
        '''Convert all XValues to a standard normalDate type'''
        VC = self._valueClass
        for D in data:
            for i in xrange(len(D)):
                x, y = D[i]
                if not isinstance(x,VC):
                    D[i] = (VC(x),y)

    def configure(self, data):
        """Configure scale, ticks and labels from (date, value) series data.

        Unlike ValueAxis.configure, tick values and the label strings are
        produced together by _xAxisTicker.
        """
        self._convertXV(data)
        xVals = map(lambda dv: dv[0], data[0])
        if self.dailyFreq:
            # Reduce daily data to one candidate tick per month end.
            xEOM = []
            pm = 0
            px = xVals[0]
            for x in xVals:
                m = x.month()
                if pm!=m:
                    if pm: xEOM.append(px)
                    pm = m
                px = x
            px = xVals[-1]
            # NOTE(review): IndexError here if xEOM is still empty — confirm
            # the data always spans a month boundary in dailyFreq mode.
            if xEOM[-1]!=x: xEOM.append(px)
            steps, labels = self._xAxisTicker(xEOM)
        else:
            steps, labels = self._xAxisTicker(xVals)
        valueMin, valueMax = self.valueMin, self.valueMax
        if valueMin is None: valueMin = xVals[0]
        if valueMax is None: valueMax = xVals[-1]
        self._valueMin, self._valueMax = valueMin, valueMax
        self.valueSteps = steps
        # The ticker returns ready-made strings, used as a list format.
        self.labelTextFormat = labels
        self._scaleFactor = self._length / float(valueMax - valueMin)
        self._tickValues = steps
        self._configured = 1
class YValueAxis(ValueAxis):
    "Y/value axis: a vertical axis plotting numeric values."

    _attrMap = AttrMap(BASE=ValueAxis,
        tickLeft = AttrMapValue(isNumber,
            desc='Tick length left of the axis.'),
        tickRight = AttrMapValue(isNumber,
            desc='Tick length right of the axis.'),
        joinAxis = AttrMapValue(None,
            desc='Join both axes if true.'),
        joinAxisMode = AttrMapValue(OneOf(('left', 'right', 'value', 'points', None)),
            desc="Mode used for connecting axis ('left', 'right', 'value', 'points', None)."),
        joinAxisPos = AttrMapValue(isNumberOrNone,
            desc='Position at which to join with other axis.'),
        )

    # Indicate the dimension of the data we're interested in.
    _dataIndex = 1

    def __init__(self):
        ValueAxis.__init__(self)
        # Labels sit to the left of the axis line by default.
        self.labels.boxAnchor = 'e'
        self.labels.dx = -5
        self.labels.dy = 0
        self.tickRight = 0
        self.tickLeft = 5
        self.joinAxis = None
        self.joinAxisMode = None
        self.joinAxisPos = None

    def demo(self):
        "Return a small self-contained demo drawing."
        data = [(10, 20, 30, 42)]
        self.setPosition(100, 10, 80)
        self.configure(data)
        drawing = Drawing(200, 100)
        drawing.add(self)
        return drawing

    def joinToAxis(self, xAxis, mode='left', pos=None):
        "Join with x-axis using some mode."
        # Make sure we connect only to an x-axis.
        # (Fixed: the comment previously said "y-axis".)
        axisClassName = xAxis.__class__.__name__
        msg = "Cannot connect to other axes (%s), but X- ones." % axisClassName
        assert axisClassName[0] == 'X', msg
        if mode == 'left':
            self._x = xAxis._x * 1.0
            self._y = xAxis._y * 1.0
        elif mode == 'right':
            self._x = (xAxis._x + xAxis._length) * 1.0
            self._y = xAxis._y * 1.0
        elif mode == 'value':
            # Sit at the x position corresponding to data value `pos`.
            self._x = xAxis.scale(pos) * 1.0
            self._y = xAxis._y * 1.0
        elif mode == 'points':
            self._x = pos * 1.0
            self._y = xAxis._y * 1.0

    def scale(self, value):
        """Convert a numeric value to a Y position.

        The chart first configures the axis, then asks it to
        work out the y value for each point when plotting
        lines or bars.  (Fixed: the docstring previously said "x value".)
        You could override this to do logarithmic axes.
        A None value plots as 0.
        """
        msg = "Axis cannot scale numbers before it is configured"
        assert self._configured, msg
        if value is None:
            value = 0
        return self._y + self._scaleFactor * (value - self._valueMin)

    def makeAxis(self):
        "Return a Group holding the axis line (empty when hidden)."
        g = Group()
        if not self.visibleAxis:
            return g
        ja = self.joinAxis
        if ja:
            jam = self.joinAxisMode
            jap = self.joinAxisPos
            jta = self.joinToAxis
            if jam in ('left', 'right'):
                jta(ja, mode=jam)
            elif jam in ('value', 'points'):
                jta(ja, mode=jam, pos=jap)
        axis = Line(self._x, self._y, self._x, self._y + self._length)
        axis.strokeColor = self.strokeColor
        axis.strokeWidth = self.strokeWidth
        axis.strokeDashArray = self.strokeDashArray
        g.add(axis)
        return g

    def makeTicks(self):
        "Return a Group with one tick Line per tick value."
        g = Group()
        if not self.visibleTicks:
            return g
        # Fixed: removed unused counter `i`; hoisted the loop-invariant
        # tick-length check out of the loop.
        if self.tickLeft or self.tickRight:
            for tickValue in self._tickValues:
                y = self.scale(tickValue)
                tick = Line(self._x - self.tickLeft, y,
                            self._x + self.tickRight, y)
                tick.strokeColor = self.strokeColor
                tick.strokeWidth = self.strokeWidth
                tick.strokeDashArray = self.strokeDashArray
                g.add(tick)
        return g
# Sample functions.
def sample0a():
    "Sample drawing with one xcat axis and two buckets."
    d = Drawing(400, 200)
    axis = XCategoryAxis()
    axis.setPosition(75, 75, 300)
    axis.configure([(10, 20)])
    axis.categoryNames = ['Ying', 'Yang']
    axis.labels.boxAnchor = 'n'
    d.add(axis)
    return d
def sample0b():
    "Sample drawing with one xcat axis and one bucket only."
    d = Drawing(400, 200)
    axis = XCategoryAxis()
    axis.setPosition(75, 75, 300)
    axis.configure([(10,)])
    axis.categoryNames = ['Ying']
    axis.labels.boxAnchor = 'n'
    d.add(axis)
    return d
def sample1():
    "Sample drawing containing two unconnected axes."
    d = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    xAxis = XCategoryAxis()
    xAxis.setPosition(75, 75, 300)
    xAxis.configure(series)
    xAxis.categoryNames = ['Beer','Wine','Meat','Cannelloni']
    xAxis.labels.boxAnchor = 'n'
    # Give the last category label a custom look.
    xAxis.labels[3].dy = -15
    xAxis.labels[3].angle = 30
    xAxis.labels[3].fontName = 'Times-Bold'
    yAxis = YValueAxis()
    yAxis.setPosition(50, 50, 125)
    yAxis.configure(series)
    d.add(xAxis)
    d.add(yAxis)
    return d
##def sample2a():
## "Make sample drawing with two axes, x connected at top of y."
##
## drawing = Drawing(400, 200)
##
## data = [(10, 20, 30, 42)]
##
## yAxis = YValueAxis()
## yAxis.setPosition(50, 50, 125)
## yAxis.configure(data)
##
## xAxis = XCategoryAxis()
## xAxis._length = 300
## xAxis.configure(data)
## xAxis.joinToAxis(yAxis, mode='top')
## xAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
## xAxis.labels.boxAnchor = 'n'
##
## drawing.add(xAxis)
## drawing.add(yAxis)
##
## return drawing
##
##
##def sample2b():
## "Make two axes, x connected at bottom of y."
##
## drawing = Drawing(400, 200)
##
## data = [(10, 20, 30, 42)]
##
## yAxis = YValueAxis()
## yAxis.setPosition(50, 50, 125)
## yAxis.configure(data)
##
## xAxis = XCategoryAxis()
## xAxis._length = 300
## xAxis.configure(data)
## xAxis.joinToAxis(yAxis, mode='bottom')
## xAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
## xAxis.labels.boxAnchor = 'n'
##
## drawing.add(xAxis)
## drawing.add(yAxis)
##
## return drawing
##
##
##def sample2c():
## "Make two axes, x connected at fixed value (in points) of y."
##
## drawing = Drawing(400, 200)
##
## data = [(10, 20, 30, 42)]
##
## yAxis = YValueAxis()
## yAxis.setPosition(50, 50, 125)
## yAxis.configure(data)
##
## xAxis = XCategoryAxis()
## xAxis._length = 300
## xAxis.configure(data)
## xAxis.joinToAxis(yAxis, mode='points', pos=100)
## xAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
## xAxis.labels.boxAnchor = 'n'
##
## drawing.add(xAxis)
## drawing.add(yAxis)
##
## return drawing
##
##
##def sample2d():
## "Make two axes, x connected at fixed value (of y-axes) of y."
##
## drawing = Drawing(400, 200)
##
## data = [(10, 20, 30, 42)]
##
## yAxis = YValueAxis()
## yAxis.setPosition(50, 50, 125)
## yAxis.configure(data)
##
## xAxis = XCategoryAxis()
## xAxis._length = 300
## xAxis.configure(data)
## xAxis.joinToAxis(yAxis, mode='value', pos=20)
## xAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
## xAxis.labels.boxAnchor = 'n'
##
## drawing.add(xAxis)
## drawing.add(yAxis)
##
## return drawing
##
##
##def sample3a():
## "Make sample drawing with two axes, y connected at left of x."
##
## drawing = Drawing(400, 200)
##
## data = [(10, 20, 30, 42)]
##
## xAxis = XCategoryAxis()
## xAxis._length = 300
## xAxis.configure(data)
## xAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
## xAxis.labels.boxAnchor = 'n'
##
## yAxis = YValueAxis()
## yAxis.setPosition(50, 50, 125)
## yAxis.configure(data)
## yAxis.joinToAxis(xAxis, mode='left')
##
## drawing.add(xAxis)
## drawing.add(yAxis)
##
## return drawing
##
##
##def sample3b():
## "Make sample drawing with two axes, y connected at right of x."
##
## drawing = Drawing(400, 200)
##
## data = [(10, 20, 30, 42)]
##
## xAxis = XCategoryAxis()
## xAxis._length = 300
## xAxis.configure(data)
## xAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
## xAxis.labels.boxAnchor = 'n'
##
## yAxis = YValueAxis()
## yAxis.setPosition(50, 50, 125)
## yAxis.configure(data)
## yAxis.joinToAxis(xAxis, mode='right')
##
## drawing.add(xAxis)
## drawing.add(yAxis)
##
## return drawing
##
##
##def sample3c():
## "Make two axes, y connected at fixed value (in points) of x."
##
## drawing = Drawing(400, 200)
##
## data = [(10, 20, 30, 42)]
##
## yAxis = YValueAxis()
## yAxis.setPosition(50, 50, 125)
## yAxis.configure(data)
##
## xAxis = XValueAxis()
## xAxis._length = 300
## xAxis.configure(data)
## xAxis.joinToAxis(yAxis, mode='points', pos=100)
##
## drawing.add(xAxis)
## drawing.add(yAxis)
##
## return drawing
def sample4a():
    "Sample drawing, xvalue/yvalue axes, x joined 100 points up the y axis."
    d = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    yAxis = YValueAxis()
    yAxis.setPosition(50, 50, 125)
    yAxis.configure(series)
    xAxis = XValueAxis()
    xAxis._length = 300
    xAxis.joinAxis = yAxis
    xAxis.joinAxisMode = 'points'
    xAxis.joinAxisPos = 100
    xAxis.configure(series)
    d.add(xAxis)
    d.add(yAxis)
    return d
def sample4b():
    "Sample drawing, xvalue/yvalue axes, x joined at value 35 of y."
    d = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    yAxis = YValueAxis()
    yAxis.setPosition(50, 50, 125)
    yAxis.configure(series)
    xAxis = XValueAxis()
    xAxis._length = 300
    xAxis.joinAxis = yAxis
    xAxis.joinAxisMode = 'value'
    xAxis.joinAxisPos = 35
    xAxis.configure(series)
    d.add(xAxis)
    d.add(yAxis)
    return d
def sample4c():
    "Sample drawing, xvalue/yvalue axes, x joined to bottom of y."
    d = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    yAxis = YValueAxis()
    yAxis.setPosition(50, 50, 125)
    yAxis.configure(series)
    xAxis = XValueAxis()
    xAxis._length = 300
    xAxis.joinAxis = yAxis
    xAxis.joinAxisMode = 'bottom'
    xAxis.configure(series)
    d.add(xAxis)
    d.add(yAxis)
    return d
def sample4c1():
    "xvalue/yvalue axes, without drawing axis lines/ticks."
    d = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    yAxis = YValueAxis()
    yAxis.setPosition(50, 50, 125)
    yAxis.configure(series)
    # Hide the axis furniture; only labels remain visible.
    yAxis.visibleAxis = 0
    yAxis.visibleTicks = 0
    xAxis = XValueAxis()
    xAxis._length = 300
    xAxis.joinAxis = yAxis
    xAxis.joinAxisMode = 'bottom'
    xAxis.configure(series)
    xAxis.visibleAxis = 0
    xAxis.visibleTicks = 0
    d.add(xAxis)
    d.add(yAxis)
    return d
def sample4d():
    "Sample drawing, xvalue/yvalue axes, x joined to top of y."
    d = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    yAxis = YValueAxis()
    yAxis.setPosition(50, 50, 125)
    yAxis.configure(series)
    xAxis = XValueAxis()
    xAxis._length = 300
    xAxis.joinAxis = yAxis
    xAxis.joinAxisMode = 'top'
    xAxis.configure(series)
    d.add(xAxis)
    d.add(yAxis)
    return d
def sample5a():
    "Sample drawing, xvalue/yvalue axes, y joined 100 points along x."
    d = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    xAxis = XValueAxis()
    xAxis.setPosition(50, 50, 300)
    xAxis.configure(series)
    yAxis = YValueAxis()
    yAxis.setPosition(50, 50, 125)
    yAxis.joinAxis = xAxis
    yAxis.joinAxisMode = 'points'
    yAxis.joinAxisPos = 100
    yAxis.configure(series)
    d.add(xAxis)
    d.add(yAxis)
    return d
def sample5b():
    "Sample drawing, xvalue/yvalue axes, y joined at value 35 of x."
    d = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    xAxis = XValueAxis()
    xAxis.setPosition(50, 50, 300)
    xAxis.configure(series)
    yAxis = YValueAxis()
    yAxis.setPosition(50, 50, 125)
    yAxis.joinAxis = xAxis
    yAxis.joinAxisMode = 'value'
    yAxis.joinAxisPos = 35
    yAxis.configure(series)
    d.add(xAxis)
    d.add(yAxis)
    return d
def sample5c():
    "Sample drawing, xvalue/yvalue axes, y joined at right of x."
    d = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    xAxis = XValueAxis()
    xAxis.setPosition(50, 50, 300)
    xAxis.configure(series)
    yAxis = YValueAxis()
    yAxis.setPosition(50, 50, 125)
    yAxis.joinAxis = xAxis
    yAxis.joinAxisMode = 'right'
    yAxis.configure(series)
    d.add(xAxis)
    d.add(yAxis)
    return d
def sample5d():
    "Sample drawing, xvalue/yvalue axes, y joined at left of x."
    d = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    xAxis = XValueAxis()
    xAxis.setPosition(50, 50, 300)
    xAxis.configure(series)
    yAxis = YValueAxis()
    yAxis.setPosition(50, 50, 125)
    yAxis.joinAxis = xAxis
    yAxis.joinAxisMode = 'left'
    yAxis.configure(series)
    d.add(xAxis)
    d.add(yAxis)
    return d
def sample6a():
    "Sample drawing, xcat/yvalue axes, x joined at top of y."
    d = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    yAxis = YValueAxis()
    yAxis.setPosition(50, 50, 125)
    yAxis.configure(series)
    xAxis = XCategoryAxis()
    xAxis._length = 300
    xAxis.configure(series)
    xAxis.joinAxis = yAxis
    xAxis.joinAxisMode = 'top'
    xAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    xAxis.labels.boxAnchor = 'n'
    d.add(xAxis)
    d.add(yAxis)
    return d
def sample6b():
    "Sample drawing, xcat/yvalue axes, x joined at bottom of y."
    d = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    yAxis = YValueAxis()
    yAxis.setPosition(50, 50, 125)
    yAxis.configure(series)
    xAxis = XCategoryAxis()
    xAxis._length = 300
    xAxis.configure(series)
    xAxis.joinAxis = yAxis
    xAxis.joinAxisMode = 'bottom'
    xAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    xAxis.labels.boxAnchor = 'n'
    d.add(xAxis)
    d.add(yAxis)
    return d
def sample6c():
    "Sample drawing, xcat/yvalue axes, x joined 100 points up y."
    d = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    yAxis = YValueAxis()
    yAxis.setPosition(50, 50, 125)
    yAxis.configure(series)
    xAxis = XCategoryAxis()
    xAxis._length = 300
    xAxis.configure(series)
    xAxis.joinAxis = yAxis
    xAxis.joinAxisMode = 'points'
    xAxis.joinAxisPos = 100
    xAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    xAxis.labels.boxAnchor = 'n'
    d.add(xAxis)
    d.add(yAxis)
    return d
def sample6d():
    "Sample drawing, xcat/yvalue axes, x joined at value 20 of y."
    d = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    yAxis = YValueAxis()
    yAxis.setPosition(50, 50, 125)
    yAxis.configure(series)
    xAxis = XCategoryAxis()
    xAxis._length = 300
    xAxis.configure(series)
    xAxis.joinAxis = yAxis
    xAxis.joinAxisMode = 'value'
    xAxis.joinAxisPos = 20
    xAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    xAxis.labels.boxAnchor = 'n'
    d.add(xAxis)
    d.add(yAxis)
    return d
def sample7a():
    "Sample drawing, xvalue/ycat axes, y joined at right of x."
    d = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    xAxis = XValueAxis()
    xAxis._length = 300
    xAxis.configure(series)
    yAxis = YCategoryAxis()
    yAxis.setPosition(50, 50, 125)
    yAxis.joinAxis = xAxis
    yAxis.joinAxisMode = 'right'
    yAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    yAxis.labels.boxAnchor = 'e'
    yAxis.configure(series)
    d.add(xAxis)
    d.add(yAxis)
    return d
def sample7b():
    "Sample drawing, xvalue/ycat axes, y joined at left of x."
    d = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    xAxis = XValueAxis()
    xAxis._length = 300
    xAxis.configure(series)
    yAxis = YCategoryAxis()
    yAxis.setPosition(50, 50, 125)
    yAxis.joinAxis = xAxis
    yAxis.joinAxisMode = 'left'
    yAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    yAxis.labels.boxAnchor = 'e'
    yAxis.configure(series)
    d.add(xAxis)
    d.add(yAxis)
    return d
def sample7c():
    "Sample drawing, xvalue/ycat axes, y joined at value 30 of x."
    d = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    xAxis = XValueAxis()
    xAxis._length = 300
    xAxis.configure(series)
    yAxis = YCategoryAxis()
    yAxis.setPosition(50, 50, 125)
    yAxis.joinAxis = xAxis
    yAxis.joinAxisMode = 'value'
    yAxis.joinAxisPos = 30
    yAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    yAxis.labels.boxAnchor = 'e'
    yAxis.configure(series)
    d.add(xAxis)
    d.add(yAxis)
    return d
def sample7d():
    "Sample drawing, xvalue/ycat axes, y joined 200 points along x."
    d = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    xAxis = XValueAxis()
    xAxis._length = 300
    xAxis.configure(series)
    yAxis = YCategoryAxis()
    yAxis.setPosition(50, 50, 125)
    yAxis.joinAxis = xAxis
    yAxis.joinAxisMode = 'points'
    yAxis.joinAxisPos = 200
    yAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    yAxis.labels.boxAnchor = 'e'
    yAxis.configure(series)
    d.add(xAxis)
    d.add(yAxis)
    return d
|
"""
HTMLParser-based link extractor
"""
from HTMLParser import HTMLParser
from six.moves.urllib.parse import urljoin
from w3lib.url import safe_url_string
from scrapy.link import Link
from scrapy.utils.python import unique as unique_list
class HtmlParserLinkExtractor(HTMLParser):
    """Link extractor built on the stdlib HTMLParser (Python 2).

    Collects a Link for every matching tag/attribute pair fed to it,
    resolving relative URLs against the page URL (or a <base href>).
    """

    def __init__(self, tag="a", attr="href", process=None, unique=False):
        HTMLParser.__init__(self)
        # tag/attr/process may each be a callable; plain values are
        # wrapped into equality / identity lambdas.
        self.scan_tag = tag if callable(tag) else lambda t: t == tag
        self.scan_attr = attr if callable(attr) else lambda a: a == attr
        self.process_attr = process if callable(process) else lambda v: v
        self.unique = unique

    def _extract_links(self, response_text, response_url, response_encoding):
        # Parse the document; links accumulate via the handle_* callbacks.
        self.reset()
        self.feed(response_text)
        self.close()
        # Optionally de-duplicate by URL (first occurrence wins).
        links = unique_list(self.links, key=lambda link: link.url) if self.unique else self.links
        ret = []
        # A <base href> overrides the response URL for relative resolution.
        base_url = urljoin(response_url, self.base_url) if self.base_url else response_url
        for link in links:
            if isinstance(link.url, unicode):  # Python 2 text type
                link.url = link.url.encode(response_encoding)
            link.url = urljoin(base_url, link.url)
            link.url = safe_url_string(link.url, response_encoding)
            link.text = link.text.decode(response_encoding)
            ret.append(link)
        return ret

    def extract_links(self, response):
        # wrapper needed to allow to work directly with text
        return self._extract_links(response.body, response.url, response.encoding)

    def reset(self):
        """Clear all parser state before (re)parsing a document."""
        HTMLParser.reset(self)
        self.base_url = None
        self.current_link = None
        self.links = []

    def handle_starttag(self, tag, attrs):
        # Remember <base href> for URL resolution; start a Link for any
        # matching tag with a matching attribute.
        if tag == 'base':
            self.base_url = dict(attrs).get('href')
        if self.scan_tag(tag):
            for attr, value in attrs:
                if self.scan_attr(attr):
                    url = self.process_attr(value)
                    link = Link(url=url)
                    self.links.append(link)
                    self.current_link = link

    def handle_endtag(self, tag):
        # Stop collecting text once the matching tag closes.
        if self.scan_tag(tag):
            self.current_link = None

    def handle_data(self, data):
        # Accumulate the text between start and end tag as the link text.
        if self.current_link:
            self.current_link.text = self.current_link.text + data

    def matches(self, url):
        """This extractor matches with any url, since
        it doesn't contain any patterns"""
        return True
# Add deprecation warning to HtmlParserLinkExtractor (the variant below emits it).
"""
HTMLParser-based link extractor
"""
import warnings
from HTMLParser import HTMLParser
from six.moves.urllib.parse import urljoin
from w3lib.url import safe_url_string
from scrapy.link import Link
from scrapy.utils.python import unique as unique_list
from scrapy.exceptions import ScrapyDeprecationWarning
class HtmlParserLinkExtractor(HTMLParser):
    """Deprecated HTMLParser-based link extractor (Python 2).

    Identical to the previous variant but warns on construction that
    callers should migrate to scrapy.linkextractors.LinkExtractor.
    """

    def __init__(self, tag="a", attr="href", process=None, unique=False):
        HTMLParser.__init__(self)
        # Emitted once per instantiation so callers notice the migration.
        warnings.warn(
            "HtmlParserLinkExtractor is deprecated and will be removed in "
            "future releases. Please use scrapy.linkextractors.LinkExtractor",
            ScrapyDeprecationWarning
        )
        # tag/attr/process may each be a callable; plain values are
        # wrapped into equality / identity lambdas.
        self.scan_tag = tag if callable(tag) else lambda t: t == tag
        self.scan_attr = attr if callable(attr) else lambda a: a == attr
        self.process_attr = process if callable(process) else lambda v: v
        self.unique = unique

    def _extract_links(self, response_text, response_url, response_encoding):
        # Parse the document; links accumulate via the handle_* callbacks.
        self.reset()
        self.feed(response_text)
        self.close()
        # Optionally de-duplicate by URL (first occurrence wins).
        links = unique_list(self.links, key=lambda link: link.url) if self.unique else self.links
        ret = []
        # A <base href> overrides the response URL for relative resolution.
        base_url = urljoin(response_url, self.base_url) if self.base_url else response_url
        for link in links:
            if isinstance(link.url, unicode):  # Python 2 text type
                link.url = link.url.encode(response_encoding)
            link.url = urljoin(base_url, link.url)
            link.url = safe_url_string(link.url, response_encoding)
            link.text = link.text.decode(response_encoding)
            ret.append(link)
        return ret

    def extract_links(self, response):
        # wrapper needed to allow to work directly with text
        return self._extract_links(response.body, response.url, response.encoding)

    def reset(self):
        """Clear all parser state before (re)parsing a document."""
        HTMLParser.reset(self)
        self.base_url = None
        self.current_link = None
        self.links = []

    def handle_starttag(self, tag, attrs):
        # Remember <base href>; start a Link for any matching tag/attr.
        if tag == 'base':
            self.base_url = dict(attrs).get('href')
        if self.scan_tag(tag):
            for attr, value in attrs:
                if self.scan_attr(attr):
                    url = self.process_attr(value)
                    link = Link(url=url)
                    self.links.append(link)
                    self.current_link = link

    def handle_endtag(self, tag):
        # Stop collecting text once the matching tag closes.
        if self.scan_tag(tag):
            self.current_link = None

    def handle_data(self, data):
        # Accumulate the text between start and end tag as the link text.
        if self.current_link:
            self.current_link.text = self.current_link.text + data

    def matches(self, url):
        """This extractor matches with any url, since
        it doesn't contain any patterns"""
        return True
|
import jinja2
import os
from datetime import datetime
from .utils import construct_function_call, Variable
OBJECT_TEMPLATE = '''# {{ cls.filename }}
# Auto-generated by altair_parser {{ date }}
{%- for import in cls.imports %}
{{ import }}
{%- endfor %}
class {{ cls.classname }}({{ cls.baseclass }}):
"""{{ cls.classname }} class
Attributes
----------
{%- for (name, prop) in cls.wrapped_properties().items() %}
{{ name }} : {{ prop.type }}
{{ prop.description }}
{%- endfor %}
"""
{%- for (name, prop) in cls.wrapped_properties().items() %}
{{ name }} = {{ prop.trait_code }}
{%- endfor %}
'''
class TraitCodeExtractor(object):
    """Base class for trait code extractors.
    An ordered list of these is passed to JSONSchema, and they are used to
    extract appropriate trait codes.
    """
    def __init__(self, schema, typecode=None):
        # schema: the wrapped JSONSchema to inspect.
        # typecode: optional explicit type; defaults to the schema's type.
        self.schema = schema
        self.typecode = typecode or schema.type
    def check(self):
        """Return True if this extractor applies to the schema (override)."""
        raise NotImplementedError()
    def trait_code(self, **kwargs):
        """Return the traitlets construction code string (override)."""
        raise NotImplementedError()
class SimpleTraitCode(TraitCodeExtractor):
    """Trait-code extractor for scalar JSON types
    (boolean / null / number / integer / string)."""
    simple_types = ["boolean", "null", "number", "integer", "string"]
    classes = {'boolean': 'jst.JSONBoolean',
               'null': 'jst.JSONNull',
               'number': 'jst.JSONNumber',
               'integer': 'jst.JSONInteger',
               'string': 'jst.JSONString'}
    # Numeric types forward these JSON-Schema validation keywords as
    # constructor arguments of the generated trait.
    _numeric_keys = ['minimum', 'exclusiveMinimum',
                     'maximum', 'exclusiveMaximum',
                     'multipleOf']
    validation_keys = {'number': _numeric_keys,
                       'integer': _numeric_keys}
    def check(self):
        """True when this schema's type is one of the scalar types."""
        return self.typecode in self.simple_types
    def trait_code(self, **kwargs):
        """Build the jst.* constructor call string for this scalar type."""
        for key in self.validation_keys.get(self.typecode, []):
            if key in self.schema:
                kwargs[key] = self.schema[key]
        return construct_function_call(self.classes[self.typecode], **kwargs)
class CompoundTraitCode(TraitCodeExtractor):
    """Trait-code extractor for schemas whose 'type' is a *list* of simple
    type names (e.g. ``["number", "null"]``), mapped to a union trait.
    """
    simple_types = SimpleTraitCode.simple_types
    def check(self):
        # isinstance() first: short-circuiting avoids iterating a plain
        # string typecode (e.g. 'object') character-by-character in all().
        return (isinstance(self.typecode, list) and
                all(typ in self.simple_types for typ in self.typecode))
    def trait_code(self, **kwargs):
        # JSON Schema spells "nullable" as a 'null' entry in the type list;
        # traitlets spells it as allow_none=True.
        if 'null' in self.typecode:
            kwargs['allow_none'] = True
        # BUG FIX: typecode was previously bound only inside the 'null'
        # branch, raising UnboundLocalError for type lists without 'null'.
        typecode = [typ for typ in self.typecode if typ != 'null']
        if len(typecode) == 1:
            return SimpleTraitCode(self.schema, typecode[0]).trait_code(**kwargs)
        else:
            # Container-level kwargs must not leak into the member traits.
            item_kwargs = {key: val for key, val in kwargs.items()
                           if key not in ['allow_none', 'allow_undefined']}
            arg = "[{0}]".format(', '.join(SimpleTraitCode(self.schema, typ).trait_code(**item_kwargs)
                                           for typ in typecode))
            return construct_function_call('jst.JSONUnion', Variable(arg), **kwargs)
class RefTraitCode(TraitCodeExtractor):
    """Trait-code extractor for '$ref' schemas: an instance trait of the
    referenced generated class, or the referenced schema's own trait code."""
    def check(self):
        return '$ref' in self.schema
    def trait_code(self, **kwargs):
        ref = self.schema.get_reference(self.schema['$ref'])
        if ref.is_object:
            # Object references become instances of the generated class.
            return construct_function_call('jst.JSONInstance',
                                           Variable(ref.classname),
                                           **kwargs)
        else:
            # Non-object references delegate to the target schema's trait,
            # carrying over this schema's metadata (e.g. required flag).
            ref = ref.copy()  # TODO: maybe can remove this?
            ref.metadata = self.schema.metadata
            return ref.trait_code
class EnumTraitCode(TraitCodeExtractor):
    """Trait-code extractor for schemas with an explicit 'enum' value list."""
    def check(self):
        return 'enum' in self.schema
    def trait_code(self, **kwargs):
        allowed = self.schema['enum']
        return construct_function_call('jst.JSONEnum', allowed, **kwargs)
class ArrayTraitCode(TraitCodeExtractor):
    """Trait-code extractor for 'array' schemas -> jst.JSONArray."""
    def check(self):
        return 'array' == self.schema.type
    def trait_code(self, **kwargs):
        # TODO: implement items as list and additionalItems
        items = self.schema['items']
        # Forward length/uniqueness constraints to the trait constructor.
        for schema_key, trait_key in (('minItems', 'minlen'),
                                      ('maxItems', 'maxlen'),
                                      ('uniqueItems', 'uniqueItems')):
            if schema_key in self.schema:
                kwargs[trait_key] = self.schema[schema_key]
        if isinstance(items, list):
            raise NotImplementedError("'items' keyword as list")
        itemtype = self.schema.make_child(items).trait_code
        return construct_function_call('jst.JSONArray', Variable(itemtype),
                                       **kwargs)
class AnyOfTraitCode(TraitCodeExtractor):
    """Trait-code extractor for 'anyOf' schemas -> jst.JSONAnyOf."""
    def check(self):
        return 'anyOf' in self.schema
    def trait_code(self, **kwargs):
        wrapped = (self.schema.make_child(sub) for sub in self.schema['anyOf'])
        children = [Variable(child.trait_code) for child in wrapped]
        return construct_function_call('jst.JSONAnyOf', Variable(children),
                                       **kwargs)
class OneOfTraitCode(TraitCodeExtractor):
    """Trait-code extractor for 'oneOf' schemas -> jst.JSONOneOf."""
    def check(self):
        return 'oneOf' in self.schema
    def trait_code(self, **kwargs):
        wrapped = (self.schema.make_child(sub) for sub in self.schema['oneOf'])
        children = [Variable(child.trait_code) for child in wrapped]
        return construct_function_call('jst.JSONOneOf', Variable(children),
                                       **kwargs)
class AllOfTraitCode(TraitCodeExtractor):
    """Trait-code extractor for 'allOf' schemas -> jst.JSONAllOf."""
    def check(self):
        return 'allOf' in self.schema
    def trait_code(self, **kwargs):
        wrapped = (self.schema.make_child(sub) for sub in self.schema['allOf'])
        children = [Variable(child.trait_code) for child in wrapped]
        return construct_function_call('jst.JSONAllOf', Variable(children),
                                       **kwargs)
class NotTraitCode(TraitCodeExtractor):
    """Trait-code extractor for 'not' schemas -> jst.JSONNot."""
    def check(self):
        return 'not' in self.schema
    def trait_code(self, **kwargs):
        negated = self.schema.make_child(self.schema['not']).trait_code
        return construct_function_call('jst.JSONNot', Variable(negated),
                                       **kwargs)
class JSONSchema(object):
    """A class to wrap JSON Schema objects and reason about their contents"""
    object_template = OBJECT_TEMPLATE
    __draft__ = 4
    # NOTE(review): class-level mutable dict -- the $ref cache is shared by
    # *all* JSONSchema instances (and subclasses); confirm this is intended.
    _cached_references = {}
    simple_types = ["boolean", "null", "number", "integer", "string"]
    valid_types = simple_types + ["array", "object"]
    # NOTE(review): traitlet_map and valid_types are not referenced by any
    # method below -- presumably legacy or consumed elsewhere; verify.
    traitlet_map = {'array': {'cls': 'jst.JSONArray'},
                    'boolean': {'cls': 'jst.JSONBoolean'},
                    'null': {'cls': 'jst.JSONNull'},
                    'number': {'cls': 'jst.JSONNumber',
                               'validation_keys': ['minimum', 'maximum',
                                                   'exclusiveMinimum',
                                                   'exclusiveMaximum',
                                                   'multipleOf']},
                    'integer': {'cls': 'jst.JSONInteger',
                                'validation_keys': ['minimum', 'maximum',
                                                    'exclusiveMinimum',
                                                    'exclusiveMaximum',
                                                    'multipleOf']},
                    'string': {'cls': 'jst.JSONString'},
                    }
    # Default values returned by __getattr__ for missing schema keys.
    attr_defaults = {'title': '',
                     'description': '',
                     'properties': {},
                     'definitions': {},
                     'default': None,
                     'examples': {},
                     'type': 'object',
                     'required': []}
    # Import lines emitted at the top of every generated module.
    basic_imports = ["import traitlets as T",
                     "from . import jstraitlets as jst",
                     "from .baseobject import BaseObject"]
    def __init__(self, schema, context=None, parent=None, name=None, metadata=None):
        # schema: the raw schema dict; context: the root schema used to
        # resolve '#/...' references; metadata: per-use info (e.g. required).
        self.schema = schema
        self.parent = parent
        self.name = name
        self.metadata = metadata or {}
        # if context is not given, then assume this is a root instance that
        # defines its context
        self.context = context or schema
    def __getitem__(self, key):
        # Delegate item access to the underlying schema dict.
        return self.schema[key]
    def __contains__(self, key):
        return key in self.schema
    def copy(self):
        """Return a shallow copy sharing schema/context/parent/metadata."""
        return self.__class__(schema=self.schema, context=self.context,
                              parent=self.parent, name=self.name,
                              metadata=self.metadata)
    def make_child(self, schema, name=None, metadata=None):
        """
        Make a child instance, appropriately defining the parent and context
        """
        return self.__class__(schema, context=self.context,
                              parent=self, name=name, metadata=metadata)
    def __getattr__(self, attr):
        # Expose schema keys listed in attr_defaults as plain attributes;
        # only invoked for names not found through normal lookup.
        if attr in self.attr_defaults:
            return self.schema.get(attr, self.attr_defaults[attr])
        raise AttributeError(f"'{self.__class__.__name__}' object "
                             f"has no attribute '{attr}'")
    @property
    def is_root(self):
        # The root instance is its own reference-resolution context.
        return self.context is self.schema
    @property
    def is_trait(self):
        # Non-object, non-reference schemas map to a traitlets trait.
        return self.type != 'object' and not self.is_reference
    @property
    def is_object(self):
        # Object schemas map to a generated BaseObject subclass.
        return self.type == 'object' and not self.is_reference
    @property
    def is_reference(self):
        return '$ref' in self.schema
    @property
    def classname(self):
        """Python class name used for this schema in generated code."""
        if self.name:
            return self.name
        elif self.is_root:
            return "RootInstance"
        elif self.is_reference:
            # Last path component of the $ref serves as the class name.
            return self.schema['$ref'].split('/')[-1]
        else:
            raise NotImplementedError("Anonymous class name")
    @property
    def modulename(self):
        return self.classname.lower()
    @property
    def filename(self):
        return self.modulename + '.py'
    @property
    def baseclass(self):
        return "BaseObject"
    @property
    def import_statement(self):
        """Import line other generated modules use to obtain this class."""
        return f"from .{self.modulename} import {self.classname}"
    @property
    def imports(self):
        """Imports required at the top of this schema's generated module."""
        imports = []
        imports.extend(self.basic_imports)
        for obj in self.wrapped_properties().values():
            if obj.is_reference:
                ref = self.get_reference(obj.schema['$ref'])
                if ref.is_object:
                    imports.append(ref.import_statement)
        return imports
    @property
    def module_imports(self):
        """Imports of all generated object classes (for the package __init__)."""
        imports = []
        for obj in self.wrapped_definitions().values():
            if obj.is_object:
                imports.append(obj.import_statement)
        return imports
    def wrapped_definitions(self):
        """Return definition dictionary wrapped as JSONSchema objects"""
        return {name.lower(): self.make_child(schema, name=name)
                for name, schema in self.definitions.items()}
    def wrapped_properties(self):
        """Return property dictionary wrapped as JSONSchema objects"""
        return {name: self.make_child(val, metadata={'required': name in self.required})
                for name, val in self.properties.items()}
    def get_reference(self, ref, cache=True):
        """
        Get the JSONSchema object for the given reference code.
        Reference codes should look something like "#/definitions/MyDefinition"
        By default, this will cache objects accessed by their ref code.
        """
        if cache and ref in self._cached_references:
            return self._cached_references[ref]
        path = ref.split('/')
        name = path[-1]
        if path[0] != '#':
            # Only intra-document ('#/...') references are supported.
            raise ValueError(f"Unrecognized $ref format: '{ref}'")
        try:
            # Walk the path components down from the root context.
            schema = self.context
            for key in path[1:]:
                schema = schema[key]
        except KeyError:
            raise ValueError(f"$ref='{ref}' not present in the schema")
        wrapped_schema = self.make_child(schema, name=name)
        if cache:
            self._cached_references[ref] = wrapped_schema
        return wrapped_schema
    @property
    def trait_code(self):
        """Create the trait code for the given typecode"""
        typecode = self.type
        # Required properties may not be left undefined in generated objects.
        if self.metadata.get('required', False):
            kwargs = {'allow_undefined': False}
        else:
            kwargs = {}
        # TODO: handle multiple entries...
        # Dispatch: keyword-combinator schemas first, then by type code.
        if "not" in self.schema:
            validator = NotTraitCode(self, typecode)
        elif "$ref" in self.schema:
            validator = RefTraitCode(self, typecode)
        elif "anyOf" in self.schema:
            validator = AnyOfTraitCode(self, typecode)
        elif "allOf" in self.schema:
            validator = AllOfTraitCode(self, typecode)
        elif "oneOf" in self.schema:
            validator = OneOfTraitCode(self, typecode)
        elif "enum" in self.schema:
            validator = EnumTraitCode(self, typecode)
        elif typecode in self.simple_types:
            validator = SimpleTraitCode(self, typecode)
        elif typecode == 'array':
            validator = ArrayTraitCode(self, typecode)
        elif isinstance(typecode, list):
            validator = CompoundTraitCode(self, typecode)
        elif typecode == 'object':
            raise NotImplementedError("Anonymous Objects")
        else:
            raise ValueError(f"unrecognized type identifier: {typecode}")
        # Sanity check: the chosen extractor must agree that it applies.
        assert validator.check()
        return validator.trait_code(**kwargs)
    def object_code(self):
        """Return code to define a BaseObject for this schema"""
        now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        return jinja2.Template(self.object_template).render(cls=self, date=now)
    def module_spec(self):
        """Return the JSON specification of the module
        This can be passed to ``altair_parser.utils.load_dynamic_module``
        or to ``altair_parser.utils.save_module``
        """
        assert self.is_root
        submodroot = self.classname.lower()  # NOTE(review): currently unused
        # Static support files are copied verbatim from the package sources.
        modspec = {
            'jstraitlets.py': open(os.path.join(os.path.dirname(__file__),
                                                'src', 'jstraitlets.py')).read(),
            'baseobject.py': open(os.path.join(os.path.dirname(__file__),
                                               'src', 'baseobject.py')).read(),
            self.filename: self.object_code()
        }
        modspec['__init__.py'] = '\n'.join([self.import_statement]
                                           + self.module_imports)
        modspec.update({schema.filename: schema.object_code()
                        for schema in self.wrapped_definitions().values()
                        if schema.is_object})
        return modspec
Finish refactor of trait_code generators
import jinja2
import os
from datetime import datetime
from .utils import construct_function_call, Variable
OBJECT_TEMPLATE = '''# {{ cls.filename }}
# Auto-generated by altair_parser {{ date }}
{%- for import in cls.imports %}
{{ import }}
{%- endfor %}
class {{ cls.classname }}({{ cls.baseclass }}):
"""{{ cls.classname }} class
Attributes
----------
{%- for (name, prop) in cls.wrapped_properties().items() %}
{{ name }} : {{ prop.type }}
{{ prop.description }}
{%- endfor %}
"""
{%- for (name, prop) in cls.wrapped_properties().items() %}
{{ name }} = {{ prop.trait_code }}
{%- endfor %}
'''
class TraitCodeExtractor(object):
    """Base class for trait code extractors.
    An ordered list of these is passed to JSONSchema, and they are used to
    extract appropriate trait codes.
    """
    def __init__(self, schema, typecode=None):
        # schema: the wrapped JSONSchema to inspect.
        # typecode: optional explicit type; defaults to the schema's type.
        self.schema = schema
        self.typecode = typecode or schema.type
    def check(self):
        """Return True if this extractor applies to the schema (override)."""
        raise NotImplementedError()
    def trait_code(self, **kwargs):
        """Return the traitlets construction code string (override)."""
        raise NotImplementedError()
class SimpleTraitCode(TraitCodeExtractor):
    """Trait-code extractor for scalar JSON types
    (boolean / null / number / integer / string)."""
    simple_types = ["boolean", "null", "number", "integer", "string"]
    classes = {'boolean': 'jst.JSONBoolean',
               'null': 'jst.JSONNull',
               'number': 'jst.JSONNumber',
               'integer': 'jst.JSONInteger',
               'string': 'jst.JSONString'}
    # Numeric types forward these JSON-Schema validation keywords as
    # constructor arguments of the generated trait.
    _numeric_keys = ['minimum', 'exclusiveMinimum',
                     'maximum', 'exclusiveMaximum',
                     'multipleOf']
    validation_keys = {'number': _numeric_keys,
                       'integer': _numeric_keys}
    def check(self):
        """True when this schema's type is one of the scalar types."""
        return self.typecode in self.simple_types
    def trait_code(self, **kwargs):
        """Build the jst.* constructor call string for this scalar type."""
        for key in self.validation_keys.get(self.typecode, []):
            if key in self.schema:
                kwargs[key] = self.schema[key]
        return construct_function_call(self.classes[self.typecode], **kwargs)
class CompoundTraitCode(TraitCodeExtractor):
    """Trait-code extractor for schemas whose 'type' is a *list* of simple
    type names (e.g. ``["number", "null"]``), mapped to a union trait.
    """
    simple_types = SimpleTraitCode.simple_types
    def check(self):
        # isinstance() first: short-circuiting avoids iterating a plain
        # string typecode (e.g. 'object') character-by-character in all().
        return (isinstance(self.typecode, list) and
                all(typ in self.simple_types for typ in self.typecode))
    def trait_code(self, **kwargs):
        # JSON Schema spells "nullable" as a 'null' entry in the type list;
        # traitlets spells it as allow_none=True.
        if 'null' in self.typecode:
            kwargs['allow_none'] = True
        # BUG FIX: typecode was previously bound only inside the 'null'
        # branch, raising UnboundLocalError for type lists without 'null'.
        typecode = [typ for typ in self.typecode if typ != 'null']
        if len(typecode) == 1:
            return SimpleTraitCode(self.schema, typecode[0]).trait_code(**kwargs)
        else:
            # Container-level kwargs must not leak into the member traits.
            item_kwargs = {key: val for key, val in kwargs.items()
                           if key not in ['allow_none', 'allow_undefined']}
            arg = "[{0}]".format(', '.join(SimpleTraitCode(self.schema, typ).trait_code(**item_kwargs)
                                           for typ in typecode))
            return construct_function_call('jst.JSONUnion', Variable(arg), **kwargs)
class RefTraitCode(TraitCodeExtractor):
    """Trait-code extractor for '$ref' schemas: an instance trait of the
    referenced generated class, or the referenced schema's own trait code."""
    def check(self):
        return '$ref' in self.schema
    def trait_code(self, **kwargs):
        ref = self.schema.get_reference(self.schema['$ref'])
        if ref.is_object:
            # Object references become instances of the generated class.
            return construct_function_call('jst.JSONInstance',
                                           Variable(ref.classname),
                                           **kwargs)
        else:
            # Non-object references delegate to the target schema's trait,
            # carrying over this schema's metadata (e.g. required flag).
            ref = ref.copy()  # TODO: maybe can remove this?
            ref.metadata = self.schema.metadata
            return ref.trait_code
class EnumTraitCode(TraitCodeExtractor):
    """Trait-code extractor for schemas with an explicit 'enum' value list."""
    def check(self):
        return 'enum' in self.schema
    def trait_code(self, **kwargs):
        allowed = self.schema['enum']
        return construct_function_call('jst.JSONEnum', allowed, **kwargs)
class ArrayTraitCode(TraitCodeExtractor):
    """Trait-code extractor for 'array' schemas -> jst.JSONArray."""
    def check(self):
        return 'array' == self.schema.type
    def trait_code(self, **kwargs):
        # TODO: implement items as list and additionalItems
        items = self.schema['items']
        # Forward length/uniqueness constraints to the trait constructor.
        for schema_key, trait_key in (('minItems', 'minlen'),
                                      ('maxItems', 'maxlen'),
                                      ('uniqueItems', 'uniqueItems')):
            if schema_key in self.schema:
                kwargs[trait_key] = self.schema[schema_key]
        if isinstance(items, list):
            raise NotImplementedError("'items' keyword as list")
        itemtype = self.schema.make_child(items).trait_code
        return construct_function_call('jst.JSONArray', Variable(itemtype),
                                       **kwargs)
class AnyOfTraitCode(TraitCodeExtractor):
    """Trait-code extractor for 'anyOf' schemas -> jst.JSONAnyOf."""
    def check(self):
        return 'anyOf' in self.schema
    def trait_code(self, **kwargs):
        wrapped = (self.schema.make_child(sub) for sub in self.schema['anyOf'])
        children = [Variable(child.trait_code) for child in wrapped]
        return construct_function_call('jst.JSONAnyOf', Variable(children),
                                       **kwargs)
class OneOfTraitCode(TraitCodeExtractor):
    """Trait-code extractor for 'oneOf' schemas -> jst.JSONOneOf."""
    def check(self):
        return 'oneOf' in self.schema
    def trait_code(self, **kwargs):
        wrapped = (self.schema.make_child(sub) for sub in self.schema['oneOf'])
        children = [Variable(child.trait_code) for child in wrapped]
        return construct_function_call('jst.JSONOneOf', Variable(children),
                                       **kwargs)
class AllOfTraitCode(TraitCodeExtractor):
    """Trait-code extractor for 'allOf' schemas -> jst.JSONAllOf."""
    def check(self):
        return 'allOf' in self.schema
    def trait_code(self, **kwargs):
        wrapped = (self.schema.make_child(sub) for sub in self.schema['allOf'])
        children = [Variable(child.trait_code) for child in wrapped]
        return construct_function_call('jst.JSONAllOf', Variable(children),
                                       **kwargs)
class NotTraitCode(TraitCodeExtractor):
    """Trait-code extractor for 'not' schemas -> jst.JSONNot."""
    def check(self):
        return 'not' in self.schema
    def trait_code(self, **kwargs):
        negated = self.schema.make_child(self.schema['not']).trait_code
        return construct_function_call('jst.JSONNot', Variable(negated),
                                       **kwargs)
class ObjectTraitCode(TraitCodeExtractor):
    """Placeholder extractor for anonymous 'object' schemas (unsupported)."""
    def check(self):
        return 'object' == self.typecode
    def trait_code(self, **kwargs):
        # Anonymous (inline) objects have no generated class to instantiate.
        raise NotImplementedError("Anonymous Objects")
class JSONSchema(object):
    """A class to wrap JSON Schema objects and reason about their contents"""
    object_template = OBJECT_TEMPLATE
    __draft__ = 4
    # NOTE(review): class-level mutable dict -- the $ref cache is shared by
    # *all* JSONSchema instances (and subclasses); confirm this is intended.
    _cached_references = {}
    # Default values returned by __getattr__ for missing schema keys.
    attr_defaults = {'title': '',
                     'description': '',
                     'properties': {},
                     'definitions': {},
                     'default': None,
                     'examples': {},
                     'type': 'object',
                     'required': []}
    # Import lines emitted at the top of every generated module.
    basic_imports = ["import traitlets as T",
                     "from . import jstraitlets as jst",
                     "from .baseobject import BaseObject"]
    # Tried in order; the first extractor whose check() passes supplies the
    # trait code for a schema (see trait_code below).
    trait_extractors = [NotTraitCode, RefTraitCode, AnyOfTraitCode,
                        AllOfTraitCode, OneOfTraitCode, EnumTraitCode,
                        SimpleTraitCode, ArrayTraitCode, ObjectTraitCode,
                        CompoundTraitCode]
    def __init__(self, schema, context=None, parent=None, name=None, metadata=None):
        # schema: the raw schema dict; context: the root schema used to
        # resolve '#/...' references; metadata: per-use info (e.g. required).
        self.schema = schema
        self.parent = parent
        self.name = name
        self.metadata = metadata or {}
        # if context is not given, then assume this is a root instance that
        # defines its context
        self.context = context or schema
    def __getitem__(self, key):
        # Delegate item access to the underlying schema dict.
        return self.schema[key]
    def __contains__(self, key):
        return key in self.schema
    def copy(self):
        """Return a shallow copy sharing schema/context/parent/metadata."""
        return self.__class__(schema=self.schema, context=self.context,
                              parent=self.parent, name=self.name,
                              metadata=self.metadata)
    def make_child(self, schema, name=None, metadata=None):
        """
        Make a child instance, appropriately defining the parent and context
        """
        return self.__class__(schema, context=self.context,
                              parent=self, name=name, metadata=metadata)
    def __getattr__(self, attr):
        # Expose schema keys listed in attr_defaults as plain attributes;
        # only invoked for names not found through normal lookup.
        if attr in self.attr_defaults:
            return self.schema.get(attr, self.attr_defaults[attr])
        raise AttributeError(f"'{self.__class__.__name__}' object "
                             f"has no attribute '{attr}'")
    @property
    def is_root(self):
        # The root instance is its own reference-resolution context.
        return self.context is self.schema
    @property
    def is_trait(self):
        # Non-object, non-reference schemas map to a traitlets trait.
        return self.type != 'object' and not self.is_reference
    @property
    def is_object(self):
        # Object schemas map to a generated BaseObject subclass.
        return self.type == 'object' and not self.is_reference
    @property
    def is_reference(self):
        return '$ref' in self.schema
    @property
    def classname(self):
        """Python class name used for this schema in generated code."""
        if self.name:
            return self.name
        elif self.is_root:
            return "RootInstance"
        elif self.is_reference:
            # Last path component of the $ref serves as the class name.
            return self.schema['$ref'].split('/')[-1]
        else:
            raise NotImplementedError("Anonymous class name")
    @property
    def modulename(self):
        return self.classname.lower()
    @property
    def filename(self):
        return self.modulename + '.py'
    @property
    def baseclass(self):
        return "BaseObject"
    @property
    def import_statement(self):
        """Import line other generated modules use to obtain this class."""
        return f"from .{self.modulename} import {self.classname}"
    @property
    def imports(self):
        """Imports required at the top of this schema's generated module."""
        imports = []
        imports.extend(self.basic_imports)
        for obj in self.wrapped_properties().values():
            if obj.is_reference:
                ref = self.get_reference(obj.schema['$ref'])
                if ref.is_object:
                    imports.append(ref.import_statement)
        return imports
    @property
    def module_imports(self):
        """Imports of all generated object classes (for the package __init__)."""
        imports = []
        for obj in self.wrapped_definitions().values():
            if obj.is_object:
                imports.append(obj.import_statement)
        return imports
    def wrapped_definitions(self):
        """Return definition dictionary wrapped as JSONSchema objects"""
        return {name.lower(): self.make_child(schema, name=name)
                for name, schema in self.definitions.items()}
    def wrapped_properties(self):
        """Return property dictionary wrapped as JSONSchema objects"""
        return {name: self.make_child(val, metadata={'required': name in self.required})
                for name, val in self.properties.items()}
    def get_reference(self, ref, cache=True):
        """
        Get the JSONSchema object for the given reference code.
        Reference codes should look something like "#/definitions/MyDefinition"
        By default, this will cache objects accessed by their ref code.
        """
        if cache and ref in self._cached_references:
            return self._cached_references[ref]
        path = ref.split('/')
        name = path[-1]
        if path[0] != '#':
            # Only intra-document ('#/...') references are supported.
            raise ValueError(f"Unrecognized $ref format: '{ref}'")
        try:
            # Walk the path components down from the root context.
            schema = self.context
            for key in path[1:]:
                schema = schema[key]
        except KeyError:
            raise ValueError(f"$ref='{ref}' not present in the schema")
        wrapped_schema = self.make_child(schema, name=name)
        if cache:
            self._cached_references[ref] = wrapped_schema
        return wrapped_schema
    @property
    def trait_code(self):
        """Create the trait code for the given typecode"""
        typecode = self.type  # NOTE(review): unused; extractors read the type
        # Required properties may not be left undefined in generated objects.
        if self.metadata.get('required', False):
            kwargs = {'allow_undefined': False}
        else:
            kwargs = {}
        # TODO: handle multiple entries...
        # First matching extractor wins; for/else raises when none match.
        for TraitExtractor in self.trait_extractors:
            trait_extractor = TraitExtractor(self)
            if trait_extractor.check():
                return trait_extractor.trait_code(**kwargs)
        else:
            raise ValueError("No recognized trait code for schema with keys "
                             "{0}".format(tuple(self.schema.keys())))
    def object_code(self):
        """Return code to define a BaseObject for this schema"""
        now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        return jinja2.Template(self.object_template).render(cls=self, date=now)
    def module_spec(self):
        """Return the JSON specification of the module
        This can be passed to ``altair_parser.utils.load_dynamic_module``
        or to ``altair_parser.utils.save_module``
        """
        assert self.is_root
        submodroot = self.classname.lower()  # NOTE(review): currently unused
        # Static support files are copied verbatim from the package sources.
        modspec = {
            'jstraitlets.py': open(os.path.join(os.path.dirname(__file__),
                                                'src', 'jstraitlets.py')).read(),
            'baseobject.py': open(os.path.join(os.path.dirname(__file__),
                                               'src', 'baseobject.py')).read(),
            self.filename: self.object_code()
        }
        modspec['__init__.py'] = '\n'.join([self.import_statement]
                                           + self.module_imports)
        modspec.update({schema.filename: schema.object_code()
                        for schema in self.wrapped_definitions().values()
                        if schema.is_object})
        return modspec
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class Libqglviewer(QMakePackage):
    """libQGLViewer is a C++ library based on Qt that eases the creation of
    OpenGL 3D viewers."""
    homepage = "http://libqglviewer.com/"
    url = "http://libqglviewer.com/src/libQGLViewer-2.7.2.tar.gz"
    git = "https://github.com/GillesDebunne/libQGLViewer.git"
    version('2.7.2', sha256='e2d2799dec5cff74548e951556a1fa06a11d9bcde2ce6593f9c27a17543b7c08')
    # http://libqglviewer.com/installUnix.html
    depends_on('qt+gui+opengl')
    # freeglut only needed with very old Qt (per the install page above)
    depends_on('freeglut', when='^qt@:3.0')
    # qmake project file lives in the QGLViewer/ subdirectory
    build_directory = 'QGLViewer'
    def patch(self):
        # Build dylib instead of Framework on macOS
        if self.spec.satisfies('platform=darwin'):
            filter_file('!staticlib: CONFIG *= lib_bundle', '',
                        join_path('QGLViewer', 'QGLViewer.pro'), string=True)
    def qmake_args(self):
        # Install under the Spack-assigned prefix.
        return ['PREFIX=' + self.prefix]
    @run_after('install')
    def darwin_fix(self):
        # Fix up install names on macOS so dependents link correctly.
        if self.spec.satisfies('platform=darwin'):
            fix_darwin_install_name(self.prefix.lib)
libqglviewer: add dependency on glu (#21438)
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class Libqglviewer(QMakePackage):
    """libQGLViewer is a C++ library based on Qt that eases the creation of
    OpenGL 3D viewers."""
    homepage = "http://libqglviewer.com/"
    url = "http://libqglviewer.com/src/libQGLViewer-2.7.2.tar.gz"
    git = "https://github.com/GillesDebunne/libQGLViewer.git"
    version('2.7.2', sha256='e2d2799dec5cff74548e951556a1fa06a11d9bcde2ce6593f9c27a17543b7c08')
    # http://libqglviewer.com/installUnix.html
    depends_on('qt+gui+opengl')
    # freeglut only needed with very old Qt (per the install page above)
    depends_on('freeglut', when='^qt@:3.0')
    depends_on('glu', type='link')
    # qmake project file lives in the QGLViewer/ subdirectory
    build_directory = 'QGLViewer'
    def patch(self):
        # Build dylib instead of Framework on macOS
        if self.spec.satisfies('platform=darwin'):
            filter_file('!staticlib: CONFIG *= lib_bundle', '',
                        join_path('QGLViewer', 'QGLViewer.pro'), string=True)
    def qmake_args(self):
        # Install under the Spack-assigned prefix.
        return ['PREFIX=' + self.prefix]
    @run_after('install')
    def darwin_fix(self):
        # Fix up install names on macOS so dependents link correctly.
        if self.spec.satisfies('platform=darwin'):
            fix_darwin_install_name(self.prefix.lib)
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import sys
from fabric import api as fab
from fabric.contrib import files as fab_files
from fabric.contrib.files import exists
from fab_deploy.utils import run_as
from ..base import _, Daemon, Debian, RedHat
from ..deployment import command
from ..utils import upload_template, upload_first
class SphinxSearch(Daemon):
    """Fabric deployment recipe for a Sphinx search daemon built from source."""
    use_sudo = False
    version = 'sphinx-0.9.9'
    api_version = 0x116
    # TODO attributes server host and port
    namespace = 'sphinxsearch'
    # When True, supervisord manages the daemon instead of an init script.
    supervisor = False
    def __init__(self, daemon_name=None):
        # Default daemon name is derived from the deployed project's name.
        if daemon_name is None:
            daemon_name = _('%(project_name)s_searchd')
        super(SphinxSearch, self).__init__(daemon_name)
    def dirs(self):
        """Remote directories (relative to the project root) this daemon needs."""
        return ["data/sphinxsearch/",
                "etc/sphinxsearch/"
        ]
    @command
    def configure_daemon(self):
        """Upload the /etc/init.d start script for searchd (needs sudo)."""
        upload_template('sphinxsearch/searchd',
                        _("/etc/init.d/%(project_name)s_searchd"),
                        context=fab.env,
                        use_sudo=True,
                        use_jinja=True,
                        mode=0755,
                        )
    def put_config(self):
        """Upload sphinx.conf and the reindex helper script."""
        if not self.supervisor:
            self.configure_daemon()
        upload_template("sphinxsearch/sphinx.conf",
                        _("%(remote_dir)s/etc/sphinxsearch/sphinx.conf"),
                        fab.env,
                        use_jinja=True)
        upload_template("sphinxsearch/index_all.sh",
                        _("%(remote_dir)s/etc/sphinxsearch/index_all.sh"),
                        fab.env,
                        use_jinja=True,
                        mode=0755,
                        )
    def install_package(self):
        """Download, build and install Sphinx from the source tarball."""
        #fab.env.os.install_package('sphinxsearch')
        fab.run('wget http://sphinxsearch.com/files/%s.tar.gz' % self.version)
        fab.run('tar -xzf %s.tar.gz' % self.version)
        with fab.cd('%s' % self.version):
            configure = './configure'
            # MySQL support is compiled in only when the project DB is MySQL.
            if fab.env.db.name != 'mysql':
                configure += ' --without-mysql'
            fab.run(configure)
            fab.run('make')
            fab.sudo('make install')
        fab.run('rm -Rf %s %s.tar.gz' % (self.version, self.version))
    def install_development_libraries(self):
        """Install headers needed to compile Sphinx (XML, expat, DB client)."""
        os = fab.env.os
        os.install_package('libxml2 libxml2-dev')
        os.install_package('libexpat1 libexpat1-dev')
        fab.env.db.install_headers()
    def install(self, reindex=False):
        """Full install: stop, build, configure, optionally reindex, start."""
        with fab.settings(warn_only=True):
            # Daemon may not exist yet on a fresh host; ignore stop failure.
            self.stop()
        self.install_development_libraries()
        self.install_package()
        self.put_config()
        if reindex:
            self.reindex()
        self.start()
    def update(self, reindex=False):
        """Refresh configs (and optionally the index), then restart."""
        self.put_config()
        if reindex:
            self.reindex()
        self.restart()
    def update_cron(self):
        """Register the periodic reindex job in the remote crontab."""
        fab.env.setdefault('sphinxsearch_time', '10 *')
        fab.env.cron.update(_('%(sphinxsearch_time)s * * *'
                              ' %(remote_dir)s/etc/sphinxsearch/index_all.sh'
                              ' >> /home/%(user)s/log/searchd_reindex.log'),
                            marker='sphinx_reindex')
    @command
    def reindex(self, pty=True):
        """Run the reindex-all helper script on the remote host."""
        fab.run(_("%(remote_dir)s/etc/sphinxsearch/index_all.sh"))
    @command
    def configure(self, install=False, reindex=False):
        """Entry point: install or update the search daemon."""
        sphinx = fab.env.sphinxsearch
        if install:
            sphinx.install(reindex=reindex)
        else:
            sphinx.update(reindex=reindex)
    def supervisor_start(self, pty=False):
        # No-op: presumably supervisord handles startup itself -- confirm.
        pass
    def supervisor_configure(self):
        """Upload the supervisord config (domain-specific file wins)."""
        upload_first([_('sphinxsearch/%(domain)s.conf'),
                      'sphinxsearch/supervisor.conf',
                      ],
                     _('%(remote_dir)s/etc/supervisor/sphinxsearch.conf'),
                     fab.env,
                     use_jinja=True)
class SphinxSearch201(SphinxSearch):
    # Same source-build recipe, newer Sphinx release.
    version = 'sphinx-2.0.1'
class SphinxSearch202(SphinxSearch201):
    """Sphinx 2.0.2-beta installer.

    Prefers a pre-built package (Debian .deb, RHEL yum package) over the
    source build performed by the base class.
    """
    version = 'sphinx-2.0.2-beta'
    # BUG FIX: a property getter is called with the instance as its single
    # argument; the original zero-argument lambdas raised TypeError on
    # every attribute access.
    use_deb = property(lambda self: isinstance(fab.env.os, Debian))
    is_rhel = property(lambda self: isinstance(fab.env.os, RedHat))
    def install_package(self):
        """Install Sphinx from a platform package, else build from source."""
        if self.use_deb:
            filename = '%s-lucid_i386.deb' %\
                self.version.replace('sphinx', 'sphinxsearch')
            fab.run('wget http://sphinxsearch.com/files/%s' % filename)
            try:
                # BUG FIX: 'dpkg -I' merely *inspects* a package; '-i'
                # actually installs it.
                fab.sudo('dpkg -i %s' % filename)
            finally:
                fab.run('rm -R %s' % filename)
        elif self.is_rhel:
            fab.env.os.install_package('sphinx')
        else:
            # BUG FIX: super() takes (class, instance) in that order; the
            # original 'super(self, SphinxSearch201)' raised TypeError.
            super(SphinxSearch202, self).install_package()
class SphinxSearch203(SphinxSearch202):
    # 2.0.3 release; inherits the packaged-install logic from 2.0.2.
    version = 'sphinx-2.0.3-release'
class SphinxSearch207(SphinxSearch202):
    # 2.0.7 release; inherits the packaged-install logic from 2.0.2.
    version = 'sphinx-2.0.7-release'
fixed missing attr
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import sys
from fabric import api as fab
from fabric.contrib import files as fab_files
from fabric.contrib.files import exists
from fab_deploy.utils import run_as
from ..base import _, Daemon, Debian, RedHat
from ..deployment import command
from ..utils import upload_template, upload_first
class SphinxSearch(Daemon):
    """Fabric deployment recipe for a Sphinx search daemon built from source."""
    use_sudo = False
    version = 'sphinx-0.9.9'
    api_version = 0x116
    # TODO attributes server host and port
    namespace = 'sphinxsearch'
    # When True, supervisord manages the daemon instead of an init script.
    supervisor = False
    def __init__(self, daemon_name=None):
        # Default daemon name is derived from the deployed project's name.
        if daemon_name is None:
            daemon_name = _('%(project_name)s_searchd')
        super(SphinxSearch, self).__init__(daemon_name)
    def dirs(self):
        """Remote directories (relative to the project root) this daemon needs."""
        return ["data/sphinxsearch/",
                "etc/sphinxsearch/"
        ]
    @command
    def configure_daemon(self):
        """Upload the /etc/init.d start script for searchd (needs sudo)."""
        upload_template('sphinxsearch/searchd',
                        _("/etc/init.d/%(project_name)s_searchd"),
                        context=fab.env,
                        use_sudo=True,
                        use_jinja=True,
                        mode=0755,
                        )
    def put_config(self):
        """Upload sphinx.conf and the reindex helper script."""
        if not self.supervisor:
            self.configure_daemon()
        upload_template("sphinxsearch/sphinx.conf",
                        _("%(remote_dir)s/etc/sphinxsearch/sphinx.conf"),
                        fab.env,
                        use_jinja=True)
        upload_template("sphinxsearch/index_all.sh",
                        _("%(remote_dir)s/etc/sphinxsearch/index_all.sh"),
                        fab.env,
                        use_jinja=True,
                        mode=0755,
                        )
    def install_package(self):
        """Download, build and install Sphinx from the source tarball."""
        #fab.env.os.install_package('sphinxsearch')
        fab.run('wget http://sphinxsearch.com/files/%s.tar.gz' % self.version)
        fab.run('tar -xzf %s.tar.gz' % self.version)
        with fab.cd('%s' % self.version):
            configure = './configure'
            # MySQL support is compiled in only when the project DB is MySQL.
            if fab.env.db.name != 'mysql':
                configure += ' --without-mysql'
            fab.run(configure)
            fab.run('make')
            fab.sudo('make install')
        fab.run('rm -Rf %s %s.tar.gz' % (self.version, self.version))
    def install_development_libraries(self):
        """Install headers needed to compile Sphinx (XML, expat, DB client)."""
        os = fab.env.os
        os.install_package('libxml2 libxml2-dev')
        os.install_package('libexpat1 libexpat1-dev')
        fab.env.db.install_headers()
    def install(self, reindex=False):
        """Full install: stop, build, configure, optionally reindex, start."""
        with fab.settings(warn_only=True):
            # Daemon may not exist yet on a fresh host; ignore stop failure.
            self.stop()
        self.install_development_libraries()
        self.install_package()
        self.put_config()
        if reindex:
            self.reindex()
        self.start()
    def update(self, reindex=False):
        """Refresh configs (and optionally the index), then restart."""
        self.put_config()
        if reindex:
            self.reindex()
        self.restart()
    def update_cron(self):
        """Register the periodic reindex job in the remote crontab."""
        fab.env.setdefault('sphinxsearch_time', '10 *')
        fab.env.cron.update(_('%(sphinxsearch_time)s * * *'
                              ' %(remote_dir)s/etc/sphinxsearch/index_all.sh'
                              ' >> /home/%(user)s/log/searchd_reindex.log'),
                            marker='sphinx_reindex')
    @command
    def reindex(self, pty=True):
        """Run the reindex-all helper script on the remote host."""
        fab.run(_("%(remote_dir)s/etc/sphinxsearch/index_all.sh"))
    @command
    def configure(self, install=False, reindex=False):
        """Entry point: install or update the search daemon."""
        sphinx = fab.env.sphinxsearch
        if install:
            sphinx.install(reindex=reindex)
        else:
            sphinx.update(reindex=reindex)
    def supervisor_start(self, pty=False):
        # No-op: presumably supervisord handles startup itself -- confirm.
        pass
    def supervisor_configure(self):
        """Upload the supervisord config (domain-specific file wins)."""
        upload_first([_('sphinxsearch/%(domain)s.conf'),
                      'sphinxsearch/supervisor.conf',
                      ],
                     _('%(remote_dir)s/etc/supervisor/sphinxsearch.conf'),
                     fab.env,
                     use_jinja=True)
class SphinxSearch201(SphinxSearch):
    # Same source-build recipe, newer Sphinx release.
    version = 'sphinx-2.0.1'
class SphinxSearch202(SphinxSearch201):
    """Sphinx 2.0.2-beta installer.

    Prefers a pre-built package (Debian .deb, RHEL yum package) over the
    source build performed by the base class.
    """
    version = 'sphinx-2.0.2-beta'
    # Property getters receive the instance as their single argument.
    use_deb = property(lambda obj: isinstance(fab.env.os, Debian))
    is_rhel = property(lambda obj: isinstance(fab.env.os, RedHat))
    def install_package(self):
        """Install Sphinx from a platform package, else build from source."""
        if self.use_deb:
            filename = '%s-lucid_i386.deb' %\
                self.version.replace('sphinx', 'sphinxsearch')
            fab.run('wget http://sphinxsearch.com/files/%s' % filename)
            try:
                # BUG FIX: 'dpkg -I' merely *inspects* a package; '-i'
                # actually installs it.
                fab.sudo('dpkg -i %s' % filename)
            finally:
                fab.run('rm -R %s' % filename)
        elif self.is_rhel:
            fab.env.os.install_package('sphinx')
        else:
            # BUG FIX: super() takes (class, instance) in that order; the
            # original 'super(self, SphinxSearch201)' raised TypeError.
            super(SphinxSearch202, self).install_package()
class SphinxSearch203(SphinxSearch202):
    # 2.0.3 release; inherits the packaged-install logic from 2.0.2.
    version = 'sphinx-2.0.3-release'
class SphinxSearch207(SphinxSearch202):
    # 2.0.7 release; inherits the packaged-install logic from 2.0.2.
    version = 'sphinx-2.0.7-release'
|
import constants, pandas, pdb, os, fnmatch, logging, pdb, numpy, datetime, re, StringIO
from sqlalchemy import create_engine
# Hard-coded absolute (Windows) path to one sample EPIC .ACN output file.
dd = 'C:\\Users\\ritvik\\Documents\\PhD\\Projects\\Lake_States\\EPIC\\OpenLands_LS\\simulations\\LS_2013_10_25_2015_21h_51m\\0.ACN'
import re
def GetTheSentences(infile):
    """Print every text span found between two 'ATMOS CO2' markers in *infile*.

    Spans are matched non-greedily across newlines (re.S) and printed followed
    by a '-----' separator line. Returns None.
    """
    with open(infile) as fp:
        for result in re.findall('ATMOS CO2(.*?)ATMOS CO2', fp.read(), re.S):
            # BUG FIX: removed leftover pdb.set_trace() that stopped every
            # run in the debugger; print() form works on Python 2 and 3.
            print(result)
            print('-----')
# read in file (skiprows=12 skips the fixed EPIC report header)
fl = pandas.read_csv(dd, skiprows=12)
# BUG FIX: removed module-level "import pdb; pdb.set_trace()" debugging stop,
# which halted every non-interactive run of this script at import time.
class EPIC_Output_File():
    """
    Class to read EPIC Output files.

    One instance handles a single EPIC output category (ftype: DGN, ACY, ANN,
    ATG, SCN or ACM). Per-site files are parsed into pandas DataFrames,
    concatenated, and written both to a csv file and to a sqlite database.
    """
    def __init__(self, ftype='', tag=''):
        """
        Constructor: locate the newest tagged output directory and derive the
        csv/sqlite output paths for this file type.
        """
        # Get name of latest output directory (based on what time it was modified)
        out_dir = constants.epic_dir + os.sep + 'output'
        os.chdir(out_dir)
        # Get list of all directories in output folder, select the ones which have the current TAG.
        # BUG FIX: the isdir() test previously checked the output folder itself
        # (always true), so non-directories were never filtered out.
        dirs = [d for d in os.listdir(out_dir) if os.path.isdir(out_dir + os.sep + d)]
        cur_dirs = [d for d in dirs if constants.OUT_TAG in d]
        # Select the TAGged directory which is the latest; cwd is out_dir, so
        # getmtime(d) resolves relative to it.
        self.ldir = sorted(cur_dirs, key=lambda x: os.path.getmtime(x), reverse=True)[0]
        if constants.DO_FOLDER:
            self.epic_out_dir = constants.FOLDER_PATH
        else:
            self.epic_out_dir = constants.epic_dir + os.sep + 'output' + os.sep + self.ldir  # Latest output directory
        # Create a sqlite database in the analysis directory
        self.db_path = constants.db_dir + os.sep + ftype + '_' + tag + '_' + self.ldir + '.db'
        self.db_name = 'sqlite:///' + self.db_path
        self.csv_path = constants.csv_dir + os.sep + ftype + '_' + tag + '_' + self.ldir + '.csv'
        self.engine = create_engine(self.db_name)
        self.ftype = ftype
        self.tag = tag
        self.ifexist = 'replace'  # overwrite existing db tables on rerun
    def get_col_widths(self, fl):
        """Infer fixed-column widths from the first data row of *fl*."""
        df = pandas.read_table(self.epic_out_dir + os.sep + self.ftype + os.sep + fl, skiprows=constants.SKIP, header=None, nrows=1)
        wt = df.iloc[0][0]
        # Assume the columns (right-aligned) are one or more spaces followed by one or more non-space
        cols = re.findall(r'\s+\S+', wt)
        return [len(col) for col in cols]
    ###############################
    # ACM
    ###############################
    def parse_ACM(self, fls):
        """Parse the ACM files in *fls* and store the combined frame."""
        list_df = []
        for fl in fls:
            try:
                df = pandas.read_csv(self.epic_out_dir + os.sep + self.ftype + os.sep + fl, skiprows=constants.SKIP,
                                     skipinitialspace=True, usecols=constants.ACM_PARAMS, sep=' ')
            except Exception:
                # BUG FIX: skip unreadable files; the loop previously fell
                # through and appended a stale (or undefined) df.
                logging.info('Error reading ' + fl)
                continue
            df['site'] = fl[:-4]  # file name minus 4-char extension = site id
            list_df.append(df)
        frame_df = pandas.concat(list_df)
        frame_df.to_csv(self.csv_path)
        # NOTE(review): db_name (the full sqlite URL) is used as the *table*
        # name; sql_to_csv() reads it back the same way, so it is kept as-is.
        frame_df.to_sql(self.db_name, self.engine, if_exists=self.ifexist)
    ###############################
    # ACY
    ###############################
    def parse_ACY(self, fls):
        """Parse the ACY files in *fls* and store the combined frame."""
        list_df = []
        for fl in fls:
            try:
                df = pandas.read_csv(self.epic_out_dir + os.sep + self.ftype + os.sep + fl, skiprows=constants.SKIP,
                                     skipinitialspace=True, usecols=constants.ACY_PARAMS, sep=' ')
            except Exception:
                logging.info('Error reading ' + fl)
                continue  # BUG FIX: do not append a stale/undefined df
            df['site'] = fl[:-4]
            list_df.append(df)
        frame_df = pandas.concat(list_df)
        frame_df.to_csv(self.csv_path)
        frame_df.to_sql(self.db_name, self.engine, if_exists=self.ifexist)
    ###############################
    # ANN
    ###############################
    def parse_ANN(self, fls):
        """Parse the fixed-width ANN files in *fls* and store the combined frame."""
        list_df = []
        # Get column widths from the first file: two leading columns (5 and 4
        # characters), then 8-character data columns for the rest.
        cols_df = pandas.read_table(self.epic_out_dir + os.sep + self.ftype + os.sep + fls[0], skiprows=constants.SKIP,
                                    sep=' ', skipinitialspace=True)
        widths = [5, 4]
        widths.extend([8] * (len(cols_df.columns.values) - 2))
        for fl in fls:
            try:
                df = pandas.read_fwf(self.epic_out_dir + os.sep + self.ftype + os.sep + fl, skiprows=constants.SKIP, sep=' ',
                                     usecols=constants.ANN_PARAMS, skipinitialspace=True, widths=widths)
            except Exception:
                logging.info('Error reading ' + fl)
                continue  # BUG FIX: do not append a stale/undefined df
            df['site'] = fl[:-4]
            list_df.append(df)
        frame_df = pandas.concat(list_df)
        frame_df.to_csv(self.csv_path)
        frame_df.to_sql(self.db_name, self.engine, if_exists=self.ifexist)
    ###############################
    # ATG
    ###############################
    def parse_ATG(self, fls):
        """Parse the ATG files in *fls*; the 'Y' column is renamed to 'YR'."""
        list_df = []
        for fl in fls:
            try:
                df = pandas.read_csv(self.epic_out_dir + os.sep + self.ftype + os.sep + fl,
                                     skiprows=constants.SKIP, skipinitialspace=True, usecols=constants.ATG_PARAMS, sep=' ')
            except Exception:
                logging.info('Error reading ' + fl)
                continue  # BUG FIX: do not append a stale/undefined df
            #time_df = df[(df.Y >= int(constants.START_YR)) & (df.Y <= int(constants.END_YR))]
            df['site'] = fl[:-4]
            df.rename(columns={'Y': 'YR'}, inplace=True)
            list_df.append(df)
        frame_df = pandas.concat(list_df)
        frame_df.to_csv(self.csv_path)
        frame_df.to_sql(self.db_name, self.engine, if_exists=self.ifexist)
    ###############################
    # DGN
    ###############################
    def parse_DGN(self, fls):
        """Parse daily DGN files, clip to START_YR..END_YR, and reduce to the
        per-year maximum of each column."""
        list_df = []
        for fl in fls:
            try:
                df = pandas.read_csv(self.epic_out_dir + os.sep + self.ftype + os.sep + fl, skiprows=constants.SKIP, delim_whitespace=True,
                                     usecols=constants.DGN_PARAMS, parse_dates={"datetime": [0, 1, 2]}, index_col="datetime",
                                     date_parser=lambda x: pandas.datetime.strptime(x, '%Y %m %d'))
            except Exception:
                logging.info('Error reading ' + fl)
                continue  # BUG FIX: previously reused the previous file's df
            start = df.index.searchsorted(datetime.datetime(constants.START_YR, 1, 1))
            end = df.index.searchsorted(datetime.datetime(constants.END_YR, 12, 31))
            # NOTE(review): .ix is deprecated; with integer positions from
            # searchsorted it behaves positionally here (index is datetimes).
            time_df = df.ix[start:end]
            time_df = time_df.groupby(time_df.index.map(lambda x: x.year)).max()
            time_df['site'] = fl[:-4]
            list_df.append(time_df)
        frame_df = pandas.concat(list_df)
        frame_df.to_csv(self.csv_path)
        frame_df.to_sql(self.db_name, self.engine, if_exists=self.ifexist)
    ###############################
    # SCN
    ###############################
    def parse_SCN(self, fls):
        """Parse SCN summary files: pull the TOT value of each SCN_PARAMS
        variable into a one-row frame per site (indexed by END_YR)."""
        list_df = []
        for fl in fls:
            temp_df = pandas.DataFrame(index=[constants.END_YR], columns=constants.SCN_PARAMS)
            try:
                df = pandas.read_csv(self.epic_out_dir + os.sep + self.ftype + os.sep + fl,
                                     skiprows=constants.SKIP_SCN, skipinitialspace=True, sep=' ')
            except Exception:
                logging.info('Error reading ' + fl)
                continue  # BUG FIX: do not populate temp_df from a stale df
            for var in constants.SCN_PARAMS:
                temp_df[var] = df.TOT.ix[var]
            temp_df['site'] = fl[:-4]
            temp_df['YR'] = temp_df.index
            list_df.append(temp_df)
        frame_df = pandas.concat(list_df)
        frame_df.index = range(len(frame_df))
        frame_df.to_csv(self.csv_path)
        frame_df.to_sql(self.db_name, self.engine, if_exists=self.ifexist)
    def collect_epic_output(self, fls):
        """Dispatch *fls* to the parser matching this instance's ftype."""
        parsers = {'DGN': self.parse_DGN, 'ACY': self.parse_ACY,
                   'ANN': self.parse_ANN, 'ATG': self.parse_ATG,
                   'SCN': self.parse_SCN, 'ACM': self.parse_ACM}
        if self.ftype in parsers:
            parsers[self.ftype](fls)
        else:
            logging.info('Wrong file type')
def sql_to_csv():
    """
    SQL stores information from all years. We then extract information for the
    latest year from each table and merge everything (EPICRUN.DAT, soil list,
    SSURGO properties) into a single csv per simulation directory.
    :return: None (writes EPIC_<ldir>.csv)
    """
    epic_fl_types = constants.GET_PARAMS
    dfs = pandas.DataFrame()
    for idx, fl_name in enumerate(epic_fl_types):
        obj = EPIC_Output_File(ftype=fl_name, tag=constants.TAG)
        try:
            df = pandas.read_sql_table(obj.db_name, obj.engine)
        except Exception:
            logging.info(obj.db_name + ' not found')
            continue  # BUG FIX: df would be undefined (or stale) below
        # '!=' replaces the Python-2-only '<>' operator.
        if fl_name != 'SCN':
            max_yr = df.YR.unique().max()
            fyr_df = df[df.YR == max_yr]  # final year df, maybe use constants.END_YR instead of max_yr?
            if idx == 0:
                dfs = fyr_df
            else:
                dfs = pandas.merge(dfs, fyr_df, on=['YR', 'site'], how='outer')
        else:
            # SCN tables already hold one row per site.
            if idx == 0:
                dfs = df
            else:
                dfs = pandas.merge(dfs, df, on=['YR', 'site'], how='outer')
    # Merge with EPICRUN.DAT
    epic_df = pandas.read_csv(constants.sims_dir + os.sep + obj.ldir + os.sep + 'EPICRUN.DAT', sep=r'\s+', header=None)
    epic_df.columns = ['ASTN', 'ISIT', 'IWP1', 'IWP5', 'IWND', 'INPS', 'IOPS', 'IWTH']
    # 1. Read ieSllist.dat and get mukey and corresponding index
    # 2. Convert to dataframe
    # 3. Merge with SSURGO properties csv file
    # 4. Merge EPIC outputs with EPICRUN.DAT
    # 5. Merge EPIC and SSURGO and output to csv
    soil_dict = {}
    with open(constants.sims_dir + os.sep + obj.ldir + os.sep + constants.SLLIST) as f:
        for line in f:
            #Sample line from soil file: 1 "Soils//1003958.sol"
            (key, val) = int(line.split()[0]), int(line.split('//')[1].split('.')[0])
            soil_dict[key] = val
    soil_df = pandas.DataFrame.from_dict(soil_dict, orient='index').reset_index()
    soil_df.columns = ['INPS', 'mukey']
    sgo_file = pandas.read_csv(constants.sgo_dir + os.sep + constants.dominant)
    grp_sgo = sgo_file.groupby('mukey').mean().reset_index()
    grp_sgo = pandas.merge(grp_sgo, soil_df, on='mukey')
    # Merge with EPICRUN
    dfs[['site']] = dfs[['site']].astype(int)
    epic_df[['ASTN']] = epic_df[['ASTN']].astype(int)  # ASTN is site number
    dfs = pandas.merge(dfs, epic_df, left_on='site', right_on='ASTN')
    # Merge with SSURGO file
    dfs = pandas.merge(dfs, grp_sgo, on='INPS')  # INPS is identifier of soil files
    dfs.to_csv(constants.csv_dir + os.sep + 'EPIC_' + obj.ldir + '.csv')
if __name__ == '__main__':
    # For every requested EPIC output category: parse its files into
    # csv + sqlite, then merge everything into a single csv.
    for idx, fl_name in enumerate(constants.GET_PARAMS):
        print idx, fl_name
        obj = EPIC_Output_File(ftype=fl_name, tag=constants.TAG)
        # Get list of all output files for each EPIC output category
        try:
            list_fls = fnmatch.filter(os.listdir(obj.epic_out_dir + os.sep + constants.GET_PARAMS[idx] + os.sep), '*.' + fl_name)
            # Collect EPIC output to database and csv
            if len(list_fls) > 0:
                obj.collect_epic_output(list_fls)
        except:
            # Best-effort: a failure in one category should not stop the rest.
            logging.info('Error in reading ' + fl_name)
    # Extract results
    sql_to_csv()
minor
import constants, pandas, pdb, os, fnmatch, logging, pdb, numpy, datetime, re, StringIO
from sqlalchemy import create_engine
# Hard-coded absolute (Windows) path to one sample EPIC .ACN output file.
dd = 'C:\\Users\\ritvik\\Documents\\PhD\\Projects\\Lake_States\\EPIC\\OpenLands_LS\\simulations\\LS_2013_10_25_2015_21h_51m\\0.ACN'
import re
def GetTheSentences(infile):
    """Print every text span found between two 'ATMOS CO2' markers in *infile*.

    Spans are matched non-greedily across newlines (re.S) and printed followed
    by a '-----' separator line. Returns None.
    """
    with open(infile) as fp:
        for result in re.findall('ATMOS CO2(.*?)ATMOS CO2', fp.read(), re.S):
            # BUG FIX: removed leftover pdb.set_trace() that stopped every
            # run in the debugger; print() form works on Python 2 and 3.
            print(result)
            print('-----')
# read in file
# (skiprows=12 skips the fixed-size EPIC report header)
fl = pandas.read_csv(dd, skiprows=12)
class EPIC_Output_File():
    """
    Class to read EPIC Output files.

    One instance handles a single EPIC output category (ftype: DGN, ACY, ANN,
    ATG, SCN or ACM). Per-site files are parsed into pandas DataFrames,
    concatenated, and written both to a csv file and to a sqlite database.
    """
    def __init__(self, ftype='', tag=''):
        """
        Constructor: locate the newest tagged output directory and derive the
        csv/sqlite output paths for this file type.
        """
        # Get name of latest output directory (based on what time it was modified)
        out_dir = constants.epic_dir + os.sep + 'output'
        os.chdir(out_dir)
        # Get list of all directories in output folder, select the ones which have the current TAG.
        # BUG FIX: the isdir() test previously checked the output folder itself
        # (always true), so non-directories were never filtered out.
        dirs = [d for d in os.listdir(out_dir) if os.path.isdir(out_dir + os.sep + d)]
        cur_dirs = [d for d in dirs if constants.OUT_TAG in d]
        # Select the TAGged directory which is the latest; cwd is out_dir, so
        # getmtime(d) resolves relative to it.
        self.ldir = sorted(cur_dirs, key=lambda x: os.path.getmtime(x), reverse=True)[0]
        if constants.DO_FOLDER:
            self.epic_out_dir = constants.FOLDER_PATH
        else:
            self.epic_out_dir = constants.epic_dir + os.sep + 'output' + os.sep + self.ldir  # Latest output directory
        # Create a sqlite database in the analysis directory
        self.db_path = constants.db_dir + os.sep + ftype + '_' + tag + '_' + self.ldir + '.db'
        self.db_name = 'sqlite:///' + self.db_path
        self.csv_path = constants.csv_dir + os.sep + ftype + '_' + tag + '_' + self.ldir + '.csv'
        self.engine = create_engine(self.db_name)
        self.ftype = ftype
        self.tag = tag
        self.ifexist = 'replace'  # overwrite existing db tables on rerun
    def get_col_widths(self, fl):
        """Infer fixed-column widths from the first data row of *fl*."""
        df = pandas.read_table(self.epic_out_dir + os.sep + self.ftype + os.sep + fl, skiprows=constants.SKIP, header=None, nrows=1)
        wt = df.iloc[0][0]
        # Assume the columns (right-aligned) are one or more spaces followed by one or more non-space
        cols = re.findall(r'\s+\S+', wt)
        return [len(col) for col in cols]
    ###############################
    # ACM
    ###############################
    def parse_ACM(self, fls):
        """Parse the ACM files in *fls* and store the combined frame."""
        list_df = []
        for fl in fls:
            try:
                df = pandas.read_csv(self.epic_out_dir + os.sep + self.ftype + os.sep + fl, skiprows=constants.SKIP,
                                     skipinitialspace=True, usecols=constants.ACM_PARAMS, sep=' ')
            except Exception:
                # BUG FIX: skip unreadable files; the loop previously fell
                # through and appended a stale (or undefined) df.
                logging.info('Error reading ' + fl)
                continue
            df['site'] = fl[:-4]  # file name minus 4-char extension = site id
            list_df.append(df)
        frame_df = pandas.concat(list_df)
        frame_df.to_csv(self.csv_path)
        # NOTE(review): db_name (the full sqlite URL) is used as the *table*
        # name; sql_to_csv() reads it back the same way, so it is kept as-is.
        frame_df.to_sql(self.db_name, self.engine, if_exists=self.ifexist)
    ###############################
    # ACY
    ###############################
    def parse_ACY(self, fls):
        """Parse the ACY files in *fls* and store the combined frame."""
        list_df = []
        for fl in fls:
            try:
                df = pandas.read_csv(self.epic_out_dir + os.sep + self.ftype + os.sep + fl, skiprows=constants.SKIP,
                                     skipinitialspace=True, usecols=constants.ACY_PARAMS, sep=' ')
            except Exception:
                logging.info('Error reading ' + fl)
                continue  # BUG FIX: do not append a stale/undefined df
            df['site'] = fl[:-4]
            list_df.append(df)
        frame_df = pandas.concat(list_df)
        frame_df.to_csv(self.csv_path)
        frame_df.to_sql(self.db_name, self.engine, if_exists=self.ifexist)
    ###############################
    # ANN
    ###############################
    def parse_ANN(self, fls):
        """Parse the fixed-width ANN files in *fls* and store the combined frame."""
        list_df = []
        # Get column widths from the first file: two leading columns (5 and 4
        # characters), then 8-character data columns for the rest.
        cols_df = pandas.read_table(self.epic_out_dir + os.sep + self.ftype + os.sep + fls[0], skiprows=constants.SKIP,
                                    sep=' ', skipinitialspace=True)
        widths = [5, 4]
        widths.extend([8] * (len(cols_df.columns.values) - 2))
        for fl in fls:
            try:
                df = pandas.read_fwf(self.epic_out_dir + os.sep + self.ftype + os.sep + fl, skiprows=constants.SKIP, sep=' ',
                                     usecols=constants.ANN_PARAMS, skipinitialspace=True, widths=widths)
            except Exception:
                logging.info('Error reading ' + fl)
                continue  # BUG FIX: do not append a stale/undefined df
            df['site'] = fl[:-4]
            list_df.append(df)
        frame_df = pandas.concat(list_df)
        frame_df.to_csv(self.csv_path)
        frame_df.to_sql(self.db_name, self.engine, if_exists=self.ifexist)
    ###############################
    # ATG
    ###############################
    def parse_ATG(self, fls):
        """Parse the ATG files in *fls*; the 'Y' column is renamed to 'YR'."""
        list_df = []
        for fl in fls:
            try:
                df = pandas.read_csv(self.epic_out_dir + os.sep + self.ftype + os.sep + fl,
                                     skiprows=constants.SKIP, skipinitialspace=True, usecols=constants.ATG_PARAMS, sep=' ')
            except Exception:
                logging.info('Error reading ' + fl)
                continue  # BUG FIX: do not append a stale/undefined df
            #time_df = df[(df.Y >= int(constants.START_YR)) & (df.Y <= int(constants.END_YR))]
            df['site'] = fl[:-4]
            df.rename(columns={'Y': 'YR'}, inplace=True)
            list_df.append(df)
        frame_df = pandas.concat(list_df)
        frame_df.to_csv(self.csv_path)
        frame_df.to_sql(self.db_name, self.engine, if_exists=self.ifexist)
    ###############################
    # DGN
    ###############################
    def parse_DGN(self, fls):
        """Parse daily DGN files, clip to START_YR..END_YR, and reduce to the
        per-year maximum of each column."""
        list_df = []
        for fl in fls:
            try:
                df = pandas.read_csv(self.epic_out_dir + os.sep + self.ftype + os.sep + fl, skiprows=constants.SKIP, delim_whitespace=True,
                                     usecols=constants.DGN_PARAMS, parse_dates={"datetime": [0, 1, 2]}, index_col="datetime",
                                     date_parser=lambda x: pandas.datetime.strptime(x, '%Y %m %d'))
            except Exception:
                logging.info('Error reading ' + fl)
                continue  # BUG FIX: previously reused the previous file's df
            start = df.index.searchsorted(datetime.datetime(constants.START_YR, 1, 1))
            end = df.index.searchsorted(datetime.datetime(constants.END_YR, 12, 31))
            # NOTE(review): .ix is deprecated; with integer positions from
            # searchsorted it behaves positionally here (index is datetimes).
            time_df = df.ix[start:end]
            time_df = time_df.groupby(time_df.index.map(lambda x: x.year)).max()
            time_df['site'] = fl[:-4]
            list_df.append(time_df)
        frame_df = pandas.concat(list_df)
        frame_df.to_csv(self.csv_path)
        frame_df.to_sql(self.db_name, self.engine, if_exists=self.ifexist)
    ###############################
    # SCN
    ###############################
    def parse_SCN(self, fls):
        """Parse SCN summary files: pull the TOT value of each SCN_PARAMS
        variable into a one-row frame per site (indexed by END_YR)."""
        list_df = []
        for fl in fls:
            temp_df = pandas.DataFrame(index=[constants.END_YR], columns=constants.SCN_PARAMS)
            try:
                df = pandas.read_csv(self.epic_out_dir + os.sep + self.ftype + os.sep + fl,
                                     skiprows=constants.SKIP_SCN, skipinitialspace=True, sep=' ')
            except Exception:
                logging.info('Error reading ' + fl)
                continue  # BUG FIX: do not populate temp_df from a stale df
            for var in constants.SCN_PARAMS:
                temp_df[var] = df.TOT.ix[var]
            temp_df['site'] = fl[:-4]
            temp_df['YR'] = temp_df.index
            list_df.append(temp_df)
        frame_df = pandas.concat(list_df)
        frame_df.index = range(len(frame_df))
        frame_df.to_csv(self.csv_path)
        frame_df.to_sql(self.db_name, self.engine, if_exists=self.ifexist)
    def collect_epic_output(self, fls):
        """Dispatch *fls* to the parser matching this instance's ftype."""
        parsers = {'DGN': self.parse_DGN, 'ACY': self.parse_ACY,
                   'ANN': self.parse_ANN, 'ATG': self.parse_ATG,
                   'SCN': self.parse_SCN, 'ACM': self.parse_ACM}
        if self.ftype in parsers:
            parsers[self.ftype](fls)
        else:
            logging.info('Wrong file type')
def sql_to_csv():
    """
    SQL stores information from all years. We then extract information for the
    latest year from each table and merge everything (EPICRUN.DAT, soil list,
    SSURGO properties) into a single csv per simulation directory.
    :return: None (writes EPIC_<ldir>.csv)
    """
    epic_fl_types = constants.GET_PARAMS
    dfs = pandas.DataFrame()
    for idx, fl_name in enumerate(epic_fl_types):
        obj = EPIC_Output_File(ftype=fl_name, tag=constants.TAG)
        try:
            df = pandas.read_sql_table(obj.db_name, obj.engine)
        except Exception:
            logging.info(obj.db_name + ' not found')
            continue  # BUG FIX: df would be undefined (or stale) below
        # '!=' replaces the Python-2-only '<>' operator.
        if fl_name != 'SCN':
            max_yr = df.YR.unique().max()
            fyr_df = df[df.YR == max_yr]  # final year df, maybe use constants.END_YR instead of max_yr?
            if idx == 0:
                dfs = fyr_df
            else:
                dfs = pandas.merge(dfs, fyr_df, on=['YR', 'site'], how='outer')
        else:
            # SCN tables already hold one row per site.
            if idx == 0:
                dfs = df
            else:
                dfs = pandas.merge(dfs, df, on=['YR', 'site'], how='outer')
    # Merge with EPICRUN.DAT
    epic_df = pandas.read_csv(constants.sims_dir + os.sep + obj.ldir + os.sep + 'EPICRUN.DAT', sep=r'\s+', header=None)
    epic_df.columns = ['ASTN', 'ISIT', 'IWP1', 'IWP5', 'IWND', 'INPS', 'IOPS', 'IWTH']
    # 1. Read ieSllist.dat and get mukey and corresponding index
    # 2. Convert to dataframe
    # 3. Merge with SSURGO properties csv file
    # 4. Merge EPIC outputs with EPICRUN.DAT
    # 5. Merge EPIC and SSURGO and output to csv
    soil_dict = {}
    with open(constants.sims_dir + os.sep + obj.ldir + os.sep + constants.SLLIST) as f:
        for line in f:
            #Sample line from soil file: 1 "Soils//1003958.sol"
            (key, val) = int(line.split()[0]), int(line.split('//')[1].split('.')[0])
            soil_dict[key] = val
    soil_df = pandas.DataFrame.from_dict(soil_dict, orient='index').reset_index()
    soil_df.columns = ['INPS', 'mukey']
    sgo_file = pandas.read_csv(constants.sgo_dir + os.sep + constants.dominant)
    grp_sgo = sgo_file.groupby('mukey').mean().reset_index()
    grp_sgo = pandas.merge(grp_sgo, soil_df, on='mukey')
    # Merge with EPICRUN
    dfs[['site']] = dfs[['site']].astype(int)
    epic_df[['ASTN']] = epic_df[['ASTN']].astype(int)  # ASTN is site number
    dfs = pandas.merge(dfs, epic_df, left_on='site', right_on='ASTN')
    # Merge with SSURGO file
    dfs = pandas.merge(dfs, grp_sgo, on='INPS')  # INPS is identifier of soil files
    dfs.to_csv(constants.csv_dir + os.sep + 'EPIC_' + obj.ldir + '.csv')
if __name__ == '__main__':
    # For every requested EPIC output category: parse its files into
    # csv + sqlite, then merge everything into a single csv.
    for idx, fl_name in enumerate(constants.GET_PARAMS):
        print idx, fl_name
        obj = EPIC_Output_File(ftype=fl_name, tag=constants.TAG)
        # Get list of all output files for each EPIC output category
        try:
            list_fls = fnmatch.filter(os.listdir(obj.epic_out_dir + os.sep + constants.GET_PARAMS[idx] + os.sep), '*.' + fl_name)
            # Collect EPIC output to database and csv
            if len(list_fls) > 0:
                obj.collect_epic_output(list_fls)
        except:
            # Best-effort: a failure in one category should not stop the rest.
            logging.info('Error in reading ' + fl_name)
    # Extract results
    sql_to_csv()
|
"""
nbttreewidget
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from PySide import QtGui, QtCore
from PySide.QtCore import Qt
from mcedit2.command import SimpleRevisionCommand
from mcedit2.util.lazyprop import weakrefprop
from mcedit2.widgets.nbttree.nbttreemodel import NBTFilterProxyModel, NBTPathRole, NBTIcon, NBTTreeModel
from mcedit2.util.load_ui import registerCustomWidget
from mcedit2.widgets.layout import Column
log = logging.getLogger(__name__)
class NBTDataChangeCommand(SimpleRevisionCommand):
    # Marker subclass: undo-stack command for NBT edits; all behavior comes
    # from SimpleRevisionCommand.
    pass
@registerCustomWidget
class NBTEditorWidget(QtGui.QWidget):
    """
    Tree-view widget for displaying and editing an NBT tag hierarchy.

    Every edit (rename, value change, insert, remove) is wrapped in an
    NBTDataChangeCommand pushed onto the editor session's undo stack, and
    editMade is emitted so owners can mark the parent structure dirty.
    """
    undoCommandPrefixText = ""
    editorSession = weakrefprop()
    proxyModel = None
    rootTag = None
    editMade = QtCore.Signal() # emitted to allow clients to mark the NBT tree's parent structure as dirty - xxx really??
    def __init__(self, *args, **kwargs):
        super(NBTEditorWidget, self).__init__(*args, **kwargs)
        self.model = None
        self.treeView = QtGui.QTreeView()
        self.treeView.setAlternatingRowColors(True)
        self.treeView.clicked.connect(self.itemClicked)
        self.treeView.expanded.connect(self.itemExpanded)
        self.setLayout(Column(self.treeView))
        # Popup menu with one entry per NBT tag type, shown when the user
        # clicks the "add child" column of a compound/untyped list.
        self.nbtTypesMenu = QtGui.QMenu()
        self.nbtTypesMenu.addAction(NBTIcon(1), self.tr("Byte"), self.addByte)
        self.nbtTypesMenu.addAction(NBTIcon(2), self.tr("Short"), self.addShort)
        self.nbtTypesMenu.addAction(NBTIcon(3), self.tr("Int"), self.addInt)
        self.nbtTypesMenu.addAction(NBTIcon(4), self.tr("Long"), self.addLong)
        self.nbtTypesMenu.addAction(NBTIcon(5), self.tr("Float"), self.addFloat)
        self.nbtTypesMenu.addAction(NBTIcon(6), self.tr("Double"), self.addDouble)
        self.nbtTypesMenu.addAction(NBTIcon(8), self.tr("String"), self.addString)
        self.nbtTypesMenu.addAction(NBTIcon(9), self.tr("List"), self.addList)
        self.nbtTypesMenu.addAction(NBTIcon(10), self.tr("Compound"), self.addCompound)
        self.nbtTypesMenu.addAction(NBTIcon(7), self.tr("Byte Array"), self.addByteArray)
        self.nbtTypesMenu.addAction(NBTIcon(11), self.tr("Int Array"), self.addIntArray)
        self.nbtTypesMenu.addAction(NBTIcon(12), self.tr("Short Array"), self.addShortArray)
    def setRootTag(self, rootTag, keepExpanded=False):
        """
        Replace the displayed tag tree with *rootTag*.

        With keepExpanded=True, previously expanded branches and the current
        selection are restored after the model swap (matched by NBTPathRole).
        """
        if rootTag is self.rootTag:
            return
        self.rootTag = rootTag
        if rootTag is None:
            self.treeView.setModel(None)
            self.model = None
            return
        self.model = NBTTreeModel(rootTag)
        expanded = []
        current = None
        if keepExpanded and self.proxyModel:
            # Record the selection and all expanded paths from the old model.
            current = self.proxyModel.data(self.treeView.currentIndex(), NBTPathRole)
            def addExpanded(parentIndex):
                for row in range(self.proxyModel.rowCount(parentIndex)):
                    index = self.proxyModel.index(row, 0, parentIndex)
                    if self.treeView.isExpanded(index):
                        expanded.append(self.proxyModel.data(index, NBTPathRole))
                        addExpanded(index)
            addExpanded(QtCore.QModelIndex())
        self.model.dataChanged.connect(self.dataDidChange)
        self.model.rowsInserted.connect(self.rowsDidInsert)
        self.model.rowsRemoved.connect(self.rowsDidRemove)
        self.proxyModel = NBTFilterProxyModel(self)
        self.proxyModel.setSourceModel(self.model)
        # self.proxyModel.setDynamicSortFilter(True)
        # NOTE(review): the view is given the *source* model here although a
        # proxy was just built; the match()/setExpanded()/setCurrentIndex()
        # calls below use proxyModel indexes - confirm which model the view
        # should actually display.
        self.treeView.setModel(self.model)
        header = self.treeView.header()
        header.setStretchLastSection(False)
        header.setResizeMode(1, header.ResizeMode.Stretch)
        header.setResizeMode(2, header.ResizeMode.Fixed)
        header.setResizeMode(3, header.ResizeMode.Fixed)
        if keepExpanded:
            # Re-expand the recorded paths in the new model.
            for path in expanded:
                matches = self.proxyModel.match(self.proxyModel.index(0, 0, QtCore.QModelIndex()),
                                                NBTPathRole, path, flags=Qt.MatchExactly | Qt.MatchRecursive)
                for i in matches:
                    self.treeView.setExpanded(i, True)
            if current is not None:
                matches = self.proxyModel.match(self.proxyModel.index(0, 0, QtCore.QModelIndex()),
                                                NBTPathRole, current, flags=Qt.MatchExactly | Qt.MatchRecursive)
                if len(matches):
                    self.treeView.setCurrentIndex(matches[0])
        else:
            self.treeView.expandToDepth(0)
        self.treeView.sortByColumn(0, Qt.AscendingOrder)
        self.treeView.resizeColumnToContents(0)
        self.treeView.resizeColumnToContents(1)
        self.treeView.resizeColumnToContents(2)
        self.treeView.resizeColumnToContents(3)
    def itemExpanded(self):
        self.treeView.resizeColumnToContents(0)
    # Index of the compound/list tag a new child is being added to, between
    # the menu popup and the chosen addItemWithType() call.
    indexAddingTo = None
    def itemClicked(self, index):
        """Handle clicks: column 2 adds a child tag, column 3 removes the row."""
        #index = self.proxyModel.mapToSource(index)
        item = self.model.getItem(index)
        if index.column() == 2:
            if item.isList and item.tag.list_type:
                # Typed list: append a child of the list's element type.
                row = item.childCount()
                self.model.insertRow(row, index)
                newItemIndex = self.model.index(row, 1, index)
                #self.treeView.setCurrentIndex(self.proxyModel.mapFromSource(newItemIndex))
                #self.treeView.edit(self.proxyModel.mapFromSource(newItemIndex))
            if item.isCompound or (item.isList and not item.tag.list_type):
                # Compound or untyped list: let the user pick the tag type.
                self.indexAddingTo = index
                self.nbtTypesMenu.move(QtGui.QCursor.pos())
                self.nbtTypesMenu.show()
        if index.column() == 3:
            parent = self.model.parent(index)
            # Remember the name now; the row is gone when rowsDidRemove fires.
            self.doomedTagName = self.tagNameForUndo(index)
            self.model.removeRow(index.row(), parent)
    def addItemWithType(self, tagID):
        """Append a new child of NBT type *tagID* to the pending parent index."""
        if not self.indexAddingTo:
            return
        item = self.model.getItem(self.indexAddingTo)
        row = item.childCount()
        self.model.insertRow(row, self.indexAddingTo, tagID)
        newItemIndex = self.model.index(row, 0 if item.isCompound else 1, self.indexAddingTo)
        #self.treeView.setCurrentIndex(self.proxyModel.mapFromSource(newItemIndex))
        #self.treeView.edit(self.proxyModel.mapFromSource(newItemIndex))
        self.indexAddingTo = None
    def addByte(self):
        self.addItemWithType(1)
    def addShort(self):
        self.addItemWithType(2)
    def addInt(self):
        self.addItemWithType(3)
    def addLong(self):
        self.addItemWithType(4)
    def addFloat(self):
        self.addItemWithType(5)
    def addDouble(self):
        self.addItemWithType(6)
    def addByteArray(self):
        self.addItemWithType(7)
    def addString(self):
        self.addItemWithType(8)
    def addList(self):
        self.addItemWithType(9)
    def addCompound(self):
        self.addItemWithType(10)
    def addIntArray(self):
        self.addItemWithType(11)
    def addShortArray(self):
        self.addItemWithType(12)
    def tagNameForUndo(self, index):
        """Return a human-readable tag name for undo text; list elements are
        rendered as '<parent name> #<position>'."""
        parent = self.model.parent(index)
        item = self.model.getItem(index)
        parentItem = self.model.getItem(parent)
        if parentItem is not None and parentItem.isList:
            name = "%s #%d" % (self.tagNameForUndo(parent), parentItem.tag.index(item.tag))
        else:
            name = item.tag.name
        return name
    def dataDidChange(self, index):
        """Wrap a rename (column 0) or value change (column 1) in an undo command."""
        name = self.tagNameForUndo(index)
        if index.column() == 0:
            text = "%sRename NBT tag %s" % (self.undoCommandPrefixText, name)
        elif index.column() == 1:
            text = "%sChange value of NBT tag %s" % (self.undoCommandPrefixText, name)
        else:
            text = "Unknown data changed."
        command = NBTDataChangeCommand(self.editorSession, text)
        with command.begin():
            self.editMade.emit()
            self.editorSession.worldEditor.syncToDisk()
        self.editorSession.pushCommand(command)
    def rowsDidInsert(self, index):
        """Wrap a row insertion in an undo command."""
        # NOTE(review): Qt's rowsInserted delivers the *parent* index; taking
        # .parent() again here looks one level too high - confirm.
        name = self.tagNameForUndo(index.parent())
        text = "%sInsert NBT tag under %s" % (self.undoCommandPrefixText, name)
        command = NBTDataChangeCommand(self.editorSession, text)
        with command.begin():
            self.editMade.emit()
            self.editorSession.worldEditor.syncToDisk()
        self.editorSession.pushCommand(command)
    # Name of the tag being removed, stashed by itemClicked before removeRow.
    doomedTagName = None
    def rowsDidRemove(self, index, start, end):
        """Wrap a row removal in an undo command (uses doomedTagName stashed
        by itemClicked, since the row no longer exists here)."""
        name = self.tagNameForUndo(index)
        text = "%sRemove NBT tag %s from %s" % (self.undoCommandPrefixText, self.doomedTagName, name)
        command = NBTDataChangeCommand(self.editorSession, text)
        with command.begin():
            self.editMade.emit()
            self.editorSession.worldEditor.syncToDisk()
        self.editorSession.pushCommand(command)
NBTEditor uses its sorting proxy model again. Now to find out what was so wrong with it that it was removed.
"""
nbttreewidget
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from PySide import QtGui, QtCore
from PySide.QtCore import Qt
from mcedit2.command import SimpleRevisionCommand
from mcedit2.util.lazyprop import weakrefprop
from mcedit2.widgets.nbttree.nbttreemodel import NBTFilterProxyModel, NBTPathRole, NBTIcon, NBTTreeModel
from mcedit2.util.load_ui import registerCustomWidget
from mcedit2.widgets.layout import Column
log = logging.getLogger(__name__)
class NBTDataChangeCommand(SimpleRevisionCommand):
    # Marker subclass: undo-stack command for NBT edits; all behavior comes
    # from SimpleRevisionCommand.
    pass
@registerCustomWidget
class NBTEditorWidget(QtGui.QWidget):
undoCommandPrefixText = ""
editorSession = weakrefprop()
proxyModel = None
rootTag = None
editMade = QtCore.Signal() # emitted to allow clients to mark the NBT tree's parent structure as dirty - xxx really??
def __init__(self, *args, **kwargs):
super(NBTEditorWidget, self).__init__(*args, **kwargs)
self.model = None
self.treeView = QtGui.QTreeView()
self.treeView.setAlternatingRowColors(True)
self.treeView.clicked.connect(self.itemClicked)
self.treeView.expanded.connect(self.itemExpanded)
self.setLayout(Column(self.treeView))
self.nbtTypesMenu = QtGui.QMenu()
self.nbtTypesMenu.addAction(NBTIcon(1), self.tr("Byte"), self.addByte)
self.nbtTypesMenu.addAction(NBTIcon(2), self.tr("Short"), self.addShort)
self.nbtTypesMenu.addAction(NBTIcon(3), self.tr("Int"), self.addInt)
self.nbtTypesMenu.addAction(NBTIcon(4), self.tr("Long"), self.addLong)
self.nbtTypesMenu.addAction(NBTIcon(5), self.tr("Float"), self.addFloat)
self.nbtTypesMenu.addAction(NBTIcon(6), self.tr("Double"), self.addDouble)
self.nbtTypesMenu.addAction(NBTIcon(8), self.tr("String"), self.addString)
self.nbtTypesMenu.addAction(NBTIcon(9), self.tr("List"), self.addList)
self.nbtTypesMenu.addAction(NBTIcon(10), self.tr("Compound"), self.addCompound)
self.nbtTypesMenu.addAction(NBTIcon(7), self.tr("Byte Array"), self.addByteArray)
self.nbtTypesMenu.addAction(NBTIcon(11), self.tr("Int Array"), self.addIntArray)
self.nbtTypesMenu.addAction(NBTIcon(12), self.tr("Short Array"), self.addShortArray)
def setRootTag(self, rootTag, keepExpanded=False):
if rootTag is self.rootTag:
return
self.rootTag = rootTag
if rootTag is None:
self.treeView.setModel(None)
self.model = None
return
self.model = NBTTreeModel(rootTag)
expanded = []
current = None
if keepExpanded and self.proxyModel:
current = self.proxyModel.data(self.treeView.currentIndex(), NBTPathRole)
def addExpanded(parentIndex):
for row in range(self.proxyModel.rowCount(parentIndex)):
index = self.proxyModel.index(row, 0, parentIndex)
if self.treeView.isExpanded(index):
expanded.append(self.proxyModel.data(index, NBTPathRole))
addExpanded(index)
addExpanded(QtCore.QModelIndex())
self.model.dataChanged.connect(self.dataDidChange)
self.model.rowsInserted.connect(self.rowsDidInsert)
self.model.rowsRemoved.connect(self.rowsDidRemove)
self.proxyModel = NBTFilterProxyModel(self)
self.proxyModel.setSourceModel(self.model)
# self.proxyModel.setDynamicSortFilter(True)
self.treeView.setModel(self.proxyModel)
header = self.treeView.header()
header.setStretchLastSection(False)
header.setResizeMode(1, header.ResizeMode.Stretch)
header.setResizeMode(2, header.ResizeMode.Fixed)
header.setResizeMode(3, header.ResizeMode.Fixed)
if keepExpanded:
for path in expanded:
matches = self.proxyModel.match(self.proxyModel.index(0, 0, QtCore.QModelIndex()),
NBTPathRole, path, flags=Qt.MatchExactly | Qt.MatchRecursive)
for i in matches:
self.treeView.setExpanded(i, True)
if current is not None:
matches = self.proxyModel.match(self.proxyModel.index(0, 0, QtCore.QModelIndex()),
NBTPathRole, current, flags=Qt.MatchExactly | Qt.MatchRecursive)
if len(matches):
self.treeView.setCurrentIndex(matches[0])
else:
self.treeView.expandToDepth(0)
self.treeView.sortByColumn(0, Qt.AscendingOrder)
self.treeView.resizeColumnToContents(0)
self.treeView.resizeColumnToContents(1)
self.treeView.resizeColumnToContents(2)
self.treeView.resizeColumnToContents(3)
def itemExpanded(self):
self.treeView.resizeColumnToContents(0)
indexAddingTo = None
def itemClicked(self, index):
#index = self.proxyModel.mapToSource(index)
item = self.model.getItem(index)
if index.column() == 2:
if item.isList and item.tag.list_type:
row = item.childCount()
self.model.insertRow(row, index)
newItemIndex = self.model.index(row, 1, index)
#self.treeView.setCurrentIndex(self.proxyModel.mapFromSource(newItemIndex))
#self.treeView.edit(self.proxyModel.mapFromSource(newItemIndex))
if item.isCompound or (item.isList and not item.tag.list_type):
self.indexAddingTo = index
self.nbtTypesMenu.move(QtGui.QCursor.pos())
self.nbtTypesMenu.show()
if index.column() == 3:
parent = self.model.parent(index)
self.doomedTagName = self.tagNameForUndo(index)
self.model.removeRow(index.row(), parent)
def addItemWithType(self, tagID):
if not self.indexAddingTo:
return
item = self.model.getItem(self.indexAddingTo)
row = item.childCount()
self.model.insertRow(row, self.indexAddingTo, tagID)
newItemIndex = self.model.index(row, 0 if item.isCompound else 1, self.indexAddingTo)
#self.treeView.setCurrentIndex(self.proxyModel.mapFromSource(newItemIndex))
#self.treeView.edit(self.proxyModel.mapFromSource(newItemIndex))
self.indexAddingTo = None
def addByte(self):
self.addItemWithType(1)
def addShort(self):
self.addItemWithType(2)
def addInt(self):
self.addItemWithType(3)
def addLong(self):
self.addItemWithType(4)
def addFloat(self):
self.addItemWithType(5)
def addDouble(self):
self.addItemWithType(6)
def addByteArray(self):
self.addItemWithType(7)
def addString(self):
self.addItemWithType(8)
def addList(self):
self.addItemWithType(9)
def addCompound(self):
self.addItemWithType(10)
def addIntArray(self):
self.addItemWithType(11)
def addShortArray(self):
self.addItemWithType(12)
def tagNameForUndo(self, index):
parent = self.model.parent(index)
item = self.model.getItem(index)
parentItem = self.model.getItem(parent)
if parentItem is not None and parentItem.isList:
name = "%s #%d" % (self.tagNameForUndo(parent), parentItem.tag.index(item.tag))
else:
name = item.tag.name
return name
def dataDidChange(self, index):
name = self.tagNameForUndo(index)
if index.column() == 0:
text = "%sRename NBT tag %s" % (self.undoCommandPrefixText, name)
elif index.column() == 1:
text = "%sChange value of NBT tag %s" % (self.undoCommandPrefixText, name)
else:
text = "Unknown data changed."
command = NBTDataChangeCommand(self.editorSession, text)
with command.begin():
self.editMade.emit()
self.editorSession.worldEditor.syncToDisk()
self.editorSession.pushCommand(command)
def rowsDidInsert(self, index):
    """Record a row insertion as an undoable command and sync to disk."""
    # Name the parent tag the new row was inserted under.
    name = self.tagNameForUndo(index.parent())
    text = "%sInsert NBT tag under %s" % (self.undoCommandPrefixText, name)
    command = NBTDataChangeCommand(self.editorSession, text)
    with command.begin():
        self.editMade.emit()
        self.editorSession.worldEditor.syncToDisk()
    self.editorSession.pushCommand(command)
# Name of the tag about to be removed; captured before the row disappears
# (by the remove-button handler) so rowsDidRemove can label the undo command.
doomedTagName = None

def rowsDidRemove(self, index, start, end):
    """Record a row removal as an undoable command and sync to disk."""
    name = self.tagNameForUndo(index)
    text = "%sRemove NBT tag %s from %s" % (self.undoCommandPrefixText, self.doomedTagName, name)
    command = NBTDataChangeCommand(self.editorSession, text)
    with command.begin():
        self.editMade.emit()
        self.editorSession.worldEditor.syncToDisk()
    self.editorSession.pushCommand(command)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 5 11:33:04 2017
@author: Jackson
"""
import sys
import pandas as pd
class File():
    '''
    Implements a csv-type (delimited text) file writer.

    Comments and headers must be written before any data rows; after the
    header is written, only data rows may be appended.
    '''

    def __init__(self, path, sep='\t', comment='#'):
        """Open *path* for appending.

        Parameters
        path: string
            File path to open.
        sep: string
            Column delimiter (defaults to a tab).
        comment: string
            Prefix marking comment lines (defaults to '#').
        """
        super().__init__()
        self.path = path
        self.file = open(path, 'a+')
        self.sep = sep
        self.comment = comment
        # True until the header row has been written; comments may only be
        # written while this is True.
        self._before_writing = True

    def close(self):
        """Close the underlying file handle."""
        self.file.close()

    def write_comment(self, string):
        '''
        Add a comment to the head of the file
        Parameters
        string: string
            A string to be added. A trailing newline is not needed.
        Returns
        None
        '''
        if self._before_writing:
            self.file.write(self.comment + string + '\n')
        else:
            raise Exception('Comments must be written before starting to write data')

    def write_header(self, mylist):
        '''
        Set headers to the file.
        Parameters
        mylist: list
            The header names.
        Returns
        None
        '''
        if self._before_writing:
            # Keep an empty frame around so write_data knows the column order.
            self.columns = pd.DataFrame(columns=mylist)
            self.columns.to_csv(self.file, sep=self.sep, index=False)
            self._before_writing = False
        else:
            raise Exception('Headers are already written')

    def write_data(self, mydict):
        '''
        Write data to the file
        Parameters
        mydict: dictionary
            The keys corresponds to the headers.
        Returns
        None
        '''
        if self._before_writing:
            raise Exception('Headers have not been set yet.')
        else:
            # DataFrame.append was removed in pandas 2.0; build a one-row
            # frame aligned to the header columns instead (missing keys
            # become NaN, matching the old append behavior).
            row = pd.DataFrame([mydict], columns=self.columns.columns)
            row.to_csv(self.file, sep=self.sep, index=False, header=False)
Added a `save` method to `kuchinawa.File.File`.
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 5 11:33:04 2017
@author: Jackson
"""
import sys
import pandas as pd
class File():
    '''
    Implements a csv-type (delimited text) file writer.

    Comments and headers must be written before any data rows. Every write
    is flushed to disk immediately via save().
    '''

    def __init__(self, path, sep='\t', comment='#'):
        """Open *path* for appending.

        Parameters
        path: string
            File path to open.
        sep: string
            Column delimiter (defaults to a tab).
        comment: string
            Prefix marking comment lines (defaults to '#').
        """
        super().__init__()
        self.path = path
        self.file = open(path, 'a+')
        self.sep = sep
        self.comment = comment
        # True until the header row has been written; comments may only be
        # written while this is True.
        self._before_writing = True

    def close(self):
        """Close the underlying file handle."""
        self.file.close()

    def save(self):
        """Flush buffered output to disk by closing and reopening the file."""
        self.close()
        self.file = open(self.path, 'a+')

    def write_comment(self, string):
        '''
        Add a comment to the head of the file
        Parameters
        string: string
            A string to be added. A trailing newline is not needed.
        Returns
        None
        '''
        if self._before_writing:
            self.file.write(self.comment + string + '\n')
            self.save()
        else:
            raise Exception('Comments must be written before starting to write data')

    def write_header(self, mylist):
        '''
        Set headers to the file.
        Parameters
        mylist: list
            The header names.
        Returns
        None
        '''
        if self._before_writing:
            # Keep an empty frame around so write_data knows the column order.
            self.columns = pd.DataFrame(columns=mylist)
            self.columns.to_csv(self.file, sep=self.sep, index=False)
            self._before_writing = False
            self.save()
        else:
            raise Exception('Headers are already written')

    def write_data(self, mydict):
        '''
        Write data to the file
        Parameters
        mydict: dictionary
            The keys corresponds to the headers.
        Returns
        None
        '''
        if self._before_writing:
            raise Exception('Headers have not been set yet.')
        else:
            # DataFrame.append was removed in pandas 2.0; build a one-row
            # frame aligned to the header columns instead (missing keys
            # become NaN, matching the old append behavior).
            row = pd.DataFrame([mydict], columns=self.columns.columns)
            row.to_csv(self.file, sep=self.sep, index=False, header=False)
            self.save()
"""
Custom spectrogram function. Returned values are different than scipy.
(c) Cassio Amador 2015
TODO: DECIDE IF CUTTING FREQUENCY SHOULD RETURN A FULL MATRIX WITH NaNS,
OR JUST A PARTIAL ONE.
"""
import numpy as np
from scipy import signal, fftpack
def spectrogram(sig, window_size=256, step_scale=4, zer_pad=2, time_array=None, fft_type='fft', log=False, normalize=0, dc_cut=0, fft_shift=0, filtered=0, freq_mask=[]):
    """Evaluate the spectrogram (SFFT) of a beating signal.

    Input:
    window_size: size of each segment, in number of points; defaults to 256
    step_scale: step for next segment, proportional to window_size (step=window_size/step_scale); defaults to 4
    zer_pad: size of each segment, including zero padding, defaults to 2
    time_array: array with time for each sample (optional)
    fft_type: 'fft' for standard fft (from scipy.fftpack); 'welch' for scipy.signal.welch transform
    log: True for log spectrogram; defaults to False
    normalize: set True to normalize each spectrum; defaults to False
    dc_cut: set True to cut the zero frequency; defaults to False
    fft_shift: set True to fftshift each spectrum (two-sided); defaults to False
    filtered: set True to apply a filtfilt filter to the spectrum. Not recommended unless you are sure about the output. Defaults to False
    freq_mask: boolean mask selecting the frequency columns to keep; defaults to keeping all

    Output:
    matrix: spectrogram as a matrix with n spectra and m frequencies. [n x m]
    time_spec: if time_array is given, returns an array with time of the sliding window center for each spectrum
    beat_freq: if time_array is given, returns the beating frequency in (1/time_array) unity.
    """
    # Pad both ends with a quarter window of zeros so the sliding window
    # also covers the edges of the signal (sizes must be integers).
    pad = np.zeros(window_size // 4)
    sig = np.concatenate((pad, sig, pad))
    N = len(sig)
    # SFFT step size and resulting number of spectra (integers).
    step = int(window_size / step_scale)
    n_spectra = (N - window_size) // step
    if time_array is not None:
        # NOTE(review): this compares time_array against the *padded* signal
        # length; kept as in the original -- confirm against callers.
        if len(time_array) == len(sig):
            beat_freq = eval_beat_freq(time_array, window_size=window_size, zer_pad=zer_pad, fft_shift=fft_shift)
        else:
            raise ValueError('length of time array and signal are different to evaluate spectrogram')
        # Time of the sliding-window center for each spectrum.
        time_spec = np.linspace(time_array[window_size], time_array[-window_size], num=n_spectra)
    # Window function that slides through the signal for each FFT.
    # Kaiser seems to be the cleanest one. scipy.signal.kaiser was removed
    # in modern SciPy; the windows live in scipy.signal.windows.
    #window_func = np.hanning(window_size)
    window_func = signal.windows.kaiser(window_size, 10)
    # If not shifting, treat the signal as real and keep only half the
    # spectrum (factor 2); a shifted spectrum keeps both sides (factor 1).
    factor = 2
    if fft_shift:
        factor = 1
    # Pre-allocate the spectra matrix (integer dimensions).
    mat_Y = window_size * zer_pad // factor
    matrix = np.empty((n_spectra, mat_Y))
    if filtered == 1:
        b, a, zi = _init_filter()
    # Slide the window through the signal and evaluate each FFT.
    for i in range(n_spectra):
        t = i * step
        new_sig = sig[t:t + window_size]
        try:
            new_sig = np.multiply(new_sig, window_func)
        except ValueError:
            # Segment shorter than the window (signal edge): apply the
            # matching part of the window instead.
            print(len(new_sig), i, t)
            if t < window_size:
                new_sig = np.multiply(new_sig, window_func[:len(new_sig)])
            elif t > window_size:
                new_sig = np.multiply(new_sig, window_func[-len(new_sig):])
        if fft_type == 'fft':
            fft_sig = fftpack.fft(new_sig, n=zer_pad * window_size)[:window_size * zer_pad]
        elif fft_type == 'welch':
            # NOTE(review): nfft here is zer_pad**2 * window_size, which looks
            # suspicious but is kept as in the original -- confirm intent.
            freqs, fft_sig = signal.welch(new_sig, nfft=zer_pad * window_size * zer_pad)
            fft_sig = fft_sig[1:window_size]
        if dc_cut == True:
            # Replace the DC bin with the first non-DC bin.
            fft_sig = np.concatenate(
                ([fft_sig[1]], fft_sig[1:-1], [fft_sig[1]]))
        if fft_shift == 1:
            fft_sig = np.fft.fftshift(fft_sig)
        else:
            # Real signal: keep only the positive-frequency half.
            fft_sig = fft_sig[:len(fft_sig) // factor]
        fft_sig = abs(fft_sig)
        if filtered == 1:
            fft_sig = _butter_filter(fft_sig, b, a, zi)
        matrix[i] = fft_sig
    if len(freq_mask) != 0:
        matrix = matrix[:, freq_mask]
    if normalize == True:
        # Normalize each spectrum by its own maximum.
        matrix /= matrix.max(axis=1)[:, None]
    if log == True:
        matrix = np.log(matrix)
    if time_array is not None:
        return matrix, time_spec, beat_freq
    else:
        return matrix
def eval_beat_freq(time_array, window_size, step_scale=4, zer_pad=1, fft_shift=0):
    """Return (time_spec, beat_freq) for a spectrogram of *time_array*.

    time_spec: time of each sliding-window position.
    beat_freq: frequency axis in (1/time_array) units; one-sided when
    fft_shift is 0, two-sided when fft_shift is 1.
    """
    # Acquisition rate from the (assumed uniform) time base.
    acq_rate = 1 / (time_array[1] - time_array[0])
    # Frequency axis; np.linspace requires an integer num.
    if fft_shift == 0:
        beat_freq = np.linspace(0, acq_rate / 2, num=window_size * zer_pad // 2)
    elif fft_shift == 1:
        beat_freq = np.linspace(-acq_rate / 2, acq_rate / 2, num=window_size * zer_pad)
    # Time array for the spectrogram (SFFT step positions).
    time_spec = np.linspace(time_array[0], time_array[-1], num=int(len(time_array) * step_scale / window_size) - 2)
    return time_spec, beat_freq
def eval_mask(beat_freq, window_size, freq_min, freq_max, zer_pad=1, fft_shift=0):
    """Return (fmin, fmax, mask) selecting spectrogram columns inside
    the open interval (freq_min, freq_max).

    fmin/fmax are indices into *beat_freq*; freq_max=None means 'up to
    the end'. mask is a boolean array over the spectrum columns.
    """
    # Index positions closest to the requested min and max frequencies.
    fmin = abs(beat_freq - freq_min).argmin()
    if freq_max is None:
        fmax = len(beat_freq)
    else:
        fmax = abs(beat_freq - freq_max).argmin()
    # One-sided spectrum keeps half the bins (factor 2); shifted keeps all.
    factor = 2
    if fft_shift:
        factor = 1
    # Integer size so np.arange yields an integer index array.
    mask_array = np.arange(window_size * zer_pad // factor)
    mask = np.logical_and(mask_array > fmin, mask_array < fmax)
    return fmin, fmax, mask
def _init_filter():
from signal import butter, lfilter_zi
# Create an order 3 lowpass butterworth filter.
b, a = butter(3, 0.05)
# Apply the filter to xn. Use lfilter_zi to choose the initial condition
# of the filter.
zi = lfilter_zi(b, a)
return b, a, zi
def _butter_filter(sig, b, a, zi):
from signal import lfilter, filtfilt
z, _ = lfilter(b, a, sig, zi=zi * sig[0])
# Apply the filter again, to have a result filtered at an order
# the same as filtfilt.
z2, _ = lfilter(b, a, z, zi=zi * z[0])
# Use filtfilt to apply the filter.
return filtfilt(b, a, sig)
More corrections to the `time_spec` computation.
"""
Custom spectrogram function. Returned values are different than scipy.
(c) Cassio Amador 2015
TODO: DECIDE IF CUTTING FREQUENCY SHOULD RETURN A FULL MATRIX WITH NaNS,
OR JUST A PARTIAL ONE.
"""
import numpy as np
from scipy import signal, fftpack
def spectrogram(sig, window_size=256, step_scale=4, zer_pad=2, time_array=None, fft_type='fft', log=False, normalize=0, dc_cut=0, fft_shift=0, filtered=0, freq_mask=[]):
    """Evaluate the spectrogram (SFFT) of a beating signal.

    Input:
    window_size: size of each segment, in number of points; defaults to 256
    step_scale: step for next segment, proportional to window_size (step=window_size/step_scale); defaults to 4
    zer_pad: size of each segment, including zero padding, defaults to 2
    time_array: array with time for each sample (optional)
    fft_type: 'fft' for standard fft (from scipy.fftpack); 'welch' for scipy.signal.welch transform
    log: True for log spectrogram; defaults to False
    normalize: set True to normalize each spectrum; defaults to False
    dc_cut: set True to cut the zero frequency; defaults to False
    fft_shift: set True to fftshift each spectrum (two-sided); defaults to False
    filtered: set True to apply a filtfilt filter to the spectrum. Not recommended unless you are sure about the output. Defaults to False
    freq_mask: boolean mask selecting the frequency columns to keep; defaults to keeping all

    Output:
    matrix: spectrogram as a matrix with n spectra and m frequencies. [n x m]
    time_spec: if time_array is given, returns an array with time of the sliding window center for each spectrum
    beat_freq: if time_array is given, returns the beating frequency in (1/time_array) unity.
    """
    # Pad both ends with a quarter window of zeros so the sliding window
    # also covers the edges of the signal (sizes must be integers).
    pad = np.zeros(window_size // 4)
    sig = np.concatenate((pad, sig, pad))
    N = len(sig)
    # SFFT step size and resulting number of spectra (integers).
    step = int(window_size / step_scale)
    n_spectra = (N - window_size) // step
    if time_array is not None:
        # NOTE(review): this compares time_array against the *padded* signal
        # length; kept as in the original -- confirm against callers.
        if len(time_array) == len(sig):
            beat_freq = eval_beat_freq(time_array, window_size=window_size, zer_pad=zer_pad, fft_shift=fft_shift)
        else:
            raise ValueError('length of time array and signal are different to evaluate spectrogram')
        # Time of the sliding-window center for each spectrum.
        time_spec = np.linspace(time_array[window_size], time_array[-window_size], num=n_spectra)
    # Window function that slides through the signal for each FFT.
    # Kaiser seems to be the cleanest one. scipy.signal.kaiser was removed
    # in modern SciPy; the windows live in scipy.signal.windows.
    #window_func = np.hanning(window_size)
    window_func = signal.windows.kaiser(window_size, 10)
    # If not shifting, treat the signal as real and keep only half the
    # spectrum (factor 2); a shifted spectrum keeps both sides (factor 1).
    factor = 2
    if fft_shift:
        factor = 1
    # Pre-allocate the spectra matrix (integer dimensions).
    mat_Y = window_size * zer_pad // factor
    matrix = np.empty((n_spectra, mat_Y))
    if filtered == 1:
        b, a, zi = _init_filter()
    # Slide the window through the signal and evaluate each FFT.
    for i in range(n_spectra):
        t = i * step
        new_sig = sig[t:t + window_size]
        try:
            new_sig = np.multiply(new_sig, window_func)
        except ValueError:
            # Segment shorter than the window (signal edge): apply the
            # matching part of the window instead.
            print(len(new_sig), i, t)
            if t < window_size:
                new_sig = np.multiply(new_sig, window_func[:len(new_sig)])
            elif t > window_size:
                new_sig = np.multiply(new_sig, window_func[-len(new_sig):])
        if fft_type == 'fft':
            fft_sig = fftpack.fft(new_sig, n=zer_pad * window_size)[:window_size * zer_pad]
        elif fft_type == 'welch':
            # NOTE(review): nfft here is zer_pad**2 * window_size, which looks
            # suspicious but is kept as in the original -- confirm intent.
            freqs, fft_sig = signal.welch(new_sig, nfft=zer_pad * window_size * zer_pad)
            fft_sig = fft_sig[1:window_size]
        if dc_cut == True:
            # Replace the DC bin with the first non-DC bin.
            fft_sig = np.concatenate(
                ([fft_sig[1]], fft_sig[1:-1], [fft_sig[1]]))
        if fft_shift == 1:
            fft_sig = np.fft.fftshift(fft_sig)
        else:
            # Real signal: keep only the positive-frequency half.
            fft_sig = fft_sig[:len(fft_sig) // factor]
        fft_sig = abs(fft_sig)
        if filtered == 1:
            fft_sig = _butter_filter(fft_sig, b, a, zi)
        matrix[i] = fft_sig
    if len(freq_mask) != 0:
        matrix = matrix[:, freq_mask]
    if normalize == True:
        # Normalize each spectrum by its own maximum.
        matrix /= matrix.max(axis=1)[:, None]
    if log == True:
        matrix = np.log(matrix)
    if time_array is not None:
        return matrix, time_spec, beat_freq
    else:
        return matrix
def eval_beat_freq(time_array, window_size, step_scale=4, zer_pad=1, fft_shift=0):
    """Return (time_spec, beat_freq) for a spectrogram of *time_array*.

    time_spec: time of each sliding-window position, starting a quarter
    window in from each end (matching the zero padding in spectrogram).
    beat_freq: frequency axis in (1/time_array) units; one-sided when
    fft_shift is 0, two-sided when fft_shift is 1.
    """
    # Acquisition rate from the (assumed uniform) time base.
    acq_rate = 1 / (time_array[1] - time_array[0])
    # Frequency axis; np.linspace requires an integer num.
    if fft_shift == 0:
        beat_freq = np.linspace(0, acq_rate / 2, num=window_size * zer_pad // 2)
    elif fft_shift == 1:
        beat_freq = np.linspace(-acq_rate / 2, acq_rate / 2, num=window_size * zer_pad)
    # Time array for the spectrogram. Array indices must be integers:
    # window_size*0.25 is a float, so use integer division instead.
    quarter = window_size // 4
    time_spec = np.linspace(time_array[quarter], time_array[-quarter], num=int((len(time_array) - window_size / 2) * step_scale / window_size))
    return time_spec, beat_freq
def eval_mask(beat_freq, window_size, freq_min, freq_max, zer_pad=1, fft_shift=0):
    """Return (fmin, fmax, mask) selecting spectrogram columns inside
    the open interval (freq_min, freq_max).

    fmin/fmax are indices into *beat_freq*; freq_max=None means 'up to
    the end'. mask is a boolean array over the spectrum columns.
    """
    # Index positions closest to the requested min and max frequencies.
    fmin = abs(beat_freq - freq_min).argmin()
    if freq_max is None:
        fmax = len(beat_freq)
    else:
        fmax = abs(beat_freq - freq_max).argmin()
    # One-sided spectrum keeps half the bins (factor 2); shifted keeps all.
    factor = 2
    if fft_shift:
        factor = 1
    # Integer size so np.arange yields an integer index array.
    mask_array = np.arange(window_size * zer_pad // factor)
    mask = np.logical_and(mask_array > fmin, mask_array < fmax)
    return fmin, fmax, mask
def _init_filter():
from signal import butter, lfilter_zi
# Create an order 3 lowpass butterworth filter.
b, a = butter(3, 0.05)
# Apply the filter to xn. Use lfilter_zi to choose the initial condition
# of the filter.
zi = lfilter_zi(b, a)
return b, a, zi
def _butter_filter(sig, b, a, zi):
from signal import lfilter, filtfilt
z, _ = lfilter(b, a, sig, zi=zi * sig[0])
# Apply the filter again, to have a result filtered at an order
# the same as filtfilt.
z2, _ = lfilter(b, a, z, zi=zi * z[0])
# Use filtfilt to apply the filter.
return filtfilt(b, a, sig)
|
"""
Multiclass and multilabel classification strategies
===================================================
This module implements multiclass learning algorithms:
- one-vs-the-rest / one-vs-all
- one-vs-one
- error correcting output codes
The estimators provided in this module are meta-estimators: they require a base
estimator to be provided in their constructor. For example, it is possible to
use these estimators to turn a binary classifier or a regressor into a
multiclass classifier. It is also possible to use these estimators with
multiclass estimators in the hope that their accuracy or runtime performance
improves.
All classifiers in scikit-learn implement multiclass classification; you
only need to use this module if you want to experiment with custom multiclass
strategies.
The one-vs-the-rest meta-classifier also implements a `predict_proba` method,
so long as such a method is implemented by the base classifier. This method
returns probabilities of class membership in both the single label and
multilabel case. Note that in the multilabel case, probabilities are the
marginal probability that a given sample falls in the given class. As such, in
the multilabel case the sum of these probabilities over all possible labels
for a given sample *will not* sum to unity, as they do in the single label
case.
"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Author: Hamzeh Alsalhi <93hamsal@gmail.com>
#
# License: BSD 3 clause
import array
import numpy as np
import warnings
import scipy.sparse as sp
from .base import BaseEstimator, ClassifierMixin, clone, is_classifier
from .base import MetaEstimatorMixin, is_regressor
from .preprocessing import LabelBinarizer
from .metrics.pairwise import euclidean_distances
from .utils import check_random_state
from .utils.validation import _num_samples
from .utils.validation import check_consistent_length
from .utils.validation import check_is_fitted
from .utils.multiclass import (_check_partial_fit_first_call,
check_classification_targets)
from .externals.joblib import Parallel
from .externals.joblib import delayed
from .externals.six.moves import zip as izip
__all__ = [
"OneVsRestClassifier",
"OneVsOneClassifier",
"OutputCodeClassifier",
]
def _fit_binary(estimator, X, y, classes=None):
    """Fit a single binary estimator.

    If *y* contains only one class, fitting is degenerate: warn and return
    a _ConstantPredictor instead of fitting a clone of *estimator*.
    """
    unique_y = np.unique(y)
    if len(unique_y) == 1:
        if classes is not None:
            # Map the single label to an index into *classes*; -1 is the
            # "negative" encoding produced by label binarization.
            if y[0] == -1:
                c = 0
            else:
                c = y[0]
            warnings.warn("Label %s is present in all training examples." %
                          str(classes[c]))
        estimator = _ConstantPredictor().fit(X, unique_y)
    else:
        # Clone so the caller's estimator template is never mutated.
        estimator = clone(estimator)
        estimator.fit(X, y)
    return estimator
def _partial_fit_binary(estimator, X, y):
"""Partially fit a single binary estimator."""
estimator.partial_fit(X, y, np.array((0, 1)))
return estimator
def _predict_binary(estimator, X):
    """Make predictions using a single binary estimator.

    Returns a 1-d array of continuous scores: regression output, the
    decision function, or (as a fallback) positive-class probabilities.
    """
    if is_regressor(estimator):
        return estimator.predict(X)
    try:
        # Prefer the (unnormalized) decision function when available.
        score = np.ravel(estimator.decision_function(X))
    except (AttributeError, NotImplementedError):
        # probabilities of the positive class
        score = estimator.predict_proba(X)[:, 1]
    return score
def _check_estimator(estimator):
"""Make sure that an estimator implements the necessary methods."""
if (not hasattr(estimator, "decision_function") and
not hasattr(estimator, "predict_proba")):
raise ValueError("The base estimator should implement "
"decision_function or predict_proba!")
class _ConstantPredictor(BaseEstimator):
    """Estimator that always predicts the single value seen at fit time.

    Used by _fit_binary when the training data contains only one class.
    """

    def fit(self, X, y):
        # y is expected to hold the single constant value to repeat.
        self.y_ = y
        return self

    def predict(self, X):
        check_is_fitted(self, 'y_')
        # Repeat the constant once per sample.
        return np.repeat(self.y_, X.shape[0])

    def decision_function(self, X):
        check_is_fitted(self, 'y_')
        return np.repeat(self.y_, X.shape[0])

    def predict_proba(self, X):
        check_is_fitted(self, 'y_')
        # Two-column probabilities [P(class 0), P(class 1)] per sample.
        return np.repeat([np.hstack([1 - self.y_, self.y_])],
                         X.shape[0], axis=0)
class OneVsRestClassifier(BaseEstimator, ClassifierMixin, MetaEstimatorMixin):
    """One-vs-the-rest (OvR) multiclass/multilabel strategy

    Also known as one-vs-all, this strategy consists in fitting one classifier
    per class. For each classifier, the class is fitted against all the other
    classes. In addition to its computational efficiency (only `n_classes`
    classifiers are needed), one advantage of this approach is its
    interpretability. Since each class is represented by one and one classifier
    only, it is possible to gain knowledge about the class by inspecting its
    corresponding classifier. This is the most commonly used strategy for
    multiclass classification and is a fair default choice.

    This strategy can also be used for multilabel learning, where a classifier
    is used to predict multiple labels for instance, by fitting on a 2-d matrix
    in which cell [i, j] is 1 if sample i has label j and 0 otherwise.

    In the multilabel learning literature, OvR is also known as the binary
    relevance method.

    Read more in the :ref:`User Guide <ovr_classification>`.

    Parameters
    ----------
    estimator : estimator object
        An estimator object implementing `fit` and one of `decision_function`
        or `predict_proba`.

    n_jobs : int, optional, default: 1
        The number of jobs to use for the computation. If -1 all CPUs are used.
        If 1 is given, no parallel computing code is used at all, which is
        useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
        used. Thus for n_jobs = -2, all CPUs but one are used.

    Attributes
    ----------
    estimators_ : list of `n_classes` estimators
        Estimators used for predictions.

    classes_ : array, shape = [`n_classes`]
        Class labels.

    label_binarizer_ : LabelBinarizer object
        Object used to transform multiclass labels to binary labels and
        vice-versa.

    multilabel_ : boolean
        Whether a OneVsRestClassifier is a multilabel classifier.
    """

    def __init__(self, estimator, n_jobs=1):
        self.estimator = estimator
        self.n_jobs = n_jobs

    def fit(self, X, y):
        """Fit underlying estimators.

        Parameters
        ----------
        X : (sparse) array-like, shape = [n_samples, n_features]
            Data.

        y : (sparse) array-like, shape = [n_samples,], [n_samples, n_classes]
            Multi-class targets. An indicator matrix turns on multilabel
            classification.

        Returns
        -------
        self
        """
        # A sparse LabelBinarizer, with sparse_output=True, has been shown to
        # outperform or match a dense label binarizer in all cases and has also
        # resulted in less or equal memory consumption in the fit_ovr function
        # overall.
        self.label_binarizer_ = LabelBinarizer(sparse_output=True)
        Y = self.label_binarizer_.fit_transform(y)
        Y = Y.tocsc()
        self.classes_ = self.label_binarizer_.classes_
        # One 0/1 target column per class, materialized lazily.
        columns = (col.toarray().ravel() for col in Y.T)
        # In cases where individual estimators are very fast to train setting
        # n_jobs > 1 in can results in slower performance due to the overhead
        # of spawning threads.  See joblib issue #112.
        self.estimators_ = Parallel(n_jobs=self.n_jobs)(delayed(_fit_binary)(
            self.estimator, X, column, classes=[
                "not %s" % self.label_binarizer_.classes_[i],
                self.label_binarizer_.classes_[i]])
            for i, column in enumerate(columns))

        return self

    def partial_fit(self, X, y, classes=None):
        """Partially fit underlying estimators

        Should be used when memory is inefficient to train all data.
        Chunks of data can be passed in several iteration.

        Parameters
        ----------
        X : (sparse) array-like, shape = [n_samples, n_features]
            Data.

        y : (sparse) array-like, shape = [n_samples,], [n_samples, n_classes]
            Multi-class targets. An indicator matrix turns on multilabel
            classification.

        classes : array, shape (n_classes,)
            Classes across all calls to partial_fit.
            Can be obtained via `np.unique(y_all)`, where y_all is the
            target vector of the entire dataset.
            This argument is only required in the first call of partial_fit
            and can be omitted in the subsequent calls.

        Returns
        -------
        self
        """
        if _check_partial_fit_first_call(self, classes):
            if (not hasattr(self.estimator, "partial_fit")):
                # BUG FIX: the original referenced an undefined name
                # ``estimator`` (NameError) and concatenated the message
                # without a space ("partial_fitmethod").
                raise ValueError("Base estimator {0}, doesn't have "
                                 "partial_fit method".format(self.estimator))
            self.estimators_ = [clone(self.estimator) for _ in range
                                (self.n_classes_)]

        # A sparse LabelBinarizer, with sparse_output=True, has been shown to
        # outperform or match a dense label binarizer in all cases and has also
        # resulted in less or equal memory consumption in the fit_ovr function
        # overall.
        self.label_binarizer_ = LabelBinarizer(sparse_output=True)
        Y = self.label_binarizer_.fit_transform(y)
        Y = Y.tocsc()
        columns = (col.toarray().ravel() for col in Y.T)

        # Classes absent from this chunk get an all-zero target column.
        self.estimators_ = Parallel(n_jobs=self.n_jobs)(delayed(
            _partial_fit_binary)(self.estimators_[i],
                                 X, next(columns) if self.classes_[i] in
                                 self.label_binarizer_.classes_ else
                                 np.zeros((1, len(y))))
            for i in range(self.n_classes_))

        return self

    def predict(self, X):
        """Predict multi-class targets using underlying estimators.

        Parameters
        ----------
        X : (sparse) array-like, shape = [n_samples, n_features]
            Data.

        Returns
        -------
        y : (sparse) array-like, shape = [n_samples,], [n_samples, n_classes].
            Predicted multi-class targets.
        """
        check_is_fitted(self, 'estimators_')
        # Decision functions are signed (threshold 0); probabilities are
        # thresholded at 0.5.
        if (hasattr(self.estimators_[0], "decision_function") and
                is_classifier(self.estimators_[0])):
            thresh = 0
        else:
            thresh = .5

        n_samples = _num_samples(X)
        if self.label_binarizer_.y_type_ == "multiclass":
            # Single-label: pick the class with the highest binary score.
            maxima = np.empty(n_samples, dtype=float)
            maxima.fill(-np.inf)
            argmaxima = np.zeros(n_samples, dtype=int)
            for i, e in enumerate(self.estimators_):
                pred = _predict_binary(e, X)
                np.maximum(maxima, pred, out=maxima)
                argmaxima[maxima == pred] = i
            return self.classes_[np.array(argmaxima.T)]
        else:
            # Multilabel: build a sparse indicator matrix of all classes
            # whose score clears the threshold.
            indices = array.array('i')
            indptr = array.array('i', [0])
            for e in self.estimators_:
                indices.extend(np.where(_predict_binary(e, X) > thresh)[0])
                indptr.append(len(indices))
            data = np.ones(len(indices), dtype=int)
            indicator = sp.csc_matrix((data, indices, indptr),
                                      shape=(n_samples, len(self.estimators_)))
            return self.label_binarizer_.inverse_transform(indicator)

    def predict_proba(self, X):
        """Probability estimates.

        The returned estimates for all classes are ordered by label of classes.

        Note that in the multilabel case, each sample can have any number of
        labels. This returns the marginal probability that the given sample has
        the label in question. For example, it is entirely consistent that two
        labels both have a 90% probability of applying to a given sample.

        In the single label multiclass case, the rows of the returned matrix
        sum to 1.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        T : (sparse) array-like, shape = [n_samples, n_classes]
            Returns the probability of the sample for each class in the model,
            where classes are ordered as they are in `self.classes_`.
        """
        check_is_fitted(self, 'estimators_')
        # Y[i, j] gives the probability that sample i has the label j.
        # In the multi-label case, these are not disjoint.
        Y = np.array([e.predict_proba(X)[:, 1] for e in self.estimators_]).T

        if len(self.estimators_) == 1:
            # Only one estimator, but we still want to return probabilities
            # for two classes.
            Y = np.concatenate(((1 - Y), Y), axis=1)

        if not self.multilabel_:
            # Then, probabilities should be normalized to 1.
            Y /= np.sum(Y, axis=1)[:, np.newaxis]
        return Y

    def decision_function(self, X):
        """Returns the distance of each sample from the decision boundary for
        each class. This can only be used with estimators which implement the
        decision_function method.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        T : array-like, shape = [n_samples, n_classes]
        """
        check_is_fitted(self, 'estimators_')
        if not hasattr(self.estimators_[0], "decision_function"):
            raise AttributeError(
                "Base estimator doesn't have a decision_function attribute.")
        return np.array([est.decision_function(X).ravel()
                         for est in self.estimators_]).T

    @property
    def multilabel_(self):
        """Whether this is a multilabel classifier"""
        return self.label_binarizer_.y_type_.startswith('multilabel')

    @property
    def n_classes_(self):
        return len(self.classes_)

    @property
    def coef_(self):
        check_is_fitted(self, 'estimators_')
        if not hasattr(self.estimators_[0], "coef_"):
            raise AttributeError(
                "Base estimator doesn't have a coef_ attribute.")
        coefs = [e.coef_ for e in self.estimators_]
        if sp.issparse(coefs[0]):
            return sp.vstack(coefs)
        return np.vstack(coefs)

    @property
    def intercept_(self):
        check_is_fitted(self, 'estimators_')
        if not hasattr(self.estimators_[0], "intercept_"):
            raise AttributeError(
                "Base estimator doesn't have an intercept_ attribute.")
        return np.array([e.intercept_.ravel() for e in self.estimators_])
def _fit_ovo_binary(estimator, X, y, i, j):
    """Fit a single binary estimator (one-vs-one).

    Restricts the data to the samples of class *i* or *j* and relabels
    them 0 (class i) and 1 (class j) before fitting.
    """
    cond = np.logical_or(y == i, y == j)
    y = y[cond]
    # The np.int alias was removed in NumPy 1.24; use the builtin int.
    y_binary = np.empty(y.shape, int)
    y_binary[y == i] = 0
    y_binary[y == j] = 1
    ind = np.arange(X.shape[0])
    return _fit_binary(estimator, X[ind[cond]], y_binary, classes=[i, j])
def _partial_fit_ovo_binary(estimator, X, y, i, j):
    """Partially fit a single binary estimator (one-vs-one).

    Restricts the data to the samples of class *i* or *j* and relabels
    them 0 (class i) and 1 (class j) before partially fitting.
    """
    cond = np.logical_or(y == i, y == j)
    y = y[cond]
    y_binary = np.zeros_like(y)
    y_binary[y == j] = 1
    # The original built an unused index array here; X[cond] selects the
    # same rows directly.
    return _partial_fit_binary(estimator, X[cond], y_binary)
class OneVsOneClassifier(BaseEstimator, ClassifierMixin, MetaEstimatorMixin):
"""One-vs-one multiclass strategy
This strategy consists in fitting one classifier per class pair.
At prediction time, the class which received the most votes is selected.
Since it requires to fit `n_classes * (n_classes - 1) / 2` classifiers,
this method is usually slower than one-vs-the-rest, due to its
O(n_classes^2) complexity. However, this method may be advantageous for
algorithms such as kernel algorithms which don't scale well with
`n_samples`. This is because each individual learning problem only involves
a small subset of the data whereas, with one-vs-the-rest, the complete
dataset is used `n_classes` times.
Read more in the :ref:`User Guide <ovo_classification>`.
Parameters
----------
estimator : estimator object
An estimator object implementing `fit` and one of `decision_function`
or `predict_proba`.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is
useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used. Thus for n_jobs = -2, all CPUs but one are used.
Attributes
----------
estimators_ : list of `n_classes * (n_classes - 1) / 2` estimators
Estimators used for predictions.
classes_ : numpy array of shape [n_classes]
Array containing labels.
"""
def __init__(self, estimator, n_jobs=1):
self.estimator = estimator
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit underlying estimators.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : array-like, shape = [n_samples]
Multi-class targets.
Returns
-------
self
"""
y = np.asarray(y)
check_consistent_length(X, y)
self.classes_ = np.unique(y)
n_classes = self.classes_.shape[0]
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_ovo_binary)(
self.estimator, X, y, self.classes_[i], self.classes_[j])
for i in range(n_classes) for j in range(i + 1, n_classes))
return self
def partial_fit(self, X, y, classes=None):
"""Partially fit underlying estimators
Should be used when memory is inefficient to train all data. Chunks
of data can be passed in several iteration, where the first call
should have an array of all target variables.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : array-like, shape = [n_samples]
Multi-class targets.
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is only required in the first call of partial_fit
and can be omitted in the subsequent calls.
Returns
-------
self
"""
if _check_partial_fit_first_call(self, classes):
self.estimators_ = [clone(self.estimator) for i in
range(self.n_classes_ *
(self.n_classes_-1) // 2)]
y = np.asarray(y)
check_consistent_length(X, y)
check_classification_targets(y)
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_partial_fit_ovo_binary)(
estimator, X, y, self.classes_[i], self.classes_[j])
for estimator, (i, j) in izip(self.estimators_, ((i, j) for i
in range(self.n_classes_) for j in range
(i + 1, self.n_classes_))))
return self
    def predict(self, X):
        """Estimate the best class label for each sample in X.
        This is implemented as ``argmax(decision_function(X), axis=1)`` which
        will return the label of the class with most votes by estimators
        predicting the outcome of a decision for each possible class pair.
        Parameters
        ----------
        X : (sparse) array-like, shape = [n_samples, n_features]
            Data.
        Returns
        -------
        y : numpy array of shape [n_samples]
            Predicted multi-class targets.
        """
        Y = self.decision_function(X)
        # Column index of the highest decision value maps back to a label.
        return self.classes_[Y.argmax(axis=1)]
    def decision_function(self, X):
        """Decision function for the OneVsOneClassifier.
        The decision values for the samples are computed by adding the
        normalized sum of pair-wise classification confidence levels to the
        votes in order to disambiguate between the decision values when the
        votes for all the classes are equal leading to a tie.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        Returns
        -------
        Y : array-like, shape = [n_samples, n_classes]
        """
        check_is_fitted(self, 'estimators_')
        # One column per pairwise estimator, in the (i, j) fitting order.
        predictions = np.vstack([est.predict(X) for est in self.estimators_]).T
        confidences = np.vstack([_predict_binary(est, X) for est in self.estimators_]).T
        return _ovr_decision_function(predictions, confidences,
                                      len(self.classes_))
    @property
    def n_classes_(self):
        # Number of distinct target classes seen during fit/partial_fit.
        return len(self.classes_)
def _ovr_decision_function(predictions, confidences, n_classes):
"""Compute a continuous, tie-breaking ovr decision function.
It is important to include a continuous value, not only votes,
to make computing AUC or calibration meaningful.
Parameters
----------
predictions : array-like, shape (n_samples, n_classifiers)
Predicted classes for each binary classifier.
confidences : array-like, shape (n_samples, n_classifiers)
Decision functions or predicted probabilities for positive class
for each binary classifier.
n_classes : int
Number of classes. n_classifiers must be
``n_classes * (n_classes - 1 ) / 2``
"""
n_samples = predictions.shape[0]
votes = np.zeros((n_samples, n_classes))
sum_of_confidences = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
sum_of_confidences[:, i] -= confidences[:, k]
sum_of_confidences[:, j] += confidences[:, k]
votes[predictions[:, k] == 0, i] += 1
votes[predictions[:, k] == 1, j] += 1
k += 1
max_confidences = sum_of_confidences.max()
min_confidences = sum_of_confidences.min()
if max_confidences == min_confidences:
return votes
# Scale the sum_of_confidences to (-0.5, 0.5) and add it with votes.
# The motivation is to use confidence levels as a way to break ties in
# the votes without switching any decision made based on a difference
# of 1 vote.
eps = np.finfo(sum_of_confidences.dtype).eps
max_abs_confidence = max(abs(max_confidences), abs(min_confidences))
scale = (0.5 - eps) / max_abs_confidence
return votes + sum_of_confidences * scale
class OutputCodeClassifier(BaseEstimator, ClassifierMixin, MetaEstimatorMixin):
    """(Error-Correcting) Output-Code multiclass strategy
    Output-code based strategies consist in representing each class with a
    binary code (an array of 0s and 1s). At fitting time, one binary
    classifier per bit in the code book is fitted. At prediction time, the
    classifiers are used to project new points in the class space and the class
    closest to the points is chosen. The main advantage of these strategies is
    that the number of classifiers used can be controlled by the user, either
    for compressing the model (0 < code_size < 1) or for making the model more
    robust to errors (code_size > 1). See the documentation for more details.
    Read more in the :ref:`User Guide <ecoc>`.
    Parameters
    ----------
    estimator : estimator object
        An estimator object implementing `fit` and one of `decision_function`
        or `predict_proba`.
    code_size : float
        Percentage of the number of classes to be used to create the code book.
        A number between 0 and 1 will require fewer classifiers than
        one-vs-the-rest. A number greater than 1 will require more classifiers
        than one-vs-the-rest.
    random_state : numpy.RandomState, optional
        The generator used to initialize the codebook. Defaults to
        numpy.random.
    n_jobs : int, optional, default: 1
        The number of jobs to use for the computation. If -1 all CPUs are used.
        If 1 is given, no parallel computing code is used at all, which is
        useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
        used. Thus for n_jobs = -2, all CPUs but one are used.
    Attributes
    ----------
    estimators_ : list of `int(n_classes * code_size)` estimators
        Estimators used for predictions.
    classes_ : numpy array of shape [n_classes]
        Array containing labels.
    code_book_ : numpy array of shape [n_classes, code_size]
        Binary array containing the code of each class.
    References
    ----------
    .. [1] "Solving multiclass learning problems via error-correcting output
       codes",
       Dietterich T., Bakiri G.,
       Journal of Artificial Intelligence Research 2,
       1995.
    .. [2] "The error coding method and PICTs",
       James G., Hastie T.,
       Journal of Computational and Graphical statistics 7,
       1998.
    .. [3] "The Elements of Statistical Learning",
       Hastie T., Tibshirani R., Friedman J., page 606 (second-edition)
       2008.
    """
    def __init__(self, estimator, code_size=1.5, random_state=None, n_jobs=1):
        self.estimator = estimator
        self.code_size = code_size
        self.random_state = random_state
        self.n_jobs = n_jobs

    def fit(self, X, y):
        """Fit underlying estimators.
        Parameters
        ----------
        X : (sparse) array-like, shape = [n_samples, n_features]
            Data.
        y : numpy array of shape [n_samples]
            Multi-class targets.
        Returns
        -------
        self
        """
        if self.code_size <= 0:
            # Bug fix: the message used positional field {1} with a single
            # format argument, which raised IndexError instead of reporting
            # the offending value; {0} is correct.
            raise ValueError("code_size should be greater than 0, got {0}"
                             "".format(self.code_size))
        _check_estimator(self.estimator)
        random_state = check_random_state(self.random_state)
        self.classes_ = np.unique(y)
        n_classes = self.classes_.shape[0]
        code_size_ = int(n_classes * self.code_size)
        # FIXME: there are more elaborate methods than generating the codebook
        # randomly.
        self.code_book_ = random_state.random_sample((n_classes, code_size_))
        self.code_book_[self.code_book_ > 0.5] = 1
        if hasattr(self.estimator, "decision_function"):
            # Margin-based estimators train on {-1, 1} codes...
            self.code_book_[self.code_book_ != 1] = -1
        else:
            # ...probabilistic estimators on {0, 1} codes.
            self.code_book_[self.code_book_ != 1] = 0
        classes_index = dict((c, i) for i, c in enumerate(self.classes_))
        # Y[i] is the code word of sample i's class. Use the builtin int:
        # np.int was a deprecated alias for it and has been removed from
        # recent NumPy releases.
        Y = np.array([self.code_book_[classes_index[y[i]]]
                      for i in range(X.shape[0])], dtype=int)
        # One binary problem per bit of the code book.
        self.estimators_ = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_binary)(self.estimator, X, Y[:, i])
            for i in range(Y.shape[1]))
        return self

    def predict(self, X):
        """Predict multi-class targets using underlying estimators.
        Parameters
        ----------
        X : (sparse) array-like, shape = [n_samples, n_features]
            Data.
        Returns
        -------
        y : numpy array of shape [n_samples]
            Predicted multi-class targets.
        """
        check_is_fitted(self, 'estimators_')
        Y = np.array([_predict_binary(e, X) for e in self.estimators_]).T
        # The class whose code word is nearest (Euclidean) wins.
        pred = euclidean_distances(Y, self.code_book_).argmin(axis=1)
        return self.classes_[pred]
COSMIT: whitespace
To finish off #6087
"""
Multiclass and multilabel classification strategies
===================================================
This module implements multiclass learning algorithms:
- one-vs-the-rest / one-vs-all
- one-vs-one
- error correcting output codes
The estimators provided in this module are meta-estimators: they require a base
estimator to be provided in their constructor. For example, it is possible to
use these estimators to turn a binary classifier or a regressor into a
multiclass classifier. It is also possible to use these estimators with
multiclass estimators in the hope that their accuracy or runtime performance
improves.
All classifiers in scikit-learn implement multiclass classification; you
only need to use this module if you want to experiment with custom multiclass
strategies.
The one-vs-the-rest meta-classifier also implements a `predict_proba` method,
so long as such a method is implemented by the base classifier. This method
returns probabilities of class membership in both the single label and
multilabel case. Note that in the multilabel case, probabilities are the
marginal probability that a given sample falls in the given class. As such, in
the multilabel case the sum of these probabilities over all possible labels
for a given sample *will not* sum to unity, as they do in the single label
case.
"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Author: Hamzeh Alsalhi <93hamsal@gmail.com>
#
# License: BSD 3 clause
import array
import numpy as np
import warnings
import scipy.sparse as sp
from .base import BaseEstimator, ClassifierMixin, clone, is_classifier
from .base import MetaEstimatorMixin, is_regressor
from .preprocessing import LabelBinarizer
from .metrics.pairwise import euclidean_distances
from .utils import check_random_state
from .utils.validation import _num_samples
from .utils.validation import check_consistent_length
from .utils.validation import check_is_fitted
from .utils.multiclass import (_check_partial_fit_first_call,
check_classification_targets)
from .externals.joblib import Parallel
from .externals.joblib import delayed
from .externals.six.moves import zip as izip
__all__ = [
"OneVsRestClassifier",
"OneVsOneClassifier",
"OutputCodeClassifier",
]
def _fit_binary(estimator, X, y, classes=None):
    """Fit a single binary estimator.
    When ``y`` contains only one label, a ``_ConstantPredictor`` is fitted
    in place of the real estimator, and a warning names the ever-present
    class when ``classes`` is supplied.
    """
    unique_y = np.unique(y)
    if len(unique_y) != 1:
        # Common case: clone so the caller's template estimator is
        # left untouched, then fit the clone.
        fitted = clone(estimator)
        fitted.fit(X, y)
        return fitted
    # Degenerate case: every sample carries the same label.
    if classes is not None:
        c = 0 if y[0] == -1 else y[0]
        warnings.warn("Label %s is present in all training examples." %
                      str(classes[c]))
    return _ConstantPredictor().fit(X, unique_y)
def _partial_fit_binary(estimator, X, y):
"""Partially fit a single binary estimator."""
estimator.partial_fit(X, y, np.array((0, 1)))
return estimator
def _predict_binary(estimator, X):
    """Make predictions using a single binary estimator."""
    if is_regressor(estimator):
        # Regressors already output a continuous score.
        return estimator.predict(X)
    try:
        return np.ravel(estimator.decision_function(X))
    except (AttributeError, NotImplementedError):
        # No decision_function: fall back on the probability of the
        # positive class.
        return estimator.predict_proba(X)[:, 1]
def _check_estimator(estimator):
"""Make sure that an estimator implements the necessary methods."""
if (not hasattr(estimator, "decision_function") and
not hasattr(estimator, "predict_proba")):
raise ValueError("The base estimator should implement "
"decision_function or predict_proba!")
class _ConstantPredictor(BaseEstimator):
    # Trivial estimator used when a binary subproblem is degenerate (only
    # one label present in training): it always answers the stored label.
    def fit(self, X, y):
        # y is the single unique label (a 1-element array).
        self.y_ = y
        return self
    def predict(self, X):
        check_is_fitted(self, 'y_')
        # Repeat the constant label once per sample.
        return np.repeat(self.y_, X.shape[0])
    def decision_function(self, X):
        check_is_fitted(self, 'y_')
        return np.repeat(self.y_, X.shape[0])
    def predict_proba(self, X):
        check_is_fitted(self, 'y_')
        # One [P(negative), P(positive)] row per sample.
        return np.repeat([np.hstack([1 - self.y_, self.y_])],
                         X.shape[0], axis=0)
class OneVsRestClassifier(BaseEstimator, ClassifierMixin, MetaEstimatorMixin):
    """One-vs-the-rest (OvR) multiclass/multilabel strategy
    Also known as one-vs-all, this strategy consists in fitting one classifier
    per class. For each classifier, the class is fitted against all the other
    classes. In addition to its computational efficiency (only `n_classes`
    classifiers are needed), one advantage of this approach is its
    interpretability. Since each class is represented by one and one classifier
    only, it is possible to gain knowledge about the class by inspecting its
    corresponding classifier. This is the most commonly used strategy for
    multiclass classification and is a fair default choice.
    This strategy can also be used for multilabel learning, where a classifier
    is used to predict multiple labels for instance, by fitting on a 2-d matrix
    in which cell [i, j] is 1 if sample i has label j and 0 otherwise.
    In the multilabel learning literature, OvR is also known as the binary
    relevance method.
    Read more in the :ref:`User Guide <ovr_classification>`.
    Parameters
    ----------
    estimator : estimator object
        An estimator object implementing `fit` and one of `decision_function`
        or `predict_proba`.
    n_jobs : int, optional, default: 1
        The number of jobs to use for the computation. If -1 all CPUs are used.
        If 1 is given, no parallel computing code is used at all, which is
        useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
        used. Thus for n_jobs = -2, all CPUs but one are used.
    Attributes
    ----------
    estimators_ : list of `n_classes` estimators
        Estimators used for predictions.
    classes_ : array, shape = [`n_classes`]
        Class labels.
    label_binarizer_ : LabelBinarizer object
        Object used to transform multiclass labels to binary labels and
        vice-versa.
    multilabel_ : boolean
        Whether a OneVsRestClassifier is a multilabel classifier.
    """
    def __init__(self, estimator, n_jobs=1):
        self.estimator = estimator
        self.n_jobs = n_jobs

    def fit(self, X, y):
        """Fit underlying estimators.
        Parameters
        ----------
        X : (sparse) array-like, shape = [n_samples, n_features]
            Data.
        y : (sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]
            Multi-class targets. An indicator matrix turns on multilabel
            classification.
        Returns
        -------
        self
        """
        # A sparse LabelBinarizer, with sparse_output=True, has been shown to
        # outperform or match a dense label binarizer in all cases and has
        # also resulted in less or equal memory consumption in the fit_ovr
        # function overall.
        self.label_binarizer_ = LabelBinarizer(sparse_output=True)
        Y = self.label_binarizer_.fit_transform(y)
        Y = Y.tocsc()
        self.classes_ = self.label_binarizer_.classes_
        columns = (col.toarray().ravel() for col in Y.T)
        # In cases where individual estimators are very fast to train,
        # setting n_jobs > 1 can result in slower performance due to the
        # overhead of spawning threads. See joblib issue #112.
        self.estimators_ = Parallel(n_jobs=self.n_jobs)(delayed(_fit_binary)(
            self.estimator, X, column, classes=[
                "not %s" % self.label_binarizer_.classes_[i],
                self.label_binarizer_.classes_[i]])
            for i, column in enumerate(columns))
        return self

    def partial_fit(self, X, y, classes=None):
        """Partially fit underlying estimators
        Should be used when memory is inefficient to train all data.
        Chunks of data can be passed in several iteration.
        Parameters
        ----------
        X : (sparse) array-like, shape = [n_samples, n_features]
            Data.
        y : (sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]
            Multi-class targets. An indicator matrix turns on multilabel
            classification.
        classes : array, shape (n_classes, )
            Classes across all calls to partial_fit.
            Can be obtained via `np.unique(y_all)`, where y_all is the
            target vector of the entire dataset.
            This argument is only required in the first call of partial_fit
            and can be omitted in the subsequent calls.
        Returns
        -------
        self
        """
        if _check_partial_fit_first_call(self, classes):
            if not hasattr(self.estimator, "partial_fit"):
                # Bug fix: the original formatted the undefined name
                # ``estimator`` (a NameError) and its message lacked a
                # space ("partial_fitmethod").
                raise ValueError("Base estimator {0}, doesn't have "
                                 "partial_fit method"
                                 "".format(self.estimator))
            self.estimators_ = [clone(self.estimator) for _ in range
                                (self.n_classes_)]
        # A sparse LabelBinarizer, with sparse_output=True, has been shown to
        # outperform or match a dense label binarizer in all cases and has
        # also resulted in less or equal memory consumption in the fit_ovr
        # function overall.
        self.label_binarizer_ = LabelBinarizer(sparse_output=True)
        Y = self.label_binarizer_.fit_transform(y)
        Y = Y.tocsc()
        columns = (col.toarray().ravel() for col in Y.T)
        # Classes absent from this chunk get an all-zero target column so
        # that every estimator still receives an update.
        self.estimators_ = Parallel(n_jobs=self.n_jobs)(delayed(
            _partial_fit_binary)(self.estimators_[i],
                                 X, next(columns) if self.classes_[i] in
                                 self.label_binarizer_.classes_ else
                                 np.zeros((1, len(y))))
            for i in range(self.n_classes_))
        return self

    def predict(self, X):
        """Predict multi-class targets using underlying estimators.
        Parameters
        ----------
        X : (sparse) array-like, shape = [n_samples, n_features]
            Data.
        Returns
        -------
        y : (sparse) array-like, shape = [n_samples, ], [n_samples, n_classes].
            Predicted multi-class targets.
        """
        check_is_fitted(self, 'estimators_')
        # Classifiers with a decision_function are thresholded at 0;
        # probabilistic estimators at 0.5.
        if (hasattr(self.estimators_[0], "decision_function") and
                is_classifier(self.estimators_[0])):
            thresh = 0
        else:
            thresh = .5
        n_samples = _num_samples(X)
        if self.label_binarizer_.y_type_ == "multiclass":
            # Single-label case: pick the class whose estimator scores
            # highest for each sample.
            maxima = np.empty(n_samples, dtype=float)
            maxima.fill(-np.inf)
            argmaxima = np.zeros(n_samples, dtype=int)
            for i, e in enumerate(self.estimators_):
                pred = _predict_binary(e, X)
                np.maximum(maxima, pred, out=maxima)
                argmaxima[maxima == pred] = i
            return self.classes_[np.array(argmaxima.T)]
        else:
            # Multilabel case: build a sparse indicator matrix of every
            # label whose score clears the threshold.
            indices = array.array('i')
            indptr = array.array('i', [0])
            for e in self.estimators_:
                indices.extend(np.where(_predict_binary(e, X) > thresh)[0])
                indptr.append(len(indices))
            data = np.ones(len(indices), dtype=int)
            indicator = sp.csc_matrix((data, indices, indptr),
                                      shape=(n_samples, len(self.estimators_)))
            return self.label_binarizer_.inverse_transform(indicator)

    def predict_proba(self, X):
        """Probability estimates.
        The returned estimates for all classes are ordered by label of classes.
        Note that in the multilabel case, each sample can have any number of
        labels. This returns the marginal probability that the given sample has
        the label in question. For example, it is entirely consistent that two
        labels both have a 90% probability of applying to a given sample.
        In the single label multiclass case, the rows of the returned matrix
        sum to 1.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        Returns
        -------
        T : (sparse) array-like, shape = [n_samples, n_classes]
            Returns the probability of the sample for each class in the model,
            where classes are ordered as they are in `self.classes_`.
        """
        check_is_fitted(self, 'estimators_')
        # Y[i, j] gives the probability that sample i has the label j.
        # In the multi-label case, these are not disjoint.
        Y = np.array([e.predict_proba(X)[:, 1] for e in self.estimators_]).T
        if len(self.estimators_) == 1:
            # Only one estimator, but we still want to return probabilities
            # for two classes.
            Y = np.concatenate(((1 - Y), Y), axis=1)
        if not self.multilabel_:
            # Then, probabilities should be normalized to 1.
            Y /= np.sum(Y, axis=1)[:, np.newaxis]
        return Y

    def decision_function(self, X):
        """Returns the distance of each sample from the decision boundary for
        each class. This can only be used with estimators which implement the
        decision_function method.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        Returns
        -------
        T : array-like, shape = [n_samples, n_classes]
        """
        check_is_fitted(self, 'estimators_')
        if not hasattr(self.estimators_[0], "decision_function"):
            raise AttributeError(
                "Base estimator doesn't have a decision_function attribute.")
        return np.array([est.decision_function(X).ravel()
                         for est in self.estimators_]).T

    @property
    def multilabel_(self):
        """Whether this is a multilabel classifier"""
        return self.label_binarizer_.y_type_.startswith('multilabel')

    @property
    def n_classes_(self):
        # Number of distinct classes seen by the label binarizer.
        return len(self.classes_)

    @property
    def coef_(self):
        # Stacked coefficient matrix of the underlying estimators,
        # sparse-aware.
        check_is_fitted(self, 'estimators_')
        if not hasattr(self.estimators_[0], "coef_"):
            raise AttributeError(
                "Base estimator doesn't have a coef_ attribute.")
        coefs = [e.coef_ for e in self.estimators_]
        if sp.issparse(coefs[0]):
            return sp.vstack(coefs)
        return np.vstack(coefs)

    @property
    def intercept_(self):
        # One row of intercepts per underlying estimator.
        check_is_fitted(self, 'estimators_')
        if not hasattr(self.estimators_[0], "intercept_"):
            raise AttributeError(
                "Base estimator doesn't have an intercept_ attribute.")
        return np.array([e.intercept_.ravel() for e in self.estimators_])
def _fit_ovo_binary(estimator, X, y, i, j):
    """Fit a single binary estimator (one-vs-one).
    Only the samples labelled ``i`` or ``j`` are kept; class ``i`` is
    recoded as 0 and class ``j`` as 1 before delegating to ``_fit_binary``.
    """
    cond = np.logical_or(y == i, y == j)
    y_pair = y[cond]
    y_binary = np.empty(y_pair.shape, np.int)
    y_binary[y_pair == i] = 0
    y_binary[y_pair == j] = 1
    indices = np.arange(X.shape[0])
    return _fit_binary(estimator, X[indices[cond]], y_binary,
                       classes=[i, j])
def _partial_fit_ovo_binary(estimator, X, y, i, j):
    """Partially fit a single binary estimator (one-vs-one).
    Samples labelled ``i`` become class 0 and samples labelled ``j`` become
    class 1; all other samples are dropped before delegating to
    ``_partial_fit_binary``.
    """
    cond = np.logical_or(y == i, y == j)
    y = y[cond]
    y_binary = np.zeros_like(y)
    y_binary[y == j] = 1
    # The original computed an unused ``ind = np.arange(X.shape[0])``;
    # the boolean mask indexes X directly.
    return _partial_fit_binary(estimator, X[cond], y_binary)
class OneVsOneClassifier(BaseEstimator, ClassifierMixin, MetaEstimatorMixin):
    """One-vs-one multiclass strategy
    This strategy consists in fitting one classifier per class pair.
    At prediction time, the class which received the most votes is selected.
    Since it requires to fit `n_classes * (n_classes - 1) / 2` classifiers,
    this method is usually slower than one-vs-the-rest, due to its
    O(n_classes^2) complexity. However, this method may be advantageous for
    algorithms such as kernel algorithms which don't scale well with
    `n_samples`. This is because each individual learning problem only involves
    a small subset of the data whereas, with one-vs-the-rest, the complete
    dataset is used `n_classes` times.
    Read more in the :ref:`User Guide <ovo_classification>`.
    Parameters
    ----------
    estimator : estimator object
        An estimator object implementing `fit` and one of `decision_function`
        or `predict_proba`.
    n_jobs : int, optional, default: 1
        The number of jobs to use for the computation. If -1 all CPUs are used.
        If 1 is given, no parallel computing code is used at all, which is
        useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
        used. Thus for n_jobs = -2, all CPUs but one are used.
    Attributes
    ----------
    estimators_ : list of `n_classes * (n_classes - 1) / 2` estimators
        Estimators used for predictions.
    classes_ : numpy array of shape [n_classes]
        Array containing labels.
    """
    def __init__(self, estimator, n_jobs=1):
        # estimator: template binary classifier, cloned once per class pair.
        self.estimator = estimator
        self.n_jobs = n_jobs
    def fit(self, X, y):
        """Fit underlying estimators.
        Parameters
        ----------
        X : (sparse) array-like, shape = [n_samples, n_features]
            Data.
        y : array-like, shape = [n_samples]
            Multi-class targets.
        Returns
        -------
        self
        """
        y = np.asarray(y)
        check_consistent_length(X, y)
        self.classes_ = np.unique(y)
        n_classes = self.classes_.shape[0]
        # Fit one binary estimator per unordered class pair (i, j), i < j;
        # list order matches the pair order used in decision_function.
        self.estimators_ = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_ovo_binary)(
                self.estimator, X, y, self.classes_[i], self.classes_[j])
            for i in range(n_classes) for j in range(i + 1, n_classes))
        return self
    def partial_fit(self, X, y, classes=None):
        """Partially fit underlying estimators
        Should be used when memory is inefficient to train all data. Chunks
        of data can be passed in several iteration, where the first call
        should have an array of all target variables.
        Parameters
        ----------
        X : (sparse) array-like, shape = [n_samples, n_features]
            Data.
        y : array-like, shape = [n_samples]
            Multi-class targets.
        classes : array, shape (n_classes, )
            Classes across all calls to partial_fit.
            Can be obtained via `np.unique(y_all)`, where y_all is the
            target vector of the entire dataset.
            This argument is only required in the first call of partial_fit
            and can be omitted in the subsequent calls.
        Returns
        -------
        self
        """
        # First call: create one fresh clone of the base estimator per
        # class pair, i.e. n_classes * (n_classes - 1) / 2 estimators.
        if _check_partial_fit_first_call(self, classes):
            self.estimators_ = [clone(self.estimator) for i in
                                range(self.n_classes_ *
                                      (self.n_classes_-1) // 2)]
        y = np.asarray(y)
        check_consistent_length(X, y)
        check_classification_targets(y)
        # Pair each stored estimator with its (i, j) class pair in the
        # same order used at creation, and update it on this chunk.
        self.estimators_ = Parallel(n_jobs=self.n_jobs)(
            delayed(_partial_fit_ovo_binary)(
                estimator, X, y, self.classes_[i], self.classes_[j])
            for estimator, (i, j) in izip(self.estimators_, ((i, j) for i
                                          in range(self.n_classes_) for j in range
                                          (i + 1, self.n_classes_))))
        return self
    def predict(self, X):
        """Estimate the best class label for each sample in X.
        This is implemented as ``argmax(decision_function(X), axis=1)`` which
        will return the label of the class with most votes by estimators
        predicting the outcome of a decision for each possible class pair.
        Parameters
        ----------
        X : (sparse) array-like, shape = [n_samples, n_features]
            Data.
        Returns
        -------
        y : numpy array of shape [n_samples]
            Predicted multi-class targets.
        """
        Y = self.decision_function(X)
        # Column index of the highest decision value maps back to a label.
        return self.classes_[Y.argmax(axis=1)]
    def decision_function(self, X):
        """Decision function for the OneVsOneClassifier.
        The decision values for the samples are computed by adding the
        normalized sum of pair-wise classification confidence levels to the
        votes in order to disambiguate between the decision values when the
        votes for all the classes are equal leading to a tie.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        Returns
        -------
        Y : array-like, shape = [n_samples, n_classes]
        """
        check_is_fitted(self, 'estimators_')
        # One column per pairwise estimator, in the (i, j) fitting order.
        predictions = np.vstack([est.predict(X) for est in self.estimators_]).T
        confidences = np.vstack([_predict_binary(est, X) for est in self.estimators_]).T
        return _ovr_decision_function(predictions, confidences,
                                      len(self.classes_))
    @property
    def n_classes_(self):
        # Number of distinct target classes seen during fit/partial_fit.
        return len(self.classes_)
def _ovr_decision_function(predictions, confidences, n_classes):
"""Compute a continuous, tie-breaking ovr decision function.
It is important to include a continuous value, not only votes,
to make computing AUC or calibration meaningful.
Parameters
----------
predictions : array-like, shape (n_samples, n_classifiers)
Predicted classes for each binary classifier.
confidences : array-like, shape (n_samples, n_classifiers)
Decision functions or predicted probabilities for positive class
for each binary classifier.
n_classes : int
Number of classes. n_classifiers must be
``n_classes * (n_classes - 1 ) / 2``
"""
n_samples = predictions.shape[0]
votes = np.zeros((n_samples, n_classes))
sum_of_confidences = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
sum_of_confidences[:, i] -= confidences[:, k]
sum_of_confidences[:, j] += confidences[:, k]
votes[predictions[:, k] == 0, i] += 1
votes[predictions[:, k] == 1, j] += 1
k += 1
max_confidences = sum_of_confidences.max()
min_confidences = sum_of_confidences.min()
if max_confidences == min_confidences:
return votes
# Scale the sum_of_confidences to (-0.5, 0.5) and add it with votes.
# The motivation is to use confidence levels as a way to break ties in
# the votes without switching any decision made based on a difference
# of 1 vote.
eps = np.finfo(sum_of_confidences.dtype).eps
max_abs_confidence = max(abs(max_confidences), abs(min_confidences))
scale = (0.5 - eps) / max_abs_confidence
return votes + sum_of_confidences * scale
class OutputCodeClassifier(BaseEstimator, ClassifierMixin, MetaEstimatorMixin):
    """(Error-Correcting) Output-Code multiclass strategy
    Output-code based strategies consist in representing each class with a
    binary code (an array of 0s and 1s). At fitting time, one binary
    classifier per bit in the code book is fitted. At prediction time, the
    classifiers are used to project new points in the class space and the class
    closest to the points is chosen. The main advantage of these strategies is
    that the number of classifiers used can be controlled by the user, either
    for compressing the model (0 < code_size < 1) or for making the model more
    robust to errors (code_size > 1). See the documentation for more details.
    Read more in the :ref:`User Guide <ecoc>`.
    Parameters
    ----------
    estimator : estimator object
        An estimator object implementing `fit` and one of `decision_function`
        or `predict_proba`.
    code_size : float
        Percentage of the number of classes to be used to create the code book.
        A number between 0 and 1 will require fewer classifiers than
        one-vs-the-rest. A number greater than 1 will require more classifiers
        than one-vs-the-rest.
    random_state : numpy.RandomState, optional
        The generator used to initialize the codebook. Defaults to
        numpy.random.
    n_jobs : int, optional, default: 1
        The number of jobs to use for the computation. If -1 all CPUs are used.
        If 1 is given, no parallel computing code is used at all, which is
        useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
        used. Thus for n_jobs = -2, all CPUs but one are used.
    Attributes
    ----------
    estimators_ : list of `int(n_classes * code_size)` estimators
        Estimators used for predictions.
    classes_ : numpy array of shape [n_classes]
        Array containing labels.
    code_book_ : numpy array of shape [n_classes, code_size]
        Binary array containing the code of each class.
    References
    ----------
    .. [1] "Solving multiclass learning problems via error-correcting output
       codes",
       Dietterich T., Bakiri G.,
       Journal of Artificial Intelligence Research 2,
       1995.
    .. [2] "The error coding method and PICTs",
       James G., Hastie T.,
       Journal of Computational and Graphical statistics 7,
       1998.
    .. [3] "The Elements of Statistical Learning",
       Hastie T., Tibshirani R., Friedman J., page 606 (second-edition)
       2008.
    """
    def __init__(self, estimator, code_size=1.5, random_state=None, n_jobs=1):
        self.estimator = estimator
        self.code_size = code_size
        self.random_state = random_state
        self.n_jobs = n_jobs

    def fit(self, X, y):
        """Fit underlying estimators.
        Parameters
        ----------
        X : (sparse) array-like, shape = [n_samples, n_features]
            Data.
        y : numpy array of shape [n_samples]
            Multi-class targets.
        Returns
        -------
        self
        """
        if self.code_size <= 0:
            # Bug fix: the message used positional field {1} with a single
            # format argument, which raised IndexError instead of reporting
            # the offending value; {0} is correct.
            raise ValueError("code_size should be greater than 0, got {0}"
                             "".format(self.code_size))
        _check_estimator(self.estimator)
        random_state = check_random_state(self.random_state)
        self.classes_ = np.unique(y)
        n_classes = self.classes_.shape[0]
        code_size_ = int(n_classes * self.code_size)
        # FIXME: there are more elaborate methods than generating the codebook
        # randomly.
        self.code_book_ = random_state.random_sample((n_classes, code_size_))
        self.code_book_[self.code_book_ > 0.5] = 1
        if hasattr(self.estimator, "decision_function"):
            # Margin-based estimators train on {-1, 1} codes...
            self.code_book_[self.code_book_ != 1] = -1
        else:
            # ...probabilistic estimators on {0, 1} codes.
            self.code_book_[self.code_book_ != 1] = 0
        classes_index = dict((c, i) for i, c in enumerate(self.classes_))
        # Y[i] is the code word of sample i's class. Use the builtin int:
        # np.int was a deprecated alias for it and has been removed from
        # recent NumPy releases.
        Y = np.array([self.code_book_[classes_index[y[i]]]
                      for i in range(X.shape[0])], dtype=int)
        # One binary problem per bit of the code book.
        self.estimators_ = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_binary)(self.estimator, X, Y[:, i])
            for i in range(Y.shape[1]))
        return self

    def predict(self, X):
        """Predict multi-class targets using underlying estimators.
        Parameters
        ----------
        X : (sparse) array-like, shape = [n_samples, n_features]
            Data.
        Returns
        -------
        y : numpy array of shape [n_samples]
            Predicted multi-class targets.
        """
        check_is_fitted(self, 'estimators_')
        Y = np.array([_predict_binary(e, X) for e in self.estimators_]).T
        # The class whose code word is nearest (Euclidean) wins.
        pred = euclidean_distances(Y, self.code_book_).argmin(axis=1)
        return self.classes_[pred]
|
"""Materialized Path Trees"""
import sys
import operator
if sys.version_info >= (3, 0):
from functools import reduce
from django.core import serializers
from django.db import models, transaction, connection, IntegrityError
from django.db.models import F, Q
from django.utils.translation import ugettext_noop as _
from treebeard.numconv import NumConv
from treebeard.models import Node
from treebeard.exceptions import InvalidMoveToDescendant, PathOverflow
class MP_NodeQuerySet(models.query.QuerySet):
    """
    Custom queryset for the tree node manager.
    Needed only for the customized delete method.
    """
    def delete(self):
        """
        Custom delete method, will remove all descendant nodes to ensure a
        consistent tree (no orphans)
        :returns: ``None``
        """
        # we'll have to manually run through all the nodes that are going
        # to be deleted and remove nodes from the list if an ancestor is
        # already getting removed, since that would be redundant
        removed = {}
        for node in self.order_by('depth', 'path'):
            found = False
            # Walk every ancestor prefix of this node's materialized path.
            for depth in range(1, int(len(node.path) / node.steplen)):
                path = node._get_basepath(node.path, depth)
                if path in removed:
                    # we are already removing a parent of this node
                    # skip
                    found = True
                    break
            if not found:
                removed[node.path] = node
        # ok, got the minimal list of nodes to remove...
        # we must also remove their children
        # and update every parent node's numchild attribute
        # LOTS OF FUN HERE!
        parents = {}  # cache of already-fetched parent nodes, keyed by path
        toremove = []
        for path, node in removed.items():
            parentpath = node._get_basepath(node.path, node.depth - 1)
            if parentpath:
                if parentpath not in parents:
                    parents[parentpath] = node.get_parent(True)
                parent = parents[parentpath]
                if parent and parent.numchild > 0:
                    parent.numchild -= 1
                    parent.save()
            if node.is_leaf():
                toremove.append(Q(path=node.path))
            else:
                # Non-leaf: a path-prefix match removes the whole subtree.
                toremove.append(Q(path__startswith=node.path))
        # Django will handle this as a SELECT and then a DELETE of
        # ids, and will deal with removing related objects
        if toremove:
            qset = self.model.objects.filter(reduce(operator.or_, toremove))
            super(MP_NodeQuerySet, qset).delete()
        transaction.commit_unless_managed()
class MP_NodeManager(models.Manager):
    """Custom manager for nodes."""

    def get_query_set(self):
        """Return the custom tree queryset, ordered by ``path`` (DFS order)."""
        qset = MP_NodeQuerySet(self.model)
        return qset.order_by('path')
class MP_AddHandler(object):
    """Base class for the add/move handlers.

    Collects pending ``(sql, vals)`` statement pairs in ``stmts`` for
    subclasses to execute later.
    """

    def __init__(self):
        # per-instance list of queued SQL statements
        self.stmts = list()
class MP_ComplexAddMoveHandler(MP_AddHandler):
    """Base for handlers that must shift sibling branches around
    (sibling insertion and branch moves)."""

    def run_sql_stmts(self):
        # execute every queued (sql, vals) pair on a write cursor, in order
        cursor = self.node_cls._get_database_cursor('write')
        for sql, vals in self.stmts:
            cursor.execute(sql, vals)

    def get_sql_update_numchild(self, path, incdec='inc'):
        """
        :returns: The sql needed to update the numchild value of a node

        :param path: path of the node whose counter changes
        :param incdec: ``'inc'`` to add 1, ``'dec'`` to subtract 1
        """
        sql = "UPDATE %s SET numchild=numchild%s1"\
              " WHERE path=%%s" % (
                  connection.ops.quote_name(self.node_cls._meta.db_table),
                  {'inc': '+', 'dec': '-'}[incdec])
        vals = [path]
        return sql, vals

    def reorder_nodes_before_add_or_move(self, pos, newpos, newdepth, target,
                                         siblings, oldpath=None,
                                         movebranch=False):
        """
        Handles the reordering of nodes and branches when adding/moving
        nodes.

        Queues the necessary SQL in ``self.stmts`` (it does NOT run it).

        :returns: A tuple containing the old path and the new path.
        """
        if (
                (pos == 'last-sibling') or
                (pos == 'right' and target == target.get_last_sibling())
        ):
            # easy, the last node
            last = target.get_last_sibling()
            newpath = last._inc_path()
            if movebranch:
                self.stmts.append(
                    self.get_sql_newpath_in_branches(oldpath, newpath))
        else:
            # do the UPDATE dance
            if newpos is None:
                siblings = target.get_siblings()
                siblings = {'left': siblings.filter(path__gte=target.path),
                            'right': siblings.filter(path__gt=target.path),
                            'first-sibling': siblings}[pos]
                basenum = target._get_lastpos_in_path()
                newpos = {'first-sibling': 1,
                          'left': basenum,
                          'right': basenum + 1}[pos]
            newpath = self.node_cls._get_path(target.path, newdepth, newpos)
            # If the move is amongst siblings and is to the left and there
            # are siblings to the right of its new position then to be on
            # the safe side we temporarily dump it on the end of the list
            tempnewpath = None
            if movebranch and len(oldpath) == len(newpath):
                parentoldpath = self.node_cls._get_basepath(
                    oldpath,
                    int(len(oldpath) / self.node_cls.steplen) - 1
                )
                parentnewpath = self.node_cls._get_basepath(
                    newpath, newdepth - 1)
                if (
                        parentoldpath == parentnewpath and
                        siblings and
                        newpath < oldpath
                ):
                    last = target.get_last_sibling()
                    basenum = last._get_lastpos_in_path()
                    tempnewpath = self.node_cls._get_path(
                        newpath, newdepth, basenum + 2)
                    self.stmts.append(
                        self.get_sql_newpath_in_branches(
                            oldpath, tempnewpath))
            # Optimisation to only move siblings which need moving
            # (i.e. if we've got holes, allow them to compress)
            movesiblings = []
            priorpath = newpath
            for node in siblings:
                # If the path of the node is already greater than the path
                # of the previous node it doesn't need shifting
                if node.path > priorpath:
                    break
                # It does need shifting, so add to the list
                movesiblings.append(node)
                # Calculate the path that it would be moved to, as that's
                # the next "priorpath"
                priorpath = node._inc_path()
            # shift right-to-left so paths never collide mid-update
            movesiblings.reverse()
            for node in movesiblings:
                # moving the siblings (and their branches) at the right of the
                # related position one step to the right
                sql, vals = self.get_sql_newpath_in_branches(
                    node.path, node._inc_path())
                self.stmts.append((sql, vals))
                if movebranch:
                    if oldpath.startswith(node.path):
                        # if moving to a parent, update oldpath since we just
                        # increased the path of the entire branch
                        oldpath = vals[0] + oldpath[len(vals[0]):]
                    if target.path.startswith(node.path):
                        # and if we moved the target, update the object
                        # django made for us, since the update won't do it
                        # maybe useful in loops
                        target.path = vals[0] + target.path[len(vals[0]):]
            if movebranch:
                # node to move
                if tempnewpath:
                    self.stmts.append(
                        self.get_sql_newpath_in_branches(
                            tempnewpath, newpath))
                else:
                    self.stmts.append(
                        self.get_sql_newpath_in_branches(
                            oldpath, newpath))
        return oldpath, newpath

    def get_sql_newpath_in_branches(self, oldpath, newpath):
        """
        :returns: The sql needed to move a branch to another position.

        .. note::

           The generated sql will only update the depth values if needed.
        """
        vendor = self.node_cls.get_database_vendor('write')
        sql1 = "UPDATE %s SET" % (
            connection.ops.quote_name(self.node_cls._meta.db_table), )
        # <3 "standard" sql
        if vendor == 'sqlite':
            # I know that the third argument in SUBSTR (LENGTH(path)) is
            # awful, but sqlite fails without it:
            # OperationalError: wrong number of arguments to function substr()
            # even when the documentation says that 2 arguments are valid:
            # http://www.sqlite.org/lang_corefunc.html
            sqlpath = "%s||SUBSTR(path, %s, LENGTH(path))"
        elif vendor == 'mysql':
            # hooray for mysql ignoring standards in their default
            # configuration!
            # to make || work as it should, enable ansi mode
            # http://dev.mysql.com/doc/refman/5.0/en/ansi-mode.html
            sqlpath = "CONCAT(%s, SUBSTR(path, %s))"
        else:
            sqlpath = "%s||SUBSTR(path, %s)"
        sql2 = ["path=%s" % (sqlpath, )]
        vals = [newpath, len(oldpath) + 1]
        if len(oldpath) != len(newpath) and vendor != 'mysql':
            # when using mysql, this won't update the depth and it has to be
            # done in another query
            # doesn't even work with sql_mode='ANSI,TRADITIONAL'
            # TODO: FIND OUT WHY?!?? right now I'm just blaming mysql
            sql2.append("depth=LENGTH(%s)/%%s" % (sqlpath, ))
            vals.extend([newpath, len(oldpath) + 1, self.node_cls.steplen])
        sql3 = "WHERE path LIKE %s"
        vals.extend([oldpath + '%'])
        sql = '%s %s %s' % (sql1, ', '.join(sql2), sql3)
        return sql, vals
class MP_AddRootHandler(MP_AddHandler):
    """Handler that adds a new root node to the tree."""

    def __init__(self, cls, **kwargs):
        super(MP_AddRootHandler, self).__init__()
        self.cls = cls
        self.kwargs = kwargs

    def process(self):
        """
        Creates, saves and returns the new root node.

        :raise KeyError: when kwargs doesn't carry an object id
        """
        object_id = self.kwargs.get(MP_Node.OBJECT_NAME)
        if not object_id:
            raise KeyError('There is no object id')
        # do we have a root node already?
        last_root = self.cls.get_last_root_node(object_id)
        if last_root and last_root.node_order_by:
            # there are root nodes and node_order_by has been set
            # delegate sorted insertion to add_sibling
            return last_root.add_sibling('sorted-sibling', **self.kwargs)
        if last_root:
            # adding the new root node as the last one
            newpath = last_root._inc_path()
        else:
            # adding the first root node
            newpath = self.cls._get_path(None, 1, 1)
        # creating the new object
        newobj = self.cls(**self.kwargs)
        newobj.depth = 1
        newobj.path = newpath
        # saving the instance before returning it
        newobj.save()
        transaction.commit_unless_managed()
        return newobj
class MP_AddChildHandler(MP_AddHandler):
    """Handler that adds a child node under an existing node."""

    def __init__(self, node, **kwargs):
        super(MP_AddChildHandler, self).__init__()
        self.node = node
        self.node_cls = node.__class__
        self.kwargs = kwargs

    def process(self):
        """
        Creates, saves and returns the new child node.

        :raise KeyError: when the child's object_id differs from the parent's
        :raise PathOverflow: when the new node would be too deep in the tree
        """
        if self.node.object_id != self.kwargs.get('object_id', False):
            raise KeyError("The object_id for parent and child must be the same")
        if self.node_cls.node_order_by and not self.node.is_leaf():
            # there are child nodes and node_order_by has been set
            # delegate sorted insertion to add_sibling
            self.node.numchild += 1
            return self.node.get_last_child().add_sibling(
                'sorted-sibling', **self.kwargs)
        # creating a new object
        newobj = self.node_cls(**self.kwargs)
        newobj.depth = self.node.depth + 1
        if self.node.is_leaf():
            # the node had no children, adding the first child
            newobj.path = self.node_cls._get_path(
                self.node.path, newobj.depth, 1)
            max_length = self.node_cls._meta.get_field('path').max_length
            if len(newobj.path) > max_length:
                raise PathOverflow(
                    _('The new node is too deep in the tree, try'
                      ' increasing the path.max_length property'
                      ' and UPDATE your database'))
        else:
            # adding the new child as the last one
            newobj.path = self.node.get_last_child()._inc_path()
        # saving the instance before returning it
        newobj.save()
        newobj._cached_parent_obj = self.node
        # increment the parent's counter atomically in the database
        # (F() avoids a read-modify-write race)
        self.node_cls.objects.filter(
            path=self.node.path, object_id=self.node.object_id).update(numchild=F('numchild')+1)
        # we increase the numchild value of the object in memory
        self.node.numchild += 1
        transaction.commit_unless_managed()
        return newobj
class MP_AddSiblingHandler(MP_ComplexAddMoveHandler):
    """Handler that adds a new node as a sibling of an existing node."""

    def __init__(self, node, pos, **kwargs):
        super(MP_AddSiblingHandler, self).__init__()
        self.node = node
        self.node_cls = node.__class__
        self.pos = pos
        self.kwargs = kwargs

    def process(self):
        """Creates, saves and returns the new sibling node."""
        self.pos = self.node._prepare_pos_var_for_add_sibling(self.pos)
        # creating a new object
        newobj = self.node_cls(**self.kwargs)
        newobj.depth = self.node.depth
        if self.pos == 'sorted-sibling':
            siblings = self.node.get_sorted_pos_queryset(
                self.node.get_siblings(), newobj)
            try:
                newpos = siblings.all()[0]._get_lastpos_in_path()
            except IndexError:
                # no sibling sorts after the new node
                newpos = None
            if newpos is None:
                self.pos = 'last-sibling'
        else:
            newpos, siblings = None, []
        # queue the sibling-shifting SQL; we only need the resulting path
        _, newpath = self.reorder_nodes_before_add_or_move(
            self.pos, newpos, self.node.depth, self.node, siblings, None,
            False)
        parentpath = self.node._get_basepath(newpath, self.node.depth - 1)
        if parentpath:
            # non-root node: the parent gains one child
            self.stmts.append(
                self.get_sql_update_numchild(parentpath, 'inc'))
        self.run_sql_stmts()
        # saving the instance before returning it
        newobj.path = newpath
        newobj.save()
        transaction.commit_unless_managed()
        return newobj
class MP_MoveHandler(MP_ComplexAddMoveHandler):
    """Handler that moves a node (and its branch) to a new position."""

    def __init__(self, node, target, pos=None):
        super(MP_MoveHandler, self).__init__()
        self.node = node
        self.node_cls = node.__class__
        self.target = target
        self.pos = pos

    def process(self):
        """
        Moves ``self.node`` relative to ``self.target``.

        :raise InvalidMoveToDescendant: when the target is a descendant of
                                        the node being moved
        :returns: ``None``
        """
        self.pos = self.node._prepare_pos_var_for_move(self.pos)
        oldpath = self.node.path
        # initialize variables and if moving to a child, updates "move to
        # child" to become a "move to sibling" if possible (if it can't
        # be done, it means that we are adding the first child)
        newdepth, siblings, newpos = self.update_move_to_child_vars()
        if self.target.is_descendant_of(self.node):
            raise InvalidMoveToDescendant(
                _("Can't move node to a descendant."))
        if (
            oldpath == self.target.path and
            (
                (self.pos == 'left') or
                (
                    self.pos in ('right', 'last-sibling') and
                    self.target.path == self.target.get_last_sibling().path
                ) or
                (
                    self.pos == 'first-sibling' and
                    self.target.path == self.target.get_first_sibling().path
                )
            )
        ):
            # special cases, not actually moving the node so no need to UPDATE
            return
        if self.pos == 'sorted-sibling':
            siblings = self.node.get_sorted_pos_queryset(
                self.target.get_siblings(), self.node)
            try:
                newpos = siblings.all()[0]._get_lastpos_in_path()
            except IndexError:
                # no sibling sorts after the node: append at the end
                newpos = None
            if newpos is None:
                self.pos = 'last-sibling'
        # generate the sql that will do the actual moving of nodes
        oldpath, newpath = self.reorder_nodes_before_add_or_move(
            self.pos, newpos, newdepth, self.target, siblings, oldpath, True)
        # updates needed for mysql and children count in parents
        self.sanity_updates_after_move(oldpath, newpath)
        self.run_sql_stmts()
        transaction.commit_unless_managed()

    def sanity_updates_after_move(self, oldpath, newpath):
        """
        Updates the list of sql statements needed after moving nodes.

        1. :attr:`depth` updates *ONLY* needed by mysql databases (*sigh*)
        2. update the number of children of parent nodes
        """
        if (
                self.node_cls.get_database_vendor('write') == 'mysql' and
                len(oldpath) != len(newpath)
        ):
            # no words can describe how dumb mysql is
            # we must update the depth of the branch in a different query
            self.stmts.append(
                self.get_mysql_update_depth_in_branch(newpath))
        oldparentpath = self.node_cls._get_parent_path_from_path(oldpath)
        newparentpath = self.node_cls._get_parent_path_from_path(newpath)
        if (
                (not oldparentpath and newparentpath) or
                (oldparentpath and not newparentpath) or
                (oldparentpath != newparentpath)
        ):
            # node changed parent, updating count
            if oldparentpath:
                self.stmts.append(
                    self.get_sql_update_numchild(oldparentpath, 'dec'))
            if newparentpath:
                self.stmts.append(
                    self.get_sql_update_numchild(newparentpath, 'inc'))

    def update_move_to_child_vars(self):
        """Update preliminar vars in :meth:`move` when moving to a child

        :returns: a ``(newdepth, siblings, newpos)`` tuple; rewrites
                  ``self.pos``/``self.target`` so the move becomes a
                  sibling move whenever the target already has children.
        """
        newdepth = self.target.depth
        newpos = None
        siblings = []
        if self.pos in ('first-child', 'last-child', 'sorted-child'):
            # moving to a child
            parent = self.target
            newdepth += 1
            if self.target.is_leaf():
                # moving as a target's first child
                newpos = 1
                self.pos = 'first-sibling'
                siblings = self.node_cls.objects.none()
            else:
                self.target = self.target.get_last_child()
                self.pos = {
                    'first-child': 'first-sibling',
                    'last-child': 'last-sibling',
                    'sorted-child': 'sorted-sibling'}[self.pos]
            # this is not for save(), since if needed, will be handled with a
            # custom UPDATE, this is only here to update django's object,
            # should be useful in loops
            parent.numchild += 1
        return newdepth, siblings, newpos

    def get_mysql_update_depth_in_branch(self, path):
        """
        :returns: The sql needed to update the depth of all the nodes in a
                  branch.
        """
        sql = "UPDATE %s SET depth=LENGTH(path)/%%s WHERE path LIKE %%s" % (
            connection.ops.quote_name(self.node_cls._meta.db_table), )
        vals = [self.node_cls.steplen, path + '%']
        return sql, vals
class MP_Node(Node):
    """Abstract model to create your own Materialized Path Trees."""

    # TODO: Get object field by this name?
    OBJECT_NAME = 'object_id'
    # number of characters that encode one tree step in ``path``
    steplen = 4
    # characters used to encode a step number inside ``path``
    alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    # attributes used to keep siblings sorted on insertion (empty = manual)
    node_order_by = []
    path = models.CharField(max_length=255)
    depth = models.PositiveSmallIntegerField()
    numchild = models.PositiveSmallIntegerField(default=0)
    gap = 1
    objects = MP_NodeManager()
    # cached NumConv converter, built lazily by numconv_obj()
    numconv_obj_ = None

    def save(self, *args, **kwargs):
        """Save the node, retrying with the next sibling path on collision.

        NOTE(review): the retry fires on *any* IntegrityError, not only a
        duplicate ``path`` — confirm a non-path constraint violation can't
        recurse here indefinitely.
        """
        try:
            return super(MP_Node, self).save(*args, **kwargs)
        except IntegrityError as ex:
            self.path = self._inc_path()
            self.save(*args, **kwargs)

    @classmethod
    def _int2str(cls, num):
        """:returns: ``num`` encoded as a string using the class alphabet"""
        return cls.numconv_obj().int2str(num)

    @classmethod
    def _str2int(cls, num):
        """:returns: the integer value of an encoded step string"""
        return cls.numconv_obj().str2int(num)

    @classmethod
    def numconv_obj(cls):
        """:returns: the shared NumConv instance for the class alphabet"""
        if cls.numconv_obj_ is None:
            cls.numconv_obj_ = NumConv(len(cls.alphabet), cls.alphabet)
        return cls.numconv_obj_

    @classmethod
    def add_root(cls, **kwargs):
        """
        Adds a root node to the tree. If there is no sharded id,
        it will be generated with 'generate_id' class method

        :raise PathOverflow: when no more root objects can be added
        :raise KeyError: when kwargs doesn't contain object_id
        """
        return MP_AddRootHandler(cls, **kwargs).process()

    @classmethod
    def dump_bulk(cls, parent=None, keep_ids=True):
        """Dumps a tree branch to a python data structure.

        :param parent: branch root node, or ``None`` for the whole tree
        :param keep_ids: include each node's primary key in the result
        """
        # Because of fix_tree, this method assumes that the depth
        # and numchild properties in the nodes can be incorrect,
        # so no helper methods are used
        qset = cls._get_serializable_model().objects.all()
        if parent:
            qset = qset.filter(path__startswith=parent.path)
        ret, lnk = [], {}
        for pyobj in serializers.serialize('python', qset):
            # django's serializer stores the attributes in 'fields'
            fields = pyobj['fields']
            path = fields['path']
            depth = int(len(path) / cls.steplen)
            # this will be useless in load_bulk
            del fields['depth']
            del fields['path']
            del fields['numchild']
            if 'id' in fields:
                # this happens immediately after a load_bulk
                del fields['id']
            newobj = {'data': fields}
            if keep_ids:
                newobj['id'] = pyobj['pk']
            if (not parent and depth == 1) or\
               (parent and len(path) == len(parent.path)):
                ret.append(newobj)
            else:
                # attach to the parent dict built in a previous iteration
                # (the serializer emits nodes in path order)
                parentpath = cls._get_basepath(path, depth - 1)
                parentobj = lnk[parentpath]
                if 'children' not in parentobj:
                    parentobj['children'] = []
                parentobj['children'].append(newobj)
            lnk[path] = newobj
        return ret

    @classmethod
    def find_problems(cls):
        """
        Checks for problems in the tree structure, problems can occur when:

           1. your code breaks and you get incomplete transactions (always
              use transactions!)

           2. changing the ``steplen`` value in a model (you must
              :meth:`dump_bulk` first, change ``steplen`` and then
              :meth:`load_bulk`

        :returns: A tuple of five lists:

                  1. a list of ids of nodes with characters not found in the
                     ``alphabet``

                  2. a list of ids of nodes when a wrong ``path`` length
                     according to ``steplen``

                  3. a list of ids of orphaned nodes

                  4. a list of ids of nodes with the wrong depth value for
                     their path

                  5. a list of ids nodes that report a wrong number of children
        """
        evil_chars, bad_steplen, orphans = [], [], []
        wrong_depth, wrong_numchild = [], []
        for node in cls.objects.all():
            found_error = False
            for char in node.path:
                if char not in cls.alphabet:
                    evil_chars.append(node.pk)
                    found_error = True
                    break
            if found_error:
                continue
            if len(node.path) % cls.steplen:
                bad_steplen.append(node.pk)
                continue
            try:
                node.get_parent(True)
            except cls.DoesNotExist:
                orphans.append(node.pk)
                continue
            if node.depth != int(len(node.path) / cls.steplen):
                wrong_depth.append(node.pk)
                continue
            # count only direct children (path prefix match AND depth+1)
            real_numchild = cls.objects.filter(
                path__range=cls._get_children_path_interval(node.path)
            ).extra(
                where=['LENGTH(path)/%d=%d' % (cls.steplen, node.depth + 1)]
            ).count()
            if real_numchild != node.numchild:
                wrong_numchild.append(node.pk)
                continue
        return evil_chars, bad_steplen, orphans, wrong_depth, wrong_numchild

    @classmethod
    def fix_tree(cls, destructive=False):
        """
        Solves some problems that can appear when transactions are not used and
        a piece of code breaks, leaving the tree in an inconsistent state.

        The problems this method solves are:

           1. Nodes with an incorrect ``depth`` or ``numchild`` values due to
              incorrect code and lack of database transactions.
           2. "Holes" in the tree. This is normal if you move/delete nodes a
              lot. Holes in a tree don't affect performance,
           3. Incorrect ordering of nodes when ``node_order_by`` is enabled.
              Ordering is enforced on *node insertion*, so if an attribute in
              ``node_order_by`` is modified after the node is inserted, the
              tree ordering will be inconsistent.

        :param destructive:
            A boolean value. If True, a more agressive fix_tree method will be
            attemped. If False (the default), it will use a safe (and fast!)
            fix approach, but it will only solve the ``depth`` and
            ``numchild`` nodes, it won't fix the tree holes or broken path
            ordering.

            .. warning::

               Currently what the ``destructive`` method does is:

               1. Backup the tree with :meth:`dump_data`
               2. Remove all nodes in the tree.
               3. Restore the tree with :meth:`load_data`

               So, even when the primary keys of your nodes will be preserved,
               this method isn't foreign-key friendly. That needs complex
               in-place tree reordering, not available at the moment (hint:
               patches are welcome).
        """
        if destructive:
            dump = cls.dump_bulk(None, True)
            cls.objects.all().delete()
            cls.load_bulk(dump, None, True)
        else:
            cursor = cls._get_database_cursor('write')
            # fix the depth field
            # we need the WHERE to speed up postgres
            sql = "UPDATE %s "\
                  "SET depth=LENGTH(path)/%%s "\
                  "WHERE depth!=LENGTH(path)/%%s" % (
                      connection.ops.quote_name(cls._meta.db_table), )
            vals = [cls.steplen, cls.steplen]
            cursor.execute(sql, vals)
            # fix the numchild field
            vals = ['_' * cls.steplen]
            # the cake and sql portability are a lie
            if cls.get_database_vendor('read') == 'mysql':
                sql = "SELECT tbn1.path, tbn1.numchild, ("\
                      "SELECT COUNT(1) "\
                      "FROM %(table)s AS tbn2 "\
                      "WHERE tbn2.path LIKE "\
                      "CONCAT(tbn1.path, %%s)) AS real_numchild "\
                      "FROM %(table)s AS tbn1 "\
                      "HAVING tbn1.numchild != real_numchild" % {
                          'table': connection.ops.quote_name(
                              cls._meta.db_table)}
            else:
                subquery = "(SELECT COUNT(1) FROM %(table)s AS tbn2"\
                           " WHERE tbn2.path LIKE tbn1.path||%%s)"
                sql = ("SELECT tbn1.path, tbn1.numchild, " + subquery +
                       " FROM %(table)s AS tbn1 WHERE tbn1.numchild != " +
                       subquery)
                sql = sql % {
                    'table': connection.ops.quote_name(cls._meta.db_table)}
                # we include the subquery twice
                vals *= 2
            cursor.execute(sql, vals)
            sql = "UPDATE %(table)s "\
                  "SET numchild=%%s "\
                  "WHERE path=%%s" % {
                      'table': connection.ops.quote_name(cls._meta.db_table)}
            for node_data in cursor.fetchall():
                vals = [node_data[2], node_data[0]]
                cursor.execute(sql, vals)
            transaction.commit_unless_managed()

    @classmethod
    def get_tree(cls, object_id, parent=None):
        """
        :returns:

            A *queryset* of nodes ordered as DFS, including the parent.
            If no parent is given, the entire tree is returned.
        """
        if parent is None:
            # return the entire tree
            return cls.objects.all()
        if parent.is_leaf():
            return cls.objects.filter(pk=parent.pk)
        return cls.objects.filter(path__startswith=parent.path,
                                  depth__gte=parent.depth,
                                  object_id=object_id)

    @classmethod
    def get_root_nodes(cls, object_id=None):
        """:returns: A queryset containing the root nodes in the tree."""
        if object_id:
            return cls.objects.filter(depth=1, object_id=object_id)
        else:
            return cls.objects.filter(depth=1)

    @classmethod
    def get_descendants_group_count(cls, parent=None):
        """
        Helper for a very common case: get a group of siblings and the number
        of *descendants* in every sibling.
        """
        #~
        # disclaimer: this is the FOURTH implementation I wrote for this
        # function. I really tried to make it return a queryset, but doing so
        # with a *single* query isn't trivial with Django's ORM.

        # ok, I DID manage to make Django's ORM return a queryset here,
        # defining two querysets, passing one subquery in the tables parameters
        # of .extra() of the second queryset, using the undocumented order_by
        # feature, and using a HORRIBLE hack to avoid django quoting the
        # subquery as a table, BUT (and there is always a but) the hack didn't
        # survive turning the QuerySet into a ValuesQuerySet, so I just used
        # good old SQL.
        # NOTE: in case there is interest, the hack to avoid django quoting the
        # subquery as a table, was adding the subquery to the alias cache of
        # the queryset's query object:
        #
        #     qset.query.quote_cache[subquery] = subquery
        #
        # If there is a better way to do this in an UNMODIFIED django 1.0, let
        # me know.
        #~
        if parent:
            depth = parent.depth + 1
            params = cls._get_children_path_interval(parent.path)
            extrand = 'AND path BETWEEN %s AND %s'
        else:
            depth = 1
            params = []
            extrand = ''
        sql = 'SELECT * FROM %(table)s AS t1 INNER JOIN '\
              ' (SELECT '\
              '   SUBSTR(path, 1, %(subpathlen)s) AS subpath, '\
              '   COUNT(1)-1 AS count '\
              '   FROM %(table)s '\
              '   WHERE depth >= %(depth)s %(extrand)s'\
              '   GROUP BY subpath) AS t2 '\
              ' ON t1.path=t2.subpath '\
              ' ORDER BY t1.path' % {
                  'table': connection.ops.quote_name(cls._meta.db_table),
                  'subpathlen': depth * cls.steplen,
                  'depth': depth,
                  'extrand': extrand}
        cursor = cls._get_database_cursor('write')
        cursor.execute(sql, params)
        ret = []
        field_names = [field[0] for field in cursor.description]
        for node_data in cursor.fetchall():
            # last two columns belong to the joined subquery, not the model
            node = cls(**dict(zip(field_names, node_data[:-2])))
            node.descendants_count = node_data[-1]
            ret.append(node)
        transaction.commit_unless_managed()
        return ret

    def get_depth(self):
        """:returns: the depth (level) of the node"""
        return self.depth

    def get_siblings(self):
        """
        :returns: A queryset of all the node's siblings, including the node
            itself.
        """
        qset = self.__class__.objects.filter(depth=self.depth, object_id=self.object_id)
        if self.depth > 1:
            # making sure the non-root nodes share a parent
            parentpath = self._get_basepath(self.path, self.depth - 1)
            qset = qset.filter(
                path__range=self._get_children_path_interval(parentpath))
        return qset

    def get_children(self):
        """:returns: A queryset of all the node's children"""
        if self.is_leaf():
            return self.__class__.objects.none()
        return self.__class__.objects.filter(
            depth=self.depth + 1,
            path__range=self._get_children_path_interval(self.path),
            object_id=self.object_id,
        )

    def get_next_sibling(self):
        """
        :returns: The next node's sibling, or None if it was the rightmost
            sibling.
        """
        try:
            return self.get_siblings().filter(path__gt=self.path)[0]
        except IndexError:
            return None

    def get_descendants(self):
        """
        :returns: A queryset of all the node's descendants as DFS, doesn't
            include the node itself
        """
        return self.__class__.get_tree(self.object_id, self).exclude(pk=self.pk)

    def get_prev_sibling(self):
        """
        :returns: The previous node's sibling, or None if it was the leftmost
            sibling.
        """
        try:
            return self.get_siblings().filter(path__lt=self.path).reverse()[0]
        except IndexError:
            return None

    def get_children_count(self):
        """
        :returns: The number the node's children, calculated in the most
        efficient possible way.
        """
        return self.numchild

    def is_sibling_of(self, node):
        """
        :returns: ``True`` if the node is a sibling of another node given as an
            argument, else, returns ``False``
        """
        aux = self.depth == node.depth
        # Check non-root nodes share a parent only if they have the same depth
        if aux and self.depth > 1:
            # making sure the non-root nodes share a parent
            parentpath = self._get_basepath(self.path, self.depth - 1)
            return aux and node.path.startswith(parentpath)
        return aux

    def is_child_of(self, node):
        """
        :returns: ``True`` is the node if a child of another node given as an
            argument, else, returns ``False``
        """
        return (self.path.startswith(node.path) and
                self.depth == node.depth + 1)

    def is_descendant_of(self, node):
        """
        :returns: ``True`` if the node is a descendant of another node given
            as an argument, else, returns ``False``
        """
        return self.path.startswith(node.path) and self.depth > node.depth

    def add_child(self, **kwargs):
        """
        Adds a child to the node.

        :raise PathOverflow: when no more child nodes can be added
        """
        return MP_AddChildHandler(self, **kwargs).process()

    def add_sibling(self, pos=None, **kwargs):
        """
        Adds a new node as a sibling to the current node object.

        :raise PathOverflow: when the library can't make room for the
           node's new position
        """
        return MP_AddSiblingHandler(self, pos, **kwargs).process()

    def get_root(self):
        """:returns: the root node for the current node object."""
        return self.__class__.objects.get(path=self.path[0:self.steplen])

    def is_leaf(self):
        """:returns: True if the node is a leaf node (else, returns False)"""
        return self.numchild == 0

    def get_ancestors(self):
        """
        :returns: A queryset containing the current node object's ancestors,
            starting by the root node and descending to the parent.
        """
        # every proper prefix of self.path (one per ancestor depth)
        paths = [
            self.path[0:pos]
            for pos in range(0, len(self.path), self.steplen)[1:]
        ]
        return self.__class__.objects.filter(path__in=paths, object_id=self.object_id).order_by('depth')

    def get_parent(self, update=False):
        """
        :returns: the parent node of the current node object.
            Caches the result in the object itself to help in loops.

        :param update: when True, ignore and refresh the cached parent
        """
        depth = int(len(self.path) / self.steplen)
        if depth <= 1:
            # root nodes have no parent
            return
        try:
            if update:
                del self._cached_parent_obj
            else:
                return self._cached_parent_obj
        except AttributeError:
            pass
        parentpath = self._get_basepath(self.path, depth - 1)
        self._cached_parent_obj = self.__class__.objects.get(path=parentpath)
        return self._cached_parent_obj

    def move(self, target, pos=None):
        """
        Moves the current node and all it's descendants to a new position
        relative to another node.

        :raise PathOverflow: when the library can't make room for the
           node's new position
        """
        return MP_MoveHandler(self, target, pos).process()

    @classmethod
    def _get_basepath(cls, path, depth):
        """:returns: The base path of another path up to a given depth"""
        if path:
            return path[0:depth * cls.steplen]
        return ''

    @classmethod
    def _get_path(cls, path, depth, newstep):
        """
        Builds a path given some values

        :param path: the base path
        :param depth: the depth of the node
        :param newstep: the value (integer) of the new step
        """
        parentpath = cls._get_basepath(path, depth - 1)
        key = cls._int2str(newstep)
        # left-pad the encoded step with '0' up to steplen characters
        return '%s%s%s' % (parentpath,
                           '0' * (cls.steplen - len(key)),
                           key)

    def _inc_path(self):
        """:returns: The path of the next sibling of a given node path."""
        newpos = self._str2int(self.path[-self.steplen:]) + 1
        key = self._int2str(newpos)
        if len(key) > self.steplen:
            raise PathOverflow(_("Path Overflow from: '%s'" % (self.path, )))
        return '%s%s%s' % (self.path[:-self.steplen],
                           '0' * (self.steplen - len(key)),
                           key)

    def _get_lastpos_in_path(self):
        """:returns: The integer value of the last step in a path."""
        return self._str2int(self.path[-self.steplen:])

    @classmethod
    def _get_parent_path_from_path(cls, path):
        """:returns: The parent path for a given path"""
        if path:
            return path[0:len(path) - cls.steplen]
        return ''

    @classmethod
    def _get_children_path_interval(cls, path):
        """:returns: An interval of all possible children paths for a node."""
        return (path + cls.alphabet[0] * cls.steplen,
                path + cls.alphabet[-1] * cls.steplen)

    class Meta:
        """Abstract model."""
        abstract = True
# Fixed object_id usage
"""Materialized Path Trees"""
import sys
import operator
if sys.version_info >= (3, 0):
from functools import reduce
from django.core import serializers
from django.db import models, transaction, connection, IntegrityError
from django.db.models import F, Q
from django.utils.translation import ugettext_noop as _
from treebeard.numconv import NumConv
from treebeard.models import Node
from treebeard.exceptions import InvalidMoveToDescendant, PathOverflow
class MP_NodeQuerySet(models.query.QuerySet):
    """
    Custom queryset for the tree node manager.

    Needed only for the customized delete method.
    """

    def delete(self):
        """
        Custom delete method, will remove all descendant nodes to ensure a
        consistent tree (no orphans)

        :returns: ``None``
        """
        # we'll have to manually run through all the nodes that are going
        # to be deleted and remove nodes from the list if an ancestor is
        # already getting removed, since that would be redundant
        removed = {}  # path -> node, minimal set of branch roots to delete
        for node in self.order_by('depth', 'path'):
            found = False
            # walk every ancestor prefix of this node's path; if any
            # ancestor is already scheduled, this node is redundant
            for depth in range(1, int(len(node.path) / node.steplen)):
                path = node._get_basepath(node.path, depth)
                if path in removed:
                    # we are already removing a parent of this node
                    # skip
                    found = True
                    break
            if not found:
                removed[node.path] = node
        # ok, got the minimal list of nodes to remove...
        # we must also remove their children
        # and update every parent node's numchild attribute
        # LOTS OF FUN HERE!
        parents = {}  # cache of already-fetched parent nodes, keyed by path
        toremove = []  # Q objects selecting each branch to delete
        for path, node in removed.items():
            parentpath = node._get_basepath(node.path, node.depth - 1)
            if parentpath:
                if parentpath not in parents:
                    parents[parentpath] = node.get_parent(True)
                parent = parents[parentpath]
                if parent and parent.numchild > 0:
                    # keep the parent's child counter consistent
                    parent.numchild -= 1
                    parent.save()
            if node.is_leaf():
                toremove.append(Q(path=node.path))
            else:
                # startswith match removes the node and its whole subtree
                toremove.append(Q(path__startswith=node.path))
        # Django will handle this as a SELECT and then a DELETE of
        # ids, and will deal with removing related objects
        if toremove:
            qset = self.model.objects.filter(reduce(operator.or_, toremove))
            super(MP_NodeQuerySet, qset).delete()
        transaction.commit_unless_managed()
class MP_NodeManager(models.Manager):
    """Custom manager for nodes."""

    def get_query_set(self):
        """Return the custom tree queryset, ordered by ``path`` (DFS order)."""
        qset = MP_NodeQuerySet(self.model)
        return qset.order_by('path')
class MP_AddHandler(object):
    """Base class for the add/move handlers.

    Collects pending ``(sql, vals)`` statement pairs in ``stmts`` for
    subclasses to execute later.
    """

    def __init__(self):
        # per-instance list of queued SQL statements
        self.stmts = list()
class MP_ComplexAddMoveHandler(MP_AddHandler):
def run_sql_stmts(self):
cursor = self.node_cls._get_database_cursor('write')
for sql, vals in self.stmts:
cursor.execute(sql, vals)
def get_sql_update_numchild(self, path, incdec='inc'):
""":returns: The sql needed the numchild value of a node"""
sql = "UPDATE %s SET numchild=numchild%s1"\
" WHERE path=%%s" % (
connection.ops.quote_name(self.node_cls._meta.db_table),
{'inc': '+', 'dec': '-'}[incdec])
vals = [path]
return sql, vals
def reorder_nodes_before_add_or_move(self, pos, newpos, newdepth, target,
siblings, oldpath=None,
movebranch=False):
"""
Handles the reordering of nodes and branches when adding/moving
nodes.
:returns: A tuple containing the old path and the new path.
"""
if (
(pos == 'last-sibling') or
(pos == 'right' and target == target.get_last_sibling())
):
# easy, the last node
last = target.get_last_sibling()
newpath = last._inc_path()
if movebranch:
self.stmts.append(
self.get_sql_newpath_in_branches(oldpath, newpath))
else:
# do the UPDATE dance
if newpos is None:
siblings = target.get_siblings()
siblings = {'left': siblings.filter(path__gte=target.path),
'right': siblings.filter(path__gt=target.path),
'first-sibling': siblings}[pos]
basenum = target._get_lastpos_in_path()
newpos = {'first-sibling': 1,
'left': basenum,
'right': basenum + 1}[pos]
newpath = self.node_cls._get_path(target.path, newdepth, newpos)
# If the move is amongst siblings and is to the left and there
# are siblings to the right of its new position then to be on
# the safe side we temporarily dump it on the end of the list
tempnewpath = None
if movebranch and len(oldpath) == len(newpath):
parentoldpath = self.node_cls._get_basepath(
oldpath,
int(len(oldpath) / self.node_cls.steplen) - 1
)
parentnewpath = self.node_cls._get_basepath(
newpath, newdepth - 1)
if (
parentoldpath == parentnewpath and
siblings and
newpath < oldpath
):
last = target.get_last_sibling()
basenum = last._get_lastpos_in_path()
tempnewpath = self.node_cls._get_path(
newpath, newdepth, basenum + 2)
self.stmts.append(
self.get_sql_newpath_in_branches(
oldpath, tempnewpath))
# Optimisation to only move siblings which need moving
# (i.e. if we've got holes, allow them to compress)
movesiblings = []
priorpath = newpath
for node in siblings:
# If the path of the node is already greater than the path
# of the previous node it doesn't need shifting
if node.path > priorpath:
break
# It does need shifting, so add to the list
movesiblings.append(node)
# Calculate the path that it would be moved to, as that's
# the next "priorpath"
priorpath = node._inc_path()
movesiblings.reverse()
for node in movesiblings:
# moving the siblings (and their branches) at the right of the
# related position one step to the right
sql, vals = self.get_sql_newpath_in_branches(
node.path, node._inc_path())
self.stmts.append((sql, vals))
if movebranch:
if oldpath.startswith(node.path):
# if moving to a parent, update oldpath since we just
# increased the path of the entire branch
oldpath = vals[0] + oldpath[len(vals[0]):]
if target.path.startswith(node.path):
# and if we moved the target, update the object
# django made for us, since the update won't do it
# maybe useful in loops
target.path = vals[0] + target.path[len(vals[0]):]
if movebranch:
# node to move
if tempnewpath:
self.stmts.append(
self.get_sql_newpath_in_branches(
tempnewpath, newpath))
else:
self.stmts.append(
self.get_sql_newpath_in_branches(
oldpath, newpath))
return oldpath, newpath
    def get_sql_newpath_in_branches(self, oldpath, newpath):
        """
        :returns: The sql needed to move a branch to another position.

        .. note::

           The generated sql will only update the depth values if needed.
        """
        vendor = self.node_cls.get_database_vendor('write')
        sql1 = "UPDATE %s SET" % (
            connection.ops.quote_name(self.node_cls._meta.db_table), )
        # <3 "standard" sql
        if vendor == 'sqlite':
            # I know that the third argument in SUBSTR (LENGTH(path)) is
            # awful, but sqlite fails without it:
            # OperationalError: wrong number of arguments to function substr()
            # even when the documentation says that 2 arguments are valid:
            # http://www.sqlite.org/lang_corefunc.html
            sqlpath = "%s||SUBSTR(path, %s, LENGTH(path))"
        elif vendor == 'mysql':
            # hooray for mysql ignoring standards in their default
            # configuration!
            # to make || work as it should, enable ansi mode
            # http://dev.mysql.com/doc/refman/5.0/en/ansi-mode.html
            sqlpath = "CONCAT(%s, SUBSTR(path, %s))"
        else:
            sqlpath = "%s||SUBSTR(path, %s)"
        # path = <newpath> || SUBSTR(path, len(oldpath)+1): swap the moved
        # branch's prefix while keeping each descendant's own path suffix
        sql2 = ["path=%s" % (sqlpath, )]
        vals = [newpath, len(oldpath) + 1]
        if len(oldpath) != len(newpath) and vendor != 'mysql':
            # when using mysql, this won't update the depth and it has to be
            # done in another query
            # doesn't even work with sql_mode='ANSI,TRADITIONAL'
            # TODO: FIND OUT WHY?!?? right now I'm just blaming mysql
            sql2.append("depth=LENGTH(%s)/%%s" % (sqlpath, ))
            vals.extend([newpath, len(oldpath) + 1, self.node_cls.steplen])
        # restrict the UPDATE to the moved branch (rows whose path starts
        # with oldpath)
        sql3 = "WHERE path LIKE %s"
        vals.extend([oldpath + '%'])
        sql = '%s %s %s' % (sql1, ', '.join(sql2), sql3)
        return sql, vals
class MP_AddRootHandler(MP_AddHandler):
    """Handler that creates a new root node in a materialized-path tree."""

    def __init__(self, cls, **kwargs):
        super(MP_AddRootHandler, self).__init__()
        self.cls = cls
        self.kwargs = kwargs

    def process(self):
        """Create, save and return the new root node.

        :raise KeyError: when kwargs doesn't contain an object id
        """
        object_id = self.kwargs.get(MP_Node.OBJECT_NAME)
        if not object_id:
            raise KeyError('There is no object id')
        last_root = self.cls.get_last_root_node(object_id)
        if last_root and last_root.node_order_by:
            # roots exist and the tree is sorted: let add_sibling pick
            # the correct position for the new root
            return last_root.add_sibling('sorted-sibling', **self.kwargs)
        # either right after the last root, or the very first root path
        newpath = (last_root._inc_path() if last_root
                   else self.cls._get_path(None, 1, 1))
        root = self.cls(**self.kwargs)
        root.depth = 1
        root.path = newpath
        # persist before handing the instance back to the caller
        root.save()
        transaction.commit_unless_managed()
        return root
class MP_AddChildHandler(MP_AddHandler):
    """Handler that adds a child node under a given parent node."""

    def __init__(self, node, **kwargs):
        super(MP_AddChildHandler, self).__init__()
        self.node = node
        self.node_cls = node.__class__
        self.kwargs = kwargs

    def process(self):
        """Create, save and return the new child node.

        :raise KeyError: when parent and child object ids differ
        :raise PathOverflow: when no more child nodes can be added
        """
        # use the shared OBJECT_NAME constant instead of a hard-coded
        # 'object_id' literal, consistent with MP_AddRootHandler
        if self.node.object_id != self.kwargs.get(
                self.node_cls.OBJECT_NAME, False):
            raise KeyError(
                "The object_id for parent and child must be the same")
        if self.node_cls.node_order_by and not self.node.is_leaf():
            # there are child nodes and node_order_by has been set
            # delegate sorted insertion to add_sibling
            self.node.numchild += 1
            return self.node.get_last_child().add_sibling(
                'sorted-sibling', **self.kwargs)
        # creating a new object
        newobj = self.node_cls(**self.kwargs)
        newobj.depth = self.node.depth + 1
        if self.node.is_leaf():
            # the node had no children, adding the first child
            newobj.path = self.node_cls._get_path(
                self.node.path, newobj.depth, 1)
            max_length = self.node_cls._meta.get_field('path').max_length
            if len(newobj.path) > max_length:
                raise PathOverflow(
                    _('The new node is too deep in the tree, try'
                      ' increasing the path.max_length property'
                      ' and UPDATE your database'))
        else:
            # adding the new child as the last one
            newobj.path = self.node.get_last_child()._inc_path()
        # saving the instance before returning it
        newobj.save()
        newobj._cached_parent_obj = self.node
        # bump the stored numchild atomically with an F() expression
        self.node_cls.objects.filter(
            path=self.node.path,
            object_id=self.node.object_id).update(numchild=F('numchild') + 1)
        # we increase the numchild value of the object in memory
        self.node.numchild += 1
        transaction.commit_unless_managed()
        return newobj
class MP_AddSiblingHandler(MP_ComplexAddMoveHandler):
    """Handler that inserts a new node as a sibling of an existing node,
    shifting other siblings' paths with batched SQL when needed."""

    def __init__(self, node, pos, **kwargs):
        super(MP_AddSiblingHandler, self).__init__()
        self.node = node
        self.node_cls = node.__class__
        self.pos = pos
        self.kwargs = kwargs

    def process(self):
        """Create, save and return the new sibling node."""
        self.pos = self.node._prepare_pos_var_for_add_sibling(self.pos)
        # creating a new object
        newobj = self.node_cls(**self.kwargs)
        newobj.depth = self.node.depth
        if self.pos == 'sorted-sibling':
            # find the first sibling that sorts after the new object; its
            # last path step is where the new node must be inserted
            siblings = self.node.get_sorted_pos_queryset(
                self.node.get_siblings(), newobj)
            try:
                newpos = siblings.all()[0]._get_lastpos_in_path()
            except IndexError:
                newpos = None
            if newpos is None:
                # nothing sorts after it: append at the end instead
                self.pos = 'last-sibling'
        else:
            newpos, siblings = None, []
        # queue the SQL that makes room for the new node (may shift siblings)
        _, newpath = self.reorder_nodes_before_add_or_move(
            self.pos, newpos, self.node.depth, self.node, siblings, None,
            False)
        parentpath = self.node._get_basepath(newpath, self.node.depth - 1)
        if parentpath:
            # non-root sibling: the parent gains one child
            self.stmts.append(
                self.get_sql_update_numchild(parentpath, 'inc'))
        self.run_sql_stmts()
        # saving the instance before returning it
        newobj.path = newpath
        newobj.save()
        transaction.commit_unless_managed()
        return newobj
class MP_MoveHandler(MP_ComplexAddMoveHandler):
    """Handler that moves a node (and its whole branch) to a new position
    relative to a target node, batching the required SQL statements."""

    def __init__(self, node, target, pos=None):
        super(MP_MoveHandler, self).__init__()
        self.node = node
        self.node_cls = node.__class__
        self.target = target
        self.pos = pos

    def process(self):
        """Perform the move, running all queued SQL statements.

        :raise InvalidMoveToDescendant: when the target is a descendant
            of the node being moved
        """
        self.pos = self.node._prepare_pos_var_for_move(self.pos)
        oldpath = self.node.path
        # initialize variables and if moving to a child, updates "move to
        # child" to become a "move to sibling" if possible (if it can't
        # be done, it means that we are adding the first child)
        newdepth, siblings, newpos = self.update_move_to_child_vars()
        if self.target.is_descendant_of(self.node):
            raise InvalidMoveToDescendant(
                _("Can't move node to a descendant."))
        if (
            oldpath == self.target.path and
            (
                (self.pos == 'left') or
                (
                    self.pos in ('right', 'last-sibling') and
                    self.target.path == self.target.get_last_sibling().path
                ) or
                (
                    self.pos == 'first-sibling' and
                    self.target.path == self.target.get_first_sibling().path
                )
            )
        ):
            # special cases, not actually moving the node so no need to UPDATE
            return
        if self.pos == 'sorted-sibling':
            # locate the insertion point among the (sorted) target siblings
            siblings = self.node.get_sorted_pos_queryset(
                self.target.get_siblings(), self.node)
            try:
                newpos = siblings.all()[0]._get_lastpos_in_path()
            except IndexError:
                newpos = None
            if newpos is None:
                self.pos = 'last-sibling'
        # generate the sql that will do the actual moving of nodes
        oldpath, newpath = self.reorder_nodes_before_add_or_move(
            self.pos, newpos, newdepth, self.target, siblings, oldpath, True)
        # updates needed for mysql and children count in parents
        self.sanity_updates_after_move(oldpath, newpath)
        self.run_sql_stmts()
        transaction.commit_unless_managed()

    def sanity_updates_after_move(self, oldpath, newpath):
        """
        Updates the list of sql statements needed after moving nodes.

        1. :attr:`depth` updates *ONLY* needed by mysql databases (*sigh*)
        2. update the number of children of parent nodes
        """
        if (
            self.node_cls.get_database_vendor('write') == 'mysql' and
            len(oldpath) != len(newpath)
        ):
            # no words can describe how dumb mysql is
            # we must update the depth of the branch in a different query
            self.stmts.append(
                self.get_mysql_update_depth_in_branch(newpath))
        oldparentpath = self.node_cls._get_parent_path_from_path(oldpath)
        newparentpath = self.node_cls._get_parent_path_from_path(newpath)
        if (
            (not oldparentpath and newparentpath) or
            (oldparentpath and not newparentpath) or
            (oldparentpath != newparentpath)
        ):
            # node changed parent, updating count
            if oldparentpath:
                self.stmts.append(
                    self.get_sql_update_numchild(oldparentpath, 'dec'))
            if newparentpath:
                self.stmts.append(
                    self.get_sql_update_numchild(newparentpath, 'inc'))

    def update_move_to_child_vars(self):
        """Update preliminary vars in :meth:`move` when moving to a child"""
        newdepth = self.target.depth
        newpos = None
        siblings = []
        if self.pos in ('first-child', 'last-child', 'sorted-child'):
            # moving to a child
            parent = self.target
            newdepth += 1
            if self.target.is_leaf():
                # moving as a target's first child
                newpos = 1
                self.pos = 'first-sibling'
                siblings = self.node_cls.objects.none()
            else:
                # target already has children: retarget to the last child
                # and translate the '*-child' position into its sibling form
                self.target = self.target.get_last_child()
                self.pos = {
                    'first-child': 'first-sibling',
                    'last-child': 'last-sibling',
                    'sorted-child': 'sorted-sibling'}[self.pos]
            # this is not for save(), since if needed, will be handled with a
            # custom UPDATE, this is only here to update django's object,
            # should be useful in loops
            parent.numchild += 1
        return newdepth, siblings, newpos

    def get_mysql_update_depth_in_branch(self, path):
        """
        :returns: The sql needed to update the depth of all the nodes in a
                  branch.
        """
        sql = "UPDATE %s SET depth=LENGTH(path)/%%s WHERE path LIKE %%s" % (
            connection.ops.quote_name(self.node_cls._meta.db_table), )
        vals = [self.node_cls.steplen, path + '%']
        return sql, vals
class MP_Node(Node):
    """Abstract model to create your own Materialized Path Trees."""

    # TODO: Get object field by this name?
    OBJECT_NAME = 'object_id'
    # number of characters each tree level ("step") takes in ``path``
    steplen = 4
    # characters allowed in a path step (base-36 by default)
    alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    node_order_by = []
    path = models.CharField(max_length=255)
    depth = models.PositiveSmallIntegerField()
    numchild = models.PositiveSmallIntegerField(default=0)
    gap = 1
    objects = MP_NodeManager()
    # lazily-built NumConv converter cache, see numconv_obj()
    numconv_obj_ = None

    def save(self, *args, **kwargs):
        """Save the node, retrying with the next sibling path on collision."""
        try:
            return super(MP_Node, self).save(*args, **kwargs)
        except IntegrityError:
            # another row already holds this path (e.g. a concurrent
            # insert); bump to the next sibling path and retry.
            # BUGFIX: return the retry's result instead of dropping it so
            # every save() call path returns consistently.
            self.path = self._inc_path()
            return self.save(*args, **kwargs)

    @classmethod
    def _int2str(cls, num):
        """Convert an integer into its ``alphabet`` string representation."""
        return cls.numconv_obj().int2str(num)

    @classmethod
    def _str2int(cls, num):
        """Convert an ``alphabet`` string representation into an integer."""
        return cls.numconv_obj().str2int(num)

    @classmethod
    def numconv_obj(cls):
        """Lazily build and cache the NumConv converter for ``alphabet``."""
        if cls.numconv_obj_ is None:
            cls.numconv_obj_ = NumConv(len(cls.alphabet), cls.alphabet)
        return cls.numconv_obj_

    @classmethod
    def add_root(cls, **kwargs):
        """
        Adds a root node to the tree. If there is no sharded id,
        it will be generated with 'generate_id' class method

        :raise PathOverflow: when no more root objects can be added
        :raise KeyError: when kwargs doesn't contain object_id
        """
        return MP_AddRootHandler(cls, **kwargs).process()

    @classmethod
    def dump_bulk(cls, parent=None, keep_ids=True):
        """Dumps a tree branch to a python data structure."""
        # Because of fix_tree, this method assumes that the depth
        # and numchild properties in the nodes can be incorrect,
        # so no helper methods are used
        qset = cls._get_serializable_model().objects.all()
        if parent:
            qset = qset.filter(path__startswith=parent.path)
        ret, lnk = [], {}
        for pyobj in serializers.serialize('python', qset):
            # django's serializer stores the attributes in 'fields'
            fields = pyobj['fields']
            path = fields['path']
            depth = int(len(path) / cls.steplen)
            # this will be useless in load_bulk
            del fields['depth']
            del fields['path']
            del fields['numchild']
            if 'id' in fields:
                # this happens immediately after a load_bulk
                del fields['id']
            newobj = {'data': fields}
            if keep_ids:
                newobj['id'] = pyobj['pk']
            if (not parent and depth == 1) or\
                    (parent and len(path) == len(parent.path)):
                ret.append(newobj)
            else:
                # link the node under its parent using the path prefix;
                # qset is DFS-ordered so the parent was already seen
                parentpath = cls._get_basepath(path, depth - 1)
                parentobj = lnk[parentpath]
                if 'children' not in parentobj:
                    parentobj['children'] = []
                parentobj['children'].append(newobj)
            lnk[path] = newobj
        return ret

    @classmethod
    def find_problems(cls):
        """
        Checks for problems in the tree structure, problems can occur when:

        1. your code breaks and you get incomplete transactions (always
           use transactions!)
        2. changing the ``steplen`` value in a model (you must
           :meth:`dump_bulk` first, change ``steplen`` and then
           :meth:`load_bulk`

        :returns: A tuple of five lists:

            1. a list of ids of nodes with characters not found in the
               ``alphabet``
            2. a list of ids of nodes with a wrong ``path`` length
               according to ``steplen``
            3. a list of ids of orphaned nodes
            4. a list of ids of nodes with the wrong depth value for
               their path
            5. a list of ids of nodes that report a wrong number of children
        """
        evil_chars, bad_steplen, orphans = [], [], []
        wrong_depth, wrong_numchild = [], []
        for node in cls.objects.all():
            found_error = False
            for char in node.path:
                if char not in cls.alphabet:
                    evil_chars.append(node.pk)
                    found_error = True
                    break
            if found_error:
                continue
            if len(node.path) % cls.steplen:
                bad_steplen.append(node.pk)
                continue
            try:
                node.get_parent(True)
            except cls.DoesNotExist:
                orphans.append(node.pk)
                continue
            if node.depth != int(len(node.path) / cls.steplen):
                wrong_depth.append(node.pk)
                continue
            # count actual direct children and compare with numchild
            real_numchild = cls.objects.filter(
                path__range=cls._get_children_path_interval(node.path)
            ).extra(
                where=['LENGTH(path)/%d=%d' % (cls.steplen, node.depth + 1)]
            ).count()
            if real_numchild != node.numchild:
                wrong_numchild.append(node.pk)
                continue
        return evil_chars, bad_steplen, orphans, wrong_depth, wrong_numchild

    @classmethod
    def fix_tree(cls, destructive=False):
        """
        Solves some problems that can appear when transactions are not used and
        a piece of code breaks, leaving the tree in an inconsistent state.

        The problems this method solves are:

        1. Nodes with an incorrect ``depth`` or ``numchild`` values due to
           incorrect code and lack of database transactions.
        2. "Holes" in the tree. This is normal if you move/delete nodes a
           lot. Holes in a tree don't affect performance,
        3. Incorrect ordering of nodes when ``node_order_by`` is enabled.
           Ordering is enforced on *node insertion*, so if an attribute in
           ``node_order_by`` is modified after the node is inserted, the
           tree ordering will be inconsistent.

        :param destructive:
            A boolean value. If True, a more aggressive fix_tree method will be
            attempted. If False (the default), it will use a safe (and fast!)
            fix approach, but it will only solve the ``depth`` and
            ``numchild`` nodes, it won't fix the tree holes or broken path
            ordering.

        .. warning::

           Currently what the ``destructive`` method does is:

           1. Backup the tree with :meth:`dump_data`
           2. Remove all nodes in the tree.
           3. Restore the tree with :meth:`load_data`

           So, even when the primary keys of your nodes will be preserved,
           this method isn't foreign-key friendly. That needs complex
           in-place tree reordering, not available at the moment (hint:
           patches are welcome).
        """
        if destructive:
            dump = cls.dump_bulk(None, True)
            cls.objects.all().delete()
            cls.load_bulk(dump, None, True)
        else:
            cursor = cls._get_database_cursor('write')
            # fix the depth field
            # we need the WHERE to speed up postgres
            sql = "UPDATE %s "\
                  "SET depth=LENGTH(path)/%%s "\
                  "WHERE depth!=LENGTH(path)/%%s" % (
                      connection.ops.quote_name(cls._meta.db_table), )
            vals = [cls.steplen, cls.steplen]
            cursor.execute(sql, vals)
            # fix the numchild field
            vals = ['_' * cls.steplen]
            # the cake and sql portability are a lie
            if cls.get_database_vendor('read') == 'mysql':
                sql = "SELECT tbn1.path, tbn1.numchild, ("\
                      "SELECT COUNT(1) "\
                      "FROM %(table)s AS tbn2 "\
                      "WHERE tbn2.path LIKE "\
                      "CONCAT(tbn1.path, %%s)) AS real_numchild "\
                      "FROM %(table)s AS tbn1 "\
                      "HAVING tbn1.numchild != real_numchild" % {
                          'table': connection.ops.quote_name(
                              cls._meta.db_table)}
            else:
                subquery = "(SELECT COUNT(1) FROM %(table)s AS tbn2"\
                           " WHERE tbn2.path LIKE tbn1.path||%%s)"
                sql = ("SELECT tbn1.path, tbn1.numchild, " + subquery +
                       " FROM %(table)s AS tbn1 WHERE tbn1.numchild != " +
                       subquery)
                sql = sql % {
                    'table': connection.ops.quote_name(cls._meta.db_table)}
                # we include the subquery twice
                vals *= 2
            cursor.execute(sql, vals)
            # write back the real child counts found above
            sql = "UPDATE %(table)s "\
                  "SET numchild=%%s "\
                  "WHERE path=%%s" % {
                      'table': connection.ops.quote_name(cls._meta.db_table)}
            for node_data in cursor.fetchall():
                vals = [node_data[2], node_data[0]]
                cursor.execute(sql, vals)
            transaction.commit_unless_managed()

    @classmethod
    def get_tree(cls, object_id, parent=None):
        """
        :returns:

            A *queryset* of nodes ordered as DFS, including the parent.
            If no parent is given, the entire tree is returned.
        """
        if parent is None:
            # return the entire tree
            return cls.objects.filter(object_id=object_id)
        if parent.is_leaf():
            return cls.objects.filter(pk=parent.pk)
        return cls.objects.filter(path__startswith=parent.path,
                                  depth__gte=parent.depth,
                                  object_id=object_id)

    @classmethod
    def get_root_nodes(cls, object_id=None):
        """:returns: A queryset containing the root nodes in the tree."""
        if object_id:
            return cls.objects.filter(depth=1, object_id=object_id)
        else:
            return cls.objects.filter(depth=1)

    @classmethod
    def get_descendants_group_count(cls, parent=None):
        """
        Helper for a very common case: get a group of siblings and the number
        of *descendants* in every sibling.
        """
        #~
        # disclaimer: this is the FOURTH implementation I wrote for this
        # function. I really tried to make it return a queryset, but doing so
        # with a *single* query isn't trivial with Django's ORM.

        # ok, I DID manage to make Django's ORM return a queryset here,
        # defining two querysets, passing one subquery in the tables parameters
        # of .extra() of the second queryset, using the undocumented order_by
        # feature, and using a HORRIBLE hack to avoid django quoting the
        # subquery as a table, BUT (and there is always a but) the hack didn't
        # survive turning the QuerySet into a ValuesQuerySet, so I just used
        # good old SQL.
        # NOTE: in case there is interest, the hack to avoid django quoting the
        # subquery as a table, was adding the subquery to the alias cache of
        # the queryset's query object:
        #
        #     qset.query.quote_cache[subquery] = subquery
        #
        # If there is a better way to do this in an UNMODIFIED django 1.0, let
        # me know.
        #~
        if parent:
            depth = parent.depth + 1
            params = cls._get_children_path_interval(parent.path)
            extrand = 'AND path BETWEEN %s AND %s'
        else:
            depth = 1
            params = []
            extrand = ''
        sql = 'SELECT * FROM %(table)s AS t1 INNER JOIN '\
              ' (SELECT '\
              '   SUBSTR(path, 1, %(subpathlen)s) AS subpath, '\
              '   COUNT(1)-1 AS count '\
              '   FROM %(table)s '\
              '   WHERE depth >= %(depth)s %(extrand)s'\
              '   GROUP BY subpath) AS t2 '\
              ' ON t1.path=t2.subpath '\
              ' ORDER BY t1.path' % {
                  'table': connection.ops.quote_name(cls._meta.db_table),
                  'subpathlen': depth * cls.steplen,
                  'depth': depth,
                  'extrand': extrand}
        cursor = cls._get_database_cursor('write')
        cursor.execute(sql, params)
        ret = []
        field_names = [field[0] for field in cursor.description]
        for node_data in cursor.fetchall():
            # last two columns come from the subquery (subpath, count)
            node = cls(**dict(zip(field_names, node_data[:-2])))
            node.descendants_count = node_data[-1]
            ret.append(node)
        transaction.commit_unless_managed()
        return ret

    def get_depth(self):
        """:returns: the depth (level) of the node"""
        return self.depth

    def get_siblings(self):
        """
        :returns: A queryset of all the node's siblings, including the node
            itself.
        """
        qset = self.__class__.objects.filter(depth=self.depth,
                                             object_id=self.object_id)
        if self.depth > 1:
            # making sure the non-root nodes share a parent
            parentpath = self._get_basepath(self.path, self.depth - 1)
            qset = qset.filter(
                path__range=self._get_children_path_interval(parentpath))
        return qset

    def get_children(self):
        """:returns: A queryset of all the node's children"""
        if self.is_leaf():
            return self.__class__.objects.none()
        return self.__class__.objects.filter(
            depth=self.depth + 1,
            path__range=self._get_children_path_interval(self.path),
            object_id=self.object_id,
        )

    def get_next_sibling(self):
        """
        :returns: The next node's sibling, or None if it was the rightmost
            sibling.
        """
        try:
            return self.get_siblings().filter(path__gt=self.path)[0]
        except IndexError:
            return None

    def get_descendants(self):
        """
        :returns: A queryset of all the node's descendants as DFS, doesn't
            include the node itself
        """
        return self.__class__.get_tree(
            self.object_id, self).exclude(pk=self.pk)

    def get_prev_sibling(self):
        """
        :returns: The previous node's sibling, or None if it was the leftmost
            sibling.
        """
        try:
            return self.get_siblings().filter(path__lt=self.path).reverse()[0]
        except IndexError:
            return None

    def get_children_count(self):
        """
        :returns: The number the node's children, calculated in the most
            efficient possible way.
        """
        return self.numchild

    def is_sibling_of(self, node):
        """
        :returns: ``True`` if the node is a sibling of another node given as an
            argument, else, returns ``False``
        """
        aux = self.depth == node.depth
        # Check non-root nodes share a parent only if they have the same depth
        if aux and self.depth > 1:
            # making sure the non-root nodes share a parent
            parentpath = self._get_basepath(self.path, self.depth - 1)
            return aux and node.path.startswith(parentpath)
        return aux

    def is_child_of(self, node):
        """
        :returns: ``True`` is the node if a child of another node given as an
            argument, else, returns ``False``
        """
        return (self.path.startswith(node.path) and
                self.depth == node.depth + 1)

    def is_descendant_of(self, node):
        """
        :returns: ``True`` if the node is a descendant of another node given
            as an argument, else, returns ``False``
        """
        return self.path.startswith(node.path) and self.depth > node.depth

    def add_child(self, **kwargs):
        """
        Adds a child to the node.

        :raise PathOverflow: when no more child nodes can be added
        """
        return MP_AddChildHandler(self, **kwargs).process()

    def add_sibling(self, pos=None, **kwargs):
        """
        Adds a new node as a sibling to the current node object.

        :raise PathOverflow: when the library can't make room for the
            node's new position
        """
        return MP_AddSiblingHandler(self, pos, **kwargs).process()

    def get_root(self):
        """:returns: the root node for the current node object."""
        return self.__class__.objects.get(path=self.path[0:self.steplen])

    def is_leaf(self):
        """:returns: True if the node is a leaf node (else, returns False)"""
        return self.numchild == 0

    def get_ancestors(self):
        """
        :returns: A queryset containing the current node object's ancestors,
            starting by the root node and descending to the parent.
        """
        # every steplen-sized prefix of the path is an ancestor's path
        paths = [
            self.path[0:pos]
            for pos in range(0, len(self.path), self.steplen)[1:]
        ]
        return self.__class__.objects.filter(
            path__in=paths, object_id=self.object_id).order_by('depth')

    def get_parent(self, update=False):
        """
        :returns: the parent node of the current node object.
            Caches the result in the object itself to help in loops.
        """
        depth = int(len(self.path) / self.steplen)
        if depth <= 1:
            # root node: no parent
            return
        try:
            if update:
                del self._cached_parent_obj
            else:
                return self._cached_parent_obj
        except AttributeError:
            pass
        parentpath = self._get_basepath(self.path, depth - 1)
        self._cached_parent_obj = self.__class__.objects.get(
            path=parentpath,
            object_id=self.object_id,
        )
        return self._cached_parent_obj

    def move(self, target, pos=None):
        """
        Moves the current node and all it's descendants to a new position
        relative to another node.

        :raise PathOverflow: when the library can't make room for the
            node's new position
        """
        return MP_MoveHandler(self, target, pos).process()

    @classmethod
    def _get_basepath(cls, path, depth):
        """:returns: The base path of another path up to a given depth"""
        if path:
            return path[0:depth * cls.steplen]
        return ''

    @classmethod
    def _get_path(cls, path, depth, newstep):
        """
        Builds a path given some values

        :param path: the base path
        :param depth: the depth of the node
        :param newstep: the value (integer) of the new step
        """
        parentpath = cls._get_basepath(path, depth - 1)
        key = cls._int2str(newstep)
        # zero-pad the step so every step is exactly steplen chars wide
        return '%s%s%s' % (parentpath,
                           '0' * (cls.steplen - len(key)),
                           key)

    def _inc_path(self):
        """:returns: The path of the next sibling of a given node path."""
        newpos = self._str2int(self.path[-self.steplen:]) + 1
        key = self._int2str(newpos)
        if len(key) > self.steplen:
            raise PathOverflow(_("Path Overflow from: '%s'" % (self.path, )))
        return '%s%s%s' % (self.path[:-self.steplen],
                           '0' * (self.steplen - len(key)),
                           key)

    def _get_lastpos_in_path(self):
        """:returns: The integer value of the last step in a path."""
        return self._str2int(self.path[-self.steplen:])

    @classmethod
    def _get_parent_path_from_path(cls, path):
        """:returns: The parent path for a given path"""
        if path:
            return path[0:len(path) - cls.steplen]
        return ''

    @classmethod
    def _get_children_path_interval(cls, path):
        """:returns: An interval of all possible children paths for a node."""
        return (path + cls.alphabet[0] * cls.steplen,
                path + cls.alphabet[-1] * cls.steplen)

    class Meta:
        """Abstract model."""
        abstract = True
|
# Copyright (c) 2003, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory (subject to receipt of
# any required approvals from the U.S. Dept. of Energy). All rights
# reserved.
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
ident = "$Id$"  # RCS/CVS keyword, expanded by the version control system
import types, weakref, sys
from threading import RLock
from Namespaces import XMLNS
from Utility import DOM, DOMException, Collection, SplitQName, basejoin
from StringIO import StringIO
#
# Collections in XMLSchema class
#
# Keys naming the Collection attributes kept on XMLSchema instances.
TYPES = 'types'                      # type definitions
ATTRIBUTE_GROUPS = 'attr_groups'     # attribute group definitions
ATTRIBUTES = 'attr_decl'             # attribute declarations
ELEMENTS = 'elements'                # element declarations
MODEL_GROUPS = 'model_groups'        # model group definitions
def GetSchema(component):
    """Convenience function for finding the parent XMLSchema instance.

    Walks up the ``_parent`` weakref chain from *component* until an
    XMLSchema instance is reached.
    """
    node = component
    while not isinstance(node, XMLSchema):
        node = node._parent()
    return node
class SchemaReader:
    """A SchemaReader creates XMLSchema objects from urls and xml data.
    """

    def __init__(self, domReader=None, base_url=None):
        """domReader -- class must implement DOMAdapterInterface
           base_url -- base url string
        """
        self.__base_url = base_url
        self.__readerClass = domReader
        if not self.__readerClass:
            self.__readerClass = DOMAdapter
        # registries of already-loaded schema documents
        self._includes = {}   # schemaLocation -> XMLSchema
        self._imports = {}    # targetNamespace -> XMLSchema

    def __setImports(self, schema):
        """Add dictionary of imports to schema instance.
           schema -- XMLSchema instance
        """
        for ns, val in schema.imports.items():
            if ns in self._imports:
                schema.addImportSchema(self._imports[ns])

    def __setIncludes(self, schema):
        """Add dictionary of includes to schema instance.
           schema -- XMLSchema instance
        """
        for schemaLocation, val in schema.includes.items():
            if schemaLocation in self._includes:
                # BUGFIX: look the schema up in _includes, not _imports;
                # indexing _imports here raised KeyError for any include
                # that was not also registered as an import.
                schema.addIncludeSchema(self._includes[schemaLocation])

    def addSchemaByLocation(self, location, schema):
        """provide reader with schema document for a location.
        """
        self._includes[location] = schema

    def addSchemaByNamespace(self, schema):
        """provide reader with schema document for a targetNamespace.
        """
        self._imports[schema.targetNamespace] = schema

    def loadFromNode(self, parent, element):
        """element -- DOM node or document
           parent -- WSDLAdapter instance
        """
        reader = self.__readerClass(element)
        schema = XMLSchema(parent)
        # HACK to keep a reference
        schema.wsdl = parent
        schema.setBaseUrl(self.__base_url)
        schema.load(reader)
        return schema

    def loadFromStream(self, file, url=None):
        """Return an XMLSchema instance loaded from a file object.
           file -- file object
           url -- base location for resolving imports/includes.
        """
        reader = self.__readerClass()
        reader.loadDocument(file)
        schema = XMLSchema()
        if url is not None:
            schema.setBaseUrl(url)
        schema.load(reader)
        self.__setIncludes(schema)
        self.__setImports(schema)
        return schema

    def loadFromString(self, data):
        """Return an XMLSchema instance loaded from an XML string.
           data -- XML string
        """
        return self.loadFromStream(StringIO(data))

    def loadFromURL(self, url):
        """Return an XMLSchema instance loaded from the given url.
           url -- URL to dereference
        """
        reader = self.__readerClass()
        if self.__base_url:
            url = basejoin(self.__base_url, url)
        reader.loadFromURL(url)
        schema = XMLSchema()
        schema.setBaseUrl(url)
        schema.load(reader)
        self.__setIncludes(schema)
        self.__setImports(schema)
        return schema

    def loadFromFile(self, filename):
        """Return an XMLSchema instance loaded from the given file.
           filename -- name of file to open
        """
        if self.__base_url:
            filename = basejoin(self.__base_url, filename)
        file = open(filename, 'rb')
        try:
            schema = self.loadFromStream(file, filename)
        finally:
            file.close()
        return schema
class SchemaError(Exception):
    """Raised when a schema DOM cannot be interpreted (e.g. an undeclared
    namespace prefix in DOMAdapter.getNamespace)."""
    pass
###########################
# DOM Utility Adapters
##########################
class DOMAdapterInterface:
    """Abstract interface a DOM adapter must implement for the schema
    loader. Every method here raises NotImplementedError; concrete
    adapters (e.g. DOMAdapter) override all of them.
    """

    def hasattr(self, attr, ns=None):
        """return true if node has attribute

        attr -- attribute to check for
        ns -- namespace of attribute, by default None
        """
        raise NotImplementedError('adapter method not implemented')

    def getContentList(self, *contents):
        """returns an ordered list of child nodes

        *contents -- list of node names to return
        """
        raise NotImplementedError('adapter method not implemented')

    def setAttributeDictionary(self, attributes):
        """set attribute dictionary
        """
        raise NotImplementedError('adapter method not implemented')

    def getAttributeDictionary(self):
        """returns a dict of node's attributes
        """
        raise NotImplementedError('adapter method not implemented')

    def getNamespace(self, prefix):
        """returns namespace referenced by prefix.
        """
        raise NotImplementedError('adapter method not implemented')

    def getTagName(self):
        """returns tagName of node
        """
        raise NotImplementedError('adapter method not implemented')

    def getParentNode(self):
        """returns parent element in DOMAdapter or None
        """
        raise NotImplementedError('adapter method not implemented')

    def loadDocument(self, file):
        """load a Document from a file object

        file --
        """
        raise NotImplementedError('adapter method not implemented')

    def loadFromURL(self, url):
        """load a Document from an url

        url -- URL to dereference
        """
        raise NotImplementedError('adapter method not implemented')
class DOMAdapter(DOMAdapterInterface):
    """Adapter for ZSI.Utility.DOM
    """
    def __init__(self, node=None):
        """Reset all instance variables.
           element -- DOM document, node, or None
        """
        if hasattr(node, 'documentElement'):
            # a whole Document was passed in: adapt its root element
            self.__node = node.documentElement
        else:
            self.__node = node
        # attribute dict is built lazily by setAttributeDictionary()
        self.__attributes = None
    def getNode(self):
        return self.__node
    def hasattr(self, attr, ns=None):
        """attr -- attribute
           ns -- optional namespace, None means unprefixed attribute.
        """
        if not self.__attributes:
            self.setAttributeDictionary()
        if ns:
            return self.__attributes.get(ns,{}).has_key(attr)
        return self.__attributes.has_key(attr)
    def getContentList(self, *contents):
        # return child elements whose local tag name is in *contents,
        # each wrapped in a new adapter
        nodes = []
        ELEMENT_NODE = self.__node.ELEMENT_NODE
        for child in DOM.getElements(self.__node, None):
            if child.nodeType == ELEMENT_NODE and\
               SplitQName(child.tagName)[1] in contents:
                nodes.append(child)
        return map(self.__class__, nodes)
    def setAttributeDictionary(self):
        # cache nodeName -> nodeValue for all of the node's attributes
        self.__attributes = {}
        for v in self.__node._attrs.values():
            self.__attributes[v.nodeName] = v.nodeValue
    def getAttributeDictionary(self):
        if not self.__attributes:
            self.setAttributeDictionary()
        return self.__attributes
    def getTagName(self):
        return self.__node.tagName
    def getParentNode(self):
        if self.__node.parentNode.nodeType == self.__node.ELEMENT_NODE:
            return DOMAdapter(self.__node.parentNode)
        # parent is the Document (or similar), not an element
        return None
    def getNamespace(self, prefix):
        """prefix -- deference namespace prefix in node's context.
           Ascends parent nodes until found.
        """
        namespace = None
        if prefix == 'xmlns':
            namespace = DOM.findDefaultNS(prefix, self.__node)
        else:
            try:
                namespace = DOM.findNamespaceURI(prefix, self.__node)
            except DOMException, ex:
                # the 'xml' prefix is implicitly bound per the XML spec
                if prefix != 'xml':
                    raise SchemaError, '%s namespace not declared for %s'\
                        %(prefix, self.__node._get_tagName())
                namespace = XMLNS.XML
        return namespace
    def loadDocument(self, file):
        self.__node = DOM.loadDocument(file)
        if hasattr(self.__node, 'documentElement'):
            self.__node = self.__node.documentElement
    def loadFromURL(self, url):
        self.__node = DOM.loadFromURL(url)
        if hasattr(self.__node, 'documentElement'):
            self.__node = self.__node.documentElement
class XMLBase:
    """Base class providing an indented, thread-safe ``__str__`` dump of
    instance attributes. The class-level indent counter is shared, so
    nested dumps indent one level deeper per recursion.
    """
    tag = None
    __indent = 0
    __rlock = RLock()

    def __str__(self):
        # Hold the lock while the shared indent counter is in use; using a
        # with-statement (instead of bare acquire/release) guarantees the
        # lock is released even if formatting an attribute value raises.
        with XMLBase.__rlock:
            XMLBase.__indent += 1
            try:
                lines = ["<" + str(self.__class__) + '>\n']
                pad = XMLBase.__indent * ' '
                for k, v in self.__dict__.items():
                    lines.append("%s* %s = %s\n" % (pad, k, v))
                # join once instead of quadratic += concatenation
                return ''.join(lines)
            finally:
                XMLBase.__indent -= 1
"""Marker Interface: can determine something about an instances properties by using
the provided convenience functions.
"""
class DefinitionMarker:
"""marker for definitions
"""
pass
class DeclarationMarker:
"""marker for declarations
"""
pass
class AttributeMarker:
"""marker for attributes
"""
pass
class AttributeGroupMarker:
    """marker for attribute groups
    """
    # Mixin tag checked by MarkerInterface.isAttributeGroup().
    pass
class WildCardMarker:
    """marker for wildcards
    """
    # Mixin tag checked by MarkerInterface.isWildCard().
    pass
class ElementMarker:
    """marker for elements
    """
    # Mixin tag checked by MarkerInterface.isElement().
    # (Docstring previously said "wildcards" -- copy/paste error.)
    pass
class ReferenceMarker:
    """marker for references
    """
    # Mixin tag checked by MarkerInterface.isReference().
    pass
class ModelGroupMarker:
    """marker for model groups
    """
    # Base for AllMarker/ChoiceMarker/SequenceMarker; checked by
    # MarkerInterface.isModelGroup().
    pass
class AllMarker(ModelGroupMarker):
    """marker for all model group
    """
    pass
class ChoiceMarker(ModelGroupMarker):
    """marker for choice model group
    """
    pass
class SequenceMarker(ModelGroupMarker):
    """marker for sequence model group
    """
    pass
class ExtensionMarker:
    """marker for extensions
    """
    # Mixin tag checked by MarkerInterface.isExtension().
    pass
class RestrictionMarker:
    """marker for restrictions
    """
    # XSD facet element names a <restriction> may contain.
    facets = ['enumeration', 'length', 'maxExclusive', 'maxInclusive',\
        'maxLength', 'minExclusive', 'minInclusive', 'minLength',\
        'pattern', 'fractionDigits', 'totalDigits', 'whiteSpace']
class SimpleMarker:
    """marker for simple type information
    """
    # Mixin tag checked by MarkerInterface.isSimple().
    pass
class ListMarker:
    """marker for simple type list
    """
    # Mixin tag checked by MarkerInterface.isList().
    pass
class UnionMarker:
    """marker for simple type Union
    """
    # Mixin tag checked by MarkerInterface.isUnion().
    pass
class ComplexMarker:
    """marker for complex type information
    """
    # Mixin tag checked by MarkerInterface.isComplex().
    pass
class LocalMarker:
    """marker for locally scoped components
    """
    # Mixin tag checked by MarkerInterface.isLocal().
    # (Docstring previously duplicated ComplexMarker's "complex type
    # information" text -- presumably a copy/paste error.)
    pass
class MarkerInterface:
    """Convenience predicates over the marker mixins: each is* method
    simply reports whether the instance inherits the corresponding
    marker class.
    """
    def isDefinition(self):
        return isinstance(self, DefinitionMarker)

    def isDeclaration(self):
        return isinstance(self, DeclarationMarker)

    def isAttribute(self):
        return isinstance(self, AttributeMarker)

    def isAttributeGroup(self):
        return isinstance(self, AttributeGroupMarker)

    def isElement(self):
        return isinstance(self, ElementMarker)

    def isReference(self):
        return isinstance(self, ReferenceMarker)

    def isWildCard(self):
        return isinstance(self, WildCardMarker)

    def isModelGroup(self):
        return isinstance(self, ModelGroupMarker)

    def isAll(self):
        return isinstance(self, AllMarker)

    def isChoice(self):
        return isinstance(self, ChoiceMarker)

    def isSequence(self):
        return isinstance(self, SequenceMarker)

    def isExtension(self):
        return isinstance(self, ExtensionMarker)

    def isRestriction(self):
        return isinstance(self, RestrictionMarker)

    def isSimple(self):
        return isinstance(self, SimpleMarker)

    def isComplex(self):
        return isinstance(self, ComplexMarker)

    def isLocal(self):
        return isinstance(self, LocalMarker)

    def isList(self):
        return isinstance(self, ListMarker)

    def isUnion(self):
        return isinstance(self, UnionMarker)
##########################################################
# Schema Components
#########################################################
class XMLSchemaComponent(XMLBase, MarkerInterface):
    """Common base class for schema components: parses DOM attributes
    (with defaults and required-attribute checks), resolves QNames
    against in-scope namespace declarations, and looks up global schema
    items through the owning XMLSchema instance.

    class variables:
        required -- list of required attributes
        attributes -- dict of default attribute values, including None.
            Value can be a function for runtime dependencies.
        contents -- dict of namespace keyed content lists.
            'xsd' content of xsd namespace.
        xmlns_key -- key for declared xmlns namespace.
        xmlns -- xmlns is special prefix for namespace dictionary
        xml -- special xml prefix for xml namespace.
    """
    required = []
    attributes = {}
    contents = {}
    xmlns_key = ''
    xmlns = 'xmlns'
    xml = 'xml'

    def __init__(self, parent=None):
        """parent -- parent instance
           instance variables:
               attributes -- dictionary of node's attributes
        """
        self.attributes = None
        # Parent is kept as a weak reference (dereference with
        # self._parent()) to avoid reference cycles between components.
        self._parent = parent
        if self._parent:
            self._parent = weakref.ref(parent)

        # Guard: subclasses must redeclare required/attributes/contents
        # with the same container types as the base class.
        if not self.__class__ == XMLSchemaComponent\
           and not (type(self.__class__.required) == type(XMLSchemaComponent.required)\
           and type(self.__class__.attributes) == type(XMLSchemaComponent.attributes)\
           and type(self.__class__.contents) == type(XMLSchemaComponent.contents)):
            raise RuntimeError, 'Bad type for a class variable in %s' %self.__class__

    def getItemTrace(self):
        """Returns a node trace up to the <schema> item.

        Walks the parent chain collecting a readable breadcrumb of
        tag/name/ref strings; used to build error messages.
        """
        item, path, name, ref = self, [], 'name', 'ref'
        while not isinstance(item,XMLSchema) and not isinstance(item,WSDLToolsAdapter):
            attr = item.getAttribute(name)
            if attr is None:
                attr = item.getAttribute(ref)
                if attr is None: path.append('<%s>' %(item.tag))
                else: path.append('<%s ref="%s">' %(item.tag, attr))
            else:
                path.append('<%s name="%s">' %(item.tag,attr))
            item = item._parent()

        try:
            tns = item.getTargetNamespace()
        except:
            # root item may not expose a targetNamespace; fall back to ''
            tns = ''
        path.append('<%s targetNamespace="%s">' %(item.tag, tns))
        path.reverse()
        return ''.join(path)

    def getTargetNamespace(self):
        """return targetNamespace

        Ascends the parent chain until some ancestor carries a
        targetNamespace attribute.
        """
        parent = self
        targetNamespace = 'targetNamespace'
        tns = self.attributes.get(targetNamespace)
        while not tns:
            parent = parent._parent()
            tns = parent.attributes.get(targetNamespace)
        return tns

    def getAttributeDeclaration(self, attribute):
        """attribute -- attribute with a QName value (eg. type).
           collection -- check types collection in parent Schema instance
        """
        return self.getQNameAttribute(ATTRIBUTES, attribute)

    def getAttributeGroup(self, attribute):
        """attribute -- attribute with a QName value (eg. type).
           collection -- check types collection in parent Schema instance
        """
        return self.getQNameAttribute(ATTRIBUTE_GROUPS, attribute)

    def getTypeDefinition(self, attribute):
        """attribute -- attribute with a QName value (eg. type).
           collection -- check types collection in parent Schema instance
        """
        return self.getQNameAttribute(TYPES, attribute)

    def getElementDeclaration(self, attribute):
        """attribute -- attribute with a QName value (eg. element).
           collection -- check elements collection in parent Schema instance.
        """
        return self.getQNameAttribute(ELEMENTS, attribute)

    def getModelGroup(self, attribute):
        """attribute -- attribute with a QName value (eg. ref).
           collection -- check model_group collection in parent Schema instance.
        """
        return self.getQNameAttribute(MODEL_GROUPS, attribute)

    def getQNameAttribute(self, collection, attribute):
        """returns object instance representing QName --> (namespace,name),
           or if does not exist return None.
           attribute -- an information item attribute, with a QName value.
           collection -- collection in parent Schema instance to search.
        """
        obj = None
        tdc = self.getAttributeQName(attribute)
        if tdc:
            obj = self.getSchemaItem(collection, tdc.getTargetNamespace(), tdc.getName())
        return obj

    def getSchemaItem(self, collection, namespace, name):
        """returns object instance representing namespace, name,
           or if does not exist return None.
           namespace -- namespace item defined in.
           name -- name of item.
           collection -- collection in parent Schema instance to search.

        Looks first in the owning schema, then in any imported schema
        for the given namespace.  Raises KeyError when the namespace is
        known but the named item is missing from the collection.
        """
        obj = None
        parent = GetSchema(self)
        if parent.targetNamespace == namespace:
            try:
                obj = getattr(parent, collection)[name]
            except KeyError, ex:
                raise KeyError, "targetNamespace(%s) collection(%s) has no item(%s)"\
                    %(namespace, collection, name)
        elif parent.imports.has_key(namespace):
            schema = parent.imports[namespace].getSchema()
            try:
                obj = getattr(schema, collection)[name]
            except KeyError, ex:
                raise KeyError, "targetNamespace(%s) collection(%s) has no item(%s)"\
                    %(namespace, collection, name)
        return obj

    def getXMLNS(self, prefix=None):
        """deference prefix or by default xmlns, returns namespace.

        Ascends parents until the prefix is found; an unknown default
        prefix at the WSDL root resolves to '', any other unknown
        prefix raises SchemaError.
        """
        if prefix == XMLSchemaComponent.xml:
            # 'xml' prefix is implicitly bound to the fixed XML namespace.
            return XMLNS.XML
        parent = self
        ns = self.attributes[XMLSchemaComponent.xmlns].get(prefix or\
                XMLSchemaComponent.xmlns_key)
        while not ns:
            parent = parent._parent()
            ns = parent.attributes[XMLSchemaComponent.xmlns].get(prefix or\
                    XMLSchemaComponent.xmlns_key)
            if not ns and isinstance(parent, WSDLToolsAdapter):
                if prefix is None:
                    return ''
                raise SchemaError, 'unknown prefix %s' %prefix
        return ns

    def getAttribute(self, attribute):
        """return requested attribute value or None

        attribute may be a plain name or a (namespace, name) pair.
        """
        if type(attribute) in (list, tuple):
            if len(attribute) != 2:
                raise LookupError, 'To access attributes must use name or (namespace,name)'
            # NOTE(review): raises AttributeError (not None) when the
            # namespace key itself is absent -- confirm callers expect this.
            return self.attributes.get(attribute[0]).get(attribute[1])
        return self.attributes.get(attribute)

    def getAttributeQName(self, attribute):
        """return requested attribute value as (namespace,name) or None
        """
        qname = self.getAttribute(attribute)
        if isinstance(qname, TypeDescriptionComponent) is True:
            # already resolved (setAttributes pre-resolves common QNames)
            return qname
        if qname is None:
            return None

        prefix,ncname = SplitQName(qname)
        namespace = self.getXMLNS(prefix)
        return TypeDescriptionComponent((namespace,ncname))

    def getAttributeName(self):
        """return attribute name or None
        """
        return self.getAttribute('name')

    def setAttributes(self, node):
        """Sets up attribute dictionary, checks for required attributes and
           sets default attribute values. attr is for default attribute values
           determined at runtime.

           structure of attributes dictionary
               ['xmlns'][xmlns_key] -- xmlns namespace
               ['xmlns'][prefix] -- declared namespace prefix
               [namespace][prefix] -- attributes declared in a namespace
               [attribute] -- attributes w/o prefix, default namespaces do
                   not directly apply to attributes, ie Name can't collide
                   with QName.
        """
        self.attributes = {XMLSchemaComponent.xmlns:{}}
        for k,v in node.getAttributeDictionary().items():
            prefix,value = SplitQName(k)
            if value == XMLSchemaComponent.xmlns:
                # namespace declaration; '' key holds the default namespace
                self.attributes[value][prefix or XMLSchemaComponent.xmlns_key] = v
            elif prefix:
                ns = node.getNamespace(prefix)
                if not ns:
                    raise SchemaError, 'no namespace for attribute prefix %s'\
                        %prefix
                if not self.attributes.has_key(ns):
                    self.attributes[ns] = {}
                elif self.attributes[ns].has_key(value):
                    raise SchemaError, 'attribute %s declared multiple times in %s'\
                        %(value, ns)
                self.attributes[ns][value] = v
            elif not self.attributes.has_key(value):
                self.attributes[value] = v
            else:
                raise SchemaError, 'attribute %s declared multiple times' %value

        # WSDLToolsAdapter reuses this parser but has its own attribute set.
        if not isinstance(self, WSDLToolsAdapter):
            self.__checkAttributes()
        self.__setAttributeDefaults()

        #set QNames
        for k in ['type', 'element', 'base', 'ref', 'substitutionGroup', 'itemType']:
            if self.attributes.has_key(k):
                prefix, value = SplitQName(self.attributes.get(k))
                self.attributes[k] = \
                    TypeDescriptionComponent((self.getXMLNS(prefix), value))

        #Union, memberTypes is a whitespace separated list of QNames
        for k in ['memberTypes']:
            if self.attributes.has_key(k):
                qnames = self.attributes[k]
                self.attributes[k] = []
                for qname in qnames.split():
                    prefix, value = SplitQName(qname)
                    self.attributes['memberTypes'].append(\
                        TypeDescriptionComponent(\
                            (self.getXMLNS(prefix), value)))

    def getContents(self, node):
        """retrieve xsd contents

        Filters node's element children down to the names declared in
        the subclass's contents['xsd'] sequence.
        """
        return node.getContentList(*self.__class__.contents['xsd'])

    def __setAttributeDefaults(self):
        """Looks for default values for unset attributes.  If
           class variable representing attribute is None, then
           it must be defined as an instance variable.
        """
        for k,v in self.__class__.attributes.items():
            if v is not None and self.attributes.has_key(k) is False:
                if isinstance(v, types.FunctionType):
                    # callable default: resolved at runtime against self
                    self.attributes[k] = v(self)
                else:
                    self.attributes[k] = v

    def __checkAttributes(self):
        """Checks that required attributes have been defined,
           attributes w/default cannot be required.  Checks
           all defined attributes are legal, attribute
           references are not subject to this test.
        """
        for a in self.__class__.required:
            if not self.attributes.has_key(a):
                raise SchemaError,\
                    'class instance %s, missing required attribute %s'\
                    %(self.__class__, a)

        for a in self.attributes.keys():
            if (a not in (XMLSchemaComponent.xmlns, XMLNS.XML)) and\
               (a not in self.__class__.attributes.keys()) and not\
               (self.isAttribute() and self.isReference()):
                raise SchemaError, '%s, unknown attribute(%s,%s)' \
                    %(self.getItemTrace(), a, self.attributes[a])
class WSDLToolsAdapter(XMLSchemaComponent):
    """WSDL Adapter to grab the attributes from the wsdl document node.

    Lets schema components treat the enclosing WSDL <definitions>
    element as their root "parent" component.
    """
    attributes = {'name':None, 'targetNamespace':None}
    tag = 'definitions'

    def __init__(self, wsdl):
        XMLSchemaComponent.__init__(self, parent=wsdl)
        self.setAttributes(DOMAdapter(wsdl.document))

    def getImportSchemas(self):
        """returns WSDLTools.WSDL types Collection
        """
        return self._parent().types
class Notation(XMLSchemaComponent):
"""<notation>
parent:
schema
attributes:
id -- ID
name -- NCName, Required
public -- token, Required
system -- anyURI
contents:
annotation?
"""
required = ['name', 'public']
attributes = {'id':None, 'name':None, 'public':None, 'system':None}
contents = {'xsd':('annotation')}
tag = 'notation'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
for i in contents:
component = SplitQName(i.getTagName())[1]
if component == 'annotation' and not self.annotation:
self.annotation = Annotation(self)
self.annotation.fromDom(i)
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class Annotation(XMLSchemaComponent):
    """<annotation>
       parent:
           all,any,anyAttribute,attribute,attributeGroup,choice,complexContent,
           complexType,element,extension,field,group,import,include,key,keyref,
           list,notation,redefine,restriction,schema,selector,simpleContent,
           simpleType,union,unique
       attributes:
           id -- ID
       contents:
           (documentation | appinfo)*
    """
    attributes = {'id':None}
    contents = {'xsd':('documentation', 'appinfo')}
    tag = 'annotation'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.content = None

    def fromDom(self, node):
        """Parse children.  documentation/appinfo are recognized but
        currently skipped, so self.content always ends up an empty tuple.
        """
        self.setAttributes(node)
        contents = self.getContents(node)
        content = []

        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'documentation':
                # documentation content intentionally not retained
                continue
            elif component == 'appinfo':
                # appinfo content intentionally not retained
                continue
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
        self.content = tuple(content)
class Documentation(XMLSchemaComponent):
    """<documentation>
       parent:
           annotation
       attributes:
           source, anyURI
           xml:lang, language
       contents:
           mixed, any
    """
    attributes = {'source':None, 'xml:lang':None}
    contents = {'xsd':('mixed', 'any')}
    tag = 'documentation'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.content = None

    def fromDom(self, node):
        """Parse children.  mixed/any content is recognized but skipped,
        so self.content always ends up an empty tuple.
        """
        self.setAttributes(node)
        contents = self.getContents(node)
        content = []

        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'mixed':
                # mixed content intentionally not retained
                continue
            elif component == 'any':
                # wildcard content intentionally not retained
                continue
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
        self.content = tuple(content)
class Appinfo(XMLSchemaComponent):
    """<appinfo>
       parent:
           annotation
       attributes:
           source, anyURI
       contents:
           mixed, any
    """
    # NOTE(review): 'anyURI' looks like a copy/paste of the doc line
    # above -- the schema attribute is 'source'; confirm before relying
    # on an 'anyURI' attribute existing.
    attributes = {'source':None, 'anyURI':None}
    contents = {'xsd':('mixed', 'any')}
    tag = 'appinfo'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.content = None

    def fromDom(self, node):
        """Parse children.  mixed/any content is recognized but skipped,
        so self.content always ends up an empty tuple.
        """
        self.setAttributes(node)
        contents = self.getContents(node)
        content = []

        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'mixed':
                # mixed content intentionally not retained
                continue
            elif component == 'any':
                # wildcard content intentionally not retained
                continue
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
        self.content = tuple(content)
class XMLSchemaFake:
    # Temporary stand-in used by WSDL until the full XMLSchema
    # implementation works: just records the element and its
    # targetNamespace attribute.
    def __init__(self, element):
        self.element = element
        self.targetNamespace = DOM.getAttr(element, 'targetNamespace')
class XMLSchema(XMLSchemaComponent):
    """A schema is a collection of schema components derived from one
       or more schema documents, that is, one or more <schema> element
       information items.  It represents the abstract notion of a schema
       rather than a single schema document (or other representation).

       <schema>
       parent:
           ROOT
       attributes:
           id -- ID
           version -- token
           xml:lang -- language
           targetNamespace -- anyURI
           attributeFormDefault -- 'qualified' | 'unqualified', 'unqualified'
           elementFormDefault -- 'qualified' | 'unqualified', 'unqualified'
           blockDefault -- '#all' | list of
               ('substitution | 'extension' | 'restriction')
           finalDefault -- '#all' | list of
               ('extension' | 'restriction' | 'list' | 'union')
       contents:
           ((include | import | redefine | annotation)*,
            (attribute, attributeGroup, complexType, element, group,
             notation, simpleType)*, annotation*)*

        attributes -- schema attributes
        imports -- import statements
        includes -- include statements
        redefines --
        types -- global simpleType, complexType definitions
        elements -- global element declarations
        attr_decl -- global attribute declarations
        attr_groups -- attribute Groups
        model_groups -- model Groups
        notations -- global notations
    """
    attributes = {'id':None,
        'version':None,
        'xml:lang':None,
        'targetNamespace':None,
        'attributeFormDefault':'unqualified',
        'elementFormDefault':'unqualified',
        'blockDefault':None,
        'finalDefault':None}
    contents = {'xsd':('include', 'import', 'redefine', 'annotation',
                       'attribute', 'attributeGroup', 'complexType',
                       'element', 'group', 'notation', 'simpleType',
                       'annotation')}
    empty_namespace = ''
    tag = 'schema'

    def __init__(self, parent=None):
        """parent --
           instance variables:
           targetNamespace -- schema's declared targetNamespace, or empty string.
           _imported_schemas -- namespace keyed dict of schema dependencies, if
              a schema is provided instance will not resolve import statement.
           _included_schemas -- schemaLocation keyed dict of component schemas,
              if schema is provided instance will not resolve include statement.
           _base_url -- needed for relative URLs support, only works with URLs
               relative to initial document.
           includes -- collection of include statements
           imports -- collection of import statements
           elements -- collection of global element declarations
           types -- collection of global type definitions
           attr_decl -- collection of global attribute declarations
           attr_groups -- collection of global attribute group definitions
           model_groups -- collection of model group definitions
           notations -- collection of notations
        """
        self.__node = None
        self.targetNamespace = None
        XMLSchemaComponent.__init__(self, parent)
        # Collection key extractors: by name, by namespace, by schemaLocation.
        f = lambda k: k.attributes['name']
        ns = lambda k: k.attributes['namespace']
        sl = lambda k: k.attributes['schemaLocation']
        self.includes = Collection(self, key=sl)
        self.imports = Collection(self, key=ns)
        self.elements = Collection(self, key=f)
        self.types = Collection(self, key=f)
        self.attr_decl = Collection(self, key=f)
        self.attr_groups = Collection(self, key=f)
        self.model_groups = Collection(self, key=f)
        self.notations = Collection(self, key=f)

        self._imported_schemas = {}
        self._included_schemas = {}
        self._base_url = None

    def getNode(self):
        """
        Interacting with the underlying DOM tree.
        """
        return self.__node

    def addImportSchema(self, schema):
        """for resolving import statements in Schema instance
           schema -- schema instance
           _imported_schemas
        """
        if not isinstance(schema, XMLSchema):
            raise TypeError, 'expecting a Schema instance'
        if schema.targetNamespace != self.targetNamespace:
            self._imported_schemas[schema.targetNamespace] = schema
        else:
            # an import must target a *different* namespace
            raise SchemaError, 'import schema bad targetNamespace'

    def addIncludeSchema(self, schemaLocation, schema):
        """for resolving include statements in Schema instance
           schemaLocation -- schema location
           schema -- schema instance
           _included_schemas
        """
        if not isinstance(schema, XMLSchema):
            raise TypeError, 'expecting a Schema instance'
        # an include must share the namespace (or have none at all)
        if not schema.targetNamespace or\
             schema.targetNamespace == self.targetNamespace:
            self._included_schemas[schemaLocation] = schema
        else:
            raise SchemaError, 'include schema bad targetNamespace'

    def setImportSchemas(self, schema_dict):
        """set the import schema dictionary, which is used to
           reference depedent schemas.
        """
        self._imported_schemas = schema_dict

    def getImportSchemas(self):
        """get the import schema dictionary, which is used to
           reference depedent schemas.
        """
        return self._imported_schemas

    def getSchemaNamespacesToImport(self):
        """returns tuple of namespaces the schema instance has declared
           itself to be depedent upon.
        """
        # NOTE(review): this returns self.includes keys, which are
        # schemaLocations, not namespaces -- self.imports is the
        # namespace-keyed collection; confirm intended behavior.
        return tuple(self.includes.keys())

    def setIncludeSchemas(self, schema_dict):
        """set the include schema dictionary, which is keyed with
           schemaLocation (uri).
           This is a means of providing
           schemas to the current schema for content inclusion.
        """
        self._included_schemas = schema_dict

    def getIncludeSchemas(self):
        """get the include schema dictionary, which is keyed with
           schemaLocation (uri).
        """
        return self._included_schemas

    def getBaseUrl(self):
        """get base url, used for normalizing all relative uri's
        """
        return self._base_url

    def setBaseUrl(self, url):
        """set base url, used for normalizing all relative uri's
        """
        self._base_url = url

    def getElementFormDefault(self):
        """return elementFormDefault attribute
        """
        return self.attributes.get('elementFormDefault')

    def isElementFormDefaultQualified(self):
        return self.attributes.get('elementFormDefault') == 'qualified'

    def getAttributeFormDefault(self):
        """return attributeFormDefault attribute
        """
        return self.attributes.get('attributeFormDefault')

    def getBlockDefault(self):
        """return blockDefault attribute
        """
        return self.attributes.get('blockDefault')

    def getFinalDefault(self):
        """return finalDefault attribute
        """
        return self.attributes.get('finalDefault')

    def load(self, node, location=None):
        """Populate this schema from a <schema> DOM node: resolve
        includes (inlining their children), record imports, and build
        the global component collections.
        """
        self.__node = node

        pnode = node.getParentNode()
        if pnode:
            pname = SplitQName(pnode.getTagName())[1]
            if pname == 'types':
                # schema embedded in WSDL <types>: merge xmlns
                # declarations from the parent element.
                attributes = {}
                self.setAttributes(pnode)
                attributes.update(self.attributes)
                self.setAttributes(node)
                for k,v in attributes['xmlns'].items():
                    if not self.attributes['xmlns'].has_key(k):
                        self.attributes['xmlns'][k] = v
            else:
                self.setAttributes(node)
        else:
            self.setAttributes(node)

        self.targetNamespace = self.getTargetNamespace()
        for childNode in self.getContents(node):
            component = SplitQName(childNode.getTagName())[1]

            if component == 'include':
                tp = self.__class__.Include(self)
                tp.fromDom(childNode)

                sl = tp.attributes['schemaLocation']
                schema = tp.getSchema()

                if not self.getIncludeSchemas().has_key(sl):
                    self.addIncludeSchema(sl, schema)

                self.includes[sl] = tp

                # Inline the included schema: replace the <include>
                # element with clones of the included document's children
                # and merge its global collections into this schema.
                pn = childNode.getParentNode().getNode()
                pn.removeChild(childNode.getNode())
                for child in schema.getNode().getNode().childNodes:
                    pn.appendChild(child.cloneNode(1))

                for collection in ['imports','elements','types',
                                   'attr_decl','attr_groups','model_groups',
                                   'notations']:
                    for k,v in getattr(schema,collection).items():
                        if not getattr(self,collection).has_key(k):
                            v._parent = weakref.ref(self)
                            getattr(self,collection)[k] = v
                        else:
                            print "Warning: Not keeping schema component."

            elif component == 'import':
                tp = self.__class__.Import(self)
                tp.fromDom(childNode)
                import_ns = tp.getAttribute('namespace') or \
                    self.__class__.empty_namespace
                if not self.getImportSchemas().has_key(import_ns) and \
                   tp.getAttribute('schemaLocation'):
                    self.addImportSchema(tp.getSchema())
                self.imports[import_ns] = tp
            elif component == 'redefine':
                # redefine not implemented yet
                pass
            elif component == 'annotation':
                # annotation not implemented yet
                pass
            elif component == 'attribute':
                tp = AttributeDeclaration(self)
                tp.fromDom(childNode)
                self.attr_decl[tp.getAttribute('name')] = tp
            elif component == 'attributeGroup':
                tp = AttributeGroupDefinition(self)
                tp.fromDom(childNode)
                self.attr_groups[tp.getAttribute('name')] = tp
            elif component == 'element':
                tp = ElementDeclaration(self)
                tp.fromDom(childNode)
                self.elements[tp.getAttribute('name')] = tp
            elif component == 'group':
                tp = ModelGroupDefinition(self)
                tp.fromDom(childNode)
                self.model_groups[tp.getAttribute('name')] = tp
            elif component == 'notation':
                tp = Notation(self)
                tp.fromDom(childNode)
                self.notations[tp.getAttribute('name')] = tp
            elif component == 'complexType':
                tp = ComplexType(self)
                tp.fromDom(childNode)
                self.types[tp.getAttribute('name')] = tp
            elif component == 'simpleType':
                tp = SimpleType(self)
                tp.fromDom(childNode)
                self.types[tp.getAttribute('name')] = tp
            else:
                # unknown child element: stop processing further children
                break

    class Import(XMLSchemaComponent):
        """<import>
           parent:
               schema
           attributes:
               id -- ID
               namespace -- anyURI
               schemaLocation -- anyURI
           contents:
               annotation?
        """
        attributes = {'id':None,
            'namespace':None,
            'schemaLocation':None}
        contents = {'xsd':['annotation']}
        tag = 'import'

        def __init__(self, parent):
            XMLSchemaComponent.__init__(self, parent)
            self.annotation = None
            self._schema = None

        def fromDom(self, node):
            self.setAttributes(node)
            contents = self.getContents(node)

            # per spec an import may not target its own namespace
            if self.attributes['namespace'] == self.getTargetNamespace():
                raise SchemaError, 'namespace of schema and import match'

            for i in contents:
                component = SplitQName(i.getTagName())[1]
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())

        def getSchema(self):
            """if schema is not defined, first look for a Schema class instance
               in parent Schema.  Else if not defined resolve schemaLocation
               and create a new Schema class instance, and keep a hard reference.
            """
            if not self._schema:
                ns = self.attributes['namespace']
                schema = self._parent().getImportSchemas().get(ns)
                if not schema and self._parent()._parent:
                    # fall back to the grandparent's (e.g. WSDL's) imports
                    schema = self._parent()._parent().getImportSchemas().get(ns)
                if not schema:
                    url = self.attributes.get('schemaLocation')
                    if not url:
                        raise SchemaError, 'namespace(%s) is unknown' %ns
                    base_url = self._parent().getBaseUrl()
                    reader = SchemaReader(base_url=base_url)
                    reader._imports = self._parent().getImportSchemas()
                    reader._includes = self._parent().getIncludeSchemas()
                    self._schema = reader.loadFromURL(url)
            return self._schema or schema

    class Include(XMLSchemaComponent):
        """<include schemaLocation>
           parent:
               schema
           attributes:
               id -- ID
               schemaLocation -- anyURI, required
           contents:
               annotation?
        """
        required = ['schemaLocation']
        attributes = {'id':None,
            'schemaLocation':None}
        contents = {'xsd':['annotation']}
        tag = 'include'

        def __init__(self, parent):
            XMLSchemaComponent.__init__(self, parent)
            self.annotation = None
            self._schema = None

        def fromDom(self, node):
            self.setAttributes(node)
            contents = self.getContents(node)

            for i in contents:
                component = SplitQName(i.getTagName())[1]
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())

        def getSchema(self):
            """if schema is not defined, first look for a Schema class instance
               in parent Schema.  Else if not defined resolve schemaLocation
               and create a new Schema class instance.
            """
            if not self._schema:
                schema = self._parent()
                self._schema = schema.getIncludeSchemas().get(\
                                   self.attributes['schemaLocation']
                                   )
                if not self._schema:
                    url = self.attributes['schemaLocation']
                    reader = SchemaReader(base_url=schema.getBaseUrl())
                    reader._imports = schema.getImportSchemas()
                    reader._includes = schema.getIncludeSchemas()
                    self._schema = reader.loadFromURL(url)
            return self._schema
class AttributeDeclaration(XMLSchemaComponent,\
                           AttributeMarker,\
                           DeclarationMarker):
    """<attribute name>
       parent:
           schema
       attributes:
           id -- ID
           name -- NCName, required
           type -- QName
           default -- string
           fixed -- string
       contents:
           annotation?, simpleType?
    """
    required = ['name']
    attributes = {'id':None,
        'name':None,
        'type':None,
        'default':None,
        'fixed':None}
    contents = {'xsd':['annotation','simpleType']}
    tag = 'attribute'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        self.content = None

    def fromDom(self, node):
        """ No list or union support
        """
        self.setAttributes(node)
        contents = self.getContents(node)

        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'annotation' and not self.annotation:
                self.annotation = Annotation(self)
                self.annotation.fromDom(i)
            elif component == 'simpleType':
                # inline anonymous simple type becomes self.content
                self.content = AnonymousSimpleType(self)
                self.content.fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class LocalAttributeDeclaration(AttributeDeclaration,\
                                AttributeMarker,\
                                LocalMarker,\
                                DeclarationMarker):
    """<attribute name>
       parent:
           complexType, restriction, extension, attributeGroup
       attributes:
           id -- ID
           name -- NCName,  required
           type -- QName
           form -- ('qualified' | 'unqualified'), schema.attributeFormDefault
           use -- ('optional' | 'prohibited' | 'required'), optional
           default -- string
           fixed -- string
       contents:
           annotation?, simpleType?
    """
    required = ['name']
    attributes = {'id':None,
        'name':None,
        'type':None,
        # form defaults to the enclosing schema's attributeFormDefault,
        # resolved at runtime via the callable-default mechanism.
        'form':lambda self: GetSchema(self).getAttributeFormDefault(),
        'use':'optional',
        'default':None,
        'fixed':None}
    contents = {'xsd':['annotation','simpleType']}

    def __init__(self, parent):
        AttributeDeclaration.__init__(self, parent)
        self.annotation = None
        self.content = None

    def fromDom(self, node):
        self.setAttributes(node)
        contents = self.getContents(node)

        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'annotation' and not self.annotation:
                self.annotation = Annotation(self)
                self.annotation.fromDom(i)
            elif component == 'simpleType':
                self.content = AnonymousSimpleType(self)
                self.content.fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class AttributeWildCard(XMLSchemaComponent,\
                        AttributeMarker,\
                        DeclarationMarker,\
                        WildCardMarker):
    """<anyAttribute>
       parents:
           complexType, restriction, extension, attributeGroup
       attributes:
           id -- ID
           namespace -- '##any' | '##other' |
                        (anyURI* | '##targetNamespace' | '##local'), ##any
           processContents -- 'lax' | 'skip' | 'strict', strict
       contents:
           annotation?
    """
    attributes = {'id':None,
        'namespace':'##any',
        'processContents':'strict'}
    contents = {'xsd':['annotation']}
    tag = 'anyAttribute'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None

    def fromDom(self, node):
        self.setAttributes(node)
        contents = self.getContents(node)

        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'annotation' and not self.annotation:
                self.annotation = Annotation(self)
                self.annotation.fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class AttributeReference(XMLSchemaComponent,\
                         AttributeMarker,\
                         ReferenceMarker):
    """<attribute ref>
       parents:
           complexType, restriction, extension, attributeGroup
       attributes:
           id -- ID
           ref -- QName, required
           use -- ('optional' | 'prohibited' | 'required'), optional
           default -- string
           fixed -- string
       contents:
           annotation?
    """
    required = ['ref']
    attributes = {'id':None,
        'ref':None,
        'use':'optional',
        'default':None,
        'fixed':None}
    contents = {'xsd':['annotation']}
    tag = 'attribute'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None

    def getAttributeDeclaration(self, attribute='ref'):
        # resolve the referenced global attribute declaration
        return XMLSchemaComponent.getAttributeDeclaration(self, attribute)

    def fromDom(self, node):
        self.setAttributes(node)
        contents = self.getContents(node)

        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'annotation' and not self.annotation:
                self.annotation = Annotation(self)
                self.annotation.fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class AttributeGroupDefinition(XMLSchemaComponent,\
                               AttributeGroupMarker,\
                               DefinitionMarker):
    """<attributeGroup name>
       parents:
           schema, redefine
       attributes:
           id -- ID
           name -- NCName,  required
       contents:
           annotation?, (attribute | attributeGroup)*, anyAttribute?
    """
    required = ['name']
    attributes = {'id':None,
        'name':None}
    contents = {'xsd':['annotation', 'attribute', 'attributeGroup', 'anyAttribute']}
    tag = 'attributeGroup'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        self.attr_content = None

    def getAttributeContent(self):
        # tuple of attribute/attributeGroup/anyAttribute children
        return self.attr_content

    def fromDom(self, node):
        self.setAttributes(node)
        contents = self.getContents(node)
        content = []

        # index-based iteration: annotation only allowed first,
        # anyAttribute only allowed last.
        for indx in range(len(contents)):
            component = SplitQName(contents[indx].getTagName())[1]
            if (component == 'annotation') and (not indx):
                self.annotation = Annotation(self)
                self.annotation.fromDom(contents[indx])
            elif component == 'attribute':
                # NOTE(review): relies on the node adapter providing a
                # hasattr(name) method -- confirm on DOMAdapter.
                if contents[indx].hasattr('name'):
                    content.append(LocalAttributeDeclaration(self))
                elif contents[indx].hasattr('ref'):
                    content.append(AttributeReference(self))
                else:
                    raise SchemaError, 'Unknown attribute type'
                content[-1].fromDom(contents[indx])
            elif component == 'attributeGroup':
                content.append(AttributeGroupReference(self))
                content[-1].fromDom(contents[indx])
            elif component == 'anyAttribute':
                if len(contents) != indx+1:
                    raise SchemaError, 'anyAttribute is out of order in %s' %self.getItemTrace()
                content.append(AttributeWildCard(self))
                content[-1].fromDom(contents[indx])
            else:
                raise SchemaError, 'Unknown component (%s)' %(contents[indx].getTagName())

        self.attr_content = tuple(content)
class AttributeGroupReference(XMLSchemaComponent,
                              AttributeGroupMarker,
                              ReferenceMarker):
    """<attributeGroup ref>
    parents:
        complexType, restriction, extension, attributeGroup
    attributes:
        id -- ID
        ref -- QName, required

    contents:
        annotation?
    """
    required = ['ref']
    attributes = {'id': None,
                  'ref': None}
    contents = {'xsd': ['annotation']}
    tag = 'attributeGroup'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None

    def getAttributeGroup(self, attribute='ref'):
        """attribute -- attribute with a QName value (eg. type).
        collection -- check types collection in parent Schema instance
        """
        return XMLSchemaComponent.getAttributeGroup(self, attribute)

    def fromDom(self, node):
        """Populate this reference from DOM element *node*."""
        self.setAttributes(node)
        contents = self.getContents(node)
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'annotation' and not self.annotation:
                self.annotation = Annotation(self)
                self.annotation.fromDom(i)
            else:
                raise SchemaError('Unknown component (%s)' % (i.getTagName()))
######################################################
# Elements
#####################################################
class IdentityConstrants(XMLSchemaComponent):
    """Allow one to uniquely identify nodes in a document and ensure the
    integrity of references between them.

    attributes -- dictionary of attributes
    selector -- XPath to selected nodes
    fields -- list of XPath to key field
    """
    # NOTE: class name misspells "Constraints"; kept as-is because it is
    # part of the module's public interface (Unique/Key/KeyRef subclass it).

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.selector = None
        self.fields = None
        self.annotation = None

    def fromDom(self, node):
        """Populate selector, fields and optional annotation from *node*."""
        self.setAttributes(node)
        contents = self.getContents(node)
        fields = []
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                elif component == 'selector':
                    # self.Selector resolves to the nested class below
                    self.selector = self.Selector(self)
                    self.selector.fromDom(i)
                    continue
                elif component == 'field':
                    fields.append(self.Field(self))
                    fields[-1].fromDom(i)
                    continue
                else:
                    raise SchemaError('Unknown component (%s)'
                                      % (i.getTagName()))
            else:
                raise SchemaError('Unknown component (%s)' % (i.getTagName()))
        self.fields = tuple(fields)

    class Constraint(XMLSchemaComponent):
        # Common base for Selector/Field: attributes plus optional annotation.
        def __init__(self, parent):
            XMLSchemaComponent.__init__(self, parent)
            self.annotation = None

        def fromDom(self, node):
            """Populate this constraint part from DOM element *node*."""
            self.setAttributes(node)
            contents = self.getContents(node)
            for i in contents:
                component = SplitQName(i.getTagName())[1]
                if component in self.__class__.contents['xsd']:
                    if component == 'annotation' and not self.annotation:
                        self.annotation = Annotation(self)
                        self.annotation.fromDom(i)
                    else:
                        raise SchemaError('Unknown component (%s)'
                                          % (i.getTagName()))
                else:
                    raise SchemaError('Unknown component (%s)'
                                      % (i.getTagName()))

    class Selector(Constraint):
        """<selector xpath>
        parent:
            unique, key, keyref
        attributes:
            id -- ID
            xpath -- XPath subset, required

        contents:
            annotation?
        """
        required = ['xpath']
        attributes = {'id': None,
                      'xpath': None}
        contents = {'xsd': ['annotation']}
        tag = 'selector'

    class Field(Constraint):
        """<field xpath>
        parent:
            unique, key, keyref
        attributes:
            id -- ID
            xpath -- XPath subset, required

        contents:
            annotation?
        """
        required = ['xpath']
        attributes = {'id': None,
                      'xpath': None}
        contents = {'xsd': ['annotation']}
        tag = 'field'
class Unique(IdentityConstrants):
    """<unique name> Enforce fields are unique w/i a specified scope.

    parent:
        element
    attributes:
        id -- ID
        name -- NCName, required

    contents:
        annotation?, selector, field+
    """
    required = ['name']
    attributes = {'id': None, 'name': None}
    contents = {'xsd': ['annotation', 'selector', 'field']}
    tag = 'unique'
class Key(IdentityConstrants):
    """<key name> Enforce fields are unique w/i a specified scope, and all
    field values are present w/i document. Fields cannot be nillable.

    parent:
        element
    attributes:
        id -- ID
        name -- NCName, required

    contents:
        annotation?, selector, field+
    """
    required = ['name']
    attributes = {'id': None, 'name': None}
    contents = {'xsd': ['annotation', 'selector', 'field']}
    tag = 'key'
class KeyRef(IdentityConstrants):
    """<keyref name refer> Ensure a match between two sets of values in an
    instance.

    parent:
        element
    attributes:
        id -- ID
        name -- NCName, required
        refer -- QName, required

    contents:
        annotation?, selector, field+
    """
    required = ['name', 'refer']
    attributes = {'id': None, 'name': None, 'refer': None}
    contents = {'xsd': ['annotation', 'selector', 'field']}
    tag = 'keyref'
class ElementDeclaration(XMLSchemaComponent,
                         ElementMarker,
                         DeclarationMarker):
    """<element name>
    parents:
        schema
    attributes:
        id -- ID
        name -- NCName, required
        type -- QName
        default -- string
        fixed -- string
        nillable -- boolean, false
        abstract -- boolean, false
        substitutionGroup -- QName
        block -- ('#all' | ('substition' | 'extension' | 'restriction')*),
            schema.blockDefault
        final -- ('#all' | ('extension' | 'restriction')*),
            schema.finalDefault

    contents:
        annotation?, (simpleType,complexType)?, (key | keyref | unique)*
    """
    required = ['name']
    attributes = {'id': None,
                  'name': None,
                  'type': None,
                  'default': None,
                  'fixed': None,
                  'nillable': 0,
                  'abstract': 0,
                  'substitutionGroup': None,
                  'block': lambda self: self._parent().getBlockDefault(),
                  'final': lambda self: self._parent().getFinalDefault()}
    contents = {'xsd': ['annotation', 'simpleType', 'complexType', 'key',
                        'keyref', 'unique']}
    tag = 'element'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        self.content = None
        # assignment goes through the `constraints` property below
        self.constraints = ()

    def isQualified(self):
        """
        Global elements are always qualified.
        """
        return True

    def getElementDeclaration(self, attribute):
        raise Warning('invalid operation for <%s>' % self.tag)

    def getTypeDefinition(self, attribute=None):
        """
        If attribute is None, "type" is assumed, return the corresponding
        representation of the global type definition (TypeDefinition),
        or the local definition if don't find "type". To maintain backwards
        compat, if attribute is provided call base class method.
        """
        if attribute:
            return XMLSchemaComponent.getTypeDefinition(self, attribute)
        gt = XMLSchemaComponent.getTypeDefinition(self, 'type')
        if gt:
            return gt
        return self.content

    def getConstraints(self):
        return self._constraints

    def setConstraints(self, constraints):
        # normalize to a tuple regardless of the input sequence type
        self._constraints = tuple(constraints)

    constraints = property(getConstraints, setConstraints, None,
                           "tuple of key, keyref, unique constraints")

    def fromDom(self, node):
        """Populate annotation, anonymous type content and identity
        constraints from DOM element *node*."""
        self.setAttributes(node)
        contents = self.getContents(node)
        constraints = []
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                elif component == 'simpleType' and not self.content:
                    self.content = AnonymousSimpleType(self)
                    self.content.fromDom(i)
                elif component == 'complexType' and not self.content:
                    self.content = LocalComplexType(self)
                    self.content.fromDom(i)
                elif component == 'key':
                    constraints.append(Key(self))
                    constraints[-1].fromDom(i)
                elif component == 'keyref':
                    constraints.append(KeyRef(self))
                    constraints[-1].fromDom(i)
                elif component == 'unique':
                    constraints.append(Unique(self))
                    constraints[-1].fromDom(i)
                else:
                    raise SchemaError('Unknown component (%s)'
                                      % (i.getTagName()))
            else:
                raise SchemaError('Unknown component (%s)' % (i.getTagName()))
        self.constraints = constraints
class LocalElementDeclaration(ElementDeclaration,
                              LocalMarker):
    """<element>
    parents:
        all, choice, sequence
    attributes:
        id -- ID
        name -- NCName, required
        form -- ('qualified' | 'unqualified'), schema.elementFormDefault
        type -- QName
        minOccurs -- Whole Number, 1
        maxOccurs -- (Whole Number | 'unbounded'), 1
        default -- string
        fixed -- string
        nillable -- boolean, false
        block -- ('#all' | ('extension' | 'restriction')*),
            schema.blockDefault

    contents:
        annotation?, (simpleType,complexType)?, (key | keyref | unique)*
    """
    required = ['name']
    attributes = {'id': None,
                  'name': None,
                  'form': lambda self: GetSchema(self).getElementFormDefault(),
                  'type': None,
                  'minOccurs': '1',
                  'maxOccurs': '1',
                  'default': None,
                  'fixed': None,
                  'nillable': 0,
                  'abstract': 0,
                  'block': lambda self: GetSchema(self).getBlockDefault()}
    contents = {'xsd': ['annotation', 'simpleType', 'complexType', 'key',
                        'keyref', 'unique']}

    def isQualified(self):
        """
        Local elements can be qualified or unqualifed according
        to the attribute form, or the elementFormDefault. By default
        local elements are unqualified.
        """
        form = self.getAttribute('form')
        if form == 'qualified':
            return True
        if form == 'unqualified':
            return False
        raise SchemaError('Bad form (%s) for element: %s'
                          % (form, self.getItemTrace()))
class ElementReference(XMLSchemaComponent,
                       ElementMarker,
                       ReferenceMarker):
    """<element ref>
    parents:
        all, choice, sequence
    attributes:
        id -- ID
        ref -- QName, required
        minOccurs -- Whole Number, 1
        maxOccurs -- (Whole Number | 'unbounded'), 1

    contents:
        annotation?
    """
    required = ['ref']
    attributes = {'id': None,
                  'ref': None,
                  'minOccurs': '1',
                  'maxOccurs': '1'}
    contents = {'xsd': ['annotation']}
    tag = 'element'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None

    def getElementDeclaration(self, attribute=None):
        """If attribute is None, "ref" is assumed, return the corresponding
        representation of the global element declaration
        (ElementDeclaration). To maintain backwards compat, if attribute
        is provided call base class method.
        """
        if attribute:
            return XMLSchemaComponent.getElementDeclaration(self, attribute)
        return XMLSchemaComponent.getElementDeclaration(self, 'ref')

    def fromDom(self, node):
        """Populate this reference from DOM element *node*."""
        self.annotation = None
        self.setAttributes(node)
        for i in self.getContents(node):
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                else:
                    raise SchemaError('Unknown component (%s)'
                                      % (i.getTagName()))
class ElementWildCard(LocalElementDeclaration, WildCardMarker):
    """<any>
    parents:
        choice, sequence
    attributes:
        id -- ID
        minOccurs -- Whole Number, 1
        maxOccurs -- (Whole Number | 'unbounded'), 1
        namespace -- '##any' | '##other' |
            (anyURI* | '##targetNamespace' | '##local'), ##any
        processContents -- 'lax' | 'skip' | 'strict', strict

    contents:
        annotation?
    """
    required = []
    attributes = {'id': None,
                  'minOccurs': '1',
                  'maxOccurs': '1',
                  'namespace': '##any',
                  'processContents': 'strict'}
    contents = {'xsd': ['annotation']}
    tag = 'any'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None

    def isQualified(self):
        """
        Global elements are always qualified, but if processContents
        are not strict could have dynamically generated local elements.
        """
        return GetSchema(self).isElementFormDefaultQualified()

    def getTypeDefinition(self, attribute):
        raise Warning('invalid operation for <%s>' % self.tag)

    def fromDom(self, node):
        """Populate this wildcard from DOM element *node*."""
        self.annotation = None
        self.setAttributes(node)
        for i in self.getContents(node):
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                else:
                    raise SchemaError('Unknown component (%s)'
                                      % (i.getTagName()))
######################################################
# Model Groups
#####################################################
class Sequence(XMLSchemaComponent,
               SequenceMarker):
    """<sequence>
    parents:
        complexType, extension, restriction, group, choice, sequence
    attributes:
        id -- ID
        minOccurs -- Whole Number, 1
        maxOccurs -- (Whole Number | 'unbounded'), 1

    contents:
        annotation?, (element | group | choice | sequence | any)*
    """
    attributes = {'id': None,
                  'minOccurs': '1',
                  'maxOccurs': '1'}
    contents = {'xsd': ['annotation', 'element', 'group', 'choice',
                        'sequence', 'any']}
    tag = 'sequence'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        self.content = None

    def fromDom(self, node):
        """Populate particle content from DOM element *node*."""
        self.setAttributes(node)
        contents = self.getContents(node)
        content = []
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                    continue
                elif component == 'element':
                    # 'ref' attribute selects a reference, else a local decl
                    if i.hasattr('ref'):
                        content.append(ElementReference(self))
                    else:
                        content.append(LocalElementDeclaration(self))
                elif component == 'group':
                    content.append(ModelGroupReference(self))
                elif component == 'choice':
                    content.append(Choice(self))
                elif component == 'sequence':
                    content.append(Sequence(self))
                elif component == 'any':
                    content.append(ElementWildCard(self))
                else:
                    raise SchemaError('Unknown component (%s)'
                                      % (i.getTagName()))
                content[-1].fromDom(i)
            else:
                raise SchemaError('Unknown component (%s)' % (i.getTagName()))
        self.content = tuple(content)
class All(XMLSchemaComponent,
          AllMarker):
    """<all>
    parents:
        complexType, extension, restriction, group
    attributes:
        id -- ID
        minOccurs -- '0' | '1', 1
        maxOccurs -- '1', 1

    contents:
        annotation?, element*
    """
    attributes = {'id': None,
                  'minOccurs': '1',
                  'maxOccurs': '1'}
    contents = {'xsd': ['annotation', 'element']}
    tag = 'all'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        self.content = None

    def fromDom(self, node):
        """Populate element content from DOM element *node*."""
        self.setAttributes(node)
        contents = self.getContents(node)
        content = []
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                    continue
                elif component == 'element':
                    if i.hasattr('ref'):
                        content.append(ElementReference(self))
                    else:
                        content.append(LocalElementDeclaration(self))
                else:
                    raise SchemaError('Unknown component (%s)'
                                      % (i.getTagName()))
                content[-1].fromDom(i)
            else:
                raise SchemaError('Unknown component (%s)' % (i.getTagName()))
        self.content = tuple(content)
class Choice(XMLSchemaComponent,
             ChoiceMarker):
    """<choice>
    parents:
        complexType, extension, restriction, group, choice, sequence
    attributes:
        id -- ID
        minOccurs -- Whole Number, 1
        maxOccurs -- (Whole Number | 'unbounded'), 1

    contents:
        annotation?, (element | group | choice | sequence | any)*
    """
    attributes = {'id': None,
                  'minOccurs': '1',
                  'maxOccurs': '1'}
    contents = {'xsd': ['annotation', 'element', 'group', 'choice',
                        'sequence', 'any']}
    tag = 'choice'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        self.content = None

    def fromDom(self, node):
        """Populate particle content from DOM element *node*."""
        self.setAttributes(node)
        contents = self.getContents(node)
        content = []
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                    continue
                elif component == 'element':
                    if i.hasattr('ref'):
                        content.append(ElementReference(self))
                    else:
                        content.append(LocalElementDeclaration(self))
                elif component == 'group':
                    content.append(ModelGroupReference(self))
                elif component == 'choice':
                    content.append(Choice(self))
                elif component == 'sequence':
                    content.append(Sequence(self))
                elif component == 'any':
                    content.append(ElementWildCard(self))
                else:
                    raise SchemaError('Unknown component (%s)'
                                      % (i.getTagName()))
                content[-1].fromDom(i)
            else:
                raise SchemaError('Unknown component (%s)' % (i.getTagName()))
        self.content = tuple(content)
class ModelGroupDefinition(XMLSchemaComponent,
                           ModelGroupMarker,
                           DefinitionMarker):
    """<group name>
    parents:
        redefine, schema
    attributes:
        id -- ID
        name -- NCName, required

    contents:
        annotation?, (all | choice | sequence)?
    """
    required = ['name']
    attributes = {'id': None,
                  'name': None}
    contents = {'xsd': ['annotation', 'all', 'choice', 'sequence']}
    tag = 'group'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        self.content = None

    def fromDom(self, node):
        """Populate the (single) model-group child from *node*."""
        self.setAttributes(node)
        contents = self.getContents(node)
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                    continue
                elif component == 'all' and not self.content:
                    self.content = All(self)
                elif component == 'choice' and not self.content:
                    self.content = Choice(self)
                elif component == 'sequence' and not self.content:
                    self.content = Sequence(self)
                else:
                    # also reached for a second model-group child
                    raise SchemaError('Unknown component (%s)'
                                      % (i.getTagName()))
                self.content.fromDom(i)
            else:
                raise SchemaError('Unknown component (%s)' % (i.getTagName()))
class ModelGroupReference(XMLSchemaComponent,
                          ModelGroupMarker,
                          ReferenceMarker):
    """<group ref>
    parents:
        choice, complexType, extension, restriction, sequence
    attributes:
        id -- ID
        ref -- NCName, required
        minOccurs -- Whole Number, 1
        maxOccurs -- (Whole Number | 'unbounded'), 1

    contents:
        annotation?
    """
    required = ['ref']
    attributes = {'id': None,
                  'ref': None,
                  'minOccurs': '1',
                  'maxOccurs': '1'}
    contents = {'xsd': ['annotation']}
    tag = 'group'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None

    def getModelGroupReference(self):
        """Resolve 'ref' to the referenced ModelGroupDefinition."""
        return self.getModelGroup('ref')

    def fromDom(self, node):
        """Populate this reference from DOM element *node*."""
        self.setAttributes(node)
        contents = self.getContents(node)
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                else:
                    raise SchemaError('Unknown component (%s)'
                                      % (i.getTagName()))
            else:
                raise SchemaError('Unknown component (%s)' % (i.getTagName()))
class ComplexType(XMLSchemaComponent,
                  DefinitionMarker,
                  ComplexMarker):
    """<complexType name>
    parents:
        redefine, schema
    attributes:
        id -- ID
        name -- NCName, required
        mixed -- boolean, false
        abstract -- boolean, false
        block -- ('#all' | ('extension' | 'restriction')*),
            schema.blockDefault
        final -- ('#all' | ('extension' | 'restriction')*),
            schema.finalDefault

    contents:
        annotation?, (simpleContent | complexContent |
        ((group | all | choice | sequence)?,
         (attribute | attributeGroup)*, anyAttribute?))
    """
    required = ['name']
    attributes = {'id': None,
                  'name': None,
                  'mixed': 0,
                  'abstract': 0,
                  'block': lambda self: self._parent().getBlockDefault(),
                  'final': lambda self: self._parent().getFinalDefault()}
    contents = {'xsd': ['annotation', 'simpleContent', 'complexContent',
                        'group', 'all', 'choice', 'sequence', 'attribute',
                        'attributeGroup', 'anyAttribute', 'any']}
    tag = 'complexType'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        self.content = None
        self.attr_content = None

    def isMixed(self):
        """Return True if the 'mixed' attribute is set, else False."""
        m = self.getAttribute('mixed')
        if m == 0 or m == False:
            return False
        # NOTE: basestring is Python 2 only; schema attributes arrive as
        # strings so the value may be 'true'/'false'/'0'/'1'.
        if isinstance(m, basestring) is True:
            if m in ('false', '0'):
                return False
            if m in ('true', '1'):
                return True
        raise SchemaError('invalid value for attribute mixed(%s): %s'
                          % (m, self.getItemTrace()))

    def getAttributeContent(self):
        return self.attr_content

    def getElementDeclaration(self, attribute):
        raise Warning('invalid operation for <%s>' % self.tag)

    def getTypeDefinition(self, attribute):
        raise Warning('invalid operation for <%s>' % self.tag)

    def fromDom(self, node):
        """Populate annotation, content model and attribute content
        from DOM element *node*."""
        self.setAttributes(node)
        contents = self.getContents(node)
        indx = 0
        num = len(contents)
        if not num:
            return
        component = SplitQName(contents[indx].getTagName())[1]
        if component == 'annotation':
            self.annotation = Annotation(self)
            self.annotation.fromDom(contents[indx])
            indx += 1
            if indx >= num:
                # annotation was the only child; original code indexed
                # past the end of contents here (IndexError).
                return
            component = SplitQName(contents[indx].getTagName())[1]
        self.content = None
        if component == 'simpleContent':
            self.content = self.__class__.SimpleContent(self)
            self.content.fromDom(contents[indx])
        elif component == 'complexContent':
            self.content = self.__class__.ComplexContent(self)
            self.content.fromDom(contents[indx])
        else:
            if component == 'all':
                self.content = All(self)
            elif component == 'choice':
                self.content = Choice(self)
            elif component == 'sequence':
                self.content = Sequence(self)
            elif component == 'group':
                self.content = ModelGroupReference(self)
            if self.content:
                self.content.fromDom(contents[indx])
                indx += 1
            # remaining children are attribute uses
            self.attr_content = []
            while indx < num:
                component = SplitQName(contents[indx].getTagName())[1]
                if component == 'attribute':
                    if contents[indx].hasattr('ref'):
                        self.attr_content.append(AttributeReference(self))
                    else:
                        self.attr_content.append(
                            LocalAttributeDeclaration(self))
                elif component == 'attributeGroup':
                    self.attr_content.append(AttributeGroupReference(self))
                elif component == 'anyAttribute':
                    self.attr_content.append(AttributeWildCard(self))
                else:
                    raise SchemaError('Unknown component (%s): %s'
                                      % (contents[indx].getTagName(),
                                         self.getItemTrace()))
                self.attr_content[-1].fromDom(contents[indx])
                indx += 1

    class _DerivedType(XMLSchemaComponent):
        # Shared base for simpleContent/complexContent: one optional
        # annotation followed by exactly one restriction or extension.
        def __init__(self, parent):
            XMLSchemaComponent.__init__(self, parent)
            self.annotation = None
            # XXX remove attribute derivation, inconsistent
            self.derivation = None
            self.content = None

        def fromDom(self, node):
            """Populate the derivation (restriction|extension) from *node*."""
            self.setAttributes(node)
            contents = self.getContents(node)
            for i in contents:
                component = SplitQName(i.getTagName())[1]
                if component in self.__class__.contents['xsd']:
                    if component == 'annotation' and not self.annotation:
                        self.annotation = Annotation(self)
                        self.annotation.fromDom(i)
                        continue
                    elif component == 'restriction' and not self.derivation:
                        self.derivation = self.__class__.Restriction(self)
                    elif component == 'extension' and not self.derivation:
                        self.derivation = self.__class__.Extension(self)
                    else:
                        raise SchemaError('Unknown component (%s)'
                                          % (i.getTagName()))
                else:
                    raise SchemaError('Unknown component (%s)'
                                      % (i.getTagName()))
                self.derivation.fromDom(i)
            self.content = self.derivation

    class ComplexContent(_DerivedType,
                         ComplexMarker):
        """<complexContent>
        parents:
            complexType
        attributes:
            id -- ID
            mixed -- boolean, false

        contents:
            annotation?, (restriction | extension)
        """
        attributes = {'id': None,
                      'mixed': 0}
        contents = {'xsd': ['annotation', 'restriction', 'extension']}
        tag = 'complexContent'

        def isMixed(self):
            """Return True if the 'mixed' attribute is set, else False."""
            m = self.getAttribute('mixed')
            if m == 0 or m == False:
                return False
            # NOTE: basestring is Python 2 only
            if isinstance(m, basestring) is True:
                if m in ('false', '0'):
                    return False
                if m in ('true', '1'):
                    return True
            raise SchemaError('invalid value for attribute mixed(%s): %s'
                              % (m, self.getItemTrace()))

        class _DerivationBase(XMLSchemaComponent):
            """<extension>,<restriction>
            parents:
                complexContent
            attributes:
                id -- ID
                base -- QName, required

            contents:
                annotation?, (group | all | choice | sequence)?,
                (attribute | attributeGroup)*, anyAttribute?
            """
            required = ['base']
            attributes = {'id': None,
                          'base': None}
            contents = {'xsd': ['annotation', 'group', 'all', 'choice',
                                'sequence', 'attribute', 'attributeGroup',
                                'anyAttribute']}

            def __init__(self, parent):
                XMLSchemaComponent.__init__(self, parent)
                self.annotation = None
                self.content = None
                self.attr_content = None

            def getAttributeContent(self):
                return self.attr_content

            def fromDom(self, node):
                """Populate content model and attribute content from *node*."""
                self.setAttributes(node)
                contents = self.getContents(node)
                indx = 0
                num = len(contents)
                if not num:
                    return
                component = SplitQName(contents[indx].getTagName())[1]
                if component == 'annotation':
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(contents[indx])
                    indx += 1
                    if indx >= num:
                        # annotation was the only child; original code
                        # indexed past the end here (IndexError).
                        return
                    component = SplitQName(contents[indx].getTagName())[1]
                if component == 'all':
                    self.content = All(self)
                    self.content.fromDom(contents[indx])
                    indx += 1
                elif component == 'choice':
                    self.content = Choice(self)
                    self.content.fromDom(contents[indx])
                    indx += 1
                elif component == 'sequence':
                    self.content = Sequence(self)
                    self.content.fromDom(contents[indx])
                    indx += 1
                elif component == 'group':
                    self.content = ModelGroupReference(self)
                    self.content.fromDom(contents[indx])
                    indx += 1
                else:
                    self.content = None
                self.attr_content = []
                while indx < num:
                    component = SplitQName(contents[indx].getTagName())[1]
                    if component == 'attribute':
                        if contents[indx].hasattr('ref'):
                            self.attr_content.append(
                                AttributeReference(self))
                        else:
                            self.attr_content.append(
                                LocalAttributeDeclaration(self))
                    elif component == 'attributeGroup':
                        if contents[indx].hasattr('ref'):
                            self.attr_content.append(
                                AttributeGroupReference(self))
                        else:
                            self.attr_content.append(
                                AttributeGroupDefinition(self))
                    elif component == 'anyAttribute':
                        self.attr_content.append(AttributeWildCard(self))
                    else:
                        raise SchemaError('Unknown component (%s)'
                                          % (contents[indx].getTagName()))
                    self.attr_content[-1].fromDom(contents[indx])
                    indx += 1

        class Extension(_DerivationBase,
                        ExtensionMarker):
            """<extension base>
            parents:
                complexContent
            attributes:
                id -- ID
                base -- QName, required

            contents:
                annotation?, (group | all | choice | sequence)?,
                (attribute | attributeGroup)*, anyAttribute?
            """
            tag = 'extension'

        class Restriction(_DerivationBase,
                          RestrictionMarker):
            """<restriction base>
            parents:
                complexContent
            attributes:
                id -- ID
                base -- QName, required

            contents:
                annotation?, (group | all | choice | sequence)?,
                (attribute | attributeGroup)*, anyAttribute?
            """
            tag = 'restriction'

    class SimpleContent(_DerivedType,
                        SimpleMarker):
        """<simpleContent>
        parents:
            complexType
        attributes:
            id -- ID

        contents:
            annotation?, (restriction | extension)
        """
        attributes = {'id': None}
        contents = {'xsd': ['annotation', 'restriction', 'extension']}
        tag = 'simpleContent'

        class Extension(XMLSchemaComponent,
                        ExtensionMarker):
            """<extension base>
            parents:
                simpleContent
            attributes:
                id -- ID
                base -- QName, required

            contents:
                annotation?, (attribute | attributeGroup)*, anyAttribute?
            """
            required = ['base']
            attributes = {'id': None,
                          'base': None}
            contents = {'xsd': ['annotation', 'attribute', 'attributeGroup',
                                'anyAttribute']}
            tag = 'extension'

            def __init__(self, parent):
                XMLSchemaComponent.__init__(self, parent)
                self.annotation = None
                self.attr_content = None

            def getAttributeContent(self):
                return self.attr_content

            def fromDom(self, node):
                """Populate annotation and attribute content from *node*."""
                self.setAttributes(node)
                contents = self.getContents(node)
                indx = 0
                num = len(contents)
                if num:
                    component = SplitQName(contents[indx].getTagName())[1]
                    if component == 'annotation':
                        self.annotation = Annotation(self)
                        self.annotation.fromDom(contents[indx])
                        indx += 1
                        # original re-read contents[indx] here, raising
                        # IndexError when annotation was the only child;
                        # the loop below recomputes it safely.
                content = []
                while indx < num:
                    component = SplitQName(contents[indx].getTagName())[1]
                    if component == 'attribute':
                        if contents[indx].hasattr('ref'):
                            content.append(AttributeReference(self))
                        else:
                            content.append(LocalAttributeDeclaration(self))
                    elif component == 'attributeGroup':
                        content.append(AttributeGroupReference(self))
                    elif component == 'anyAttribute':
                        content.append(AttributeWildCard(self))
                    else:
                        raise SchemaError('Unknown component (%s)'
                                          % (contents[indx].getTagName()))
                    content[-1].fromDom(contents[indx])
                    indx += 1
                self.attr_content = tuple(content)

        class Restriction(XMLSchemaComponent,
                          RestrictionMarker):
            """<restriction base>
            parents:
                simpleContent
            attributes:
                id -- ID
                base -- QName, required

            contents:
                annotation?, simpleType?, (enumeration | length |
                maxExclusive | maxInclusive | maxLength | minExclusive |
                minInclusive | minLength | pattern | fractionDigits |
                totalDigits | whiteSpace)*, (attribute | attributeGroup)*,
                anyAttribute?
            """
            required = ['base']
            attributes = {'id': None,
                          'base': None}
            contents = {'xsd': ['annotation', 'simpleType', 'attribute',
                                'attributeGroup', 'anyAttribute']
                        + RestrictionMarker.facets}
            tag = 'restriction'

            def __init__(self, parent):
                XMLSchemaComponent.__init__(self, parent)
                self.annotation = None
                self.content = None
                self.attr_content = None

            def getAttributeContent(self):
                return self.attr_content

            def fromDom(self, node):
                """Populate annotation, simpleType content and attribute
                content from *node*."""
                self.content = []
                self.setAttributes(node)
                contents = self.getContents(node)
                indx = 0
                num = len(contents)
                if not num:
                    # original indexed contents[0] unconditionally (IndexError)
                    return
                component = SplitQName(contents[indx].getTagName())[1]
                if component == 'annotation':
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(contents[indx])
                    indx += 1
                content = []
                while indx < num:
                    component = SplitQName(contents[indx].getTagName())[1]
                    if component == 'attribute':
                        if contents[indx].hasattr('ref'):
                            content.append(AttributeReference(self))
                        else:
                            content.append(LocalAttributeDeclaration(self))
                        content[-1].fromDom(contents[indx])
                    elif component == 'attributeGroup':
                        content.append(AttributeGroupReference(self))
                        content[-1].fromDom(contents[indx])
                    elif component == 'anyAttribute':
                        content.append(AttributeWildCard(self))
                        content[-1].fromDom(contents[indx])
                    elif component == 'simpleType':
                        # goes into self.content, not the attribute list;
                        # original fell through to content[-1].fromDom here,
                        # re-parsing the wrong node or raising IndexError.
                        self.content.append(LocalSimpleType(self))
                        self.content[-1].fromDom(contents[indx])
                    else:
                        raise SchemaError('Unknown component (%s)'
                                          % (contents[indx].getTagName()))
                    indx += 1
                self.attr_content = tuple(content)
class LocalComplexType(ComplexType,
                       LocalMarker):
    """<complexType>
    parents:
        element
    attributes:
        id -- ID
        mixed -- boolean, false

    contents:
        annotation?, (simpleContent | complexContent |
        ((group | all | choice | sequence)?,
         (attribute | attributeGroup)*, anyAttribute?))
    """
    required = []
    attributes = {'id': None, 'mixed': 0}
    tag = 'complexType'
class SimpleType(XMLSchemaComponent,\
DefinitionMarker,\
SimpleMarker):
"""<simpleType name>
parents:
redefine, schema
attributes:
id -- ID
name -- NCName, required
final -- ('#all' | ('extension' | 'restriction' | 'list' | 'union')*),
schema.finalDefault
contents:
annotation?, (restriction | list | union)
"""
required = ['name']
attributes = {'id':None,
'name':None,
'final':lambda self: self._parent().getFinalDefault()}
contents = {'xsd':['annotation', 'restriction', 'list', 'union']}
tag = 'simpleType'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
self.content = None
def getElementDeclaration(self, attribute):
raise Warning, 'invalid operation for <%s>' %self.tag
def getTypeDefinition(self, attribute):
raise Warning, 'invalid operation for <%s>' %self.tag
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
for child in contents:
component = SplitQName(child.getTagName())[1]
if component == 'annotation':
self.annotation = Annotation(self)
self.annotation.fromDom(child)
continue
break
else:
return
if component == 'restriction':
self.content = self.__class__.Restriction(self)
elif component == 'list':
self.content = self.__class__.List(self)
elif component == 'union':
self.content = self.__class__.Union(self)
else:
raise SchemaError, 'Unknown component (%s)' %(component)
self.content.fromDom(child)
class Restriction(XMLSchemaComponent,\
RestrictionMarker):
"""<restriction base>
parents:
simpleType
attributes:
id -- ID
base -- QName, required or simpleType child
contents:
annotation?, simpleType?, (enumeration | length |
maxExclusive | maxInclusive | maxLength | minExclusive |
minInclusive | minLength | pattern | fractionDigits |
totalDigits | whiteSpace)*
"""
attributes = {'id':None,
'base':None }
contents = {'xsd':['annotation', 'simpleType']+RestrictionMarker.facets}
tag = 'restriction'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
self.content = None
def getAttributeBase(self):
return XMLSchemaComponent.getAttribute(self, 'base')
def getTypeDefinition(self, attribute='base'):
return XMLSchemaComponent.getTypeDefinition(self, attribute)
def getSimpleTypeContent(self):
for el in self.content:
if el.isSimple(): return el
return None
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
content = []
for indx in range(len(contents)):
component = SplitQName(contents[indx].getTagName())[1]
if (component == 'annotation') and (not indx):
self.annotation = Annotation(self)
self.annotation.fromDom(contents[indx])
continue
elif (component == 'simpleType') and (not indx or indx == 1):
content.append(AnonymousSimpleType(self))
content[-1].fromDom(contents[indx])
elif component in RestrictionMarker.facets:
#print_debug('%s class instance, skipping %s' %(self.__class__, component))
pass
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
self.content = tuple(content)
class Union(XMLSchemaComponent,
            UnionMarker):
    """<union>
       parents:
           simpleType
       attributes:
           id -- ID
           memberTypes -- list of QNames, required or simpleType child.
       contents:
           annotation?, simpleType*
    """
    attributes = {'id':None,
        'memberTypes':None }
    contents = {'xsd':['annotation', 'simpleType']}
    tag = 'union'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        self.content = None

    def fromDom(self, node):
        """Populate attributes, the optional leading annotation, and the
        tuple of member anonymous simpleType children.
        """
        self.setAttributes(node)
        contents = self.getContents(node)
        content = []

        for indx in range(len(contents)):
            component = SplitQName(contents[indx].getTagName())[1]
            if (component == 'annotation') and (not indx):
                self.annotation = Annotation(self)
                self.annotation.fromDom(contents[indx])
            elif (component == 'simpleType'):
                content.append(AnonymousSimpleType(self))
                content[-1].fromDom(contents[indx])
            else:
                # BUG FIX: original raised with the undefined name 'i',
                # masking the intended SchemaError with a NameError.
                raise SchemaError('Unknown component (%s)' % (contents[indx].getTagName()))
        self.content = tuple(content)
class List(XMLSchemaComponent,
           ListMarker):
    """<list>
       parents:
           simpleType
       attributes:
           id -- ID
           itemType -- QName, required or simpleType child.
       contents:
           annotation?, simpleType?
    """
    attributes = {'id':None,
        'itemType':None }
    contents = {'xsd':['annotation', 'simpleType']}
    tag = 'list'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        self.content = None

    def getItemType(self):
        # raw 'itemType' attribute value (may be None when an anonymous
        # simpleType child is used instead)
        return self.attributes.get('itemType')

    def getTypeDefinition(self, attribute='itemType'):
        """
        return the type refered to by itemType attribute or
        the simpleType content.  If returns None, then the
        type refered to by itemType is primitive.
        """
        tp = XMLSchemaComponent.getTypeDefinition(self, attribute)
        return tp or self.content

    def fromDom(self, node):
        """Populate attributes, the optional leading annotation, and the
        optional anonymous simpleType child (first one wins).
        """
        self.annotation = None
        self.content = None
        self.setAttributes(node)
        contents = self.getContents(node)
        for indx in range(len(contents)):
            component = SplitQName(contents[indx].getTagName())[1]
            if (component == 'annotation') and (not indx):
                self.annotation = Annotation(self)
                self.annotation.fromDom(contents[indx])
            elif (component == 'simpleType'):
                self.content = AnonymousSimpleType(self)
                self.content.fromDom(contents[indx])
                break
            else:
                # BUG FIX: original raised with the undefined name 'i',
                # masking the intended SchemaError with a NameError.
                raise SchemaError('Unknown component (%s)' % (contents[indx].getTagName()))
# Anonymous (unnamed, locally scoped) simpleType; behavior is inherited
# from SimpleType, only the required attributes differ ('name' not needed).
class AnonymousSimpleType(SimpleType,\
                          SimpleMarker,\
                          LocalMarker):
    """<simpleType>
       parents:
           attribute, element, list, restriction, union
       attributes:
           id -- ID
       contents:
           annotation?, (restriction | list | union)
    """
    required = []
    attributes = {'id':None}
    tag = 'simpleType'
# Placeholder: <redefine> support is not implemented; instances are never
# constructed by the schema loader (the 'redefine' branch is a no-op).
class Redefine:
    """<redefine>
       parents:
       attributes:
       contents:
    """
    tag = 'redefine'
###########################
###########################
# Pythons older than 2.2 cannot subclass the builtin tuple type, so fall
# back to the UserTuple wrapper class on those versions.
if sys.version_info[:2] >= (2, 2):
    tupleClass = tuple
else:
    import UserTuple
    tupleClass = UserTuple.UserTuple
class TypeDescriptionComponent(tupleClass):
    """Tuple of length 2, consisting of
       a namespace and unprefixed name.
    """
    def __init__(self, args):
        """args -- (namespace, name)
           Remove the name's prefix, irrelevant.
        """
        # NOTE(review): when tupleClass is the builtin tuple, the stored
        # items are fixed by tuple.__new__ before __init__ runs, so the
        # prefix stripping below may not affect the stored value and
        # tuple.__init__ is a no-op -- confirm intended behavior.
        if len(args) != 2:
            raise TypeError, 'expecting tuple (namespace, name), got %s' %args
        elif args[1].find(':') >= 0:
            args = (args[0], SplitQName(args[1])[1])
        tuple.__init__(self, args)
        return

    def getTargetNamespace(self):
        # first element: the namespace URI
        return self[0]

    def getName(self):
        # second element: the unprefixed (local) name
        return self[1]
In some cases it is necessary to run ZSI in a Python environment where
threading is not available, for example under the mod_python port described at
http://www.freebsd.org/cgi/url.cgi?ports/www/mod_python/pkg-descr
The patch below addresses this issue for ZSI.
# Copyright (c) 2003, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory (subject to receipt of
# any required approvals from the U.S. Dept. of Energy). All rights
# reserved.
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
ident = "$Id$"
import types, weakref, sys
from Namespaces import XMLNS
from Utility import DOM, DOMException, Collection, SplitQName, basejoin
from StringIO import StringIO
# If we have no threading, this should be a no-op
try:
    from threading import RLock
except ImportError:
    class RLock:
        """Do-nothing stand-in used when the threading module is absent."""
        # BUG FIX: the original stubs omitted the 'self' parameter, so any
        # instance call to acquire()/release() raised a TypeError.
        def acquire(self):
            pass
        def release(self):
            pass
#
# Collections in XMLSchema class
#
# Attribute names of the component collections held by an XMLSchema
# instance; used with getattr() when looking up schema items.
TYPES = 'types'
ATTRIBUTE_GROUPS = 'attr_groups'
ATTRIBUTES = 'attr_decl'
ELEMENTS = 'elements'
MODEL_GROUPS = 'model_groups'
def GetSchema(component):
    """Walk the weakref _parent chain upward from *component* and return
    the enclosing XMLSchema instance.
    """
    node = component
    while True:
        if isinstance(node, XMLSchema):
            return node
        node = node._parent()
class SchemaReader:
    """A SchemaReader creates XMLSchema objects from urls and xml data.
    """

    def __init__(self, domReader=None, base_url=None):
        """domReader -- class must implement DOMAdapterInterface
           base_url -- base url string
        """
        self.__base_url = base_url
        self.__readerClass = domReader
        if not self.__readerClass:
            self.__readerClass = DOMAdapter
        self._includes = {}     # schemaLocation -> XMLSchema
        self._imports = {}      # targetNamespace -> XMLSchema

    def __setImports(self, schema):
        """Add dictionary of imports to schema instance.
           schema -- XMLSchema instance
        """
        for ns,val in schema.imports.items():
            if self._imports.has_key(ns):
                schema.addImportSchema(self._imports[ns])

    def __setIncludes(self, schema):
        """Add dictionary of includes to schema instance.
           schema -- XMLSchema instance
        """
        for schemaLocation, val in schema.includes.items():
            if self._includes.has_key(schemaLocation):
                # BUG FIX: the original fetched from self._imports (keyed by
                # namespace), which could KeyError or hand back the wrong
                # schema; includes are keyed by schemaLocation.
                schema.addIncludeSchema(schemaLocation, self._includes[schemaLocation])

    def addSchemaByLocation(self, location, schema):
        """provide reader with schema document for a location.
        """
        self._includes[location] = schema

    def addSchemaByNamespace(self, schema):
        """provide reader with schema document for a targetNamespace.
        """
        self._imports[schema.targetNamespace] = schema

    def loadFromNode(self, parent, element):
        """element -- DOM node or document
           parent -- WSDLAdapter instance
        """
        reader = self.__readerClass(element)
        schema = XMLSchema(parent)
        #HACK to keep a reference
        schema.wsdl = parent
        schema.setBaseUrl(self.__base_url)
        schema.load(reader)
        return schema

    def loadFromStream(self, file, url=None):
        """Return an XMLSchema instance loaded from a file object.
           file -- file object
           url -- base location for resolving imports/includes.
        """
        reader = self.__readerClass()
        reader.loadDocument(file)
        schema = XMLSchema()
        if url is not None:
            schema.setBaseUrl(url)
        schema.load(reader)
        self.__setIncludes(schema)
        self.__setImports(schema)
        return schema

    def loadFromString(self, data):
        """Return an XMLSchema instance loaded from an XML string.
           data -- XML string
        """
        return self.loadFromStream(StringIO(data))

    def loadFromURL(self, url):
        """Return an XMLSchema instance loaded from the given url.
           url -- URL to dereference
        """
        reader = self.__readerClass()
        if self.__base_url:
            url = basejoin(self.__base_url,url)
        reader.loadFromURL(url)
        schema = XMLSchema()
        schema.setBaseUrl(url)
        schema.load(reader)
        self.__setIncludes(schema)
        self.__setImports(schema)
        return schema

    def loadFromFile(self, filename):
        """Return an XMLSchema instance loaded from the given file.
           filename -- name of file to open
        """
        if self.__base_url:
            filename = basejoin(self.__base_url,filename)
        file = open(filename, 'rb')
        try:
            schema = self.loadFromStream(file, filename)
        finally:
            file.close()
        return schema
class SchemaError(Exception):
    """Raised for XML Schema parsing and consistency problems."""
    pass
###########################
# DOM Utility Adapters
##########################
class DOMAdapterInterface:
    """Abstract interface a DOM adapter must implement so the schema
    parser stays independent of any particular DOM implementation.
    Every method here must be overridden by a concrete adapter.
    """
    def hasattr(self, attr, ns=None):
        """return true if node has attribute
           attr -- attribute to check for
           ns -- namespace of attribute, by default None
        """
        raise NotImplementedError, 'adapter method not implemented'

    def getContentList(self, *contents):
        """returns an ordered list of child nodes
           *contents -- list of node names to return
        """
        raise NotImplementedError, 'adapter method not implemented'

    def setAttributeDictionary(self, attributes):
        """set attribute dictionary
        """
        raise NotImplementedError, 'adapter method not implemented'

    def getAttributeDictionary(self):
        """returns a dict of node's attributes
        """
        raise NotImplementedError, 'adapter method not implemented'

    def getNamespace(self, prefix):
        """returns namespace referenced by prefix.
        """
        raise NotImplementedError, 'adapter method not implemented'

    def getTagName(self):
        """returns tagName of node
        """
        raise NotImplementedError, 'adapter method not implemented'

    def getParentNode(self):
        """returns parent element in DOMAdapter or None
        """
        raise NotImplementedError, 'adapter method not implemented'

    def loadDocument(self, file):
        """load a Document from a file object
           file --
        """
        raise NotImplementedError, 'adapter method not implemented'

    def loadFromURL(self, url):
        """load a Document from an url
           url -- URL to dereference
        """
        raise NotImplementedError, 'adapter method not implemented'
class DOMAdapter(DOMAdapterInterface):
    """Adapter for ZSI.Utility.DOM
    """
    def __init__(self, node=None):
        """Reset all instance variables.
           element -- DOM document, node, or None
        """
        # accept either a Document (unwrap to its root element) or an element
        if hasattr(node, 'documentElement'):
            self.__node = node.documentElement
        else:
            self.__node = node
        self.__attributes = None    # lazily built name -> value dict

    def getNode(self):
        return self.__node

    def hasattr(self, attr, ns=None):
        """attr -- attribute
           ns -- optional namespace, None means unprefixed attribute.
        """
        if not self.__attributes:
            self.setAttributeDictionary()
        if ns:
            return self.__attributes.get(ns,{}).has_key(attr)
        return self.__attributes.has_key(attr)

    def getContentList(self, *contents):
        # return child elements whose local name is in *contents,
        # each wrapped in a new DOMAdapter
        nodes = []
        ELEMENT_NODE = self.__node.ELEMENT_NODE
        for child in DOM.getElements(self.__node, None):
            if child.nodeType == ELEMENT_NODE and\
               SplitQName(child.tagName)[1] in contents:
                nodes.append(child)
        return map(self.__class__, nodes)

    def setAttributeDictionary(self):
        # NOTE(review): reaches into the node's private _attrs mapping --
        # appears tied to a specific DOM implementation; confirm.
        self.__attributes = {}
        for v in self.__node._attrs.values():
            self.__attributes[v.nodeName] = v.nodeValue

    def getAttributeDictionary(self):
        if not self.__attributes:
            self.setAttributeDictionary()
        return self.__attributes

    def getTagName(self):
        return self.__node.tagName

    def getParentNode(self):
        # only element parents are meaningful; a Document parent yields None
        if self.__node.parentNode.nodeType == self.__node.ELEMENT_NODE:
            return DOMAdapter(self.__node.parentNode)
        return None

    def getNamespace(self, prefix):
        """prefix -- deference namespace prefix in node's context.
           Ascends parent nodes until found.
        """
        namespace = None
        if prefix == 'xmlns':
            namespace = DOM.findDefaultNS(prefix, self.__node)
        else:
            try:
                namespace = DOM.findNamespaceURI(prefix, self.__node)
            except DOMException, ex:
                # the 'xml' prefix is implicitly bound and need not be declared
                if prefix != 'xml':
                    raise SchemaError, '%s namespace not declared for %s'\
                        %(prefix, self.__node._get_tagName())
                namespace = XMLNS.XML
        return namespace

    def loadDocument(self, file):
        self.__node = DOM.loadDocument(file)
        if hasattr(self.__node, 'documentElement'):
            self.__node = self.__node.documentElement

    def loadFromURL(self, url):
        self.__node = DOM.loadFromURL(url)
        if hasattr(self.__node, 'documentElement'):
            self.__node = self.__node.documentElement
class XMLBase:
    """ These class variables are for string indentation.
    """
    tag = None
    __indent = 0
    __rlock = RLock()

    def __str__(self):
        """Render the instance's attributes, one per line, indented by
        nesting depth.  The shared class-level lock serializes access to
        the shared indentation counter.
        """
        XMLBase.__rlock.acquire()
        # BUG FIX: restore the counter and release the lock even if
        # formatting raises; otherwise one failure would leave the lock
        # held and deadlock every subsequent __str__ call.
        try:
            XMLBase.__indent += 1
            tmp = "<" + str(self.__class__) + '>\n'
            for k,v in self.__dict__.items():
                tmp += "%s* %s = %s\n" %(XMLBase.__indent*' ', k, v)
        finally:
            XMLBase.__indent -= 1
            XMLBase.__rlock.release()
        return tmp
"""Marker Interface: can determine something about an instances properties by using
the provided convenience functions.
"""
# Empty marker classes: components inherit these so MarkerInterface's
# is*() predicates can classify instances via isinstance checks.
class DefinitionMarker:
    """marker for definitions
    """
    pass

class DeclarationMarker:
    """marker for declarations
    """
    pass

class AttributeMarker:
    """marker for attributes
    """
    pass

class AttributeGroupMarker:
    """marker for attribute groups
    """
    pass

class WildCardMarker:
    """marker for wildcards
    """
    pass

class ElementMarker:
    """marker for wildcards
    """
    pass

class ReferenceMarker:
    """marker for references
    """
    pass

class ModelGroupMarker:
    """marker for model groups
    """
    pass

class AllMarker(ModelGroupMarker):
    """marker for all model group
    """
    pass

class ChoiceMarker(ModelGroupMarker):
    """marker for choice model group
    """
    pass

class SequenceMarker(ModelGroupMarker):
    """marker for sequence model group
    """
    pass

class ExtensionMarker:
    """marker for extensions
    """
    pass

class RestrictionMarker:
    """marker for restrictions
    """
    # legal facet child-element names of <restriction>
    facets = ['enumeration', 'length', 'maxExclusive', 'maxInclusive',\
        'maxLength', 'minExclusive', 'minInclusive', 'minLength',\
        'pattern', 'fractionDigits', 'totalDigits', 'whiteSpace']

class SimpleMarker:
    """marker for simple type information
    """
    pass

class ListMarker:
    """marker for simple type list
    """
    pass

class UnionMarker:
    """marker for simple type Union
    """
    pass

class ComplexMarker:
    """marker for complex type information
    """
    pass

class LocalMarker:
    """marker for complex type information
    """
    pass
class MarkerInterface:
    """Mixin exposing is*() predicates that report which marker classes
    an instance inherits from (see the marker classes above).
    """
    def isDefinition(self):
        return isinstance(self, DefinitionMarker)

    def isDeclaration(self):
        return isinstance(self, DeclarationMarker)

    def isAttribute(self):
        return isinstance(self, AttributeMarker)

    def isAttributeGroup(self):
        return isinstance(self, AttributeGroupMarker)

    def isElement(self):
        return isinstance(self, ElementMarker)

    def isReference(self):
        return isinstance(self, ReferenceMarker)

    def isWildCard(self):
        return isinstance(self, WildCardMarker)

    def isModelGroup(self):
        return isinstance(self, ModelGroupMarker)

    def isAll(self):
        return isinstance(self, AllMarker)

    def isChoice(self):
        return isinstance(self, ChoiceMarker)

    def isSequence(self):
        return isinstance(self, SequenceMarker)

    def isExtension(self):
        return isinstance(self, ExtensionMarker)

    def isRestriction(self):
        return isinstance(self, RestrictionMarker)

    def isSimple(self):
        return isinstance(self, SimpleMarker)

    def isComplex(self):
        return isinstance(self, ComplexMarker)

    def isLocal(self):
        return isinstance(self, LocalMarker)
##########################################################
# Schema Components
#########################################################
class XMLSchemaComponent(XMLBase, MarkerInterface):
    """
    class variables:
        required -- list of required attributes
        attributes -- dict of default attribute values, including None.
            Value can be a function for runtime dependencies.
        contents -- dict of namespace keyed content lists.
            'xsd' content of xsd namespace.
        xmlns_key -- key for declared xmlns namespace.
        xmlns -- xmlns is special prefix for namespace dictionary
        xml -- special xml prefix for xml namespace.
    """
    required = []
    attributes = {}
    contents = {}
    xmlns_key = ''
    xmlns = 'xmlns'
    xml = 'xml'

    def __init__(self, parent=None):
        """parent -- parent instance
           instance variables:
               attributes -- dictionary of node's attributes
        """
        self.attributes = None
        # hold the parent weakly to avoid reference cycles up the tree
        self._parent = parent
        if self._parent:
            self._parent = weakref.ref(parent)

        # sanity check: subclass class variables must have the same types
        # (list/dict) as the base class declarations
        if not self.__class__ == XMLSchemaComponent\
           and not (type(self.__class__.required) == type(XMLSchemaComponent.required)\
           and type(self.__class__.attributes) == type(XMLSchemaComponent.attributes)\
           and type(self.__class__.contents) == type(XMLSchemaComponent.contents)):
            raise RuntimeError, 'Bad type for a class variable in %s' %self.__class__

    def getItemTrace(self):
        """Returns a node trace up to the <schema> item.
        """
        item, path, name, ref = self, [], 'name', 'ref'
        while not isinstance(item,XMLSchema) and not isinstance(item,WSDLToolsAdapter):
            attr = item.getAttribute(name)
            if attr is None:
                attr = item.getAttribute(ref)
                if attr is None: path.append('<%s>' %(item.tag))
                else: path.append('<%s ref="%s">' %(item.tag, attr))
            else:
                path.append('<%s name="%s">' %(item.tag,attr))
            item = item._parent()
        try:
            tns = item.getTargetNamespace()
        except:
            # root item may not expose a targetNamespace; trace degrades
            # gracefully to an empty string
            tns = ''
        path.append('<%s targetNamespace="%s">' %(item.tag, tns))
        path.reverse()
        return ''.join(path)

    def getTargetNamespace(self):
        """return targetNamespace
        """
        # ascend parents until some ancestor declares targetNamespace
        parent = self
        targetNamespace = 'targetNamespace'
        tns = self.attributes.get(targetNamespace)
        while not tns:
            parent = parent._parent()
            tns = parent.attributes.get(targetNamespace)
        return tns

    def getAttributeDeclaration(self, attribute):
        """attribute -- attribute with a QName value (eg. type).
           collection -- check types collection in parent Schema instance
        """
        return self.getQNameAttribute(ATTRIBUTES, attribute)

    def getAttributeGroup(self, attribute):
        """attribute -- attribute with a QName value (eg. type).
           collection -- check types collection in parent Schema instance
        """
        return self.getQNameAttribute(ATTRIBUTE_GROUPS, attribute)

    def getTypeDefinition(self, attribute):
        """attribute -- attribute with a QName value (eg. type).
           collection -- check types collection in parent Schema instance
        """
        return self.getQNameAttribute(TYPES, attribute)

    def getElementDeclaration(self, attribute):
        """attribute -- attribute with a QName value (eg. element).
           collection -- check elements collection in parent Schema instance.
        """
        return self.getQNameAttribute(ELEMENTS, attribute)

    def getModelGroup(self, attribute):
        """attribute -- attribute with a QName value (eg. ref).
           collection -- check model_group collection in parent Schema instance.
        """
        return self.getQNameAttribute(MODEL_GROUPS, attribute)

    def getQNameAttribute(self, collection, attribute):
        """returns object instance representing QName --> (namespace,name),
           or if does not exist return None.
           attribute -- an information item attribute, with a QName value.
           collection -- collection in parent Schema instance to search.
        """
        obj = None
        #tdc = self.attributes.get(attribute)
        tdc = self.getAttributeQName(attribute)
        if tdc:
            obj = self.getSchemaItem(collection, tdc.getTargetNamespace(), tdc.getName())
        return obj

    def getSchemaItem(self, collection, namespace, name):
        """returns object instance representing namespace, name,
           or if does not exist return None.
           namespace -- namespace item defined in.
           name -- name of item.
           collection -- collection in parent Schema instance to search.
        """
        obj = None
        parent = GetSchema(self)
        if parent.targetNamespace == namespace:
            # item should live in this schema's own collections
            try:
                obj = getattr(parent, collection)[name]
            except KeyError, ex:
                raise KeyError, "targetNamespace(%s) collection(%s) has no item(%s)"\
                    %(namespace, collection, name)
        elif parent.imports.has_key(namespace):
            # otherwise look in the imported schema for that namespace
            schema = parent.imports[namespace].getSchema()
            try:
                obj = getattr(schema, collection)[name]
            except KeyError, ex:
                raise KeyError, "targetNamespace(%s) collection(%s) has no item(%s)"\
                    %(namespace, collection, name)
        return obj

    def getXMLNS(self, prefix=None):
        """deference prefix or by default xmlns, returns namespace.
        """
        if prefix == XMLSchemaComponent.xml:
            # 'xml' prefix is implicitly bound to the XML namespace
            return XMLNS.XML
        parent = self
        ns = self.attributes[XMLSchemaComponent.xmlns].get(prefix or\
                XMLSchemaComponent.xmlns_key)
        while not ns:
            # ascend until some ancestor declares the prefix
            parent = parent._parent()
            ns = parent.attributes[XMLSchemaComponent.xmlns].get(prefix or\
                    XMLSchemaComponent.xmlns_key)
            if not ns and isinstance(parent, WSDLToolsAdapter):
                if prefix is None:
                    return ''
                raise SchemaError, 'unknown prefix %s' %prefix
        return ns

    def getAttribute(self, attribute):
        """return requested attribute value or None
        """
        # attribute may be a plain name or a (namespace, name) pair
        if type(attribute) in (list, tuple):
            if len(attribute) != 2:
                raise LookupError, 'To access attributes must use name or (namespace,name)'
            return self.attributes.get(attribute[0]).get(attribute[1])
        return self.attributes.get(attribute)

    def getAttributeQName(self, attribute):
        """return requested attribute value as (namespace,name) or None
        """
        qname = self.getAttribute(attribute)
        if isinstance(qname, TypeDescriptionComponent) is True:
            # already resolved by setAttributes
            return qname
        if qname is None:
            return None
        prefix,ncname = SplitQName(qname)
        namespace = self.getXMLNS(prefix)
        return TypeDescriptionComponent((namespace,ncname))

    def getAttributeName(self):
        """return attribute name or None
        """
        return self.getAttribute('name')

    def setAttributes(self, node):
        """Sets up attribute dictionary, checks for required attributes and
           sets default attribute values. attr is for default attribute values
           determined at runtime.

           structure of attributes dictionary
               ['xmlns'][xmlns_key] -- xmlns namespace
               ['xmlns'][prefix] -- declared namespace prefix
               [namespace][prefix] -- attributes declared in a namespace
               [attribute] -- attributes w/o prefix, default namespaces do
                   not directly apply to attributes, ie Name can't collide
                   with QName.
        """
        self.attributes = {XMLSchemaComponent.xmlns:{}}
        for k,v in node.getAttributeDictionary().items():
            prefix,value = SplitQName(k)
            if value == XMLSchemaComponent.xmlns:
                # namespace declaration: file under the 'xmlns' sub-dict
                self.attributes[value][prefix or XMLSchemaComponent.xmlns_key] = v
            elif prefix:
                # prefixed attribute: file under its namespace sub-dict
                ns = node.getNamespace(prefix)
                if not ns:
                    raise SchemaError, 'no namespace for attribute prefix %s'\
                        %prefix
                if not self.attributes.has_key(ns):
                    self.attributes[ns] = {}
                elif self.attributes[ns].has_key(value):
                    raise SchemaError, 'attribute %s declared multiple times in %s'\
                        %(value, ns)
                self.attributes[ns][value] = v
            elif not self.attributes.has_key(value):
                self.attributes[value] = v
            else:
                raise SchemaError, 'attribute %s declared multiple times' %value

        if not isinstance(self, WSDLToolsAdapter):
            self.__checkAttributes()
        self.__setAttributeDefaults()

        #set QNames
        for k in ['type', 'element', 'base', 'ref', 'substitutionGroup', 'itemType']:
            if self.attributes.has_key(k):
                prefix, value = SplitQName(self.attributes.get(k))
                self.attributes[k] = \
                    TypeDescriptionComponent((self.getXMLNS(prefix), value))

        #Union, memberTypes is a whitespace separated list of QNames
        for k in ['memberTypes']:
            if self.attributes.has_key(k):
                qnames = self.attributes[k]
                self.attributes[k] = []
                for qname in qnames.split():
                    prefix, value = SplitQName(qname)
                    self.attributes['memberTypes'].append(\
                        TypeDescriptionComponent(\
                            (self.getXMLNS(prefix), value)))

    def getContents(self, node):
        """retrieve xsd contents
        """
        return node.getContentList(*self.__class__.contents['xsd'])

    def __setAttributeDefaults(self):
        """Looks for default values for unset attributes.  If
           class variable representing attribute is None, then
           it must be defined as an instance variable.
        """
        for k,v in self.__class__.attributes.items():
            if v is not None and self.attributes.has_key(k) is False:
                if isinstance(v, types.FunctionType):
                    # callable default: computed at runtime from the instance
                    self.attributes[k] = v(self)
                else:
                    self.attributes[k] = v

    def __checkAttributes(self):
        """Checks that required attributes have been defined,
           attributes w/default cannot be required.  Checks
           all defined attributes are legal, attribute
           references are not subject to this test.
        """
        for a in self.__class__.required:
            if not self.attributes.has_key(a):
                raise SchemaError,\
                    'class instance %s, missing required attribute %s'\
                    %(self.__class__, a)
        for a in self.attributes.keys():
            if (a not in (XMLSchemaComponent.xmlns, XMLNS.XML)) and\
               (a not in self.__class__.attributes.keys()) and not\
               (self.isAttribute() and self.isReference()):
                raise SchemaError, '%s, unknown attribute(%s,%s)' \
                    %(self.getItemTrace(), a, self.attributes[a])
class WSDLToolsAdapter(XMLSchemaComponent):
    """WSDL Adapter to grab the attributes from the wsdl document node.
    """
    attributes = {'name':None, 'targetNamespace':None}
    tag = 'definitions'

    def __init__(self, wsdl):
        # wsdl -- WSDLTools.WSDL instance; its DOM document supplies the
        # attributes (name, targetNamespace, xmlns declarations)
        XMLSchemaComponent.__init__(self, parent=wsdl)
        self.setAttributes(DOMAdapter(wsdl.document))

    def getImportSchemas(self):
        """returns WSDLTools.WSDL types Collection
        """
        return self._parent().types
class Notation(XMLSchemaComponent):
    """<notation>
       parent:
           schema
       attributes:
           id -- ID
           name -- NCName, Required
           public -- token, Required
           system -- anyURI
       contents:
           annotation?
    """
    required = ['name', 'public']
    attributes = {'id':None, 'name':None, 'public':None, 'system':None}
    # BUG FIX: ('annotation') is just the string 'annotation'; when splatted
    # by getContents it was expanded character-by-character, so annotation
    # children could never match.  Must be a one-element sequence.
    contents = {'xsd':('annotation',)}
    tag = 'notation'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None

    def fromDom(self, node):
        """Populate attributes and the optional annotation child."""
        self.setAttributes(node)
        contents = self.getContents(node)

        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'annotation' and not self.annotation:
                self.annotation = Annotation(self)
                self.annotation.fromDom(i)
            else:
                raise SchemaError('Unknown component (%s)' % (i.getTagName()))
class Annotation(XMLSchemaComponent):
    """<annotation>
       parent:
           all,any,anyAttribute,attribute,attributeGroup,choice,complexContent,
           complexType,element,extension,field,group,import,include,key,keyref,
           list,notation,redefine,restriction,schema,selector,simpleContent,
           simpleType,union,unique
       attributes:
           id -- ID
       contents:
           (documentation | appinfo)*
    """
    attributes = {'id':None}
    contents = {'xsd':('documentation', 'appinfo')}
    tag = 'annotation'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.content = None

    def fromDom(self, node):
        # Both documentation and appinfo children are skipped, so the
        # 'content' list is never appended to and self.content is always
        # the empty tuple.
        self.setAttributes(node)
        contents = self.getContents(node)
        content = []

        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'documentation':
                #print_debug('class %s, documentation skipped' %self.__class__, 5)
                continue
            elif component == 'appinfo':
                #print_debug('class %s, appinfo skipped' %self.__class__, 5)
                continue
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
        self.content = tuple(content)
class Documentation(XMLSchemaComponent):
    """<documentation>
       parent:
           annotation
       attributes:
           source, anyURI
           xml:lang, language
       contents:
           mixed, any
    """
    attributes = {'source':None, 'xml:lang':None}
    # NOTE(review): 'mixed' and 'any' are content-model descriptions, not
    # element tag names, so getContents matches nothing -- confirm intent.
    contents = {'xsd':('mixed', 'any')}
    tag = 'documentation'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.content = None

    def fromDom(self, node):
        # All recognized children are skipped; 'content' is never appended
        # to, so self.content is always the empty tuple.
        self.setAttributes(node)
        contents = self.getContents(node)
        content = []

        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'mixed':
                #print_debug('class %s, mixed skipped' %self.__class__, 5)
                continue
            elif component == 'any':
                #print_debug('class %s, any skipped' %self.__class__, 5)
                continue
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
        self.content = tuple(content)
class Appinfo(XMLSchemaComponent):
    """<appinfo>
       parent:
           annotation
       attributes:
           source, anyURI
       contents:
           mixed, any
    """
    # NOTE(review): 'anyURI' looks like the *type* of 'source' rather than
    # an attribute name -- confirm this key is intentional.
    attributes = {'source':None, 'anyURI':None}
    contents = {'xsd':('mixed', 'any')}
    tag = 'appinfo'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.content = None

    def fromDom(self, node):
        # All recognized children are skipped; 'content' is never appended
        # to, so self.content is always the empty tuple.
        self.setAttributes(node)
        contents = self.getContents(node)
        content = []

        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'mixed':
                #print_debug('class %s, mixed skipped' %self.__class__, 5)
                continue
            elif component == 'any':
                #print_debug('class %s, any skipped' %self.__class__, 5)
                continue
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
        self.content = tuple(content)
class XMLSchemaFake:
    # This is temporary, for the benefit of WSDL until the real thing works.
    def __init__(self, element):
        """Capture the raw <schema> element and its declared targetNamespace."""
        self.element = element
        self.targetNamespace = DOM.getAttr(element, 'targetNamespace')
class XMLSchema(XMLSchemaComponent):
"""A schema is a collection of schema components derived from one
or more schema documents, that is, one or more <schema> element
information items. It represents the abstract notion of a schema
rather than a single schema document (or other representation).
<schema>
parent:
ROOT
attributes:
id -- ID
version -- token
xml:lang -- language
targetNamespace -- anyURI
attributeFormDefault -- 'qualified' | 'unqualified', 'unqualified'
elementFormDefault -- 'qualified' | 'unqualified', 'unqualified'
blockDefault -- '#all' | list of
('substitution | 'extension' | 'restriction')
finalDefault -- '#all' | list of
('extension' | 'restriction' | 'list' | 'union')
contents:
((include | import | redefine | annotation)*,
(attribute, attributeGroup, complexType, element, group,
notation, simpleType)*, annotation*)*
attributes -- schema attributes
imports -- import statements
includes -- include statements
redefines --
types -- global simpleType, complexType definitions
elements -- global element declarations
attr_decl -- global attribute declarations
attr_groups -- attribute Groups
model_groups -- model Groups
notations -- global notations
"""
attributes = {'id':None,
'version':None,
'xml:lang':None,
'targetNamespace':None,
'attributeFormDefault':'unqualified',
'elementFormDefault':'unqualified',
'blockDefault':None,
'finalDefault':None}
contents = {'xsd':('include', 'import', 'redefine', 'annotation',
'attribute', 'attributeGroup', 'complexType',
'element', 'group', 'notation', 'simpleType',
'annotation')}
empty_namespace = ''
tag = 'schema'
def __init__(self, parent=None):
"""parent --
instance variables:
targetNamespace -- schema's declared targetNamespace, or empty string.
_imported_schemas -- namespace keyed dict of schema dependencies, if
a schema is provided instance will not resolve import statement.
_included_schemas -- schemaLocation keyed dict of component schemas,
if schema is provided instance will not resolve include statement.
_base_url -- needed for relative URLs support, only works with URLs
relative to initial document.
includes -- collection of include statements
imports -- collection of import statements
elements -- collection of global element declarations
types -- collection of global type definitions
attr_decl -- collection of global attribute declarations
attr_groups -- collection of global attribute group definitions
model_groups -- collection of model group definitions
notations -- collection of notations
"""
self.__node = None
self.targetNamespace = None
XMLSchemaComponent.__init__(self, parent)
f = lambda k: k.attributes['name']
ns = lambda k: k.attributes['namespace']
sl = lambda k: k.attributes['schemaLocation']
self.includes = Collection(self, key=sl)
self.imports = Collection(self, key=ns)
self.elements = Collection(self, key=f)
self.types = Collection(self, key=f)
self.attr_decl = Collection(self, key=f)
self.attr_groups = Collection(self, key=f)
self.model_groups = Collection(self, key=f)
self.notations = Collection(self, key=f)
self._imported_schemas = {}
self._included_schemas = {}
self._base_url = None
def getNode(self):
"""
Interacting with the underlying DOM tree.
"""
return self.__node
def addImportSchema(self, schema):
"""for resolving import statements in Schema instance
schema -- schema instance
_imported_schemas
"""
if not isinstance(schema, XMLSchema):
raise TypeError, 'expecting a Schema instance'
if schema.targetNamespace != self.targetNamespace:
self._imported_schemas[schema.targetNamespace] = schema
else:
raise SchemaError, 'import schema bad targetNamespace'
def addIncludeSchema(self, schemaLocation, schema):
"""for resolving include statements in Schema instance
schemaLocation -- schema location
schema -- schema instance
_included_schemas
"""
if not isinstance(schema, XMLSchema):
raise TypeError, 'expecting a Schema instance'
if not schema.targetNamespace or\
schema.targetNamespace == self.targetNamespace:
self._included_schemas[schemaLocation] = schema
else:
raise SchemaError, 'include schema bad targetNamespace'
def setImportSchemas(self, schema_dict):
"""set the import schema dictionary, which is used to
reference depedent schemas.
"""
self._imported_schemas = schema_dict
def getImportSchemas(self):
"""get the import schema dictionary, which is used to
reference depedent schemas.
"""
return self._imported_schemas
def getSchemaNamespacesToImport(self):
"""returns tuple of namespaces the schema instance has declared
itself to be depedent upon.
"""
return tuple(self.includes.keys())
def setIncludeSchemas(self, schema_dict):
"""set the include schema dictionary, which is keyed with
schemaLocation (uri).
This is a means of providing
schemas to the current schema for content inclusion.
"""
self._included_schemas = schema_dict
def getIncludeSchemas(self):
"""get the include schema dictionary, which is keyed with
schemaLocation (uri).
"""
return self._included_schemas
def getBaseUrl(self):
"""get base url, used for normalizing all relative uri's
"""
return self._base_url
def setBaseUrl(self, url):
"""set base url, used for normalizing all relative uri's
"""
self._base_url = url
def getElementFormDefault(self):
"""return elementFormDefault attribute
"""
return self.attributes.get('elementFormDefault')
def isElementFormDefaultQualified(self):
return self.attributes.get('elementFormDefault') == 'qualified'
def getAttributeFormDefault(self):
"""return attributeFormDefault attribute
"""
return self.attributes.get('attributeFormDefault')
    def getBlockDefault(self):
        """return blockDefault attribute
           (None when the attribute was not declared on the schema element)
        """
        return self.attributes.get('blockDefault')
    def getFinalDefault(self):
        """return finalDefault attribute
           (None when the attribute was not declared on the schema element)
        """
        return self.attributes.get('finalDefault')
    def load(self, node, location=None):
        """Populate this schema instance from a <schema> DOM node.
           node -- DOM adapter wrapping the xsd:schema element
           location -- schema location (unused in this method body;
               kept for interface compatibility -- TODO confirm callers)
        """
        self.__node = node
        pnode = node.getParentNode()
        if pnode:
            pname = SplitQName(pnode.getTagName())[1]
            if pname == 'types':
                # Schema is embedded in a WSDL <types> section: gather the
                # parent's attributes (notably xmlns declarations) first,
                # then overlay the schema element's own attributes and
                # merge in any xmlns prefixes the schema did not redeclare.
                attributes = {}
                self.setAttributes(pnode)
                attributes.update(self.attributes)
                self.setAttributes(node)
                for k,v in attributes['xmlns'].items():
                    if not self.attributes['xmlns'].has_key(k):
                        self.attributes['xmlns'][k] = v
            else:
                self.setAttributes(node)
        else:
            self.setAttributes(node)
        self.targetNamespace = self.getTargetNamespace()
        # Walk the schema's children and dispatch on the local tag name.
        for childNode in self.getContents(node):
            component = SplitQName(childNode.getTagName())[1]
            if component == 'include':
                # Resolve the include, then splice the included schema's DOM
                # children in place of the <include> element so later passes
                # see a single merged document.
                tp = self.__class__.Include(self)
                tp.fromDom(childNode)
                sl = tp.attributes['schemaLocation']
                schema = tp.getSchema()
                if not self.getIncludeSchemas().has_key(sl):
                    self.addIncludeSchema(sl, schema)
                self.includes[sl] = tp
                pn = childNode.getParentNode().getNode()
                pn.removeChild(childNode.getNode())
                for child in schema.getNode().getNode().childNodes:
                    pn.appendChild(child.cloneNode(1))
                # Copy the included schema's parsed components into this
                # schema, rebinding their parent weakrefs.  On a name clash
                # the first definition wins and the duplicate is dropped.
                for collection in ['imports','elements','types',
                                   'attr_decl','attr_groups','model_groups',
                                   'notations']:
                    for k,v in getattr(schema,collection).items():
                        if not getattr(self,collection).has_key(k):
                            v._parent = weakref.ref(self)
                            getattr(self,collection)[k] = v
                        else:
                            print "Warning: Not keeping schema component."
            elif component == 'import':
                tp = self.__class__.Import(self)
                tp.fromDom(childNode)
                import_ns = tp.getAttribute('namespace') or \
                    self.__class__.empty_namespace
                # Only eagerly load the imported schema when it is not
                # already known and a schemaLocation hint is present.
                if not self.getImportSchemas().has_key(import_ns) and \
                   tp.getAttribute('schemaLocation'):
                    self.addImportSchema(tp.getSchema())
                self.imports[import_ns] = tp
            elif component == 'redefine':
                # redefine not implemented yet
                pass
            elif component == 'annotation':
                # annotation not implemented yet
                pass
            elif component == 'attribute':
                tp = AttributeDeclaration(self)
                tp.fromDom(childNode)
                self.attr_decl[tp.getAttribute('name')] = tp
            elif component == 'attributeGroup':
                tp = AttributeGroupDefinition(self)
                tp.fromDom(childNode)
                self.attr_groups[tp.getAttribute('name')] = tp
            elif component == 'element':
                tp = ElementDeclaration(self)
                tp.fromDom(childNode)
                self.elements[tp.getAttribute('name')] = tp
            elif component == 'group':
                tp = ModelGroupDefinition(self)
                tp.fromDom(childNode)
                self.model_groups[tp.getAttribute('name')] = tp
            elif component == 'notation':
                tp = Notation(self)
                tp.fromDom(childNode)
                self.notations[tp.getAttribute('name')] = tp
            elif component == 'complexType':
                tp = ComplexType(self)
                tp.fromDom(childNode)
                self.types[tp.getAttribute('name')] = tp
            elif component == 'simpleType':
                tp = SimpleType(self)
                tp.fromDom(childNode)
                self.types[tp.getAttribute('name')] = tp
            else:
                # unrecognized child terminates the walk entirely
                # NOTE(review): break (not raise/continue) silently drops
                # all remaining siblings -- confirm this is intentional.
                break
        # indx += 1
    class Import(XMLSchemaComponent):
        """<import>
           parent:
               schema
           attributes:
               id -- ID
               namespace -- anyURI
               schemaLocation -- anyURI
           contents:
               annotation?
        """
        attributes = {'id':None,
            'namespace':None,
            'schemaLocation':None}
        contents = {'xsd':['annotation']}
        tag = 'import'
        def __init__(self, parent):
            XMLSchemaComponent.__init__(self, parent)
            self.annotation = None
            # hard reference to the resolved schema, cached by getSchema()
            self._schema = None
        def fromDom(self, node):
            # Parse the <import>; the only legal child is one annotation.
            self.setAttributes(node)
            contents = self.getContents(node)
            # Importing your own targetNamespace is illegal (use include).
            if self.attributes['namespace'] == self.getTargetNamespace():
                raise SchemaError, 'namespace of schema and import match'
            for i in contents:
                component = SplitQName(i.getTagName())[1]
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
        def getSchema(self):
            """if schema is not defined, first look for a Schema class instance
               in parent Schema. Else if not defined resolve schemaLocation
               and create a new Schema class instance, and keep a hard reference.
            """
            if not self._schema:
                ns = self.attributes['namespace']
                # consult this schema's imports, then the grandparent's
                # (e.g. the enclosing WSDL) -- _parent is a weakref, hence
                # the call syntax.
                schema = self._parent().getImportSchemas().get(ns)
                if not schema and self._parent()._parent:
                    schema = self._parent()._parent().getImportSchemas().get(ns)
                if not schema:
                    url = self.attributes.get('schemaLocation')
                    if not url:
                        raise SchemaError, 'namespace(%s) is unknown' %ns
                    base_url = self._parent().getBaseUrl()
                    reader = SchemaReader(base_url=base_url)
                    reader._imports = self._parent().getImportSchemas()
                    reader._includes = self._parent().getIncludeSchemas()
                    self._schema = reader.loadFromURL(url)
            # when self._schema was already cached the "or" short-circuits,
            # so the possibly-unbound local "schema" is never evaluated
            return self._schema or schema
    class Include(XMLSchemaComponent):
        """<include schemaLocation>
           parent:
               schema
           attributes:
               id -- ID
               schemaLocation -- anyURI, required
           contents:
               annotation?
        """
        required = ['schemaLocation']
        attributes = {'id':None,
            'schemaLocation':None}
        contents = {'xsd':['annotation']}
        tag = 'include'
        def __init__(self, parent):
            XMLSchemaComponent.__init__(self, parent)
            self.annotation = None
            # hard reference to the resolved schema, cached by getSchema()
            self._schema = None
        def fromDom(self, node):
            # Parse the <include>; the only legal child is one annotation.
            self.setAttributes(node)
            contents = self.getContents(node)
            for i in contents:
                component = SplitQName(i.getTagName())[1]
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
        def getSchema(self):
            """if schema is not defined, first look for a Schema class instance
               in parent Schema. Else if not defined resolve schemaLocation
               and create a new Schema class instance.
            """
            if not self._schema:
                # _parent is a weakref to the enclosing schema
                schema = self._parent()
                self._schema = schema.getIncludeSchemas().get(\
                                   self.attributes['schemaLocation']
                                   )
                if not self._schema:
                    url = self.attributes['schemaLocation']
                    # reuse the parent's import/include caches so the new
                    # reader does not reload schemas already seen
                    reader = SchemaReader(base_url=schema.getBaseUrl())
                    reader._imports = schema.getImportSchemas()
                    reader._includes = schema.getIncludeSchemas()
                    self._schema = reader.loadFromURL(url)
            return self._schema
class AttributeDeclaration(XMLSchemaComponent,\
AttributeMarker,\
DeclarationMarker):
"""<attribute name>
parent:
schema
attributes:
id -- ID
name -- NCName, required
type -- QName
default -- string
fixed -- string
contents:
annotation?, simpleType?
"""
required = ['name']
attributes = {'id':None,
'name':None,
'type':None,
'default':None,
'fixed':None}
contents = {'xsd':['annotation','simpleType']}
tag = 'attribute'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
self.content = None
def fromDom(self, node):
""" No list or union support
"""
self.setAttributes(node)
contents = self.getContents(node)
for i in contents:
component = SplitQName(i.getTagName())[1]
if component == 'annotation' and not self.annotation:
self.annotation = Annotation(self)
self.annotation.fromDom(i)
elif component == 'simpleType':
self.content = AnonymousSimpleType(self)
self.content.fromDom(i)
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class LocalAttributeDeclaration(AttributeDeclaration,\
                                AttributeMarker,\
                                LocalMarker,\
                                DeclarationMarker):
    """<attribute name>
       parent:
           complexType, restriction, extension, attributeGroup
       attributes:
           id -- ID
           name -- NCName, required
           type -- QName
           form -- ('qualified' | 'unqualified'), schema.attributeFormDefault
           use -- ('optional' | 'prohibited' | 'required'), optional
           default -- string
           fixed -- string
       contents:
           annotation?, simpleType?
    """
    required = ['name']
    attributes = {'id':None,
        'name':None,
        'type':None,
        # callable defaults are evaluated lazily against the instance
        'form':lambda self: GetSchema(self).getAttributeFormDefault(),
        'use':'optional',
        'default':None,
        'fixed':None}
    contents = {'xsd':['annotation','simpleType']}
    def __init__(self, parent):
        AttributeDeclaration.__init__(self, parent)
        self.annotation = None
        self.content = None
    def fromDom(self, node):
        # NOTE: mirrors AttributeDeclaration.fromDom (no list/union support)
        self.setAttributes(node)
        contents = self.getContents(node)
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'annotation' and not self.annotation:
                self.annotation = Annotation(self)
                self.annotation.fromDom(i)
            elif component == 'simpleType':
                self.content = AnonymousSimpleType(self)
                self.content.fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class AttributeWildCard(XMLSchemaComponent,\
AttributeMarker,\
DeclarationMarker,\
WildCardMarker):
"""<anyAttribute>
parents:
complexType, restriction, extension, attributeGroup
attributes:
id -- ID
namespace -- '##any' | '##other' |
(anyURI* | '##targetNamespace' | '##local'), ##any
processContents -- 'lax' | 'skip' | 'strict', strict
contents:
annotation?
"""
attributes = {'id':None,
'namespace':'##any',
'processContents':'strict'}
contents = {'xsd':['annotation']}
tag = 'anyAttribute'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
for i in contents:
component = SplitQName(i.getTagName())[1]
if component == 'annotation' and not self.annotation:
self.annotation = Annotation(self)
self.annotation.fromDom(i)
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class AttributeReference(XMLSchemaComponent,\
                         AttributeMarker,\
                         ReferenceMarker):
    """<attribute ref>
       parents:
           complexType, restriction, extension, attributeGroup
       attributes:
           id -- ID
           ref -- QName, required
           use -- ('optional' | 'prohibited' | 'required'), optional
           default -- string
           fixed -- string
       contents:
           annotation?
    """
    required = ['ref']
    attributes = {'id':None,
        'ref':None,
        'use':'optional',
        'default':None,
        'fixed':None}
    contents = {'xsd':['annotation']}
    tag = 'attribute'
    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
    def getAttributeDeclaration(self, attribute='ref'):
        # Resolve the referenced global attribute declaration; defaults
        # to following this reference's own "ref" attribute.
        return XMLSchemaComponent.getAttributeDeclaration(self, attribute)
    def fromDom(self, node):
        # Parse attributes; the only legal child is a single annotation.
        self.setAttributes(node)
        contents = self.getContents(node)
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'annotation' and not self.annotation:
                self.annotation = Annotation(self)
                self.annotation.fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class AttributeGroupDefinition(XMLSchemaComponent,\
                               AttributeGroupMarker,\
                               DefinitionMarker):
    """<attributeGroup name>
       parents:
           schema, redefine
       attributes:
           id -- ID
           name -- NCName, required
       contents:
           annotation?, (attribute | attributeGroup)*, anyAttribute?
    """
    required = ['name']
    attributes = {'id':None,
        'name':None}
    contents = {'xsd':['annotation', 'attribute', 'attributeGroup', 'anyAttribute']}
    tag = 'attributeGroup'
    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        # tuple of parsed attribute components, set by fromDom
        self.attr_content = None
    def getAttributeContent(self):
        # Return the parsed (attribute|attributeGroup|anyAttribute) tuple.
        return self.attr_content
    def fromDom(self, node):
        # Parse children by index: annotation is honored only as the very
        # first child; anyAttribute must be the last child.
        self.setAttributes(node)
        contents = self.getContents(node)
        content = []
        for indx in range(len(contents)):
            component = SplitQName(contents[indx].getTagName())[1]
            if (component == 'annotation') and (not indx):
                self.annotation = Annotation(self)
                self.annotation.fromDom(contents[indx])
            elif component == 'attribute':
                # local declaration carries "name"; a reference carries "ref"
                if contents[indx].hasattr('name'):
                    content.append(LocalAttributeDeclaration(self))
                elif contents[indx].hasattr('ref'):
                    content.append(AttributeReference(self))
                else:
                    raise SchemaError, 'Unknown attribute type'
                content[-1].fromDom(contents[indx])
            elif component == 'attributeGroup':
                content.append(AttributeGroupReference(self))
                content[-1].fromDom(contents[indx])
            elif component == 'anyAttribute':
                # the wildcard must close the group
                if len(contents) != indx+1:
                    raise SchemaError, 'anyAttribute is out of order in %s' %self.getItemTrace()
                content.append(AttributeWildCard(self))
                content[-1].fromDom(contents[indx])
            else:
                raise SchemaError, 'Unknown component (%s)' %(contents[indx].getTagName())
        self.attr_content = tuple(content)
class AttributeGroupReference(XMLSchemaComponent,\
AttributeGroupMarker,\
ReferenceMarker):
"""<attributeGroup ref>
parents:
complexType, restriction, extension, attributeGroup
attributes:
id -- ID
ref -- QName, required
contents:
annotation?
"""
required = ['ref']
attributes = {'id':None,
'ref':None}
contents = {'xsd':['annotation']}
tag = 'attributeGroup'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
def getAttributeGroup(self, attribute='ref'):
"""attribute -- attribute with a QName value (eg. type).
collection -- check types collection in parent Schema instance
"""
return XMLSchemaComponent.getAttributeGroup(self, attribute)
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
for i in contents:
component = SplitQName(i.getTagName())[1]
if component == 'annotation' and not self.annotation:
self.annotation = Annotation(self)
self.annotation.fromDom(i)
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
######################################################
# Elements
#####################################################
class IdentityConstrants(XMLSchemaComponent):
    """Allow one to uniquely identify nodes in a document and ensure the
       integrity of references between them.
       (NOTE: class name typo "Constrants" is preserved for compatibility.)
       attributes -- dictionary of attributes
       selector -- XPath to selected nodes
       fields -- list of XPath to key field
    """
    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.selector = None
        self.fields = None
        self.annotation = None
    def fromDom(self, node):
        # Parse annotation?/selector/field+ children (legal set is defined
        # by the subclass's contents['xsd']).
        self.setAttributes(node)
        contents = self.getContents(node)
        fields = []
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                elif component == 'selector':
                    self.selector = self.Selector(self)
                    self.selector.fromDom(i)
                    continue
                elif component == 'field':
                    fields.append(self.Field(self))
                    fields[-1].fromDom(i)
                    continue
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
        self.fields = tuple(fields)
    class Constraint(XMLSchemaComponent):
        # Shared base for Selector and Field: attributes plus an
        # optional annotation child.
        def __init__(self, parent):
            XMLSchemaComponent.__init__(self, parent)
            self.annotation = None
        def fromDom(self, node):
            self.setAttributes(node)
            contents = self.getContents(node)
            for i in contents:
                component = SplitQName(i.getTagName())[1]
                if component in self.__class__.contents['xsd']:
                    if component == 'annotation' and not self.annotation:
                        self.annotation = Annotation(self)
                        self.annotation.fromDom(i)
                    else:
                        raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
    class Selector(Constraint):
        """<selector xpath>
           parent:
               unique, key, keyref
           attributes:
               id -- ID
               xpath -- XPath subset, required
           contents:
               annotation?
        """
        required = ['xpath']
        attributes = {'id':None,
            'xpath':None}
        contents = {'xsd':['annotation']}
        tag = 'selector'
    class Field(Constraint):
        """<field xpath>
           parent:
               unique, key, keyref
           attributes:
               id -- ID
               xpath -- XPath subset, required
           contents:
               annotation?
        """
        required = ['xpath']
        attributes = {'id':None,
            'xpath':None}
        contents = {'xsd':['annotation']}
        tag = 'field'
class Unique(IdentityConstrants):
    """<unique name> Enforce fields are unique w/i a specified scope.
       Purely declarative subclass; parsing lives in IdentityConstrants.
       parent:
           element
       attributes:
           id -- ID
           name -- NCName, required
       contents:
           annotation?, selector, field+
    """
    required = ['name']
    attributes = {'id':None,
        'name':None}
    contents = {'xsd':['annotation', 'selector', 'field']}
    tag = 'unique'
class Key(IdentityConstrants):
    """<key name> Enforce fields are unique w/i a specified scope, and all
       field values are present w/i document.  Fields cannot
       be nillable.
       Purely declarative subclass; parsing lives in IdentityConstrants.
       parent:
           element
       attributes:
           id -- ID
           name -- NCName, required
       contents:
           annotation?, selector, field+
    """
    required = ['name']
    attributes = {'id':None,
        'name':None}
    contents = {'xsd':['annotation', 'selector', 'field']}
    tag = 'key'
class KeyRef(IdentityConstrants):
    """<keyref name refer> Ensure a match between two sets of values in an
       instance.
       Purely declarative subclass; parsing lives in IdentityConstrants.
       parent:
           element
       attributes:
           id -- ID
           name -- NCName, required
           refer -- QName, required
       contents:
           annotation?, selector, field+
    """
    required = ['name', 'refer']
    attributes = {'id':None,
        'name':None,
        'refer':None}
    contents = {'xsd':['annotation', 'selector', 'field']}
    tag = 'keyref'
class ElementDeclaration(XMLSchemaComponent,\
                         ElementMarker,\
                         DeclarationMarker):
    """<element name>
       parents:
           schema
       attributes:
           id -- ID
           name -- NCName, required
           type -- QName
           default -- string
           fixed -- string
           nillable -- boolean, false
           abstract -- boolean, false
           substitutionGroup -- QName
           block -- ('#all' | ('substition' | 'extension' | 'restriction')*),
               schema.blockDefault
           final -- ('#all' | ('extension' | 'restriction')*),
               schema.finalDefault
       contents:
           annotation?, (simpleType,complexType)?, (key | keyref | unique)*
    """
    required = ['name']
    attributes = {'id':None,
        'name':None,
        'type':None,
        'default':None,
        'fixed':None,
        'nillable':0,
        'abstract':0,
        'substitutionGroup':None,
        # callable defaults are evaluated lazily against the instance;
        # _parent is a weakref, hence the call syntax
        'block':lambda self: self._parent().getBlockDefault(),
        'final':lambda self: self._parent().getFinalDefault()}
    contents = {'xsd':['annotation', 'simpleType', 'complexType', 'key',\
        'keyref', 'unique']}
    tag = 'element'
    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        # anonymous simpleType/complexType content, when present
        self.content = None
        self.constraints = ()
    def isQualified(self):
        """
        Global elements are always qualified.
        """
        return True
    def getElementDeclaration(self, attribute):
        raise Warning, 'invalid operation for <%s>' %self.tag
    def getTypeDefinition(self, attribute=None):
        """
        If attribute is None, "type" is assumed, return the corresponding
        representation of the global type definition (TypeDefinition),
        or the local definition if don't find "type". To maintain backwards
        compat, if attribute is provided call base class method.
        """
        if attribute:
            return XMLSchemaComponent.getTypeDefinition(self, attribute)
        gt = XMLSchemaComponent.getTypeDefinition(self, 'type')
        if gt:
            return gt
        # no global "type" attribute: fall back to anonymous content
        return self.content
    def getConstraints(self):
        return self._constraints
    def setConstraints(self, constraints):
        # always stored as a tuple regardless of input sequence type
        self._constraints = tuple(constraints)
    constraints = property(getConstraints, setConstraints, None, "tuple of key, keyref, unique constraints")
    def fromDom(self, node):
        # Parse the global element: optional annotation, at most one
        # anonymous type, and any number of identity constraints.
        self.setAttributes(node)
        contents = self.getContents(node)
        constraints = []
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                elif component == 'simpleType' and not self.content:
                    self.content = AnonymousSimpleType(self)
                    self.content.fromDom(i)
                elif component == 'complexType' and not self.content:
                    self.content = LocalComplexType(self)
                    self.content.fromDom(i)
                elif component == 'key':
                    constraints.append(Key(self))
                    constraints[-1].fromDom(i)
                elif component == 'keyref':
                    constraints.append(KeyRef(self))
                    constraints[-1].fromDom(i)
                elif component == 'unique':
                    constraints.append(Unique(self))
                    constraints[-1].fromDom(i)
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
        # property setter converts the list to a tuple
        self.constraints = constraints
class LocalElementDeclaration(ElementDeclaration,\
                              LocalMarker):
    """<element>
       parents:
           all, choice, sequence
       attributes:
           id -- ID
           name -- NCName, required
           form -- ('qualified' | 'unqualified'), schema.elementFormDefault
           type -- QName
           minOccurs -- Whole Number, 1
           maxOccurs -- (Whole Number | 'unbounded'), 1
           default -- string
           fixed -- string
           nillable -- boolean, false
           block -- ('#all' | ('extension' | 'restriction')*), schema.blockDefault
       contents:
           annotation?, (simpleType,complexType)?, (key | keyref | unique)*
    """
    required = ['name']
    attributes = {'id':None,
        'name':None,
        # callable defaults are evaluated lazily against the instance
        'form':lambda self: GetSchema(self).getElementFormDefault(),
        'type':None,
        'minOccurs':'1',
        'maxOccurs':'1',
        'default':None,
        'fixed':None,
        'nillable':0,
        'abstract':0,
        'block':lambda self: GetSchema(self).getBlockDefault()}
    contents = {'xsd':['annotation', 'simpleType', 'complexType', 'key',\
        'keyref', 'unique']}
    def isQualified(self):
        """
        Local elements can be qualified or unqualifed according
        to the attribute form, or the elementFormDefault. By default
        local elements are unqualified.
        """
        form = self.getAttribute('form')
        if form == 'qualified':
            return True
        if form == 'unqualified':
            return False
        # anything else (including None) is a schema error
        raise SchemaError, 'Bad form (%s) for element: %s' %(form, self.getItemTrace())
class ElementReference(XMLSchemaComponent,\
ElementMarker,\
ReferenceMarker):
"""<element ref>
parents:
all, choice, sequence
attributes:
id -- ID
ref -- QName, required
minOccurs -- Whole Number, 1
maxOccurs -- (Whole Number | 'unbounded'), 1
contents:
annotation?
"""
required = ['ref']
attributes = {'id':None,
'ref':None,
'minOccurs':'1',
'maxOccurs':'1'}
contents = {'xsd':['annotation']}
tag = 'element'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
def getElementDeclaration(self, attribute=None):
"""If attribute is None, "ref" is assumed, return the corresponding
representation of the global element declaration (ElementDeclaration),
To maintain backwards compat, if attribute is provided call base class method.
"""
if attribute:
return XMLSchemaComponent.getElementDeclaration(self, attribute)
return XMLSchemaComponent.getElementDeclaration(self, 'ref')
def fromDom(self, node):
self.annotation = None
self.setAttributes(node)
for i in self.getContents(node):
component = SplitQName(i.getTagName())[1]
if component in self.__class__.contents['xsd']:
if component == 'annotation' and not self.annotation:
self.annotation = Annotation(self)
self.annotation.fromDom(i)
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class ElementWildCard(LocalElementDeclaration, WildCardMarker):
    """<any>
       parents:
           choice, sequence
       attributes:
           id -- ID
           minOccurs -- Whole Number, 1
           maxOccurs -- (Whole Number | 'unbounded'), 1
           namespace -- '##any' | '##other' |
               (anyURI* | '##targetNamespace' | '##local'), ##any
           processContents -- 'lax' | 'skip' | 'strict', strict
       contents:
           annotation?
    """
    # overrides LocalElementDeclaration: a wildcard has no required name
    required = []
    attributes = {'id':None,
        'minOccurs':'1',
        'maxOccurs':'1',
        'namespace':'##any',
        'processContents':'strict'}
    contents = {'xsd':['annotation']}
    tag = 'any'
    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
    def isQualified(self):
        """
        Global elements are always qualified, but if processContents
        are not strict could have dynamically generated local elements.
        """
        return GetSchema(self).isElementFormDefaultQualified()
    def getTypeDefinition(self, attribute):
        raise Warning, 'invalid operation for <%s>' % self.tag
    def fromDom(self, node):
        # Parse attributes; only an annotation child is legal (a duplicate
        # annotation is silently ignored, matching ElementReference).
        self.annotation = None
        self.setAttributes(node)
        for i in self.getContents(node):
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
######################################################
# Model Groups
#####################################################
class Sequence(XMLSchemaComponent,\
               SequenceMarker):
    """<sequence>
       parents:
           complexType, extension, restriction, group, choice, sequence
       attributes:
           id -- ID
           minOccurs -- Whole Number, 1
           maxOccurs -- (Whole Number | 'unbounded'), 1
       contents:
           annotation?, (element | group | choice | sequence | any)*
    """
    attributes = {'id':None,
        'minOccurs':'1',
        'maxOccurs':'1'}
    contents = {'xsd':['annotation', 'element', 'group', 'choice', 'sequence',\
        'any']}
    tag = 'sequence'
    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        # tuple of particle instances, set by fromDom
        self.content = None
    def fromDom(self, node):
        # Build the particle list in document order; the leading annotation
        # is stored separately and skipped via continue.
        self.setAttributes(node)
        contents = self.getContents(node)
        content = []
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                    continue
                elif component == 'element':
                    # reference (ref) vs local declaration (name)
                    if i.hasattr('ref'):
                        content.append(ElementReference(self))
                    else:
                        content.append(LocalElementDeclaration(self))
                elif component == 'group':
                    content.append(ModelGroupReference(self))
                elif component == 'choice':
                    content.append(Choice(self))
                elif component == 'sequence':
                    content.append(Sequence(self))
                elif component == 'any':
                    content.append(ElementWildCard(self))
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
                content[-1].fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
        self.content = tuple(content)
class All(XMLSchemaComponent,\
AllMarker):
"""<all>
parents:
complexType, extension, restriction, group
attributes:
id -- ID
minOccurs -- '0' | '1', 1
maxOccurs -- '1', 1
contents:
annotation?, element*
"""
attributes = {'id':None,
'minOccurs':'1',
'maxOccurs':'1'}
contents = {'xsd':['annotation', 'element']}
tag = 'all'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
self.content = None
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
content = []
for i in contents:
component = SplitQName(i.getTagName())[1]
if component in self.__class__.contents['xsd']:
if component == 'annotation' and not self.annotation:
self.annotation = Annotation(self)
self.annotation.fromDom(i)
continue
elif component == 'element':
if i.hasattr('ref'):
content.append(ElementReference(self))
else:
content.append(LocalElementDeclaration(self))
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
content[-1].fromDom(i)
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
self.content = tuple(content)
class Choice(XMLSchemaComponent,\
             ChoiceMarker):
    """<choice>
       parents:
           complexType, extension, restriction, group, choice, sequence
       attributes:
           id -- ID
           minOccurs -- Whole Number, 1
           maxOccurs -- (Whole Number | 'unbounded'), 1
       contents:
           annotation?, (element | group | choice | sequence | any)*
    """
    attributes = {'id':None,
        'minOccurs':'1',
        'maxOccurs':'1'}
    contents = {'xsd':['annotation', 'element', 'group', 'choice', 'sequence',\
        'any']}
    tag = 'choice'
    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        # tuple of particle instances, set by fromDom
        self.content = None
    def fromDom(self, node):
        # Build the alternatives list in document order; the leading
        # annotation is stored separately and skipped via continue.
        self.setAttributes(node)
        contents = self.getContents(node)
        content = []
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                    continue
                elif component == 'element':
                    # reference (ref) vs local declaration (name)
                    if i.hasattr('ref'):
                        content.append(ElementReference(self))
                    else:
                        content.append(LocalElementDeclaration(self))
                elif component == 'group':
                    content.append(ModelGroupReference(self))
                elif component == 'choice':
                    content.append(Choice(self))
                elif component == 'sequence':
                    content.append(Sequence(self))
                elif component == 'any':
                    content.append(ElementWildCard(self))
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
                content[-1].fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
        self.content = tuple(content)
class ModelGroupDefinition(XMLSchemaComponent,\
                           ModelGroupMarker,\
                           DefinitionMarker):
    """<group name>
       parents:
           redefine, schema
       attributes:
           id -- ID
           name -- NCName, required
       contents:
           annotation?, (all | choice | sequence)?
    """
    required = ['name']
    attributes = {'id':None,
        'name':None}
    contents = {'xsd':['annotation', 'all', 'choice', 'sequence']}
    tag = 'group'
    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        # the single all/choice/sequence compositor, set by fromDom
        self.content = None
    def fromDom(self, node):
        # Parse annotation plus at most one compositor child; the "not
        # self.content" guards make a second compositor an error.
        self.setAttributes(node)
        contents = self.getContents(node)
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                    continue
                elif component == 'all' and not self.content:
                    self.content = All(self)
                elif component == 'choice' and not self.content:
                    self.content = Choice(self)
                elif component == 'sequence' and not self.content:
                    self.content = Sequence(self)
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
                self.content.fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class ModelGroupReference(XMLSchemaComponent,\
ModelGroupMarker,\
ReferenceMarker):
"""<group ref>
parents:
choice, complexType, extension, restriction, sequence
attributes:
id -- ID
ref -- NCName, required
minOccurs -- Whole Number, 1
maxOccurs -- (Whole Number | 'unbounded'), 1
contents:
annotation?
"""
required = ['ref']
attributes = {'id':None,
'ref':None,
'minOccurs':'1',
'maxOccurs':'1'}
contents = {'xsd':['annotation']}
tag = 'group'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
def getModelGroupReference(self):
return self.getModelGroup('ref')
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
for i in contents:
component = SplitQName(i.getTagName())[1]
if component in self.__class__.contents['xsd']:
if component == 'annotation' and not self.annotation:
self.annotation = Annotation(self)
self.annotation.fromDom(i)
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class ComplexType(XMLSchemaComponent,\
DefinitionMarker,\
ComplexMarker):
"""<complexType name>
parents:
redefine, schema
attributes:
id -- ID
name -- NCName, required
mixed -- boolean, false
abstract -- boolean, false
block -- ('#all' | ('extension' | 'restriction')*), schema.blockDefault
final -- ('#all' | ('extension' | 'restriction')*), schema.finalDefault
contents:
annotation?, (simpleContent | complexContent |
((group | all | choice | sequence)?, (attribute | attributeGroup)*, anyAttribute?))
"""
required = ['name']
attributes = {'id':None,
'name':None,
'mixed':0,
'abstract':0,
'block':lambda self: self._parent().getBlockDefault(),
'final':lambda self: self._parent().getFinalDefault()}
contents = {'xsd':['annotation', 'simpleContent', 'complexContent',\
'group', 'all', 'choice', 'sequence', 'attribute', 'attributeGroup',\
'anyAttribute', 'any']}
tag = 'complexType'
    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        # content model: simpleContent | complexContent | compositor/group
        self.content = None
        # list of attribute declarations/references/wildcards, set by fromDom
        self.attr_content = None
    def isMixed(self):
        """Return True when mixed="true"/"1", False when absent or
           "false"/"0"; any other value raises SchemaError.
        """
        m = self.getAttribute('mixed')
        if m == 0 or m == False:
            return False
        # attribute values from the DOM arrive as strings (py2 basestring)
        if isinstance(m, basestring) is True:
            if m in ('false', '0'):
                return False
            if m in ('true', '1'):
                return True
        raise SchemaError, 'invalid value for attribute mixed(%s): %s'\
            %(m, self.getItemTrace())
    def getAttributeContent(self):
        # Return the attribute declarations/references/wildcards parsed
        # by fromDom (None until fromDom has run).
        return self.attr_content
    def getElementDeclaration(self, attribute):
        # A type definition has no element declaration to resolve.
        raise Warning, 'invalid operation for <%s>' %self.tag
    def getTypeDefinition(self, attribute):
        # A complexType is itself a type definition; nothing to resolve.
        raise Warning, 'invalid operation for <%s>' %self.tag
    def fromDom(self, node):
        # Parse annotation?, (simpleContent | complexContent | compositor)?,
        # then trailing attribute components, walking children by index.
        self.setAttributes(node)
        contents = self.getContents(node)
        indx = 0
        num = len(contents)
        #XXX ugly
        if not num:
            return
        component = SplitQName(contents[indx].getTagName())[1]
        if component == 'annotation':
            self.annotation = Annotation(self)
            self.annotation.fromDom(contents[indx])
            indx += 1
            # NOTE(review): if annotation is the ONLY child this indexes
            # past the end (IndexError) -- confirm inputs always have a
            # content child after annotation.
            component = SplitQName(contents[indx].getTagName())[1]
        self.content = None
        if component == 'simpleContent':
            self.content = self.__class__.SimpleContent(self)
            self.content.fromDom(contents[indx])
        elif component == 'complexContent':
            self.content = self.__class__.ComplexContent(self)
            self.content.fromDom(contents[indx])
        else:
            # direct compositor/group content; anything else (e.g. a bare
            # attribute) leaves self.content None and falls through to the
            # attribute loop below
            if component == 'all':
                self.content = All(self)
            elif component == 'choice':
                self.content = Choice(self)
            elif component == 'sequence':
                self.content = Sequence(self)
            elif component == 'group':
                self.content = ModelGroupReference(self)
            if self.content:
                self.content.fromDom(contents[indx])
                indx += 1
            # remaining children must all be attribute components
            self.attr_content = []
            while indx < num:
                component = SplitQName(contents[indx].getTagName())[1]
                if component == 'attribute':
                    # local declaration (name) vs reference (ref)
                    if contents[indx].hasattr('ref'):
                        self.attr_content.append(AttributeReference(self))
                    else:
                        self.attr_content.append(LocalAttributeDeclaration(self))
                elif component == 'attributeGroup':
                    self.attr_content.append(AttributeGroupReference(self))
                elif component == 'anyAttribute':
                    self.attr_content.append(AttributeWildCard(self))
                else:
                    raise SchemaError, 'Unknown component (%s): %s' \
                        %(contents[indx].getTagName(),self.getItemTrace())
                self.attr_content[-1].fromDom(contents[indx])
                indx += 1
class _DerivedType(XMLSchemaComponent):
    """Common base for <simpleContent>/<complexContent>: parses an optional
    annotation followed by exactly one restriction or extension derivation.
    """
    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        # XXX remove attribute derivation, inconsistent
        self.derivation = None
        self.content = None

    def fromDom(self, node):
        self.setAttributes(node)
        contents = self.getContents(node)
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                    continue
                elif component == 'restriction' and not self.derivation:
                    # Subclasses supply their own Restriction/Extension classes.
                    self.derivation = self.__class__.Restriction(self)
                elif component == 'extension' and not self.derivation:
                    self.derivation = self.__class__.Extension(self)
                else:
                    # A duplicate derivation (or second annotation) lands here.
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
            self.derivation.fromDom(i)
        self.content = self.derivation
class ComplexContent(_DerivedType,\
        ComplexMarker):
    """<complexContent>
    parents:
        complexType
    attributes:
        id -- ID
        mixed -- boolean, false
    contents:
        annotation?, (restriction | extension)
    """
    attributes = {'id':None,
        'mixed':0}
    contents = {'xsd':['annotation', 'restriction', 'extension']}
    tag = 'complexContent'

    def isMixed(self):
        # Interpret the 'mixed' attribute: absent/0/False -> not mixed;
        # the schema string forms 'true'/'1' and 'false'/'0' are accepted.
        m = self.getAttribute('mixed')
        if m == 0 or m == False:
            return False
        if isinstance(m, basestring) is True:
            if m in ('false', '0'):
                return False
            if m in ('true', '1'):
                return True
        # Anything else is a malformed schema value.
        raise SchemaError, 'invalid value for attribute mixed(%s): %s'\
            %(m, self.getItemTrace())
class _DerivationBase(XMLSchemaComponent):
    """<extension>,<restriction>
    parents:
        complexContent
    attributes:
        id -- ID
        base -- QName, required
    contents:
        annotation?, (group | all | choice | sequence)?,
        (attribute | attributeGroup)*, anyAttribute?
    """
    required = ['base']
    attributes = {'id':None,
        'base':None }
    contents = {'xsd':['annotation', 'group', 'all', 'choice',\
        'sequence', 'attribute', 'attributeGroup', 'anyAttribute']}

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        self.content = None
        self.attr_content = None

    def getAttributeContent(self):
        # Attribute declarations collected by fromDom.
        return self.attr_content

    def fromDom(self, node):
        """Parse children in order: annotation?, one optional model group
        (all | choice | sequence | group), then attribute declarations.
        """
        self.setAttributes(node)
        contents = self.getContents(node)
        indx = 0
        num = len(contents)
        #XXX ugly
        if not num:
            return
        component = SplitQName(contents[indx].getTagName())[1]
        if component == 'annotation':
            self.annotation = Annotation(self)
            self.annotation.fromDom(contents[indx])
            indx += 1
            # NOTE(review): indexes past the end when annotation is the only
            # child -- TODO confirm callers never produce that shape.
            component = SplitQName(contents[indx].getTagName())[1]
        if component == 'all':
            self.content = All(self)
            self.content.fromDom(contents[indx])
            indx += 1
        elif component == 'choice':
            self.content = Choice(self)
            self.content.fromDom(contents[indx])
            indx += 1
        elif component == 'sequence':
            self.content = Sequence(self)
            self.content.fromDom(contents[indx])
            indx += 1
        elif component == 'group':
            self.content = ModelGroupReference(self)
            self.content.fromDom(contents[indx])
            indx += 1
        else:
            self.content = None
        # Remaining children are attribute-related declarations.
        self.attr_content = []
        while indx < num:
            component = SplitQName(contents[indx].getTagName())[1]
            if component == 'attribute':
                # 'ref' -> reference to a global declaration, else local.
                if contents[indx].hasattr('ref'):
                    self.attr_content.append(AttributeReference(self))
                else:
                    self.attr_content.append(LocalAttributeDeclaration(self))
            elif component == 'attributeGroup':
                if contents[indx].hasattr('ref'):
                    self.attr_content.append(AttributeGroupReference(self))
                else:
                    self.attr_content.append(AttributeGroupDefinition(self))
            elif component == 'anyAttribute':
                self.attr_content.append(AttributeWildCard(self))
            else:
                raise SchemaError, 'Unknown component (%s)' %(contents[indx].getTagName())
            self.attr_content[-1].fromDom(contents[indx])
            indx += 1
class Extension(_DerivationBase,
        ExtensionMarker):
    """<extension base>
    parents:
        complexContent
    attributes:
        id -- ID
        base -- QName, required
    contents:
        annotation?, (group | all | choice | sequence)?,
        (attribute | attributeGroup)*, anyAttribute?
    """
    # All parsing behavior is inherited from _DerivationBase.
    tag = 'extension'
class Restriction(_DerivationBase,\
        RestrictionMarker):
    """<restriction base>
    parents:
        complexContent
    attributes:
        id -- ID
        base -- QName, required
    contents:
        annotation?, (group | all | choice | sequence)?,
        (attribute | attributeGroup)*, anyAttribute?
    """
    # All parsing behavior is inherited from _DerivationBase.
    tag = 'restriction'
class SimpleContent(_DerivedType,\
        SimpleMarker):
    """<simpleContent>
    parents:
        complexType
    attributes:
        id -- ID
    contents:
        annotation?, (restriction | extension)
    """
    # Derivation parsing is inherited from _DerivedType; the nested
    # Extension/Restriction classes below are the ones it instantiates.
    attributes = {'id':None}
    contents = {'xsd':['annotation', 'restriction', 'extension']}
    tag = 'simpleContent'
class Extension(XMLSchemaComponent,\
        ExtensionMarker):
    """<extension base>
    parents:
        simpleContent
    attributes:
        id -- ID
        base -- QName, required
    contents:
        annotation?, (attribute | attributeGroup)*, anyAttribute?
    """
    required = ['base']
    attributes = {'id':None,
        'base':None }
    contents = {'xsd':['annotation', 'attribute', 'attributeGroup',
        'anyAttribute']}
    tag = 'extension'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        self.attr_content = None

    def getAttributeContent(self):
        # Attribute declarations collected by fromDom (tuple).
        return self.attr_content

    def fromDom(self, node):
        """Parse children: annotation?, then attribute declarations only."""
        self.setAttributes(node)
        contents = self.getContents(node)
        indx = 0
        num = len(contents)
        if num:
            component = SplitQName(contents[indx].getTagName())[1]
            if component == 'annotation':
                self.annotation = Annotation(self)
                self.annotation.fromDom(contents[indx])
                indx += 1
                # NOTE(review): indexes past the end when annotation is the
                # only child -- TODO confirm upstream guards against it.
                component = SplitQName(contents[indx].getTagName())[1]
        content = []
        while indx < num:
            component = SplitQName(contents[indx].getTagName())[1]
            if component == 'attribute':
                # 'ref' -> reference to a global declaration, else local.
                if contents[indx].hasattr('ref'):
                    content.append(AttributeReference(self))
                else:
                    content.append(LocalAttributeDeclaration(self))
            elif component == 'attributeGroup':
                content.append(AttributeGroupReference(self))
            elif component == 'anyAttribute':
                content.append(AttributeWildCard(self))
            else:
                raise SchemaError, 'Unknown component (%s)'\
                    %(contents[indx].getTagName())
            content[-1].fromDom(contents[indx])
            indx += 1
        self.attr_content = tuple(content)
class Restriction(XMLSchemaComponent,\
        RestrictionMarker):
    """<restriction base>
    parents:
        simpleContent
    attributes:
        id -- ID
        base -- QName, required
    contents:
        annotation?, simpleType?, (enumeration | length |
        maxExclusive | maxInclusive | maxLength | minExclusive |
        minInclusive | minLength | pattern | fractionDigits |
        totalDigits | whiteSpace)*, (attribute | attributeGroup)*,
        anyAttribute?
    """
    required = ['base']
    attributes = {'id':None,
        'base':None }
    contents = {'xsd':['annotation', 'simpleType', 'attribute',\
        'attributeGroup', 'anyAttribute'] + RestrictionMarker.facets}
    tag = 'restriction'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        self.content = None
        self.attr_content = None

    def getAttributeContent(self):
        # Attribute declarations collected by fromDom (tuple).
        return self.attr_content

    def fromDom(self, node):
        """Parse children: annotation?, simpleType?, attribute declarations.

        self.content collects anonymous simpleType children; attribute,
        attributeGroup and anyAttribute children land in self.attr_content.
        """
        self.content = []
        self.setAttributes(node)
        contents = self.getContents(node)
        indx = 0
        num = len(contents)
        # Guard an empty element: the previous code indexed contents[0]
        # unconditionally and raised IndexError (sibling Extension guards).
        if num:
            component = SplitQName(contents[indx].getTagName())[1]
            if component == 'annotation':
                self.annotation = Annotation(self)
                self.annotation.fromDom(contents[indx])
                indx += 1
        content = []
        while indx < num:
            component = SplitQName(contents[indx].getTagName())[1]
            if component == 'attribute':
                # 'ref' -> reference to a global declaration, else local.
                if contents[indx].hasattr('ref'):
                    content.append(AttributeReference(self))
                else:
                    content.append(LocalAttributeDeclaration(self))
            elif component == 'attributeGroup':
                content.append(AttributeGroupReference(self))
            elif component == 'anyAttribute':
                content.append(AttributeWildCard(self))
            elif component == 'simpleType':
                # BUG FIX: this branch used to fall through to the
                # content[-1].fromDom(...) call below, which raised
                # IndexError when no attribute had been seen yet, or
                # re-parsed the previous attribute against the wrong DOM
                # node. The simpleType child is fully handled here.
                self.content.append(LocalSimpleType(self))
                self.content[-1].fromDom(contents[indx])
                indx += 1
                continue
            else:
                raise SchemaError('Unknown component (%s)'\
                    % (contents[indx].getTagName()))
            content[-1].fromDom(contents[indx])
            indx += 1
        self.attr_content = tuple(content)
class LocalComplexType(ComplexType,\
        LocalMarker):
    """<complexType>
    parents:
        element
    attributes:
        id -- ID
        mixed -- boolean, false
    contents:
        annotation?, (simpleContent | complexContent |
        ((group | all | choice | sequence)?, (attribute | attributeGroup)*, anyAttribute?))
    """
    # Anonymous complexType nested in an element declaration: no 'name'
    # attribute is required; parsing is inherited from ComplexType.
    required = []
    attributes = {'id':None,
        'mixed':0}
    tag = 'complexType'
class SimpleType(XMLSchemaComponent,\
        DefinitionMarker,\
        SimpleMarker):
    """<simpleType name>
    parents:
        redefine, schema
    attributes:
        id -- ID
        name -- NCName, required
        final -- ('#all' | ('extension' | 'restriction' | 'list' | 'union')*),
            schema.finalDefault
    contents:
        annotation?, (restriction | list | union)
    """
    required = ['name']
    attributes = {'id':None,
        'name':None,
        # 'final' defaults to the enclosing schema's finalDefault.
        'final':lambda self: self._parent().getFinalDefault()}
    contents = {'xsd':['annotation', 'restriction', 'list', 'union']}
    tag = 'simpleType'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        self.content = None

    def getElementDeclaration(self, attribute):
        # Simple types hold no element declarations; fail loudly.
        raise Warning, 'invalid operation for <%s>' %self.tag

    def getTypeDefinition(self, attribute):
        # This component is itself a type definition; the lookup is meaningless.
        raise Warning, 'invalid operation for <%s>' %self.tag

    def fromDom(self, node):
        self.setAttributes(node)
        contents = self.getContents(node)
        # Consume leading annotation children; on break, 'child'/'component'
        # refer to the first non-annotation child.
        for child in contents:
            component = SplitQName(child.getTagName())[1]
            if component == 'annotation':
                self.annotation = Annotation(self)
                self.annotation.fromDom(child)
                continue
            break
        else:
            # Only annotations (or no children at all): nothing to parse.
            return
        if component == 'restriction':
            self.content = self.__class__.Restriction(self)
        elif component == 'list':
            self.content = self.__class__.List(self)
        elif component == 'union':
            self.content = self.__class__.Union(self)
        else:
            raise SchemaError, 'Unknown component (%s)' %(component)
        self.content.fromDom(child)
class Restriction(XMLSchemaComponent,\
        RestrictionMarker):
    """<restriction base>
    parents:
        simpleType
    attributes:
        id -- ID
        base -- QName, required or simpleType child
    contents:
        annotation?, simpleType?, (enumeration | length |
        maxExclusive | maxInclusive | maxLength | minExclusive |
        minInclusive | minLength | pattern | fractionDigits |
        totalDigits | whiteSpace)*
    """
    attributes = {'id':None,
        'base':None }
    contents = {'xsd':['annotation', 'simpleType']+RestrictionMarker.facets}
    tag = 'restriction'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        self.content = None

    def getAttributeBase(self):
        # Raw 'base' attribute: QName of the restricted type.
        return XMLSchemaComponent.getAttribute(self, 'base')

    def getTypeDefinition(self, attribute='base'):
        return XMLSchemaComponent.getTypeDefinition(self, attribute)

    def getSimpleTypeContent(self):
        # First anonymous simpleType child, or None.
        for el in self.content:
            if el.isSimple(): return el
        return None

    def fromDom(self, node):
        """Parse children: annotation?, simpleType?, facet* (facets skipped)."""
        self.setAttributes(node)
        contents = self.getContents(node)
        content = []
        for indx in range(len(contents)):
            component = SplitQName(contents[indx].getTagName())[1]
            if (component == 'annotation') and (not indx):
                self.annotation = Annotation(self)
                self.annotation.fromDom(contents[indx])
                continue
            elif (component == 'simpleType') and (not indx or indx == 1):
                content.append(AnonymousSimpleType(self))
                content[-1].fromDom(contents[indx])
            elif component in RestrictionMarker.facets:
                # Facets are recognized but not modeled here.
                pass
            else:
                # BUG FIX: the error path referenced an undefined name 'i'
                # (raising NameError instead of SchemaError); report the
                # offending child element instead.
                raise SchemaError('Unknown component (%s)'\
                    % (contents[indx].getTagName()))
        self.content = tuple(content)
class Union(XMLSchemaComponent,
        UnionMarker):
    """<union>
    parents:
        simpleType
    attributes:
        id -- ID
        memberTypes -- list of QNames, required or simpleType child.
    contents:
        annotation?, simpleType*
    """
    attributes = {'id':None,
        'memberTypes':None }
    contents = {'xsd':['annotation', 'simpleType']}
    tag = 'union'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        self.content = None

    def fromDom(self, node):
        """Parse children: annotation? followed by any number of simpleTypes."""
        self.setAttributes(node)
        contents = self.getContents(node)
        content = []
        for indx in range(len(contents)):
            component = SplitQName(contents[indx].getTagName())[1]
            if (component == 'annotation') and (not indx):
                self.annotation = Annotation(self)
                self.annotation.fromDom(contents[indx])
            elif (component == 'simpleType'):
                content.append(AnonymousSimpleType(self))
                content[-1].fromDom(contents[indx])
            else:
                # BUG FIX: the error path referenced an undefined name 'i'
                # (raising NameError instead of SchemaError); report the
                # offending child element instead.
                raise SchemaError('Unknown component (%s)'\
                    % (contents[indx].getTagName()))
        self.content = tuple(content)
class List(XMLSchemaComponent,
        ListMarker):
    """<list>
    parents:
        simpleType
    attributes:
        id -- ID
        itemType -- QName, required or simpleType child.
    contents:
        annotation?, simpleType?
    """
    attributes = {'id':None,
        'itemType':None }
    contents = {'xsd':['annotation', 'simpleType']}
    tag = 'list'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        self.content = None

    def getItemType(self):
        # Raw 'itemType' attribute value (may be None when a simpleType
        # child supplies the item type instead).
        return self.attributes.get('itemType')

    def getTypeDefinition(self, attribute='itemType'):
        """
        return the type refered to by itemType attribute or
        the simpleType content. If returns None, then the
        type refered to by itemType is primitive.
        """
        tp = XMLSchemaComponent.getTypeDefinition(self, attribute)
        return tp or self.content

    def fromDom(self, node):
        """Parse children: annotation?, then at most one simpleType."""
        self.annotation = None
        self.content = None
        self.setAttributes(node)
        contents = self.getContents(node)
        for indx in range(len(contents)):
            component = SplitQName(contents[indx].getTagName())[1]
            if (component == 'annotation') and (not indx):
                self.annotation = Annotation(self)
                self.annotation.fromDom(contents[indx])
            elif (component == 'simpleType'):
                self.content = AnonymousSimpleType(self)
                self.content.fromDom(contents[indx])
                break
            else:
                # BUG FIX: the error path referenced an undefined name 'i'
                # (raising NameError instead of SchemaError); report the
                # offending child element instead.
                raise SchemaError('Unknown component (%s)'\
                    % (contents[indx].getTagName()))
class AnonymousSimpleType(SimpleType,\
        SimpleMarker,\
        LocalMarker):
    """<simpleType>
    parents:
        attribute, element, list, restriction, union
    attributes:
        id -- ID
    contents:
        annotation?, (restriction | list | union)
    """
    # Anonymous variant: no 'name' required; parsing inherited from SimpleType.
    required = []
    attributes = {'id':None}
    tag = 'simpleType'
class Redefine:
    """<redefine>
    parents:
    attributes:
    contents:
    """
    # Placeholder: <redefine> support is not implemented.
    tag = 'redefine'
###########################
###########################
# Subclassing builtin types requires Python >= 2.2; older interpreters fall
# back to the UserTuple compatibility wrapper.
if sys.version_info[:2] >= (2, 2):
    tupleClass = tuple
else:
    import UserTuple
    tupleClass = UserTuple.UserTuple
class TypeDescriptionComponent(tupleClass):
    """Tuple of length 2, consisting of
    a namespace and unprefixed name.
    """
    def __init__(self, args):
        """args -- (namespace, name)
        Remove the name's prefix, irrelevant.
        """
        if len(args) != 2:
            raise TypeError, 'expecting tuple (namespace, name), got %s' %args
        elif args[1].find(':') >= 0:
            # Strip any 'prefix:' from the name part.
            args = (args[0], SplitQName(args[1])[1])
        tuple.__init__(self, args)
        # NOTE(review): when tupleClass is the builtin tuple, the instance's
        # contents are fixed by tuple.__new__ before __init__ runs, so the
        # prefix stripping above may have no effect on the stored values --
        # TODO confirm against callers.
        return

    def getTargetNamespace(self):
        # Element 0 is the namespace URI.
        return self[0]

    def getName(self):
        # Element 1 is the unprefixed local name.
        return self[1]
|
import datetime
import json
import logging
import re
from BeautifulSoup import BeautifulSoup as soup
from xml.sax.saxutils import escape as xhtml_escape
import tornado.web
from handlers.oauth import OAuth2Handler
from util import dateutils
from util.cache import Cache
from util.config import Config
from util.route import route
# Collapses runs of whitespace to a single space (used to tidy post titles).
space_compress_regex = re.compile(r'\s+')
@route(r'/atom/(\d+)(?:/(\d+))?')
class AtomHandler(tornado.web.RequestHandler):
    """Fetches the public posts for a given G+ user id as an Atom feed."""
    json_url = 'https://www.googleapis.com/plus/v1/people/%s/activities/public?maxResults=10&userIp=%s'
    cache_key_template = 'pluss--gplusid--atom--2--%s'
    ratelimit_key_template = 'pluss--remoteip--ratelimit--1--%s'

    @tornado.web.asynchronous
    def get(self, user_id, page_id):
        """Serve the Atom feed: rate-limit, validate ids, use cache or fetch."""
        # Per-IP rate limiting: at most 60 requests per minute.
        ratelimit_key = self.ratelimit_key_template % self.request.remote_ip
        remote_ip_rate = Cache.incr(ratelimit_key)
        if remote_ip_rate is None:
            Cache.set(ratelimit_key, 1, time=60)
        elif remote_ip_rate > 60:
            self.set_status(503)
            self.set_header('Retry-After', '60')
            self.write('Rate limit exceeded. Please do not make more than 60 requests per minute.')
            # Don't log every single time we rate limit a host (that would get spammy fast),
            # but do log significant breakpoints on exactly how spammy a host is being.
            if remote_ip_rate in (61, 100, 1000, 10000):
                logging.info('Rate limited IP %s - %s requests/min' % (self.request.remote_ip, remote_ip_rate))
            return self.finish()
        self.gplus_user_id = user_id
        self.gplus_page_id = page_id
        if len(user_id) != 21:
            self.write("Google+ profile IDs are exactly 21 digits long. Please specify a proper profile ID.")
            return self.finish()
        if page_id and len(page_id) != 21:
            self.write("Google+ page IDs are exactly 21 digits long. Please specify a proper page ID.")
            # BUG FIX: this branch previously fell through and kept
            # processing the request after writing the error; finish it
            # like the invalid-profile-id branch above does.
            return self.finish()
        self.cache_key = self.cache_key_template % user_id
        if page_id:
            self.cache_key += str(page_id)
        # Serve from cache unless a flush is both allowed and requested.
        cached_result = Cache.get(self.cache_key)
        flush_requested = self.request.arguments.get('flush', [None])[0]
        if cached_result:
            if not Config.getboolean('cache', 'allow-flush') or not flush_requested:
                return self._respond(**cached_result)
        # Fetch the activity stream using the user's stored OAuth2 credentials.
        if page_id:
            OAuth2Handler.authed_fetch(user_id, self.json_url % (page_id, self.request.remote_ip), self._on_api_response)
        else:
            OAuth2Handler.authed_fetch(user_id, self.json_url % ('me', self.request.remote_ip), self._on_api_response)

    def _respond(self, headers=None, body='', **kwargs):
        """Write the response, honoring If-Modified-Since with a 304."""
        if headers is None:
            headers = {}
        # Potentially just send a 304 Not Modified if the browser supports it.
        if 'If-Modified-Since' in self.request.headers:
            remote_timestamp = dateutils.from_http_format(self.request.headers['If-Modified-Since'])
            # This check is necessary because we intentionally don't send Last-Modified for
            # empty feeds - if somehow a post shows up later, we'd want it to get served even if
            # the empty feed is 'newer' than the post (since we use latest post time for Last-Modified)
            if 'Last-Modified' in headers:
                local_timestamp = dateutils.from_http_format(headers['Last-Modified'])
                if local_timestamp <= remote_timestamp:
                    # Hasn't been modified since it was last requested
                    self.set_status(304)
                    return self.finish()
        for (header, value) in headers.iteritems():
            self.set_header(header, value)
        self.write(body)
        return self.finish()

    def _on_api_response(self, response):
        """Turn the G+ API JSON response into an Atom feed and cache it."""
        if response is None:
            logging.error("API request for %s failed." % self.gplus_user_id)
            self.write("Unable to fetch content for this Google+ ID; it may not be authenticated. See http://%s for more information." % self.request.host)
            self.set_status(401)
            return self.finish()
        if response.error:
            if response.code == 403:
                logging.error("API Request 403: %r" % (json.loads(response.body)))
                self.set_status(503)
                self.write("Unable to fulfill request at this time - Google+ API rate limit exceeded.")
                return self.finish()
            else:
                logging.error("AsyncHTTPRequest error: %r, %r" % (response.error, response))
                return self.send_error(500)
        data = json.loads(response.body)
        headers = {'Content-Type': 'application/atom+xml'}
        params = {
            'userid': self.gplus_page_id or self.gplus_user_id,
            'baseurl': 'http://%s' % self.request.host,
            'requesturi': 'http://%s%s' % (self.request.host, self.request.uri.split('?', 1)[0]),
        }
        if 'items' not in data or not data['items']:
            # No public posts: serve the empty feed; deliberately no
            # Last-Modified header (see _respond for the reason).
            params['lastupdate'] = dateutils.to_atom_format(datetime.datetime.today())
            return self._respond(headers, empty_feed_template.format(**params))
        posts = data['items']
        lastupdate = max(dateutils.from_iso_format(p['updated']) for p in posts)
        params['author'] = xhtml_escape(posts[0]['actor']['displayName'])
        params['lastupdate'] = dateutils.to_atom_format(lastupdate)
        headers['Last-Modified'] = dateutils.to_http_format(lastupdate)
        params['entrycontent'] = u''.join(entry_template.format(**get_post_params(p)) for p in posts)
        body = feed_template.format(**params)
        Cache.set(self.cache_key, {'headers': headers, 'body': body}, time=Config.getint('cache', 'stream-expire'))
        return self._respond(headers, body)
def get_post_params(post):
    """Build the entry-template parameters for one G+ activity `post`.

    Returns a dict with keys: title, permalink, postatomdate,
    postatompubdate, postdate, id, summary.
    """
    post_updated = dateutils.from_iso_format(post['updated'])
    post_published = dateutils.from_iso_format(post['published'])
    post_id = post['id']
    permalink = post['url']
    item = post['object']
    # content: HTML fragments for the entry body.
    # content_for_title: text fragments used to derive the entry title.
    content_for_title = []
    if post['verb'] == 'post':
        content = [item['content']]
        content_for_title.extend(content)
    elif post['verb'] == 'share':
        content = [post.get('annotation')] if post.get('annotation') else []
        content_for_title.extend(content)
        if 'actor' in item:
            content.append('<br/><br/>')
            if 'url' in item['actor'] and 'displayName' in item['actor']:
                content.append('<a href="%s">%s</a>' % (item['actor']['url'], item['actor']['displayName']))
                content.append(' originally shared this post: ')
            elif 'displayName' in item['actor']:
                content.append(item['actor']['displayName'])
                content.append(' originally shared this post: ')
            content_for_title.append('Shared from %s: ' % item['actor'].get('displayName', 'elsewhere'))
        content.append('<br/><blockquote>')
        content.append(item['content'])
        content.append('</blockquote>')
        content_for_title.append(item['content'])
    elif post['verb'] == 'checkin':
        content = [item['content']]
        place = post.get('placeName', '')
        if place:
            if item['content']:
                # Add some spacing if there's actually a comment
                content.append('<br/><br/>')
            content.append('Checked in at %s' % place)
        content_for_title.extend(content)
    else:
        content = []
    if 'attachments' in item: # attached content
        for attach in item['attachments']:
            if content:
                content.append('<br/><br/>')
                attach_title = False
            else:
                # Only let attachment text feed the title when the post has
                # no other content.
                attach_title = True
            if attach['objectType'] == 'article':
                # Attached link
                content.append('<a href="%s">%s</a>' % (attach['url'], attach.get('displayName', 'attached link')))
                # Possible attached photo
                if 'image' in attach:
                    content.append('<br/><img src="%s" alt="%s" />' % (attach['image']['url'],
                        attach['image'].get('displayName', 'attached image')))
            elif attach['objectType'] == 'photo':
                # Attached image
                content.append('<img src="%s" alt="%s" />' % (attach['image']['url'],
                    attach['image'].get('displayName', 'attached image'))) # G+ doesn't always supply alt text...
            elif attach['objectType'] == 'photo-album' or attach['objectType'] == 'album':
                # Attached photo album link
                content.append('Album: <a href="%s">%s</a>' % (attach['url'], attach.get('displayName', 'attached album')))
            elif attach['objectType'] == 'video':
                # Attached video
                content.append('Video: <a href="%s">%s</a>' % (attach['url'], attach.get('displayName', 'attached video')))
            else:
                # Unrecognized attachment type
                content.append('[unsupported post attachment of type "%s"]' % attach['objectType'])
            if attach_title:
                content_for_title.extend(content)
    # If no actual parseable content was found, just link to the post
    post_content = u''.join(content).strip() or permalink
    # Generate the post title out of just text [max: 100 characters]
    post_title_content = re.split(r'<br\s*/?>', ''.join(content_for_title))[0]
    post_title = u' '.join(x.string for x in soup(post_title_content).findAll(text=True))
    post_title = space_compress_regex.sub(' ', post_title).strip()
    # BUG FIX: the fallback-title check used to be nested inside the
    # length-truncation branch, so an empty or permalink-only title shorter
    # than 100 characters was never replaced and the fallback was
    # effectively unreachable. Apply the fallback first, then truncate.
    if post_title == permalink or not post_title:
        post_title = u"A public G+ post"
    elif len(post_title) > 100:
        candidate_title = post_title[:97]
        if '&' in candidate_title[-5:]: # Don't risk cutting off HTML entities
            candidate_title = candidate_title.rsplit('&', 1)[0]
        if ' ' in candidate_title[-5:]: # Reasonably avoid cutting off words
            candidate_title = candidate_title.rsplit(' ', 1)[0]
        post_title = u"%s..." % candidate_title
    return {
        'title': post_title,
        'permalink': xhtml_escape(permalink),
        'postatomdate': dateutils.to_atom_format(post_updated),
        'postatompubdate': dateutils.to_atom_format(post_published),
        'postdate': post_published.strftime('%Y-%m-%d'),
        'id': xhtml_escape(post_id),
        'summary': xhtml_escape(post_content),
    }
# Atom <entry> fragment for one post; filled in with get_post_params() output.
entry_template = u"""
<entry>
<title>{title}</title>
<link href="{permalink}" rel="alternate" />
<updated>{postatomdate}</updated>
<published>{postatompubdate}</published>
<id>tag:plus.google.com,{postdate}:/{id}/</id>
<summary type="html">{summary}</summary>
</entry>
"""
# Atom feed wrapper; {entrycontent} is the concatenation of entry fragments.
feed_template = u"""<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en">
<title>{author} - Google+ Public Posts</title>
<link href="https://plus.google.com/{userid}" rel="alternate" />
<link href="{requesturi}" rel="self" />
<id>https://plus.google.com/{userid}</id>
<updated>{lastupdate}</updated>
<author>
<name>{author}</name>
</author>
{entrycontent}
</feed>
"""
# Placeholder feed served when the user has no public posts.
empty_feed_template = u"""<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
<title>No Public Items Found for {userid}</title>
<link href="https://plus.google.com/{userid}" rel="alternate"></link>
<link href="{requesturi}" rel="self"></link>
<id>https://plus.google.com/{userid}</id>
<updated>{lastupdate}</updated>
<entry>
<title>No Public Items Found</title>
<link href="http://plus.google.com/{userid}"/>
<id>https://plus.google.com/{userid}</id>
<updated>{lastupdate}</updated>
<published>{lastupdate}</published>
<summary>Google+ user {userid} has not made any posts public.</summary>
</entry>
</feed>
"""
Fix title logic error
import datetime
import json
import logging
import re
from BeautifulSoup import BeautifulSoup as soup
from xml.sax.saxutils import escape as xhtml_escape
import tornado.web
from handlers.oauth import OAuth2Handler
from util import dateutils
from util.cache import Cache
from util.config import Config
from util.route import route
space_compress_regex = re.compile(r'\s+')
@route(r'/atom/(\d+)(?:/(\d+))?')
class AtomHandler(tornado.web.RequestHandler):
"""Fetches the public posts for a given G+ user id as an Atom feed."""
json_url = 'https://www.googleapis.com/plus/v1/people/%s/activities/public?maxResults=10&userIp=%s'
cache_key_template = 'pluss--gplusid--atom--2--%s'
ratelimit_key_template = 'pluss--remoteip--ratelimit--1--%s'
@tornado.web.asynchronous
def get(self, user_id, page_id):
ratelimit_key = self.ratelimit_key_template % self.request.remote_ip
remote_ip_rate = Cache.incr(ratelimit_key)
if remote_ip_rate is None:
Cache.set(ratelimit_key, 1, time=60)
elif remote_ip_rate > 60:
self.set_status(503)
self.set_header('Retry-After', '60')
self.write('Rate limit exceeded. Please do not make more than 60 requests per minute.')
# Don't log every single time we rate limit a host (that would get spammy fast),
# but do log significant breakpoints on exactly how spammy a host is being.
if remote_ip_rate in (61, 100, 1000, 10000):
logging.info('Rate limited IP %s - %s requests/min' % (self.request.remote_ip, remote_ip_rate))
return self.finish()
self.gplus_user_id = user_id
self.gplus_page_id = page_id
if len(user_id) != 21:
self.write("Google+ profile IDs are exactly 21 digits long. Please specify a proper profile ID.")
return self.finish()
if page_id and len(page_id) != 21:
self.write("Google+ page IDs are exactly 21 digits long. Please specify a proper page ID.")
self.cache_key = self.cache_key_template % user_id
if page_id:
self.cache_key += str(page_id)
cached_result = Cache.get(self.cache_key)
flush_requested = self.request.arguments.get('flush', [None])[0]
if cached_result:
if not Config.getboolean('cache', 'allow-flush') or not flush_requested:
return self._respond(**cached_result)
if page_id:
OAuth2Handler.authed_fetch(user_id, self.json_url % (page_id, self.request.remote_ip), self._on_api_response)
else:
OAuth2Handler.authed_fetch(user_id, self.json_url % ('me', self.request.remote_ip), self._on_api_response)
def _respond(self, headers=None, body='', **kwargs):
if headers is None:
headers = {}
# Potentially just send a 304 Not Modified if the browser supports it.
if 'If-Modified-Since' in self.request.headers:
remote_timestamp = dateutils.from_http_format(self.request.headers['If-Modified-Since'])
# This check is necessary because we intentionally don't send Last-Modified for
# empty feeds - if somehow a post shows up later, we'd want it to get served even if
# the empty feed is 'newer' than the post (since we use latest post time for Last-Modified)
if 'Last-Modified' in headers:
local_timestamp = dateutils.from_http_format(headers['Last-Modified'])
if local_timestamp <= remote_timestamp:
# Hasn't been modified since it was last requested
self.set_status(304)
return self.finish()
for (header, value) in headers.iteritems():
self.set_header(header, value)
self.write(body)
return self.finish()
def _on_api_response(self, response):
if response is None:
logging.error("API request for %s failed." % self.gplus_user_id)
self.write("Unable to fetch content for this Google+ ID; it may not be authenticated. See http://%s for more information." % self.request.host)
self.set_status(401)
return self.finish()
if response.error:
if response.code == 403:
logging.error("API Request 403: %r" % (json.loads(response.body)))
self.set_status(503)
self.write("Unable to fulfill request at this time - Google+ API rate limit exceeded.")
return self.finish()
else:
logging.error("AsyncHTTPRequest error: %r, %r" % (response.error, response))
return self.send_error(500)
else:
data = json.loads(response.body)
headers = {'Content-Type': 'application/atom+xml'}
params = {
'userid': self.gplus_page_id or self.gplus_user_id,
'baseurl': 'http://%s' % self.request.host,
'requesturi': 'http://%s%s' % (self.request.host, self.request.uri.split('?', 1)[0]),
}
if 'items' not in data or not data['items']:
params['lastupdate'] = dateutils.to_atom_format(datetime.datetime.today())
return self._respond(headers, empty_feed_template.format(**params))
posts = data['items']
lastupdate = max(dateutils.from_iso_format(p['updated']) for p in posts)
params['author'] = xhtml_escape(posts[0]['actor']['displayName'])
params['lastupdate'] = dateutils.to_atom_format(lastupdate)
headers['Last-Modified'] = dateutils.to_http_format(lastupdate)
params['entrycontent'] = u''.join(entry_template.format(**get_post_params(p)) for p in posts)
body = feed_template.format(**params)
Cache.set(self.cache_key, {'headers': headers, 'body': body}, time=Config.getint('cache', 'stream-expire'))
return self._respond(headers, body)
def get_post_params(post):
    """Flatten one Google+ activity dict into template substitutions.

    Returns a dict with the keys consumed by entry_template: title,
    permalink, postatomdate, postatompubdate, postdate, id, summary.
    The summary is HTML (escaped on output); the title is derived from
    the first text fragment, capped at ~100 characters.
    """
    post_updated = dateutils.from_iso_format(post['updated'])
    post_published = dateutils.from_iso_format(post['published'])
    post_id = post['id']
    permalink = post['url']
    item = post['object']
    # Fragments used only to derive the <title>; kept separate from the
    # HTML body fragments accumulated in `content`.
    content_for_title = []
    if post['verb'] == 'post':
        content = [item['content']]
        content_for_title.extend(content)
    elif post['verb'] == 'share':
        # A reshare: the sharer's own annotation (if any) leads.
        content = [post.get('annotation')] if post.get('annotation') else []
        content_for_title.extend(content)
        if 'actor' in item:
            content.append('<br/><br/>')
            # Credit the original author, linked when a URL is available.
            if 'url' in item['actor'] and 'displayName' in item['actor']:
                content.append('<a href="%s">%s</a>' % (item['actor']['url'], item['actor']['displayName']))
                content.append(' originally shared this post: ')
            elif 'displayName' in item['actor']:
                content.append(item['actor']['displayName'])
                content.append(' originally shared this post: ')
            content_for_title.append('Shared from %s: ' % item['actor'].get('displayName', 'elsewhere'))
        # The shared post body is quoted below the attribution.
        content.append('<br/><blockquote>')
        content.append(item['content'])
        content.append('</blockquote>')
        content_for_title.append(item['content'])
    elif post['verb'] == 'checkin':
        content = [item['content']]
        place = post.get('placeName', '')
        if place:
            if item['content']:
                # Add some spacing if there's actually a comment
                content.append('<br/><br/>')
            content.append('Checked in at %s' % place)
        content_for_title.extend(content)
    else:
        # Unrecognized verb: no inline content; attachments may still apply.
        content = []
    if 'attachments' in item:  # attached content
        for attach in item['attachments']:
            if content:
                content.append('<br/><br/>')
                attach_title = False
            else:
                # No text so far: let the attachment drive the title too.
                attach_title = True
            if attach['objectType'] == 'article':
                # Attached link
                content.append('<a href="%s">%s</a>' % (attach['url'], attach.get('displayName', 'attached link')))
                # Possible attached photo
                if 'image' in attach:
                    content.append('<br/><img src="%s" alt="%s" />' % (attach['image']['url'],
                                                                       attach['image'].get('displayName', 'attached image')))
            elif attach['objectType'] == 'photo':
                # Attached image
                # NOTE(review): assumes a 'photo' attachment always carries an
                # 'image' key — a KeyError would propagate otherwise; confirm
                # against the API payloads actually seen.
                content.append('<img src="%s" alt="%s" />' % (attach['image']['url'],
                                                              attach['image'].get('displayName', 'attached image')))  # G+ doesn't always supply alt text...
            elif attach['objectType'] == 'photo-album' or attach['objectType'] == 'album':
                # Attached photo album link
                content.append('Album: <a href="%s">%s</a>' % (attach['url'], attach.get('displayName', 'attached album')))
            elif attach['objectType'] == 'video':
                # Attached video
                content.append('Video: <a href="%s">%s</a>' % (attach['url'], attach.get('displayName', 'attached video')))
            else:
                # Unrecognized attachment type
                content.append('[unsupported post attachment of type "%s"]' % attach['objectType'])
            if attach_title:
                content_for_title.extend(content)
    # If no actual parseable content was found, just link to the post
    post_content = u''.join(content).strip() or permalink
    # Generate the post title out of just text [max: 100 characters]
    # Take everything before the first <br>, strip tags via soup, and
    # collapse runs of whitespace.
    post_title_content = re.split(r'<br\s*/?>', ''.join(content_for_title))[0]
    post_title = u' '.join(x.string for x in soup(post_title_content).findAll(text=True))
    post_title = space_compress_regex.sub(' ', post_title).strip()
    if post_title == permalink or not post_title:
        post_title = u"A public G+ post"
    else:
        if len(post_title) > 100:
            candidate_title = post_title[:97]
            if '&' in candidate_title[-5:]:  # Don't risk cutting off HTML entities
                candidate_title = candidate_title.rsplit('&', 1)[0]
            if ' ' in candidate_title[-5:]:  # Reasonably avoid cutting off words
                candidate_title = candidate_title.rsplit(' ', 1)[0]
            post_title = u"%s..." % candidate_title
    # NOTE(review): 'title' is not passed through xhtml_escape here although
    # entry_template embeds it raw — soup() strips tags but entities may
    # survive; verify this cannot produce invalid XML.
    return {
        'title': post_title,
        'permalink': xhtml_escape(permalink),
        'postatomdate': dateutils.to_atom_format(post_updated),
        'postatompubdate': dateutils.to_atom_format(post_published),
        'postdate': post_published.strftime('%Y-%m-%d'),
        'id': xhtml_escape(post_id),
        'summary': xhtml_escape(post_content),
    }
entry_template = u"""
<entry>
<title>{title}</title>
<link href="{permalink}" rel="alternate" />
<updated>{postatomdate}</updated>
<published>{postatompubdate}</published>
<id>tag:plus.google.com,{postdate}:/{id}/</id>
<summary type="html">{summary}</summary>
</entry>
"""
feed_template = u"""<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en">
<title>{author} - Google+ Public Posts</title>
<link href="https://plus.google.com/{userid}" rel="alternate" />
<link href="{requesturi}" rel="self" />
<id>https://plus.google.com/{userid}</id>
<updated>{lastupdate}</updated>
<author>
<name>{author}</name>
</author>
{entrycontent}
</feed>
"""
empty_feed_template = u"""<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
<title>No Public Items Found for {userid}</title>
<link href="https://plus.google.com/{userid}" rel="alternate"></link>
<link href="{requesturi}" rel="self"></link>
<id>https://plus.google.com/{userid}</id>
<updated>{lastupdate}</updated>
<entry>
<title>No Public Items Found</title>
<link href="http://plus.google.com/{userid}"/>
<id>https://plus.google.com/{userid}</id>
<updated>{lastupdate}</updated>
<published>{lastupdate}</published>
<summary>Google+ user {userid} has not made any posts public.</summary>
</entry>
</feed>
"""
|
# -*- coding: utf-8 -*-
##
## $Id$
##
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""CDS Invenio ACCOUNT HANDLING"""
__revision__ = "$Id$"
__lastupdated__ = """$Date$"""
try:
from mod_python import apache
except ImportError:
pass
from datetime import timedelta
from invenio.config import \
CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS, \
CFG_ACCESS_CONTROL_LEVEL_SITE, \
CFG_ACCESS_CONTROL_NOTIFY_USER_ABOUT_NEW_ACCOUNT, \
CFG_SITE_NAME, \
CFG_SITE_NAME_INTL, \
CFG_SITE_SUPPORT_EMAIL, \
CFG_SITE_SECURE_URL, \
CFG_SITE_URL, \
CFG_CERN_SITE, \
CFG_WEBSESSION_RESET_PASSWORD_EXPIRE_IN_DAYS
from invenio import webuser
from invenio.webpage import page
from invenio import webaccount
from invenio import webbasket
from invenio import webalert
from invenio.dbquery import run_sql
from invenio.webmessage import account_new_mail
from invenio.access_control_engine import make_apache_message, make_list_apache_firerole
from invenio.webinterface_handler import wash_urlargd, WebInterfaceDirectory
from invenio.urlutils import redirect_to_url, make_canonical_urlargd
from invenio import webgroup
from invenio import webgroup_dblayer
from invenio.messages import gettext_set_language, wash_language
from invenio.mailutils import send_email
from invenio.access_control_mailcookie import mail_cookie_retrieve_kind, \
mail_cookie_check_pw_reset, mail_cookie_delete_cookie, \
mail_cookie_create_pw_reset, mail_cookie_check_role, \
mail_cookie_check_mail_activation, InvenioWebAccessMailCookieError, \
InvenioWebAccessMailCookieDeletedError, mail_cookie_check_authorize_action
from invenio.access_control_config import CFG_WEBACCESS_WARNING_MSGS, \
CFG_EXTERNAL_AUTH_USING_SSO, CFG_EXTERNAL_AUTH_LOGOUT_SSO, \
CFG_EXTERNAL_AUTHENTICATION
import invenio.template
websession_templates = invenio.template.load('websession')
class WebInterfaceYourAccountPages(WebInterfaceDirectory):
_exports = ['', 'edit', 'change', 'lost', 'display',
'send_email', 'youradminactivities', 'access',
'delete', 'logout', 'login', 'register', 'resetpassword']
_force_https = True
def index(self, req, form):
    """Default entry point: redirect to the account display page over HTTPS."""
    redirect_to_url(req, '%s/youraccount/display' % CFG_SITE_SECURE_URL)
def access(self, req, form):
    """Dispatch a mail-cookie link to the matching service.

    The opaque 'mailcookie' token's kind decides the action:
    'pw_reset' redirects to the reset page, 'role' grants a temporary
    role authorization, 'mail_activation' confirms an email address.
    Invalid/expired cookies render a not-authorized page.
    """
    args = wash_urlargd(form, {'mailcookie' : (str, '')})
    _ = gettext_set_language(args['ln'])
    title = _("Mail Cookie Service")
    try:
        kind = mail_cookie_retrieve_kind(args['mailcookie'])
        if kind == 'pw_reset':
            # Delegate to the dedicated reset-password handler.
            redirect_to_url(req, '%s/youraccount/resetpassword?k=%s&ln=%s' % (CFG_SITE_SECURE_URL, args['mailcookie'], args['ln']))
        elif kind == 'role':
            uid = webuser.getUid(req)
            try:
                (role_name, expiration) = mail_cookie_check_role(args['mailcookie'], uid)
            except InvenioWebAccessMailCookieDeletedError:
                # The cookie was already consumed: the grant happened earlier.
                return page(title=_("Role authorization request"), req=req, body=_("This request for an authorization has already been authorized."), uid=webuser.getUid(req), navmenuid='youraccount', language=args['ln'])
            return page(title=title,
                        body=webaccount.perform_back(
                            _("You have successfully obtained an authorization as %(x_role)s! "
                              "This authorization will last until %(x_expiration)s and until "
                              "you close your browser if you are a guest user.") %
                            {'x_role' : '<strong>%s</strong>' % role_name,
                             'x_expiration' : '<em>%s</em>' % expiration.strftime("%Y-%m-%d %H:%M:%S")},
                            'login', _('login'), args['ln']),
                        req=req,
                        uid=webuser.getUid(req),
                        language=args['ln'],
                        lastupdated=__lastupdated__,
                        navmenuid='youraccount')
        elif kind == 'mail_activation':
            try:
                email = mail_cookie_check_mail_activation(args['mailcookie'])
                if not email:
                    raise StandardError
                webuser.confirm_email(email)
                body = "<p>" + _("You have confirmed the validity of your email"
                                 " address!") + "</p>"
                if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS == 1:
                    # Accounts additionally need manual admin approval.
                    body += "<p>" + _("Please, wait for the administrator to "
                                      "enable your account.") + "</p>"
                else:
                    # Log the user straight in on successful activation.
                    uid = webuser.update_Uid(req, email)
                    body += "<p>" + _("You can now go to %(x_url_open)syour account page%(x_url_close)s.") % {'x_url_open' : '<a href="/youraccount/display?ln=%s">' % args['ln'], 'x_url_close' : '</a>'} + "</p>"
                return page(title=_("Email address successfully activated"),
                            body=body, req=req, language=args['ln'], uid=webuser.getUid(req), lastupdated=__lastupdated__, navmenuid='youraccount')
            except InvenioWebAccessMailCookieDeletedError, e:
                # Cookie already used: the address is confirmed; just say so.
                body = "<p>" + _("You have already confirmed the validity of your email address!") + "</p>"
                if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS == 1:
                    body += "<p>" + _("Please, wait for the administrator to "
                                      "enable your account.") + "</p>"
                else:
                    body += "<p>" + _("You can now go to %(x_url_open)syour account page%(x_url_close)s.") % {'x_url_open' : '<a href="/youraccount/display?ln=%s">' % args['ln'], 'x_url_close' : '</a>'} + "</p>"
                return page(title=_("Email address successfully activated"),
                            body=body, req=req, language=args['ln'], uid=webuser.getUid(req), lastupdated=__lastupdated__, navmenuid='youraccount')
            # NOTE(review): both the try body and the except handler end in
            # return, so this fallback looks unreachable — confirm intent.
            return webuser.page_not_authorized(req, "../youraccount/access",
                                               text=_("This request for confirmation of an email "
                                                      "address is not valid or"
                                                      " is expired."), navmenuid='youraccount')
    except InvenioWebAccessMailCookieError:
        return webuser.page_not_authorized(req, "../youraccount/access",
                                           text=_("This request for an authorization is not valid or"
                                                  " is expired."), navmenuid='youraccount')
def resetpassword(self, req, form):
    """Let the holder of a valid password-reset cookie set a new password.

    'k' is the mail-cookie key delivered by email; 'reset' is 0 for the
    initial form display and non-zero for the actual submission.
    """
    args = wash_urlargd(form, {
        'k' : (str, ''),
        'reset' : (int, 0),
        'password' : (str, ''),
        'password2' : (str, '')
        })
    _ = gettext_set_language(args['ln'])
    # Resolve the cookie to the email address it was issued for
    # (None when the cookie is unknown or expired).
    email = mail_cookie_check_pw_reset(args['k'])
    reset_key = args['k']
    title = _('Reset password')
    if email is None or CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS >= 3:
        return webuser.page_not_authorized(req, "../youraccount/resetpassword",
                                           text=_("This request for resetting the password is not valid or"
                                                  " is expired."), navmenuid='youraccount')
    if not args['reset']:
        # First visit: show the new-password form.
        return page(title=title,
                    body=webaccount.perform_reset_password(args['ln'], email, reset_key),
                    req=req,
                    secure_page_p = 1,
                    language=args['ln'],
                    lastupdated=__lastupdated__,
                    navmenuid='youraccount')
    elif args['password'] != args['password2']:
        msg = _('The two provided passwords aren\'t equal.')
        return page(title=title,
                    body=webaccount.perform_reset_password(args['ln'], email, reset_key, msg),
                    req=req,
                    secure_page_p = 1,
                    language=args['ln'],
                    lastupdated=__lastupdated__,
                    navmenuid='youraccount')
    # Store the new password.  The historical scheme keeps
    # AES_ENCRYPT(email, password) in the password column — the same
    # expression the login check in change() compares against.
    run_sql('UPDATE user SET password=AES_ENCRYPT(email,%s) WHERE email=%s', (args['password'], email))
    # One-shot cookie: invalidate it after a successful reset.
    mail_cookie_delete_cookie(reset_key)
    return page(title=title,
                body=webaccount.perform_back(
                    _("The password was successfully set! "
                      "You can now proceed with the login."),
                    'login', _('login'), args['ln']),
                req=req,
                language=args['ln'],
                lastupdated=__lastupdated__,
                navmenuid='youraccount')
def display(self, req, form):
    """Render the "Your Account" overview page.

    Guests get the informational page; authenticated users get their
    baskets, alerts, searches, messages and groups summary.
    """
    args = wash_urlargd(form, {})
    uid = webuser.getUid(req)
    # load the right message language
    _ = gettext_set_language(args['ln'])
    if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
        return webuser.page_not_authorized(req, "../youraccount/display",
                                           navmenuid='youraccount')
    site_name = CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME)
    # Keyword arguments shared by both page() calls below.
    shared_kwargs = {
        'title': _("Your Account"),
        'description': "%s Personalize, Main page" % site_name,
        'keywords': _("%s, personalize") % site_name,
        'uid': uid,
        'req': req,
        'secure_page_p': 1,
        'language': args['ln'],
        'lastupdated': __lastupdated__,
        'navmenuid': 'youraccount',
    }
    if webuser.isGuestUser(uid):
        return page(body=webaccount.perform_info(req, args['ln']),
                    **shared_kwargs)
    nickname = webuser.get_nickname_or_email(uid)
    baskets = webbasket.account_list_baskets(uid, ln=args['ln'])
    alerts = webalert.account_list_alerts(uid, ln=args['ln'])
    searches = webalert.account_list_searches(uid, ln=args['ln'])
    messages = account_new_mail(uid, ln=args['ln'])
    groups = webgroup.account_group(uid, ln=args['ln'])
    account_body = webaccount.perform_display_account(req, nickname, baskets,
                                                      alerts, searches,
                                                      messages, groups,
                                                      args['ln'])
    return page(body=account_body, **shared_kwargs)
def edit(self, req, form):
    """Render the "Your Settings" page for the logged-in user.

    With verbose=9 the collected user_info dict is dumped at the top of
    the page as a debugging aid.  Guests are not allowed here.
    """
    args = wash_urlargd(form, {"verbose" : (int, 0)})
    uid = webuser.getUid(req)
    # load the right message language
    _ = gettext_set_language(args['ln'])
    if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
        return webuser.page_not_authorized(req, "../youraccount/edit",
                                           navmenuid='youraccount')
    if webuser.isGuestUser(uid):
        return webuser.page_not_authorized(req, "../youraccount/edit",
                                           text=_("This functionality is forbidden to guest users."),
                                           navmenuid='youraccount')
    body = ''
    if args['verbose'] == 9:
        # Debug mode: prepend a dump of the whole user_info structure.
        user_info = webuser.collect_user_info(req)
        for key, value in user_info.items():
            body += "<b>%s</b>:%s<br />" % (key, value)
    return page(title= _("Your Settings"),
                body=body+webaccount.perform_set(webuser.get_email(uid),
                                                 args['ln'], verbose=args['verbose']),
                navtrail="""<a class="navtrail" href="%s/youraccount/display?ln=%s">""" % (CFG_SITE_SECURE_URL, args['ln']) + _("Your Account") + """</a>""",
                description=_("%s Personalize, Your Settings") % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                keywords=_("%s, personalize") % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                uid=uid,
                req=req,
                secure_page_p = 1,
                language=args['ln'],
                lastupdated=__lastupdated__,
                navmenuid='youraccount')
def change(self, req, form):
    """Apply one kind of settings change for the logged-in user.

    Exactly one of the mutually exclusive parameter groups is acted on
    per request, tried in order: login-method switch, email/nickname
    update, password change, search-display preferences, interface
    language.  Always ends by rendering a confirmation or error page.
    """
    args = wash_urlargd(form, {
        'nickname': (str, None),
        'email': (str, None),
        'old_password': (str, None),
        'password': (str, None),
        'password2': (str, None),
        'login_method': (str, ""),
        'group_records' : (int, None),
        'latestbox' : (int, None),
        'helpbox' : (int, None),
        'lang' : (str, None),
        })
    uid = webuser.getUid(req)
    # load the right message language
    _ = gettext_set_language(args['ln'])
    if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
        return webuser.page_not_authorized(req, "../youraccount/change",
                                           navmenuid='youraccount')
    prefs = webuser.get_user_preferences(uid)
    mess = ''
    if args['email']:
        # Email addresses are stored lower-case.
        args['email'] = args['email'].lower()
    if args['login_method'] and CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS < 4 \
            and args['login_method'] in CFG_EXTERNAL_AUTHENTICATION.keys():
        title = _("Settings edited")
        act = "display"
        linkname = _("Show account")
        if prefs['login_method'] != args['login_method']:
            if not CFG_EXTERNAL_AUTHENTICATION[args['login_method']][0]:
                # Switching to internal authentication: we drop any external datas
                p_email = webuser.get_email(uid)
                webuser.drop_external_settings(uid)
                webgroup_dblayer.drop_external_groups(uid)
                prefs['login_method'] = args['login_method']
                webuser.set_user_preferences(uid, prefs)
                mess = "<p>" + _("Switched to internal login method.") + " "
                mess += _("Please note that if this is the first time that you are using this account "
                          "with the internal login method then the system has set for you "
                          "a randomly generated password. Please clic the "
                          "following button to obtain a password reset request "
                          "link sent to you via email:") + '</p>'
                mess += """<p><form method="post" action="../youraccount/send_email">
                           <input type="hidden" name="p_email" value="%s">
                           <input class="formbutton" type="submit" value="%s">
                           </form></p>""" % (p_email, _("Send Password"))
            else:
                # Switching to an external method: the external system
                # must already know this user's email address.
                query = """SELECT email FROM user
                           WHERE id = %i"""
                res = run_sql(query % uid)
                if res:
                    email = res[0][0]
                else:
                    email = None
                if not email:
                    mess = _("Unable to switch to external login method %s, because your email address is unknown.") % args['login_method']
                else:
                    try:
                        if not CFG_EXTERNAL_AUTHENTICATION[args['login_method']][0].user_exists(email):
                            mess = _("Unable to switch to external login method %s, because your email address is unknown to the external login system.") % args['login_method']
                        else:
                            prefs['login_method'] = args['login_method']
                            webuser.set_user_preferences(uid, prefs)
                            mess = _("Login method successfully selected.")
                    except AttributeError:
                        # The auth plugin has no user_exists() hook.
                        mess = _("The external login method %s does not support email address based logins. Please contact the site administrators.") % args['login_method']
    elif args['login_method'] and CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS >= 4:
        return webuser.page_not_authorized(req, "../youraccount/change",
                                           navmenuid='youraccount')
    elif args['email']:
        # We should ignore the password if the authentication method is an
        # external one.
        # NOTE(review): ignore_password_p is computed but never used in
        # this branch — confirm whether it is dead code.
        ignore_password_p = CFG_EXTERNAL_AUTHENTICATION[prefs['login_method']][0] != None
        uid2 = webuser.emailUnique(args['email'])
        uid_with_the_same_nickname = webuser.nicknameUnique(args['nickname'])
        # Accept the update only when email/nickname are valid and not
        # already claimed by a different account (-1 means lookup error).
        if (CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS >= 2 or (CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS <= 1 and \
                webuser.email_valid_p(args['email']))) \
                and (args['nickname'] is None or webuser.nickname_valid_p(args['nickname'])) \
                and uid2 != -1 and (uid2 == uid or uid2 == 0) \
                and uid_with_the_same_nickname != -1 and (uid_with_the_same_nickname == uid or uid_with_the_same_nickname == 0):
            if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS < 3:
                # Local name intentionally mirrors the method name; it
                # holds the success flag of the update.
                change = webuser.updateDataUser(uid,
                                                args['email'],
                                                args['nickname'])
            else:
                return webuser.page_not_authorized(req, "../youraccount/change",
                                                   navmenuid='youraccount')
            if change:
                mess = _("Settings successfully edited.")
            act = "display"
            linkname = _("Show account")
            title = _("Settings edited")
        elif args['nickname'] is not None and not webuser.nickname_valid_p(args['nickname']):
            mess = _("Desired nickname %s is invalid.") % args['nickname']
            mess += " " + _("Please try again.")
            act = "edit"
            linkname = _("Edit settings")
            title = _("Editing settings failed")
        elif not webuser.email_valid_p(args['email']):
            mess = _("Supplied email address %s is invalid.") % args['email']
            mess += " " + _("Please try again.")
            act = "edit"
            linkname = _("Edit settings")
            title = _("Editing settings failed")
        elif uid2 == -1 or uid2 != uid and not uid2 == 0:
            mess = _("Supplied email address %s already exists in the database.") % args['email']
            mess += " " + websession_templates.tmpl_lost_your_password_teaser(args['ln'])
            mess += " " + _("Or please try again.")
            act = "edit"
            linkname = _("Edit settings")
            title = _("Editing settings failed")
        elif uid_with_the_same_nickname == -1 or uid_with_the_same_nickname != uid and not uid_with_the_same_nickname == 0:
            mess = _("Desired nickname %s is already in use.") % args['nickname']
            mess += " " + _("Please try again.")
            act = "edit"
            linkname = _("Edit settings")
            title = _("Editing settings failed")
    elif args['old_password'] != None and CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS < 3:
        # Verify the old password: the stored value is
        # AES_ENCRYPT(email, password), so recompute and compare in SQL.
        res = run_sql("SELECT id FROM user "
                      "WHERE AES_ENCRYPT(email,%s)=password AND id=%s",
                      (args['old_password'], uid))
        if res:
            if args['password'] == args['password2']:
                webuser.updatePasswordUser(uid, args['password'])
                mess = _("Password successfully edited.")
                act = "display"
                linkname = _("Show account")
                title = _("Password edited")
            else:
                mess = _("Both passwords must match.")
                mess += " " + _("Please try again.")
                act = "edit"
                linkname = _("Edit settings")
                title = _("Editing password failed")
        else:
            mess = _("Wrong old password inserted.")
            mess += " " + _("Please try again.")
            act = "edit"
            linkname = _("Edit settings")
            title = _("Editing password failed")
    elif args['group_records']:
        # Search-display preferences.
        prefs = webuser.get_user_preferences(uid)
        prefs['websearch_group_records'] = args['group_records']
        prefs['websearch_latestbox'] = args['latestbox']
        prefs['websearch_helpbox'] = args['helpbox']
        webuser.set_user_preferences(uid, prefs)
        title = _("Settings edited")
        act = "display"
        linkname = _("Show account")
        mess = _("User settings saved correctly.")
    elif args['lang']:
        # Interface language: also switch the current request's language.
        lang = wash_language(args['lang'])
        prefs = webuser.get_user_preferences(uid)
        prefs['language'] = lang
        args['ln'] = lang
        _ = gettext_set_language(lang)
        webuser.set_user_preferences(uid, prefs)
        title = _("Settings edited")
        act = "display"
        linkname = _("Show account")
        mess = _("User settings saved correctly.")
    else:
        mess = _("Unable to update settings.")
        act = "edit"
        linkname = _("Edit settings")
        title = _("Editing settings failed")
    return page(title=title,
                body=webaccount.perform_back(mess, act, linkname, args['ln']),
                navtrail="""<a class="navtrail" href="%s/youraccount/display?ln=%s">""" % (CFG_SITE_SECURE_URL, args['ln']) + _("Your Account") + """</a>""",
                description="%s Personalize, Main page" % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                keywords=_("%s, personalize") % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                uid=uid,
                req=req,
                secure_page_p = 1,
                language=args['ln'],
                lastupdated=__lastupdated__,
                navmenuid='youraccount')
def lost(self, req, form):
    """Render the "lost your password" page."""
    args = wash_urlargd(form, {})
    user_id = webuser.getUid(req)
    # load the right message language
    _ = gettext_set_language(args['ln'])
    if user_id == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
        return webuser.page_not_authorized(req, "../youraccount/lost",
                                           navmenuid='youraccount')
    site_name = CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME)
    trail = """<a class="navtrail" href="%s/youraccount/display?ln=%s">""" % (
        CFG_SITE_SECURE_URL, args['ln']) + _("Your Account") + """</a>"""
    return page(title=_("Lost your password?"),
                body=webaccount.perform_lost(args['ln']),
                navtrail=trail,
                description="%s Personalize, Main page" % site_name,
                keywords=_("%s, personalize") % site_name,
                uid=user_id,
                req=req,
                secure_page_p=1,
                language=args['ln'],
                lastupdated=__lastupdated__,
                navmenuid='youraccount')
def send_email(self, req, form):
    """Email a password-reset link to the given address.

    Refuses to act when the target account authenticates against an
    external system (the local password is meaningless there), and
    renders an error page when the address is unknown or the mail
    cannot be delivered.

    NOTE: this method intentionally shadows the module-level
    send_email() imported from invenio.mailutils; inside the body the
    bare name still resolves to that module-level function (globals),
    not to this method.
    """
    # set all the declared query fields as local variables
    args = wash_urlargd(form, {'p_email': (str, None)})
    uid = webuser.getUid(req)
    # load the right message language
    _ = gettext_set_language(args['ln'])
    if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
        return webuser.page_not_authorized(req, "../youraccount/send_email",
                                           navmenuid='youraccount')
    user_prefs = webuser.get_user_preferences(webuser.emailUnique(args['p_email']))
    if user_prefs:
        if user_prefs['login_method'] in CFG_EXTERNAL_AUTHENTICATION and \
           CFG_EXTERNAL_AUTHENTICATION[user_prefs['login_method']][0] is not None:
            # Externally authenticated accounts have no local password.
            eMsg = _("Cannot send password reset request since you are using external authentication system.")
            return page(title=_("Your Account"),
                        body=webaccount.perform_emailMessage(eMsg, args['ln']),
                        description="%s Personalize, Main page" % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                        # BUGFIX: the %-substitution used to happen *inside*
                        # _(), so the already-formatted string was looked up
                        # in the translation catalogue and never matched.
                        # Translate the literal msgid first, then substitute,
                        # matching every sibling method in this class.
                        keywords=_("%s, personalize") % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                        uid=uid, req=req,
                        secure_page_p = 1,
                        language=args['ln'],
                        lastupdated=__lastupdated__,
                        navmenuid='youraccount')
    try:
        reset_key = mail_cookie_create_pw_reset(args['p_email'], cookie_timeout=timedelta(days=CFG_WEBSESSION_RESET_PASSWORD_EXPIRE_IN_DAYS))
    except InvenioWebAccessMailCookieError:
        # Unknown email address; signalled below via reset_key is None.
        reset_key = None
    if reset_key is None:
        eMsg = _("The entered email address does not exist in the database.")
        return page(title=_("Your Account"),
                    body=webaccount.perform_emailMessage(eMsg, args['ln']),
                    description="%s Personalize, Main page" % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                    keywords=_("%s, personalize") % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                    uid=uid, req=req,
                    secure_page_p = 1,
                    language=args['ln'],
                    lastupdated=__lastupdated__,
                    navmenuid='youraccount')
    # Included in the reset email so the recipient can see who asked.
    ip_address = req.connection.remote_host or req.connection.remote_ip
    if not send_email(CFG_SITE_SUPPORT_EMAIL, args['p_email'], "%s %s"
                      % (_("Password reset request for"),
                         CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME)),
                      websession_templates.tmpl_account_reset_password_email_body(
                          args['p_email'], reset_key, ip_address, args['ln'])):
        eMsg = _("The entered email address is incorrect, please check that it is written correctly (e.g. johndoe@example.com).")
        return page(title=_("Incorrect email address"),
                    body=webaccount.perform_emailMessage(eMsg, args['ln']),
                    description="%s Personalize, Main page" % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                    keywords=_("%s, personalize") % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                    uid=uid,
                    req=req,
                    secure_page_p = 1,
                    language=args['ln'],
                    lastupdated=__lastupdated__,
                    navmenuid='youraccount')
    return page(title=_("Reset password link sent"),
                body=webaccount.perform_emailSent(args['p_email'], args['ln']),
                description="%s Personalize, Main page" % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                keywords=_("%s, personalize") % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                uid=uid, req=req,
                secure_page_p = 1,
                language=args['ln'],
                lastupdated=__lastupdated__,
                navmenuid='youraccount')
def youradminactivities(self, req, form):
    """Show the administrative activities available to the current user."""
    args = wash_urlargd(form, {})
    uid = webuser.getUid(req)
    user_info = webuser.collect_user_info(req)
    # load the right message language
    _ = gettext_set_language(args['ln'])
    if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
        return webuser.page_not_authorized(req, "../youraccount/youradminactivities",
                                           navmenuid='admin')
    site_name = CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME)
    trail = """<a class="navtrail" href="%s/youraccount/display?ln=%s">""" % (
        CFG_SITE_SECURE_URL, args['ln']) + _("Your Account") + """</a>"""
    return page(title=_("Your Administrative Activities"),
                body=webaccount.perform_youradminactivities(user_info, args['ln']),
                navtrail=trail,
                description="%s Personalize, Main page" % site_name,
                keywords=_("%s, personalize") % site_name,
                uid=uid,
                req=req,
                secure_page_p=1,
                language=args['ln'],
                lastupdated=__lastupdated__,
                navmenuid='admin')
def delete(self, req, form):
    """Render the account-deletion page."""
    args = wash_urlargd(form, {})
    user_id = webuser.getUid(req)
    # load the right message language
    _ = gettext_set_language(args['ln'])
    if user_id == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
        return webuser.page_not_authorized(req, "../youraccount/delete",
                                           navmenuid='youraccount')
    site_name = CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME)
    trail = """<a class="navtrail" href="%s/youraccount/display?ln=%s">""" % (
        CFG_SITE_SECURE_URL, args['ln']) + _("Your Account") + """</a>"""
    return page(title=_("Delete Account"),
                body=webaccount.perform_delete(args['ln']),
                navtrail=trail,
                description="%s Personalize, Main page" % site_name,
                keywords=_("%s, personalize") % site_name,
                uid=user_id,
                req=req,
                secure_page_p=1,
                language=args['ln'],
                lastupdated=__lastupdated__,
                navmenuid='youraccount')
def logout(self, req, form):
    """Terminate the session and show the logout confirmation page.

    Under SSO the logout is delegated to the external single-sign-on
    logout URL instead of rendering a local page.
    """
    args = wash_urlargd(form, {})
    uid = webuser.logoutUser(req)
    # load the right message language
    _ = gettext_set_language(args['ln'])
    if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
        return webuser.page_not_authorized(req, "../youraccount/logout",
                                           navmenuid='youraccount')
    if CFG_EXTERNAL_AUTH_USING_SSO:
        # SSO sessions must be terminated by the SSO system itself.
        return redirect_to_url(req, CFG_EXTERNAL_AUTH_LOGOUT_SSO)
    site_name = CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME)
    trail = """<a class="navtrail" href="%s/youraccount/display?ln=%s">""" % (
        CFG_SITE_SECURE_URL, args['ln']) + _("Your Account") + """</a>"""
    return page(title=_("Logout"),
                body=webaccount.perform_logout(req, args['ln']),
                navtrail=trail,
                description="%s Personalize, Main page" % site_name,
                keywords=_("%s, personalize") % site_name,
                uid=uid,
                req=req,
                secure_page_p=1,
                language=args['ln'],
                lastupdated=__lastupdated__,
                navmenuid='youraccount')
def login(self, req, form):
    """Authenticate the user (form-based or SSO) and open a session.

    With SSO enabled, credentials come from the environment; otherwise
    an empty username/login_method renders the login form.  On success
    the user is redirected to 'referer' or shown the account page; on
    failure the matching CFG_WEBACCESS_WARNING_MSGS message is shown.
    """
    args = wash_urlargd(form, {
        'p_un': (str, None),
        'p_pw': (str, None),
        'login_method': (str, None),
        'action': (str, ''),
        'remember_me' : (str, ''),
        'referer': (str, '')})
    if args['p_un']:
        args['p_un'] = args['p_un'].strip()
    # Normalize the checkbox value to a boolean.
    args['remember_me'] = args['remember_me'] != ''
    # BUGFIX: removed `locals().update(args)` — mutating the dict
    # returned by locals() does not create local variables in CPython
    # (documented behaviour), so the statement was a misleading no-op;
    # the rest of the method already reads args[...] directly.
    if CFG_ACCESS_CONTROL_LEVEL_SITE > 0:
        return webuser.page_not_authorized(req, "../youraccount/login?ln=%s" % args['ln'],
                                           navmenuid='youraccount')
    uid = webuser.getUid(req)
    # load the right message language
    _ = gettext_set_language(args['ln'])
    apache_msg = ""
    if args['action']:
        # 'action' carries an authorize-action mail cookie.
        cookie = args['action']
        try:
            action, arguments = mail_cookie_check_authorize_action(cookie)
            apache_msg = make_apache_message(action, arguments, args['referer'])
            # FIXME: Temporary Hack to help CDS current migration
            if CFG_CERN_SITE:
                roles = make_list_apache_firerole(action, arguments)
                if len(roles) == 1:
                    # There's only one role enabled to see this collection
                    # Let's redirect to log to it!
                    return redirect_to_url(req, '%s/%s' % (CFG_SITE_SECURE_URL, make_canonical_urlargd({'realm' : roles[0][0], 'referer' : args['referer']}, {})))
        except InvenioWebAccessMailCookieError:
            # Invalid/expired cookie: fall through to the normal login.
            pass
    if not CFG_EXTERNAL_AUTH_USING_SSO:
        if args['p_un'] is None or not args['login_method']:
            # No credentials submitted yet: show the login form.
            return page(title=_("Login"),
                        body=webaccount.create_login_page_box(args['referer'], apache_msg, args['ln']),
                        navtrail="""<a class="navtrail" href="%s/youraccount/display?ln=%s">""" % (CFG_SITE_SECURE_URL, args['ln']) + _("Your Account") + """</a>""",
                        description="%s Personalize, Main page" % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                        keywords="%s , personalize" % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                        uid=uid,
                        req=req,
                        secure_page_p = 1,
                        language=args['ln'],
                        lastupdated=__lastupdated__,
                        navmenuid='youraccount')
        (iden, args['p_un'], args['p_pw'], msgcode) = webuser.loginUser(req, args['p_un'], args['p_pw'], args['login_method'])
    else:
        # Fake parameters for p_un & p_pw because SSO takes them from the environment
        (iden, args['p_un'], args['p_pw'], msgcode) = webuser.loginUser(req, '', '', CFG_EXTERNAL_AUTH_USING_SSO)
        args['remember_me'] = True
    if len(iden)>0:
        # Credentials accepted: bind the session to this user.
        uid = webuser.update_Uid(req, args['p_un'], args['remember_me'])
        uid2 = webuser.getUid(req)
        if uid2 == -1:
            # Session could not be established (e.g. account disabled).
            webuser.logoutUser(req)
            return webuser.page_not_authorized(req, "../youraccount/login?ln=%s" % args['ln'], uid=uid,
                                               navmenuid='youraccount')
        # login successful!
        if args['referer']:
            redirect_to_url(req, args['referer'])
        else:
            return self.display(req, form)
    else:
        mess = CFG_WEBACCESS_WARNING_MSGS[msgcode] % args['login_method']
        if msgcode == 14:
            # Distinguish "wrong password" from "unknown user".
            if webuser.username_exists_p(args['p_un']):
                mess = CFG_WEBACCESS_WARNING_MSGS[15] % args['login_method']
        act = "login"
        return page(title=_("Login"),
                    body=webaccount.perform_back(mess, act, _("login"), args['ln']),
                    navtrail="""<a class="navtrail" href="%s/youraccount/display?ln=%s">""" % (CFG_SITE_SECURE_URL, args['ln']) + _("Your Account") + """</a>""",
                    description="%s Personalize, Main page" % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                    keywords="%s , personalize" % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                    uid=uid,
                    req=req,
                    secure_page_p = 1,
                    language=args['ln'],
                    lastupdated=__lastupdated__,
                    navmenuid='youraccount')
def register(self, req, form):
    """Handle the user registration page.

    Shows the registration form when no data has been submitted yet,
    otherwise attempts to create the account via webuser.registerUser()
    and reports the outcome.

    Form fields: p_nickname, p_email, p_pw, p_pw2 (password repeated),
    action, referer.
    """
    args = wash_urlargd(form, {
        'p_nickname': (str, None),
        'p_email': (str, None),
        'p_pw': (str, None),
        'p_pw2': (str, None),
        'action': (str, "login"),
        'referer': (str, "")})
    # Self-registration is switched off when the site-wide access
    # control level is restrictive.
    if CFG_ACCESS_CONTROL_LEVEL_SITE > 0:
        return webuser.page_not_authorized(req, "../youraccount/register?ln=%s" % args['ln'],
                                           navmenuid='youraccount')
    uid = webuser.getUid(req)
    # load the right message language
    _ = gettext_set_language(args['ln'])
    # First visit (nothing submitted yet): render the registration form.
    if args['p_nickname'] is None or args['p_email'] is None:
        return page(title=_("Register"),
                    body=webaccount.create_register_page_box(args['referer'], args['ln']),
                    navtrail="""<a class="navtrail" href="%s/youraccount/display?ln=%s">""" % (CFG_SITE_SECURE_URL, args['ln']) + _("Your Account") + """</a>""",
                    description=_("%s Personalize, Main page") % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                    keywords="%s , personalize" % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                    uid=uid,
                    req=req,
                    secure_page_p=1,
                    language=args['ln'],
                    lastupdated=__lastupdated__,
                    navmenuid='youraccount')
    mess = ""
    act = ""
    # Only attempt the registration when the two passwords agree;
    # -2 is a local sentinel meaning "passwords differ" (distinct from
    # the registerUser() return codes handled below).
    if args['p_pw'] == args['p_pw2']:
        ruid = webuser.registerUser(req, args['p_email'], args['p_pw'], args['p_nickname'])
    else:
        ruid = -2
    # Map the registration outcome code onto a message, follow-up
    # action and page title.
    if ruid == 0:
        mess = _("Your account has been successfully created.")
        title = _("Account created")
        if CFG_ACCESS_CONTROL_NOTIFY_USER_ABOUT_NEW_ACCOUNT == 1:
            # An activation email was sent; the account is not usable
            # until the user follows the link in it.
            mess += " " + _("In order to confirm its validity, an email message containing an account activation key has been sent to the given email address.")
            mess += " " + _("Please follow instructions presented there in order to complete the account registration process.")
            if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS >= 1:
                mess += " " + _("A second email will be sent when the account has been activated and can be used.")
        elif CFG_ACCESS_CONTROL_NOTIFY_USER_ABOUT_NEW_ACCOUNT != 1:
            # No confirmation step required: log the new account in
            # straight away.
            uid = webuser.update_Uid(req, args['p_email'])
            mess += " " + _("You can now access your %(x_url_open)saccount%(x_url_close)s.") %\
                {'x_url_open': '<a href="' + CFG_SITE_SECURE_URL + '/youraccount/display?ln=' + args['ln'] + '">',
                 'x_url_close': '</a>'}
    elif ruid == -2:
        mess = _("Both passwords must match.")
        mess += " " + _("Please try again.")
        act = "register"
        title = _("Registration failure")
    elif ruid == 1:
        mess = _("Supplied email address %s is invalid.") % args['p_email']
        mess += " " + _("Please try again.")
        act = "register"
        title = _("Registration failure")
    elif ruid == 2:
        mess = _("Desired nickname %s is invalid.") % args['p_nickname']
        mess += " " + _("Please try again.")
        act = "register"
        title = _("Registration failure")
    elif ruid == 3:
        mess = _("Supplied email address %s already exists in the database.") % args['p_email']
        mess += " " + websession_templates.tmpl_lost_your_password_teaser(args['ln'])
        mess += " " + _("Or please try again.")
        act = "register"
        title = _("Registration failure")
    elif ruid == 4:
        mess = _("Desired nickname %s already exists in the database.") % args['p_nickname']
        mess += " " + _("Please try again.")
        act = "register"
        title = _("Registration failure")
    elif ruid == 5:
        mess = _("Users cannot register themselves, only admin can register them.")
        act = "register"
        title = _("Registration failure")
    elif ruid == 6:
        mess = _("The site is having troubles in sending you an email for confirming your email address.") + _("The error has been logged and will be taken in consideration as soon as possible.")
        act = "register"
        title = _("Registration failure")
    else:
        # this should never happen
        mess = _("Internal Error")
        act = "register"
        title = _("Registration failure")
    # On failure the "register" link is offered again; on success no
    # back-link action is needed.
    return page(title=title,
                body=webaccount.perform_back(mess, act, (act == 'register' and _("register") or ""), args['ln']),
                navtrail="""<a class="navtrail" href="%s/youraccount/display?ln=%s">""" % (CFG_SITE_SECURE_URL, args['ln']) + _("Your Account") + """</a>""",
                description=_("%s Personalize, Main page") % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                keywords="%s , personalize" % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                uid=uid,
                req=req,
                secure_page_p=1,
                language=args['ln'],
                lastupdated=__lastupdated__,
                navmenuid='youraccount')
class WebInterfaceYourGroupsPages(WebInterfaceDirectory):
    """Web interface handlers for the "Your Groups" pages
    (/yourgroups/*): listing, creating, joining, leaving and editing
    groups, plus member management.

    Every handler follows the same shape: wash the form arguments,
    refuse guests / unidentified users, delegate to a
    webgroup.perform_request_* helper that returns (body, errors,
    warnings), and wrap the result in page().
    """

    _exports = ['', 'display', 'create', 'join', 'leave', 'edit', 'members']

    def index(self, req, form):
        # Bare /yourgroups URL: send the browser to the group listing.
        redirect_to_url(req, '/yourgroups/display')

    def display(self, req, form):
        """
        Displays groups the user is admin of
        and the groups the user is member of (but not admin).

        @param ln: language
        @return: the page listing all the user's groups
        """
        argd = wash_urlargd(form, {})
        uid = webuser.getUid(req)
        # load the right message language
        _ = gettext_set_language(argd['ln'])
        # Group management requires a logged-in, non-guest user and an
        # open site.
        if uid == -1 or webuser.isGuestUser(uid) or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
            return webuser.page_not_authorized(req, "../yourgroups/display",
                                               navmenuid='yourgroups')
        (body, errors, warnings) = webgroup.perform_request_groups_display(uid=uid,
                                                                           ln=argd['ln'])
        return page(title=_("Your Groups"),
                    body=body,
                    navtrail=webgroup.get_navtrail(argd['ln']),
                    uid=uid,
                    req=req,
                    language=argd['ln'],
                    lastupdated=__lastupdated__,
                    errors=errors,
                    warnings=warnings,
                    navmenuid='yourgroups')

    def create(self, req, form):
        """Interface for creating a new group.

        @param group_name: name of the new webgroup; must be filled
        @param group_description: description of the new webgroup (optional)
        @param join_policy: join policy of the new webgroup; must be chosen
        @param *button: which button was pressed
        @param ln: language
        @return: the "Create group" page
        """
        argd = wash_urlargd(form, {'group_name': (str, ""),
                                   'group_description': (str, ""),
                                   'join_policy': (str, ""),
                                   'create_button': (str, ""),
                                   'cancel': (str, "")
                                   })
        uid = webuser.getUid(req)
        # load the right message language
        _ = gettext_set_language(argd['ln'])
        if uid == -1 or webuser.isGuestUser(uid) or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
            return webuser.page_not_authorized(req, "../yourgroups/create",
                                               navmenuid='yourgroups')
        if argd['cancel']:
            # Cancel pressed: back to the group listing.
            # NOTE(review): no explicit return here — presumably
            # redirect_to_url() interrupts the request; confirm.
            url = CFG_SITE_URL + '/yourgroups/display?ln=%s'
            url %= argd['ln']
            redirect_to_url(req, url)
        if argd['create_button']:
            # Submission: actually create the group.
            (body, errors, warnings) = webgroup.perform_request_create_group(uid=uid,
                                                                             group_name=argd['group_name'],
                                                                             group_description=argd['group_description'],
                                                                             join_policy=argd['join_policy'],
                                                                             ln=argd['ln'])
        else:
            # First visit: show the (possibly pre-filled) input form.
            (body, errors, warnings) = webgroup.perform_request_input_create_group(group_name=argd['group_name'],
                                                                                   group_description=argd['group_description'],
                                                                                   join_policy=argd['join_policy'],
                                                                                   ln=argd['ln'])
        title = _("Create new group")
        return page(title=title,
                    body=body,
                    navtrail=webgroup.get_navtrail(argd['ln'], title),
                    uid=uid,
                    req=req,
                    language=argd['ln'],
                    lastupdated=__lastupdated__,
                    errors=errors,
                    warnings=warnings,
                    navmenuid='yourgroups')

    def join(self, req, form):
        """Interface for joining a new group.

        @param grpID: list of groups the user wants to become a member
            of; the user must select only one group
        @param group_name: will search for groups matching group_name
        @param *button: which button was pressed
        @param ln: language
        @return: the "Join group" page
        """
        argd = wash_urlargd(form, {'grpID': (list, []),
                                   'group_name': (str, ""),
                                   'find_button': (str, ""),
                                   'join_button': (str, ""),
                                   'cancel': (str, "")
                                   })
        uid = webuser.getUid(req)
        # load the right message language
        _ = gettext_set_language(argd['ln'])
        if uid == -1 or webuser.isGuestUser(uid) or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
            return webuser.page_not_authorized(req, "../yourgroups/join",
                                               navmenuid='yourgroups')
        if argd['cancel']:
            url = CFG_SITE_URL + '/yourgroups/display?ln=%s'
            url %= argd['ln']
            redirect_to_url(req, url)
        if argd['join_button']:
            # 'search' tells the helper whether a group-name search was
            # in progress alongside the join request.
            search = 0
            if argd['group_name']:
                search = 1
            (body, errors, warnings) = webgroup.perform_request_join_group(uid,
                                                                           argd['grpID'],
                                                                           argd['group_name'],
                                                                           search,
                                                                           argd['ln'])
        else:
            search = 0
            if argd['find_button']:
                search = 1
            (body, errors, warnings) = webgroup.perform_request_input_join_group(uid,
                                                                                 argd['group_name'],
                                                                                 search,
                                                                                 ln=argd['ln'])
        title = _("Join New Group")
        return page(title=title,
                    body=body,
                    navtrail=webgroup.get_navtrail(argd['ln'], title),
                    uid=uid,
                    req=req,
                    language=argd['ln'],
                    lastupdated=__lastupdated__,
                    errors=errors,
                    warnings=warnings,
                    navmenuid='yourgroups')

    def leave(self, req, form):
        """Interface for leaving a group.

        @param grpID: group the user wants to leave
        @param group_name: name of the group the user wants to leave
        @param *button: which button was pressed
        @param confirmed: the user is first asked to confirm
        @param ln: language
        @return: the "Leave group" page
        """
        argd = wash_urlargd(form, {'grpID': (str, ""),
                                   'group_name': (str, ""),
                                   'leave_button': (str, ""),
                                   'cancel': (str, ""),
                                   'confirmed': (int, 0)
                                   })
        uid = webuser.getUid(req)
        # load the right message language
        _ = gettext_set_language(argd['ln'])
        if uid == -1 or webuser.isGuestUser(uid) or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
            return webuser.page_not_authorized(req, "../yourgroups/leave",
                                               navmenuid='yourgroups')
        if argd['cancel']:
            url = CFG_SITE_URL + '/yourgroups/display?ln=%s'
            url %= argd['ln']
            redirect_to_url(req, url)
        if argd['leave_button']:
            # 'confirmed' distinguishes the confirmation round-trip from
            # the final leave action.
            (body, errors, warnings) = webgroup.perform_request_leave_group(uid,
                                                                            argd['grpID'],
                                                                            argd['confirmed'],
                                                                            argd['ln'])
        else:
            (body, errors, warnings) = webgroup.perform_request_input_leave_group(uid=uid,
                                                                                  ln=argd['ln'])
        title = _("Leave Group")
        return page(title=title,
                    body=body,
                    navtrail=webgroup.get_navtrail(argd['ln'], title),
                    uid=uid,
                    req=req,
                    language=argd['ln'],
                    lastupdated=__lastupdated__,
                    errors=errors,
                    warnings=warnings,
                    navmenuid='yourgroups')

    def edit(self, req, form):
        """Interface for editing a group.

        @param grpID: group ID
        @param group_name: name of the new webgroup; must be filled
        @param group_description: description of the new webgroup (optional)
        @param join_policy: join policy of the new webgroup; must be chosen
        @param update: button "update group" pressed
        @param delete: button "delete group" pressed
        @param cancel: button "cancel" pressed
        @param confirmed: the user is first asked to confirm before deleting
        @param ln: language
        @return: the main page displaying all the groups
        """
        argd = wash_urlargd(form, {'grpID': (str, ""),
                                   'update': (str, ""),
                                   'cancel': (str, ""),
                                   'delete': (str, ""),
                                   'group_name': (str, ""),
                                   'group_description': (str, ""),
                                   'join_policy': (str, ""),
                                   'confirmed': (int, 0)
                                   })
        uid = webuser.getUid(req)
        # load the right message language
        _ = gettext_set_language(argd['ln'])
        if uid == -1 or webuser.isGuestUser(uid) or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
            return webuser.page_not_authorized(req, "../yourgroups/display",
                                               navmenuid='yourgroups')
        if argd['cancel']:
            url = CFG_SITE_URL + '/yourgroups/display?ln=%s'
            url %= argd['ln']
            redirect_to_url(req, url)
        elif argd['delete']:
            # NOTE(review): unlike the other branches, no ln= is passed
            # here — confirm whether perform_request_delete_group
            # defaults it appropriately.
            (body, errors, warnings) = webgroup.perform_request_delete_group(uid=uid,
                                                                             grpID=argd['grpID'],
                                                                             confirmed=argd['confirmed'])
        elif argd['update']:
            (body, errors, warnings) = webgroup.perform_request_update_group(uid=uid,
                                                                             grpID=argd['grpID'],
                                                                             group_name=argd['group_name'],
                                                                             group_description=argd['group_description'],
                                                                             join_policy=argd['join_policy'],
                                                                             ln=argd['ln'])
        else:
            # No button pressed: show the edit form for the group.
            (body, errors, warnings) = webgroup.perform_request_edit_group(uid=uid,
                                                                           grpID=argd['grpID'],
                                                                           ln=argd['ln'])
        title = _("Edit Group")
        return page(title=title,
                    body=body,
                    navtrail=webgroup.get_navtrail(argd['ln'], title),
                    uid=uid,
                    req=req,
                    language=argd['ln'],
                    lastupdated=__lastupdated__,
                    errors=errors,
                    warnings=warnings,
                    navmenuid='yourgroups')

    def members(self, req, form):
        """Interface for managing members of a group.

        @param grpID: group ID (int here, unlike the str grpID of
            edit/leave)
        @param add_member: button "add_member" pressed
        @param remove_member: button "remove_member" pressed
        @param reject_member: button "reject_member" pressed
        @param member_id: ID of the existing member selected
        @param pending_member_id: ID of the pending member selected
        @param cancel: button "cancel" pressed
        @param ln: language
        @return: the same page with data updated
        """
        argd = wash_urlargd(form, {'grpID': (int, 0),
                                   'cancel': (str, ""),
                                   'add_member': (str, ""),
                                   'remove_member': (str, ""),
                                   'reject_member': (str, ""),
                                   'member_id': (int, 0),
                                   'pending_member_id': (int, 0)
                                   })
        uid = webuser.getUid(req)
        # load the right message language
        _ = gettext_set_language(argd['ln'])
        if uid == -1 or webuser.isGuestUser(uid) or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
            return webuser.page_not_authorized(req, "../yourgroups/display",
                                               navmenuid='yourgroups')
        if argd['cancel']:
            url = CFG_SITE_URL + '/yourgroups/display?ln=%s'
            url %= argd['ln']
            redirect_to_url(req, url)
        if argd['remove_member']:
            (body, errors, warnings) = webgroup.perform_request_remove_member(uid=uid,
                                                                              grpID=argd['grpID'],
                                                                              member_id=argd['member_id'],
                                                                              ln=argd['ln'])
        elif argd['reject_member']:
            # Rejecting and adding both act on a *pending* member.
            (body, errors, warnings) = webgroup.perform_request_reject_member(uid=uid,
                                                                              grpID=argd['grpID'],
                                                                              user_id=argd['pending_member_id'],
                                                                              ln=argd['ln'])
        elif argd['add_member']:
            (body, errors, warnings) = webgroup.perform_request_add_member(uid=uid,
                                                                           grpID=argd['grpID'],
                                                                           user_id=argd['pending_member_id'],
                                                                           ln=argd['ln'])
        else:
            # No button pressed: show the member-management page.
            (body, errors, warnings) = webgroup.perform_request_manage_member(uid=uid,
                                                                              grpID=argd['grpID'],
                                                                              ln=argd['ln'])
        title = _("Edit group members")
        return page(title=title,
                    body=body,
                    navtrail=webgroup.get_navtrail(argd['ln'], title),
                    uid=uid,
                    req=req,
                    language=argd['ln'],
                    lastupdated=__lastupdated__,
                    errors=errors,
                    warnings=warnings,
                    navmenuid='yourgroups')
Using HTTP_MOVED_TEMPORARILY on successful login (otherwise, with
HTTP_TEMPORARY_REDIRECT, Firefox complains that it has to resend the form
information to a new URL).
# -*- coding: utf-8 -*-
##
## $Id$
##
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""CDS Invenio ACCOUNT HANDLING"""
__revision__ = "$Id$"
__lastupdated__ = """$Date$"""
try:
from mod_python import apache
except ImportError:
pass
from datetime import timedelta
from invenio.config import \
CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS, \
CFG_ACCESS_CONTROL_LEVEL_SITE, \
CFG_ACCESS_CONTROL_NOTIFY_USER_ABOUT_NEW_ACCOUNT, \
CFG_SITE_NAME, \
CFG_SITE_NAME_INTL, \
CFG_SITE_SUPPORT_EMAIL, \
CFG_SITE_SECURE_URL, \
CFG_SITE_URL, \
CFG_CERN_SITE, \
CFG_WEBSESSION_RESET_PASSWORD_EXPIRE_IN_DAYS
from invenio import webuser
from invenio.webpage import page
from invenio import webaccount
from invenio import webbasket
from invenio import webalert
from invenio.dbquery import run_sql
from invenio.webmessage import account_new_mail
from invenio.access_control_engine import make_apache_message, make_list_apache_firerole
from invenio.webinterface_handler import wash_urlargd, WebInterfaceDirectory
from invenio.urlutils import redirect_to_url, make_canonical_urlargd
from invenio import webgroup
from invenio import webgroup_dblayer
from invenio.messages import gettext_set_language, wash_language
from invenio.mailutils import send_email
from invenio.access_control_mailcookie import mail_cookie_retrieve_kind, \
mail_cookie_check_pw_reset, mail_cookie_delete_cookie, \
mail_cookie_create_pw_reset, mail_cookie_check_role, \
mail_cookie_check_mail_activation, InvenioWebAccessMailCookieError, \
InvenioWebAccessMailCookieDeletedError, mail_cookie_check_authorize_action
from invenio.access_control_config import CFG_WEBACCESS_WARNING_MSGS, \
CFG_EXTERNAL_AUTH_USING_SSO, CFG_EXTERNAL_AUTH_LOGOUT_SSO, \
CFG_EXTERNAL_AUTHENTICATION
import invenio.template
websession_templates = invenio.template.load('websession')
class WebInterfaceYourAccountPages(WebInterfaceDirectory):
_exports = ['', 'edit', 'change', 'lost', 'display',
'send_email', 'youradminactivities', 'access',
'delete', 'logout', 'login', 'register', 'resetpassword']
_force_https = True
def index(self, req, form):
    """Redirect the bare /youraccount URL to the account display page."""
    target = '%s/youraccount/display' % CFG_SITE_SECURE_URL
    redirect_to_url(req, target)
def access(self, req, form):
    """Dispatch an emailed mail-cookie link according to its kind:
    password reset, role authorization, or email-address activation.

    Invalid/expired cookies end up on a "not authorized" page.
    """
    args = wash_urlargd(form, {'mailcookie': (str, '')})
    _ = gettext_set_language(args['ln'])
    title = _("Mail Cookie Service")
    try:
        kind = mail_cookie_retrieve_kind(args['mailcookie'])
        if kind == 'pw_reset':
            # Password resets are handled by the dedicated
            # resetpassword page; forward the key there.
            redirect_to_url(req, '%s/youraccount/resetpassword?k=%s&ln=%s' % (CFG_SITE_SECURE_URL, args['mailcookie'], args['ln']))
        elif kind == 'role':
            uid = webuser.getUid(req)
            try:
                (role_name, expiration) = mail_cookie_check_role(args['mailcookie'], uid)
            except InvenioWebAccessMailCookieDeletedError:
                # The cookie was already consumed by an earlier visit.
                return page(title=_("Role authorization request"), req=req, body=_("This request for an authorization has already been authorized."), uid=webuser.getUid(req), navmenuid='youraccount', language=args['ln'])
            return page(title=title,
                        body=webaccount.perform_back(
                            _("You have successfully obtained an authorization as %(x_role)s! "
                              "This authorization will last until %(x_expiration)s and until "
                              "you close your browser if you are a guest user.") %
                            {'x_role': '<strong>%s</strong>' % role_name,
                             'x_expiration': '<em>%s</em>' % expiration.strftime("%Y-%m-%d %H:%M:%S")},
                            'login', _('login'), args['ln']),
                        req=req,
                        uid=webuser.getUid(req),
                        language=args['ln'],
                        lastupdated=__lastupdated__,
                        navmenuid='youraccount')
        elif kind == 'mail_activation':
            try:
                email = mail_cookie_check_mail_activation(args['mailcookie'])
                if not email:
                    # Treat an empty result like any other cookie
                    # failure (caught by the outer handler).
                    raise StandardError
                webuser.confirm_email(email)
                body = "<p>" + _("You have confirmed the validity of your email"
                                 " address!") + "</p>"
                if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS == 1:
                    # Admin approval still required before login.
                    body += "<p>" + _("Please, wait for the administrator to "
                                      "enable your account.") + "</p>"
                else:
                    # Account usable straight away: log the user in.
                    uid = webuser.update_Uid(req, email)
                    body += "<p>" + _("You can now go to %(x_url_open)syour account page%(x_url_close)s.") % {'x_url_open': '<a href="/youraccount/display?ln=%s">' % args['ln'], 'x_url_close': '</a>'} + "</p>"
                return page(title=_("Email address successfully activated"),
                            body=body, req=req, language=args['ln'], uid=webuser.getUid(req), lastupdated=__lastupdated__, navmenuid='youraccount')
            except InvenioWebAccessMailCookieDeletedError, e:
                # Cookie already used: reassure rather than error out.
                body = "<p>" + _("You have already confirmed the validity of your email address!") + "</p>"
                if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS == 1:
                    body += "<p>" + _("Please, wait for the administrator to "
                                      "enable your account.") + "</p>"
                else:
                    body += "<p>" + _("You can now go to %(x_url_open)syour account page%(x_url_close)s.") % {'x_url_open': '<a href="/youraccount/display?ln=%s">' % args['ln'], 'x_url_close': '</a>'} + "</p>"
                return page(title=_("Email address successfully activated"),
                            body=body, req=req, language=args['ln'], uid=webuser.getUid(req), lastupdated=__lastupdated__, navmenuid='youraccount')
        # Unrecognized cookie kind.
        return webuser.page_not_authorized(req, "../youraccount/access",
                                           text=_("This request for confirmation of an email "
                                                  "address is not valid or"
                                                  " is expired."), navmenuid='youraccount')
    except InvenioWebAccessMailCookieError:
        return webuser.page_not_authorized(req, "../youraccount/access",
                                           text=_("This request for an authorization is not valid or"
                                                  " is expired."), navmenuid='youraccount')
def resetpassword(self, req, form):
    """Handle a password-reset link: validate the reset key 'k', show
    the new-password form, and finally store the new password and
    invalidate the one-shot key.
    """
    args = wash_urlargd(form, {
        'k': (str, ''),
        'reset': (int, 0),
        'password': (str, ''),
        'password2': (str, '')
    })
    _ = gettext_set_language(args['ln'])
    # Map the reset key back to the email address it was issued for;
    # None means unknown or expired.
    email = mail_cookie_check_pw_reset(args['k'])
    reset_key = args['k']
    title = _('Reset password')
    if email is None or CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS >= 3:
        return webuser.page_not_authorized(req, "../youraccount/resetpassword",
                                           text=_("This request for resetting the password is not valid or"
                                                  " is expired."), navmenuid='youraccount')
    if not args['reset']:
        # First visit: show the form asking for the new password.
        return page(title=title,
                    body=webaccount.perform_reset_password(args['ln'], email, reset_key),
                    req=req,
                    secure_page_p=1,
                    language=args['ln'],
                    lastupdated=__lastupdated__,
                    navmenuid='youraccount')
    elif args['password'] != args['password2']:
        # Re-show the form with an error when the two entries differ.
        msg = _('The two provided passwords aren\'t equal.')
        return page(title=title,
                    body=webaccount.perform_reset_password(args['ln'], email, reset_key, msg),
                    req=req,
                    secure_page_p=1,
                    language=args['ln'],
                    lastupdated=__lastupdated__,
                    navmenuid='youraccount')
    # Store the new password. The stored value is AES_ENCRYPT(email,
    # password) — the same scheme the old-password check in change()
    # uses — and the reset key is deleted so the link is single-use.
    run_sql('UPDATE user SET password=AES_ENCRYPT(email,%s) WHERE email=%s', (args['password'], email))
    mail_cookie_delete_cookie(reset_key)
    return page(title=title,
                body=webaccount.perform_back(
                    _("The password was successfully set! "
                      "You can now proceed with the login."),
                    'login', _('login'), args['ln']),
                req=req,
                language=args['ln'],
                lastupdated=__lastupdated__,
                navmenuid='youraccount')
def display(self, req, form):
    """Show the "Your Account" page: a generic info box for guests, or
    the personalised overview (baskets, alerts, searches, messages,
    groups) for logged-in users."""
    argd = wash_urlargd(form, {})
    ln = argd['ln']
    current_uid = webuser.getUid(req)
    # load the right message language
    _ = gettext_set_language(ln)
    # Unidentified user, or the whole site is closed down: refuse.
    if current_uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
        return webuser.page_not_authorized(req, "../youraccount/display",
                                           navmenuid='youraccount')
    site_name = CFG_SITE_NAME_INTL.get(ln, CFG_SITE_NAME)
    if webuser.isGuestUser(current_uid):
        # Guests only get the generic information box.
        return page(title=_("Your Account"),
                    body=webaccount.perform_info(req, ln),
                    description="%s Personalize, Main page" % site_name,
                    keywords=_("%s, personalize") % site_name,
                    uid=current_uid,
                    req=req,
                    secure_page_p=1,
                    language=ln,
                    lastupdated=__lastupdated__,
                    navmenuid='youraccount')
    # Gather each personalised summary box for the account overview.
    nickname = webuser.get_nickname_or_email(current_uid)
    baskets = webbasket.account_list_baskets(current_uid, ln=ln)
    alerts = webalert.account_list_alerts(current_uid, ln=ln)
    searches = webalert.account_list_searches(current_uid, ln=ln)
    messages = account_new_mail(current_uid, ln=ln)
    groups = webgroup.account_group(current_uid, ln=ln)
    return page(title=_("Your Account"),
                body=webaccount.perform_display_account(req, nickname, baskets,
                                                        alerts, searches,
                                                        messages, groups, ln),
                description="%s Personalize, Main page" % site_name,
                keywords=_("%s, personalize") % site_name,
                uid=current_uid,
                req=req,
                secure_page_p=1,
                language=ln,
                lastupdated=__lastupdated__,
                navmenuid='youraccount')
def edit(self, req, form):
    """Show the account settings page; with verbose=9 the collected
    user-info dictionary is dumped above the settings form."""
    argd = wash_urlargd(form, {"verbose": (int, 0)})
    ln = argd['ln']
    current_uid = webuser.getUid(req)
    # load the right message language
    _ = gettext_set_language(ln)
    if current_uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
        return webuser.page_not_authorized(req, "../youraccount/edit",
                                           navmenuid='youraccount')
    if webuser.isGuestUser(current_uid):
        return webuser.page_not_authorized(req, "../youraccount/edit",
                                           text=_("This functionality is forbidden to guest users."),
                                           navmenuid='youraccount')
    debug_dump = ''
    if argd['verbose'] == 9:
        # Debug mode: prepend a dump of everything known about the user.
        info = webuser.collect_user_info(req)
        debug_dump = ''.join("<b>%s</b>:%s<br />" % (key, value)
                             for key, value in info.items())
    site_name = CFG_SITE_NAME_INTL.get(ln, CFG_SITE_NAME)
    return page(title=_("Your Settings"),
                body=debug_dump + webaccount.perform_set(webuser.get_email(current_uid),
                                                         ln, verbose=argd['verbose']),
                navtrail="""<a class="navtrail" href="%s/youraccount/display?ln=%s">""" % (CFG_SITE_SECURE_URL, ln) + _("Your Account") + """</a>""",
                description=_("%s Personalize, Your Settings") % site_name,
                keywords=_("%s, personalize") % site_name,
                uid=current_uid,
                req=req,
                secure_page_p=1,
                language=ln,
                lastupdated=__lastupdated__,
                navmenuid='youraccount')
def change(self, req, form):
    """Apply a change submitted from the settings page.

    Exactly one of several mutually-exclusive changes is processed per
    request, chosen by which form fields are filled: login method,
    email/nickname, password, websearch display preferences, or
    interface language. The outcome is reported via perform_back().
    """
    args = wash_urlargd(form, {
        'nickname': (str, None),
        'email': (str, None),
        'old_password': (str, None),
        'password': (str, None),
        'password2': (str, None),
        'login_method': (str, ""),
        'group_records': (int, None),
        'latestbox': (int, None),
        'helpbox': (int, None),
        'lang': (str, None),
    })
    uid = webuser.getUid(req)
    # load the right message language
    _ = gettext_set_language(args['ln'])
    if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
        return webuser.page_not_authorized(req, "../youraccount/change",
                                           navmenuid='youraccount')
    prefs = webuser.get_user_preferences(uid)
    mess = ''
    if args['email']:
        # Emails are stored/compared lowercase.
        args['email'] = args['email'].lower()
    # --- branch 1: switch login method -------------------------------
    if args['login_method'] and CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS < 4 \
            and args['login_method'] in CFG_EXTERNAL_AUTHENTICATION.keys():
        title = _("Settings edited")
        act = "display"
        linkname = _("Show account")
        if prefs['login_method'] != args['login_method']:
            # CFG_EXTERNAL_AUTHENTICATION maps method name -> (handler,
            # ...); a falsy handler means the internal login method.
            if not CFG_EXTERNAL_AUTHENTICATION[args['login_method']][0]:
                # Switching to internal authentication: we drop any
                # external data (settings and externally-managed groups).
                p_email = webuser.get_email(uid)
                webuser.drop_external_settings(uid)
                webgroup_dblayer.drop_external_groups(uid)
                prefs['login_method'] = args['login_method']
                webuser.set_user_preferences(uid, prefs)
                mess = "<p>" + _("Switched to internal login method.") + " "
                mess += _("Please note that if this is the first time that you are using this account "
                          "with the internal login method then the system has set for you "
                          "a randomly generated password. Please clic the "
                          "following button to obtain a password reset request "
                          "link sent to you via email:") + '</p>'
                mess += """<p><form method="post" action="../youraccount/send_email">
<input type="hidden" name="p_email" value="%s">
<input class="formbutton" type="submit" value="%s">
</form></p>""" % (p_email, _("Send Password"))
            else:
                # Switching to an external method: the external system
                # must recognize this user's email address.
                query = """SELECT email FROM user
WHERE id = %i"""
                res = run_sql(query % uid)
                if res:
                    email = res[0][0]
                else:
                    email = None
                if not email:
                    mess = _("Unable to switch to external login method %s, because your email address is unknown.") % args['login_method']
                else:
                    try:
                        if not CFG_EXTERNAL_AUTHENTICATION[args['login_method']][0].user_exists(email):
                            mess = _("Unable to switch to external login method %s, because your email address is unknown to the external login system.") % args['login_method']
                        else:
                            prefs['login_method'] = args['login_method']
                            webuser.set_user_preferences(uid, prefs)
                            mess = _("Login method successfully selected.")
                    except AttributeError:
                        # Handler has no user_exists(): email-based
                        # switching not supported by this method.
                        mess = _("The external login method %s does not support email address based logins. Please contact the site administrators.") % args['login_method']
    elif args['login_method'] and CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS >= 4:
        # Login-method changes are forbidden at this access level.
        return webuser.page_not_authorized(req, "../youraccount/change",
                                           navmenuid='youraccount')
    # --- branch 2: change email and/or nickname ----------------------
    elif args['email']:
        # We should ignore the password if the authentication method is
        # an external one.
        # NOTE(review): ignore_password_p is computed but never used
        # below — confirm whether it is dead code.
        ignore_password_p = CFG_EXTERNAL_AUTHENTICATION[prefs['login_method']][0] != None
        uid2 = webuser.emailUnique(args['email'])
        uid_with_the_same_nickname = webuser.nicknameUnique(args['nickname'])
        # Accept only when the email is valid (or validation is skipped
        # at this access level), the nickname is valid or absent, and
        # neither email nor nickname belongs to a *different* user
        # (uid2 == 0 means unused, -1 means lookup error).
        if (CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS >= 2 or (CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS <= 1 and
                webuser.email_valid_p(args['email']))) \
                and (args['nickname'] is None or webuser.nickname_valid_p(args['nickname'])) \
                and uid2 != -1 and (uid2 == uid or uid2 == 0) \
                and uid_with_the_same_nickname != -1 and (uid_with_the_same_nickname == uid or uid_with_the_same_nickname == 0):
            if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS < 3:
                change = webuser.updateDataUser(uid,
                                                args['email'],
                                                args['nickname'])
            else:
                return webuser.page_not_authorized(req, "../youraccount/change",
                                                   navmenuid='youraccount')
            # NOTE(review): when updateDataUser() returns falsy, none of
            # mess/act/linkname/title are set here — confirm the final
            # page() call cannot then hit an unbound 'title'.
            if change:
                mess = _("Settings successfully edited.")
                act = "display"
                linkname = _("Show account")
                title = _("Settings edited")
        elif args['nickname'] is not None and not webuser.nickname_valid_p(args['nickname']):
            mess = _("Desired nickname %s is invalid.") % args['nickname']
            mess += " " + _("Please try again.")
            act = "edit"
            linkname = _("Edit settings")
            title = _("Editing settings failed")
        elif not webuser.email_valid_p(args['email']):
            mess = _("Supplied email address %s is invalid.") % args['email']
            mess += " " + _("Please try again.")
            act = "edit"
            linkname = _("Edit settings")
            title = _("Editing settings failed")
        elif uid2 == -1 or uid2 != uid and not uid2 == 0:
            mess = _("Supplied email address %s already exists in the database.") % args['email']
            mess += " " + websession_templates.tmpl_lost_your_password_teaser(args['ln'])
            mess += " " + _("Or please try again.")
            act = "edit"
            linkname = _("Edit settings")
            title = _("Editing settings failed")
        elif uid_with_the_same_nickname == -1 or uid_with_the_same_nickname != uid and not uid_with_the_same_nickname == 0:
            mess = _("Desired nickname %s is already in use.") % args['nickname']
            mess += " " + _("Please try again.")
            act = "edit"
            linkname = _("Edit settings")
            title = _("Editing settings failed")
    # --- branch 3: change password -----------------------------------
    elif args['old_password'] != None and CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS < 3:
        # Old-password check: the stored password column equals
        # AES_ENCRYPT(email, plaintext-password).
        res = run_sql("SELECT id FROM user "
                      "WHERE AES_ENCRYPT(email,%s)=password AND id=%s",
                      (args['old_password'], uid))
        if res:
            if args['password'] == args['password2']:
                webuser.updatePasswordUser(uid, args['password'])
                mess = _("Password successfully edited.")
                act = "display"
                linkname = _("Show account")
                title = _("Password edited")
            else:
                mess = _("Both passwords must match.")
                mess += " " + _("Please try again.")
                act = "edit"
                linkname = _("Edit settings")
                title = _("Editing password failed")
        else:
            mess = _("Wrong old password inserted.")
            mess += " " + _("Please try again.")
            act = "edit"
            linkname = _("Edit settings")
            title = _("Editing password failed")
    # --- branch 4: websearch display preferences ---------------------
    elif args['group_records']:
        prefs = webuser.get_user_preferences(uid)
        prefs['websearch_group_records'] = args['group_records']
        prefs['websearch_latestbox'] = args['latestbox']
        prefs['websearch_helpbox'] = args['helpbox']
        webuser.set_user_preferences(uid, prefs)
        title = _("Settings edited")
        act = "display"
        linkname = _("Show account")
        mess = _("User settings saved correctly.")
    # --- branch 5: interface language --------------------------------
    elif args['lang']:
        lang = wash_language(args['lang'])
        prefs = webuser.get_user_preferences(uid)
        prefs['language'] = lang
        args['ln'] = lang
        # Re-bind the translator so the confirmation below is already
        # shown in the newly selected language.
        _ = gettext_set_language(lang)
        webuser.set_user_preferences(uid, prefs)
        title = _("Settings edited")
        act = "display"
        linkname = _("Show account")
        mess = _("User settings saved correctly.")
    else:
        # Nothing recognizable was submitted.
        mess = _("Unable to update settings.")
        act = "edit"
        linkname = _("Edit settings")
        title = _("Editing settings failed")
    return page(title=title,
                body=webaccount.perform_back(mess, act, linkname, args['ln']),
                navtrail="""<a class="navtrail" href="%s/youraccount/display?ln=%s">""" % (CFG_SITE_SECURE_URL, args['ln']) + _("Your Account") + """</a>""",
                description="%s Personalize, Main page" % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                keywords=_("%s, personalize") % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                uid=uid,
                req=req,
                secure_page_p=1,
                language=args['ln'],
                lastupdated=__lastupdated__,
                navmenuid='youraccount')
def lost(self, req, form):
    """Show the "Lost your password?" page."""
    argd = wash_urlargd(form, {})
    ln = argd['ln']
    current_uid = webuser.getUid(req)
    # load the right message language
    _ = gettext_set_language(ln)
    if current_uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
        return webuser.page_not_authorized(req, "../youraccount/lost",
                                           navmenuid='youraccount')
    site_name = CFG_SITE_NAME_INTL.get(ln, CFG_SITE_NAME)
    crumb = """<a class="navtrail" href="%s/youraccount/display?ln=%s">""" % (CFG_SITE_SECURE_URL, ln) + _("Your Account") + """</a>"""
    return page(title=_("Lost your password?"),
                body=webaccount.perform_lost(ln),
                navtrail=crumb,
                description="%s Personalize, Main page" % site_name,
                keywords=_("%s, personalize") % site_name,
                uid=current_uid,
                req=req,
                secure_page_p=1,
                language=ln,
                lastupdated=__lastupdated__,
                navmenuid='youraccount')
def send_email(self, req, form):
    """Handle a password-reset request for the address given in ``p_email``.

    Renders an error page when the account uses an external authentication
    system, when the address is unknown, or when the mail cannot be
    delivered; otherwise confirms that the reset link was sent.
    """
    # set all the declared query fields as local variables
    args = wash_urlargd(form, {'p_email': (str, None)})
    uid = webuser.getUid(req)
    # load the right message language
    _ = gettext_set_language(args['ln'])
    if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
        return webuser.page_not_authorized(req, "../youraccount/send_email",
                                           navmenuid='youraccount')
    user_prefs = webuser.get_user_preferences(webuser.emailUnique(args['p_email']))
    if user_prefs:
        # Accounts managed by an external authentication system cannot have
        # their password reset locally.
        if user_prefs['login_method'] in CFG_EXTERNAL_AUTHENTICATION and \
           CFG_EXTERNAL_AUTHENTICATION[user_prefs['login_method']][0] is not None:
            eMsg = _("Cannot send password reset request since you are using external authentication system.")
            return page(title=_("Your Account"),
                        body=webaccount.perform_emailMessage(eMsg, args['ln']),
                        description="%s Personalize, Main page" % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                        # BUGFIX: interpolate *after* translating, so the
                        # literal msgid "%s, personalize" is looked up in the
                        # catalog (consistent with every other page here).
                        keywords=_("%s, personalize") % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                        uid=uid, req=req,
                        secure_page_p=1,
                        language=args['ln'],
                        lastupdated=__lastupdated__,
                        navmenuid='youraccount')
    try:
        reset_key = mail_cookie_create_pw_reset(args['p_email'], cookie_timeout=timedelta(days=CFG_WEBSESSION_RESET_PASSWORD_EXPIRE_IN_DAYS))
    except InvenioWebAccessMailCookieError:
        # Unknown address: signalled below via reset_key being None.
        reset_key = None
    if reset_key is None:
        eMsg = _("The entered email address does not exist in the database.")
        return page(title=_("Your Account"),
                    body=webaccount.perform_emailMessage(eMsg, args['ln']),
                    description="%s Personalize, Main page" % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                    keywords=_("%s, personalize") % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                    uid=uid, req=req,
                    secure_page_p=1,
                    language=args['ln'],
                    lastupdated=__lastupdated__,
                    navmenuid='youraccount')
    ip_address = req.connection.remote_host or req.connection.remote_ip
    # NOTE: this call resolves to the *module-level* send_email() mailing
    # helper, not to this method (method names are not in local scope).
    if not send_email(CFG_SITE_SUPPORT_EMAIL, args['p_email'], "%s %s"
                      % (_("Password reset request for"),
                         CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME)),
                      websession_templates.tmpl_account_reset_password_email_body(
                          args['p_email'], reset_key, ip_address, args['ln'])):
        eMsg = _("The entered email address is incorrect, please check that it is written correctly (e.g. johndoe@example.com).")
        return page(title=_("Incorrect email address"),
                    body=webaccount.perform_emailMessage(eMsg, args['ln']),
                    description="%s Personalize, Main page" % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                    keywords=_("%s, personalize") % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                    uid=uid,
                    req=req,
                    secure_page_p=1,
                    language=args['ln'],
                    lastupdated=__lastupdated__,
                    navmenuid='youraccount')
    return page(title=_("Reset password link sent"),
                body=webaccount.perform_emailSent(args['p_email'], args['ln']),
                description="%s Personalize, Main page" % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                keywords=_("%s, personalize") % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                uid=uid, req=req,
                secure_page_p=1,
                language=args['ln'],
                lastupdated=__lastupdated__,
                navmenuid='youraccount')
def youradminactivities(self, req, form):
    """Render the overview of the user's administrative activities."""
    args = wash_urlargd(form, {})
    uid = webuser.getUid(req)
    user_info = webuser.collect_user_info(req)
    # Pick the translation function for the requested language.
    _ = gettext_set_language(args['ln'])
    if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
        return webuser.page_not_authorized(req, "../youraccount/youradminactivities",
                                           navmenuid='admin')
    ln = args['ln']
    site_name = CFG_SITE_NAME_INTL.get(ln, CFG_SITE_NAME)
    navtrail = """<a class="navtrail" href="%s/youraccount/display?ln=%s">""" % (CFG_SITE_SECURE_URL, ln) + _("Your Account") + """</a>"""
    return page(title=_("Your Administrative Activities"),
                body=webaccount.perform_youradminactivities(user_info, ln),
                navtrail=navtrail,
                description="%s Personalize, Main page" % site_name,
                keywords=_("%s, personalize") % site_name,
                uid=uid,
                req=req,
                secure_page_p=1,
                language=ln,
                lastupdated=__lastupdated__,
                navmenuid='admin')
def delete(self, req, form):
    """Render the account-deletion page."""
    args = wash_urlargd(form, {})
    uid = webuser.getUid(req)
    # Pick the translation function for the requested language.
    _ = gettext_set_language(args['ln'])
    if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
        return webuser.page_not_authorized(req, "../youraccount/delete",
                                           navmenuid='youraccount')
    ln = args['ln']
    site_name = CFG_SITE_NAME_INTL.get(ln, CFG_SITE_NAME)
    navtrail = """<a class="navtrail" href="%s/youraccount/display?ln=%s">""" % (CFG_SITE_SECURE_URL, ln) + _("Your Account") + """</a>"""
    return page(title=_("Delete Account"),
                body=webaccount.perform_delete(ln),
                navtrail=navtrail,
                description="%s Personalize, Main page" % site_name,
                keywords=_("%s, personalize") % site_name,
                uid=uid,
                req=req,
                secure_page_p=1,
                language=ln,
                lastupdated=__lastupdated__,
                navmenuid='youraccount')
def logout(self, req, form):
    """Log the current user out and render the logout page.

    Under SSO the logout is delegated to the external SSO logout URL.
    """
    args = wash_urlargd(form, {})
    uid = webuser.logoutUser(req)
    # Pick the translation function for the requested language.
    _ = gettext_set_language(args['ln'])
    if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
        return webuser.page_not_authorized(req, "../youraccount/logout",
                                           navmenuid='youraccount')
    if CFG_EXTERNAL_AUTH_USING_SSO:
        # The SSO system owns the session; send the browser there.
        return redirect_to_url(req, CFG_EXTERNAL_AUTH_LOGOUT_SSO)
    ln = args['ln']
    site_name = CFG_SITE_NAME_INTL.get(ln, CFG_SITE_NAME)
    navtrail = """<a class="navtrail" href="%s/youraccount/display?ln=%s">""" % (CFG_SITE_SECURE_URL, ln) + _("Your Account") + """</a>"""
    return page(title=_("Logout"),
                body=webaccount.perform_logout(req, ln),
                navtrail=navtrail,
                description="%s Personalize, Main page" % site_name,
                keywords=_("%s, personalize") % site_name,
                uid=uid,
                req=req,
                secure_page_p=1,
                language=ln,
                lastupdated=__lastupdated__,
                navmenuid='youraccount')
def login(self, req, form):
    """Authenticate the user and redirect (or render a login/error page).

    Handles three flows: showing the login form when no credentials were
    submitted, authenticating via a configured login method, and SSO (where
    credentials come from the environment).  The ``action`` argument may
    carry an authorization-action mail cookie used to build an explanatory
    message for the login box.
    """
    args = wash_urlargd(form, {
        'p_un': (str, None),
        'p_pw': (str, None),
        'login_method': (str, None),
        'action': (str, ''),
        'remember_me' : (str, ''),
        'referer': (str, '')})
    if args['p_un']:
        args['p_un'] = args['p_un'].strip()
    # Any non-empty checkbox value means "remember me".
    args['remember_me'] = args['remember_me'] != ''
    # NOTE: a no-op `locals().update(args)` was removed here — updating the
    # dict returned by locals() has no effect inside a CPython function.
    if CFG_ACCESS_CONTROL_LEVEL_SITE > 0:
        return webuser.page_not_authorized(req, "../youraccount/login?ln=%s" % args['ln'],
                                           navmenuid='youraccount')
    uid = webuser.getUid(req)
    # load the right message language
    _ = gettext_set_language(args['ln'])
    apache_msg = ""
    if args['action']:
        cookie = args['action']
        try:
            action, arguments = mail_cookie_check_authorize_action(cookie)
            apache_msg = make_apache_message(action, arguments, args['referer'])
            # FIXME: Temporary Hack to help CDS current migration
            if CFG_CERN_SITE:
                roles = make_list_apache_firerole(action, arguments)
                if len(roles) == 1:
                    # There's only one role enabled to see this collection
                    # Let's redirect to log to it!
                    return redirect_to_url(req, '%s/%s' % (CFG_SITE_SECURE_URL, make_canonical_urlargd({'realm' : roles[0][0], 'referer' : args['referer']}, {})))
        except InvenioWebAccessMailCookieError:
            pass
    if not CFG_EXTERNAL_AUTH_USING_SSO:
        if args['p_un'] is None or not args['login_method']:
            # No credentials submitted yet: show the login form.
            return page(title=_("Login"),
                        body=webaccount.create_login_page_box(args['referer'], apache_msg, args['ln']),
                        navtrail="""<a class="navtrail" href="%s/youraccount/display?ln=%s">""" % (CFG_SITE_SECURE_URL, args['ln']) + _("Your Account") + """</a>""",
                        description="%s Personalize, Main page" % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                        keywords="%s , personalize" % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                        uid=uid,
                        req=req,
                        secure_page_p=1,
                        language=args['ln'],
                        lastupdated=__lastupdated__,
                        navmenuid='youraccount')
        (iden, args['p_un'], args['p_pw'], msgcode) = webuser.loginUser(req, args['p_un'], args['p_pw'], args['login_method'])
    else:
        # Fake parameters for p_un & p_pw because SSO takes them from the environment
        (iden, args['p_un'], args['p_pw'], msgcode) = webuser.loginUser(req, '', '', CFG_EXTERNAL_AUTH_USING_SSO)
        args['remember_me'] = True
    if len(iden) > 0:
        uid = webuser.update_Uid(req, args['p_un'], args['remember_me'])
        uid2 = webuser.getUid(req)
        if uid2 == -1:
            # Session could not be established: roll the login back.
            webuser.logoutUser(req)
            return webuser.page_not_authorized(req, "../youraccount/login?ln=%s" % args['ln'], uid=uid,
                                               navmenuid='youraccount')
        # login successful!
        if args['referer']:
            return redirect_to_url(req, args['referer'], apache.HTTP_MOVED_TEMPORARILY)
        else:
            return self.display(req, form)
    else:
        mess = CFG_WEBACCESS_WARNING_MSGS[msgcode] % args['login_method']
        if msgcode == 14:
            # Wrong password but the username does exist: use the more
            # specific warning message.
            if webuser.username_exists_p(args['p_un']):
                mess = CFG_WEBACCESS_WARNING_MSGS[15] % args['login_method']
        act = "login"
        return page(title=_("Login"),
                    body=webaccount.perform_back(mess, act, _("login"), args['ln']),
                    navtrail="""<a class="navtrail" href="%s/youraccount/display?ln=%s">""" % (CFG_SITE_SECURE_URL, args['ln']) + _("Your Account") + """</a>""",
                    description="%s Personalize, Main page" % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                    keywords="%s , personalize" % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                    uid=uid,
                    req=req,
                    secure_page_p=1,
                    language=args['ln'],
                    lastupdated=__lastupdated__,
                    navmenuid='youraccount')
def register(self, req, form):
    """Handle account registration.

    Without ``p_nickname``/``p_email`` the registration form is shown.
    Otherwise webuser.registerUser() is attempted and its return code
    (``ruid``) is mapped to a success or failure message below.
    """
    args = wash_urlargd(form, {
        'p_nickname': (str, None),
        'p_email': (str, None),
        'p_pw': (str, None),
        'p_pw2': (str, None),
        'action': (str, "login"),
        'referer': (str, "")})
    if CFG_ACCESS_CONTROL_LEVEL_SITE > 0:
        return webuser.page_not_authorized(req, "../youraccount/register?ln=%s" % args['ln'],
                                           navmenuid='youraccount')
    uid = webuser.getUid(req)
    # load the right message language
    _ = gettext_set_language(args['ln'])
    if args['p_nickname'] is None or args['p_email'] is None:
        # No form data yet: render the registration form.
        return page(title=_("Register"),
                    body=webaccount.create_register_page_box(args['referer'], args['ln']),
                    navtrail="""<a class="navtrail" href="%s/youraccount/display?ln=%s">""" % (CFG_SITE_SECURE_URL, args['ln']) + _("Your Account") + """</a>""",
                    description=_("%s Personalize, Main page") % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                    keywords="%s , personalize" % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                    uid=uid,
                    req=req,
                    secure_page_p = 1,
                    language=args['ln'],
                    lastupdated=__lastupdated__,
                    navmenuid='youraccount')
    mess = ""
    act = ""
    if args['p_pw'] == args['p_pw2']:
        ruid = webuser.registerUser(req, args['p_email'], args['p_pw'], args['p_nickname'])
    else:
        # Password confirmation mismatch; -2 is a local pseudo-code, the
        # other codes come from webuser.registerUser().
        ruid = -2
    if ruid == 0:
        # Success; the exact wording depends on whether account-activation
        # emails and admin approval are configured.
        mess = _("Your account has been successfully created.")
        title = _("Account created")
        if CFG_ACCESS_CONTROL_NOTIFY_USER_ABOUT_NEW_ACCOUNT == 1:
            mess += " " + _("In order to confirm its validity, an email message containing an account activation key has been sent to the given email address.")
            mess += " " + _("Please follow instructions presented there in order to complete the account registration process.")
        if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS >= 1:
            mess += " " + _("A second email will be sent when the account has been activated and can be used.")
        elif CFG_ACCESS_CONTROL_NOTIFY_USER_ABOUT_NEW_ACCOUNT != 1:
            # Account is immediately usable: log the user in right away.
            uid = webuser.update_Uid(req, args['p_email'])
            mess += " " + _("You can now access your %(x_url_open)saccount%(x_url_close)s.") %\
                {'x_url_open': '<a href="' + CFG_SITE_SECURE_URL + '/youraccount/display?ln=' + args['ln'] + '">',
                 'x_url_close': '</a>'}
    elif ruid == -2:
        mess = _("Both passwords must match.")
        mess += " " + _("Please try again.")
        act = "register"
        title = _("Registration failure")
    elif ruid == 1:
        mess = _("Supplied email address %s is invalid.") % args['p_email']
        mess += " " + _("Please try again.")
        act = "register"
        title = _("Registration failure")
    elif ruid == 2:
        mess = _("Desired nickname %s is invalid.") % args['p_nickname']
        mess += " " + _("Please try again.")
        act = "register"
        title = _("Registration failure")
    elif ruid == 3:
        mess = _("Supplied email address %s already exists in the database.") % args['p_email']
        mess += " " + websession_templates.tmpl_lost_your_password_teaser(args['ln'])
        mess += " " + _("Or please try again.")
        act = "register"
        title = _("Registration failure")
    elif ruid == 4:
        mess = _("Desired nickname %s already exists in the database.") % args['p_nickname']
        mess += " " + _("Please try again.")
        act = "register"
        title = _("Registration failure")
    elif ruid == 5:
        mess = _("Users cannot register themselves, only admin can register them.")
        act = "register"
        title = _("Registration failure")
    elif ruid == 6:
        mess = _("The site is having troubles in sending you an email for confirming your email address.") + _("The error has been logged and will be taken in consideration as soon as possible.")
        act = "register"
        title = _("Registration failure")
    else:
        # this should never happen
        mess = _("Internal Error")
        act = "register"
        title = _("Registration failure")
    return page(title=title,
                body=webaccount.perform_back(mess,act, (act == 'register' and _("register") or ""), args['ln']),
                navtrail="""<a class="navtrail" href="%s/youraccount/display?ln=%s">""" % (CFG_SITE_SECURE_URL, args['ln']) + _("Your Account") + """</a>""",
                description=_("%s Personalize, Main page") % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                keywords="%s , personalize" % CFG_SITE_NAME_INTL.get(args['ln'], CFG_SITE_NAME),
                uid=uid,
                req=req,
                secure_page_p = 1,
                language=args['ln'],
                lastupdated=__lastupdated__,
                navmenuid='youraccount')
class WebInterfaceYourGroupsPages(WebInterfaceDirectory):
    """Web interface for the "/yourgroups" pages: listing, creating,
    joining, leaving and editing user groups, and managing their members.

    Every handler follows the same pattern: wash the request arguments,
    resolve the user, refuse guests / unauthorized users, delegate the real
    work to the webgroup module and render the returned body with page().
    """
    _exports = ['', 'display', 'create', 'join', 'leave', 'edit', 'members']
    def index(self, req, form):
        # Bare "/yourgroups" redirects to the group-listing page.
        redirect_to_url(req, '/yourgroups/display')
    def display(self, req, form):
        """
        Displays groups the user is admin of
        and the groups the user is member of (but not admin)
        @param ln: language
        @return the page for all the groups
        """
        argd = wash_urlargd(form, {})
        uid = webuser.getUid(req)
        # load the right message language
        _ = gettext_set_language(argd['ln'])
        # Guests cannot own or join groups; refuse them (and closed sites).
        if uid == -1 or webuser.isGuestUser(uid) or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
            return webuser.page_not_authorized(req, "../yourgroups/display",
                                               navmenuid='yourgroups')
        (body, errors, warnings) = webgroup.perform_request_groups_display(uid=uid,
                                                                           ln=argd['ln'])
        return page(title = _("Your Groups"),
                    body = body,
                    navtrail = webgroup.get_navtrail(argd['ln']),
                    uid = uid,
                    req = req,
                    language = argd['ln'],
                    lastupdated = __lastupdated__,
                    errors = errors,
                    warnings = warnings,
                    navmenuid = 'yourgroups')
    def create(self, req, form):
        """create(): interface for creating a new group
        @param group_name : name of the new webgroup. Must be filled
        @param group_description : description of the new webgroup (optional)
        @param join_policy : join policy of the new webgroup. Must be chosen
        @param *button: which button was pressed
        @param ln: language
        @return the compose page Create group
        """
        argd = wash_urlargd(form, {'group_name': (str, ""),
                                   'group_description': (str, ""),
                                   'join_policy': (str, ""),
                                   'create_button':(str, ""),
                                   'cancel':(str, "")
                                   })
        uid = webuser.getUid(req)
        # load the right message language
        _ = gettext_set_language(argd['ln'])
        if uid == -1 or webuser.isGuestUser(uid) or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
            return webuser.page_not_authorized(req, "../yourgroups/create",
                                               navmenuid='yourgroups')
        if argd['cancel']:
            # Cancel pressed: go back to the group listing.
            url = CFG_SITE_URL + '/yourgroups/display?ln=%s'
            url %= argd['ln']
            redirect_to_url(req, url)
        if argd['create_button'] :
            # Form submitted: actually create the group.
            (body, errors, warnings)= webgroup.perform_request_create_group(uid=uid,
                                                                            group_name=argd['group_name'],
                                                                            group_description=argd['group_description'],
                                                                            join_policy=argd['join_policy'],
                                                                            ln = argd['ln'])
        else:
            # First visit: render the (possibly pre-filled) creation form.
            (body, errors, warnings) = webgroup.perform_request_input_create_group(group_name=argd['group_name'],
                                                                                   group_description=argd['group_description'],
                                                                                   join_policy=argd['join_policy'],
                                                                                   ln=argd['ln'])
        title = _("Create new group")
        return page(title = title,
                    body = body,
                    navtrail = webgroup.get_navtrail(argd['ln'], title),
                    uid = uid,
                    req = req,
                    language = argd['ln'],
                    lastupdated = __lastupdated__,
                    errors = errors,
                    warnings = warnings,
                    navmenuid = 'yourgroups')
    def join(self, req, form):
        """join(): interface for joining a new group
        @param grpID : list of the group the user wants to become a member.
            The user must select only one group.
        @param group_name : will search for groups matching group_name
        @param *button: which button was pressed
        @param ln: language
        @return the compose page Join group
        """
        argd = wash_urlargd(form, {'grpID':(list, []),
                                   'group_name':(str, ""),
                                   'find_button':(str, ""),
                                   'join_button':(str, ""),
                                   'cancel':(str, "")
                                   })
        uid = webuser.getUid(req)
        # load the right message language
        _ = gettext_set_language(argd['ln'])
        if uid == -1 or webuser.isGuestUser(uid) or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
            return webuser.page_not_authorized(req, "../yourgroups/join",
                                               navmenuid='yourgroups')
        if argd['cancel']:
            # Cancel pressed: go back to the group listing.
            url = CFG_SITE_URL + '/yourgroups/display?ln=%s'
            url %= argd['ln']
            redirect_to_url(req, url)
        if argd['join_button']:
            # search flag: 1 when a group-name filter was supplied.
            search = 0
            if argd['group_name']:
                search = 1
            (body, errors, warnings) = webgroup.perform_request_join_group(uid,
                                                                           argd['grpID'],
                                                                           argd['group_name'],
                                                                           search,
                                                                           argd['ln'])
        else:
            search = 0
            if argd['find_button']:
                search = 1
            (body, errors, warnings) = webgroup.perform_request_input_join_group(uid,
                                                                                 argd['group_name'],
                                                                                 search,
                                                                                 ln=argd['ln'])
        title = _("Join New Group")
        return page(title = title,
                    body = body,
                    navtrail = webgroup.get_navtrail(argd['ln'], title),
                    uid = uid,
                    req = req,
                    language = argd['ln'],
                    lastupdated = __lastupdated__,
                    errors = errors,
                    warnings = warnings,
                    navmenuid = 'yourgroups')
    def leave(self, req, form):
        """leave(): interface for leaving a group
        @param grpID : group the user wants to leave.
        @param group_name : name of the group the user wants to leave
        @param *button: which button was pressed
        @param confirmed : the user is first asked to confirm
        @param ln: language
        @return the compose page Leave group
        """
        argd = wash_urlargd(form, {'grpID':(str, ""),
                                   'group_name':(str, ""),
                                   'leave_button':(str, ""),
                                   'cancel':(str, ""),
                                   'confirmed': (int, 0)
                                   })
        uid = webuser.getUid(req)
        # load the right message language
        _ = gettext_set_language(argd['ln'])
        if uid == -1 or webuser.isGuestUser(uid) or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
            return webuser.page_not_authorized(req, "../yourgroups/leave",
                                               navmenuid='yourgroups')
        if argd['cancel']:
            # Cancel pressed: go back to the group listing.
            url = CFG_SITE_URL + '/yourgroups/display?ln=%s'
            url %= argd['ln']
            redirect_to_url(req, url)
        if argd['leave_button']:
            # 'confirmed' distinguishes the confirmation page from the act.
            (body, errors, warnings) = webgroup.perform_request_leave_group(uid,
                                                                            argd['grpID'],
                                                                            argd['confirmed'],
                                                                            argd['ln'])
        else:
            (body, errors, warnings) = webgroup.perform_request_input_leave_group(uid=uid,
                                                                                  ln=argd['ln'])
        title = _("Leave Group")
        return page(title = title,
                    body = body,
                    navtrail = webgroup.get_navtrail(argd['ln'], title),
                    uid = uid,
                    req = req,
                    language = argd['ln'],
                    lastupdated = __lastupdated__,
                    errors = errors,
                    warnings = warnings,
                    navmenuid = 'yourgroups')
    def edit(self, req, form):
        """edit(): interface for editing group
        @param grpID : group ID
        @param group_name : name of the new webgroup. Must be filled
        @param group_description : description of the new webgroup (optional)
        @param join_policy : join policy of the new webgroup. Must be chosen
        @param update: button update group pressed
        @param delete: button delete group pressed
        @param cancel: button cancel pressed
        @param confirmed : the user is first asked to confirm before deleting
        @param ln: language
        @return the main page displaying all the groups
        """
        argd = wash_urlargd(form, {'grpID': (str, ""),
                                   'update': (str, ""),
                                   'cancel': (str, ""),
                                   'delete': (str, ""),
                                   'group_name': (str, ""),
                                   'group_description': (str, ""),
                                   'join_policy': (str, ""),
                                   'confirmed': (int, 0)
                                   })
        uid = webuser.getUid(req)
        # load the right message language
        _ = gettext_set_language(argd['ln'])
        if uid == -1 or webuser.isGuestUser(uid) or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
            return webuser.page_not_authorized(req, "../yourgroups/display",
                                               navmenuid='yourgroups')
        if argd['cancel']:
            # Cancel pressed: go back to the group listing.
            url = CFG_SITE_URL + '/yourgroups/display?ln=%s'
            url %= argd['ln']
            redirect_to_url(req, url)
        elif argd['delete']:
            # Delete flow; 'confirmed' gates the actual deletion.
            (body, errors, warnings) = webgroup.perform_request_delete_group(uid=uid,
                                                                             grpID=argd['grpID'],
                                                                             confirmed=argd['confirmed'])
        elif argd['update']:
            (body, errors, warnings) = webgroup.perform_request_update_group(uid= uid,
                                                                             grpID=argd['grpID'],
                                                                             group_name=argd['group_name'],
                                                                             group_description=argd['group_description'],
                                                                             join_policy=argd['join_policy'],
                                                                             ln=argd['ln'])
        else :
            # No button: show the edit form pre-filled with current values.
            (body, errors, warnings)= webgroup.perform_request_edit_group(uid=uid,
                                                                          grpID=argd['grpID'],
                                                                          ln=argd['ln'])
        title = _("Edit Group")
        return page(title = title,
                    body = body,
                    navtrail = webgroup.get_navtrail(argd['ln'], title),
                    uid = uid,
                    req = req,
                    language = argd['ln'],
                    lastupdated = __lastupdated__,
                    errors = errors,
                    warnings = warnings,
                    navmenuid = 'yourgroups')
    def members(self, req, form):
        """member(): interface for managing members of a group
        @param grpID : group ID
        @param add_member: button add_member pressed
        @param remove_member: button remove_member pressed
        @param reject_member: button reject_member pressed
        @param delete: button delete group pressed
        @param member_id : ID of the existing member selected
        @param pending_member_id : ID of the pending member selected
        @param cancel: button cancel pressed
        @param info : info about last user action
        @param ln: language
        @return the same page with data updated
        """
        argd = wash_urlargd(form, {'grpID': (int, 0),
                                   'cancel': (str, ""),
                                   'add_member': (str, ""),
                                   'remove_member': (str, ""),
                                   'reject_member': (str, ""),
                                   'member_id': (int, 0),
                                   'pending_member_id': (int, 0)
                                   })
        uid = webuser.getUid(req)
        # load the right message language
        _ = gettext_set_language(argd['ln'])
        if uid == -1 or webuser.isGuestUser(uid) or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
            return webuser.page_not_authorized(req, "../yourgroups/display",
                                               navmenuid='yourgroups')
        if argd['cancel']:
            # Cancel pressed: go back to the group listing.
            url = CFG_SITE_URL + '/yourgroups/display?ln=%s'
            url %= argd['ln']
            redirect_to_url(req, url)
        if argd['remove_member']:
            (body, errors, warnings) = webgroup.perform_request_remove_member(uid=uid,
                                                                              grpID=argd['grpID'],
                                                                              member_id=argd['member_id'],
                                                                              ln=argd['ln'])
        elif argd['reject_member']:
            (body, errors, warnings) = webgroup.perform_request_reject_member(uid=uid,
                                                                              grpID=argd['grpID'],
                                                                              user_id=argd['pending_member_id'],
                                                                              ln=argd['ln'])
        elif argd['add_member']:
            (body, errors, warnings) = webgroup.perform_request_add_member(uid=uid,
                                                                           grpID=argd['grpID'],
                                                                           user_id=argd['pending_member_id'],
                                                                           ln=argd['ln'])
        else:
            # No action button: just show the member-management page.
            (body, errors, warnings)= webgroup.perform_request_manage_member(uid=uid,
                                                                             grpID=argd['grpID'],
                                                                             ln=argd['ln'])
        title = _("Edit group members")
        return page(title = title,
                    body = body,
                    navtrail = webgroup.get_navtrail(argd['ln'], title),
                    uid = uid,
                    req = req,
                    language = argd['ln'],
                    lastupdated = __lastupdated__,
                    errors = errors,
                    warnings = warnings,
                    navmenuid = 'yourgroups')
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import unicode_literals
import json
import logging
import requests
from requests.exceptions import HTTPError
# When releasing a new version to PyPI please also file a bug to request
# that it is uploaded to http://pypi.pub.build.mozilla.org/pub/ too.
# See bug 1191498 for an example of this.
__version__ = '1.6.0'
logger = logging.getLogger(__name__)
class ValidatorMixin(object):
    """Mixin providing required-property validation for Treeherder
    data structures (which hold their payload in ``self.data``)."""

    def validate(self, required_properties=None):
        """
        Implement job object validation rules. If a rule fails to validate
        raise TreeherderClientError

        Classes using this mixin should implement a required_properties
        dict. The keys in this dict are the required keys in the structure
        contained in self.data. Nested keys can be specified with the '.'
        operator. Each key in required_properties should have a dict value
        like so:

            {
                'len': optional, some int, max allowed len of property value
                'type': optional, some data type, required type of property
                        value
                'cb': some function reference, called with
                      list of keys, list of values, required_properties key
            }

        Example:

            self.required_properties = {
                'revision_hash': {
                    'len': 50, 'cb': self.validate_existence
                },
                'project': {
                    'cb': self.validate_existence
                },
                'job': {
                    'type': dict, 'cb': self.validate_existence
                },
                'job.job_guid': {
                    'len': 50, 'cb': self.validate_existence
                }
            }
        """
        # BUGFIX(idiom): the default was a shared mutable `{}`; `None`
        # behaves identically (both are falsy, falling back to
        # self.required_properties) without the mutable-default pitfall.
        required_properties = required_properties or self.required_properties
        for prop in required_properties:
            cb = required_properties[prop]['cb']
            cb(prop.split('.'), required_properties[prop], prop)

    def validate_existence(self, keys, values, property_key):
        """
        This required_properties callback method confirms the following.

        - The keys provided are found in required_properties
        - The type of the values match the specified type
        - The values are defined and less than the required len
          if a len is specified

        If any of these assertions fail TreeherderClientError is raised
        """
        missing_keys = []
        property_errors = ''

        # Walk the nested key path ('job.job_guid' -> data['job']['job_guid'])
        # to extract the value under validation.
        v = None
        for index, k in enumerate(keys):
            if index > 0:
                # NOTE: only KeyError is caught; if an intermediate value is
                # not a dict this will raise (TypeError) — pre-existing
                # behavior, kept as-is.
                try:
                    v = v[k]
                except KeyError:
                    missing_keys.append(k)
            else:
                try:
                    v = self.data[k]
                except KeyError:
                    missing_keys.append(k)

        if missing_keys:
            property_errors += ('\tThe required Property, {0}, is '
                                'missing\n'.format('.'.join(missing_keys)))

        # Falsy values ('' , 0, None, empty containers) count as undefined.
        if not v:
            property_errors += '\tValue not defined for {0}\n'.format(
                property_key)
        elif ('type' in values) and (not isinstance(v, values['type'])):
            property_errors += ('\tThe value type, {0}, should be '
                                '{1}\n'.format(type(v), values['type']))

        max_limit = values.get('len', None)
        if v and max_limit and (len(v) > max_limit):
            property_errors += ('\tValue length exceeds maximum {0} char '
                                'limit: {1}\n'.format(str(max_limit), str(v)))

        if property_errors:
            msg = ('{0} structure validation errors detected for property:{1}'
                   '\n{2}\n{3}\n'.format(
                       self.__class__.__name__, property_key, property_errors,
                       json.dumps(self.data)))
            raise TreeherderClientError(msg, [])
class TreeherderData(object):
    """Base class holding the raw ``data`` dict of a Treeherder structure.

    Subclasses are expected to provide ``init_data()``, which fills
    ``self.data`` with the empty skeleton for their structure.
    """

    def __init__(self, data=None):
        # BUGFIX(idiom): the default was a shared mutable `{}`; `None`
        # behaves identically (any falsy value triggers init_data()).
        self.data = {}
        if data:
            self.data = data
        else:
            self.init_data()

    def to_json(self):
        """Serialize the underlying data dict to a JSON string."""
        return json.dumps(self.data)
class TreeherderJob(TreeherderData, ValidatorMixin):
    """Builder for a Treeherder job submission structure.

    The payload lives in ``self.data`` (see init_data() for the full
    schema); the ``add_*`` methods are thin setters over that dict.
    """

    # Allowed values for the parse_status of a log reference.
    PARSE_STATUSES = {'pending', 'parsed', 'error'}

    def __init__(self, data=None):
        # BUGFIX(idiom): the default was a shared mutable `{}`; `None` is
        # equally falsy, so the base class still calls init_data().
        super(TreeherderJob, self).__init__(data)

        # Provide minimal json structure validation
        self.required_properties = {
            'revision_hash': {'len': 50, 'cb': self.validate_existence},
            'project': {'cb': self.validate_existence},
            'job': {'type': dict, 'cb': self.validate_existence},
            'job.job_guid': {'len': 50, 'cb': self.validate_existence}
        }

    # --- top-level setters -------------------------------------------------

    def add_revision_hash(self, revision_hash):
        self.data['revision_hash'] = revision_hash

    def add_coalesced_guid(self, guids):
        # Appends to (does not replace) the list of coalesced job guids.
        if guids:
            self.data['coalesced'].extend(guids)

    def add_project(self, project):
        self.data['project'] = project

    # --- job-dict setters --------------------------------------------------

    def add_job_guid(self, guid):
        self.data['job']['job_guid'] = guid

    def add_job_name(self, name):
        self.data['job']['name'] = name

    def add_job_symbol(self, symbol):
        self.data['job']['job_symbol'] = symbol

    def add_group_name(self, name):
        self.data['job']['group_name'] = name

    def add_group_symbol(self, symbol):
        self.data['job']['group_symbol'] = symbol

    def add_description(self, desc):
        self.data['job']['desc'] = desc

    def add_product_name(self, name):
        self.data['job']['product_name'] = name

    def add_state(self, state):
        self.data['job']['state'] = state

    def add_result(self, result):
        self.data['job']['result'] = result

    def add_reason(self, reason):
        self.data['job']['reason'] = reason

    def add_who(self, who):
        self.data['job']['who'] = who

    def add_submit_timestamp(self, tstamp):
        self.data['job']['submit_timestamp'] = tstamp

    def add_start_timestamp(self, tstamp):
        self.data['job']['start_timestamp'] = tstamp

    def add_end_timestamp(self, tstamp):
        self.data['job']['end_timestamp'] = tstamp

    def add_machine(self, machine):
        self.data['job']['machine'] = machine

    def add_build_url(self, url):
        self.data['job']['build_url'] = url

    def add_build_info(self, os_name, platform, arch):
        self.data['job']['build_platform']['os_name'] = os_name
        self.data['job']['build_platform']['platform'] = platform
        self.data['job']['build_platform']['architecture'] = arch

    def add_machine_info(self, os_name, platform, arch):
        self.data['job']['machine_platform']['os_name'] = os_name
        self.data['job']['machine_platform']['platform'] = platform
        self.data['job']['machine_platform']['architecture'] = arch

    def add_option_collection(self, option_collection):
        # Merges into (does not replace) the existing option collection.
        if option_collection:
            self.data['job']['option_collection'].update(option_collection)

    def add_tier(self, tier):
        self.data['job']['tier'] = tier

    def add_log_reference(self, name, url, parse_status='pending'):
        """
        parse_status - one of 'pending', 'parsed' or 'error'
        """
        if parse_status not in self.PARSE_STATUSES:
            msg = "{0}: Invalid parse_status '{1}': must be one of: {2}".format(
                self.__class__.__name__,
                parse_status,
                ', '.join(self.PARSE_STATUSES)
            )
            raise TreeherderClientError(msg, [])

        # Both a name and a url are required for a log reference.
        if name and url:
            self.data['job']['log_references'].append(
                {'url': url, 'name': name, 'parse_status': parse_status}
            )

    def add_artifact(self, name, artifact_type, blob):
        # Empty blobs are silently dropped.
        if blob:
            self.data['job']['artifacts'].append({
                'name': name,
                'type': artifact_type,
                'blob': blob,
                'job_guid': self.data['job']['job_guid']
            })

    def init_data(self):
        """Populate self.data with the empty job-submission skeleton."""
        self.data = {

            'revision_hash': '',

            'project': '',

            'job': {
                # Stored in project_jobs_1.job.job_guid
                'job_guid': '',

                # Stored in treeherder_reference_1.job_type.name
                'name': '',

                # Stored in treeherder_reference_1.job_type.name
                'desc': '',

                # Stored symbol representing the job in the UI
                # Stored in treeherder_reference_1.job_type.symbol
                'job_symbol': '',

                # human readable group name (can be null)
                # Stored in treeherder_reference_1.job_group.name
                'group_name': '',

                # Stored symbol representing the job group (can be null)
                # Stored in treeherder_reference_1.job_group.symbol
                'group_symbol': '',

                # Stored in treeherder_reference_1.product
                'product_name': '',

                # Stored in project_jobs_1.job.state
                'state': '',

                # Stored in project_jobs_1.job.result
                'result': '',

                # Stored in project_jobs_1.job.reason
                'reason': '',

                # Stored in project_jobs_1.job.who
                'who': '',

                # Stored in project_jobs_1.job.submit_timestamp
                'submit_timestamp': '',

                # Stored in project_jobs_1.job.start_timestamp
                'start_timestamp': '',

                # Stored in project_jobs_1.job.end_timestamp
                'end_timestamp': '',

                # Stored in treeherder_reference_1.machine.name
                'machine': '',

                # Stored in project_jobs_1.job_artifact, name=build_url
                'build_url': '',

                # Stored in
                # treeherder_reference_1.build_platform.os_name,
                # treeherder_reference_1.build_platform.platform,
                # treeherder_reference_1.build_platform.architecture,
                'build_platform': {
                    'os_name': '', 'platform': '', 'architecture': ''},

                # Stored in:
                # treeherder_reference_1.machine_platform.os_name,
                # treeherder_reference_1.machine_platform.platform,
                # treeherder_reference_1.machine_platform.architecture,
                'machine_platform': {
                    'os_name': '', 'platform': '', 'architecture': ''},

                # Stored in treeherder_reference_1.option_collection and
                # treeherder_reference_1.option
                # Ex: 'debug | pgo | asan | opt': True
                'option_collection': {},

                # Stored in project_jobs_1.job_log_url
                # Example:
                # log_references: [
                #     { url: 'http://ftp.mozilla.org/mozilla.org/firefox.gz',
                #       name: 'unittest' },
                'log_references': [],

                # Stored in
                # project_jobs_1.job_artifact.name
                # project_jobs_1.job_artifact.type
                # project_jobs_1.job_artifact.blob
                'artifacts': []
            },

            # List of job_guids that were coalesced to this job
            # Stored in project_jobs_1.job.coalesced_job_guid
            # Where the value of coalesced_job_guid is set to job_guid
            # for the list of job_guids provided in coalesced
            'coalesced': []
        }
class TreeherderRevision(TreeherderData, ValidatorMixin):
    """
    Supports building a revision structure that is contained in
    TreeherderResultSet.
    """

    def __init__(self, data=None):
        # BUGFIX(idiom): the default was a shared mutable `{}`; `None` is
        # equally falsy, so the base class still calls init_data().
        super(TreeherderRevision, self).__init__(data)

        # Provide minimal json structure validation
        self.required_properties = {
            'revision': {'len': 50, 'cb': self.validate_existence},
            'repository': {'cb': self.validate_existence},
        }

    def init_data(self):
        """Populate self.data with the empty revision skeleton."""
        self.data = {
            # Stored in project_jobs_1.revision.author
            'author': '',
            # Stored in project_jobs_1.revision.comments
            'comment': '',
            # Stored in treeherder_reference_1.repository.name
            'repository': '',
            # Stored in project_jobs_1.revision.revision
            'revision': '',
        }

    def add_author(self, author):
        self.data['author'] = author

    def add_comment(self, comment):
        self.data['comment'] = comment

    def add_repository(self, repository):
        self.data['repository'] = repository

    def add_revision(self, revision):
        self.data['revision'] = revision
class TreeherderResultSet(TreeherderData, ValidatorMixin):
    """
    Supports building a treeherder result set
    """

    def __init__(self, data=None):
        # Use None instead of a mutable {} default (shared-state pitfall);
        # a falsy ``data`` triggers init_data() in TreeherderData.
        super(TreeherderResultSet, self).__init__(data)
        # Provide minimal json structure validation
        self.required_properties = {
            'revision_hash': {'len': 50, 'cb': self.validate_existence},
            'revisions': {'type': list, 'cb': self.validate_existence},
            'author': {'len': 150, 'cb': self.validate_existence}
        }

    def init_data(self):
        """Initialize self.data with an empty result set skeleton."""
        self.data = {
            # Stored in project_jobs_1.result_set.push_timestamp
            'push_timestamp': None,
            # Stored in project_jobs_1.result_set.revision_hash
            'revision_hash': '',
            # Stored in project_jobs_1.result_set.author
            'author': '',
            # Stored in project_jobs_1.revision, new row per revision
            'revisions': [],
            # TODO: add type column to resultset in treeherder-service
            'type': '',
        }

    def add_push_timestamp(self, push_timestamp):
        self.data['push_timestamp'] = push_timestamp

    def add_revision_hash(self, revision_hash):
        self.data['revision_hash'] = revision_hash

    def add_author(self, author):
        self.data['author'] = author

    def add_revisions(self, revisions):
        # Replaces the whole revision list; falsy input is ignored.
        if revisions:
            self.data['revisions'] = revisions

    def add_revision(self, revision):
        # Validates the revision before appending its raw data dict.
        if revision:
            revision.validate()
            self.data['revisions'].append(revision.data)

    def add_type(self, resultset_type):
        self.data['type'] = resultset_type

    def get_revision(self, data=None):
        """Return a new TreeherderRevision built from ``data``."""
        return TreeherderRevision(data)
class TreeherderArtifact(TreeherderData, ValidatorMixin):
    """
    Supports building a treeherder job artifact
    """

    def __init__(self, data=None):
        # Use None instead of a mutable {} default (shared-state pitfall);
        # a falsy ``data`` triggers init_data() in TreeherderData.
        super(TreeherderArtifact, self).__init__(data)
        # Provide minimal json structure validation
        self.required_properties = {
            'blob': {'cb': self.validate_existence},
            'type': {'cb': self.validate_existence},
            'name': {'cb': self.validate_existence},
            'job_guid': {'cb': self.validate_existence}
        }

    def init_data(self):
        """Initialize self.data with an empty artifact skeleton."""
        self.data = {
            # Stored in project_jobs_1.artifact.blob
            'blob': '',
            # Stored in project_jobs_1.artifact.type
            'type': '',
            # Stored in project_jobs_1.artifact.name
            'name': '',
            # Stored in project_jobs_1.artifact.job_guid
            'job_guid': None
        }

    def add_blob(self, blob):
        self.data['blob'] = blob

    def add_type(self, type):
        # NOTE: parameter name shadows the builtin, but is kept for
        # keyword-argument backward compatibility with existing callers.
        self.data['type'] = type

    def add_name(self, name):
        self.data['name'] = name

    def add_job_guid(self, job_guid):
        self.data['job_guid'] = job_guid
class TreeherderCollection(object):
    """
    Base class for treeherder data collections
    """

    def __init__(self, endpoint_base, data=None):
        """
        :param endpoint_base: API endpoint fragment this collection POSTs to
        :param data: optional list of TreeherderData instances
        """
        # Use None instead of a mutable [] default (shared-state pitfall).
        self.data = []
        self.endpoint_base = endpoint_base

        if data:
            self.data = data

    def get_collection_data(self):
        """
        Build data structure containing the data attribute only for
        each item in the collection
        """
        data_struct = []
        for datum_instance in self.data:
            data_struct.append(datum_instance.data)
        return data_struct

    def to_json(self):
        """
        Convert list of data objects to json
        """
        return json.dumps(self.get_collection_data())

    def add(self, datum_instance):
        """
        Add a data structure class instance to data list
        """
        self.data.append(datum_instance)

    def validate(self):
        """
        validate the data structure class
        """
        for d in self.data:
            d.validate()

    def get_chunks(self, chunk_size):
        """
        Return a generator of new collections broken into chunks of size ``chunk_size``.

        Each chunk will be a ``TreeherderCollection`` of the same
        type as the original with a max of ``chunk_size`` count of
        ``TreeherderData`` objects.

        Each collection must then be POSTed individually.
        """
        for i in range(0, len(self.data), chunk_size):
            # we must copy not only the data chunk,
            # but also the endpoint_base or any other field of the
            # collection. In the case of a TreeherderJobCollection,
            # this is determined in the constructor.
            chunk = self.__class__(self.data[i:i + chunk_size])
            chunk.endpoint_base = self.endpoint_base
            yield chunk
class TreeherderJobCollection(TreeherderCollection):
    """
    Collection of job objects
    """

    def __init__(self, data=None):
        # Use None instead of a mutable [] default (shared-state pitfall).
        super(TreeherderJobCollection, self).__init__('jobs', data)

    def get_job(self, data=None):
        """Return a new TreeherderJob built from ``data``."""
        return TreeherderJob(data)
class TreeherderResultSetCollection(TreeherderCollection):
    """
    Collection of result set objects
    """

    def __init__(self, data=None):
        # Use None instead of a mutable [] default (shared-state pitfall).
        super(TreeherderResultSetCollection, self).__init__('resultset', data)

    def get_resultset(self, data=None):
        """Return a new TreeherderResultSet built from ``data``."""
        return TreeherderResultSet(data)
class TreeherderArtifactCollection(TreeherderCollection):
    """
    Collection of job artifacts
    """

    def __init__(self, data=None):
        # Use None instead of a mutable [] default (shared-state pitfall).
        super(TreeherderArtifactCollection, self).__init__('artifact', data)

    def get_artifact(self, data=None):
        """Return a new TreeherderArtifact built from ``data``."""
        return TreeherderArtifact(data)
class TreeherderClient(object):
    """
    Treeherder client class

    Thin HTTP wrapper over the Treeherder REST API: GET helpers for
    reference data and per-project listings, POST helpers for submitting
    collections and log-parse status updates.
    """
    PROTOCOLS = {'http', 'https'} # supported protocols
    API_VERSION = '1.0'
    # Sent with every request; the Accept version pins the API contract.
    REQUEST_HEADERS = {
        'Accept': 'application/json; version={}'.format(API_VERSION),
        'User-Agent': 'treeherder-pyclient/{}'.format(__version__),
    }

    # Endpoint fragments appended to the base URI by the URI helpers below.
    UPDATE_ENDPOINT = 'job-log-url/{}/update_parse_status'
    RESULTSET_ENDPOINT = 'resultset'
    JOBS_ENDPOINT = 'jobs'
    ARTIFACTS_ENDPOINT = 'artifact'
    OPTION_COLLECTION_HASH_ENDPOINT = 'optioncollectionhash'
    REPOSITORY_ENDPOINT = 'repository'
    JOBGROUP_ENDPOINT = 'jobgroup'
    JOBTYPE_ENDPOINT = 'jobtype'

    # Page size used when transparently paginating list requests.
    MAX_COUNT = 2000

    def __init__(
            self, protocol='https', host='treeherder.mozilla.org',
            timeout=120, auth=None):
        """
        :param protocol: protocol to use (http or https)
        :param host: treeherder host to post to
        :param timeout: maximum time it can take for a request to complete
        :param auth: an instance of TreeherderAuth holding the auth credentials
        """
        self.host = host

        if protocol not in self.PROTOCOLS:
            raise AssertionError('Protocol "%s" not supported; please use one '
                                 'of %s' % (protocol,
                                            ', '.join(self.PROTOCOLS)))
        self.protocol = protocol
        self.timeout = timeout
        self.auth = auth

    def _get_project_uri(self, project, endpoint):
        # Project-scoped URI:
        # <protocol>://<host>/api/project/<project>/<endpoint>/
        return '{0}://{1}/api/project/{2}/{3}/'.format(
            self.protocol, self.host, project, endpoint
        )

    def _get_uri(self, endpoint):
        # Global URI: <protocol>://<host>/api/<endpoint>
        # NOTE(review): no trailing slash here, unlike _get_project_uri —
        # confirm the server accepts both forms.
        uri = '{0}://{1}/api/{2}'.format(
            self.protocol, self.host, endpoint)

        return uri

    def _get_json_list(self, endpoint, timeout, project=None, **params):
        """
        GET a list endpoint and return the combined 'results' list.

        When the caller requests more than MAX_COUNT items (or all items,
        via count=None) this transparently pages through the endpoint in
        MAX_COUNT-sized requests using count/offset parameters.
        """
        if "count" in params and (params["count"] is None or params["count"] > self.MAX_COUNT):
            total = None if params["count"] is None else params["count"]
            count = self.MAX_COUNT
            offset = 0
            data = []
            while True:
                params["count"] = count
                params["offset"] = offset
                new_data = self._get_json(endpoint, timeout, project=project, **params)["results"]
                data += new_data
                # A short page means the server has no further results.
                if len(new_data) < self.MAX_COUNT:
                    return data
                offset += count
                if total is not None:
                    # Shrink the final page to the remaining requested count.
                    # NOTE(review): when total is an exact multiple of
                    # MAX_COUNT this can issue one extra request with
                    # count=0 — confirm this is intended.
                    count = min(total-offset, self.MAX_COUNT)
        else:
            return self._get_json(endpoint, timeout, project=project, **params)["results"]

    def _get_json(self, endpoint, timeout, project=None, **params):
        """GET a single JSON document; logs and re-raises HTTP errors."""
        if timeout is None:
            # Fall back to the client-wide timeout configured in __init__.
            timeout = self.timeout

        if project is None:
            uri = self._get_uri(endpoint)
        else:
            uri = self._get_project_uri(project, endpoint)

        resp = requests.get(uri, timeout=timeout, params=params,
                            headers=self.REQUEST_HEADERS)
        try:
            resp.raise_for_status()
        except HTTPError as e:
            response = e.response
            logger.error("Error submitting data to %s" % response.request.url)
            logger.error("Request headers: %s" % response.request.headers)
            logger.error("Response headers: %s" % response.headers)
            logger.error("Response body: %s" % response.content)
            raise

        return resp.json()

    def _post_json(self, project, endpoint, data,
                   timeout, auth):
        """
        POST ``data`` as JSON to a project endpoint.

        Logs and re-raises HTTP errors. Returns None — the HTTP response
        object is not exposed to callers.
        """
        if timeout is None:
            timeout = self.timeout

        # Per-call auth takes precedence over the client-wide auth.
        auth = auth or self.auth
        uri = self._get_project_uri(project, endpoint)

        resp = requests.post(uri, json=data,
                             headers=self.REQUEST_HEADERS,
                             timeout=timeout, auth=auth)
        try:
            resp.raise_for_status()
        except HTTPError as e:
            response = e.response
            logger.error("Error submitting data to %s" % response.request.url)
            logger.error("Request headers: %s" % response.request.headers)
            logger.error("Request body: %s" % response.request.body)
            logger.error("Response headers: %s" % response.headers)
            logger.error("Response body: %s" % response.content)
            raise

    def get_option_collection_hash(self):
        """
        Gets option collection hash, a mapping of hash values to build properties

        Returns a dictionary with the following structure:

            {
                hashkey1: [ { key: value }, { key: value }, ... ],
                hashkey2: [ { key: value }, { key: value }, ... ],
                ...
            }
        """
        resp = self._get_json(self.OPTION_COLLECTION_HASH_ENDPOINT, None)
        ret = {}
        for result in resp:
            ret[result['option_collection_hash']] = result['options']

        return ret

    def get_repositories(self):
        """
        Gets a list of valid treeherder repositories.

        Returns a list with the following structure:

            [
                {name: repository-name, dvcs_type: dcvs-type, ...},
                ...
            ]
        """
        return self._get_json(self.REPOSITORY_ENDPOINT, None)

    def get_job_groups(self):
        """
        Gets a list of job groups stored inside Treeherder

        Returns a list of dictionaries with the following properties:

            {
              id: <id>
              symbol: <symbol>
              name: <name>
              ...
            }
        """
        return self._get_json(self.JOBGROUP_ENDPOINT, None)

    def get_job_types(self):
        """
        Gets a list of job types stored inside Treeherder

        Returns a list of dictionaries with the following properties:

            {
              id: <id>
              job_group: <job_group_id>
              symbol: <symbol>
              name: <name>
              ...
            }
        """
        return self._get_json(self.JOBTYPE_ENDPOINT, None)

    def get_resultsets(self, project, **params):
        """
        Gets resultsets from project, filtered by parameters

        By default this method will just return the latest 10 result sets (if they exist)

        :param project: project (repository name) to query data for
        :param params: keyword arguments to filter results
        """
        return self._get_json_list(self.RESULTSET_ENDPOINT, None, project, **params)

    def get_jobs(self, project, **params):
        """
        Gets jobs from project, filtered by parameters

        :param project: project (repository name) to query data for
        :param params: keyword arguments to filter results
        """
        return self._get_json_list(self.JOBS_ENDPOINT, None, project, **params)

    def get_artifacts(self, project, **params):
        """
        Gets artifact list from project, filtered by parameters

        :param project: project (repository name) to query for
        :param params: keyword arguments to filter results
        """
        # NOTE: not paginated — a single _get_json call, unlike get_jobs.
        response = self._get_json(self.ARTIFACTS_ENDPOINT, None, project, **params)
        return response

    def post_collection(self, project, collection_inst, timeout=None, auth=None):
        """
        Sends a treeherder collection to the server

        :param project: project to submit data for
        :param collection_inst: a TreeherderCollection instance
        :param timeout: custom timeout in seconds (defaults to class timeout)
        :param auth: an instance of TreeherderAuth holding the auth credentials
        """
        auth = auth or self.auth

        # Reject malformed collections up front, before any network I/O.
        if not isinstance(collection_inst, TreeherderCollection):
            msg = '{0} should be an instance of TreeherderCollection'.format(
                type(collection_inst))
            raise TreeherderClientError(msg, [])

        if not collection_inst.endpoint_base:
            msg = "{0}: collection endpoint_base property not defined".format(
                self.__class__.__name__)
            raise TreeherderClientError(msg, [])

        if not collection_inst.data:
            msg = "{0}: collection data property not defined".format(
                self.__class__.__name__)
            raise TreeherderClientError(msg, [])

        collection_inst.validate()

        self._post_json(project, collection_inst.endpoint_base,
                        collection_inst.get_collection_data(),
                        timeout, auth)

    def update_parse_status(self, project, job_log_url_id,
                            parse_status, timeout=None, auth=None):
        """
        Updates the parsing status of a treeherder job

        :param project: project to submit data for
        :param parse_status: string representing parse status of a treeherder
                             job
        :param timeout: custom timeout in seconds (defaults to class timeout)
        :param auth: an instance of TreeherderAuth holding the auth credentials
        """
        auth = auth or self.auth

        self._post_json(project, self.UPDATE_ENDPOINT.format(job_log_url_id),
                        {'parse_status': parse_status},
                        timeout, auth)
class TreeherderClientError(Exception):
    """Error raised for client-side validation and submission failures."""

    def __init__(self, msg, Errors):
        super(TreeherderClientError, self).__init__(msg)
        # Historical capitalized attribute name, kept because callers read it.
        self.Errors = Errors
Bug 1193925 - Add a client method for getting list of machine platforms
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import unicode_literals
import json
import logging
import requests
from requests.exceptions import HTTPError
# When releasing a new version to PyPI please also file a bug to request
# that it is uploaded to http://pypi.pub.build.mozilla.org/pub/ too.
# See bug 1191498 for an example of this.
# Package version; also embedded in the client's User-Agent header.
__version__ = '1.6.0'
# Module-level logger used by TreeherderClient for HTTP error reporting.
logger = logging.getLogger(__name__)
class ValidatorMixin(object):
    """Mixin providing declarative validation of ``self.data``."""

    def validate(self, required_properties=None):
        """
        Implement job object validation rules. If a rule fails to validate
        raise TreeherderClientError

        Classes using this mixin should implement a required_properties
        dict. The keys in this dict are the required keys in the structure
        contained in self.data. Nested keys can be specified with the '.'
        operator. Each key in required_properties should have a dict value
        like so:

            {
                'len': optional, some int, max allowed len of property value
                'type': optional, some data type, required type of property
                        value
                'cb': some function reference, called with
                      list of keys, list of values, required_properties key
            }

        Example:

            self.required_properties = {
                'revision_hash': {'len': 50, 'cb': self.validate_existence},
                'project': {'cb': self.validate_existence},
                'job': {'type': dict, 'cb': self.validate_existence},
                'job.job_guid': {'len': 50, 'cb': self.validate_existence}
            }
        """
        # Use None instead of a mutable {} default; fall back to the
        # class's declared rules when no override is supplied.
        required_properties = required_properties or self.required_properties

        for prop in required_properties:
            cb = required_properties[prop]['cb']

            cb(prop.split('.'), required_properties[prop], prop)

    def validate_existence(self, keys, values, property_key):
        """
        This required_properties callback method confirms the following.

        - The keys provided are found in required_properties
        - The type of the values match the specified type
        - The values are defined and less than the required len
          if a len is specified

        If any of these assertions fail TreeherderClientError is raised
        """
        # missing keys
        missing_keys = []
        property_errors = ''

        # Walk the dotted key path into self.data to find the value.
        v = None
        for index, k in enumerate(keys):
            if index > 0:
                try:
                    v = v[k]
                except KeyError:
                    missing_keys.append(k)
            else:
                try:
                    v = self.data[k]
                except KeyError:
                    missing_keys.append(k)

        if missing_keys:
            property_errors += ('\tThe required Property, {0}, is '
                                'missing\n'.format('.'.join(missing_keys)))

        # NOTE(review): any falsy value (0, False, '', []) is reported as
        # "not defined", not only absent values — confirm this is intended.
        if not v:
            property_errors += '\tValue not defined for {0}\n'.format(
                property_key)
        elif ('type' in values) and (not isinstance(v, values['type'])):
            property_errors += ('\tThe value type, {0}, should be '
                                '{1}\n'.format(type(v), values['type']))

        max_limit = values.get('len', None)
        if v and max_limit and (len(v) > max_limit):
            property_errors += ('\tValue length exceeds maximum {0} char '
                                'limit: {1}\n'.format(str(max_limit), str(v)))

        if property_errors:
            msg = ('{0} structure validation errors detected for property:{1}'
                   '\n{2}\n{3}\n'.format(
                       self.__class__.__name__, property_key, property_errors,
                       json.dumps(self.data)))
            raise TreeherderClientError(msg, [])
class TreeherderData(object):
    """Base class holding the ``data`` dict of a treeherder structure."""

    def __init__(self, data=None):
        # Use None instead of a mutable {} default (shared-state pitfall).
        # A falsy ``data`` means "build the skeleton structure" via the
        # subclass's init_data().
        self.data = {}

        if data:
            self.data = data
        else:
            self.init_data()

    def to_json(self):
        """Serialize self.data to a JSON string."""
        return json.dumps(self.data)
class TreeherderJob(TreeherderData, ValidatorMixin):
    """Builder for a single treeherder job submission structure."""

    # Allowed values for a log reference's parse_status.
    PARSE_STATUSES = {'pending', 'parsed', 'error'}

    def __init__(self, data=None):
        # Use None instead of a mutable {} default (shared-state pitfall);
        # a falsy ``data`` triggers init_data() in TreeherderData.
        super(TreeherderJob, self).__init__(data)

        # Provide minimal json structure validation
        self.required_properties = {
            'revision_hash': {'len': 50, 'cb': self.validate_existence},
            'project': {'cb': self.validate_existence},
            'job': {'type': dict, 'cb': self.validate_existence},
            'job.job_guid': {'len': 50, 'cb': self.validate_existence}
        }

    def add_revision_hash(self, revision_hash):
        self.data['revision_hash'] = revision_hash

    def add_coalesced_guid(self, guids):
        # Extends (rather than replaces) the coalesced guid list.
        if guids:
            self.data['coalesced'].extend(guids)

    def add_project(self, project):
        self.data['project'] = project

    def add_job_guid(self, guid):
        self.data['job']['job_guid'] = guid

    def add_job_name(self, name):
        self.data['job']['name'] = name

    def add_job_symbol(self, symbol):
        self.data['job']['job_symbol'] = symbol

    def add_group_name(self, name):
        self.data['job']['group_name'] = name

    def add_group_symbol(self, symbol):
        self.data['job']['group_symbol'] = symbol

    def add_description(self, desc):
        self.data['job']['desc'] = desc

    def add_product_name(self, name):
        self.data['job']['product_name'] = name

    def add_state(self, state):
        self.data['job']['state'] = state

    def add_result(self, result):
        self.data['job']['result'] = result

    def add_reason(self, reason):
        self.data['job']['reason'] = reason

    def add_who(self, who):
        self.data['job']['who'] = who

    def add_submit_timestamp(self, tstamp):
        self.data['job']['submit_timestamp'] = tstamp

    def add_start_timestamp(self, tstamp):
        self.data['job']['start_timestamp'] = tstamp

    def add_end_timestamp(self, tstamp):
        self.data['job']['end_timestamp'] = tstamp

    def add_machine(self, machine):
        self.data['job']['machine'] = machine

    def add_build_url(self, url):
        self.data['job']['build_url'] = url

    def add_build_info(self, os_name, platform, arch):
        self.data['job']['build_platform']['os_name'] = os_name
        self.data['job']['build_platform']['platform'] = platform
        self.data['job']['build_platform']['architecture'] = arch

    def add_machine_info(self, os_name, platform, arch):
        self.data['job']['machine_platform']['os_name'] = os_name
        self.data['job']['machine_platform']['platform'] = platform
        self.data['job']['machine_platform']['architecture'] = arch

    def add_option_collection(self, option_collection):
        # Merges into (rather than replaces) the existing mapping.
        if option_collection:
            self.data['job']['option_collection'].update(option_collection)

    def add_tier(self, tier):
        self.data['job']['tier'] = tier

    def add_log_reference(self, name, url, parse_status='pending'):
        """
        parse_status - one of 'pending', 'parsed' or 'error'
        """
        if parse_status not in self.PARSE_STATUSES:
            msg = "{0}: Invalid parse_status '{1}': must be one of: {2}".format(
                self.__class__.__name__,
                parse_status,
                ', '.join(self.PARSE_STATUSES)
            )
            raise TreeherderClientError(msg, [])

        # Silently skipped when either name or url is falsy.
        if name and url:
            self.data['job']['log_references'].append(
                {'url': url, 'name': name, 'parse_status': parse_status}
            )

    def add_artifact(self, name, artifact_type, blob):
        # Artifacts are tied to the job via its current job_guid, so
        # add_job_guid() should be called before add_artifact().
        if blob:
            self.data['job']['artifacts'].append({
                'name': name,
                'type': artifact_type,
                'blob': blob,
                'job_guid': self.data['job']['job_guid']
            })

    def init_data(self):
        """Initialize self.data with an empty job submission skeleton."""
        self.data = {
            'revision_hash': '',
            'project': '',
            'job': {
                # Stored in project_jobs_1.job.job_guid
                'job_guid': '',
                # Stored in treeherder_reference_1.job_type.name
                'name': '',
                # Stored in treeherder_reference_1.job_type.description
                # TODO confirm column — comment previously duplicated .name
                'desc': '',
                # Stored symbol representing the job in the UI
                # Stored in treeherder_reference_1.job_type.symbol
                'job_symbol': '',
                # human readable group name (can be null)
                # Stored in treeherder_reference_1.job_group.name
                'group_name': '',
                # Stored symbol representing the job group (can be null)
                # Stored in treeherder_reference_1.job_group.symbol
                'group_symbol': '',
                # Stored in treeherder_reference_1.product
                'product_name': '',
                # Stored in project_jobs_1.job.state
                'state': '',
                # Stored in project_jobs_1.job.result
                'result': '',
                # Stored in project_jobs_1.job.reason
                'reason': '',
                # Stored in project_jobs_1.job.who
                'who': '',
                # Stored in project_jobs_1.job.submit_timestamp
                'submit_timestamp': '',
                # Stored in project_jobs_1.job.start_timestamp
                'start_timestamp': '',
                # Stored in project_jobs_1.job.end_timestamp
                'end_timestamp': '',
                # Stored in treeherder_reference_1.machine.name
                'machine': '',
                # Stored in project_jobs_1.job_artifact, name=build_url
                'build_url': '',
                # Stored in
                # treeherder_reference_1.build_platform.os_name,
                # treeherder_reference_1.build_platform.platform,
                # treeherder_reference_1.build_platform.architecture,
                'build_platform': {
                    'os_name': '', 'platform': '', 'architecture': ''},
                # Stored in:
                # treeherder_reference_1.machine_platform.os_name,
                # treeherder_reference_1.machine_platform.platform,
                # treeherder_reference_1.machine_platform.architecture,
                'machine_platform': {
                    'os_name': '', 'platform': '', 'architecture': ''},
                # Stored in treeherder_reference_1.option_collection and
                # treeherder_reference_1.option
                # Ex: 'debug | pgo | asan | opt': True
                'option_collection': {},
                # Stored in project_jobs_1.job_log_url
                # Example:
                # log_references: [
                #     { url: 'http://ftp.mozilla.org/mozilla.org/firefox.gz',
                #       name: 'unittest' },
                'log_references': [],
                # Stored in
                # project_jobs_1.job_artifact.name
                # project_jobs_1.job_artifact.type
                # project_jobs_1.job_artifact.blob
                'artifacts': []
            },
            # List of job_guids that were coalesced to this job
            # Stored in project_jobs_1.job.coalesced_job_guid
            # Where the value of coalesced_job_guid is set to job_guid
            # for the list of job_guids provided in coalesced
            'coalesced': []
        }
class TreeherderRevision(TreeherderData, ValidatorMixin):
    """
    Supports building a revision structure that is contained in
    TreeherderResultSet.
    """

    def __init__(self, data=None):
        # Use None instead of a mutable {} default (shared-state pitfall);
        # a falsy ``data`` triggers init_data() in TreeherderData.
        super(TreeherderRevision, self).__init__(data)
        # Provide minimal json structure validation
        self.required_properties = {
            'revision': {'len': 50, 'cb': self.validate_existence},
            'repository': {'cb': self.validate_existence},
        }

    def init_data(self):
        """Initialize self.data with an empty revision skeleton."""
        self.data = {
            # Stored in project_jobs_1.revision.author
            'author': '',
            # Stored in project_jobs_1.revision.comments
            'comment': '',
            # Stored in treeherder_reference_1.repository.name
            'repository': '',
            # Stored in project_jobs_1.revision.revision
            'revision': '',
        }

    def add_author(self, author):
        self.data['author'] = author

    def add_comment(self, comment):
        self.data['comment'] = comment

    def add_repository(self, repository):
        self.data['repository'] = repository

    def add_revision(self, revision):
        self.data['revision'] = revision
class TreeherderResultSet(TreeherderData, ValidatorMixin):
    """
    Supports building a treeherder result set
    """

    def __init__(self, data=None):
        # Use None instead of a mutable {} default (shared-state pitfall);
        # a falsy ``data`` triggers init_data() in TreeherderData.
        super(TreeherderResultSet, self).__init__(data)
        # Provide minimal json structure validation
        self.required_properties = {
            'revision_hash': {'len': 50, 'cb': self.validate_existence},
            'revisions': {'type': list, 'cb': self.validate_existence},
            'author': {'len': 150, 'cb': self.validate_existence}
        }

    def init_data(self):
        """Initialize self.data with an empty result set skeleton."""
        self.data = {
            # Stored in project_jobs_1.result_set.push_timestamp
            'push_timestamp': None,
            # Stored in project_jobs_1.result_set.revision_hash
            'revision_hash': '',
            # Stored in project_jobs_1.result_set.author
            'author': '',
            # Stored in project_jobs_1.revision, new row per revision
            'revisions': [],
            # TODO: add type column to resultset in treeherder-service
            'type': '',
        }

    def add_push_timestamp(self, push_timestamp):
        self.data['push_timestamp'] = push_timestamp

    def add_revision_hash(self, revision_hash):
        self.data['revision_hash'] = revision_hash

    def add_author(self, author):
        self.data['author'] = author

    def add_revisions(self, revisions):
        # Replaces the whole revision list; falsy input is ignored.
        if revisions:
            self.data['revisions'] = revisions

    def add_revision(self, revision):
        # Validates the revision before appending its raw data dict.
        if revision:
            revision.validate()
            self.data['revisions'].append(revision.data)

    def add_type(self, resultset_type):
        self.data['type'] = resultset_type

    def get_revision(self, data=None):
        """Return a new TreeherderRevision built from ``data``."""
        return TreeherderRevision(data)
class TreeherderArtifact(TreeherderData, ValidatorMixin):
    """
    Supports building a treeherder job artifact
    """

    def __init__(self, data=None):
        # Use None instead of a mutable {} default (shared-state pitfall);
        # a falsy ``data`` triggers init_data() in TreeherderData.
        super(TreeherderArtifact, self).__init__(data)
        # Provide minimal json structure validation
        self.required_properties = {
            'blob': {'cb': self.validate_existence},
            'type': {'cb': self.validate_existence},
            'name': {'cb': self.validate_existence},
            'job_guid': {'cb': self.validate_existence}
        }

    def init_data(self):
        """Initialize self.data with an empty artifact skeleton."""
        self.data = {
            # Stored in project_jobs_1.artifact.blob
            'blob': '',
            # Stored in project_jobs_1.artifact.type
            'type': '',
            # Stored in project_jobs_1.artifact.name
            'name': '',
            # Stored in project_jobs_1.artifact.job_guid
            'job_guid': None
        }

    def add_blob(self, blob):
        self.data['blob'] = blob

    def add_type(self, type):
        # NOTE: parameter name shadows the builtin, but is kept for
        # keyword-argument backward compatibility with existing callers.
        self.data['type'] = type

    def add_name(self, name):
        self.data['name'] = name

    def add_job_guid(self, job_guid):
        self.data['job_guid'] = job_guid
class TreeherderCollection(object):
    """
    Base class for treeherder data collections
    """

    def __init__(self, endpoint_base, data=None):
        """
        :param endpoint_base: API endpoint fragment this collection POSTs to
        :param data: optional list of TreeherderData instances
        """
        # Use None instead of a mutable [] default (shared-state pitfall).
        self.data = []
        self.endpoint_base = endpoint_base

        if data:
            self.data = data

    def get_collection_data(self):
        """
        Build data structure containing the data attribute only for
        each item in the collection
        """
        data_struct = []
        for datum_instance in self.data:
            data_struct.append(datum_instance.data)
        return data_struct

    def to_json(self):
        """
        Convert list of data objects to json
        """
        return json.dumps(self.get_collection_data())

    def add(self, datum_instance):
        """
        Add a data structure class instance to data list
        """
        self.data.append(datum_instance)

    def validate(self):
        """
        validate the data structure class
        """
        for d in self.data:
            d.validate()

    def get_chunks(self, chunk_size):
        """
        Return a generator of new collections broken into chunks of size ``chunk_size``.

        Each chunk will be a ``TreeherderCollection`` of the same
        type as the original with a max of ``chunk_size`` count of
        ``TreeherderData`` objects.

        Each collection must then be POSTed individually.
        """
        for i in range(0, len(self.data), chunk_size):
            # we must copy not only the data chunk,
            # but also the endpoint_base or any other field of the
            # collection. In the case of a TreeherderJobCollection,
            # this is determined in the constructor.
            chunk = self.__class__(self.data[i:i + chunk_size])
            chunk.endpoint_base = self.endpoint_base
            yield chunk
class TreeherderJobCollection(TreeherderCollection):
    """
    Collection of job objects
    """

    def __init__(self, data=None):
        # Use None instead of a mutable [] default (shared-state pitfall).
        super(TreeherderJobCollection, self).__init__('jobs', data)

    def get_job(self, data=None):
        """Return a new TreeherderJob built from ``data``."""
        return TreeherderJob(data)
class TreeherderResultSetCollection(TreeherderCollection):
    """
    Collection of result set objects
    """

    def __init__(self, data=None):
        # Use None instead of a mutable [] default (shared-state pitfall).
        super(TreeherderResultSetCollection, self).__init__('resultset', data)

    def get_resultset(self, data=None):
        """Return a new TreeherderResultSet built from ``data``."""
        return TreeherderResultSet(data)
class TreeherderArtifactCollection(TreeherderCollection):
    """
    Collection of job artifacts
    """

    def __init__(self, data=None):
        # Use None instead of a mutable [] default (shared-state pitfall).
        super(TreeherderArtifactCollection, self).__init__('artifact', data)

    def get_artifact(self, data=None):
        """Return a new TreeherderArtifact built from ``data``."""
        return TreeherderArtifact(data)
class TreeherderClient(object):
"""
Treeherder client class
"""
PROTOCOLS = {'http', 'https'} # supported protocols
API_VERSION = '1.0'
REQUEST_HEADERS = {
'Accept': 'application/json; version={}'.format(API_VERSION),
'User-Agent': 'treeherder-pyclient/{}'.format(__version__),
}
UPDATE_ENDPOINT = 'job-log-url/{}/update_parse_status'
RESULTSET_ENDPOINT = 'resultset'
JOBS_ENDPOINT = 'jobs'
ARTIFACTS_ENDPOINT = 'artifact'
OPTION_COLLECTION_HASH_ENDPOINT = 'optioncollectionhash'
REPOSITORY_ENDPOINT = 'repository'
JOBGROUP_ENDPOINT = 'jobgroup'
JOBTYPE_ENDPOINT = 'jobtype'
MACHINE_PLATFORM_ENDPOINT = 'machineplatform'
MAX_COUNT = 2000
def __init__(
self, protocol='https', host='treeherder.mozilla.org',
timeout=120, auth=None):
"""
:param protocol: protocol to use (http or https)
:param host: treeherder host to post to
:param timeout: maximum time it can take for a request to complete
:param auth: an instance of TreeherderAuth holding the auth credentials
"""
self.host = host
if protocol not in self.PROTOCOLS:
raise AssertionError('Protocol "%s" not supported; please use one '
'of %s' % (protocol,
', '.join(self.PROTOCOLS)))
self.protocol = protocol
self.timeout = timeout
self.auth = auth
def _get_project_uri(self, project, endpoint):
return '{0}://{1}/api/project/{2}/{3}/'.format(
self.protocol, self.host, project, endpoint
)
def _get_uri(self, endpoint):
uri = '{0}://{1}/api/{2}'.format(
self.protocol, self.host, endpoint)
return uri
def _get_json_list(self, endpoint, timeout, project=None, **params):
if "count" in params and (params["count"] is None or params["count"] > self.MAX_COUNT):
total = None if params["count"] is None else params["count"]
count = self.MAX_COUNT
offset = 0
data = []
while True:
params["count"] = count
params["offset"] = offset
new_data = self._get_json(endpoint, timeout, project=project, **params)["results"]
data += new_data
if len(new_data) < self.MAX_COUNT:
return data
offset += count
if total is not None:
count = min(total-offset, self.MAX_COUNT)
else:
return self._get_json(endpoint, timeout, project=project, **params)["results"]
def _get_json(self, endpoint, timeout, project=None, **params):
if timeout is None:
timeout = self.timeout
if project is None:
uri = self._get_uri(endpoint)
else:
uri = self._get_project_uri(project, endpoint)
resp = requests.get(uri, timeout=timeout, params=params,
headers=self.REQUEST_HEADERS)
try:
resp.raise_for_status()
except HTTPError as e:
response = e.response
logger.error("Error submitting data to %s" % response.request.url)
logger.error("Request headers: %s" % response.request.headers)
logger.error("Response headers: %s" % response.headers)
logger.error("Response body: %s" % response.content)
raise
return resp.json()
def _post_json(self, project, endpoint, data,
timeout, auth):
if timeout is None:
timeout = self.timeout
auth = auth or self.auth
uri = self._get_project_uri(project, endpoint)
resp = requests.post(uri, json=data,
headers=self.REQUEST_HEADERS,
timeout=timeout, auth=auth)
try:
resp.raise_for_status()
except HTTPError as e:
response = e.response
logger.error("Error submitting data to %s" % response.request.url)
logger.error("Request headers: %s" % response.request.headers)
logger.error("Request body: %s" % response.request.body)
logger.error("Response headers: %s" % response.headers)
logger.error("Response body: %s" % response.content)
raise
def get_option_collection_hash(self):
"""
Gets option collection hash, a mapping of hash values to build properties
Returns a dictionary with the following structure:
{
hashkey1: [ { key: value }, { key: value }, ... ],
hashkey2: [ { key: value }, { key: value }, ... ],
...
}
"""
resp = self._get_json(self.OPTION_COLLECTION_HASH_ENDPOINT, None)
ret = {}
for result in resp:
ret[result['option_collection_hash']] = result['options']
return ret
def get_repositories(self):
"""
Gets a list of valid treeherder repositories.
Returns a list with the following structure:
[
{name: repository-name, dvcs_type: dcvs-type, ...},
...
]
"""
return self._get_json(self.REPOSITORY_ENDPOINT, None)
def get_job_groups(self):
"""
Gets a list of job groups stored inside Treeherder
Returns a list of dictionaries with the following properties:
{
id: <id>
symbol: <symbol>
name: <name>
...
}
"""
return self._get_json(self.JOBGROUP_ENDPOINT, None)
def get_job_types(self):
"""
Gets a list of job types stored inside Treeherder
Returns a list of dictionaries with the following properties:
{
id: <id>
job_group: <job_group_id>
symbol: <symbol>
name: <name>
...
}
"""
return self._get_json(self.JOBTYPE_ENDPOINT, None)
def get_machine_platforms(self):
"""
Gets a list of machine platforms stored inside Treeherder
Returns a list of dictionaries with the following properties:
{
id: <id>
os_name: <os_name>
platform: <platform>,
architecture: <architecture>,
active_status: <active_status>
}
"""
return self._get_json(self.MACHINE_PLATFORM_ENDPOINT, None)
def get_resultsets(self, project, **params):
"""
Gets resultsets from project, filtered by parameters
By default this method will just return the latest 10 result sets (if they exist)
:param project: project (repository name) to query data for
:param params: keyword arguments to filter results
"""
return self._get_json_list(self.RESULTSET_ENDPOINT, None, project, **params)
def get_jobs(self, project, **params):
"""
Gets jobs from project, filtered by parameters
:param project: project (repository name) to query data for
:param params: keyword arguments to filter results
"""
return self._get_json_list(self.JOBS_ENDPOINT, None, project, **params)
def get_artifacts(self, project, **params):
"""
Gets artifact list from project, filtered by parameters
:param project: project (repository name) to query for
:param params: keyword arguments to filter results
"""
response = self._get_json(self.ARTIFACTS_ENDPOINT, None, project, **params)
return response
def post_collection(self, project, collection_inst, timeout=None, auth=None):
    """
    Sends a treeherder collection to the server

    :param project: project to submit data for
    :param collection_inst: a TreeherderCollection instance
    :param timeout: custom timeout in seconds (defaults to class timeout)
    :param auth: an instance of TreeherderAuth holding the auth credentials
    :raises TreeherderClientError: when the collection is not a
        TreeherderCollection, or lacks an endpoint_base or data.
    """
    auth = auth or self.auth

    # Validate the collection thoroughly before touching the network.
    if not isinstance(collection_inst, TreeherderCollection):
        raise TreeherderClientError(
            '{0} should be an instance of TreeherderCollection'.format(
                type(collection_inst)),
            [])

    if not collection_inst.endpoint_base:
        raise TreeherderClientError(
            "{0}: collection endpoint_base property not defined".format(
                self.__class__.__name__),
            [])

    if not collection_inst.data:
        raise TreeherderClientError(
            "{0}: collection data property not defined".format(
                self.__class__.__name__),
            [])

    collection_inst.validate()

    self._post_json(project, collection_inst.endpoint_base,
                    collection_inst.get_collection_data(),
                    timeout, auth)
def update_parse_status(self, project, job_log_url_id,
                        parse_status, timeout=None, auth=None):
    """
    Updates the parsing status of a treeherder job

    :param project: project to submit data for
    :param job_log_url_id: id of the job log url whose status is updated
    :param parse_status: string representing parse status of a treeherder
                         job
    :param timeout: custom timeout in seconds (defaults to class timeout)
    :param auth: an instance of TreeherderAuth holding the auth credentials
    """
    auth = auth or self.auth
    endpoint = self.UPDATE_ENDPOINT.format(job_log_url_id)
    self._post_json(project, endpoint,
                    {'parse_status': parse_status},
                    timeout, auth)
class TreeherderClientError(Exception):
    """Raised for client-side failures; ``Errors`` carries detail objects."""

    def __init__(self, msg, Errors):
        super(TreeherderClientError, self).__init__(msg)
        # Keep the (historically capitalized) attribute name for callers.
        self.Errors = Errors
|
import functools
import itertools
import operator
import xp
from einsum_opt import _greedy_path, _optimal_path
# Feature switches for nonstandard einsum semantics (both off by default).
options = {
    'sum_ellipsis': False,
    'broadcast_diagonal': False,
}

# Symbols usable in int-style subscripts; index i maps to einsum_symbols[i].
einsum_symbols = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
def _concat(lists):
return sum(lists, [])
def _prod(xs):
return functools.reduce(operator.mul, xs, 1)
def _transpose_ex(a, axeses):
    """Transpose and diagonal

    Builds an explicit (shape, strides) view of ``a`` in which each group of
    axes in ``axeses`` is merged into one output axis; axes within a group
    share the output axis (a diagonal).

    Args:
        a: array-like exposing ``shape`` and ``strides``.
        axeses (list of list of ints): groups of input axis indices, one
            group per output axis.

    Returns:
        p: a with its axes permutated. A writeable view is returned whenever
           possible.
    """
    shape = []
    strides = []
    for axes in axeses:
        dims = [a.shape[axis] for axis in axes]
        dim = max(dims)  # TODO(kataoka): fix to dim=0
        # Length-1 axes contribute stride 0 so they broadcast along the
        # merged axis; the others sum (a diagonal walks every axis at once).
        stride = sum(
            0 if d == 1 else a.strides[axis]
            for axis, d in zip(axes, dims)
        )
        shape.append(dim)
        strides.append(stride)
    # NOTE(review): assumes ``xp.view_from_shape_and_strides`` returns a view
    # sharing a's buffer -- confirm against the xp backend.
    return xp.view_from_shape_and_strides(a, shape, strides)
def _parse_int_subscript(sub):
    """Translate a list-style subscript (ints / Ellipsis) to a string.

    Each int selects a letter from ``einsum_symbols``; Ellipsis becomes '@'.
    """
    chars = []
    for item in sub:
        if item is Ellipsis:
            chars.append("@")
        elif isinstance(item, int):
            chars.append(einsum_symbols[item])
        else:
            raise TypeError("For this input type lists must contain "
                            "either int or Ellipsis")
    return "".join(chars)
def _parse_einsum_input(operands, parse_ellipsis=True):
    """Parse einsum operands.

    This function is based on `numpy.core.einsumfunc._parse_einsum_input`
    function in NumPy 1.14.

    Accepts either the string form ``('ij,jk->ik', a, b)`` or the
    interleaved form ``(a, [0, 1], b, [1, 2], [0, 2])``.  In both forms an
    ellipsis is encoded as the single character ``'@'`` in the result.

    Returns
    -------
    input_strings : list of str
        Parsed input strings, one subscript string per operand.
    output_string : str or None
        Parsed output string (None when no explicit output was given).
    operands : list of array_like
        The operands to use in the numpy contraction

    Examples
    --------
    The operand list is simplified to reduce printing:

    >>> a = np.random.rand(4, 4)
    >>> b = np.random.rand(4, 4, 4)
    >>> _parse_einsum_input(('...a,...a->...', a, b))  # doctest: +SKIP
    (['@a', '@a'], '@', [a, b])
    >>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))  # doctest: +SKIP
    (['@a', '@a'], None, [a, b])
    """
    if len(operands) == 0:
        raise ValueError("No input operands")

    if isinstance(operands[0], str):
        # String form: first element is the subscripts, rest are arrays.
        subscripts = operands[0].replace(" ", "")
        operands = list(operands[1:])

        # Ensure all characters are valid
        for s in subscripts:
            if s in '.,->':
                continue
            if s not in einsum_symbols:
                raise ValueError("Character %s is not a valid symbol." % s)

        # Parse "..."; any '.' left over means a malformed ellipsis.
        subscripts = subscripts.replace("...", "@")
        if "." in subscripts:
            raise ValueError("Invalid Ellipses.")

        # Parse "->"
        if ("-" in subscripts) or (">" in subscripts):
            # Check for proper "->"
            invalid = subscripts.count("-") > 1 or subscripts.count(">") > 1
            subscripts = subscripts.split("->")
            if invalid or len(subscripts) != 2:
                raise ValueError("Subscripts can only contain one '->'.")
            input_subscripts, output_subscript = subscripts
        else:
            input_subscripts = subscripts
            output_subscript = None

        input_subscripts = input_subscripts.split(",")
        if len(input_subscripts) != len(operands):
            raise ValueError("Number of einsum subscripts must be equal to the"
                             " number of operands.")
    else:
        # Interleaved form: array, subscript-list, array, subscript-list, ...
        # with an optional trailing output subscript-list.
        tmp_operands = list(operands)
        operands = []
        input_subscripts = []
        while len(tmp_operands) >= 2:
            operands.append(tmp_operands.pop(0))
            input_subscripts.append(_parse_int_subscript(
                tmp_operands.pop(0)))
        if tmp_operands:
            output_subscript = _parse_int_subscript(tmp_operands[0])
        else:
            output_subscript = None

    return input_subscripts, output_subscript, operands
def _chr(char):
if char < 0:
return "...[%d]" % char
else:
return chr(char)
def _parse_ellipsis_subscript(subscript, ndim=None, ellipsis_len=None):
subs = subscript.split('@')
if len(subs) == 1:
sub, = subs
if ndim is not None and len(sub) != ndim:
# raise ValueError later
return "Einstein sum subscript %s does not contain the correct" \
" number of indices " % subs
return list(map(ord, sub))
elif len(subs) == 2:
left_sub, right_sub = subs
if ndim is not None:
ellipsis_len = ndim - (len(left_sub) + len(right_sub))
if ellipsis_len < 0:
# raise ValueError later
return "Einstein sum subscript %s...%s does not contain the" \
" correct number of indices " % (left_sub, right_sub)
return list(itertools.chain(
map(ord, left_sub),
range(-ellipsis_len, 0),
map(ord, right_sub),
))
else:
# >= 2 ellipses for an operand
raise ValueError("Invalid Ellipses.")
def _einsum_diagonals(input_subscripts, operands):
    """Compute diagonal for each operand

    This function mutates args: an operand that repeats a label is replaced
    by a diagonal view with one axis per distinct label, and its subscript
    entry is deduplicated to match.
    """
    for num, sub in enumerate(input_subscripts):
        if len(set(sub)) < len(sub):
            # This operand repeats at least one label.
            op = operands[num]

            # Group axis positions by label, preserving first-seen order.
            axes = {}
            for i, s in enumerate(sub):
                axes.setdefault(s, []).append(i)
            axes = list(axes.items())

            input_subscripts[num] = [
                s
                for s, _ in axes
            ]

            if not options['broadcast_diagonal']:
                # Axes sharing a label must have equal extents (no implicit
                # broadcasting across a diagonal).
                for s, indices in axes:
                    dims = list({op.shape[j] for j in indices})
                    if len(dims) >= 2:
                        raise ValueError(
                            "dimensions in operand %d"
                            " for collapsing index '%s' don't match (%d != %d)"
                            % (num, _chr(s), dims[0], dims[1])
                        )

            axes = [
                indices
                for _, indices in axes
            ]
            operands[num] = _transpose_ex(
                op, axes
            )
def einsum(*operands, **kwargs):
    """einsum(subscripts, *operands, dtype=False, casting='safe')

    Evaluates the Einstein summation convention on the operands.

    Using the Einstein summation convention, many common multi-dimensional
    array operations can be represented in a simple fashion. This function
    provides a way to compute such summations.

    .. note::
       Memory contiguity of calculation result is not always compatible with
       `numpy.einsum`.
       ``out`` and ``order`` options are not supported.

    Args:
        subscripts (str): Specifies the subscripts for summation.
        operands (sequence of arrays): These are the arrays for the operation.

    Returns:
        cupy.ndarray:
            The calculation based on the Einstein summation convention.

    .. seealso:: :func:`numpy.einsum`
    """
    input_subscripts, output_subscript, operands = \
        _parse_einsum_input(operands)
    assert isinstance(input_subscripts, list)
    assert isinstance(operands, list)

    dtype = kwargs.pop('dtype', None)
    casting = kwargs.pop('casting', 'safe')
    optimize = kwargs.pop('optimize', False)
    if optimize is True:
        optimize = 'greedy'
    if kwargs:
        # BUG FIX: ``list(kwargs.keys)`` passed the bound method (not
        # iterable) to list(), so the intended message was never produced.
        raise TypeError("Did not understand the following kwargs: %s"
                        % list(kwargs.keys()))

    result_dtype = xp.result_type(*operands) if dtype is None else dtype
    operands = [
        xp.asanyarray(arr)
        for arr in operands
    ]

    # Resolve each subscript string into a list of int labels (negative
    # labels stand for ellipsis dimensions).  Parse errors come back as
    # strings so the operand index can be appended here.
    input_subscripts = [
        _parse_ellipsis_subscript(sub, ndim=arr.ndim)
        for sub, arr in zip(input_subscripts, operands)
    ]
    for i, sub_or_err in enumerate(input_subscripts):
        if isinstance(sub_or_err, str):
            raise ValueError(sub_or_err + "for operand %d." % i)

    # Get length of each unique dimension and ensure all dimensions are correct
    dimension_dict = {}
    for tnum, term in enumerate(input_subscripts):
        sh = operands[tnum].shape
        for cnum, char in enumerate(term):
            dim = sh[cnum]
            if char in dimension_dict.keys():
                # For broadcasting cases we always want the largest dim size
                if dimension_dict[char] == 1:
                    dimension_dict[char] = dim
                elif dim not in (1, dimension_dict[char]):
                    dim_old = dimension_dict[char]
                    raise ValueError("Size of label '%s' for operand %d (%d) "
                                     "does not match previous terms (%d)."
                                     % (_chr(char), tnum, dim, dim_old))
            else:
                dimension_dict[char] = dim

    if output_subscript is None:
        # Build output subscripts: labels appearing exactly once, plus all
        # ellipsis (negative) labels, in sorted order.
        tmp_subscripts = _concat(input_subscripts)
        output_subscript = [
            s
            for s in sorted(set(tmp_subscripts))
            if s < 0 or tmp_subscripts.count(s) == 1
        ]
    else:
        if not options['sum_ellipsis']:
            if '@' not in output_subscript and -1 in dimension_dict:
                raise ValueError("output had too few broadcast dimensions")
        output_subscript = _parse_ellipsis_subscript(
            output_subscript,
            ellipsis_len=len(list(s for s in dimension_dict.keys() if s < 0))
        )

    # Make sure output subscripts are in the input
    tmp_subscripts = set(_concat(input_subscripts))
    for char in output_subscript:
        if char not in tmp_subscripts:
            raise ValueError(
                "Output character %s did not appear in the input"
                % _chr(char))

    # Collapse repeated labels within an operand into diagonal views.
    _einsum_diagonals(input_subscripts, operands)

    # no more raises

    if len(operands) >= 2:
        if any(op.size == 0 for op in operands):
            return xp.zeros(
                tuple(dimension_dict[s] for s in output_subscript),
                dtype=result_dtype
            )

        # Don't squeeze if unary, because this affects later (in trivial sum)
        # whether the return is a writeable view.
        for num in range(len(operands)):
            op = operands[num]
            if 1 in op.shape:
                squeeze_indices = []
                sub = []
                for i, s in enumerate(input_subscripts[num]):
                    if op.shape[i] == 1:
                        squeeze_indices.append(i)
                    else:
                        sub.append(s)
                input_subscripts[num] = sub
                operands[num] = xp.squeeze(op, axis=tuple(squeeze_indices))
                assert len(operands[num].shape) == len(input_subscripts[num])

    # unary einsum without summation should return a (writeable) view
    returns_view = len(operands) == 1

    # unary sum: reduce axes that appear neither in the output nor in any
    # other operand.
    for num, sub in enumerate(input_subscripts):
        other_subscripts = list(input_subscripts)
        other_subscripts[num] = output_subscript
        other_subscripts = _concat(other_subscripts)
        sum_axes = tuple(
            i
            for i, s in enumerate(sub)
            if s not in other_subscripts
        )
        if sum_axes:
            returns_view = False
            input_subscripts[num] = [
                s
                for i, s in enumerate(sub)
                if i not in sum_axes
            ]

            # Cannot do the following in cupy (bug?)
            # operands[num] = operands[num].sum(
            #     axis=sum_axes, dtype=result_dtype)
            operands[num] = (
                operands[num]
                .astype(result_dtype, casting=casting, copy=False)
                .sum(axis=sum_axes)
                # .sum uses platform integer types by default
                .astype(result_dtype, copy=False)
            )

    if returns_view:
        operands = [arr.view() for arr in operands]
    else:
        operands = [
            arr.astype(result_dtype, casting=casting, copy=False)
            for arr in operands
        ]

    # no more casts

    optimize_algorithms = {
        'greedy': _greedy_path,
        'optimal': _optimal_path,
    }
    if optimize is False:
        path = [(0, 1)] * (len(operands) - 1)  # TODO(kataoka): fix
    elif isinstance(optimize, str) and optimize in optimize_algorithms.keys():
        input_sets = [set(sub) for sub in input_subscripts]
        output_set = set(output_subscript)
        memory_arg = 1e99
        algo = optimize_algorithms[optimize]
        path = algo(input_sets, output_set, dimension_dict, memory_arg)
    elif len(optimize) and (optimize[0] == 'einsum_path'):
        path = optimize[1:]
    else:
        raise TypeError("Did not understand the path (optimize): %s"
                        % str(optimize))

    for idx0, idx1 in path:
        # repeat binary einsum: contract each pair via a batched matmul
        assert idx0 < idx1
        sub1 = input_subscripts.pop(idx1)
        op1 = operands.pop(idx1)
        sub0 = input_subscripts.pop(idx0)
        op0 = operands.pop(idx0)
        set0 = set(sub0)
        set1 = set(sub1)
        assert len(set0) == len(sub0)
        assert len(set1) == len(sub1)
        set_out = set(_concat([output_subscript] + input_subscripts))
        shared = set0 & set1
        # Shared labels still needed downstream are batch dims; the rest
        # are contracted away.
        batch_dims = shared & set_out
        contract_dims = shared - batch_dims
        bs0, cs0, ts0 = _make_transpose_axes(sub0, batch_dims, contract_dims)
        bs1, cs1, ts1 = _make_transpose_axes(sub1, batch_dims, contract_dims)
        batch_size = _prod([dimension_dict[s] for s in batch_dims])
        contract_size = _prod([dimension_dict[s] for s in contract_dims])
        tmp0 = op0.transpose(bs0 + ts0 + cs0).reshape(
            batch_size, -1, contract_size)
        tmp1 = op1.transpose(bs1 + cs1 + ts1).reshape(
            batch_size, contract_size, -1)
        tmp_out = xp.matmul(tmp0, tmp1)
        sub_b = [sub0[i] for i in bs0]
        assert sub_b == [sub1[i] for i in bs1]
        sub_l = [sub0[i] for i in ts0]
        sub_r = [sub1[i] for i in ts1]
        sub_out = sub_b + sub_l + sub_r
        op_out = tmp_out.reshape([dimension_dict[s] for s in sub_out])
        input_subscripts.append(sub_out)
        operands.append(op_out)

    # unary einsum at last: transpose to the requested output order
    op0, = operands
    sub0, = input_subscripts
    transpose_axes = []
    for s in output_subscript:
        if s in sub0:
            transpose_axes.append(sub0.index(s))
    op_out = op0.transpose(transpose_axes).reshape([
        dimension_dict[s]
        for s in output_subscript
    ])
    assert returns_view or op_out.dtype == result_dtype
    return op_out
def _tuple_sorted_by_0(zs):
return tuple(i for _, i in sorted(zs))
def _make_transpose_axes(sub, b_dims, c_dims):
bs = []
cs = []
ts = []
for i, s in enumerate(sub):
if s in b_dims:
bs.append((s, i))
elif s in c_dims:
cs.append((s, i))
else:
ts.append((s, i))
return (
_tuple_sorted_by_0(bs),
_tuple_sorted_by_0(cs),
_tuple_sorted_by_0(ts),
)
Fix inconsistency between parsers
import functools
import itertools
import operator
import xp
from einsum_opt import _greedy_path, _optimal_path
# Feature switches for nonstandard einsum semantics (both off by default).
options = {
    'sum_ellipsis': False,
    'broadcast_diagonal': False,
}

# Symbols usable in int-style subscripts; index i maps to einsum_symbols[i].
einsum_symbols = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
def _concat(lists):
return sum(lists, [])
def _prod(xs):
return functools.reduce(operator.mul, xs, 1)
def _transpose_ex(a, axeses):
    """Transpose and diagonal

    Builds an explicit (shape, strides) view of ``a`` in which each group of
    axes in ``axeses`` is merged into one output axis; axes within a group
    share the output axis (a diagonal).

    Args:
        a: array-like exposing ``shape`` and ``strides``.
        axeses (list of list of ints): groups of input axis indices, one
            group per output axis.

    Returns:
        p: a with its axes permutated. A writeable view is returned whenever
           possible.
    """
    shape = []
    strides = []
    for axes in axeses:
        dims = [a.shape[axis] for axis in axes]
        dim = max(dims)  # TODO(kataoka): fix to dim=0
        # Length-1 axes contribute stride 0 so they broadcast along the
        # merged axis; the others sum (a diagonal walks every axis at once).
        stride = sum(
            0 if d == 1 else a.strides[axis]
            for axis, d in zip(axes, dims)
        )
        shape.append(dim)
        strides.append(stride)
    # NOTE(review): assumes ``xp.view_from_shape_and_strides`` returns a view
    # sharing a's buffer -- confirm against the xp backend.
    return xp.view_from_shape_and_strides(a, shape, strides)
def _parse_int_subscript(sub):
    """Translate a list-style subscript (ints / Ellipsis) to a string.

    Each int selects a letter from ``einsum_symbols``; Ellipsis becomes '@'.
    """
    chars = []
    for item in sub:
        if item is Ellipsis:
            chars.append("@")
        elif isinstance(item, int):
            chars.append(einsum_symbols[item])
        else:
            raise ValueError("For this input type lists must contain "
                             "either int or Ellipsis")
    return "".join(chars)
def _parse_einsum_input(operands, parse_ellipsis=True):
    """Parse einsum operands.

    This function is based on `numpy.core.einsumfunc._parse_einsum_input`
    function in NumPy 1.14.

    Accepts either the string form ``('ij,jk->ik', a, b)`` or the
    interleaved form ``(a, [0, 1], b, [1, 2], [0, 2])``.  In both forms an
    ellipsis is encoded as the single character ``'@'`` in the result.

    Returns
    -------
    input_strings : list of str
        Parsed input strings, one subscript string per operand.
    output_string : str or None
        Parsed output string (None when no explicit output was given).
    operands : list of array_like
        The operands to use in the numpy contraction

    Examples
    --------
    The operand list is simplified to reduce printing:

    >>> a = np.random.rand(4, 4)
    >>> b = np.random.rand(4, 4, 4)
    >>> _parse_einsum_input(('...a,...a->...', a, b))  # doctest: +SKIP
    (['@a', '@a'], '@', [a, b])
    >>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))  # doctest: +SKIP
    (['@a', '@a'], None, [a, b])
    """
    if len(operands) == 0:
        raise ValueError("No input operands")

    if isinstance(operands[0], str):
        # String form: first element is the subscripts, rest are arrays.
        subscripts = operands[0].replace(" ", "")
        operands = list(operands[1:])

        # Ensure all characters are valid
        for s in subscripts:
            if s in '.,->':
                continue
            if s not in einsum_symbols:
                raise ValueError("Character %s is not a valid symbol." % s)

        # Parse "..."; any '.' left over means a malformed ellipsis.
        subscripts = subscripts.replace("...", "@")
        if "." in subscripts:
            raise ValueError("Invalid Ellipses.")

        # Parse "->"
        if ("-" in subscripts) or (">" in subscripts):
            # Check for proper "->"
            invalid = subscripts.count("-") > 1 or subscripts.count(">") > 1
            subscripts = subscripts.split("->")
            if invalid or len(subscripts) != 2:
                raise ValueError("Subscripts can only contain one '->'.")
            input_subscripts, output_subscript = subscripts
        else:
            input_subscripts = subscripts
            output_subscript = None

        input_subscripts = input_subscripts.split(",")
        if len(input_subscripts) != len(operands):
            raise ValueError("Number of einsum subscripts must be equal to the"
                             " number of operands.")
    else:
        # Interleaved form: array, subscript-list, array, subscript-list, ...
        # with an optional trailing output subscript-list.
        tmp_operands = list(operands)
        operands = []
        input_subscripts = []
        while len(tmp_operands) >= 2:
            operands.append(tmp_operands.pop(0))
            input_subscripts.append(_parse_int_subscript(
                tmp_operands.pop(0)))
        if tmp_operands:
            output_subscript = _parse_int_subscript(tmp_operands[0])
        else:
            output_subscript = None

    return input_subscripts, output_subscript, operands
def _chr(char):
if char < 0:
return "...[%d]" % char
else:
return chr(char)
def _parse_ellipsis_subscript(subscript, ndim=None, ellipsis_len=None):
subs = subscript.split('@')
if len(subs) == 1:
sub, = subs
if ndim is not None and len(sub) != ndim:
# raise ValueError later
return "Einstein sum subscript %s does not contain the correct" \
" number of indices " % subs
return list(map(ord, sub))
elif len(subs) == 2:
left_sub, right_sub = subs
if ndim is not None:
ellipsis_len = ndim - (len(left_sub) + len(right_sub))
if ellipsis_len < 0:
# raise ValueError later
return "Einstein sum subscript %s...%s does not contain the" \
" correct number of indices " % (left_sub, right_sub)
return list(itertools.chain(
map(ord, left_sub),
range(-ellipsis_len, 0),
map(ord, right_sub),
))
else:
# >= 2 ellipses for an operand
raise ValueError("Invalid Ellipses.")
def _einsum_diagonals(input_subscripts, operands):
    """Compute diagonal for each operand

    This function mutates args: an operand that repeats a label is replaced
    by a diagonal view with one axis per distinct label, and its subscript
    entry is deduplicated to match.
    """
    for num, sub in enumerate(input_subscripts):
        if len(set(sub)) < len(sub):
            # This operand repeats at least one label.
            op = operands[num]

            # Group axis positions by label, preserving first-seen order.
            axes = {}
            for i, s in enumerate(sub):
                axes.setdefault(s, []).append(i)
            axes = list(axes.items())

            input_subscripts[num] = [
                s
                for s, _ in axes
            ]

            if not options['broadcast_diagonal']:
                # Axes sharing a label must have equal extents (no implicit
                # broadcasting across a diagonal).
                for s, indices in axes:
                    dims = list({op.shape[j] for j in indices})
                    if len(dims) >= 2:
                        raise ValueError(
                            "dimensions in operand %d"
                            " for collapsing index '%s' don't match (%d != %d)"
                            % (num, _chr(s), dims[0], dims[1])
                        )

            axes = [
                indices
                for _, indices in axes
            ]
            operands[num] = _transpose_ex(
                op, axes
            )
def einsum(*operands, **kwargs):
    """einsum(subscripts, *operands, dtype=False, casting='safe')

    Evaluates the Einstein summation convention on the operands.

    Using the Einstein summation convention, many common multi-dimensional
    array operations can be represented in a simple fashion. This function
    provides a way to compute such summations.

    .. note::
       Memory contiguity of calculation result is not always compatible with
       `numpy.einsum`.
       ``out`` and ``order`` options are not supported.

    Args:
        subscripts (str): Specifies the subscripts for summation.
        operands (sequence of arrays): These are the arrays for the operation.

    Returns:
        cupy.ndarray:
            The calculation based on the Einstein summation convention.

    .. seealso:: :func:`numpy.einsum`
    """
    input_subscripts, output_subscript, operands = \
        _parse_einsum_input(operands)
    assert isinstance(input_subscripts, list)
    assert isinstance(operands, list)

    dtype = kwargs.pop('dtype', None)
    casting = kwargs.pop('casting', 'safe')
    optimize = kwargs.pop('optimize', False)
    if optimize is True:
        optimize = 'greedy'
    if kwargs:
        # BUG FIX: ``list(kwargs.keys)`` passed the bound method (not
        # iterable) to list(), so the intended message was never produced.
        raise TypeError("Did not understand the following kwargs: %s"
                        % list(kwargs.keys()))

    result_dtype = xp.result_type(*operands) if dtype is None else dtype
    operands = [
        xp.asanyarray(arr)
        for arr in operands
    ]

    # Resolve each subscript string into a list of int labels (negative
    # labels stand for ellipsis dimensions).  Parse errors come back as
    # strings so the operand index can be appended here.
    input_subscripts = [
        _parse_ellipsis_subscript(sub, ndim=arr.ndim)
        for sub, arr in zip(input_subscripts, operands)
    ]
    for i, sub_or_err in enumerate(input_subscripts):
        if isinstance(sub_or_err, str):
            raise ValueError(sub_or_err + "for operand %d." % i)

    # Get length of each unique dimension and ensure all dimensions are correct
    dimension_dict = {}
    for tnum, term in enumerate(input_subscripts):
        sh = operands[tnum].shape
        for cnum, char in enumerate(term):
            dim = sh[cnum]
            if char in dimension_dict.keys():
                # For broadcasting cases we always want the largest dim size
                if dimension_dict[char] == 1:
                    dimension_dict[char] = dim
                elif dim not in (1, dimension_dict[char]):
                    dim_old = dimension_dict[char]
                    raise ValueError("Size of label '%s' for operand %d (%d) "
                                     "does not match previous terms (%d)."
                                     % (_chr(char), tnum, dim, dim_old))
            else:
                dimension_dict[char] = dim

    if output_subscript is None:
        # Build output subscripts: labels appearing exactly once, plus all
        # ellipsis (negative) labels, in sorted order.
        tmp_subscripts = _concat(input_subscripts)
        output_subscript = [
            s
            for s in sorted(set(tmp_subscripts))
            if s < 0 or tmp_subscripts.count(s) == 1
        ]
    else:
        if not options['sum_ellipsis']:
            if '@' not in output_subscript and -1 in dimension_dict:
                raise ValueError("output had too few broadcast dimensions")
        output_subscript = _parse_ellipsis_subscript(
            output_subscript,
            ellipsis_len=len(list(s for s in dimension_dict.keys() if s < 0))
        )

    # Make sure output subscripts are in the input
    tmp_subscripts = set(_concat(input_subscripts))
    for char in output_subscript:
        if char not in tmp_subscripts:
            raise ValueError(
                "Output character %s did not appear in the input"
                % _chr(char))

    # Collapse repeated labels within an operand into diagonal views.
    _einsum_diagonals(input_subscripts, operands)

    # no more raises

    if len(operands) >= 2:
        if any(op.size == 0 for op in operands):
            return xp.zeros(
                tuple(dimension_dict[s] for s in output_subscript),
                dtype=result_dtype
            )

        # Don't squeeze if unary, because this affects later (in trivial sum)
        # whether the return is a writeable view.
        for num in range(len(operands)):
            op = operands[num]
            if 1 in op.shape:
                squeeze_indices = []
                sub = []
                for i, s in enumerate(input_subscripts[num]):
                    if op.shape[i] == 1:
                        squeeze_indices.append(i)
                    else:
                        sub.append(s)
                input_subscripts[num] = sub
                operands[num] = xp.squeeze(op, axis=tuple(squeeze_indices))
                assert len(operands[num].shape) == len(input_subscripts[num])

    # unary einsum without summation should return a (writeable) view
    returns_view = len(operands) == 1

    # unary sum: reduce axes that appear neither in the output nor in any
    # other operand.
    for num, sub in enumerate(input_subscripts):
        other_subscripts = list(input_subscripts)
        other_subscripts[num] = output_subscript
        other_subscripts = _concat(other_subscripts)
        sum_axes = tuple(
            i
            for i, s in enumerate(sub)
            if s not in other_subscripts
        )
        if sum_axes:
            returns_view = False
            input_subscripts[num] = [
                s
                for i, s in enumerate(sub)
                if i not in sum_axes
            ]

            # Cannot do the following in cupy (bug?)
            # operands[num] = operands[num].sum(
            #     axis=sum_axes, dtype=result_dtype)
            operands[num] = (
                operands[num]
                .astype(result_dtype, casting=casting, copy=False)
                .sum(axis=sum_axes)
                # .sum uses platform integer types by default
                .astype(result_dtype, copy=False)
            )

    if returns_view:
        operands = [arr.view() for arr in operands]
    else:
        operands = [
            arr.astype(result_dtype, casting=casting, copy=False)
            for arr in operands
        ]

    # no more casts

    optimize_algorithms = {
        'greedy': _greedy_path,
        'optimal': _optimal_path,
    }
    if optimize is False:
        path = [(0, 1)] * (len(operands) - 1)  # TODO(kataoka): fix
    elif isinstance(optimize, str) and optimize in optimize_algorithms.keys():
        input_sets = [set(sub) for sub in input_subscripts]
        output_set = set(output_subscript)
        memory_arg = 1e99
        algo = optimize_algorithms[optimize]
        path = algo(input_sets, output_set, dimension_dict, memory_arg)
    elif len(optimize) and (optimize[0] == 'einsum_path'):
        path = optimize[1:]
    else:
        raise TypeError("Did not understand the path (optimize): %s"
                        % str(optimize))

    for idx0, idx1 in path:
        # repeat binary einsum: contract each pair via a batched matmul
        assert idx0 < idx1
        sub1 = input_subscripts.pop(idx1)
        op1 = operands.pop(idx1)
        sub0 = input_subscripts.pop(idx0)
        op0 = operands.pop(idx0)
        set0 = set(sub0)
        set1 = set(sub1)
        assert len(set0) == len(sub0)
        assert len(set1) == len(sub1)
        set_out = set(_concat([output_subscript] + input_subscripts))
        shared = set0 & set1
        # Shared labels still needed downstream are batch dims; the rest
        # are contracted away.
        batch_dims = shared & set_out
        contract_dims = shared - batch_dims
        bs0, cs0, ts0 = _make_transpose_axes(sub0, batch_dims, contract_dims)
        bs1, cs1, ts1 = _make_transpose_axes(sub1, batch_dims, contract_dims)
        batch_size = _prod([dimension_dict[s] for s in batch_dims])
        contract_size = _prod([dimension_dict[s] for s in contract_dims])
        tmp0 = op0.transpose(bs0 + ts0 + cs0).reshape(
            batch_size, -1, contract_size)
        tmp1 = op1.transpose(bs1 + cs1 + ts1).reshape(
            batch_size, contract_size, -1)
        tmp_out = xp.matmul(tmp0, tmp1)
        sub_b = [sub0[i] for i in bs0]
        assert sub_b == [sub1[i] for i in bs1]
        sub_l = [sub0[i] for i in ts0]
        sub_r = [sub1[i] for i in ts1]
        sub_out = sub_b + sub_l + sub_r
        op_out = tmp_out.reshape([dimension_dict[s] for s in sub_out])
        input_subscripts.append(sub_out)
        operands.append(op_out)

    # unary einsum at last: transpose to the requested output order
    op0, = operands
    sub0, = input_subscripts
    transpose_axes = []
    for s in output_subscript:
        if s in sub0:
            transpose_axes.append(sub0.index(s))
    op_out = op0.transpose(transpose_axes).reshape([
        dimension_dict[s]
        for s in output_subscript
    ])
    assert returns_view or op_out.dtype == result_dtype
    return op_out
def _tuple_sorted_by_0(zs):
return tuple(i for _, i in sorted(zs))
def _make_transpose_axes(sub, b_dims, c_dims):
bs = []
cs = []
ts = []
for i, s in enumerate(sub):
if s in b_dims:
bs.append((s, i))
elif s in c_dims:
cs.append((s, i))
else:
ts.append((s, i))
return (
_tuple_sorted_by_0(bs),
_tuple_sorted_by_0(cs),
_tuple_sorted_by_0(ts),
)
|
import unittest
import logging
import Queue
import pika
import esgfpid.rabbit.asynchronous.thread_feeder
from esgfpid.rabbit.asynchronous.exceptions import OperationNotAllowed
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.NullHandler())
# Test resources:
from resources.TESTVALUES import *
import resources.TESTVALUES as TESTHELPERS
class ThreadFeederTestCase(unittest.TestCase):
    """Unit tests for esgfpid.rabbit.asynchronous.thread_feeder.RabbitFeeder.

    The feeder is exercised against a mocked thread/channel; the state
    machine decides in which states publishing is allowed.
    (assertEquals is a deprecated unittest alias; assertEqual is used.)
    """

    def setUp(self):
        LOGGER.info('######## Next test (%s) ##########', __name__)

    def tearDown(self):
        LOGGER.info('#############################')

    def make_feeder(self, error=None):
        """Create a RabbitFeeder backed by mocks.

        :param error: optional exception the mocked channel raises on
            basic_publish.
        :return: tuple of (feeder, thread mock).
        """
        thread = TESTHELPERS.get_thread_mock2(error)
        statemachine = esgfpid.rabbit.asynchronous.thread_statemachine.StateMachine()
        nodemanager = TESTHELPERS.get_nodemanager()
        feeder = esgfpid.rabbit.asynchronous.thread_feeder.RabbitFeeder(
            thread,
            statemachine,
            nodemanager)
        statemachine.set_to_available()
        nodemanager.set_next_host()  # otherwise, we cannot call its method inside the publish method
        return feeder, thread

    # Tests

    #
    # Sending messages
    #

    def test_send_message_ok(self):
        """A queued message is published and removed from the queue."""
        # Preparation:
        msg = "{'foo':'bar'}"
        feeder, thread = self.make_feeder()
        thread.messages.append(msg)

        # Run code to be tested:
        feeder.publish_message()

        # Check result:
        # Publish was called:
        thread._channel.basic_publish.assert_called_once()
        # Message not waiting in queue anymore:
        self.assertNotIn(msg, thread.messages)

    def test_send_message_empty(self):
        """With no queued messages the channel is never touched."""
        # Preparation:
        feeder, thread = self.make_feeder()

        # Run code to be tested:
        feeder.publish_message()
        feeder.publish_message()

        # Check result:
        # Publish was not called:
        thread._channel.basic_publish.assert_not_called()

    def test_send_message_error(self):
        """A failed publish puts the message back into the queue."""
        # Preparation:
        msg = "{'foo':'bar'}"
        feeder, thread = self.make_feeder(error=pika.exceptions.ChannelClosed)
        thread.messages.append(msg)

        # Run code to be tested:
        feeder.publish_message()

        # Check result:
        # Publish was tried:
        thread._channel.basic_publish.assert_called_once()
        # Message was put back to queue:
        self.assertIn(msg, thread.messages)
        self.assertIn(msg, thread.put_back)

    def test_send_message_NOT_STARTED_YET(self):
        """No publish happens while the state machine was not started."""
        # Preparation:
        msg = "{'foo':'bar'}"
        feeder, thread = self.make_feeder()
        thread.messages.append(msg)
        feeder.statemachine._StateMachine__state = 0

        # Run code to be tested:
        feeder.publish_message()

        # Check result:
        self.assertTrue(feeder.statemachine.is_NOT_STARTED_YET())
        self.assertIn(msg, thread.messages)
        thread._channel.basic_publish.assert_not_called()

    def test_send_message_PERMANENTLY_UNAVAILABLE_1(self):
        """No publish after the publisher closed the connection."""
        # Preparation:
        msg = "{'foo':'bar'}"
        feeder, thread = self.make_feeder()
        thread.messages.append(msg)
        feeder.statemachine.set_to_permanently_unavailable()
        feeder.statemachine.set_detail_closed_by_publisher()

        # Run code to be tested:
        feeder.publish_message()

        # Check result:
        self.assertTrue(feeder.statemachine.is_PERMANENTLY_UNAVAILABLE())
        self.assertIn(msg, thread.messages)
        thread._channel.basic_publish.assert_not_called()

    def test_send_message_PERMANENTLY_UNAVAILABLE_2(self):
        """No publish after a permanent connection failure."""
        # Preparation:
        msg = "{'foo':'bar'}"
        feeder, thread = self.make_feeder()
        thread.messages.append(msg)
        feeder.statemachine.set_to_permanently_unavailable()
        feeder.statemachine.detail_could_not_connect = True

        # Run code to be tested:
        feeder.publish_message()

        # Check result:
        self.assertTrue(feeder.statemachine.is_PERMANENTLY_UNAVAILABLE())
        self.assertIn(msg, thread.messages)
        thread._channel.basic_publish.assert_not_called()

    def test_send_message_WAITING_TO_BE_AVAILABLE(self):
        """Messages stay queued while the connection is being (re)built."""
        # Preparation:
        msg = "{'foo':'bar'}"
        feeder, thread = self.make_feeder()
        thread.messages.append(msg)
        feeder.statemachine.set_to_waiting_to_be_available()

        # Run code to be tested:
        feeder.publish_message()

        # Check result:
        self.assertIn(msg, thread.messages)
        self.assertTrue(feeder.statemachine.is_WAITING_TO_BE_AVAILABLE())
        thread._channel.basic_publish.assert_not_called()

    def test_reset_delivery_number(self):
        """Delivery number counts published messages and resets to 1."""
        # Preparation:
        msg = "{'foo':'bar'}"
        feeder, thread = self.make_feeder()
        thread.messages.append(msg)
        thread.messages.append(msg)
        thread.messages.append(msg)

        # Pre-Check:
        self.assertEqual(feeder._RabbitFeeder__delivery_number, 1)

        # Increase delivery number:
        feeder.publish_message()
        feeder.publish_message()
        feeder.publish_message()
        thread._channel.basic_publish.assert_called()

        # Check if it was increased:
        self.assertEqual(feeder._RabbitFeeder__delivery_number, 4)

        # Run code to be tested:
        feeder.reset_delivery_number()

        # Check if it was reset:
        self.assertEqual(feeder._RabbitFeeder__delivery_number, 1)
Unit tests: Added unit tests for routing key adaptations.
import unittest
import logging
import Queue
import pika
import esgfpid.rabbit.asynchronous.thread_feeder
from esgfpid.rabbit.asynchronous.exceptions import OperationNotAllowed
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.NullHandler())
# Test resources:
from resources.TESTVALUES import *
import resources.TESTVALUES as TESTHELPERS
class ThreadFeederTestCase(unittest.TestCase):
def setUp(self):
    # Log a visible separator so each test's output is easy to locate.
    LOGGER.info('######## Next test (%s) ##########', __name__)
def tearDown(self):
    # Closing separator for the test's log output; no cleanup needed.
    LOGGER.info('#############################')
def make_feeder(self, error=None):
    """Create a RabbitFeeder wired to a mocked thread and node manager.

    :param error: optional exception the mocked channel raises on
        basic_publish.
    :return: tuple of (feeder, thread mock).
    """
    mock_thread = TESTHELPERS.get_thread_mock2(error)
    statemachine = esgfpid.rabbit.asynchronous.thread_statemachine.StateMachine()
    nodemanager = TESTHELPERS.get_nodemanager()
    feeder = esgfpid.rabbit.asynchronous.thread_feeder.RabbitFeeder(
        mock_thread,
        statemachine,
        nodemanager)
    statemachine.set_to_available()
    # Otherwise, we cannot call its method inside the publish method:
    nodemanager.set_next_host()
    return feeder, mock_thread
# Tests
#
# Sending messages
#
def test_send_message_ok(self):
    """A queued message is published once and removed from the queue."""
    # Preparation:
    msg = "{'foo':'bar'}"
    feeder, thread = self.make_feeder()
    thread.messages.append(msg)

    # Run code to be tested:
    feeder.publish_message()

    # Check result:
    # Publish was called:
    thread._channel.basic_publish.assert_called_once()
    # Message not waiting in queue anymore:
    self.assertNotIn(msg, thread.messages)
def test_routing_key_ok(self):
    """The routing key embedded in the message reaches basic_publish as-is."""
    # Preparation:
    msg = '{"foo":"bar", "ROUTING_KEY":"myprefix.HASH.fresh.mydescription"}'
    feeder, thread = self.make_feeder()
    thread.messages.append(msg)

    # Run code to be tested:
    feeder.publish_message()

    # Check result:
    # Publish was called:
    thread._channel.basic_publish.assert_called_once()
    # Message not waiting in queue anymore:
    self.assertNotIn(msg, thread.messages)
    # Correct routing key (assertEqual: assertEquals is deprecated):
    rk = thread._channel.basic_publish.call_args[1]["routing_key"]
    self.assertEqual(rk, 'myprefix.HASH.fresh.mydescription')
def test_routing_key_untrusted_fallback_ok(self):
    """An open (untrusted) node rewrites the key to the fallback variant.

    BUG FIX: this method used to share the name
    ``test_routing_key_untrusted_ok`` with the test below, so Python
    silently replaced it and it never ran; renamed to match the
    'fresh-untrusted-fallback' key it verifies.
    """
    # Preparation:
    msg = '{"foo":"bar", "ROUTING_KEY":"myprefix.HASH.fresh.mydescription"}'
    feeder, thread = self.make_feeder()
    feeder.nodemanager._NodeManager__current_node['is_open'] = True
    thread.messages.append(msg)

    # Run code to be tested:
    feeder.publish_message()

    # Check result:
    # Publish was called:
    thread._channel.basic_publish.assert_called_once()
    # Message not waiting in queue anymore:
    self.assertNotIn(msg, thread.messages)
    # Correct routing key (assertEqual: assertEquals is deprecated):
    rk = thread._channel.basic_publish.call_args[1]["routing_key"]
    self.assertEqual(rk, 'myprefix.HASH.fresh-untrusted-fallback.mydescription')
def test_routing_key_untrusted_only_ok(self):
    """With no trusted nodes at all, the routing key becomes the untrusted-only variant.

    Renamed from test_routing_key_untrusted_ok: this method duplicated the name
    of the previous test, shadowing it; distinct names let both tests run.
    """
    # Preparation:
    msg = '{"foo":"bar", "ROUTING_KEY":"myprefix.HASH.fresh.mydescription"}'
    feeder, thread = self.make_feeder()
    feeder.nodemanager._NodeManager__current_node['is_open']=True
    feeder.nodemanager._NodeManager__has_trusted=False
    thread.messages.append(msg)
    # Run code to be tested:
    feeder.publish_message()
    # Check result:
    # Publish was called:
    thread._channel.basic_publish.assert_called_once()
    # Message not waiting in queue anymore:
    self.assertNotIn(msg, thread.messages)
    # Correct routing key:
    rk = thread._channel.basic_publish.call_args[1]["routing_key"]
    self.assertEqual(rk, 'myprefix.HASH.fresh-untrusted-only.mydescription')
def test_send_message_empty(self):
    """Publishing with an empty message queue must never touch the channel."""
    feeder, thread = self.make_feeder()
    # Attempt to publish twice while no message is queued:
    for _ in range(2):
        feeder.publish_message()
    # The channel must not have been used at all:
    thread._channel.basic_publish.assert_not_called()
def test_send_message_error(self):
    """If basic_publish raises ChannelClosed, the message is put back into the queue."""
    # Preparation:
    msg = "{'foo':'bar'}"
    feeder, thread = self.make_feeder(error=pika.exceptions.ChannelClosed)
    thread.messages.append(msg)
    # Run code to be tested:
    feeder.publish_message()
    # Check result:
    # Publish was tried:
    thread._channel.basic_publish.assert_called_once()
    # Message was put back to queue:
    self.assertIn(msg, thread.messages)
    self.assertIn(msg, thread.put_back)
def test_send_message_NOT_STARTED_YET(self):
    """In NOT_STARTED_YET state nothing is published and the message stays queued."""
    # Preparation:
    msg = "{'foo':'bar'}"
    feeder, thread = self.make_feeder()
    thread.messages.append(msg)
    # Force the state machine into its initial state (0 = NOT_STARTED_YET).
    feeder.statemachine._StateMachine__state = 0
    # Run code to be tested:
    feeder.publish_message()
    # Check result:
    self.assertTrue(feeder.statemachine.is_NOT_STARTED_YET())
    self.assertIn(msg, thread.messages)
    thread._channel.basic_publish.assert_not_called()
def test_send_message_PERMANENTLY_UNAVAILABLE_1(self):
    """When closed by the publisher, nothing is published and the message stays queued."""
    # Preparation:
    msg = "{'foo':'bar'}"
    feeder, thread = self.make_feeder()
    thread.messages.append(msg)
    feeder.statemachine.set_to_permanently_unavailable()
    feeder.statemachine.set_detail_closed_by_publisher()
    # Run code to be tested:
    feeder.publish_message()
    # Check result:
    self.assertTrue(feeder.statemachine.is_PERMANENTLY_UNAVAILABLE())
    self.assertIn(msg, thread.messages)
    thread._channel.basic_publish.assert_not_called()
def test_send_message_PERMANENTLY_UNAVAILABLE_2(self):
    """When the connection could not be made, nothing is published and the message stays queued."""
    # Preparation:
    msg = "{'foo':'bar'}"
    feeder, thread = self.make_feeder()
    thread.messages.append(msg)
    feeder.statemachine.set_to_permanently_unavailable()
    feeder.statemachine.detail_could_not_connect = True
    # Run code to be tested:
    feeder.publish_message()
    # Check result:
    self.assertTrue(feeder.statemachine.is_PERMANENTLY_UNAVAILABLE())
    self.assertIn(msg, thread.messages)
    thread._channel.basic_publish.assert_not_called()
def test_send_message_WAITING_TO_BE_AVAILABLE(self):
    """In WAITING_TO_BE_AVAILABLE state nothing is published and the message stays queued."""
    # Preparation:
    msg = "{'foo':'bar'}"
    feeder, thread = self.make_feeder()
    thread.messages.append(msg)
    feeder.statemachine.set_to_waiting_to_be_available()
    # Run code to be tested:
    feeder.publish_message()
    # Check result:
    self.assertIn(msg, thread.messages)
    self.assertTrue(feeder.statemachine.is_WAITING_TO_BE_AVAILABLE())
    thread._channel.basic_publish.assert_not_called()
def test_reset_delivery_number(self):
    """Each publish increments the delivery number; reset_delivery_number returns it to 1."""
    # Preparation:
    msg = "{'foo':'bar'}"
    feeder, thread = self.make_feeder()
    thread.messages.append(msg)
    thread.messages.append(msg)
    thread.messages.append(msg)
    # Pre-Check (assertEquals is a deprecated alias, removed in Python 3.12;
    # use assertEqual throughout):
    self.assertEqual(feeder._RabbitFeeder__delivery_number, 1)
    # Increase delivery number:
    feeder.publish_message()
    feeder.publish_message()
    feeder.publish_message()
    thread._channel.basic_publish.assert_called()
    # Check if it was increased:
    self.assertEqual(feeder._RabbitFeeder__delivery_number, 4)
    # Run code to be tested:
    feeder.reset_delivery_number()
    # Check if it was reset:
    self.assertEqual(feeder._RabbitFeeder__delivery_number, 1)
|
# Package metadata for kore-plugins-ini.
# Fixed a bad merge: the block contained two copies of these assignments
# separated by a bare "version 0.0.2" line, which is a SyntaxError.
# Only the latest values (version 0.0.2) are kept.
__author__ = 'Artur Maciąg'
__email__ = 'maciag.artur@gmail.com'
__version__ = '0.0.2'
__url__ = 'https://github.com/kore-plugins/kore-plugins-ini'
|
# standard python imports
import os
import time
import json
import logging
import datetime
import subprocess
from dateutil.parser import parse
import numbers
# external imports
import hjson
import gevent
from PIL import Image
from rhizo.main import c
from rhizo.extensions.camera import encode_image
import requests
# our own imports
from sim_devices import simulate, add_sim_sensor, add_sim_actuator, remove_sim_device
from diagram_storage import list_diagrams, load_diagram, save_diagram, rename_diagram, delete_diagram
from diagram import Diagram
from git_tools import git_base_command
from commands.command import Command
from commands.list_versions_command import ListVersionsCommand
from commands.download_software_command import DownloadSoftwareCommand
from commands.update_software_command import UpdateSoftwareCommand
#
# Check if we can include IP addresses in our status
#
# netifaces is an optional dependency; without it the status report simply
# omits IP addresses.
include_network_status = True
try:
    from netifaces import interfaces, ifaddresses, AF_INET
except ImportError:
    include_network_status = False
# subprocess is stdlib so this guard should never trip; kept for symmetry
# with the optional-import pattern above.
include_version_info = True
try:
    import subprocess
except ImportError:
    include_version_info = False
# threshold for idle use messages indicating to stop sending update messages
IDLE_STOP_UPDATE_THRESHOLD = 5 * 60.0
# The Flow class holds the state and control code for the data flow client program (running on a RasPi or similar).
class Flow(object):
    """Data-flow client: runs diagrams, records sensor data, and talks to the server."""
    #
    # Values for set_operational_status
    #
    OP_STATUS_READY = "READY"
    OP_STATUS_UPDATING = "UPDATING"
    #
    # Get version info
    #
    # Computed once at class-definition time.
    FLOW_VERSION = None
    if include_version_info:
        try:
            #
            # Track version with git tags.
            # If this head is tagged, then this returns the tag name.
            # Otherwise this returns a short hash of the head.
            #
            FLOW_VERSION = subprocess.check_output( git_base_command() +
                [ 'describe',
                  '--tags',
                  '--always' ]).rstrip()
        except Exception as err:
            # git unavailable or not a git checkout
            FLOW_VERSION = "unknown"
    logging.debug("Found flow version '%s'" % (FLOW_VERSION))
def __init__(self):
    """Initialize client state, register handlers, and restore diagram/run info.

    Order matters: handlers are registered first, then optional subsystems
    (sensaur, MQTT/BLE, influxdb store) are initialized based on config, and
    finally the last run/diagram are restored from the store if available.
    """
    self.diagram = None # the currently running diagram (if any)
    # used to perform always save history for each non-sim sensor
    # at sensor raw reading interval
    self.integ_test = False
    self.publisher = None
    self.store = None
    self.sensaur_hub = None
    self.last_user_message_time = None # the last user message time (if any)
    self.last_camera_store_time = None # the last camera sequence update time (if any)
    self.last_record_timestamp = None # datetime object of last values recorded to long-term storage
    self.recording_interval = None # number of seconds between storing values in long-term storage
    self.run_name = "Noname" # name of run for which recording is being saved
    self.sequence_names = {} # a dictionary mapping block IDs to sequence names (when recording to server)
    c.add_message_handler(self) # register to receive messages from server/websocket
    c.auto_devices.add_input_handler(self)
    # if using sensaur system, initialize it
    if c.config.get('enable_sensaur') and c.config.get('sensaur_port'):
        import sensaur
        self.sensaur_hub = sensaur.Hub(c.config['sensaur_port'])
        self.sensaur_hub.add_input_handler(self)
    else:
        self.sensaur_hub = None
    # load last diagram on startup
    # TODO: decide to use this or self.store("diagram"...) below
    if c.config.get('startup_diagram', ''):
        name = c.config.startup_diagram
        diagram_spec = load_diagram(name)
        logging.debug("Flow.__init__: loading diagram: %s" % name)
        self.diagram = Diagram(name, diagram_spec)
    # call init functions. if they fail, mqtt, store resp. will be noop
    if c.config.get('enable_ble', False):
        self.init_mqtt()
    else:
        logging.debug("MQTT and BLE disabled.")
    if c.config.get('enable_store', False):
        self.init_store()
        # if store enabled, restore recording_interval
        rs = self.store.query("select * from run order by time desc limit 2")
        points = list(rs.get_points())
        logging.debug("Flow.__init__: run points: %s" % points)
        if points:
            point = points[0]
            if point.get("action") == "start":
                logging.info("Flow.__init__: loading last run info: %s" % point)
                # last action was start, not stop: load name and interval
                self.recording_interval = int(point.get("value"))
                self.run_name = point.get("name")
        # sample data:
        # points: [{u'count': 60, u'name': u'light', u'pin': u'2671', u'min': 242,
        #          u'max': 245, u'time': u'2017-06-16T20:42:00Z', u'mean': 244.8}, ...
        # if diagram not loaded from hardcoded config, load from influxdb 'diagram' measurement
        if not self.diagram:
            rs = self.store.query("select * from diagram order by time desc limit 2")
            points = list(rs.get_points())
            logging.debug("Flow.__init__: diagram points: %s" % points)
            if points:
                point = points[0]
                if point.get("action") == "start":
                    #logging.info("Flow.__init__: loading last diagram: %s" % point)
                    name = point.get("name")
                    try:
                        diagram_spec = load_diagram(name)
                        logging.debug("Flow.__init__: loading diagram: %s" % name)
                        self.diagram = Diagram(name, diagram_spec)
                    except Exception as err:
                        logging.warning("Flow.__init__: can't load diagram %s: %s" % (name, err))
    else:
        logging.debug("Store disabled.")
    self.operational_status = self.OP_STATUS_READY
    self.available_versions = [] # Available software versions
    self.username = None # User currently recording
    self.recording_location = None # Folder path on server of
                                   # named dataset
    self.device_check_greenlet = None # The recording greenlet.
    self.sensor_data_latest = {} # For the sensor data greenlet,
                                 # keep the last input_handler
                                 # value received for each physical
                                 # device so that we can return
                                 # sensor data to the user without
                                 # having a diagram loaded.
    self.sensor_data_greenlet = None # Greenlet for sending sensor
                                     # data over the websocket.
    self.firebase = None # Holds parameters from flow_server::firebase_init message
    self.firebase_sensor_data_greenlet = None # Greenlet for sending sensor data to Firebase.
# MQTT integration
def init_mqtt(self):
    """Initialize the MQTT publisher; on any failure, MQTT publishing stays disabled.

    self.publisher remains None when initialization fails, and send_message()
    falls back to websocket-only delivery.
    """
    try:
        from mqttclient import MqttPublisher
        #TODO: load mq_topic from config. It has to be the same
        # --- as in gattserver/hpserver.py
        mq_topic = "flow/ble"
        self.publisher = MqttPublisher(mq_topic)
        self.publisher.start()
        logging.info("MQTT Initialized.")
    except Exception as err:
        # Was a bare `except:`, which would also swallow SystemExit and
        # KeyboardInterrupt; narrowed to Exception and the error is logged.
        logging.error("Can't initialize MQTT. Probably some components not installed. MQTT publish will be disabled.")
        logging.debug("MQTT init error: %s" % err)
# store integration
def init_store(self):
    """Initialize the local influxdb store; on failure self.store stays None (disabled)."""
    try:
        from influxstore import Store
        # TODO load pin for this device
        my_pin = '2671'
        # open store to flow database
        self.store = Store(database="flow", pin=my_pin)
        logging.info("Influxdb store Initialized.")
    except Exception as err:
        logging.error("Can't initialize store. Probably influxdb library not installed or influxdb not running. Store will be disabled: %s" % \
            err)
#
# Set operational status
#
def set_operational_status(self, status):
    """Set the controller's operational status (OP_STATUS_READY / OP_STATUS_UPDATING)."""
    self.operational_status = status
#
# Get operational status
#
def get_operational_status(self):
    """Return the controller's current operational status string."""
    return self.operational_status
# run the current diagram (if any); this is the main loop of the flow program
def start(self):
    """Main loop: spawn background greenlets, then tick the diagram roughly once a second.

    Never returns; runs until the process is stopped.
    """
    # launch a greenlet to send watchdog messages to server
    gevent.spawn(self.send_watchdog)
    # launch sensaur greenlets if enabled
    if self.sensaur_hub:
        self.sensaur_hub.start_greenlets()
    # loop forever
    timestamp = datetime.datetime.utcnow().replace(microsecond=0)
    while True:
        # if the current time is greater than our target timestamp, run our processing
        if datetime.datetime.utcnow() > timestamp:
            if self.diagram:
                self.update_diagram_and_send_values(timestamp)
            # the processing could have taken more than a second, so update the target timestamp as many times as needed (by an integer amount) to be in the future
            # alternative: could compute timedelta and do some math to do this in a single step
            while timestamp < datetime.datetime.utcnow():
                timestamp += datetime.timedelta(seconds=1)
        # sleep until it is time to do another update
        c.sleep(0.1)
# updates the current diagram and sends values to server and external hardware;
# this function should be called once a second;
# timestamp should always be 1 second after the last timestamp (and should be an even number of seconds)
def update_diagram_and_send_values(self, timestamp):
    """Run one update cycle: refresh blocks, push values to server/actuators, record data.

    :param timestamp: datetime for this tick; used for long-term recording intervals.
    """
    # update diagram values
    self.update_camera_blocks()
    self.diagram.update()
    # send values to server and actuators
    values = {}
    for block in self.diagram.blocks:
        value = None
        #logging.debug('flow.start loop: block=%s' % block)
        if block.output_type == 'i': # only send camera/image updates if recent message from user
            # 300 s = 5 minutes of user inactivity before image updates stop
            if self.last_user_message_time and time.time() < self.last_user_message_time + 300:
                value = block.value
        else:
            if block.value is not None:
                # build a %.Nf format string from the block's decimal places
                format = '%' + '.%df' % block.decimal_places
                value = format % block.value
        values[block.id] = value
        # send values to actuators
        if not block.output_type:
            device = self.find_device(block.name) # fix(later): does this still work if we rename a block?
            if device and device.dir == 'out':
                try:
                    value = int(block.value)
                except:
                    value = None
                if value is not None:
                    if hasattr(device, 'send_command'):
                        device.send_command('set %d' % value) # for auto_devices devices
                    else:
                        self.sensaur_hub.set_output_value(device, value) # for sensaur components
    #logging.debug('flow.start loop: values=%s' % values)
    # only push diagram updates while a user has been active recently
    if self.last_user_message_time and (time.time() - self.last_user_message_time < IDLE_STOP_UPDATE_THRESHOLD):
        #logging.debug("IDLE_STOP_UPDATE_THRESHOLD passed")
        self.send_message('update_diagram', {'values': values})
    else:
        pass
        #logging.debug("IDLE_STOP_UPDATE_THRESHOLD failed")
    # send sequence values
    if self.recording_interval and ((self.last_record_timestamp is None) or timestamp >= self.last_record_timestamp + datetime.timedelta(seconds = self.recording_interval)):
        data_storage_block = None
        for block in self.diagram.blocks:
            if block.type == 'data storage':
                data_storage_block = block
                break
        if data_storage_block: # if data storage block is defined, store everything that feeds into it
            record_blocks = [b for b in data_storage_block.sources]
            self.record_data(record_blocks, timestamp)
        self.last_record_timestamp = timestamp
def calc_auto_interval(self, start, end):
    """Calculate automatic interval.
    About 120 records should fit in start/end range.
    :param start: start of history range for which interval is calculated
    :param end: end of history range for which interval is calculated
    :return: string compatible with influxdb group by interval, e.g. 5s, 1m, 60m
    or None if auto interval is < 1m and no grouping needs to be done
    """
    try:
        span_seconds = (parse(end) - parse(start)).total_seconds()
    except Exception:
        # unparseable timestamps: fall back to no grouping
        return None
    # (upper bound in seconds, influxdb group-by interval); scanned in order.
    # Ranges above 30 days (and spans <= 10 minutes) get no grouping (None).
    thresholds = (
        (600, None),          # <= 10m: no grouping
        (3600, "1m"),         # <= 1h
        (24 * 3600, "30m"),   # <= 1d: 48 records max
        (7 * 24 * 3600, "4h"),    # <= 7d: 84 records max
        (30 * 24 * 3600, "8h"),   # <= 30d: 120 records max
    )
    for upper_bound, interval in thresholds:
        if span_seconds <= upper_bound:
            return interval
    return None
#
# handle messages from server (sent via websocket)
#
def handle_message(self, type, params):
    """Dispatch one incoming websocket message.

    :param type: message type string (e.g. 'list_diagrams', 'start_recording')
    :param params: dict of message parameters
    :return: True if the message was handled, False otherwise
    """
    logging.debug('handle_message: %s %s' % (type, params))
    #
    # For any messages that choose to implement the command interface,
    # they can be instantiated using their message type as key.
    #
    command_class_dict = {
        'download_software_updates': DownloadSoftwareCommand,
        'list_software_versions': ListVersionsCommand,
        'update_software_version': UpdateSoftwareCommand }
    #
    # Messages allowed when in recording mode
    #
    allowed_when_recording = [ 'stop_recording',
                               'stop_diagram',
                               'list_diagrams',
                               'request_status',
                               'rename_diagram',
                               'delete_diagram',
                               'flow_server::firebase_init' ]
    #
    # Restrict allowed operations while recording.
    # Do not allow modification of the running diagram while
    # recording.
    #
    if self.recording_interval is not None:
        if not type in allowed_when_recording:
            logging.debug("Message %s not allowed while recording." % (type))
            username = self.username
            diagram_name = None
            if self.diagram:
                diagram_name = self.diagram.name
            self.send_message(type + '_response',
                { 'success': False,
                  'error': 'recording_in_progress',
                  'data': { 'username': username,
                            'diagram': diagram_name },
                  'message': 'Cannot perform operation %s while controller is recording.' % (type)
                })
            self.last_user_message_time = time.time()
            return True
    used = True
    if type == 'list_devices':
        # fix: was a Python 2 `print` statement; use logging like the rest of the file
        logging.debug('list_devices')
        for device in self.device_list():
            self.send_message('device_added', device.as_dict())
    elif type == 'history':
        self.send_history(params)
    elif type == 'request_block_types':
        block_types = hjson.loads(open('block_types.hjson').read())
        self.send_message('block_types', block_types)
    elif type == 'list_diagrams':
        self.send_message('diagram_list', {'diagrams': list_diagrams()})
    elif type == 'save_diagram':
        save_diagram(params['name'], params['diagram'])
        logging.debug("Sending save_diagram_response")
        self.send_message( 'save_diagram_response',
            { 'success': True,
              'message': "Saved diagram: %s" % (params['name'])
            })
    elif type == 'rename_diagram':
        #
        # Do not allow renaming of recording diagram
        #
        if self.recording_interval is not None:
            if params['old_name'] == self.diagram.name:
                self.send_message(
                    'rename_diagram_response',
                    { 'success': False,
                      'message': "Cannot rename diagram while recording"
                    })
                return
        rename_diagram(params['old_name'], params['new_name'])
        self.send_message(
            'rename_diagram_response',
            { 'success': True,
              'message': "Diagram renamed"
            })
    elif type == 'delete_diagram':
        #
        # Do not allow deleting of recording diagram
        #
        if self.recording_interval is not None:
            if params['name'] == self.diagram.name:
                self.send_message(
                    'delete_diagram_response',
                    { 'success': False,
                      'message': "Cannot delete diagram while recording"
                    })
                return
        delete_diagram(params['name'])
        self.send_message(
            'delete_diagram_response',
            { 'success': True,
              'message': "Diagram deleted"
            })
    elif type == 'set_diagram':
        self.set_diagram(params)
    elif type == 'start_diagram': # start a diagram running on the controller; this will stop any diagram that is already running
        logging.debug("handle_message: start_diagram - loading diagram: %s" % params['name'])
        diagram_spec = load_diagram(params['name'])
        self.diagram = Diagram(params['name'], diagram_spec)
        #local_config = hjson.loads(open('local.hjson').read()) # save name of diagram to load when start script next time
        #local_config['startup_diagram'] = params['name']
        #open('local.hjson', 'w').write(hjson.dumps(local_config))
        if self.store:
            self.store.save('diagram', params['name'], 0, {'action': 'start'})
        self.send_message( 'start_diagram_response',
            { 'success': True,
              'message': "Started diagram: %s" % (params['name'])
            })
    elif type == 'stop_diagram':
        #
        # Need to update the metadata in the recording location
        # indicating that we are no longer recording to that location.
        # Note if a controller dies while recording, the metadata
        # will still indicate 'recording: True', so someone might stop
        # a recording (to update the metadata to set 'recording: False')
        # even though the controller might no longer be recording to that
        # location. There should probably be a better way to handle this.
        #
        stop_location = params.get('stop_location')
        if stop_location is None:
            stop_location = self.recording_location
        #
        # Set this recording as done.
        #
        if stop_location:
            if not stop_location.startswith('/'):
                stop_location = '/' + stop_location
            metadata = c.resources.read_file(stop_location + "/metadata")
            if metadata is not None:
                metadata = json.loads(metadata)
                metadata['recording'] = False
                metadata['end_time'] = '%s' % (datetime.datetime.utcnow())
                c.resources.write_file(
                    stop_location + "/metadata",
                    json.dumps(metadata) )
            else:
                c.resources.write_file(
                    stop_location + "/metadata",
                    json.dumps({ 'controller_path': c.path_on_server(),
                                 'recording': False,
                                 'recording_interval': self.recording_interval }))
        #
        # Stop recording if in progress.
        # Remove the currently running diagram program.
        # Remove the currently set user.
        #
        self.recording_interval = None
        self.diagram = None
        self.username = None
        if self.recording_location != stop_location:
            #
            # We are not really recording to the location we
            # have been asked to "stop" so just update the
            # above metadata and continue.
            #
            self.send_message( type + '_response',
                { 'success': True,
                  'message': "This controller was no longer recording at that location, but the recording has been marked as stopped."
                })
            return
        self.recording_location = None
        self.send_message( type + '_response',
            { 'success': True,
              'message': "Program stopped"
            })
        #
        # Ensure latest status reflects that this controller is
        # not recording.
        #
        self.send_status()
    elif type == 'start_recording':
        #
        # Allow 'set_diagram' and 'start_recording' to be
        # an atomic operation.
        # Caller can specify diagram and username in params.
        #
        if set(('diagram', 'username')) <= set(params):
            self.set_diagram(params)
        metadata = {
            'controller_path': c.path_on_server(),
            'controller_name': self.controller_name(),
            'program': self.diagram.diagram_spec,
        }
        # check for data storage block
        data_storage_block = None
        for block in self.diagram.blocks:
            if block.type == 'data storage':
                data_storage_block = block
                break
        # if data storage block, get recording info from it
        if data_storage_block:
            dataset_displayedName = data_storage_block.read_param(data_storage_block.params, 'dataset_location', 'data')
            self.recording_location = params['recording_location']
            self.recording_interval = data_storage_block.read_param(data_storage_block.params, 'recording_interval', 1)
            self.sequence_names = data_storage_block.read_param(data_storage_block.params, 'sequence_names', 'data')
            metadata_location = self.recording_location
            metadata['displayedName'] = dataset_displayedName
            metadata['recording'] = True
            metadata['start_time'] = '%s' % (datetime.datetime.utcnow()) # TODO: should use ISO string
            metadata['recording_location'] = self.recording_location
            metadata['recording_user'] = self.username
            metadata['recording_interval'] = self.recording_interval
        # otherwise, we still want to create a metadata file (using the recording location specified with this message)
        else:
            metadata['recording'] = True # note: not really recording; just need this to work with current front-end code
            metadata['is_empty'] = True
            self.recording_location = None
            self.recording_interval = None
            self.sequence_names = []
            metadata_location = params['recording_location'] # TODO: rethink this
        #
        # Create metadata file.
        #
        logging.info("Creating sequences...")
        c.resources.create_folder(metadata_location)
        c.resources.write_file(metadata_location + "/metadata", json.dumps(metadata))
        # Create sequences for blocks
        if data_storage_block:
            record_blocks = [b for b in data_storage_block.sources]
            self.create_sequences(record_blocks)
        self.send_message('start_recording_response',
            { 'success': True,
              'message': "Recording started."
            })
        #
        # Ensure latest status reflects that this controller is
        # recording.
        #
        self.send_status()
    elif type == 'stop_recording':
        logging.info('stop recording data')
        if self.store:
            # save stop for current run
            if self.recording_interval:
                self.store.save('run', self.run_name, self.recording_interval, {'action': 'stop'})
            else:
                logging.info('stop recording data not saved (recording_interval none)')
        self.recording_interval = None
        self.recording_location = None
        if self.device_check_greenlet:
            self.device_check_greenlet.kill()
        self.send_message( type + '_response',
            { 'success': True,
              'message': "Recording stopped."
            })
    elif type == 'send_sensor_data':
        if self.sensor_data_greenlet is not None:
            return
        stoptime = params.get('stoptime')
        if stoptime is None:
            stoptime = 60
        self.sensor_data_greenlet = gevent.spawn( self.send_sensor_data,
                                                  stoptime )
        self.send_message( type + '_response',
            { 'success': True,
              'message': "Sending sensor data."
            })
    elif type == 'rename_block':
        old_name = params['old_name']
        new_name = params['new_name']
        device = self.find_device(old_name)
        device.name = new_name
        # NOTE(review): rename_sequence is not defined/imported in the visible
        # portion of this file -- confirm it is provided elsewhere.
        rename_sequence(c.path_on_server(), old_name, new_name) # change sequence name on server
    elif type == 'update_actuator':
        name = params['name']
        value = params['value']
        device = c.auto_devices.find_device(name)
        if device:
            device.send_command('set %s' % value)
        elif self.sensaur_hub:
            component = self.sensaur_hub.find_component(name)
            if component:
                self.sensaur_hub.set_output_value(component, value)
    elif type == 'add_camera':
        self.add_camera()
    elif type == 'add_sim_sensor':
        add_sim_sensor()
    elif type == 'add_sim_actuator':
        add_sim_actuator()
    elif type == 'remove_sim_device':
        remove_sim_device()
    elif type == 'request_status':
        self.send_status()
    elif type in [ 'download_software_updates',
                   'list_software_versions',
                   'update_software_version' ]:
        class_ = command_class_dict[type]
        cmd = class_(self, type, params)
        cmd.exec_cmd()
    elif type == 'flow_server::firebase_init':
        self.firebase = params
        send_sensor_data = self.firebase['send_sensor_data']
        if send_sensor_data and send_sensor_data['enabled'] and c.config.get('firebase_send_sensor_data', True):
            if self.firebase_sensor_data_greenlet is not None:
                self.firebase_sensor_data_greenlet.kill()
            self.firebase_sensor_data_greenlet = gevent.spawn(self.firebase_send_sensor_data, self.firebase, send_sensor_data)
    else:
        used = False
    # keep track of last message from web interface
    if used:
        self.last_user_message_time = time.time()
    return used
# a wrapper used to send messages to server or BLE
def send_message(self, type, parameters):
    """Send a message to the websocket and, when BLE is enabled, also via MQTT.

    With enable_ble set (and a working publisher), the message is published
    over MQTT first (except 'update_sequence', which BLE does not need) and
    then forwarded to the websocket; otherwise it goes to the websocket only.
    """
    #
    # Tag the message with our folder path so a client talking to several
    # controllers can tell which one is responding.
    #
    parameters['src_folder'] = c.path_on_server()
    ble_enabled = c.config.get('enable_ble', False) and self.publisher
    if ble_enabled and type != "update_sequence":
        # update_sequence not needed by ble, only by store
        payload = json.dumps({"type": type, "parameters": parameters})
        #logging.debug('mqtt published : %s' % payload)
        self.publisher.publish(payload)
    # always deliver to the websocket as well
    c.send_message(type, parameters)
#
# Handle an incoming value from a sensor device (connected via USB)
#
# Duplicate device types append a space followed by an integer
# starting at 2. E.g.:
#
# "CO2"
# "CO2 2"
# "humidity"
# "humidity 2"
#
# How do we map these?
#
def handle_input(self, device_or_name, value):
    """Receive one sensor reading and route it to store/diagram/latest-value cache.

    :param device_or_name: device object (sensaur) or device name string (auto_devices)
    :param value: single value (sensaur) or list of values (auto_devices)
    """
    # get the device name
    # the auto_devices code provides a name; the senaur code provides a device object
    if hasattr(device_or_name, 'name'):
        name = device_or_name.name
        values = [value] # the sensaur system just sends a single value at a time
    else:
        name = device_or_name
        values = value # the auto_devices system provides a list of values for one device
    # logging.debug('input_handler: name=%s, values[0]=%s' % (name, values[0]))
    # ---- start of send_message replacement (store and ble test without diagram open)
    if self.integ_test:
        if self.store:
            value = float(values[0])
            try:
                self.store.save('sensor', name, value)
            except Exception as err:
                logging.error("store.save error: %s" % err)
        # simulate update_diagram when it was not requested by flow-server
        # i.e. when flow-server is not reachable after flow restart
        #if self.publisher:
        #    jsonobj = {"type": "update_diagram", "parameters": {'values': { '1': value}}}
        #    jsonmsg = json.dumps(jsonobj)
        #    #logging.debug('mqtt published : %s' % jsonmsg)
        #    self.publisher.publish(jsonmsg)
    # ---- end of of send_message replacement
    if self.diagram:
        block = self.diagram.find_block_by_name(name)
        if block:
            block.decimal_places = block.compute_decimal_places(values[0])
            block.value = float(values[0])
    #
    # Store last read sensor data associated with a physical sensor.
    #
    now = datetime.datetime.utcnow()
    value = float(values[0])
    self.sensor_data_latest[name] = (now, value)
# record data by sending it to the server and/or storing it locally
def record_data(self, blocks, timestamp):
    """Record the current values of the given blocks locally and on the server.

    :param blocks: diagram blocks feeding the data-storage block
    :param timestamp: datetime used as the sequence timestamp on the server
    """
    # publish to recording queue to be saved by storage service or save directly
    # store block_name and value into 'sensor' measurement
    # perform store only if store has been initialized properly
    if not self.integ_test:
        if self.store:
            for block in blocks:
                try:
                    logging.debug("record_data: %s=%s" % (block.name, block.value))
                    self.store.save('sensor', block.name, block.value)
                except Exception as err:
                    logging.error("store.save error: %s" % err)
        # store blocks on server
        # only blocks with a configured sequence name are uploaded
        sequence_prefix = self.recording_location + '/'
        values = {}
        for b in blocks:
            id_str = str(b.id)
            if id_str in self.sequence_names:
                seq_name = sequence_prefix + self.sequence_names[id_str]
                values[seq_name] = b.value
        logging.debug('c.update_sequences %s' % (values))
        if values:
            c.update_sequences(values, timestamp)
# send locally recorded time series data to browser
def send_history(self, params):
    """Query the local influxdb store and publish the history over BLE/MQTT.

    history is currently only used for sending local history over ble.
    Sample parameters: {u'count': 100000, u'start_timestamp': u'2017-06-15T23:50:19.567Z',
    u'name': u'temperature', u'end_timestamp': u'2017-06-16T00:00:19.567Z'}

    :param params: dict with name, start_timestamp, end_timestamp, count,
        and optional auto_interval (defaults to True).
    """
    if self.store:
        name = params.get("name")
        start = params.get("start_timestamp")
        end = params.get("end_timestamp")
        count = params.get("count")
        # auto interval allows for automatic adjustment of history timestamp interval
        # so that it fits into ble packet (< 120 records)
        auto_interval = params.get("auto_interval")
        interval = None
        if auto_interval is None:
            auto_interval = True
        if auto_interval:
            interval = self.calc_auto_interval(start, end)
        try:
            if interval:
                query = \
                    """SELECT mean(mean) from sensor_mean where "name"='%s' and time > '%s' and time <= '%s' group by time(%s) limit %s""" % \
                    (name, start, end, interval, count)
            else:
                query = \
                    """SELECT mean from sensor_mean where "name"='%s' and time > '%s' and time <= '%s' limit %s""" % \
                    (name, start, end, count)
            logging.debug("interval=%s, query=%s" % (interval, query))
            rs = self.store.query(query)
            # sample data:
            # points: [{u'count': 60, u'name': u'light', u'pin': u'2671', u'min': 242,
            #          u'max': 245, u'time': u'2017-06-16T20:42:00Z', u'mean': 244.8}, ...
            points = list(rs.get_points())
            #logging.debug("%d points: first 10: %s" % (len(points), points[:10]))
            if c.config.get('enable_ble', False) and self.publisher:
                # extract rounded numbers for 'mean' field
                values = [round(x['mean'],2) if isinstance(x['mean'], numbers.Number) else x['mean'] for x in points]
                timestamps = [x['time'] for x in points]
                if not values:
                    values = [0,0]
                    timestamps = [start, end]
                # bug fix: the original wrote {"type": type}, which is the
                # *builtin* type class here (no local named type) -- json.dumps
                # raised TypeError and the reply was never published. This
                # handler serves the 'history' message, so label it as such.
                jsonobj = {"type": "history", "parameters": { "name": name,
                           "values": values, "timestamps": timestamps }
                          }
                jsonmsg = json.dumps(jsonobj)
                #logging.debug('mqtt published : %s' % jsonmsg)
                self.publisher.publish(jsonmsg)
        except Exception as err:
            logging.error("store.query error: %s" % err)
#
# send client info to server/browser
#
def send_status(self):
    """Send controller status (versions, devices, IPs, recording state) to the server/browser."""
    #
    # Get IP info
    #
    # ip_map stays None when netifaces is unavailable (include_network_status False)
    ip_map = None
    if include_network_status:
        ip_map = {}
        for interface in interfaces():
            # skip the loopback interface
            if interface == 'lo':
                continue
            addresses = ifaddresses(interface)
            if AF_INET in addresses:
                links = addresses[AF_INET]
                for link in links:
                    ip_map[interface] = link['addr']
    if os.path.exists('/sys/class/net/wlan0/address'):
        mac_addr = subprocess.check_output(['cat', '/sys/class/net/wlan0/address']).strip() # for raspi
    else:
        mac_addr = 'N/A'
    status = {
        'operational_status': self.operational_status,
        'available_versions': self.available_versions,
        'username': self.username,
        'flow_version': Flow.FLOW_VERSION,
        'lib_version': c.VERSION + ' ' + c.BUILD,
        'device_count': len(self.device_list()),
        'recording_interval': self.recording_interval,
        'ip_addresses': ip_map,
        'mac_address': mac_addr,
    }
    if self.diagram:
        logging.debug("Setting name %s" % (self.diagram.name))
        status['current_diagram'] = self.diagram.name
    else:
        logging.debug("No diagram name to set.")
        status['current_diagram'] = None
    self.send_message('status', status)
    # update controller status table on server
    own_path = c.path_on_server()
    c.resources.send_request_to_server('PUT', '/api/v1/resources' + own_path, {'status': json.dumps(status)})
# create sequences on server for the given blocks
def create_sequences(self, blocks):
    """Create a server-side sequence for each block that does not already have one.

    :param blocks: diagram blocks feeding the data-storage block
    """
    # get list of existing sequences
    print('recording location: %s' % self.recording_location)
    file_infos = c.resources.list_files(self.recording_location, type = 'sequence')
    server_seqs = set([fi['name'] for fi in file_infos])
    print('server seqs: %s' % server_seqs)
    # create a sequence for each block (that doesn't already have a sequence)
    for block in blocks:
        id_str = str(block.id)
        if id_str in self.sequence_names:
            seq_name = self.sequence_names[id_str]
            if seq_name not in server_seqs:
                device = self.find_device(block.name)
                units = device.units if device else None
                # NOTE(review): create_sequence is not defined/imported in the
                # visible portion of this file -- confirm it is provided elsewhere.
                create_sequence(self.recording_location, seq_name, data_type=1, units=units) # data_type 1 is numeric
                # bug fix: track the sequence name just created (was block.name),
                # matching the membership check above so duplicates are skipped
                server_seqs.add(seq_name)
# get sensor data
def get_sensor_data(self):
data = []
for device in self.device_list():
dict = device.as_dict()
name = dict['name']
value = None
if name in self.sensor_data_latest:
(time, value) = self.sensor_data_latest[name]
#
# How to decide when a value is stale?
# This might not be necessary since _auto_devices
# removes the unplugged USB device...
#
now = datetime.datetime.utcnow()
if time < now - datetime.timedelta(seconds=5):
value = None
dict['value'] = value
data.append(dict)
# add timer blocks
# TODO: rename send_sensor_data since we're adding timer data
if self.diagram:
for block in self.diagram.blocks:
if block.type == 'timer':
d = {
'id': block.id,
'name': block.name,
'type': block.type,
'value': block.value,
}
data.append(d)
return data
# get list of devices
def device_list(self):
devices = c.auto_devices._auto_devices
if self.sensaur_hub:
if devices: # handle this case separately, since it will be slow (making copies of lists) and unusual (only when have both old and new hardware attached at the same time)
devices = devices + self.sensaur_hub.components # note: a device is called a component in the sensaur system
else:
devices = self.sensaur_hub.components
return devices
# find a device by name; assumes each device has a unique name
def find_device(self, name):
device = c.auto_devices.find_device(name)
if not device and self.sensaur_hub:
device = self.sensaur_hub.find_component(name) # note: a device is called a component in the sensaur system
return device
#
# Send all sensor data to Firebase including sensor values
#
def firebase_send_sensor_data(self, firebase, send_sensor_data):
started = datetime.datetime.utcnow().replace(microsecond=0)
interval = send_sensor_data["interval"]
api_key = firebase["api_key"]
google_auth_url = 'https://www.googleapis.com/identitytoolkit/v3/relyingparty/verifyCustomToken?key=%s' % (api_key)
refresh_token_url = 'https://securetoken.googleapis.com/v1/token?key=%s' % (api_key)
logging.debug("firebase_send_sensor_data start: %s, interval: %s" % (started, interval))
# Enable https connection reuse to Firebase
session = requests.Session()
session.mount('https://', requests.adapters.HTTPAdapter())
# Exchange the custom token for an id token
r = session.post(google_auth_url, headers={'Content-Type': 'application/json'}, data = json.dumps({'token': firebase["token"], 'returnSecureToken': True}))
if r.status_code != 200:
logging.error('ABORTING firebase_send_sensor_data thread! POST to %s returned %s' % (google_auth_url, r.status_code))
return
auth = r.json()
id_token = auth["idToken"]
refresh_token = auth["refreshToken"]
expires_in = int(time.time()) + int(auth["expiresIn"])
while True:
# refresh token when there is 5 minutes left
now = int(time.time())
if now >= expires_in - (5*60):
logging.debug('Refreshing token')
r = session.post(refresh_token_url, headers={'Content-Type': 'application/json'}, data = json.dumps({'refresh_token': refresh_token, 'grant_type': 'refresh_token'}))
if r.status_code != 200:
logging.error('ABORTING firebase_send_sensor_data thread! POST to %s returned %s' % (refresh_token_url, r.status_code))
return
refresh = r.json()
id_token = refresh["id_token"]
refresh_token = refresh["refresh_token"]
expires_in = int(time.time()) + int(refresh["expires_in"])
# Get data
data = self.get_sensor_data()
timestamp = datetime.datetime.utcnow().isoformat() + "Z"
# Send PUT request
firebase_url = 'https://%s.firebaseio.com%s.json?auth=%s' % (firebase["project_id"], send_sensor_data["path"], id_token)
r = session.put(firebase_url, data = json.dumps({'timestamp': timestamp, 'data': data}))
if r.status_code == 200:
logging.debug('Sent %s to %s' % (data, firebase_url))
else:
logging.error('PUT to %s returned %s - %s' % (firebase_url, r.status_code, r.text))
# Sleep
c.sleep(interval)
#
# Send all sensor data over websocket including sensor values
# Stop after specified number of minutes.
#
def send_sensor_data(self, minutes):
timestamp = datetime.datetime.utcnow().replace(microsecond=0)
stoptime = timestamp + datetime.timedelta(minutes=minutes)
logging.debug("send_sensor_data start: %s stop: %s" %
(timestamp, stoptime))
while True:
#
# Only loop for the specified number of minutes
#
if datetime.datetime.utcnow() > stoptime:
logging.debug("Stopping send_sensor_data")
self.sensor_data_greenlet = None
break
#
# Send all sensor data.
#
c.send_message('send_sensor_data_response',
{ 'success': True,
'data': self.get_sensor_data(),
'src_folder': c.path_on_server() } )
#
# Sleep
#
c.sleep(1)
#
# Send watchdog message to server so that it knows which
# controllers are online
#
def send_watchdog(self):
minutes = 0
while True:
if minutes == 0:
self.available_versions = []
list_cmd = ListVersionsCommand(None, None, {})
list_cmd.exec_cmd()
if list_cmd.get_response() and list_cmd.get_response()['version_list']:
self.available_versions = list_cmd.get_response()['version_list']
if minutes == 10:
minutes = 0
minutes += 1
self.send_status()
c.send_message('watchdog', {})
c.sleep(60)
# start capturing from a camera
def add_camera(self):
if hasattr(c, 'camera'):
c.camera.open()
if c.camera.device and c.camera.device.is_connected():
self.send_message('device_added', {'type': 'camera', 'name': 'camera', 'dir': 'in'})
# create image sequence on server if doesn't already exist
server_path = c.path_on_server()
if not c.resources.file_exists(server_path + '/image'):
create_sequence(server_path, 'image', data_type=3)
else:
logging.warning('unable to open camera')
else:
logging.warning('camera extension not added')
# get a new image for the camera block and store it as a base64 encoded value;
# for now we'll support just one physical camera (though it can feed into multiple camera blocks)
def update_camera_blocks(self):
if hasattr(c, 'camera') and c.camera.device and c.camera.device.is_connected():
camera_block_defined = False
for block in self.diagram.blocks:
if block.type == 'camera':
camera_block_defined = True
if camera_block_defined:
image = c.camera.device.capture_image()
# store camera image once a minute
current_time = time.time()
if not self.last_camera_store_time or current_time > self.last_camera_store_time + 60:
image.thumbnail((720, 540), Image.ANTIALIAS)
self.send_message('update_sequence', {'sequence': 'image', 'value': encode_image(image)})
self.last_camera_store_time = current_time
logging.debug('updating image sequence')
# create small thumbnail to send to UI
image.thumbnail((320, 240), Image.ANTIALIAS)
data = encode_image(image)
for block in self.diagram.blocks:
if block.type == 'camera':
block.value = data
#
# Get user friendly controller display name
#
def controller_name(self):
parts = c.path_on_server().split('/')
return parts[-1]
#
# Set the currently running diagram
#
def set_diagram(self, params):
#
# v2.0 messages should associate a username with a running
# program.
#
if set(('diagram', 'username')) <= set(params):
diagram_spec = params['diagram']
if 'name' not in diagram_spec:
self.send_message(
'set_diagram_response',
{ 'success': False,
'message': "No program name specified."
})
return
name = diagram_spec['name']
self.diagram = Diagram(name, diagram_spec)
self.username = params['username']
self.send_message(
'set_diagram_response',
{ 'success': True,
'message': "Set running program %s for user %s." % (name, self.username)
})
else:
#
# Support legacy flow for backwards compatibility.
# TODO remove this once v1.0 is no longer supported.
#
diagram_spec = params['diagram']
name = '_temp_'
if 'name' in diagram_spec:
name = diagram_spec['name']
logging.debug(
"handle_message: set_diagram name %s" % (name))
self.diagram = Diagram(name, diagram_spec)
# ======== UTILITY FUNCTIONS ========
# create a sequence resource on the server
# data types: 1 = numeric, 2 = text, 3 = image
def create_sequence(server_path, name, data_type, units = None):
    """Create a sequence resource on the server.

    data types: 1 = numeric, 2 = text, 3 = image
    """
    print('creating new sequence: %s' % name)
    sequence_info = dict(
        path=server_path,
        name=name,
        type=21,  # resource type 21 is a sequence
        data_type=data_type,
        min_storage_interval=0,
    )
    if units:
        sequence_info['units'] = units
    c.resources.send_request_to_server('POST', '/api/v1/resources', sequence_info)
# change the name of a sequence on the server
def rename_sequence(server_path, old_name, new_name):
    """Change the name of a sequence on the server."""
    print('renaming sequence: %s -> %s' % (old_name, new_name))
    resource_path = '/api/v1/resources' + server_path + '/' + old_name
    c.resources.send_request_to_server('PUT', resource_path, {'name': new_name})
Add error handling around Firebase connection
Add try/except error handling around the Firebase connection. Put the connection attempt in a while loop so we can retry the connection if it fails with an exception.
[#161186430]
# standard python imports
import os
import time
import json
import logging
import datetime
import subprocess
from dateutil.parser import parse
import numbers
# external imports
import hjson
import gevent
from PIL import Image
from rhizo.main import c
from rhizo.extensions.camera import encode_image
import requests
# our own imports
from sim_devices import simulate, add_sim_sensor, add_sim_actuator, remove_sim_device
from diagram_storage import list_diagrams, load_diagram, save_diagram, rename_diagram, delete_diagram
from diagram import Diagram
from git_tools import git_base_command
from commands.command import Command
from commands.list_versions_command import ListVersionsCommand
from commands.download_software_command import DownloadSoftwareCommand
from commands.update_software_command import UpdateSoftwareCommand
#
# Check if we can include IP addresses in our status
#
include_network_status = True
try:
    from netifaces import interfaces, ifaddresses, AF_INET
except ImportError:
    # netifaces is optional; without it we simply omit IP info from status
    include_network_status = False
#
# Check if we can include version info in our status
#
include_version_info = True
try:
    import subprocess
except ImportError:
    include_version_info = False
# threshold for idle use messages indicating to stop sending update messages
IDLE_STOP_UPDATE_THRESHOLD = 5 * 60.0  # seconds (5 minutes)
# The Flow class holds the state and control code for the data flow client program (running on a RasPi or similar).
class Flow(object):
    #
    # Values for set_operational_status
    #
    OP_STATUS_READY = "READY"
    OP_STATUS_UPDATING = "UPDATING"
    #
    # Get version info
    #
    FLOW_VERSION = None
    if include_version_info:
        try:
            #
            # Track version with git tags.
            # If this head is tagged, then this returns the tag name.
            # Otherwise this returns a short hash of the head.
            #
            FLOW_VERSION = subprocess.check_output( git_base_command() +
                                                    [ 'describe',
                                                      '--tags',
                                                      '--always' ]).rstrip()
        except Exception as err:
            # git not available or not a git checkout; fall back to a placeholder
            FLOW_VERSION = "unknown"
        logging.debug("Found flow version '%s'" % (FLOW_VERSION))
    def __init__(self):
        """Initialize the flow client: register message/input handlers,
        optionally start the sensaur hub, MQTT, and influxdb store, and
        restore the last diagram/run state from config or the store.

        Note: this constructor performs I/O (store queries) when the store is
        enabled.
        """
        self.diagram = None # the currently running diagram (if any)
        # used to perform always save history for each non-sim sensor
        # at sensor raw reading interval
        self.integ_test = False
        self.publisher = None   # MQTT publisher (set by init_mqtt if BLE enabled)
        self.store = None       # influxdb store (set by init_store if enabled)
        self.sensaur_hub = None
        self.last_user_message_time = None # the last user message time (if any)
        self.last_camera_store_time = None # the last camera sequence update time (if any)
        self.last_record_timestamp = None # datetime object of last values recorded to long-term storage
        self.recording_interval = None # number of seconds between storing values in long-term storage
        self.run_name = "Noname" # name of run for which recording is being saved
        self.sequence_names = {} # a dictionary mapping block IDs to sequence names (when recording to server)
        c.add_message_handler(self) # register to receive messages from server/websocket
        c.auto_devices.add_input_handler(self)
        # if using sensaur system, initialize it
        if c.config.get('enable_sensaur') and c.config.get('sensaur_port'):
            import sensaur
            self.sensaur_hub = sensaur.Hub(c.config['sensaur_port'])
            self.sensaur_hub.add_input_handler(self)
        else:
            self.sensaur_hub = None
        # load last diagram on startup
        # TODO: decide to use this or self.store("diagram"...) below
        if c.config.get('startup_diagram', ''):
            name = c.config.startup_diagram
            diagram_spec = load_diagram(name)
            logging.debug("Flow.__init__: loading diagram: %s" % name)
            self.diagram = Diagram(name, diagram_spec)
        # call init functions. if they fail, mqtt, store resp. will be noop
        if c.config.get('enable_ble', False):
            self.init_mqtt()
        else:
            logging.debug("MQTT and BLE disabled.")
        if c.config.get('enable_store', False):
            self.init_store()
            # if store enabled, restore recording_interval
            rs = self.store.query("select * from run order by time desc limit 2")
            points = list(rs.get_points())
            logging.debug("Flow.__init__: run points: %s" % points)
            if points:
                point = points[0]
                if point.get("action") == "start":
                    logging.info("Flow.__init__: loading last run info: %s" % point)
                    # last action was start, not stop: load name and interval
                    self.recording_interval = int(point.get("value"))
                    self.run_name = point.get("name")
            # sample data:
            # points: [{u'count': 60, u'name': u'light', u'pin': u'2671', u'min': 242,
            #           u'max': 245, u'time': u'2017-06-16T20:42:00Z', u'mean': 244.8}, ...
            # if diagram not loaded from hardcoded config, load from influxdb 'diagram' measurement
            if not self.diagram:
                rs = self.store.query("select * from diagram order by time desc limit 2")
                points = list(rs.get_points())
                logging.debug("Flow.__init__: diagram points: %s" % points)
                if points:
                    point = points[0]
                    if point.get("action") == "start":
                        #logging.info("Flow.__init__: loading last diagram: %s" % point)
                        name = point.get("name")
                        try:
                            diagram_spec = load_diagram(name)
                            logging.debug("Flow.__init__: loading diagram: %s" % name)
                            self.diagram = Diagram(name, diagram_spec)
                        except Exception as err:
                            logging.warning("Flow.__init__: can't load diagram %s: %s" % (name, err))
        else:
            logging.debug("Store disabled.")
        self.operational_status = self.OP_STATUS_READY
        self.available_versions = [] # Available software versions
        self.username = None # User currently recording
        self.recording_location = None # Folder path on server of
                                       # named dataset
        self.device_check_greenlet = None # The recording greenlet.
        self.sensor_data_latest = {} # For the sensor data greenlet,
                                     # keep the last input_handler
                                     # value received for each physical
                                     # device so that we can return
                                     # sensor data to the user without
                                     # having a diagram loaded.
        self.sensor_data_greenlet = None # Greenlet for sending sensor
                                         # data over the websocket.
        self.firebase = None # Holds parameters from flow_server::firebase_init message
        self.firebase_sensor_data_greenlet = None # Greenlet for sending sensor data to Firebase.
# MQTT integration
def init_mqtt(self):
"""Initialize mqtt."""
try:
from mqttclient import MqttPublisher
#TODO: load mq_topic from config. It has to be the same
# --- as in gattserver/hpserver.py
mq_topic = "flow/ble"
self.publisher = MqttPublisher(mq_topic)
self.publisher.start()
#print("MQTT Initialized.")
logging.info("MQTT Initialized.")
except:
logging.error("Can't initialize MQTT. Probably some components not installed. MQTT publish will be disabled.")
#print("Can't initialize MQTT. Probably some components not installed. MQTT publish will be disabled.")
# store integration
def init_store(self):
"""Initialize store."""
try:
from influxstore import Store
# TODO load pin for this device
my_pin = '2671'
# open store to flow database
self.store = Store(database="flow", pin=my_pin)
logging.info("Influxdb store Initialized.")
except Exception as err:
logging.error("Can't initialize store. Probably influxdb library not installed or influxdb not running. Store will be disabled: %s" % \
err)
#
# Set operational status
#
    def set_operational_status(self, status):
        """Set operational status (one of the OP_STATUS_* class constants)."""
        self.operational_status = status
#
# Get operational status
#
    def get_operational_status(self):
        """Return the current operational status."""
        return self.operational_status
# run the current diagram (if any); this is the main loop of the flow program
def start(self):
# launch a greenlet to send watchdog messages to server
gevent.spawn(self.send_watchdog)
# launch sensaur greenlets if enabled
if self.sensaur_hub:
self.sensaur_hub.start_greenlets()
# loop forever
timestamp = datetime.datetime.utcnow().replace(microsecond=0)
while True:
# if the current time is greater than our target timestamp, run our processing
if datetime.datetime.utcnow() > timestamp:
if self.diagram:
self.update_diagram_and_send_values(timestamp)
# the processing could have taken more than a second, so update the target timestamp as many times as needed (by an integer amount) to be in the future
# alternative: could compute timedelta and do some math to do this in a single step
while timestamp < datetime.datetime.utcnow():
timestamp += datetime.timedelta(seconds=1)
# sleep until it is time to do another update
c.sleep(0.1)
# updates the current diagram and sends values to server and external hardware;
# this function should be called once a second;
# timestamp should always be 1 second after the last timestamp (and should be an even number of seconds)
def update_diagram_and_send_values(self, timestamp):
# update diagram values
self.update_camera_blocks()
self.diagram.update()
# send values to server and actuators
values = {}
for block in self.diagram.blocks:
value = None
#logging.debug('flow.start loop: block=%s' % block)
if block.output_type == 'i': # only send camera/image updates if recent message from user
if self.last_user_message_time and time.time() < self.last_user_message_time + 300:
value = block.value
else:
if block.value is not None:
format = '%' + '.%df' % block.decimal_places
value = format % block.value
values[block.id] = value
# send values to actuators
if not block.output_type:
device = self.find_device(block.name) # fix(later): does this still work if we rename a block?
if device and device.dir == 'out':
try:
value = int(block.value)
except:
value = None
if value is not None:
if hasattr(device, 'send_command'):
device.send_command('set %d' % value) # for auto_devices devices
else:
self.sensaur_hub.set_output_value(device, value) # for sensaur components
#logging.debug('flow.start loop: values=%s' % values)
if self.last_user_message_time and (time.time() - self.last_user_message_time < IDLE_STOP_UPDATE_THRESHOLD):
#logging.debug("IDLE_STOP_UPDATE_THRESHOLD passed")
self.send_message('update_diagram', {'values': values})
else:
pass
#logging.debug("IDLE_STOP_UPDATE_THRESHOLD failed")
# send sequence values
if self.recording_interval and ((self.last_record_timestamp is None) or timestamp >= self.last_record_timestamp + datetime.timedelta(seconds = self.recording_interval)):
data_storage_block = None
for block in self.diagram.blocks:
if block.type == 'data storage':
data_storage_block = block
break
if data_storage_block: # if data storage block is defined, store everything that feeds into it
record_blocks = [b for b in data_storage_block.sources]
self.record_data(record_blocks, timestamp)
self.last_record_timestamp = timestamp
def calc_auto_interval(self, start, end):
"""Calculate automatic interval.
About 120 records should fit in start/end range.
:param start: start of history range for which interval is calculated
:param end: end of history range for which interval is calculated
:return: string compatible with influxdb group by interval, e.g. 5s, 1m, 60m
or None if auto interval is < 1m and no grouping needs to be done
"""
ret = None
try:
start = parse(start)
end = parse(end)
diff = end - start
#interval_diff = diff/120
total_seconds = diff.total_seconds()
if total_seconds <= 600:
# < 10m
ret = None
elif total_seconds <= 3600:
# < 1h and < 10m
ret = "1m"
elif total_seconds <= 24*3600:
# > 1h and < 1d
# 48 records max
ret = "30m"
elif total_seconds <= 7*24*3600:
# > 1d and < 7d
# 84 records max
ret = "4h"
elif total_seconds <= 30*24*3600:
# > 7d and < 30d
# 120 records max
ret = "8h"
except Exception as err:
# Can't parse: return default (None)
ret = None
return ret
#
# handle messages from server (sent via websocket)
#
def handle_message(self, type, params):
logging.debug('handle_message: %s %s' % (type, params))
#
# For any messages that choose to implement the command interface,
# they can be instantiated using their message type as key.
#
command_class_dict = {
'download_software_updates': DownloadSoftwareCommand,
'list_software_versions': ListVersionsCommand,
'update_software_version': UpdateSoftwareCommand }
#
# Messages allowed when in recording mode
#
allowed_when_recording = [ 'stop_recording',
'stop_diagram',
'list_diagrams',
'request_status',
'rename_diagram',
'delete_diagram',
'flow_server::firebase_init' ]
#
# Restrict allowed operations while recording.
# Do not allow modification of the running diagram while
# recording.
#
if self.recording_interval is not None:
if not type in allowed_when_recording:
logging.debug("Message %s not allowed while recording." % (type))
username = self.username
diagram_name = None
if self.diagram:
diagram_name = self.diagram.name
self.send_message(type + '_response',
{ 'success': False,
'error': 'recording_in_progress',
'data': { 'username': username,
'diagram': diagram_name },
'message': 'Cannot perform operation %s while controller is recording.' % (type)
})
self.last_user_message_time = time.time()
return True
used = True
if type == 'list_devices':
print 'list_devices'
for device in self.device_list():
self.send_message('device_added', device.as_dict())
elif type == 'history':
self.send_history(params)
elif type == 'request_block_types':
block_types = hjson.loads(open('block_types.hjson').read())
self.send_message('block_types', block_types)
elif type == 'list_diagrams':
self.send_message('diagram_list', {'diagrams': list_diagrams()})
elif type == 'save_diagram':
save_diagram(params['name'], params['diagram'])
logging.debug("Sending save_diagram_response")
self.send_message( 'save_diagram_response',
{ 'success': True,
'message': "Saved diagram: %s" % (params['name'])
})
elif type == 'rename_diagram':
#
# Do not allow renaming of recording diagram
#
if self.recording_interval is not None:
if params['old_name'] == self.diagram.name:
self.send_message(
'rename_diagram_response',
{ 'success': False,
'message': "Cannot rename diagram while recording"
})
return
rename_diagram(params['old_name'], params['new_name'])
self.send_message(
'rename_diagram_response',
{ 'success': True,
'message': "Diagram renamed"
})
elif type == 'delete_diagram':
#
# Do not allow deleting of recording diagram
#
if self.recording_interval is not None:
if params['name'] == self.diagram.name:
self.send_message(
'delete_diagram_response',
{ 'success': False,
'message': "Cannot delete diagram while recording"
})
return
delete_diagram(params['name'])
self.send_message(
'delete_diagram_response',
{ 'success': True,
'message': "Diagram deleted"
})
elif type == 'set_diagram':
self.set_diagram(params)
elif type == 'start_diagram': # start a diagram running on the controller; this will stop any diagram that is already running
logging.debug("handle_message: start_diagram - loading diagram: %s" % params['name'])
diagram_spec = load_diagram(params['name'])
self.diagram = Diagram(params['name'], diagram_spec)
#local_config = hjson.loads(open('local.hjson').read()) # save name of diagram to load when start script next time
#local_config['startup_diagram'] = params['name']
#open('local.hjson', 'w').write(hjson.dumps(local_config))
if self.store:
self.store.save('diagram', params['name'], 0, {'action': 'start'})
self.send_message( 'start_diagram_response',
{ 'success': True,
'message': "Started diagram: %s" % (params['name'])
})
elif type == 'stop_diagram':
#
# Need to update the metadata in the recording location
# indicating that we are no longer recording to that location.
# Note if a controller dies while recording, the metadata
# will still indicate 'recording: True', so someone might stop
# a recording (to update the metadata to set 'recording: False')
# even though the controller might no longer be recording to that
# location. There should probably be a better way to handle this.
#
stop_location = params.get('stop_location')
if stop_location is None:
stop_location = self.recording_location
#
# Set this recording as done.
#
if stop_location:
if not stop_location.startswith('/'):
stop_location = '/' + stop_location
metadata = c.resources.read_file(stop_location + "/metadata")
if metadata is not None:
metadata = json.loads(metadata)
metadata['recording'] = False
metadata['end_time'] = '%s' % (datetime.datetime.utcnow())
c.resources.write_file(
stop_location + "/metadata",
json.dumps(metadata) )
else:
c.resources.write_file(
stop_location + "/metadata",
json.dumps({ 'controller_path': c.path_on_server(),
'recording': False,
'recording_interval': self.recording_interval }))
#
# Stop recording if in progress.
# Remove the currently running diagram program.
# Remove the currently set user.
#
self.recording_interval = None
self.diagram = None
self.username = None
if self.recording_location != stop_location:
#
# We are not really recording to the location we
# have been asked to "stop" so just update the
# above metadata and continue.
#
self.send_message( type + '_response',
{ 'success': True,
'message': "This controller was no longer recording at that location, but the recording has been marked as stopped."
})
return
self.recording_location = None
self.send_message( type + '_response',
{ 'success': True,
'message': "Program stopped"
})
#
# Ensure latest status reflects that this controller is
# not recording.
#
self.send_status()
elif type == 'start_recording':
#
# Allow 'set_diagram' and 'start_recording' to be
# an atomic operation.
# Caller can specify diagram and username in params.
#
if set(('diagram', 'username')) <= set(params):
self.set_diagram(params)
metadata = {
'controller_path': c.path_on_server(),
'controller_name': self.controller_name(),
'program': self.diagram.diagram_spec,
}
# check for data storage block
data_storage_block = None
for block in self.diagram.blocks:
if block.type == 'data storage':
data_storage_block = block
break
# if data storage block, get recording info from it
if data_storage_block:
dataset_displayedName = data_storage_block.read_param(data_storage_block.params, 'dataset_location', 'data')
self.recording_location = params['recording_location']
self.recording_interval = data_storage_block.read_param(data_storage_block.params, 'recording_interval', 1)
self.sequence_names = data_storage_block.read_param(data_storage_block.params, 'sequence_names', 'data')
metadata_location = self.recording_location
metadata['displayedName'] = dataset_displayedName
metadata['recording'] = True
metadata['start_time'] = '%s' % (datetime.datetime.utcnow()) # TODO: should use ISO string
metadata['recording_location'] = self.recording_location
metadata['recording_user'] = self.username
metadata['recording_interval'] = self.recording_interval
# otherwise, we still want to create a metadata file (using the recording location specified with this message)
else:
metadata['recording'] = True # note: not really recording; just need this to work with current front-end code
metadata['is_empty'] = True
self.recording_location = None
self.recording_interval = None
self.sequence_names = []
metadata_location = params['recording_location'] # TODO: rethink this
#
# Create metadata file.
#
logging.info("Creating sequences...")
c.resources.create_folder(metadata_location)
c.resources.write_file(metadata_location + "/metadata", json.dumps(metadata))
# Create sequences for blocks
if data_storage_block:
record_blocks = [b for b in data_storage_block.sources]
self.create_sequences(record_blocks)
self.send_message('start_recording_response',
{ 'success': True,
'message': "Recording started."
})
#
# Ensure latest status reflects that this controller is
# recording.
#
self.send_status()
elif type == 'stop_recording':
logging.info('stop recording data')
if self.store:
# save stop for current run
if self.recording_interval:
self.store.save('run', self.run_name, self.recording_interval, {'action': 'stop'})
else:
logging.info('stop recording data not saved (recording_interval none)')
self.recording_interval = None
self.recording_location = None
if self.device_check_greenlet:
self.device_check_greenlet.kill()
self.send_message( type + '_response',
{ 'success': True,
'message': "Recording stopped."
})
elif type == 'send_sensor_data':
if self.sensor_data_greenlet is not None:
return
stoptime = params.get('stoptime')
if stoptime is None:
stoptime = 60
self.sensor_data_greenlet = gevent.spawn( self.send_sensor_data,
stoptime )
self.send_message( type + '_response',
{ 'success': True,
'message': "Sending sensor data."
})
elif type == 'rename_block':
old_name = params['old_name']
new_name = params['new_name']
device = self.find_device(old_name)
device.name = new_name
rename_sequence(c.path_on_server(), old_name, new_name) # change sequence name on server
elif type == 'update_actuator':
name = params['name']
value = params['value']
device = c.auto_devices.find_device(name)
if device:
device.send_command('set %s' % value)
elif self.sensaur_hub:
component = self.sensaur_hub.find_component(name)
if component:
self.sensaur_hub.set_output_value(component, value)
elif type == 'add_camera':
self.add_camera()
elif type == 'add_sim_sensor':
add_sim_sensor()
elif type == 'add_sim_actuator':
add_sim_actuator()
elif type == 'remove_sim_device':
remove_sim_device()
elif type == 'request_status':
self.send_status()
elif type in [ 'download_software_updates',
'list_software_versions',
'update_software_version' ]:
class_ = command_class_dict[type]
cmd = class_(self, type, params)
cmd.exec_cmd()
elif type == 'flow_server::firebase_init':
self.firebase = params
send_sensor_data = self.firebase['send_sensor_data']
if send_sensor_data and send_sensor_data['enabled'] and c.config.get('firebase_send_sensor_data', True):
if self.firebase_sensor_data_greenlet is not None:
self.firebase_sensor_data_greenlet.kill()
self.firebase_sensor_data_greenlet = gevent.spawn(self.firebase_send_sensor_data, self.firebase, send_sensor_data)
else:
used = False
# keep track of last message from web interface
if used:
self.last_user_message_time = time.time()
return used
# a wrapper used to send messages to server or BLE
def send_message(self, type, parameters):
"""Send message to websocket and/or ble.
Currently, we support two modes:
- websocket
- websocket plus ble
if elable_ble is set in config, we send to both ble (via mqtt) and websocket (via c._send_message).
Otherwise, we send to websocket only via c.send_message
"""
#logging.debug('send_message type=%s' % type)
#
# Add our folder name to the params so that the client knows
# which controller is responding in case they have
# sent messages to multiple controllers.
#
own_path = c.path_on_server()
parameters['src_folder'] = own_path
if c.config.get('enable_ble', False) and self.publisher:
# update_sequence not needed by ble, only by store
if type != "update_sequence":
jsonobj = {"type": type, "parameters": parameters}
#jsonmsg = '{"type":"sensor_update","parameters":{"values":[388.0],"name":"light"}}'
jsonmsg = json.dumps(jsonobj)
#logging.debug('mqtt published : %s' % jsonmsg)
#if not self.integ_test:
self.publisher.publish(jsonmsg)
# also send message to websocket
c.send_message(type, parameters)
else:
# send message to websocket
c.send_message(type, parameters)
#
# Handle an incoming value from a sensor device (connected via USB)
#
# Duplicate device types append a space followed by an integer
# starting at 2. E.g.:
#
# "CO2"
# "CO2 2"
# "humidity"
# "humidity 2"
#
# How do we map these?
#
def handle_input(self, device_or_name, value):
# get the device name
# the auto_devices code provides a name; the senaur code provides a device object
if hasattr(device_or_name, 'name'):
name = device_or_name.name
values = [value] # the sensaur system just sends a single value at a time
else:
name = device_or_name
values = value # the auto_devices system provides a list of values for one device
# logging.debug('input_handler: name=%s, values[0]=%s' % (name, values[0]))
# ---- start of send_message replacement (store and ble test without diagram open)
if self.integ_test:
if self.store:
value = float(values[0])
try:
self.store.save('sensor', name, value)
except Exception as err:
logging.error("store.save error: %s" % err)
# simulate update_diagram when it was not requested by flow-server
# i.e. when flow-server is not reachable after flow restart
#if self.publisher:
# jsonobj = {"type": "update_diagram", "parameters": {'values': { '1': value}}}
# jsonmsg = json.dumps(jsonobj)
# #logging.debug('mqtt published : %s' % jsonmsg)
# self.publisher.publish(jsonmsg)
# ---- end of of send_message replacement
if self.diagram:
block = self.diagram.find_block_by_name(name)
if block:
block.decimal_places = block.compute_decimal_places(values[0])
block.value = float(values[0])
#
# Store last read sensor data associated with a physical sensor.
#
now = datetime.datetime.utcnow()
value = float(values[0])
self.sensor_data_latest[name] = (now, value)
# record data by sending it to the server and/or storing it locally
def record_data(self, blocks, timestamp):
# publish to recording queue to be saved by storage service or save directly
# store block_name and value into 'sensor' measurement
# perform store only if store has been initialized properly
if not self.integ_test:
if self.store:
for block in blocks:
try:
logging.debug("record_data: %s=%s" % (block.name, block.value))
self.store.save('sensor', block.name, block.value)
except Exception as err:
logging.error("store.save error: %s" % err)
# store blocks on server
sequence_prefix = self.recording_location + '/'
values = {}
for b in blocks:
id_str = str(b.id)
if id_str in self.sequence_names:
seq_name = sequence_prefix + self.sequence_names[id_str]
values[seq_name] = b.value
logging.debug('c.update_sequences %s' % (values))
if values:
c.update_sequences(values, timestamp)
# send locally recorded time series data to browser
def send_history(self, params):
# history is currently only used for sending local history
# over ble
# Sample parameters for type history: {u'count': 100000, u'start_timestamp': u'2017-06-15T23:50:19.567Z',
# u'name': u'temperature', u'end_timestamp': u'2017-06-16T00:00:19.567Z'}
history = []
if self.store:
name = params.get("name")
start = params.get("start_timestamp")
end = params.get("end_timestamp")
count = params.get("count")
# auto interval allows for automatic adjustment of history timestamp interval
# so that it fits into ble packet (< 120 records)
auto_interval = params.get("auto_interval")
interval = None
if auto_interval is None:
auto_interval = True
if auto_interval:
#
interval = self.calc_auto_interval(start, end)
#
try:
if interval:
query = \
"""SELECT mean(mean) from sensor_mean where "name"='%s' and time > '%s' and time <= '%s' group by time(%s) limit %s""" % \
(name, start, end, interval, count)
else:
query = \
"""SELECT mean from sensor_mean where "name"='%s' and time > '%s' and time <= '%s' limit %s""" % \
(name, start, end, count)
logging.debug("interval=%s, query=%s" % (interval, query))
rs = self.store.query(query)
# sample data:
# points: [{u'count': 60, u'name': u'light', u'pin': u'2671', u'min': 242,
# u'max': 245, u'time': u'2017-06-16T20:42:00Z', u'mean': 244.8}, ...
points = list(rs.get_points())
#logging.debug("%d points: first 10: %s" % (len(points), points[:10]))
if c.config.get('enable_ble', False) and self.publisher:
# extract rounded numbers for 'mean' field
values = [round(x['mean'],2) if isinstance(x['mean'], numbers.Number) else x['mean'] for x in points]
timestamps = [x['time'] for x in points]
if not values:
values = [0,0]
timestamps = [start, end]
jsonobj = {"type": type, "parameters": { "name": name,
"values": values, "timestamps": timestamps }
}
#jsonmsg = '{"type":"sensor_update","parameters":{"values":[388.0],"name":"light"}}'
jsonmsg = json.dumps(jsonobj)
#logging.debug('mqtt published : %s' % jsonmsg)
self.publisher.publish(jsonmsg)
except Exception as err:
logging.error("store.query error: %s" % err)
#self.send_message('history', {'values': history})
#
# send client info to server/browser
#
def send_status(self):
#
# Get IP info
#
ip_map = None
if include_network_status:
ip_map = {}
for interface in interfaces():
if interface == 'lo':
continue
addresses = ifaddresses(interface)
if AF_INET in addresses:
links = addresses[AF_INET]
for link in links:
ip_map[interface] = link['addr']
if os.path.exists('/sys/class/net/wlan0/address'):
mac_addr = subprocess.check_output(['cat', '/sys/class/net/wlan0/address']).strip() # for raspi
else:
mac_addr = 'N/A'
status = {
'operational_status': self.operational_status,
'available_versions': self.available_versions,
'username': self.username,
'flow_version': Flow.FLOW_VERSION,
'lib_version': c.VERSION + ' ' + c.BUILD,
'device_count': len(self.device_list()),
'recording_interval': self.recording_interval,
'ip_addresses': ip_map,
'mac_address': mac_addr,
}
if self.diagram:
logging.debug("Setting name %s" % (self.diagram.name))
status['current_diagram'] = self.diagram.name
else:
logging.debug("No diagram name to set.")
status['current_diagram'] = None
self.send_message('status', status)
# update controller status table on server
own_path = c.path_on_server()
c.resources.send_request_to_server('PUT', '/api/v1/resources' + own_path, {'status': json.dumps(status)})
# create sequences on server for the given blocks
def create_sequences(self, blocks):
# get list of existing sequences
print('recording location: %s' % self.recording_location)
file_infos = c.resources.list_files(self.recording_location, type = 'sequence')
server_seqs = set([fi['name'] for fi in file_infos])
print('server seqs: %s' % server_seqs)
# create a sequence for each block (that doesn't already have a sequence)
for block in blocks:
id_str = str(block.id)
if id_str in self.sequence_names:
seq_name = self.sequence_names[id_str]
if seq_name not in server_seqs:
device = self.find_device(block.name)
units = device.units if device else None
create_sequence(self.recording_location, seq_name, data_type=1, units=units) # data_type 1 is numeric
server_seqs.add(block.name)
# get sensor data
def get_sensor_data(self):
data = []
for device in self.device_list():
dict = device.as_dict()
name = dict['name']
value = None
if name in self.sensor_data_latest:
(time, value) = self.sensor_data_latest[name]
#
# How to decide when a value is stale?
# This might not be necessary since _auto_devices
# removes the unplugged USB device...
#
now = datetime.datetime.utcnow()
if time < now - datetime.timedelta(seconds=5):
value = None
dict['value'] = value
data.append(dict)
# add timer blocks
# TODO: rename send_sensor_data since we're adding timer data
if self.diagram:
for block in self.diagram.blocks:
if block.type == 'timer':
d = {
'id': block.id,
'name': block.name,
'type': block.type,
'value': block.value,
}
data.append(d)
return data
# get list of devices
def device_list(self):
devices = c.auto_devices._auto_devices
if self.sensaur_hub:
if devices: # handle this case separately, since it will be slow (making copies of lists) and unusual (only when have both old and new hardware attached at the same time)
devices = devices + self.sensaur_hub.components # note: a device is called a component in the sensaur system
else:
devices = self.sensaur_hub.components
return devices
# find a device by name; assumes each device has a unique name
def find_device(self, name):
device = c.auto_devices.find_device(name)
if not device and self.sensaur_hub:
device = self.sensaur_hub.find_component(name) # note: a device is called a component in the sensaur system
return device
#
# Send all sensor data to Firebase including sensor values
#
    def firebase_send_sensor_data(self, firebase, send_sensor_data):
        """Forever loop: push all sensor data to Firebase every `interval` seconds.

        Runs in its own greenlet (spawned by the firebase_init handler).
        First exchanges the custom token in firebase["token"] for an id
        token, then PUTs a {timestamp, data} payload to the configured
        Firebase path on every iteration, refreshing the id token before
        it expires. Returns (killing the loop) on a non-200 auth response.

        Args:
            firebase: dict with "api_key", "token" and "project_id".
            send_sensor_data: dict with "interval" (seconds) and "path"
                (Firebase location to write to).
        """
        started = datetime.datetime.utcnow().replace(microsecond=0)
        interval = send_sensor_data["interval"]
        api_key = firebase["api_key"]
        google_auth_url = 'https://www.googleapis.com/identitytoolkit/v3/relyingparty/verifyCustomToken?key=%s' % (api_key)
        refresh_token_url = 'https://securetoken.googleapis.com/v1/token?key=%s' % (api_key)
        logging.debug("firebase_send_sensor_data start: %s, interval: %s" % (started, interval))
        # Enable https connection reuse to Firebase
        session = requests.Session()
        session.mount('https://', requests.adapters.HTTPAdapter())
        # Exchange the custom token for an id token.
        # Retries on network errors (0.1s between attempts); aborts the whole
        # greenlet when the auth server answers with a non-200 status.
        post_succeeded = False;
        while not post_succeeded:
            try:
                r = session.post(google_auth_url, headers={'Content-Type': 'application/json'}, data = json.dumps({'token': firebase["token"], 'returnSecureToken': True}))
                if r.status_code != 200:
                    logging.error('ABORTING firebase_send_sensor_data thread! POST to %s returned %s' % (google_auth_url, r.status_code))
                    return
                post_succeeded = True
            except Exception as err:
                logging.error("Session post error: %s" % (err))
            c.sleep(0.1)
        auth = r.json()
        id_token = auth["idToken"]
        refresh_token = auth["refreshToken"]
        # absolute expiry time (epoch seconds) of the current id token
        expires_in = int(time.time()) + int(auth["expiresIn"])
        while True:
            # refresh token when there is 5 minutes left
            now = int(time.time())
            if now >= expires_in - (5*60):
                logging.debug('Refreshing token')
                r = session.post(refresh_token_url, headers={'Content-Type': 'application/json'}, data = json.dumps({'refresh_token': refresh_token, 'grant_type': 'refresh_token'}))
                if r.status_code != 200:
                    logging.error('ABORTING firebase_send_sensor_data thread! POST to %s returned %s' % (refresh_token_url, r.status_code))
                    return
                refresh = r.json()
                # note: the refresh endpoint uses snake_case keys, unlike
                # the initial verifyCustomToken response above
                id_token = refresh["id_token"]
                refresh_token = refresh["refresh_token"]
                expires_in = int(time.time()) + int(refresh["expires_in"])
            # Get data
            data = self.get_sensor_data()
            timestamp = datetime.datetime.utcnow().isoformat() + "Z"
            # Send PUT request
            firebase_url = 'https://%s.firebaseio.com%s.json?auth=%s' % (firebase["project_id"], send_sensor_data["path"], id_token)
            r = session.put(firebase_url, data = json.dumps({'timestamp': timestamp, 'data': data}))
            if r.status_code == 200:
                logging.debug('Sent %s to %s' % (data, firebase_url))
            else:
                # log and keep looping; a single failed PUT is not fatal
                logging.error('PUT to %s returned %s - %s' % (firebase_url, r.status_code, r.text))
            # Sleep
            c.sleep(interval)
#
# Send all sensor data over websocket including sensor values
# Stop after specified number of minutes.
#
def send_sensor_data(self, minutes):
timestamp = datetime.datetime.utcnow().replace(microsecond=0)
stoptime = timestamp + datetime.timedelta(minutes=minutes)
logging.debug("send_sensor_data start: %s stop: %s" %
(timestamp, stoptime))
while True:
#
# Only loop for the specified number of minutes
#
if datetime.datetime.utcnow() > stoptime:
logging.debug("Stopping send_sensor_data")
self.sensor_data_greenlet = None
break
#
# Send all sensor data.
#
c.send_message('send_sensor_data_response',
{ 'success': True,
'data': self.get_sensor_data(),
'src_folder': c.path_on_server() } )
#
# Sleep
#
c.sleep(1)
#
# Send watchdog message to server so that it knows which
# controllers are online
#
def send_watchdog(self):
minutes = 0
while True:
if minutes == 0:
self.available_versions = []
list_cmd = ListVersionsCommand(None, None, {})
list_cmd.exec_cmd()
if list_cmd.get_response() and list_cmd.get_response()['version_list']:
self.available_versions = list_cmd.get_response()['version_list']
if minutes == 10:
minutes = 0
minutes += 1
self.send_status()
c.send_message('watchdog', {})
c.sleep(60)
# start capturing from a camera
def add_camera(self):
if hasattr(c, 'camera'):
c.camera.open()
if c.camera.device and c.camera.device.is_connected():
self.send_message('device_added', {'type': 'camera', 'name': 'camera', 'dir': 'in'})
# create image sequence on server if doesn't already exist
server_path = c.path_on_server()
if not c.resources.file_exists(server_path + '/image'):
create_sequence(server_path, 'image', data_type=3)
else:
logging.warning('unable to open camera')
else:
logging.warning('camera extension not added')
# get a new image for the camera block and store it as a base64 encoded value;
# for now we'll support just one physical camera (though it can feed into multiple camera blocks)
def update_camera_blocks(self):
if hasattr(c, 'camera') and c.camera.device and c.camera.device.is_connected():
camera_block_defined = False
for block in self.diagram.blocks:
if block.type == 'camera':
camera_block_defined = True
if camera_block_defined:
image = c.camera.device.capture_image()
# store camera image once a minute
current_time = time.time()
if not self.last_camera_store_time or current_time > self.last_camera_store_time + 60:
image.thumbnail((720, 540), Image.ANTIALIAS)
self.send_message('update_sequence', {'sequence': 'image', 'value': encode_image(image)})
self.last_camera_store_time = current_time
logging.debug('updating image sequence')
# create small thumbnail to send to UI
image.thumbnail((320, 240), Image.ANTIALIAS)
data = encode_image(image)
for block in self.diagram.blocks:
if block.type == 'camera':
block.value = data
#
# Get user friendly controller display name
#
def controller_name(self):
parts = c.path_on_server().split('/')
return parts[-1]
#
# Set the currently running diagram
#
def set_diagram(self, params):
#
# v2.0 messages should associate a username with a running
# program.
#
if set(('diagram', 'username')) <= set(params):
diagram_spec = params['diagram']
if 'name' not in diagram_spec:
self.send_message(
'set_diagram_response',
{ 'success': False,
'message': "No program name specified."
})
return
name = diagram_spec['name']
self.diagram = Diagram(name, diagram_spec)
self.username = params['username']
self.send_message(
'set_diagram_response',
{ 'success': True,
'message': "Set running program %s for user %s." % (name, self.username)
})
else:
#
# Support legacy flow for backwards compatibility.
# TODO remove this once v1.0 is no longer supported.
#
diagram_spec = params['diagram']
name = '_temp_'
if 'name' in diagram_spec:
name = diagram_spec['name']
logging.debug(
"handle_message: set_diagram name %s" % (name))
self.diagram = Diagram(name, diagram_spec)
# ======== UTILITY FUNCTIONS ========
# create a sequence resource on the server
# data types: 1 = numeric, 2 = text, 3 = image
def create_sequence(server_path, name, data_type, units = None):
    """Create a sequence resource on the server.

    data_type codes: 1 = numeric, 2 = text, 3 = image.
    """
    print('creating new sequence: %s' % name)
    sequence_info = dict(
        path=server_path,
        name=name,
        type=21,  # resource type 21 means sequence
        data_type=data_type,
        min_storage_interval=0,
    )
    if units:
        sequence_info['units'] = units
    c.resources.send_request_to_server('POST', '/api/v1/resources', sequence_info)
# change the name of a sequence on the server
def rename_sequence(server_path, old_name, new_name):
    """Change the name of a sequence resource on the server."""
    print('renaming sequence: %s -> %s' % (old_name, new_name))
    resource_path = '/api/v1/resources' + server_path + '/' + old_name
    c.resources.send_request_to_server('PUT', resource_path, {'name': new_name})
|
"""Implementation of Valve learning layer 2/3 switch."""
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
import os
from collections import namedtuple
import ipaddr
import aruba.aruba_pipeline as aruba
import valve_acl
import valve_flood
import valve_host
import valve_of
import valve_packet
import valve_route
import util
from ryu.lib import mac
from ryu.ofproto import ether
from ryu.ofproto import ofproto_v1_3 as ofp
from ryu.ofproto import ofproto_v1_3_parser as parser
def valve_factory(dp):
    """Return a Valve class based on dp's hardware configuration field.

    Arguments:
    dp -- a DP object with the configuration for this valve.

    Returns None when the hardware is not supported.
    """
    supported_hardware = {
        'Allied-Telesis': Valve,
        'Aruba': ArubaValve,
        'NoviFlow': Valve,
        'Open vSwitch': Valve,
        'ZodiacFX': Valve,
    }
    return supported_hardware.get(dp.hardware, None)
class Valve(object):
"""Generates the messages to configure a datapath as a l2 learning switch.
Vendor specific implementations may require sending configuration flows.
This can be achieved by inheriting from this class and overwriting the
function switch_features.
"""
FAUCET_MAC = '0e:00:00:00:00:01'
TABLE_MATCH_TYPES = {}
    def __init__(self, dp, logname, *args, **kwargs):
        """Build a Valve for one datapath.

        Args:
            dp: DP object carrying this datapath's configuration
                (tables, priorities, VLANs, ports).
            logname (str): parent logger name; '.valve' is appended.
        """
        self.dp = dp
        self.logger = logging.getLogger(logname + '.valve')
        # created lazily by ofchannel_log() when dp.ofchannel_log is set
        self.ofchannel_logger = None
        self._register_table_match_types()
        # TODO: functional flow managers require too much state.
        # Should interface with a common composer class.
        # Route managers resolve nexthops and install FIB entries
        # for their respective IP version.
        self.ipv4_route_manager = valve_route.ValveIPv4RouteManager(
            self.logger, self.FAUCET_MAC, self.dp.arp_neighbor_timeout,
            self.dp.ipv4_fib_table, self.dp.eth_src_table, self.dp.eth_dst_table,
            self.dp.highest_priority,
            self.valve_in_match, self.valve_flowdel, self.valve_flowmod,
            self.valve_flowcontroller)
        self.ipv6_route_manager = valve_route.ValveIPv6RouteManager(
            self.logger, self.FAUCET_MAC, self.dp.arp_neighbor_timeout,
            self.dp.ipv6_fib_table, self.dp.eth_src_table, self.dp.eth_dst_table,
            self.dp.highest_priority,
            self.valve_in_match, self.valve_flowdel, self.valve_flowmod,
            self.valve_flowcontroller)
        # builds the flood rules per VLAN (stacking-aware)
        self.flood_manager = valve_flood.ValveFloodManager(
            self.dp.flood_table, self.dp.low_priority,
            self.valve_in_match, self.valve_flowmod,
            self.dp.stack, self.dp.ports, self.dp.shortest_path_to_root)
        # learns/expires hosts in the eth_src/eth_dst tables
        self.host_manager = valve_host.ValveHostManager(
            self.logger, self.dp.eth_src_table, self.dp.eth_dst_table,
            self.dp.timeout, self.dp.low_priority, self.dp.highest_priority,
            self.valve_in_match, self.valve_flowmod, self.valve_flowdel,
            self.valve_flowdrop)
    def _register_table_match_types(self):
        """Register, per table, which match fields flows in that table may
        use; valve_in_match() asserts against this registry."""
        # TODO: functional flow managers should be able to register
        # the flows they need, themselves.
        self.TABLE_MATCH_TYPES = {
            self.dp.vlan_table: (
                'in_port', 'vlan_vid', 'eth_src', 'eth_dst', 'eth_type'),
            # TODO: eth_src_table matches too many things. It should
            # be split further into two tables for IPv4/IPv6 entries.
            self.dp.eth_src_table: (
                'in_port', 'vlan_vid', 'eth_src', 'eth_dst', 'eth_type',
                'ip_proto',
                'icmpv6_type', 'ipv6_nd_target',
                'arp_tpa', 'ipv4_src'),
            self.dp.ipv4_fib_table: (
                'vlan_vid', 'eth_type', 'ip_proto',
                'ipv4_src', 'ipv4_dst'),
            self.dp.ipv6_fib_table: (
                'vlan_vid', 'eth_type', 'ip_proto',
                'icmpv6_type', 'ipv6_dst'),
            self.dp.eth_dst_table: (
                'vlan_vid', 'eth_dst'),
            self.dp.flood_table: (
                'in_port', 'vlan_vid', 'eth_dst'),
        }
def _in_port_tables(self):
in_port_tables = [self.dp.acl_table]
for table_id in self.TABLE_MATCH_TYPES:
if 'in_port' in self.TABLE_MATCH_TYPES:
in_port_tables.append(table_id)
return in_port_tables
def switch_features(self, dp_id, msg):
"""Send configuration flows necessary for the switch implementation.
Arguments:
dp_id -- the Datapath unique ID (64bit int)
msg -- OFPSwitchFeatures msg sent from switch.
Vendor specific configuration should be implemented here.
"""
return []
def ofchannel_log(self, ofmsgs):
if self.dp is not None:
if self.dp.ofchannel_log is not None:
self.ofchannel_logger = util.get_logger(
self.dp.ofchannel_log,
self.dp.ofchannel_log,
logging.DEBUG,
0)
for ofmsg in ofmsgs:
self.ofchannel_logger.debug(ofmsg)
def valve_in_match(self, table_id, in_port=None, vlan=None,
eth_type=None, eth_src=None,
eth_dst=None, eth_dst_mask=None,
ipv6_nd_target=None, icmpv6_type=None,
nw_proto=None, nw_src=None, nw_dst=None):
match_dict = valve_of.build_match_dict(
in_port, vlan, eth_type, eth_src,
eth_dst, eth_dst_mask, ipv6_nd_target, icmpv6_type,
nw_proto, nw_src, nw_dst)
if table_id != self.dp.acl_table:
assert table_id in self.TABLE_MATCH_TYPES,\
'%u table not registered' % table_id
for match_type in match_dict.iterkeys():
assert match_type in self.TABLE_MATCH_TYPES[table_id],\
'%s match not registered for table %u' % (
match_type, table_id)
match = valve_of.match(match_dict)
return match
def _ignore_dpid(self, dp_id):
"""Return True if this datapath ID is not ours.
Args:
dp_id (int): datapath ID
Returns:
bool: True if this datapath ID is not ours.
"""
if dp_id != self.dp.dp_id:
self.logger.error('Unknown %s', util.dpid_log(dp_id))
return True
return False
def _all_valve_tables(self):
"""Return all Valve tables.
Returns:
tuple: all Valve tables as ints.
"""
return (
self.dp.vlan_table,
self.dp.acl_table,
self.dp.eth_src_table,
self.dp.ipv4_fib_table,
self.dp.ipv6_fib_table,
self.dp.eth_dst_table,
self.dp.flood_table)
def valve_flowmod(self, table_id, match=None, priority=None,
inst=None, command=ofp.OFPFC_ADD, out_port=0,
out_group=0, hard_timeout=0, idle_timeout=0):
"""Helper function to construct a flow mod message with cookie."""
if match is None:
match = self.valve_in_match(table_id)
if priority is None:
priority = self.dp.lowest_priority
if inst is None:
inst = []
return valve_of.flowmod(
self.dp.cookie,
command,
table_id,
priority,
out_port,
out_group,
match,
inst,
hard_timeout,
idle_timeout)
def valve_flowdel(self, table_id, match=None, priority=None,
out_port=ofp.OFPP_ANY):
"""Delete matching flows from a table."""
return [
self.valve_flowmod(
table_id,
match=match,
priority=priority,
command=ofp.OFPFC_DELETE,
out_port=out_port,
out_group=ofp.OFPG_ANY),
valve_of.barrier()]
def valve_flowdrop(self, table_id, match=None, priority=None,
hard_timeout=0):
"""Add drop matching flow to a table."""
return self.valve_flowmod(
table_id,
match=match,
priority=priority,
hard_timeout=hard_timeout,
inst=[])
def valve_flowcontroller(self, table_id, match=None, priority=None,
inst=None):
if inst is None:
inst = []
return self.valve_flowmod(
table_id,
match=match,
priority=priority,
inst=[valve_of.apply_actions(
[valve_of.output_controller()])] + inst)
def _delete_all_valve_flows(self):
"""Delete all flows from all FAUCET tables."""
ofmsgs = []
for table_id in self._all_valve_tables():
ofmsgs.extend(self.valve_flowdel(table_id))
return ofmsgs
def _delete_all_port_match_flows(self, port):
ofmsgs = []
for table in self._in_port_tables():
in_port_match = self.valve_in_match(table, in_port=port.number)
ofmsgs.extend(self.valve_flowdel(table, in_port_match))
return ofmsgs
def _add_default_drop_flows(self):
"""Add default drop rules on all FAUCET tables."""
# default drop on all tables.
ofmsgs = []
for table in self._all_valve_tables():
ofmsgs.append(self.valve_flowdrop(
table,
priority=self.dp.lowest_priority))
# antispoof for FAUCET's MAC address
# TODO: antispoof for controller IPs on this VLAN, too.
ofmsgs.append(self.valve_flowdrop(
self.dp.vlan_table,
self.valve_in_match(
self.dp.vlan_table, eth_src=self.FAUCET_MAC),
priority=self.dp.high_priority))
# drop STDP BPDU
for bpdu_mac in ('01:80:C2:00:00:00', '01:00:0C:CC:CC:CD'):
ofmsgs.append(self.valve_flowdrop(
self.dp.vlan_table,
self.valve_in_match(
self.dp.vlan_table, eth_dst=bpdu_mac),
priority=self.dp.highest_priority))
# drop LLDP
ofmsgs.append(self.valve_flowdrop(
self.dp.vlan_table,
self.valve_in_match(
self.dp.vlan_table, eth_type=ether.ETH_TYPE_LLDP),
priority=self.dp.highest_priority))
# drop broadcast sources
ofmsgs.append(self.valve_flowdrop(
self.dp.vlan_table,
self.valve_in_match(
self.dp.vlan_table, eth_src=mac.BROADCAST_STR),
priority=self.dp.highest_priority))
return ofmsgs
def _add_vlan_flood_flow(self):
"""Add a flow to flood packets for unknown destinations."""
return [self.valve_flowmod(
self.dp.eth_dst_table,
priority=self.dp.low_priority,
inst=[valve_of.goto_table(self.dp.flood_table)])]
def _add_controller_learn_flow(self):
"""Add a flow for controller to learn/add flows for destinations."""
return [self.valve_flowcontroller(
self.dp.eth_src_table,
priority=self.dp.low_priority,
inst=[valve_of.goto_table(self.dp.eth_dst_table)])]
def _add_default_flows(self):
"""Configure datapath with necessary default tables and rules."""
ofmsgs = []
ofmsgs.extend(self._delete_all_valve_flows())
ofmsgs.extend(self._add_default_drop_flows())
ofmsgs.extend(self._add_vlan_flood_flow())
ofmsgs.extend(self._add_controller_learn_flow())
return ofmsgs
def _add_ports_and_vlans(self, discovered_port_nums):
"""Add all configured and discovered ports and VLANs."""
ofmsgs = []
all_port_nums = set()
# add vlan ports
for vlan in self.dp.vlans.itervalues():
self.logger.info('Configuring VLAN %s', vlan)
for port in vlan.get_ports():
all_port_nums.add(port.number)
# add mirror destination ports.
for port in vlan.mirror_destination_ports():
all_port_nums.add(port.number)
# install eth_dst_table flood ofmsgs
ofmsgs.extend(self.flood_manager.build_flood_rules(vlan))
# add controller IPs if configured.
ofmsgs.extend(self._add_controller_ips(vlan.controller_ips, vlan))
# add any ports discovered but not configured
for port_num in discovered_port_nums:
if valve_of.ignore_port(port_num):
continue
if port_num not in all_port_nums:
all_port_nums.add(port_num)
# now configure all ports
for port_num in all_port_nums:
ofmsgs.extend(self.port_add(self.dp.dp_id, port_num))
return ofmsgs
def datapath_connect(self, dp_id, discovered_port_nums):
"""Handle Ryu datapath connection event and provision pipeline.
Args:
dp_id (int): datapath ID.
discovered_port_nums (list): known datapath ports as ints.
Returns:
list: OpenFlow messages to send to datapath.
"""
if self._ignore_dpid(dp_id):
return []
if discovered_port_nums is None:
discovered_port_nums = []
self.logger.info('Configuring %s', util.dpid_log(dp_id))
ofmsgs = []
ofmsgs.extend(self._add_default_flows())
ofmsgs.extend(self._add_ports_and_vlans(discovered_port_nums))
self.dp.running = True
return ofmsgs
def datapath_disconnect(self, dp_id):
"""Handle Ryu datapath disconnection event.
Args:
dp_id (int): datapath ID.
"""
if not self._ignore_dpid(dp_id):
self.dp.running = False
self.logger.warning('%s down', util.dpid_log(dp_id))
def _port_add_acl(self, port_num):
ofmsgs = []
forwarding_table = self.dp.eth_src_table
if port_num in self.dp.acl_in:
acl_num = self.dp.acl_in[port_num]
forwarding_table = self.dp.acl_table
acl_rule_priority = self.dp.highest_priority
acl_allow_inst = valve_of.goto_table(self.dp.eth_src_table)
for rule_conf in self.dp.acls[acl_num]:
acl_match, acl_inst = valve_acl.build_acl_entry(
rule_conf, acl_allow_inst, port_num)
ofmsgs.append(self.valve_flowmod(
self.dp.acl_table,
acl_match,
priority=acl_rule_priority,
inst=acl_inst))
acl_rule_priority -= 1
return ofmsgs, forwarding_table
def _port_add_vlan_rules(self, port, vlan, vlan_vid, vlan_inst):
ofmsgs = []
ofmsgs.append(self.valve_flowmod(
self.dp.vlan_table,
self.valve_in_match(
self.dp.vlan_table, in_port=port.number, vlan=vlan_vid),
priority=self.dp.low_priority,
inst=vlan_inst))
ofmsgs.extend(self.flood_manager.build_flood_rules(vlan))
return ofmsgs
    def _port_add_vlan_untagged(self, port, vlan, forwarding_table, mirror_act):
        """Push the VLAN's VID onto untagged packets arriving on the port
        (after any mirror actions), then goto the forwarding table."""
        push_vlan_act = mirror_act + valve_of.push_vlan_act(vlan.vid)
        push_vlan_inst = [
            valve_of.apply_actions(push_vlan_act),
            valve_of.goto_table(forwarding_table)
        ]
        # NOTE: this passes the namedtuple *class* (with vid set as a class
        # attribute), not an instance, as a lightweight stand-in for
        # "no VLAN tag" — _port_add_vlan_rules only reads .vid.
        null_vlan = namedtuple('null_vlan', 'vid')
        null_vlan.vid = ofp.OFPVID_NONE
        return self._port_add_vlan_rules(port, vlan, null_vlan, push_vlan_inst)
def _port_add_vlan_tagged(self, port, vlan, forwarding_table, mirror_act):
vlan_inst = [
valve_of.goto_table(forwarding_table)
]
if mirror_act:
vlan_inst = [valve_of.apply_actions(mirror_act)] + vlan_inst
return self._port_add_vlan_rules(port, vlan, vlan, vlan_inst)
def _port_add_vlans(self, port, forwarding_table, mirror_act):
ofmsgs = []
vlans = self.dp.vlans.values()
tagged_vlans_with_port = [
vlan for vlan in vlans if port in vlan.tagged]
untagged_vlans_with_port = [
vlan for vlan in vlans if port in vlan.untagged]
for vlan in tagged_vlans_with_port:
ofmsgs.extend(self._port_add_vlan_tagged(
port, vlan, forwarding_table, mirror_act))
for vlan in untagged_vlans_with_port:
ofmsgs.extend(self._port_add_vlan_untagged(
port, vlan, forwarding_table, mirror_act))
return ofmsgs
    def port_add(self, dp_id, port_num):
        """Handle the addition of a port.

        Args:
            dp_id (int): datapath ID.
            port_num (int): port number.
        Returns:
            list: OpenFlow messages, if any (empty for foreign datapaths,
            ignored/unconfigured ports, or ports that are not running).
        """
        if self._ignore_dpid(dp_id) or valve_of.ignore_port(port_num):
            return []
        if port_num not in self.dp.ports:
            self.logger.info(
                'Ignoring port:%u not present in configuration file', port_num)
            return []
        port = self.dp.ports[port_num]
        self.logger.info('Port %s added', port)
        port.phys_up = True
        if not port.running():
            return []
        ofmsgs = []
        self.logger.info('Sending config for port %s', port)
        # Delete all flows previously matching this port
        ofmsgs.extend(self._delete_all_port_match_flows(port))
        # Port is a mirror destination; drop all input packets
        # (no other rules apply to a mirror destination)
        if port.mirror_destination:
            ofmsgs.append(self.valve_flowdrop(
                self.dp.vlan_table,
                match=self.valve_in_match(self.dp.vlan_table, in_port=port_num),
                priority=self.dp.highest_priority))
            return ofmsgs
        # Add ACL if any.
        acl_ofmsgs, forwarding_table = self._port_add_acl(port_num)
        ofmsgs.extend(acl_ofmsgs)
        # If this is a stacking port, accept all VLANs (came from another FAUCET)
        # and skip per-VLAN rules entirely
        if port.stack is not None:
            ofmsgs.append(self.valve_flowmod(
                self.dp.vlan_table,
                match=self.valve_in_match(self.dp.vlan_table, in_port=port_num),
                priority=self.dp.low_priority,
                inst=[valve_of.goto_table(forwarding_table)]))
            return ofmsgs
        # Add mirroring if any
        mirror_act = []
        if port.mirror:
            mirror_act = [valve_of.output_port(port.mirror)]
        # Add port/to VLAN rules.
        ofmsgs.extend(self._port_add_vlans(port, forwarding_table, mirror_act))
        return ofmsgs
def port_delete(self, dp_id, port_num):
"""Handle the deletion of a port.
Args:
dp_id (int): datapath ID.
port_num (int): port number.
Returns:
list: OpenFlow messages, if any.
"""
if self._ignore_dpid(dp_id) or valve_of.ignore_port(port_num):
return []
if port_num not in self.dp.ports:
return []
port = self.dp.ports[port_num]
port.phys_up = False
self.logger.warning('Port %s down', port)
ofmsgs = []
if not port.permanent_learn:
ofmsgs.extend(self._delete_all_port_match_flows(port))
# delete eth_dst rules
ofmsgs.extend(self.valve_flowdel(
self.dp.eth_dst_table,
out_port=port_num))
for vlan in self.dp.vlans.values():
if port in vlan.get_ports():
ofmsgs.extend(self.flood_manager.build_flood_rules(
vlan, modify=True))
return ofmsgs
def control_plane_handler(self, in_port, vlan, eth_src, eth_dst, pkt):
"""Handle a packet probably destined to FAUCET's route managers.
For example, next hop resolution or ICMP echo requests.
Args:
in_port (int): port the packet was received on.
vlan (vlan): vlan of the port the packet was received on.
eth_src (str): source Ethernet MAC address.
eth_dst (str): destination Ethernet MAC address.
pkt (ryu.lib.packet.ethernet): packet received.
Returns:
list: OpenFlow messages, if any.
"""
if eth_dst == self.FAUCET_MAC or not valve_packet.mac_addr_is_unicast(eth_dst):
for handler in (self.ipv4_route_manager.control_plane_handler,
self.ipv6_route_manager.control_plane_handler):
ofmsgs = handler(in_port, vlan, eth_src, eth_dst, pkt)
if ofmsgs:
return ofmsgs
return []
def _known_up_dpid_and_port(self, dp_id, in_port):
"""Returns True if datapath and port are known and running.
Args:
dp_id (int): datapath ID.
in_port (int): port number.
Returns:
bool: True if datapath and port are known and running.
"""
if (not self._ignore_dpid(dp_id) and not valve_of.ignore_port(in_port) and
self.dp.running and in_port in self.dp.ports):
return True
return False
def rcv_packet(self, dp_id, valves, in_port, vlan_vid, pkt):
    """Handle a packet from the dataplane (eg to re/learn a host).

    The packet may be sent to us also in response to FAUCET
    initiating IPv6 neighbor discovery, or ARP, to resolve
    a nexthop.

    Args:
        dp_id (int): datapath ID.
        valves (dict): all datapaths, indexed by datapath ID.
        in_port (int): port packet was received on.
        vlan_vid (int): VLAN VID of port packet was received on.
        pkt (ryu.lib.packet.packet): packet received.
    Return:
        list: OpenFlow messages, if any.
    """
    if not self._known_up_dpid_and_port(dp_id, in_port):
        return []
    ofmsgs = []
    eth_pkt = valve_packet.parse_pkt(pkt)
    eth_src = eth_pkt.src
    eth_dst = eth_pkt.dst
    vlan = self.dp.vlans[vlan_vid]
    port = self.dp.ports[in_port]
    if valve_packet.mac_addr_is_unicast(eth_src):
        # Only unicast sources are offered to the route managers.
        self.logger.debug(
            'Packet_in %s src:%s in_port:%d vid:%s',
            util.dpid_log(dp_id), eth_src, in_port, vlan_vid)
        ofmsgs.extend(self.control_plane_handler(
            in_port, vlan, eth_src, eth_dst, pkt))
    # ban learning new hosts if max_hosts reached on a VLAN.
    if (vlan.max_hosts is not None and
            len(vlan.host_cache) == vlan.max_hosts and
            eth_src not in vlan.host_cache):
        ofmsgs.append(self.host_manager.temp_ban_host_learning_on_vlan(
            vlan))
        self.logger.info(
            'max hosts %u reached on vlan %u, ' +
            'temporarily banning learning on this vlan, ' +
            'and not learning %s',
            vlan.max_hosts, vlan.vid, eth_src)
    else:
        # TODO: partial stacking implementation - unicast learning
        # not yet implemented.
        if port.stack is not None:
            return ofmsgs
        # TODO: it would be good to be able to notify an external
        # system upon re/learning a host.
        ofmsgs.extend(self.host_manager.learn_host_on_vlan_port(
            port, vlan, eth_src))
        self.logger.info(
            'learned %u hosts on vlan %u',
            len(vlan.host_cache), vlan.vid)
    return ofmsgs
def host_expire(self):
    """Expire hosts not recently re/learned.

    Expire state from the host manager only; the switch does its own flow
    expiry.
    """
    if self.dp.running:
        expire_time = time.time()
        for vlan in self.dp.vlans.itervalues():
            self.host_manager.expire_hosts_from_vlan(vlan, expire_time)
def reload_config(self, new_dp):
    """Reload configuration new_dp.

    Args:
        new_dp (DP): new dataplane configuration.
    Returns:
        list: OpenFlow messages.
    """
    # TODO: a reload currently causes a full pipeline restart.
    # We could special case reloads if we need to change only
    # (for example) an ACL on a port.
    if not self.dp.running:
        return []
    self.dp = new_dp
    return self.datapath_connect(self.dp.dp_id, self.dp.ports.keys())
def _add_controller_ips(self, controller_ips, vlan):
    """Ask the route managers to claim each controller IP on the VLAN.

    Args:
        controller_ips (iterable): controller IP networks for the VLAN.
        vlan (vlan): VLAN the IPs are configured on.
    Returns:
        list: OpenFlow messages, if any.
    """
    ofmsgs = []
    for controller_ip in controller_ips:
        # Host route (maximum prefix length) for the controller IP itself.
        controller_ip_host = ipaddr.IPNetwork(
            '{0}/{1}'.format(controller_ip.ip, controller_ip.max_prefixlen))
        if controller_ip_host.version == 6:
            route_manager = self.ipv6_route_manager
        elif controller_ip_host.version == 4:
            route_manager = self.ipv4_route_manager
        else:
            continue
        ofmsgs.extend(route_manager.add_controller_ip(
            vlan, controller_ip, controller_ip_host))
    return ofmsgs
def add_route(self, vlan, ip_gw, ip_dst):
    """Add a route, dispatching on the destination's IP version."""
    route_manager = (
        self.ipv6_route_manager if ip_dst.version == 6
        else self.ipv4_route_manager)
    return route_manager.add_route(vlan, ip_gw, ip_dst)
def del_route(self, vlan, ip_dst):
    """Delete a route, dispatching on the destination's IP version."""
    route_manager = (
        self.ipv6_route_manager if ip_dst.version == 6
        else self.ipv4_route_manager)
    return route_manager.del_route(vlan, ip_dst)
def resolve_gateways(self):
    """Call route managers to re/resolve gateways.

    Returns:
        list: OpenFlow messages, if any.
    """
    if not self.dp.running:
        return []
    now = time.time()
    ofmsgs = []
    for vlan in self.dp.vlans.itervalues():
        for route_manager in (self.ipv4_route_manager, self.ipv6_route_manager):
            ofmsgs.extend(route_manager.resolve_gateways(vlan, now))
    return ofmsgs
class ArubaValve(Valve):
    """Valve implementation for Aruba hardware.

    Provisions the Aruba pipeline via an OpenFlow table features message.
    """

    def switch_features(self, dp_id, msg):
        """Send the Aruba pipeline description as table features."""
        table_loader = aruba.LoadRyuTables()
        table_loader.load_tables(
            os.path.join(aruba.CFG_PATH, 'aruba_pipeline.json'), parser)
        return [valve_of.table_features(table_loader.ryu_tables)]
Add a configurable discard rate for learning packet-ins, to reduce controller load.
"""Implementation of Valve learning layer 2/3 switch."""
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
import os
from collections import namedtuple
import ipaddr
import aruba.aruba_pipeline as aruba
import valve_acl
import valve_flood
import valve_host
import valve_of
import valve_packet
import valve_route
import util
from ryu.lib import mac
from ryu.ofproto import ether
from ryu.ofproto import ofproto_v1_3 as ofp
from ryu.ofproto import ofproto_v1_3_parser as parser
def valve_factory(dp):
    """Return a Valve class based on dp's hardware configuration field.

    Arguments:
        dp -- a DP object with the configuration for this valve.

    Returns the matching Valve subclass, or None for unknown hardware.
    """
    supported_hardware = {
        'Allied-Telesis': Valve,
        'Aruba': ArubaValve,
        'NoviFlow': Valve,
        'Open vSwitch': Valve,
        'ZodiacFX': Valve,
    }
    return supported_hardware.get(dp.hardware, None)
class Valve(object):
    """Generates the messages to configure a datapath as a l2 learning switch.

    Vendor specific implementations may require sending configuration flows.
    This can be achieved by inheriting from this class and overwriting the
    function switch_features.
    """

    # Source MAC FAUCET uses for its own control plane traffic.
    FAUCET_MAC = '0e:00:00:00:00:01'
    # Per-instance registry of table ID -> valid match fields; populated
    # by _register_table_match_types().
    TABLE_MATCH_TYPES = {}

    def __init__(self, dp, logname, *args, **kwargs):
        """Configure flow managers for datapath dp.

        Args:
            dp: DP configuration object for this datapath.
            logname (str): base name for this valve's logger.
        """
        self.dp = dp
        self.logger = logging.getLogger(logname + '.valve')
        self.ofchannel_logger = None
        self._register_table_match_types()
        # TODO: functional flow managers require too much state.
        # Should interface with a common composer class.
        self.ipv4_route_manager = valve_route.ValveIPv4RouteManager(
            self.logger, self.FAUCET_MAC, self.dp.arp_neighbor_timeout,
            self.dp.ipv4_fib_table, self.dp.eth_src_table, self.dp.eth_dst_table,
            self.dp.highest_priority,
            self.valve_in_match, self.valve_flowdel, self.valve_flowmod,
            self.valve_flowcontroller)
        self.ipv6_route_manager = valve_route.ValveIPv6RouteManager(
            self.logger, self.FAUCET_MAC, self.dp.arp_neighbor_timeout,
            self.dp.ipv6_fib_table, self.dp.eth_src_table, self.dp.eth_dst_table,
            self.dp.highest_priority,
            self.valve_in_match, self.valve_flowdel, self.valve_flowmod,
            self.valve_flowcontroller)
        self.flood_manager = valve_flood.ValveFloodManager(
            self.dp.flood_table, self.dp.low_priority,
            self.valve_in_match, self.valve_flowmod,
            self.dp.stack, self.dp.ports, self.dp.shortest_path_to_root)
        self.host_manager = valve_host.ValveHostManager(
            self.logger, self.dp.eth_src_table, self.dp.eth_dst_table,
            self.dp.timeout, self.dp.low_priority, self.dp.highest_priority,
            self.valve_in_match, self.valve_flowmod, self.valve_flowdel,
            self.valve_flowdrop)
def _register_table_match_types(self):
    """Populate TABLE_MATCH_TYPES: table ID -> tuple of valid match fields.

    valve_in_match() asserts against this registry so that a match on a
    field a table cannot support fails fast.
    """
    # TODO: functional flow managers should be able to register
    # the flows they need, themselves.
    self.TABLE_MATCH_TYPES = {
        self.dp.vlan_table: (
            'in_port', 'vlan_vid', 'eth_src', 'eth_dst', 'eth_type'),
        # TODO: eth_src_table matches too many things. It should
        # be split further into two tables for IPv4/IPv6 entries.
        self.dp.eth_src_table: (
            'in_port', 'vlan_vid', 'eth_src', 'eth_dst', 'eth_type',
            'ip_proto',
            'icmpv6_type', 'ipv6_nd_target',
            'arp_tpa', 'ipv4_src'),
        self.dp.ipv4_fib_table: (
            'vlan_vid', 'eth_type', 'ip_proto',
            'ipv4_src', 'ipv4_dst'),
        self.dp.ipv6_fib_table: (
            'vlan_vid', 'eth_type', 'ip_proto',
            'icmpv6_type', 'ipv6_dst'),
        self.dp.eth_dst_table: (
            'vlan_vid', 'eth_dst'),
        self.dp.flood_table: (
            'in_port', 'vlan_vid', 'eth_dst'),
    }
def _in_port_tables(self):
    """Return the tables that can match on in_port.

    The ACL table is always included; other tables are included only
    when 'in_port' is a registered match type for them.

    Returns:
        list: table IDs that can match in_port.
    """
    in_port_tables = [self.dp.acl_table]
    for table_id, match_types in self.TABLE_MATCH_TYPES.items():
        # Bug fix: previously tested 'in_port' against the registry dict
        # itself (whose keys are table IDs), which is always false, so
        # only the ACL table was ever returned and port flows in other
        # tables were never deleted on port down/add.
        if 'in_port' in match_types:
            in_port_tables.append(table_id)
    return in_port_tables
def switch_features(self, dp_id, msg):
    """Send configuration flows necessary for the switch implementation.

    Arguments:
        dp_id -- the Datapath unique ID (64bit int)
        msg -- OFPSwitchFeatures msg sent from switch.

    Vendor specific configuration should be implemented here; the base
    implementation needs none.
    """
    return []
def ofchannel_log(self, ofmsgs):
    """Log OpenFlow messages to the separate ofchannel log, if configured.

    Args:
        ofmsgs (list): OpenFlow messages to log.
    """
    if self.dp is not None:
        if self.dp.ofchannel_log is not None:
            # Logger name and file are both the configured path.
            # NOTE(review): re-acquired on every call — presumably
            # util.get_logger caches by name; confirm.
            self.ofchannel_logger = util.get_logger(
                self.dp.ofchannel_log,
                self.dp.ofchannel_log,
                logging.DEBUG,
                0)
            for ofmsg in ofmsgs:
                self.ofchannel_logger.debug(ofmsg)
def valve_in_match(self, table_id, in_port=None, vlan=None,
                   eth_type=None, eth_src=None,
                   eth_dst=None, eth_dst_mask=None,
                   ipv6_nd_target=None, icmpv6_type=None,
                   nw_proto=None, nw_src=None, nw_dst=None):
    """Build an OpenFlow match for a table, validating the match fields.

    Every requested field must be registered for table_id in
    TABLE_MATCH_TYPES; the ACL table is exempt from validation.

    Returns:
        an OpenFlow match built by valve_of.match().
    """
    match_dict = valve_of.build_match_dict(
        in_port, vlan, eth_type, eth_src,
        eth_dst, eth_dst_mask, ipv6_nd_target, icmpv6_type,
        nw_proto, nw_src, nw_dst)
    if table_id != self.dp.acl_table:
        assert table_id in self.TABLE_MATCH_TYPES,\
            '%u table not registered' % table_id
        for match_type in match_dict.iterkeys():
            assert match_type in self.TABLE_MATCH_TYPES[table_id],\
                '%s match not registered for table %u' % (
                    match_type, table_id)
    match = valve_of.match(match_dict)
    return match
def _ignore_dpid(self, dp_id):
    """Return True if this datapath ID is not ours.

    Args:
        dp_id (int): datapath ID
    Returns:
        bool: True if this datapath ID is not ours.
    """
    if dp_id == self.dp.dp_id:
        return False
    self.logger.error('Unknown %s', util.dpid_log(dp_id))
    return True
def _all_valve_tables(self):
    """Return all Valve tables.

    Returns:
        tuple: all Valve tables as ints.
    """
    dp = self.dp
    return (
        dp.vlan_table,
        dp.acl_table,
        dp.eth_src_table,
        dp.ipv4_fib_table,
        dp.ipv6_fib_table,
        dp.eth_dst_table,
        dp.flood_table)
def valve_flowmod(self, table_id, match=None, priority=None,
                  inst=None, command=ofp.OFPFC_ADD, out_port=0,
                  out_group=0, hard_timeout=0, idle_timeout=0):
    """Helper function to construct a flow mod message with cookie.

    Defaults: match everything in the table, lowest priority, and no
    instructions (which in OpenFlow means drop).
    """
    if match is None:
        match = self.valve_in_match(table_id)
    if priority is None:
        priority = self.dp.lowest_priority
    if inst is None:
        inst = []
    return valve_of.flowmod(
        self.dp.cookie,
        command,
        table_id,
        priority,
        out_port,
        out_group,
        match,
        inst,
        hard_timeout,
        idle_timeout)
def valve_flowdel(self, table_id, match=None, priority=None,
                  out_port=ofp.OFPP_ANY):
    """Delete matching flows from a table.

    A barrier follows the delete so it completes before later mods.
    """
    flowdel = self.valve_flowmod(
        table_id,
        match=match,
        priority=priority,
        command=ofp.OFPFC_DELETE,
        out_port=out_port,
        out_group=ofp.OFPG_ANY)
    return [flowdel, valve_of.barrier()]
def valve_flowdrop(self, table_id, match=None, priority=None,
                   hard_timeout=0):
    """Add drop matching flow to a table (empty instructions == drop)."""
    return self.valve_flowmod(
        table_id, match=match, priority=priority,
        hard_timeout=hard_timeout, inst=[])
def valve_flowcontroller(self, table_id, match=None, priority=None,
                         inst=None):
    """Add a flow that sends matching packets to the controller."""
    extra_inst = [] if inst is None else inst
    to_controller = valve_of.apply_actions([valve_of.output_controller()])
    return self.valve_flowmod(
        table_id,
        match=match,
        priority=priority,
        inst=[to_controller] + extra_inst)
def _delete_all_valve_flows(self):
    """Delete all flows from all FAUCET tables."""
    return [
        ofmsg
        for table_id in self._all_valve_tables()
        for ofmsg in self.valve_flowdel(table_id)]
def _delete_all_port_match_flows(self, port):
    """Delete flows matching this port from all in_port-capable tables."""
    ofmsgs = []
    for table_id in self._in_port_tables():
        match = self.valve_in_match(table_id, in_port=port.number)
        ofmsgs.extend(self.valve_flowdel(table_id, match))
    return ofmsgs
def _add_default_drop_flows(self):
    """Add default drop rules on all FAUCET tables.

    Returns:
        list: OpenFlow messages.
    """
    # default drop on all tables.
    ofmsgs = []
    for table in self._all_valve_tables():
        ofmsgs.append(self.valve_flowdrop(
            table,
            priority=self.dp.lowest_priority))
    # antispoof for FAUCET's MAC address
    # TODO: antispoof for controller IPs on this VLAN, too.
    ofmsgs.append(self.valve_flowdrop(
        self.dp.vlan_table,
        self.valve_in_match(
            self.dp.vlan_table, eth_src=self.FAUCET_MAC),
        priority=self.dp.high_priority))
    # drop STP BPDU (destination MACs used by STP and Cisco PVST)
    for bpdu_mac in ('01:80:C2:00:00:00', '01:00:0C:CC:CC:CD'):
        ofmsgs.append(self.valve_flowdrop(
            self.dp.vlan_table,
            self.valve_in_match(
                self.dp.vlan_table, eth_dst=bpdu_mac),
            priority=self.dp.highest_priority))
    # drop LLDP
    ofmsgs.append(self.valve_flowdrop(
        self.dp.vlan_table,
        self.valve_in_match(
            self.dp.vlan_table, eth_type=ether.ETH_TYPE_LLDP),
        priority=self.dp.highest_priority))
    # drop broadcast sources
    ofmsgs.append(self.valve_flowdrop(
        self.dp.vlan_table,
        self.valve_in_match(
            self.dp.vlan_table, eth_src=mac.BROADCAST_STR),
        priority=self.dp.highest_priority))
    return ofmsgs
def _add_vlan_flood_flow(self):
    """Add a flow to flood packets for unknown destinations."""
    goto_flood = valve_of.goto_table(self.dp.flood_table)
    return [self.valve_flowmod(
        self.dp.eth_dst_table,
        priority=self.dp.low_priority,
        inst=[goto_flood])]
def _add_controller_learn_flow(self):
    """Add a flow for controller to learn/add flows for destinations."""
    goto_eth_dst = valve_of.goto_table(self.dp.eth_dst_table)
    return [self.valve_flowcontroller(
        self.dp.eth_src_table,
        priority=self.dp.low_priority,
        inst=[goto_eth_dst])]
def _add_default_flows(self):
    """Configure datapath with necessary default tables and rules."""
    ofmsgs = []
    # Order matters: wipe everything first, then install defaults.
    for build_flows in (
            self._delete_all_valve_flows,
            self._add_default_drop_flows,
            self._add_vlan_flood_flow,
            self._add_controller_learn_flow):
        ofmsgs.extend(build_flows())
    return ofmsgs
def _add_ports_and_vlans(self, discovered_port_nums):
    """Add all configured and discovered ports and VLANs.

    Args:
        discovered_port_nums (iterable): port numbers the switch reported.
    Returns:
        list: OpenFlow messages.
    """
    ofmsgs = []
    all_port_nums = set()
    # add vlan ports
    for vlan in self.dp.vlans.itervalues():
        self.logger.info('Configuring VLAN %s', vlan)
        for port in vlan.get_ports():
            all_port_nums.add(port.number)
        # add mirror destination ports.
        for port in vlan.mirror_destination_ports():
            all_port_nums.add(port.number)
        # install eth_dst_table flood ofmsgs
        ofmsgs.extend(self.flood_manager.build_flood_rules(vlan))
        # add controller IPs if configured.
        ofmsgs.extend(self._add_controller_ips(vlan.controller_ips, vlan))
    # add any ports discovered but not configured
    for port_num in discovered_port_nums:
        if valve_of.ignore_port(port_num):
            continue
        if port_num not in all_port_nums:
            all_port_nums.add(port_num)
    # now configure all ports
    for port_num in all_port_nums:
        ofmsgs.extend(self.port_add(self.dp.dp_id, port_num))
    return ofmsgs
def datapath_connect(self, dp_id, discovered_port_nums):
    """Handle Ryu datapath connection event and provision pipeline.

    Args:
        dp_id (int): datapath ID.
        discovered_port_nums (list): known datapath ports as ints.
    Returns:
        list: OpenFlow messages to send to datapath.
    """
    if self._ignore_dpid(dp_id):
        return []
    if discovered_port_nums is None:
        discovered_port_nums = []
    self.logger.info('Configuring %s', util.dpid_log(dp_id))
    ofmsgs = self._add_default_flows()
    ofmsgs.extend(self._add_ports_and_vlans(discovered_port_nums))
    self.dp.running = True
    return ofmsgs
def datapath_disconnect(self, dp_id):
    """Handle Ryu datapath disconnection event.

    Args:
        dp_id (int): datapath ID.
    """
    if self._ignore_dpid(dp_id):
        return
    self.dp.running = False
    self.logger.warning('%s down', util.dpid_log(dp_id))
def _port_add_acl(self, port_num):
    """Install this port's input ACL, if one is configured.

    Returns:
        tuple: (list of OpenFlow messages,
                table packets should be sent to after the VLAN table).
    """
    ofmsgs = []
    if port_num not in self.dp.acl_in:
        # No ACL: packets go straight to the eth_src table.
        return ofmsgs, self.dp.eth_src_table
    acl_num = self.dp.acl_in[port_num]
    acl_allow_inst = valve_of.goto_table(self.dp.eth_src_table)
    acl_rule_priority = self.dp.highest_priority
    for rule_conf in self.dp.acls[acl_num]:
        acl_match, acl_inst = valve_acl.build_acl_entry(
            rule_conf, acl_allow_inst, port_num)
        ofmsgs.append(self.valve_flowmod(
            self.dp.acl_table,
            acl_match,
            priority=acl_rule_priority,
            inst=acl_inst))
        # Rules apply in configuration order: highest priority first.
        acl_rule_priority -= 1
    return ofmsgs, self.dp.acl_table
def _port_add_vlan_rules(self, port, vlan, vlan_vid, vlan_inst):
    """Add the VLAN table rule for this port/VID plus VLAN flood rules."""
    vlan_match = self.valve_in_match(
        self.dp.vlan_table, in_port=port.number, vlan=vlan_vid)
    ofmsgs = [self.valve_flowmod(
        self.dp.vlan_table,
        vlan_match,
        priority=self.dp.low_priority,
        inst=vlan_inst)]
    ofmsgs.extend(self.flood_manager.build_flood_rules(vlan))
    return ofmsgs
def _port_add_vlan_untagged(self, port, vlan, forwarding_table, mirror_act):
    """Add rules to tag and forward untagged traffic on this VLAN."""
    push_vlan_inst = [
        valve_of.apply_actions(mirror_act + valve_of.push_vlan_act(vlan.vid)),
        valve_of.goto_table(forwarding_table),
    ]
    # Stand-in "VLAN" matching packets that arrive with no tag at all.
    null_vlan = namedtuple('null_vlan', 'vid')
    null_vlan.vid = ofp.OFPVID_NONE
    return self._port_add_vlan_rules(port, vlan, null_vlan, push_vlan_inst)
def _port_add_vlan_tagged(self, port, vlan, forwarding_table, mirror_act):
    """Add rules forwarding already-tagged traffic on this VLAN."""
    vlan_inst = [valve_of.goto_table(forwarding_table)]
    if mirror_act:
        vlan_inst.insert(0, valve_of.apply_actions(mirror_act))
    return self._port_add_vlan_rules(port, vlan, vlan, vlan_inst)
def _port_add_vlans(self, port, forwarding_table, mirror_act):
    """Add VLAN rules for every VLAN this port is a member of."""
    ofmsgs = []
    # Tagged memberships first, then untagged, as before.
    for vlan in self.dp.vlans.values():
        if port in vlan.tagged:
            ofmsgs.extend(self._port_add_vlan_tagged(
                port, vlan, forwarding_table, mirror_act))
    for vlan in self.dp.vlans.values():
        if port in vlan.untagged:
            ofmsgs.extend(self._port_add_vlan_untagged(
                port, vlan, forwarding_table, mirror_act))
    return ofmsgs
def port_add(self, dp_id, port_num):
    """Handle the addition of a port.

    Args:
        dp_id (int): datapath ID.
        port_num (int): port number.
    Returns:
        list: OpenFlow messages, if any.
    """
    if self._ignore_dpid(dp_id) or valve_of.ignore_port(port_num):
        return []
    if port_num not in self.dp.ports:
        self.logger.info(
            'Ignoring port:%u not present in configuration file', port_num)
        return []
    port = self.dp.ports[port_num]
    self.logger.info('Port %s added', port)
    port.phys_up = True
    if not port.running():
        return []
    ofmsgs = []
    self.logger.info('Sending config for port %s', port)
    # Delete all flows previously matching this port
    ofmsgs.extend(self._delete_all_port_match_flows(port))
    # Port is a mirror destination; drop all input packets
    if port.mirror_destination:
        ofmsgs.append(self.valve_flowdrop(
            self.dp.vlan_table,
            match=self.valve_in_match(self.dp.vlan_table, in_port=port_num),
            priority=self.dp.highest_priority))
        return ofmsgs
    # Add ACL if any.
    acl_ofmsgs, forwarding_table = self._port_add_acl(port_num)
    ofmsgs.extend(acl_ofmsgs)
    # If this is a stacking port, accept all VLANs (came from another FAUCET)
    if port.stack is not None:
        ofmsgs.append(self.valve_flowmod(
            self.dp.vlan_table,
            match=self.valve_in_match(self.dp.vlan_table, in_port=port_num),
            priority=self.dp.low_priority,
            inst=[valve_of.goto_table(forwarding_table)]))
        return ofmsgs
    # Add mirroring if any
    mirror_act = []
    if port.mirror:
        mirror_act = [valve_of.output_port(port.mirror)]
    # Add port/to VLAN rules.
    ofmsgs.extend(self._port_add_vlans(port, forwarding_table, mirror_act))
    return ofmsgs
def port_delete(self, dp_id, port_num):
    """Handle the deletion of a port.

    Args:
        dp_id (int): datapath ID.
        port_num (int): port number.
    Returns:
        list: OpenFlow messages, if any.
    """
    if self._ignore_dpid(dp_id) or valve_of.ignore_port(port_num):
        return []
    if port_num not in self.dp.ports:
        return []
    port = self.dp.ports[port_num]
    port.phys_up = False
    self.logger.warning('Port %s down', port)
    ofmsgs = []
    if not port.permanent_learn:
        # Remove all flows matching on this port, then any eth_dst
        # rules that forward out of it.
        ofmsgs.extend(self._delete_all_port_match_flows(port))
        ofmsgs.extend(self.valve_flowdel(
            self.dp.eth_dst_table, out_port=port_num))
    # Rebuild flood rules on every VLAN this port was a member of.
    for vlan in [v for v in self.dp.vlans.values() if port in v.get_ports()]:
        ofmsgs.extend(self.flood_manager.build_flood_rules(
            vlan, modify=True))
    return ofmsgs
def control_plane_handler(self, in_port, vlan, eth_src, eth_dst, pkt):
    """Handle a packet probably destined to FAUCET's route managers.

    For example, next hop resolution or ICMP echo requests.

    Args:
        in_port (int): port the packet was received on.
        vlan (vlan): vlan of the port the packet was received on.
        eth_src (str): source Ethernet MAC address.
        eth_dst (str): destination Ethernet MAC address.
        pkt (ryu.lib.packet.ethernet): packet received.
    Returns:
        list: OpenFlow messages, if any.
    """
    # Only packets addressed to FAUCET itself, or multicast/broadcast,
    # are offered to the route managers.
    if eth_dst != self.FAUCET_MAC and valve_packet.mac_addr_is_unicast(eth_dst):
        return []
    for route_manager in (self.ipv4_route_manager, self.ipv6_route_manager):
        ofmsgs = route_manager.control_plane_handler(
            in_port, vlan, eth_src, eth_dst, pkt)
        if ofmsgs:
            return ofmsgs
    return []
def _known_up_dpid_and_port(self, dp_id, in_port):
    """Returns True if datapath and port are known and running.

    Args:
        dp_id (int): datapath ID.
        in_port (int): port number.
    Returns:
        bool: True if datapath and port are known and running.
    """
    return bool(
        not self._ignore_dpid(dp_id) and
        not valve_of.ignore_port(in_port) and
        self.dp.running and
        in_port in self.dp.ports)
def rcv_packet(self, dp_id, valves, in_port, vlan_vid, pkt):
    """Handle a packet from the dataplane (eg to re/learn a host).

    The packet may be sent to us also in response to FAUCET
    initiating IPv6 neighbor discovery, or ARP, to resolve
    a nexthop.

    Args:
        dp_id (int): datapath ID.
        valves (dict): all datapaths, indexed by datapath ID.
        in_port (int): port packet was received on.
        vlan_vid (int): VLAN VID of port packet was received on.
        pkt (ryu.lib.packet.packet): packet received.
    Return:
        list: OpenFlow messages, if any.
    """
    if not self._known_up_dpid_and_port(dp_id, in_port):
        return []
    ofmsgs = []
    eth_pkt = valve_packet.parse_pkt(pkt)
    eth_src = eth_pkt.src
    eth_dst = eth_pkt.dst
    vlan = self.dp.vlans[vlan_vid]
    port = self.dp.ports[in_port]
    if valve_packet.mac_addr_is_unicast(eth_src):
        # Only unicast sources are offered to the route managers.
        self.logger.debug(
            'Packet_in %s src:%s in_port:%d vid:%s',
            util.dpid_log(dp_id), eth_src, in_port, vlan_vid)
        ofmsgs.extend(self.control_plane_handler(
            in_port, vlan, eth_src, eth_dst, pkt))
    # Apply learning packet in rate limit.
    # Bug fix: guard against ignore_learn_ins == 0 (rate limiting
    # disabled), which previously raised ZeroDivisionError on the
    # modulo below for every packet in.
    if (self.dp.ignore_learn_ins and
            int(time.time() * 1e3) % self.dp.ignore_learn_ins == 0):
        return ofmsgs
    # ban learning new hosts if max_hosts reached on a VLAN.
    if (vlan.max_hosts is not None and
            len(vlan.host_cache) == vlan.max_hosts and
            eth_src not in vlan.host_cache):
        ofmsgs.append(self.host_manager.temp_ban_host_learning_on_vlan(
            vlan))
        self.logger.info(
            'max hosts %u reached on vlan %u, ' +
            'temporarily banning learning on this vlan, ' +
            'and not learning %s',
            vlan.max_hosts, vlan.vid, eth_src)
    else:
        # TODO: partial stacking implementation - unicast learning
        # not yet implemented.
        if port.stack is not None:
            return ofmsgs
        # TODO: it would be good to be able to notify an external
        # system upon re/learning a host.
        ofmsgs.extend(self.host_manager.learn_host_on_vlan_port(
            port, vlan, eth_src))
        self.logger.info(
            'learned %u hosts on vlan %u',
            len(vlan.host_cache), vlan.vid)
    return ofmsgs
def host_expire(self):
    """Expire hosts not recently re/learned.

    Expire state from the host manager only; the switch does its own flow
    expiry.
    """
    if self.dp.running:
        expire_time = time.time()
        for vlan in self.dp.vlans.itervalues():
            self.host_manager.expire_hosts_from_vlan(vlan, expire_time)
def reload_config(self, new_dp):
    """Reload configuration new_dp.

    Args:
        new_dp (DP): new dataplane configuration.
    Returns:
        list: OpenFlow messages.
    """
    # TODO: a reload currently causes a full pipeline restart.
    # We could special case reloads if we need to change only
    # (for example) an ACL on a port.
    if not self.dp.running:
        return []
    self.dp = new_dp
    return self.datapath_connect(self.dp.dp_id, self.dp.ports.keys())
def _add_controller_ips(self, controller_ips, vlan):
    """Ask the route managers to claim each controller IP on the VLAN.

    Args:
        controller_ips (iterable): controller IP networks for the VLAN.
        vlan (vlan): VLAN the IPs are configured on.
    Returns:
        list: OpenFlow messages, if any.
    """
    ofmsgs = []
    for controller_ip in controller_ips:
        # Host route (maximum prefix length) for the controller IP itself.
        controller_ip_host = ipaddr.IPNetwork(
            '{0}/{1}'.format(controller_ip.ip, controller_ip.max_prefixlen))
        if controller_ip_host.version == 6:
            route_manager = self.ipv6_route_manager
        elif controller_ip_host.version == 4:
            route_manager = self.ipv4_route_manager
        else:
            continue
        ofmsgs.extend(route_manager.add_controller_ip(
            vlan, controller_ip, controller_ip_host))
    return ofmsgs
def add_route(self, vlan, ip_gw, ip_dst):
    """Add a route, dispatching on the destination's IP version."""
    route_manager = (
        self.ipv6_route_manager if ip_dst.version == 6
        else self.ipv4_route_manager)
    return route_manager.add_route(vlan, ip_gw, ip_dst)
def del_route(self, vlan, ip_dst):
    """Delete a route, dispatching on the destination's IP version."""
    route_manager = (
        self.ipv6_route_manager if ip_dst.version == 6
        else self.ipv4_route_manager)
    return route_manager.del_route(vlan, ip_dst)
def resolve_gateways(self):
    """Call route managers to re/resolve gateways.

    Returns:
        list: OpenFlow messages, if any.
    """
    if not self.dp.running:
        return []
    now = time.time()
    ofmsgs = []
    for vlan in self.dp.vlans.itervalues():
        for route_manager in (self.ipv4_route_manager, self.ipv6_route_manager):
            ofmsgs.extend(route_manager.resolve_gateways(vlan, now))
    return ofmsgs
class ArubaValve(Valve):
    """Valve implementation for Aruba hardware.

    Provisions the Aruba pipeline via an OpenFlow table features message.
    """

    def switch_features(self, dp_id, msg):
        """Send the Aruba pipeline description as table features."""
        table_loader = aruba.LoadRyuTables()
        table_loader.load_tables(
            os.path.join(aruba.CFG_PATH, 'aruba_pipeline.json'), parser)
        return [valve_of.table_features(table_loader.ryu_tables)]
|
#!/usr/bin/env python
"""
This script reformats each file supplied on the command line according to
the Google Java style (by calling out to the google-java-format program,
https://github.com/google/google-java-format), but with improvements to
the formatting of annotations in comments.
"""
from __future__ import print_function
from distutils import spawn
import filecmp
import os
import stat
import subprocess
import sys
import tempfile
try:
from urllib import urlretrieve # python 2
except ImportError:
from urllib.request import urlretrieve # python 3
debug = False
# debug = True

script_dir = os.path.dirname(os.path.abspath(__file__))
# Rather than calling out to the shell, it would be better to
# call directly in Python.
fixup_py = os.path.join(script_dir, "fixup-google-java-format.py")

## To use an officially released version.
## (Releases appear at https://github.com/google/google-java-format/releases/.)
# Version 1.3 and earlier do not wrap line comments.
gjf_version = os.getenv("GJF_VERSION", "1.7")
gjf_snapshot = os.getenv("GJF_SNAPSHOT", "")
gjf_url_base = os.getenv(
    "GJF_URL_BASE",
    "https://github.com/google/google-java-format/releases/download/google-java-format-" +
    gjf_version + "/")
## To use a non-official version by default, because an official version is
## unusably buggy (like 1.1) or no new release has been made in a long time.
## Never change the file at a URL; make it unique by adding a date.
# gjf_version = "1.5"
# gjf_snapshot = "-SNAPSHOT-20171012"
# gjf_url_base = "http://types.cs.washington.edu/"
# gjf_url_base = "http://homes.cs.washington.edu/~mernst/tmp2/"
gjf_jar_name = "google-java-format-" + gjf_version + gjf_snapshot + "-all-deps.jar"
gjf_url = gjf_url_base + gjf_jar_name

# Set gjf_jar_path, or retrieve it if it doesn't appear locally. Does not update
# from remote path if remote is newer, so never change files on the server.
if os.path.isfile(os.path.join(script_dir, gjf_jar_name)):
    gjf_jar_path = os.path.join(script_dir, gjf_jar_name)
elif os.path.isfile(os.path.join(os.path.dirname(script_dir), "lib", gjf_jar_name)):
    gjf_jar_path = os.path.join(os.path.dirname(script_dir), "lib", gjf_jar_name)
else:
    gjf_jar_path = os.path.join(script_dir, gjf_jar_name)
    # print("retrieving " + gjf_url + " to " + gjf_jar_path)
    try:
        # Download to a temporary file, then rename atomically.
        # This avoids race conditions with other run-google-java-format processes.
        # "delete=False" because the file will be renamed.
        f = tempfile.NamedTemporaryFile(dir=script_dir, delete=False)
        urlretrieve(gjf_url, f.name)
        os.rename(f.name, gjf_jar_path)
    except:
        # Bare except is tolerable here: the error is reported and re-raised.
        print("Problem while retrieving " + gjf_url + " to " + gjf_jar_path)
        raise
# For some reason, the "git ls-files" must be run from the root.
# (I can run "git ls-files" from the command line in any directory.)
def under_git(dir, filename):
    """Return true if filename in dir is under git control.

    Args:
        dir: directory to run "git ls-files" in.
        filename: file name to test, relative to dir.
    """
    if not spawn.find_executable("git"):
        if debug:
            print("no git executable found")
        return False
    # Fix: close the devnull handle when done (it was leaked on every call).
    FNULL = open(os.devnull, 'w')
    try:
        p = subprocess.Popen(["git", "ls-files", filename, "--error-unmatch"],
                             cwd=dir,
                             stdout=FNULL,
                             stderr=subprocess.STDOUT)
        p.wait()
    finally:
        FNULL.close()
    if debug:
        print("p.returncode", p.returncode)
    return p.returncode == 0
# Don't replace local with remote if local is under version control.
# It would be better to just test whether the remote is newer than local,
# But raw GitHub URLs don't have the necessary last-modified information.
if not under_git(script_dir, "fixup-google-java-format.py"):
    try:
        urlretrieve(
            "https://raw.githubusercontent.com/" +
            "plume-lib/run-google-java-format/master/fixup-google-java-format.py", fixup_py)
    except Exception:
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; narrowed to Exception.
        if os.path.exists(fixup_py):
            print("Couldn't retrieve fixup-google-java-format.py; using cached version")
        else:
            print("Couldn't retrieve fixup-google-java-format.py")
            sys.exit(1)
# Make the fixup script executable for everyone.
os.chmod(fixup_py, os.stat(fixup_py).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)

if debug:
    print("script_dir:", script_dir)
    print("fixup_py: ", fixup_py)
    print("gjf_jar_path: ", gjf_jar_path)

files = sys.argv[1:]
if len(files) == 0:
    print("run-google-java-format.py expects 1 or more filenames as arguments")
    sys.exit(1)

result = subprocess.call(["java", "-jar", gjf_jar_path, "--replace"] + files)
## This if statement used to be commented out, because google-java-format
## crashed a lot. It seems more stable now.
# Don't stop if there was an error, because google-java-format won't munge
# files and we still want to run fixup-google-java-format.py.
if result != 0:
    print("Error when running google-java-format")
    sys.exit(result)

# Remove command-line arguments
files = [f for f in files if not f.startswith("-")]
# Exit if no files were supplied (maybe "--help" was supplied)
if not files:
    sys.exit(0)

if debug:
    print("Running fixup-google-java-format.py")
result = subprocess.call([fixup_py] + files)
if result != 0:
    print("Error when running fixup-google-java-format.py")
    sys.exit(result)
Add a comment noting that google-java-format version 1.8 and later require JDK 11 to run.
#!/usr/bin/env python
"""
This script reformats each file supplied on the command line according to
the Google Java style (by calling out to the google-java-format program,
https://github.com/google/google-java-format), but with improvements to
the formatting of annotations in comments.
"""
from __future__ import print_function
from distutils import spawn
import filecmp
import os
import stat
import subprocess
import sys
import tempfile
try:
from urllib import urlretrieve # python 2
except ImportError:
from urllib.request import urlretrieve # python 3
debug = False
# debug = True

script_dir = os.path.dirname(os.path.abspath(__file__))
# Rather than calling out to the shell, it would be better to
# call directly in Python.
fixup_py = os.path.join(script_dir, "fixup-google-java-format.py")

## To use an officially released version.
## (Releases appear at https://github.com/google/google-java-format/releases/.)
# Version 1.3 and earlier do not wrap line comments.
# Version 1.8 and later require JDK 11 to run.
gjf_version = os.getenv("GJF_VERSION", "1.7")
gjf_snapshot = os.getenv("GJF_SNAPSHOT", "")
gjf_url_base = os.getenv(
    "GJF_URL_BASE",
    "https://github.com/google/google-java-format/releases/download/google-java-format-" +
    gjf_version + "/")
## To use a non-official version by default, because an official version is
## unusably buggy (like 1.1) or no new release has been made in a long time.
## Never change the file at a URL; make it unique by adding a date.
# gjf_version = "1.5"
# gjf_snapshot = "-SNAPSHOT-20171012"
# gjf_url_base = "http://types.cs.washington.edu/"
# gjf_url_base = "http://homes.cs.washington.edu/~mernst/tmp2/"
gjf_jar_name = "google-java-format-" + gjf_version + gjf_snapshot + "-all-deps.jar"
gjf_url = gjf_url_base + gjf_jar_name

# Set gjf_jar_path, or retrieve it if it doesn't appear locally. Does not update
# from remote path if remote is newer, so never change files on the server.
if os.path.isfile(os.path.join(script_dir, gjf_jar_name)):
    gjf_jar_path = os.path.join(script_dir, gjf_jar_name)
elif os.path.isfile(os.path.join(os.path.dirname(script_dir), "lib", gjf_jar_name)):
    gjf_jar_path = os.path.join(os.path.dirname(script_dir), "lib", gjf_jar_name)
else:
    gjf_jar_path = os.path.join(script_dir, gjf_jar_name)
    # print("retrieving " + gjf_url + " to " + gjf_jar_path)
    try:
        # Download to a temporary file, then rename atomically.
        # This avoids race conditions with other run-google-java-format processes.
        # "delete=False" because the file will be renamed.
        f = tempfile.NamedTemporaryFile(dir=script_dir, delete=False)
        urlretrieve(gjf_url, f.name)
        os.rename(f.name, gjf_jar_path)
    except:
        # Bare except is tolerable here: the error is reported and re-raised.
        print("Problem while retrieving " + gjf_url + " to " + gjf_jar_path)
        raise
# For some reason, the "git ls-files" must be run from the root.
# (I can run "git ls-files" from the command line in any directory.)
def under_git(dir, filename):
    """Return True if `filename` in directory `dir` is under git control.

    Returns False when no `git` executable can be found on the path.
    """
    if not spawn.find_executable("git"):
        if debug:
            print("no git executable found")
        return False
    # "--error-unmatch" makes git exit nonzero when the file is not tracked;
    # only the exit status matters, so discard all output.
    # Use a `with` block so the devnull handle is always closed (the original
    # leaked it on every call).
    with open(os.devnull, 'w') as FNULL:
        p = subprocess.Popen(["git", "ls-files", filename, "--error-unmatch"],
                             cwd=dir,
                             stdout=FNULL,
                             stderr=subprocess.STDOUT)
        p.wait()
    if debug:
        print("p.returncode", p.returncode)
    return p.returncode == 0
# Don't replace local with remote if local is under version control.
# It would be better to just test whether the remote is newer than local,
# but raw GitHub URLs don't have the necessary last-modified information.
if not under_git(script_dir, "fixup-google-java-format.py"):
    try:
        urlretrieve(
            "https://raw.githubusercontent.com/" +
            "plume-lib/run-google-java-format/master/fixup-google-java-format.py", fixup_py)
    # Narrow catch: a bare "except:" would also swallow KeyboardInterrupt and
    # SystemExit and misreport them as a failed download.
    except Exception:
        if os.path.exists(fixup_py):
            # Best effort: fall back to a previously downloaded copy.
            print("Couldn't retrieve fixup-google-java-format.py; using cached version")
        else:
            print("Couldn't retrieve fixup-google-java-format.py")
            sys.exit(1)
# Make the fixup script executable by owner, group, and others.
os.chmod(fixup_py, os.stat(fixup_py).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
if debug:
    print("script_dir:", script_dir)
    print("fixup_py: ", fixup_py)
    print("gjf_jar_path: ", gjf_jar_path)
files = sys.argv[1:]
if not files:
    print("run-google-java-format.py expects 1 or more filenames as arguments")
    sys.exit(1)
# Run the formatter itself; flags such as "--help" are passed straight through.
result = subprocess.call(["java", "-jar", gjf_jar_path, "--replace"] + files)
## This if statement used to be commented out, because google-java-format
## crashed a lot.  It seems more stable now.
# Stop on formatter failure: fixup-google-java-format.py assumes the files
# were actually reformatted.  (The old comment claiming we "don't stop" was
# stale; the exit below has been active since the if was uncommented.)
if result != 0:
    print("Error when running google-java-format")
    sys.exit(result)
# Remove command-line arguments, keeping only file names for the fixup pass.
files = [fname for fname in files if not fname.startswith("-")]
# Exit if no files were supplied (maybe "--help" was supplied)
if not files:
    sys.exit(0)
if debug:
    print("Running fixup-google-java-format.py")
result = subprocess.call([fixup_py] + files)
if result != 0:
    print("Error when running fixup-google-java-format.py")
    sys.exit(result)