text stringlengths 0 1.05M | meta dict |
|---|---|
"""Algorithms for computing symbolic roots of polynomials."""
import functools
import math
from ..core import (Dummy, Eq, Float, I, Integer, Rational, Symbol, comp,
factor_terms, pi, symbols)
from ..core.mul import expand_2arg
from ..core.sympify import sympify
from ..domains.compositedomain import CompositeDomain
from ..functions import Piecewise, acos, cos, exp, im, root, sign, sqrt
from ..ntheory import divisors, isprime, nextprime
from ..simplify.powsimp import powsimp
from ..simplify.simplify import simplify
from ..utilities import ordered
from .polyerrors import GeneratorsNeeded, PolynomialError
from .polyquinticconst import PolyQuintic
from .polytools import Poly, cancel, discriminant, factor
from .rationaltools import together
from .specialpolys import cyclotomic_poly
__all__ = 'roots',
def roots_linear(f):
    """Return the single root of the linear polynomial ``f`` as a one-element list.

    Over non-numerical domains the root is canonicalized: factored for
    composite domains, simplified otherwise.
    """
    sol = -f.coeff_monomial(1)/f.coeff_monomial(f.gen)
    dom = f.domain
    if not dom.is_Numerical:
        sol = factor(sol) if isinstance(dom, CompositeDomain) else simplify(sol)
    return [sol]
def roots_quadratic(f):
    """Returns a list of roots of a quadratic polynomial. If the domain is ZZ
    then the roots will be sorted with negatives coming before positives.
    The ordering will be the same for any numerical coefficients as long as
    the assumptions tested are correct, otherwise the ordering will not be
    sorted (but will be canonical).
    """
    c, b, a = f.all_coeffs()
    dom = f.domain

    def _canonicalize(expr):
        # Factor over composite domains; otherwise fall back to simplify().
        return factor(expr) if isinstance(dom, CompositeDomain) else simplify(expr)

    numerical = dom.is_Numerical

    if c == 0:
        # One root is zero; put the negative root first when detectable.
        z0, z1 = Integer(0), -b/a
        if not numerical:
            z1 = _canonicalize(z1)
        elif z1.is_negative:
            z0, z1 = z1, z0
    elif b == 0:
        # Pure square: roots are +/- sqrt(-c/a).
        square = -c/a
        if not numerical:
            square = _canonicalize(square)
        rt = sqrt(square).doit()
        z0, z1 = -rt, rt
    else:
        # General case via the discriminant of the monic form.
        disc = (b**2 - 4*a*c)/a**2
        vertex = -b/(2*a)
        if not numerical:
            disc = _canonicalize(disc)
            vertex = _canonicalize(vertex)
        half = factor_terms(sqrt(disc)/2)
        z0 = vertex - half
        z1 = vertex + half
        if not numerical:
            z0 = expand_2arg(z0)
            z1 = expand_2arg(z1)
    return [z0, z1]
def roots_cubic(f, trig=False):
    """Returns a list of roots of a cubic polynomial.

    With ``trig=True`` and a positive discriminant (three real roots), the
    trigonometric (casus irreducibilis) form is returned instead of radicals.

    References
    ==========
    * https://en.wikipedia.org/wiki/Cubic_function, General
      formula for roots, (accessed November 17, 2014).
    """
    if trig:
        d, c, b, a = f.all_coeffs()
        # Depressed-cubic parameters and the discriminant D.
        p = (3*a*c - b**2)/3/a**2
        q = (2*b**3 - 9*a*b*c + 27*a**2*d)/(27*a**3)
        D = 18*a*b*c*d - 4*b**3*d + b**2*c**2 - 4*a*c**3 - 27*a**2*d**2
        if D.is_positive:
            # Three distinct real roots: use the cosine parametrization.
            rv = []
            for k in range(3):
                rv.append(2*sqrt(-p/3)*cos(acos(3*q/2/p*sqrt(-3/p))/3 - k*2*pi/3))
            return [i - b/3/a for i in rv]
    # Work with the monic polynomial; coefficients in ascending order.
    c, b, a, _ = f.monic().all_coeffs()
    if c == 0:
        # Zero constant term: factor out x and solve the remaining quadratic.
        x1, x2 = roots([1, a, b], multiple=True)
        return [x1, Integer(0), x2]
    # Depressed cubic t**3 + p*t + q via x = t - a/3.
    p = b - a**2/3
    q = c - a*b/3 + 2*a**3/27
    pon3 = p/3
    aon3 = a/3
    u1 = None
    if p == 0:
        if q == 0:
            return [-aon3]*3
        elif q.is_nonnegative or q.is_negative:
            u1 = -sign(q)*root(abs(q), 3)
        else:
            # Sign of q unknown: pick the branch giving the tidier radical.
            if q.could_extract_minus_sign():
                u1 = +root(-q, 3)
            else:
                u1 = -root(+q, 3)
    elif q.is_extended_real and q.is_negative:
        u1 = -root(-q/2 + sqrt(q**2/4 + pon3**3), 3)
    coeff = I*sqrt(3)/2
    if u1 is None:
        # No convenient real branch found: fall back to the general
        # Cardano formula expressed through C = cbrt((D1 + sqrt(...))/2).
        u1 = Integer(1)
        u2 = -Rational(1, 2) + coeff
        u3 = -Rational(1, 2) - coeff
        a, b, c, d = Integer(1), a, b, c
        D0 = b**2 - 3*a*c
        D1 = 2*b**3 - 9*a*b*c + 27*a**2*d
        C = root((D1 + sqrt(D1**2 - 4*D0**3))/2, 3)
        return [-(b + uk*C + D0/C/uk)/3/a for uk in [u1, u2, u3]]
    # Remaining cube roots of unity scaled by u1.
    u2 = u1*(-Rational(1, 2) + coeff)
    u3 = u1*(-Rational(1, 2) - coeff)
    if p == 0:
        return [u1 - aon3, u2 - aon3, u3 - aon3]
    soln = [-u1 + pon3/u1 - aon3,
            -u2 + pon3/u2 - aon3,
            -u3 + pon3/u3 - aon3]
    return soln
def _roots_quartic_euler(p, q, r, a):
    """
    Descartes-Euler solution of the quartic equation

    Parameters
    ==========

    p, q, r: coefficients of ``x**4 + p*x**2 + q*x + r``
    a: shift of the roots

    Notes
    =====

    This is a helper function for ``roots_quartic``.

    Look for solutions of the form ::

      ``x1 = sqrt(R) - sqrt(A + B*sqrt(R))``
      ``x2 = -sqrt(R) - sqrt(A - B*sqrt(R))``
      ``x3 = -sqrt(R) + sqrt(A - B*sqrt(R))``
      ``x4 = sqrt(R) + sqrt(A + B*sqrt(R))``

    To satisfy the quartic equation one must have
    ``p = -2*(R + A); q = -4*B*R; r = (R - A)**2 - B**2*R``
    so that ``R`` must satisfy the Descartes-Euler resolvent equation
    ``64*R**3 + 32*p*R**2 + (4*p**2 - 16*r)*R - q**2 = 0``

    If the resolvent does not have a rational solution, return None;
    in that case it is likely that the Ferrari method gives a simpler
    solution.

    Examples
    ========

    >>> p, q, r = -Rational(64, 5), -Rational(512, 125), -Rational(1024, 3125)
    >>> _roots_quartic_euler(p, q, r, Integer(0))[0]
    -sqrt(32*sqrt(5)/125 + 16/5) + 4*sqrt(5)/5
    """
    # solve the resolvent equation (cubics disabled: only rational roots wanted)
    x = Symbol('x')
    eq = 64*x**3 + 32*p*x**2 + (4*p**2 - 16*r)*x - q**2
    xsols = list(roots(Poly(eq, x), cubics=False))
    xsols = [sol for sol in xsols if sol.is_rational]
    if not xsols:
        # no rational resolvent root: caller falls back to Ferrari's method
        return
    R = max(xsols)
    c1 = sqrt(R)
    B = -q*c1/(4*R)
    A = -R - p/2
    c2 = sqrt(A + B)
    c3 = sqrt(A - B)
    # the four roots, shifted by -a
    return [c1 - c2 - a, -c1 - c3 - a, -c1 + c3 - a, c1 + c2 - a]
def roots_quartic(f):
r"""
Returns a list of roots of a quartic polynomial.
There are many references for solving quartic expressions available [1-4].
This reviewer has found that many of them require one to select from among
2 or more possible sets of solutions and that some solutions work when one
is searching for real roots but don't work when searching for complex roots
(though this is not always stated clearly). The following routine has been
tested and found to be correct for 0, 2 or 4 complex roots.
The quasisymmetric case solution [5] looks for quartics that have the form
`x**4 + A*x**3 + B*x**2 + C*x + D = 0` where `(C/A)**2 = D`.
Although no general solution that is always applicable for all
coefficients is known to this reviewer, certain conditions are tested
to determine the simplest 4 expressions that can be returned:
1) `f = c + a*(a**2/8 - b/2) == 0`
2) `g = d - a*(a*(3*a**2/256 - b/16) + c/4) = 0`
3) if `f != 0` and `g != 0` and `p = -d + a*c/4 - b**2/12` then
a) `p == 0`
b) `p != 0`
Examples
========
>>> r = roots_quartic((x**4 - 6*x**3 + 17*x**2 - 26*x + 20).as_poly())
>>> # 4 complex roots: 1+-I*sqrt(3), 2+-I
>>> sorted(str(tmp.evalf(2)) for tmp in r)
['1.0 + 1.7*I', '1.0 - 1.7*I', '2.0 + 1.0*I', '2.0 - 1.0*I']
References
==========
* http://mathforum.org/dr.math/faq/faq.cubic.equations.html
* https://en.wikipedia.org/wiki/Quartic_function#Summary_of_Ferrari.27s_method
* http://staff.bath.ac.uk/masjhd/JHD-CA.pdf
* http://www.albmath.org/files/Math_5713.pdf
* http://www.statemaster.com/encyclopedia/Quartic-equation
* eqworld.ipmnet.ru/en/solutions/ae/ae0108.pdf
"""
d, c, b, a, _ = f.monic().all_coeffs()
if not d:
return [Integer(0)] + roots([1, a, b, c], multiple=True)
elif (c/a)**2 == d:
x, m = f.gen, c/a
g = (x**2 + a*x + b - 2*m).as_poly(x)
z1, z2 = roots_quadratic(g)
h1 = (x**2 - z1*x + m).as_poly(x)
h2 = (x**2 - z2*x + m).as_poly(x)
r1 = roots_quadratic(h1)
r2 = roots_quadratic(h2)
return r1 + r2
else:
a2 = a**2
e = b - 3*a2/8
f = c + a*(a2/8 - b/2)
g = d - a*(a*(3*a2/256 - b/16) + c/4)
aon4 = a/4
if f == 0:
y1, y2 = [sqrt(tmp) for tmp in
roots([1, e, g], multiple=True)]
return [tmp - aon4 for tmp in [-y1, -y2, y1, y2]]
if g == 0:
y = [Integer(0)] + roots([1, 0, e, f], multiple=True)
return [tmp - aon4 for tmp in y]
else:
# Descartes-Euler method, see [7]
sols = _roots_quartic_euler(e, f, g, aon4)
if sols:
return sols
# Ferrari method, see [1, 2]
a2 = a**2
e = b - 3*a2/8
f = c + a*(a2/8 - b/2)
g = d - a*(a*(3*a2/256 - b/16) + c/4)
p = -e**2/12 - g
q = -e**3/108 + e*g/3 - f**2/8
TH = Rational(1, 3)
def _ans(y):
w = sqrt(e + 2*y)
arg1 = 3*e + 2*y
arg2 = 2*f/w
ans = []
for s in [-1, 1]:
root = sqrt(-(arg1 + s*arg2))
for t in [-1, 1]:
ans.append((s*w - t*root)/2 - aon4)
return ans
# p == 0 case
y1 = -5*e/6 - q**TH
if p.is_zero:
return _ans(y1)
# if p != 0 then u below is not 0
root = sqrt(q**2/4 + p**3/27)
r = -q/2 + root # or -q/2 - root
u = r**TH # primary root of solve(x**3 - r, x)
y2 = -5*e/6 + u - p/u/3
if p.is_nonzero:
return _ans(y2)
# sort it out once they know the values of the coefficients
return [Piecewise((a1, Eq(p, 0)), (a2, True))
for a1, a2 in zip(_ans(y1), _ans(y2))]
def roots_binomial(f):
    """Returns a list of roots of a binomial polynomial. If the domain is ZZ
    then the roots will be sorted with negatives coming before positives.
    The ordering will be the same for any numerical coefficients as long as
    the assumptions tested are correct, otherwise the ordering will not be
    sorted (but will be canonical).
    """
    n = f.degree()
    a, b = f.coeff_monomial((n,)), f.coeff_monomial(1)
    # roots are alpha * zeta**k where alpha = (-b/a)**(1/n)
    base = -cancel(b/a)
    alpha = root(base, n)
    if alpha.is_number:
        alpha = alpha.expand(complex=True)
    # define some parameters that will allow us to order the roots.
    # If the domain is ZZ this is guaranteed to return roots sorted
    # with reals before non-real roots and non-real sorted according
    # to real part and imaginary part, e.g. -1, 1, -1 + I, 2 - I
    neg = base.is_negative
    even = n % 2 == 0
    if neg:
        if even and (base + 1).is_positive:
            big = True
        else:
            big = False
    # get the indices in the right order so the computed
    # roots will be sorted when the domain is ZZ
    ks = []
    imax = n//2
    if even:
        ks.append(imax)
        imax -= 1
    if not neg:
        ks.append(0)
    for i in range(imax, 0, -1):
        if neg:
            ks.extend([i, -i])
        else:
            ks.extend([-i, i])
    if neg:
        ks.append(0)
        if big:
            for i in range(0, len(ks), 2):
                pair = ks[i: i + 2]
                pair = list(reversed(pair))
                # NOTE(review): `pair` is never written back into `ks`,
                # so this loop has no effect — looks like dead code or a
                # missing `ks[i: i + 2] = pair`; confirm intent upstream.
    # compute the roots
    roots, d = [], 2*I*pi/n
    for k in ks:
        zeta = exp(k*d).expand(complex=True)
        roots.append((alpha*zeta).expand(power_base=False))
    return roots
def _inv_totient_estimate(m):
    """
    Find ``(L, U)`` such that ``L <= phi^-1(m) <= U``.

    Examples
    ========

    >>> _inv_totient_estimate(192)
    (192, 840)
    >>> _inv_totient_estimate(400)
    (400, 1750)
    """
    # primes p with p - 1 dividing m are the only primes that can divide
    # a preimage of m under Euler's totient
    primes = [d + 1 for d in divisors(m) if isprime(d + 1)]
    a, b = 1, 1
    for p in primes:
        a *= p
        b *= p - 1
    L = m
    U = math.ceil(m*(float(a)/b))
    # tighten U using the smallest primes whose product stays below U
    P = p = 2
    primes = []
    while P <= U:
        p = nextprime(p)
        primes.append(p)
        P *= p
    # drop the last prime that pushed the product past U
    P //= p
    b = 1
    for p in primes[:-1]:
        b *= p - 1
    U = math.ceil(m*(float(P)/b))
    return L, U
def roots_cyclotomic(f, factor=False):
    """Compute roots of cyclotomic polynomials.

    With ``factor=False`` the roots are returned as explicit complex
    exponentials; with ``factor=True`` they are read off from the linear
    factors of ``f`` over the appropriate extension field.
    """
    # identify which cyclotomic polynomial f is, by searching the index
    # range bounded by the inverse-totient estimate
    L, U = _inv_totient_estimate(f.degree())
    for n in range(L, U + 1):
        g = cyclotomic_poly(n, f.gen, polys=True)
        if f == g:
            break
    else:  # pragma: no cover
        raise RuntimeError('failed to find index of a cyclotomic polynomial')
    roots = []
    if not factor:
        # get the indices in the right order so the computed
        # roots will be sorted
        h = n//2
        ks = [i for i in range(1, n + 1) if math.gcd(i, n) == 1]
        ks.sort(key=lambda x: (x, -1) if x <= h else (abs(x - n), 1))
        d = 2*I*pi/n
        for k in reversed(ks):
            roots.append(exp(k*d).expand(complex=True))
    else:
        # factor over QQ(zeta_n); each linear factor yields one root
        g = f.as_poly(extension=root(-1, n))
        for h, _ in ordered(g.factor_list()[1]):
            roots.append(-h.TC())
    return roots
def roots_quintic(f):
    """Calculate exact roots of a solvable quintic.

    ``f`` must reduce to the form ``x**5 + p*x**3 + q*x**2 + r*x + s`` with
    rational coefficients; an empty list is returned whenever the input does
    not have that shape or is not solvable by radicals.
    """
    result = []
    s, r, q, p, coeff_4, coeff_5 = f.all_coeffs()
    # Eqn must be of the form x^5 + px^3 + qx^2 + rx + s
    if coeff_4:
        return result
    if coeff_5 != 1:
        l = [p/coeff_5, q/coeff_5, r/coeff_5, s/coeff_5]
        if not all(coeff.is_Rational for coeff in l):
            return result
        f = (f/coeff_5).as_poly()
    quintic = PolyQuintic(f)
    # Eqn standardized. Algo for solving starts here
    if not f.is_irreducible:
        return result
    f20 = quintic.f20
    # Check if f20 has linear factors over domain Z
    if f20.is_irreducible:
        return result
    # Now, we know that f is solvable
    _factors = f20.factor_list()[1]
    assert _factors[0][0].is_linear
    theta = _factors[0][0].root(0)
    d = discriminant(f)
    delta = sqrt(d)
    # zeta = a fifth root of unity
    zeta1, zeta2, zeta3, zeta4 = quintic.zeta
    T = quintic.T(theta, d)
    tol = Float(1e-10)
    alpha = T[1] + T[2]*delta
    alpha_bar = T[1] - T[2]*delta
    beta = T[3] + T[4]*delta
    beta_bar = T[3] - T[4]*delta
    disc = alpha**2 - 4*beta
    disc_bar = alpha_bar**2 - 4*beta_bar
    l0 = quintic.l0(theta)
    l1 = _quintic_simplify((-alpha + sqrt(disc))/2)
    l4 = _quintic_simplify((-alpha - sqrt(disc))/2)
    l2 = _quintic_simplify((-alpha_bar + sqrt(disc_bar))/2)
    l3 = _quintic_simplify((-alpha_bar - sqrt(disc_bar))/2)
    order = quintic.order(theta, d)
    test = order*delta - (l1 - l4)*(l2 - l3)
    # Comparing floats
    if not comp(test.evalf(strict=False), 0, tol):
        l2, l3 = l3, l2
    # Now we have correct order of l's
    R1 = l0 + l1*zeta1 + l2*zeta2 + l3*zeta3 + l4*zeta4
    R2 = l0 + l3*zeta1 + l1*zeta2 + l4*zeta3 + l2*zeta4
    R3 = l0 + l2*zeta1 + l4*zeta2 + l1*zeta3 + l3*zeta4
    R4 = l0 + l4*zeta1 + l3*zeta2 + l2*zeta3 + l1*zeta4
    Res = [None, [None]*5, [None]*5, [None]*5, [None]*5]
    Res_n = [None, [None]*5, [None]*5, [None]*5, [None]*5]
    sol = Symbol('sol')
    # Simplifying improves performance a lot for exact expressions
    R1 = _quintic_simplify(R1)
    R2 = _quintic_simplify(R2)
    R3 = _quintic_simplify(R3)
    R4 = _quintic_simplify(R4)
    # Solve imported here. Causing problems if imported as 'solve'
    # and hence the changed name
    from ..solvers import solve as _solve
    a, b = symbols('a b', cls=Dummy)
    _sol = _solve(sol**5 - a - I*b, sol)
    for i in range(5):
        _sol[i] = factor(_sol[i][sol])
    R1 = R1.as_real_imag()
    R2 = R2.as_real_imag()
    R3 = R3.as_real_imag()
    R4 = R4.as_real_imag()
    for i, root in enumerate(_sol):
        Res[1][i] = _quintic_simplify(root.subs({a: R1[0], b: R1[1]}))
        Res[2][i] = _quintic_simplify(root.subs({a: R2[0], b: R2[1]}))
        Res[3][i] = _quintic_simplify(root.subs({a: R3[0], b: R3[1]}))
        Res[4][i] = _quintic_simplify(root.subs({a: R4[0], b: R4[1]}))
    for i in range(1, 5):
        for j in range(5):
            Res_n[i][j] = Res[i][j].evalf()
            Res[i][j] = _quintic_simplify(Res[i][j])
    r1 = Res[1][0]
    r1_n = Res_n[1][0]
    for i in range(5):  # pragma: no branch
        if comp(im(r1_n*Res_n[4][i]), 0, tol):
            r4 = Res[4][i]
            break
    # Now we have various Res values. Each will be a list of five
    # values. We have to pick one r value from those five for each Res.
    # (A second, redundant call to quintic.uv was removed here.)
    u, v = quintic.uv(theta, d)
    testplus = (u + v*delta*sqrt(5)).evalf(strict=False)
    testminus = (u - v*delta*sqrt(5)).evalf(strict=False)
    # Evaluated numbers suffixed with _n
    # We will use evaluated numbers for calculation. Much faster.
    r4_n = r4.evalf()
    r2 = r3 = None
    for i in range(5):  # pragma: no branch
        r2temp_n = Res_n[2][i]
        for j in range(5):
            # Again storing away the exact number and using
            # evaluated numbers in computations
            r3temp_n = Res_n[3][j]
            if (comp(r1_n*r2temp_n**2 + r4_n*r3temp_n**2 - testplus, 0, tol) and
                    comp(r3temp_n*r1_n**2 + r2temp_n*r4_n**2 - testminus, 0, tol)):
                r2 = Res[2][i]
                r3 = Res[3][j]
                break
        if r2:
            break
    # Now, we have r's so we can get roots
    x1 = (r1 + r2 + r3 + r4)/5
    x2 = (r1*zeta4 + r2*zeta3 + r3*zeta2 + r4*zeta1)/5
    x3 = (r1*zeta3 + r2*zeta1 + r3*zeta4 + r4*zeta2)/5
    x4 = (r1*zeta2 + r2*zeta4 + r3*zeta1 + r4*zeta3)/5
    x5 = (r1*zeta1 + r2*zeta2 + r3*zeta3 + r4*zeta4)/5
    return [x1, x2, x3, x4, x5]
def _quintic_simplify(expr):
    """Canonicalize a quintic intermediate: powsimp, cancel, then together."""
    return together(cancel(powsimp(expr)))
def _integer_basis(poly):
    """Compute coefficient basis for a polynomial over integers.

    Returns the integer ``div`` such that substituting ``x = div*y``
    ``p(x) = m*q(y)`` where the coefficients of ``q`` are smaller
    than those of ``p``.

    For example ``x**5 + 512*x + 1024 = 0``
    with ``div = 4`` becomes ``y**5 + 2*y + 1 = 0``

    Returns the integer ``div`` or ``None`` if there is no possible scaling.

    Examples
    ========

    >>> p = (x**5 + 512*x + 1024).as_poly()
    >>> _integer_basis(p)
    4
    """
    if poly.is_zero:
        return
    monoms, coeffs = zip(*poly.terms())
    monoms, = zip(*monoms)
    coeffs = list(map(abs, coeffs))
    # Only worthwhile when the low-order coefficients dominate; flip the
    # coefficient/monomial order so larger coefficients come first.
    if coeffs[0] < coeffs[-1]:
        coeffs = list(reversed(coeffs))
        n = monoms[0]
        monoms = [n - i for i in reversed(monoms)]
    else:
        return
    # drop the leading term — it is unaffected by the scaling
    monoms = monoms[:-1]
    coeffs = coeffs[:-1]
    # try divisors of the coefficient gcd, largest first
    divs = reversed(divisors(math.gcd(*coeffs))[1:])
    try:
        div = next(divs)
    except StopIteration:
        return
    while True:
        # div works iff div**monom divides the matching coefficient for
        # every term; otherwise move to the next smaller candidate
        for monom, coeff in zip(monoms, coeffs):
            if coeff % div**monom != 0:
                try:
                    div = next(divs)
                except StopIteration:
                    return
                else:
                    break
        else:
            # all terms divisible: this is the basis
            return div
def preprocess_roots(poly):
    """Try to get rid of symbolic coefficients from ``poly``.

    Returns ``(coeff, poly)`` such that the roots of the input are ``coeff``
    times the roots of the returned polynomial.
    """
    coeff = Integer(1)
    # clear denominators and content, then retract to the smallest domain
    _, poly = poly.clear_denoms(convert=True)
    poly = poly.primitive()[1]
    poly = poly.retract()
    if poly.domain.is_PolynomialRing and all(c.is_term for c in poly.rep.values()):
        # coefficients are monomials: try to eliminate each extra generator
        # by absorbing a power of it into ``coeff``
        poly = poly.inject()
        strips = list(zip(*poly.monoms()))
        gens = list(poly.gens[1:])
        base, strips = strips[0], strips[1:]
        for gen, strip in zip(list(gens), strips):
            reverse = False
            if strip[0] < strip[-1]:
                strip = reversed(strip)
                reverse = True
            ratio = None
            # the generator can be scaled out only if its exponents are a
            # single consistent multiple of the main variable's exponents
            for a, b in zip(base, strip):
                if not a and not b:
                    continue
                elif not a or not b:
                    break
                elif b % a != 0:
                    break
                else:
                    _ratio = b // a
                    if ratio is None:
                        ratio = _ratio
                    elif ratio != _ratio:
                        break
            else:
                # consistent ratio found: substitute gen -> 1 and record
                # the scaling factor gen**(-ratio)
                if reverse:
                    ratio = -ratio
                poly = poly.eval(gen, 1)
                coeff *= gen**(-ratio)
                gens.remove(gen)
        if gens:
            poly = poly.eject(*gens)
    if poly.is_univariate and poly.domain.is_IntegerRing:
        # shrink integer coefficients via x = basis*y when possible
        basis = _integer_basis(poly)
        if basis is not None:
            n = poly.degree()

            def func(k, coeff):
                return coeff//basis**(n - k[0])

            poly = poly.termwise(func)
            coeff *= basis
    return coeff, poly
def roots(f, *gens, **flags):
    """
    Computes symbolic roots of a univariate polynomial.

    Given a univariate polynomial f with symbolic coefficients (or
    a list of the polynomial's coefficients), returns a dictionary
    with its roots and their multiplicities.

    Only roots expressible via radicals will be returned. To get
    a complete set of roots use RootOf class or numerical methods
    instead. By default cubic and quartic formulas are used in
    the algorithm. To disable them because of unreadable output
    set ``cubics=False`` or ``quartics=False`` respectively. If cubic
    roots are real but are expressed in terms of complex numbers
    (casus irreducibilis) the ``trig`` flag can be set to True to
    have the solutions returned in terms of cosine and inverse cosine
    functions.

    To get roots from a specific domain set the ``filter`` flag with
    one of the following specifiers: Z, Q, R, I, C. By default all
    roots are returned (this is equivalent to setting ``filter='C'``).

    By default a dictionary is returned giving a compact result in
    case of multiple roots. However to get a list containing all
    those roots set the ``multiple`` flag to True; the list will
    have identical roots appearing next to each other in the result.
    (For a given Poly, the all_roots method will give the roots in
    sorted numerical order.)

    Examples
    ========

    >>> roots(x**2 - 1, x)
    {-1: 1, 1: 1}

    >>> p = (x**2 - 1).as_poly()
    >>> roots(p)
    {-1: 1, 1: 1}

    >>> p = (x**2 - y).as_poly()

    >>> roots(p.as_poly(x))
    {-sqrt(y): 1, sqrt(y): 1}

    >>> roots(x**2 - y, x)
    {-sqrt(y): 1, sqrt(y): 1}

    >>> roots([1, 0, -1])
    {-1: 1, 1: 1}

    References
    ==========

    * https://en.wikipedia.org/wiki/Cubic_equation#Trigonometric_and_hyperbolic_solutions
    """
    from .polytools import to_rational_coeffs
    flags = dict(flags)

    # option flags consumed here; anything left is passed to Poly()
    auto = flags.pop('auto', True)
    cubics = flags.pop('cubics', True)
    trig = flags.pop('trig', False)
    quartics = flags.pop('quartics', True)
    quintics = flags.pop('quintics', False)
    multiple = flags.pop('multiple', False)
    filter = flags.pop('filter', None)
    predicate = flags.pop('predicate', None)

    if isinstance(f, list):
        # a plain coefficient list: build a Poly in a fresh dummy variable
        if gens:
            raise ValueError('redundant generators given')
        x = Dummy('x')
        poly, i = {}, len(f) - 1
        for coeff in f:
            poly[i], i = sympify(coeff), i - 1
        f = Poly(poly, x, field=True)
    else:
        try:
            f = Poly(f, *gens, **flags)
        except GeneratorsNeeded:
            # constant input: no roots
            if multiple:
                return []
            else:
                return {}

    if f.is_multivariate:
        raise PolynomialError('multivariate polynomials are not supported')

    def _update_dict(result, root, k):
        # accumulate multiplicity k for a (possibly repeated) root
        if root in result:
            result[root] += k
        else:
            result[root] = k

    def _try_decompose(f):
        """Find roots using functional decomposition."""
        factors, roots = f.decompose(), []
        for root in _try_heuristics(factors[0]):
            roots.append(root)
        for factor in factors[1:]:
            previous, roots = list(roots), []
            for root in previous:
                g = factor - root.as_poly(f.gen, extension=False)
                for root in _try_heuristics(g):
                    roots.append(root)
        return roots

    def _try_heuristics(f):
        """Find roots using formulas and some tricks."""
        if f.is_ground:
            return []
        if f.length() == 2:
            if f.degree() == 1:
                return list(map(cancel, roots_linear(f)))
            else:
                return roots_binomial(f)
        result = []
        n = f.degree()
        if n == 2:
            result += list(map(cancel, roots_quadratic(f)))
        elif f.is_cyclotomic:
            result += roots_cyclotomic(f)
        elif n == 3 and cubics:
            result += roots_cubic(f, trig=trig)
        elif n == 4 and quartics:
            result += roots_quartic(f)
        elif n == 5 and quintics:
            result += roots_quintic(f)
        return result

    # strip a common power of the generator: x**k contributes a zero root
    # of multiplicity k
    (k,), f = f.terms_gcd()

    if not k:
        zeros = {}
    else:
        zeros = {Integer(0): k}

    coeff, f = preprocess_roots(f)

    if auto and f.domain.is_Ring:
        f = f.to_field()

    rescale_x = None
    translate_x = None

    result = {}

    if not f.is_ground:
        if not f.domain.is_Exact:
            # inexact coefficients: fall back to numerical roots
            for r in f.nroots(n=f.domain.dps):
                _update_dict(result, r, 1)
        elif f.degree() == 1:
            result[roots_linear(f)[0]] = 1
        elif f.length() == 2:
            roots_fun = roots_quadratic if f.degree() == 2 else roots_binomial
            for r in roots_fun(f):
                _update_dict(result, r, 1)
        else:
            _, factors = f.as_expr().as_poly().factor_list()
            if len(factors) == 1 and f.degree() == 2:
                for r in roots_quadratic(f):
                    _update_dict(result, r, 1)
            else:
                if len(factors) == 1 and factors[0][1] == 1:
                    if f.domain.is_ExpressionDomain:
                        # try to rationalize the coefficients, recording the
                        # inverse transformation for the roots
                        res = to_rational_coeffs(f)
                        if res:
                            if res[0] is None:
                                translate_x, f = res[2:]
                            else:
                                rescale_x, f = res[1], res[-1]
                            result = roots(f)
                    else:
                        for root in _try_decompose(f):
                            _update_dict(result, root, 1)
                else:
                    # work factor by factor, honoring multiplicities
                    for factor, k in factors:
                        for r in _try_heuristics(factor.as_poly(f.gen, field=True)):
                            _update_dict(result, r, k)

    if coeff != 1:
        # undo the scaling introduced by preprocess_roots
        _result, result, = result, {}

        for root, k in _result.items():
            result[coeff*root] = k

    result.update(zeros)

    if filter not in [None, 'C']:
        handlers = {
            'Z': lambda r: r.is_Integer,
            'Q': lambda r: r.is_Rational,
            'R': lambda r: r.is_extended_real,
            'I': lambda r: r.is_imaginary,
        }

        try:
            query = handlers[filter]
        except KeyError:
            raise ValueError(f'Invalid filter: {filter}')

        for zero in dict(result):
            if not query(zero):
                del result[zero]

    if predicate is not None:
        for zero in dict(result):
            if not predicate(zero):
                del result[zero]
    # undo the transformations applied by to_rational_coeffs, if any
    if rescale_x:
        result1 = {}
        for k, v in result.items():
            result1[k*rescale_x] = v
        result = result1
    if translate_x:
        result1 = {}
        for k, v in result.items():
            result1[k + translate_x] = v
        result = result1

    if not multiple:
        return result
    else:
        zeros = []

        for zero in ordered(result):
            zeros.extend([zero]*result[zero])

        return zeros
def root_factors(f, *gens, **args):
    """
    Returns all factors of a univariate polynomial.

    Examples
    ========

    >>> root_factors(x**2 - y, x)
    [x - sqrt(y), x + sqrt(y)]
    """
    opts = dict(args)
    filter = opts.pop('filter', None)

    F = f.as_poly(*gens, **opts)

    if F.is_multivariate:
        raise ValueError('multivariate polynomials are not supported')

    x = F.gens[0]
    zeros = roots(F, filter=filter)

    if not zeros:
        factors = [F]
    else:
        factors = []
        N = 0
        # one linear factor per root, repeated according to multiplicity
        for zero, mult in ordered(zeros.items()):
            factors.extend([(x - zero).as_poly(x)]*mult)
            N += mult
        if N < F.degree():
            # roots() found only part of the roots: the remaining factor is
            # the quotient by the product of the known linear factors
            G = functools.reduce(lambda p, q: p*q, factors)
            factors.append(F.quo(G))

    if not isinstance(f, Poly):
        factors = [g.as_expr() for g in factors]
    return factors
| {
"repo_name": "diofant/diofant",
"path": "diofant/polys/polyroots.py",
"copies": "1",
"size": "28966",
"license": "bsd-3-clause",
"hash": 6566477803474561000,
"line_mean": 27.3702252693,
"line_max": 89,
"alpha_frac": 0.5167437686,
"autogenerated": false,
"ratio": 3.179582875960483,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9195833641303839,
"avg_score": 0.00009860065132872186,
"num_lines": 1021
} |
'''Algorithms for converting grammars to Chomsky Normal Form.'''
from cfg.core import ContextFreeGrammar, Terminal, Nonterminal, \
ProductionRule, SubscriptedNonterminal
from util.moreitertools import powerset
def is_cnf_rule(r, start):
    '''Return whether a production rule is in CNF. Must indicate the grammar's
    start variable.'''
    rs = r.right_side
    # A -> a (single terminal)
    if len(rs) == 1 and rs[0].is_terminal():
        return True
    # A -> B C (two nonterminals, neither the start variable)
    if len(rs) == 2 and all(x.is_nonterminal() and x != start for x in rs):
        return True
    # S -> e (empty right side allowed only for the start variable)
    return r.left_side == start and not rs
def is_cnf(G):
    '''Return whether a grammar is in CNF.'''
    return all(is_cnf_rule(p, G.start) for p in G.productions)
def _first_rule_that(productions, pred):
for i, p in enumerate(productions):
if pred(p):
return i
def _first_empty_rule(productions, start):
    '''Index of the first rule with an empty right side whose left side is
    not the start variable, or None.'''
    def _is_removable_epsilon(rule):
        return not rule.right_side and not rule.left_side == start
    return _first_rule_that(productions, _is_removable_epsilon)
def _first_unit_rule(productions):
    '''Index of the first unit rule (right side is exactly one nonterminal),
    or None.'''
    def _is_unit(rule):
        rs = rule.right_side
        return len(rs) == 1 and isinstance(rs[0], Nonterminal)
    return _first_rule_that(productions, _is_unit)
def substitutions(sentence, production):
    '''Returns all of the distinct ways of applying a derivation rule to a
    sentence, including no change at all.'''
    # positions where the rule's left side occurs in the sentence
    occurrences = [i for i, sym in enumerate(sentence)
                   if sym == production.left_side]
    result = []
    # expand every subset of those positions (the empty subset leaves the
    # sentence unchanged), deduplicating as we go
    for chosen in powerset(occurrences):
        candidate = []
        for i, sym in enumerate(sentence):
            if i in chosen:
                candidate.extend(production.right_side)
            else:
                candidate.append(sym)
        if candidate not in result:
            result.append(candidate)
    return result
def chain(p, used_variables):
    '''Given a production rule p, return a list of equivalent rules such that
    the right side of each rule is no more than two symbols long.

    used_variables is the set of variables already taken; each freshly
    introduced nonterminal is added to it for the recursive call so names
    never collide.'''
    rs = p.right_side
    if len(rs) <= 2:
        # already short enough — nothing to do
        return [p]
    first = rs[0]
    # name the fresh variable after the tail it will derive
    second_name = ''.join([str(s) for s in rs[1:]])
    second = SubscriptedNonterminal.next_unused(second_name, used_variables)
    # A -> X1 X2 ... Xn  becomes  A -> X1 N  plus the chained N -> X2 ... Xn
    first_new_rule = ProductionRule(p.left_side, (first, second))
    second_new_rule = ProductionRule(second, rs[1:])
    return [first_new_rule] + \
        chain(second_new_rule, used_variables | set([second]))
def get_variables(productions):
    '''Return a set of all the variables which appear in a list of productions.
    '''
    variables = set()
    for rule in productions:
        variables.add(rule.left_side)
        variables.update(sym for sym in rule.right_side
                         if isinstance(sym, Nonterminal))
    return variables
def replace_terminals(p, proxy_rules):
    '''Replace all the terminal symbols in a production rule with equivalent
    variables, given a mapping from terminals to proxy production rules. Return
    a pair containing the fixed rule and a list of the terminals replaced.'''
    rs = p.right_side
    # Rules with a single symbol (A -> a) are already CNF, and the proxy
    # rules themselves must not be rewritten.
    # BUG FIX: the original used dict.itervalues(), which exists only on
    # Python 2; .values() behaves identically on both Python 2 and 3.
    if len(rs) < 2 or p in proxy_rules.values():
        return p, []
    new_rs = []
    replaced = []
    for s in rs:
        if isinstance(s, Terminal):
            # substitute the terminal's proxy variable
            new_rs.append(proxy_rules[s].left_side)
            replaced.append(s)
        else:
            new_rs.append(s)
    return ProductionRule(p.left_side, new_rs), replaced
def ChomskyNormalForm(G):
    '''Given a CFG G, return an equivalent CFG in Chomsky normal form.

    Follows the standard pipeline: new start variable, epsilon-rule removal,
    unit-rule removal, right-side chaining, and terminal proxying.'''
    productions = list(G.productions)
    # Add a new start variable S0 and add the rule S0 -> S
    S0 = SubscriptedNonterminal(G.start.name, 0)
    productions[:0] = [ProductionRule(S0, [G.start])]
    # Remove e rules, remembering removed ones so they are not re-added
    removed_rules = []
    while True:
        i = _first_empty_rule(productions, S0)
        if i is None:
            break
        pe = productions[i]
        removed_rules.append(pe)
        del productions[i]
        new_rules = [ProductionRule(rule.left_side, sentence)
                     for rule in productions[1:]
                     for sentence in substitutions(rule.right_side, pe)]
        productions[1:] = [r for r in new_rules if r not in removed_rules]
    # Remove unit rules
    removed_rules = []
    while True:
        i = _first_unit_rule(productions)
        if i is None:
            break
        pu = productions[i]
        removed_rules.append(pu)
        new_rules = [ProductionRule(pu.left_side, p.right_side)
                     for p in productions if p.left_side == pu.right_side[0]]
        productions[i:i+1] = [r for r in new_rules if r not in productions
                              and r not in removed_rules]
    # Chain right sides of rules so none is longer than two symbols
    i = 0
    while i < len(productions):
        new_rules = chain(productions[i], get_variables(productions))
        productions[i:i+1] = new_rules
        i += len(new_rules)
    # Replace terminal symbols with proxy variables
    terminals = G.terminals
    variables = get_variables(productions)
    proxy_rules = \
        {t: ProductionRule(
            SubscriptedNonterminal.next_unused(t.name.upper(), variables),
            [t]
        ) for t in terminals}
    added = {t: False for t in terminals}
    i = 0
    while i < len(productions):
        new_rule, replaced = replace_terminals(productions[i], proxy_rules)
        productions[i] = new_rule
        for t in replaced:
            if not added[t]:
                productions.append(proxy_rules[t])
                added[t] = True
        # BUG FIX: was `i += len(new_rules)`, reusing the stale value left
        # over from the chaining loop above and thereby skipping rules.
        # Each iteration rewrites exactly one production, so advance by one.
        i += 1
    return ContextFreeGrammar(productions)
| {
"repo_name": "bdusell/pycfg",
"path": "src/cfg/cnf.py",
"copies": "1",
"size": "5697",
"license": "mit",
"hash": 3390698130314284000,
"line_mean": 35.5192307692,
"line_max": 79,
"alpha_frac": 0.5961032122,
"autogenerated": false,
"ratio": 3.8260577568838148,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4922160969083815,
"avg_score": null,
"num_lines": null
} |
"""Algorithms for determination of Laue group symmetry."""
import json
import logging
import math
import scipy.stats
import libtbx
from cctbx import crystal, sgtbx
from scitbx.array_family import flex
from scitbx.math import five_number_summary
import dials.util
from dials.algorithms.symmetry import symmetry_base
logger = logging.getLogger(__name__)
class LaueGroupAnalysis(symmetry_base):
"""Determination of Laue group symmetry using algorithms similar to POINTLESS.
See also:
`Evans, P. (2006). Acta Cryst. D62, 72-82
<https://doi.org/10.1107/S0907444905036693>`_ and
`Evans, P. R. (2011). Acta Cryst. D67, 282-292
<https://doi.org/10.1107/S090744491003982X>`_.
"""
    def __init__(
        self,
        intensities,
        normalisation="ml_aniso",
        lattice_symmetry_max_delta=2.0,
        d_min=libtbx.Auto,
        min_i_mean_over_sigma_mean=4,
        min_cc_half=0.6,
        relative_length_tolerance=None,
        absolute_angle_tolerance=None,
        best_monoclinic_beta=True,
    ):
        """Initialise a LaueGroupAnalysis object.

        Args:
          intensities (cctbx.miller.array): The intensities on which to perform
            symmetry analysis.
          normalisation (str): The normalisation method to use. Possible choices are
            'kernel', 'quasi', 'ml_iso' and 'ml_aniso'. Set to None to switch off
            normalisation altogether.
          lattice_symmetry_max_delta (float): The maximum value of delta for
            determining the lattice symmetry using the algorithm of Le Page (1982).
          d_min (float): Optional resolution cutoff to be applied to the input
            intensities. If set to :data:`libtbx.Auto` then d_min will be
            automatically determined according to the parameters
            ``min_i_mean_over_sigma_mean`` and ``min_cc_half``.
          min_i_mean_over_sigma_mean (float): minimum value of :math:`|I|/|sigma(i)|` for
            automatic determination of resolution cutoff.
          min_cc_half (float): minimum value of CC1/2 for automatic determination of
            resolution cutoff.
          relative_length_tolerance (float): Relative length tolerance in checking
            consistency of input unit cells against the median unit cell.
          absolute_angle_tolerance (float): Absolute angle tolerance in checking
            consistency of input unit cells against the median unit cell.
          best_monoclinic_beta (bool): If True, then for monoclinic centered cells, I2
            will be preferred over C2 if it gives a more oblique cell (i.e. smaller
            beta angle).
        """
        # Base class handles normalisation and resolution filtering of the data.
        super().__init__(
            intensities,
            normalisation=normalisation,
            lattice_symmetry_max_delta=lattice_symmetry_max_delta,
            d_min=d_min,
            min_i_mean_over_sigma_mean=min_i_mean_over_sigma_mean,
            min_cc_half=min_cc_half,
            relative_length_tolerance=relative_length_tolerance,
            absolute_angle_tolerance=absolute_angle_tolerance,
            best_monoclinic_beta=best_monoclinic_beta,
        )
        # Analysis pipeline: estimate sigma(CC) and the expected CC for the
        # true symmetry, then score individual symmetry elements and finally
        # the candidate Laue groups.
        self._estimate_cc_sig_fac()
        self._estimate_cc_true()
        self._score_symmetry_elements()
        self._score_laue_groups()
    def _estimate_cc_sig_fac(self):
        """Estimation of sigma(CC) as a function of sample size.

        Estimate the error in the correlation coefficient, sigma(CC), by using
        pairs of reflections at similar resolutions that are not related by
        potential symmetry. Using pairs of unrelated reflections at similar
        resolutions, calculate sigma(CC) == rms(CC) for groups of size
        N = min_n_group..max_n_group. The constant cc_sig_fac is obtained from
        a linear fit of sigma(CC) to 1/N^(1/2), i.e.:

            sigma(CC) = cc_sig_fac / N^(1/2)

        Sets ``self.corr_unrelated`` (the CC over all sampled unrelated pairs)
        and ``self.cc_sig_fac`` (0 if there are too few pairs for the fit).
        """
        # Bin on resolution so paired reflections come from similar shells.
        max_bins = 500
        reflections_per_bin = max(
            200, int(math.ceil(self.intensities.size() / max_bins))
        )
        binner = self.intensities.setup_binner_counting_sorted(
            reflections_per_bin=reflections_per_bin
        )
        a = flex.double()
        b = flex.double()
        # Map to the asu of the full lattice group, so that reflections related
        # by any potential symmetry share indices and can be excluded below.
        ma_tmp = self.intensities.customized_copy(
            crystal_symmetry=crystal.symmetry(
                space_group=self.lattice_group,
                unit_cell=self.intensities.unit_cell(),
                assert_is_compatible_unit_cell=False,
            )
        ).map_to_asu()
        for i in range(binner.n_bins_all()):
            count = binner.counts()[i]
            if count == 0:
                continue
            # Randomly split each bin into two halves to form candidate pairs.
            bin_isel = binner.array_indices(i)
            p = flex.random_permutation(count)
            p = p[: 2 * (count // 2)]  # ensure even count
            ma_a = ma_tmp.select(bin_isel.select(p[: count // 2]))
            ma_b = ma_tmp.select(bin_isel.select(p[count // 2 :]))
            # only choose pairs of reflections that don't have the same indices
            # in the asu of the lattice group
            sel = ma_a.indices() != ma_b.indices()
            a.extend(ma_a.data().select(sel))
            b.extend(ma_b.data().select(sel))
        # Cap the number of pairs used for the analysis at 20000.
        perm = flex.random_selection(a.size(), min(20000, a.size()))
        a = a.select(perm)
        b = b.select(perm)
        self.corr_unrelated = CorrelationCoefficientAccumulator(a, b)
        n_pairs = a.size()
        min_num_groups = 10  # minimum number of groups
        max_n_group = int(min(n_pairs / min_num_groups, 200))  # maximum number in group
        min_n_group = int(min(5, max_n_group))  # minimum number in group
        if (max_n_group - min_n_group) < 4:
            # Too few distinct group sizes for a meaningful linear fit.
            self.cc_sig_fac = 0
            return
        mean_ccs = flex.double()
        rms_ccs = flex.double()
        ns = flex.double()
        # For each group size n, estimate rms(CC) over 200 random subsets.
        for n in range(min_n_group, max_n_group + 1):
            ns.append(n)
            ccs = flex.double()
            for i in range(200):
                isel = flex.random_selection(a.size(), n)
                corr = CorrelationCoefficientAccumulator(a.select(isel), b.select(isel))
                ccs.append(corr.coefficient())
            mean_ccs.append(flex.mean(ccs))
            rms_ccs.append(flex.mean(flex.pow2(ccs)) ** 0.5)
        # Fit rms(CC) against 1/sqrt(n); the slope is cc_sig_fac.
        x = 1 / flex.pow(ns, 0.5)
        y = rms_ccs
        fit = flex.linear_regression(x, y)
        if fit.is_well_defined():
            self.cc_sig_fac = fit.slope()
        else:
            self.cc_sig_fac = 0
def _estimate_cc_true(self):
# A1.2. Estimation of E(CC; S).
# (i)
var_intensities = flex.mean_and_variance(
self.intensities.data()
).unweighted_sample_variance()
var_sigmas = flex.mean_and_variance(flex.pow2(self.intensities.sigmas())).mean()
self.E_cc_true = var_intensities / (var_intensities + var_sigmas)
# (ii)
reindexed_intensities = self.intensities.change_basis(
sgtbx.change_of_basis_op("-x,-y,-z")
).map_to_asu()
x, y = self.intensities.common_sets(
reindexed_intensities, assert_is_similar_symmetry=False
)
self.cc_identity = CorrelationCoefficientAccumulator(x.data(), y.data())
min_sd = 0.05
min_sample = 10
sigma_1 = max(min_sd, self.cc_sig_fac / 200 ** 0.5)
w1 = 0
w2 = 0
if sigma_1 > 0.0001:
w1 = 1 / sigma_1 ** 2
if self.cc_identity.n() > min_sample:
sigma_2 = max(min_sd, self.cc_sig_fac / self.cc_identity.n() ** 0.5)
w2 = 1 / sigma_2 ** 2
assert (w1 + w2) > 0
self.cc_true = (w1 * self.E_cc_true + w2 * self.cc_identity.coefficient()) / (
w1 + w2
)
logger.debug("cc_true = w1 * E_cc_true + w2 * cc_identity)/(w1 + w2)")
logger.debug("w1: %g", w1)
logger.debug("w2: %g", w2)
logger.debug("E_cc_true: %g", self.E_cc_true)
logger.debug("cc_identity: %g", self.cc_identity.coefficient())
logger.debug("cc_true: %g", self.cc_true)
def _score_symmetry_elements(self):
self.sym_op_scores = []
for smx in self.lattice_group.smx():
if smx.r().info().sense() < 0:
continue
self.sym_op_scores.append(
ScoreSymmetryElement(
self.intensities, smx, self.cc_true, self.cc_sig_fac
)
)
def _score_laue_groups(self):
subgroup_scores = [
ScoreSubGroup(subgrp, self.sym_op_scores)
for subgrp in self.subgroups.result_groups
]
total_likelihood = sum(score.likelihood for score in subgroup_scores)
for score in subgroup_scores:
score.likelihood /= total_likelihood
self.subgroup_scores = sorted(
subgroup_scores, key=lambda score: score.likelihood, reverse=True
)
# The 'confidence' scores are derived from the total probability of the best
# solution p_best and that for the next best solution p_next:
# confidence = [p_best * (p_best - p_next)]^1/2.
for i, score in enumerate(self.subgroup_scores[:-1]):
next_score = self.subgroup_scores[i + 1]
if score.likelihood > 0 and next_score.likelihood > 0:
lgc = score.likelihood * (score.likelihood - next_score.likelihood)
confidence = abs(lgc) ** 0.5
if lgc < 0:
confidence = -confidence
score.confidence = confidence
self.best_solution = self.subgroup_scores[0]
    def __str__(self):
        """Return a human-readable summary of the results.

        Includes the input symmetry, the per-element scoring table, the
        per-subgroup scoring table and the best solution.

        Returns:
          str:
        """
        output = []
        # Input symmetry and the minimum-cell setting used for the analysis.
        output.append("Input crystal symmetry:")
        output.append(
            str(
                self.input_intensities[0]
                .crystal_symmetry()
                .customized_copy(unit_cell=self.median_unit_cell)
            )
        )
        output.append(f"Change of basis op to minimum cell: {self.cb_op_inp_min}")
        output.append("Crystal symmetry in minimum cell:")
        output.append(str(self.intensities.crystal_symmetry()))
        output.append(f"Lattice point group: {self.lattice_group.info()}")
        # Error-model statistics estimated from unrelated reflection pairs.
        output.append(
            "\nOverall CC for %i unrelated pairs: %.3f"
            % (self.corr_unrelated.n(), self.corr_unrelated.coefficient())
        )
        output.append(
            "Estimated expectation value of true correlation coefficient E(CC) = %.3f"
            % self.E_cc_true
        )
        if self.cc_sig_fac:
            output.append(f"Estimated sd(CC) = {self.cc_sig_fac:.3f} / sqrt(N)")
        else:
            output.append("Too few reflections to estimate sd(CC).")
        output.append(
            "Estimated E(CC) of true correlation coefficient from identity = %.3f"
            % self.cc_true
        )
        # Table of individual symmetry-element scores; stars flag high
        # likelihood elements.
        header = ("likelihood", "Z-CC", "CC", "N", "", "Operator")
        rows = [header]
        for score in self.sym_op_scores:
            if score.likelihood > 0.9:
                stars = "***"
            elif score.likelihood > 0.7:
                stars = "**"
            elif score.likelihood > 0.5:
                stars = "*"
            else:
                stars = ""
            rows.append(
                (
                    f"{score.likelihood:.3f}",
                    f"{score.z_cc:.2f}",
                    f"{score.cc.coefficient():.2f}",
                    "%i" % score.n_refs,
                    stars,
                    f"{score.sym_op.r().info()}",
                )
            )
        output.append("\n" + "-" * 80 + "\n")
        output.append("Scoring individual symmetry elements\n")
        output.append(dials.util.tabulate(rows, headers="firstrow"))
        # Table of candidate subgroup (Patterson group) scores.
        header = (
            "Patterson group",
            "",
            "Likelihood",
            "NetZcc",
            "Zcc+",
            "Zcc-",
            "CC",
            "CC-",
            "delta",
            "Reindex operator",
        )
        rows = [header]
        for score in self.subgroup_scores:
            if score.likelihood > 0.8:
                stars = "***"
            elif score.likelihood > 0.6:
                stars = "**"
            elif score.likelihood > 0.4:
                stars = "*"
            else:
                stars = ""
            rows.append(
                (
                    f"{score.subgroup['best_subsym'].space_group_info()}",
                    stars,
                    f"{score.likelihood:.3f}",
                    f"{score.z_cc_net: .2f}",
                    f"{score.z_cc_for: .2f}",
                    f"{score.z_cc_against: .2f}",
                    f"{score.cc_for.coefficient(): .2f}",
                    f"{score.cc_against.coefficient(): .2f}",
                    f"{score.subgroup['max_angular_difference']:.1f}",
                    f"{score.subgroup['cb_op_inp_best']}",
                )
            )
        output.append("\n" + "-" * 80 + "\n")
        output.append("Scoring all possible sub-groups\n")
        output.append(dials.util.tabulate(rows, headers="firstrow"))
        # Summary of the top-ranked solution.
        output.append(
            "\nBest solution: %s"
            % self.best_solution.subgroup["best_subsym"].space_group_info()
        )
        output.append(
            f"Unit cell: {self.best_solution.subgroup['best_subsym'].unit_cell()}"
        )
        output.append(
            f"Reindex operator: {self.best_solution.subgroup['cb_op_inp_best']}"
        )
        output.append(f"Laue group probability: {self.best_solution.likelihood:.3f}")
        output.append(f"Laue group confidence: {self.best_solution.confidence:.3f}\n")
        return "\n".join(output)
def as_dict(self):
"""Return a dictionary representation of the results.
Returns:
dict
"""
d = {
"input_symmetry": {
"hall_symbol": self.input_intensities[0]
.space_group()
.type()
.hall_symbol(),
"unit_cell": self.median_unit_cell.parameters(),
},
"cb_op_inp_min": self.cb_op_inp_min.as_xyz(),
"lattice_point_group": self.lattice_group.type().hall_symbol(),
"cc_unrelated_pairs": self.corr_unrelated.coefficient(),
"n_unrelated_pairs": self.corr_unrelated.n(),
"E_cc_true": self.E_cc_true,
"cc_sig_fac": self.cc_sig_fac,
"cc_true": self.cc_true,
}
d["sym_op_scores"] = [score.as_dict() for score in self.sym_op_scores]
d["subgroup_scores"] = [score.as_dict() for score in self.subgroup_scores]
return d
def as_json(self, filename=None, indent=2):
"""Return a json representation of the results.
Args:
filename (str): Optional filename to export the json representation of
the results.
indent (int): The indent level for pretty-printing of the json. If ``None``
is the most compact representation.
Returns:
str:
"""
json_str = json.dumps(self.as_dict(), indent=indent)
if filename:
with open(filename, "w") as f:
f.write(json_str)
return json_str
class ScoreCorrelationCoefficient:
    """Score an observed CC against the hypothesis that a symmetry is present.

    Computes p(CC; S), p(CC; !S) and the resulting likelihood p(S; CC) from an
    observed correlation coefficient, its estimated standard deviation and the
    expected CC under the symmetry hypothesis.
    """

    def __init__(self, cc, sigma_cc, expected_cc, lower_bound=-1, upper_bound=1, k=2):
        self.cc = cc
        self.sigma_cc = sigma_cc
        self.expected_cc = expected_cc
        self._lower_bound = lower_bound
        self._upper_bound = upper_bound
        self._k = k
        self._compute_p_cc_given_s()
        self._compute_p_cc_given_not_s()

    def _compute_p_cc_given_s(self):
        # Truncated Cauchy centred on the expected CC, width sigma_cc.
        self._p_cc_given_s = trunccauchy_pdf(
            self.cc,
            self._lower_bound,
            self._upper_bound,
            self.expected_cc,
            self.sigma_cc,
        )

    def _compute_p_cc_given_not_s(self):
        # Marginalise p(CC; mu) over the prior for the unknown true CC when
        # the symmetry is absent.
        weighted = scipy.integrate.quad(self._numerator, 0, 1)[0]
        weight = scipy.integrate.quad(self._denominator, 0, 1)[0]
        self._p_cc_given_not_s = weighted / weight

    @property
    def p_cc_given_s(self):
        """Probability of observing this CC if the sym op is present, p(CC; S).

        Modelled by a Cauchy distribution centred on cc_true and width
        gamma = sigma_cc.
        """
        return self._p_cc_given_s

    @property
    def p_cc_given_not_s(self):
        """Probability of observing this CC if the sym op is NOT present, p(CC; !S)."""
        return self._p_cc_given_not_s

    @property
    def p_s_given_cc(self):
        """The likelihood of this symmetry element being present.

        p(S; CC) = p(CC; S) / (p(CC; S) + p(CC; !S))
        """
        return self._p_cc_given_s / (self._p_cc_given_s + self._p_cc_given_not_s)

    def _p_mu_power_pdf(self, m):
        # Prior on the true CC in the absence of symmetry: (1 - m^k)^(1/k).
        return (1.0 - pow(m, self._k)) ** (1.0 / self._k)

    def _numerator(self, x):
        # Integrand of p(CC; mu) weighted by the prior on mu.
        return trunccauchy_pdf(
            self.cc, self._lower_bound, self._upper_bound, loc=x, scale=self.sigma_cc
        ) * self._p_mu_power_pdf(x)

    def _denominator(self, x):
        # Normalisation of the prior on mu.
        return self._p_mu_power_pdf(x)
class ScoreSymmetryElement:
    """Analyse intensities for presence of a given symmetry operation.

    1) Calculate the correlation coefficient, CC, for the given sym op.

    2) Calculate the probability of observing this CC if the sym op is present,
       p(CC; S), modelled by a Cauchy distribution centred on cc_true and width
       gamma = sigma_cc.

    3) Calculate the probability of observing this CC if the sym op is
       NOT present, p(CC; !S).

    4) Calculate the likelihood of symmetry element being present,
       p(S; CC) = p(CC; S) / (p(CC; S) + p(CC; !S))

    See appendix A1 of `Evans, P. R. (2011). Acta Cryst. D67, 282-292.
    <https://doi.org/10.1107/S090744491003982X>`_
    """

    def __init__(self, intensities, sym_op, cc_true, cc_sig_fac):
        """Initialise a ScoreSymmetryElement object.

        Args:
          intensities (cctbx.miller.array): The intensities on which to perform
            symmetry analysis.
          sym_op (cctbx.sgtbx.rt_mx): The symmetry operation for analysis.
          cc_true (float): the expected value of CC if the symmetry element is
            present, E(CC; S)
          cc_sig_fac (float): Estimation of sigma(CC) as a function of sample
            size.
        """
        self.sym_op = sym_op
        assert self.sym_op.r().info().sense() >= 0
        self.cc = CorrelationCoefficientAccumulator()
        cb_op = sgtbx.change_of_basis_op(self.sym_op)
        cb_ops = [cb_op]
        if self.sym_op.r().order() > 2:
            # include inverse symmetry operation
            cb_ops.append(cb_op.inverse())
        for cb_op in cb_ops:
            if cb_op.is_identity_op():
                cb_op = sgtbx.change_of_basis_op("-x,-y,-z")
            reindexed_intensities = intensities.change_basis(cb_op).map_to_asu()
            x, y = intensities.common_sets(
                reindexed_intensities, assert_is_similar_symmetry=False
            )
            # only use reflections with unit epsilon under this operation
            sel = sgtbx.space_group().expand_smx(self.sym_op).epsilon(x.indices()) == 1
            x = x.select(sel)
            y = y.select(sel)
            outliers = flex.bool(len(x.data()), False)
            iqr_multiplier = 20  # very generous tolerance
            for col in (x.data(), y.data()):
                if col.size():
                    min_x, q1_x, med_x, q3_x, max_x = five_number_summary(col)
                    iqr_x = q3_x - q1_x
                    cut_x = iqr_multiplier * iqr_x
                    outliers.set_selected(col > q3_x + cut_x, True)
                    outliers.set_selected(col < q1_x - cut_x, True)
            if outliers.count(True):
                # BUG FIX: libtbx.utils.plural_s() returns a (count, suffix)
                # tuple, which must be unpacked so that each of the two %s
                # placeholders receives an argument. Previously the tuple was
                # passed as a single argument, causing a formatting error when
                # debug logging was enabled.
                logger.debug(
                    "Rejecting %s outlier value%s",
                    *libtbx.utils.plural_s(outliers.count(True)),
                )
            x = x.select(~outliers)
            y = y.select(~outliers)
            self.cc += CorrelationCoefficientAccumulator(x.data(), y.data())
        self.n_refs = self.cc.n()
        if self.n_refs <= 0:
            # No observations: the element can be neither supported nor refuted.
            self.likelihood = 0
            self.z_cc = 0
            return
        self.sigma_cc = max(0.1, cc_sig_fac / self.n_refs ** 0.5)
        self.z_cc = self.cc.coefficient() / self.sigma_cc
        score_cc = ScoreCorrelationCoefficient(
            self.cc.coefficient(), self.sigma_cc, cc_true
        )
        self.p_cc_given_s = score_cc.p_cc_given_s
        self.p_cc_given_not_s = score_cc.p_cc_given_not_s
        self.likelihood = score_cc.p_s_given_cc

    def __str__(self):
        """Return a string representation of the symmetry element scoring.

        Returns:
          str:
        """
        return "%.3f %.2f %.2f %i %s" % (
            self.likelihood,
            self.z_cc,
            self.cc.coefficient(),
            self.n_refs,
            self.sym_op.r().info(),
        )

    def as_dict(self):
        """Return a dictionary representation of the symmetry element scoring.

        The dictionary will contain the following keys:
          - likelihood: The likelihood of the symmetry element being present
          - z_cc: The Z-score for the correlation coefficient
          - cc: The correlation coefficient for the symmetry element
          - n_ref: The number of reflections contributing to the correlation
            coefficient
          - operator: The xyz representation of the symmetry element

        Returns:
          dict:
        """
        return {
            "likelihood": self.likelihood,
            "z_cc": self.z_cc,
            "cc": self.cc.coefficient(),
            "n_ref": self.n_refs,
            "operator": self.sym_op.as_xyz(),
        }
class ScoreSubGroup:
    """Score the probability of a given subgroup being the true subgroup.

    1) Calculates the combined correlation coefficients for symmetry operations
       present/absent from the subgroup.

    2) Calculates overall Zcc scores for symmetry elements present/absent from
       the subgroup.

    3) Calculates the overall likelihood for this subgroup.

    See appendix A2 of `Evans, P. R. (2011). Acta Cryst. D67, 282-292.
    <https://doi.org/10.1107/S090744491003982X>`_
    """

    def __init__(self, subgroup, sym_op_scores):
        """Initialise a ScoreSubGroup object.

        Args:
          subgroup (dict): A dictionary describing the subgroup as generated by
            :class:`cctbx.sgtbx.lattice_symmetry.metric_subgroups`.
          sym_op_scores (list): A list of :class:`ScoreSymmetryElement` objects
            for each symmetry element possibly in the lattice symmetry.
        """
        self.subgroup = subgroup
        patterson_group = subgroup["subsym"].space_group()
        self.cc_for = CorrelationCoefficientAccumulator()
        self.cc_against = CorrelationCoefficientAccumulator()
        self.z_cc_for = 0
        self.z_cc_against = 0
        n_for = n_against = 0
        log_p_for = log_p_against = 0
        power = 2
        # Single pass: accumulate CCs for all elements, and Zcc/log-likelihood
        # contributions only for elements with more than two reflections.
        for score in sym_op_scores:
            present = score.sym_op in patterson_group
            if present:
                self.cc_for += score.cc
            else:
                self.cc_against += score.cc
            if score.n_refs <= 2:
                continue
            if present:
                self.z_cc_for += score.z_cc ** power
                n_for += 1
                log_p_for += math.log(score.p_cc_given_s)
            else:
                self.z_cc_against += score.z_cc ** power
                n_against += 1
                log_p_against += math.log(score.p_cc_given_not_s)
        # Overall likelihood for this subgroup.
        self.likelihood = math.exp(log_p_for + log_p_against)
        if n_against:
            self.z_cc_against = (self.z_cc_against / n_against) ** (1 / power)
        if n_for:
            self.z_cc_for = (self.z_cc_for / n_for) ** (1 / power)
        self.z_cc_net = self.z_cc_for - self.z_cc_against
        self.confidence = 0

    def __str__(self):
        """Return a string representation of the subgroup scores.

        Returns:
          str:
        """
        return (
            f"{self.subgroup['best_subsym'].space_group_info()}"
            f" {self.likelihood:.3f}"
            f" {self.z_cc_net:.2f}"
            f" {self.z_cc_for:.2f}"
            f" {self.z_cc_against:.2f}"
            f" {self.cc_for.coefficient():.2f}"
            f" {self.cc_against.coefficient():.2f}"
        )

    def as_dict(self):
        """Return a dictionary representation of the subgroup scoring.

        The dictionary will contain the following keys:
          - patterson_group: The current subgroup
          - likelihood: The likelihood of the subgroup being correct
          - confidence: The confidence of the subgroup being correct
          - z_cc_for: The combined Z-scores for all symmetry elements present
            in the subgroup
          - z_cc_against: The combined Z-scores for all symmetry elements
            present in the lattice group but not in the subgroup
          - z_cc_net: The net Z-score, i.e. z_cc_for - z_cc_against
          - cc_for: The overall correlation coefficient for all symmetry
            elements present in the subgroup
          - cc_against: The overall correlation coefficient for all symmetry
            elements present in the lattice group but not in the subgroup
          - max_angular_difference: The maximum angular difference between the
            symmetrised unit cell and the P1 unit cell.
          - cb_op: The change of basis operation from the input unit cell to
            the 'best' unit cell.

        Returns:
          dict:
        """
        best_subsym = self.subgroup["best_subsym"]
        return {
            "patterson_group": best_subsym.space_group().type().hall_symbol(),
            "likelihood": self.likelihood,
            "confidence": self.confidence,
            "z_cc_net": self.z_cc_net,
            "z_cc_for": self.z_cc_for,
            "z_cc_against": self.z_cc_against,
            "cc_for": self.cc_for.coefficient(),
            "cc_against": self.cc_against.coefficient(),
            "max_angular_difference": self.subgroup["max_angular_difference"],
            "cb_op": f"{self.subgroup['cb_op_inp_best']}",
        }
class CorrelationCoefficientAccumulator:
    """Incrementally compute a Pearson correlation coefficient.

    Uses the single-pass formula for the Pearson correlation coefficient:
    https://en.wikipedia.org/wiki/Pearson_correlation_coefficient#For_a_sample
    """

    def __init__(self, x=None, y=None):
        """Initialise a CorrelationCoefficientAccumulator object.

        Args:
          x (list): Optional list of `x` values to initialise the accumulator.
          y (list): Optional list of `y` values to initialise the accumulator.
        """
        self._n = 0
        self._sum_x = 0
        self._sum_y = 0
        self._sum_xy = 0
        self._sum_x_sq = 0
        self._sum_y_sq = 0
        if x is not None and y is not None:
            self.accumulate(x, y)

    def accumulate(self, x, y):
        """Add the `x` and `y` values to the running sums.

        Args:
          x (list): The list of `x` values to accumulate.
          y (list): The list of `y` values to accumulate.
        """
        assert x.size() == y.size()
        self._n += x.size()
        self._sum_x += flex.sum(x)
        self._sum_y += flex.sum(y)
        self._sum_xy += flex.sum(x * y)
        self._sum_x_sq += flex.sum(flex.pow2(x))
        self._sum_y_sq += flex.sum(flex.pow2(y))

    def coefficient(self):
        """Calculate the correlation coefficient.

        Returns:
          float: The correlation coefficient (0 if nothing accumulated).
        """
        return self.numerator() / self.denominator() if self._n else 0

    def n(self):
        """Return the number of values contributing to the correlation coefficient.

        Returns:
          n (int)
        """
        return self._n

    def numerator(self):
        r"""Calculate the numerator of the correlation coefficient formula.

        .. math:: n \sum{x y} - \sum{x} \sum{y}

        Returns:
          float: The value of the numerator.
        """
        return self._n * self._sum_xy - self._sum_x * self._sum_y

    def denominator(self):
        r"""Calculate the denominator of the correlation coefficient formula.

        .. math:: \sqrt{n \sum{x^2} - \sum{x}^2} \sqrt{n \sum{y^2} - \sum{y}^2}

        Returns:
          float: The value of the denominator.
        """
        spread_x = self._n * self._sum_x_sq - self._sum_x ** 2
        spread_y = self._n * self._sum_y_sq - self._sum_y ** 2
        return math.sqrt(spread_x) * math.sqrt(spread_y)

    def __iadd__(self, other):
        """Merge another :class:`CorrelationCoefficientAccumulator` into this one.

        Args:
          other (CorrelationCoefficientAccumulator):
            The :class:`CorrelationCoefficientAccumulator` whose running sums
            are added to the current object's.

        Returns:
          self (CorrelationCoefficientAccumulator): The current object.
        """
        for attr in ("_n", "_sum_x", "_sum_y", "_sum_xy", "_sum_x_sq", "_sum_y_sq"):
            setattr(self, attr, getattr(self, attr) + getattr(other, attr))
        return self
def trunccauchy_pdf(x, a, b, loc=0, scale=1):
    """Calculate a truncated Cauchy probability density function.

    The density of a Cauchy distribution with the given location and scale,
    renormalised so that all probability mass lies within [a, b].

    Args:
        x (float): The point at which to calculate the PDF.
        a (float): The lower bound of the truncated distribution.
        b (float): The upper bound of the truncated distribution.
        loc (float): The location parameter for the Cauchy distribution.
        scale (float): The scale parameter for the Cauchy distribution.

    Returns:
        float: The value of the probability density function.
    """
    assert b > a
    dist = scipy.stats.cauchy(loc=loc, scale=scale)
    mass_in_bounds = dist.cdf(b) - dist.cdf(a)
    return dist.pdf(x) / mass_in_bounds
| {
"repo_name": "dials/dials",
"path": "algorithms/symmetry/laue_group.py",
"copies": "1",
"size": "30298",
"license": "bsd-3-clause",
"hash": 2961777214921668600,
"line_mean": 35.7694174757,
"line_max": 89,
"alpha_frac": 0.5561753251,
"autogenerated": false,
"ratio": 3.6715947649054774,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.972424005765552,
"avg_score": 0.0007060064699913709,
"num_lines": 824
} |
"""Algorithms for finding cut vertices and cut edges in an undirected graph.

A cut vertex (or cut edge) is a vertex (or edge) whose removal disconnects
the graph. Time complexity: O(V + E)
"""
from algolib.graph.dfs import DFS
def __process_early(_graph, dfs, vertex):
    """Reset per-vertex DFS bookkeeping before the vertex is explored."""
    info = dfs[vertex]
    # Initially a vertex can only reach itself, and has no tree children yet.
    info.reachable_ancestor = vertex
    info.out_degree = 0
def __process_late_cut_vertex(_graph, dfs, vertex):
    """Record cut vertices discovered when DFS retreats from ``vertex``."""
    info = dfs[vertex]
    if info.parent is None:
        # A DFS root is a cut vertex iff it has more than one tree child.
        if info.out_degree > 1:
            dfs.result.add(vertex)
        return
    parent_info = dfs[info.parent]
    if parent_info.parent:
        if info.reachable_ancestor == info.parent:
            # Parent cut-vertex: the subtree cannot climb above the parent.
            dfs.result.add(info.parent)
        elif info.reachable_ancestor == vertex:
            # Bridge edge to the parent: the parent is a cut vertex, and so is
            # this vertex if it has tree children of its own.
            dfs.result.add(info.parent)
            if info.out_degree:
                dfs.result.add(vertex)
    # Propagate the earliest reachable ancestor up to the parent.
    if dfs[info.reachable_ancestor].entry < dfs[parent_info.reachable_ancestor].entry:
        parent_info.reachable_ancestor = info.reachable_ancestor
def __process_late_cut_edge(_graph, dfs, vertex):
    """Record cut edges (bridges) discovered when DFS retreats from ``vertex``."""
    info = dfs[vertex]
    if info.parent is None:
        # Root has no incoming tree edge.
        return
    if info.reachable_ancestor == vertex:
        # No back edge escapes this subtree: the tree edge is a bridge.
        dfs.result.add((info.parent, vertex))
    # Propagate the earliest reachable ancestor up to the parent.
    parent_info = dfs[info.parent]
    if dfs[info.reachable_ancestor].entry < dfs[parent_info.reachable_ancestor].entry:
        parent_info.reachable_ancestor = info.reachable_ancestor
def __process_edge(_graph, dfs, source, dest, _edge):
    """Count tree children and track back edges while classifying each edge."""
    kind = dfs.edge_category(source, dest)
    if kind == DFS.TREE:
        # One more DFS tree child for the source vertex.
        dfs[source].out_degree += 1
        return True
    if kind == DFS.BACK and dest != dfs[source].parent:
        # A genuine back edge: source can climb to dest.
        dfs[source].reachable_ancestor = dest
    return True
def cut_vertices(graph):
    """Return all the cut vertices in the given undirected graph.

    Args:
        graph: Undirected graph.

    Returns:
        Set of cut vertices.
    """
    dfs = DFS(graph,
              process_vertex_early=__process_early,
              process_vertex_late=__process_late_cut_vertex,
              process_edge=__process_edge)
    dfs.result = set()
    # Run a DFS from every still-undiscovered vertex so that all connected
    # components are covered.
    for vertex in graph.vertices:
        if dfs[vertex].state == DFS.UNDISCOVERED:
            dfs.execute(vertex)
    return dfs.result
def cut_edges(graph):
    """Return all the cut edges in the given undirected graph.

    Args:
        graph: Undirected graph.

    Returns:
        Set of cut edges, where each edge is a tuple consisting of its two
        endpoint vertices in no particular order.
    """
    dfs = DFS(graph,
              process_vertex_early=__process_early,
              process_vertex_late=__process_late_cut_edge,
              process_edge=__process_edge)
    dfs.result = set()
    # Run a DFS from every still-undiscovered vertex so that all connected
    # components are covered.
    for vertex in graph.vertices:
        if dfs[vertex].state == DFS.UNDISCOVERED:
            dfs.execute(vertex)
    return dfs.result
| {
"repo_name": "niemmi/algolib",
"path": "algolib/graph/cut.py",
"copies": "1",
"size": "2846",
"license": "bsd-3-clause",
"hash": 4563371950588936000,
"line_mean": 26.1047619048,
"line_max": 80,
"alpha_frac": 0.6152494729,
"autogenerated": false,
"ratio": 3.6913099870298316,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9805383680976275,
"avg_score": 0.00023515579071134625,
"num_lines": 105
} |
"""Algorithms for generating prototypes.

A prototype algorithm is a callable with the signature:

  prototypes = algorithm(num_prototypes, model, make_training_exp, pool, progress=None)

where `make_training_exp` is a callable returning the training experiment,
from whose corpus the training image paths are drawn.

See also :func:`glimpse.experiment.MakePrototypes`.
"""
# Copyright (c) 2011-2013 Mick Thomure
# All rights reserved.
#
# Please see the file LICENSE.txt in this distribution for usage terms.
import logging
import os
from glimpse.prototypes import *
from glimpse import prototypes
from . import mf_wkmeans, om_wkmeans
class ImprintAlg(object):
  """Learn prototypes by imprinting from training images."""

  #: Image locations from which samples were drawn.
  locations = None

  def __init__(self, record_locations=True):
    self.record_locations = record_locations
    self.locations = None

  def __call__(self, num_prototypes, model, make_training_exp, pool, progress):
    """Sample C1 patches from the training images at every kernel width."""
    widths = model.params.s2_kernel_widths
    images = make_training_exp().corpus.paths
    logging.info("Learning %d prototypes at %d sizes from %d images by "
        "imprinting", num_prototypes, len(widths), len(images))
    patches_per_shape, sample_locations = SamplePatchesFromImages(model,
        model.LayerClass.C1, widths, num_prototypes, images, pool=pool,
        normalize=False, progress=progress)
    # Optionally remember where each patch was sampled from.
    if self.record_locations:
      self.locations = sample_locations
    if model.params.s2_kernels_are_normed:
      patches_per_shape = map(prototypes.NormalizeLength, patches_per_shape)
    return patches_per_shape

# deprecated name
ImprintProtoAlg = ImprintAlg
class ShuffledAlg(ImprintAlg):
  """Learn prototypes by shuffling a set of imprinted prototypes."""

  def __call__(self, num_prototypes, model, make_training_exp, pool, progress):
    """Imprint as usual, then destroy spatial structure by shuffling in place."""
    patches_per_shape = super(ShuffledAlg, self).__call__(num_prototypes, model,
        make_training_exp, pool, progress)
    logging.info("Shuffling learned prototypes")
    for patches in patches_per_shape:
      for patch in patches:
        np.random.shuffle(patch.flat)
    return patches_per_shape
class UniformAlg(object):
  """Create prototypes by sampling components uniformly.

  Now derives from ``object`` (new-style class), for consistency with
  :class:`ImprintAlg` in this Python 2 era code base.
  """

  def __init__(self, low=0, high=1):
    #: Lower limit of uniform distribution.
    self.low = low
    #: Upper limit of uniform distribution.
    self.high = high

  def __call__(self, num_prototypes, model, make_training_exp, pool, progress):
    """Draw uniform random prototypes for every S2 kernel shape."""
    patch_shapes = model.params.s2_kernel_shapes
    logging.info("Sampling %d prototypes at %d sizes " % (num_prototypes,
        len(patch_shapes)) + "from uniform random distribution")
    # XXX `progress` is ignored
    patches_per_shape = [ prototypes.UniformRandom(num_prototypes, kshape,
        self.low, self.high) for kshape in patch_shapes ]
    if model.params.s2_kernels_are_normed:
      patches_per_shape = map(prototypes.NormalizeLength, patches_per_shape)
    return patches_per_shape

# deprecated
UniformProtoAlg = UniformAlg
class _SimpleLearningAlg(object):
  """Learn prototypes by applying a patch-learning function to sampled C1 data.

  The learning function is either passed to the constructor, or implemented as
  a `learn_patches` method by a sub-class. Now derives from ``object``
  (new-style class), for consistency with :class:`ImprintAlg` in this
  Python 2 era code base.
  """

  #: Number of samples from which to learn.
  num_samples = None

  def __init__(self, learn_patches=None):
    # We conditionally set this attribute, since sub-classes directly implement
    # learn_patches().
    if learn_patches is not None:
      self.learn_patches = learn_patches

  def __call__(self, num_prototypes, model, make_training_exp, pool, progress):
    """Sample C1 patches from the training corpus and learn prototypes."""
    patch_shapes = model.params.s2_kernel_shapes
    exp = make_training_exp()
    paths = exp.corpus.paths
    logging.info("Learning %d prototypes at %d sizes " % (num_prototypes,
        len(patch_shapes)) + "from %d images" % len(paths))
    patches_per_shape = prototypes.SampleAndLearnPatches(model, paths,
        self.learn_patches, num_prototypes, model.params.s2_kernel_widths,
        model.LayerClass.C1, pool, num_samples=self.num_samples,
        progress=progress)
    if model.params.s2_kernels_are_normed:
      patches_per_shape = map(prototypes.NormalizeLength, patches_per_shape)
    return patches_per_shape

# deprecated name
LearningAlg = _SimpleLearningAlg
def HistogramAlg():
  """Learn prototypes from a histogram over sample C1 values."""
  return _SimpleLearningAlg(prototypes.Histogram)

def NormalAlg():
  """Create prototypes by sampling elements from a standard normal distribution."""
  return _SimpleLearningAlg(prototypes.NormalRandom)

def NearestKmeansAlg():
  """Learn prototypes as centroids of samples using nearest-value k-Means."""
  return _SimpleLearningAlg(prototypes.NearestKmeans)

def KmedoidsAlg():
  """Learn prototypes as centroids of samples using k-Medoids."""
  return _SimpleLearningAlg(prototypes.Kmedoids)

def IcaAlg():
  """Learn prototypes using Independent Components Analysis."""
  return _SimpleLearningAlg(prototypes.Ica)

def PcaAlg():
  """Learn prototypes using Principal Components Analysis."""
  return _SimpleLearningAlg(prototypes.Pca)

def SparsePcaAlg():
  """Learn prototypes using Sparse Principal Components Analysis."""
  return _SimpleLearningAlg(prototypes.SparsePca)

def NmfAlg():
  """Learn prototypes using Non-negative Matrix Factorization."""
  return _SimpleLearningAlg(prototypes.Nmf)
class KmeansAlg(_SimpleLearningAlg):
  """Learn prototypes as centroids of C1 patches using k-Means."""

  # Optional preprocessing and clustering switches.
  normalize_contrast = False
  whiten = False
  unwhiten = False
  batch = False

  def learn_patches(self, num_patches, samples, progress=None):
    """Learn patches by k-Means. Modifies `samples` array."""
    white = None
    if self.normalize_contrast:
      samples = prototypes.NormalizeLocalContrast(samples)
    if self.whiten:
      # Decorrelate the sample dimensions before clustering.
      white = prototypes.Whitener().fit(samples)
      samples = white.transform(samples)
    centroids = prototypes.Kmeans(num_patches, samples, progress, self.batch)
    if self.unwhiten and white is not None:
      # Map the centroids back to the original (unwhitened) space.
      centroids = white.inverse_transform(centroids)
    return centroids

# deprecated
KmeansProtoAlg = KmeansAlg
class _WeightedKmeansAlg(object):
  """Base class for weighted k-Means prototype learning.

  Sub-classes implement `LearnPatches`. Now derives from ``object``
  (new-style class), for consistency with :class:`ImprintAlg` in this
  Python 2 era code base.
  """

  #: Number of weighted samples to use for learning.
  num_samples = None

  def __call__(self, num_prototypes, model, make_training_exp, pool, progress):
    """Learn single-sized S2 prototypes by weighted k-Means clustering."""
    num_samples = self.num_samples
    if num_samples == 0 or num_samples is None:
      # Allow 10 patches per cluster
      num_samples = num_prototypes * 10
    exp = make_training_exp()
    if len(exp.extractor.model.params.s2_kernel_widths) > 1:
      raise ValueError("Only single-sized S2 prototypes are supported")
    logging.info(("Learning %d prototypes at 1 size " % num_prototypes)
        + "from %d images" % len(exp.corpus.paths))
    patches = self.LearnPatches(exp, num_samples, num_prototypes, pool,
        progress)
    if model.params.s2_kernels_are_normed:
      patches = prototypes.NormalizeLength(patches)
    return (patches,)
class MFWKmeansAlg(_WeightedKmeansAlg):
  """Learn patch models by meta-feature weighted k-Means clustering.

  .. seealso::
     :func:`mf_wkmeans.LearnPatchesFromImages
     <glimpse.experiment.mf_wkmeans.LearnPatchesFromImages>`.
  """

  #: Number of samples with which to train regr model
  num_regr_samples = None

  def LearnPatches(self, exp, num_samples, num_prototypes, pool, progress):
    """Delegate to the meta-feature weighted k-Means implementation."""
    regr_samples = self.num_regr_samples
    if regr_samples == 0 or regr_samples is None:
      regr_samples = 250  # this was found to be effective on Caltech101
    return mf_wkmeans.LearnPatchesFromImages(exp, regr_samples, num_samples,
        num_prototypes, pool, progress=progress)
class OMWKmeansAlg(_WeightedKmeansAlg):
  """Learn patch models by object-mask weighted k-Means clustering.

  .. seealso::
     :func:`om_wkmeans.LearnPatchesFromImages
     <glimpse.experiment.om_wkmeans.LearnPatchesFromImages>`.
  """

  #: Directory containing object masks.
  mask_dir = None

  #: Weight added for all patches.
  base_weight = None

  def LearnPatches(self, exp, num_samples, num_prototypes, pool, progress):
    """Delegate to the object-mask weighted k-Means implementation."""
    mask_cache = om_wkmeans.MaskCache(exp.extractor.model, self.mask_dir)
    return om_wkmeans.LearnPatchesFromImages(exp, mask_cache, num_samples,
        num_prototypes, pool, base_weight=self.base_weight, progress=progress)
# Registry mapping lower-case algorithm names to their classes/factories.
# Some algorithms are reachable under more than one name.
_ALGORITHMS = {
    "imprint": ImprintAlg,
    "uniform": UniformAlg,
    "shuffle": ShuffledAlg,
    "histogram": HistogramAlg,
    "normal": NormalAlg,
    "kmeans": KmeansAlg,
    "nearest_kmeans": NearestKmeansAlg,
    "kmedoids": KmedoidsAlg,
    "pca": PcaAlg,
    "ica": IcaAlg,
    "nmf": NmfAlg,
    "sparse_pca": SparsePcaAlg,
    "meta_feature_wkmeans": MFWKmeansAlg,
    "mfwkmeans": MFWKmeansAlg,
    "object_mask_wkmeans": OMWKmeansAlg,
    "omwkmeans": OMWKmeansAlg,
}
def GetAlgorithmNames():
  """Lookup the names of all available prototype algorithms.

  :rtype: list of str
  :returns: Name of all known prototype algorithms, any of which can be passed
     to :func:`ResolveAlgorithm`.
  """
  # NOTE(review): under Python 3 dict.keys() is a view, not a list — confirm
  # callers only iterate the result.
  return _ALGORITHMS.keys()
def ResolveAlgorithm(alg):
    """Lookup a prototype algorithm by name.

    :param str alg: Name of a prototype algorithm, as defined by
       :func:`GetAlgorithmNames`. This value is not case sensitive.
    :rtype: callable
    :returns: Prototype algorithm.
    """
    key = alg.lower()
    try:
        return _ALGORITHMS[key]
    except KeyError:
        raise ExpError("Unknown prototype algorithm: %s" % alg)
| {
"repo_name": "mthomure/glimpse-project",
"path": "glimpse/experiment/prototype_algorithms.py",
"copies": "1",
"size": "9119",
"license": "mit",
"hash": 6471195121129637000,
"line_mean": 33.938697318,
"line_max": 88,
"alpha_frac": 0.7210220419,
"autogenerated": false,
"ratio": 3.559328649492584,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4780350691392584,
"avg_score": null,
"num_lines": null
} |
"""Algorithms for indexing."""
import numpy as np
from recordlinkage.measures import full_index_size
def _map_tril_1d_on_2d(indices, dims):
    """Map 1d indices on lower triangular matrix in 2d. """
    # Number of entries strictly below the diagonal of a dims x dims matrix.
    n_pairs = (dims * dims - dims) / 2
    side = np.ceil(np.sqrt(2 * n_pairs))
    # Column index: distance walked back from the far edge of the triangle.
    col = side - np.round(np.sqrt(2 * (n_pairs - indices))) - 1
    # Row index within the column's segment of the flattened triangle.
    row = np.mod(indices + (col + 1) * (col + 2) / 2 - 1, side) + 1
    return np.array([row, col], dtype=np.int64)
def random_pairs_with_replacement(n, shape, random_state=None):
    """make random record pairs"""
    if not isinstance(random_state, np.random.RandomState):
        random_state = np.random.RandomState(random_state)

    n_max = full_index_size(shape)
    if n_max <= 0:
        raise ValueError('n_max must be larger than 0')

    # Draw n positions (with replacement) from the flattened full index.
    positions = random_state.randint(0, n_max, n)

    if len(shape) == 1:
        # Deduplication/linkage of a single frame: positions address the
        # lower triangle of a square matrix.
        return _map_tril_1d_on_2d(positions, shape[0])
    return np.array(np.unravel_index(positions, shape))
def random_pairs_without_replacement(
        n, shape, random_state=None):
    """Return record pairs for dense sample.

    Sample random record pairs without replacement bounded by the
    maximum number of record pairs (based on shape). This algorithm is
    efficient and fast for relative small samples.
    """
    n_max = full_index_size(shape)

    if not isinstance(random_state, np.random.RandomState):
        random_state = np.random.RandomState(random_state)

    if not isinstance(n, int) or n <= 0 or n > n_max:
        raise ValueError("n must be a integer satisfying 0<n<=%s" % n_max)

    # Draw n distinct positions from the full index in one shot.
    chosen = random_state.choice(
        np.arange(n_max), n, replace=False)

    # Translate flat positions to 2d indices.
    if len(shape) == 1:
        return _map_tril_1d_on_2d(chosen, shape[0])
    return np.array(np.unravel_index(chosen, shape))
def random_pairs_without_replacement_low_memory(
        n, shape, random_state=None):
    """Make a sample of random pairs without replacement.

    Sample random record pairs without replacement bounded by the
    maximum number of record pairs (based on shape). This algorithm
    consumes low memory and is fast for relatively small samples.
    """
    n_max = full_index_size(shape)

    if not isinstance(random_state, np.random.RandomState):
        random_state = np.random.RandomState(random_state)

    if not isinstance(n, int) or n <= 0 or n > n_max:
        raise ValueError("n must be a integer satisfying 0<n<=%s" % n_max)

    sample = np.array([], dtype=np.int64)

    # Run as long as the number of pairs is less than the requested number
    # of pairs n.
    while len(sample) < n:
        # The number of pairs to sample (sample twice as much record pairs
        # because the duplicates are dropped).
        n_sample_size = (n - len(sample)) * 2
        sample_sub = random_state.randint(
            n_max,
            size=n_sample_size
        )

        # concatenate pairs and deduplicate
        pairs_non_unique = np.append(sample, sample_sub)
        # NOTE(review): np.unique also sorts, so the returned positions are
        # in ascending order rather than random order — confirm intended.
        sample = np.unique(pairs_non_unique)

    # return 2d indices
    if len(shape) == 1:
        return _map_tril_1d_on_2d(sample[0:n], shape[0])
    else:
        return np.array(np.unravel_index(sample[0:n], shape))
| {
"repo_name": "J535D165/recordlinkage",
"path": "recordlinkage/algorithms/indexing.py",
"copies": "1",
"size": "3274",
"license": "bsd-3-clause",
"hash": -3362676591491719700,
"line_mean": 29.5981308411,
"line_max": 74,
"alpha_frac": 0.6365302382,
"autogenerated": false,
"ratio": 3.497863247863248,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9632057037465116,
"avg_score": 0.00046728971962616824,
"num_lines": 107
} |
"""Algorithms for manipulating BibTeX data.
This module implements various algorithms supplied by BibTeX to style
files, as well as some algorithms to make BibTeX data more accessible
to Python.
"""
# Public API of this module.
__all__ = ['Name', 'parse_names',
           'parse_month',
           'title_case',
           'TeXProcessor', 'TeXToUnicode', 'tex_to_unicode']
import re
import collections
import unicodedata
import string
from . import messages
# Control sequences (defined as "control_seq_ilk" in bibtex) and their
# Unicode translations.  This is similar to, but slightly different
# from the TeX definitions (of course).  Used both for extracting a
# name's "first character" and for TeX-to-Unicode conversion.
_CONTROL_SEQS = {
    '\\i': 'ı', '\\j': 'ȷ', '\\oe': 'œ', '\\OE': 'Œ',
    '\\ae': 'æ', '\\AE': 'Æ', '\\aa': 'å', '\\AA': 'Å',
    '\\o': 'ø', '\\O': 'Ø', '\\l': 'ł', '\\L': 'Ł', '\\ss': 'ß'
}
class NameParser:
    """Parser for BibTeX author/editor name lists (see ``parse_names``)."""

    def __init__(self):
        pass

    def __depth(self, data):
        # Return a list giving the brace depth of every character in data.
        # An opening brace is assigned the depth *inside* the group it
        # opens; a closing brace keeps the depth of the group it closes.
        depth, depths = 0, [0] * len(data)
        for pos, ch in enumerate(data):
            depths[pos] = depth
            if ch == '{':
                depth += 1
                depths[pos] = depth
            elif ch == '}':
                depth -= 1
        return depths

    def __split_depth0(self, regexp, data, flags=0):
        # Split data on matches of regexp that occur at brace depth 0 only.
        # Like re.split, capture-group texts (if any) are kept in the
        # result between the surrounding fragments.
        regexp = re.compile(regexp, flags=flags)
        depths = self.__depth(data)
        parts, last = [], 0
        for m in regexp.finditer(data):
            if depths[m.start()] == 0:
                parts.append(data[last:m.start()])
                last = m.end()
                if regexp.groups:
                    parts.extend(m.groups())
        parts.append(data[last:])
        return parts

    def _first_char(self, data):
        """Return the first character of data (in bibtex's sense)."""
        # XXX Should this be pulled out as some generic algorithm?
        pos = 0
        depths = self.__depth(data)
        while True:
            if pos == len(data):
                return ''
            elif data[pos].isalpha():
                return data[pos]
            elif data.startswith('{\\', pos):
                # Special character
                pos += 1
                m = re.compile(r'\\[a-zA-Z]+').match(data, pos)
                if m and m.group() in _CONTROL_SEQS:
                    # Known bibtex control sequence
                    return _CONTROL_SEQS[m.group()]
                # Scan for the first alphabetic character
                while pos < len(data) and depths[pos]:
                    if data[pos].isalpha():
                        return data[pos]
                    pos += 1
            elif data[pos] == '{':
                # Skip brace group
                while pos < len(data) and depths[pos]:
                    pos += 1
            else:
                pos += 1

    def __split_von_last(self, toks):
        # See von_name_ends_and_last_name_starts_stuff
        # toks alternates token, connector, token, ...; step backwards over
        # tokens looking for the last token whose first character is
        # lower-case, which ends the "von" part.
        for von_end in range(len(toks) - 1, 1, -2):
            if self._first_char(toks[von_end - 2]).islower():
                return (toks[:von_end-1], toks[von_end:])
        return ([], toks)

    def parse(self, string, pos):
        """Parse a BibTeX name list.

        Returns a list of Name objects.  Raises InputError if there is
        a syntax error.
        """
        # See x_format_name

        # Split names (see name_scan_for_and)
        name_strings = [n.strip() for n in self.__split_depth0(
            '[ \t]and(?=[ \t])', string, flags=re.IGNORECASE)]

        # Process each name
        names = []
        for name_string in name_strings:
            # Remove leading and trailing white space, ~, and -, and
            # trailing commas.
            name_string = name_trailing = name_string.lstrip('-~ \t')
            name_string = name_string.rstrip('-~ \t,')
            if ',' in name_trailing[len(name_string):]:
                # BibTeX warns about this because it often indicates a
                # bigger syntax problem
                pos.warn('trailing comma after name `{}\''.format(name_string))

            # Split on depth-0 commas and further split tokens in each
            # part, keeping only the first connector between each
            # token.
            parts = [self.__split_depth0('([-~ \t])[-~ \t]*', part.strip())
                     for part in self.__split_depth0(',', name_string)]

            # Process name depending on how many commas there were
            first = von = last = jr = []
            if len(parts) == 1:
                # "First von Last"
                toks = parts[0]
                # The von tokens start with the first lower-case token
                # (but cannot start at the last token)
                for von_start in range(0, len(toks) - 2, 2):
                    if self._first_char(toks[von_start]).islower():
                        # Found beginning; now find the end
                        first = toks[:max(0, von_start-1)]
                        von, last = self.__split_von_last(toks[von_start:])
                        break
                else:
                    # No von tokens.  Find hyphen-connected last name
                    # tokens.
                    for last_start in range(len(toks) - 1, -1, -2):
                        if last_start and toks[last_start-1] != '-':
                            break
                    first = toks[:max(0, last_start-1)]
                    last = toks[last_start:]
            elif 2 <= len(parts) <= 3:
                # "von Last, First[, Jr]"
                von, last = self.__split_von_last(parts[0])
                first = parts[1]
                if len(parts) == 3:
                    jr = parts[2]
            else:
                pos.raise_error(
                    'too many commas in name `{}\''.format(name_string))
            names.append(Name(''.join(first), ''.join(von),
                              ''.join(last), ''.join(jr)))
        return names
class Name(collections.namedtuple('Name', 'first von last jr')):
    """A parsed name.

    The name is parsed in to first name, "von", last name, and the
    complement (or "jr").  Each component is in uninterpreted form
    (e.g., TeX syntax).  Missing components are set to the empty
    string.
    """

    def is_others(self):
        # True for the special BibTeX name "others" (as in "and others").
        return self.first == '' and self.von == '' and \
            self.last == 'others' and self.jr == ''

    def pretty(self, template='{first} {von} {last} {jr}'):
        """Pretty-print author according to template.

        The template is a 'format' template with the added feature
        that literal text surrounding fields that expand to empty
        strings is prioritized, rather than concatenated.
        Specifically, of the literal text snippets between two
        non-null fields, only the first of the highest priority is
        kept, where non-white space outranks white space outranks the
        empty string.  Literal text before and after the first and
        last fields is always kept.

        Hence, if the template is '{von} {last}, {first}, {jr}' and
        the name has a last and a jr but no von or first, then the
        first comma will be kept and the space and second dropped.  If
        the name has only a von and a last, then both commas will be
        dropped.  If the name has only a last, then all separators
        will be dropped.
        """
        # XXX BibTeX's own format.name$ templates are more
        # sophisticated than this, and it's not clear these are easier
        # to use.  These do have the (dubious) benefit of having
        # access to the usual format machinery.
        def priority(string):
            # Rank separator text: non-space > white space > empty.
            if not string:
                return 0
            elif string.isspace():
                return 1
            return 2
        fields = {'first': self.first, 'von': self.von,
                  'last': self.last, 'jr': self.jr}
        f = string.Formatter()
        # pieces alternates separator, field text, separator, field text, ...
        pieces = ['']
        first_field, last_field = 0, -1
        leading = trailing = ''
        for i, (literal_text, field_name, format_spec, conv) in \
                enumerate(f.parse(template)):
            if i == 0:
                # Always keep leading text
                leading = literal_text
            elif field_name is None:
                # Always keep trailing text
                trailing = literal_text
            elif priority(literal_text) > priority(pieces[-1]):
                # Overrides previous piece
                pieces[-1] = literal_text
            if field_name is not None:
                obj, _ = f.get_field(field_name, (), fields)
                if not obj:
                    # Null field: drop it (and let separators compete).
                    continue
                obj = f.convert_field(obj, conv)
                if first_field == 0:
                    first_field = len(pieces)
                last_field = len(pieces)
                pieces.extend([f.format_field(obj, format_spec), ''])
        # Only keep the pieces between non-null fields
        pieces = pieces[first_field:last_field + 1]
        return leading + ''.join(pieces) + trailing
def parse_names(string, pos=messages.Pos.unknown):
    """Parse a BibTeX name list (e.g., an author or editor field).

    Returns a list of Name objects.  The parsing is equivalent to
    BibTeX's built-in "format.name$" function.  Raises InputError if
    there is a syntax error.
    """
    parser = NameParser()
    return parser.parse(string, pos)
# Lower-case English month names, indexed by month number minus one.
_MONTHS = ['january', 'february', 'march', 'april', 'may', 'june',
           'july', 'august', 'september', 'october', 'november', 'december']
def parse_month(string, pos=messages.Pos.unknown):
    """Parse a BibTeX month field.

    This performs fairly fuzzy parsing that supports all standard
    month macro styles (and then some).

    Returns the month number (1-12).  Raises InputError if the field
    cannot be parsed.
    """
    val = string.strip().rstrip('.').lower()
    # Require at least three characters so that short, ambiguous prefixes
    # like "ju" never match.  The length test does not depend on the loop
    # variable, so hoist it out of the loop.
    if len(val) >= 3:
        for i, name in enumerate(_MONTHS):
            if name.startswith(val):
                return i + 1
    pos.raise_error('invalid month `{}\''.format(string))
# A TeX control word: a backslash followed by one or more letters.
CS_RE = re.compile(r'\\[a-zA-Z]+')
def title_case(string, pos=messages.Pos.unknown):
    """Convert to title case (like BibTeX's built-in "change.case$").

    The first character of the string, and the first character of any
    word following a colon-plus-white-space, keep their case; everything
    else at brace depth 0 is lower-cased.  Brace groups protect their
    contents from case conversion.

    Raises InputError if the title string contains syntax errors.
    """
    # See "@<Perform the case conversion@>"
    out = []
    # BUG FIX: the scan offset used to be named `pos`, shadowing the `pos`
    # parameter, so the `pos.raise_error('unexpected }')` call below would
    # fail with AttributeError on an int.  Use a distinct name `off`.
    level, prev_colon, off = 0, False, 0
    while off < len(string):
        # Keep the case of the very first character and of a character
        # that follows "colon then white space".
        keep = (off == 0 or (prev_colon and string[off-1] in ' \t\n'))
        if level == 0 and string.startswith('{\\', off) and not keep:
            # Special character: consume the whole brace group, lower-casing
            # known control sequences and regular characters inside it.
            out.append(string[off])
            off += 1
            level += 1
            while level and off < len(string):
                if string[off] == '\\':
                    m = CS_RE.match(string, off)
                    if m:
                        if m.group() in _CONTROL_SEQS:
                            # Lower case control sequence
                            out.append(m.group().lower())
                        else:
                            # Unknown control sequence, keep case
                            out.append(m.group())
                        off = m.end()
                        continue
                elif string[off] == '{':
                    level += 1
                elif string[off] == '}':
                    level -= 1
                # Lower-case non-control sequence
                out.append(string[off].lower())
                off += 1
            prev_colon = False
            continue
        # Handle braces
        char = string[off]
        if char == '{':
            level += 1
        elif char == '}':
            if level == 0:
                pos.raise_error('unexpected }')
            level -= 1
        # Handle colon state
        if char == ':':
            prev_colon = True
        elif char not in ' \t\n':
            prev_colon = False
        # Change case of a regular character
        if level > 0 or keep:
            out.append(string[off])
        else:
            out.append(string[off].lower())
        off += 1
    return ''.join(out)
# A TeX control sequence is
#
# 1) an active character (subsequent white space is NOT ignored) or,
# 2) a \ followed by either
#    2.1) a sequence of letter-category characters (subsequent white
#         space is ignored), or
#    2.2) a single space-category character (subsequent white space is
#         ignored), or
#    2.3) a single other character (subsequent white space is NOT
#         ignored).
#
# This regexp assumes plain TeX's initial category codes.  Technically
# only ~ and \f are active characters, but we include several other
# special characters that we want to abort on.
#
# Group 1 is the whole control sequence.  Group 2 captures only the
# "white space is ignored" cases (2.1 and 2.2); the trailing
# conditional pattern (?(2)...) consumes that white space only when
# group 2 participated in the match.
tex_cs_re = re.compile(
    r'([~\f$&#^_]|(\\[a-zA-Z]+|\\[ \t\r\n])|\\.)(?(2)[ \t\r\n]*)')
class TeXProcessor:
    """Base class for simple TeX macro processors.

    This assumes the initial category codes set up by plain.tex (and,
    likewise, LaTeX).
    """

    def process(self, string, pos):
        """Expand active characters and macros in string.

        Raises InputError if it encounters an active character or
        macro it doesn't recognize.
        """
        self.__data = string
        self.__off = 0
        self.__pos = pos
        # Process macros
        while True:
            m = tex_cs_re.search(self.__data, self.__off)
            if not m:
                break
            self.__off = m.end()
            macro = m.group(1)
            # _expand may consume an argument via _scan_argument, which
            # advances self.__off.
            nval = self._expand(macro)
            if nval is None:
                if macro.startswith('\\'):
                    pos.raise_error('unknown macro `{}\''.format(macro))
                pos.raise_error(
                    'unknown special character `{}\''.format(macro))
            # Splice the expansion into the buffer and resume scanning
            # immediately after it (expansions are not themselves
            # re-expanded).
            self.__data = self.__data[:m.start()] + nval + \
                self.__data[self.__off:]
            self.__off = m.start() + len(nval)
        return self.__data

    def _scan_argument(self):
        """Scan and return a macro argument."""
        if self.__off >= len(self.__data):
            self.__pos.raise_error('macro argument expected')
        if self.__data[self.__off] == '{':
            # Brace-group argument; return its contents (braces stripped).
            # NOTE(review): an unterminated group runs off the end and
            # raises IndexError rather than a parse error — confirm intended.
            start = self.__off
            depth = 0
            while depth or self.__off == start:
                if self.__data[self.__off] == '{':
                    depth += 1
                elif self.__data[self.__off] == '}':
                    depth -= 1
                self.__off += 1
            return self.__data[start + 1:self.__off - 1]
        elif self.__data[self.__off] == '\\':
            # Control-sequence argument.
            m = tex_cs_re.match(self.__data, self.__off)
            self.__off = m.end()
            return m.group(1)
        else:
            # Single-character argument.
            arg = self.__data[self.__off]
            self.__off += 1
            return arg

    def _expand(self, cs):
        """Return the expansion of an active character or control sequence.

        Returns None if the sequence is unknown.  This should be
        overridden by sub-classes.
        """
        return None
class TeXToUnicode(TeXProcessor):
    """A simple TeX-to-unicode converter.

    This interprets accents and other special tokens like '--' and
    eliminates braces.
    """

    # Simple TeX-to-Unicode replacements
    _SIMPLE = {
        # Active characters
        '~': '\u00A0',
        # chardefs from plain.tex
        '\\%': '%', '\\&': '&', '\\#': '#', '\\$': '$', '\\ss': 'ß',
        '\\ae': 'æ', '\\oe': 'œ', '\\o': 'ø',
        '\\AE': 'Æ', '\\OE': 'Œ', '\\O': 'Ø',
        '\\i': 'ı', '\\j': 'ȷ',
        '\\aa': 'å', '\\AA': 'Å', '\\l': 'ł', '\\L': 'Ł',
        # Other defs from plain.tex
        '\\_': '_', '\\dag': '†', '\\ddag': '‡', '\\S': '§', '\\P': '¶',
    }

    # TeX accent control sequences to Unicode combining characters
    _ACCENTS = {
        # Accents defined in plain.tex
        '\\`': '\u0300', "\\'": '\u0301', '\\v': '\u030C', '\\u': '\u0306',
        '\\=': '\u0304', '\\^': '\u0302', '\\.': '\u0307', '\\H': '\u030B',
        '\\~': '\u0303', '\\"': '\u0308',
        '\\d': '\u0323', '\\b': '\u0331', '\\c': '\u0327',
        # Other accents that seem to be standard, but I can't find
        # their definitions
        '\\r': '\u030A', '\\k': '\u0328'
    }

    def process(self, string, pos):
        # First expand macros via the base class.
        string = super().process(string, pos)
        # Handle ligatures that are unique to TeX.  This must be done
        # after macro expansion, but before brace removal because
        # braces inhibit ligatures.
        string = string.replace('---', '\u2014').replace('--', '\u2013')
        # Remove braces
        return string.replace('{', '').replace('}', '')

    def _expand(self, cs):
        # Plain one-for-one substitutions first.
        if cs in self._SIMPLE:
            return self._SIMPLE[cs]
        if cs in self._ACCENTS:
            # Accent macros take one argument; attach the combining
            # character to the argument's first character and NFC-compose.
            arg = self._scan_argument()
            if len(arg) == 0:
                # Accent with no argument: apply it to a space.
                seq, rest = ' ' + self._ACCENTS[cs], ''
            elif arg.startswith('\\i') or arg.startswith('\\j'):
                # Unicode combining marks should be applied to the
                # regular i, not the dotless i.
                seq, rest = arg[1] + self._ACCENTS[cs], arg[2:]
            else:
                seq, rest = arg[0] + self._ACCENTS[cs], arg[1:]
            return unicodedata.normalize('NFC', seq) + rest
        return None
def tex_to_unicode(string, pos=messages.Pos.unknown):
    """Convert a BibTeX field value written in TeX to Unicode.

    This interprets accents and other special tokens like '--' and
    eliminates braces.  Raises InputError if it encounters a macro it
    doesn't understand.

    Note that BibTeX's internal understanding of accented characters
    (e.g., purify$ and change.case$) is much more limited than TeX's.
    This implements something closer to TeX on the assumption that the
    goal is to display the string.
    """
    converter = TeXToUnicode()
    return converter.process(string, pos)
| {
"repo_name": "ajbouh/tfi",
"path": "src/tfi/parse/biblib/algo.py",
"copies": "2",
"size": "17807",
"license": "mit",
"hash": 4353573591234447400,
"line_mean": 35.6494845361,
"line_max": 113,
"alpha_frac": 0.5139803094,
"autogenerated": false,
"ratio": 4.022403258655804,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5536383568055804,
"avg_score": null,
"num_lines": null
} |
"""Algorithms for partial fraction decomposition of rational functions. """
from __future__ import print_function, division
from sympy.core import S, Add, sympify, Function, Lambda, Dummy
from sympy.core.basic import preorder_traversal
from sympy.core.compatibility import range
from sympy.polys import Poly, RootSum, cancel, factor
from sympy.polys.polyerrors import PolynomialError
from sympy.polys.polyoptions import allowed_flags, set_defaults
from sympy.polys.polytools import parallel_poly_from_expr
from sympy.utilities import numbered_symbols, take, xthreaded, public
@xthreaded
@public
def apart(f, x=None, full=False, **options):
    """
    Compute partial fraction decomposition of a rational function.

    Given a rational function ``f``, computes the partial fraction
    decomposition of ``f``. Two algorithms are available: One is based on the
    undetermined coefficients method, the other is Bronstein's full partial
    fraction decomposition algorithm.

    The undetermined coefficients method (selected by ``full=False``) uses
    polynomial factorization (and therefore accepts the same options as
    factor) for the denominator. Per default it works over the rational
    numbers, therefore decomposition of denominators with non-rational roots
    (e.g. irrational, complex roots) is not supported by default (see options
    of factor).

    Bronstein's algorithm can be selected by using ``full=True`` and allows a
    decomposition of denominators with non-rational roots. A human-readable
    result can be obtained via ``doit()`` (see examples below).

    Examples
    ========

    >>> from sympy.polys.partfrac import apart
    >>> from sympy.abc import x, y

    By default, using the undetermined coefficients method:

    >>> apart(y/(x + 2)/(x + 1), x)
    -y/(x + 2) + y/(x + 1)

    The undetermined coefficients method does not provide a result when the
    denominators roots are not rational:

    >>> apart(y/(x**2 + x + 1), x)
    y/(x**2 + x + 1)

    You can choose Bronstein's algorithm by setting ``full=True``:

    >>> apart(y/(x**2 + x + 1), x, full=True)
    RootSum(_w**2 + _w + 1, Lambda(_a, (-2*_a*y/3 - y/3)/(-_a + x)))

    Calling ``doit()`` yields a human-readable result:

    >>> apart(y/(x**2 + x + 1), x, full=True).doit()
    (-y/3 - 2*y*(-1/2 - sqrt(3)*I/2)/3)/(x + 1/2 + sqrt(3)*I/2) + (-y/3 -
        2*y*(-1/2 + sqrt(3)*I/2)/3)/(x + 1/2 - sqrt(3)*I/2)


    See Also
    ========

    apart_list, assemble_partfrac_list
    """
    allowed_flags(options, [])

    f = sympify(f)

    if f.is_Atom:
        return f
    else:
        P, Q = f.as_numer_denom()

    _options = options.copy()
    options = set_defaults(options, extension=True)
    try:
        (P, Q), opt = parallel_poly_from_expr((P, Q), x, **options)
    except PolynomialError as msg:
        if f.is_commutative:
            raise PolynomialError(msg)
        # non-commutative: recurse into the commutative pieces and leave
        # the non-commutative ones untouched.
        if f.is_Mul:
            c, nc = f.args_cnc(split_1=False)
            nc = f.func(*nc)
            if c:
                c = apart(f.func._from_args(c), x=x, full=full, **_options)
                return c*nc
            else:
                return nc
        elif f.is_Add:
            c = []
            nc = []
            for i in f.args:
                if i.is_commutative:
                    c.append(i)
                else:
                    try:
                        nc.append(apart(i, x=x, full=full, **_options))
                    except NotImplementedError:
                        nc.append(i)
            return apart(f.func(*c), x=x, full=full, **_options) + f.func(*nc)
        else:
            # Fall back to decomposing each decomposable subexpression.
            reps = []
            pot = preorder_traversal(f)
            next(pot)
            for e in pot:
                try:
                    reps.append((e, apart(e, x=x, full=full, **_options)))
                    pot.skip()  # this was handled successfully
                except NotImplementedError:
                    pass
            return f.xreplace(dict(reps))

    if P.is_multivariate:
        # Cancellation may remove the extra generators; otherwise give up.
        fc = f.cancel()
        if fc != f:
            return apart(fc, x=x, full=full, **_options)

        raise NotImplementedError(
            "multivariate partial fraction decomposition")

    common, P, Q = P.cancel(Q)

    # Separate the polynomial part and clear denominators.
    poly, P = P.div(Q, auto=True)
    P, Q = P.rat_clear_denoms(Q)

    if Q.degree() <= 1:
        partial = P/Q
    else:
        if not full:
            partial = apart_undetermined_coeffs(P, Q)
        else:
            partial = apart_full_decomposition(P, Q)

    terms = S.Zero

    for term in Add.make_args(partial):
        if term.has(RootSum):
            terms += term
        else:
            terms += factor(term)

    return common*(poly.as_expr() + terms)
def apart_undetermined_coeffs(P, Q):
    """Partial fractions via method of undetermined coefficients.

    Builds one symbolic numerator per power of each factor of ``Q``,
    equates coefficients against ``P`` and solves the resulting linear
    system for the unknown coefficients.
    """
    unknown_gen = numbered_symbols(cls=Dummy)
    terms, unknowns = [], []

    _, factor_list = Q.factor_list()

    # One numerator of degree deg(f) - 1 for every power i of factor f.
    for f, k in factor_list:
        n, q = f.degree(), Q

        for i in range(1, k + 1):
            coeffs, q = take(unknown_gen, n), q.quo(f)
            terms.append((coeffs, q, f, i))
            unknowns.extend(coeffs)

    dom = Q.get_domain().inject(*unknowns)
    F = Poly(0, Q.gen, domain=dom)

    # Assemble sum_i h_i * (Q / f_i**i) as a polynomial in the unknowns.
    for i, (coeffs, q, f, k) in enumerate(terms):
        h = Poly(coeffs, Q.gen, domain=dom)
        terms[i] = (h, f, k)
        q = q.set_domain(dom)
        F += h*q

    # Matching coefficients of F with those of P yields a linear system.
    system, result = [], S.Zero

    for (k,), coeff in F.terms():
        system.append(coeff - P.nth(k))

    from sympy.solvers import solve
    solution = solve(system, unknowns)

    for h, f, k in terms:
        h = h.as_expr().subs(solution)
        result += h/f.as_expr()**k

    return result
def apart_full_decomposition(P, Q):
    """
    Bronstein's full partial fraction decomposition algorithm.

    Given a univariate rational function ``f``, performing only GCD
    operations over the algebraic closure of the initial ground domain
    of definition, compute full partial fraction decomposition with
    fractions having linear denominators.

    Note that no factorization of the initial denominator of ``f`` is
    performed. The final decomposition is formed in terms of a sum of
    :class:`RootSum` instances.

    References
    ==========

    .. [1] [Bronstein93]_

    """
    # Delegate to the structured decomposition and rebuild an expression.
    structured = apart_list(P/Q, P.gens[0])
    return assemble_partfrac_list(structured)
@public
def apart_list(f, x=None, dummies=None, **options):
    """
    Compute partial fraction decomposition of a rational function
    and return the result in structured form.

    Given a rational function ``f`` compute the partial fraction decomposition
    of ``f``. Only Bronstein's full partial fraction decomposition algorithm
    is supported by this method. The return value is highly structured and
    perfectly suited for further algorithmic treatment rather than being
    human-readable. The function returns a tuple holding three elements:

    * The first item is the common coefficient, free of the variable `x` used
      for decomposition. (It is an element of the base field `K`.)

    * The second item is the polynomial part of the decomposition. This can be
      the zero polynomial. (It is an element of `K[x]`.)

    * The third part itself is a list of quadruples. Each quadruple
      has the following elements in this order:

      - The (not necessarily irreducible) polynomial `D` whose roots `w_i` appear
        in the linear denominator of a bunch of related fraction terms. (This item
        can also be a list of explicit roots. However, at the moment ``apart_list``
        never returns a result this way, but the related ``assemble_partfrac_list``
        function accepts this format as input.)

      - The numerator of the fraction, written as a function of the root `w`

      - The linear denominator of the fraction *excluding its power exponent*,
        written as a function of the root `w`.

      - The power to which the denominator has to be raised.

    One can always rebuild a plain expression by using the function ``assemble_partfrac_list``.

    Examples
    ========

    A first example:

    >>> from sympy.polys.partfrac import apart_list, assemble_partfrac_list
    >>> from sympy.abc import x, t

    >>> f = (2*x**3 - 2*x) / (x**2 - 2*x + 1)
    >>> pfd = apart_list(f)
    >>> pfd
    (1,
    Poly(2*x + 4, x, domain='ZZ'),
    [(Poly(_w - 1, _w, domain='ZZ'), Lambda(_a, 4), Lambda(_a, -_a + x), 1)])

    >>> assemble_partfrac_list(pfd)
    2*x + 4 + 4/(x - 1)

    Second example:

    >>> f = (-2*x - 2*x**2) / (3*x**2 - 6*x)
    >>> pfd = apart_list(f)
    >>> pfd
    (-1,
    Poly(2/3, x, domain='QQ'),
    [(Poly(_w - 2, _w, domain='ZZ'), Lambda(_a, 2), Lambda(_a, -_a + x), 1)])

    >>> assemble_partfrac_list(pfd)
    -2/3 - 2/(x - 2)

    Another example, showing symbolic parameters:

    >>> pfd = apart_list(t/(x**2 + x + t), x)
    >>> pfd
    (1,
    Poly(0, x, domain='ZZ[t]'),
    [(Poly(_w**2 + _w + t, _w, domain='ZZ[t]'),
    Lambda(_a, -2*_a*t/(4*t - 1) - t/(4*t - 1)),
    Lambda(_a, -_a + x),
    1)])

    >>> assemble_partfrac_list(pfd)
    RootSum(_w**2 + _w + t, Lambda(_a, (-2*_a*t/(4*t - 1) - t/(4*t - 1))/(-_a + x)))

    This example is taken from Bronstein's original paper:

    >>> f = 36 / (x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2)
    >>> pfd = apart_list(f)
    >>> pfd
    (1,
    Poly(0, x, domain='ZZ'),
    [(Poly(_w - 2, _w, domain='ZZ'), Lambda(_a, 4), Lambda(_a, -_a + x), 1),
    (Poly(_w**2 - 1, _w, domain='ZZ'), Lambda(_a, -3*_a - 6), Lambda(_a, -_a + x), 2),
    (Poly(_w + 1, _w, domain='ZZ'), Lambda(_a, -4), Lambda(_a, -_a + x), 1)])

    >>> assemble_partfrac_list(pfd)
    -4/(x + 1) - 3/(x + 1)**2 - 9/(x - 1)**2 + 4/(x - 2)

    See also
    ========

    apart, assemble_partfrac_list

    References
    ==========

    .. [1] [Bronstein93]_

    """
    allowed_flags(options, [])

    f = sympify(f)

    if f.is_Atom:
        return f
    else:
        P, Q = f.as_numer_denom()

    options = set_defaults(options, extension=True)
    (P, Q), opt = parallel_poly_from_expr((P, Q), x, **options)

    if P.is_multivariate:
        raise NotImplementedError(
            "multivariate partial fraction decomposition")

    common, P, Q = P.cancel(Q)

    # Separate the polynomial part and clear denominators.
    poly, P = P.div(Q, auto=True)
    P, Q = P.rat_clear_denoms(Q)

    polypart = poly

    if dummies is None:
        # Default generator yields the *same* Dummy "w" forever, so all
        # quadruples share one symbol in the printed output.  Note that the
        # inner function deliberately shadows the `dummies` parameter.
        def dummies(name):
            d = Dummy(name)
            while True:
                yield d

        dummies = dummies("w")

    rationalpart = apart_list_full_decomposition(P, Q, dummies)

    return (common, polypart, rationalpart)
def apart_list_full_decomposition(P, Q, dummygen):
    """
    Bronstein's full partial fraction decomposition algorithm.

    Given a univariate rational function ``f``, performing only GCD
    operations over the algebraic closure of the initial ground domain
    of definition, compute full partial fraction decomposition with
    fractions having linear denominators.

    Note that no factorization of the initial denominator of ``f`` is
    performed. The final decomposition is formed in terms of a sum of
    :class:`RootSum` instances.

    References
    ==========

    .. [1] [Bronstein93]_

    """
    f, x, U = P/Q, P.gen, []

    # u is a placeholder for the square-free factor; its derivatives are
    # substituted away below.
    u = Function('u')(x)
    a = Dummy('a')

    partial = []

    # Iterate over the square-free factors d of Q with multiplicity n.
    for d, n in Q.sqf_list_include(all=True):
        b = d.as_expr()
        U += [ u.diff(x, n - 1) ]

        h = cancel(f*b**n) / u**n

        H, subs = [h], []

        # H[j] holds the j-th derivative of h divided by j!.
        for j in range(1, n):
            H += [ H[-1].diff(x) / j ]

        for j in range(1, n + 1):
            subs += [ (U[j - 1], b.diff(x, j) / j) ]

        for j in range(0, n):
            P, Q = cancel(H[j]).as_numer_denom()

            # Replace the placeholder derivatives by derivatives of d.
            for i in range(0, j + 1):
                P = P.subs(*subs[j - i])

            Q = Q.subs(*subs[0])

            P = Poly(P, x)
            Q = Poly(Q, x)

            G = P.gcd(d)
            D = d.quo(G)

            # Invert Q modulo D and reduce the numerator.
            B, g = Q.half_gcdex(D)
            b = (P * B.quo(g)).rem(D)

            # Quadruple: root polynomial, numerator, linear denominator,
            # and the denominator's exponent.
            Dw = D.subs(x, next(dummygen))
            numer = Lambda(a, b.as_expr().subs(x, a))
            denom = Lambda(a, (x - a))
            exponent = n-j

            partial.append((Dw, numer, denom, exponent))

    return partial
@public
def assemble_partfrac_list(partial_list):
    r"""Reassemble a full partial fraction decomposition
    from a structured result obtained by the function ``apart_list``.

    Examples
    ========

    This example is taken from Bronstein's original paper:

    >>> from sympy.polys.partfrac import apart_list, assemble_partfrac_list
    >>> from sympy.abc import x, y

    >>> f = 36 / (x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2)
    >>> pfd = apart_list(f)
    >>> pfd
    (1,
    Poly(0, x, domain='ZZ'),
    [(Poly(_w - 2, _w, domain='ZZ'), Lambda(_a, 4), Lambda(_a, -_a + x), 1),
    (Poly(_w**2 - 1, _w, domain='ZZ'), Lambda(_a, -3*_a - 6), Lambda(_a, -_a + x), 2),
    (Poly(_w + 1, _w, domain='ZZ'), Lambda(_a, -4), Lambda(_a, -_a + x), 1)])

    >>> assemble_partfrac_list(pfd)
    -4/(x + 1) - 3/(x + 1)**2 - 9/(x - 1)**2 + 4/(x - 2)

    If we happen to know some roots we can provide them easily inside the structure:

    >>> pfd = apart_list(2/(x**2-2))
    >>> pfd
    (1,
    Poly(0, x, domain='ZZ'),
    [(Poly(_w**2 - 2, _w, domain='ZZ'),
    Lambda(_a, _a/2),
    Lambda(_a, -_a + x),
    1)])

    >>> pfda = assemble_partfrac_list(pfd)
    >>> pfda
    RootSum(_w**2 - 2, Lambda(_a, _a/(-_a + x)))/2

    >>> pfda.doit()
    -sqrt(2)/(2*(x + sqrt(2))) + sqrt(2)/(2*(x - sqrt(2)))

    >>> from sympy import Dummy, Poly, Lambda, sqrt
    >>> a = Dummy("a")
    >>> pfd = (1, Poly(0, x, domain='ZZ'), [([sqrt(2),-sqrt(2)], Lambda(a, a/2), Lambda(a, -a + x), 1)])

    >>> assemble_partfrac_list(pfd)
    -sqrt(2)/(2*(x + sqrt(2))) + sqrt(2)/(2*(x - sqrt(2)))

    See Also
    ========

    apart, apart_list
    """
    # Common factor
    common = partial_list[0]

    # Polynomial part
    polypart = partial_list[1]
    pfd = polypart.as_expr()

    # Rational parts
    for r, nf, df, ex in partial_list[2]:
        if isinstance(r, Poly):
            # Assemble in case the roots are given implicitly by a polynomial
            an, nu = nf.variables, nf.expr
            ad, de = df.variables, df.expr

            # Hack to make dummies equal because Lambda created new Dummies
            de = de.subs(ad[0], an[0])
            func = Lambda(tuple(an), nu/de**ex)
            pfd += RootSum(r, func, auto=False, quadratic=False)
        else:
            # Assemble in case the roots are given explicitly by a list of algebraic numbers
            for root in r:
                pfd += nf(root)/df(root)**ex

    return common*pfd
| {
"repo_name": "kaushik94/sympy",
"path": "sympy/polys/partfrac.py",
"copies": "2",
"size": "14755",
"license": "bsd-3-clause",
"hash": 8209638807654570000,
"line_mean": 28.6285140562,
"line_max": 104,
"alpha_frac": 0.5609623856,
"autogenerated": false,
"ratio": 3.3942949160340463,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9952514179151322,
"avg_score": 0.0005486244965449919,
"num_lines": 498
} |
"""Algorithms for partial fraction decomposition of rational functions. """
from __future__ import print_function, division
from sympy.polys import Poly, RootSum, cancel, factor
from sympy.polys.polytools import parallel_poly_from_expr
from sympy.polys.polyoptions import allowed_flags, set_defaults
from sympy.polys.polyerrors import PolynomialError
from sympy.core import S, Add, sympify, Function, Lambda, Dummy, Expr
from sympy.core.basic import preorder_traversal
from sympy.utilities import numbered_symbols, take, xthreaded, public
from sympy.core.compatibility import xrange
@xthreaded
@public
def apart(f, x=None, full=False, **options):
    """
    Compute partial fraction decomposition of a rational function.

    Given a rational function ``f`` compute partial fraction decomposition
    of ``f``. Two algorithms are available: one is based on undetermined
    coefficients method and the other is Bronstein's full partial fraction
    decomposition algorithm.

    Examples
    ========

    >>> from sympy.polys.partfrac import apart
    >>> from sympy.abc import x, y

    By default, using the undetermined coefficients method:

    >>> apart(y/(x + 2)/(x + 1), x)
    -y/(x + 2) + y/(x + 1)

    You can choose Bronstein's algorithm by setting ``full=True``:

    >>> apart(y/(x**2 + x + 1), x)
    y/(x**2 + x + 1)
    >>> apart(y/(x**2 + x + 1), x, full=True)
    RootSum(_w**2 + _w + 1, Lambda(_a, (-2*_a*y/3 - y/3)/(-_a + x)))

    See Also
    ========

    apart_list, assemble_partfrac_list
    """
    allowed_flags(options, [])

    f = sympify(f)

    if f.is_Atom:
        return f
    else:
        P, Q = f.as_numer_denom()

    # Keep a pristine copy for the recursive calls below, since
    # ``set_defaults`` returns an options dict augmented with defaults.
    _options = options.copy()
    options = set_defaults(options, extension=True)
    try:
        (P, Q), opt = parallel_poly_from_expr((P, Q), x, **options)
    except PolynomialError as msg:
        if f.is_commutative:
            raise PolynomialError(msg)
        # Non-commutative input: decompose the commutative pieces and
        # leave the non-commutative structure untouched.
        if f.is_Mul:
            c, nc = f.args_cnc(split_1=False)
            nc = f.func(*[apart(i, x=x, full=full, **_options) for i in nc])
            if c:
                c = apart(f.func._from_args(c), x=x, full=full, **_options)
                return c*nc
            else:
                return nc
        elif f.is_Add:
            c = []
            nc = []
            for i in f.args:
                if i.is_commutative:
                    c.append(i)
                else:
                    try:
                        nc.append(apart(i, x=x, full=full, **_options))
                    except NotImplementedError:
                        # Best effort: keep terms that cannot be decomposed.
                        nc.append(i)
            return apart(f.func(*c), x=x, full=full, **_options) + f.func(*nc)
        else:
            # Fall back: decompose every decomposable subexpression in place.
            reps = []
            pot = preorder_traversal(f)
            next(pot)
            for e in pot:
                try:
                    reps.append((e, apart(e, x=x, full=full, **_options)))
                    pot.skip()  # this was handled successfully
                except NotImplementedError:
                    pass
            return f.xreplace(dict(reps))

    if P.is_multivariate:
        # Cancellation may eliminate the extra generators; retry once if so.
        fc = f.cancel()
        if fc != f:
            return apart(fc, x=x, full=full, **_options)

        raise NotImplementedError(
            "multivariate partial fraction decomposition")

    common, P, Q = P.cancel(Q)

    poly, P = P.div(Q, auto=True)
    P, Q = P.rat_clear_denoms(Q)

    if Q.degree() <= 1:
        # Linear denominator: nothing left to decompose.
        partial = P/Q
    else:
        if not full:
            partial = apart_undetermined_coeffs(P, Q)
        else:
            partial = apart_full_decomposition(P, Q)

    terms = S.Zero

    for term in Add.make_args(partial):
        if term.has(RootSum):
            terms += term
        else:
            terms += factor(term)

    return common*(poly.as_expr() + terms)
def apart_undetermined_coeffs(P, Q):
    """Partial fractions via method of undetermined coefficients. """
    gensym = numbered_symbols(cls=Dummy)
    pieces, unknowns = [], []

    _, factorization = Q.factor_list()

    # One batch of fresh coefficient symbols per factor power.
    for fac, mult in factorization:
        deg, cofactor = fac.degree(), Q
        for power in xrange(1, mult + 1):
            batch, cofactor = take(gensym, deg), cofactor.quo(fac)
            pieces.append((batch, cofactor, fac, power))
            unknowns.extend(batch)

    # Work over the ground domain extended by the unknown coefficients.
    dom = Q.get_domain().inject(*unknowns)
    F = Poly(0, Q.gen, domain=dom)

    # Recombine the candidate fractions into a single symbolic numerator.
    for idx, (batch, cofactor, fac, power) in enumerate(pieces):
        numer = Poly(batch, Q.gen, domain=dom)
        pieces[idx] = (numer, fac, power)
        cofactor = cofactor.set_domain(dom)
        F += numer*cofactor

    system, result = [], S(0)

    # Equate coefficients of the recombined numerator with those of P.
    for (exp,), coeff in F.terms():
        system.append(coeff - P.nth(exp))

    from sympy.solvers import solve
    solution = solve(system, unknowns)

    # Substitute the solved coefficients back into each fraction.
    for numer, fac, power in pieces:
        numer = numer.as_expr().subs(solution)
        result += numer/fac.as_expr()**power

    return result
def apart_full_decomposition(P, Q):
    """
    Bronstein's full partial fraction decomposition algorithm.

    Given a univariate rational function ``f``, performing only GCD
    operations over the algebraic closure of the initial ground domain
    of definition, compute full partial fraction decomposition with
    fractions having linear denominators.

    Note that no factorization of the initial denominator of ``f`` is
    performed. The final decomposition is formed in terms of a sum of
    :class:`RootSum` instances.

    References
    ==========

    1. [Bronstein93]_
    """
    # Build the structured decomposition first, then flatten it back
    # into a plain expression.
    structured = apart_list(P/Q, P.gens[0])
    return assemble_partfrac_list(structured)
@public
def apart_list(f, x=None, dummies=None, **options):
    """
    Compute partial fraction decomposition of a rational function
    and return the result in structured form.

    Given a rational function ``f`` compute the partial fraction decomposition
    of ``f``. Only Bronstein's full partial fraction decomposition algorithm
    is supported by this method. The return value is highly structured and
    perfectly suited for further algorithmic treatment rather than being
    human-readable. The function returns a tuple holding three elements:

    * The first item is the common coefficient, free of the variable `x` used
      for decomposition. (It is an element of the base field `K`.)

    * The second item is the polynomial part of the decomposition. This can be
      the zero polynomial. (It is an element of `K[x]`.)

    * The third part itself is a list of quadruples. Each quadruple
      has the following elements in this order:

      - The (not necessarily irreducible) polynomial `D` whose roots `w_i` appear
        in the linear denominator of a bunch of related fraction terms. (This item
        can also be a list of explicit roots. However, at the moment ``apart_list``
        never returns a result this way, but the related ``assemble_partfrac_list``
        function accepts this format as input.)

      - The numerator of the fraction, written as a function of the root `w`

      - The linear denominator of the fraction *excluding its power exponent*,
        written as a function of the root `w`.

      - The power to which the denominator has to be raised.

    On can always rebuild a plain expression by using the function ``assemble_partfrac_list``.

    Examples
    ========

    A first example:

    >>> from sympy.polys.partfrac import apart_list, assemble_partfrac_list
    >>> from sympy.abc import x, t

    >>> f = (2*x**3 - 2*x) / (x**2 - 2*x + 1)
    >>> pfd = apart_list(f)
    >>> pfd
    (1,
    Poly(2*x + 4, x, domain='ZZ'),
    [(Poly(_w - 1, _w, domain='ZZ'), Lambda(_a, 4), Lambda(_a, -_a + x), 1)])

    >>> assemble_partfrac_list(pfd)
    2*x + 4 + 4/(x - 1)

    Second example:

    >>> f = (-2*x - 2*x**2) / (3*x**2 - 6*x)
    >>> pfd = apart_list(f)
    >>> pfd
    (-1,
    Poly(2/3, x, domain='QQ'),
    [(Poly(_w - 2, _w, domain='ZZ'), Lambda(_a, 2), Lambda(_a, -_a + x), 1)])

    >>> assemble_partfrac_list(pfd)
    -2/3 - 2/(x - 2)

    Another example, showing symbolic parameters:

    >>> pfd = apart_list(t/(x**2 + x + t), x)
    >>> pfd
    (1,
    Poly(0, x, domain='ZZ[t]'),
    [(Poly(_w**2 + _w + t, _w, domain='ZZ[t]'),
    Lambda(_a, -2*_a*t/(4*t - 1) - t/(4*t - 1)),
    Lambda(_a, -_a + x),
    1)])

    >>> assemble_partfrac_list(pfd)
    RootSum(_w**2 + _w + t, Lambda(_a, (-2*_a*t/(4*t - 1) - t/(4*t - 1))/(-_a + x)))

    This example is taken from Bronstein's original paper:

    >>> f = 36 / (x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2)
    >>> pfd = apart_list(f)
    >>> pfd
    (1,
    Poly(0, x, domain='ZZ'),
    [(Poly(_w - 2, _w, domain='ZZ'), Lambda(_a, 4), Lambda(_a, -_a + x), 1),
    (Poly(_w**2 - 1, _w, domain='ZZ'), Lambda(_a, -3*_a - 6), Lambda(_a, -_a + x), 2),
    (Poly(_w + 1, _w, domain='ZZ'), Lambda(_a, -4), Lambda(_a, -_a + x), 1)])

    >>> assemble_partfrac_list(pfd)
    -4/(x + 1) - 3/(x + 1)**2 - 9/(x - 1)**2 + 4/(x - 2)

    See also
    ========

    apart, assemble_partfrac_list

    References
    ==========

    1. [Bronstein93]_
    """
    allowed_flags(options, [])

    f = sympify(f)

    if f.is_Atom:
        return f
    else:
        P, Q = f.as_numer_denom()

    options = set_defaults(options, extension=True)
    (P, Q), opt = parallel_poly_from_expr((P, Q), x, **options)

    if P.is_multivariate:
        raise NotImplementedError(
            "multivariate partial fraction decomposition")

    # Split off the content, the polynomial quotient and clear denominators
    # so that the remaining P/Q is a proper rational function.
    common, P, Q = P.cancel(Q)

    poly, P = P.div(Q, auto=True)
    P, Q = P.rat_clear_denoms(Q)

    polypart = poly

    if dummies is None:
        # Default: an endless supply of one and the same Dummy("w"), so all
        # returned root polynomials share a single generator symbol.
        def dummies(name):
            d = Dummy(name)
            while True:
                yield d

        dummies = dummies("w")

    rationalpart = apart_list_full_decomposition(P, Q, dummies)

    return (common, polypart, rationalpart)
def apart_list_full_decomposition(P, Q, dummygen):
    """
    Bronstein's full partial fraction decomposition algorithm.

    Given a univariate rational function ``f``, performing only GCD
    operations over the algebraic closure of the initial ground domain
    of definition, compute full partial fraction decomposition with
    fractions having linear denominators.

    Note that no factorization of the initial denominator of ``f`` is
    performed. The final decomposition is formed in terms of a sum of
    :class:`RootSum` instances.

    ``dummygen`` is an iterator yielding the symbols used as generators of
    the returned root polynomials.

    References
    ==========

    1. [Bronstein93]_
    """
    f, x, U = P/Q, P.gen, []

    u = Function('u')(x)
    a = Dummy('a')

    partial = []

    # Iterate over the square-free parts d of the denominator, where d**n
    # divides Q.
    for d, n in Q.sqf_list_include(all=True):
        b = d.as_expr()
        U += [ u.diff(x, n - 1) ]

        h = cancel(f*b**n) / u**n

        H, subs = [h], []

        # H[j] holds the j-th derivative of h divided by j!.
        for j in range(1, n):
            H += [ H[-1].diff(x) / j ]

        for j in range(1, n + 1):
            subs += [ (U[j - 1], b.diff(x, j) / j) ]

        # One bunch of fractions per denominator power n - j.
        for j in range(0, n):
            P, Q = cancel(H[j]).as_numer_denom()

            for i in range(0, j + 1):
                P = P.subs(*subs[j - i])

            Q = Q.subs(*subs[0])

            P = Poly(P, x)
            Q = Poly(Q, x)

            G = P.gcd(d)
            D = d.quo(G)

            # Invert Q modulo D and reduce the numerator accordingly.
            B, g = Q.half_gcdex(D)
            b = (P * B.quo(g)).rem(D)

            Dw = D.subs(x, next(dummygen))
            numer = Lambda(a, b.as_expr().subs(x, a))
            denom = Lambda(a, (x - a))
            exponent = n-j

            partial.append((Dw, numer, denom, exponent))

    return partial
@public
def assemble_partfrac_list(partial_list):
    r"""Reassemble a full partial fraction decomposition
    from a structured result obtained by the function ``apart_list``.

    Examples
    ========

    This example is taken from Bronstein's original paper:

    >>> from sympy.polys.partfrac import apart_list, assemble_partfrac_list
    >>> from sympy.abc import x, y

    >>> f = 36 / (x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2)
    >>> pfd = apart_list(f)
    >>> pfd
    (1,
    Poly(0, x, domain='ZZ'),
    [(Poly(_w - 2, _w, domain='ZZ'), Lambda(_a, 4), Lambda(_a, -_a + x), 1),
    (Poly(_w**2 - 1, _w, domain='ZZ'), Lambda(_a, -3*_a - 6), Lambda(_a, -_a + x), 2),
    (Poly(_w + 1, _w, domain='ZZ'), Lambda(_a, -4), Lambda(_a, -_a + x), 1)])

    >>> assemble_partfrac_list(pfd)
    -4/(x + 1) - 3/(x + 1)**2 - 9/(x - 1)**2 + 4/(x - 2)

    If we happen to know some roots we can provide them easily inside the structure:

    >>> pfd = apart_list(2/(x**2-2))
    >>> pfd
    (1,
    Poly(0, x, domain='ZZ'),
    [(Poly(_w**2 - 2, _w, domain='ZZ'),
    Lambda(_a, _a/2),
    Lambda(_a, -_a + x),
    1)])

    >>> pfda = assemble_partfrac_list(pfd)
    >>> pfda
    RootSum(_w**2 - 2, Lambda(_a, _a/(-_a + x)))/2

    >>> pfda.doit()
    -sqrt(2)/(2*(x + sqrt(2))) + sqrt(2)/(2*(x - sqrt(2)))

    >>> from sympy import Dummy, Poly, Lambda, sqrt
    >>> a = Dummy("a")
    >>> pfd = (1, Poly(0, x, domain='ZZ'), [([sqrt(2),-sqrt(2)], Lambda(a, a/2), Lambda(a, -a + x), 1)])

    >>> assemble_partfrac_list(pfd)
    -sqrt(2)/(2*(x + sqrt(2))) + sqrt(2)/(2*(x - sqrt(2)))

    See also
    ========

    apart, apart_list
    """
    # Common factor
    common = partial_list[0]

    # Polynomial part
    polypart = partial_list[1]
    pfd = polypart.as_expr()

    # Rational parts
    for r, nf, df, ex in partial_list[2]:
        if isinstance(r, Poly):
            # Assemble in case the roots are given implicitly by a polynomial
            an, nu = nf.variables, nf.expr
            ad, de = df.variables, df.expr
            # Hack to make dummies equal because Lambda created new Dummies
            de = de.subs(ad[0], an[0])
            func = Lambda(an, nu/de**ex)
            pfd += RootSum(r, func, auto=False, quadratic=False)
        else:
            # Assemble in case the roots are given explicitly by a list of algebraic numbers
            for root in r:
                pfd += nf(root)/df(root)**ex

    return common*pfd
| {
"repo_name": "lidavidm/sympy",
"path": "sympy/polys/partfrac.py",
"copies": "9",
"size": "13809",
"license": "bsd-3-clause",
"hash": -4670793405576447000,
"line_mean": 28.0105042017,
"line_max": 104,
"alpha_frac": 0.5534796147,
"autogenerated": false,
"ratio": 3.368048780487805,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8421528395187805,
"avg_score": null,
"num_lines": null
} |
"""Algorithms for partial fraction decomposition of rational functions. """
from sympy.polys import Poly, RootSum, cancel, factor
from sympy.polys.polytools import parallel_poly_from_expr
from sympy.polys.polyoptions import allowed_flags, set_defaults
from sympy.core import S, Add, sympify, Function, Lambda, Dummy
from sympy.utilities import numbered_symbols, take, threaded
@threaded
def apart(f, x=None, full=False, **options):
    """
    Compute partial fraction decomposition of a rational function.

    Given a rational function ``f`` compute partial fraction decomposition
    of ``f``. Two algorithms are available: one is based on undetermined
    coefficients method and the other is Bronstein's full partial fraction
    decomposition algorithm.

    Examples
    ========

    >>> from sympy.polys.partfrac import apart
    >>> from sympy.abc import x, y

    By default, using the undetermined coefficients method:

    >>> apart(y/(x + 2)/(x + 1), x)
    -y/(x + 2) + y/(x + 1)

    You can choose Bronstein's algorithm by setting ``full=True``:

    >>> apart(y/(x**2 + x + 1), x)
    y/(x**2 + x + 1)
    >>> apart(y/(x**2 + x + 1), x, full=True)
    RootSum(_w**2 + _w + 1, Lambda(_a, (-2*_a*y/3 - y/3)/(-_a + x)))

    See Also
    ========

    apart_list, assemble_partfrac_list
    """
    allowed_flags(options, [])

    f = sympify(f)

    if f.is_Atom:
        return f
    else:
        P, Q = f.as_numer_denom()

    options = set_defaults(options, extension=True)
    (P, Q), opt = parallel_poly_from_expr((P, Q), x, **options)

    if P.is_multivariate:
        raise NotImplementedError(
            "multivariate partial fraction decomposition")

    # Split off the content and the polynomial quotient, then clear the
    # denominators so P/Q below is a proper rational function.
    common, P, Q = P.cancel(Q)

    poly, P = P.div(Q, auto=True)
    P, Q = P.rat_clear_denoms(Q)

    if Q.degree() <= 1:
        # Linear denominator: nothing left to decompose.
        partial = P/Q
    else:
        if not full:
            partial = apart_undetermined_coeffs(P, Q)
        else:
            partial = apart_full_decomposition(P, Q)

    terms = S.Zero

    for term in Add.make_args(partial):
        if term.has(RootSum):
            terms += term
        else:
            terms += factor(term)

    return common*(poly.as_expr() + terms)
def apart_undetermined_coeffs(P, Q):
    """Partial fractions via method of undetermined coefficients. """
    gensym = numbered_symbols(cls=Dummy)
    pieces, unknowns = [], []

    _, factorization = Q.factor_list()

    # Allocate a batch of fresh coefficient symbols per factor power.
    for fac, mult in factorization:
        deg, cofactor = fac.degree(), Q
        for power in xrange(1, mult + 1):
            batch, cofactor = take(gensym, deg), cofactor.quo(fac)
            pieces.append((batch, cofactor, fac, power))
            unknowns.extend(batch)

    # Extend the ground domain by the unknown coefficients.
    dom = Q.get_domain().inject(*unknowns)
    F = Poly(0, Q.gen, domain=dom)

    # Recombine all candidate fractions over the common denominator.
    for idx, (batch, cofactor, fac, power) in enumerate(pieces):
        numer = Poly(batch, Q.gen, domain=dom)
        pieces[idx] = (numer, fac, power)
        cofactor = cofactor.set_domain(dom)
        F += numer*cofactor

    system, result = [], S(0)

    # Coefficient-wise comparison of F with the target numerator P.
    for (exp,), coeff in F.terms():
        system.append(coeff - P.nth(exp))

    from sympy.solvers import solve
    solution = solve(system, unknowns)

    for numer, fac, power in pieces:
        numer = numer.as_expr().subs(solution)
        result += numer/fac.as_expr()**power

    return result
def apart_full_decomposition(P, Q):
    """
    Bronstein's full partial fraction decomposition algorithm.

    Given a univariate rational function ``f``, performing only GCD
    operations over the algebraic closure of the initial ground domain
    of definition, compute full partial fraction decomposition with
    fractions having linear denominators.

    Note that no factorization of the initial denominator of ``f`` is
    performed. The final decomposition is formed in terms of a sum of
    :class:`RootSum` instances.

    References
    ==========

    1. [Bronstein93]_
    """
    # Delegate to the structured decomposition and flatten the result.
    structured = apart_list(P/Q, P.gens[0])
    return assemble_partfrac_list(structured)
def apart_list(f, x=None, dummies=None, **options):
    """
    Compute partial fraction decomposition of a rational function
    and return the result in structured form.

    Given a rational function ``f`` compute the partial fraction decomposition
    of ``f``. Only Bronstein's full partial fraction decomposition algorithm
    is supported by this method. The return value is highly structured and
    perfectly suited for further algorithmic treatment rather than being
    human-readable. The function returns a tuple holding three elements:

    * The first item is the common coefficient, free of the variable `x` used
      for decomposition. (It is an element of the base field `K`.)

    * The second item is the polynomial part of the decomposition. This can be
      the zero polynomial. (It is an element of `K[x]`.)

    * The third part itself is a list of quadruples. Each quadruple
      has the following elements in this order:

      - The (not necessarily irreducible) polynomial `D` whose roots `w_i` appear
        in the linear denominator of a bunch of related fraction terms. (This item
        can also be a list of explicit roots. However, at the moment ``apart_list``
        never returns a result this way, but the related ``assemble_partfrac_list``
        function accepts this format as input.)

      - The numerator of the fraction, written as a function of the root `w`

      - The linear denominator of the fraction *excluding its power exponent*,
        written as a function of the root `w`.

      - The power to which the denominator has to be raised.

    On can always rebuild a plain expression by using the function ``assemble_partfrac_list``.

    Examples
    ========

    A first example:

    >>> from sympy.polys.partfrac import apart_list, assemble_partfrac_list
    >>> from sympy.abc import x, t

    >>> f = (2*x**3 - 2*x) / (x**2 - 2*x + 1)
    >>> pfd = apart_list(f)
    >>> pfd
    (1,
    Poly(2*x + 4, x, domain='ZZ'),
    [(Poly(_w - 1, _w, domain='ZZ'), Lambda(_a, 4), Lambda(_a, -_a + x), 1)])

    >>> assemble_partfrac_list(pfd)
    2*x + 4 + 4/(x - 1)

    Second example:

    >>> f = (-2*x - 2*x**2) / (3*x**2 - 6*x)
    >>> pfd = apart_list(f)
    >>> pfd
    (-1,
    Poly(2/3, x, domain='QQ'),
    [(Poly(_w - 2, _w, domain='ZZ'), Lambda(_a, 2), Lambda(_a, -_a + x), 1)])

    >>> assemble_partfrac_list(pfd)
    -2/3 - 2/(x - 2)

    Another example, showing symbolic parameters:

    >>> pfd = apart_list(t/(x**2 + x + t), x)
    >>> pfd
    (1,
    Poly(0, x, domain='ZZ[t]'),
    [(Poly(_w**2 + _w + t, _w, domain='ZZ[t]'),
    Lambda(_a, -2*_a*t/(4*t - 1) - t/(4*t - 1)),
    Lambda(_a, -_a + x),
    1)])

    >>> assemble_partfrac_list(pfd)
    RootSum(_w**2 + _w + t, Lambda(_a, (-2*_a*t/(4*t - 1) - t/(4*t - 1))/(-_a + x)))

    This example is taken from Bronstein's original paper:

    >>> f = 36 / (x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2)
    >>> pfd = apart_list(f)
    >>> pfd
    (1,
    Poly(0, x, domain='ZZ'),
    [(Poly(_w - 2, _w, domain='ZZ'), Lambda(_a, 4), Lambda(_a, -_a + x), 1),
    (Poly(_w**2 - 1, _w, domain='ZZ'), Lambda(_a, -3*_a - 6), Lambda(_a, -_a + x), 2),
    (Poly(_w + 1, _w, domain='ZZ'), Lambda(_a, -4), Lambda(_a, -_a + x), 1)])

    >>> assemble_partfrac_list(pfd)
    -4/(x + 1) - 3/(x + 1)**2 - 9/(x - 1)**2 + 4/(x - 2)

    See also
    ========

    apart, assemble_partfrac_list

    References
    ==========

    1. [Bronstein93]_
    """
    allowed_flags(options, [])

    f = sympify(f)

    if f.is_Atom:
        return f
    else:
        P, Q = f.as_numer_denom()

    options = set_defaults(options, extension=True)
    (P, Q), opt = parallel_poly_from_expr((P, Q), x, **options)

    if P.is_multivariate:
        raise NotImplementedError(
            "multivariate partial fraction decomposition")

    # Split off the content and the polynomial quotient, then clear the
    # denominators so P/Q below is a proper rational function.
    common, P, Q = P.cancel(Q)

    poly, P = P.div(Q, auto=True)
    P, Q = P.rat_clear_denoms(Q)

    polypart = poly

    if dummies is None:
        # Default: an endless supply of one and the same Dummy("w"), so all
        # returned root polynomials share a single generator symbol.
        def dummies(name):
            d = Dummy(name)
            while True:
                yield d

        dummies = dummies("w")

    rationalpart = apart_list_full_decomposition(P, Q, dummies)

    return (common, polypart, rationalpart)
def apart_list_full_decomposition(P, Q, dummygen):
    """
    Bronstein's full partial fraction decomposition algorithm.

    Given a univariate rational function ``f``, performing only GCD
    operations over the algebraic closure of the initial ground domain
    of definition, compute full partial fraction decomposition with
    fractions having linear denominators.

    Note that no factorization of the initial denominator of ``f`` is
    performed. The final decomposition is formed in terms of a sum of
    :class:`RootSum` instances.

    ``dummygen`` is an iterator yielding the symbols used as generators of
    the returned root polynomials.

    References
    ==========

    1. [Bronstein93]_
    """
    f, x, U = P/Q, P.gen, []

    u = Function('u')(x)
    a = Dummy('a')

    partial = []

    # Iterate over the square-free parts d of the denominator, where d**n
    # divides Q.
    for d, n in Q.sqf_list_include(all=True):
        b = d.as_expr()
        U += [ u.diff(x, n - 1) ]

        h = cancel(f*b**n) / u**n

        H, subs = [h], []

        # H[j] holds the j-th derivative of h divided by j!.
        for j in range(1, n):
            H += [ H[-1].diff(x) / j ]

        for j in range(1, n + 1):
            subs += [ (U[j - 1], b.diff(x, j) / j) ]

        # One bunch of fractions per denominator power n - j.
        for j in range(0, n):
            P, Q = cancel(H[j]).as_numer_denom()

            for i in range(0, j + 1):
                P = P.subs(*subs[j - i])

            Q = Q.subs(*subs[0])

            P = Poly(P, x)
            Q = Poly(Q, x)

            G = P.gcd(d)
            D = d.quo(G)

            # Invert Q modulo D and reduce the numerator accordingly.
            B, g = Q.half_gcdex(D)
            b = (P * B.quo(g)).rem(D)

            # Use the builtin next() (available since Python 2.6) instead of
            # the generator-only ``.next()`` method: it accepts any iterator
            # and keeps this code valid under Python 3, where ``.next()``
            # was renamed to ``__next__()``.
            Dw = D.subs(x, next(dummygen))
            numer = Lambda(a, b.as_expr().subs(x, a))
            denom = Lambda(a, (x - a))
            exponent = n - j

            partial.append((Dw, numer, denom, exponent))

    return partial
def assemble_partfrac_list(partial_list):
    r"""Reassemble a full partial fraction decomposition
    from a structured result obtained by the function ``apart_list``.

    Examples
    ========

    This example is taken from Bronstein's original paper:

    >>> from sympy.polys.partfrac import apart_list, assemble_partfrac_list
    >>> from sympy.abc import x, y

    >>> f = 36 / (x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2)
    >>> pfd = apart_list(f)
    >>> pfd
    (1,
    Poly(0, x, domain='ZZ'),
    [(Poly(_w - 2, _w, domain='ZZ'), Lambda(_a, 4), Lambda(_a, -_a + x), 1),
    (Poly(_w**2 - 1, _w, domain='ZZ'), Lambda(_a, -3*_a - 6), Lambda(_a, -_a + x), 2),
    (Poly(_w + 1, _w, domain='ZZ'), Lambda(_a, -4), Lambda(_a, -_a + x), 1)])

    >>> assemble_partfrac_list(pfd)
    -4/(x + 1) - 3/(x + 1)**2 - 9/(x - 1)**2 + 4/(x - 2)

    If we happen to know some roots we can provide them easily inside the structure:

    >>> pfd = apart_list(2/(x**2-2))
    >>> pfd
    (1,
    Poly(0, x, domain='ZZ'),
    [(Poly(_w**2 - 2, _w, domain='ZZ'),
    Lambda(_a, _a/2),
    Lambda(_a, -_a + x),
    1)])

    >>> pfda = assemble_partfrac_list(pfd)
    >>> pfda
    RootSum(_w**2 - 2, Lambda(_a, _a/(-_a + x)))/2

    >>> pfda.doit()
    -sqrt(2)/(2*(x + sqrt(2))) + sqrt(2)/(2*(x - sqrt(2)))

    >>> from sympy import Dummy, Poly, Lambda, sqrt
    >>> a = Dummy("a")
    >>> pfd = (1, Poly(0, x, domain='ZZ'), [([sqrt(2),-sqrt(2)], Lambda(a, a/2), Lambda(a, -a + x), 1)])

    >>> assemble_partfrac_list(pfd)
    -sqrt(2)/(2*(x + sqrt(2))) + sqrt(2)/(2*(x - sqrt(2)))

    See also
    ========

    apart, apart_list
    """
    # Common factor
    common = partial_list[0]

    # Polynomial part
    polypart = partial_list[1]
    pfd = polypart.as_expr()

    # Rational parts
    for r, nf, df, ex in partial_list[2]:
        if isinstance(r, Poly):
            # Assemble in case the roots are given implicitly by a polynomial
            an, nu = nf.variables, nf.expr
            ad, de = df.variables, df.expr
            # Hack to make dummies equal because Lambda created new Dummies
            de = de.subs(ad[0], an[0])
            func = Lambda(an, nu/de**ex)
            pfd += RootSum(r, func, auto=False, quadratic=False)
        else:
            # Assemble in case the roots are given explicitly by a list of algebraic numbers
            for root in r:
                pfd += nf(root)/df(root)**ex

    return common*pfd
| {
"repo_name": "amitjamadagni/sympy",
"path": "sympy/polys/partfrac.py",
"copies": "2",
"size": "12158",
"license": "bsd-3-clause",
"hash": 8859705591683495000,
"line_mean": 27.4065420561,
"line_max": 104,
"alpha_frac": 0.5635795361,
"autogenerated": false,
"ratio": 3.300217155266015,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9860606263691019,
"avg_score": 0.0006380855349993068,
"num_lines": 428
} |
"""Algorithms for partial fraction decomposition of rational functions. """
from sympy.polys import Poly, RootSum, cancel, factor
from sympy.polys.polytools import parallel_poly_from_expr
from sympy.core import S, Add, sympify, Symbol, Function, Lambda, Dummy
from sympy.utilities import numbered_symbols, take, threaded
@threaded
def apart(f, x=None, full=False, **args):
    """
    Compute partial fraction decomposition of a rational function.

    Given a rational function ``f`` compute partial fraction decomposition
    of ``f``. Two algorithms are available: one is based on undetermined
    coefficients method and the other is Bronstein's full partial fraction
    decomposition algorithm.

    **Examples**

    >>> from sympy.polys.partfrac import apart
    >>> from sympy.abc import x, y

    >>> apart(y/(x + 2)/(x + 1), x)
    -y/(x + 2) + y/(x + 1)

    """
    f = sympify(f)

    if f.is_Atom:
        return f
    else:
        P, Q = f.as_numer_denom()

    (P, Q), opt = parallel_poly_from_expr((P, Q), x, **args)

    if P.is_multivariate:
        raise NotImplementedError("multivariate partial fraction decomposition")

    # Split off the content and the polynomial quotient, then clear the
    # denominators so P/Q below is a proper rational function.
    common, P, Q = P.cancel(Q)

    poly, P = P.div(Q, auto=True)
    P, Q = P.rat_clear_denoms(Q)

    if Q.degree() <= 1:
        # Linear denominator: nothing left to decompose.
        partial = P/Q
    else:
        if not full:
            partial = apart_undetermined_coeffs(P, Q)
        else:
            partial = apart_full_decomposition(P, Q)

    terms = S.Zero

    for term in Add.make_args(partial):
        if term.has(RootSum):
            terms += term
        else:
            terms += factor(term)

    return common*(poly.as_expr() + terms)
def apart_undetermined_coeffs(P, Q):
    """Partial fractions via method of undetermined coefficients. """
    X = numbered_symbols(cls=Dummy)
    partial, symbols = [], []

    # One batch of fresh coefficient symbols per factor power.
    _, factors = Q.factor_list()

    for f, k in factors:
        n, q = f.degree(), Q
        for i in xrange(1, k+1):
            coeffs, q = take(X, n), q.quo(f)
            partial.append((coeffs, q, f, i))
            symbols.extend(coeffs)

    # Work over the ground domain extended by the unknown coefficients.
    dom = Q.get_domain().inject(*symbols)
    F = Poly(0, Q.gen, domain=dom)

    for i, (coeffs, q, f, k) in enumerate(partial):
        h = Poly(coeffs, Q.gen, domain=dom)
        partial[i] = (h, f, k)
        q = q.set_domain(dom)
        F += h*q

    system, result = [], S(0)

    # Equate coefficients of the recombined numerator with those of P.
    for (k,), coeff in F.terms():
        system.append(coeff - P.nth(k))

    from sympy.solvers import solve
    solution = solve(system, symbols)

    for h, f, k in partial:
        h = h.as_expr().subs(solution)
        result += h/f.as_expr()**k

    return result
def apart_full_decomposition(P, Q):
    """
    Bronstein's full partial fraction decomposition algorithm.

    Given a univariate rational function ``f``, performing only GCD
    operations over the algebraic closure of the initial ground domain
    of definition, compute full partial fraction decomposition with
    fractions having linear denominators.

    Note that no factorization of the initial denominator of ``f`` is
    performed. The final decomposition is formed in terms of a sum of
    :class:`RootSum` instances.

    **References**

    1. [Bronstein93]_
    """
    f, x, U = P/Q, P.gen, []

    u = Function('u')(x)
    a = Dummy('a')

    partial = S(0)

    # Iterate over the square-free parts d of the denominator, where d**n
    # divides Q.
    for d, n in Q.sqf_list_include(all=True):
        b = d.as_expr()
        U += [ u.diff(x, n-1) ]

        h = cancel(f*b**n) / u**n

        H, subs = [h], []

        # H[j] holds the j-th derivative of h divided by j!.
        for j in range(1, n):
            H += [ H[-1].diff(x) / j ]

        for j in range(1, n+1):
            subs += [ (U[j-1], b.diff(x, j) / j) ]

        # One RootSum per denominator power n - j.
        for j in range(0, n):
            P, Q = cancel(H[j]).as_numer_denom()

            for i in range(0, j+1):
                P = P.subs(*subs[j-i])

            Q = Q.subs(*subs[0])

            P = Poly(P, x)
            Q = Poly(Q, x)

            G = P.gcd(d)
            D = d.quo(G)

            # Invert Q modulo D and reduce the numerator accordingly.
            B, g = Q.half_gcdex(D)
            b = (P * B.quo(g)).rem(D)

            numer = b.as_expr()
            denom = (x-a)**(n-j)

            func = Lambda(a, numer.subs(x, a)/denom)
            partial += RootSum(D, func, auto=False)

    return partial
| {
"repo_name": "Cuuuurzel/KiPyCalc",
"path": "sympy_old/polys/partfrac.py",
"copies": "2",
"size": "4192",
"license": "mit",
"hash": 8503107723532895000,
"line_mean": 24.717791411,
"line_max": 80,
"alpha_frac": 0.5591603053,
"autogenerated": false,
"ratio": 3.4109031733116355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49700634786116354,
"avg_score": null,
"num_lines": null
} |
"""Algorithms for partial fraction decomposition of rational functions."""
import itertools
from ..core import Add, Dummy, Function, Integer, Lambda, preorder_traversal
from ..core.sympify import sympify
from ..utilities import numbered_symbols
from . import Poly, RootSum, cancel, factor
from .polyerrors import PolynomialError
from .polyoptions import allowed_flags, set_defaults
from .polytools import parallel_poly_from_expr
__all__ = 'apart', 'apart_list', 'assemble_partfrac_list'
def apart(f, x=None, full=False, **options):
    """
    Compute partial fraction decomposition of a rational function.

    Given a rational function ``f``, computes the partial fraction
    decomposition of ``f``. Two algorithms are available: One is based on the
    undertermined coefficients method, the other is Bronstein's full partial
    fraction decomposition algorithm.

    The undetermined coefficients method (selected by ``full=False``) uses
    polynomial factorization (and therefore accepts the same options as
    factor) for the denominator. Per default it works over the rational
    numbers, therefore decomposition of denominators with non-rational roots
    (e.g. irrational, complex roots) is not supported by default (see options
    of factor).

    Bronstein's algorithm can be selected by using ``full=True`` and allows a
    decomposition of denominators with non-rational roots. A human-readable
    result can be obtained via ``doit()`` (see examples below).

    Examples
    ========

    By default, using the undetermined coefficients method:

    >>> apart(y/(x + 2)/(x + 1), x)
    -y/(x + 2) + y/(x + 1)

    The undetermined coefficients method does not provide a result when the
    denominators roots are not rational:

    >>> apart(y/(x**2 + x + 1), x)
    y/(x**2 + x + 1)

    You can choose Bronstein's algorithm by setting ``full=True``:

    >>> apart(y/(x**2 + x + 1), x, full=True)
    RootSum(_w**2 + _w + 1, Lambda(_a, (-2*y*_a/3 - y/3)/(x - _a)))

    Calling ``doit()`` yields a human-readable result:

    >>> apart(y/(x**2 + x + 1), x, full=True).doit()
    (-y/3 - 2*y*(-1/2 - sqrt(3)*I/2)/3)/(x + 1/2 + sqrt(3)*I/2) + (-y/3 -
    2*y*(-1/2 + sqrt(3)*I/2)/3)/(x + 1/2 - sqrt(3)*I/2)

    See Also
    ========

    apart_list, assemble_partfrac_list
    """
    allowed_flags(options, [])

    f = sympify(f)

    if f.is_Atom:
        return f
    else:
        P, Q = f.as_numer_denom()

    # Keep a pristine copy for the recursive calls below, since
    # ``set_defaults`` returns an options dict augmented with defaults.
    _options = options.copy()
    options = set_defaults(options, extension=True)
    try:
        (P, Q), opt = parallel_poly_from_expr((P, Q), x, **options)
    except PolynomialError as msg:
        if f.is_commutative:
            raise PolynomialError(msg)
        # Non-commutative input: decompose the commutative pieces and
        # leave the non-commutative structure untouched.
        if f.is_Mul:
            c, nc = f.args_cnc(split_1=False)
            nc = f.func(*[apart(i, x=x, full=full, **_options) for i in nc])
            if c:
                c = apart(f.func._from_args(c), x=x, full=full, **_options)
                return c*nc
            else:
                return nc
        elif f.is_Add:
            c = []
            nc = []
            for i in f.args:
                if i.is_commutative:
                    c.append(i)
                else:
                    nc.append(apart(i, x=x, full=full, **_options))
            return apart(f.func(*c), x=x, full=full, **_options) + f.func(*nc)
        else:
            # Fall back: decompose every subexpression in place.
            reps = []
            pot = preorder_traversal(f)
            next(pot)
            for e in pot:
                reps.append((e, apart(e, x=x, full=full, **_options)))
                pot.skip()  # this was handled successfully
            return f.xreplace(dict(reps))

    if P.is_multivariate:
        # Cancellation may eliminate the extra generators; retry once if so.
        fc = f.cancel()
        if fc != f:
            return apart(fc, x=x, full=full, **_options)

        raise NotImplementedError(
            'multivariate partial fraction decomposition')

    common, P, Q = P.cancel(Q)

    poly, P = P.div(Q, auto=True)
    P, Q = P.rat_clear_denoms(Q)

    if Q.degree() <= 1:
        # Linear denominator: nothing left to decompose.
        partial = P/Q
    else:
        if not full:
            partial = apart_undetermined_coeffs(P, Q)
        else:
            partial = apart_full_decomposition(P, Q)

    terms = Integer(0)

    for term in Add.make_args(partial):
        if term.has(RootSum):
            terms += term
        else:
            terms += factor(term)

    return common*(poly.as_expr() + terms)
def apart_undetermined_coeffs(P, Q):
    """Partial fractions via method of undetermined coefficients."""
    gensym = numbered_symbols(cls=Dummy)
    pieces, unknowns = [], []

    _, factorization = Q.factor_list()

    # Allocate a batch of fresh coefficient symbols per factor power.
    for fac, mult in factorization:
        deg, cofactor = fac.degree(), Q
        for power in range(1, mult + 1):
            batch = list(itertools.islice(gensym, deg))
            cofactor = cofactor.quo(fac)
            pieces.append((batch, cofactor, fac, power))
            unknowns.extend(batch)

    # Extend the ground domain by the unknown coefficients.
    dom = Q.domain.inject(*unknowns)
    F = Integer(0).as_poly(Q.gen, domain=dom)

    # Recombine all candidate fractions over the common denominator.
    for idx, (batch, cofactor, fac, power) in enumerate(pieces):
        numer = Poly(batch, Q.gen, domain=dom)
        pieces[idx] = (numer, fac, power)
        cofactor = cofactor.set_domain(dom)
        F += numer*cofactor

    system, result = [], Integer(0)

    # Coefficient-wise comparison of F with the target numerator P.
    for (exp,), coeff in F.terms():
        system.append(coeff - P.coeff_monomial((exp,)))

    from ..solvers import solve
    solution = solve(system, unknowns)[0]

    for numer, fac, power in pieces:
        numer = numer.as_expr().subs(solution)
        result += numer/fac.as_expr()**power

    return result
def apart_full_decomposition(P, Q):
    """
    Bronstein's full partial fraction decomposition algorithm.

    Given a univariate rational function ``f``, performing only GCD
    operations over the algebraic closure of the initial ground domain
    of definition, compute full partial fraction decomposition with
    fractions having linear denominators.

    Note that no factorization of the initial denominator of ``f`` is
    performed. The final decomposition is formed in terms of a sum of
    :class:`RootSum` instances.

    References
    ==========

    * :cite:`Bronstein1993partial`
    """
    # Build the structured decomposition first, then flatten it back
    # into a plain expression.
    structured = apart_list(P/Q, P.gens[0])
    return assemble_partfrac_list(structured)
def apart_list(f, x=None, dummies=None, **options):
    """
    Compute partial fraction decomposition of a rational function
    and return the result in structured form.

    Given a rational function ``f`` compute the partial fraction decomposition
    of ``f``. Only Bronstein's full partial fraction decomposition algorithm
    is supported by this method. The return value is highly structured and
    perfectly suited for further algorithmic treatment rather than being
    human-readable. The function returns a tuple holding three elements:

    * The first item is the common coefficient, free of the variable `x` used
      for decomposition. (It is an element of the base field `K`.)

    * The second item is the polynomial part of the decomposition. This can be
      the zero polynomial. (It is an element of `K[x]`.)

    * The third part itself is a list of quadruples. Each quadruple
      has the following elements in this order:

      - The (not necessarily irreducible) polynomial `D` whose roots `w_i` appear
        in the linear denominator of a bunch of related fraction terms. (This item
        can also be a list of explicit roots. However, at the moment ``apart_list``
        never returns a result this way, but the related ``assemble_partfrac_list``
        function accepts this format as input.)

      - The numerator of the fraction, written as a function of the root `w`

      - The linear denominator of the fraction *excluding its power exponent*,
        written as a function of the root `w`.

      - The power to which the denominator has to be raised.

    One can always rebuild a plain expression by using the function ``assemble_partfrac_list``.

    Examples
    ========

    A first example:

    >>> f = (2*x**3 - 2*x) / (x**2 - 2*x + 1)
    >>> pfd = apart_list(f)
    >>> pfd
    (1,
    Poly(2*x + 4, x, domain='ZZ'),
    [(Poly(_w - 1, _w, domain='ZZ'), Lambda(_a, 4), Lambda(_a, x - _a), 1)])

    >>> assemble_partfrac_list(pfd)
    2*x + 4 + 4/(x - 1)

    Second example:

    >>> f = (-2*x - 2*x**2) / (3*x**2 - 6*x)
    >>> pfd = apart_list(f)
    >>> pfd
    (-1,
    Poly(2/3, x, domain='QQ'),
    [(Poly(_w - 2, _w, domain='ZZ'), Lambda(_a, 2), Lambda(_a, x - _a), 1)])

    >>> assemble_partfrac_list(pfd)
    -2/3 - 2/(x - 2)

    Another example, showing symbolic parameters:

    >>> pfd = apart_list(t/(x**2 + x + t), x)
    >>> pfd
    (1,
    Poly(0, x, domain='ZZ[t]'),
    [(Poly(_w**2 + _w + t, _w, domain='ZZ[t]'),
    Lambda(_a, -2*t*_a/(4*t - 1) - t/(4*t - 1)),
    Lambda(_a, x - _a), 1)])

    >>> assemble_partfrac_list(pfd)
    RootSum(t + _w**2 + _w, Lambda(_a, (-2*t*_a/(4*t - 1) - t/(4*t - 1))/(x - _a)))

    This example is taken from Bronstein's original paper:

    >>> f = 36 / (x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2)
    >>> pfd = apart_list(f)
    >>> pfd
    (1,
    Poly(0, x, domain='ZZ'),
    [(Poly(_w - 2, _w, domain='ZZ'), Lambda(_a, 4), Lambda(_a, x - _a), 1),
    (Poly(_w**2 - 1, _w, domain='ZZ'), Lambda(_a, -3*_a - 6), Lambda(_a, x - _a), 2),
    (Poly(_w + 1, _w, domain='ZZ'), Lambda(_a, -4), Lambda(_a, x - _a), 1)])

    >>> assemble_partfrac_list(pfd)
    -4/(x + 1) - 3/(x + 1)**2 - 9/(x - 1)**2 + 4/(x - 2)

    See also
    ========

    apart, assemble_partfrac_list

    References
    ==========

    * :cite:`Bronstein1993partial`
    """
    allowed_flags(options, [])

    f = sympify(f)
    if f.is_Atom:
        return f

    P, Q = f.as_numer_denom()

    options = set_defaults(options, extension=True)
    (P, Q), _ = parallel_poly_from_expr((P, Q), x, **options)

    if P.is_multivariate:
        raise NotImplementedError('multivariate partial fraction decomposition')

    # Pull out content common to numerator and denominator, then split off
    # the polynomial part of the quotient before decomposing the remainder.
    common, P, Q = P.cancel(Q)
    polypart, P = P.div(Q, auto=True)
    P, Q = P.rat_clear_denoms(Q)

    if dummies is None:
        # Default root-name source: reuse one fresh Dummy for every factor.
        def _dummy_source(name):
            w = Dummy(name)
            while True:
                yield w

        dummies = _dummy_source('w')

    rationalpart = apart_list_full_decomposition(P, Q, dummies)

    return common, polypart, rationalpart
def apart_list_full_decomposition(P, Q, dummygen):
    """
    Bronstein's full partial fraction decomposition algorithm.

    Given a univariate rational function ``f``, performing only GCD
    operations over the algebraic closure of the initial ground domain
    of definition, compute full partial fraction decomposition with
    fractions having linear denominators.

    Note that no factorization of the initial denominator of ``f`` is
    performed.  The final decomposition is formed in terms of a sum of
    :class:`RootSum` instances.

    ``P``/``Q`` are the numerator/denominator polynomials; ``dummygen``
    supplies the symbols naming the root variable of each factor.  Returns
    the list of ``(D, numer, denom, exponent)`` quadruples consumed by
    ``assemble_partfrac_list``.

    References
    ==========

    * :cite:`Bronstein1993partial`
    """
    f, x, U = P/Q, P.gen, []
    u = Function('u')(x)
    a = Dummy('a')
    # Square-free decomposition of the denominator: Q = Q_c * prod(d**n).
    Q_c, Q_sqf = Q.sqf_list()
    if Q_c != 1 and Q_sqf:
        # Fold a non-trivial content into the factor list so it is not lost.
        if Q_sqf[0][1] == 1:
            Q_sqf[0] = Q_c*Q_sqf[0][0], 1
        else:
            Q_sqf.insert(0, (Q_c.as_poly(x), 1))
    partial = []
    for d, n in Q_sqf:
        b = d.as_expr()
        # u is a placeholder for the square-free factor; its derivatives are
        # substituted below (subs list) instead of being expanded directly.
        U += [u.diff((x, n - 1))]
        h = cancel(f*b**n) / u**n
        H, subs = [h], []
        for j in range(1, n):
            H += [H[-1].diff(x) / j]
        for j in range(1, n + 1):
            subs += [(U[j - 1], b.diff((x, j)) / j)]
        for j in range(n):
            P, Q = cancel(H[j]).as_numer_denom()
            # Replace placeholder derivatives from highest order downwards.
            for i in range(j + 1):
                P = P.subs([subs[j - i]])
            Q = Q.subs([subs[0]])
            P = P.as_poly(x)
            Q = Q.as_poly(x)
            G = P.gcd(d)
            D = d.quo(G)
            # Invert Q modulo D, then reduce the numerator modulo D.
            B, g = Q.half_gcdex(D)
            b = (P * B.quo(g)).rem(D)
            # Express the factor in the fresh root variable `w`.
            Dw = D.subs({x: next(dummygen)})
            numer = Lambda(a, b.as_expr().subs({x: a}))
            denom = Lambda(a, (x - a))
            exponent = n-j
            partial.append((Dw, numer, denom, exponent))
    return partial
def assemble_partfrac_list(partial_list):
    r"""Reassemble a full partial fraction decomposition
    from a structured result obtained by the function ``apart_list``.

    Examples
    ========

    This example is taken from Bronstein's original paper:

    >>> f = 36 / (x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2)
    >>> pfd = apart_list(f)
    >>> pfd
    (1,
    Poly(0, x, domain='ZZ'),
    [(Poly(_w - 2, _w, domain='ZZ'), Lambda(_a, 4), Lambda(_a, x - _a), 1),
    (Poly(_w**2 - 1, _w, domain='ZZ'), Lambda(_a, -3*_a - 6), Lambda(_a, x - _a), 2),
    (Poly(_w + 1, _w, domain='ZZ'), Lambda(_a, -4), Lambda(_a, x - _a), 1)])

    >>> assemble_partfrac_list(pfd)
    -4/(x + 1) - 3/(x + 1)**2 - 9/(x - 1)**2 + 4/(x - 2)

    If we happen to know some roots we can provide them easily inside the structure:

    >>> pfd = apart_list(2/(x**2-2))
    >>> pfd
    (1,
    Poly(0, x, domain='ZZ'),
    [(Poly(_w**2 - 2, _w, domain='ZZ'),
    Lambda(_a, _a/2), Lambda(_a, x - _a),
    1)])

    >>> pfda = assemble_partfrac_list(pfd)
    >>> pfda
    RootSum(_w**2 - 2, Lambda(_a, _a/(x - _a)))/2

    >>> pfda.doit()
    -sqrt(2)/(2*(x + sqrt(2))) + sqrt(2)/(2*(x - sqrt(2)))

    >>> a = Dummy('a')
    >>> pfd = (1, Integer(0).as_poly(x),
    ...        [([sqrt(2), -sqrt(2)],
    ...          Lambda(a, a/2), Lambda(a, -a + x), 1)])

    >>> assemble_partfrac_list(pfd)
    -sqrt(2)/(2*(x + sqrt(2))) + sqrt(2)/(2*(x - sqrt(2)))

    See also
    ========

    apart, apart_list
    """
    common, polypart = partial_list[0], partial_list[1]

    # Start from the polynomial part and fold in each rational term.
    pfd = polypart.as_expr()

    for r, nf, df, ex in partial_list[2]:
        if not isinstance(r, Poly):
            # Roots supplied explicitly as a list of algebraic numbers.
            for root in r:
                pfd += nf(root)/df(root)**ex
            continue
        # Roots given implicitly by a polynomial: wrap them in a RootSum.
        an, nu = nf.variables, nf.expr
        ad, de = df.variables, df.expr
        # The two Lambdas carry distinct dummies; unify them first.
        de = de.subs({ad[0]: an[0]})
        func = Lambda(an, nu/de**ex)
        pfd += RootSum(r, func, auto=False, quadratic=False)

    return common*pfd
| {
"repo_name": "diofant/diofant",
"path": "diofant/polys/partfrac.py",
"copies": "1",
"size": "14342",
"license": "bsd-3-clause",
"hash": -6138820087521659000,
"line_mean": 28.4496919918,
"line_max": 94,
"alpha_frac": 0.5580811602,
"autogenerated": false,
"ratio": 3.3299280241467377,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9386688827023881,
"avg_score": 0.00026407146457144403,
"num_lines": 487
} |
"""Algorithms for partial fraction decomposition of rational functions."""
import itertools
from ..core import (Add, Dummy, Function, Integer, Lambda, preorder_traversal,
sympify)
from ..utilities import numbered_symbols
from . import Poly, RootSum, cancel, factor
from .polyerrors import PolynomialError
from .polyoptions import allowed_flags, set_defaults
from .polytools import parallel_poly_from_expr
__all__ = 'apart', 'apart_list', 'assemble_partfrac_list'
def apart(f, x=None, full=False, **options):
    """
    Compute partial fraction decomposition of a rational function.

    Given a rational function ``f``, computes the partial fraction
    decomposition of ``f``. Two algorithms are available: One is based on the
    undetermined coefficients method, the other is Bronstein's full partial
    fraction decomposition algorithm.

    The undetermined coefficients method (selected by ``full=False``) uses
    polynomial factorization (and therefore accepts the same options as
    factor) for the denominator. Per default it works over the rational
    numbers, therefore decomposition of denominators with non-rational roots
    (e.g. irrational, complex roots) is not supported by default (see options
    of factor).

    Bronstein's algorithm can be selected by using ``full=True`` and allows a
    decomposition of denominators with non-rational roots. A human-readable
    result can be obtained via ``doit()`` (see examples below).

    Examples
    ========

    By default, using the undetermined coefficients method:

    >>> apart(y/(x + 2)/(x + 1), x)
    -y/(x + 2) + y/(x + 1)

    The undetermined coefficients method does not provide a result when the
    denominators roots are not rational:

    >>> apart(y/(x**2 + x + 1), x)
    y/(x**2 + x + 1)

    You can choose Bronstein's algorithm by setting ``full=True``:

    >>> apart(y/(x**2 + x + 1), x, full=True)
    RootSum(_w**2 + _w + 1, Lambda(_a, (-2*y*_a/3 - y/3)/(x - _a)))

    Calling ``doit()`` yields a human-readable result:

    >>> apart(y/(x**2 + x + 1), x, full=True).doit()
    (-y/3 - 2*y*(-1/2 - sqrt(3)*I/2)/3)/(x + 1/2 + sqrt(3)*I/2) + (-y/3 -
    2*y*(-1/2 + sqrt(3)*I/2)/3)/(x + 1/2 - sqrt(3)*I/2)

    See Also
    ========

    apart_list, assemble_partfrac_list
    """
    allowed_flags(options, [])
    f = sympify(f)
    if f.is_Atom:
        return f
    else:
        P, Q = f.as_numer_denom()
    # Keep a pristine copy of the caller's options: set_defaults mutates the
    # dict, and the recursive calls below must not see extension=True forced.
    _options = options.copy()
    options = set_defaults(options, extension=True)
    try:
        (P, Q), opt = parallel_poly_from_expr((P, Q), x, **options)
    except PolynomialError as msg:
        if f.is_commutative:
            raise PolynomialError(msg)
        # non-commutative
        if f.is_Mul:
            # Decompose commutative and non-commutative factors separately.
            c, nc = f.args_cnc(split_1=False)
            nc = f.func(*[apart(i, x=x, full=full, **_options) for i in nc])
            if c:
                c = apart(f.func._from_args(c), x=x, full=full, **_options)
                return c*nc
            else:
                return nc
        elif f.is_Add:
            # Recurse into non-commutative summands; handle the commutative
            # remainder as a single rational function.
            c = []
            nc = []
            for i in f.args:
                if i.is_commutative:
                    c.append(i)
                else:
                    nc.append(apart(i, x=x, full=full, **_options))
            return apart(f.func(*c), x=x, full=full, **_options) + f.func(*nc)
        else:
            # Fall back to decomposing every proper subexpression in place.
            reps = []
            pot = preorder_traversal(f)
            next(pot)
            for e in pot:
                reps.append((e, apart(e, x=x, full=full, **_options)))
                pot.skip()  # this was handled successfully
            return f.xreplace(dict(reps))
    if P.is_multivariate:
        # Cancellation may remove the extra generators; retry once if it does.
        fc = f.cancel()
        if fc != f:
            return apart(fc, x=x, full=full, **_options)
        raise NotImplementedError(
            'multivariate partial fraction decomposition')
    common, P, Q = P.cancel(Q)
    poly, P = P.div(Q, auto=True)
    P, Q = P.rat_clear_denoms(Q)
    if Q.degree() <= 1:
        # Nothing to decompose: the remainder is already a simple fraction.
        partial = P/Q
    else:
        if not full:
            partial = apart_undetermined_coeffs(P, Q)
        else:
            partial = apart_full_decomposition(P, Q)
    terms = Integer(0)
    # Re-factor plain terms for a tidy output; RootSums are kept as-is.
    for term in Add.make_args(partial):
        if term.has(RootSum):
            terms += term
        else:
            terms += factor(term)
    return common*(poly.as_expr() + terms)
def apart_undetermined_coeffs(P, Q):
    """Partial fractions via method of undetermined coefficients."""
    from ..solvers import solve

    sym_source = numbered_symbols(cls=Dummy)
    partial = []
    symbols = []

    # One unknown-numerator template per power of each irreducible factor.
    _, factors = Q.factor_list()
    for f, k in factors:
        n, q = f.degree(), Q
        for i in range(1, k + 1):
            coeffs = list(itertools.islice(sym_source, n))
            q = q.quo(f)
            partial.append((coeffs, q, f, i))
            symbols.extend(coeffs)

    dom = Q.domain.inject(*symbols)
    F = Poly(0, Q.gen, domain=dom)

    # Recombine the templates over the common denominator.
    for i, (coeffs, q, f, k) in enumerate(partial):
        h = Poly(coeffs, Q.gen, domain=dom)
        partial[i] = (h, f, k)
        F += h*q.set_domain(dom)

    # Match coefficients of F against P and solve the linear system.
    system = [coeff - P.coeff_monomial((k,)) for (k,), coeff in F.terms()]
    solution = solve(system, symbols)[0]

    result = Integer(0)
    for h, f, k in partial:
        result += h.as_expr().subs(solution)/f.as_expr()**k
    return result
def apart_full_decomposition(P, Q):
    """
    Bronstein's full partial fraction decomposition algorithm.

    Given a univariate rational function ``f``, performing only GCD
    operations over the algebraic closure of the initial ground domain
    of definition, compute full partial fraction decomposition with
    fractions having linear denominators.

    Note that no factorization of the initial denominator of ``f`` is
    performed.  The final decomposition is formed in terms of a sum of
    :class:`RootSum` instances.

    References
    ==========

    * :cite:`Bronstein1993partial`
    """
    # Delegate to the structured decomposition and flatten the result.
    return assemble_partfrac_list(apart_list(P/Q, P.gens[0]))
def apart_list(f, x=None, dummies=None, **options):
    """
    Compute partial fraction decomposition of a rational function
    and return the result in structured form.

    Given a rational function ``f`` compute the partial fraction decomposition
    of ``f``. Only Bronstein's full partial fraction decomposition algorithm
    is supported by this method. The return value is highly structured and
    perfectly suited for further algorithmic treatment rather than being
    human-readable. The function returns a tuple holding three elements:

    * The first item is the common coefficient, free of the variable `x` used
      for decomposition. (It is an element of the base field `K`.)

    * The second item is the polynomial part of the decomposition. This can be
      the zero polynomial. (It is an element of `K[x]`.)

    * The third part itself is a list of quadruples. Each quadruple
      has the following elements in this order:

      - The (not necessarily irreducible) polynomial `D` whose roots `w_i` appear
        in the linear denominator of a bunch of related fraction terms. (This item
        can also be a list of explicit roots. However, at the moment ``apart_list``
        never returns a result this way, but the related ``assemble_partfrac_list``
        function accepts this format as input.)

      - The numerator of the fraction, written as a function of the root `w`

      - The linear denominator of the fraction *excluding its power exponent*,
        written as a function of the root `w`.

      - The power to which the denominator has to be raised.

    One can always rebuild a plain expression by using the function ``assemble_partfrac_list``.

    Examples
    ========

    A first example:

    >>> f = (2*x**3 - 2*x) / (x**2 - 2*x + 1)
    >>> pfd = apart_list(f)
    >>> pfd
    (1,
    Poly(2*x + 4, x, domain='ZZ'),
    [(Poly(_w - 1, _w, domain='ZZ'), Lambda(_a, 4), Lambda(_a, x - _a), 1)])

    >>> assemble_partfrac_list(pfd)
    2*x + 4 + 4/(x - 1)

    Second example:

    >>> f = (-2*x - 2*x**2) / (3*x**2 - 6*x)
    >>> pfd = apart_list(f)
    >>> pfd
    (-1,
    Poly(2/3, x, domain='QQ'),
    [(Poly(_w - 2, _w, domain='ZZ'), Lambda(_a, 2), Lambda(_a, x - _a), 1)])

    >>> assemble_partfrac_list(pfd)
    -2/3 - 2/(x - 2)

    Another example, showing symbolic parameters:

    >>> pfd = apart_list(t/(x**2 + x + t), x)
    >>> pfd
    (1,
    Poly(0, x, domain='ZZ[t]'),
    [(Poly(_w**2 + _w + t, _w, domain='ZZ[t]'),
    Lambda(_a, -2*t*_a/(4*t - 1) - t/(4*t - 1)),
    Lambda(_a, x - _a), 1)])

    >>> assemble_partfrac_list(pfd)
    RootSum(t + _w**2 + _w, Lambda(_a, (-2*t*_a/(4*t - 1) - t/(4*t - 1))/(x - _a)))

    This example is taken from Bronstein's original paper:

    >>> f = 36 / (x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2)
    >>> pfd = apart_list(f)
    >>> pfd
    (1,
    Poly(0, x, domain='ZZ'),
    [(Poly(_w - 2, _w, domain='ZZ'), Lambda(_a, 4), Lambda(_a, x - _a), 1),
    (Poly(_w**2 - 1, _w, domain='ZZ'), Lambda(_a, -3*_a - 6), Lambda(_a, x - _a), 2),
    (Poly(_w + 1, _w, domain='ZZ'), Lambda(_a, -4), Lambda(_a, x - _a), 1)])

    >>> assemble_partfrac_list(pfd)
    -4/(x + 1) - 3/(x + 1)**2 - 9/(x - 1)**2 + 4/(x - 2)

    See also
    ========

    apart, assemble_partfrac_list

    References
    ==========

    * :cite:`Bronstein1993partial`
    """
    allowed_flags(options, [])
    f = sympify(f)
    if f.is_Atom:
        return f
    else:
        P, Q = f.as_numer_denom()
    options = set_defaults(options, extension=True)
    (P, Q), opt = parallel_poly_from_expr((P, Q), x, **options)
    if P.is_multivariate:  # pragma: no cover
        raise NotImplementedError('multivariate partial fraction decomposition')
    # Pull out common content, then split off the polynomial quotient part.
    common, P, Q = P.cancel(Q)
    poly, P = P.div(Q, auto=True)
    P, Q = P.rat_clear_denoms(Q)
    polypart = poly
    if dummies is None:
        # Default root-name source: reuse one fresh Dummy for every factor.
        def dummies(name):
            d = Dummy(name)
            while True:
                yield d
        dummies = dummies('w')
    rationalpart = apart_list_full_decomposition(P, Q, dummies)
    return common, polypart, rationalpart
def apart_list_full_decomposition(P, Q, dummygen):
    """
    Bronstein's full partial fraction decomposition algorithm.

    Given a univariate rational function ``f``, performing only GCD
    operations over the algebraic closure of the initial ground domain
    of definition, compute full partial fraction decomposition with
    fractions having linear denominators.

    Note that no factorization of the initial denominator of ``f`` is
    performed.  The final decomposition is formed in terms of a sum of
    :class:`RootSum` instances.

    ``P``/``Q`` are the numerator/denominator polynomials; ``dummygen``
    supplies the symbols naming the root variable of each factor.  Returns
    the list of ``(D, numer, denom, exponent)`` quadruples consumed by
    ``assemble_partfrac_list``.

    References
    ==========

    * :cite:`Bronstein1993partial`
    """
    f, x, U = P/Q, P.gen, []
    u = Function('u')(x)
    a = Dummy('a')
    # Square-free decomposition of the denominator: Q = Q_c * prod(d**n).
    Q_c, Q_sqf = Q.sqf_list()
    if Q_c != 1 and Q_sqf:
        # Fold a non-trivial content into the factor list so it is not lost.
        if Q_sqf[0][1] == 1:
            Q_sqf[0] = Q_c*Q_sqf[0][0], 1
        else:
            Q_sqf.insert(0, (Poly(Q_c, x), 1))
    partial = []
    for d, n in Q_sqf:
        b = d.as_expr()
        # u is a placeholder for the square-free factor; its derivatives are
        # substituted below (subs list) instead of being expanded directly.
        U += [u.diff(x, n - 1)]
        h = cancel(f*b**n) / u**n
        H, subs = [h], []
        for j in range(1, n):
            H += [H[-1].diff(x) / j]
        for j in range(1, n + 1):
            subs += [(U[j - 1], b.diff(x, j) / j)]
        for j in range(n):
            P, Q = cancel(H[j]).as_numer_denom()
            # Replace placeholder derivatives from highest order downwards.
            for i in range(j + 1):
                P = P.subs([subs[j - i]])
            Q = Q.subs([subs[0]])
            P = Poly(P, x)
            Q = Poly(Q, x)
            G = P.gcd(d)
            D = d.quo(G)
            # Invert Q modulo D, then reduce the numerator modulo D.
            B, g = Q.half_gcdex(D)
            b = (P * B.quo(g)).rem(D)
            # Express the factor in the fresh root variable `w`.
            Dw = D.subs({x: next(dummygen)})
            numer = Lambda(a, b.as_expr().subs({x: a}))
            denom = Lambda(a, (x - a))
            exponent = n-j
            partial.append((Dw, numer, denom, exponent))
    return partial
def assemble_partfrac_list(partial_list):
    r"""Reassemble a full partial fraction decomposition
    from a structured result obtained by the function ``apart_list``.

    Examples
    ========

    This example is taken from Bronstein's original paper:

    >>> f = 36 / (x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2)
    >>> pfd = apart_list(f)
    >>> pfd
    (1,
    Poly(0, x, domain='ZZ'),
    [(Poly(_w - 2, _w, domain='ZZ'), Lambda(_a, 4), Lambda(_a, x - _a), 1),
    (Poly(_w**2 - 1, _w, domain='ZZ'), Lambda(_a, -3*_a - 6), Lambda(_a, x - _a), 2),
    (Poly(_w + 1, _w, domain='ZZ'), Lambda(_a, -4), Lambda(_a, x - _a), 1)])

    >>> assemble_partfrac_list(pfd)
    -4/(x + 1) - 3/(x + 1)**2 - 9/(x - 1)**2 + 4/(x - 2)

    If we happen to know some roots we can provide them easily inside the structure:

    >>> pfd = apart_list(2/(x**2-2))
    >>> pfd
    (1,
    Poly(0, x, domain='ZZ'),
    [(Poly(_w**2 - 2, _w, domain='ZZ'),
    Lambda(_a, _a/2), Lambda(_a, x - _a),
    1)])

    >>> pfda = assemble_partfrac_list(pfd)
    >>> pfda
    RootSum(_w**2 - 2, Lambda(_a, _a/(x - _a)))/2

    >>> pfda.doit()
    -sqrt(2)/(2*(x + sqrt(2))) + sqrt(2)/(2*(x - sqrt(2)))

    >>> a = Dummy('a')
    >>> pfd = (1, Poly(0, x), [([sqrt(2), -sqrt(2)], Lambda(a, a/2), Lambda(a, -a + x), 1)])

    >>> assemble_partfrac_list(pfd)
    -sqrt(2)/(2*(x + sqrt(2))) + sqrt(2)/(2*(x - sqrt(2)))

    See also
    ========

    apart, apart_list
    """
    common, polypart = partial_list[0], partial_list[1]

    # Start from the polynomial part and fold in each rational term.
    pfd = polypart.as_expr()

    for r, nf, df, ex in partial_list[2]:
        if not isinstance(r, Poly):
            # Roots supplied explicitly as a list of algebraic numbers.
            for root in r:
                pfd += nf(root)/df(root)**ex
            continue
        # Roots given implicitly by a polynomial: wrap them in a RootSum.
        an, nu = nf.variables, nf.expr
        ad, de = df.variables, df.expr
        # The two Lambdas carry distinct dummies; unify them first.
        de = de.subs({ad[0]: an[0]})
        func = Lambda(an, nu/de**ex)
        pfd += RootSum(r, func, auto=False, quadratic=False)

    return common*pfd
| {
"repo_name": "skirpichev/omg",
"path": "diofant/polys/partfrac.py",
"copies": "1",
"size": "14294",
"license": "bsd-3-clause",
"hash": -4036554379675930000,
"line_mean": 28.4721649485,
"line_max": 94,
"alpha_frac": 0.5577165244,
"autogenerated": false,
"ratio": 3.3327115877827,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43904281121827,
"avg_score": null,
"num_lines": null
} |
"""Algorithms for read generation"""
import numpy as np
class Node(object):
    """One piece of an expanded sample sequence, annotated with how it maps
    back to the reference.

    Fields (see __slots__): ps/pr are 1-indexed start positions in the sample
    and reference sequences, cigarop is one of '=', 'X', 'I', 'D', oplen is
    the operation length, seq holds the sample bases ('' for deletions) and
    v is the variant size code: None for '=', 0 for 'X', +oplen for 'I' and
    -oplen for 'D'.
    """
    __slots__ = ('ps', 'pr', 'cigarop', 'oplen', 'seq', 'v')

    def __init__(self, ps, pr, cigarop, oplen, seq):
        self.ps = ps
        self.pr = pr
        self.cigarop = cigarop
        self.oplen = oplen
        self.seq = seq
        # Variant size code keyed by CIGAR op (unknown ops raise KeyError).
        size_by_op = {'=': None, 'X': 0, 'I': oplen, 'D': -oplen}
        self.v = size_by_op[cigarop]

    def tuple(self):
        """Return every field as a plain tuple (basis for repr/equality)."""
        return self.ps, self.pr, self.cigarop, self.oplen, self.seq, self.v

    def __repr__(self):
        return repr(self.tuple())

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            other = other.tuple()
        return self.tuple() == other

    def __ne__(self, other):
        return not self.__eq__(other)
def create_node_list(ref_seq, ref_start_pos, vl):
    """Apply the appropriate variants (i.e. those that are on this copy of the
    chromosome) to the reference sequence and create the sequence of nodes which
    other functions use to generate reads and alignment metadata.

    :param ref_seq: Reference sequence
    :param ref_start_pos: position where this part of the reference sequence starts from. 1 indexed
    :param vl: list of Variant objects in order
    :return: list of Node objects
    """
    # Both positions are 1-indexed.  ref_pos is relative to the whole
    # sequence; samp_pos is relative to this fragment of the expanded sequence.
    samp_pos = ref_pos = ref_start_pos
    nodes = []
    for variant in vl:
        if variant.pos + 1 < ref_pos:
            continue  # variant lies before the region we started from
        new_nodes, samp_pos, ref_pos = create_nodes(ref_seq, samp_pos, ref_pos, variant, ref_start_pos)
        nodes.extend(new_nodes)
    # Emit the trailing reference-matching stretch, if any sequence remains.
    offset = ref_pos - ref_start_pos
    if offset <= len(ref_seq):
        nodes.append(Node(samp_pos, ref_pos, '=', len(ref_seq) - offset, ref_seq[offset:]))
    return nodes
def create_nodes(ref_seq, samp_pos, ref_pos, v, ref_start_pos):
    """Dispatch one variant to the handler for its CIGAR op ('X' -> snp,
    'I' -> insertion, anything else -> deletion)."""
    handler = {'X': snp, 'I': insertion}.get(v.cigarop, deletion)
    return handler(ref_seq, samp_pos, ref_pos, v, ref_start_pos)
def snp(ref_seq, samp_pos, ref_pos, v, ref_start_pos):
    """Emit nodes for a SNP: an optional '=' run up to the variant followed
    by a single-base 'X' node.  Returns (nodes, new samp_pos, new ref_pos)."""
    nodes = []
    gap = v.pos - ref_pos
    if gap > 0:
        # Reference-matching stretch before the SNP.
        matched = ref_seq[ref_pos - ref_start_pos:v.pos - ref_start_pos]
        nodes.append(Node(samp_pos, ref_pos, '=', gap, matched))
        ref_pos = v.pos
        samp_pos += gap
    nodes.append(Node(samp_pos, ref_pos, 'X', 1, v.alt))
    return nodes, samp_pos + 1, ref_pos + 1
def insertion(ref_seq, samp_pos, ref_pos, v, ref_start_pos):
    """Emit nodes for an insertion: the anchor base joins the preceding '='
    run, then an 'I' node carries the inserted bases.  Returns
    (nodes, new samp_pos, new ref_pos)."""
    nodes = []
    gap = v.pos + 1 - ref_pos  # the anchor base is included in the M segment
    if gap > 0:
        matched = ref_seq[ref_pos - ref_start_pos:v.pos + 1 - ref_start_pos]
        nodes.append(Node(samp_pos, ref_pos, '=', gap, matched))
        samp_pos += gap
    ref_pos = v.pos + 1  # next ref base is the one just after the anchor
    nodes.append(Node(samp_pos, ref_pos, 'I', v.oplen, v.alt[1:]))
    return nodes, samp_pos + v.oplen, ref_pos
def deletion(ref_seq, samp_pos, ref_pos, v, ref_start_pos):
    """Emit nodes for a deletion: an '=' run up to and including the anchor
    base, then a zero-sequence 'D' node.  Returns
    (nodes, new samp_pos, new ref_pos)."""
    nodes = []
    gap = v.pos + 1 - ref_pos  # the anchor base is included in the M segment
    if gap > 0:
        matched = ref_seq[ref_pos - ref_start_pos:v.pos + 1 - ref_start_pos]
        nodes.append(Node(samp_pos, ref_pos, '=', gap, matched))
        samp_pos += gap
    ref_pos = v.pos + 1 + v.oplen  # skip past the deleted reference bases
    # ps of a D node points at the last sample base before the deletion.
    nodes.append(Node(samp_pos - 1, ref_pos, 'D', v.oplen, ''))
    return nodes, samp_pos, ref_pos
def get_begin_end_nodes(pl, ll, nodes):
    """Given a list of read positions and lengths return us a list of start and end nodes

    :param pl: Positions of reads. should be np.array so we can sum pl and ll
    :param ll: Lengths of reads. "
    :param nodes:
    :return: nse 2 x N np.array (N = len(pl)
    """
    # D nodes store the position of the last base *before* the deletion, so
    # shift them right by one to get correct start/stop bracketing.
    starts = np.array([(n.ps + 1) if n.cigarop == 'D' else n.ps for n in nodes],
                      dtype=np.uint64)
    begin_nodes = starts.searchsorted(pl, 'right') - 1
    end_nodes = starts.searchsorted(pl + ll - 1, 'right') - 1
    return [begin_nodes, end_nodes]
def generate_read(p, l, n0, n1, nodes):
    """
    :param p: Start position of read in sample coordinates
    :param l: Length of read
    :param n0: starting node
    :param n1: ending node
    :param nodes: as returned by create_node_list
    :return: (pos, cigar, v_list, seq)
             v_list = [-d, +i, 0] -> a list of ints indicating size of variants carried by read
    """
    # Alignment position is the reference position of the first touched node.
    pos = nodes[n0].pr
    # Per-node CIGAR op lengths: clip each node's oplen to the read's overlap
    # with that node; 'D' nodes occupy no sample bases so keep their full oplen.
    cigar = [str((min(p + l - n.ps, n.oplen) - max(0, p - n.ps)) if n.cigarop != 'D' else n.oplen) + n.cigarop for n in nodes[n0:n1 + 1]]
    v_list = [n.v for n in nodes[n0:n1 + 1] if n.v is not None]
    seq = [n.seq[max(0, p - n.ps):min(p + l - n.ps, n.oplen)] for n in nodes[n0:n1 + 1]]
    if nodes[n0].cigarop == 'I':
        if n0 == n1:
            # Special case - read is from inside a long insertion
            # We want to pile up reads from a long insertion at the start of the insertion, which happens automatically
            # Now we need to override the CIGAR
            cigar = ['>{}+{}I'.format(p - nodes[n0].ps, l)]
        else:
            # Read starts inside an insertion but extends beyond it: shift pos
            # by the offset into the insertion.  NOTE(review): presumably this
            # matches the aligner's convention for INS-starting reads — confirm.
            pos = p - nodes[n0].ps + nodes[n0].pr
    return pos, ''.join(cigar), v_list, ''.join(seq)
"repo_name": "sbg/Mitty",
"path": "mitty/simulation/rpc.py",
"copies": "1",
"size": "5839",
"license": "apache-2.0",
"hash": -7730485680460287000,
"line_mean": 35.9620253165,
"line_max": 135,
"alpha_frac": 0.6088371296,
"autogenerated": false,
"ratio": 2.988229273285568,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4097066402885568,
"avg_score": null,
"num_lines": null
} |
"""Algorithms for searching and optimisation."""
import copy
def hill_climb(nsteps, start_node, get_next_node):
    """Modular hill climbing algorithm.

    Repeatedly asks ``get_next_node`` for a neighbour of (a deep copy of)
    the current best node, adopting the neighbour whenever it scores
    strictly higher than anything seen so far.

    Example:
        >>> final_node, best_score, outputs = hill_climb(10, "ABC", get_next_node)

    Args:
        nsteps (int): The number of neighbours to visit
        start_node: The starting node
        get_next_node (function): Function returning the next node,
            the score of that node and any optional output for it

    Returns:
        The highest-scoring node found, its score, and the outputs collected
        each time a new best node was adopted
    """
    best_score = float('-inf')
    outputs = []
    for _ in range(nsteps):
        # Hand the callback a deep copy so it may mutate its argument freely.
        candidate, score, output = get_next_node(copy.deepcopy(start_node))
        if score > best_score:
            best_score = score
            start_node = copy.deepcopy(candidate)
            outputs.append(output)
    return start_node, best_score, outputs
| {
"repo_name": "CameronLonsdale/lantern",
"path": "lantern/analysis/search.py",
"copies": "1",
"size": "1411",
"license": "mit",
"hash": -6857011294354643000,
"line_mean": 35.1794871795,
"line_max": 104,
"alpha_frac": 0.6144578313,
"autogenerated": false,
"ratio": 3.8342391304347827,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49486969617347826,
"avg_score": null,
"num_lines": null
} |
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# Brian Cheung
# Wei LI <kuantkid@gmail.com>
# License: BSD
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array, deprecated
from ..utils.extmath import norm
from ..metrics.pairwise import rbf_kernel
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
               random_state=None):
    """Search for a partition matrix (clustering) which is closest to the
    eigenvector embedding.

    Parameters
    ----------
    vectors : array-like, shape: (n_samples, n_clusters)
        The embedding space of the samples.

    copy : boolean, optional, default: True
        Whether to copy vectors, or perform in-place normalization.

    max_svd_restarts : int, optional, default: 30
        Maximum number of attempts to restart SVD if convergence fails

    n_iter_max : int, optional, default: 20
        Maximum number of iterations to attempt in rotation and partition
        matrix search if machine precision convergence is not reached

    random_state: int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        of the rotation matrix

    Returns
    -------
    labels : array of integers, shape: n_samples
        The labels of the clusters.

    Raises
    ------
    LinAlgError
        If the SVD never converges within ``max_svd_restarts`` attempts.

    References
    ----------

    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf

    Notes
    -----

    The eigenvector embedding is used to iteratively search for the
    closest discrete partition.  First, the eigenvector embedding is
    normalized to the space of partition matrices. An optimal discrete
    partition matrix closest to this normalized embedding multiplied by
    an initial rotation is calculated.  Fixing this discrete partition
    matrix, an optimal rotation matrix is calculated.  These two
    calculations are performed until convergence.  The discrete partition
    matrix is returned as the clustering solution.  Used in spectral
    clustering, this method tends to be faster and more robust to random
    initialization than k-means.
    """
    from scipy.sparse import csc_matrix
    from scipy.linalg import LinAlgError

    random_state = check_random_state(random_state)

    vectors = as_float_array(vectors, copy=copy)

    eps = np.finfo(float).eps
    n_samples, n_components = vectors.shape

    # Normalize the eigenvectors to an equal length of a vector of ones.
    # Reorient the eigenvectors to point in the negative direction with respect
    # to the first element.  This may have to do with constraining the
    # eigenvectors to lie in a specific quadrant to make the discretization
    # search easier.
    norm_ones = np.sqrt(n_samples)
    for i in range(vectors.shape[1]):
        vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
            * norm_ones
        if vectors[0, i] != 0:
            vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])

    # Normalize the rows of the eigenvectors.  Samples should lie on the unit
    # hypersphere centered at the origin.  This transforms the samples in the
    # embedding space to the space of partition matrices.
    vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]

    svd_restarts = 0
    has_converged = False

    # If the SVD raises we re-randomize the initial rotation and try again,
    # at most max_svd_restarts times.
    while (svd_restarts < max_svd_restarts) and not has_converged:
        # Initialize first column of rotation matrix with a row of the
        # eigenvectors
        rotation = np.zeros((n_components, n_components))
        rotation[:, 0] = vectors[random_state.randint(n_samples), :].T

        # To initialize the rest of the rotation matrix, find the rows
        # of the eigenvectors that are as orthogonal to each other as
        # possible
        c = np.zeros(n_samples)
        for j in range(1, n_components):
            # Accumulate c to ensure row is as orthogonal as possible to
            # previous picks as well as current one
            c += np.abs(np.dot(vectors, rotation[:, j - 1]))
            rotation[:, j] = vectors[c.argmin(), :].T

        last_objective_value = 0.0
        n_iter = 0

        while not has_converged:
            n_iter += 1

            t_discrete = np.dot(vectors, rotation)

            labels = t_discrete.argmax(axis=1)
            vectors_discrete = csc_matrix(
                (np.ones(len(labels)), (np.arange(0, n_samples), labels)),
                shape=(n_samples, n_components))

            t_svd = vectors_discrete.T * vectors

            try:
                U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                # BUG FIX: the restart counter used to be incremented after
                # every *successful* SVD call, which (a) burned through the
                # restart budget during ordinary iterations and (b) was
                # skipped entirely when the SVD actually failed, so repeated
                # failures could loop forever.  Count restarts only here,
                # when the SVD fails (matches upstream scikit-learn).
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
                break

            # Normalized-cut objective; convergence is declared when it stops
            # changing to machine precision or n_iter_max is exceeded.
            ncut_value = 2.0 * (n_samples - S.sum())
            if ((abs(ncut_value - last_objective_value) < eps) or
                    (n_iter > n_iter_max)):
                has_converged = True
            else:
                # otherwise calculate rotation and continue
                last_objective_value = ncut_value
                rotation = np.dot(Vh.T, U.T)

    if not has_converged:
        raise LinAlgError('SVD did not converge')
    return labels
def spectral_clustering(affinity, n_clusters=8, n_components=None,
                        eigen_solver=None, random_state=None, n_init=10,
                        k=None, eigen_tol=0.0,
                        assign_labels='kmeans',
                        mode=None):
    """Apply clustering to a projection to the normalized laplacian.

    In practice Spectral Clustering is very useful when the structure of
    the individual clusters is highly non-convex or more generally when
    a measure of the center and spread of the cluster is not a suitable
    description of the complete cluster. For instance when clusters are
    nested circles on the 2D plan.

    If affinity is the adjacency matrix of a graph, this method can be
    used to find normalized graph cuts.

    Parameters
    -----------
    affinity: array-like or sparse matrix, shape: (n_samples, n_samples)
        The affinity matrix describing the relationship of the samples to
        embed. **Must be symmetric**.

        Possible examples:
            - adjacency matrix of a graph,
            - heat kernel of the pairwise distance matrix of the samples,
            - symmetric k-nearest neighbours connectivity matrix of the
              samples.

    n_clusters: integer, optional
        Number of clusters to extract.

    n_components: integer, optional, default is n_clusters
        Number of eigen vectors to use for the spectral embedding

    eigen_solver: {None, 'arpack' or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities

    random_state: int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization
        of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
        and by the K-Means initialization.

    n_init: int, optional, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.

    eigen_tol : float, optional, default: 0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.

    assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
        The strategy to use to assign labels in the embedding
        space. There are two ways to assign labels after the laplacian
        embedding. k-means can be applied and is a popular choice. But it can
        also be sensitive to initialization. Discretization is another
        approach which is less sensitive to random initialization. See
        the 'Multiclass spectral clustering' paper referenced below for
        more details on the discretization approach.

    Returns
    -------
    labels: array of integers, shape: n_samples
        The labels of the clusters.

    Raises
    ------
    ValueError
        If ``assign_labels`` is neither 'kmeans' nor 'discretize'.

    References
    ----------
    - Normalized cuts and image segmentation, 2000
      Jianbo Shi, Jitendra Malik
      http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
    - A Tutorial on Spectral Clustering, 2007
      Ulrike von Luxburg
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf

    Notes
    ------
    The graph should contain only one connected component, elsewhere
    the results make little sense.

    This algorithm solves the normalized cut for k=2: it is a
    normalized spectral clustering.
    """
    # Validate eagerly so a typo fails before any expensive computation.
    if assign_labels not in ('kmeans', 'discretize'):
        raise ValueError("The 'assign_labels' parameter should be "
                         "'kmeans' or 'discretize', but '%s' was given"
                         % assign_labels)
    # 'k' and 'mode' are deprecated aliases; when given they override the
    # new parameter names to preserve backward compatibility.
    if k is not None:
        warnings.warn("'k' was renamed to n_clusters and will "
                      "be removed in 0.15.",
                      DeprecationWarning)
        n_clusters = k
    if mode is not None:
        warnings.warn("'mode' was renamed to eigen_solver "
                      "and will be removed in 0.15.",
                      DeprecationWarning)
        eigen_solver = mode
    random_state = check_random_state(random_state)
    n_components = n_clusters if n_components is None else n_components
    # drop_first=False keeps the first eigenvector of the embedding.
    maps = spectral_embedding(affinity, n_components=n_components,
                              eigen_solver=eigen_solver,
                              random_state=random_state,
                              eigen_tol=eigen_tol, drop_first=False)
    if assign_labels == 'kmeans':
        _, labels, _ = k_means(maps, n_clusters, random_state=random_state,
                               n_init=n_init)
    else:
        labels = discretize(maps, random_state=random_state)
    return labels
class SpectralClustering(BaseEstimator, ClusterMixin):
    """Apply clustering to a projection to the normalized laplacian.

    In practice Spectral Clustering is very useful when the structure of
    the individual clusters is highly non-convex or more generally when
    a measure of the center and spread of the cluster is not a suitable
    description of the complete cluster. For instance when clusters are
    nested circles on the 2D plan.

    If affinity is the adjacency matrix of a graph, this method can be
    used to find normalized graph cuts.

    When calling ``fit``, an affinity matrix is constructed using either the
    Gaussian (aka RBF) kernel of the euclidean distanced ``d(X, X)``::

        np.exp(-gamma * d(X,X) ** 2)

    or a k-nearest neighbors connectivity matrix.

    Alternatively, using ``precomputed``, a user-provided affinity
    matrix can be used.

    Parameters
    -----------
    n_clusters : integer, optional
        The dimension of the projection subspace.

    affinity: string, 'nearest_neighbors', 'rbf' or 'precomputed'

    gamma: float
        Scaling factor of Gaussian (rbf) affinity kernel. Ignored for
        ``affinity='nearest_neighbors'``.

    n_neighbors: integer
        Number of neighbors to use when constructing the affinity matrix using
        the nearest neighbors method. Ignored for ``affinity='rbf'``.

    eigen_solver: {None, 'arpack' or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization
        of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
        and by the K-Means initialization.

    n_init : int, optional, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.

    eigen_tol : float, optional, default: 0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.

    assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
        The strategy to use to assign labels in the embedding
        space. There are two ways to assign labels after the laplacian
        embedding. k-means can be applied and is a popular choice. But it can
        also be sensitive to initialization. Discretization is another
        approach which is less sensitive to random initialization.

    Attributes
    ----------
    `affinity_matrix_` : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
        ``fit``.

    `labels_` :
        Labels of each point

    Notes
    -----
    If you have an affinity matrix, such as a distance matrix,
    for which 0 means identical elements, and high values means
    very dissimilar elements, it can be transformed in a
    similarity matrix that is well suited for the algorithm by
    applying the Gaussian (RBF, heat) kernel::

        np.exp(- X ** 2 / (2. * delta ** 2))

    Another alternative is to take a symmetric version of the k
    nearest neighbors connectivity matrix of the points.

    If the pyamg package is installed, it is used: this greatly
    speeds up computation.

    References
    ----------
    - Normalized cuts and image segmentation, 2000
      Jianbo Shi, Jitendra Malik
      http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
    - A Tutorial on Spectral Clustering, 2007
      Ulrike von Luxburg
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
    """

    def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
                 n_init=10, gamma=1., affinity='rbf', n_neighbors=10, k=None,
                 eigen_tol=0.0, assign_labels='kmeans', mode=None):
        # 'k' and 'mode' are deprecated aliases; when given they override
        # the new parameter names for backward compatibility.
        if k is not None:
            warnings.warn("'k' was renamed to n_clusters and "
                          "will be removed in 0.15.",
                          DeprecationWarning)
            n_clusters = k
        if mode is not None:
            warnings.warn("'mode' was renamed to eigen_solver and "
                          "will be removed in 0.15.",
                          DeprecationWarning)
            eigen_solver = mode
        self.n_clusters = n_clusters
        self.eigen_solver = eigen_solver
        self.random_state = random_state
        self.n_init = n_init
        self.gamma = gamma
        self.affinity = affinity
        self.n_neighbors = n_neighbors
        self.eigen_tol = eigen_tol
        self.assign_labels = assign_labels

    def fit(self, X):
        """Creates an affinity matrix for X using the selected affinity,
        then applies spectral clustering to this affinity matrix.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            OR, if affinity==`precomputed`, a precomputed affinity
            matrix of shape (n_samples, n_samples)
        """
        if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
            # A square input that is not flagged as precomputed is probably
            # a legacy affinity matrix; warn about the API change.
            # Fixed: the concatenated message previously read
            # "``fit``now constructs" (missing space).
            warnings.warn("The spectral clustering API has changed. ``fit`` "
                          "now constructs an affinity matrix from data. To use"
                          " a custom affinity matrix, "
                          "set ``affinity=precomputed``.")

        if self.affinity == 'rbf':
            self.affinity_matrix_ = rbf_kernel(X, gamma=self.gamma)
        elif self.affinity == 'nearest_neighbors':
            connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors)
            # Symmetrize the connectivity matrix before embedding.
            self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
        elif self.affinity == 'precomputed':
            self.affinity_matrix_ = X
        else:
            raise ValueError("Invalid 'affinity'. Expected 'rbf', "
                             "'nearest_neighbors' or 'precomputed', got '%s'."
                             % self.affinity)

        random_state = check_random_state(self.random_state)
        self.labels_ = spectral_clustering(self.affinity_matrix_,
                                           n_clusters=self.n_clusters,
                                           eigen_solver=self.eigen_solver,
                                           random_state=random_state,
                                           n_init=self.n_init,
                                           eigen_tol=self.eigen_tol,
                                           assign_labels=self.assign_labels)
        return self

    @property
    def _pairwise(self):
        # True when ``fit`` expects a square (n_samples, n_samples) matrix.
        return self.affinity == "precomputed"

    @property
    @deprecated("'mode' was renamed to eigen_solver and will be removed in"
                " 0.15.")
    def mode(self):
        # Deprecated read-only alias for eigen_solver.
        return self.eigen_solver

    @property
    @deprecated("'k' was renamed to n_clusters and will be removed in"
                " 0.15.")
    def k(self):
        # Deprecated read-only alias for n_clusters.
        return self.n_clusters
| {
"repo_name": "florian-f/sklearn",
"path": "sklearn/cluster/spectral.py",
"copies": "2",
"size": "17870",
"license": "bsd-3-clause",
"hash": -1982757053190527200,
"line_mean": 37.9324618736,
"line_max": 79,
"alpha_frac": 0.6314493565,
"autogenerated": false,
"ratio": 4.327924436909663,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5959373793409664,
"avg_score": null,
"num_lines": null
} |
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state
from ..utils.graph import graph_laplacian
from ..metrics.pairwise import rbf_kernel
from ..neighbors import kneighbors_graph
from .k_means_ import k_means
def spectral_embedding(adjacency, n_components=8, mode=None,
                       random_state=None):
    """Project the sample on the first eigen vectors of the graph Laplacian

    The adjacency matrix is used to compute a normalized graph Laplacian
    whose spectrum (especially the eigen vectors associated to the
    smallest eigen values) has an interpretation in terms of minimal
    number of cuts necessary to split the graph into comparably sized
    components.

    This embedding can also 'work' even if the ``adjacency`` variable is
    not strictly the adjacency matrix of a graph but more generally
    an affinity or similarity matrix between samples (for instance the
    heat kernel of a euclidean distance matrix or a k-NN matrix).

    However care must be taken to always make the affinity matrix
    symmetric so that the eigen vector decomposition works as expected.

    Parameters
    -----------
    adjacency: array-like or sparse matrix, shape: (n_samples, n_samples)
        The adjacency matrix of the graph to embed.

    n_components: integer, optional
        The dimension of the projection subspace.

    mode: {None, 'arpack' or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities

    random_state: int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        lobpcg eigen vectors decomposition when mode == 'amg'. By default
        arpack is used.

    Returns
    --------
    embedding: array, shape: (n_samples, n_components)
        The reduced samples

    Raises
    ------
    ValueError
        If ``mode`` is 'amg' but pyamg is not installed, or if ``mode`` is
        not one of 'arpack' / 'amg'.

    Notes
    ------
    The graph should contain only one connected component, elsewhere the
    results make little sense.
    """
    # Imports are local so the heavy scipy/pyamg modules are only required
    # when this function is actually called.
    from scipy import sparse
    from ..utils.arpack import eigsh
    from scipy.sparse.linalg import lobpcg
    try:
        from pyamg import smoothed_aggregation_solver
    except ImportError:
        # pyamg is optional; fail only if the caller explicitly asked for it.
        if mode == "amg":
            raise ValueError("The mode was set to 'amg', but pyamg is "
                             "not available.")
    random_state = check_random_state(random_state)
    n_nodes = adjacency.shape[0]
    # XXX: Should we check that the matrix given is symmetric?
    if mode is None:
        mode = 'arpack'
    # Normalized Laplacian; dd is the diagonal returned by graph_laplacian
    # and is used below to rescale the eigenvectors.
    laplacian, dd = graph_laplacian(adjacency,
                                    normed=True, return_diag=True)
    if (mode == 'arpack'
        or not sparse.isspmatrix(laplacian)
        or n_nodes < 5 * n_components):
        # lobpcg used with mode='amg' has bugs for low number of nodes
        # We need to put the diagonal at zero
        if not sparse.isspmatrix(laplacian):
            # Dense case: stride n_nodes + 1 over the flat buffer walks the
            # main diagonal of the square matrix.
            laplacian.flat[::n_nodes + 1] = 0
        else:
            # Sparse case: zero out explicitly stored diagonal entries.
            laplacian = laplacian.tocoo()
            diag_idx = (laplacian.row == laplacian.col)
            laplacian.data[diag_idx] = 0
            # If the matrix has a small number of diagonals (as in the
            # case of structured matrices coming from images), the
            # dia format might be best suited for matvec products:
            n_diags = np.unique(laplacian.row - laplacian.col).size
            if n_diags <= 7:
                # 3 or less outer diagonals on each side
                laplacian = laplacian.todia()
            else:
                # csr has the fastest matvec and is thus best suited to
                # arpack
                laplacian = laplacian.tocsr()
        # Here we'll use shift-invert mode for fast eigenvalues
        # (see http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
        #  for a short explanation of what this means)
        # Because the normalized Laplacian has eigenvalues between 0 and 2,
        # I - L has eigenvalues between -1 and 1.  ARPACK is most efficient
        # when finding eigenvalues of largest magnitude (keyword which='LM')
        # and when these eigenvalues are very large compared to the rest.
        # For very large, very sparse graphs, I - L can have many, many
        # eigenvalues very near 1.0.  This leads to slow convergence.  So
        # instead, we'll use ARPACK's shift-invert mode, asking for the
        # eigenvalues near 1.0.  This effectively spreads-out the spectrum
        # near 1.0 and leads to much faster convergence: potentially an
        # orders-of-magnitude speedup over simply using keyword which='LA'
        # in standard mode.
        lambdas, diffusion_map = eigsh(-laplacian, k=n_components,
                                       sigma=1.0, which='LM')
        # Reverse the row order (eigsh on -L returns them in the opposite
        # order) and rescale by dd to undo the Laplacian normalization.
        embedding = diffusion_map.T[::-1] * dd
    elif mode == 'amg':
        # Use AMG to get a preconditioner and speed up the eigenvalue
        # problem.
        laplacian = laplacian.astype(np.float)  # lobpcg needs native floats
        ml = smoothed_aggregation_solver(laplacian.tocsr())
        # Random initial approximation; first column is the (rescaled)
        # constant vector.
        X = random_state.rand(laplacian.shape[0], n_components)
        X[:, 0] = 1. / dd.ravel()
        M = ml.aspreconditioner()
        lambdas, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-12,
                                        largest=False)
        embedding = diffusion_map.T * dd
        if embedding.shape[0] == 1:
            # lobpcg returned a degenerate result; bail out.
            raise ValueError
    else:
        raise ValueError("Unknown value for mode: '%s'."
                         "Should be 'amg' or 'arpack'" % mode)
    return embedding
def spectral_clustering(affinity, n_clusters=8, n_components=None, mode=None,
                        random_state=None, n_init=10, k=None):
    """Apply k-means to a projection to the normalized laplacian

    In practice Spectral Clustering is very useful when the structure of
    the individual clusters is highly non-convex or more generally when
    a measure of the center and spread of the cluster is not a suitable
    description of the complete cluster. For instance when clusters are
    nested circles on the 2D plan.

    If affinity is the adjacency matrix of a graph, this method can be
    used to find normalized graph cuts.

    Parameters
    -----------
    affinity: array-like or sparse matrix, shape: (n_samples, n_samples)
        The affinity matrix describing the relationship of the samples to
        embed. **Must be symmetric**.

        Possible examples:
            - adjacency matrix of a graph,
            - heat kernel of the pairwise distance matrix of the samples,
            - symmetric k-nearest neighbours connectivity matrix of the
              samples.

    n_clusters: integer, optional
        Number of clusters to extract.

    n_components: integer, optional, default is n_clusters
        Number of eigen vectors to use for the spectral embedding

    mode: {None, 'arpack' or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities

    random_state: int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization
        of the lobpcg eigen vectors decomposition when mode == 'amg'
        and by the K-Means initialization.

    n_init: int, optional, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.

    Returns
    -------
    labels: array of integers, shape: n_samples
        The labels of the clusters.

    References
    ----------
    - Normalized cuts and image segmentation, 2000
      Jianbo Shi, Jitendra Malik
      http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
    - A Tutorial on Spectral Clustering, 2007
      Ulrike von Luxburg
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323

    Notes
    ------
    The graph should contain only one connected component, elsewhere
    the results make little sense.

    This algorithm solves the normalized cut for k=2: it is a
    normalized spectral clustering.
    """
    # 'k' is a deprecated alias for n_clusters.
    if k is not None:
        warnings.warn("'k' was renamed to n_clusters", DeprecationWarning)
        n_clusters = k
    random_state = check_random_state(random_state)
    n_components = n_clusters if n_components is None else n_components
    maps = spectral_embedding(affinity, n_components=n_components,
                              mode=mode, random_state=random_state)
    # Drop the first component of the embedding before clustering
    # (presumably the trivial eigenvector -- TODO confirm against
    # spectral_embedding's ordering).
    maps = maps[1:]
    _, labels, _ = k_means(maps.T, n_clusters, random_state=random_state,
                           n_init=n_init)
    return labels
class SpectralClustering(BaseEstimator, ClusterMixin):
    """Apply k-means to a projection to the normalized laplacian

    In practice Spectral Clustering is very useful when the structure of
    the individual clusters is highly non-convex or more generally when
    a measure of the center and spread of the cluster is not a suitable
    description of the complete cluster. For instance when clusters are
    nested circles on the 2D plan.

    If affinity is the adjacency matrix of a graph, this method can be
    used to find normalized graph cuts.

    When calling ``fit``, an affinity matrix is constructed using either the
    Gaussian (aka RBF) kernel of the euclidean distanced ``d(X, X)``::

        np.exp(-gamma * d(X,X) ** 2)

    or a k-nearest neighbors connectivity matrix.

    Alternatively, using ``precomputed``, a user-provided affinity
    matrix can be used.

    Parameters
    -----------
    n_clusters : integer, optional
        The dimension of the projection subspace.

    affinity: string, 'nearest_neighbors', 'rbf' or 'precomputed'

    gamma: float
        Scaling factor of Gaussian (rbf) affinity kernel. Ignored for
        ``affinity='nearest_neighbors'``.

    n_neighbors: integer
        Number of neighbors to use when constructing the affinity matrix using
        the nearest neighbors method. Ignored for ``affinity='rbf'``.

    mode: {None, 'arpack' or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization
        of the lobpcg eigen vectors decomposition when mode == 'amg'
        and by the K-Means initialization.

    n_init : int, optional, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.

    Attributes
    ----------
    `affinity_matrix_` : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
        ``fit``.

    `labels_` :
        Labels of each point

    Notes
    -----
    If you have an affinity matrix, such as a distance matrix,
    for which 0 means identical elements, and high values means
    very dissimilar elements, it can be transformed in a
    similarity matrix that is well suited for the algorithm by
    applying the Gaussian (RBF, heat) kernel::

        np.exp(- X ** 2 / (2. * delta ** 2))

    Another alternative is to take a symmetric version of the k
    nearest neighbors connectivity matrix of the points.

    If the pyamg package is installed, it is used: this greatly
    speeds up computation.

    References
    ----------
    - Normalized cuts and image segmentation, 2000
      Jianbo Shi, Jitendra Malik
      http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
    - A Tutorial on Spectral Clustering, 2007
      Ulrike von Luxburg
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
    """

    def __init__(self, n_clusters=8, mode=None, random_state=None, n_init=10,
                 gamma=1., affinity='rbf', n_neighbors=10, k=None,
                 precomputed=False):
        # NOTE(review): 'precomputed' is accepted but never stored or used;
        # it is kept only so existing callers do not break. Use
        # affinity='precomputed' instead.
        # 'k' is a deprecated alias for n_clusters.
        if k is not None:
            warnings.warn("'k' was renamed to n_clusters", DeprecationWarning)
            n_clusters = k
        self.n_clusters = n_clusters
        self.mode = mode
        self.random_state = random_state
        self.n_init = n_init
        self.gamma = gamma
        self.affinity = affinity
        self.n_neighbors = n_neighbors

    def fit(self, X):
        """Creates an affinity matrix for X using the selected affinity,
        then applies spectral clustering to this affinity matrix.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            OR, if affinity==`precomputed`, a precomputed affinity
            matrix of shape (n_samples, n_samples)
        """
        if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
            # A square input that is not flagged as precomputed is probably
            # a legacy affinity matrix; warn about the API change.
            # Fixed: the concatenated message previously read
            # "``fit``now constructs" (missing space).
            warnings.warn("The spectral clustering API has changed. ``fit`` "
                          "now constructs an affinity matrix from data. To use "
                          "a custom affinity matrix, set ``affinity=precomputed``.")

        if self.affinity == 'rbf':
            self.affinity_matrix_ = rbf_kernel(X, gamma=self.gamma)
        elif self.affinity == 'nearest_neighbors':
            connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors)
            # Symmetrize the connectivity matrix before embedding.
            self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
        elif self.affinity == 'precomputed':
            self.affinity_matrix_ = X
        else:
            raise ValueError("Invalid 'affinity'. Expected 'rbf', "
                             "'nearest_neighbors' or 'precomputed', got '%s'."
                             % self.affinity)

        # NOTE(review): this overwrites the user-supplied random_state with
        # a RandomState instance (historical behavior, preserved here).
        self.random_state = check_random_state(self.random_state)
        self.labels_ = spectral_clustering(self.affinity_matrix_,
                                           n_clusters=self.n_clusters, mode=self.mode,
                                           random_state=self.random_state, n_init=self.n_init)
        return self

    @property
    def _pairwise(self):
        # True when ``fit`` expects a square (n_samples, n_samples) matrix.
        return self.affinity == "precomputed"
| {
"repo_name": "pradyu1993/scikit-learn",
"path": "sklearn/cluster/spectral.py",
"copies": "1",
"size": "14443",
"license": "bsd-3-clause",
"hash": -2560481668920930000,
"line_mean": 38.5698630137,
"line_max": 78,
"alpha_frac": 0.6484802326,
"autogenerated": false,
"ratio": 4.241703377386197,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007881786312360684,
"num_lines": 365
} |
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD
import warnings
import numpy as np
from ..base import BaseEstimator
from ..utils import check_random_state
from ..utils.graph import graph_laplacian
from .k_means_ import k_means
def spectral_embedding(adjacency, n_components=8, mode=None,
                       random_state=None):
    """Project the sample on the first eigen vectors of the graph Laplacian

    The adjacency matrix is used to compute a normalized graph Laplacian
    whose spectrum (especially the eigen vectors associated to the
    smallest eigen values) has an interpretation in terms of minimal
    number of cuts necessary to split the graph into comparably sized
    components.

    This embedding can also 'work' even if the ``adjacency`` variable is
    not strictly the adjacency matrix of a graph but more generally
    an affinity or similarity matrix between samples (for instance the
    heat kernel of a euclidean distance matrix or a k-NN matrix).

    However care must be taken to always make the affinity matrix
    symmetric so that the eigen vector decomposition works as expected.

    Parameters
    -----------
    adjacency: array-like or sparse matrix, shape: (n_samples, n_samples)
        The adjacency matrix of the graph to embed.

    n_components: integer, optional
        The dimension of the projection subspace.

    mode: {None, 'arpack' or 'amg'}
        The eigenvalue decomposition strategy to use. AMG (Algebraic
        MultiGrid) is much faster, but requires pyamg to be
        installed.

    random_state: int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        lobpcg eigen vectors decomposition when mode == 'amg'.

    Returns
    --------
    embedding: array, shape: (n_samples, n_components)
        The reduced samples

    Notes
    ------
    The graph should contain only one connected component, elsewhere the
    results make little sense.
    """
    # Imports are local so scipy/pyamg are only required when called.
    from scipy import sparse
    from ..utils.fixes import arpack_eigsh
    from scipy.sparse.linalg import lobpcg
    try:
        from pyamg import smoothed_aggregation_solver
        amg_loaded = True
    except ImportError:
        amg_loaded = False
    random_state = check_random_state(random_state)
    n_nodes = adjacency.shape[0]
    # XXX: Should we check that the matrix given is symmetric?
    if not amg_loaded:
        warnings.warn('pyamg not available, using scipy.sparse')
    if mode is None:
        # Prefer AMG when available. NOTE(review): explicitly passing
        # mode='amg' without pyamg installed fails later with a NameError
        # on smoothed_aggregation_solver.
        mode = ('amg' if amg_loaded else 'arpack')
    # Normalized Laplacian; dd is the diagonal returned by graph_laplacian
    # and is used below to rescale the eigenvectors.
    laplacian, dd = graph_laplacian(adjacency,
                                    normed=True, return_diag=True)
    if (mode == 'arpack'
        or not sparse.isspmatrix(laplacian)
        or n_nodes < 5 * n_components):
        # lobpcg used with mode='amg' has bugs for low number of nodes
        # We need to put the diagonal at zero
        if not sparse.isspmatrix(laplacian):
            # BUG FIX: ``laplacian[::n_nodes + 1] = 0`` zeroed every
            # (n_nodes + 1)-th *row* of the 2-D array rather than the
            # diagonal. Indexing through ``.flat`` addresses the flattened
            # buffer, where a stride of n_nodes + 1 walks the main diagonal.
            laplacian.flat[::n_nodes + 1] = 0
        else:
            # Sparse case: zero out explicitly stored diagonal entries.
            laplacian = laplacian.tocoo()
            diag_idx = (laplacian.row == laplacian.col)
            laplacian.data[diag_idx] = 0
            # If the matrix has a small number of diagonals (as in the
            # case of structured matrices coming from images), the
            # dia format might be best suited for matvec products:
            n_diags = np.unique(laplacian.row - laplacian.col).size
            if n_diags <= 7:
                # 3 or less outer diagonals on each side
                laplacian = laplacian.todia()
            else:
                # csr has the fastest matvec and is thus best suited to
                # arpack
                laplacian = laplacian.tocsr()
        lambdas, diffusion_map = arpack_eigsh(-laplacian, k=n_components,
                                              which='LA')
        # Reverse the row order and rescale by dd to undo the Laplacian
        # normalization.
        embedding = diffusion_map.T[::-1] * dd
    elif mode == 'amg':
        # Use AMG to get a preconditioner and speed up the eigenvalue
        # problem.
        laplacian = laplacian.astype(np.float)  # lobpcg needs native floats
        ml = smoothed_aggregation_solver(laplacian.tocsr())
        # Random initial approximation; first column is the (rescaled)
        # constant vector.
        X = random_state.rand(laplacian.shape[0], n_components)
        X[:, 0] = 1. / dd.ravel()
        M = ml.aspreconditioner()
        lambdas, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-12,
                                        largest=False)
        embedding = diffusion_map.T * dd
        if embedding.shape[0] == 1:
            # lobpcg returned a degenerate result; bail out.
            raise ValueError
    else:
        raise ValueError("Unknown value for mode: '%s'."
                         "Should be 'amg' or 'arpack'" % mode)
    return embedding
def spectral_clustering(affinity, k=8, n_components=None, mode=None,
                        random_state=None):
    """Cluster samples by running k-means on a spectral embedding.

    The affinity matrix is embedded onto the first eigenvectors of its
    normalized graph Laplacian, and k-means is applied in that space.
    This is useful when clusters are highly non-convex (e.g. nested
    circles in the plane) or when affinity encodes a graph whose
    normalized cuts are sought.

    Parameters
    -----------
    affinity: array-like or sparse matrix, shape: (n_samples, n_samples)
        Pairwise similarity of the samples; **must be symmetric**.
        Typical choices: graph adjacency matrix, heat kernel of a
        pairwise distance matrix, symmetric k-NN connectivity matrix.

    k: integer, optional
        Number of clusters to extract.

    n_components: integer, optional, default is k
        Number of eigenvectors used for the spectral embedding.

    mode: {None, 'arpack' or 'amg'}
        Eigendecomposition strategy. AMG (Algebraic MultiGrid) is much
        faster but requires pyamg.

    random_state: int seed, RandomState instance, or None (default)
        Seeds both the lobpcg initialization (mode == 'amg') and the
        k-means initialization.

    Returns
    -------
    labels: array of integers, shape: n_samples
        Cluster label of each sample.

    References
    ----------
    - Normalized cuts and image segmentation, 2000
      Jianbo Shi, Jitendra Malik
      http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
    - A Tutorial on Spectral Clustering, 2007
      Ulrike von Luxburg
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323

    Notes
    ------
    The graph should contain a single connected component; otherwise the
    results make little sense. For k=2 this solves the normalized cut:
    it is a normalized spectral clustering.
    """
    rng = check_random_state(random_state)
    if n_components is None:
        n_components = k
    embedding = spectral_embedding(affinity, n_components=n_components,
                                   mode=mode, random_state=rng)
    # Discard the first embedding component before clustering.
    embedding = embedding[1:]
    _, labels, _ = k_means(embedding.T, k, random_state=rng)
    return labels
class SpectralClustering(BaseEstimator):
    """Estimator wrapper around :func:`spectral_clustering`.

    Embeds a user-supplied affinity matrix onto the eigenvectors of its
    normalized graph Laplacian and runs k-means in that space. This works
    well for highly non-convex clusters (e.g. nested circles in the
    plane) and for finding normalized graph cuts when the input is an
    adjacency matrix.

    Parameters
    -----------
    k: integer, optional
        The dimension of the projection subspace.

    mode: {None, 'arpack' or 'amg'}
        Eigendecomposition strategy. AMG (Algebraic MultiGrid) is much
        faster but requires pyamg.

    random_state: int seed, RandomState instance, or None (default)
        Seeds both the lobpcg initialization (mode == 'amg') and the
        k-means initialization.

    Attributes
    ----------
    labels_:
        Cluster label of each point, set by ``fit``.

    References
    ----------
    - Normalized cuts and image segmentation, 2000
      Jianbo Shi, Jitendra Malik
      http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
    - A Tutorial on Spectral Clustering, 2007
      Ulrike von Luxburg
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
    """

    def __init__(self, k=8, mode=None, random_state=None):
        self.k = k
        self.mode = mode
        self.random_state = random_state

    def fit(self, X):
        """Compute the spectral clustering from the affinity matrix.

        Parameters
        -----------
        X: array-like or sparse matrix, shape: (n_samples, n_samples)
            Pairwise similarity of the data (or an adjacency matrix of
            the graph to embed). X must be symmetric with non-negative
            entries: zero means "nothing in common", large values mean
            "strongly similar".

        Notes
        ------
        A distance-like matrix (0 = identical, large = dissimilar) can be
        converted into a suitable similarity matrix with the Gaussian
        (heat) kernel::

            np.exp(- X ** 2 / (2. * delta ** 2))

        or by symmetrizing a k-nearest-neighbors connectivity matrix of
        the points. If pyamg is installed it is used, which greatly
        speeds up computation.
        """
        rng = check_random_state(self.random_state)
        self.random_state = rng
        self.labels_ = spectral_clustering(X, k=self.k, mode=self.mode,
                                           random_state=rng)
        return self
| {
"repo_name": "joshbohde/scikit-learn",
"path": "sklearn/cluster/spectral.py",
"copies": "1",
"size": "10632",
"license": "bsd-3-clause",
"hash": -1537913726048693800,
"line_mean": 36.1748251748,
"line_max": 77,
"alpha_frac": 0.647103085,
"autogenerated": false,
"ratio": 4.2528,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00008741258741258741,
"num_lines": 286
} |
"""Algorithms for the computation of Groebner bases"""
from sympy.polynomials.base import *
from sympy.polynomials import div_
def groebner(f, var=None, order=None, reduced=True):
    """Computes a (reduced) Groebner base for a given list of polynomials.

    Usage:
    ======
        The input consists of a list of polynomials, either as SymPy
        expressions or instances of Polynomials. In the first case,
        you should also specify the variables and the monomial order
        with the arguments 'var' and 'order'. Only the first
        polynomial is checked for its type, the rest is assumed to
        match.

        By default, this algorithm returns the unique reduced Groebner
        base for the given ideal. By setting reduced=False, you can
        prevent the reduction steps.

    Examples:
    =========
        >>> x, y = symbols('xy')
        >>> G = groebner([x**2 + y**3, y**2-x], order='lex')
        >>> for g in G: print g
        x - y**2
        y**3 + y**4

    Notes:
    ======
        This module is Python 2 code: it relies on `map`/`filter`
        returning lists (e.g. `len(f)` after `filter`) and on
        `list.sort(cmp=...)`.

        Groebner bases are used to choose specific generators for a
        polynomial ideal. Because these bases are unique, you can
        check for ideal equality by comparing the Groebner bases. To
        see if one polynomial lies in an ideal, divide by the elements
        in the base and see if the remainder is 0. They can also be
        applied to equation systems: By choosing lexicographic
        ordering, you can eliminate one variable at a time, given that
        the ideal is zero-dimensional (finite number of solutions).

        Here, an improved version of Buchberger's algorithm is
        used. For all pairs of polynomials, the s-polynomial is
        computed, by mutually eliminating the leading terms of these 2
        polynomials. Its remainder (after division by the base) is
        then added. Sometimes, it is easy to see that one
        s-polynomial will be reduced to 0 before computing it. At the
        end, the base is reduced, by trying to eliminate as many terms
        as possible with the leading terms of other base elements. The
        final step is to make all polynomials monic.

    References:
    ===========
        Cox, Little, O'Shea: Ideals, Varieties and Algorithms,
        Springer, 2. edition, p. 87
    """
    # Normalize the input to a list of Polynomial instances.
    if isinstance(f, Basic):
        f = [f]
    if not isinstance(f[0], Polynomial):
        if var is None:
            var = merge_var(*map(lambda p: p.atoms(type=Symbol), f))
        if isinstance(var, Symbol):
            var = [var]
        f = map(lambda p: Polynomial(p, var=var, order=order), f)
    # Filter out the zero elements.
    f = filter(lambda p: p.sympy_expr is not S.Zero, f)
    # Empty Ideal.
    if len(f) == 0:
        return [Polynomial(S.Zero)]
    # Stores the unchecked combinations for s-poly's.
    b = []
    s = len(f)
    for i in range(0, s-1):
        for j in range(i+1, s):
            b.append((i, j))
    # Buchberger main loop: process pending index pairs until none remain.
    while b:
        # TODO: Choose better (non-arbitrary) pair: sugar method?
        i, j = b[0]
        crit = False
        lcm = term_lcm(f[i].coeffs[0], f[j].coeffs[0])
        # Check if leading terms are relatively prime (first Buchberger
        # criterion: coprime leading terms reduce to zero, skip the pair).
        if lcm[1:] != term_mult(f[i].coeffs[0],
                                f[j].coeffs[0])[1:]:
            # TODO: Don't operate on the whole lists, do nested ifs instead?
            kk = filter(lambda k: k!=i and k!=j,range(0, s))
            kk = filter(lambda k: not (min(i,k),max(i,k)) in b, kk)
            kk = filter(lambda k: not (min(j,k),max(j,k)) in b, kk)
            # Check if the lcm is divisible by another base element.
            kk = filter(lambda k: term_is_mult(lcm,f[k].coeffs[0]), kk)
            crit = not bool(kk)
        if crit:
            # Build the s-polynomial by cross-multiplying to cancel the
            # leading terms, then reduce it modulo the current base.
            factor_i = Polynomial(coeffs=(term_div(lcm, f[i].coeffs[0]),),
                                  var=f[0].var, order=f[0].order)
            factor_j = Polynomial(coeffs=(term_div(lcm, f[j].coeffs[0]),),
                                  var=f[0].var, order=f[0].order)
            s_poly = f[i]*factor_i - f[j]*factor_j
            s_poly = div_.div(s_poly, f)[-1] # reduce
            if s_poly.sympy_expr is not S.Zero:
                # we still have to add it to the base.
                s += 1
                f.append(s_poly)
                for t in range(0, s-1):  # With a new element come
                    b.append((t, s-1))   # new combinations to test.
        b = b[1:] # Checked one more.
    # We now have one possible Groebner base, probably too big.
    if not reduced:
        return f
    # We can get rid of all elements, where the leading term can be
    # reduced in the ideal of the remaining leading terms, that is,
    # can be divided by one of the other leading terms.
    blacklist = []
    for p in f:
        if filter(lambda x: term_is_mult(p.coeffs[0], x.coeffs[0]),
                  filter(lambda x: not x in blacklist and x != p, f)):
            blacklist.append(p)
    for p in blacklist:
        f.remove(p)
    # We can now sort the basis elements according to their leading
    # term.
    f.sort(cmp=lambda a,b: term_cmp(a.coeffs[0], b.coeffs[0],
                                    a.order), reverse=True)
    # Divide all basis elements by their leading coefficient, to get a
    # leading 1.
    for i, p in enumerate(f):
        c = p.coeffs[0][0]
        f[i] = Polynomial(coeffs=tuple(map(lambda t:(t[0]/c,) + t[1:],
                                           p.coeffs)),
                          var=p.var, order=p.order)
    # We now have a minimal Groebner basis, which is still not unique.
    # The next step is to reduce all basis elements in respect to the
    # rest of the base (without touching the leading terms).
    # As the basis is already sorted, the rest gets smaller each time.
    for i,p in enumerate(f[0:-1]):
        pp = div_.div(p, f[i+1:])[-1]
        f[i] = pp
    return f
| {
"repo_name": "certik/sympy-oldcore",
"path": "sympy/polynomials/groebner_.py",
"copies": "1",
"size": "5923",
"license": "bsd-3-clause",
"hash": 5676863946287562000,
"line_mean": 39.2925170068,
"line_max": 76,
"alpha_frac": 0.5735269289,
"autogenerated": false,
"ratio": 3.6788819875776397,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47524089164776395,
"avg_score": null,
"num_lines": null
} |
#Algorithms for Trie
class Node():
    """A single trie node: one letter, an end-of-word flag, and children."""

    def __init__(self, letter=None):
        self.letter = letter   # character stored at this node
        self.isEnd = False     # True when a word terminates here
        self.branches = {}     # letter -> child Node

    def __getitem__(self, index):
        # Missing children read as False so callers can test truthiness.
        return self.branches.get(index, False)

    def __setitem__(self, index, data):
        self.branches[index] = data

    def __str__(self):
        return '%s: %s' % (self.letter, ','.join(self.branches.keys()))

    def __repr__(self):
        return str(self)
def add(T, data):
    """Insert the string `data` into trie T; empty input is ignored."""
    if not data:
        return
    node = T
    for ch in data:
        # Create the branch only if it does not exist yet.
        if not node[ch]:
            node[ch] = Node(ch)
        node = node[ch]
    node.isEnd = True
def search(T,data):
if(len(data)==0):
return False
CNode=T
for cletter in data:
if not CNode[cletter]:
return False
CNode=CNode[cletter]
return CNode.isEnd
def searchPrefix(T,prefix):
if(len(prefix)==0):
return False
CNode=T
for cletter in prefix:
if not CNode[cletter]:
return False
CNode=CNode[cletter]
return True
# NOTE(review): module-level demo state built at import time; consider
# moving these under the __main__ guard.
T=Node()
add(T,'adadd')
add(T,'adbc')
add(T,'bcd')
class Trie():
    """Thin object-oriented wrapper around the module-level trie functions."""

    def __init__(self):
        self.T = Node()  # root node of this trie

    def add(self, word):
        """Insert `word` into the trie."""
        return add(self.T, word)

    def search(self, word):
        """True iff `word` was inserted as a complete word."""
        return search(self.T, word)

    def searchPrefix(self, prefix):
        """True iff some inserted word starts with `prefix`."""
        return searchPrefix(self.T, prefix)
def test():
    """Exercise the Trie wrapper and print each result with its expectation."""
    print("Testing Trie methods")
    T = Trie()
    for word in ('adadd', 'adbc', 'bcd'):
        T.add(word)
    print(T.search('adadd'), "==>should be True")
    print(T.search('bcd'), "===>should be True")
    print(T.search('a'), "===>should be False")
    print(T.searchPrefix('a'), "===>should be True")
    print(T.searchPrefix('bc'), "===>should be True")
    print(T.searchPrefix('d'), "===>should be False")
if __name__ == "__main__":
test() | {
"repo_name": "Seenivasanseeni/PyDaS",
"path": "PDaS/Trie.py",
"copies": "1",
"size": "1990",
"license": "mit",
"hash": 6375520377288167000,
"line_mean": 23.2804878049,
"line_max": 68,
"alpha_frac": 0.5608040201,
"autogenerated": false,
"ratio": 3.273026315789474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9171333149618018,
"avg_score": 0.03249943725429109,
"num_lines": 82
} |
# algorithms.py
# 14th March 2015
# Examples of Discrete Fourier Transform code in Python
# http://forum.micropython.org/viewtopic.php?f=2&t=208&hilit=fft
#
import math, cmath
# Recursive algorithm: simple but slow and stack-hungry
# Enter with a list of complex numbers of length 2**N
def fft_recursive(x):
    """Recursive radix-2 decimation-in-time FFT.

    Args:
        x: list of (real or complex) numbers whose length is a power of two.

    Returns:
        A new list containing the (unnormalized) DFT of x.

    Simple but slow and stack-hungry; intended for demos and testing.
    """
    n = len(x)
    if n <= 1:
        return x
    # Bug fix: this previously recursed into fft(), whose signature in this
    # module requires a precomputed `roots` argument and would raise a
    # TypeError. Recurse into this function instead.
    even = fft_recursive(x[0::2])
    odd = fft_recursive(x[1::2])
    return [even[m] + math.e**(-2j*math.pi*m/n)*odd[m] for m in range(n//2)] + \
           [even[m] - math.e**(-2j*math.pi*m/n)*odd[m] for m in range(n//2)]
# Standard Cooley-Tukey: an effective floating point solution in pure Python
# nums is a list of complex numbers.
def ffft(nums, forward=True, scale=False):
    """In-place iterative radix-2 Cooley-Tukey FFT (pure floating point).

    Parameters:
        nums: list of complex numbers; length must be a power of two.
        forward: True for the forward transform, False for the inverse.
        scale: if True (and forward), divide every output value by len(nums).

    Returns the same list object, transformed in place.
    """
    n = len(nums)
    m = int(math.log2(n))  # number of butterfly stages
    #n= 2**m #Calculate the number of points
    #Do the bit reversal
    i2 = n >> 1
    j = 0
    for i in range(n-1):
        if i<j: nums[i], nums[j] = nums[j], nums[i]
        k = i2
        while (k <= j):
            j -= k
            k >>= 1
        j+=k
    #Compute the FFT
    c = 0j-1
    l2 = 1
    for l in range(m):
        l1 = l2
        l2 <<= 1
        u = 0j+1
        for j in range(l1):
            for i in range(j, n, l2):
                i1 = i+l1
                t1 = u*nums[i1]
                nums[i1] = nums[i] - t1
                nums[i] += t1
            u *= c
        ci = math.sqrt((1.0 - c.real) / 2.0) # Generate complex roots of unity
        if forward: ci=-ci # for forward transform
        cr = math.sqrt((1.0 + c.real) / 2.0) # phi = -pi/2 -pi/4 -pi/8...
        c = cr + ci*1j#complex(cr,ci)
    # Scaling for forward transform
    if (scale and forward):
        for i in range(n):
            nums[i] /= n
    return nums
# Code used as the basis for the assembler routine. Cooley-Tukey algorithm with
# twiddle factors precomputed. Scale factors are included as proof of concept.
# Fixed-point scale factors used by fft()/buildarrays() below: data values
# are carried scaled by SCALE, twiddle factors by SCALEC.
SCALE = 1000
SCALEC = 64
ROOTSCALEC = math.sqrt(SCALEC)
def fft(nums, roots, forward=True, scale=True):
    """In-place Cooley-Tukey FFT with precomputed, scaled twiddle factors.

    Basis for the assembler routine; the SCALE/SCALEC factors are included
    as a fixed-point proof of concept.

    Parameters:
        nums: list of complex numbers; length must be a power of two.
        roots: scaled complex roots of unity from buildarrays().
        forward: True for the forward transform, False for the inverse.
        scale: if True (and forward), divide every output by len(nums).

    Returns the same list object, transformed in place.
    """
    n = len(nums)
    m = int(math.log(n)/math.log(2))  # number of butterfly stages
    #n= 2**m
    #Do the bit reversal
    i2 = n >> 1
    j = 0
    for i in range(n-1):
        if i<j: nums[i], nums[j] = nums[j], nums[i]
        k = i2
        while (k <= j):
            j -= k
            k >>= 1
        j+=k
    #Compute the FFT
    l2 = 1
    for l in range(m):
        c = roots[l]
        if forward:
            # Forward transform conjugates the precomputed root.
            c = c.real -c.imag*1j
        l1 = l2
        l2 <<= 1
        u = (0j+1)*SCALE
        for j in range(l1):
            for i in range(j, n, l2):
                i1 = i+l1
                t1 = u*nums[i1]/SCALE
                nums[i1] = nums[i] - t1
                nums[i] += t1
            u *= c/SCALEC
    # Scaling for forward transform
    if (scale and forward):
        for i in range(n):
            nums[i] /= n
    return nums
def buildarrays(length):
    """Precompute scaled twiddle factors and a zeroed working array.

    Returns (re, roots): `re` is a list of `length` complex zeros and
    `roots` holds the complex roots of unity (scaled by SCALEC) consumed
    by fft().
    """
    bits = int(math.log(length)/math.log(2))
    roots = []
    c = (-1+0j)*SCALEC
    roots.append(c) # Complex roots of unity
    for x in range(bits):
        # Half-angle recurrence: each step halves the rotation angle.
        cimag = ROOTSCALEC*math.sqrt((1.0*SCALEC - c.real) / 2.0) # Imaginary part
        creal = ROOTSCALEC*math.sqrt((1.0*SCALEC + c.real) / 2.0) # Real part
        c = creal + cimag*1j
        roots.append(c)
    re = [0+0j]*length
    return re, roots
# Test/demo for above algorithm
def printlist(q):
    """Print the values of q on one line as bracketed real/imag pairs."""
    pieces = []
    for item in q:
        try:
            pieces.append("[{:5.2f}{:5.2f}] ".format(item.real, item.imag))
        except AttributeError:  # For MicroPython
            pieces.append("[{:5.2f}{:5.2f}] ".format(item, 0))
    print("".join(pieces))
# Demo: forward-transform a 16-point raised cosine, then invert it.
nums, roots = buildarrays(16)
for x in range(len(nums)):
    nums[x] = 0.1+ math.cos(2*math.pi*x/len(nums))
print("Initial")
printlist(nums)
fft(nums, roots, True, True)
print("fft")
printlist(nums)
print("Reverse transform")
fft(nums, roots, False)
printlist(nums)
| {
"repo_name": "peterhinch/micropython-fft",
"path": "algorithms.py",
"copies": "1",
"size": "3846",
"license": "mit",
"hash": 1769043380472413000,
"line_mean": 26.4714285714,
"line_max": 82,
"alpha_frac": 0.5200208008,
"autogenerated": false,
"ratio": 2.933638443935927,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3953659244735927,
"avg_score": null,
"num_lines": null
} |
# algorithms.py
# 14th March 2015
# Examples of Discrete Fourier Transform code in Python
# http://forum.micropython.org/viewtopic.php?f=2&t=208&hilit=fft
# Released under the MIT license.
#
import math, cmath
# Recursive algorithm: simple but slow and stack-hungry
# Enter with an array of complex numbers of length 2**N
def fft_recursive(x):
    """Recursive radix-2 decimation-in-time FFT.

    Args:
        x: list of (real or complex) numbers whose length is a power of two.

    Returns:
        A new list containing the (unnormalized) DFT of x.

    Simple but slow and stack-hungry; intended for demos and testing.
    """
    n = len(x)
    if n <= 1:
        return x
    # Bug fix: this previously recursed into fft(), whose signature in this
    # module requires a precomputed `roots` argument and would raise a
    # TypeError. Recurse into this function instead.
    even = fft_recursive(x[0::2])
    odd = fft_recursive(x[1::2])
    return [even[m] + math.e**(-2j*math.pi*m/n)*odd[m] for m in range(n//2)] + \
           [even[m] - math.e**(-2j*math.pi*m/n)*odd[m] for m in range(n//2)]
# Code used as the basis for the assembler routine. Cooley-Tukey algorithm with
# twiddle factors precomputed.
def fft(nums, roots, forward=True):
    """In-place Cooley-Tukey FFT using precomputed twiddle factors.

    Basis for the assembler routine.

    Parameters:
        nums: list of complex numbers; length must be a power of two.
        roots: complex roots of unity from buildarrays().
        forward: True for the forward transform (scaled by 1/n),
            False for the inverse.

    Returns the same list object, transformed in place.
    """
    n = len(nums)
    m = int(math.log(n)/math.log(2))  # number of butterfly stages
    #n= 2**m
    #Do the bit reversal
    i2 = n >> 1
    j = 0
    for i in range(n-1):
        if i<j: nums[i], nums[j] = nums[j], nums[i]
        k = i2
        while (k <= j):
            j -= k
            k >>= 1
        j+=k
    #Compute the FFT
    l2 = 1
    for l in range(m):
        c = roots[l]
        if forward:
            # Forward transform conjugates the precomputed root.
            c = c.real -c.imag*1j
        l1 = l2
        l2 <<= 1
        u = 0j+1
        for j in range(l1):
            for i in range(j, n, l2):
                i1 = i+l1
                t1 = u*nums[i1]
                nums[i1] = nums[i] - t1
                nums[i] += t1
            u *= c
    # Scaling for forward transform
    if forward:
        for i in range(n):
            nums[i] /= n
    return nums
def buildarrays(length):
    """Precompute twiddle factors and a zeroed complex working array.

    Returns (re, roots): `re` is a list of `length` complex zeros and
    `roots` holds the complex roots of unity consumed by fft().
    """
    nbits = int(math.log(length) / math.log(2))
    c = -1 + 0j
    roots = [c]  # complex roots of unity, starting at angle pi
    for _ in range(nbits):
        # Half-angle recurrence: each step halves the rotation angle.
        imag_part = math.sqrt((1.0 - c.real) / 2.0)
        real_part = math.sqrt((1.0 + c.real) / 2.0)
        c = real_part + imag_part * 1j
        roots.append(c)
    return [0 + 0j] * length, roots
# Test/demo for above algorithm
def printlist(q):
    """Print the values of q on one line as bracketed real/imag pairs."""
    print("".join("[{:5.2f}{:5.2f}] ".format(v.real, v.imag) for v in q))
# Demo: forward-transform a 16-point raised cosine, then invert it.
nums, roots = buildarrays(16)
for x in range(len(nums)):
    nums[x] = 0.1+ math.cos(2*math.pi*x/len(nums))
print("Initial")
printlist(nums)
fft(nums, roots, True)
print("fft")
printlist(nums)
print("Reverse transform")
fft(nums, roots, False)
printlist(nums)
| {
"repo_name": "peterhinch/micropython-fourier",
"path": "algorithms.py",
"copies": "1",
"size": "2369",
"license": "mit",
"hash": 7264192233145653000,
"line_mean": 24.75,
"line_max": 80,
"alpha_frac": 0.5356690587,
"autogenerated": false,
"ratio": 2.8542168674698796,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8777250676714292,
"avg_score": 0.022527049891117402,
"num_lines": 92
} |
# algorithms.py
"""Contains various quantum computing algorithms."""
from cmath import exp
from math import asin, ceil, log, pi, sqrt
from numpy import eye, outer
from quantum.gate_factory import function_gate, grover_diffusion_operator,\
hadamard_gate
from quantum.quantum_gate import kron, QuantumGate
from quantum.qubit_system import QubitSystem
from quantum._util import gcd, int_root, is_prime
def qft(q):
    """Apply the quantum Fourier transform to the qubit system q in place."""
    dim = 2**q.n()  # Hilbert-space dimension N = 2^n
    idx = range(dim)  # row and column indices of the DFT matrix
    # DFT matrix w^(j*k) / sqrt(N) with w = exp(-2*pi*i/N); the 1/sqrt(N)
    # factor makes the gate unitary.
    gate = QuantumGate(exp(-2.j*pi/dim)**outer(idx, idx) / sqrt(dim))
    # Apply the gate to q (QuantumGate.__mul__; the result is discarded).
    gate * q
def grover_search(match_text, lst):
    """Grover's quantum algorithm for searching.

    Args:
        match_text: Text to find in the list lst.
        lst: List of strings to search to find a string matching match_text.

    Returns:
        The index i of the item such that lst[i] is the same string as
        match_text. The lines must match exactly; it is not enough for the
        text to be contained in the line. If two or more lines match, it
        will only return one of the line numbers. Returns -1 if no matching
        line is found, i.e. the algorithm fails to find a solution.

    Raises:
        ValueError: If lst is empty.
    """
    if len(lst) <= 0:
        raise ValueError('List must be of positive length')
    n = len(lst)
    N = int(ceil(log(n, 2)))  # number of qubits needed
    Hn = hadamard_gate(N)
    Ui = _search_oracle(match_text, lst)  # flips sign of matching indices
    Ud = grover_diffusion_operator(N)
    MAX_ITER = 50  # cap on retries; measurement is probabilistic
    count = 0
    index = n  # sentinel: out of range until first measurement
    # Repeat until a solution is found or the iteration limit is reached
    while count < MAX_ITER and (index >= n or lst[index] != match_text):
        q = QubitSystem(N)  # system of log2(n) bits in state |0>
        # apply Hadamard gate to create uniform superposition of basis states
        Hn * q
        for _ in range(_r(2**N)):  # optimal number of Grover iterations
            Ui * q  # apply operator that flips the sign of the matching index
            Ud * q  # apply Grover's diffusion operator
        index = q.measure()
        count += 1
    return index if index < n and lst[index] == match_text else -1
def grover_invert(f, y, n):
    """Grover's algorithm for inverting a general function f that maps a
    sequence of n bits (represented as an int whose binary representation is
    the bit sequence) to another sequence of bits.

    Args:
        f: Function to invert.
        y: Value of the function at which to evaluate the inverse.
        n: Number of input bits of f.

    Returns:
        The input x such that f(x) = y. If more than one input suffices, it
        returns one at random. If no input suffices, returns -1.

    Raises:
        ValueError: If n is not positive.
    """
    if n <= 0:
        raise ValueError('n must be positive')
    Hn = hadamard_gate(n)
    Ui = _function_oracle(f, y, n)  # flips sign of states with f(x) == y
    Ud = grover_diffusion_operator(n)
    MAX_ITER = 50  # cap on retries; measurement is probabilistic
    count = 0
    x = None
    # Repeat until a solution is found or the iteration limit is reached
    while count < MAX_ITER and (x is None or f(x) != y):
        q = QubitSystem(n)  # system of n bits in state |0>
        # apply Hadamard gate to create uniform superposition of basis states
        Hn * q
        for _ in range(_r(2**n)):  # optimal number of Grover iterations
            Ui * q  # apply operator that flips the sign of the matching index
            Ud * q  # apply Grover's diffusion operator
        x = q.measure()
        count += 1
    return x if f(x) == y else -1
def shor_factor(n):
    """Shor's factorization algorithm.

    Args:
        n: Integer >= 2 to factor.

    Returns:
        If n is composite, a non-trivial factor of n. If n is prime,
        returns 1.

    Raises:
        ValueError: if n is <= 1.
    """
    if n <= 1:
        raise ValueError('n must be at least 2')
    if is_prime(n):
        return 1
    if n % 2 == 0:
        return 2  # even numbers > 2 are trivial
    # Need to check that n is not a power of an integer for algorithm to work
    root = int_root(n)
    if root != -1:
        return root
    # choose m s.t. n^2 <= 2^m < 2*n^2
    # log2(n^2) <= m <= log2(2 * n^2) = 1 + log2(n^2)
    m = ceil(log(n**2, 2))
    ny = ceil(log(n - 1, 2))  # number of qubits in output of function f
    I = QuantumGate(eye(2**ny))
    H = kron(hadamard_gate(m), I)
    MAX_ITER = 10
    niter = 0
    while True:
        # NOTE(review): a = n - 1 is congruent to -1 (mod n), whose
        # multiplicative order is always 2 — confirm this choice of base
        # actually yields useful periods.
        a = n - 1  # arbitrary integer coprime to n
        # Initialize a system of qubits long enough to represent the integers 0
        # to 2^m - 1 alongside an integer up to n - 1, then apply Hadamard gate
        # to the first m qubits to create a uniform superposition.
        q = QubitSystem(m + ny)
        H * q
        # Apply the function f(x) = a^x (mod n) to the system
        f = lambda x: pow(a, x, n)
        Uf = function_gate(f, m, ny)
        Uf * q
        # Find the period of f via quantum Fourier transform
        qft(q)
        # NOTE(review): r is a float reciprocal of the measurement, so the
        # parity test `r % 2 == 0` and the exponent `r / 2` below look
        # suspect (the period should be an integer); also `% n != -1` can
        # never be False in Python since x % n lies in [0, n) for positive n
        # — probably `!= n - 1` was intended. Confirm against the algorithm.
        r = 1. / q.measure()  # period = 1 / frequency
        niter += 1
        if niter >= MAX_ITER or (r % 2 == 0 and (a**(r / 2)) % n != -1):
            break
    return gcd(a**(r / 2) + 1, n)
# Creates the "quantum oracle/black box" gate used by the Grover search
# algorithm.
# Args:
# match_text: Text to find in the list lst.
# lst: List of strings to be searched to find a string matching match_text.
# Returns:
# A gate that maps a state |k> to -|k> if the kth item of "list" matches
# the text match_text, and maps it to itself otherwise.
def _search_oracle(match_text, lst):
    """Build the "quantum oracle/black box" gate for Grover search.

    Returns a gate that maps a state |k> to -|k> if the kth item of lst
    matches match_text, and maps it to itself otherwise.
    """
    n = len(lst)
    N = int(ceil(log(n, 2)))  # number of qubits needed
    gate = QuantumGate(eye(2**N))  # identity gate
    for i in range(n):
        if lst[i] == match_text:
            gate[i][i] = -1.  # sign flip on the matching basis state
    return gate
# Creates the "quantum oracle/black box" gate used by the Grover function
# inversion algorithm.
# Args:
# f: Function to invert.
# y: Value of the function at which to evaluate the inverse.
# Returns:
# A gate that maps a state |x> to -|x> if f(x) = y, and maps it to itself
# otherwise.
def _function_oracle(f, y, n):
    """Build the "quantum oracle/black box" gate for Grover inversion.

    Returns a gate that maps a state |x> to -|x> if f(x) = y, and maps it
    to itself otherwise.
    """
    gate = QuantumGate(eye(2**n))  # identity gate
    for i in range(2**n):
        if f(i) == y:
            gate[i][i] = -1.  # sign flip on states where f(x) == y
    return gate
# Function returning the optimal number of iterations for Grover's
# algorithm. It is important to stop at exactly this many iterations or else
# future iterations may actually lower the probability of measuring the
# correct answer. See http://www.quantiki.org/wiki/Grover's_search_algorithm.
def _r(n):
theta = asin(1. / sqrt(n))
return int(round((pi / theta - 2.) / 4.))
| {
"repo_name": "kcoltin/quantum",
"path": "python/quantum/algorithms.py",
"copies": "1",
"size": "6654",
"license": "mit",
"hash": -6655314907934318000,
"line_mean": 29.66359447,
"line_max": 80,
"alpha_frac": 0.6127141569,
"autogenerated": false,
"ratio": 3.3931667516573176,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45058809085573176,
"avg_score": null,
"num_lines": null
} |
# algorithms.py
# This is all about searches and sorts
# Binary search is definitely faster than Linear search
# Python's built in sort is ridiculously faster than Selection, 2nd, and Insertion, 3rd, but Selection is only around 30% faster.
# <Chad Hobbs>
def readData(filename):
    """Read whitespace-separated integers from a file into a flat list.

    Args:
        filename: path of a text file containing integers separated by
            spaces and/or newlines.

    Returns:
        A list of ints in file order.
    """
    numbers = []
    # `with` guarantees the file is closed; split() (no argument) tolerates
    # runs of whitespace, unlike the original split(" ") which produced
    # empty tokens and crashed int() on double spaces.
    with open(filename, "r") as infile:
        for line in infile:
            numbers.extend(int(tok) for tok in line.split())
    return numbers
def isInLinear(srchValue, values):
    """Linear scan: True iff srchValue equals some element of values."""
    for candidate in values:
        if candidate == srchValue:
            return True
    return False
def isInBinary(srchValue, values):
    """Binary search: True iff srchValue is present in the sorted list values.

    Args:
        srchValue: value to look for.
        values: list sorted in ascending order.

    Returns:
        True if found, False otherwise.

    Fixes the original hand-rolled search, which could raise IndexError
    (e.g. searching 2 in [1] stepped past the end of the list) and whose
    ad-hoc `iMid == iMin or iMid == iMax` termination test could misreport
    boundary elements.
    """
    from bisect import bisect_left
    i = bisect_left(values, srchValue)  # leftmost insertion point
    return i < len(values) and values[i] == srchValue
def bubbleSort(values):
    """Sort `values` in place with bubble sort, printing before and after."""
    print("Bubblesort before")
    print(values)
    swapped = True
    while swapped:
        swapped = False
        for idx in range(len(values) - 1):
            if values[idx] > values[idx + 1]:
                values[idx], values[idx + 1] = values[idx + 1], values[idx]
                swapped = True
    print("Bubblesort after")
    print(values)
    return
def insertionSort(values):
    """Sort `values` in place with insertion sort; returns the same list."""
    print("Insertion before")
    print(values)
    for pos in range(1, len(values)):
        current = values[pos]
        hole = pos
        # Shift larger elements right until the slot for `current` opens.
        while hole > 0 and values[hole - 1] > current:
            values[hole] = values[hole - 1]
            hole -= 1
        values[hole] = current
    return values
def selectionSort(values):
    """Sort `values` in place with selection sort, printing before and after."""
    print("Selection before")
    print(values)
    for target in range(len(values)):
        # First index of the smallest remaining element (no builtin shadowing).
        smallest = min(range(target, len(values)), key=values.__getitem__)
        values[target], values[smallest] = values[smallest], values[target]
    print("Selection after")
    print(values)
    return
def main():
    """Demo driver: search a sorted data file, then sort an unsorted one."""
    file = "dataSorted.txt"
    nums = readData(file)
    # Search area------------------------
    # NOTE(review): eval() on raw user input is unsafe (arbitrary code
    # execution); int(input(...)) would suffice here.
    entry = eval(input("Enter a number to search for: "))
    cond = isInLinear(entry,nums)
    if cond:
        print("Your number was found via Linear search!")
    else:
        print("Your number was not found via Linear search.:(")
    cond = isInBinary(entry,nums)
    if cond:
        print("Your number was found via Binary search!")
    else:
        print("Your number was not found via Binary search.:(")
    # Sort area---------------------------
    file = "dataUnsorted.txt"
    nums = readData(file)
    bubbleSort(nums)
    nums = readData(file)
    values = insertionSort(nums)
    print("Insertion after")
    print(values)
    nums = readData(file)
    selectionSort(nums)
# Runs on import (no __main__ guard in the original).
main()
| {
"repo_name": "itsallvoodoo/csci-school",
"path": "CSCI220/Week 11 - MAR26-30/Lab 11 - APR04/algorithms.py",
"copies": "1",
"size": "3060",
"license": "apache-2.0",
"hash": -6231396199389622000,
"line_mean": 25.3214285714,
"line_max": 129,
"alpha_frac": 0.5401960784,
"autogenerated": false,
"ratio": 3.8154613466334166,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.980182424014806,
"avg_score": 0.010766636977071182,
"num_lines": 112
} |
"""Algorithms related to graphs."""
from __future__ import absolute_import
import numpy as np
import scipy as sp
from scipy import sparse
from . import amg_core
__all__ = ['maximal_independent_set', 'vertex_coloring',
'bellman_ford',
'lloyd_cluster', 'connected_components']
from pyamg.graph_ref import bellman_ford_reference, bellman_ford_balanced_reference
__all__ += ['bellman_ford_reference', 'bellman_ford_balanced_reference']
def asgraph(G):
    """Return G as a square sparse matrix, converting to CSR if needed.

    Raises
    ------
    ValueError
        If the (converted) matrix is not square.
    """
    if not (sparse.isspmatrix_csr(G) or sparse.isspmatrix_csc(G)):
        G = sparse.csr_matrix(G)
    n_rows, n_cols = G.shape
    if n_rows != n_cols:
        raise ValueError('expected square matrix')
    return G
def maximal_independent_set(G, algo='serial', k=None):
    """Compute a maximal independent vertex set for a graph.

    Parameters
    ----------
    G : sparse matrix
        Symmetric matrix, preferably in sparse CSR or CSC format.
        The nonzeros of G represent the edges of an undirected graph.
    algo : {'serial', 'parallel'}
        Algorithm used to compute the MIS
            * serial : greedy serial algorithm
            * parallel : variant of Luby's parallel MIS algorithm
    k : int, optional
        When given, the ``maximal_independent_set_k_parallel`` kernel is
        used instead and ``algo`` is ignored (presumably a distance-k
        variant — see amg_core).

    Returns
    -------
    S : array
        S[i] = 1 if vertex i is in the MIS
        S[i] = 0 otherwise

    Notes
    -----
    Diagonal entries in the G (self loops) will be ignored.

    Luby's algorithm is significantly more expensive than the
    greedy serial algorithm.
    """
    G = asgraph(G)
    N = G.shape[0]
    mis = np.empty(N, dtype='intc')
    mis[:] = -1  # initialized to -1 before the C kernel fills it in
    if k is None:
        if algo == 'serial':
            fn = amg_core.maximal_independent_set_serial
            fn(N, G.indptr, G.indices, -1, 1, 0, mis)
        elif algo == 'parallel':
            fn = amg_core.maximal_independent_set_parallel
            # Random values break ties between neighboring vertices.
            fn(N, G.indptr, G.indices, -1, 1, 0, mis, np.random.rand(N), -1)
        else:
            raise ValueError('unknown algorithm (%s)' % algo)
    else:
        fn = amg_core.maximal_independent_set_k_parallel
        fn(N, G.indptr, G.indices, k, mis, np.random.rand(N), -1)
    return mis
def vertex_coloring(G, method='MIS'):
    """Compute a vertex coloring of a graph.

    Parameters
    ----------
    G : sparse matrix
        Symmetric matrix, preferably in sparse CSR or CSC format.
        The nonzeros of G represent the edges of an undirected graph.
    method : string
        Algorithm used to compute the vertex coloring:
            * 'MIS' - Maximal Independent Set
            * 'JP'  - Jones-Plassmann (parallel)
            * 'LDF' - Largest-Degree-First (parallel)

    Returns
    -------
    coloring : array
        An array of vertex colors (integers beginning at 0).

    Notes
    -----
    Diagonal entries in the G (self loops) will be ignored.
    """
    G = asgraph(G)
    N = G.shape[0]
    coloring = np.empty(N, dtype='intc')
    if method == 'MIS':
        fn = amg_core.vertex_coloring_mis
        fn(N, G.indptr, G.indices, coloring)
    elif method == 'JP':
        fn = amg_core.vertex_coloring_jones_plassmann
        # Random values break ties between neighboring vertices.
        fn(N, G.indptr, G.indices, coloring, np.random.rand(N))
    elif method == 'LDF':
        fn = amg_core.vertex_coloring_LDF
        fn(N, G.indptr, G.indices, coloring, np.random.rand(N))
    else:
        raise ValueError('unknown method (%s)' % method)
    return coloring
def bellman_ford(G, centers, method='standard'):
    """Bellman-Ford iteration.

    Parameters
    ----------
    G : sparse matrix
        Directed graph with positive weights.
    centers : list
        Starting centers or source nodes.
    method : string
        'standard': base implementation of Bellman-Ford
        'balanced': a balanced version of Bellman-Ford

    Returns
    -------
    distances : array
        Distance of each point to the nearest center.
    nearest : array
        Index of the nearest center.
    predecessors : array
        Predecessors in the array.

    Raises
    ------
    ValueError
        If G has negative or complex weights, or method is unknown.

    See Also
    --------
    pyamg.amg_core.bellman_ford
    scipy.sparse.csgraph.bellman_ford
    """
    G = asgraph(G)
    n = G.shape[0]
    # Validate weights; G.data is only inspectable when there are nonzeros.
    if G.nnz > 0:
        if G.data.min() < 0:
            raise ValueError('Bellman-Ford is defined only for positive weights.')
        if G.dtype == complex:
            raise ValueError('Bellman-Ford is defined only for real weights.')
    centers = np.asarray(centers, dtype=np.int32)
    # allocate space for returns and working arrays
    distances = np.empty(n, dtype=G.dtype)
    nearest = np.empty(n, dtype=np.int32)
    predecessors = np.empty(n, dtype=np.int32)
    if method == 'balanced':
        # Extra working arrays needed only by the balanced kernel.
        predecessors_count = np.empty(n, dtype=np.int32)
        cluster_size = np.empty(len(centers), dtype=np.int32)
    if method == 'standard':
        amg_core.bellman_ford(n, G.indptr, G.indices, G.data, centers,  # IN
                              distances, nearest, predecessors,  # OUT
                              True)
    elif method == 'balanced':
        amg_core.bellman_ford_balanced(n, G.indptr, G.indices, G.data, centers,  # IN
                                       distances, nearest, predecessors,  # OUT
                                       predecessors_count, cluster_size,  # OUT
                                       True)
    else:
        raise ValueError(f'method {method} is not supported in Bellman-Ford')
    return distances, nearest, predecessors
def lloyd_cluster(G, centers):
    """Perform Lloyd clustering on graph with weighted edges.

    Parameters
    ----------
    G : csr_matrix, csc_matrix
        A sparse nxn matrix where each nonzero entry G[i,j] is the distance
        between nodes i and j.
    centers : int array
        If centers is an integer, then its value determines the number of
        clusters. Otherwise, centers is an array of unique integers between
        0 and n-1 that will be used as the initial centers for clustering.

    Returns
    -------
    distances : array
        final distances
    clusters : int array
        id of each cluster of points
    centers : int array
        index of each center

    Notes
    -----
    If G has complex values, abs(G) is used instead.
    """
    G = asgraph(G)
    n = G.shape[0]
    # complex dtype: cluster on the magnitudes instead
    if G.dtype.kind == 'c':
        G = np.abs(G)
    if G.nnz > 0:
        if G.data.min() < 0:
            raise ValueError('Lloyd Clustering is defined only for positive weights.')
    if np.isscalar(centers):
        # A scalar asks for that many random initial centers.
        centers = np.random.permutation(n)[:centers]
        centers = centers.astype('intc')
    else:
        centers = np.asarray(centers, dtype=np.int32)
    if len(centers) < 1:
        raise ValueError('at least one center is required')
    if centers.min() < 0:
        raise ValueError(f'invalid center index {centers.min()}')
    if centers.max() >= n:
        raise ValueError(f'invalid center index {centers.max()}')
    centers = np.asarray(centers, dtype=np.int32)
    # Working/output arrays filled by the C kernel.
    distances = np.empty(n, dtype=G.dtype)
    olddistances = np.empty(n, dtype=G.dtype)
    clusters = np.empty(n, dtype=np.int32)
    predecessors = np.full(n, -1, dtype=np.int32)
    amg_core.lloyd_cluster(n, G.indptr, G.indices, G.data,  # IN
                           centers,  # INOUT
                           distances, olddistances, clusters, predecessors,  # OUT
                           True)
    return distances, clusters, centers
def breadth_first_search(G, seed):
    """Breadth First search of a graph.

    Parameters
    ----------
    G : csr_matrix, csc_matrix
        A sparse NxN matrix where each nonzero entry G[i,j] is the distance
        between nodes i and j.
    seed : int
        Index of the seed location.

    Returns
    -------
    order : int array
        Breadth first order
    level : int array
        Final levels

    Examples
    --------
    0---2
    |  /
    | /
    1---4---7---8---9
    |  /|  /
    | / | /
    3/  6/
    |
    |
    5

    >>> import numpy as np
    >>> import pyamg
    >>> import scipy.sparse as sparse
    >>> edges = np.array([[0,1],[0,2],[1,2],[1,3],[1,4],[3,4],[3,5],
                          [4,6], [4,7], [6,7], [7,8], [8,9]])
    >>> N = np.max(edges.ravel())+1
    >>> data = np.ones((edges.shape[0],))
    >>> A = sparse.coo_matrix((data, (edges[:,0], edges[:,1])), shape=(N,N))
    >>> c, l = pyamg.graph.breadth_first_search(A, 0)
    >>> print(l)
    >>> print(c)
    [0 1 1 2 2 3 3 3 4 5]
    [0 1 2 3 4 5 6 7 8 9]
    """
    G = asgraph(G)
    N = G.shape[0]
    order = np.empty(N, G.indptr.dtype)
    level = np.empty(N, G.indptr.dtype)
    level[:] = -1  # -1 marks vertices not yet reached
    BFS = amg_core.breadth_first_search
    BFS(G.indptr, G.indices, int(seed), order, level)
    return order, level
def connected_components(G):
    """Compute the connected components of a graph.

    The connected components of a graph G, which is represented by a
    symmetric sparse matrix, are labeled with the integers 0,1,..(K-1)
    where K is the number of components.

    Parameters
    ----------
    G : symmetric matrix, preferably in sparse CSR or CSC format
        The nonzeros of G represent the edges of an undirected graph.

    Returns
    -------
    components : ndarray
        An array of component labels for each vertex of the graph.

    Notes
    -----
    If the nonzero structure of G is not symmetric, then the
    result is undefined.

    Examples
    --------
    >>> from pyamg.graph import connected_components
    >>> print(connected_components( [[0,1,0],[1,0,1],[0,1,0]] ))
    [0 0 0]
    >>> print(connected_components( [[0,1,0],[1,0,0],[0,0,0]] ))
    [0 0 1]
    >>> print(connected_components( [[0,0,0],[0,0,0],[0,0,0]] ))
    [0 1 2]
    >>> print(connected_components( [[0,1,0,0],[1,0,0,0],[0,0,0,1],[0,0,1,0]] ))
    [0 0 1 1]
    """
    G = asgraph(G)
    N = G.shape[0]
    components = np.empty(N, G.indptr.dtype)
    fn = amg_core.connected_components
    fn(N, G.indptr, G.indices, components)
    return components
def symmetric_rcm(A):
    """Symmetric Reverse Cuthill-McKee.

    Parameters
    ----------
    A : sparse matrix
        Sparse matrix.

    Returns
    -------
    B : sparse matrix
        Permuted matrix with reordering.

    Notes
    -----
    Get a pseudo-peripheral node, then call BFS.

    Examples
    --------
    >>> from pyamg import gallery
    >>> from pyamg.graph import symmetric_rcm
    >>> n = 200
    >>> density = 1.0/n
    >>> A = gallery.sprand(n, n, density, format='csr')
    >>> S = A + A.T
    >>> # try the visualizations
    >>> import matplotlib.pyplot as plt
    >>> plt.figure()
    >>> plt.subplot(121)
    >>> plt.spy(S,marker='.')
    >>> plt.subplot(122)
    >>> plt.spy(symmetric_rcm(S),marker='.')

    See Also
    --------
    pseudo_peripheral_node
    """
    n = A.shape[0]
    # Only the BFS levels are used below; root and order are unused here.
    root, order, level = pseudo_peripheral_node(A)
    Perm = sparse.identity(n, format='csr')
    # NOTE(review): the level order is sorted ascending and not reversed,
    # despite the "Reverse" in the name — confirm this is intended.
    p = level.argsort()
    Perm = Perm[p, :]
    return Perm * A * Perm.T
def pseudo_peripheral_node(A):
    """Find a pseudo peripheral node.

    Parameters
    ----------
    A : sparse matrix
        Sparse matrix.

    Returns
    -------
    x : int
        Location of the node.
    order : array
        BFS ordering.
    level : array
        BFS levels.

    Notes
    -----
    Algorithm in Saad.
    """
    from pyamg.graph import breadth_first_search
    n = A.shape[0]
    # Vertex degrees: row lengths of the CSR structure.
    valence = np.diff(A.indptr)
    # select an initial node x, set delta = 0
    x = int(np.random.rand() * n)
    delta = 0
    while True:
        # do a level-set traversal from x
        order, level = breadth_first_search(A, x)
        # select a node y in the last level with min degree
        maxlevel = level.max()
        lastnodes = np.where(level == maxlevel)[0]
        lastnodesvalence = valence[lastnodes]
        minlastnodesvalence = lastnodesvalence.min()
        y = np.where(lastnodesvalence == minlastnodesvalence)[0][0]
        y = lastnodes[y]
        # if d(x,y)>delta, set, and go to bfs above
        if level[y] > delta:
            # Eccentricity grew: restart the sweep from y.
            x = y
            delta = level[y]
        else:
            return x, order, level
| {
"repo_name": "pyamg/pyamg",
"path": "pyamg/graph.py",
"copies": "1",
"size": "12162",
"license": "mit",
"hash": 1385509611404787500,
"line_mean": 25.8476821192,
"line_max": 86,
"alpha_frac": 0.5719454037,
"autogenerated": false,
"ratio": 3.6014213799230084,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4673366783623008,
"avg_score": null,
"num_lines": null
} |
"""Algorithms related to graphs"""
__docformat__ = "restructuredtext en"
import numpy as np
import scipy as sp
from scipy import sparse
import amg_core
__all__ = ['maximal_independent_set', 'vertex_coloring', 'bellman_ford',
'lloyd_cluster', 'connected_components']
def max_value(datatype):
    """Return the largest value representable by ``datatype``.

    Integer limits are queried first; ``numpy.iinfo`` raises a
    ``ValueError`` for non-integer dtypes, which triggers the floating
    point fallback.  The original bare ``except:`` also swallowed
    unrelated errors (including ``KeyboardInterrupt``).
    """
    try:
        return np.iinfo(datatype).max
    except ValueError:  # not an integer dtype
        return np.finfo(datatype).max
def asgraph(G):
    """Return ``G`` as a square sparse matrix in a compressed format.

    Inputs that are not already CSR or CSC are converted to CSR.
    Raises ``ValueError`` when the matrix is not square.
    """
    is_compressed = sparse.isspmatrix_csr(G) or sparse.isspmatrix_csc(G)
    if not is_compressed:
        G = sparse.csr_matrix(G)
    rows, cols = G.shape
    if rows != cols:
        raise ValueError('expected square matrix')
    return G
def maximal_independent_set(G, algo='serial', k=None):
    """Compute a maximal independent vertex set for a graph.

    Parameters
    ----------
    G : sparse matrix
        Symmetric matrix, preferably in sparse CSR or CSC format
        The nonzeros of G represent the edges of an undirected graph.
    algo : {'serial', 'parallel'}
        Algorithm used to compute the MIS
            * serial : greedy serial algorithm
            * parallel : variant of Luby's parallel MIS algorithm
    k : int, optional
        When given, a distance-k MIS is computed with the parallel
        kernel and ``algo`` is ignored.

    Returns
    -------
    An array S where
        S[i] = 1 if vertex i is in the MIS
        S[i] = 0 otherwise

    Notes
    -----
    Diagonal entries in the G (self loops) will be ignored.

    Luby's algorithm is significantly more expensive than the
    greedy serial algorithm.
    """
    G = asgraph(G)
    N = G.shape[0]

    mis = np.empty(N, dtype='intc')
    mis[:] = -1  # -1 == undecided; filled in by the C++ kernel

    if k is None:
        if algo == 'serial':
            fn = amg_core.maximal_independent_set_serial
            fn(N, G.indptr, G.indices, -1, 1, 0, mis)
        elif algo == 'parallel':
            fn = amg_core.maximal_independent_set_parallel
            # FIX: np.random.rand replaces sp.rand — the top-level
            # scipy.rand alias was removed from modern SciPy
            fn(N, G.indptr, G.indices, -1, 1, 0, mis, np.random.rand(N))
        else:
            raise ValueError('unknown algorithm (%s)' % algo)
    else:
        fn = amg_core.maximal_independent_set_k_parallel
        fn(N, G.indptr, G.indices, k, mis, np.random.rand(N))

    return mis
def vertex_coloring(G, method='MIS'):
    """Compute a vertex coloring of a graph.

    Parameters
    ----------
    G : sparse matrix
        Symmetric matrix, preferably in sparse CSR or CSC format
        The nonzeros of G represent the edges of an undirected graph.
    method : {string}
        Algorithm used to compute the vertex coloring:
            * 'MIS' - Maximal Independent Set
            * 'JP'  - Jones-Plassmann (parallel)
            * 'LDF' - Largest-Degree-First (parallel)

    Returns
    -------
    An array of vertex colors (integers beginning at 0)

    Notes
    -----
    Diagonal entries in the G (self loops) will be ignored.
    """
    G = asgraph(G)
    N = G.shape[0]

    coloring = np.empty(N, dtype='intc')

    if method == 'MIS':
        fn = amg_core.vertex_coloring_mis
        fn(N, G.indptr, G.indices, coloring)
    elif method == 'JP':
        fn = amg_core.vertex_coloring_jones_plassmann
        # FIX: np.random.rand replaces sp.rand — the top-level
        # scipy.rand alias was removed from modern SciPy
        fn(N, G.indptr, G.indices, coloring, np.random.rand(N))
    elif method == 'LDF':
        fn = amg_core.vertex_coloring_LDF
        fn(N, G.indptr, G.indices, coloring, np.random.rand(N))
    else:
        raise ValueError('unknown method (%s)' % method)

    return coloring
def bellman_ford(G, seeds, maxiter=None):
    """Bellman-Ford iteration.

    Sweeps the graph until, for every vertex, the distance to (and the
    identity of) its nearest seed vertex stops changing.

    Parameters
    ----------
    G : sparse matrix
        Graph with real, nonnegative edge weights in ``G.data``.
    seeds : array_like of int
        Indices of the seed vertices (assigned distance 0).
    maxiter : int, optional
        Maximum number of sweeps; by default the iteration runs until
        the distances converge.

    Returns
    -------
    distances : ndarray
        distances[i] is the distance from vertex i to its nearest seed.
    nearest_seed : ndarray
        nearest_seed[i] is the index of the seed nearest to vertex i.

    References
    ----------
    CLR
    """
    G = asgraph(G)
    N = G.shape[0]

    if maxiter is not None and maxiter < 0:
        raise ValueError('maxiter must be positive')
    if G.dtype == complex:
        # FIX: the message was a backslash-continued string literal that
        # embedded the source indentation into the runtime text
        raise ValueError('Bellman-Ford algorithm only defined for real '
                         'weights')

    seeds = np.asarray(seeds, dtype='intc')

    distances = np.empty(N, dtype=G.dtype)
    distances[:] = max_value(G.dtype)
    distances[seeds] = 0

    nearest_seed = np.empty(N, dtype='intc')
    nearest_seed[:] = -1
    nearest_seed[seeds] = seeds

    old_distances = np.empty_like(distances)

    iteration = 0
    while maxiter is None or iteration < maxiter:
        old_distances[:] = distances

        amg_core.bellman_ford(N, G.indptr, G.indices, G.data, distances,
                              nearest_seed)

        if (old_distances == distances).all():
            break
        # BUG FIX: the counter was never advanced, so maxiter had no effect
        iteration += 1

    return (distances, nearest_seed)
def lloyd_cluster(G, seeds, maxiter=10):
    """Perform Lloyd clustering on graph with weighted edges.

    Parameters
    ----------
    G : csr_matrix or csc_matrix
        A sparse NxN matrix where each nonzero entry G[i,j] is the distance
        between nodes i and j.
    seeds : {int, array}
        If seeds is an integer, then its value determines the number of
        clusters. Otherwise, seeds is an array of unique integers between 0
        and N-1 that will be used as the initial seeds for clustering.
    maxiter : int
        The maximum number of iterations to perform.

    Notes
    -----
    If G has complex values, abs(G) is used instead.
    """
    G = asgraph(G)
    N = G.shape[0]

    # cluster complex weights by magnitude
    if G.dtype.kind == 'c':
        G = np.abs(G)

    # normalize the seeds argument to an array of vertex indices
    if np.isscalar(seeds):
        seeds = np.random.permutation(N)[:seeds].astype('intc')
    else:
        seeds = np.array(seeds, dtype='intc')

    if len(seeds) < 1:
        raise ValueError('at least one seed is required')
    if seeds.min() < 0:
        raise ValueError('invalid seed index (%d)' % seeds.min())
    if seeds.max() >= N:
        raise ValueError('invalid seed index (%d)' % seeds.max())

    clusters = np.empty(N, dtype='intc')
    distances = np.empty(N, dtype=G.dtype)

    # iterate until the seeds stop moving, or maxiter sweeps
    for _ in range(maxiter):
        last_seeds = seeds.copy()

        amg_core.lloyd_cluster(N, G.indptr, G.indices, G.data,
                               len(seeds), distances, clusters, seeds)

        if (seeds == last_seeds).all():
            break

    return (distances, clusters, seeds)
def breadth_first_search(G, seed):
    """Breadth First search of a graph.

    Parameters
    ----------
    G : sparse matrix
        Adjacency structure of the graph; coerced with ``asgraph``.
    seed : int
        Index of the starting vertex.

    Returns
    -------
    order : ndarray
        Vertex visitation order produced by the C++ BFS kernel.
    level : ndarray
        Per-vertex BFS level; entries left at the initial -1
        presumably mark vertices unreachable from ``seed`` — confirm
        against the amg_core kernel.

    References
    ----------
    CLR
    """
    G = asgraph(G)
    N = G.shape[0]

    # NOTE(review): symmetry of G is not verified here; behavior for a
    # non-symmetric structure is the caller's responsibility.
    order = np.empty(N, G.indptr.dtype)
    level = np.empty(N, G.indptr.dtype)
    level[:] = -1  # sentinel for "not visited"; overwritten by the kernel

    BFS = amg_core.breadth_first_search
    BFS(G.indptr, G.indices, int(seed), order, level)

    return order, level
def connected_components(G):
    """Compute the connected components of a graph.

    The connected components of a graph G, which is represented by a
    symmetric sparse matrix, are labeled with the integers 0,1,..(K-1)
    where K is the number of components.

    Parameters
    ----------
    G : symmetric matrix, preferably in sparse CSR or CSC format
        The nonzeros of G represent the edges of an undirected graph.

    Returns
    -------
    components : ndarray
        An array of component labels for each vertex of the graph.

    Notes
    -----
    If the nonzero structure of G is not symmetric, then the
    result is undefined.
    """
    G = asgraph(G)
    num_vertices = G.shape[0]

    components = np.empty(num_vertices, G.indptr.dtype)
    amg_core.connected_components(num_vertices, G.indptr, G.indices,
                                  components)
    return components
def symmetric_rcm(A):
    """Symmetric Reverse Cuthill-McKee.

    Locate a pseudo-peripheral node, order vertices by their BFS level
    from that node, and return the symmetrically permuted matrix
    ``P * A * P.T``.

    See Also
    --------
    pseudo_peripheral_node
    """
    n = A.shape[0]
    # root and visitation order are unused; only the levels matter here
    _, _, level = pseudo_peripheral_node(A)
    permutation = sparse.identity(n, format='csr')[level.argsort(), :]
    return permutation * A * permutation.T
def pseudo_peripheral_node(A):
    """Find a pseudo-peripheral node by repeated BFS sweeps.

    Notes
    -----
    Algorithm in Saad
    """
    from pyamg.graph import breadth_first_search

    valence = np.diff(A.indptr)

    # pick a random starting node; current eccentricity estimate is 0
    node = int(np.random.rand() * A.shape[0])
    eccentricity = 0

    while True:
        # BFS level-set traversal rooted at the current node
        order, level = breadth_first_search(A, node)

        # candidate: first minimum-degree node in the deepest level set
        deepest = np.where(level == level.max())[0]
        candidate = deepest[valence[deepest].argmin()]

        if level[candidate] > eccentricity:
            # farther node found — sweep again from there
            node = candidate
            eccentricity = level[candidate]
        else:
            return node, order, level
| {
"repo_name": "kidaa/pyamg",
"path": "pyamg/graph.py",
"copies": "2",
"size": "9545",
"license": "bsd-3-clause",
"hash": 8510628706907840000,
"line_mean": 23.6005154639,
"line_max": 79,
"alpha_frac": 0.5790466213,
"autogenerated": false,
"ratio": 3.5682242990654207,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002147766323024055,
"num_lines": 388
} |
"""Algorithms related to graphs"""
__docformat__ = "restructuredtext en"
import numpy
import scipy
from scipy import sparse
import amg_core
__all__ = ['maximal_independent_set', 'vertex_coloring', 'bellman_ford',
'lloyd_cluster', 'connected_components']
def max_value(datatype):
    """Return the largest value representable by ``datatype``.

    ``numpy.iinfo`` raises ``ValueError`` for non-integer dtypes, in
    which case the floating point limits are used instead.  The bare
    ``except:`` previously swallowed every exception type.
    """
    try:
        return numpy.iinfo(datatype).max
    except ValueError:  # not an integer dtype
        return numpy.finfo(datatype).max
def asgraph(G):
    """Coerce ``G`` to a square sparse matrix in CSR/CSC form.

    Anything not already CSR or CSC is converted to CSR; non-square
    input raises ``ValueError``.
    """
    already_compressed = (sparse.isspmatrix_csr(G)
                          or sparse.isspmatrix_csc(G))
    if not already_compressed:
        G = sparse.csr_matrix(G)
    n_rows, n_cols = G.shape
    if n_rows != n_cols:
        raise ValueError('expected square matrix')
    return G
def maximal_independent_set(G, algo='serial', k=None):
    """Compute a maximal independent vertex set for a graph.

    Parameters
    ----------
    G : sparse matrix
        Symmetric matrix, preferably in sparse CSR or CSC format
        The nonzeros of G represent the edges of an undirected graph.
    algo : {'serial', 'parallel'}
        Algorithm used to compute the MIS
            * serial : greedy serial algorithm
            * parallel : variant of Luby's parallel MIS algorithm
    k : int, optional
        When given, a distance-k MIS is computed with the parallel
        kernel and ``algo`` is ignored.

    Returns
    -------
    An array S where
        S[i] = 1 if vertex i is in the MIS
        S[i] = 0 otherwise

    Notes
    -----
    Diagonal entries in the G (self loops) will be ignored.

    Luby's algorithm is significantly more expensive than the
    greedy serial algorithm.
    """
    G = asgraph(G)
    N = G.shape[0]

    mis = numpy.empty(N, dtype='intc')
    mis[:] = -1  # -1 == undecided; filled in by the C++ kernel

    if k is None:
        if algo == 'serial':
            fn = amg_core.maximal_independent_set_serial
            fn(N, G.indptr, G.indices, -1, 1, 0, mis)
        elif algo == 'parallel':
            fn = amg_core.maximal_independent_set_parallel
            # FIX: numpy.random.rand replaces scipy.rand — that
            # top-level alias was removed from modern SciPy
            fn(N, G.indptr, G.indices, -1, 1, 0, mis, numpy.random.rand(N))
        else:
            raise ValueError('unknown algorithm (%s)' % algo)
    else:
        fn = amg_core.maximal_independent_set_k_parallel
        fn(N, G.indptr, G.indices, k, mis, numpy.random.rand(N))

    return mis
def vertex_coloring(G, method='MIS'):
    """Compute a vertex coloring of a graph.

    Parameters
    ----------
    G : sparse matrix
        Symmetric matrix, preferably in sparse CSR or CSC format
        The nonzeros of G represent the edges of an undirected graph.
    method : {string}
        Algorithm used to compute the vertex coloring:
            * 'MIS' - Maximal Independent Set
            * 'JP'  - Jones-Plassmann (parallel)
            * 'LDF' - Largest-Degree-First (parallel)

    Returns
    -------
    An array of vertex colors (integers beginning at 0)

    Notes
    -----
    Diagonal entries in the G (self loops) will be ignored.
    """
    G = asgraph(G)
    N = G.shape[0]

    coloring = numpy.empty(N, dtype='intc')

    if method == 'MIS':
        fn = amg_core.vertex_coloring_mis
        fn(N, G.indptr, G.indices, coloring)
    elif method == 'JP':
        fn = amg_core.vertex_coloring_jones_plassmann
        # FIX: numpy.random.rand replaces scipy.rand — that top-level
        # alias was removed from modern SciPy
        fn(N, G.indptr, G.indices, coloring, numpy.random.rand(N))
    elif method == 'LDF':
        fn = amg_core.vertex_coloring_LDF
        fn(N, G.indptr, G.indices, coloring, numpy.random.rand(N))
    else:
        raise ValueError('unknown method (%s)' % method)

    return coloring
def bellman_ford(G, seeds, maxiter=None):
    """Bellman-Ford iteration.

    Sweeps the graph until every vertex knows the distance to (and the
    identity of) its nearest seed vertex.

    Parameters
    ----------
    G : sparse matrix
        Graph with real, nonnegative edge weights in ``G.data``.
    seeds : array_like of int
        Indices of the seed vertices (assigned distance 0).
    maxiter : int, optional
        Maximum number of sweeps; by default the iteration runs until
        the distances converge.

    Returns
    -------
    distances : ndarray
        distances[i] is the distance from vertex i to its nearest seed.
    nearest_seed : ndarray
        nearest_seed[i] is the index of the seed nearest to vertex i.

    References
    ----------
    CLR
    """
    G = asgraph(G)
    N = G.shape[0]

    if maxiter is not None and maxiter < 0:
        raise ValueError('maxiter must be positive')
    if G.dtype == complex:
        # FIX: the message was a backslash-continued string literal that
        # embedded the source indentation into the runtime text
        raise ValueError('Bellman-Ford algorithm only defined for real '
                         'weights')

    seeds = numpy.asarray(seeds, dtype='intc')

    distances = numpy.empty(N, dtype=G.dtype)
    distances[:] = max_value(G.dtype)
    distances[seeds] = 0

    nearest_seed = numpy.empty(N, dtype='intc')
    nearest_seed[:] = -1
    nearest_seed[seeds] = seeds

    old_distances = numpy.empty_like(distances)

    iteration = 0
    while maxiter is None or iteration < maxiter:
        old_distances[:] = distances

        amg_core.bellman_ford(N, G.indptr, G.indices, G.data, distances,
                              nearest_seed)

        if (old_distances == distances).all():
            break
        # BUG FIX: the counter was never advanced, so maxiter had no effect
        iteration += 1

    return (distances, nearest_seed)
def lloyd_cluster(G, seeds, maxiter=10):
    """Perform Lloyd clustering on graph with weighted edges.

    Parameters
    ----------
    G : csr_matrix or csc_matrix
        A sparse NxN matrix where each nonzero entry G[i,j] is the distance
        between nodes i and j.
    seeds : {int, array}
        If seeds is an integer, then its value determines the number of
        clusters. Otherwise, seeds is an array of unique integers between 0
        and N-1 that will be used as the initial seeds for clustering.
    maxiter : int
        The maximum number of iterations to perform.

    Notes
    -----
    If G has complex values, abs(G) is used instead.
    """
    G = asgraph(G)
    N = G.shape[0]

    # complex weights are clustered by magnitude
    if G.dtype.kind == 'c':
        G = numpy.abs(G)

    # turn the seeds argument into an array of vertex indices
    if numpy.isscalar(seeds):
        seeds = numpy.random.permutation(N)[:seeds].astype('intc')
    else:
        seeds = numpy.array(seeds, dtype='intc')

    if len(seeds) < 1:
        raise ValueError('at least one seed is required')
    if seeds.min() < 0:
        raise ValueError('invalid seed index (%d)' % seeds.min())
    if seeds.max() >= N:
        raise ValueError('invalid seed index (%d)' % seeds.max())

    clusters = numpy.empty(N, dtype='intc')
    distances = numpy.empty(N, dtype=G.dtype)

    # sweep until the seeds stop moving, or maxiter passes elapse
    for _ in range(maxiter):
        last_seeds = seeds.copy()

        amg_core.lloyd_cluster(N, G.indptr, G.indices, G.data,
                               len(seeds), distances, clusters, seeds)

        if (seeds == last_seeds).all():
            break

    return (distances, clusters, seeds)
def breadth_first_search(G, seed):
    """Breadth First search of a graph.

    Parameters
    ----------
    G : sparse matrix
        Adjacency structure of the graph; coerced with ``asgraph``.
    seed : int
        Index of the starting vertex.

    Returns
    -------
    order : ndarray
        Vertex visitation order produced by the C++ BFS kernel.
    level : ndarray
        Per-vertex BFS level; entries left at the initial -1
        presumably mark vertices unreachable from ``seed`` — confirm
        against the amg_core kernel.

    References
    ----------
    CLR
    """
    G = asgraph(G)
    N = G.shape[0]

    # NOTE(review): symmetry of G is not verified; behavior for a
    # non-symmetric structure is the caller's responsibility.
    order = numpy.empty(N, G.indptr.dtype)
    level = numpy.empty(N, G.indptr.dtype)
    level[:] = -1  # sentinel for "not visited"; overwritten by the kernel

    BFS = amg_core.breadth_first_search
    BFS(G.indptr, G.indices, int(seed), order, level)

    return order, level
def connected_components(G):
    """Compute the connected components of a graph.

    The connected components of a graph G, which is represented by a
    symmetric sparse matrix, are labeled with the integers 0,1,..(K-1)
    where K is the number of components.

    Parameters
    ----------
    G : symmetric matrix, preferably in sparse CSR or CSC format
        The nonzeros of G represent the edges of an undirected graph.

    Returns
    -------
    components : ndarray
        An array of component labels for each vertex of the graph.

    Notes
    -----
    If the nonzero structure of G is not symmetric, then the
    result is undefined.
    """
    G = asgraph(G)
    num_vertices = G.shape[0]

    labels = numpy.empty(num_vertices, G.indptr.dtype)
    amg_core.connected_components(num_vertices, G.indptr, G.indices,
                                  labels)
    return labels
def symmetric_rcm(A):
    """Symmetric Reverse Cuthill-McKee.

    Get a pseudo-peripheral node, then call BFS and return the
    symmetrically permuted matrix ``P * A * P.T``.

    See Also
    --------
    pseudo_peripheral_node
    """
    n = A.shape[0]

    root, order, level = pseudo_peripheral_node(A)

    # BUG FIX: sparse.identity defaults to the DIA format, which does not
    # support the row slicing below; request CSR explicitly (this matches
    # the other maintained copy of this function).
    Perm = sparse.identity(n, format='csr')

    p = level.argsort()
    Perm = Perm[p, :]

    return Perm * A * Perm.T
def pseudo_peripheral_node(A):
    """Find a pseudo-peripheral node by repeated BFS sweeps.

    Notes
    -----
    Algorithm in Saad
    """
    import numpy
    from pyamg.graph import breadth_first_search

    valence = numpy.diff(A.indptr)

    # random starting node; eccentricity estimate begins at 0
    node = int(numpy.random.rand() * A.shape[0])
    eccentricity = 0

    while True:
        # BFS level-set traversal from the current node
        order, level = breadth_first_search(A, node)

        # candidate: first minimum-degree node in the deepest level set
        deepest = numpy.where(level == level.max())[0]
        candidate = deepest[valence[deepest].argmin()]

        if level[candidate] > eccentricity:
            # candidate is farther away — restart the sweep from it
            node = candidate
            eccentricity = level[candidate]
        else:
            return node, order, level
| {
"repo_name": "pombreda/pyamg",
"path": "pyamg/graph.py",
"copies": "1",
"size": "9553",
"license": "bsd-3-clause",
"hash": -2512739349549351000,
"line_mean": 23.6211340206,
"line_max": 79,
"alpha_frac": 0.5817020831,
"autogenerated": false,
"ratio": 3.5954083552879186,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46771104383879186,
"avg_score": null,
"num_lines": null
} |
""" algorithms tests
thomas moll 2015
"""
import unittest
import algorithms
class TestAlgorithms(unittest.TestCase):
    """Unit tests for the search helpers in the ``algorithms`` module."""

    def setUp(self):
        # NOTE(review): list_b is a rotation of list_a and is expected
        # to compare equal under algorithms.array_equals
        self.list_a = [12, 43, 60, 32, 10]
        self.list_b = [43, 60, 32, 10, 12]
        self.list_c = [10, 12, 9, 7, 40, 6]
        # BUG FIX: ``xrange`` exists only on Python 2; ``list(range(...))``
        # produces the identical list on both Python 2 and Python 3
        self.big_sorted = list(range(1000))

    def test_array_equals(self):
        test = algorithms.array_equals(self.list_a, self.list_b)
        self.assertTrue(test)
        test = algorithms.array_equals(self.list_a, self.list_c)
        self.assertFalse(test)

    def test_sequential_search(self):
        item_in_list = 738
        item_not_in_list = -30
        test = algorithms.find_sequentially(self.big_sorted, item_in_list)
        self.assertTrue(test)
        test = algorithms.find_sequentially(self.big_sorted, item_not_in_list)
        self.assertFalse(test)

    def test_binary_search(self):
        item_in_list = 738
        item_not_in_list = -30
        test = algorithms.binary_search(self.big_sorted, item_in_list)
        self.assertTrue(test)
        test = algorithms.binary_search(self.big_sorted, item_not_in_list)
        self.assertFalse(test)
if __name__ == '__main__':
    # Run the suite directly; verbosity=2 prints each test name as it runs.
    unittest.main(verbosity=2)
| {
"repo_name": "QuantumFractal/Data-Structure-Zoo",
"path": "1-Algorithm Analysis/test.py",
"copies": "5",
"size": "1254",
"license": "mit",
"hash": -5072729073633675000,
"line_mean": 27.1627906977,
"line_max": 78,
"alpha_frac": 0.6028708134,
"autogenerated": false,
"ratio": 3.3619302949061662,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6464801108306166,
"avg_score": null,
"num_lines": null
} |
"""Algorithms to determine the roots of polynomials"""
from sympy.polynomials.base import *
from sympy.polynomials import div_, groebner_
def cubic(f):
    """Computes the roots of a cubic polynomial.

    Usage:
    ======
        This function is called by the wrapper L{roots}, don't use it
        directly. The input is assumed to be a univariate instance of
        Polynomial of degree 3.

    References:
    ===========
        http://en.wikipedia.org/wiki/Cubic_equation#Cardano.27s_method
    """
    # Get monic polynomial.
    f = f.as_monic()[1]
    a = f.nth_coeff(2)
    b = f.nth_coeff(1)
    c = f.nth_coeff(0)
    # Substitute variable to get depressed cubic: t**3 + p * t + q
    # (the shift x -> t - a/3 removes the quadratic term)
    p = b + -a**2/3
    q = c + (2*a**3 - 9*a*b)/27
    if p is S.Zero:  # Solve special cases:
        if q is S.Zero:
            # p == q == 0: triple root at -a/3
            return [-a/3]
        else:
            # p == 0: one cube root; the others are generated below
            u1 = q**Rational(1, 3)
    else:
        # Cardano's substitution: u1**3 = q/2 + sqrt(q**2/4 + p**3/27)
        u1 = (q/2 + Basic.sqrt(q**2/4 + p**3/27))**Rational(1, 3)
    # The remaining cube roots differ by the primitive third roots of unity.
    u2 = u1*(Rational(-1, 2) + S.ImaginaryUnit*Basic.sqrt(3)/2)
    u3 = u1*(Rational(-1, 2) - S.ImaginaryUnit*Basic.sqrt(3)/2)
    # Undo the substitutions: each root of f is p/(3*u) - u - a/3.
    return map(lambda u: (p/(u*3) - u - a/3).expand(), [u1, u2, u3])
def n_poly(f):
    """Tries to substitute a power of a variable, to simplify.

    Usage:
    ======
        This function is called by the wrapper L{roots}, don't use it
        directly. It returns 'None' if no such simplification is
        possible. The input f is assumed to be a univariate instance
        of Polynomial.

    References:
    ===========
        http://en.wikipedia.org/wiki/Root_of_unity
        http://en.wikipedia.org/wiki/Radical_root#Positive_real_numbers
    """
    def roots_of_unity(n):
        """Computes the list of the n-th roots of unity."""
        result = []
        for i in range(0, n):
            result.append(Basic.exp(2*i*S.Pi*S.ImaginaryUnit/n))
        return result

    # g = gcd of all exponents; for g > 1, f is a polynomial in x**g
    exponents = map(lambda t: int(t[1]), f.coeffs)
    g = reduce(numbers.gcd, exponents)
    if g == 1 or g == 0:
        return None
    n = int(f.coeffs[0][1]/g)
    if not n in [1, 2, 3]:  # Cases where solution can be computed
        return None

    # Substitute y = x**g and solve the degree-n polynomial in y.
    ff = Polynomial(coeffs=tuple(map(lambda t: (t[0], t[1]/g), f.coeffs)),
                    var=f.var, order=f.order)
    # Each root s of ff yields g roots zeta*s**(1/g), one for every
    # g-th root of unity zeta.
    return [(zeta*s**Rational(1, g)).expand(complex=True)
            for s in roots(ff) for zeta in roots_of_unity(g)]
def quadratic(f):
    """Computes the roots of a quadratic polynomial.

    Usage:
    ======
        This function is called by the wrapper L{roots}, don't use it
        directly. The input is assumed to be a univariate instance of
        Polynomial of degree 2.

    References:
    ===========
        http://en.wikipedia.org/wiki/Quadratic_equation#Quadratic_formula
    """
    # Get monic polynomial, for p-q formula
    f = f.as_monic()[1]

    # Solve special cases:
    if len(f.coeffs) == 1:
        # f == x**2: double root at zero
        return [S.Zero]
    if len(f.coeffs) == 2:
        if f.coeffs[1][1] == 1:  # No constant term: x*(x + p) == 0
            return [S.Zero, -(f.coeffs[1][0])]
        else:  # No linear term: x**2 == q
            q = -(f.coeffs[1][0])
            if q > 0:
                return [-Basic.sqrt(q), Basic.sqrt(q)]
            else:
                # BUG FIX: this branch used to duplicate the q > 0 case;
                # for q < 0 the roots are purely imaginary, mirroring the
                # explicit discr < 0 handling below.
                return [-S.ImaginaryUnit*Basic.sqrt(-q),
                        S.ImaginaryUnit*Basic.sqrt(-q)]

    p = f.coeffs[1][0]
    q = f.coeffs[2][0]
    discr = p**2 - 4*q
    if (not discr.is_complex) or discr > 0:
        # Real (or symbolic) discriminant: classic p-q formula
        return [-p/2 + Basic.sqrt(discr)/2,
                -p/2 - Basic.sqrt(discr)/2]
    elif discr == 0:
        # Vanishing discriminant: one double root
        return [-p/2]
    else:  # discr < 0: conjugate complex pair
        return [-p/2 + S.ImaginaryUnit*Basic.sqrt(-discr)/2,
                -p/2 - S.ImaginaryUnit*Basic.sqrt(-discr)/2]
# TODO: Implement function to find roots of quartic polynomials?
def rat_roots(f):
    """Computes the rational roots of a polynomial.

    Usage:
    ======
        This function is called by the wrapper L{roots}, don't use it
        directly. The input is assumed to be a univariate and
        square-free instance of Polynomial, with integer coefficients.

    References:
    ===========
        http://en.wikipedia.org/wiki/Rational_root_theorem
    """
    # For an integer polynomial an*x**n + ... + a0, all rational roots
    # are of the form p/q, where p and q are factors of a0 and an.
    an_divs = integer_divisors(int(f.coeffs[0][0]))
    a0_divs = integer_divisors(int(f.coeffs[-1][0]))
    result = []
    for p in a0_divs:
        for q in an_divs:
            # Try both signs of each candidate p/q.
            if f(Rational(p, q)) is S.Zero:
                result.append(Rational(p, q))
            if f(Rational(-p, q)) is S.Zero:
                result.append(Rational(-p, q))
    # Now check if 0 is a root.
    if f.sympy_expr.subs(f.var[0], S.Zero).expand() is S.Zero:
        result.append(S.Zero)
    return result
def count_real_roots(s, a=None, b=None):
    """Returns the number of unique real roots of f in the interval (a, b].

    Usage:
    ======
        The input can be a square-free and univariate polynomial, or a
        precomputed Sturm sequence, if you want to check one specific
        polynomial with several intervals. See L{sturm}.

        The boundaries a and b can be omitted to check the whole real
        line or one ray.

    Examples:
    =========
        >>> x = Symbol('x')
        >>> count_real_roots(x**2 - 1)
        2
        >>> count_real_roots(x**2 - 1, 0, 2)
        1

    References:
    ===========
        Davenport, Siret, Tournier: Computer Algebra, 1988
    """
    def sign_changes(lisp):
        """Counts how often the sign changes between consecutive
        list elements."""
        # NOTE(review): indentation of `current = el` was ambiguous in
        # the archived source; placing it in the loop body updates the
        # reference sign on every element — confirm against upstream.
        counter = 0
        current = lisp[0]
        for el in lisp:
            if (current < 0 and el >= 0) or \
               (current > 0 and el <= 0):
                counter += 1
            current = el
        return counter

    # Allow a polynomial instead of its Sturm sequence
    if not isinstance(s, list):
        s = sturm(s)
    if a is not None:
        a = sympify(a)
    if b is not None:
        b = sympify(b)
    if a is None:  # a = -oo
        # Sign at -oo: leading coefficient times (-1)**degree.
        sa = sign_changes(map(
            lambda p: p.coeffs[0][0]*(-1)**p.coeffs[0][1], s))
    else:
        sa = sign_changes(map(lambda p: p.sympy_expr.subs(p.var[0], a), s))
    if b is None:  # b = oo
        # Sign at oo: determined by the leading coefficient alone.
        sb = sign_changes(map(lambda p: p.coeffs[0][0], s))
    else:
        sb = sign_changes(map(lambda p: p.sympy_expr.subs(p.var[0], b), s))
    # Sturm's theorem: root count is the difference of sign changes.
    return sa - sb
def sturm(f):
    """Compute the Sturm sequence of given polynomial.

    Usage:
    ======
        The input is assumed to be a square-free and univariate
        polynomial, either as a SymPy expression or as instance of
        Polynomial.

        The output is a list representing f's Sturm sequence, which is
        built similarly to the euclidian algorithm, beginning with f
        and its derivative.

        The result can be used in L{count_real_roots}.

    References:
    ===========
        Davenport, Siret, Tournier: Computer Algebra, 1988
    """
    if not isinstance(f, Polynomial):
        f = Polynomial(f)
    seq = [f]
    seq.append(f.diff(f.var[0]))
    # As in the Euclidean algorithm, append the negated division
    # remainder of the last two entries until it vanishes.
    while seq[-1].sympy_expr is not S.Zero:
        seq.append(-(div_.div(seq[-2], seq[-1])[-1]))
    # Drop the trailing zero polynomial.
    return seq[:-1]
def roots(f, var=None):
    """Compute the roots of a univariate polynomial.

    Usage:
    ======
        The input f is assumed to be a univariate polynomial, either
        as SymPy expression or as instance of Polynomial. In the
        latter case, you can optionally specify the variable with
        'var'.

        The output is a list of all found roots with multiplicity.

    Examples:
    =========
        >>> x, y = symbols('xy')
        >>> roots(x**2 - 1)
        [1, -1]
        >>> roots(x - y, x)
        [y]

    Also see L{factor_.factor}, L{quadratic}, L{cubic}, L{n_poly},
    L{count_real_roots}.
    """
    from sympy.polynomials import factor_
    if not isinstance(f, Polynomial):
        f = Polynomial(f, var=var, order=None)
    if len(f.var) == 0:
        return []
    if len(f.var) > 1:
        raise PolynomialException('Multivariate polynomials not supported.')

    # Determine type of coeffs (for factorization purposes)
    symbols = f.sympy_expr.atoms(type=Symbol)
    symbols = filter(lambda a: not a in f.var, symbols)
    if symbols:
        coeff = 'sym'
    else:
        coeff = coeff_ring(get_numbers(f.sympy_expr))
    # Rational coefficients are rescaled to integers first.
    if coeff == 'rat':
        denom, f = f.as_integer()
        coeff = 'int'
    if coeff == 'int':
        content, f = f.as_primitive()
        # Hack to get some additional cases right:
        result = n_poly(f)
        if result is not None:
            return result
        factors = factor_.factor(f)
    else:  # It's not possible to factorize.
        factors = [f]

    # Now check for roots in each factor, dispatching on its degree.
    result = []
    for p in factors:
        n = p.coeffs[0][1]  # Degree of the factor.
        if n == 0:  # We have a constant factor.
            pass
        elif n == 1:
            if len(p.coeffs) == 2:
                result += [-(p.coeffs[1][0] / p.coeffs[0][0])]
            else:
                result += [S.Zero]
        elif n == 2:
            result += quadratic(p)
        elif n == 3:
            result += cubic(p)
        else:
            # Higher degree: only solvable via power substitution.
            res = n_poly(p)
            if res is not None:
                result += res
    # With symbols, __nonzero__ returns a StrictInequality, Exception.
    try: result.sort()
    except: pass
    return result
def solve_system(eqs, var=None, order=None):
    """Solves a system of polynomial equations.

    Usage:
    ======
        Assumes to get a list of polynomials, either as SymPy
        expressions or instances of Polynomial. In the first case, you
        should specify the variables and monomial order through 'var'
        and 'order'. Otherwise, the polynomials should have matching
        variables and orders already. Only the first polynomial is
        checked for its type.

        This algorithm uses variable elimination and only works for
        zero-dimensional varieties, that is, a finite number of
        solutions, which is currently not tested.

    Examples:
    =========
        >>> x, y = symbols('xy')
        >>> f = y - x
        >>> g = x**2 + y**2 - 1
        >>> solve_system([f, g])
        [(-(1/2)**(1/2), -(1/2)**(1/2)), ((1/2)**(1/2), (1/2)**(1/2))]

    References:
    ===========
        Cox, Little, O'Shea: Ideals, Varieties and Algorithms,
        Springer, 2. edition, p. 113
    """
    def is_uv(f):
        """Is an instance of Polynomial univariate in its last variable?"""
        for term in f.coeffs:
            for exponent in term[1:-1]:
                if exponent > 0:
                    return False
        return True

    # Normalize the input to a list of lex-ordered Polynomials.
    if not isinstance(eqs, list):
        eqs = [eqs]
    if not isinstance(eqs[0], Polynomial):
        if var is None:
            var = merge_var(*[f.atoms(type=Symbol) for f in eqs])
        eqs = [Polynomial(f, var=var, order='lex') for f in eqs]
    else:
        eqs = [Polynomial(f.sympy_expr, var=f.var, order='lex') for f in eqs]

    # First compute a Groebner base with the polynomials,
    # with lexicographic ordering, so that the last polynomial is
    # univariate and can be solved.
    gb = groebner_.groebner(eqs)
    # Now filter the base elements, to get only the univariate ones.
    eliminated = filter(is_uv, gb)
    if len(eliminated) != 1:
        raise PolynomialException("System currently not solvable.")

    # Try to solve the polynomials with var eliminated.
    f = eliminated[0]
    partial_solutions = set(roots(f.sympy_expr, var=f.var[-1]))

    # No solutions were found.
    # TODO: Check if there exist some anyways?
    if len(partial_solutions) == 0:
        return []

    # Is this the last equation, that is, deepest hierarchy?
    if len(gb) == 1:
        return map(lambda s: (s,), partial_solutions)

    # Finally, call this function recursively for each root replacing
    # the corresponding variable in the system.
    result = []
    for r in partial_solutions:
        new_system = []
        for eq in gb[:-1]:
            # Substitute the root into every remaining base element.
            new_eq = eq.sympy_expr.subs(eq.var[-1], r).expand()
            if new_eq is not S.Zero:
                new_system.append(
                    Polynomial(new_eq, var=eq.var[:-1], order='lex'))
        if not new_system:
            return []
        for nps in solve_system(new_system):
            result.append(nps + (r,))
    # With symbols, __nonzero__ returns a StrictInequality, Exception.
    try: result.sort()
    except: pass
    return result
| {
"repo_name": "certik/sympy-oldcore",
"path": "sympy/polynomials/roots_.py",
"copies": "1",
"size": "12552",
"license": "bsd-3-clause",
"hash": -2768478862339429000,
"line_mean": 28.6737588652,
"line_max": 77,
"alpha_frac": 0.5618228171,
"autogenerated": false,
"ratio": 3.5228739825989335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45846967996989335,
"avg_score": null,
"num_lines": null
} |
"""Algorithms to reorder triangle list order and vertex order aiming to
minimize vertex cache misses.
This is effectively an implementation of
'Linear-Speed Vertex Cache Optimisation' by Tom Forsyth, 28th September 2006
http://home.comcast.net/~tom_forsyth/papers/fast_vert_cache_opt.html
"""
# ***** BEGIN LICENSE BLOCK *****
#
# Copyright (c) 2007-2012, Python File Format Interface
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Python File Format Interface
# project nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# ***** END LICENSE BLOCK *****
import collections
from functools import reduce
from pyffi.utils.tristrip import OrientedStrip
class VertexScore:
    """Vertex score calculation."""

    # constants used for the scoring algorithm
    CACHE_SIZE = 32  # size of the modeled cache; higher values yield virtually no improvement
    CACHE_DECAY_POWER = 1.5
    LAST_TRI_SCORE = 0.75
    VALENCE_BOOST_SCALE = 2.0
    VALENCE_BOOST_POWER = 0.5
    # implementation note: limitation of 255 triangles per vertex
    # this is unlikely to be exceeded...
    MAX_TRIANGLES_PER_VERTEX = 255

    def __init__(self):
        # the score tables are built once up front for speed
        self.precalculate()

    def precalculate(self):
        """Build the cache-position and valence score lookup tables."""
        cache_scores = []
        for position in range(self.CACHE_SIZE):
            if position < 3:
                # the three most recently used vertices get a fixed score
                cache_scores.append(self.LAST_TRI_SCORE)
            else:
                scale = ((self.CACHE_SIZE - position)
                         / (self.CACHE_SIZE - 3))
                cache_scores.append(scale ** self.CACHE_DECAY_POWER)
        self.CACHE_SCORE = cache_scores
        # index 0 (no remaining triangles) is never looked up; keep None
        valence_scores = [None]
        for valence in range(1, self.MAX_TRIANGLES_PER_VERTEX + 1):
            valence_scores.append(
                self.VALENCE_BOOST_SCALE
                * (valence ** (-self.VALENCE_BOOST_POWER)))
        self.VALENCE_SCORE = valence_scores

    def update_score(self, vertex_info):
        """Update score:

        * -1 if vertex has no triangles
        * cache score + valence score otherwise

        where cache score is

        * 0 if vertex is not in cache
        * 0.75 if vertex has been used very recently
          (position 0, 1, or 2)
        * (1 - (cache position - 3) / (32 - 3)) ** 1.5
          otherwise

        and valence score is 2 * (num triangles ** (-0.5))

        >>> vertex_score = VertexScore()
        >>> def get_score(cache_position, triangle_indices):
        ...     vert = VertexInfo(cache_position=cache_position,
        ...                       triangle_indices=triangle_indices)
        ...     vertex_score.update_score(vert)
        ...     return vert.score
        >>> for cache_position in [-1, 0, 1, 2, 3, 4, 5]:
        ...     print("cache position = {0}".format(cache_position))
        ...     for num_triangles in range(4):
        ...         print("  num triangles = {0} : {1:.3f}"
        ...               .format(num_triangles,
        ...                       get_score(cache_position,
        ...                                 list(range(num_triangles)))))
        cache position = -1
          num triangles = 0 : -1.000
          num triangles = 1 : 2.000
          num triangles = 2 : 1.414
          num triangles = 3 : 1.155
        cache position = 0
          num triangles = 0 : -1.000
          num triangles = 1 : 2.750
          num triangles = 2 : 2.164
          num triangles = 3 : 1.905
        cache position = 1
          num triangles = 0 : -1.000
          num triangles = 1 : 2.750
          num triangles = 2 : 2.164
          num triangles = 3 : 1.905
        cache position = 2
          num triangles = 0 : -1.000
          num triangles = 1 : 2.750
          num triangles = 2 : 2.164
          num triangles = 3 : 1.905
        cache position = 3
          num triangles = 0 : -1.000
          num triangles = 1 : 3.000
          num triangles = 2 : 2.414
          num triangles = 3 : 2.155
        cache position = 4
          num triangles = 0 : -1.000
          num triangles = 1 : 2.949
          num triangles = 2 : 2.363
          num triangles = 3 : 2.103
        cache position = 5
          num triangles = 0 : -1.000
          num triangles = 1 : 2.898
          num triangles = 2 : 2.313
          num triangles = 3 : 2.053
        """
        if not vertex_info.triangle_indices:
            # no remaining triangle references this vertex
            vertex_info.score = -1
            return
        position = vertex_info.cache_position
        # cache component: zero when the vertex is outside the modeled cache
        score = 0 if position < 0 else self.CACHE_SCORE[position]
        # valence component: reward vertices used by few remaining triangles
        # note: example mesh with more than 255 triangles per vertex is
        # falloutnv/meshes/landscape/lod/freesidefortworld/freesidefortworld.level8.x-9.y1.nif
        valence = min(len(vertex_info.triangle_indices),
                      self.MAX_TRIANGLES_PER_VERTEX)
        vertex_info.score = score + self.VALENCE_SCORE[valence]
class VertexInfo:
    """Stores information about a vertex."""

    def __init__(self, cache_position=-1, score=-1,
                 triangle_indices=None):
        # position in the modeled cache; -1 means "not in cache"
        self.cache_position = cache_position
        self.score = score
        # only triangles that have *not* yet been drawn are in this list;
        # a fresh list per instance avoids the shared-mutable-default trap
        if triangle_indices is None:
            self.triangle_indices = []
        else:
            self.triangle_indices = triangle_indices
class TriangleInfo:
    """Stores the score and vertices of a triangle."""

    def __init__(self, score=0, vertex_indices=None):
        self.score = score
        # fresh list per instance when no vertices are given
        if vertex_indices is None:
            self.vertex_indices = []
        else:
            self.vertex_indices = vertex_indices
class Mesh:
    """Simple mesh implementation which keeps track of which triangles
    are used by which vertex, and vertex cache positions.
    """

    _DEBUG = False # to enable debugging of the algorithm

    def __init__(self, triangles, vertex_score=None):
        """Initialize mesh from given set of triangles.

        Builds ``self.vertex_infos`` (one :class:`VertexInfo` per vertex
        index, including unreferenced indices below the maximum) and
        ``self.triangle_infos`` (one :class:`TriangleInfo` per unique,
        non-degenerate triangle), then scores every vertex and triangle.

        Empty mesh
        ----------

        >>> Mesh([]).triangle_infos
        []

        Single triangle mesh (with degenerate)
        --------------------------------------

        >>> m = Mesh([(0,1,2), (1,2,0)])
        >>> [vertex_info.triangle_indices for vertex_info in m.vertex_infos]
        [[0], [0], [0]]
        >>> [triangle_info.vertex_indices for triangle_info in m.triangle_infos]
        [(0, 1, 2)]

        Double triangle mesh
        --------------------

        >>> m = Mesh([(0,1,2), (2,1,3)])
        >>> [vertex_info.triangle_indices for vertex_info in m.vertex_infos]
        [[0], [0, 1], [0, 1], [1]]
        >>> [triangle_info.vertex_indices for triangle_info in m.triangle_infos]
        [(0, 1, 2), (1, 3, 2)]
        """
        # initialize vertex and triangle information, and vertex cache
        self.vertex_infos = []
        self.triangle_infos = []
        # add all vertices
        # (vertex count is inferred from the largest index used)
        if triangles:
            num_vertices = max(max(verts) for verts in triangles) + 1
        else:
            num_vertices = 0
        # scoring algorithm
        if vertex_score is None:
            self.vertex_score = VertexScore()
        else:
            self.vertex_score = vertex_score
        self.vertex_infos = [VertexInfo() for i in range(num_vertices)]
        # add all triangles
        # (get_unique_triangles drops degenerates and duplicates, and
        # canonicalizes vertex order while preserving winding)
        for triangle_index, verts in enumerate(get_unique_triangles(triangles)):
            self.triangle_infos.append(TriangleInfo(vertex_indices=verts))
            for vertex in verts:
                self.vertex_infos[vertex].triangle_indices.append(
                    triangle_index)
        # calculate score of all vertices
        for vertex_info in self.vertex_infos:
            self.vertex_score.update_score(vertex_info)
        # calculate score of all triangles
        # (a triangle's score is the sum of its vertices' scores)
        for triangle_info in self.triangle_infos:
            triangle_info.score = sum(
                self.vertex_infos[vertex].score
                for vertex in triangle_info.vertex_indices)

    def get_cache_optimized_triangles(self):
        """Reorder triangles in a cache efficient way.

        Greedy algorithm: repeatedly emit the highest-scoring triangle,
        then rescore only the vertices/triangles affected by that choice.
        NOTE: this consumes the mesh — emitted entries of
        ``self.triangle_infos`` are set to None.

        >>> m = Mesh([(0,1,2), (7,8,9),(2,3,4)])
        >>> m.get_cache_optimized_triangles()
        [(7, 8, 9), (0, 1, 2), (2, 3, 4)]
        """
        triangles = []
        cache = collections.deque()
        # set of vertex indices whose scores were updated in the previous run
        updated_vertices = set()
        # set of triangle indices whose scores were updated in the previous run
        updated_triangles = set()
        while (updated_triangles
               or any(triangle_info for triangle_info in self.triangle_infos)):
            # pick triangle with highest score
            if self._DEBUG or not updated_triangles:
                # very slow but correct global maximum
                best_triangle_index, best_triangle_info = max(
                    (triangle
                     for triangle in enumerate(self.triangle_infos)
                     if triangle[1]),
                    key=lambda triangle: triangle[1].score)
            if updated_triangles:
                if self._DEBUG:
                    globally_optimal_score = best_triangle_info.score
                # if scores of triangles were updated in the previous run
                # then restrict the search to those
                # this is suboptimal, but the difference is usually very small
                # and it is *much* faster (as noted by Forsyth)
                best_triangle_index = max(
                    updated_triangles,
                    key=lambda triangle_index:
                    self.triangle_infos[triangle_index].score)
                best_triangle_info = self.triangle_infos[best_triangle_index]
                if (self._DEBUG and
                    globally_optimal_score - best_triangle_info.score > 0.01):
                    print(globally_optimal_score,
                          globally_optimal_score - best_triangle_info.score,
                          len(updated_triangles))
            # mark as added
            self.triangle_infos[best_triangle_index] = None
            # append to ordered list of triangles
            triangles.append(best_triangle_info.vertex_indices)
            # clean lists of vertices and triangles whose score we will update
            updated_vertices = set()
            updated_triangles = set()
            # for each vertex in the just added triangle
            for vertex in best_triangle_info.vertex_indices:
                vertex_info = self.vertex_infos[vertex]
                # remove triangle from the triangle list of the vertex
                vertex_info.triangle_indices.remove(best_triangle_index)
                # must update its score
                updated_vertices.add(vertex)
                updated_triangles.update(vertex_info.triangle_indices)
            # add each vertex to cache (score is updated later)
            for vertex in best_triangle_info.vertex_indices:
                if vertex not in cache:
                    cache.appendleft(vertex)
                    if len(cache) > self.vertex_score.CACHE_SIZE:
                        # cache overflow!
                        # remove vertex from cache
                        removed_vertex = cache.pop()
                        removed_vertex_info = self.vertex_infos[removed_vertex]
                        # update its cache position
                        removed_vertex_info.cache_position = -1
                        # must update its score
                        updated_vertices.add(removed_vertex)
                        updated_triangles.update(removed_vertex_info.triangle_indices)
            # for each vertex in the cache (this includes those from the
            # just added triangle)
            for i, vertex in enumerate(cache):
                vertex_info = self.vertex_infos[vertex]
                # update cache positions
                vertex_info.cache_position = i
                # must update its score
                updated_vertices.add(vertex)
                updated_triangles.update(vertex_info.triangle_indices)
            # update scores
            for vertex in updated_vertices:
                self.vertex_score.update_score(self.vertex_infos[vertex])
            for triangle in updated_triangles:
                triangle_info = self.triangle_infos[triangle]
                triangle_info.score = sum(
                    self.vertex_infos[vertex].score
                    for vertex in triangle_info.vertex_indices)
        # return result
        return triangles
def get_cache_optimized_triangles(triangles):
    """Calculate cache optimized triangles, and return the result as
    a reordered set of triangles or strip of stitched triangles.

    :param triangles: The triangles (triples of vertex indices).
    :return: A list of reordered triangles.
    """
    # delegate to the Mesh implementation of Forsyth's algorithm
    return Mesh(triangles).get_cache_optimized_triangles()
def get_unique_triangles(triangles):
    """Yield unique triangles.

    Degenerate triangles (with a repeated vertex) are dropped; each
    remaining triangle is rotated so its smallest vertex comes first,
    which preserves winding while making duplicates comparable.

    >>> list(get_unique_triangles([(0, 1, 2), (1, 1, 0), (2, 1, 0), (1, 0, 0)]))
    [(0, 1, 2), (0, 2, 1)]
    >>> list(get_unique_triangles([(0, 1, 2), (1, 1, 0), (2, 0, 1)]))
    [(0, 1, 2)]
    """
    seen = set()
    for tri in triangles:
        v0, v1, v2 = tri
        if v0 == v1 or v1 == v2 or v2 == v0:
            # skip degenerate triangles
            continue
        # all vertices are distinct here, so the lexicographically smallest
        # rotation is the one starting with the minimum vertex
        verts = min(((v0, v1, v2), (v1, v2, v0), (v2, v0, v1)))
        if verts not in seen:
            seen.add(verts)
            yield verts
def stable_stripify(triangles, stitchstrips=False):
    """Stitch all triangles together into a strip without changing the
    triangle ordering (for example because their ordering is already
    optimized).

    :param triangles: The triangles (triples of vertex indices).
    :param stitchstrips: If True, join all strips into a single stitched
        strip (via :class:`OrientedStrip` concatenation).
    :return: A list of strips (list of vertex indices).

    >>> stable_stripify([(0, 1, 2), (2, 1, 4)])
    [[0, 1, 2, 4]]
    >>> stable_stripify([(0, 1, 2), (2, 3, 4)])
    [[0, 1, 2], [2, 3, 4]]
    >>> stable_stripify([(0, 1, 2), (2, 1, 3), (2, 3, 4), (1, 4, 5), (5, 4, 6)])
    [[0, 1, 2, 3, 4], [1, 4, 5, 6]]
    >>> stable_stripify([(0, 1, 2), (0, 3, 1), (0, 4, 3), (3, 5, 1), (6, 3, 4)])
    [[2, 0, 1, 3], [0, 4, 3], [3, 5, 1], [6, 3, 4]]
    """
    # all orientation preserving triangle permutations
    indices = ((0, 1, 2), (1, 2, 0), (2, 0, 1))
    # list of all strips so far
    strips = []
    # current strip that is being built
    strip = []
    # add a triangle at a time
    for tri in triangles:
        if not strip:
            # empty strip
            strip.extend(tri)
        elif len(strip) == 3:
            # strip with single triangle
            # see if we can append a vertex
            # we can rearrange the original strip as well
            added = False
            for v0, v1, v2 in indices:
                for ov0, ov1, ov2 in indices:
                    # the last edge of the (rotated) strip must match the
                    # first edge of the (rotated) triangle, reversed, to
                    # keep consistent winding
                    if strip[v1] == tri[ov1] and strip[v2] == tri[ov0]:
                        strip = [strip[v0], strip[v1], strip[v2], tri[ov2]]
                        added = True
                        break
                if added:
                    # triangle added: break loop
                    break
            if added:
                # triangle added: process next triangle
                continue
            # start new strip
            strips.append(strip)
            strip = list(tri)
        else:
            # strip with multiple triangles
            # the strip orientation alternates with each triangle, hence
            # the parity check on the strip length below
            added = False
            for ov0, ov1, ov2 in indices:
                if len(strip) & 1:
                    # odd-length strip: edge is reversed
                    if strip[-2] == tri[ov1] and strip[-1] == tri[ov0]:
                        strip.append(tri[ov2])
                        added = True
                        break
                else:
                    # even-length strip: edge in original order
                    if strip[-2] == tri[ov0] and strip[-1] == tri[ov1]:
                        strip.append(tri[ov2])
                        added = True
                        break
            if added:
                # triangle added: process next triangle
                continue
            # start new strip
            strips.append(strip)
            strip = list(tri)
    # append last strip
    strips.append(strip)
    if not stitchstrips or not strips:
        return strips
    else:
        # stitch all strips into one via OrientedStrip concatenation
        result = reduce(lambda x, y: x + y,
                        (OrientedStrip(strip) for strip in strips))
        return [list(result)]
def stripify(triangles, stitchstrips=False):
    """Stripify triangles, optimizing for the vertex cache."""
    # first reorder for cache locality, then stitch in that stable order
    reordered = get_cache_optimized_triangles(triangles)
    return stable_stripify(reordered, stitchstrips=stitchstrips)
def get_cache_optimized_vertex_map(strips):
    """Map vertices so triangles/strips have consequetive indices.

    >>> get_cache_optimized_vertex_map([])
    []
    >>> get_cache_optimized_vertex_map([[]])
    []
    >>> get_cache_optimized_vertex_map([[0, 1, 3], []])
    [0, 1, None, 2]
    >>> get_cache_optimized_vertex_map([(5,2,1),(0,2,3)])
    [3, 2, 1, 4, None, 0]
    """
    # find the highest vertex index actually used (-1 when none)
    highest = -1
    for strip in strips:
        if strip:
            highest = max(highest, max(strip))
    vertex_map = [None] * (highest + 1)
    # assign new indices in first-use order; unused slots stay None
    next_index = 0
    for strip in strips:
        for old_vertex in strip:
            if vertex_map[old_vertex] is None:
                vertex_map[old_vertex] = next_index
                next_index += 1
    return vertex_map
def average_transform_to_vertex_ratio(strips, cache_size=16):
    """Calculate number of transforms per vertex for a given cache size
    and triangles/strips. See
    http://castano.ludicon.com/blog/2009/01/29/acmr/
    """
    # modeled FIFO cache; maxlen makes overflow eviction automatic
    cache = collections.deque(maxlen=cache_size)
    # collect the set of distinct vertices
    vertices = set()
    for strip in strips:
        vertices.update(strip)
    # count cache misses (each miss needs a transform)
    num_misses = 0
    for strip in strips:
        for vertex in strip:
            if vertex not in cache:
                cache.appendleft(vertex)
                num_misses += 1
    if not vertices:
        # no vertices...
        return 1
    return num_misses / float(len(vertices))
# Run this module's doctests when executed directly.
if __name__=='__main__':
    import doctest
    doctest.testmod()
| {
"repo_name": "griest024/PokyrimTools",
"path": "pyffi-develop/pyffi/utils/vertex_cache.py",
"copies": "1",
"size": "19824",
"license": "mit",
"hash": -98108486258272640,
"line_mean": 38.1778656126,
"line_max": 94,
"alpha_frac": 0.5626008878,
"autogenerated": false,
"ratio": 4.043238833367326,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5105839721167326,
"avg_score": null,
"num_lines": null
} |
"""Algorithms to support fitting routines in seaborn plotting functions."""
from __future__ import division
import numpy as np
from scipy import stats
from .external.six.moves import range
def bootstrap(*args, **kwargs):
    """Resample one or more arrays with replacement and store aggregate values.

    Positional arguments are a sequence of arrays to bootstrap along the first
    axis and pass to a summary function.

    Keyword arguments:
        n_boot : int, default 10000
            Number of iterations
        axis : int, default None
            Will pass axis to ``func`` as a keyword argument.
        units : array, default None
            Array of sampling unit IDs. When used the bootstrap resamples units
            and then observations within units instead of individual
            datapoints.
        smooth : bool, default False
            If True, performs a smoothed bootstrap (draws samples from a kernel
            density estimate); only works for one-dimensional inputs and cannot
            be used when `units` is present.
        func : callable, default np.mean
            Function to call on the args that are passed in.
        random_seed : int | None, default None
            Seed for the random number generator; useful if you want
            reproducible resamples.

    Returns
    -------
    boot_dist: array
        array of bootstrapped statistic values

    """
    # Ensure list of arrays are same length.
    # BUGFIX: ``map`` must be materialized; on Python 3 ``np.unique`` would
    # otherwise see a single iterator object and the check would silently pass.
    if len(np.unique(list(map(len, args)))) > 1:
        raise ValueError("All input arrays must have the same length")
    n = len(args[0])

    # Default keyword arguments
    n_boot = kwargs.get("n_boot", 10000)
    func = kwargs.get("func", np.mean)
    axis = kwargs.get("axis", None)
    units = kwargs.get("units", None)
    smooth = kwargs.get("smooth", False)
    random_seed = kwargs.get("random_seed", None)

    # Pass ``axis`` through to ``func`` only when the caller asked for it
    if axis is None:
        func_kwargs = dict()
    else:
        func_kwargs = dict(axis=axis)

    # Initialize the resampler (seedable for reproducibility)
    rs = np.random.RandomState(random_seed)

    # Coerce to arrays
    args = list(map(np.asarray, args))
    if units is not None:
        units = np.asarray(units)

    # Do the bootstrap
    if smooth:
        return _smooth_bootstrap(args, n_boot, func, func_kwargs)

    if units is not None:
        return _structured_bootstrap(args, n_boot, units, func,
                                     func_kwargs, rs)

    boot_dist = []
    for i in range(int(n_boot)):
        # draw n observation indices with replacement
        resampler = rs.randint(0, n, n)
        sample = [a.take(resampler, axis=0) for a in args]
        boot_dist.append(func(*sample, **func_kwargs))
    return np.array(boot_dist)
def _structured_bootstrap(args, n_boot, units, func, func_kwargs, rs):
"""Resample units instead of datapoints."""
unique_units = np.unique(units)
n_units = len(unique_units)
args = [[a[units == unit] for unit in unique_units] for a in args]
boot_dist = []
for i in range(int(n_boot)):
resampler = rs.randint(0, n_units, n_units)
sample = [np.take(a, resampler, axis=0) for a in args]
lengths = map(len, sample[0])
resampler = [rs.randint(0, n, n) for n in lengths]
sample = [[c.take(r, axis=0) for c, r in zip(a, resampler)]
for a in sample]
sample = list(map(np.concatenate, sample))
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
def _smooth_bootstrap(args, n_boot, func, func_kwargs):
"""Bootstrap by resampling from a kernel density estimate."""
n = len(args[0])
boot_dist = []
kde = [stats.gaussian_kde(np.transpose(a)) for a in args]
for i in range(int(n_boot)):
sample = [a.resample(n).T for a in kde]
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
def randomize_corrmat(a, tail="both", corrected=True, n_iter=1000,
                      random_seed=None, return_dist=False):
    """Test the significance of set of correlations with permutations.

    By default this corrects for multiple comparisons across one side
    of the matrix.

    Parameters
    ----------
    a : n_vars x n_obs array
        array with variables as rows
    tail : both | upper | lower
        whether test should be two-tailed, or which tail to integrate over
    corrected : boolean
        if True reports p values with respect to the max stat distribution
    n_iter : int
        number of permutation iterations
    random_seed : int or None
        seed for RNG
    return_dist : bool
        if True, return n_vars x n_vars x n_iter

    Returns
    -------
    p_mat : float
        array of probabilities for actual correlation from null CDF

    """
    if tail not in ["upper", "lower", "both"]:
        raise ValueError("'tail' must be 'upper', 'lower', or 'both'")

    rs = np.random.RandomState(random_seed)

    a = np.asarray(a)
    flat_a = a.ravel()
    n_vars, n_obs = a.shape

    # Do the permutations to establish a null distribution
    # (each variable's observations are shuffled independently, breaking
    # any true correlation while keeping marginal distributions)
    null_dist = np.empty((n_vars, n_vars, n_iter))
    for i_i in range(n_iter):
        perm_i = np.concatenate([rs.permutation(n_obs) + (v * n_obs)
                                 for v in range(n_vars)])
        a_i = flat_a[perm_i].reshape(n_vars, n_obs)
        null_dist[..., i_i] = np.corrcoef(a_i)

    # Get the observed correlation values
    real_corr = np.corrcoef(a)

    # Figure out p values based on the permutation distribution
    p_mat = np.zeros((n_vars, n_vars))
    upper_tri = np.triu_indices(n_vars, 1)

    if corrected:
        # max-statistic correction: compare each observed correlation
        # against the distribution of the most extreme null correlation
        if tail == "both":
            max_dist = np.abs(null_dist[upper_tri]).max(axis=0)
        elif tail == "lower":
            max_dist = null_dist[upper_tri].min(axis=0)
        elif tail == "upper":
            max_dist = null_dist[upper_tri].max(axis=0)

        cdf = lambda x: stats.percentileofscore(max_dist, x) / 100.

        for i, j in zip(*upper_tri):
            observed = real_corr[i, j]
            if tail == "both":
                p_ij = 1 - cdf(abs(observed))
            elif tail == "lower":
                p_ij = cdf(observed)
            elif tail == "upper":
                p_ij = 1 - cdf(observed)
            p_mat[i, j] = p_ij

    else:
        # uncorrected: each cell uses its own null distribution
        for i, j in zip(*upper_tri):

            null_corrs = null_dist[i, j]
            cdf = lambda x: stats.percentileofscore(null_corrs, x) / 100.

            observed = real_corr[i, j]
            if tail == "both":
                p_ij = 2 * (1 - cdf(abs(observed)))
            elif tail == "lower":
                p_ij = cdf(observed)
            elif tail == "upper":
                p_ij = 1 - cdf(observed)
            p_mat[i, j] = p_ij

    # Make p matrix symmetrical with nans on the diagonal
    p_mat += p_mat.T
    p_mat[np.diag_indices(n_vars)] = np.nan

    if return_dist:
        return p_mat, null_dist
    return p_mat
| {
"repo_name": "cpcloud/seaborn",
"path": "seaborn/algorithms.py",
"copies": "1",
"size": "6873",
"license": "bsd-3-clause",
"hash": -2485529422238807600,
"line_mean": 32.6911764706,
"line_max": 79,
"alpha_frac": 0.5901353121,
"autogenerated": false,
"ratio": 3.7252032520325202,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.481533856413252,
"avg_score": null,
"num_lines": null
} |
"""Algorithms to support fitting routines in seaborn plotting functions."""
from __future__ import division
import numpy as np
from scipy import stats
from six.moves import range
def bootstrap(*args, **kwargs):
    """Resample one or more arrays with replacement and store aggregate values.

    Positional arguments are a sequence of arrays to bootstrap along the first
    axis and pass to a summary function.

    Keyword arguments:
        n_boot : int, default 10000
            Number of iterations
        axis : int, default None
            Will pass axis to ``func`` as a keyword argument.
        units : array, default None
            Array of sampling unit IDs. When used the bootstrap resamples units
            and then observations within units instead of individual
            datapoints.
        smooth : bool, default False
            If True, performs a smoothed bootstrap (draws samples from a kernel
            density estimate); only works for one-dimensional inputs and cannot
            be used when `units` is present.
        func : callable, default np.mean
            Function to call on the args that are passed in.
        random_seed : int | None, default None
            Seed for the random number generator; useful if you want
            reproducible resamples.

    Returns
    -------
    boot_dist: array
        array of bootstrapped statistic values

    """
    # Ensure list of arrays are same length.
    # BUGFIX: ``map`` must be materialized; on Python 3 ``np.unique`` would
    # otherwise see a single iterator object and the check would silently pass.
    if len(np.unique(list(map(len, args)))) > 1:
        raise ValueError("All input arrays must have the same length")
    n = len(args[0])

    # Default keyword arguments
    n_boot = kwargs.get("n_boot", 10000)
    func = kwargs.get("func", np.mean)
    axis = kwargs.get("axis", None)
    units = kwargs.get("units", None)
    smooth = kwargs.get("smooth", False)
    random_seed = kwargs.get("random_seed", None)

    # Pass ``axis`` through to ``func`` only when the caller asked for it
    if axis is None:
        func_kwargs = dict()
    else:
        func_kwargs = dict(axis=axis)

    # Initialize the resampler (seedable for reproducibility)
    rs = np.random.RandomState(random_seed)

    # Coerce to arrays
    args = list(map(np.asarray, args))
    if units is not None:
        units = np.asarray(units)

    # Do the bootstrap
    if smooth:
        return _smooth_bootstrap(args, n_boot, func, func_kwargs)

    if units is not None:
        return _structured_bootstrap(args, n_boot, units, func,
                                     func_kwargs, rs)

    boot_dist = []
    for i in range(int(n_boot)):
        # draw n observation indices with replacement
        resampler = rs.randint(0, n, n)
        sample = [a.take(resampler, axis=0) for a in args]
        boot_dist.append(func(*sample, **func_kwargs))
    return np.array(boot_dist)
def _structured_bootstrap(args, n_boot, units, func, func_kwargs, rs):
"""Resample units instead of datapoints."""
unique_units = np.unique(units)
n_units = len(unique_units)
args = [[a[units == unit] for unit in unique_units] for a in args]
boot_dist = []
for i in range(int(n_boot)):
resampler = rs.randint(0, n_units, n_units)
sample = [np.take(a, resampler, axis=0) for a in args]
lengths = map(len, sample[0])
resampler = [rs.randint(0, n, n) for n in lengths]
sample = [[c.take(r, axis=0) for c, r in zip(a, resampler)]
for a in sample]
sample = list(map(np.concatenate, sample))
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
def _smooth_bootstrap(args, n_boot, func, func_kwargs):
"""Bootstrap by resampling from a kernel density estimate."""
n = len(args[0])
boot_dist = []
kde = [stats.gaussian_kde(np.transpose(a)) for a in args]
for i in range(int(n_boot)):
sample = [a.resample(n).T for a in kde]
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
def randomize_corrmat(a, tail="both", corrected=True, n_iter=1000,
                      random_seed=None, return_dist=False):
    """Test the significance of set of correlations with permutations.

    By default this corrects for multiple comparisons across one side
    of the matrix.

    Parameters
    ----------
    a : n_vars x n_obs array
        array with variables as rows
    tail : both | upper | lower
        whether test should be two-tailed, or which tail to integrate over
    corrected : boolean
        if True reports p values with respect to the max stat distribution
    n_iter : int
        number of permutation iterations
    random_seed : int or None
        seed for RNG
    return_dist : bool
        if True, return n_vars x n_vars x n_iter

    Returns
    -------
    p_mat : float
        array of probabilities for actual correlation from null CDF

    """
    if tail not in ["upper", "lower", "both"]:
        raise ValueError("'tail' must be 'upper', 'lower', or 'both'")

    rs = np.random.RandomState(random_seed)

    a = np.asarray(a)
    flat_a = a.ravel()
    n_vars, n_obs = a.shape

    # Do the permutations to establish a null distribution
    # (each variable's observations are shuffled independently, breaking
    # any true correlation while keeping marginal distributions)
    null_dist = np.empty((n_vars, n_vars, n_iter))
    for i_i in range(n_iter):
        perm_i = np.concatenate([rs.permutation(n_obs) + (v * n_obs)
                                 for v in range(n_vars)])
        a_i = flat_a[perm_i].reshape(n_vars, n_obs)
        null_dist[..., i_i] = np.corrcoef(a_i)

    # Get the observed correlation values
    real_corr = np.corrcoef(a)

    # Figure out p values based on the permutation distribution
    p_mat = np.zeros((n_vars, n_vars))
    upper_tri = np.triu_indices(n_vars, 1)

    if corrected:
        # max-statistic correction: compare each observed correlation
        # against the distribution of the most extreme null correlation
        if tail == "both":
            max_dist = np.abs(null_dist[upper_tri]).max(axis=0)
        elif tail == "lower":
            max_dist = null_dist[upper_tri].min(axis=0)
        elif tail == "upper":
            max_dist = null_dist[upper_tri].max(axis=0)

        cdf = lambda x: stats.percentileofscore(max_dist, x) / 100.

        for i, j in zip(*upper_tri):
            observed = real_corr[i, j]
            if tail == "both":
                p_ij = 1 - cdf(abs(observed))
            elif tail == "lower":
                p_ij = cdf(observed)
            elif tail == "upper":
                p_ij = 1 - cdf(observed)
            p_mat[i, j] = p_ij

    else:
        # uncorrected: each cell uses its own null distribution
        for i, j in zip(*upper_tri):

            null_corrs = null_dist[i, j]
            cdf = lambda x: stats.percentileofscore(null_corrs, x) / 100.

            observed = real_corr[i, j]
            if tail == "both":
                p_ij = 2 * (1 - cdf(abs(observed)))
            elif tail == "lower":
                p_ij = cdf(observed)
            elif tail == "upper":
                p_ij = 1 - cdf(observed)
            p_mat[i, j] = p_ij

    # Make p matrix symmetrical with nans on the diagonal
    p_mat += p_mat.T
    p_mat[np.diag_indices(n_vars)] = np.nan

    if return_dist:
        return p_mat, null_dist
    return p_mat
| {
"repo_name": "yarikoptic/seaborn",
"path": "seaborn/algorithms.py",
"copies": "1",
"size": "6863",
"license": "bsd-3-clause",
"hash": 1302225294867658500,
"line_mean": 32.6421568627,
"line_max": 79,
"alpha_frac": 0.5898295206,
"autogenerated": false,
"ratio": 3.725841476655809,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48156709972558087,
"avg_score": null,
"num_lines": null
} |
"""Algorithms to support fitting routines in seaborn plotting functions."""
from __future__ import division
import numpy as np
from scipy import stats
import warnings
from .external.six import string_types
from .external.six.moves import range
def bootstrap(*args, **kwargs):
    """Resample one or more arrays with replacement and store aggregate values.

    Positional arguments are a sequence of arrays to bootstrap along the
    first axis and pass to a summary function.

    Keyword arguments:
        n_boot : int, default 10000
            Number of iterations
        axis : int, default None
            Will pass axis to ``func`` as a keyword argument.
        units : array, default None
            Array of sampling unit IDs. When used the bootstrap resamples
            units and then observations within units instead of individual
            datapoints.
        smooth : bool, default False
            If True, performs a smoothed bootstrap (draws samples from a
            kernel density estimate); only works for one-dimensional inputs
            and cannot be used when `units` is present.
        func : string or callable, default np.mean
            Function to call on the args that are passed in. If string,
            tries to use as named method on numpy array.
        random_seed : int | None, default None
            Seed for the random number generator; useful if you want
            reproducible resamples.

    Returns
    -------
    boot_dist: array
        array of bootstrapped statistic values

    """
    # All positional arrays must share the same first-axis length
    if len(np.unique(list(map(len, args)))) > 1:
        raise ValueError("All input arrays must have the same length")
    n_obs = len(args[0])

    # Pull the options out of the keyword arguments
    n_boot = kwargs.get("n_boot", 10000)
    func = kwargs.get("func", np.mean)
    axis = kwargs.get("axis", None)
    units = kwargs.get("units", None)
    smooth = kwargs.get("smooth", False)
    random_seed = kwargs.get("random_seed", None)

    # Pass ``axis`` to the summary function only when requested
    func_kwargs = {} if axis is None else {"axis": axis}

    # Seedable source of randomness for reproducible resamples
    rs = np.random.RandomState(random_seed)

    args = list(map(np.asarray, args))
    if units is not None:
        units = np.asarray(units)

    # A string names a method on the (numpy array) sample
    if isinstance(func, string_types):
        def f(x):
            return getattr(x, func)()
    else:
        f = func

    if smooth:
        msg = "Smooth bootstraps are deprecated and will be removed."
        warnings.warn(msg)
        return _smooth_bootstrap(args, n_boot, f, func_kwargs)

    if units is not None:
        return _structured_bootstrap(args, n_boot, units, f,
                                     func_kwargs, rs)

    # Plain bootstrap: resample observations with replacement
    boot_dist = []
    for _ in range(int(n_boot)):
        sampler = rs.randint(0, n_obs, n_obs)
        draw = [a.take(sampler, axis=0) for a in args]
        boot_dist.append(f(*draw, **func_kwargs))
    return np.array(boot_dist)
def _structured_bootstrap(args, n_boot, units, func, func_kwargs, rs):
"""Resample units instead of datapoints."""
unique_units = np.unique(units)
n_units = len(unique_units)
args = [[a[units == unit] for unit in unique_units] for a in args]
boot_dist = []
for i in range(int(n_boot)):
resampler = rs.randint(0, n_units, n_units)
sample = [np.take(a, resampler, axis=0) for a in args]
lengths = map(len, sample[0])
resampler = [rs.randint(0, n, n) for n in lengths]
sample = [[c.take(r, axis=0) for c, r in zip(a, resampler)]
for a in sample]
sample = list(map(np.concatenate, sample))
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
def _smooth_bootstrap(args, n_boot, func, func_kwargs):
"""Bootstrap by resampling from a kernel density estimate."""
n = len(args[0])
boot_dist = []
kde = [stats.gaussian_kde(np.transpose(a)) for a in args]
for i in range(int(n_boot)):
sample = [a.resample(n).T for a in kde]
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
| {
"repo_name": "lukauskas/seaborn",
"path": "seaborn/algorithms.py",
"copies": "3",
"size": "4233",
"license": "bsd-3-clause",
"hash": 7271285285809882000,
"line_mean": 34.275,
"line_max": 79,
"alpha_frac": 0.6234349161,
"autogenerated": false,
"ratio": 3.8799266727772688,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6003361588877268,
"avg_score": null,
"num_lines": null
} |
"""Algorithms to support fitting routines in seaborn plotting functions."""
import numbers
import numpy as np
import warnings
def bootstrap(*args, **kwargs):
    """Resample one or more arrays with replacement and store aggregate values.

    Positional arguments are a sequence of arrays to bootstrap along the
    first axis and pass to a summary function.

    Keyword arguments:
        n_boot : int, default=10000
            Number of iterations
        axis : int, default=None
            Will pass axis to ``func`` as a keyword argument.
        units : array, default=None
            Array of sampling unit IDs. When used the bootstrap resamples
            units and then observations within units instead of individual
            datapoints.
        func : string or callable, default="mean"
            Function to call on the args that are passed in. If string, uses
            as name of function in the numpy namespace. If nans are present
            in the data, will try to use nan-aware version of named function.
        seed : Generator | SeedSequence | RandomState | int | None
            Seed for the random number generator; useful if you want
            reproducible resamples.

    Returns
    -------
    boot_dist: array
        array of bootstrapped statistic values

    """
    # All positional arrays must share the same first-axis length
    if len(np.unique(list(map(len, args)))) > 1:
        raise ValueError("All input arrays must have the same length")
    n_obs = len(args[0])

    # Pull the options out of the keyword arguments
    n_boot = kwargs.get("n_boot", 10000)
    func = kwargs.get("func", "mean")
    axis = kwargs.get("axis", None)
    units = kwargs.get("units", None)

    # Legacy seeding keyword is still honored, with a deprecation warning
    random_seed = kwargs.get("random_seed", None)
    if random_seed is not None:
        msg = "`random_seed` has been renamed to `seed` and will be removed"
        warnings.warn(msg)
    seed = kwargs.get("seed", random_seed)

    # Pass ``axis`` to the summary function only when requested
    func_kwargs = {} if axis is None else {"axis": axis}

    # Initialize the resampler
    rng = _handle_random_seed(seed)

    args = list(map(np.asarray, args))
    if units is not None:
        units = np.asarray(units)

    if isinstance(func, str):
        # Allow named numpy functions
        f = getattr(np, func)
        # Try to use nan-aware version of function if necessary
        missing_data = np.isnan(np.sum(np.column_stack(args)))
        if missing_data and not func.startswith("nan"):
            nanf = getattr(np, f"nan{func}", None)
            if nanf is None:
                msg = f"Data contain nans but no nan-aware version of `{func}` found"
                warnings.warn(msg, UserWarning)
            else:
                f = nanf
    else:
        f = func

    # Handle numpy changes: Generator uses `integers`, RandomState `randint`
    try:
        integers = rng.integers
    except AttributeError:
        integers = rng.randint

    # Do the bootstrap
    if units is not None:
        return _structured_bootstrap(args, n_boot, units, f,
                                     func_kwargs, integers)

    boot_dist = []
    for _ in range(int(n_boot)):
        sampler = integers(0, n_obs, n_obs, dtype=np.intp)  # intp is indexing dtype
        draw = [a.take(sampler, axis=0) for a in args]
        boot_dist.append(f(*draw, **func_kwargs))
    return np.array(boot_dist)
def _structured_bootstrap(args, n_boot, units, func, func_kwargs, integers):
"""Resample units instead of datapoints."""
unique_units = np.unique(units)
n_units = len(unique_units)
args = [[a[units == unit] for unit in unique_units] for a in args]
boot_dist = []
for i in range(int(n_boot)):
resampler = integers(0, n_units, n_units, dtype=np.intp)
sample = [[a[i] for i in resampler] for a in args]
lengths = map(len, sample[0])
resampler = [integers(0, n, n, dtype=np.intp) for n in lengths]
sample = [[c.take(r, axis=0) for c, r in zip(a, resampler)] for a in sample]
sample = list(map(np.concatenate, sample))
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
def _handle_random_seed(seed=None):
"""Given a seed in one of many formats, return a random number generator.
Generalizes across the numpy 1.17 changes, preferring newer functionality.
"""
if isinstance(seed, np.random.RandomState):
rng = seed
else:
try:
# General interface for seeding on numpy >= 1.17
rng = np.random.default_rng(seed)
except AttributeError:
# We are on numpy < 1.17, handle options ourselves
if isinstance(seed, (numbers.Integral, np.integer)):
rng = np.random.RandomState(seed)
elif seed is None:
rng = np.random.RandomState()
else:
err = "{} cannot be used to seed the randomn number generator"
raise ValueError(err.format(seed))
return rng
| {
"repo_name": "mwaskom/seaborn",
"path": "seaborn/algorithms.py",
"copies": "2",
"size": "4971",
"license": "bsd-3-clause",
"hash": 4144861291383294500,
"line_mean": 34.0070422535,
"line_max": 85,
"alpha_frac": 0.6099376383,
"autogenerated": false,
"ratio": 4.048045602605863,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5657983240905863,
"avg_score": null,
"num_lines": null
} |
""" Algorithms, using only one class for classification
Though this focuses on one class, the relation to the ``REST`` should still
be specified.
"""
from pySPACE.missions.nodes.classification.base import RegularizedClassifierBase
import logging
# import the external libraries
from pySPACE.missions.nodes.classification.svm_variants.external import LibSVMClassifierNode
try: # Libsvm
import svmutil
except ImportError:
pass
class OneClassClassifierBase(RegularizedClassifierBase):
    """ Base node to handle class labels during training to filter out irrelevant data

    :class_labels:
        List of the two or more classes,
        where first element is the relevant one
        and the second is the negative class.
    """
    def train(self, data, label):
        """ Special mapping for one-class classification

        Reduce training data to the one main class.

        Samples whose label is not the relevant (first) class are either
        remapped to "REST" or skipped; missing class labels are registered
        on the fly (with a log message).
        """
        # one vs. REST case: map any unknown label onto the generic REST class
        if "REST" in self.classes and label not in self.classes:
            label = "REST"
        # one vs. one case: with two fixed classes, ignore foreign labels
        if not self.multinomial and len(self.classes) == 2 and label not in self.classes:
            return
        if not self.classes:
            # First label ever seen becomes the (missing) positive class.
            self.classes.append(label)
            self._log("No positive class label given in: %s. Taking now: %s."
                      % (self.__class__.__name__, label),
                      level=logging.ERROR)
        if label != self.classes[0]:
            if len(self.classes) == 1:
                # First non-positive label becomes the negative class.
                self.classes.append(label)
                self._log("No negative class label given in: %s. Taking now: %s."
                          % (self.__class__.__name__, label),
                          level=logging.WARNING)
            # Only samples of the positive (first) class are forwarded.
            return
        # NOTE(review): super(RegularizedClassifierBase, self) skips
        # RegularizedClassifierBase.train in the MRO -- confirm this is
        # intended and not a typo for super(OneClassClassifierBase, self).
        super(RegularizedClassifierBase, self).train(data, label)
class LibsvmOneClassNode(LibSVMClassifierNode, OneClassClassifierBase):
    """ Interface to one-class SVM in Libsvm package

    **Parameters**

    Parameters are as specified in
    :class:`~pySPACE.missions.nodes.classification.svm_variants.external.LibSVMClassifierNode`,
    except the ``svm_type``, which is set manually in this node to
    "one-class SVM".

    :class_labels:
        see: :class:`OneClassClassifierBase`

    **Exemplary Call**

    .. code-block:: yaml

        -
            node : LibsvmOneClass
            parameters :
                complexity : 1
                kernel_type : "LINEAR"
                class_labels : ['Target', 'Standard']
                weight : [1,3]
                debug : True
                store : True
                max_iterations : 100
    """
    def __init__(self,**kwargs):
        # Force the LibSVM wrapper into one-class mode; all other parameters
        # are forwarded unchanged.  The base __init__ is called explicitly
        # (not via super()) -- presumably deliberate given the diamond MRO.
        LibSVMClassifierNode.__init__(self,svm_type="one-class SVM", **kwargs)

    def train(self,data,label):
        # Delegate to the label-filtering train() of OneClassClassifierBase,
        # which discards samples that are not of the relevant class.
        OneClassClassifierBase.train(self, data, label)
| {
"repo_name": "pyspace/pyspace",
"path": "pySPACE/missions/nodes/classification/one_class.py",
"copies": "3",
"size": "2848",
"license": "bsd-3-clause",
"hash": 8486670824768563000,
"line_mean": 32.1162790698,
"line_max": 99,
"alpha_frac": 0.6025280899,
"autogenerated": false,
"ratio": 4.38828967642527,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005838987697020974,
"num_lines": 86
} |
# Algorithms were taken directly from https://github.com/dgasmith/psi4numpy/blob/master/Coupled-Cluster/CCSD.dat
# Special thanks to Daniel Crawford's programming website:
# http://sirius.chem.vt.edu/wiki/doku.php?id=crawdad:programming
import time
import numpy as np
from itertools import product
from functools import reduce
def ccsd(hamiltonian, orbs, orbe, ndocc, nvirt, verbose=False):
    """Spin-orbital CCSD correlation energy via the DPD equations.

    Parameters: `hamiltonian` supplies the core Hamiltonian (`.h`) and the
    two-electron integrals (`.i2._2e_ints`); `orbs`/`orbe` are the spatial MO
    coefficients and eigenvalues; `ndocc`/`nvirt` count doubly-occupied and
    virtual spatial orbitals.  Returns the converged CCSD correlation energy.

    NOTE(review): `verbose` is accepted but never used; this function prints
    unconditionally (Python 2 print statements).
    """
    H = hamiltonian.h
    nmo = ndocc + nvirt
    C = orbs
    eps = orbe

    # Make spin-orbital MO integrals: four-index AO -> MO transform ...
    t=time.time()
    print 'Starting AO -> spin-orbital MO transformation...'
    nso = nmo * 2
    MO = np.einsum('rJ, pqrs->pqJs', C, hamiltonian.i2._2e_ints)
    MO = np.einsum('pI, pqJs->IqJs', C, MO)
    MO = np.einsum('sB, IqJs->IqJB', C, MO)
    MO = np.einsum('qA, IqJB->IAJB', C, MO)

    # ... then tile so that we have alternating alpha/beta spin orbitals
    MO = np.repeat(MO, 2, axis=0)
    MO = np.repeat(MO, 2, axis=1)
    MO = np.repeat(MO, 2, axis=2)
    MO = np.repeat(MO, 2, axis=3)

    # Build spin mask (zero out integrals between different spins)
    # NOTE(review): np.int was removed in NumPy 1.24; modern NumPy needs
    # plain int (this module is Python 2 era code anyway).
    spin_ind = np.arange(nso, dtype=np.int) % 2
    spin_mask = (spin_ind.reshape(-1, 1, 1, 1) == spin_ind.reshape(-1, 1, 1))
    spin_mask = spin_mask * (spin_ind.reshape(-1, 1) == spin_ind)

    # Compute antisymmetrized MO integrals <pq||rs> in physicist notation
    MO *= spin_mask
    MO = MO - MO.swapaxes(1, 3)
    MO = MO.swapaxes(1, 2)
    print '..finished transformation in %.3f seconds.\n' % (time.time()-t)

    # Update nocc and nvirt to spin-orbital counts (rebinds the nvirt param)
    nocc = ndocc * 2
    nvirt = MO.shape[0] - nocc

    # Make slices for the occupied and virtual blocks
    o = slice(0, nocc)
    v = slice(nocc, MO.shape[0])

    # Extend eigenvalues to spin orbitals
    # NOTE(review): Eocc/Evirt are computed but unused -- the denominators
    # below are built from the Fock diagonal instead.
    eps = np.repeat(eps, 2)
    Eocc = eps[o]
    Evirt = eps[v]

    # DPD approach to CCSD equations
    # See: http://sirius.chem.vt.edu/wiki/doku.php?id=crawdad:programming
    # occ orbitals i, j, k, l, m, n
    # virt orbitals a, b, c, d, e, f
    # all orbitals p, q, r, s, t, u, v

    # Build Eqn 9: tilde{\Tau}
    def build_tilde_tau(t1, t2):
        ttau = t2.copy()
        tmp = 0.5 * np.einsum('ia,jb->ijab', t1, t1)
        ttau += tmp
        ttau -= tmp.swapaxes(2, 3)
        return ttau

    # Build Eqn 10: \Tau
    def build_tau(t1, t2):
        ttau = t2.copy()
        tmp = np.einsum('ia,jb->ijab', t1, t1)
        ttau += tmp
        ttau -= tmp.swapaxes(2, 3)
        return ttau

    # Build Eqn 3: F_ae intermediate (virtual-virtual)
    def build_Fae(t1, t2):
        Fae = F[v, v].copy()
        Fae[np.diag_indices_from(Fae)] = 0
        Fae -= 0.5 * np.einsum('me,ma->ae', F[o, v], t1)
        Fae += np.einsum('mf,mafe->ae', t1, MO[o, v, v, v])
        tmp_tau = build_tilde_tau(t1, t2)
        Fae -= 0.5 * np.einsum('mnaf,mnef->ae', tmp_tau, MO[o, o, v, v])
        return Fae

    # Build Eqn 4: F_mi intermediate (occupied-occupied)
    def build_Fmi(t1, t2):
        Fmi = F[o, o].copy()
        Fmi[np.diag_indices_from(Fmi)] = 0
        Fmi += 0.5 * np.einsum('ie,me->mi', t1, F[o, v])
        Fmi += np.einsum('ne,mnie->mi', t1, MO[o, o, o, v])
        tmp_tau = build_tilde_tau(t1, t2)
        Fmi += 0.5 * np.einsum('inef,mnef->mi', tmp_tau, MO[o, o, v, v])
        return Fmi

    # Build Eqn 5: F_me intermediate (occupied-virtual)
    def build_Fme(t1, t2):
        Fme = F[o, v].copy()
        Fme += np.einsum('nf,mnef->me', t1, MO[o, o, v, v])
        return Fme

    # Build Eqn 6: W_mnij intermediate
    def build_Wmnij(t1, t2):
        Wmnij = MO[o, o, o, o].copy()
        Pij = np.einsum('je,mnie->mnij', t1, MO[o, o, o, v])
        Wmnij += Pij
        Wmnij -= Pij.swapaxes(2, 3)
        tmp_tau = build_tau(t1, t2)
        Wmnij += 0.25 * np.einsum('ijef,mnef->mnij', tmp_tau, MO[o, o, v, v])
        return Wmnij

    # Build Eqn 7: W_abef intermediate
    def build_Wabef(t1, t2):
        Wabef = MO[v, v, v, v].copy()
        Pab = np.einsum('mb,amef->abef', t1, MO[v, o, v, v])
        Wabef -= Pab
        Wabef += Pab.swapaxes(0, 1)
        tmp_tau = build_tau(t1, t2)
        Wabef += 0.25 * np.einsum('mnab,efmn->abef', tmp_tau, MO[v, v, o, o])
        return Wabef

    # Build Eqn 8: W_mbej intermediate
    def build_Wmbej(t1, t2):
        Wmbej = MO[o, v, v, o].copy()
        Wmbej += np.einsum('jf,mbef->mbej', t1, MO[o, v, v, v])
        Wmbej -= np.einsum('nb,mnej->mbej', t1, MO[o, o, v, o])
        tmp = (0.5 * t2) + np.einsum('jf,nb->jnfb', t1, t1)
        Wmbej -= np.einsum('jnfb,mnef->mbej', tmp, MO[o, o, v, v])
        return Wmbej

    ### Build spin-orbital Fock matrix
    # Update H, transform to MO basis and tile for alpha/beta spin
    H = np.einsum('uj,vi,uv', C, C, H)
    H = np.repeat(H, 2, axis=0)
    H = np.repeat(H, 2, axis=1)

    # Make H block diagonal (zero the alpha-beta blocks)
    spin_ind = np.arange(H.shape[0], dtype=np.int) % 2
    H *= (spin_ind.reshape(-1, 1) == spin_ind)

    # Compute Fock matrix: F = H + sum over occupied <pm||qm>
    F = H + np.einsum('pmqm->pq', MO[:, o, :, o])

    ### Build D (orbital-energy denominator) matrices from the Fock diagonal
    Focc = F[np.arange(nocc), np.arange(nocc)].flatten()
    Fvirt = F[np.arange(nocc, nvirt + nocc), np.arange(nocc, nvirt + nocc)].flatten()
    Dia = Focc.reshape(-1, 1) - Fvirt
    Dijab = Focc.reshape(-1, 1, 1, 1) + Focc.reshape(-1, 1, 1) - Fvirt.reshape(-1, 1) - Fvirt

    ### Construct initial guess
    # t^a_i amplitudes start at zero
    t1 = np.zeros((nocc, nvirt))
    # t^{ab}_{ij} amplitudes start at the MP2 guess
    MOijab = MO[o, o, v, v]
    t2 = MOijab / Dijab

    ### Compute MP2 in MO basis set to make sure the transformation was correct
    MP2corr_E = np.einsum('ijab,ijab->', MOijab, t2)/4
    #MP2_E = SCF_E + MP2corr_E
    print 'MO based MP2 correlation energy: %.8f' % MP2corr_E
    #print 'MP2 total energy: %.8f' % MP2_E
    #compare_values(energy('mp2'), MP2_E, 6, 'MP2 Energy')

    ### Start iterating the amplitude equations to convergence
    E_conv = 1.e-12
    maxiter = 60
    CCSDcorr_E_old = 0.0
    for CCSD_iter in range(1, maxiter + 1):
        ### Build intermediates
        Fae = build_Fae(t1, t2)
        Fmi = build_Fmi(t1, t2)
        Fme = build_Fme(t1, t2)
        Wmnij = build_Wmnij(t1, t2)
        Wabef = build_Wabef(t1, t2)
        Wmbej = build_Wmbej(t1, t2)

        #### Build RHS side of t1 equations
        rhs_T1 = F[o, v].copy()
        rhs_T1 += np.einsum('ie,ae->ia', t1, Fae)
        rhs_T1 -= np.einsum('ma,mi->ia', t1, Fmi)
        rhs_T1 += np.einsum('imae,me->ia', t2, Fme)
        rhs_T1 -= np.einsum('nf,naif->ia', t1, MO[o, v, o, v])
        rhs_T1 -= 0.5 * np.einsum('imef,maef->ia', t2, MO[o, v, v, v])
        rhs_T1 -= 0.5 * np.einsum('mnae,nmei->ia', t2, MO[o, o, v, o])

        ### Build RHS side of t2 equations
        rhs_T2 = MO[o, o, v, v].copy()

        # P_(ab) t_ijae (F_be - 0.5 t_mb F_me)
        tmp = Fae - 0.5 * np.einsum('mb,me->be', t1, Fme)
        Pab = np.einsum('ijae,be->ijab', t2, tmp)
        rhs_T2 += Pab
        rhs_T2 -= Pab.swapaxes(2, 3)

        # P_(ij) t_imab (F_mj + 0.5 t_je F_me)
        tmp = Fmi + 0.5 * np.einsum('je,me->mj', t1, Fme)
        Pij = np.einsum('imab,mj->ijab', t2, tmp)
        rhs_T2 -= Pij
        rhs_T2 += Pij.swapaxes(0, 1)

        tmp_tau = build_tau(t1, t2)
        rhs_T2 += 0.5 * np.einsum('mnab,mnij->ijab', tmp_tau, Wmnij)
        rhs_T2 += 0.5 * np.einsum('ijef,abef->ijab', tmp_tau, Wabef)

        # P_(ij) * P_(ab)
        # (ij - ji) * (ab - ba)
        # ijab - ijba - jiab + jiba
        tmp = np.einsum('ie,ma,mbej->ijab', t1, t1, MO[o, v, v, o])
        Pijab = np.einsum('imae,mbej->ijab', t2, Wmbej)
        Pijab -= tmp
        rhs_T2 += Pijab
        rhs_T2 -= Pijab.swapaxes(2, 3)
        rhs_T2 -= Pijab.swapaxes(0, 1)
        rhs_T2 += Pijab.swapaxes(0, 1).swapaxes(2, 3)

        Pij = np.einsum('ie,abej->ijab', t1, MO[v, v, v, o])
        rhs_T2 += Pij
        rhs_T2 -= Pij.swapaxes(0, 1)

        Pab = np.einsum('ma,mbij->ijab', t1, MO[o, v, o, o])
        rhs_T2 -= Pab
        rhs_T2 += Pab.swapaxes(2, 3)

        ### Update t1 and t2 amplitudes (divide by energy denominators)
        t1 = rhs_T1 / Dia
        t2 = rhs_T2 / Dijab

        ### Compute CCSD correlation energy
        CCSDcorr_E = np.einsum('ia,ia->', F[o, v], t1)
        CCSDcorr_E += 0.25 * np.einsum('ijab,ijab->', MO[o, o, v, v], t2)
        CCSDcorr_E += 0.5 * np.einsum('ijab,ia,jb->', MO[o, o, v, v], t1, t1)

        ### Print CCSD correlation energy and check convergence
        print 'CCSD Iteration %3d: CCSD correlation = %.12f dE = %.5E' % (CCSD_iter, CCSDcorr_E, (CCSDcorr_E - CCSDcorr_E_old))
        if (abs(CCSDcorr_E - CCSDcorr_E_old) < E_conv):
            break

        CCSDcorr_E_old = CCSDcorr_E

    return CCSDcorr_E
# NOTE(review): `test_ccsd` is not defined anywhere in this module -- running
# this file as a script raises NameError.  Presumably the test driver was
# removed or lives elsewhere; verify before relying on this entry point.
if __name__ == '__main__': test_ccsd()
| {
"repo_name": "Konjkov/pyquante2",
"path": "pyquante2/cc/ccsd.py",
"copies": "1",
"size": "8215",
"license": "bsd-3-clause",
"hash": -8707895786182627000,
"line_mean": 36.6834862385,
"line_max": 127,
"alpha_frac": 0.5335362142,
"autogenerated": false,
"ratio": 2.4333530805687205,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.346688929476872,
"avg_score": null,
"num_lines": null
} |
"""Simple test of the algorithms used in the Atlas 6DOF simulation framework."""
import numpy as np
import numpy.matlib as m
from scipy.integrate import odeint
#import datetime
from EarthFrame import *
# The state vector X is laid out as follows:
# 0 - x - x axis coordinate (m)
# 1 - y - y axis coordinate (m)
# 2 - z - z axis coordinate (m)
# 3 - vx - x velocity (m/s)
# 4 - vy - y velocity (m/s)
# 5 - vz - z velocity (m/s)
# 6 - q0 - first quaternion component
# 7 - q1 - second quaternion component
# 8 - q2 - third quaternion component
# 9 - q3 - fourth quaternion component
# 10 - wx - rotation about x axis (rad/s)
# 11 - wy - rotation about y axis (rad/s)
# 12 - wz - rotation about z axis (rad/s)
def force_torque(y, t=None):
    """Return the (force, torque) acting on the body in state *y*.

    Parameters
    ----------
    y : 13-element state vector (see the layout comment above); only the
        position y[0:3] is read.
    t : float, optional
        Simulation time.  Unused, but accepted so callers that integrate over
        time (``dX`` previously called ``force_torque(y, t)``, which raised
        TypeError against the old one-argument signature).

    Returns
    -------
    (force, torque) : force as a plain [fx, fy, fz] list, torque as a 1x3
        numpy matrix (consumed via ``np.asmatrix(torque).T`` in ``dX``).
    """
    x = y[0:3]
    # Gravity currently disabled for this test:
    # f = (-G * mass * earth_mass / (np.linalg.norm(x) ** 3)) * x
    f = [0, 0, 0]
    # np.matrix instead of the deprecated numpy.matlib alias (same class).
    torque = np.matrix("0 0 0")
    return (f, torque)
def dX(y, t):
    """Right-hand side of the rigid-body state ODE, for scipy's odeint.

    Parameters
    ----------
    y : 13-element state vector (position, velocity, quaternion, angular
        rate; see the layout comment at the top of the file).
    t : float
        Integration time (the current force model is time-independent).

    Returns the 13-element derivative dX/dt as a list.
    """
    dXdt = [0 for i in range(13)]
    # BUG FIX: force_torque takes only the state vector; the previous call
    # force_torque(y, t) raised TypeError at runtime.
    force, torque = force_torque(y)

    # Linear acceleration: a = F / m
    dXdt[3] = force[0] / mass
    dXdt[4] = force[1] / mass
    dXdt[5] = force[2] / mass

    # Angular acceleration: alpha = I_cm^-1 * tau
    alpha = I_cm_inv * np.asmatrix(torque).T
    dXdt[10] = float(alpha[0])
    dXdt[11] = float(alpha[1])
    dXdt[12] = float(alpha[2])

    # Linear velocity (xyz coordinates)
    dXdt[0:3] = y[3:6]

    # Quaternion derivative math: \dot{q} = Q \vec{w} / 2
    # \dot{q}_0 = 0.5 * (-q1*w0 - q2*w1 - q3*w2)
    dXdt[6] = 0.5 * (-y[7]*y[10] - y[8]*y[11] - y[9]*y[12])
    # \dot{q}_1 = 0.5 * (+q0*w0 - q3*w1 - q2*w2)
    dXdt[7] = 0.5 * ( y[6]*y[10] - y[9]*y[11] - y[8]*y[12])
    # \dot{q}_2 = 0.5 * (+q3*w0 + q0*w1 - q1*w2)
    dXdt[8] = 0.5 * ( y[9]*y[10] + y[6]*y[11] - y[7]*y[12])
    # \dot{q}_3 = 0.5 * (-q2*w0 + q1*w1 + q0*w2)
    dXdt[9] = 0.5 * (-y[8]*y[10] + y[7]*y[11] + y[6]*y[12])

    return dXdt
# --- Test case: a unit sphere spinning about the x axis -------------------
# (The former `X0 = [0 for i in range(13)]` placeholder was dead code --
# X0 is fully reassigned below -- and has been removed.)
mass = 1.0
r = 1.0
# Solid-sphere moment of inertia: I = (2/5) m r^2 on the diagonal
I_cm = [[0.4*mass*(r**2), 0, 0],
        [0, 0.4*mass*(r**2), 0],
        [0, 0, 0.4*mass*(r**2)]]
I_cm_inv = np.linalg.inv(np.asmatrix(I_cm))

# Initial state: at rest at the origin, identity quaternion, rotating at
# 4*pi rad/s (two revolutions per second) about the x axis.
X0 = [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 4*np.pi, 0, 0]
print(" X = %s,\ndXdt = %s\n" % (X0, dX(X0, 0)))

# Integrate one second of motion at 10 ms steps.
time = np.arange(0, 1, 0.01)
y = odeint(dX, X0, time)
for i in range(10):
    print(y[i], time[i])
print(y[-1], time[-1])

# The quaternion should remain (approximately) unit-norm throughout.
for i in range(100):
    print(np.sqrt(y[i][6]**2 + y[i][7]**2 + y[i][8]**2 + y[i][9]**2))
| {
"repo_name": "pchokanson/atlas-sim",
"path": "algorithm-test.py",
"copies": "1",
"size": "2461",
"license": "mit",
"hash": -6098646581807300000,
"line_mean": 24.6354166667,
"line_max": 80,
"alpha_frac": 0.5542462414,
"autogenerated": false,
"ratio": 2.094468085106383,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7963095120058077,
"avg_score": 0.037123841289661265,
"num_lines": 96
} |
"""Algorithm Tests."""
from SafeRLBench.algo import PolicyGradient, A3C
from SafeRLBench.envs import LinearCar
from .policygradient import CentralFDEstimator, estimators
from SafeRLBench.policy import NeuralNetwork
from unittest2 import TestCase
from mock import MagicMock, Mock
class TestPolicyGradient(TestCase):
    """Tests for PolicyGradient construction."""

    def test_pg_init(self):
        """Test: POLICYGRADIENT: initialization."""
        env_mock = MagicMock()
        pol_mock = Mock()

        # Every registered estimator name must resolve to its class.
        for name in estimators:
            algorithm = PolicyGradient(env_mock, pol_mock, estimator=name)
            self.assertIsInstance(algorithm.estimator, estimators[name])

        # Passing the estimator class directly works as well.
        algorithm = PolicyGradient(env_mock, pol_mock,
                                   estimator=CentralFDEstimator)
        self.assertIsInstance(algorithm.estimator, CentralFDEstimator)

        # An already-instantiated estimator is rejected.
        self.assertRaises(ImportError, PolicyGradient,
                          env_mock, pol_mock, CentralFDEstimator(env_mock))
class TestA3C(TestCase):
    """Tests for A3C construction."""

    def test_a3c_init(self):
        """Test: A3C: initialization."""
        a3c = A3C(LinearCar(), NeuralNetwork([2, 6, 1]))

        # The constructor must populate all of these attributes.
        expected_attrs = ('environment', 'policy', 'max_it', 'num_workers',
                          'rate', 'done', 'policy', 'p_net', 'v_net',
                          'workers', 'threads', 'global_counter', 'sess')
        for attr in expected_attrs:
            assert hasattr(a3c, attr)
| {
"repo_name": "befelix/Safe-RL-Benchmark",
"path": "SafeRLBench/algo/test.py",
"copies": "1",
"size": "1382",
"license": "mit",
"hash": -2312248252766539000,
"line_mean": 30.4090909091,
"line_max": 77,
"alpha_frac": 0.6410998553,
"autogenerated": false,
"ratio": 3.7967032967032965,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9937803152003297,
"avg_score": 0,
"num_lines": 44
} |
# Algorithm to compute pi via subdividing unit square recursively
# and seeing it is part of the circle or not.
from __future__ import division
def iterate(x, y, iteration, N):
    """Recursively classify the grid square at (x, y) on a 2**iteration grid
    against the unit circle.

    Returns a 2-tuple (volume_inside, volume_outside), each expressed as a
    fraction of this square's area, so volume_inside + volume_outside <= 1.
    The remainder is the "uncertain" area left unresolved at depth N.
    """
    scale = 2 ** iteration
    scale_sq = scale * scale
    # Squared distances from the origin to the square's nearest and farthest
    # corners, in grid units.
    near_sq = x ** 2 + y ** 2
    far_sq = (x + 1) ** 2 + (y + 1) ** 2

    if near_sq >= scale_sq:
        return (0, 1)   # entirely outside the circle
    if far_sq <= scale_sq:
        return (1, 0)   # entirely inside the circle
    if iteration == N:
        return (0, 0)   # straddles the boundary; give up at max depth

    # Subdivide into four half-size squares and average their results.
    quadrants = [
        iterate(x * 2, y * 2, iteration + 1, N),
        iterate(x * 2, y * 2 + 1, iteration + 1, N),
        iterate(x * 2 + 1, y * 2, iteration + 1, N),
        iterate(x * 2 + 1, y * 2 + 1, iteration + 1, N),
    ]
    inside = sum(q[0] for q in quadrants) / 4
    outside = sum(q[1] for q in quadrants) / 4
    return (inside, outside)
def around_border(x, y, N):
    """Return True when the grid square (x, y) at resolution 2**N straddles
    the circle boundary, i.e. is neither fully inside nor fully outside."""
    scale = 2 ** N
    scale_sq = scale * scale
    # Squared distances to the nearest and farthest corners.
    near_sq = x ** 2 + y ** 2
    far_sq = (x + 1) ** 2 + (y + 1) ** 2
    # Fully-outside (near corner beyond the circle) or fully-inside (far
    # corner within the circle) squares are not on the border.
    return not (near_sq > scale_sq or far_sq < scale_sq)
def navigating(N):
    """Calculate lower/upper bounds on the quarter-circle area by walking
    along its rasterized outline at resolution 2**N.

    Starting at the top-left border pixel, the walk moves right or down along
    the border, accumulating pixel columns that are certainly inside and
    certainly outside.  Returns (inside_fraction, outside_fraction) of the
    unit square; the gap between them is the border area itself.
    """
    length = 2 ** N
    x = 0
    y = length - 1
    inside = length - 1
    outside = 0
    border = 1  # NOTE(review): never updated or read afterwards
    while not (x == 2 ** N - 1 and y == 0):
        right = around_border(x + 1, y, N)
        bottom = around_border(x, y - 1, N)
        # BUG FIX: the two assertion messages were swapped -- the first fires
        # when BOTH directions are on the border, the second when NEITHER is.
        assert not (right and bottom), "(%d, %d) Navigating both" % (x, y)
        assert right or bottom, "(%d, %d) Cannot navigate" % (x, y)
        if right:
            # move to the right pixel: the y pixels below are inside, the
            # (length - y - 1) pixels above are outside
            inside += y
            outside += (length - y - 1)
            x += 1
        elif bottom:
            # move to the bottom pixel:
            # subtract 1 pixel from the circle.
            # no need to add 1 to outside, because this area is taken by the border.
            inside -= 1
            y -= 1
    return (
        (inside / length / length),
        (outside / length / length)
    )
def calculate(N, algorithm):
    """Run *algorithm* at subdivision depth N and print the pi bounds.

    *algorithm* maps N to (inside_fraction, outside_fraction) of the unit
    square; the quarter-circle bounds are scaled by 4 to bracket pi.
    """
    inside, outside = algorithm(N)
    lo = inside * 4
    hi = (1 - outside) * 4
    gap = ((1 - outside) - inside) * 4
    print("%2d: %f < pi < %f (delta = %.10f)" % (N, lo, hi, gap))
# Print bounds for depths 25..29 using the border-walking algorithm.  The
# recursive subdivision variant is left commented out: it visits every
# sub-square and is far slower than following the outline.
for i in range(25, 30):
    print("")
    # calculate(i, lambda N: iterate(0, 0, 0, N))
    calculate(i, navigating)
# sample out:
'''
0: 0.000000 < pi < 4.000000 (delta = 4.000000)
1: 1.000000 < pi < 4.000000 (delta = 3.000000)
2: 2.000000 < pi < 3.750000 (delta = 1.750000)
3: 2.562500 < pi < 3.500000 (delta = 0.937500)
4: 2.859375 < pi < 3.343750 (delta = 0.484375)
5: 3.007812 < pi < 3.253906 (delta = 0.246094)
6: 3.075195 < pi < 3.199219 (delta = 0.124023)
7: 3.107910 < pi < 3.170166 (delta = 0.062256)
8: 3.125549 < pi < 3.156738 (delta = 0.031189)
9: 3.133484 < pi < 3.149094 (delta = 0.015610)
10: 3.137589 < pi < 3.145397 (delta = 0.007809)
11: 3.139624 < pi < 3.143529 (delta = 0.003905)
12: 3.140601 < pi < 3.142554 (delta = 0.001953)
13: 3.141100 < pi < 3.142076 (delta = 0.000977)
14: 3.141347 < pi < 3.141835 (delta = 0.000488)
15: 3.141470 < pi < 3.141714 (delta = 0.000244)
16: 3.141531 < pi < 3.141653 (delta = 0.000122)
17: 3.141562 < pi < 3.141623 (delta = 0.000061)
18: 3.141577 < pi < 3.141608 (delta = 0.000031)
19: 3.141585 < pi < 3.141600 (delta = 0.000015)
'''
| {
"repo_name": "jeeyoungk/exercise",
"path": "python/pi.py",
"copies": "1",
"size": "3900",
"license": "mit",
"hash": 6377673966239517000,
"line_mean": 32.0508474576,
"line_max": 84,
"alpha_frac": 0.5607692308,
"autogenerated": false,
"ratio": 2.8655400440852317,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8913987614532992,
"avg_score": 0.002464332070447933,
"num_lines": 118
} |
# Algorithm Trial 01
# Purpose: compare 2 signals to find a measure of their correlation
# Method: inspired by cross-correlation.
# - Multiply of the signals' values at each time step, and sum all the
# multiples, and normalize (divide by length of vector).
# - offset how the signals are lined-up by 1, and repeat
# - the offset that begets the greatest magnitude sum is the offset of
# greatest signal correlation
import numpy as np
# DEFINE FUNCTION
def calcCorrelates(n1, n2, period=None, maxstep=3):
    """
    For two input vectors n1 and n2 this returns the value of their correlation
    in the timeframe 'period' and for each # days offset up to 'maxstep', in
    the format of a list of (offset, correlation_coefficient)
    """
    # period: number of trailing timesteps to compare (defaults to the whole
    # signal); maxstep: largest time offset to try.
    # NOTE(review): for offset > 0 the index n1[i - offset] can go negative,
    # which wraps around to the END of the list (circular correlation).
    # Confirm that wraparound is intended rather than zero-padding.
    start = len(n1) - period if period else 0
    end = len(n1)
    vals = []  # list of (time offset, correlation value)
    for offset in range(maxstep):
        products = [float(n1[i - offset] * n2[i]) for i in range(start, end)]
        vals.append((offset, sum(products) / len(products)))
    return vals
if __name__ == "__main__":
    # 1. EXAMPLE VECTORS. Each input signal is a vector of integer (& binary, to start) values.
    A = [-1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1]
    B = A
    C = [-1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1] # =A delayed 1 timestep
    D = [x*-1 for x in A] # = -A
    # NOTE(review): B and D are defined but unused in this demo.
    # 2. CALCULATE CORRELATION
    vals = calcCorrelates(A, C)
    # Python 2 print statement -- this module is not Python 3 compatible as written.
    print vals
| {
"repo_name": "Ondross/statsq",
"path": "correlation.py",
"copies": "1",
"size": "1698",
"license": "mit",
"hash": -8554443253585108000,
"line_mean": 32.96,
"line_max": 100,
"alpha_frac": 0.609540636,
"autogenerated": false,
"ratio": 3.1213235294117645,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42308641654117646,
"avg_score": null,
"num_lines": null
} |
# Algos from https://github.com/dirceup/tiled-vegetation-indices/blob/master/app/lib/vegetation_index.rb
# Functions can use all of the supported functions and operators from
# https://numexpr.readthedocs.io/en/latest/user_guide.html#supported-operators
import re
from functools import lru_cache
from django.utils.translation import gettext_lazy as _
# Registry of spectral/vegetation index algorithms.  Each entry maps a
# display name to:
#   'expr'  - numexpr-compatible formula over the symbolic band names
#             (R, G, B, N = near-infrared, Re = red edge)
#   'help'  - translatable description shown in the UI
#   'range' - optional (min, max) display range for histogram rendering
# Names starting with "_" are internal/test entries, hidden by
# get_algorithm_list().
algos = {
    'NDVI': {
        'expr': '(N - R) / (N + R)',
        'help': _('Normalized Difference Vegetation Index shows the amount of green vegetation.')
    },
    'NDVI (Blue)': {
        'expr': '(N - B) / (N + B)',
        'help': _('Normalized Difference Vegetation Index shows the amount of green vegetation.')
    },
    'ENDVI':{
        'expr': '((N + G) - (2 * B)) / ((N + G) + (2 * B))',
        'help': _('Enhanced Normalized Difference Vegetation Index is like NDVI, but uses Blue and Green bands instead of only Red to isolate plant health.')
    },
    'VARI': {
        'expr': '(G - R) / (G + R - B)',
        'help': _('Visual Atmospheric Resistance Index shows the areas of vegetation.'),
        'range': (-1, 1)
    },
    'EXG': {
        'expr': '(2 * G) - (R + B)',
        'help': _('Excess Green Index emphasizes the greenness of leafy crops such as potatoes.',)
    },
    'BAI': {
        'expr': '1.0 / (((0.1 - R) ** 2) + ((0.06 - N) ** 2))',
        'help': _('Burn Area Index hightlights burned land in the red to near-infrared spectrum.')
    },
    'GLI': {
        'expr': '((G * 2) - R - B) / ((G * 2) + R + B)',
        'help': _('Green Leaf Index shows greens leaves and stems.'),
        'range': (-1, 1)
    },
    'GNDVI':{
        'expr': '(N - G) / (N + G)',
        'help': _('Green Normalized Difference Vegetation Index is similar to NDVI, but measures the green spectrum instead of red.')
    },
    'GRVI':{
        'expr': 'N / G',
        'help': _('Green Ratio Vegetation Index is sensitive to photosynthetic rates in forests.')
    },
    'SAVI':{
        'expr': '(1.5 * (N - R)) / (N + R + 0.5)',
        'help': _('Soil Adjusted Vegetation Index is similar to NDVI but attempts to remove the effects of soil areas using an adjustment factor (0.5).')
    },
    'MNLI':{
        'expr': '((N ** 2 - R) * 1.5) / (N ** 2 + R + 0.5)',
        'help': _('Modified Non-Linear Index improves the Non-Linear Index algorithm to account for soil areas.')
    },
    'MSR': {
        'expr': '((N / R) - 1) / (sqrt(N / R) + 1)',
        'help': _('Modified Simple Ratio is an improvement of the Simple Ratio (SR) index to be more sensitive to vegetation.')
    },
    'RDVI': {
        'expr': '(N - R) / sqrt(N + R)',
        'help': _('Renormalized Difference Vegetation Index uses the difference between near-IR and red, plus NDVI to show areas of healthy vegetation.')
    },
    'TDVI': {
        'expr': '1.5 * ((N - R) / sqrt(N ** 2 + R + 0.5))',
        'help': _('Transformed Difference Vegetation Index highlights vegetation cover in urban environments.')
    },
    'OSAVI': {
        'expr': '(N - R) / (N + R + 0.16)',
        'help': _('Optimized Soil Adjusted Vegetation Index is based on SAVI, but tends to work better in areas with little vegetation where soil is visible.')
    },
    'LAI': {
        'expr': '3.618 * (2.5 * (N - R) / (N + 6*R - 7.5*B + 1)) * 0.118',
        'help': _('Leaf Area Index estimates foliage areas and predicts crop yields.'),
        'range': (-1, 1)
    },
    'EVI': {
        'expr': '2.5 * (N - R) / (N + 6*R - 7.5*B + 1)',
        'help': _('Enhanced Vegetation Index is useful in areas where NDVI might saturate, by using blue wavelengths to correct soil signals.'),
        'range': (-1, 1)
    },

    # more?

    # Internal entries used for testing only (leading "_" hides them):
    '_TESTRB': {
        'expr': 'R + B',
        'range': (0,1)
    },
    '_TESTFUNC': {
        'expr': 'R + (sqrt(B) )'
    }
}
# Band orderings produced by the supported cameras.  lookup_formula() maps
# the symbolic band names in an algorithm expression onto 1-based indexes
# within one of these orderings.
camera_filters = [
    'RGB',
    'RGN',
    'NGB',
    'NRG',
    'NRB',
    'RGBN',
    'BGRNRe',
    'BGRReN',
    'RGBNRe',
    'RGBReN',

    # more?
    # TODO: certain cameras have only two bands? eg. MAPIR NDVI BLUE+NIR
]
@lru_cache(maxsize=20)
def lookup_formula(algo, band_order = 'RGB'):
    """Translate a named index algorithm into a band-indexed numexpr formula.

    Parameters
    ----------
    algo : str or None
        Key into ``algos`` (e.g. "NDVI"); None yields (None, None).
    band_order : str
        Camera band ordering (e.g. "RGN"); each symbolic band name in the
        expression is rewritten to its 1-based reference (b1, b2, ...).

    Returns
    -------
    (expr, range) : the rewritten expression string and the algorithm's
        display range (or None when the algorithm defines none).

    Raises
    ------
    ValueError
        When the algorithm is unknown, or the expression uses a band that is
        not present in ``band_order``.
    """
    if algo is None:
        return None, None
    if band_order is None:
        band_order = 'RGB'

    if algo not in algos:
        raise ValueError("Cannot find algorithm " + algo)

    input_bands = tuple(band_order)

    def repl(matches):
        b = matches.group(1)
        try:
            return 'b' + str(input_bands.index(b) + 1)
        except ValueError:
            raise ValueError("Cannot find band \"" + b + "\" from \"" + band_order + "\". Choose a proper band order.")

    # FIX: raw strings for the regex patterns -- the previous "\s+" literal
    # relied on an invalid escape sequence (DeprecationWarning / future error).
    expr = re.sub(r"([A-Z]+?[a-z]*)", repl, re.sub(r"\s+", "", algos[algo]['expr']))
    hrange = algos[algo].get('range', None)
    return expr, hrange
@lru_cache(maxsize=2)
def get_algorithm_list(max_bands=3):
    """Return the public algorithms, each annotated with its id and the
    camera filters it is compatible with (entries named "_*" are hidden)."""
    result = []
    for key, definition in algos.items():
        if key.startswith("_"):
            continue  # internal/test entries are not exposed
        entry = {'id': key, 'filters': get_camera_filters_for(definition, max_bands)}
        entry.update(definition)
        result.append(entry)
    return result
def get_camera_filters_for(algo, max_bands=3):
    """Return the camera filters whose band set covers every band used by
    *algo*'s expression, without exceeding *max_bands* distinct bands."""
    band_pattern = re.compile("([A-Z]+?[a-z]*)")
    needed = set(re.findall(band_pattern, algo['expr']))
    matches = []
    for filt in camera_filters:
        available = set(re.findall(band_pattern, filt))
        # A filter qualifies when it supplies all needed bands and stays
        # within the band budget.
        if needed <= available and len(available) <= max_bands:
            matches.append(filt)
    return matches
| {
"repo_name": "OpenDroneMap/WebODM",
"path": "app/api/formulas.py",
"copies": "1",
"size": "5539",
"license": "mpl-2.0",
"hash": -749807877132644000,
"line_mean": 33.61875,
"line_max": 159,
"alpha_frac": 0.5475717639,
"autogenerated": false,
"ratio": 3.183333333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4230905097233333,
"avg_score": null,
"num_lines": null
} |
"""
Generate visualizations of classification, regression, and clustering
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
from sklearn import datasets
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.lda import LDA
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.cluster import KMeans
# Shared red/green/blue palette and matching matplotlib colormap used by the
# three visualizations below to color classes/clusters consistently.
rgb_colors = ['#FF0000', '#00FF00', '#0000FF']
cm_bright = ListedColormap(rgb_colors)
def visualize_classification(estimator, n_samples=100, n_features=2):
    """Fit *estimator* on a random three-class dataset and plot its decision
    surface together with the data points.

    Parameters
    ----------
    estimator : scikit-learn classifier
        May be unfitted; it is fitted in place on the generated data.
    n_samples, n_features : int
        Size of the generated dataset.
    """
    # Create the linear dataset
    kwargs = {
        'n_samples': n_samples,
        'n_features': n_features,
        'n_classes': 3,
        'n_redundant': 0,
        'n_clusters_per_class': 1,
        'class_sep': 1.22,
    }
    X, y = datasets.make_classification(**kwargs)

    # BUG FIX: the estimator was never fitted, so estimator.predict() below
    # raised an error for any freshly constructed estimator (as in the
    # __main__ demo, which passes an unfitted KNeighborsClassifier).
    estimator.fit(X, y)

    # Create the figure
    fig, axes = plt.subplots()

    # no ticks
    axes.set_xticks(())
    axes.set_yticks(())
    axes.set_ylabel('$x_1$')
    axes.set_xlabel('$x_0$')

    # Plot the decision surface on a fine mesh over the data range
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.02),
                         np.arange(y_min, y_max, 0.02))
    Z = estimator.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    cs = plt.contourf(xx, yy, Z, cmap=cm_bright, alpha=0.3)

    # Plot the points on the grid
    axes.scatter(X[:, 0], X[:, 1], c=y, s=30, cmap=cm_bright)

    # Show the plot
    plt.axis("tight")
    plt.show()
def visualize_regression(n_samples=100):
    """Plot a degree-6 polynomial regression fit against the true sine curve,
    using noisy samples drawn uniformly on [0, 1]."""
    def true_curve(t):
        return np.sin(2 * np.pi * t)

    # Noisy observations of the true function
    X = np.random.uniform(0, 1, size=n_samples)[:, np.newaxis]
    y = true_curve(X) + np.random.normal(scale=0.3, size=n_samples)[:, np.newaxis]

    # Dense grid for drawing the fitted and true curves
    x_plot = np.linspace(0, 1, 100)[:, np.newaxis]

    model = Pipeline([
        ("polynomial_features", PolynomialFeatures(degree=6)),
        ("linear_regression", LinearRegression()),
    ])
    model.fit(X, y)

    fig, axes = plt.subplots()

    # no ticks
    axes.set_xticks(())
    axes.set_yticks(())
    axes.set_ylabel('$y$')
    axes.set_xlabel('$x$')

    # Fitted curve vs. the ground truth, with the noisy points underneath
    axes.plot(x_plot, model.predict(x_plot), color='red', label="estimated")
    axes.plot(x_plot, true_curve(x_plot), color='green', label='true function')
    axes.scatter(X, y)

    plt.legend(loc="best")
    plt.show()
def visualize_clustering(n_samples=350, n_centers=3, n_features=2):
    """Run k-means on synthetic blobs and plot the points with a
    translucent disc over each discovered centroid.

    Parameters
    ----------
    n_samples : int, number of points to generate
    n_centers : int, number of blobs and of k-means clusters
    n_features : int, dimensionality of the generated data
    """
    # Create the data
    X, y = datasets.make_blobs(n_samples=n_samples, centers=n_centers,
                               n_features=n_features)
    # Create and fit the estimator
    estimator = KMeans(n_clusters=n_centers, n_init=10)
    estimator.fit(X)
    centroids = estimator.cluster_centers_
    # Create the figure
    fig, axes = plt.subplots()
    # Fix: xrange is Python 2 only (NameError on Python 3); range is
    # equivalent here. One translucent disc per centroid.
    for k, col in zip(range(n_centers), rgb_colors):
        center = centroids[k]
        axes.plot(center[0], center[1], 'o', markerfacecolor=col,
                  markeredgecolor='k', markersize=200, alpha=.15)
    # no ticks
    axes.set_xticks(())
    axes.set_yticks(())
    axes.set_ylabel('$x_1$')
    axes.set_xlabel('$x_0$')
    # Plot the points
    axes.scatter(X[:, 0], X[:, 1], c='k')
    plt.show()
if __name__ == '__main__':
    # Other demos kept for reference; uncomment to run them instead.
    # visualize_classification(KNeighborsClassifier(n_neighbors=3))
    # visualize_regression()
    visualize_clustering()
| {
"repo_name": "wavelets/machine-learning",
"path": "code/algviz.py",
"copies": "5",
"size": "4351",
"license": "mit",
"hash": 2723088663044569600,
"line_mean": 28.2013422819,
"line_max": 113,
"alpha_frac": 0.6161801885,
"autogenerated": false,
"ratio": 3.3781055900621118,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6494285778562111,
"avg_score": null,
"num_lines": null
} |
# Alias 8 remote script for Ableton Live 10.
# by Kevin Ferguson (http://www.ofrecordings.com/)
from __future__ import with_statement
# Module-level logging hook; assigned ControlSurface.log_message once the
# surface is instantiated (see Alias8.__init__).
g_logger = None
# Master switch: set to False to silence all log() output.
DEBUG = True


def log(msg):
    """Forward *msg* to Live's log when DEBUG is on and a logger is set."""
    global g_logger
    if DEBUG:
        if g_logger is not None:
            g_logger(msg)
import Live
import MidiRemoteScript
from _Framework.ButtonElement import ButtonElement
from _Framework.ControlSurface import ControlSurface
from _Framework.DeviceComponent import DeviceComponent
from _Framework.EncoderElement import EncoderElement
from _Framework.InputControlElement import *
from _Framework.MixerComponent import MixerComponent
from _Framework.SessionComponent import SessionComponent
from _Framework.SliderElement import SliderElement
# MIDI channel all of the surface's controls transmit on.
CHANNEL = 0

# Channel strip mapping:
# knobs -> first 2 controls on first device on the channel
# fader -> channel volume
# top button -> channel mute
# bottom button -> channel arm
# master fader -> master volume
# encoder -> move grid

# Alias 8 color map: note-on velocity values the hardware shows as LED colors.
WHITE = 2
CYAN = 4
MAGENTA = 8
RED = 16
BLUE = 32
YELLOW = 64
GREEN = 127
def button(notenr, name=None, color=None):
    """Build a momentary note button on the global channel.

    A plain ButtonElement is returned unless *color* is given, in which
    case a ColorButton lights up with that color when turned on. An
    optional *name* is attached to the element.
    """
    if color is None:
        element = ButtonElement(True, MIDI_NOTE_TYPE, CHANNEL, notenr)
    else:
        element = ColorButton(True, MIDI_NOTE_TYPE, CHANNEL, notenr, color)
    if name:
        element.name = name
    return element
def fader(notenr):
    """Build a CC-mapped slider element on the global channel."""
    return SliderElement(MIDI_CC_TYPE, CHANNEL, notenr)
def knob(cc):
    """Build an absolute-mode encoder for CC number *cc*.

    Consistency fix: uses the module-wide CHANNEL constant like button()
    and fader() do, instead of a hard-coded 0. CHANNEL is currently 0,
    so behavior is unchanged, but the channel now follows the constant.
    """
    return EncoderElement(MIDI_CC_TYPE, CHANNEL, cc,
                          Live.MidiMap.MapMode.absolute)
class ColorButton(ButtonElement):
    """A ButtonElement with a custom on color."""

    def __init__(self, is_momentary, msg_type, channel, identifier, color):
        super(ColorButton, self).__init__(
            is_momentary, msg_type, channel, identifier)
        # Value sent instead of the framework's default on-value; the
        # hardware interprets it as an LED color (see color map above).
        self.button_value = color

    def turn_on(self):
        # Send the configured color value rather than the default.
        log('ColorButton turn on ' + str(self.button_value))
        self.send_value(self.button_value)
class MixerWithDevices(MixerComponent):
    """MixerComponent that also binds one DeviceComponent per channel
    strip to the first device on that strip's track, re-binding as the
    track window moves or device chains change."""

    def __init__(self, *args, **kwargs):
        # One entry per channel strip:
        #   "component": DeviceComponent bound to the track's first device
        #   "cb": the devices-changed listener currently registered
        #   "track": the track that listener is attached to
        self.devices = []
        super(MixerWithDevices, self).__init__(*args, **kwargs)
        for i in range(len(self._channel_strips)):
            dev = {
                "component": DeviceComponent(),
                "cb": None,
                "track": None
            }
            self.devices.append(dev)
            self.register_components(dev["component"])
        self._reassign_tracks()

    def get_active_tracks(self):
        """Return the tracks currently covered by the mixer's window."""
        tracks_to_use = self.tracks_to_use()
        num_tracks = len(self._channel_strips)
        return tracks_to_use[
            self._track_offset:self._track_offset + num_tracks
        ]

    def _reassign_tracks(self):
        super(MixerWithDevices, self)._reassign_tracks()
        # assign each DeviceComponent to the first device on its track
        # this could be called before we construct self.devices
        if self.devices:
            log("reassigning tracks")
            tracks_to_use = self.get_active_tracks()
            log("tracks_to_use has %d elements" % len(tracks_to_use))
            log("devices has %d" % len(self.devices))
            for i, dev in enumerate(self.devices):
                if i < len(tracks_to_use):
                    log("device %d gets a track %s" % (
                        i, tracks_to_use[i].name))
                    self.assign_device_to_track(tracks_to_use[i], i)
                else:
                    # Fewer tracks than strips: detach the spare component.
                    log("device %d gets no track" % i)
                    self.assign_device_to_track(None, i)

    def assign_device_to_track(self, track, i):
        """Point device slot *i* at *track* (or detach it when None)."""
        # nuke existing listener
        dev = self.devices[i]
        if dev["track"]:
            dev["track"].remove_devices_listener(dev["cb"])
            dev["track"] = None
            dev["cb"] = None
        dev["component"].set_lock_to_device(True, None)
        if track is not None:
            # listen for changes to the device chain
            def dcb():
                return self._on_device_changed(i)
            dev["cb"] = dcb
            dev["track"] = track
            track.add_devices_listener(dcb)
            # force an update to attach to any existing device
            dcb()

    def _on_device_changed(self, i):
        log("_on_device_changed %d" % i)
        # the device chain on track i changed-- reassign device if needed
        track = self.devices[i]["track"]
        device_comp = self.devices[i]["component"]
        if not track.devices:
            device_comp.set_lock_to_device(True, None)
        else:
            log('lock track %d to device %s' % (i, track.devices[0].name))
            device_comp.set_lock_to_device(True, track.devices[0])
        self.update()

    def set_device_controls(self, track_nr, controls):
        """Map *controls* onto the device component of strip *track_nr*."""
        device_comp = self.devices[track_nr]["component"]
        device_comp.set_parameter_controls(controls)
        device_comp.update()
class Alias8(ControlSurface):
    """Control surface script wiring the Livid Alias 8 hardware to Live."""

    # Hardware layout: CC/note numbers as transmitted by the Alias 8.
    num_tracks = 8
    knobs_top = [1, 2, 3, 4, 5, 6, 7, 8]
    knobs_bottom = [9, 10, 11, 12, 13, 14, 15, 16]
    faders = [17, 18, 19, 20, 21, 22, 23, 24]
    master_fader = 25
    encoder = 42
    buttons_top = [0, 1, 2, 3, 4, 5, 6, 7]
    buttons_bottom = [8, 9, 10, 11, 12, 13, 14, 15]

    def __init__(self, instance):
        super(Alias8, self).__init__(instance, False)
        global g_logger
        # Route module-level log() through Live's log file.
        g_logger = self.log_message
        with self.component_guard():
            self._set_suppress_rebuild_requests(True)
            self.init_session()
            self.init_mixer()
            # Connect mixer to session.
            self.session.set_mixer(self.mixer)
            self.session.update()
            # New in Live 9: must explicitly activate session component.
            self.set_highlighting_session_component(self.session)
            self._set_suppress_rebuild_requests(False)

    def init_session(self):
        """Create the session component and hook the encoder to it."""
        self.session = SessionComponent(self.num_tracks, 1)
        self.session.name = 'Alias 8 Session'
        self.session.update()

        # Connect the encoder to track scroller.
        # Values 1/127 are relative increment/decrement ticks.
        def scroll_cb(value):
            if value == 1:
                self.session._horizontal_banking.scroll_down()
            elif value == 127:
                self.session._horizontal_banking.scroll_up()
        self.track_encoder = EncoderElement(
            MIDI_CC_TYPE,
            0,
            self.encoder,
            Live.MidiMap.MapMode.absolute
        )
        self.track_encoder.add_value_listener(scroll_cb)

    def init_mixer(self):
        """Create the mixer and bind faders, buttons and device knobs."""
        self.mixer = MixerWithDevices(self.num_tracks, 0)
        self.mixer.id = 'Mixer'
        self.song().view.selected_track = self.mixer.channel_strip(0)._track
        for i in range(self.num_tracks):
            self.mixer.channel_strip(i).set_volume_control(
                fader(self.faders[i]))
            self.mixer.channel_strip(i).set_mute_button(
                button(self.buttons_top[i], color=YELLOW))
            self.mixer.channel_strip(i).set_invert_mute_feedback(True)
            self.mixer.channel_strip(i).set_arm_button(
                button(self.buttons_bottom[i], color=RED))
            self.mixer.set_device_controls(
                i,
                (
                    knob(self.knobs_top[i]),
                    knob(self.knobs_bottom[i])
                )
            )
        self.mixer.master_strip().set_volume_control(fader(self.master_fader))
        self.mixer.update()
| {
"repo_name": "macfergus/alias8",
"path": "alias8/alias.py",
"copies": "1",
"size": "7443",
"license": "cc0-1.0",
"hash": -5127657943387977000,
"line_mean": 32.9863013699,
"line_max": 78,
"alpha_frac": 0.5890098079,
"autogenerated": false,
"ratio": 3.6755555555555555,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9754136480381856,
"avg_score": 0.0020857766147395975,
"num_lines": 219
} |
"""Alias command."""
from . import Command
from ...packets import MessagePacket
class Alias(Command):
    """Alias command: manage chat-command aliases via the backend API."""

    COMMAND = "alias"

    @Command.command(role="moderator")
    async def add(self, alias: "?command", command: "?command", *_: False,
                  raw: "packet"):
        """Add a new command alias."""
        # Drop the first four words ("!alias add <alias> <command>") and
        # keep any trailing words as default arguments for the alias.
        _, _, _, _, *args = raw.split()
        if args:
            packet_args = MessagePacket.join(
                *args, separator=' ').json["message"]
        else:
            packet_args = None
        response = await self.api.add_alias(command, alias, packet_args)
        # 201 -> created, 200 -> updated, 404 -> target command missing.
        if response.status == 201:
            return "Alias !{} for !{} created.".format(alias, command)
        elif response.status == 200:
            return "Alias !{} for command !{} updated.".format(alias, command)
        elif response.status == 404:
            return "Command !{} does not exist.".format(command)

    @Command.command(role="moderator")
    async def remove(self, alias: "?command"):
        """Remove a command alias."""
        response = await self.api.remove_alias(alias)
        if response.status == 200:
            return "Alias !{} removed.".format(alias)
        elif response.status == 404:
            return "Alias !{} doesn't exist!".format(alias)

    @Command.command("list", role="moderator")
    async def list_aliases(self):
        """List all aliases."""
        response = await self.api.get_command()
        if response.status == 200:
            commands = (await response.json())["data"]
            # Only entries typed "aliases" are aliases; format as
            # "alias (target)" sorted alphabetically.
            return "Aliases: {}.".format(', '.join(sorted(
                "{} ({})".format(
                    command["attributes"]["name"],
                    command["attributes"]["commandName"])
                for command in commands
                if command.get("type") == "aliases"
            )))
        return "No aliases added!"
| {
"repo_name": "CactusDev/CactusBot",
"path": "cactusbot/commands/magic/alias.py",
"copies": "2",
"size": "1908",
"license": "mit",
"hash": 2289707931717732900,
"line_mean": 31.8965517241,
"line_max": 78,
"alpha_frac": 0.5393081761,
"autogenerated": false,
"ratio": 4.468384074941452,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6007692251041451,
"avg_score": null,
"num_lines": null
} |
""" Aliases gather aliasing informations. """
from pythran.analyses.global_declarations import GlobalDeclarations
from pythran.intrinsic import Intrinsic, Class, NewMem
from pythran.passmanager import ModuleAnalysis
from pythran.syntax import PythranSyntaxError
from pythran.tables import functions, methods, MODULES
import pythran.metadata as md
from itertools import product
import ast
# Default alias set of every known intrinsic, filled by save_intrinsic_alias.
IntrinsicAliases = dict()
class ContainerOf(object):
    """Stands for the value stored at position `index` of `containee`."""

    def __init__(self, index, containee):
        self.containee = containee
        self.index = index
def save_intrinsic_alias(module):
    """ Recursively save default aliases for pythonic functions. """
    for entry in module.values():
        if isinstance(entry, dict):
            # Nested dict means a submodule: recurse into it.
            save_intrinsic_alias(entry)
        else:
            # By default an intrinsic aliases only to itself.
            IntrinsicAliases[entry] = {entry}
            if isinstance(entry, Class):
                save_intrinsic_alias(entry.fields)
# Pre-compute the default alias set of every intrinsic known to pythran.
for module in MODULES.values():
    save_intrinsic_alias(module)
class CopyOnWriteAliasesMap(object):
    """Dict-like map whose copy() is a cheap copy-on-write snapshot.

    A snapshot shares the underlying dict with its source until either
    side performs a key-level write, at which point the writer copies
    the dict for itself.

    Fix: previously only the snapshot was lazy — the source kept writing
    into the shared dict, so mutations made after copy() leaked into
    every snapshot taken earlier. copy() now also puts the source into
    the lazy state, and setdefault() (previously reaching the raw dict
    through __getattr__) triggers copy-on-write like the other mutators.
    """

    def __init__(self, *args, **kwargs):
        # When `src` is given, share its dict lazily; otherwise build an
        # owned dict from the positional arguments.
        self.src = kwargs.get('src', None)
        if self.src is None:
            self.data = dict(*args)
        else:
            assert not args, "cannot use a src and positional arguments"
            self.data = self.src

    def _copy_on_write(self):
        # Detach from the shared dict right before the first mutation.
        if self.src is not None:
            assert self.data is self.src
            self.data = self.src.copy()
            self.src = None

    def __setitem__(self, k, v):
        self._copy_on_write()
        return self.data.__setitem__(k, v)

    def update(self, *values):
        self._copy_on_write()
        return self.data.update(*values)

    def setdefault(self, key, default=None):
        # Key insertion is a mutation too; detach first.
        self._copy_on_write()
        return self.data.setdefault(key, default)

    def copy(self):
        # Make *self* lazy as well, so that later writes to self do not
        # leak into the snapshot sharing the same underlying dict.
        self.src = self.data
        return CopyOnWriteAliasesMap(src=self.data)

    def __iter__(self):
        return self.data.__iter__()

    def __getitem__(self, k):
        return self.data.__getitem__(k)

    def __getattr__(self, name):
        # Delegate read-only dict API (get, items, keys, ...) untouched.
        return getattr(self.data, name)
class Aliases(ModuleAnalysis):
    """Gather aliasing informations across nodes.

    For each node of the module, `self.result` maps the node to an Info
    holding a snapshot of the identifier->aliases state and the set of
    nodes/intrinsics the node may alias to.
    """

    # Pseudo-identifier used to accumulate the aliases of return values.
    RetId = '@'

    class Info(object):
        # state: snapshot of the aliasing map at that point
        # aliases: set of nodes the annotated node may alias to
        def __init__(self, state, aliases):
            self.state = state
            self.aliases = aliases

    def __init__(self):
        self.result = dict()
        self.aliases = None
        super(Aliases, self).__init__(GlobalDeclarations)

    def expand_unknown(self, node):
        """Worst-case alias set for a call we know nothing about."""
        # should include built-ins too?
        unkowns = {NewMem()}.union(self.global_declarations.values())
        return unkowns.union(node.args)

    @staticmethod
    def access_path(node):
        """Resolve a Name/Attribute chain to its entry in MODULES."""
        def rec(w, n):
            if isinstance(n, ast.Name):
                return w.get(n.id, n.id)
            elif isinstance(n, ast.Attribute):
                return rec(w, n.value)[n.attr]
            elif isinstance(n, ast.FunctionDef):
                return node.name
            else:
                return node
        return rec(MODULES, node)

    # aliasing created by expressions
    def add(self, node, values=None):
        """Record the alias set of *node*, together with a snapshot of
        the current aliasing state; return the alias set."""
        if not values:  # no given target for the alias
            if isinstance(node, Intrinsic):
                values = {node}  # an Intrinsic always aliases to itself
            else:
                values = set()  # otherwise aliases to nothing
        assert isinstance(values, set)
        self.result[node] = Aliases.Info(self.aliases.copy(), values)
        return values

    def visit_BoolOp(self, node):
        # `a or b` may alias to either operand.
        return self.add(node, set.union(*map(self.visit, node.values)))

    def visit_UnaryOp(self, node):
        # Arithmetic produces a fresh value: no alias.
        self.generic_visit(node)
        return self.add(node)

    visit_BinOp = visit_UnaryOp
    visit_Compare = visit_UnaryOp

    def visit_IfExp(self, node):
        # `a if t else b` may alias to either branch.
        self.visit(node.test)
        rec = map(self.visit, [node.body, node.orelse])
        return self.add(node, set.union(*rec))

    def visit_Dict(self, node):
        self.generic_visit(node)
        return self.add(node)  # not very accurate

    def visit_Set(self, node):
        self.generic_visit(node)
        return self.add(node)  # not very accurate

    def visit_Return(self, node):
        # Accumulate the aliases of every returned expression under RetId.
        if not node.value:
            return
        self.aliases.setdefault(Aliases.RetId, set()).update(
            self.visit(node.value))

    def call_return_alias(self, node):
        """Compute the alias set of a call's return value, using the
        callee's `return_alias` when one is known."""
        def interprocedural_aliases(func, args):
            # Try every combination of the arguments' own aliases.
            arg_aliases = [self.result[arg].aliases
                           for arg in args]
            return_aliases = set()
            for args_combination in product(*arg_aliases):
                return_aliases.update(
                    func.return_alias(args_combination))
            return set(map(expand_subscript, return_aliases))

        def expand_subscript(node):
            # container[i] where the container is known: alias to content.
            if isinstance(node, ast.Subscript):
                if isinstance(node.value, ContainerOf):
                    return node.value.containee
            return node

        def full_args(func, call):
            # Complete the actual arguments with the callee's defaults.
            args = call.args
            if isinstance(func, ast.FunctionDef):
                extra = len(func.args.args) - len(args)
                args = args + func.args.defaults[extra:]
            return args

        func = node.func
        aliases = set()
        if node.keywords:
            # too soon, we don't support keywords in interprocedural_aliases
            pass
        elif isinstance(func, ast.Attribute):
            _, signature = methods.get(func.attr,
                                       functions.get(func.attr,
                                                     [(None, None)])[0])
            if signature:
                args = full_args(signature, node)
                aliases = interprocedural_aliases(signature, args)
        elif isinstance(func, ast.Name):
            func_aliases = self.result[func].aliases
            for func_alias in func_aliases:
                if hasattr(func_alias, 'return_alias'):
                    args = full_args(func_alias, node)
                    aliases.update(interprocedural_aliases(func_alias, args))
                else:
                    pass  # better thing to do ?
        [self.add(a) for a in aliases if a not in self.result]
        return aliases or self.expand_unknown(node)

    def visit_Call(self, node):
        self.generic_visit(node)
        f = node.func
        # special handler for bind functions
        if isinstance(f, ast.Attribute) and f.attr == "partial":
            return self.add(node, {node})
        else:
            return_alias = self.call_return_alias(node)
            # expand collected aliases
            all_aliases = set()
            for value in return_alias:
                if isinstance(value, NewMem):
                    all_aliases.add(value)
                elif value in self.result:
                    all_aliases.update(self.result[value].aliases)
                else:
                    try:
                        ap = Aliases.access_path(value)
                        all_aliases.update(self.aliases.get(ap, ()))
                    except NotImplementedError:
                        # should we do something better here?
                        all_aliases.add(value)
            return self.add(node, all_aliases)

    visit_Num = visit_UnaryOp
    visit_Str = visit_UnaryOp

    def visit_Attribute(self, node):
        return self.add(node, {Aliases.access_path(node)})

    def visit_Subscript(self, node):
        if isinstance(node.slice, ast.Index):
            aliases = set()
            self.visit(node.slice)
            value_aliases = self.visit(node.value)
            for alias in value_aliases:
                if isinstance(alias, ContainerOf):
                    # Known container: only alias to the containee when
                    # the (constant) index can match.
                    if isinstance(node.slice.value, ast.Slice):
                        continue
                    if isinstance(node.slice.value, ast.Num):
                        if node.slice.value.n != alias.index:
                            continue
                    # FIXME: what if the index is a slice variable...
                    aliases.add(alias.containee)
                elif isinstance(getattr(alias, 'ctx', None), ast.Param):
                    aliases.add(ast.Subscript(alias, node.slice, node.ctx))
        else:
            # could be enhanced through better handling of containers
            aliases = None
            self.generic_visit(node)
        return self.add(node, aliases)

    def visit_Name(self, node):
        if node.id not in self.aliases:
            err = ("identifier {0} unknown, either because "
                   "it is an unsupported intrinsic, "
                   "the input code is faulty, "
                   "or... pythran is buggy.")
            raise PythranSyntaxError(err.format(node.id), node)
        return self.add(node, self.aliases[node.id].copy())

    def visit_List(self, node):
        # A literal container aliases to ContainerOf wrappers recording
        # the position of each element.
        if node.elts:
            elts_aliases = set()
            for i, elt in enumerate(node.elts):
                elt_aliases = self.visit(elt)
                elts_aliases.update(ContainerOf(i, alias)
                                    for alias in elt_aliases)
        else:
            elts_aliases = None
        return self.add(node, elts_aliases)

    visit_Tuple = visit_List

    def visit_comprehension(self, node):
        # The comprehension target is a fresh binding.
        self.aliases[node.target.id] = {node.target}
        self.generic_visit(node)

    def visit_ListComp(self, node):
        # NOTE: Python-2 map() runs the visits eagerly here.
        map(self.visit_comprehension, node.generators)
        self.visit(node.elt)
        return self.add(node)

    visit_SetComp = visit_ListComp
    visit_GeneratorExp = visit_ListComp

    def visit_DictComp(self, node):
        map(self.visit_comprehension, node.generators)
        self.visit(node.key)
        self.visit(node.value)
        return self.add(node)

    # aliasing created by statements
    def visit_FunctionDef(self, node):
        """
        Initialise aliasing default value before visiting.

        Add aliasing values for :
        - Pythonic
        - globals declarations
        - current function arguments
        """
        self.aliases = CopyOnWriteAliasesMap(IntrinsicAliases.items())
        self.aliases.update((f.name, {f})
                            for f in self.global_declarations.values())
        self.aliases.update((arg.id, {arg})
                            for arg in node.args.args)
        self.generic_visit(node)
        if Aliases.RetId in self.aliases:
            # multiple return alias not supported... yet!
            if len(self.aliases[Aliases.RetId]) == 1:
                ret_alias = next(iter(self.aliases[Aliases.RetId]))

                # parametrize the expression
                def parametrize(exp):
                    if isinstance(exp, ast.Index):
                        return lambda _: {exp}
                    elif isinstance(exp, ast.Name):
                        try:
                            # Returned expression is the w-th parameter:
                            # map it back to the caller's w-th argument.
                            w = node.args.args.index(exp)

                            def return_alias(args):
                                if w < len(args):
                                    return {args[w]}
                                else:
                                    return {node.args.defaults[w - len(args)]}
                            return return_alias
                        except ValueError:
                            return lambda _: {NewMem}
                    elif isinstance(exp, ast.Subscript):
                        values = parametrize(exp.value)
                        slices = parametrize(exp.slice)
                        return lambda args: {
                            ast.Subscript(value, slice, ast.Load())
                            for value in values(args)
                            for slice in slices(args)}
                    else:
                        return lambda _: {NewMem}
                node.return_alias = parametrize(ret_alias)

    def visit_Assign(self, node):
        md.visit(self, node)
        value_aliases = self.visit(node.value)
        for t in node.targets:
            if isinstance(t, ast.Name):
                self.aliases[t.id] = value_aliases or {t}
                for alias in list(value_aliases):
                    if isinstance(alias, ast.Name):
                        # aliasing is symmetric for plain names
                        self.aliases[alias.id].add(t)
                self.add(t, self.aliases[t.id].copy())
            else:
                self.visit(t)

    def visit_For(self, node):
        # FIXME: node.target.id could alias to the content of node.iter
        self.aliases[node.target.id] = {node.target}
        # Error may come from false branch evaluation so we have to try again
        try:
            self.generic_visit(node)
        except PythranSyntaxError:
            self.generic_visit(node)

    def visit_While(self, node):
        # Error may come from false branch evaluation so we have to try again
        try:
            self.generic_visit(node)
        except PythranSyntaxError:
            self.generic_visit(node)

    def visit_If(self, node):
        md.visit(self, node)
        self.visit(node.test)
        # Snapshot the state so the false branch starts from it too.
        false_aliases = {k: v.copy() for k, v in self.aliases.items()}
        try:  # first try the true branch
            map(self.visit, node.body)
            true_aliases, self.aliases = self.aliases, false_aliases
        except PythranSyntaxError:  # it failed, try the false branch
            map(self.visit, node.orelse)
            raise  # but still throw the exception, maybe we are in a For
        try:  # then try the false branch
            map(self.visit, node.orelse)
        except PythranSyntaxError:  # it failed
            # we still get some info from the true branch, validate them
            self.aliases = true_aliases
            raise  # and let other visit_ handle the issue
        # Merge both branches: a name may alias to either outcome.
        for k, v in true_aliases.items():
            if k in self.aliases:
                self.aliases[k].update(v)
            else:
                assert isinstance(v, set)
                self.aliases[k] = v

    def visit_ExceptHandler(self, node):
        # The bound exception name is a fresh binding.
        if node.name:
            self.aliases[node.name.id] = {node.name}
        self.generic_visit(node)
class StrictAliases(Aliases):
    """
    Gather aliasing informations across nodes,
    without adding unsure aliases.
    """

    def expand_unknown(self, node):
        # Fix: return an empty *set*, matching the base-class contract
        # (Aliases.expand_unknown returns a set); `{}` is an empty dict.
        return set()
| {
"repo_name": "pombredanne/pythran",
"path": "pythran/analyses/aliases.py",
"copies": "2",
"size": "14289",
"license": "bsd-3-clause",
"hash": 6622609306301754000,
"line_mean": 34.1081081081,
"line_max": 78,
"alpha_frac": 0.5474840787,
"autogenerated": false,
"ratio": 4.248884924174844,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5796369002874844,
"avg_score": null,
"num_lines": null
} |
""" Aliases gather aliasing informations. """
from pythran.analyses.global_declarations import GlobalDeclarations
from pythran.intrinsic import Intrinsic, Class, UnboundValue
from pythran.passmanager import ModuleAnalysis
from pythran.syntax import PythranSyntaxError
from pythran.tables import functions, methods, MODULES
from pythran.unparse import Unparser
from pythran.conversion import demangle
import pythran.metadata as md
from pythran.utils import isnum
import gast as ast
from copy import deepcopy
from itertools import product
import io
# Default alias set of every known intrinsic, filled by save_intrinsic_alias.
IntrinsicAliases = dict()
class ContainerOf(object):
    '''
    A container known to hold `containee' at position `index',
    where the index defaults to "unknown" (NaN).
    '''

    UnknownIndex = float('nan')

    __slots__ = 'index', 'containee'

    cache = {}

    def __new__(cls, *args, **kwargs):
        # Memoize instances: identical construction arguments yield the
        # very same object, so identity doubles as cheap hashing.
        key = tuple(args), tuple(kwargs.items())
        try:
            return ContainerOf.cache[key]
        except KeyError:
            obj = super(ContainerOf, cls).__new__(cls)
            ContainerOf.cache[key] = obj
            return obj

    def __init__(self, containee, index=UnknownIndex):
        self.containee = containee
        self.index = index
def save_intrinsic_alias(module):
    """ Recursively save default aliases for pythonic functions. """
    for entry in module.values():
        if isinstance(entry, dict):
            # Nested dict means a submodule: recurse into it.
            save_intrinsic_alias(entry)
        else:
            # By default an intrinsic aliases only to itself.
            IntrinsicAliases[entry] = frozenset((entry,))
            if isinstance(entry, Class):
                save_intrinsic_alias(entry.fields)
# Pre-compute the default alias set of every intrinsic known to pythran.
for module in MODULES.values():
    save_intrinsic_alias(module)
class Aliases(ModuleAnalysis):
'''
Gather aliasing informations across nodes
As a result, each node from the module is associated to a set of node or
Intrinsic to which it *may* alias to.
'''
RetId = '@'
def __init__(self):
self.result = dict()
self.aliases = None
ContainerOf.cache.clear()
super(Aliases, self).__init__(GlobalDeclarations)
@staticmethod
def dump(result, filter=None):
def pp(n):
output = io.StringIO()
Unparser(n, output)
return output.getvalue().strip()
if isinstance(result, dict):
for k, v in result.items():
if (filter is None) or isinstance(k, filter):
print('{} => {}'.format(pp(k), sorted(map(pp, v))))
elif isinstance(result, (frozenset, set)):
print(sorted(map(pp, result)))
def get_unbound_value_set(self):
return {UnboundValue}
@staticmethod
def access_path(node):
if isinstance(node, ast.Name):
return MODULES.get(demangle(node.id), node.id)
elif isinstance(node, ast.Attribute):
attr_key = demangle(node.attr)
value_dict = Aliases.access_path(node.value)
if attr_key not in value_dict:
raise PythranSyntaxError(
"Unsupported attribute '{}' for this object"
.format(attr_key),
node.value)
return value_dict[attr_key]
elif isinstance(node, ast.FunctionDef):
return node.name
else:
return node
# aliasing created by expressions
def add(self, node, values=None):
if values is None: # no given target for the alias
if isinstance(node, Intrinsic):
values = {node} # an Intrinsic always aliases to itself
else:
values = self.get_unbound_value_set()
self.result[node] = values
return values
def visit_BoolOp(self, node):
'''
Resulting node may alias to either operands:
>>> from pythran import passmanager
>>> pm = passmanager.PassManager('demo')
>>> module = ast.parse('def foo(a, b): return a or b')
>>> result = pm.gather(Aliases, module)
>>> Aliases.dump(result, filter=ast.BoolOp)
(a or b) => ['a', 'b']
Note that a literal does not create any alias
>>> module = ast.parse('def foo(a, b): return a or 0')
>>> result = pm.gather(Aliases, module)
>>> Aliases.dump(result, filter=ast.BoolOp)
(a or 0) => ['<unbound-value>', 'a']
'''
return self.add(node, set.union(*[self.visit(n) for n in node.values]))
def visit_UnaryOp(self, node):
'''
Resulting node does not alias to anything
>>> from pythran import passmanager
>>> pm = passmanager.PassManager('demo')
>>> module = ast.parse('def foo(a): return -a')
>>> result = pm.gather(Aliases, module)
>>> Aliases.dump(result, filter=ast.UnaryOp)
(- a) => ['<unbound-value>']
'''
self.generic_visit(node)
return self.add(node)
visit_BinOp = visit_UnaryOp
visit_Compare = visit_UnaryOp
def visit_IfExp(self, node):
'''
Resulting node alias to either branch
>>> from pythran import passmanager
>>> pm = passmanager.PassManager('demo')
>>> module = ast.parse('def foo(a, b, c): return a if c else b')
>>> result = pm.gather(Aliases, module)
>>> Aliases.dump(result, filter=ast.IfExp)
(a if c else b) => ['a', 'b']
'''
self.visit(node.test)
rec = [self.visit(n) for n in (node.body, node.orelse)]
return self.add(node, set.union(*rec))
def visit_Dict(self, node):
'''
A dict is abstracted as an unordered container of its values
>>> from pythran import passmanager
>>> pm = passmanager.PassManager('demo')
>>> module = ast.parse('def foo(a, b): return {0: a, 1: b}')
>>> result = pm.gather(Aliases, module)
>>> Aliases.dump(result, filter=ast.Dict)
{0: a, 1: b} => ['|a|', '|b|']
where the |id| notation means something that may contain ``id``.
'''
if node.keys:
elts_aliases = set()
for key, val in zip(node.keys, node.values):
self.visit(key) # res ignored, just to fill self.aliases
elt_aliases = self.visit(val)
elts_aliases.update(map(ContainerOf, elt_aliases))
else:
elts_aliases = None
return self.add(node, elts_aliases)
def visit_Set(self, node):
'''
A set is abstracted as an unordered container of its elements
>>> from pythran import passmanager
>>> pm = passmanager.PassManager('demo')
>>> module = ast.parse('def foo(a, b): return {a, b}')
>>> result = pm.gather(Aliases, module)
>>> Aliases.dump(result, filter=ast.Set)
{a, b} => ['|a|', '|b|']
where the |id| notation means something that may contain ``id``.
'''
if node.elts:
elts_aliases = {ContainerOf(alias)
for elt in node.elts
for alias in self.visit(elt)}
else:
elts_aliases = None
return self.add(node, elts_aliases)
def visit_Return(self, node):
'''
A side effect of computing aliases on a Return is that it updates the
``return_alias`` field of current function
>>> from pythran import passmanager
>>> pm = passmanager.PassManager('demo')
>>> module = ast.parse('def foo(a, b): return a')
>>> result = pm.gather(Aliases, module)
>>> module.body[0].return_alias # doctest: +ELLIPSIS
<function ...merge_return_aliases at...>
This field is a function that takes as many nodes as the function
argument count as input and returns an expression based on
these arguments if the function happens to create aliasing
between its input and output. In our case:
>>> f = module.body[0].return_alias
>>> Aliases.dump(f([ast.Name('A', ast.Load(), None, None),
... ast.Constant(1, None)]))
['A']
This also works if the relationship between input and output
is more complex:
>>> module = ast.parse('def foo(a, b): return a or b[0]')
>>> result = pm.gather(Aliases, module)
>>> f = module.body[0].return_alias
>>> List = ast.List([ast.Name('L0', ast.Load(), None, None)],
... ast.Load())
>>> Aliases.dump(f([ast.Name('B', ast.Load(), None, None), List]))
['B', '[L0][0]']
Which actually means that when called with two arguments ``B`` and
the single-element list ``[L[0]]``, ``foo`` may returns either the
first argument, or the first element of the second argument.
'''
if not node.value:
return
ret_aliases = self.visit(node.value)
if Aliases.RetId in self.aliases:
ret_aliases = ret_aliases.union(self.aliases[Aliases.RetId])
self.aliases[Aliases.RetId] = ret_aliases
def call_return_alias(self, node):
def interprocedural_aliases(func, args):
arg_aliases = [self.result[arg] or {arg} for arg in args]
return_aliases = set()
for args_combination in product(*arg_aliases):
return_aliases.update(
func.return_alias(args_combination))
return {expand_subscript(ra) for ra in return_aliases}
def expand_subscript(node):
if isinstance(node, ast.Subscript):
if isinstance(node.value, ContainerOf):
return node.value.containee
return node
def full_args(func, call):
args = call.args
if isinstance(func, ast.FunctionDef):
extra = len(func.args.args) - len(args)
if extra:
tail = [deepcopy(n) for n in func.args.defaults[extra:]]
for arg in tail:
self.visit(arg)
args = args + tail
return args
func = node.func
aliases = set()
if node.keywords:
# too soon, we don't support keywords in interprocedural_aliases
pass
elif isinstance(func, ast.Attribute):
_, signature = methods.get(func.attr,
functions.get(func.attr,
[(None, None)])[0])
if signature:
args = full_args(signature, node)
aliases = interprocedural_aliases(signature, args)
elif isinstance(func, ast.Name):
func_aliases = self.result[func]
for func_alias in func_aliases:
if hasattr(func_alias, 'return_alias'):
args = full_args(func_alias, node)
aliases.update(interprocedural_aliases(func_alias, args))
else:
pass # better thing to do ?
[self.add(a) for a in aliases if a not in self.result]
return aliases or self.get_unbound_value_set()
def visit_Call(self, node):
'''
Resulting node alias to the return_alias of called function,
if the function is already known by Pythran (i.e. it's an Intrinsic)
or if Pythran already computed it's ``return_alias`` behavior.
>>> from pythran import passmanager
>>> pm = passmanager.PassManager('demo')
>>> fun = """
... def f(a): return a
... def foo(b): c = f(b)"""
>>> module = ast.parse(fun)
The ``f`` function create aliasing between
the returned value and its first argument.
>>> result = pm.gather(Aliases, module)
>>> Aliases.dump(result, filter=ast.Call)
f(b) => ['b']
This also works with intrinsics, e.g ``dict.setdefault`` which
may create alias between its third argument and the return value.
>>> fun = 'def foo(a, d): builtins.dict.setdefault(d, 0, a)'
>>> module = ast.parse(fun)
>>> result = pm.gather(Aliases, module)
>>> Aliases.dump(result, filter=ast.Call)
builtins.dict.setdefault(d, 0, a) => ['<unbound-value>', 'a']
Note that complex cases can arise, when one of the formal parameter
is already known to alias to various values:
>>> fun = """
... def f(a, b): return a and b
... def foo(A, B, C, D): return f(A or B, C or D)"""
>>> module = ast.parse(fun)
>>> result = pm.gather(Aliases, module)
>>> Aliases.dump(result, filter=ast.Call)
f((A or B), (C or D)) => ['A', 'B', 'C', 'D']
'''
self.generic_visit(node)
f = node.func
# special handler for bind functions
if isinstance(f, ast.Attribute) and f.attr == "partial":
return self.add(node, {node})
else:
return_alias = self.call_return_alias(node)
# expand collected aliases
all_aliases = set()
for value in return_alias:
# no translation
if isinstance(value, (ContainerOf, ast.FunctionDef,
Intrinsic)):
all_aliases.add(value)
elif value in self.result:
all_aliases.update(self.result[value])
else:
try:
ap = Aliases.access_path(value)
all_aliases.update(self.aliases.get(ap, ()))
except NotImplementedError:
# should we do something better here?
all_aliases.add(value)
return self.add(node, all_aliases)
visit_Constant = visit_UnaryOp
def visit_Attribute(self, node):
return self.add(node, {Aliases.access_path(node)})
def visit_Subscript(self, node):
    '''
    Resulting node alias stores the subscript relationship if we don't know
    anything about the subscripted node.

    >>> from pythran import passmanager
    >>> pm = passmanager.PassManager('demo')
    >>> module = ast.parse('def foo(a): return a[0]')
    >>> result = pm.gather(Aliases, module)
    >>> Aliases.dump(result, filter=ast.Subscript)
    a[0] => ['a[0]']

    If we know something about the container, e.g. in case of a list, we
    can use this information to get more accurate informations:

    >>> module = ast.parse('def foo(a, b, c): return [a, b][c]')
    >>> result = pm.gather(Aliases, module)
    >>> Aliases.dump(result, filter=ast.Subscript)
    [a, b][c] => ['a', 'b']

    Moreover, in case of a tuple indexed by a constant value, we can
    further refine the aliasing information:

    >>> fun = """
    ... def f(a, b): return a, b
    ... def foo(a, b): return f(a, b)[0]"""
    >>> module = ast.parse(fun)
    >>> result = pm.gather(Aliases, module)
    >>> Aliases.dump(result, filter=ast.Subscript)
    f(a, b)[0] => ['a']

    Nothing is done for slices, even if the indices are known :-/

    >>> module = ast.parse('def foo(a, b, c): return [a, b, c][1:]')
    >>> result = pm.gather(Aliases, module)
    >>> Aliases.dump(result, filter=ast.Subscript)
    [a, b, c][1:] => ['<unbound-value>']
    '''
    if isinstance(node.slice, ast.Tuple):
        # multi-dimensional subscript: give up on precision, just walk
        # could be enhanced through better handling of containers
        self.visit(node.value)
        for elt in node.slice.elts:
            self.visit(elt)
        aliases = None
    else:
        aliases = set()
        self.visit(node.slice)
        value_aliases = self.visit(node.value)
        for alias in value_aliases:
            if isinstance(alias, ContainerOf):
                # a known container: a constant index either hits or
                # misses the recorded slot
                if isinstance(node.slice, ast.Slice):
                    continue
                if isnum(node.slice):
                    if node.slice.value != alias.index:
                        continue
                # FIXME: what if the index is a slice variable...
                aliases.add(alias.containee)
            elif isinstance(getattr(alias, 'ctx', None), (ast.Param,
                                                          ast.Store)):
                # bound name: keep the subscript relationship symbolically
                aliases.add(ast.Subscript(alias, node.slice, node.ctx))
    # an empty alias set conveys no information: normalize to None
    if not aliases:
        aliases = None
    return self.add(node, aliases)
def visit_OMPDirective(self, node):
    """OpenMP directives may introduce new variables: register each one."""
    for introduced in node.deps:
        self.add(introduced)
def visit_Name(self, node):
    """Resolve a name through the current alias table, failing loudly."""
    try:
        known = self.aliases[node.id]
    except KeyError:
        err = ("identifier {0} unknown, either because "
               "it is an unsupported intrinsic, "
               "the input code is faulty, "
               "or... pythran is buggy.")
        raise PythranSyntaxError(err.format(node.id), node)
    return self.add(node, known)
def visit_Tuple(self, node):
    '''
    A tuple is abstracted as an ordered container of its values

    >>> from pythran import passmanager
    >>> pm = passmanager.PassManager('demo')
    >>> module = ast.parse('def foo(a, b): return a, b')
    >>> result = pm.gather(Aliases, module)
    >>> Aliases.dump(result, filter=ast.Tuple)
    (a, b) => ['|[0]=a|', '|[1]=b|']

    where the |[i]=id| notation means something that
    may contain ``id`` at index ``i``.
    '''
    # An empty tuple carries no aliasing information at all.
    if not node.elts:
        return self.add(node, None)
    packed = set()
    for index, element in enumerate(node.elts):
        for element_alias in self.visit(element):
            packed.add(ContainerOf(element_alias, index))
    return self.add(node, packed)
# List literals are handled exactly like set literals.
visit_List = visit_Set
def visit_comprehension(self, node):
    # The comprehension target starts as its own, fresh alias root.
    self.aliases[node.target.id] = {node.target}
    self.generic_visit(node)
def visit_ListComp(self, node):
    '''
    A comprehension is not abstracted in any way

    >>> from pythran import passmanager
    >>> pm = passmanager.PassManager('demo')
    >>> module = ast.parse('def foo(a, b): return [a for i in b]')
    >>> result = pm.gather(Aliases, module)
    >>> Aliases.dump(result, filter=ast.ListComp)
    [a for i in b] => ['<unbound-value>']
    '''
    # Register the generator targets first so the element expression
    # can resolve them.
    for generator in node.generators:
        self.visit_comprehension(generator)
    self.visit(node.elt)
    return self.add(node)

# Set and generator comprehensions behave identically here.
visit_SetComp = visit_ListComp
visit_GeneratorExp = visit_ListComp
def visit_DictComp(self, node):
    '''
    A comprehension is not abstracted in any way

    >>> from pythran import passmanager
    >>> pm = passmanager.PassManager('demo')
    >>> module = ast.parse('def foo(a, b): return {i: i for i in b}')
    >>> result = pm.gather(Aliases, module)
    >>> Aliases.dump(result, filter=ast.DictComp)
    {i: i for i in b} => ['<unbound-value>']
    '''
    # Generators first, so key/value expressions can resolve the targets.
    for generator in node.generators:
        self.visit_comprehension(generator)
    for component in (node.key, node.value):
        self.visit(component)
    return self.add(node)
# aliasing created by statements

def visit_FunctionDef(self, node):
    '''
    Initialise aliasing default value before visiting.

    Add aliasing values for :
    - Pythonic
    - globals declarations
    - current function arguments
    '''
    # Fresh table per function: intrinsics, then globals, then formals.
    self.aliases = IntrinsicAliases.copy()
    self.aliases.update((k, {v})
                        for k, v in self.global_declarations.items())
    self.aliases.update((arg.id, {arg})
                        for arg in node.args.args)
    self.generic_visit(node)
    if Aliases.RetId in self.aliases:
        # parametrize the expression
        def parametrize(exp):
            # constant or global -> no change
            if isinstance(exp, (ast.Constant, Intrinsic, ast.FunctionDef)):
                return lambda _: {exp}
            elif isinstance(exp, ContainerOf):
                # recurse on the containee, preserving the index
                pcontainee = parametrize(exp.containee)
                index = exp.index
                return lambda args: {
                    ContainerOf(pc, index)
                    for pc in pcontainee(args)
                }
            elif isinstance(exp, ast.Name):
                try:
                    # formal parameter: substitute the matching actual
                    # argument, or the default when the call omits it
                    w = node.args.args.index(exp)

                    def return_alias(args):
                        if w < len(args):
                            return {args[w]}
                        else:
                            return {node.args.defaults[w - len(args)]}
                    return return_alias
                except ValueError:
                    return lambda _: self.get_unbound_value_set()
            elif isinstance(exp, ast.Subscript):
                values = parametrize(exp.value)
                slices = parametrize(exp.slice)
                # cartesian product of the possible values and slices
                return lambda args: {
                    ast.Subscript(value, slice, ast.Load())
                    for value in values(args)
                    for slice in slices(args)}
            else:
                return lambda _: self.get_unbound_value_set()
        # this is a little tricky: for each returned alias,
        # parametrize builds a function that, given a list of args,
        # returns the alias
        # then as we may have multiple returned alias, we compute the union
        # of these returned aliases
        return_aliases = [parametrize(ret_alias)
                          for ret_alias
                          in self.aliases[Aliases.RetId]]

        def merge_return_aliases(args):
            return {ra
                    for return_alias in return_aliases
                    for ra in return_alias(args)}
        node.return_alias = merge_return_aliases
def visit_Assign(self, node):
    r'''
    Assignment creates aliasing between lhs and rhs

    >>> from pythran import passmanager
    >>> pm = passmanager.PassManager('demo')
    >>> module = ast.parse('def foo(a): c = a ; d = e = c ; {c, d, e}')
    >>> result = pm.gather(Aliases, module)
    >>> Aliases.dump(result, filter=ast.Set)
    {c, d, e} => ['|a|']

    Everyone points to the formal parameter 'a' \o/
    '''
    md.visit(self, node)
    value_aliases = self.visit(node.value)
    for t in node.targets:
        if isinstance(t, ast.Name):
            # the target aliases the rhs values, or itself when the rhs
            # aliases to nothing
            self.aliases[t.id] = set(value_aliases) or {t}
            for alias in list(value_aliases):
                if isinstance(alias, ast.Name):
                    # symmetric update: the rhs name also aliases the target
                    a_id = alias.id
                    self.aliases[a_id] = self.aliases[a_id].union((t,))
            self.add(t, self.aliases[t.id])
        else:
            # subscript / attribute targets: just walk them
            self.visit(t)
def visit_For(self, node):
    '''
    For loop creates aliasing between the target
    and the content of the iterator

    >>> from pythran import passmanager
    >>> pm = passmanager.PassManager('demo')
    >>> module = ast.parse("""
    ... def foo(a):
    ...     for i in a:
    ...         {i}""")
    >>> result = pm.gather(Aliases, module)
    >>> Aliases.dump(result, filter=ast.Set)
    {i} => ['|i|']

    Not very useful, unless we know something about the iterated container

    >>> module = ast.parse("""
    ... def foo(a, b):
    ...     for i in [a, b]:
    ...         {i}""")
    >>> result = pm.gather(Aliases, module)
    >>> Aliases.dump(result, filter=ast.Set)
    {i} => ['|a|', '|b|']
    '''
    iter_aliases = self.visit(node.iter)
    if all(isinstance(x, ContainerOf) for x in iter_aliases):
        # the iterated object is a known container: the loop target
        # aliases its recorded elements
        target_aliases = {iter_alias.containee for iter_alias in
                          iter_aliases}
    else:
        target_aliases = {node.target}
    self.add(node.target, target_aliases)
    self.aliases[node.target.id] = self.result[node.target]
    # NOTE(review): the body is deliberately visited twice -- presumably
    # so aliases created late in the body reach earlier statements, the
    # same trick visit_While documents; confirm before changing.
    self.generic_visit(node)
    self.generic_visit(node)
def visit_While(self, node):
    '''
    While statement evaluation is somehow equivalent to the evaluation of a
    sequence, except the fact that in some subtle cases, the first rounds
    of analyse fails because we do not follow the regular execution order

    >>> from pythran import passmanager
    >>> pm = passmanager.PassManager('demo')
    >>> fun = """
    ... def foo(a):
    ...     while(a):
    ...         if a == 1: builtins.print(b)
    ...         else: b = a"""
    >>> module = ast.parse(fun)
    >>> result = pm.gather(Aliases, module)
    '''
    # Two passes: the second run sees names bound late in the body
    # (cf. docstring), mimicking a subsequent loop iteration.
    self.generic_visit(node)
    self.generic_visit(node)
def visit_If(self, node):
    '''
    After an if statement, the values from both branches are merged,
    potentially creating more aliasing:

    >>> from pythran import passmanager
    >>> pm = passmanager.PassManager('demo')
    >>> fun = """
    ... def foo(a, b):
    ...     if a: c=a
    ...     else: c=b
    ...     return {c}"""
    >>> module = ast.parse(fun)
    >>> result = pm.gather(Aliases, module)
    >>> Aliases.dump(result, filter=ast.Set)
    {c} => ['|a|', '|b|']
    '''
    md.visit(self, node)
    self.visit(node.test)
    true_aliases = false_aliases = None
    # first try the true branch
    try:
        tmp = self.aliases.copy()
        for stmt in node.body:
            self.visit(stmt)
        true_aliases = self.aliases
        # restore the pre-branch table before walking the else branch
        self.aliases = tmp
    except PythranSyntaxError:
        pass
    # then try the false branch
    try:
        for stmt in node.orelse:
            self.visit(stmt)
        false_aliases = self.aliases
    except PythranSyntaxError:
        pass
    if true_aliases and not false_aliases:
        # the else branch failed on its own (e.g. it reads a name bound
        # only in the body): retry it starting from the body's state
        self.aliases = true_aliases
        for stmt in node.orelse:
            self.visit(stmt)
        false_aliases = self.aliases
    if false_aliases and not true_aliases:
        # symmetric retry for the body
        self.aliases = false_aliases
        for stmt in node.body:
            self.visit(stmt)
        true_aliases = self.aliases
    # merge the results from true and false branches
    if false_aliases and true_aliases:
        for k, v in true_aliases.items():
            if k in self.aliases:
                self.aliases[k] = self.aliases[k].union(v)
            else:
                assert isinstance(v, (frozenset, set))
                self.aliases[k] = v
    elif true_aliases:
        self.aliases = true_aliases
def visit_ExceptHandler(self, node):
    # The bound exception name aliases only to itself.
    # (``name`` is a node here, hence the ``.id`` access.)
    if node.name:
        self.aliases[node.name.id] = {node.name}
    self.generic_visit(node)
class StrictAliases(Aliases):
    """
    Gather aliasing informations across nodes,
    without adding unsure aliases.
    """

    def get_unbound_value_set(self):
        # strict mode: an unknown value aliases to nothing at all
        return set()
| {
"repo_name": "serge-sans-paille/pythran",
"path": "pythran/analyses/aliases.py",
"copies": "1",
"size": "27680",
"license": "bsd-3-clause",
"hash": -8463511336772986000,
"line_mean": 35.135770235,
"line_max": 79,
"alpha_frac": 0.5363078035,
"autogenerated": false,
"ratio": 4.141232794733693,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 766
} |
# Nickname/meme -> canonical value lookup tables, keyed by argument type.
# Consumed by _parse_argument() below to normalise user-supplied tokens.
ALIASES = {
    'name': {
        'honk': 'Kousaka Honoka',
        'bird': 'Minami Kotori',
        'birb': 'Minami Kotori',
        'eri': 'Ayase Eli',
        'harasho': 'Ayase Eli',
        'yohane': 'Tsushima Yoshiko',
        'datenshi': 'Tsushima Yoshiko',
        'hana': 'Koizumi Hanayo',
        'pana': 'Koizumi Hanayo',
        'tomato': "Nishikino Maki",
        'knuckles': "Nishikino Maki",
        'zura': 'Kunikida Hanamaru',
        'maru': 'Kunikida Hanamaru',
        'zuramaru': 'Kunikida Hanamaru',
        'technology': 'Kunikida Hanamaru',
        'woah': 'Kunikida Hanamaru',
        'mike': 'Kurosawa Ruby',  # Memes
        'nell': "Yazawa Nico",
        'nya': 'Hoshizora Rin',
        'shiny': 'Ohara Mari',
        'shiny~!': 'Ohara Mari',
        'lili': 'Sakurauchi Riko',
        'riri': 'Sakurauchi Riko',
        'its': 'Ohara Mari',
        'joke': 'Ohara Mari',
        'buu': 'Kurosawa Dia',
        'kayo-chin': 'Koizumi Hanayo',
        'yousoro': 'Watanabe You',
        'rice': 'Koizumi Hanayo',
        'mirai': 'Kunikida Hanamaru',
        'future': 'Kunikida Hanamaru',
        'mikan': 'Takami Chika',
        'я': 'Ayase Eli',
        'wooby': 'Kurosawa Ruby',
        'ganba': 'Kurosawa Ruby',
        'ganbaruby': 'Kurosawa Ruby',
        'faito': 'Kousaka Honoka',
        'bread': 'Kousaka Honoka',
        'minalinsky': 'Minami Kotori'
    },
    'main_unit': {
        'muse': "μ's",
        "μ's": "μ's",
        "µ's": "μ's",  # micro-sign spelling normalised to mu
        'aqours': 'Aqours',
        'aquas': 'Aqours',
        'aqua': 'Aqours',
        'aquors': 'Aqours',
        'a-rise': 'A-RISE',
        'arise': 'A-RISE',
        'saint': 'Saint Snow',
        'snow': 'Saint Snow'
    },
    'sub_unit': {
        'lily': 'Lily White',
        'white': 'Lily White',
        'bibi': 'Bibi',
        'printemps': 'Printemps',
        'guilty': 'Guilty Kiss',
        'kiss': 'Guilty Kiss',
        'azalea': 'AZALEA',
        'cyaron': 'CYaRon!',
        'cyaron!': 'CYaRon!',
        'crayon': 'CYaRon!',
        'crayon!': 'CYaRon!'
    }
}
def parse_arguments(bot, args: tuple,
                    allow_unsupported_lists: bool = False) -> dict:
    """
    Parse all user arguments.

    :param bot: Bot instance providing ``idol_names``.
    :param args: Tuple of all arguments.
    :param allow_unsupported_lists: Whether parameters that School Idol
        Tomodachi does not allow multiple values of are reduced.

    :return: A dict mapping each argument type to a list of values
        (the original docstring wrongly said "a list of tuples").
    """
    parsed_args = {
        'name': [],
        'main_unit': [],
        'sub_unit': [],
        'year': [],
        'attribute': [],
        'rarity': []
    }
    for arg in args:
        for arg_type, arg_value in _parse_argument(bot, arg):
            parsed_args[arg_type].append(arg_value)
    # Convert all values to sets and back to lists to remove duplicates.
    for arg_type in parsed_args:
        parsed_args[arg_type] = list(set(parsed_args[arg_type]))
    # Remove multiple values from fields that do not support them.
    if not allow_unsupported_lists:
        for key in ('sub_unit', 'attribute', 'year'):
            parsed_args[key] = parsed_args[key][:1]
    return parsed_args
def _parse_argument(bot, arg: str) -> list:
    """
    Parse a single user argument.

    :param arg: An argument.
    :return: List of tuples of (arg_type, arg_value)
    """
    arg = arg.lower()
    # Alias tables win: the first table containing the token decides.
    for key, table in ALIASES.items():
        canonical = table.get(arg, None)
        if canonical:
            return [(key, canonical)]
    # A given/family name may match several idols at once.
    name_matches = [('name', full_name)
                    for full_name in bot.idol_names
                    if arg.title() in full_name.split(' ')]
    if name_matches:
        return name_matches
    # School years.
    if arg in ('first', 'second', 'third'):
        return [('year', arg.title())]
    # Card attributes.
    if arg in ('cool', 'smile', 'pure'):
        return [('attribute', arg.title())]
    if arg in (':heart:', 'purple'):
        return [('attribute', 'All')]
    # Rarities.
    if arg.upper() in ('N', 'R', 'SR', 'SSR', 'UR'):
        return [('rarity', arg.upper())]
    return []
| {
"repo_name": "DamourYouKnow/HAHA-NO-UR",
"path": "core/argument_parser.py",
"copies": "1",
"size": "4227",
"license": "mit",
"hash": -5607456562393811000,
"line_mean": 27.7142857143,
"line_max": 71,
"alpha_frac": 0.5188343994,
"autogenerated": false,
"ratio": 3.015,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40338343994,
"avg_score": null,
"num_lines": null
} |
"""aliases.txt support."""
import re
import util
from errors import FileError
def load_aliases(filename):
    """
    Loads aliases.txt-formatted file and returns a dict.

    Accepted formats:
        aliasname,keystrokes (QF1.x style)
        aliasname:keystrokes (QF2.x style)

    Raises FileError when the file cannot be opened or decoded.
    """
    aliases = {}
    # load the file contents
    try:
        with open(filename) as f:
            data = f.read()
    except (IOError, OSError, UnicodeDecodeError):
        # was a bare ``except:`` which also swallowed KeyboardInterrupt,
        # SystemExit and programming errors; keep the FileError contract
        # but only for actual I/O or decoding failures
        raise FileError("Could not open aliases file " + filename)
    data = util.convert_line_endings(data)
    lines = data.split('\n')
    # strip out comment and empty lines
    lines = [line for line in lines if line != '' and line[0] != '#']
    # break into {aliasname: keystrokes} pairs
    for line in lines:
        match = re.match(r'([\w\d]+)(,|:) *(.+)\s*\n*', line)
        if match is not None:
            aliases[match.group(1)] = match.group(3)
    return aliases
def apply_aliases(layers, aliases):
    """
    Applies aliases:dict(aliasname, keystrokes) to layers:[FileLayer].

    Every cell in every layer will be replaced with 'keystrokes' if
    it exactly matches 'aliasname' or 'aliasname(#x#)' formats.

    Currently there is no support for having multiple aliases in a
    single cell or mixing aliases with regular keystrokes in a cell.

    Returns the (mutated in place) list of layers.
    """
    # sort the aliases longest-first so longer aliases match first.
    # BUG FIX: ``aliases.keys()`` returns a view in Python 3 which has
    # no ``.sort()``; build a sorted list instead (works on 2 and 3).
    keys = sorted(aliases, key=len, reverse=True)
    for layer in layers:
        for r, row in enumerate(layer.rows):
            for c, cell in enumerate(row):
                for alias in keys:
                    if cell == alias:  # exact alias match
                        layer.rows[r][c] = aliases[alias]
                        break
                    testlen = len(alias) + 1
                    if cell.startswith(alias + '('):  # alias(#x#) match
                        # keep the '(#x#)' suffix, swap only the alias part
                        layer.rows[r][c] = aliases[alias] + cell[testlen - 1:]
                        break
    return layers
| {
"repo_name": "ev1l0rd/yalnpv",
"path": "quickfort/src/qfconvert/aliases.py",
"copies": "3",
"size": "2056",
"license": "mit",
"hash": -7163744323356857000,
"line_mean": 28.6865671642,
"line_max": 78,
"alpha_frac": 0.5578793774,
"autogenerated": false,
"ratio": 4.047244094488189,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6105123471888189,
"avg_score": null,
"num_lines": null
} |
"""Alias for module blend.
Deprecated module. Original name for module blend.py. Was changed in 0.2.8.
"""
from __future__ import print_function, division, absolute_import
import imgaug as ia
from . import blend
# Shared hint appended to every deprecation warning emitted below.
_DEPRECATION_COMMENT = (
    "It has the same interface, except that the parameter "
    "`first` was renamed to `foreground` and the parameter "
    "`second` to `background`."
)
@ia.deprecated(alt_func="imgaug.augmenters.blend.blend_alpha()",
               comment=_DEPRECATION_COMMENT)
def blend_alpha(*args, **kwargs):
    """See :func:`~imgaug.augmenters.blend.blend_alpha`."""
    # pylint: disable=invalid-name
    # thin forwarder kept only for backward compatibility
    return blend.blend_alpha(*args, **kwargs)
@ia.deprecated(alt_func="imgaug.augmenters.blend.BlendAlpha",
               comment=_DEPRECATION_COMMENT)
def Alpha(*args, **kwargs):
    """See :class:`~imgaug.augmenters.blend.BlendAlpha`."""
    # pylint: disable=invalid-name
    # thin forwarder kept only for backward compatibility
    return blend.Alpha(*args, **kwargs)
@ia.deprecated(alt_func="imgaug.augmenters.blend.BlendAlphaElementwise",
               comment=_DEPRECATION_COMMENT)
def AlphaElementwise(*args, **kwargs):
    """See :class:`~imgaug.augmenters.blend.BlendAlphaElementwise`."""
    # pylint: disable=invalid-name
    # thin forwarder kept only for backward compatibility
    return blend.AlphaElementwise(*args, **kwargs)
@ia.deprecated(alt_func="imgaug.augmenters.blend.BlendAlphaSimplexNoise",
               comment=_DEPRECATION_COMMENT)
def SimplexNoiseAlpha(*args, **kwargs):
    """See :class:`~imgaug.augmenters.blend.BlendAlphaSimplexNoise`."""
    # pylint: disable=invalid-name
    # thin forwarder kept only for backward compatibility
    return blend.SimplexNoiseAlpha(*args, **kwargs)
@ia.deprecated(alt_func="imgaug.augmenters.blend.BlendAlphaFrequencyNoise",
               comment=_DEPRECATION_COMMENT)
def FrequencyNoiseAlpha(*args, **kwargs):
    """See :class:`~imgaug.augmenters.blend.BlendAlphaFrequencyNoise`."""
    # pylint: disable=invalid-name
    # thin forwarder kept only for backward compatibility
    return blend.FrequencyNoiseAlpha(*args, **kwargs)
| {
"repo_name": "aleju/ImageAugmenter",
"path": "imgaug/augmenters/overlay.py",
"copies": "2",
"size": "1920",
"license": "mit",
"hash": -6990266061294369000,
"line_mean": 33.2857142857,
"line_max": 75,
"alpha_frac": 0.7020833333,
"autogenerated": false,
"ratio": 3.4972677595628414,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5199351092862842,
"avg_score": null,
"num_lines": null
} |
# argparse description text for parse_args(); ``{script}`` is filled in
# with this file's absolute path at parse time.
alias_help = """
Dynamic alias creator for system Windows.
Features:
- execute program in fork, or in current command line
- custom static invoke arguments
Requirements:
- python
- folder with script in system PATH
EXAMPLES:
1) Register this script as new alias with name 'alias':
python {script} add alias python {script}
2) Register notepad with alias 'n':
python {script} add n notepad --fork
If you already registered this script as an 'alias' you can use:
alias add n notepad --fork
Now in any place you can just type:
n text.txt
And it will work!
Please note that --fork is important in this case.
It will allow to invoke notepad and do not block console.
In most cases this is useful for GUI applications.
"""
import argparse
import os
import sys
import textwrap
handlers = dict()  # command name -> handler function, filled by init_map()
script_file = os.path.realpath(__file__)
script_dir = os.path.dirname(script_file)
# upper-case the drive letter so comparisons against PATH entries match
drive_letter_index = script_dir.find(":")
script_dir = script_dir[:drive_letter_index].upper() + script_dir[drive_letter_index:]
def init_map():
    """Populate the command -> handler dispatch table."""
    handlers.update({
        "install": handle_install,
        "add": handle_add,
        "list": handle_list,
        "del": handle_rem,
        "rem": handle_rem,
        "get": handle_get,
    })
def parse_args():
    """Build the argparse CLI and return the parsed arguments as a dict."""
    parser = argparse.ArgumentParser(prog='Alias',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent(alias_help.format(script=script_file)),
        epilog="More info at http://github.com/asRIA/alias")
    subparsers = parser.add_subparsers(dest='command')
    subparsers.add_parser('install', help='Makes alias command visible globally')
    subparsers.add_parser('list', help='List current registered aliases list')
    # 'rem' and 'del' are synonyms; both remove an alias
    parser_rem = subparsers.add_parser('rem', help='Remove alias with given name')
    parser_rem.add_argument('alias', nargs=1, help='Alias name')
    parser_del = subparsers.add_parser('del', help='Remove alias with given name')
    parser_del.add_argument('alias', nargs=1, help='Alias name')
    parser_get = subparsers.add_parser('get', help='Print definition for alias')
    parser_get.add_argument('alias', nargs=1, help='')
    parser_add = subparsers.add_parser('add', help='Add new alias')
    parser_add.add_argument('alias', nargs=1, help='Alias name')
    parser_add.add_argument('path', nargs=1, help='Path to executable file')
    parser_add.add_argument('args', nargs="*", help='Custom executable arguments')
    parser_add.add_argument('--fork',
                            action='store_true',
                            help="Run alias in fork of session (Useful for GUI applications).")
    parser_add.add_argument('--force', action='store_true', help='Override alias if exists')
    return vars(parser.parse_args())
def exists_alias(script_name):
    """Return True when the alias .bat file already exists on disk."""
    return os.path.isfile(script_name)
def get_script_name(alias):
    """Return the full path of the .bat file backing *alias*."""
    return os.path.join(script_dir, alias + ".bat")
def wrap_path(path):
    """Quote *path* when it contains a space so cmd.exe keeps it as one token."""
    if " " in path:
        return '"' + path + '"'
    return path
def handle_add(options):
    """
    Create a new alias .bat file from parsed CLI options.

    :param options: dict with 'alias', 'path', 'args', 'fork', 'force'.
    :return: 1 when the alias already exists and --force was not given.
    """
    alias = options["alias"][0]
    path = wrap_path(options["path"][0])
    args = options["args"]
    fork_mode = options["fork"]
    alias_filename = get_script_name(alias)
    if not options["force"] and exists_alias(alias_filename):
        print("Alias '%s' already exists." % alias)
        return 1
    content_header = "@echo off\n"
    content_args = ""
    if fork_mode:
        # ``start`` detaches the process so the console is not blocked
        content_command_template = "start \"\" {path}{args} %*"
    else:
        content_command_template = "call {path}{args} %*"
    for arg in args:
        arg = wrap_path(arg)
        content_args += " {arg}".format(arg=arg)
    content = content_header + content_command_template.format(path=path, args=content_args)
    # context manager: the old open/write/close leaked the handle on errors
    with open(alias_filename, "w") as alias_file:
        alias_file.write(content)
    print("'{alias}' has been added in {mode} mode".format(alias=alias, mode="fork" if fork_mode else "normal"))
def handle_list(options):
    """
    Print every registered alias (every .bat file in the scripts folder).

    :param options: parsed CLI options (unused).
    """
    files = os.listdir(script_dir)
    aliases = ""
    aliases_count = 0
    for file in files:
        file_path = os.path.join(script_dir, file)
        if os.path.isfile(file_path):
            # BUG FIX: the old rfind(".") arithmetic mangled dot-less
            # names (e.g. a file literally named "bat" was listed as
            # alias "ba"); os.path.splitext handles that case correctly
            file_name, extension = os.path.splitext(file)
            extension = extension[1:].lower()
            if extension == "bat":
                aliases_count += 1
                aliases += "- " + file_name + "\n"
    if aliases_count > 0:
        print("Found %d registered aliases:" % aliases_count)
        print(aliases)
    else:
        print("There is no registered aliases")
def handle_get(options):
    """Print the .bat content backing an alias; 0 on success, 1 if missing."""
    alias = options["alias"][0]
    alias_filename = get_script_name(alias)
    if not exists_alias(alias_filename):
        print("'%s' doesn't exist" % alias)
        return 1
    # context manager: the old code never closed the file handle
    with open(alias_filename, "r") as alias_file:
        print(alias_file.read())
    return 0
def handle_rem(options):
    """Delete the .bat file backing an alias; 0 on success, 1 if missing."""
    target = options["alias"][0]
    script_path = get_script_name(target)
    if not exists_alias(script_path):
        print("'%s' doesn't exist" % target)
        return 1
    os.remove(script_path)
    print("'%s' has been removed" % target)
    return 0
def handle_install(options):
    """
    Add the scripts folder to the user PATH and self-register as 'alias'.

    :param options: parsed CLI options (unused).
    """
    # BUG FIX: ``subprocess`` was used here but never imported anywhere
    # in the module, so this handler always crashed with a NameError.
    import subprocess
    if not check_integration():
        subprocess.Popen('setx PATH "%PATH%;{path}"'.format(path=script_dir), shell=True).communicate()
    subprocess.call([sys.executable, script_file, "add", "alias", sys.executable, script_file], shell=True)
def check_integration():
    """Return True when the scripts folder is already on the user PATH."""
    # ";" is the Windows PATH separator (this tool is Windows-only)
    paths = os.environ["PATH"].split(";")
    script_dir_formatted = script_dir
    if script_dir_formatted not in paths:
        print("Aliases dir is not registered in system PATH, please modify user env variables by adding:")
        print(script_dir_formatted)
        return False
    return True
if __name__ == '__main__':
    init_map()
    args = parse_args()
    # warn (but do not abort) when the scripts dir is missing from PATH
    check_integration()
    if args["command"] in handlers:
        errcode = handlers[args["command"]](args)
    else:
        print('Missing command, please run with -h for help')
        errcode = 1
    exit(errcode)
| {
"repo_name": "asRIA/alias",
"path": "alias.py",
"copies": "1",
"size": "6270",
"license": "mit",
"hash": -7828211434235067000,
"line_mean": 29.7352941176,
"line_max": 112,
"alpha_frac": 0.6301435407,
"autogenerated": false,
"ratio": 3.820840950639854,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4950984491339854,
"avg_score": null,
"num_lines": null
} |
"""Alias related views."""
from django.core.urlresolvers import reverse
from django.db import IntegrityError
from django.shortcuts import render
from django.utils.translation import ugettext as _, ungettext
from django.views import generic
from django.contrib.auth import mixins as auth_mixins
from django.contrib.auth.decorators import (
login_required, permission_required
)
from reversion import revisions as reversion
from modoboa.core import signals as core_signals
from modoboa.lib.exceptions import PermDeniedException, Conflict
from modoboa.lib.web_utils import render_to_json_response
from ..forms import AliasForm
from ..models import Alias
def _validate_alias(request, form, successmsg, callback=None):
    """Alias validation.

    Common function shared between creation and modification actions.

    :param form: a bound AliasForm
    :param successmsg: message returned as JSON on success
    :param callback: optional hook called with (user, alias) after save
    """
    if form.is_valid():
        try:
            alias = form.save()
        except IntegrityError:
            # uniqueness is enforced at the database level
            raise Conflict(_("Alias with this name already exists"))
        if callback:
            callback(request.user, alias)
        return render_to_json_response(successmsg)
    return render_to_json_response({'form_errors': form.errors}, status=400)
def _new_alias(request, title, action, successmsg,
               tplname="admin/aliasform.html"):
    """Render (GET) or process (POST) the alias creation form."""
    # signal handlers may veto the creation by raising
    core_signals.can_create_object.send(
        "new_alias", context=request.user, object_type="mailbox_aliases")
    if request.method == "POST":
        def callback(user, alias):
            alias.post_create(user)
        form = AliasForm(request.user, request.POST)
        return _validate_alias(
            request, form, successmsg, callback
        )
    ctx = {
        "title": title,
        "action": action,
        "formid": "aliasform",
        "action_label": _("Create"),
        "action_classes": "submit",
        "form": AliasForm(request.user)
    }
    return render(request, tplname, ctx)
@login_required
@permission_required("admin.add_alias")
@reversion.create_revision()
def newalias(request):
    """Create a new alias (POST) or display the creation form (GET)."""
    return _new_alias(
        request, _("New alias"), reverse("admin:alias_add"),
        _("Alias created")
    )
@login_required
@permission_required("admin.change_alias")
@reversion.create_revision()
def editalias(request, alid, tplname="admin/aliasform.html"):
    """Edit the alias identified by *alid* (object-level access checked)."""
    alias = Alias.objects.get(pk=alid)
    if not request.user.can_access(alias):
        raise PermDeniedException
    if request.method == "POST":
        successmsg = _("Alias modified")
        form = AliasForm(request.user, request.POST, instance=alias)
        return _validate_alias(request, form, successmsg)
    ctx = {
        'action': reverse("admin:alias_change", args=[alias.id]),
        'formid': 'aliasform',
        'title': alias.address,
        'action_label': _('Update'),
        'action_classes': 'submit',
        'form': AliasForm(request.user, instance=alias)
    }
    return render(request, tplname, ctx)
@login_required
@permission_required("admin.delete_alias")
def delalias(request):
    """Delete every alias listed in the comma-separated ``selection`` GET arg."""
    selection = request.GET["selection"].split(",")
    for alid in selection:
        alias = Alias.objects.get(pk=alid)
        if not request.user.can_access(alias):
            raise PermDeniedException
        alias.delete()
    msg = ungettext("Alias deleted", "Aliases deleted", len(selection))
    return render_to_json_response(msg)
class AliasDetailView(
        auth_mixins.PermissionRequiredMixin, generic.DetailView):
    """DetailView for Alias."""

    model = Alias
    permission_required = "admin.add_alias"

    def has_permission(self):
        """Check object-level access on top of the model permission."""
        result = super(AliasDetailView, self).has_permission()
        if not result:
            return result
        return self.request.user.can_access(self.get_object())

    def get_context_data(self, **kwargs):
        """Add information to context."""
        context = super(AliasDetailView, self).get_context_data(**kwargs)
        # NOTE(review): presumably marks the "identities" menu entry as
        # active in the template -- confirm against the template
        context["selection"] = "identities"
        return context
| {
"repo_name": "carragom/modoboa",
"path": "modoboa/admin/views/alias.py",
"copies": "1",
"size": "4006",
"license": "isc",
"hash": -9216744546486646000,
"line_mean": 30.0542635659,
"line_max": 76,
"alpha_frac": 0.6615077384,
"autogenerated": false,
"ratio": 4.014028056112225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5175535794512225,
"avg_score": null,
"num_lines": null
} |
"""Alias related views."""
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.db import IntegrityError
from django.shortcuts import render
from django.utils.translation import ugettext as _, ungettext
from django.views import generic
from django.contrib.auth import mixins as auth_mixins
from django.contrib.auth.decorators import (
login_required, permission_required
)
from reversion import revisions as reversion
from modoboa.core import signals as core_signals
from modoboa.lib.exceptions import PermDeniedException, Conflict
from modoboa.lib.web_utils import render_to_json_response
from ..forms import AliasForm
from ..models import Alias
def _validate_alias(request, form, successmsg, callback=None):
    """Alias validation.

    Common function shared between creation and modification actions.

    :param form: a bound AliasForm
    :param successmsg: message returned as JSON on success
    :param callback: optional hook called with (user, alias) after save
    """
    if form.is_valid():
        try:
            alias = form.save()
        except IntegrityError:
            # uniqueness is enforced at the database level
            raise Conflict(_("Alias with this name already exists"))
        if callback:
            callback(request.user, alias)
        return render_to_json_response(successmsg)
    return render_to_json_response({'form_errors': form.errors}, status=400)
def _new_alias(request, title, action, successmsg,
               tplname="admin/aliasform.html"):
    """Render (GET) or process (POST) the alias creation form."""
    # signal handlers may veto the creation by raising
    core_signals.can_create_object.send(
        "new_alias", context=request.user, klass=Alias)
    if request.method == "POST":
        def callback(user, alias):
            alias.post_create(user)
        form = AliasForm(request.user, request.POST)
        return _validate_alias(
            request, form, successmsg, callback
        )
    ctx = {
        "title": title,
        "action": action,
        "formid": "aliasform",
        "action_label": _("Create"),
        "action_classes": "submit",
        "form": AliasForm(request.user)
    }
    return render(request, tplname, ctx)
@login_required
@permission_required("admin.add_alias")
@reversion.create_revision()
def newalias(request):
    """Create a new alias (POST) or display the creation form (GET)."""
    return _new_alias(
        request, _("New alias"), reverse("admin:alias_add"),
        _("Alias created")
    )
@login_required
@permission_required("admin.change_alias")
@reversion.create_revision()
def editalias(request, alid, tplname="admin/aliasform.html"):
    """Edit the alias identified by *alid* (object-level access checked)."""
    alias = Alias.objects.get(pk=alid)
    if not request.user.can_access(alias):
        raise PermDeniedException
    if request.method == "POST":
        successmsg = _("Alias modified")
        form = AliasForm(request.user, request.POST, instance=alias)
        return _validate_alias(request, form, successmsg)
    ctx = {
        'action': reverse("admin:alias_change", args=[alias.id]),
        'formid': 'aliasform',
        'title': alias.address,
        'action_label': _('Update'),
        'action_classes': 'submit',
        'form': AliasForm(request.user, instance=alias)
    }
    return render(request, tplname, ctx)
@login_required
@permission_required("admin.delete_alias")
def delalias(request):
    """Delete every alias listed in the comma-separated ``selection`` GET arg."""
    selection = request.GET["selection"].split(",")
    for alid in selection:
        alias = Alias.objects.get(pk=alid)
        if not request.user.can_access(alias):
            raise PermDeniedException
        alias.delete()
    msg = ungettext("Alias deleted", "Aliases deleted", len(selection))
    return render_to_json_response(msg)
class AliasDetailView(
        auth_mixins.PermissionRequiredMixin, generic.DetailView):
    """DetailView for Alias."""

    model = Alias
    permission_required = "admin.add_alias"

    def has_permission(self):
        """Check object-level access on top of the model permission."""
        result = super(AliasDetailView, self).has_permission()
        if not result:
            return result
        return self.request.user.can_access(self.get_object())

    def get_context_data(self, **kwargs):
        """Add information to context."""
        context = super(AliasDetailView, self).get_context_data(**kwargs)
        # NOTE(review): presumably marks the "identities" menu entry as
        # active in the template -- confirm against the template
        context["selection"] = "identities"
        return context
| {
"repo_name": "bearstech/modoboa",
"path": "modoboa/admin/views/alias.py",
"copies": "1",
"size": "4029",
"license": "isc",
"hash": -3332303187644758500,
"line_mean": 29.7557251908,
"line_max": 76,
"alpha_frac": 0.6619508563,
"autogenerated": false,
"ratio": 4.0129482071713145,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5174899063471314,
"avg_score": null,
"num_lines": null
} |
"""Alias related views."""
from reversion import revisions as reversion
from django.contrib.auth import mixins as auth_mixins
from django.contrib.auth.decorators import login_required, permission_required
from django.db import IntegrityError
from django.shortcuts import render
from django.urls import reverse
from django.utils.translation import ugettext as _, ungettext
from django.views import generic
from modoboa.core import signals as core_signals
from modoboa.lib.exceptions import Conflict, PermDeniedException
from modoboa.lib.web_utils import render_to_json_response
from ..forms import AliasForm
from ..models import Alias
def _validate_alias(request, form, successmsg, callback=None):
    """Validate an alias form and build the JSON response.

    Shared by the creation and modification views.  ``callback``, when
    given, receives the requesting user and the saved alias.
    """
    if not form.is_valid():
        return render_to_json_response({"form_errors": form.errors}, status=400)
    try:
        alias = form.save()
    except IntegrityError:
        raise Conflict(_("Alias with this name already exists"))
    if callback:
        callback(request.user, alias)
    return render_to_json_response(successmsg)
def _new_alias(request, title, action, successmsg,
               tplname="admin/aliasform.html"):
    """Render or process the alias creation form.

    Emits the ``can_create_object`` signal first so limits/quotas can
    veto the creation.
    """
    core_signals.can_create_object.send(
        "new_alias", context=request.user, klass=Alias)
    if request.method == "POST":
        def on_save(user, alias):
            # Run the model's post-creation hook for the saved alias.
            alias.post_create(user)
        bound_form = AliasForm(request.user, request.POST)
        return _validate_alias(request, bound_form, successmsg, on_save)
    context = {
        "title": title,
        "action": action,
        "formid": "aliasform",
        "action_label": _("Create"),
        "action_classes": "submit",
        "form": AliasForm(request.user),
    }
    return render(request, tplname, context)
@login_required
@permission_required("admin.add_alias")
@reversion.create_revision()
def newalias(request):
    """Create a new alias: thin wrapper setting creation labels/URL."""
    return _new_alias(
        request, _("New alias"), reverse("admin:alias_add"),
        _("Alias created")
    )
@login_required
@permission_required("admin.change_alias")
@reversion.create_revision()
def editalias(request, alid, tplname="admin/aliasform.html"):
    """Render or process the modification form for an existing alias."""
    alias = Alias.objects.get(pk=alid)
    if not request.user.can_access(alias):
        raise PermDeniedException
    if request.method == "POST":
        bound_form = AliasForm(request.user, request.POST, instance=alias)
        return _validate_alias(request, bound_form, _("Alias modified"))
    context = {
        "action": reverse("admin:alias_change", args=[alias.id]),
        "formid": "aliasform",
        "title": alias.address,
        "action_label": _("Update"),
        "action_classes": "submit",
        "form": AliasForm(request.user, instance=alias),
    }
    return render(request, tplname, context)
@login_required
@permission_required("admin.delete_alias")
def delalias(request):
    """Delete every alias listed in the ``selection`` GET parameter.

    Raises PermDeniedException as soon as one selected alias is not
    accessible; earlier deletions in the same request stay applied.
    """
    selection = request.GET["selection"].split(",")
    for pk in selection:
        target = Alias.objects.get(pk=pk)
        if not request.user.can_access(target):
            raise PermDeniedException
        target.delete()
    return render_to_json_response(
        ungettext("Alias deleted", "Aliases deleted", len(selection)))
class AliasDetailView(
        auth_mixins.PermissionRequiredMixin, generic.DetailView):
    """Display a single Alias, enforcing object-level access."""

    model = Alias
    permission_required = "admin.add_alias"

    def has_permission(self):
        """Combine the class-level permission with per-object access."""
        allowed = super(AliasDetailView, self).has_permission()
        # `and` short-circuits exactly like the original early return.
        return allowed and self.request.user.can_access(self.get_object())

    def get_context_data(self, **kwargs):
        """Mark 'identities' as the active menu selection."""
        ctx = super(AliasDetailView, self).get_context_data(**kwargs)
        ctx["selection"] = "identities"
        return ctx
| {
"repo_name": "modoboa/modoboa",
"path": "modoboa/admin/views/alias.py",
"copies": "1",
"size": "3965",
"license": "isc",
"hash": 4242295708826724000,
"line_mean": 30.72,
"line_max": 78,
"alpha_frac": 0.6617906683,
"autogenerated": false,
"ratio": 4.02129817444219,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 125
} |
# alias to keep the 'bytecode' variable free
from _pydevd_frame_eval.vendored import bytecode as _bytecode
from _pydevd_frame_eval.vendored.bytecode.concrete import ConcreteInstr
from _pydevd_frame_eval.vendored.bytecode.instr import Label, SetLineno, Instr
class BasicBlock(_bytecode._InstrList):
    """A straight-line run of instructions ending in at most one jump.

    ``next_block`` is the fall-through successor block, or None.
    """
    def __init__(self, instructions=None):
        # a BasicBlock object, or None: the fall-through successor
        self.next_block = None
        if instructions:
            super().__init__(instructions)
    def __iter__(self):
        # Iterate while validating the block's invariants.
        index = 0
        while index < len(self):
            instr = self[index]
            index += 1
            # Only SetLineno pseudo-instructions and Instr are allowed.
            if not isinstance(instr, (SetLineno, Instr)):
                raise ValueError(
                    "BasicBlock must only contain SetLineno and Instr objects, "
                    "but %s was found" % instr.__class__.__name__
                )
            if isinstance(instr, Instr) and instr.has_jump():
                # A jump may only be the final instruction of a block.
                if index < len(self):
                    raise ValueError(
                        "Only the last instruction of a basic " "block can be a jump"
                    )
                if not isinstance(instr.arg, BasicBlock):
                    raise ValueError(
                        "Jump target must a BasicBlock, got %s",
                        type(instr.arg).__name__,
                    )
            yield instr
    def __getitem__(self, index):
        value = super().__getitem__(index)
        # Slicing yields a new BasicBlock that shares this block's successor.
        if isinstance(index, slice):
            value = type(self)(value)
            value.next_block = self.next_block
        return value
    def copy(self):
        """Return a shallow copy keeping the same next_block."""
        new = type(self)(super().copy())
        new.next_block = self.next_block
        return new
    def legalize(self, first_lineno):
        """Check that all the element of the list are valid and remove SetLineno.

        Propagates line numbers onto the instructions and returns the line
        number in effect after the last instruction.
        """
        lineno_pos = []
        set_lineno = None
        current_lineno = first_lineno
        for pos, instr in enumerate(self):
            if isinstance(instr, SetLineno):
                # Record position for later removal; its lineno is forced
                # onto all following instructions of this block.
                set_lineno = current_lineno = instr.lineno
                lineno_pos.append(pos)
                continue
            if set_lineno is not None:
                instr.lineno = set_lineno
            elif instr.lineno is None:
                instr.lineno = current_lineno
            else:
                current_lineno = instr.lineno
        # Delete SetLineno entries back-to-front so positions stay valid.
        for i in reversed(lineno_pos):
            del self[i]
        return current_lineno
    def get_jump(self):
        """Return the jump-target BasicBlock, or None if there is no jump."""
        if not self:
            return None
        last_instr = self[-1]
        if not (isinstance(last_instr, Instr) and last_instr.has_jump()):
            return None
        target_block = last_instr.arg
        assert isinstance(target_block, BasicBlock)
        return target_block
def _compute_stack_size(block, size, maxsize):
    """Trampolined generator computing the stack size needed by *block*.

    To avoid exhausting the interpreter stack on deeply nested control
    flow (see https://en.wikipedia.org/wiki/Trampoline_(computing)), the
    generator yields either:

    - the arguments of what would be a recursive call, i.e.
      ``yield block, size, maxsize`` instead of calling
      ``_compute_stack_size(block, size, maxsize)``, when it meets a jump
      to another block or a fall-through ``next_block``; the driver
      (ControlFlowGraph.compute_stacksize) creates a new generator for
      them and resumes this one with the computed maxsize, or
    - a plain int: the final maxsize, once all instructions were
      processed or an unconditional jump was met.
    """
    # If the block is currently being visited (seen = True) or if it was visited
    # previously by using a larger starting size than the one in use, return the
    # maxsize.
    if block.seen or block.startsize >= size:
        yield maxsize
    def update_size(delta, size, maxsize):
        # Apply one instruction's stack effect and track the running peak.
        size += delta
        if size < 0:
            msg = "Failed to compute stacksize, got negative size"
            raise RuntimeError(msg)
        maxsize = max(maxsize, size)
        return size, maxsize
    # Prevent recursive visit of block if two blocks are nested (jump from one
    # to the other).
    block.seen = True
    block.startsize = size
    for instr in block:
        # Ignore SetLineno: it has no stack effect.
        if isinstance(instr, SetLineno):
            continue
        # For instructions with a jump first compute the stacksize required when the
        # jump is taken.
        if instr.has_jump():
            taken_size, maxsize = update_size(
                instr.stack_effect(jump=True), size, maxsize
            )
            # Yield the parameters required to compute the stacksize required
            # by the block to which the jump points, and resume when we know
            # the maxsize.
            maxsize = yield instr.arg, taken_size, maxsize
            # For unconditional jumps abort early since the following
            # instructions will never be seen.
            if instr.is_uncond_jump():
                block.seen = False
                yield maxsize
        # jump=False: non-taken path of jumps, or any non-jump
        size, maxsize = update_size(instr.stack_effect(jump=False), size, maxsize)
    if block.next_block:
        maxsize = yield block.next_block, size, maxsize
    block.seen = False
    yield maxsize
class ControlFlowGraph(_bytecode.BaseBytecode):
    """Bytecode represented as a graph of BasicBlocks.

    Blocks are indexed by identity (``id``) in ``_block_index``, so a
    given BasicBlock object can belong to at most one graph.
    """
    def __init__(self):
        super().__init__()
        # Ordered list of BasicBlock, plus id(block) -> position lookup.
        self._blocks = []
        self._block_index = {}
        self.argnames = []
        # A CFG always owns at least one (initially empty) entry block.
        self.add_block()
    def legalize(self):
        """Legalize all blocks.

        Propagates line numbers and strips SetLineno pseudo-instructions.
        """
        current_lineno = self.first_lineno
        for block in self._blocks:
            current_lineno = block.legalize(current_lineno)
    def get_block_index(self, block):
        """Return the position of *block*; ValueError if it is foreign."""
        try:
            return self._block_index[id(block)]
        except KeyError:
            raise ValueError("the block is not part of this bytecode")
    def _add_block(self, block):
        # Append the block and register it in the identity index.
        block_index = len(self._blocks)
        self._blocks.append(block)
        self._block_index[id(block)] = block_index
    def add_block(self, instructions=None):
        """Append and return a new BasicBlock built from *instructions*."""
        block = BasicBlock(instructions)
        self._add_block(block)
        return block
    def compute_stacksize(self):
        """Compute the stack size by iterating through the blocks

        The implementation make use of a generator function to avoid issue with
        deeply nested recursions.
        """
        # In the absence of any block return 0
        if not self:
            return 0
        # Ensure that previous calculation do not impact this one.
        for block in self:
            block.seen = False
            block.startsize = -32768  # INT_MIN
        # Create a generator/coroutine responsible of dealing with the first block
        coro = _compute_stack_size(self[0], 0, 0)
        # Create a list of generator that have not yet been exhausted
        coroutines = []
        push_coroutine = coroutines.append
        pop_coroutine = coroutines.pop
        args = None
        try:
            while True:
                args = coro.send(None)
                # Consume the stored generators as long as they return a simple
                # integer that is to be used to resume the last stored generator.
                while isinstance(args, int):
                    coro = pop_coroutine()
                    args = coro.send(args)
                # Otherwise we enter a new block and we store the generator under
                # use and create a new one to process the new block
                push_coroutine(coro)
                coro = _compute_stack_size(*args)
        except IndexError:
            # The exception occurs when all the generators have been exhausted
            # in which case the last yielded value is the stacksize.
            assert args is not None
            return args
    def __repr__(self):
        return "<ControlFlowGraph block#=%s>" % len(self._blocks)
    def get_instructions(self):
        """Return a flat instruction list; jump args become block indices."""
        instructions = []
        jumps = []
        for block in self:
            target_block = block.get_jump()
            if target_block is not None:
                # Replace the trailing jump by a placeholder ConcreteInstr;
                # its arg is patched below once every index is known.
                instr = block[-1]
                instr = ConcreteInstr(instr.name, 0, lineno=instr.lineno)
                jumps.append((target_block, instr))
                instructions.extend(block[:-1])
                instructions.append(instr)
            else:
                instructions.extend(block)
        for target_block, instr in jumps:
            instr.arg = self.get_block_index(target_block)
        return instructions
    def __eq__(self, other):
        if type(self) != type(other):
            return False
        if self.argnames != other.argnames:
            return False
        instrs1 = self.get_instructions()
        instrs2 = other.get_instructions()
        if instrs1 != instrs2:
            return False
        # FIXME: compare block.next_block
        return super().__eq__(other)
    def __len__(self):
        return len(self._blocks)
    def __iter__(self):
        return iter(self._blocks)
    def __getitem__(self, index):
        # Accept either a position or a BasicBlock instance.
        if isinstance(index, BasicBlock):
            index = self.get_block_index(index)
        return self._blocks[index]
    def __delitem__(self, index):
        if isinstance(index, BasicBlock):
            index = self.get_block_index(index)
        block = self._blocks[index]
        del self._blocks[index]
        del self._block_index[id(block)]
        # Shift the cached positions of every following block down by one.
        for index in range(index, len(self)):
            block = self._blocks[index]
            self._block_index[id(block)] -= 1
    def split_block(self, block, index):
        """Split *block* at *index*; return the block starting there."""
        if not isinstance(block, BasicBlock):
            raise TypeError("expected block")
        block_index = self.get_block_index(block)
        if index < 0:
            raise ValueError("index must be positive")
        block = self._blocks[block_index]
        if index == 0:
            return block
        if index > len(block):
            raise ValueError("index out of the block")
        instructions = block[index:]
        if not instructions:
            # Splitting at the very end: reuse the next block if any.
            if block_index + 1 < len(self):
                return self[block_index + 1]
        del block[index:]
        block2 = BasicBlock(instructions)
        block.next_block = block2
        # Re-index the blocks displaced by the insertion.
        for block in self[block_index + 1 :]:
            self._block_index[id(block)] += 1
        self._blocks.insert(block_index + 1, block2)
        self._block_index[id(block2)] = block_index + 1
        return block2
    @staticmethod
    def from_bytecode(bytecode):
        """Build a ControlFlowGraph from a flat Bytecode object."""
        # label => instruction index
        label_to_block_index = {}
        jumps = []
        block_starts = {}
        for index, instr in enumerate(bytecode):
            if isinstance(instr, Label):
                label_to_block_index[instr] = index
            else:
                if isinstance(instr, Instr) and isinstance(instr.arg, Label):
                    jumps.append((index, instr.arg))
        # Every jump target starts a new basic block.
        for target_index, target_label in jumps:
            target_index = label_to_block_index[target_label]
            block_starts[target_index] = target_label
        bytecode_blocks = _bytecode.ControlFlowGraph()
        bytecode_blocks._copy_attr_from(bytecode)
        bytecode_blocks.argnames = list(bytecode.argnames)
        # copy instructions, convert labels to block labels
        block = bytecode_blocks[0]
        labels = {}
        jumps = []
        for index, instr in enumerate(bytecode):
            if index in block_starts:
                old_label = block_starts[index]
                if index != 0:
                    new_block = bytecode_blocks.add_block()
                    # Link fall-through only when the previous block can
                    # actually reach this one.
                    if not block[-1].is_final():
                        block.next_block = new_block
                    block = new_block
                if old_label is not None:
                    labels[old_label] = block
            elif block and isinstance(block[-1], Instr):
                if block[-1].is_final():
                    block = bytecode_blocks.add_block()
                elif block[-1].has_jump():
                    new_block = bytecode_blocks.add_block()
                    block.next_block = new_block
                    block = new_block
            if isinstance(instr, Label):
                continue
            # don't copy SetLineno objects
            if isinstance(instr, Instr):
                instr = instr.copy()
                if isinstance(instr.arg, Label):
                    jumps.append(instr)
            block.append(instr)
        # Resolve jump arguments now that every label maps to a block.
        for instr in jumps:
            label = instr.arg
            instr.arg = labels[label]
        return bytecode_blocks
    def to_bytecode(self):
        """Convert to Bytecode."""
        # Label every block that is the target of at least one jump.
        used_blocks = set()
        for block in self:
            target_block = block.get_jump()
            if target_block is not None:
                used_blocks.add(id(target_block))
        labels = {}
        jumps = []
        instructions = []
        for block in self:
            if id(block) in used_blocks:
                new_label = Label()
                labels[id(block)] = new_label
                instructions.append(new_label)
            for instr in block:
                # don't copy SetLineno objects
                if isinstance(instr, Instr):
                    instr = instr.copy()
                    if isinstance(instr.arg, BasicBlock):
                        jumps.append(instr)
                instructions.append(instr)
        # Map to new labels
        for instr in jumps:
            instr.arg = labels[id(instr.arg)]
        bytecode = _bytecode.Bytecode()
        bytecode._copy_attr_from(self)
        bytecode.argnames = list(self.argnames)
        bytecode[:] = instructions
        return bytecode
    def to_code(self, stacksize=None):
        """Convert to code."""
        if stacksize is None:
            stacksize = self.compute_stacksize()
        bc = self.to_bytecode()
        return bc.to_code(stacksize=stacksize)
| {
"repo_name": "fabioz/PyDev.Debugger",
"path": "_pydevd_frame_eval/vendored/bytecode/cfg.py",
"copies": "1",
"size": "14433",
"license": "epl-1.0",
"hash": 5153407549036227000,
"line_mean": 31.5801354402,
"line_max": 85,
"alpha_frac": 0.5609367422,
"autogenerated": false,
"ratio": 4.486478085172521,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5547414827372521,
"avg_score": null,
"num_lines": null
} |
# alias to keep the 'bytecode' variable free
from _pydevd_frame_eval.vendored import bytecode as _bytecode
try:
from enum import IntFlag
except ImportError:
from aenum import IntFlag
class CompilerFlags(IntFlag):
    """Possible values of the co_flags attribute of Code object.

    Note: We do not rely on inspect values here as some of them are missing and
    furthermore would be version dependent.
    """
    OPTIMIZED = 0x00001  # noqa
    NEWLOCALS = 0x00002  # noqa
    VARARGS = 0x00004  # noqa
    VARKEYWORDS = 0x00008  # noqa
    NESTED = 0x00010  # noqa
    GENERATOR = 0x00020  # noqa
    NOFREE = 0x00040  # noqa
    # New in Python 3.5
    # Used for coroutines defined using async def ie native coroutine
    COROUTINE = 0x00080  # noqa
    # Used for coroutines defined as a generator and then decorated using
    # types.coroutine
    ITERABLE_COROUTINE = 0x00100  # noqa
    # New in Python 3.6
    # Generator defined in an async def function
    ASYNC_GENERATOR = 0x00200  # noqa
    # __future__ flags
    # NOTE(review): only GENERATOR_STOP is mirrored here; other __future__
    # bits are intentionally omitted.
    FUTURE_GENERATOR_STOP = 0x80000  # noqa
def infer_flags(bytecode, is_async=None):
    """Infer the proper flags for a bytecode based on the instructions.

    Because the bytecode does not have enough context to guess if a function
    is asynchronous the algorithm tries to be conservative and will never turn
    a previously async code into a sync one.

    Parameters
    ----------
    bytecode : Bytecode | ConcreteBytecode | ControlFlowGraph
        Bytecode for which to infer the proper flags
    is_async : bool | None, optional
        Force the code to be marked as asynchronous if True, prevent it from
        being marked as asynchronous if False and simply infer the best
        solution based on the opcode and the existing flag if None.
    """
    flags = CompilerFlags(0)
    if not isinstance(
        bytecode,
        (_bytecode.Bytecode, _bytecode.ConcreteBytecode, _bytecode.ControlFlowGraph),
    ):
        msg = (
            "Expected a Bytecode, ConcreteBytecode or ControlFlowGraph "
            "instance not %s"
        )
        raise ValueError(msg % bytecode)
    instructions = (
        bytecode.get_instructions()
        if isinstance(bytecode, _bytecode.ControlFlowGraph)
        else bytecode
    )
    # Set of opcode names, ignoring pseudo-instructions and labels.
    instr_names = {
        i.name
        for i in instructions
        if not isinstance(i, (_bytecode.SetLineno, _bytecode.Label))
    }
    # Identify optimized code
    if not (instr_names & {"STORE_NAME", "LOAD_NAME", "DELETE_NAME"}):
        flags |= CompilerFlags.OPTIMIZED
    # Check for free variables
    if not (
        instr_names
        & {
            "LOAD_CLOSURE",
            "LOAD_DEREF",
            "STORE_DEREF",
            "DELETE_DEREF",
            "LOAD_CLASSDEREF",
        }
    ):
        flags |= CompilerFlags.NOFREE
    # Copy flags for which we cannot infer the right value
    flags |= bytecode.flags & (
        CompilerFlags.NEWLOCALS
        | CompilerFlags.VARARGS
        | CompilerFlags.VARKEYWORDS
        | CompilerFlags.NESTED
    )
    # YIELD_VALUE proves a generator; YIELD_FROM alone only suggests one.
    sure_generator = instr_names & {"YIELD_VALUE"}
    maybe_generator = instr_names & {"YIELD_VALUE", "YIELD_FROM"}
    # Opcodes that are only legal inside async functions.
    sure_async = instr_names & {
        "GET_AWAITABLE",
        "GET_AITER",
        "GET_ANEXT",
        "BEFORE_ASYNC_WITH",
        "SETUP_ASYNC_WITH",
        "END_ASYNC_FOR",
    }
    # If performing inference or forcing an async behavior, first inspect
    # the flags since this is the only way to identify iterable coroutines
    if is_async in (None, True):
        if bytecode.flags & CompilerFlags.COROUTINE:
            if sure_generator:
                flags |= CompilerFlags.ASYNC_GENERATOR
            else:
                flags |= CompilerFlags.COROUTINE
        elif bytecode.flags & CompilerFlags.ITERABLE_COROUTINE:
            if sure_async:
                msg = (
                    "The ITERABLE_COROUTINE flag is set but bytecode that"
                    "can only be used in async functions have been "
                    "detected. Please unset that flag before performing "
                    "inference."
                )
                raise ValueError(msg)
            flags |= CompilerFlags.ITERABLE_COROUTINE
        elif bytecode.flags & CompilerFlags.ASYNC_GENERATOR:
            if not sure_generator:
                flags |= CompilerFlags.COROUTINE
            else:
                flags |= CompilerFlags.ASYNC_GENERATOR
        # If the code was not asynchronous before determine if it should now be
        # asynchronous based on the opcode and the is_async argument.
        else:
            if sure_async:
                # YIELD_FROM is not allowed in async generator
                if sure_generator:
                    flags |= CompilerFlags.ASYNC_GENERATOR
                else:
                    flags |= CompilerFlags.COROUTINE
            elif maybe_generator:
                if is_async:
                    if sure_generator:
                        flags |= CompilerFlags.ASYNC_GENERATOR
                    else:
                        flags |= CompilerFlags.COROUTINE
                else:
                    flags |= CompilerFlags.GENERATOR
            elif is_async:
                flags |= CompilerFlags.COROUTINE
    # If the code should not be asynchronous, check first it is possible and
    # next set the GENERATOR flag if relevant
    else:
        if sure_async:
            raise ValueError(
                "The is_async argument is False but bytecodes "
                "that can only be used in async functions have "
                "been detected."
            )
        if maybe_generator:
            flags |= CompilerFlags.GENERATOR
    flags |= bytecode.flags & CompilerFlags.FUTURE_GENERATOR_STOP
    return flags
| {
"repo_name": "fabioz/PyDev.Debugger",
"path": "_pydevd_frame_eval/vendored/bytecode/flags.py",
"copies": "2",
"size": "5812",
"license": "epl-1.0",
"hash": -7843855972920849000,
"line_mean": 31.8361581921,
"line_max": 85,
"alpha_frac": 0.599449415,
"autogenerated": false,
"ratio": 4.386415094339623,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00006569438969911969,
"num_lines": 177
} |
# alias to keep the 'bytecode' variable free
import bytecode as _bytecode
from bytecode.concrete import ConcreteInstr
from bytecode.instr import Label, SetLineno, Instr
class BasicBlock(_bytecode._InstrList):
    """A straight-line run of instructions ending in at most one jump.

    ``next_block`` is the fall-through successor block, or None.
    """
    def __init__(self, instructions=None):
        # a BasicBlock object, or None
        self.next_block = None
        if instructions:
            super().__init__(instructions)
    def __iter__(self):
        # Iterate while validating the block's invariants.
        index = 0
        while index < len(self):
            instr = self[index]
            index += 1
            if not isinstance(instr, (SetLineno, Instr, ConcreteInstr)):
                raise ValueError("BasicBlock must only contain SetLineno, "
                                 "Instr and ConcreteInstr objects, "
                                 "but %s was found"
                                 % instr.__class__.__name__)
            if isinstance(instr, Instr) and instr.has_jump():
                # A jump may only be the final instruction of a block.
                if index < len(self):
                    raise ValueError("Only the last instruction of a basic "
                                     "block can be a jump")
                if not isinstance(instr.arg, BasicBlock):
                    raise ValueError("Jump target must a BasicBlock, got %s",
                                     type(instr.arg).__name__)
            yield instr
    def get_jump(self):
        """Return the jump-target BasicBlock, or None if there is no jump."""
        if not self:
            return None
        last_instr = self[-1]
        if not(isinstance(last_instr, Instr) and last_instr.has_jump()):
            return None
        target_block = last_instr.arg
        assert isinstance(target_block, BasicBlock)
        return target_block
def _compute_stack_size(block, size, maxsize):
    """Recursively compute the stack size required starting at *block*.

    *size* is the stack depth on entry, *maxsize* the peak seen so far;
    the updated peak is returned.  ``block.seen`` / ``block.startsize``
    (reset by ControlFlowGraph.compute_stacksize) cut off redundant
    revisits and break cycles between mutually-jumping blocks.
    """
    # Already being visited, or previously visited with a deeper entry
    # stack: the recorded maxsize already covers this path.
    if block.seen or block.startsize >= size:
        return maxsize

    def update_size(delta, size, maxsize):
        # Apply one instruction's stack effect and track the running peak.
        size += delta
        if size < 0:
            msg = 'Failed to compute stacksize, got negative size'
            raise RuntimeError(msg)
        maxsize = max(maxsize, size)
        return size, maxsize

    block.seen = True
    block.startsize = size
    for instr in block:
        # SetLineno pseudo-instructions have no stack effect.
        if isinstance(instr, SetLineno):
            continue
        if instr.has_jump():
            # first compute the taken-jump path
            taken_size, maxsize = update_size(instr.stack_effect(jump=True),
                                              size, maxsize)
            maxsize = _compute_stack_size(instr.arg, taken_size, maxsize)
            if instr.is_uncond_jump():
                # The fall-through path is unreachable; stop here.
                block.seen = False
                return maxsize
        # jump=False: non-taken path of jumps, or any non-jump
        size, maxsize = update_size(instr.stack_effect(jump=False),
                                    size, maxsize)
    if block.next_block:
        maxsize = _compute_stack_size(block.next_block, size, maxsize)
    # Consistency fix: the sentinel is a bool everywhere else; the
    # original assigned 0 here (same truthiness, mixed types).
    block.seen = False
    return maxsize
class ControlFlowGraph(_bytecode.BaseBytecode):
    """Bytecode represented as a graph of BasicBlocks.

    Blocks are indexed by identity (``id``) in ``_block_index``, so a
    given BasicBlock object can belong to at most one graph.
    """
    def __init__(self):
        super().__init__()
        # Ordered list of BasicBlock, plus id(block) -> position lookup.
        self._blocks = []
        self._block_index = {}
        self.argnames = []
        # A CFG always owns at least one (initially empty) entry block.
        self.add_block()
    def get_block_index(self, block):
        """Return the position of *block*; ValueError if it is foreign."""
        try:
            return self._block_index[id(block)]
        except KeyError:
            raise ValueError("the block is not part of this bytecode")
    def _add_block(self, block):
        # Append the block and register it in the identity index.
        block_index = len(self._blocks)
        self._blocks.append(block)
        self._block_index[id(block)] = block_index
    def add_block(self, instructions=None):
        """Append and return a new BasicBlock built from *instructions*."""
        block = BasicBlock(instructions)
        self._add_block(block)
        return block
    def compute_stacksize(self):
        """Compute the stack size required by this graph's entry block."""
        if not self:
            return 0
        # Reset the per-block visit markers used by _compute_stack_size.
        for block in self:
            block.seen = False
            block.startsize = -32768  # INT_MIN
        return _compute_stack_size(self[0], 0, 0)
    def __repr__(self):
        return '<ControlFlowGraph block#=%s>' % len(self._blocks)
    def get_instructions(self):
        """Return a flat instruction list; jump args become block indices."""
        instructions = []
        jumps = []
        for block in self:
            target_block = block.get_jump()
            if target_block is not None:
                # Replace the trailing jump by a placeholder ConcreteInstr;
                # its arg is patched below once every index is known.
                instr = block[-1]
                instr = ConcreteInstr(instr.name, 0, lineno=instr.lineno)
                jumps.append((target_block, instr))
                instructions.extend(block[:-1])
                instructions.append(instr)
            else:
                instructions.extend(block)
        for target_block, instr in jumps:
            instr.arg = self.get_block_index(target_block)
        return instructions
    def __eq__(self, other):
        if type(self) != type(other):
            return False
        if self.argnames != other.argnames:
            return False
        instrs1 = self.get_instructions()
        instrs2 = other.get_instructions()
        if instrs1 != instrs2:
            return False
        # FIXME: compare block.next_block
        return super().__eq__(other)
    def __len__(self):
        return len(self._blocks)
    def __iter__(self):
        return iter(self._blocks)
    def __getitem__(self, index):
        # Accept either a position or a BasicBlock instance.
        if isinstance(index, BasicBlock):
            index = self.get_block_index(index)
        return self._blocks[index]
    def __delitem__(self, index):
        if isinstance(index, BasicBlock):
            index = self.get_block_index(index)
        block = self._blocks[index]
        del self._blocks[index]
        del self._block_index[id(block)]
        # Shift the cached positions of every following block down by one.
        for index in range(index, len(self)):
            block = self._blocks[index]
            self._block_index[id(block)] -= 1
    def split_block(self, block, index):
        """Split *block* at *index*; return the block starting there."""
        if not isinstance(block, BasicBlock):
            raise TypeError("expected block")
        block_index = self.get_block_index(block)
        if index < 0:
            raise ValueError("index must be positive")
        block = self._blocks[block_index]
        if index == 0:
            return block
        if index > len(block):
            raise ValueError("index out of the block")
        instructions = block[index:]
        if not instructions:
            # Splitting at the very end: reuse the next block if any.
            if block_index + 1 < len(self):
                return self[block_index + 1]
        del block[index:]
        block2 = BasicBlock(instructions)
        block.next_block = block2
        # Re-index the blocks displaced by the insertion.
        for block in self[block_index + 1:]:
            self._block_index[id(block)] += 1
        self._blocks.insert(block_index + 1, block2)
        self._block_index[id(block2)] = block_index + 1
        return block2
    @staticmethod
    def from_bytecode(bytecode):
        """Build a ControlFlowGraph from a flat Bytecode object."""
        # label => instruction index
        label_to_block_index = {}
        jumps = []
        block_starts = {}
        for index, instr in enumerate(bytecode):
            if isinstance(instr, Label):
                label_to_block_index[instr] = index
            else:
                if isinstance(instr, Instr) and isinstance(instr.arg, Label):
                    jumps.append((index, instr.arg))
        # Every jump target starts a new basic block.
        for target_index, target_label in jumps:
            target_index = label_to_block_index[target_label]
            block_starts[target_index] = target_label
        bytecode_blocks = _bytecode.ControlFlowGraph()
        bytecode_blocks._copy_attr_from(bytecode)
        bytecode_blocks.argnames = list(bytecode.argnames)
        # copy instructions, convert labels to block labels
        block = bytecode_blocks[0]
        labels = {}
        jumps = []
        for index, instr in enumerate(bytecode):
            if index in block_starts:
                old_label = block_starts[index]
                if index != 0:
                    new_block = bytecode_blocks.add_block()
                    # Link fall-through only when the previous block can
                    # actually reach this one.
                    if not block[-1].is_final():
                        block.next_block = new_block
                    block = new_block
                if old_label is not None:
                    labels[old_label] = block
            elif block and isinstance(block[-1], Instr):
                if block[-1].is_final():
                    block = bytecode_blocks.add_block()
                elif block[-1].has_jump():
                    new_block = bytecode_blocks.add_block()
                    block.next_block = new_block
                    block = new_block
            if isinstance(instr, Label):
                continue
            # don't copy SetLineno objects
            if isinstance(instr, (Instr, ConcreteInstr)):
                instr = instr.copy()
                if isinstance(instr.arg, Label):
                    jumps.append(instr)
            block.append(instr)
        # Resolve jump arguments now that every label maps to a block.
        for instr in jumps:
            label = instr.arg
            instr.arg = labels[label]
        return bytecode_blocks
    def to_bytecode(self):
        """Convert to Bytecode."""
        # Label every block that is the target of at least one jump.
        used_blocks = set()
        for block in self:
            target_block = block.get_jump()
            if target_block is not None:
                used_blocks.add(id(target_block))
        labels = {}
        jumps = []
        instructions = []
        for block in self:
            if id(block) in used_blocks:
                new_label = Label()
                labels[id(block)] = new_label
                instructions.append(new_label)
            for instr in block:
                # don't copy SetLineno objects
                if isinstance(instr, (Instr, ConcreteInstr)):
                    instr = instr.copy()
                    if isinstance(instr.arg, BasicBlock):
                        jumps.append(instr)
                instructions.append(instr)
        # Map to new labels
        for instr in jumps:
            instr.arg = labels[id(instr.arg)]
        bytecode = _bytecode.Bytecode()
        bytecode._copy_attr_from(self)
        bytecode.argnames = list(self.argnames)
        bytecode[:] = instructions
        return bytecode
    def to_code(self, stacksize=None):
        """Convert to code."""
        if stacksize is None:
            stacksize = self.compute_stacksize()
        bc = self.to_bytecode()
        return bc.to_code(stacksize=stacksize)
| {
"repo_name": "haypo/bytecode",
"path": "bytecode/cfg.py",
"copies": "1",
"size": "10025",
"license": "mit",
"hash": 2972598080645481500,
"line_mean": 30.1335403727,
"line_max": 77,
"alpha_frac": 0.536957606,
"autogenerated": false,
"ratio": 4.391151992991678,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5428109598991678,
"avg_score": null,
"num_lines": null
} |
# alias to keep the 'bytecode' variable free
import bytecode as _bytecode
from bytecode.instr import UNSET, Label, SetLineno, Instr
from bytecode.flags import infer_flags
class BaseBytecode:
    """Metadata shared by every bytecode representation."""

    def __init__(self):
        self.argcount = 0
        self.kwonlyargcount = 0
        self.first_lineno = 1
        self.name = '<module>'
        self.filename = '<string>'
        self.docstring = UNSET
        self.cellvars = []
        # we cannot recreate freevars from instructions because of super()
        # special-case
        self.freevars = []
        self._flags = _bytecode.CompilerFlags(0)

    def _copy_attr_from(self, bytecode):
        """Copy every shared attribute from another bytecode object."""
        # 'flags' goes through the property setter, coercing plain ints.
        for attr in ('argcount', 'kwonlyargcount', 'flags', 'first_lineno',
                     'name', 'filename', 'docstring'):
            setattr(self, attr, getattr(bytecode, attr))
        self.cellvars = list(bytecode.cellvars)
        self.freevars = list(bytecode.freevars)

    def __eq__(self, other):
        """Equality on type, metadata and the computed stack size."""
        if type(self) != type(other):
            return False
        if self.argcount != other.argcount:
            return False
        if self.kwonlyargcount != other.kwonlyargcount:
            return False
        if self.compute_stacksize() != other.compute_stacksize():
            return False
        for attr in ('flags', 'first_lineno', 'filename', 'name',
                     'docstring', 'cellvars', 'freevars'):
            if getattr(self, attr) != getattr(other, attr):
                return False
        return True

    @property
    def flags(self):
        """Compiler flags, always held as a CompilerFlags instance."""
        return self._flags

    @flags.setter
    def flags(self, value):
        # Coerce plain ints so bitwise queries keep working.
        if not isinstance(value, _bytecode.CompilerFlags):
            value = _bytecode.CompilerFlags(value)
        self._flags = value

    def update_flags(self, *, is_async=False):
        """Re-infer the flags from the instructions (see infer_flags)."""
        self.flags = infer_flags(self, is_async)
class _InstrList(list):
    """Instruction list comparable irrespective of Label identities."""

    def _flat(self):
        """Return a label-free view suitable for structural comparison.

        Labels become positional placeholder strings, and jump arguments
        become the offset of their target instruction, so two lists using
        distinct Label objects can still compare equal.
        """
        flattened = []
        label_offsets = {}
        pending_jumps = []
        offset = 0
        for index, item in enumerate(self):
            if isinstance(item, Label):
                flattened.append('label_instr%s' % index)
                label_offsets[item] = offset
                continue
            if isinstance(item, Instr) and isinstance(item.arg, Label):
                target = item.arg
                item = _bytecode.ConcreteInstr(item.name, 0,
                                               lineno=item.lineno)
                pending_jumps.append((target, item))
            flattened.append(item)
            offset += 1
        # Patch jump arguments now that every label offset is known.
        for target, placeholder in pending_jumps:
            placeholder.arg = label_offsets[target]
        return flattened

    def __eq__(self, other):
        if not isinstance(other, _InstrList):
            other = _InstrList(other)
        return self._flat() == other._flat()
class Bytecode(_InstrList, BaseBytecode):
    """Abstract bytecode: a validated instruction list plus metadata."""

    def __init__(self, instructions=()):
        BaseBytecode.__init__(self)
        self.argnames = []
        # Validate before the bulk insert so bad items fail fast.
        for item in instructions:
            self._check_instr(item)
        self.extend(instructions)

    def __iter__(self):
        # Re-validate on iteration: items may have been inserted through
        # the plain list API after construction.
        for item in super().__iter__():
            self._check_instr(item)
            yield item

    def _check_instr(self, instr):
        """Raise ValueError unless *instr* is an allowed element type."""
        allowed = (Label, SetLineno, Instr, _bytecode.ConcreteInstr)
        if not isinstance(instr, allowed):
            raise ValueError("Bytecode must only contain Label, "
                             "SetLineno, Instr and ConcreteInstr objects, "
                             "but %s was found"
                             % type(instr).__name__)

    @staticmethod
    def from_code(code):
        """Build a Bytecode object from a raw code object."""
        return _bytecode.ConcreteBytecode.from_code(code).to_bytecode()

    def compute_stacksize(self):
        """Compute the required stack size via a control-flow graph."""
        return _bytecode.ControlFlowGraph.from_bytecode(self).compute_stacksize()

    def to_code(self, compute_jumps_passes=None, stacksize=None):
        """Convert to a code object (via concrete bytecode)."""
        concrete = self.to_concrete_bytecode(
            compute_jumps_passes=compute_jumps_passes)
        return concrete.to_code(stacksize=stacksize)

    def to_concrete_bytecode(self, compute_jumps_passes=None):
        """Resolve labels and jumps and return a ConcreteBytecode."""
        converter = _bytecode._ConvertBytecodeToConcrete(self)
        return converter.to_concrete_bytecode(
            compute_jumps_passes=compute_jumps_passes)
| {
"repo_name": "haypo/bytecode",
"path": "bytecode/bytecode.py",
"copies": "1",
"size": "4763",
"license": "mit",
"hash": 5956348102641009000,
"line_mean": 31.4013605442,
"line_max": 77,
"alpha_frac": 0.5767373504,
"autogenerated": false,
"ratio": 4.341841385597083,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5418578735997083,
"avg_score": null,
"num_lines": null
} |
# alias to keep the 'bytecode' variable free
import bytecode as _bytecode
try:
from enum import IntFlag
except ImportError:
from aenum import IntFlag
class CompilerFlags(IntFlag):
    """Possible values of the co_flags attribute of Code object.
    Note: We do not rely on inspect values here as some of them are missing and
    furthermore would be version dependent.
    """

    OPTIMIZED = 0x00001  # noqa
    NEWLOCALS = 0x00002  # noqa
    VARARGS = 0x00004  # noqa
    VARKEYWORDS = 0x00008  # noqa
    NESTED = 0x00010  # noqa
    GENERATOR = 0x00020  # noqa
    NOFREE = 0x00040  # noqa
    # New in Python 3.5
    # Coroutine defined with "async def" (native coroutine).
    COROUTINE = 0x00080  # noqa
    # Generator-based coroutine decorated with types.coroutine.
    ITERABLE_COROUTINE = 0x00100  # noqa
    # New in Python 3.6
    # Generator defined inside an "async def" function.
    ASYNC_GENERATOR = 0x00200  # noqa
    # __future__ flags
    FUTURE_GENERATOR_STOP = 0x80000  # noqa
def infer_flags(bytecode, is_async=False):
    """Infer the proper flags for a bytecode based on the instructions.

    Parameters
    ----------
    bytecode : Bytecode | ConcreteBytecode | ControlFlowGraph
        Bytecode for which to infer the proper flags.
    is_async : bool, optional
        When true, yield instructions mark the code as an async generator
        rather than a plain generator.
    """
    flags = CompilerFlags(0)
    if not isinstance(bytecode, (_bytecode.Bytecode,
                                 _bytecode.ConcreteBytecode,
                                 _bytecode.ControlFlowGraph)):
        msg = ('Expected a Bytecode, ConcreteBytecode or ControlFlowGraph '
               'instance not %s')
        raise ValueError(msg % bytecode)
    # A ControlFlowGraph stores blocks, not a flat list: flatten it first.
    instructions = (bytecode.get_instructions()
                    if isinstance(bytecode, _bytecode.ControlFlowGraph) else
                    bytecode)
    instr_names = {i.name for i in instructions
                   if not isinstance(i, (_bytecode.SetLineno,
                                         _bytecode.Label))}
    # No name-based (slow) variable access instructions -> OPTIMIZED.
    if not (instr_names & {'STORE_NAME', 'LOAD_NAME', 'DELETE_NAME'}):
        flags |= CompilerFlags.OPTIMIZED
    # These flags cannot be inferred from instructions: keep the old value.
    flags |= bytecode.flags & (CompilerFlags.NEWLOCALS |
                               CompilerFlags.VARARGS |
                               CompilerFlags.VARKEYWORDS |
                               CompilerFlags.NESTED)
    # Yields make the code a generator; the is_async hint or a pre-existing
    # ASYNC_GENERATOR flag upgrades it to an async generator.
    if instr_names & {'YIELD_VALUE', 'YIELD_FROM'}:
        if not is_async and not bytecode.flags & CompilerFlags.ASYNC_GENERATOR:
            flags |= CompilerFlags.GENERATOR
        else:
            flags |= CompilerFlags.ASYNC_GENERATOR
    # No closure-related instructions -> NOFREE.
    if not (instr_names & {'LOAD_CLOSURE', 'LOAD_DEREF', 'STORE_DEREF',
                           'DELETE_DEREF', 'LOAD_CLASSDEREF'}):
        flags |= CompilerFlags.NOFREE
    # Flag as a coroutine on async-only instructions (or an existing
    # COROUTINE flag), unless it is an iterable coroutine or async generator.
    if (not (bytecode.flags & CompilerFlags.ITERABLE_COROUTINE or
             flags & CompilerFlags.ASYNC_GENERATOR) and
            (instr_names & {'GET_AWAITABLE', 'GET_AITER', 'GET_ANEXT',
                            'BEFORE_ASYNC_WITH', 'SETUP_ASYNC_WITH'} or
             bytecode.flags & CompilerFlags.COROUTINE)):
        flags |= CompilerFlags.COROUTINE
    # Carry over flags that only the original code object can know.
    flags |= bytecode.flags & CompilerFlags.ITERABLE_COROUTINE
    flags |= bytecode.flags & CompilerFlags.FUTURE_GENERATOR_STOP
    # The four "kind" flags are mutually exclusive.
    if ([bool(flags & getattr(CompilerFlags, k))
         for k in ('COROUTINE', 'ITERABLE_COROUTINE', 'GENERATOR',
                   'ASYNC_GENERATOR')].count(True) > 1):
        raise ValueError("Code should not have more than one of the "
                         "following flag set : generator, coroutine, "
                         "iterable coroutine and async generator, got:"
                         "%s" % flags)
    return flags
| {
"repo_name": "haypo/bytecode",
"path": "bytecode/flags.py",
"copies": "1",
"size": "3449",
"license": "mit",
"hash": 2976361441260728300,
"line_mean": 37.7528089888,
"line_max": 79,
"alpha_frac": 0.5737895042,
"autogenerated": false,
"ratio": 4.130538922155688,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002553626149131767,
"num_lines": 89
} |
# alias to keep the 'bytecode' variable free
import sys
from _pydevd_frame_eval.vendored import bytecode as _bytecode
from _pydevd_frame_eval.vendored.bytecode.instr import UNSET, Label, SetLineno, Instr
from _pydevd_frame_eval.vendored.bytecode.flags import infer_flags
class BaseBytecode:
    """Shared metadata for all bytecode containers: argument counts, names,
    compiler flags and cell/free variables."""

    def __init__(self):
        self.argcount = 0
        if sys.version_info > (3, 8):
            self.posonlyargcount = 0
        self.kwonlyargcount = 0
        self.first_lineno = 1
        self.name = "<module>"
        self.filename = "<string>"
        self.docstring = UNSET
        self.cellvars = []
        # we cannot recreate freevars from instructions because of super()
        # special-case
        self.freevars = []
        self._flags = _bytecode.CompilerFlags(0)

    def _copy_attr_from(self, bytecode):
        """Copy every metadata attribute from *bytecode* onto self."""
        scalar = ["argcount"]
        if sys.version_info > (3, 8):
            scalar.append("posonlyargcount")
        scalar += ["kwonlyargcount", "flags", "first_lineno", "name",
                   "filename", "docstring"]
        for attr in scalar:
            # "flags" goes through the property setter below.
            setattr(self, attr, getattr(bytecode, attr))
        self.cellvars = list(bytecode.cellvars)
        self.freevars = list(bytecode.freevars)

    def __eq__(self, other):
        """Metadata equality, including the computed stack size; the
        instruction lists themselves are not compared here."""
        if type(self) != type(other):
            return False
        names = ["argcount"]
        if sys.version_info > (3, 8):
            names.append("posonlyargcount")
        names += ["kwonlyargcount", "flags", "first_lineno", "filename",
                  "name", "docstring", "cellvars", "freevars"]
        if any(getattr(self, n) != getattr(other, n) for n in names):
            return False
        return self.compute_stacksize() == other.compute_stacksize()

    @property
    def flags(self):
        """Compiler flags, always a CompilerFlags value."""
        return self._flags

    @flags.setter
    def flags(self, value):
        # Coerce plain ints so _flags stays a CompilerFlags instance.
        if not isinstance(value, _bytecode.CompilerFlags):
            value = _bytecode.CompilerFlags(value)
        self._flags = value

    def update_flags(self, *, is_async=None):
        """Re-infer the flags from the instructions (see flags.infer_flags)."""
        self.flags = infer_flags(self, is_async)
class _BaseBytecodeList(BaseBytecode, list):
    """List subclass providing type stable slicing and copying."""

    def __getitem__(self, index):
        item = super().__getitem__(index)
        # Slices come back as the same concrete type, metadata included.
        if isinstance(index, slice):
            item = type(self)(item)
            item._copy_attr_from(self)
        return item

    def copy(self):
        """Like list.copy() but preserves the concrete type and metadata."""
        duplicate = type(self)(super().copy())
        duplicate._copy_attr_from(self)
        return duplicate

    def legalize(self):
        """Check that all the element of the list are valid and remove SetLineno."""
        to_remove = []
        pending_lineno = None
        last_lineno = self.first_lineno
        for pos, instr in enumerate(self):
            if isinstance(instr, SetLineno):
                # Remember the pseudo-instruction for removal; its line
                # number applies to the instructions that follow.
                pending_lineno = instr.lineno
                to_remove.append(pos)
            elif isinstance(instr, Instr):
                # Labels fall through untouched.
                if pending_lineno is not None:
                    instr.lineno = pending_lineno
                elif instr.lineno is None:
                    instr.lineno = last_lineno
                else:
                    last_lineno = instr.lineno
        # Delete from the end so earlier positions stay valid.
        while to_remove:
            del self[to_remove.pop()]

    def __iter__(self):
        for instr in super().__iter__():
            self._check_instr(instr)
            yield instr

    def _check_instr(self, instr):
        raise NotImplementedError()
class _InstrList(list):
def _flat(self):
instructions = []
labels = {}
jumps = []
offset = 0
for index, instr in enumerate(self):
if isinstance(instr, Label):
instructions.append("label_instr%s" % index)
labels[instr] = offset
else:
if isinstance(instr, Instr) and isinstance(instr.arg, Label):
target_label = instr.arg
instr = _bytecode.ConcreteInstr(instr.name, 0, lineno=instr.lineno)
jumps.append((target_label, instr))
instructions.append(instr)
offset += 1
for target_label, instr in jumps:
instr.arg = labels[target_label]
return instructions
def __eq__(self, other):
if not isinstance(other, _InstrList):
other = _InstrList(other)
return self._flat() == other._flat()
class Bytecode(_InstrList, _BaseBytecodeList):
    """Abstract bytecode: a list of Label, SetLineno and Instr objects plus
    code-object metadata."""

    def __init__(self, instructions=()):
        """Initialize metadata and store *instructions* after validation.

        The iterable is materialized once up front: the previous code
        iterated it both for validation and for extend(), which left the
        Bytecode empty when a one-shot iterator (e.g. a generator) was
        passed.
        """
        BaseBytecode.__init__(self)
        self.argnames = []
        # Materialize first so the validation loop cannot exhaust a
        # generator before extend() runs.
        instructions = list(instructions)
        for instr in instructions:
            self._check_instr(instr)
        self.extend(instructions)

    def __iter__(self):
        """Yield instructions, re-validating each item."""
        instructions = super().__iter__()
        for instr in instructions:
            self._check_instr(instr)
            yield instr

    def _check_instr(self, instr):
        """Raise ValueError for anything but Label/SetLineno/Instr."""
        if not isinstance(instr, (Label, SetLineno, Instr)):
            raise ValueError(
                "Bytecode must only contain Label, "
                "SetLineno, and Instr objects, "
                "but %s was found" % type(instr).__name__
            )

    def _copy_attr_from(self, bytecode):
        """Copy metadata; argnames too when the source is a Bytecode."""
        super()._copy_attr_from(bytecode)
        if isinstance(bytecode, Bytecode):
            self.argnames = bytecode.argnames

    @staticmethod
    def from_code(code):
        """Build a Bytecode from a raw code object (via ConcreteBytecode)."""
        concrete = _bytecode.ConcreteBytecode.from_code(code)
        return concrete.to_bytecode()

    def compute_stacksize(self):
        """Compute the stack size via a control-flow-graph conversion."""
        cfg = _bytecode.ControlFlowGraph.from_bytecode(self)
        return cfg.compute_stacksize()

    def to_code(self, compute_jumps_passes=None, stacksize=None):
        """Convert to a Python code object."""
        bc = self.to_concrete_bytecode(compute_jumps_passes=compute_jumps_passes)
        return bc.to_code(stacksize=stacksize)

    def to_concrete_bytecode(self, compute_jumps_passes=None):
        """Convert to ConcreteBytecode (labels resolved to offsets)."""
        converter = _bytecode._ConvertBytecodeToConcrete(self)
        return converter.to_concrete_bytecode(compute_jumps_passes=compute_jumps_passes)
| {
"repo_name": "fabioz/PyDev.Debugger",
"path": "_pydevd_frame_eval/vendored/bytecode/bytecode.py",
"copies": "1",
"size": "6650",
"license": "epl-1.0",
"hash": -2455584496067711000,
"line_mean": 30.9711538462,
"line_max": 88,
"alpha_frac": 0.5822556391,
"autogenerated": false,
"ratio": 4.232972628898791,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002818160687451892,
"num_lines": 208
} |
# alias to keep the 'bytecode' variable free
from _pydevd_frame_eval.vendored import bytecode as _bytecode
from _pydevd_frame_eval.vendored.bytecode.concrete import ConcreteInstr
from _pydevd_frame_eval.vendored.bytecode.instr import Label, SetLineno, Instr
class BasicBlock(_bytecode._InstrList):
    """Straight-line instruction sequence: only the last instruction may be
    a jump, and its target must be another BasicBlock."""

    def __init__(self, instructions=None):
        # a BasicBlock object, or None
        self.next_block = None
        if instructions:
            super().__init__(instructions)

    def __iter__(self):
        """Yield instructions while enforcing the basic-block invariants."""
        index = 0
        while index < len(self):
            instr = self[index]
            index += 1
            if not isinstance(instr, (SetLineno, Instr)):
                raise ValueError(
                    "BasicBlock must only contain SetLineno and Instr objects, "
                    "but %s was found" % instr.__class__.__name__
                )
            if isinstance(instr, Instr) and instr.has_jump():
                if index < len(self):
                    raise ValueError(
                        "Only the last instruction of a basic " "block can be a jump"
                    )
                if not isinstance(instr.arg, BasicBlock):
                    # BUGFIX: the message and the type name used to be passed
                    # as two separate ValueError arguments, so the %s
                    # placeholder was never substituted into the message.
                    raise ValueError(
                        "Jump target must be a BasicBlock, got %s"
                        % type(instr.arg).__name__
                    )
            yield instr

    def __getitem__(self, index):
        value = super().__getitem__(index)
        # Slices keep the concrete type and the fall-through link.
        if isinstance(index, slice):
            value = type(self)(value)
            value.next_block = self.next_block
        return value

    def copy(self):
        """Copy the block, preserving the fall-through link."""
        new = type(self)(super().copy())
        new.next_block = self.next_block
        return new

    def legalize(self, first_lineno):
        """Check that all the element of the list are valid and remove SetLineno."""
        lineno_pos = []
        set_lineno = None
        current_lineno = first_lineno
        for pos, instr in enumerate(self):
            if isinstance(instr, SetLineno):
                set_lineno = current_lineno = instr.lineno
                lineno_pos.append(pos)
                continue
            if set_lineno is not None:
                instr.lineno = set_lineno
            elif instr.lineno is None:
                instr.lineno = current_lineno
            else:
                current_lineno = instr.lineno
        # Delete from the end so earlier positions stay valid.
        for i in reversed(lineno_pos):
            del self[i]
        return current_lineno

    def get_jump(self):
        """Return the jump target block, or None if the block has no jump."""
        if not self:
            return None
        last_instr = self[-1]
        if not (isinstance(last_instr, Instr) and last_instr.has_jump()):
            return None
        target_block = last_instr.arg
        assert isinstance(target_block, BasicBlock)
        return target_block
def _compute_stack_size(block, size, maxsize, *, check_pre_and_post=True):
    """Generator used to reduce the use of function stacks.
    This allows to avoid nested recursion and allow to treat more cases.
    HOW-TO:
    Following the methods of Trampoline
    (see https://en.wikipedia.org/wiki/Trampoline_(computing)),
    We yield either:
    - the arguments that would be used in the recursive calls, i.e,
      'yield block, size, maxsize' instead of making a recursive call
      '_compute_stack_size(block, size, maxsize)', if we encounter an
      instruction jumping to another block or if the block is linked to
      another one (ie `next_block` is set)
    - the required stack from the stack if we went through all the instructions
      or encountered an unconditional jump.
    In the first case, the calling function is then responsible for creating a
    new generator with those arguments, iterating over it till exhaustion to
    determine the stacksize required by the block and resuming this function
    with the determined stacksize.
    """
    # If the block is currently being visited (seen = True) or if it was visited
    # previously by using a larger starting size than the one in use, return the
    # maxsize.
    if block.seen or block.startsize >= size:
        # NOTE(review): no return after this yield — the driver in
        # compute_stacksize treats a yielded int as final and discards the
        # generator, so execution never resumes past this point.
        yield maxsize

    def update_size(pre_delta, post_delta, size, maxsize):
        # Apply the stack effect in two steps so a transient negative
        # depth (more pops than available) is detected.
        size += pre_delta
        if size < 0:
            msg = "Failed to compute stacksize, got negative size"
            raise RuntimeError(msg)
        size += post_delta
        maxsize = max(maxsize, size)
        return size, maxsize

    # Prevent recursive visit of block if two blocks are nested (jump from one
    # to the other).
    block.seen = True
    block.startsize = size
    for instr in block:
        # Ignore SetLineno
        if isinstance(instr, SetLineno):
            continue
        # For instructions with a jump first compute the stacksize required when the
        # jump is taken.
        if instr.has_jump():
            effect = (
                instr.pre_and_post_stack_effect(jump=True)
                if check_pre_and_post
                else (instr.stack_effect(jump=True), 0)
            )
            taken_size, maxsize = update_size(*effect, size, maxsize)
            # Yield the parameters required to compute the stacksize required
            # by the block to which the jump points to and resume when we know
            # the maxsize.
            maxsize = yield instr.arg, taken_size, maxsize
            # For unconditional jumps abort early since the other instruction will
            # never be seen.
            if instr.is_uncond_jump():
                block.seen = False
                yield maxsize
        # jump=False: non-taken path of jumps, or any non-jump
        effect = (
            instr.pre_and_post_stack_effect(jump=False)
            if check_pre_and_post
            else (instr.stack_effect(jump=False), 0)
        )
        size, maxsize = update_size(*effect, size, maxsize)
    if block.next_block:
        maxsize = yield block.next_block, size, maxsize
    block.seen = False
    yield maxsize
class ControlFlowGraph(_bytecode.BaseBytecode):
    """Bytecode represented as an ordered list of BasicBlocks.

    A block may fall through to ``next_block`` and/or jump to another block
    via its last instruction.
    """

    def __init__(self):
        super().__init__()
        self._blocks = []
        # id(block) -> index in _blocks, for O(1) membership/index lookups
        self._block_index = {}
        self.argnames = []
        self.add_block()

    def legalize(self):
        """Legalize all blocks."""
        current_lineno = self.first_lineno
        for block in self._blocks:
            current_lineno = block.legalize(current_lineno)

    def get_block_index(self, block):
        """Return the index of *block*; raise ValueError for foreign blocks."""
        try:
            return self._block_index[id(block)]
        except KeyError:
            raise ValueError("the block is not part of this bytecode")

    def _add_block(self, block):
        # Append *block* and record its index.
        block_index = len(self._blocks)
        self._blocks.append(block)
        self._block_index[id(block)] = block_index

    def add_block(self, instructions=None):
        """Create, register and return a new BasicBlock."""
        block = BasicBlock(instructions)
        self._add_block(block)
        return block

    def compute_stacksize(self, *, check_pre_and_post=True):
        """Compute the stack size by iterating through the blocks
        The implementation make use of a generator function to avoid issue with
        deeply nested recursions.
        """
        # In the absence of any block return 0
        if not self:
            return 0
        # Ensure that previous calculation do not impact this one.
        for block in self:
            block.seen = False
            block.startsize = -32768  # INT_MIN
        # Create a generator/coroutine responsible of dealing with the first block
        coro = _compute_stack_size(self[0], 0, 0, check_pre_and_post=check_pre_and_post)
        # Create a list of generator that have not yet been exhausted
        coroutines = []
        push_coroutine = coroutines.append
        pop_coroutine = coroutines.pop
        args = None
        try:
            while True:
                args = coro.send(None)
                # Consume the stored generators as long as they return a simple
                # integer that is to be used to resume the last stored generator.
                while isinstance(args, int):
                    coro = pop_coroutine()
                    args = coro.send(args)
                # Otherwise we enter a new block and we store the generator under
                # use and create a new one to process the new block
                push_coroutine(coro)
                coro = _compute_stack_size(*args, check_pre_and_post=check_pre_and_post)
        except IndexError:
            # The exception occurs when all the generators have been exhausted
            # in which case the last yielded value is the stacksize.
            assert args is not None
            return args

    def __repr__(self):
        return "<ControlFlowGraph block#=%s>" % len(self._blocks)

    def get_instructions(self):
        """Return a flat instruction list; jump targets become block indexes."""
        instructions = []
        jumps = []
        for block in self:
            target_block = block.get_jump()
            if target_block is not None:
                # Replace the jump with a placeholder copy; its argument is
                # patched once every block index is known.
                instr = block[-1]
                instr = ConcreteInstr(instr.name, 0, lineno=instr.lineno)
                jumps.append((target_block, instr))
                instructions.extend(block[:-1])
                instructions.append(instr)
            else:
                instructions.extend(block)
        for target_block, instr in jumps:
            instr.arg = self.get_block_index(target_block)
        return instructions

    def __eq__(self, other):
        # Compare argnames, the flattened instructions, then the metadata
        # (delegated to BaseBytecode.__eq__).
        if type(self) != type(other):
            return False
        if self.argnames != other.argnames:
            return False
        instrs1 = self.get_instructions()
        instrs2 = other.get_instructions()
        if instrs1 != instrs2:
            return False
        # FIXME: compare block.next_block
        return super().__eq__(other)

    def __len__(self):
        return len(self._blocks)

    def __iter__(self):
        return iter(self._blocks)

    def __getitem__(self, index):
        # Accept either an integer index or a BasicBlock.
        if isinstance(index, BasicBlock):
            index = self.get_block_index(index)
        return self._blocks[index]

    def __delitem__(self, index):
        if isinstance(index, BasicBlock):
            index = self.get_block_index(index)
        block = self._blocks[index]
        del self._blocks[index]
        del self._block_index[id(block)]
        # Shift the recorded index of every following block down by one.
        for index in range(index, len(self)):
            block = self._blocks[index]
            self._block_index[id(block)] -= 1

    def split_block(self, block, index):
        """Split *block* before *index*; return the block holding the tail.

        The head keeps the first *index* instructions and falls through to
        the new tail block.
        """
        if not isinstance(block, BasicBlock):
            raise TypeError("expected block")
        block_index = self.get_block_index(block)
        if index < 0:
            raise ValueError("index must be positive")
        block = self._blocks[block_index]
        if index == 0:
            return block
        if index > len(block):
            raise ValueError("index out of the block")
        instructions = block[index:]
        if not instructions:
            # Nothing to split off: reuse the following block when it exists.
            if block_index + 1 < len(self):
                return self[block_index + 1]
        del block[index:]
        block2 = BasicBlock(instructions)
        block.next_block = block2
        # Renumber the blocks after the insertion point.
        for block in self[block_index + 1 :]:
            self._block_index[id(block)] += 1
        self._blocks.insert(block_index + 1, block2)
        self._block_index[id(block2)] = block_index + 1
        return block2

    @staticmethod
    def from_bytecode(bytecode):
        """Build a ControlFlowGraph from a label-based bytecode object."""
        # label => instruction index
        label_to_block_index = {}
        jumps = []
        block_starts = {}
        for index, instr in enumerate(bytecode):
            if isinstance(instr, Label):
                label_to_block_index[instr] = index
            else:
                if isinstance(instr, Instr) and isinstance(instr.arg, Label):
                    jumps.append((index, instr.arg))
        # Every jump target starts a new block.
        for target_index, target_label in jumps:
            target_index = label_to_block_index[target_label]
            block_starts[target_index] = target_label
        bytecode_blocks = _bytecode.ControlFlowGraph()
        bytecode_blocks._copy_attr_from(bytecode)
        bytecode_blocks.argnames = list(bytecode.argnames)
        # copy instructions, convert labels to block labels
        block = bytecode_blocks[0]
        labels = {}
        jumps = []
        for index, instr in enumerate(bytecode):
            if index in block_starts:
                old_label = block_starts[index]
                if index != 0:
                    new_block = bytecode_blocks.add_block()
                    # Link the blocks unless the previous one is final.
                    if not block[-1].is_final():
                        block.next_block = new_block
                    block = new_block
                if old_label is not None:
                    labels[old_label] = block
            elif block and isinstance(block[-1], Instr):
                # A final or jumping instruction also closes the block.
                if block[-1].is_final():
                    block = bytecode_blocks.add_block()
                elif block[-1].has_jump():
                    new_block = bytecode_blocks.add_block()
                    block.next_block = new_block
                    block = new_block
            if isinstance(instr, Label):
                continue
            # don't copy SetLineno objects
            if isinstance(instr, Instr):
                instr = instr.copy()
                if isinstance(instr.arg, Label):
                    jumps.append(instr)
            block.append(instr)
        # Patch jump arguments now that every label has a block.
        for instr in jumps:
            label = instr.arg
            instr.arg = labels[label]
        return bytecode_blocks

    def to_bytecode(self):
        """Convert to Bytecode."""
        # Labels are only created for blocks that are actually jumped to.
        used_blocks = set()
        for block in self:
            target_block = block.get_jump()
            if target_block is not None:
                used_blocks.add(id(target_block))
        labels = {}
        jumps = []
        instructions = []
        for block in self:
            if id(block) in used_blocks:
                new_label = Label()
                labels[id(block)] = new_label
                instructions.append(new_label)
            for instr in block:
                # don't copy SetLineno objects
                if isinstance(instr, Instr):
                    instr = instr.copy()
                    if isinstance(instr.arg, BasicBlock):
                        jumps.append(instr)
                instructions.append(instr)
        # Map to new labels
        for instr in jumps:
            instr.arg = labels[id(instr.arg)]
        bytecode = _bytecode.Bytecode()
        bytecode._copy_attr_from(self)
        bytecode.argnames = list(self.argnames)
        bytecode[:] = instructions
        return bytecode

    def to_code(self, stacksize=None):
        """Convert to code."""
        if stacksize is None:
            stacksize = self.compute_stacksize()
        bc = self.to_bytecode()
        return bc.to_code(stacksize=stacksize)
| {
"repo_name": "glenngillen/dotfiles",
"path": ".vscode/extensions/ms-python.python-2021.5.842923320/pythonFiles/lib/python/debugpy/_vendored/pydevd/_pydevd_frame_eval/vendored/bytecode/cfg.py",
"copies": "1",
"size": "15328",
"license": "mit",
"hash": -223557488881105380,
"line_mean": 32.2142857143,
"line_max": 88,
"alpha_frac": 0.545341858,
"autogenerated": false,
"ratio": 4.561904761904762,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5607246619904762,
"avg_score": null,
"num_lines": null
} |
# alias to keep the 'bytecode' variable free
from _pydevd_frame_eval.vendored import bytecode as _bytecode
try:
from enum import IntFlag
except ImportError:
from aenum import IntFlag
class CompilerFlags(IntFlag):
    """Possible values of the co_flags attribute of Code object.
    Note: We do not rely on inspect values here as some of them are missing and
    furthermore would be version dependent.
    """

    OPTIMIZED = 0x00001  # noqa
    NEWLOCALS = 0x00002  # noqa
    VARARGS = 0x00004  # noqa
    VARKEYWORDS = 0x00008  # noqa
    NESTED = 0x00010  # noqa
    GENERATOR = 0x00020  # noqa
    NOFREE = 0x00040  # noqa
    # New in Python 3.5
    # Used for coroutines defined using async def ie native coroutine
    COROUTINE = 0x00080  # noqa
    # Used for coroutines defined as a generator and then decorated using
    # types.coroutine
    ITERABLE_COROUTINE = 0x00100  # noqa
    # New in Python 3.6
    # Generator defined in an async def function
    ASYNC_GENERATOR = 0x00200  # noqa
    # __future__ flags
    FUTURE_GENERATOR_STOP = 0x80000  # noqa
def infer_flags(bytecode, is_async=None):
    """Infer the proper flags for a bytecode based on the instructions.
    Because the bytecode does not have enough context to guess if a function
    is asynchronous the algorithm tries to be conservative and will never turn
    a previously async code into a sync one.
    Parameters
    ----------
    bytecode : Bytecode | ConcreteBytecode | ControlFlowGraph
        Bytecode for which to infer the proper flags
    is_async : bool | None, optional
        Force the code to be marked as asynchronous if True, prevent it from
        being marked as asynchronous if False and simply infer the best
        solution based on the opcode and the existing flag if None.
    """
    flags = CompilerFlags(0)
    if not isinstance(
        bytecode,
        (_bytecode.Bytecode, _bytecode.ConcreteBytecode, _bytecode.ControlFlowGraph),
    ):
        msg = (
            "Expected a Bytecode, ConcreteBytecode or ControlFlowGraph "
            "instance not %s"
        )
        raise ValueError(msg % bytecode)
    # A ControlFlowGraph stores blocks, not a flat list: flatten it first.
    instructions = (
        bytecode.get_instructions()
        if isinstance(bytecode, _bytecode.ControlFlowGraph)
        else bytecode
    )
    instr_names = {
        i.name
        for i in instructions
        if not isinstance(i, (_bytecode.SetLineno, _bytecode.Label))
    }
    # Identify optimized code
    if not (instr_names & {"STORE_NAME", "LOAD_NAME", "DELETE_NAME"}):
        flags |= CompilerFlags.OPTIMIZED
    # Check for free variables
    if not (
        instr_names
        & {
            "LOAD_CLOSURE",
            "LOAD_DEREF",
            "STORE_DEREF",
            "DELETE_DEREF",
            "LOAD_CLASSDEREF",
        }
    ):
        flags |= CompilerFlags.NOFREE
    # Copy flags for which we cannot infer the right value
    flags |= bytecode.flags & (
        CompilerFlags.NEWLOCALS
        | CompilerFlags.VARARGS
        | CompilerFlags.VARKEYWORDS
        | CompilerFlags.NESTED
    )
    # Instruction-name evidence used by the decision tree below.
    sure_generator = instr_names & {"YIELD_VALUE"}
    maybe_generator = instr_names & {"YIELD_VALUE", "YIELD_FROM"}
    sure_async = instr_names & {
        "GET_AWAITABLE",
        "GET_AITER",
        "GET_ANEXT",
        "BEFORE_ASYNC_WITH",
        "SETUP_ASYNC_WITH",
        "END_ASYNC_FOR",
    }
    # If performing inference or forcing an async behavior, first inspect
    # the flags since this is the only way to identify iterable coroutines
    if is_async in (None, True):
        if bytecode.flags & CompilerFlags.COROUTINE:
            if sure_generator:
                flags |= CompilerFlags.ASYNC_GENERATOR
            else:
                flags |= CompilerFlags.COROUTINE
        elif bytecode.flags & CompilerFlags.ITERABLE_COROUTINE:
            if sure_async:
                msg = (
                    "The ITERABLE_COROUTINE flag is set but bytecode that"
                    "can only be used in async functions have been "
                    "detected. Please unset that flag before performing "
                    "inference."
                )
                raise ValueError(msg)
            flags |= CompilerFlags.ITERABLE_COROUTINE
        elif bytecode.flags & CompilerFlags.ASYNC_GENERATOR:
            if not sure_generator:
                flags |= CompilerFlags.COROUTINE
            else:
                flags |= CompilerFlags.ASYNC_GENERATOR
        # If the code was not asynchronous before determine if it should now be
        # asynchronous based on the opcode and the is_async argument.
        else:
            if sure_async:
                # YIELD_FROM is not allowed in async generator
                if sure_generator:
                    flags |= CompilerFlags.ASYNC_GENERATOR
                else:
                    flags |= CompilerFlags.COROUTINE
            elif maybe_generator:
                if is_async:
                    if sure_generator:
                        flags |= CompilerFlags.ASYNC_GENERATOR
                    else:
                        flags |= CompilerFlags.COROUTINE
                else:
                    flags |= CompilerFlags.GENERATOR
            elif is_async:
                flags |= CompilerFlags.COROUTINE
    # If the code should not be asynchronous, check first it is possible and
    # next set the GENERATOR flag if relevant
    else:
        if sure_async:
            raise ValueError(
                "The is_async argument is False but bytecodes "
                "that can only be used in async functions have "
                "been detected."
            )
        if maybe_generator:
            flags |= CompilerFlags.GENERATOR
    flags |= bytecode.flags & CompilerFlags.FUTURE_GENERATOR_STOP
    return flags
| {
"repo_name": "glenngillen/dotfiles",
"path": ".vscode/extensions/ms-python.python-2021.5.842923320/pythonFiles/lib/python/debugpy/_vendored/pydevd/_pydevd_frame_eval/vendored/bytecode/flags.py",
"copies": "1",
"size": "5989",
"license": "mit",
"hash": -5257391810324939000,
"line_mean": 31.8361581921,
"line_max": 85,
"alpha_frac": 0.5817331775,
"autogenerated": false,
"ratio": 4.513187641296157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5594920818796157,
"avg_score": null,
"num_lines": null
} |
# alias to keep the 'bytecode' variable free
import sys
from _pydevd_frame_eval.vendored import bytecode as _bytecode
from _pydevd_frame_eval.vendored.bytecode.instr import UNSET, Label, SetLineno, Instr
from _pydevd_frame_eval.vendored.bytecode.flags import infer_flags
class BaseBytecode:
    """Base class for bytecode containers: holds the metadata of a code
    object (argument counts, names, flags, cell/free variables)."""

    def __init__(self):
        self.argcount = 0
        # posonlyargcount only exists on Python >= 3.8
        if sys.version_info > (3, 8):
            self.posonlyargcount = 0
        self.kwonlyargcount = 0
        self.first_lineno = 1
        self.name = "<module>"
        self.filename = "<string>"
        self.docstring = UNSET
        self.cellvars = []
        # we cannot recreate freevars from instructions because of super()
        # special-case
        self.freevars = []
        self._flags = _bytecode.CompilerFlags(0)

    def _copy_attr_from(self, bytecode):
        # Copy every metadata attribute from another bytecode object.
        self.argcount = bytecode.argcount
        if sys.version_info > (3, 8):
            self.posonlyargcount = bytecode.posonlyargcount
        self.kwonlyargcount = bytecode.kwonlyargcount
        self.flags = bytecode.flags
        self.first_lineno = bytecode.first_lineno
        self.name = bytecode.name
        self.filename = bytecode.filename
        self.docstring = bytecode.docstring
        self.cellvars = list(bytecode.cellvars)
        self.freevars = list(bytecode.freevars)

    def __eq__(self, other):
        # Metadata equality plus equal computed stack size; the instruction
        # lists themselves are not compared here.
        if type(self) != type(other):
            return False
        if self.argcount != other.argcount:
            return False
        if sys.version_info > (3, 8):
            if self.posonlyargcount != other.posonlyargcount:
                return False
        if self.kwonlyargcount != other.kwonlyargcount:
            return False
        if self.flags != other.flags:
            return False
        if self.first_lineno != other.first_lineno:
            return False
        if self.filename != other.filename:
            return False
        if self.name != other.name:
            return False
        if self.docstring != other.docstring:
            return False
        if self.cellvars != other.cellvars:
            return False
        if self.freevars != other.freevars:
            return False
        if self.compute_stacksize() != other.compute_stacksize():
            return False
        return True

    @property
    def flags(self):
        # Compiler flags, always stored as a CompilerFlags value.
        return self._flags

    @flags.setter
    def flags(self, value):
        # Coerce plain ints so _flags stays a CompilerFlags instance.
        if not isinstance(value, _bytecode.CompilerFlags):
            value = _bytecode.CompilerFlags(value)
        self._flags = value

    def update_flags(self, *, is_async=None):
        # Re-infer the flags from the instructions (see flags.infer_flags).
        self.flags = infer_flags(self, is_async)
class _BaseBytecodeList(BaseBytecode, list):
    """List subclass providing type stable slicing and copying."""

    def __getitem__(self, index):
        value = super().__getitem__(index)
        # Slices come back as the same concrete type with copied metadata,
        # not as a plain list.
        if isinstance(index, slice):
            value = type(self)(value)
            value._copy_attr_from(self)
        return value

    def copy(self):
        # Like list.copy() but preserves the concrete type and metadata.
        new = type(self)(super().copy())
        new._copy_attr_from(self)
        return new

    def legalize(self):
        """Check that all the element of the list are valid and remove SetLineno."""
        lineno_pos = []
        set_lineno = None
        current_lineno = self.first_lineno
        for pos, instr in enumerate(self):
            # Remember SetLineno pseudo-instructions for later removal; their
            # line number applies to the instructions that follow.
            if isinstance(instr, SetLineno):
                set_lineno = instr.lineno
                lineno_pos.append(pos)
                continue
            # Filter out Labels
            if not isinstance(instr, Instr):
                continue
            if set_lineno is not None:
                instr.lineno = set_lineno
            elif instr.lineno is None:
                instr.lineno = current_lineno
            else:
                current_lineno = instr.lineno
        # Delete from the end so earlier positions stay valid.
        for i in reversed(lineno_pos):
            del self[i]

    def __iter__(self):
        instructions = super().__iter__()
        for instr in instructions:
            # Each item is validated as it is yielded.
            self._check_instr(instr)
            yield instr

    def _check_instr(self, instr):
        # Subclasses define which instruction types are acceptable.
        raise NotImplementedError()
class _InstrList(list):
    """Instruction list providing label-aware structural equality."""

    def _flat(self):
        # Return a flattened copy of the list in which labels are replaced
        # by "label_instrN" marker strings and jump arguments by the
        # target's instruction offset, so two lists using distinct Label
        # objects can still compare equal.
        instructions = []
        labels = {}
        jumps = []
        offset = 0
        for index, instr in enumerate(self):
            if isinstance(instr, Label):
                instructions.append("label_instr%s" % index)
                # Labels do not advance the offset: they map to the offset
                # of the next real instruction.
                labels[instr] = offset
            else:
                if isinstance(instr, Instr) and isinstance(instr.arg, Label):
                    target_label = instr.arg
                    instr = _bytecode.ConcreteInstr(instr.name, 0, lineno=instr.lineno)
                    jumps.append((target_label, instr))
                instructions.append(instr)
                offset += 1
        # Patch the jump copies once every label offset is known.
        for target_label, instr in jumps:
            instr.arg = labels[target_label]
        return instructions

    def __eq__(self, other):
        if not isinstance(other, _InstrList):
            other = _InstrList(other)
        return self._flat() == other._flat()
class Bytecode(_InstrList, _BaseBytecodeList):
    """Abstract bytecode: a list of Label, SetLineno and Instr objects plus
    code-object metadata."""

    def __init__(self, instructions=()):
        """Initialize metadata and store *instructions* after validation.

        The iterable is materialized once up front: the previous code
        iterated it both for validation and for extend(), which left the
        Bytecode empty when a one-shot iterator (e.g. a generator) was
        passed.
        """
        BaseBytecode.__init__(self)
        self.argnames = []
        # Materialize first so the validation loop cannot exhaust a
        # generator before extend() runs.
        instructions = list(instructions)
        for instr in instructions:
            self._check_instr(instr)
        self.extend(instructions)

    def __iter__(self):
        """Yield instructions, re-validating each item."""
        instructions = super().__iter__()
        for instr in instructions:
            self._check_instr(instr)
            yield instr

    def _check_instr(self, instr):
        """Raise ValueError for anything but Label/SetLineno/Instr."""
        if not isinstance(instr, (Label, SetLineno, Instr)):
            raise ValueError(
                "Bytecode must only contain Label, "
                "SetLineno, and Instr objects, "
                "but %s was found" % type(instr).__name__
            )

    def _copy_attr_from(self, bytecode):
        """Copy metadata; argnames too when the source is a Bytecode."""
        super()._copy_attr_from(bytecode)
        if isinstance(bytecode, Bytecode):
            self.argnames = bytecode.argnames

    @staticmethod
    def from_code(code):
        """Build a Bytecode from a raw code object (via ConcreteBytecode)."""
        concrete = _bytecode.ConcreteBytecode.from_code(code)
        return concrete.to_bytecode()

    def compute_stacksize(self, *, check_pre_and_post=True):
        """Compute the stack size via a control-flow-graph conversion."""
        cfg = _bytecode.ControlFlowGraph.from_bytecode(self)
        return cfg.compute_stacksize(check_pre_and_post=check_pre_and_post)

    def to_code(
        self, compute_jumps_passes=None, stacksize=None, *, check_pre_and_post=True
    ):
        """Convert to a Python code object."""
        bc = self.to_concrete_bytecode(compute_jumps_passes=compute_jumps_passes)
        return bc.to_code(stacksize=stacksize, check_pre_and_post=check_pre_and_post)

    def to_concrete_bytecode(self, compute_jumps_passes=None):
        """Convert to ConcreteBytecode (labels resolved to offsets)."""
        converter = _bytecode._ConvertBytecodeToConcrete(self)
        return converter.to_concrete_bytecode(compute_jumps_passes=compute_jumps_passes)
| {
"repo_name": "glenngillen/dotfiles",
"path": ".vscode/extensions/ms-python.python-2021.5.842923320/pythonFiles/lib/python/debugpy/_vendored/pydevd/_pydevd_frame_eval/vendored/bytecode/bytecode.py",
"copies": "1",
"size": "6986",
"license": "mit",
"hash": -6572307525388392000,
"line_mean": 31.9126213592,
"line_max": 88,
"alpha_frac": 0.568279416,
"autogenerated": false,
"ratio": 4.29379225568531,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.536207167168531,
"avg_score": null,
"num_lines": null
} |
# URL template for AliExpress/Alibaba portal REST calls: interpolates the
# API method name, the caller's API key and the urlencoded query string.
ALIBABA_API_URL = 'http://gw.api.alibaba.com/openapi/param2/2/portals.open/%(api_call)s/%(api_key)s?%(call_parameters)s'
# Short call names used by this client mapped to portal API method names.
ALIBABA_API_CALLS = {
    'list': 'api.listPromotionProduct',
    'details': 'api.getPromotionProductDetail',
    'links': 'api.getPromotionLinks'
}
# Query-string parameters accepted by each call type.
ALIBABA_API_PARAMS = {
    'list': [
        'fields',
        'keywords',
        'categoryId',
        'originalPriceFrom',
        'originalPriceTo',
        'volumeFrom',
        'volumeTo',
        'pageNo',
        'pageSize',
        'sort',
        'startCreditScore',
        'endCreditScore',
        'highQualityItems',
        'localCurrency',
        'language'
    ],
    'details': [
        'fields',
        'productId'
    ],
    'links': [
        'fields',
        'trackingId',
        'urls'
    ]
}
# Response fields that may be requested from each call type (these are the
# values accepted by the 'fields' request parameter).
ALIBABA_API_FIELDS = {
    'list': [
        'totalResults',
        'productId',
        'productTitle',
        'productUrl',
        'imageUrl',
        'originalPrice',
        'salePrice',
        'discount',
        'evaluateScore',
        'commission',
        'commissionRate',
        '30daysCommission',
        'volume',
        'packageType',
        'lotNum',
        'validTime',
        'localPrice',
        'allImageUrls'
    ],
    'details': [
        'productId',
        'productTitle',
        'productUrl',
        'imageUrl',
        'originalPrice',
        'salePrice',
        'discount',
        'evaluateScore',
        'commission',
        'commissionRate',
        '30daysCommission',
        'volume',
        'packageType',
        'lotNum',
        'validTime',
        'storeName',
        'storeUrl',
        'allImageUrls',
    ],
    'links': [
        'totalResults',
        'trackingId',
        'publisherId',
        'url',
        'promotionUrl',
        'localPrice'
    ]
}
# Numeric AliExpress category ids mapped to human-readable category names
# (used as the 'categoryId' parameter of the 'list' call).
ALIBABA_API_CATEGORIES = {
    3: 'Apparel & Accessories',
    34: 'Automobiles & Motorcycles',
    1501: 'Baby Products',
    66: 'Beauty & Health',
    7: 'Computer & Networking',
    13: 'Construction & Real Estate',
    44: 'Consumer Electronics',
    100008578: 'Customized Products',
    5: 'Electrical Equipment & Supplies',
    502: 'Electronic Components & Supplies',
    2: 'Food',
    1503: 'Furniture',
    200003655: 'Hair & Accessories',
    42: 'Hardware',
    15: 'Home & Garden',
    6: 'Home Appliances',
    200003590: 'Industry & Business',
    36: 'Jewelry & Watch',
    39: 'Lights & Lighting',
    1524: 'Luggage & Bags',
    21: 'Office & School Supplies',
    509: 'Phones & Telecommunications',
    30: 'Security & Protection',
    322: 'Shoes',
    200001075: 'Special Category',
    18: 'Sports & Entertainment',
    1420: 'Tools',
    26: 'Toys & Hobbies',
    1511: 'Watches',
    320: 'Wedding & Events'
}
# Error codes common to every call type. The original file repeated these
# nine entries verbatim in all three per-call tables; keeping them in one
# shared dict removes the triplication while producing identical values.
_ALIBABA_API_COMMON_ERROR_CODES = {
    20010000: 'Call succeeds',
    20020000: 'System Error',
    20030000: 'Unauthorized transfer request',
    20030010: 'Required parameters',
    20030020: 'Invalid protocol format',
    20030030: 'API version input parameter error',
    20030040: 'API name space input parameter error',
    20030050: 'API name input parameter error',
    20030060: 'Fields input parameter error',
}
# Error descriptions returned by the portal API, keyed first by call type
# ('list' / 'details' / 'links') and then by numeric error code. Codes from
# 20030070 upward are specific to each call type.
ALIBABA_API_ERROR_CODES = {
    'list': {
        **_ALIBABA_API_COMMON_ERROR_CODES,
        20030070: 'Keyword input parameter error',
        20030080: 'Category ID input parameter error',
        20030090: 'Tracking ID input parameter error',
        20030100: 'Commission rate input parameter error',
        20030110: 'Original Price input parameter error',
        20030120: 'Discount input parameter error',
        20030130: 'Volume input parameter error',
        20030140: 'Page number input parameter error',
        20030150: 'Page size input parameter error',
        20030160: 'Sort input parameter error',
        20030170: 'Credit Score input parameter error'
    },
    'details': {
        **_ALIBABA_API_COMMON_ERROR_CODES,
        20030070: 'Product ID input parameter error'
    },
    'links': {
        **_ALIBABA_API_COMMON_ERROR_CODES,
        20030070: 'Tracking ID input parameter error',
        20030080: 'URL input parameter error or beyond the maximum number of the URLs'
    }
}
| {
"repo_name": "kronas/python-aliexpress-api-client",
"path": "aliexpress_api_client/config.py",
"copies": "1",
"size": "4910",
"license": "mit",
"hash": 5042680528330674000,
"line_mean": 27.7134502924,
"line_max": 120,
"alpha_frac": 0.5753564155,
"autogenerated": false,
"ratio": 3.77401998462721,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.484937640012721,
"avg_score": null,
"num_lines": null
} |
# A library and utility for drawing ONNX nets. Most of this implementation has
# been borrowed from the caffe2 implementation
# https://github.com/caffe2/caffe2/blob/master/caffe2/python/net_drawer.py
#
# The script takes two required arguments:
# -input: a path to a serialized ModelProto .pb file.
# -output: a path to write a dot file representation of the graph
#
# Given this dot file representation, you can-for example-export this to svg
# with the graphviz `dot` utility, like so:
#
# $ dot -Tsvg my_output.dot -o my_output.svg
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
from collections import defaultdict
import json
from onnx import ModelProto, GraphProto, NodeProto
import pydot # type: ignore
from typing import Text, Any, Callable, Optional, Dict
# Graphviz attributes applied to operator nodes: green filled boxes with
# white text.
OP_STYLE = {
    'shape': 'box',
    'color': '#0F9D58',
    'style': 'filled',
    'fontcolor': '#FFFFFF'
}
# Graphviz attributes applied to tensor (blob) nodes.
BLOB_STYLE = {'shape': 'octagon'}
# A node producer maps (op proto, op index) to the pydot node drawn for it.
_NodeProducer = Callable[[NodeProto, int], pydot.Node]
def _escape_label(name): # type: (Text) -> Text
# json.dumps is poor man's escaping
return json.dumps(name)
def _form_and_sanitize_docstring(s):  # type: (Text) -> Text
    """Wrap *s* in a javascript:alert(...) URL, stripping characters that
    would break the surrounding markup (quotes and angle brackets)."""
    body = _escape_label(s).replace('"', '\'').replace('<', '').replace('>', '')
    return 'javascript:alert(' + body + ')'
def GetOpNodeProducer(embed_docstring=False, **kwargs):  # type: (bool, **Any) -> _NodeProducer
    """Return a producer turning (NodeProto, op index) into a pydot.Node.

    The node label lists the op name/type plus its input and output tensor
    names. When *embed_docstring* is true, the op's doc string is attached
    as a javascript: URL (useful when exporting to SVG). Extra **kwargs are
    forwarded to pydot.Node as Graphviz styling attributes.
    """
    def ReallyGetOpNode(op, op_id):  # type: (NodeProto, int) -> pydot.Node
        if op.name:
            node_name = '%s/%s (op#%d)' % (op.name, op.op_type, op_id)
        else:
            node_name = '%s (op#%d)' % (op.op_type, op_id)
        # Renamed from `input`/`output`: avoid shadowing the input() builtin.
        for i, input_name in enumerate(op.input):
            node_name += '\n input' + str(i) + ' ' + input_name
        for i, output_name in enumerate(op.output):
            node_name += '\n output' + str(i) + ' ' + output_name
        node = pydot.Node(node_name, **kwargs)
        if embed_docstring:
            node.set_URL(_form_and_sanitize_docstring(op.doc_string))
        return node
    return ReallyGetOpNode
def GetPydotGraph(
    graph,  # type: GraphProto
    name=None,  # type: Optional[Text]
    rankdir='LR',  # type: Text
    node_producer=None,  # type: Optional[_NodeProducer]
    embed_docstring=False,  # type: bool
):  # type: (...) -> pydot.Dot
    """Build a pydot graph of *graph*: one styled node per operator, octagon
    nodes for tensors, and edges following the dataflow.

    A tensor name that is written more than once gets a fresh node per
    rewrite (SSA-like), tracked via pydot_node_counts.
    """
    if node_producer is None:
        node_producer = GetOpNodeProducer(embed_docstring=embed_docstring, **OP_STYLE)
    pydot_graph = pydot.Dot(name, rankdir=rankdir)
    # Maps tensor name -> the pydot node currently representing it.
    pydot_nodes = {}  # type: Dict[Text, pydot.Node]
    # Number of times each tensor name has been (re)defined so far.
    pydot_node_counts = defaultdict(int)  # type: Dict[Text, int]
    for op_id, op in enumerate(graph.node):
        op_node = node_producer(op, op_id)
        pydot_graph.add_node(op_node)
        for input_name in op.input:
            if input_name not in pydot_nodes:
                # First sight of this tensor: create its blob node.
                input_node = pydot.Node(
                    _escape_label(
                        input_name + str(pydot_node_counts[input_name])),
                    label=_escape_label(input_name),
                    **BLOB_STYLE
                )
                pydot_nodes[input_name] = input_node
            else:
                input_node = pydot_nodes[input_name]
            pydot_graph.add_node(input_node)
            pydot_graph.add_edge(pydot.Edge(input_node, op_node))
        for output_name in op.output:
            if output_name in pydot_nodes:
                # Tensor name reused as an output: make a new versioned node
                # so earlier readers keep pointing at the old value.
                pydot_node_counts[output_name] += 1
            output_node = pydot.Node(
                _escape_label(
                    output_name + str(pydot_node_counts[output_name])),
                label=_escape_label(output_name),
                **BLOB_STYLE
            )
            pydot_nodes[output_name] = output_node
            pydot_graph.add_node(output_node)
            pydot_graph.add_edge(pydot.Edge(op_node, output_node))
    return pydot_graph
def main():  # type: () -> None
    """Command-line entry point: read a serialized ModelProto, write a dot
    file describing its graph."""
    parser = argparse.ArgumentParser(description="ONNX net drawer")
    parser.add_argument(
        "--input", type=Text, required=True,
        help="The input protobuf file.",
    )
    parser.add_argument(
        "--output", type=Text, required=True,
        help="The output protobuf file.",
    )
    parser.add_argument(
        "--rankdir", type=Text, default='LR',
        help="The rank direction of the pydot graph.",
    )
    parser.add_argument(
        "--embed_docstring", action="store_true",
        help="Embed docstring as javascript alert. Useful for SVG format.",
    )
    args = parser.parse_args()

    model = ModelProto()
    with open(args.input, 'rb') as fid:
        model.ParseFromString(fid.read())

    producer = GetOpNodeProducer(
        embed_docstring=args.embed_docstring,
        **OP_STYLE
    )
    pydot_graph = GetPydotGraph(
        model.graph,
        name=model.graph.name,
        rankdir=args.rankdir,
        node_producer=producer,
    )
    pydot_graph.write_dot(args.output)


if __name__ == '__main__':
    main()
| {
"repo_name": "mlperf/training_results_v0.7",
"path": "Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/3rdparty/onnx-tensorrt/third_party/onnx/onnx/tools/net_drawer.py",
"copies": "2",
"size": "5101",
"license": "apache-2.0",
"hash": 7507450820229027000,
"line_mean": 33.2348993289,
"line_max": 95,
"alpha_frac": 0.5951774162,
"autogenerated": false,
"ratio": 3.5203588681849554,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5115536284384955,
"avg_score": null,
"num_lines": null
} |
# A library file for simplifying pygame interaction. You MUST place this file in the same directory as your game py files.
'''This code is the original work of Luther Tychonievich, who releases it
into the public domain.
As a courtesy, Luther would appreciate it if you acknowledged him in any work
that benefited from this code.'''
from __future__ import division
import pygame, sys
import urllib, os.path
# Python 2/3 compatibility: urlretrieve moved to urllib.request in Python 3;
# bind whichever variant this interpreter provides.
if 'urlretrieve' not in dir(urllib):
    from urllib.request import urlretrieve as _urlretrieve
else:
    _urlretrieve = urllib.urlretrieve
pygame.init()
# caches so each image/sound is only loaded from disk or the network once
_known_images = {}
_known_sounds = {}
def _image(key, flip=False, w=0, h=0, angle=0):
    '''Load, transform and cache an image surface.

    The cache is keyed on (source key, flip, width, height, angle); w==0 and
    h==0 mean "natural size". Each transform (rotate, scale, flip) is built
    on top of the cached result of the less-transformed variant.
    '''
    # Unhashable sources (e.g. raw surfaces) are keyed by their object id.
    if '__hash__' not in dir(key):
        key = id(key)
    angle,w,h = int(angle), int(w), int(h)
    ans = None
    if (key,flip,w,h,angle) in _known_images:
        ans = _known_images[(key,flip,w,h,angle)]
    elif angle != 0:
        # Rotate the (possibly flipped/scaled) base image.
        base = _image(key,flip,w,h)
        img = pygame.transform.rotozoom(base, angle, 1)
        _known_images[(key,flip,w,h,angle)] = img
        ans = img
    elif w != 0 or h != 0:
        # Scale the (possibly flipped) base image to the requested size.
        base = _image(key, flip)
        img = pygame.transform.smoothscale(base, (w,h))
        _known_images[(key,flip,w,h,angle)] = img
        ans = img
    elif flip:
        base = _image(key)
        img = pygame.transform.flip(base, True, False)
        _known_images[(key,flip,w,h,angle)] = img
        ans = img
    else:
        img, _ = _get_image(key)
        _known_images[(key,flip,w,h,angle)] = img
        ans = img
    if w == 0 and h == 0:
        # Also register the natural-size variant under its explicit
        # width/height so later sized lookups hit the cache.
        if angle != 0: tmp = _image(key, flip, w, h)
        else: tmp = ans
        _known_images[(key,flip,tmp.get_width(),tmp.get_height(), angle)] = ans
    return ans
def _image_from_url(url):
    '''Download an image to a local file (once) and load it from there.'''
    filename = os.path.basename(url)
    if not os.path.exists(filename):
        # Be forgiving about missing schemes.
        if '://' not in url:
            url = 'http://' + url
        _urlretrieve(url, filename)
    return _image_from_file(filename)
def _image_from_file(filename):
    '''Load an image from a file and register it in the image cache.'''
    surface = pygame.image.load(filename).convert_alpha()
    # Cache under the plain filename and under a size-qualified key.
    _known_images[filename] = surface
    _known_images[(surface.get_width(), surface.get_height(), filename)] = surface
    return surface, filename
def _get_image(thing):
    '''Resolve *thing* to an image surface: cache first, then file, then url.

    Non-string things (already-built surfaces) are registered under an
    id-based key so they can be found again.
    '''
    if thing in _known_images:
        return _known_images[thing], thing
    id_key = '__id__' + str(id(thing))
    if id_key in _known_images:
        return _known_images[id_key], id_key
    if type(thing) is str:
        if os.path.exists(thing):
            return _image_from_file(thing)
        return _image_from_url(thing)
    _known_images[id_key] = thing
    _known_images[(thing.get_width(), thing.get_height(), id_key)] = thing
    return thing, id_key
def load_sprite_sheet(url_or_filename, rows, columns):
    '''Loads a sprite sheet. Assumes the sheet has rows-by-columns evenly-spaced images and returns a list of those images.'''
    sheet, _ = _get_image(url_or_filename)
    frame_h = sheet.get_height() / rows
    frame_w = sheet.get_width() / columns
    # Row-major order: all frames of row 0, then row 1, etc.
    return [
        sheet.subsurface(pygame.Rect(col * frame_w, row * frame_h, frame_w, frame_h))
        for row in range(rows)
        for col in range(columns)
    ]
__all__ = ['load_sprite_sheet']
def from_image(x, y, filename_or_url):
    '''Creates a SpriteBox object at the given location from the provided filename or url'''
    surface, _ = _get_image(filename_or_url)
    return SpriteBox(x, y, surface, None)
__all__.append('from_image')
def from_color(x, y, color, width, height):
    '''Creates a SpriteBox object at the given location with the given color, width, and height'''
    # No image: the box is rendered as a solid colored rectangle.
    return SpriteBox(x, y, None, color, width, height)
__all__.append('from_color')
def from_text(x, y, text, fontname, fontsize, color, bold=False, italic=False):
    '''Creates a SpriteBox object at the given location with the given text as its content'''
    matched = pygame.font.match_font(fontname.replace(" ","").lower())
    if matched is None:
        # pygame.font.Font(None, size) falls back to the default font.
        sys.stderr.write("ERROR: no font named "+fontname+"; using default font instead")
    renderer = pygame.font.Font(matched, fontsize)
    renderer.set_bold(bold)
    renderer.set_italic(italic)
    if type(color) is str:
        color = pygame.Color(color)
    return from_image(x, y, renderer.render(text, True, color))
__all__.append('from_text')
def load_sound(url_or_filename):
    '''Reads a sound file from a given filename or url'''
    # BUG FIX: the cache check previously consulted _known_images, so the
    # sound cache never hit (sounds were reloaded every call) and a
    # same-named image key would have caused a KeyError here.
    if url_or_filename in _known_sounds: return _known_sounds[url_or_filename]
    if not os.path.exists(url_or_filename):
        # Download remote sounds to a local file first (once).
        filename = os.path.basename(url_or_filename)
        if not os.path.exists(filename):
            _urlretrieve(url_or_filename, filename)
        url_or_filename = filename
    sound = pygame.mixer.Sound(url_or_filename)
    _known_sounds[url_or_filename] = sound
    return sound
__all__.append('load_sound')
class Camera(object):
    '''A camera defines what is visible. It has a width, height, full screen status,
    and can be moved. Moving a camera changes what is visible.
    '''
    is_initialized = False
    # __slots__ = ["_surface", "x", "y", "speedx", "speedy"]
    def __init__(self, width, height, full_screen=False):
        '''Camera(pixelsWide, pixelsTall, False) makes a window; using True instead makes a full-screen display.'''
        if Camera.is_initialized: raise Exception("You can only have one Camera at a time")
        # if height > 768: raise Exception("The Game Expo screens will only be 768 pixels tall")
        # if width > 1366: raise Exception("The Game Expo screens will only be 1366 pixels wide")
        if full_screen:
            self.__dict__['_surface'] = pygame.display.set_mode([width, height], pygame.FULLSCREEN)
        else:
            self.__dict__['_surface'] = pygame.display.set_mode([width, height])
        # (_x, _y) is the world coordinate of the window's top-left corner.
        self.__dict__['_x'] = 0
        self.__dict__['_y'] = 0
        Camera.is_initialized = True
    def move(self, x, y=None):
        '''camera.move(3, -7) moves the screen's center to be 3 more pixels to the right and 7 more up'''
        if y is None: x, y = x
        self.x += x
        self.y += y
    def draw(self, thing, *args):
        '''camera.draw(box) draws the provided SpriteBox object
        camera.draw(image, x, y) draws the provided image centered at the provided coordinates
        camera.draw("Hi", "Arial", 12, "red", x, y) draws the text Hi in a red 12-point Arial font at x,y'''
        if isinstance(thing, SpriteBox):
            thing.draw(self)
        elif isinstance(thing, pygame.Surface):
            try:
                if len(args) == 1: x,y = args[0]
                else: x,y = args[:2]
                self._surface.blit(thing, [x-thing.get_width()/2,y-thing.get_height()/2])
            # BUG FIX: was `except e:` which raises NameError at handler time
            # instead of producing the intended friendly error message.
            except Exception:
                raise Exception("Wrong arguments; try .draw(surface, [x,y])")
        elif type(thing) is str:
            try:
                font = pygame.font.match_font(args[0].replace(" ","").lower())
                if font is None:
                    # BUG FIX: `fontname` was undefined here; the font name
                    # is the first positional argument.
                    sys.stderr.write("ERROR: no font named "+args[0]+"; using default font instead")
                size = args[1]
                color = args[2]
                if type(color) is str: color = pygame.Color(color)
                self.draw(pygame.font.Font(font,size).render(thing,True,color), *args[3:])
            # BUG FIX: was `except e:` (see above).
            except Exception:
                raise Exception("Wrong arguments; try .draw(text, fontName, fontSize, color, [x,y])")
        else:
            raise Exception("I don't know how to draw a ",type(thing))
    def display(self):
        '''Causes what has been drawn recently by calls to draw(...) to be displayed on the screen'''
        pygame.display.flip()
    def clear(self, color):
        '''Erases the screen by filling it with the given color'''
        if type(color) is str: color = pygame.Color(color)
        self._surface.fill(color)
    def __getattr__(self, name):
        # Derived read-only views of the camera rectangle and the mouse.
        if name in self.__dict__: return self.__dict__[name]
        x, y, w, h = self._x, self._y, self._surface.get_width(), self._surface.get_height()
        if name == 'left': return x
        if name == 'right': return x + w
        if name == 'top': return y
        if name == 'bottom': return y + h
        if name == 'x': return x + w/2
        if name == 'y': return y + h/2
        if name == 'center': return x+w/2, y+h/2
        if name == 'topleft': return x,y
        if name == 'topright': return x + w, y
        if name == 'bottomleft': return x, y + h
        if name == 'bottomright': return x + w, y + h
        if name == 'width': return w
        if name == 'height': return h
        if name == 'size': return w, h
        # Mouse positions are reported in world coordinates.
        if name == 'mousex': return pygame.mouse.get_pos()[0] + self._x
        if name == 'mousey': return pygame.mouse.get_pos()[1] + self._y
        if name == 'mouse': return pygame.mouse.get_pos()[0] + self._x, pygame.mouse.get_pos()[1] + self._y
        if name == 'mouseclick': return any(pygame.mouse.get_pressed())
        raise Exception("There is no '" + name + "' in a Camera object")
    def __setattr__(self, name, value):
        # Positional attributes all reduce to moving the top-left corner.
        if name in self.__dict__:
            self.__dict__[name] = value
            return
        w, h = self._surface.get_width(), self._surface.get_height()
        if name == 'left': self._x = value
        elif name == 'right': self._x = value - w
        elif name == 'top': self._y = value
        elif name == 'bottom': self._y = value - h
        elif name == 'x': self._x = value-w/2
        elif name == 'y': self._y = value-h/2
        elif name == 'center': self._x, self._y = value[0]-w/2, value[1]-h/2
        elif name == 'topleft': self._x, self._y = value[0], value[1]
        elif name == 'topright': self._x, self._y = value[0] - w, value[1]
        elif name == 'bottomleft': self._x, self._y = value[0], value[1] - h
        elif name == 'bottomright': self._x, self._y = value[0] - w, value[1] - h
        elif name in ['width','height','size','mouse','mousex','mousey','mouseclick']:
            raise Exception("You cannot change the '" + name + "' of a Camera object")
        else:
            sys.stderr.write("creating field named "+name)
            self.__dict__[name] = value
    def __repr__(self):
        return str(self)
    def __str__(self):
        return '%dx%d Camera centered at %d,%d' % (self.width, self.height, self.x, self.y)
__all__.append('Camera')
class SpriteBox(object):
    '''Intended to represent a sprite (i.e., an image that can be drawn as part of a larger view) and the box that contains it. Has various collision and movement methods built in.'''
    # __slots__ = ["x","y","speedx","speedy","_w","_h","_key","_image","_color"]
    def __init__(self, x, y, image, color, w=None, h=None):
        '''You should probably use the from_image, from_text, or from_color method instead of this one'''
        self.__dict__['x'] = x
        self.__dict__['y'] = y
        self.__dict__['speedx'] = 0
        self.__dict__['speedy'] = 0
        if image is not None:
            self._set_key(image, False, 0, 0, 0)
            if w is not None:
                if h is not None: self.size = w,h
                else: self.width = w
            elif h is not None: self.height = h
        elif color is not None:
            if w is None or h is None: raise Exception("must supply size of color box")
            self.__dict__['_key'] = None
            self.__dict__['_image'] = None
            self.__dict__['_w'] = w
            self.__dict__['_h'] = h
            self.color = color
    def _set_key(self, name, flip, width, height, angle):
        # Normalize the cache key (rounded size, angle in [0, 360)) and pull
        # the matching transformed image out of the shared image cache.
        width = int(width+0.5)
        height = int(height+0.5)
        angle = ((int(angle)%360)+360)%360
        unrot = _image(name, flip, width, height)
        if width == 0 and height == 0:
            width = unrot.get_width()
            height = unrot.get_height()
        self.__dict__['_key'] = (name, flip, width, height, angle)
        self.__dict__['_image'] = _image(*self.__dict__['_key'])
        self.__dict__['_color'] = None
        self.__dict__['_w'] = self.__dict__['_image'].get_width()
        self.__dict__['_h'] = self.__dict__['_image'].get_height()
    def __getattr__(self, name):
        # Derived views of the box rectangle; (x, y) is the box's center.
        x, y, w, h = self.x, self.y, self._w, self._h
        if name == 'xspeed': name = 'speedx'
        if name == 'yspeed': name = 'speedy'
        if name == 'left': return x - w / 2
        if name == 'right': return x + w / 2
        if name == 'top': return y - h / 2
        if name == 'bottom': return y + h / 2
        if name == 'center': return x, y
        if name == 'topleft': return x - w / 2, y - h / 2
        if name == 'topright': return x + w / 2, y - h / 2
        if name == 'bottomleft': return x - w / 2, y + h / 2
        if name == 'bottomright': return x + w / 2, y + h / 2
        # BUG FIX: the width/height branches were duplicated (dead code);
        # the second copies have been removed.
        if name == 'width': return w
        if name == 'height': return h
        if name == 'size': return w, h
        if name == 'speed': return self.speedx, self.speedy
        if name == 'rect': return pygame.Rect(self.topleft, self.size)
        if name == 'image': return self.__dict__['_image']
        if name in self.__dict__:
            return self.__dict__[name]
        raise Exception("There is no '" + name + "' in a SpriteBox object")
    def __setattr__(self, name, value):
        w, h = self._w, self._h
        if name == 'xspeed': name = 'speedx'
        if name == 'yspeed': name = 'speedy'
        if name in self.__dict__:
            self.__dict__[name] = value
        elif name == 'left': self.x = value + w / 2
        elif name == 'right': self.x = value - w / 2
        elif name == 'top': self.y = value + h / 2
        elif name == 'bottom': self.y = value - h / 2
        elif name == 'center': self.x, self.y = value[0], value[1]
        elif name == 'topleft': self.x, self.y = value[0] + w / 2, value[1] + h / 2
        elif name == 'topright': self.x, self.y = value[0] - w / 2, value[1] + h / 2
        elif name == 'bottomleft': self.x, self.y = value[0] + w / 2, value[1] - h / 2
        elif name == 'bottomright': self.x, self.y = value[0] - w / 2, value[1] - h / 2
        elif name == 'width': self.scale_by(value/w)
        elif name == 'height': self.scale_by(value/h)
        elif name == 'size':
            if self.__dict__['_image'] is not None:
                key = self.__dict__['_key']
                self._set_key(key[0], key[1], value[0], value[1], key[4])
            else:
                self.__dict__['_w'] = value[0]
                self.__dict__['_h'] = value[1]
        elif name == 'speed': self.speedx, self.speedy = value[0], value[1]
        elif name == 'color':
            # Switching to a solid color discards any image.
            self.__dict__['_image'] = None
            self.__dict__['_key'] = None
            if type(value) is str: value = pygame.Color(value)
            self.__dict__['_color'] = value
        elif name == 'image':
            # Switching to an image discards any solid color and keeps the
            # current flip/size/angle transform when one exists.
            self.__dict__['_color'] = None
            if self.__dict__['_key'] is None:
                self._set_key(value, False, w, h, 0)
            else:
                key = self.__dict__['_key']
                self._set_key(value, *key[1:])
        else:
            # BUG FIX: message said "creating filed named"; the sibling
            # Camera class spells this message with "field".
            sys.stderr.write("creating field named "+name)
            self.__dict__[name] = value
    def overlap(self, other, padding=0, padding2=None):
        '''b1.overlap(b2) returns a list of 2 values such that self.move(result) will cause them to not overlap
        Returns [0,0] if there is no overlap (i.e., if b1.touches(b2) returns False)
        b1.overlap(b2, 5) adds a 5-pixel padding to b1 before computing the overlap
        b1.overlap(b2, 5, 10) adds a 5-pixel padding in x and a 10-pixel padding in y before computing the overlap'''
        if padding2 is None: padding2 = padding
        # Gaps between the facing edges; all negative means the boxes overlap.
        l = other.left - self.right - padding
        r = self.left - other.right - padding
        t = other.top - self.bottom - padding2
        b = self.top - other.bottom - padding2
        m = max(l, r, t, b)
        if m >= 0: return [0, 0]
        elif m == l: return [l, 0]
        elif m == r: return [-r, 0]
        elif m == t: return [0, t]
        else: return [0, -b]
    def touches(self, other, padding=0, padding2=None):
        '''b1.touches(b2) returns True if the two SpriteBoxes overlap, False if they do not
        b1.touches(b2, 5) adds a 5-pixel padding to b1 before computing the touch
        b1.touches(b2, 5, 10) adds a 5-pixel padding in x and a 10-pixel padding in y before computing the touch'''
        if padding2 is None: padding2 = padding
        l = other.left - self.right - padding
        r = self.left - other.right - padding
        t = other.top - self.bottom - padding2
        b = self.top - other.bottom - padding2
        return max(l,r,t,b) <= 0
    def bottom_touches(self, other, padding=0, padding2=None):
        '''b1.bottom_touches(b2) returns True if both b1.touches(b2) and b1's bottom edge is the one causing the overlap.'''
        if padding2 is None: padding2 = padding
        return self.overlap(other,padding+1,padding2+1)[1] < 0
    def top_touches(self, other, padding=0, padding2=None):
        '''b1.top_touches(b2) returns True if both b1.touches(b2) and b1's top edge is the one causing the overlap.'''
        if padding2 is None: padding2 = padding
        return self.overlap(other,padding+1,padding2+1)[1] > 0
    def left_touches(self, other, padding=0, padding2=None):
        '''b1.left_touches(b2) returns True if both b1.touches(b2) and b1's left edge is the one causing the overlap.'''
        if padding2 is None: padding2 = padding
        return self.overlap(other,padding+1,padding2+1)[0] > 0
    def right_touches(self, other, padding=0, padding2=None):
        '''b1.right_touches(b2) returns True if both b1.touches(b2) and b1's right edge is the one causing the overlap.'''
        if padding2 is None: padding2 = padding
        return self.overlap(other,padding+1,padding2+1)[0] < 0
    def contains(self, x, y=None):
        '''checks if the given point is inside this SpriteBox's bounds or not'''
        if y is None: x,y = x
        return abs(x-self.x)*2 < self._w and abs(y-self.y)*2 < self._h
    def move_to_stop_overlapping(self, other, padding=0, padding2=None):
        '''b1.move_to_stop_overlapping(b2) makes the minimal change to b1's position necessary so that they no longer overlap'''
        o = self.overlap(other,padding, padding2)
        if o != [0,0]:
            self.move(o)
            # Cancel any velocity component pushing back into the contact.
            if o[0] * self.speedx < 0: self.speedx = 0
            if o[1] * self.speedy < 0: self.speedy = 0
    def move_both_to_stop_overlapping(self, other, padding=0, padding2=None):
        '''b1.move_both_to_stop_overlapping(b2) changes both b1 and b2's positions so that they no longer overlap'''
        o = self.overlap(other,padding, padding2)
        if o != [0,0]:
            # Split the separation evenly and average the speeds on the
            # axis of contact.
            self.move(o[0]/2,o[1]/2)
            other.move(-o[0]/2,-o[1]/2)
            if o[0] != 0:
                self.speedx = (self.speedx+other.speedx)/2
                other.speedx = self.speedx
            if o[1] != 0:
                self.speedy = (self.speedy+other.speedy)/2
                other.speedy = self.speedy
    def move(self, x, y=None):
        '''change position by the given amount in x and y. If only x given, assumed to be a point [x,y]'''
        if y is None: x, y = x
        self.x += x
        self.y += y
    def move_speed(self):
        '''change position by the current speed field of the SpriteBox object'''
        self.move(self.speedx, self.speedy)
    def full_size(self):
        '''change size of this SpriteBox to be the original size of the source image'''
        if self.__dict__['_key'] is None: return
        key = self.__dict__['_key']
        self._set_key(key[0],key[1],0,0,key[4])
    def __repr__(self):
        return str(self)
    def __str__(self):
        return '%dx%d SpriteBox centered at %d,%d' % (self._w, self._h, self.x, self.y)
    def copy_at(self, newx, newy):
        '''Make a new SpriteBox just like this one but at the given location instead of here'''
        return SpriteBox(newx, newy, self._image, self._color, self._w, self._h)
    def copy(self):
        '''Make a new SpriteBox just like this one and in the same location'''
        return self.copy_at(self.x, self.y)
    def scale_by(self, multiplier):
        '''Change the size of this SpriteBox by the given factor
        b1.scale_by(1) does nothing; b1.scale_by(0.4) makes b1 40% of its original width and height.'''
        if self.__dict__['_key'] is None:
            self._w *= multiplier
            self._h *= multiplier
        else:
            key = self.__dict__['_key']
            self._set_key(key[0], key[1], key[2]*multiplier, key[3]*multiplier, key[4])
    def draw(self, surface):
        '''b1.draw(camera) is the same as saying camera.draw(b1)
        b1.draw(image) draws a copy of b1 on the image provided'''
        if isinstance(surface, Camera):
            if self.__dict__['_color'] is not None:
                # Clip to the visible window before filling.
                region = self.rect.move(-surface._x, -surface._y)
                region = region.clip(surface._surface.get_rect())
                surface._surface.fill(self._color, region)
            elif self.__dict__['_image'] is not None:
                surface._surface.blit(self._image, [self.left - surface._x, self.top - surface._y])
        else:
            if self.__dict__['_color'] is not None:
                surface.fill(self._color, self.rect)
            elif self.__dict__['_image'] is not None:
                surface.blit(self._image, self.topleft)
    def flip(self):
        '''mirrors the SpriteBox left-to-right.
        Mirroring top-to-bottom can be accomplished by
        b1.rotate(180)
        b1.flip()'''
        if self.__dict__['_key'] is None: return
        key = self.__dict__['_key']
        self._set_key(key[0], not key[1], *key[2:])
    def rotate(self, angle):
        '''Rotates the SpriteBox by the given angle (in degrees).'''
        if self.__dict__['_key'] is None: return
        key = self.__dict__['_key']
        self._set_key(key[0], key[1], key[2], key[3], key[4]+angle)
# Module-level timer state shared by timer_loop, pause and unpause.
_timeron = False
_timerfps = 0
def timer_loop(fps, callback):
    '''Requests that pygame call the provided function fps times a second
    fps: a number between 1 and 60
    callback: a function that accepts a set of keys pressed since the last tick
    ----
    seconds = 0
    def tick(keys):
        seconds += 1/30
        if pygame.K_DOWN in keys:
            print 'down arrow pressed'
        if not keys:
            print 'no keys were pressed since the last tick'
        camera.draw(box)
        camera.display()
    gamebox.timer_loop(30, tick)
    ----'''
    global _timeron, _timerfps
    keys = set([])
    # pygame timers have millisecond resolution, so cap the rate.
    if fps > 1000: fps = 1000
    _timerfps = fps
    _timeron = True
    pygame.time.set_timer(pygame.USEREVENT, int(1000/fps))
    while True:
        event = pygame.event.wait()
        # Window close or ESC ends the loop.
        if event.type == pygame.QUIT: break
        if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE: break
        if event.type == pygame.KEYDOWN:
            keys.add(event.key)
        if event.type == pygame.KEYUP and event.key in keys:
            keys.remove(event.key)
        if event.type == pygame.USEREVENT:
            # Drop queued ticks so a slow callback doesn't build a backlog.
            pygame.event.clear(pygame.USEREVENT)
            callback(keys)
    pygame.time.set_timer(pygame.USEREVENT, 0)
    _timeron = False
def pause():
    '''Pauses the timer; an error if there is no timer to pause'''
    if not _timeron:
        raise Exception("Cannot pause a timer before calling timer_loop(fps, callback)")
    # A period of 0 disables the USEREVENT timer without ending the loop.
    pygame.time.set_timer(pygame.USEREVENT, 0)
def unpause():
    '''Unpauses the timer; an error if there is no timer to unpause'''
    if not _timeron:
        # BUG FIX: the message previously said "Cannot pause" (copy/paste
        # from pause()); this is the unpause path.
        raise Exception("Cannot unpause a timer before calling timer_loop(fps, callback)")
    pygame.time.set_timer(pygame.USEREVENT, int(1000/_timerfps))
def stop_loop():
    '''Completely quits one timer_loop or keys_loop, usually ending the program'''
    # Posting QUIT triggers the `break` condition inside the active loop.
    pygame.event.post(pygame.event.Event(pygame.QUIT))
def keys_loop(callback):
    '''Requests that pygame call the provided function each time a key is pressed
    callback: a function that accepts the key pressed
    ----
    def onPress(key):
        if pygame.K_DOWN == key:
            print 'down arrow pressed'
        if pygame.K_a == key:
            print 'A key pressed'
        camera.draw(box)
        camera.display()
    gamebox.keys_loop(onPress)
    ----'''
    # Blocks until the window is closed or ESC is pressed.
    while True:
        event = pygame.event.wait()
        if event.type == pygame.QUIT: break
        if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE: break
        if event.type == pygame.KEYDOWN:
            callback(event.key)
# Demo / smoke test: runs only when gamebox.py is executed directly.
if __name__ == "__main__":
    camera = Camera(400, 400)
    camera.x = 10
    b = from_text(40,50,"Blue","Arial",40,"red", italic=True, bold=True)
    b.speedx = 3
    b.left += 2
    b.y = 100
    b.move_speed()
    camera.draw(b)
    camera.display()
    # 4x4 sprite sheet fetched over HTTP; requires network access.
    smurfs = load_sprite_sheet("http://www.flashpulse.com/moho/smurf_sprite.PNG", 4, 4)
    def tick(keys):
        # Per-frame callback for timer_loop: number keys pick a sprite,
        # 'a' quits, any other key swaps in the pygame logo.
        if keys:
            if pygame.K_0 in keys: b.image = smurfs[0]
            elif pygame.K_1 in keys: b.image = smurfs[1]
            elif pygame.K_2 in keys: b.image = smurfs[2]
            elif pygame.K_3 in keys: b.image = smurfs[3]
            elif pygame.K_4 in keys: b.image = smurfs[4]
            elif pygame.K_5 in keys: b.image = smurfs[5]
            elif pygame.K_6 in keys: b.image = smurfs[6]
            elif pygame.K_7 in keys: b.image = smurfs[7]
            elif pygame.K_8 in keys: b.image = smurfs[8]
            elif pygame.K_9 in keys: b.image = smurfs[9]
            elif pygame.K_a in keys: stop_loop()
            elif keys: b.image = "http://www.pygame.org/docs/_static/pygame_tiny.png"
            b.full_size()
        # NOTE(review): indentation reconstructed from a flattened source --
        # confirm b.full_size() belongs inside the `if keys:` branch.
        b.rotate(-5)
        b.center = camera.mouse
        b.bottom = camera.bottom
        camera.draw(b)
        camera.display()
    timer_loop(30, tick)
| {
"repo_name": "acs3ss/austinsullivan.github.io",
"path": "files/Gamebox/gamebox.py",
"copies": "1",
"size": "26384",
"license": "unlicense",
"hash": -772929578608106800,
"line_mean": 42.2524590164,
"line_max": 183,
"alpha_frac": 0.5740600364,
"autogenerated": false,
"ratio": 3.448438112665011,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45224981490650107,
"avg_score": null,
"num_lines": null
} |
"""A Library for accessing the Science Logic EM7.
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
# Directory containing this setup.py, so data files resolve regardless of cwd.
here = path.abspath(path.dirname(__file__))

# Use DESCRIPTION.rst as the PyPI long description.
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='pyem7',
    version='0.0.dev1',
    description='Library for accessing the Science Logic EM7',
    long_description=long_description,
    url='https://github.com/the-kid89/pyem7',
    author='Emett Speer',
    author_email='pyem7@emettspeer.com',
    license='BSD-3',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
    keywords=['library', 'rest', 'sciencelogic', 'em7'],
    # Exclude tooling/build directories from the installed distribution.
    packages=find_packages(exclude=['contrib', 'docs', 'tests*', 'dist', 'data', 'build',
                                    '.tox', '*.egg-info']),
    install_requires=['requests==2.7.0'],
    # Optional extras: both dev and test installs pull in the mock/test tools.
    extras_require={
        'dev': ['requests_mock',
                'nose'],
        'test': ['requests_mock',
                 'nose'],
    },
)
| {
"repo_name": "the-kid89/pyem7",
"path": "setup.py",
"copies": "1",
"size": "1397",
"license": "bsd-3-clause",
"hash": 3946435962172243000,
"line_mean": 31.488372093,
"line_max": 89,
"alpha_frac": 0.5862562634,
"autogenerated": false,
"ratio": 3.8591160220994474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9944080295835365,
"avg_score": 0.0002583979328165375,
"num_lines": 43
} |
"""A library for creating graphs using Unicode braille characters.
https://pypi.python.org/pypi/braillegraph
Someone on reddit posted a screenshot of their xmobar setup, which used braille
characters to show the loads of their four processor cores, as well as several
other metrics. I was impressed that you could fit so much data into a single
line. I immediately set out to implement braille bar graphs for myself.
The characters this script outputs are in the Unicode Braille Patterns section,
code points 0x2800 through 0x28FF. Not all fonts support these characters, so
if you can't see the examples below check your font settings.
There are two ways to use this package: imported in Python code, or as a
command line script.
To use the package in Python, import it and use the vertical_graph and
horizontal_graph functions.
>>> from braillegraph import vertical_graph, horizontal_graph
>>> vertical_graph([3, 1, 4, 1])
'⡯⠥'
>>> horizontal_graph([3, 1, 4, 1])
'⣆⣇'
To use the package as a script, run it as
% python -m braillegraph vertical 3 1 4 1 5 9 2 6
⡯⠥
⣿⣛⣓⠒⠂
% python -m braillegraph horizontal 3 1 4 1 5 9 2 6
⠀⠀⢀
⠀⠀⣸⢠
⣆⣇⣿⣼
For a description of the arguments and flags, run
% python -m braillegraph --help
"""
import argparse
from .braillegraph import horizontal_graph, vertical_graph
def run():
    """Display the arguments as a braille graph on standard output."""
    # Advertise the module-style invocation, since this script must be run
    # with the python executable (`python -m braillegraph`).
    parser = argparse.ArgumentParser(
        prog='python -m braillegraph',
        description='Print a braille bar graph of the given integers.'
    )

    # end=None lets print() use its default newline; end='' suppresses it.
    parser.add_argument('-n', '--no-newline', action='store_const',
                        dest='end', const='', default=None,
                        help='do not print the trailing newline character')

    subparsers = parser.add_subparsers(title='directions')

    def _run_horizontal(args):
        # Handler for the 'horizontal' sub-command.
        return horizontal_graph(args.integers)

    def _run_vertical(args):
        # Handler for the 'vertical' sub-command; sep=None keeps the
        # vertical_graph default separator.
        return vertical_graph(args.integers, sep=args.sep)

    horizontal_parser = subparsers.add_parser('horizontal',
                                              help='a horizontal graph')
    horizontal_parser.set_defaults(func=_run_horizontal)
    horizontal_parser.add_argument('integers', metavar='N', type=int,
                                   nargs='+', help='an integer')

    vertical_parser = subparsers.add_parser('vertical',
                                            help='a vertical graph')
    vertical_parser.set_defaults(func=_run_vertical)
    vertical_parser.add_argument('integers', metavar='N', type=int, nargs='+',
                                 help='an integer')
    vertical_parser.add_argument('-s', '--sep', action='store', default=None,
                                 help='separator for groups of bars')

    args = parser.parse_args()
    print(args.func(args), end=args.end)
# Entry point for `python -m braillegraph` / direct execution.
if __name__ == '__main__':
    run()
| {
"repo_name": "chrisbouchard/braillegraph",
"path": "braillegraph/__main__.py",
"copies": "1",
"size": "3356",
"license": "bsd-2-clause",
"hash": -5837396303707746000,
"line_mean": 34.6129032258,
"line_max": 79,
"alpha_frac": 0.6458333333,
"autogenerated": false,
"ratio": 3.942857142857143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.010752688172043012,
"num_lines": 93
} |
"""A library for *curl*.
Add new keywords here to invoke curl in a desired way
\`Get_Url_Status\`
\`Check_For_Dead_Links\`.
"""
import subprocess
import re
import time
from selenium import webdriver
from selenium.common.exceptions import StaleElementReferenceException
class curl(object):
    # Robot Framework keyword library that shells out to *curl* for HTTP
    # status checks and drives a Selenium browser to crawl pages for links.
    # NOTE(review): this is Python 2 code (Get_Url_Status uses the `print`
    # statement); it will not run under Python 3 unmodified.
    ROBOT_LIBRARY_VERSION = 1.0
    # NOTE(review): all of the following are *class* attributes shared by
    # every instance of this library, so crawl state persists across
    # instances -- confirm this is intentional.
    local_url_identifier = ''   # regex fragment identifying "local" urls
    start_url = ''              # url where crawling begins
    main_attribute = ''         # html attribute being checked ('href' or 'src')
    main_element = ''           # optional tag name limiting the search
    pages_to_be_checked = []    # queue of local pages still to be crawled
    pages_already_checked = []  # pages whose links were already extracted
    links_already_checked = []  # every url whose status was already fetched
    failed_links = {}           # url -> list of failing occurrence dicts
    browser = None              # Selenium WebDriver supplied by the caller
    recursive = False           # whether to crawl local pages recursively
    stale_counter = 0           # retry count for StaleElementReferenceException
    def __init__(self):
        pass
    """This will check for the give startUrl for invalid links. Invalid link is one that does not return with '200' status.
    Parameters:
    browser_instance - an instance of a running browser which can be obtained by parent property of Webelement object
    Example:
    ${body_element_of_page}= Get Webelement css=body
    ${browser_instance}= Set Variable ${body_element_of_page.parent}
    recursive - boolean True will recursively check each local url; boolean False will only check the status of the links found on the startUrl.
    startUrl - the start url where parsing should begin.
    localUrlIdentifier - a string to match against to determine if a url is local.
    Example:
    startUrl = http://www.google.com
    localUrlIdentifier = google\.com
    *NOTE* the dot is escaped with a slash to avoid it being treated as a wild token.
    attribute - the attribute of html element to be checked for; it can be href or src.
    element - optional element tag to limit search to particular html tags.
    Example:
    element = a #will look only at anchor elements
    element = img #will only look at image elements
    leaving 'element' blank will look at all elements which contain the specified attribute.
    Add new keywords here to invoke curl in a desired way
    """
    def Check_For_Dead_Links(self, browser_instance, recursive, startUrl, localUrlIdentifier, attribute, element=''):
        # Entry-point keyword: seed the crawl queue with the start url and
        # process pages breadth-first until the queue drains.
        self.recursive = recursive
        self.local_url_identifier = localUrlIdentifier
        self.start_url = startUrl
        self.main_attribute = attribute
        self.main_element = element
        # Synthetic "link" record for the starting page itself.
        start_url_details = {}
        start_url_details['location'] = '<this is the starting url>'
        start_url_details[attribute] = self.start_url
        start_url_details['href'] = self.start_url
        start_url_details['text'] = ''
        start_url_details['selector'] = ''
        self.pages_to_be_checked.append(start_url_details)
        self.browser = browser_instance #webdriver.Chrome()
        while True:
            if len(self.pages_to_be_checked) == 0:
                break
            pageToCheck = self.pages_to_be_checked.pop(0)
            self._Parse_Page_For_Links(pageToCheck)
        # Mapping of url -> list of occurrences that did not return 200.
        return self.failed_links
    def _Parse_Page_For_Links(self, page_url):
        # Fetch one page and enqueue/check every link-bearing element on it.
        url = page_url[self.main_attribute]
        if url in self.pages_already_checked:
            return
        self.pages_already_checked.append(url)
        self.links_already_checked.append(url)
        url_status = self.Get_Url_Status(url)
        if not url_status.startswith('200 '): #if status is not 200 then it failed
            page_url['status'] = url_status
            self._Increment_Failed_Links(page_url)
            # Page itself is dead; no point loading it in the browser.
            return
        self.browser.get(url)
        elements_to_check = self._Get_All_Elements_From_A_Page_With_Attribute(self.main_attribute, self.main_element)
        for element in elements_to_check:
            # Record where the link was found for failure reporting.
            element['location'] = url
            self._Process_URL(element)
    def _Process_URL(self, link):
        # Check a single link's status, deduplicating against earlier results
        # and optionally enqueueing local pages for recursive crawling.
        link['status'] = 'assuming same as the first'
        url = link[self.main_attribute]
        if url in self.failed_links:
            # Already known bad: just record this additional occurrence.
            self._Increment_Failed_Links(link)
            return
        if url in self.links_already_checked:
            return
        #mark it as checked
        self.links_already_checked.append(url)
        isLocal = self._Check_If_URL_Is_Local(url)
        urlStatus = self.Get_Url_Status(url)
        link['status'] = urlStatus
        if urlStatus.startswith('200 '):
            if isLocal and self.recursive: #if the url is local to the starting url then add it to pages to be checked if it is not already added
                if url not in self.pages_to_be_checked:
                    self.pages_to_be_checked.append(link)
            return # status of 200 means url is good
        self._Increment_Failed_Links(link) #if we got to this point that means the url failed
    def _Check_If_URL_Is_Local(self, url):
        # A url is "local" when it contains '//<identifier>' or '.<identifier>'.
        # NOTE(review): local_url_identifier is interpolated into the regex
        # unescaped; callers must pre-escape regex metacharacters themselves.
        checkIfLocal = re.compile('(\/\/' + self.local_url_identifier + ')|(\.' + self.local_url_identifier + ')')
        return checkIfLocal.search(url)
    def _Increment_Failed_Links(self, url_link):
        # Append this occurrence to the url's failure list (creating it on
        # first failure).
        previousFails = []
        url = url_link[self.main_attribute]
        if url in self.failed_links:
            previousFails = self.failed_links[url]
        previousFails.append(url_link)
        self.failed_links[url] = previousFails
    def _Get_All_Elements_From_A_Page_With_Attribute(self, attribute, tagname=''):
        # Collect {text, attribute-value, selector} dicts for every element
        # on the current page carrying the given attribute.
        elements = self.browser.find_elements_by_css_selector(tagname + '[' + attribute + ']')
        result = []
        try:
            for element in elements:
                element_properties = {}
                element_properties['text'] = element.text if element.text != '' else '<no text>'
                element_properties[attribute] = element.get_attribute(attribute)
                element_properties['selector'] = self._Get_Element_Selector(element)
                result.append(element_properties)
        except StaleElementReferenceException as e:
            #sometimes the page is not fully loaded or is being reloaded when we try to interact with it
            #so we try calling ourselves again up to 3 times
            # NOTE(review): stale_counter is never reset after a successful
            # pass, so the 3-retry budget is shared across the whole crawl.
            if self.stale_counter < 3:
                self.stale_counter += 1
                result = self._Get_All_Elements_From_A_Page_With_Attribute(attribute, tagname)
            else:
                #if we fail 3 times then something must be really wrong
                result = []
                element_properties = {}
                element_properties['text'] = 'failed to parse this page. it probably did not load in time.'
                element_properties[attribute] = ''
                element_properties['selector'] = ''
                result.append(element_properties)
        return result
    def _Get_Element_Selector(self, element):
        # Build a human-readable CSS-like selector path for an element by
        # walking up its parent chain (recursion ends at the HTML root).
        if (element == None):
            return ''
        element_tag = element.tag_name
        if element_tag == 'HTML':
            return 'HTML'
        element_selector = ''
        element_id = element.get_attribute('id')
        element_class = element.get_attribute('class')
        element_class = element_class.replace('\n', '').replace('\r', '')
        element_href = self._Get_WebElement_Attribute_Value(element, 'getAttribute("href")')
        element_href = element_href if element_href != None else ''
        element_src = self._Get_WebElement_Attribute_Value(element, 'getAttribute("src")')
        element_src = element_src if element_src != None else ''
        element_parent = self._Get_WebElement_Attribute_Value(element, 'parentElement')
        if element_id != '':
            # An id is unique enough; no need to include ancestors.
            return element_tag + '[id="' + element_id + '"]'
        element_selector = element_tag
        if element_class != '':
            element_selector += '[class="' + element_class + '"]'
        if element_href != '':
            element_selector += '[href="' + element_href + '"]'
        if element_src != '':
            element_selector += '[src="' + element_src + '"]'
        parent_selector = self._Get_Element_Selector(element_parent)
        element_selector = parent_selector + ' ' + element_selector
        return element_selector
    def _Get_WebElement_Attribute_Value(self, element, attribute):
        # Evaluate `arguments[0].<attribute>` in the browser; used to reach
        # DOM properties (e.g. parentElement) Selenium doesn't expose directly.
        driver = element.parent
        script = 'var result = []; result.push(arguments[0].' + attribute + '); return result;'
        value = driver.execute_script(script, element)
        return value[0]
    def Get_Url_Status(self, url):
        """Invokes curl with -I -s flags to silently get the status from the specified url.
        Returns string: '<status code> <message>' for resolved host or 'invalid host' when host cannot be resolved.
        Example: Get Url Status http://www.google.ca #outputs '200 OK'
        Example: Get Url Status http://blah.bah #outputs 'invalid host'
        """
        # -I: HEAD request only; -s: suppress progress output.
        process = subprocess.Popen(['curl', '-I', '-s', url], stdout=subprocess.PIPE)
        out, err = process.communicate()
        if out == '':
            # curl produced no headers: DNS failure / unreachable host.
            status = 'invalid host'
        else:
            # First response line is e.g. 'HTTP/1.1 200 OK'; keep '200 OK'.
            out_array = out.split("\r\n");
            status = out_array[0].split(" ", 1)[1];
        print '*INFO* Checked url {url_value} and got status {status_value}'.format(url_value=url, status_value=status)
        return status
| {
"repo_name": "ayxos/react-cellar",
"path": "e2e/robot/resources/lib/curl/curl.py",
"copies": "16",
"size": "9243",
"license": "mit",
"hash": 5423838486920210000,
"line_mean": 44.7574257426,
"line_max": 148,
"alpha_frac": 0.6084604566,
"autogenerated": false,
"ratio": 4.1766832354270225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A library for dealing with authentication."""
from functools import wraps
__all__ = ['Need', 'needs', 'no_need']
class Need(object):
    """An authentication requirement.

    A need can be used in several ways.

    * Truth-testing reports whether the requirement is met::

          if login_need:
              # Do stuff that requires a login.
          else:
              # Do other stuff that doesn't require a login.

    * Entering it as a context raises its ``error`` when unmet::

          with login_need:
              # Do stuff that requires a login.

    * Applying it as a decorator wraps the call in that context::

          @login_need
          def do_something():
              # Do stuff that can't be done while not logged in.

    Needs compose with ``~`` (inversion), ``&``, ``|`` and ``^``::

        with admin_need | ~login_need:
            # Do stuff that can't be done while logged in as a normal user.
        with admin_need & owner_need:
            # Do stuff that can only be done as an admin owner.

    Subclasses implement a requirement by overriding :meth:`is_met`.  Needs
    are often singletons (``login_need``, ``admin_need``, ...) but don't
    have to be -- e.g. an object-ownership need built per object::

        class ObjectOwnerNeed(Need):
            def is_met(self):
                return bool(obj.owner == self.session.user)
    """

    # Exception (class or instance) raised when an unmet need is entered
    # as a context manager.
    error = Exception

    def __init__(self, bool_=True):
        # The base class simply carries a fixed truth value.
        self.bool_ = bool_

    def is_met(self):
        """Report whether the requirement is satisfied.

        Subclasses override this; the return value is interpreted as a
        boolean.
        """
        return self.bool_

    def __call__(self, f):
        # Decorator protocol: run the wrapped callable inside this need.
        @wraps(f)
        def guarded(*args, **kwargs):
            with self:
                return f(*args, **kwargs)
        return guarded

    def __enter__(self):
        # Context protocol: entering an unmet need raises its error.
        if not self.is_met():
            raise self.error

    def __exit__(self, type_, value, traceback):
        pass

    def __invert__(self):
        return self.Negator(self)

    def __and__(self, other):
        return self.AndNeed(self, other)

    def __or__(self, other):
        return self.OrNeed(self, other)

    def __xor__(self, other):
        return self.XorNeed(self, other)

    def __bool__(self):
        return bool(self.is_met())

    # Python 2 truth-testing alias.
    __nonzero__ = __bool__
class NegativeNeed(Need):
    """A need that is met exactly when its parent need is not."""

    def __init__(self, parent_need):
        self.parent_need = parent_need

    @property
    def error(self):
        # Surface the wrapped need's error so failures stay meaningful.
        return self.parent_need.error

    def is_met(self):
        met = bool(self.parent_need)
        return not met

# Implementation used by Need.__invert__ (the `~` operator).
Need.Negator = NegativeNeed
class AndNeed(Need):
    """A need met only when both of its constituent needs are met."""

    def __init__(self, first_need, second_need):
        self.first_need = first_need
        self.second_need = second_need

    def __enter__(self):
        # Enter both needs in order: the first unmet one raises its own
        # error, so the caller sees the most specific failure.
        with self.first_need, self.second_need:
            pass

    def is_met(self):
        return bool(self.first_need) and bool(self.second_need)

# Implementation used by Need.__and__ (the `&` operator).
Need.AndNeed = AndNeed
class OrNeed(Need):
    """A need met when at least one of its two constituent needs is met."""

    def __init__(self, first_need, second_need):
        self.first_need = first_need
        self.second_need = second_need

    def __enter__(self):
        """Enter the first need; if it raises, fall back to the second.

        When neither need is met, the second need's error propagates.
        """
        try:
            with self.first_need:
                pass
        # Bug fix: this was a bare ``except:``, which also swallowed
        # SystemExit and KeyboardInterrupt.  Need errors are Exception
        # subclasses (Need.error defaults to Exception), so catching
        # Exception is sufficient.
        except Exception:
            with self.second_need:
                pass

    def is_met(self):
        return bool(self.first_need) or bool(self.second_need)

# Implementation used by Need.__or__ (the `|` operator).
Need.OrNeed = OrNeed
class XorNeed(Need):
    """A need met when exactly one of its two constituent needs is met."""

    def __init__(self, first_need, second_need):
        self.first_need = first_need
        self.second_need = second_need

    def __enter__(self):
        """Raises the second need's error if neither are met. Raises the first
        need's error if both are met.
        """
        f = bool(self.first_need)
        s = bool(self.second_need)
        if not f and not s:
            # Neither met: entering the (unmet) second need raises its error.
            with self.second_need:
                pass
        elif f and s:
            # Both met: the inverted first need is unmet, and its Negator
            # reports the first need's error, so this raises that error.
            with ~self.first_need:
                pass

    def is_met(self):
        return bool(self.first_need) != bool(self.second_need)

# Implementation used by Need.__xor__ (the `^` operator).
Need.XorNeed = XorNeed
def needs(need):
    """A decorator to handle different needs.

    needs(login_need)(f) == login_need(f)

    Needs are themselves callable decorators, so this is just the identity
    function, kept for readability at call sites.
    """
    return need

# A need that is always met; useful as a default or placeholder.
no_need = Need()
| {
"repo_name": "astex/needs",
"path": "needs/base.py",
"copies": "1",
"size": "5139",
"license": "mit",
"hash": 190132734270362500,
"line_mean": 24.9545454545,
"line_max": 79,
"alpha_frac": 0.54271259,
"autogenerated": false,
"ratio": 4.1645056726094,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001472794871132467,
"num_lines": 198
} |
"""A library for dealing with authentication."""
from needs import Need
from werkzeug.exceptions import Unauthorized, Forbidden
__all__ = [
'SelfNeed', 'no_apps_need', 'app_need', 'login_need', 'admin_need',
'csrf_need'
]
class FlaskNeed(Need):
    """A basic need with some handy flask wrapping."""

    # Default failure for flask-based needs; subclasses override with more
    # specific Unauthorized/Forbidden instances.
    error = Unauthorized

    @property
    def session(self):
        """The flask session."""
        # Imported lazily inside the property -- presumably to avoid
        # circular imports at app setup time; TODO confirm.
        from flask import session
        return session

    @property
    def db_session(self):
        """The database session."""
        from peanuts.lib.database import db
        return db.session

    @property
    def request(self):
        """The flask request."""
        from flask import request
        return request
class ApplicationNeed(FlaskNeed):
    """A need that checks for a valid application."""

    error = Forbidden('Please submit a valid app token.')

    def is_met(self):
        # Met when the session resolves an application -- presumably from an
        # app-token request header; verify against the session class.
        return bool(self.session.application)
class NoApplicationsNeed(FlaskNeed):
    """A need that checks that no applications exist."""

    def is_met(self):
        from peanuts.models.app import Application
        # EXISTS subquery: met exactly when the Application table is empty.
        return not self.db_session.query(
            self.db_session.query(Application).exists()
        ).first()[0]
class LoginNeed(FlaskNeed):
    """A need that checks basic authentication."""

    error = Unauthorized('That requires a login.')

    def is_met(self):
        """Checks if the user is logged in."""
        # session.user is resolved from the stored user_id by the custom
        # session class; None/falsy means not logged in.
        return bool(self.session.user)
class AdminNeed(FlaskNeed):
    """A need that checks if the user is an admin."""

    error = Unauthorized('That requires admin credentials.')

    def is_met(self):
        """Checks if the user is an admin."""
        # Requires both a logged-in user and the is_admin flag.
        return bool(self.session.user and self.session.user.is_admin)
class NoAdminNeed(FlaskNeed):
    """A need that checks that there are no registered users."""

    error = Unauthorized('No admin users may be registered.')

    def is_met(self):
        from peanuts.models.user import User
        # `User.is_admin == True` is a SQLAlchemy column expression, not a
        # Python identity test -- do not "fix" it to `is True`.
        return not self.db_session.query(
            self.db_session.query(User).filter(User.is_admin == True).exists()
        ).first()[0]
class SelfNeed(FlaskNeed):
    """Checks that the user is looking at itself (admins always pass)."""

    def __init__(self, user_id):
        # user_id is compared as a string against the session user's id.
        self.user_id = user_id

    @property
    def error(self):
        """Raises an error which indicates the required user."""
        return Unauthorized(
            'Only the user with id {user_id} can do that.'.format(
                user_id=self.user_id
            )
        )

    def is_met(self):
        """Checks if the user_id is the user_id in the session."""
        # Bug fix: this previously evaluated ``admin_need()``, which invokes
        # Need.__call__ (the decorator protocol) with no function argument
        # and raises TypeError whenever the self-check fails.  Truth-testing
        # the need is the intended admin fallback.
        return bool(
            (
                self.session.user and
                str(self.session.user.id) == self.user_id
            ) or
            bool(admin_need)
        )
class CSRFNeed(FlaskNeed):
    """Checks that a valid csrf token is provided."""

    error = Forbidden(
        'A valid csrf token is required. Try refreshing the page.'
    )

    def is_met(self):
        """Checks the csrf token.

        Met only when the session holds a csrf token and the request echoes
        the identical token in its ``x-peanuts-csrf`` header.
        """
        csrf = self.session.get('csrf')
        header_csrf = self.request.headers.get('x-peanuts-csrf')
        # Collapses the original three-clause rejection test (missing
        # session token, missing header, or mismatch) into one equivalent
        # expression.
        return bool(csrf) and csrf == header_csrf
# Module-level singleton needs shared across the application.
no_apps_need = NoApplicationsNeed()
app_need = ApplicationNeed()
login_need = LoginNeed()
admin_need = AdminNeed()
no_admin_need = NoAdminNeed()  # NOTE(review): absent from __all__ -- confirm intent.
csrf_need = CSRFNeed()
| {
"repo_name": "astex/peanuts",
"path": "peanuts/lib/auth.py",
"copies": "1",
"size": "3540",
"license": "mit",
"hash": -4424007504795224000,
"line_mean": 26.874015748,
"line_max": 78,
"alpha_frac": 0.5991525424,
"autogenerated": false,
"ratio": 4.14519906323185,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.524435160563185,
"avg_score": null,
"num_lines": null
} |
"""A library for dealing with session-based authentication."""
from werkzeug.exceptions import Forbidden
from flask.sessions import SecureCookieSession, SecureCookieSessionInterface
__all__ = ['PeanutsSessionInterface']
class PeanutsSession(SecureCookieSession):
    """A custom session object for use with peanuts."""

    @property
    def request(self):
        """Returns the flask request."""
        from flask import request
        return request

    @property
    def application(self):
        """The application, taken from the database, if it exists.

        Resolved by matching the ``x-peanuts-application`` request header
        against Application.token; None when the header is absent.
        """
        from flask import request
        from peanuts.lib.database import db
        from peanuts.models.app import Application

        application_id = request.headers.get('x-peanuts-application')
        if application_id:
            return db.session.query(Application).filter(
                Application.token == application_id
            ).first()
        else:
            return None

    @property
    def user(self):
        """The user, taken from the database, if it exists.

        NOTE(review): an earlier docstring claimed the csrf token is checked
        here, but no such check appears in this property -- csrf validation
        is done elsewhere.
        """
        from peanuts.lib.database import db
        from peanuts.models.user import User

        user_id = self.get('user_id')
        if user_id:
            return db.session.query(User).get(user_id)
        else:
            return None

    @property
    def public_dict(self):
        """The dictionary to actually display."""
        # Only the user id is exposed to clients; the csrf token stays out.
        return {'user_id': self.get('user_id')}

    def clear(self, *args, **kargs):
        """Lets the csrf persist across cleared sessions."""
        # Preserve the csrf token through logout-style clears so the client
        # keeps a usable token.
        csrf = self.get('csrf')
        super(PeanutsSession, self).clear(*args, **kargs)
        self['csrf'] = csrf
class PeanutsSessionInterface(SecureCookieSessionInterface):
    """A custom session interface for use with peanuts.

    Swaps in PeanutsSession so flask sessions gain the application/user
    helper properties.
    """

    session_class = PeanutsSession
| {
"repo_name": "astex/peanuts",
"path": "peanuts/lib/session.py",
"copies": "1",
"size": "1964",
"license": "mit",
"hash": 282019314125100860,
"line_mean": 30.1746031746,
"line_max": 76,
"alpha_frac": 0.633910387,
"autogenerated": false,
"ratio": 4.4840182648401825,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5617928651840183,
"avg_score": null,
"num_lines": null
} |
"""A library for finding approximate subsequence matches.
Contains several implementations of fuzzy sub-sequence search functions. Such
functions find parts of a sequence which match a given sub-sequence up to a
given maximum Levenshtein distance.
The simplest use is via the find_near_matches utility function, which chooses
a suitable fuzzy search implementation based on the given parameters.
Example:
>>> find_near_matches('PATTERN', '---PATERN---', max_l_dist=1)
[Match(start=3, end=9, dist=1, matched='PATERN')]
"""
__author__ = 'Tal Einat'
__email__ = 'taleinat@gmail.com'
__version__ = '0.7.3'
__all__ = [
'find_near_matches',
'find_near_matches_in_file',
'Match',
]
import io
from fuzzysearch.common import Match, LevenshteinSearchParams
from fuzzysearch.generic_search import GenericSearch
from fuzzysearch.levenshtein import LevenshteinSearch
from fuzzysearch.search_exact import ExactSearch
from fuzzysearch.substitutions_only import SubstitutionsOnlySearch
import attr
def find_near_matches(subsequence, sequence,
                      max_substitutions=None,
                      max_insertions=None,
                      max_deletions=None,
                      max_l_dist=None):
    """Search for near-matches of ``subsequence`` in ``sequence``.

    Nearly-matching parts of the sequence must respect the following
    limits, relative to the subsequence:

    * max_substitutions -- maximum allowed character substitutions
    * max_insertions -- maximum allowed new characters inserted
    * max_deletions -- maximum allowed character deletions
    * max_l_dist -- maximum total number of substitutions, insertions
      and deletions (a.k.a. the Levenshtein distance)
    """
    params = LevenshteinSearchParams(max_substitutions,
                                     max_insertions,
                                     max_deletions,
                                     max_l_dist)
    # Pick the cheapest implementation valid for these limits, then merge
    # overlapping raw matches before returning.
    searcher = choose_search_class(params)
    raw_matches = searcher.search(subsequence, sequence, params)
    return searcher.consolidate_matches(raw_matches)
def choose_search_class(search_params):
    """Pick the cheapest search implementation valid for the given limits."""
    max_substitutions, max_insertions, max_deletions, max_l_dist = \
        search_params.unpacked

    # Limits so strict that only exact matches are allowed.
    if max_l_dist == 0:
        return ExactSearch

    # Only substitutions are allowed.
    if max_insertions == 0 and max_deletions == 0:
        return SubstitutionsOnlySearch

    # If the Levenshtein limit is at least as strict as every per-edit
    # limit, it alone governs the search.
    unlimited = 1 << 29
    per_edit_limits = [
        limit if limit is not None else unlimited
        for limit in (max_substitutions, max_insertions, max_deletions)
    ]
    if max_l_dist <= min(per_edit_limits):
        return LevenshteinSearch

    # Otherwise fall back to the fully generic implementation.
    return GenericSearch
def find_near_matches_in_file(subsequence, sequence_file,
                              max_substitutions=None,
                              max_insertions=None,
                              max_deletions=None,
                              max_l_dist=None,
                              _chunk_size=2**20):
    """search for near-matches of subsequence in a file

    This searches for near-matches, where the nearly-matching parts of the
    sequence must meet the following limitations (relative to the subsequence):

    * the maximum allowed number of character substitutions
    * the maximum allowed number of new characters inserted
    * and the maximum allowed number of character deletions
    * the total number of substitutions, insertions and deletions
      (a.k.a. the Levenshtein distance)

    ``_chunk_size`` (default 1 MiB) controls how much of the file is read
    and searched at a time.
    """
    search_params = LevenshteinSearchParams(max_substitutions,
                                            max_insertions,
                                            max_deletions,
                                            max_l_dist)
    search_class = choose_search_class(search_params)
    # Dispatch on binary vs. text mode: binary files take the readinto()
    # path over a reusable buffer; text files take the plain read() path.
    if (
        'b' in getattr(sequence_file, 'mode', '')
        or
        isinstance(sequence_file, io.RawIOBase)
    ):
        matches = _search_binary_file(subsequence,
                                      sequence_file,
                                      search_params,
                                      search_class,
                                      _chunk_size=_chunk_size)
    else:
        matches = _search_unicode_file(subsequence,
                                       sequence_file,
                                       search_params,
                                       search_class,
                                       _chunk_size=_chunk_size)
    return search_class.consolidate_matches(matches)
def _search_binary_file(subsequence, sequence_file, search_params, search_class,
                        _chunk_size):
    """Yield matches of ``subsequence`` in a binary file, chunk by chunk.

    Reads the file in ``_chunk_size`` blocks, retaining enough trailing
    bytes of each block so matches straddling a chunk boundary are still
    found.  Yielded match positions are offsets into the whole file.
    """
    if not subsequence:
        raise ValueError('subsequence must not be empty')

    CHUNK_SIZE = _chunk_size
    # Number of trailing bytes to carry into the next chunk: enough for a
    # subsequence starting at the very end, plus the searcher's own margin.
    keep_bytes = (
        len(subsequence) - 1 +
        search_class.extra_items_for_chunked_search(subsequence, search_params)
    )

    # To allocate memory only once, we'll use a pre-allocated bytearray and
    # file.readinto(). Furthermore, since we'll need to keep part of each
    # chunk along with the next chunk, we'll use a memoryview of the bytearray
    # to move data around within a single block of memory and thus avoid
    # allocations.
    chunk_bytes = bytearray(CHUNK_SIZE)
    chunk_memview = memoryview(chunk_bytes)

    # The search will be done with bytearray objects. Note that in Python 2,
    # getting an item from a bytes object returns a string (rather than an
    # int as in Python 3), so we explicitly convert the sub-sequence to a
    # bytearray in case it is a bytes/str object.
    subseq_bytearray = bytearray(subsequence)

    n_read = sequence_file.readinto(chunk_memview)
    offset = 0
    chunk_len = n_read
    while n_read:
        # Slice only when the final (short) chunk requires it.
        search_bytes = chunk_bytes if chunk_len == CHUNK_SIZE else chunk_bytes[:chunk_len]
        for match in search_class.search(subseq_bytearray, search_bytes, search_params):
            # Translate chunk-relative positions to whole-file offsets.
            yield attr.evolve(match,
                              start=match.start + offset,
                              end=match.end + offset)
        if keep_bytes > 0:
            # Shift the tail of this chunk to the front of the buffer so the
            # boundary region is searched again together with the next read.
            n_to_keep = min(keep_bytes, chunk_len)
            chunk_memview[:n_to_keep] = chunk_memview[chunk_len - n_to_keep:chunk_len]
        else:
            n_to_keep = 0
        offset += chunk_len - n_to_keep
        n_read = sequence_file.readinto(chunk_memview[n_to_keep:])
        chunk_len = n_to_keep + n_read
def _search_unicode_file(subsequence, sequence_file, search_params, search_class,
                         _chunk_size):
    """Yield matches of ``subsequence`` in a text file, chunk by chunk.

    Like ``_search_binary_file`` but for text-mode files: each chunk keeps
    a trailing overlap so matches straddling a chunk boundary are found.
    Yielded match positions are offsets into the whole file.
    """
    if not subsequence:
        raise ValueError('subsequence must not be empty')

    CHUNK_SIZE = _chunk_size
    # Overlap carried between consecutive chunks (see _search_binary_file).
    keep_chars = (
        len(subsequence) - 1 +
        search_class.extra_items_for_chunked_search(subsequence, search_params)
    )

    chunk = sequence_file.read(CHUNK_SIZE)
    offset = 0
    while chunk:
        for match in search_class.search(subsequence, chunk, search_params):
            # Translate chunk-relative positions to whole-file offsets.
            yield attr.evolve(match,
                              start=match.start + offset,
                              end=match.end + offset)
        n_to_keep = min(keep_chars, len(chunk))
        offset += len(chunk) - n_to_keep
        if n_to_keep:
            chunk = chunk[-n_to_keep:] + sequence_file.read(CHUNK_SIZE)
            if len(chunk) == n_to_keep:
                # read() returned '': end of file, and the kept overlap was
                # already fully searched.
                break
        else:
            chunk = sequence_file.read(CHUNK_SIZE)
| {
"repo_name": "taleinat/fuzzysearch",
"path": "src/fuzzysearch/__init__.py",
"copies": "1",
"size": "7866",
"license": "mit",
"hash": 5272758319984604000,
"line_mean": 38.33,
"line_max": 90,
"alpha_frac": 0.6092041698,
"autogenerated": false,
"ratio": 4.213176218532405,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00034685565496715145,
"num_lines": 200
} |
"""A library for generating fake user data"""
VERSION = (0,0,4)
__version__ = ".".join(map(str, VERSION))
__author__ = "Dylan Clendenin"
__contact__ = "dylan.clendenin@gmail.com"
__homepage__ = "https://github.com/deepthawtz/faker"
__all__ = ("Faker",)
import random
from faker import data
from faker import patterns
from faker.utils import rand, numerify, domain, secondary_address
# Decorator for methods that need _get_names. This ensures that if we
# repeatedly use one method, we get fresh names, but if we cycle through the
# methods, we get a set of names/email addresses that correspond. The individual
# methods must not call each other for this to work.
def uses_names(func):
    """Decorator for methods that need ``_get_names``.

    Ensures that repeatedly calling one method yields fresh names, while
    cycling through different methods yields a consistent set of
    names/email addresses.  The decorated methods must not call each other
    for this to work.
    """
    # Local import keeps this module's top-level imports unchanged.
    from functools import wraps

    @wraps(func)  # preserves __doc__/__module__ too, not just __name__
    def _wrapped(self):
        # Regenerate names on first use, or when this particular method has
        # already consumed the current name set.
        if not self._name_accesses or func.__name__ in self._name_accesses:
            self._get_names()
        self._name_accesses.add(func.__name__)
        return func(self)
    return _wrapped
class Faker(object):
    """Generates random fake user data: names, contact info and addresses."""

    def __init__(self):
        # Per-instance name state coordinated by the @uses_names decorator:
        # _names holds the current [first, last] pair, _name_accesses tracks
        # which name-based methods consumed it.
        self._names = None
        self._name_accesses = set()

    def _get_names(self):
        """Draw a fresh (first, last) name pair and reset access tracking."""
        self._names = [rand(data.FIRST_NAMES), rand(data.LAST_NAMES)]
        self._name_accesses = set()

    @uses_names
    def name(self):
        """Full name, e.g. 'Jane Doe'."""
        return " ".join(self._names)

    @uses_names
    def first_name(self):
        return self._names[0]

    @uses_names
    def last_name(self):
        return self._names[1]

    @uses_names
    def username(self):
        """First initial + last name, lowercased, apostrophes stripped."""
        first, last = self._names
        return "".join([first[:1], last]).lower().replace("'", "")

    @uses_names
    def email(self):
        """Email address matching the username at a random domain."""
        first, last = self._names
        return ("%s%s@%s" % (first[:1], last, domain())).lower().replace("'", "")

    def full_address(self):
        """Multi-line street address, city, state and zip."""
        return "%s\n%s, %s %s" % (self.street_address(), self.city(), self.state(), self.zip_code())

    def phonenumber(self):
        # NOTE(review): this pattern yields 11 digits ("###-###-#####");
        # a US-style number would be "###-###-####". Left unchanged to
        # preserve existing output format -- confirm before fixing.
        return numerify("###-###-#####")

    def street_address(self):
        return numerify(random.choice(["##### %s" % patterns.STREET_NAME(),
                                       "#### %s Ave." % patterns.STREET_NAME(),
                                       "### %s St." % patterns.STREET_NAME(),
                                       "### %s %s" % (patterns.STREET_NAME(), secondary_address()),
                                       "#### %s %s" % (patterns.STREET_NAME(), secondary_address())]))

    def city(self):
        return patterns.CITY()

    def state(self):
        return rand(data.STATE_ABBR)

    def zip_code(self):
        """Five-digit or zip+4 code."""
        return numerify(random.choice(["#####", "#####-####"]))

    def company(self):
        return patterns.COMPANY_NAME()

    def lorem(self):
        """A 'paragraph' of all lorem-ipsum words in random order."""
        word_list = data.WORDS.split()
        random.shuffle(word_list)
        # Fix: the original copied the shuffled list element-by-element into
        # a second list before joining, for no effect.
        return " ".join(word_list)

    def age(self):
        """Random age between 16 and 80 inclusive."""
        return random.randint(16, 80)

    def gender(self):
        return random.choice(["M","F"])
| {
"repo_name": "mgedmin/faker",
"path": "faker/__init__.py",
"copies": "2",
"size": "2983",
"license": "mit",
"hash": -8164056640425130000,
"line_mean": 29.1313131313,
"line_max": 104,
"alpha_frac": 0.5581629232,
"autogenerated": false,
"ratio": 3.6289537712895377,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5187116694489537,
"avg_score": null,
"num_lines": null
} |
"""A library for generating fake user data"""
VERSION = (0,1,4)
__version__ = ".".join(map(str, VERSION))
__author__ = "Dylan Clendenin"
__contact__ = "dylan.clendenin@gmail.com"
__homepage__ = "https://github.com/deepthawtz/faker"
__all__ = ("Faker",)
import random
import warnings
from faker import data
from faker import patterns
from faker.utils import rand, numerify, domain, secondary_address
# Emit the deprecation notice on every import of this package: "always"
# overrides the default warning filter, which would otherwise show a
# PendingDeprecationWarning at most once (or suppress it entirely).
deprecation_message = """
This faker package is being deprecated September 15, 2016.
You should switch to using https://pypi.python.org/pypi/fake-factory instead.
After September 15, 2016 the PyPi faker package will be changing to that!
"""
warnings.simplefilter("always", PendingDeprecationWarning)
warnings.warn(deprecation_message, PendingDeprecationWarning)
# Decorator for methods that need _get_names. This ensures that if we
# repeatedly use one method, we get fresh names, but if we cycle through the
# methods, we get a set of names/email addresses that correspond. The individual
# methods must not call each other for this to work.
def uses_names(func):
    """Decorator for methods that need ``_get_names``.

    Ensures that repeatedly calling one method yields fresh names, while
    cycling through different methods yields names/usernames/emails that
    all describe the same fake person. The decorated methods must not call
    each other for this to work.
    """
    def _wrapped(self):
        # Refresh when no name-based method has run yet, or when the same
        # method is being re-used (caller wants a new identity).
        if not self._name_accesses or func.__name__ in self._name_accesses:
            self._get_names()
        self._name_accesses.add(func.__name__)
        return func(self)
    _wrapped.__name__ = func.__name__
    return _wrapped


class Faker(object):
    """Generates fake user data around one consistent fake identity.

    Parameters
    ----------
    seed : optional int, reproducible RNG sequence when given.
    zip_type : None (random), 5 ("#####") or 9 ("#####-####").
    min_age, max_age : bounds for ``age()``; default 16..80.
    """

    def __init__(self,
                 seed=None, zip_type=None, min_age=None, max_age=None):
        self._names = None           # current [first, last] name pair
        self._name_accesses = set()  # name-based methods used since refresh
        # Zip format: only None, 5 or 9 are meaningful; anything else
        # silently falls back to None (random format), as before.
        self.zip_type = zip_type if zip_type in [None, 5, 9] else None
        # Age bounds default to 16..80 and are swapped if given reversed.
        # isinstance() replaces the non-idiomatic type(x) == int checks.
        self.min_age = min_age if isinstance(min_age, int) else 16
        self.max_age = max_age if isinstance(max_age, int) else 80
        if self.min_age > self.max_age:
            self.min_age, self.max_age = self.max_age, self.min_age
        # Seed (or randomize) the module RNG.
        self.reset(seed)

    def _get_names(self):
        """Draw a fresh first/last name pair and reset access tracking."""
        self._names = [rand(data.FIRST_NAMES), rand(data.LAST_NAMES)]
        self._name_accesses = set()

    def reset(self, seed=None):
        """Reset the seed for the random number generator.

        The seed can be any integer; using the same seed repeatedly will
        generate the same sequence of random numbers. If no seed (or a
        non-integer seed) is given, a fresh randomized seed is used.
        """
        # isinstance() also makes the old redundant "is not None" check
        # unnecessary: None is never an int.
        if isinstance(seed, int):
            random.seed(seed)
        else:
            random.seed()

    @uses_names
    def name(self):
        """Return a full name, e.g. "Jane Doe"."""
        return " ".join(self._names)

    @uses_names
    def first_name(self):
        return self._names[0]

    @uses_names
    def last_name(self):
        return self._names[1]

    @uses_names
    def username(self):
        """Return first initial + last name, lowercased, apostrophes removed."""
        first, last = self._names
        return "".join([first[:1], last]).lower().replace("'", "")

    @uses_names
    def email(self):
        """Return an email address matching the current fake name."""
        first, last = self._names
        return ("%s%s@%s" % (first[:1], last, domain())).lower().replace("'", "")

    def full_address(self):
        return "%s\n%s, %s %s" % (self.street_address(), self.city(),
                                  self.state(), self.zip_code())

    def phonenumber(self):
        # BUG FIX: the pattern previously ended in five '#' characters,
        # producing a 5-digit final group; US-style numbers are ###-###-####.
        return numerify("###-###-####")

    def street_address(self):
        return numerify(random.choice(
            ["##### %s" % patterns.STREET_NAME(),
             "#### %s Ave." % patterns.STREET_NAME(),
             "### %s St." % patterns.STREET_NAME(),
             "### %s %s" % (patterns.STREET_NAME(), secondary_address()),
             "#### %s %s" % (patterns.STREET_NAME(), secondary_address())]))

    def city(self):
        return patterns.CITY()

    def state(self):
        return rand(data.STATE_ABBR)

    def zip_code(self):
        zips = {None: random.choice(["#####", "#####-####"]),
                5: "#####",
                9: "#####-####"}
        return numerify(zips[self.zip_type])

    def company(self):
        return patterns.COMPANY_NAME()

    def lorem(self):
        """Return every sample word exactly once, in random order."""
        word_list = data.WORDS.split()
        random.shuffle(word_list)
        return " ".join(word_list)

    def age(self):
        return random.randint(self.min_age, self.max_age)

    def gender(self):
        return random.choice(["M", "F"])
| {
"repo_name": "deepthawtz/faker",
"path": "faker/__init__.py",
"copies": "1",
"size": "4504",
"license": "mit",
"hash": -5438433923563680000,
"line_mean": 31.1714285714,
"line_max": 102,
"alpha_frac": 0.5810390764,
"autogenerated": false,
"ratio": 3.6827473426001633,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9703482205302603,
"avg_score": 0.012060842739512226,
"num_lines": 140
} |
"""A library for helping optimizing Python extensions compilation."""
from sys import version_info as _version_info
# Fail fast at import time on unsupported interpreters.
if _version_info[0] < 3 or (_version_info[0] == 3 and _version_info[1] < 6):
    raise ImportError("Compilertools needs Python 3.6 or above.")
del _version_info  # keep the module namespace clean
from compilertools._version import __version__ # noqa: E402
from compilertools import imports # noqa: E402
from compilertools.processors import get_processor as _get_processor # noqa: E402
from compilertools.compilers import get_compiler as _get_compiler # noqa: E402
def get_compiler():
    """
    Get current compiler information.

    Returns
    -------
    compilertools.compilers.CompilerBase subclass instance
        Compiler used on the current machine.
    """
    # DOC FIX: the Returns section previously described a Processor; this
    # wrapper resolves the current *compiler* (it was swapped with
    # get_processor's docstring).
    return _get_compiler(current_compiler=True)
def get_processor():
    """
    Get current processor information.

    Returns
    -------
    compilertools.processors.ProcessorBase subclass instance
        Processor of the current machine.
    """
    # DOC FIX: the Returns section previously described a Compiler; this
    # wrapper resolves the current *processor* (it was swapped with
    # get_compiler's docstring).
    return _get_processor(arch=None, current_machine=True)
__all__ = ["imports", "get_compiler", "get_processor", "__version__"]
| {
"repo_name": "JGoutin/compilertools",
"path": "compilertools/__init__.py",
"copies": "1",
"size": "1092",
"license": "bsd-2-clause",
"hash": 3798193727462256000,
"line_mean": 25.6341463415,
"line_max": 82,
"alpha_frac": 0.6923076923,
"autogenerated": false,
"ratio": 4.029520295202952,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 41
} |
"""A library for integrating pyOpenSSL with CherryPy.
The OpenSSL module must be importable for SSL functionality.
You can obtain it from `here <https://launchpad.net/pyopenssl>`_.
To use this module, set CherryPyWSGIServer.ssl_adapter to an instance of
SSLAdapter. There are two ways to use SSL:
Method One
----------
* ``ssl_adapter.context``: an instance of SSL.Context.
If this is not None, it is assumed to be an SSL.Context instance,
and will be passed to SSL.Connection on bind(). The developer is
responsible for forming a valid Context object. This approach is
to be preferred for more flexibility, e.g. if the cert and key are
streams instead of files, or need decryption, or SSL.SSLv3_METHOD
is desired instead of the default SSL.SSLv23_METHOD, etc. Consult
the pyOpenSSL documentation for complete options.
Method Two (shortcut)
---------------------
* ``ssl_adapter.certificate``: the filename of the server SSL certificate.
* ``ssl_adapter.private_key``: the filename of the server's private key file.
Both are None by default. If ssl_adapter.context is None, but .private_key
and .certificate are both given and valid, they will be read, and the
context will be automatically created from them.
"""
import socket
import threading
import time
from django_wsgiserver import wsgiserver
try:
from OpenSSL import SSL
from OpenSSL import crypto
except ImportError:
SSL = None
class SSL_fileobject(wsgiserver.CP_fileobject):
    """SSL file object attached to a socket object."""

    # Give up on a blocked SSL operation after this many seconds.
    ssl_timeout = 3
    # Delay between retries while OpenSSL reports WantRead/WantWrite.
    ssl_retry = .01

    def _safe_call(self, is_reader, call, *args, **kwargs):
        """Wrap the given call with SSL error-trapping.

        is_reader: if False EOF errors will be raised. If True, EOF errors
        will return "" (to emulate normal sockets).
        """
        start = time.time()
        while True:
            try:
                return call(*args, **kwargs)
            except SSL.WantReadError:
                # Sleep and try again. This is dangerous, because it means
                # the rest of the stack has no way of differentiating
                # between a "new handshake" error and "client dropped".
                # Note this isn't an endless loop: there's a timeout below.
                time.sleep(self.ssl_retry)
            except SSL.WantWriteError:
                time.sleep(self.ssl_retry)
            except SSL.SysCallError as e:
                # OS-level error surfaced through OpenSSL: translate to a
                # plain socket.error (or "" on ignorable reader errors).
                if is_reader and e.args == (-1, 'Unexpected EOF'):
                    return ""
                errnum = e.args[0]
                if is_reader and errnum in wsgiserver.socket_errors_to_ignore:
                    return ""
                raise socket.error(errnum)
            except SSL.Error as e:
                if is_reader and e.args == (-1, 'Unexpected EOF'):
                    return ""
                # Dig the reason string out of the nested error tuple, if any.
                thirdarg = None
                try:
                    thirdarg = e.args[0][0][2]
                except IndexError:
                    pass
                if thirdarg == 'http request':
                    # The client is talking HTTP to an HTTPS server.
                    raise wsgiserver.NoSSLError()
                raise wsgiserver.FatalSSLAlert(*e.args)
            except:
                raise
            # Only the retry path reaches here; enforce the overall timeout.
            if time.time() - start > self.ssl_timeout:
                raise socket.timeout("timed out")

    def recv(self, size):
        # Reads are forgiving: EOF is reported as "" like a plain socket.
        return self._safe_call(True, super(SSL_fileobject, self).recv, size)

    def sendall(self, *args, **kwargs):
        # Writes are strict: EOF and errors propagate to the caller.
        return self._safe_call(False, super(SSL_fileobject, self).sendall,
                               *args, **kwargs)

    def send(self, *args, **kwargs):
        return self._safe_call(False, super(SSL_fileobject, self).send,
                               *args, **kwargs)
class SSLConnection:
    """A thread-safe wrapper for an SSL.Connection.

    ``*args``: the arguments to create the wrapped ``SSL.Connection(*args)``.
    """

    def __init__(self, *args):
        self._ssl_conn = SSL.Connection(*args)
        # Serializes every call into the underlying connection; RLock so a
        # wrapped method may be re-entered from the same thread.
        self._lock = threading.RLock()
        # Dynamically generate a lock-guarded pass-through wrapper for each
        # socket/SSL method name below, delegating to self._ssl_conn.
        for f in ('get_context', 'pending', 'send', 'write', 'recv', 'read',
                  'renegotiate', 'bind', 'listen', 'connect', 'accept',
                  'setblocking', 'fileno', 'close', 'get_cipher_list',
                  'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
                  'makefile', 'get_app_data', 'set_app_data', 'state_string',
                  'sock_shutdown', 'get_peer_certificate', 'want_read',
                  'want_write', 'set_connect_state', 'set_accept_state',
                  'connect_ex', 'sendall', 'settimeout', 'gettimeout'):
            exec("""def %s(self, *args):
    self._lock.acquire()
    try:
        return self._ssl_conn.%s(*args)
    finally:
        self._lock.release()
""" % (f, f))

    def shutdown(self, *args):
        # Hand-written (not generated) because the signature differs below.
        self._lock.acquire()
        try:
            # pyOpenSSL.socket.shutdown takes no args
            return self._ssl_conn.shutdown()
        finally:
            self._lock.release()
class pyOpenSSLAdapter(wsgiserver.SSLAdapter):
    """A wrapper for integrating pyOpenSSL with CherryPy."""

    context = None
    """An instance of SSL.Context."""

    certificate = None
    """The filename of the server SSL certificate."""

    private_key = None
    """The filename of the server's private key file."""

    certificate_chain = None
    """Optional. The filename of CA's intermediate certificate bundle.
    This is needed for cheaper "chained root" SSL certificates, and should be
    left as None if not required."""

    def __init__(self, certificate, private_key, certificate_chain=None):
        if SSL is None:
            raise ImportError("You must install pyOpenSSL to use HTTPS.")
        self.context = None
        self.certificate = certificate
        self.private_key = private_key
        self.certificate_chain = certificate_chain
        self._environ = None

    def bind(self, sock):
        """Wrap and return the given socket."""
        if self.context is None:
            self.context = self.get_context()
        conn = SSLConnection(self.context, sock)
        self._environ = self.get_environ()
        return conn

    def wrap(self, sock):
        """Wrap and return the given socket, plus WSGI environ entries."""
        return sock, self._environ.copy()

    def get_context(self):
        """Return an SSL.Context from self attributes."""
        # See http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/442473
        c = SSL.Context(SSL.SSLv23_METHOD)
        c.use_privatekey_file(self.private_key)
        if self.certificate_chain:
            # NOTE(review): load_verify_locations() configures CA
            # *verification* locations; serving an intermediate bundle to
            # clients is usually done with use_certificate_chain_file().
            # Confirm which behavior is intended before changing.
            c.load_verify_locations(self.certificate_chain)
        c.use_certificate_file(self.certificate)
        return c

    def get_environ(self):
        """Return WSGI environ entries to be merged into each request."""
        ssl_environ = {
            "HTTPS": "on",
            # pyOpenSSL doesn't provide access to any of these AFAICT
            # 'SSL_PROTOCOL': 'SSLv2',
            # SSL_CIPHER string The cipher specification name
            # SSL_VERSION_INTERFACE string The mod_ssl program version
            # SSL_VERSION_LIBRARY string The OpenSSL program version
        }

        if self.certificate:
            # Server certificate attributes.
            # BUG FIX: close the certificate file deterministically instead
            # of leaking the handle until garbage collection.
            with open(self.certificate, 'rb') as cert_file:
                cert = crypto.load_certificate(crypto.FILETYPE_PEM,
                                               cert_file.read())
            ssl_environ.update({
                'SSL_SERVER_M_VERSION': cert.get_version(),
                'SSL_SERVER_M_SERIAL': cert.get_serial_number(),
                # 'SSL_SERVER_V_START': Validity of server's certificate (start time),
                # 'SSL_SERVER_V_END': Validity of server's certificate (end time),
            })

            for prefix, dn in [("I", cert.get_issuer()),
                               ("S", cert.get_subject())]:
                # X509Name objects don't seem to have a way to get the
                # complete DN string. Use str() and slice it instead,
                # because str(dn) == "<X509Name object '/C=US/ST=...'>"
                dnstr = str(dn)[18:-2]

                wsgikey = 'SSL_SERVER_%s_DN' % prefix
                ssl_environ[wsgikey] = dnstr

                # The DN should be of the form: /k1=v1/k2=v2, but we must allow
                # for any value to contain slashes itself (in a URL).
                while dnstr:
                    pos = dnstr.rfind("=")
                    dnstr, value = dnstr[:pos], dnstr[pos + 1:]
                    pos = dnstr.rfind("/")
                    dnstr, key = dnstr[:pos], dnstr[pos + 1:]
                    if key and value:
                        wsgikey = 'SSL_SERVER_%s_DN_%s' % (prefix, key)
                        ssl_environ[wsgikey] = value

        return ssl_environ

    def makefile(self, sock, mode='r', bufsize=-1):
        # Hand SSL connections an SSL-aware file object; plain sockets get
        # the stock buffered file object.
        if SSL and isinstance(sock, SSL.ConnectionType):
            timeout = sock.gettimeout()
            f = SSL_fileobject(sock, mode, bufsize)
            f.ssl_timeout = timeout
            return f
        else:
            return wsgiserver.CP_fileobject(sock, mode, bufsize)
| {
"repo_name": "Hernrup/django-wsgiserver3",
"path": "django_wsgiserver/wsgiserver/ssl_pyopenssl.py",
"copies": "1",
"size": "9160",
"license": "bsd-3-clause",
"hash": 4781519175381370000,
"line_mean": 35.4940239044,
"line_max": 79,
"alpha_frac": 0.5804585153,
"autogenerated": false,
"ratio": 4.173120728929385,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5253579244229385,
"avg_score": null,
"num_lines": null
} |
"""A library for integrating pyOpenSSL with CherryPy.
The OpenSSL module must be importable for SSL functionality.
You can obtain it from http://pyopenssl.sourceforge.net/
To use this module, set CherryPyWSGIServer.ssl_adapter to an instance of
SSLAdapter. There are two ways to use SSL:
Method One:
ssl_adapter.context: an instance of SSL.Context.
If this is not None, it is assumed to be an SSL.Context instance,
and will be passed to SSL.Connection on bind(). The developer is
responsible for forming a valid Context object. This approach is
to be preferred for more flexibility, e.g. if the cert and key are
streams instead of files, or need decryption, or SSL.SSLv3_METHOD
is desired instead of the default SSL.SSLv23_METHOD, etc. Consult
the pyOpenSSL documentation for complete options.
Method Two (shortcut):
ssl_adapter.certificate: the filename of the server SSL certificate.
ssl_adapter.private_key: the filename of the server's private key file.
Both are None by default. If ssl_adapter.context is None, but .private_key
and .certificate are both given and valid, they will be read, and the
context will be automatically created from them.
ssl_adapter.certificate_chain: (optional) the filename of CA's intermediate
certificate bundle. This is needed for cheaper "chained root" SSL
certificates, and should be left as None if not required.
"""
import socket
import threading
import time
from cherrypy import wsgiserver
try:
from OpenSSL import SSL
from OpenSSL import crypto
except ImportError:
SSL = None
class SSL_fileobject(wsgiserver.CP_fileobject):
    """SSL file object attached to a socket object."""

    # Give up on a blocked SSL operation after this many seconds.
    ssl_timeout = 3
    # Delay between retries while OpenSSL reports WantRead/WantWrite.
    ssl_retry = .01

    def _safe_call(self, is_reader, call, *args, **kwargs):
        """Wrap the given call with SSL error-trapping.

        is_reader: if False EOF errors will be raised. If True, EOF errors
        will return "" (to emulate normal sockets).
        """
        start = time.time()
        while True:
            try:
                return call(*args, **kwargs)
            except SSL.WantReadError:
                # Sleep and try again. This is dangerous, because it means
                # the rest of the stack has no way of differentiating
                # between a "new handshake" error and "client dropped".
                # Note this isn't an endless loop: there's a timeout below.
                time.sleep(self.ssl_retry)
            except SSL.WantWriteError:
                time.sleep(self.ssl_retry)
            except SSL.SysCallError, e:
                # OS-level error surfaced through OpenSSL: translate to a
                # plain socket.error (or "" on ignorable reader errors).
                if is_reader and e.args == (-1, 'Unexpected EOF'):
                    return ""
                errnum = e.args[0]
                if is_reader and errnum in wsgiserver.socket_errors_to_ignore:
                    return ""
                raise socket.error(errnum)
            except SSL.Error, e:
                if is_reader and e.args == (-1, 'Unexpected EOF'):
                    return ""
                # Dig the reason string out of the nested error tuple, if any.
                thirdarg = None
                try:
                    thirdarg = e.args[0][0][2]
                except IndexError:
                    pass
                if thirdarg == 'http request':
                    # The client is talking HTTP to an HTTPS server.
                    raise wsgiserver.NoSSLError()
                raise wsgiserver.FatalSSLAlert(*e.args)
            except:
                raise
            # Only the retry path reaches here; enforce the overall timeout.
            if time.time() - start > self.ssl_timeout:
                raise socket.timeout("timed out")

    def recv(self, *args, **kwargs):
        buf = []
        r = super(SSL_fileobject, self).recv
        while True:
            data = self._safe_call(True, r, *args, **kwargs)
            buf.append(data)
            # Keep reading while OpenSSL still reports buffered decrypted
            # bytes, so one recv() drains the pending SSL data.
            p = self._sock.pending()
            if not p:
                return "".join(buf)

    def sendall(self, *args, **kwargs):
        # Writes are strict: EOF and errors propagate to the caller.
        return self._safe_call(False, super(SSL_fileobject, self).sendall,
                               *args, **kwargs)

    def send(self, *args, **kwargs):
        return self._safe_call(False, super(SSL_fileobject, self).send,
                               *args, **kwargs)
class SSLConnection:
    """A thread-safe wrapper for an SSL.Connection.

    *args: the arguments to create the wrapped SSL.Connection(*args).
    """

    def __init__(self, *args):
        self._ssl_conn = SSL.Connection(*args)
        # Serializes every call into the underlying connection; RLock so a
        # wrapped method may be re-entered from the same thread.
        self._lock = threading.RLock()
        # Dynamically generate a lock-guarded pass-through wrapper for each
        # socket/SSL method name below, delegating to self._ssl_conn.
        for f in ('get_context', 'pending', 'send', 'write', 'recv', 'read',
                  'renegotiate', 'bind', 'listen', 'connect', 'accept',
                  'setblocking', 'fileno', 'close', 'get_cipher_list',
                  'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
                  'makefile', 'get_app_data', 'set_app_data', 'state_string',
                  'sock_shutdown', 'get_peer_certificate', 'want_read',
                  'want_write', 'set_connect_state', 'set_accept_state',
                  'connect_ex', 'sendall', 'settimeout', 'gettimeout'):
            exec("""def %s(self, *args):
    self._lock.acquire()
    try:
        return self._ssl_conn.%s(*args)
    finally:
        self._lock.release()
""" % (f, f))

    def shutdown(self, *args):
        # Hand-written (not generated) because the signature differs below.
        self._lock.acquire()
        try:
            # pyOpenSSL.socket.shutdown takes no args
            return self._ssl_conn.shutdown()
        finally:
            self._lock.release()
class pyOpenSSLAdapter(wsgiserver.SSLAdapter):
    """A wrapper for integrating pyOpenSSL with CherryPy."""

    def __init__(self, certificate, private_key, certificate_chain=None):
        if SSL is None:
            raise ImportError("You must install pyOpenSSL to use HTTPS.")
        self.context = None
        self.certificate = certificate
        self.private_key = private_key
        self.certificate_chain = certificate_chain
        self._environ = None

    def bind(self, sock):
        """Wrap and return the given socket."""
        if self.context is None:
            self.context = self.get_context()
        conn = SSLConnection(self.context, sock)
        self._environ = self.get_environ()
        return conn

    def wrap(self, sock):
        """Wrap and return the given socket, plus WSGI environ entries."""
        return sock, self._environ.copy()

    def get_context(self):
        """Return an SSL.Context from self attributes."""
        # See http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/442473
        c = SSL.Context(SSL.SSLv23_METHOD)
        c.use_privatekey_file(self.private_key)
        if self.certificate_chain:
            # NOTE(review): load_verify_locations() configures CA
            # *verification* locations; serving an intermediate bundle to
            # clients is usually done with use_certificate_chain_file().
            # Confirm which behavior is intended before changing.
            c.load_verify_locations(self.certificate_chain)
        c.use_certificate_file(self.certificate)
        return c

    def get_environ(self):
        """Return WSGI environ entries to be merged into each request."""
        ssl_environ = {
            "HTTPS": "on",
            # pyOpenSSL doesn't provide access to any of these AFAICT
            ## 'SSL_PROTOCOL': 'SSLv2',
            ## SSL_CIPHER string The cipher specification name
            ## SSL_VERSION_INTERFACE string The mod_ssl program version
            ## SSL_VERSION_LIBRARY string The OpenSSL program version
        }

        if self.certificate:
            # Server certificate attributes.
            # BUG FIX: close the certificate file deterministically instead
            # of leaking the handle until garbage collection.
            with open(self.certificate, 'rb') as cert_file:
                cert = crypto.load_certificate(crypto.FILETYPE_PEM,
                                               cert_file.read())
            ssl_environ.update({
                'SSL_SERVER_M_VERSION': cert.get_version(),
                'SSL_SERVER_M_SERIAL': cert.get_serial_number(),
                ## 'SSL_SERVER_V_START': Validity of server's certificate (start time),
                ## 'SSL_SERVER_V_END': Validity of server's certificate (end time),
            })

            for prefix, dn in [("I", cert.get_issuer()),
                               ("S", cert.get_subject())]:
                # X509Name objects don't seem to have a way to get the
                # complete DN string. Use str() and slice it instead,
                # because str(dn) == "<X509Name object '/C=US/ST=...'>"
                dnstr = str(dn)[18:-2]

                wsgikey = 'SSL_SERVER_%s_DN' % prefix
                ssl_environ[wsgikey] = dnstr

                # The DN should be of the form: /k1=v1/k2=v2, but we must allow
                # for any value to contain slashes itself (in a URL).
                while dnstr:
                    pos = dnstr.rfind("=")
                    dnstr, value = dnstr[:pos], dnstr[pos + 1:]
                    pos = dnstr.rfind("/")
                    dnstr, key = dnstr[:pos], dnstr[pos + 1:]
                    if key and value:
                        wsgikey = 'SSL_SERVER_%s_DN_%s' % (prefix, key)
                        ssl_environ[wsgikey] = value

        return ssl_environ

    def makefile(self, sock, mode='r', bufsize=-1):
        # Hand SSL connections an SSL-aware file object; plain sockets get
        # the stock buffered file object.
        if SSL and isinstance(sock, SSL.ConnectionType):
            timeout = sock.gettimeout()
            f = SSL_fileobject(sock, mode, bufsize)
            f.ssl_timeout = timeout
            return f
        else:
            return wsgiserver.CP_fileobject(sock, mode, bufsize)
| {
"repo_name": "southpawtech/TACTIC-DEV",
"path": "3rd_party/CherryPy/cherrypy/wsgiserver/ssl_pyopenssl.py",
"copies": "7",
"size": "9378",
"license": "epl-1.0",
"hash": -7422377571311049000,
"line_mean": 37.9128630705,
"line_max": 86,
"alpha_frac": 0.5592876946,
"autogenerated": false,
"ratio": 4.293956043956044,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8353243738556043,
"avg_score": null,
"num_lines": null
} |
"""A library for integrating pyOpenSSL with CherryPy.
The ssl module must be importable for SSL functionality.
To use this module, set CherryPyWSGIServer.ssl_adapter to an instance of
BuiltinSSLAdapter.
ssl_adapter.certificate: the filename of the server SSL certificate.
ssl_adapter.private_key: the filename of the server's private key file.
"""
try:
import ssl
except ImportError:
ssl = None
from cherrypy import wsgiserver
class BuiltinSSLAdapter(wsgiserver.SSLAdapter):
    """A wrapper for integrating Python's builtin ssl module with CherryPy."""

    def __init__(self, certificate, private_key, certificate_chain=None):
        if ssl is None:
            raise ImportError("You must install the ssl module to use HTTPS.")
        self.certificate = certificate
        self.private_key = private_key
        self.certificate_chain = certificate_chain

    def bind(self, sock):
        """Wrap and return the given socket."""
        # The socket is only wrapped per-connection, in wrap().
        return sock

    def wrap(self, sock):
        """Wrap and return the given socket, plus WSGI environ entries."""
        try:
            # SYNTAX FIX: "except E, e" is Python-2-only; "except E as e"
            # works on Python 2.6+ (the ssl module itself requires 2.6)
            # and on Python 3.
            s = ssl.wrap_socket(sock, do_handshake_on_connect=True,
                                server_side=True, certfile=self.certificate,
                                keyfile=self.private_key,
                                ssl_version=ssl.PROTOCOL_SSLv23)
        except ssl.SSLError as e:
            if e.errno == ssl.SSL_ERROR_EOF:
                # This is almost certainly due to the cherrypy engine
                # 'pinging' the socket to assert it's connectable;
                # the 'ping' isn't SSL.
                return None, {}
            elif e.errno == ssl.SSL_ERROR_SSL:
                if e.args[1].endswith('http request'):
                    # The client is speaking HTTP to an HTTPS server.
                    raise wsgiserver.NoSSLError
            raise
        return s, self.get_environ(s)

    # TODO: fill this out more with mod ssl env
    def get_environ(self, sock):
        """Create WSGI environ entries to be merged into each request."""
        cipher = sock.cipher()
        ssl_environ = {
            "wsgi.url_scheme": "https",
            "HTTPS": "on",
            'SSL_PROTOCOL': cipher[1],
            'SSL_CIPHER': cipher[0]
            ## SSL_VERSION_INTERFACE string The mod_ssl program version
            ## SSL_VERSION_LIBRARY string The OpenSSL program version
        }
        return ssl_environ

    def makefile(self, sock, mode='r', bufsize=-1):
        return wsgiserver.CP_fileobject(sock, mode, bufsize)
| {
"repo_name": "mwx1993/TACTIC",
"path": "3rd_party/CherryPy/cherrypy/wsgiserver/ssl_builtin.py",
"copies": "7",
"size": "2541",
"license": "epl-1.0",
"hash": 1958851954904281000,
"line_mean": 35.8260869565,
"line_max": 78,
"alpha_frac": 0.6099960645,
"autogenerated": false,
"ratio": 4.299492385786802,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.030197233588278208,
"num_lines": 69
} |
"""A library for integrating Python's builtin ``ssl`` library with CherryPy.
The ssl module must be importable for SSL functionality.
To use this module, set ``CherryPyWSGIServer.ssl_adapter`` to an instance of
``BuiltinSSLAdapter``.
"""
try:
import ssl
except ImportError:
ssl = None
from cherrypy import wsgiserver
class BuiltinSSLAdapter(wsgiserver.SSLAdapter):
    """A wrapper for integrating Python's builtin ssl module with CherryPy."""

    certificate = None
    """The filename of the server SSL certificate."""

    private_key = None
    """The filename of the server's private key file."""

    def __init__(self, certificate, private_key, certificate_chain=None):
        if ssl is None:
            raise ImportError("You must install the ssl module to use HTTPS.")
        self.certificate = certificate
        self.private_key = private_key
        self.certificate_chain = certificate_chain

    def bind(self, sock):
        """Wrap and return the given socket."""
        # The socket is only wrapped per-connection, in wrap().
        return sock

    def wrap(self, sock):
        """Wrap and return the given socket, plus WSGI environ entries."""
        try:
            # SYNTAX FIX: "except E, e" is Python-2-only; "except E as e"
            # works on Python 2.6+ (the ssl module itself requires 2.6)
            # and on Python 3.
            s = ssl.wrap_socket(sock, do_handshake_on_connect=True,
                                server_side=True, certfile=self.certificate,
                                keyfile=self.private_key,
                                ssl_version=ssl.PROTOCOL_SSLv23)
        except ssl.SSLError as e:
            if e.errno == ssl.SSL_ERROR_EOF:
                # This is almost certainly due to the cherrypy engine
                # 'pinging' the socket to assert it's connectable;
                # the 'ping' isn't SSL.
                return None, {}
            elif e.errno == ssl.SSL_ERROR_SSL:
                if e.args[1].endswith('http request'):
                    # The client is speaking HTTP to an HTTPS server.
                    raise wsgiserver.NoSSLError
            raise
        return s, self.get_environ(s)

    # TODO: fill this out more with mod ssl env
    def get_environ(self, sock):
        """Create WSGI environ entries to be merged into each request."""
        cipher = sock.cipher()
        ssl_environ = {
            "wsgi.url_scheme": "https",
            "HTTPS": "on",
            'SSL_PROTOCOL': cipher[1],
            'SSL_CIPHER': cipher[0]
            ## SSL_VERSION_INTERFACE string The mod_ssl program version
            ## SSL_VERSION_LIBRARY string The OpenSSL program version
        }
        return ssl_environ

    def makefile(self, sock, mode='r', bufsize=-1):
        return wsgiserver.CP_fileobject(sock, mode, bufsize)
| {
"repo_name": "jolth/websiteDevMicrosystem",
"path": "web/wsgiserver/ssl_builtin.py",
"copies": "79",
"size": "2589",
"license": "apache-2.0",
"hash": -7540007263591561000,
"line_mean": 34.9583333333,
"line_max": 78,
"alpha_frac": 0.5998455002,
"autogenerated": false,
"ratio": 4.329431438127091,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0344945710776555,
"num_lines": 72
} |
"""A library for math related things
Copyright 2015 Heung Ming Tai
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
class Interval(object):
    """An interval with a starting and a ending points, open or closed.

    It's a read-only class; instances compare by value and are hashable.

    Attributes:
        start (int or float): The starting point of the interval.
        end (int or float): The ending point of the interval.
        is_start_opened (Optional[bool]): True if the starting point is open.
            It's False by default.
        is_end_opened (Optional[bool]): True if the ending point is open.
            It's False by default.
    """

    def __init__(self, start, end, is_start_opened=False, is_end_opened=False):
        self._start = start
        self._end = end
        self._is_start_opened = is_start_opened
        self._is_end_opened = is_end_opened

    @property
    def start(self):
        return self._start

    @property
    def end(self):
        return self._end

    @property
    def is_start_opened(self):
        return self._is_start_opened

    @property
    def is_end_opened(self):
        return self._is_end_opened

    def _key(self):
        # Single source of truth for the field ordering used by repr,
        # equality and hashing.
        return (
            self._start,
            self._end,
            self._is_start_opened,
            self._is_end_opened,
        )

    def __str__(self):
        tmp = "Interval(start=%r,end=%r,is_start_opened=%r,is_end_opened=%r)"
        return tmp % self._key()

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        if not isinstance(other, Interval):
            return False
        return self._key() == other._key()

    def __ne__(self, other):
        # BUG FIX (Python 2 compat): this file targets both Pythons via
        # __future__ imports, but Python 2 does not derive != from __eq__.
        return not self.__eq__(other)

    def __hash__(self):
        # BUG FIX: defining __eq__ without __hash__ makes instances
        # unhashable under Python 3; hash the same value key as __eq__.
        return hash(self._key())
| {
"repo_name": "hermantai/sorno-py-scripts",
"path": "sorno/mathlib.py",
"copies": "1",
"size": "2471",
"license": "apache-2.0",
"hash": -2402409308476059000,
"line_mean": 27.4022988506,
"line_max": 79,
"alpha_frac": 0.6171590449,
"autogenerated": false,
"ratio": 4.084297520661157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5201456565561157,
"avg_score": null,
"num_lines": null
} |
"""A library for parsing and evaluating dice notation."""
from __future__ import absolute_import, print_function, unicode_literals
from pyparsing import ParseBaseException
import dice.elements
import dice.grammar
import dice.utilities
from dice.constants import DiceExtreme
from dice.exceptions import DiceBaseException, DiceException, DiceFatalException
__all__ = [
"roll",
"roll_min",
"roll_max",
"elements",
"grammar",
"utilities",
"command",
"DiceBaseException",
"DiceException",
"DiceFatalException",
"DiceExtreme",
]
__author__ = "Sam Clements <sam@borntyping.co.uk>, " "Caleb Johnson <me@calebj.io>"
__version__ = "3.1.2"
def roll(string, **kwargs):
    """Parses and evaluates a dice expression.

    All keyword arguments are forwarded to the internal ``_roll`` helper
    (e.g. ``single``, ``raw``, ``return_kwargs``, plus anything accepted
    by element evaluation).
    """
    return _roll(string, **kwargs)
def roll_min(string, **kwargs):
    """Parses and evaluates the minimum of a dice expression.

    Same as ``roll`` but forces ``force_extreme=DiceExtreme.EXTREME_MIN``
    during evaluation; other keyword arguments are forwarded to ``_roll``.
    """
    return _roll(string, force_extreme=DiceExtreme.EXTREME_MIN, **kwargs)
def roll_max(string, **kwargs):
    """Parses and evaluates the maximum of a dice expression.

    Same as ``roll`` but forces ``force_extreme=DiceExtreme.EXTREME_MAX``
    during evaluation; other keyword arguments are forwarded to ``_roll``.
    """
    return _roll(string, force_extreme=DiceExtreme.EXTREME_MAX, **kwargs)
def parse_expression(string):
    """Parse *string* with the dice grammar, requiring a complete match."""
    return dice.grammar.expression.parseString(string, parseAll=True)
def _roll(string, single=True, raw=False, return_kwargs=False, **kwargs):
    """Shared implementation behind ``roll``/``roll_min``/``roll_max``.

    Parses *string*, evaluates each parsed element unless ``raw`` is set,
    collapses a one-element result when ``single`` is set, and optionally
    returns the evaluation kwargs alongside the result. Parser errors are
    re-raised as ``DiceBaseException``.
    """
    try:
        result = list(parse_expression(string))
        if not raw:
            result = [item.evaluate_cached(**kwargs) for item in result]
        if single:
            result = dice.utilities.single(result)
    except ParseBaseException as exc:
        raise DiceBaseException.from_other(exc)
    if return_kwargs:
        return result, kwargs
    return result
| {
"repo_name": "borntyping/python-dice",
"path": "dice/__init__.py",
"copies": "1",
"size": "1739",
"license": "mit",
"hash": 7790421438806954000,
"line_mean": 25.7538461538,
"line_max": 83,
"alpha_frac": 0.6797009776,
"autogenerated": false,
"ratio": 3.6457023060796647,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48254032836796645,
"avg_score": null,
"num_lines": null
} |
"""A library for parsing Heroes of the Storm replays."""
from s2protocol.mpyq import mpyq
from s2protocol import protocol34784 as proto
class Replay(object):
    """Reads a Heroes of the Storm replay (MPQ archive) via s2protocol."""

    def __init__(self, replay_file):
        self.replay_file = replay_file
        self.archive = mpyq.MPQArchive(self.replay_file)

    def _get_header(self):
        """Decode the replay header from the MPQ user-data header."""
        contents = self.archive.header['user_data_header']['content']
        return proto.decode_replay_header(contents)

    def _get_details(self):
        """Decode the replay.details file (players, map, timestamps...)."""
        contents = self.archive.read_file('replay.details')
        return proto.decode_replay_details(contents)

    def _get_game_events(self):
        """Return all decoded game events as a list."""
        contents = self.archive.read_file('replay.game.events')
        # list(...) instead of a pass-through comprehension: the decoder
        # yields events lazily and callers expect a materialized list.
        return list(proto.decode_replay_game_events(contents))

    def _get_tracker_events(self):
        """Return all decoded tracker events as a list."""
        contents = self.archive.read_file('replay.tracker.events')
        return list(proto.decode_replay_tracker_events(contents))

    def get_deaths(self):
        """Return all unit-death tracker events."""
        events = self._get_tracker_events()
        death_event = ('NNet.Replay.Tracker.SUnitDiedEvent',)
        return self.filter_events(events, death_event)

    @staticmethod
    def filter_events(events, event_names):
        """Filter events given a tuple of a event types.

        :param events: Full list of event dictionaries
        :param event_names: a tuple of event names to filter on
        :return: a filtered event list containing only the specific events
        """
        return [event for event in events if event['_event'] in event_names]

    def get_players(self):
        """Return the player list from the replay details (or None)."""
        details = self._get_details()
        return details.get('m_playerList')

    def get_hotkey_presses(self):
        """Return all hotkey/key-press game events."""
        keys = ('NNet.Game.STriggerHotkeyPressedEvent',
                'NNet.Game.STriggerKeyPressedEvent')
        # CONSISTENCY FIX: reuse filter_events instead of duplicating the
        # same filtering comprehension inline.
        return self.filter_events(self._get_game_events(), keys)
| {
"repo_name": "HoTSStuff/replaylib",
"path": "replaylib/ReplayLib.py",
"copies": "1",
"size": "2057",
"license": "apache-2.0",
"hash": -4096498512928898000,
"line_mean": 35.7321428571,
"line_max": 76,
"alpha_frac": 0.6431696646,
"autogenerated": false,
"ratio": 3.9633911368015413,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5106560801401541,
"avg_score": null,
"num_lines": null
} |
import trunk.matgen
from numpy import *
def oldCompleteCholesky(M):
    ''' Computes the cholesky factorization of a matrix M, i.e the
    lower triangular matrix T such as M = T . T.transpose
    M should be a symmetric positive definite matrix
    runs in O(n^3/3) where M is a matrix of size nxn'''
    n = M.shape[0]
    T = zeros((n, n))
    for col in range(n):
        # Diagonal entry: pivot minus the squares of the row so far.
        diag = M[col][col]
        for k in range(col):
            diag -= T[col][k] * T[col][k]
        T[col][col] = sqrt(diag)
        # Entries below the diagonal in this column.
        for row in range(col + 1, n):
            acc = M[row][col]
            for k in range(col):
                acc -= T[col][k] * T[row][k]
            T[row][col] = acc / T[col][col]
    return T
def completeCholesky(M):
    ''' Computes the cholesky factorization of a matrix M, i.e the
    lower triangular matrix T such as M = T . T.transpose
    M should be a symmetric positive definite matrix
    runs in O(n^3/3) where M is a matrix of size nxn'''
    n = M.shape[0]
    T = zeros((n, n))
    for j in range(n):
        # Diagonal entry, then the whole sub-column in one vectorized step.
        T[j, j] = sqrt(M[j, j] - dot(T[j, :j], T[j, :j]))
        below = M[j + 1:n, j] - dot(T[j + 1:n, :j], (T[j, :j]).transpose())
        T[j + 1:n, j] = below / T[j, j]
    return T
def oldIncompleteCholesky(M):
    '''Only performs an incomplete cholesky factorization
    which result in only an approximation of the result, but is faster '''
    n = M.shape[0]
    T = zeros((n, n))
    for col in range(n):
        pivot = M[col][col]
        if pivot != 0:
            # Same update as the complete factorization for non-zero pivots.
            for k in range(col):
                pivot -= T[col][k] * T[col][k]
            pivot = sqrt(pivot)
        T[col][col] = pivot
        for row in range(col + 1, n):
            # Sparsity pattern of M is preserved: zeros stay zero.
            if M[row][col] == 0:
                continue
            entry = M[row][col]
            for k in range(col):
                entry -= T[col][k] * T[row][k]
            T[row][col] = entry / T[col][col]
    return T
def incompleteCholesky(M):
    '''Only performs an incomplete cholesky factorization
    which result in only an approximation of the result, but is faster '''
    n = M.shape[0]
    T = zeros((n, n))
    for j in range(n):
        T[j, j] = sqrt(M[j, j] - dot(T[j, :j], T[j, :j]))
        for i in range(j + 1, n):
            if M[i, j]:
                T[i, j] = (M[i, j] - dot(T[i, :j], (T[j, :j]).transpose())) / T[j, j]
            else:
                # Keep M's sparsity pattern.
                T[i, j] = 0
    return T
| {
"repo_name": "ethiery/heat-solver",
"path": "trunk/cholesky.py",
"copies": "1",
"size": "2853",
"license": "mit",
"hash": 4237637251507400700,
"line_mean": 30.0108695652,
"line_max": 106,
"alpha_frac": 0.4994742376,
"autogenerated": false,
"ratio": 3.2128378378378377,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9115245012710688,
"avg_score": 0.01941341254542992,
"num_lines": 92
} |
# A library for the molecular dynamics simulation of a gas using verlet integration
# William Gilpin 2014
from matplotlib import pyplot
from scipy import *
from numpy import *
from random import randrange
# NOTE: it might be more clever to construct entire simulation as a CLASS with dt, dx, dy as attributes
# and an explicit generator/yield function for each new timestep.
# def V(r, eps=100.0, sig=1.0e-1):
def V(r, eps=100.0, sig=1.0e-2):
    """Lennard-Jones potential, evaluated elementwise on the distance array *r*.

    Entries where r == 0 (a particle against itself) are forced to zero.
    """
    ratio = sig / r
    energy = eps * (ratio ** 12 - ratio ** 6)
    energy[r == 0.0] = 0.0
    return energy
def init_lattice(N, L=1.0, d=2):
    """Place ~N particles on a regular square lattice inside an L x L box.

    :param N: requested particle count; rounded up to a perfect d-th power
    :param L: box side length
    :param d: dimension used to size the lattice (the layout itself is 2-D)
    :return: 2 x (num_pdim**2) array of x (row 0) and y (row 1) coordinates
    """
    # math/numpy ceil() returns a float; linspace/tile require an integer
    # count, so cast explicitly (modern NumPy raises TypeError otherwise).
    num_pdim = int(ceil(N ** (1. / d)))
    spacing = linspace(L / (2. * num_pdim), L - L / (2. * num_pdim), num_pdim)
    x = tile(spacing, num_pdim)
    y = kron(spacing, ones(num_pdim))
    return vstack((x, y))
def period_bc(coords, L=1.0):
    """Return coords plus its eight periodic images, stacked column-wise.

    Boundary copying is effective up to 1/2 the box diameter, which is
    enough for the short-ranged LJ potential.
    """
    shift_x = zeros(coords.shape)
    shift_x[0, :] = L
    shift_y = zeros(coords.shape)
    shift_y[1, :] = L
    # Image order matches the original hstack: center, +/-x, +/-y, corners.
    offsets = ((0, 0), (1, 0), (-1, 0), (0, 1), (0, -1),
               (1, 1), (1, -1), (-1, 1), (-1, -1))
    images = [coords + ox * shift_x + oy * shift_y for ox, oy in offsets]
    return hstack(images)
def tile_stack(tle, num):
    """Repeat *tle* end-to-end *num* times and fold into a len(tle) x num array.

    Helper equivalent to a kronecker-style product of a row vector and a
    column vector.
    """
    repeated = tile(tle, num)
    return repeated.reshape((len(tle), num))
def update_pos(coords, prev_coords, dt=1e-2, dx=1e-3, dy=1e-3, L=1.0, m=1.0):
    # Use first-order verlet to update the position of a list of particles
    # given snapshots of their coordinates at two consecutive timesteps.
    # Forces come from a central-difference gradient of the LJ potential V()
    # against all particles plus their periodic images.
    new_coords = list()
    new_vels = list()  # NOTE(review): never populated or returned
    coords1 = period_bc(coords)  # positions plus 8 periodic images
    acc = zeros(coords.shape)  # NOTE(review): overwritten in the loop before use
    for pair in zip(coords.T, prev_coords.T):
        coord = pair[0]
        prev_coord = pair[1]
        # Distances from this particle to every imaged particle, and from the
        # four probe positions displaced by +/-dx, +/-dy.
        r0 = sqrt(sum(( coords1 - tile_stack(coord, len(coords1.T)) )**2, axis=0))  # NOTE(review): unused
        r1px = sqrt(sum(( coords1 - tile_stack(coord + array([dx, 0]), len(coords1.T)) )**2, axis=0))
        r1nx = sqrt(sum(( coords1 - tile_stack(coord - array([dx, 0]), len(coords1.T)) )**2, axis=0))
        r1py = sqrt(sum(( coords1 - tile_stack(coord + array([0, dy]), len(coords1.T)) )**2, axis=0))
        r1ny = sqrt(sum(( coords1 - tile_stack(coord - array([0, dy]), len(coords1.T)) )**2, axis=0))
        # a = -grad(V)/m by central differences, then summed over neighbors.
        acc = (-1./m)*array([ (V(r1px) - V(r1nx))/(2.*dx) , (V(r1py) - V(r1ny))/(2.*dy) ])
        acc = sum(acc, axis=1)
        # Verlet position step, wrapped back into the periodic box.
        new_coord = 2*coord - prev_coord + acc*(dt**2)
        new_coord = mod(new_coord, L)
        new_coords.append(new_coord)
    new_coords = array(new_coords)
    return new_coords.T
def rand_vels(numb, T):
    """Return *numb* random speeds whose squares sum to numb*T.

    Starts from a uniform per-particle energy T and repeatedly moves a fixed
    quantum of energy between two randomly chosen entries, conserving the
    total while randomizing the shares.

    :param numb: number of particles
    :param T: temperature-like mean squared speed per particle
    """
    T = double(T)
    vec = ones(numb) * T
    pert = .3 * vec[0]
    # range (not the Python-2-only xrange) keeps this runnable on Python 3;
    # iteration behavior is identical.
    for _ in range(8 * numb):
        src = randrange(0, len(vec))
        # Only move energy out of an entry that can afford it.
        if (vec[src] - pert) > 0.0:
            vec[src] = vec[src] - pert
            dst = randrange(0, len(vec))
            vec[dst] = vec[dst] + pert
    return sqrt(vec)
def rand_vcomps(vels):
    """Project each speed in *vels* onto a uniformly random 2-D direction.

    Returns an array of (vx, vy) rows, one per input speed.
    """
    components = []
    for speed in vels:
        angle = 2 * pi * rand()
        components.append((speed * cos(angle), speed * sin(angle)))
    return array(components)
def verlet_gas(coords0, T=10.0, nsteps=100, dt=1e-2, L=1.0):
    """Run *nsteps* of Verlet integration from initial positions *coords0*.

    :param coords0: 2 x N array of initial particle positions
    :param T: initial temperature used to draw random starting velocities
    :param nsteps: number of integration steps
    :param dt: timestep
    :param L: periodic box side length
    :return: list of 2 x N coordinate arrays — the back-stepped seed frame,
        the initial frame, then one frame per step
    """
    all_coords = list()
    # Seed the two-frame history Verlet needs: back-step the initial
    # positions by one dt using random velocities at temperature T.
    vels0 = rand_vels(len(coords0.T), T)
    vcomps0 = rand_vcomps(vels0).T
    prev_coords = mod(coords0 - dt * vcomps0, L)
    all_coords.append(prev_coords)
    all_coords.append(coords0)
    # range (not the Python-2-only xrange) keeps this runnable on Python 3.
    # The unused `time` array from the original was dropped.
    for _ in range(nsteps):
        nxt = update_pos(all_coords[-1], all_coords[-2], dt=dt)
        all_coords.append(nxt)
    return all_coords
def auto_corr(all_coords, ind=0):
    """Cumulative autocorrelation (mean squared displacement) of one particle.

    :param all_coords: sequence of per-timestep coordinate arrays
    :param ind: column index of the particle to track
    """
    # Might need to think about box wraparound here.
    reference = all_coords[0][:, ind]
    displacements = array([sum((frame[:, ind] - reference) ** 2)
                           for frame in all_coords])
    steps = arange(len(displacements)) + 1.
    return cumsum(displacements / steps)
"repo_name": "williamgilpin/vergas",
"path": "vergas_funcs.py",
"copies": "1",
"size": "4839",
"license": "mit",
"hash": 2452828129331204600,
"line_mean": 32.6111111111,
"line_max": 103,
"alpha_frac": 0.5986774127,
"autogenerated": false,
"ratio": 3.0529968454258674,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4151674258125867,
"avg_score": null,
"num_lines": null
} |
"""A library for validating SQLAlchemy-jsonapi apis."""
import copy
import functools
from jsonschema import validate as schema_validate, exceptions
from sqlalchemy.sql import sqltypes as types
_TYPE_MAP = {
types.Integer: 'integer',
types.String: 'string',
types.Boolean: 'boolean',
}
_BASE_SCHEMA = {
'$schema': 'http://json-schema.org/draft-04/schema#',
'type': 'object',
'properties': {
'data': {
'type': 'object',
'properties': {
'type': {
'type': 'string'
},
'id': {
'type': 'string'
},
'attributes': {
'type': 'object',
'properties': None,
},
},
'required': [
'type',
'attributes'
],
},
},
'required': [
'data',
],
}
class Validator(object):
    """A class used for specifying validation information."""

    @property
    def schema(self):
        """Build the JSON Schema described by this validator's ``Meta``.

        Field types come from ``Meta.field_types`` when present, otherwise
        from the SQLAlchemy column type via ``_TYPE_MAP``; a KeyError for an
        unknown field or type propagates to the caller.
        """
        meta = self.Meta
        overrides = getattr(meta, 'field_types', None)
        attrs = {}
        for name in meta.fields:
            if overrides and name in overrides:
                json_type = overrides[name]
            else:
                column = meta.validates.__table__.c[name]
                json_type = _TYPE_MAP[type(column.type)]
            # JSON-API attribute names use dashes instead of underscores.
            attrs[name.replace('_', '-')] = {'type': json_type}
        built = copy.deepcopy(_BASE_SCHEMA)
        attributes = built['properties']['data']['properties']['attributes']
        attributes['properties'] = attrs
        required = getattr(meta, 'required', None)
        if required:
            attributes['required'] = [item.replace('_', '-') for item in required]
        return built
def _user_error(status_code, message, title):
    """Build a JSON-API style error response for invalid user input.

    :param status_code: HTTP status to report
    :param message: human-readable detail for the error
    :param title: short error title
    :return: (flask JSON response, status_code) tuple
    """
    import flask
    from sqlalchemy_jsonapi import __version__ as jsonapi_version

    payload = {
        'errors': [{
            'status': status_code,
            'source': {'pointer': '{0}'.format(flask.request.path)},
            'title': title,
            'detail': message,
        }],
        'jsonapi': {'version': '1.0'},
        'meta': {'sqlalchemy_jsonapi_version': jsonapi_version},
    }
    return flask.jsonify(payload), status_code
# XXX: rockstar (14 Feb 2017) - The methods argument here is
# sub-optimal. Validation shouldn't be http method specific,
# but rather view specific. This is a side-effect of cramming
# logic for many different actions into a single view. Once
# that's fixed, we should remove the methods argument here.
def validate(validator, methods=None):
    """Decorator validating request bodies against *validator*'s schema.

    :param validator: Validator subclass describing the expected body
    :param methods: HTTP methods to validate; None means every method
    """
    def _decorator(view):
        @functools.wraps(view)
        def _wrapped(*args, **kwargs):
            from flask import request
            if methods is None or request.method in methods:
                try:
                    schema_validate(request.json, validator().schema)
                except exceptions.ValidationError as err:
                    return _user_error(422, err.message, 'Invalid Body')
            return view(*args, **kwargs)
        return _wrapped
    return _decorator
| {
"repo_name": "rockstar/puff",
"path": "puff.py",
"copies": "1",
"size": "3424",
"license": "mit",
"hash": -5167272237142649000,
"line_mean": 29.5714285714,
"line_max": 78,
"alpha_frac": 0.5189836449,
"autogenerated": false,
"ratio": 4.475816993464052,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5494800638364052,
"avg_score": null,
"num_lines": null
} |
"""A library for working with Asciidoc markup."""
##==============================================================#
## DEVELOPED 2014, REVISED 2014, Jeff Rimko. #
##==============================================================#
##==============================================================#
## SECTION: Imports #
##==============================================================#
import datetime
##==============================================================#
## SECTION: Function Definitions #
##==============================================================#
def format_doc(title, body, date="", author="", add_date=False):
    """Formats the document text as Asciidoc.

    :param title: (str) Title of the document; empty titles become "Untitled".
    :param body: (str) Body of the document.
    :param date: (str) Date stamp for the document.
    :param author: (str) Author of the document.
    :param add_date: (bool) Stamp today's date when no explicit date is given.
    """
    parts = ["= %s\n" % (title if title else "Untitled")]
    if author:
        parts.append(":author: %s\n" % author)
    if date or add_date:
        if add_date and not date:
            # e.g. "5 March 2014" — leading zero stripped from the day.
            date = datetime.datetime.now().strftime("%d %B %Y").lstrip("0")
        parts.append(":date: %s\n" % date)
    parts.append("\n")
    parts.append(body)
    return "".join(parts)
##==============================================================#
## SECTION: Main Body #
##==============================================================#
if __name__ == '__main__':
    # Manual smoke test: render a demo document stamped with today's date.
    print(format_doc("Hello world!", "some test here\nmore text", add_date=True))
| {
"repo_name": "jeffrimko/Archiver",
"path": "app/adoclib.py",
"copies": "1",
"size": "1631",
"license": "mit",
"hash": 4455405561736292400,
"line_mean": 37.8333333333,
"line_max": 81,
"alpha_frac": 0.3513182097,
"autogenerated": false,
"ratio": 4.839762611275964,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5691080820975964,
"avg_score": null,
"num_lines": null
} |
"""A Library is a local collection of bundles. It holds a database for the configuration
of the bundles that have been installed into it.
"""
# Copyright (c) 2013 Clarinova. This file is licensed under the terms of the
# Revised BSD License, included in this distribution as LICENSE.txt
from ambry.orm import Dataset, Partition
from ambry.orm import Table, Column
from ..identity import Identity, PartitionNumber, DatasetNumber
class _qc_attrdict(object):
def __init__(self, inner, query):
self.__dict__['inner'] = inner
self.__dict__['query'] = query
def __setattr__(self, key, value):
#key = key.strip('_')
inner = self.__dict__['inner']
inner[key] = value
def __getattr__(self, key):
#key = key.strip('_')
inner = self.__dict__['inner']
if key not in inner:
return None
return inner[key]
def __len__(self):
return len(self.inner)
def __iter__(self):
return iter(self.inner)
def items(self):
return self.inner.items()
def __call__(self, **kwargs):
for k,v in kwargs.items():
self.inner[k] = v
return self.query
class QueryCommand(object):
    '''An object that contains and transfers a query for a bundle.

    Components of the query can include:

    Identity
        id, name, vname, source, dataset, subset, variation, creator, revision

    Column
        name, altname, description, keywords, datatype, measure, units, universe

    Table
        name, altname, description, keywords

    Partition
        id, name, vname, time, space, table, format, other

    When the Partition search is included, the other three components are used
    to find a bundle, then the partition information is used to select a
    partition.

    All of the values are text, except for revision, which is numeric. The
    text values are used in an SQL LIKE phrase, with 'like' terms wrapped in
    '%', so some example values are:

        word        Matches a text field equal, in its entirety, to 'word'
        like word   Matches a text field that contains 'word'
    '''

    def __init__(self, dict_ = None):
        # Wrap an existing component dict, or start empty.
        if dict_ is None:
            dict_ = {}
        self._dict = dict_

    def to_dict(self):
        return self._dict

    def from_dict(self, dict_):
        # NOTE(review): debug-only stub — prints the items (Python 2 print
        # statement) instead of populating self._dict.
        for k,v in dict_.items():
            print "FROM DICT",k,v

    def getsubdict(self, group):
        '''Fetch a configuration group and return the contents as an
        attribute-accessible dict'''
        if not group in self._dict:
            self._dict[group] = {}
        inner = self._dict[group]
        query = self
        return _qc_attrdict(inner, query)

    class ParseError(Exception):
        pass

    @classmethod
    def parse(cls, s):
        """Parse a query string like "identity.name=foo, partition.space like bar"
        into a QueryCommand, via a small tokenizer-driven state machine."""
        from io import StringIO
        import tokenize, token
        state = 'name_start'
        n1 = None          # group name, e.g. 'identity'
        n2 = None          # term name within the group
        value = None
        is_like = False    # 'like' terms get wrapped in '%' for SQL LIKE
        qc = QueryCommand()
        for tt in tokenize.generate_tokens(StringIO(unicode(s)).readline):
            t_type = tt[0]
            t_string = tt[1].strip()
            pos = tt[2][0]
            line = tt[4]
            #print "{:5d} {:5d} {:15s} {:20s} {:8s}.{:8s}= {:10s} || {}".format(t_type, pos, "'"+t_string+"'", state, n1, n2, value, line)
            def err(expected):
                raise cls.ParseError("Expected {} in {} at char {}, got {}, '{}' ".format(expected, line, pos, token.tok_name[t_type], t_string))
            if not t_string:
                continue
            if state == 'value_continuation':
                # Value contained '-' or '.'; glue the next token on and
                # re-store the combined value.
                value += t_string
                if is_like:
                    value = '%' + value + '%'
                state = 'value_continuation'
                qc.getsubdict(n1).__setattr__(n2, value.strip("'").strip('"'))
            elif state == 'name_start' or state == 'name_start_or_value':
                # First part of name
                if state == 'name_start_or_value' and t_string in ('-','.'):
                    # Actually a continuation of the previous value.
                    if is_like:
                        value = value.strip('%')
                    value += t_string
                    state = 'value_continuation'
                elif t_type == token.NAME:
                    n1 = t_string
                    state = 'name_sep'
                    is_like = False
                elif t_type == token.OP and t_string == ',':
                    state = 'name_start'
                elif t_type == token.ENDMARKER:
                    state = 'done'
                else:
                    err( "NAME or ','; got: '{}' ".format(t_string))
            elif state == 'name_sep':
                # '.' that separates names
                if t_type == token.OP and t_string == '.':
                    state = 'name_2'
                else:
                    raise err("'.'")
            elif state == 'name_2':
                # Second part of name
                if t_type == token.NAME:
                    state = 'value_sep'
                    n2 = t_string
                else:
                    raise err("NAME")
            elif state == 'value_sep':
                # The '=' (or the word 'like') that separates name from value
                if (t_type == token.OP and t_string == '=') or (t_type == token.NAME and t_string == 'like'):
                    state = 'value'
                    if t_string == 'like':
                        is_like = True
                else:
                    raise err("'='")
            elif state == 'value':
                # The value; stored immediately, possibly extended later by
                # the value_continuation state.
                if t_type == token.NAME or t_type == token.STRING or t_type == token.NUMBER:
                    value = t_string
                    if is_like:
                        value = '%'+value+'%'
                    state = 'name_start_or_value'
                    qc.getsubdict(n1).__setattr__(n2,value.strip("'").strip('"'))
                else:
                    raise err("NAME or STRING")
            elif state == 'done':
                raise cls.ParseError("Got token after end")
            else:
                raise cls.ParseError("Unknown state: {} at char {}".format(state))
        return qc

    @property
    def identity(self):
        '''Return an array of terms for identity searches'''
        return self.getsubdict('identity')

    @identity.setter
    def identity(self, value):
        self._dict['identity'] = value

    @property
    def table(self):
        '''Return an array of terms for table searches'''
        return self.getsubdict('table')

    @property
    def column(self):
        '''Return an array of terms for column searches'''
        return self.getsubdict('column')

    @property
    def partition(self):
        '''Return an array of terms for partition searches'''
        return self.getsubdict('partition')

    def __str__(self):
        return str(self._dict)
class Resolver(object):
    '''Find a reference to a dataset or partition based on a string,
    which may be a name or object number '''

    def __init__(self, session):
        self.session = session # a Sqlalchemy connection

    def _resolve_ref_orm(self, ref):
        """Classify *ref* and query the ORM for matching datasets/partitions.

        Returns (identity-parts, list of (Dataset, Partition-or-None)).
        """
        import semantic_version
        ip = Identity.classify(ref)
        dqp = None # Dataset query parts
        pqp = None # Partition query parts
        # Build a filter clause appropriate to what kind of reference this is:
        # object number, versioned name, cache key, or simple name.
        if ip.isa == PartitionNumber:
            if ip.on.revision:
                pqp = Partition.vid == str(ip.on)
            else:
                pqp = Partition.id_ == str(ip.on)
        elif ip.isa == DatasetNumber:
            if ip.on.revision:
                dqp = Dataset.vid == str(ip.on)
            else:
                dqp = Dataset.id_ == str(ip.on)
        elif ip.vname:
            dqp = Dataset.vname == ip.vname
            pqp = Partition.vname == ip.vname
        elif ip.cache_key:
            dqp = Dataset.cache_key == ip.cache_key
            pqp = Partition.cache_key == ip.cache_key
        else:
            dqp = Dataset.name == ip.sname
            pqp = Partition.name == ip.sname
        out = []
        # Newest revisions first in both result sets.
        if dqp is not None:
            for dataset in (self.session.query(Dataset).filter(dqp).order_by(Dataset.revision.desc()).all()):
                out.append((dataset, None))
        if pqp is not None:
            for row in (self.session.query(Dataset, Partition).join(Partition).filter(pqp)
                        .order_by(Dataset.revision.desc()).all()):
                out.append((row.Dataset, row.Partition))
        return ip, out

    def _resolve_ref(self, ref, location = Dataset.LOCATION.LIBRARY):
        '''Convert the output from _resolve_ref_orm to nested identities,
        keyed by dataset vid and filtered by location.'''
        ip, results = self._resolve_ref_orm(ref)
        from collections import OrderedDict
        if location and not isinstance(location,(list, tuple)):
            location = [location]
        # Convert the ORM results to identities
        out = OrderedDict()
        for d,p in results:
            if location and d.location not in location:
                continue
            if not d.vid in out:
                out[d.vid] = d.identity
            out[d.vid].locations.set(d.location)
            # Partitions are only added for the LOCATION.LIBRARY location, so
            # we don't have to deal with duplicate partitions
            if p:
                out[d.vid].add_partition(p.identity)
        return ip, out

    def resolve_ref_all(self, ref, location = Dataset.LOCATION.LIBRARY):
        # All matches, not just the best one.
        return self._resolve_ref(ref, location)

    def resolve_ref_one(self, ref, location = Dataset.LOCATION.LIBRARY):
        '''Return the "best" result for an object specification.

        For semantic-version specs, pick the version the spec selects;
        otherwise take the first (highest-revision) match.
        '''
        import semantic_version
        ip, refs = self._resolve_ref(ref, location)
        if not isinstance(ip.version, semantic_version.Spec):
            # NOTE(review): dict.values().pop(0) relies on Python 2 returning
            # a list from values().
            return ip, refs.values().pop(0) if refs and len(refs.values()) else None
        else:
            versions = {semantic_version.Version(d.name.version):d for d in refs.values()}
            best = ip.version.select(versions.keys())
            if not best:
                return ip, None
            else:
                return ip, versions[best]

    def resolve(self, ref, location = Dataset.LOCATION.LIBRARY):
        # Convenience wrapper: just the identity, not the parsed ref parts.
        return self.resolve_ref_one(ref, location)[1]

    def find(self, query_command):
        '''Find a bundle or partition record by a QueryCommand or Identity

        Args:
            query_command. QueryCommand or Identity

        returns:
            A list of identities, either Identity, for datasets, or PartitionIdentity
            for partitions.
        '''
        def like_or_eq(c,v):
            # Use LIKE for wildcard values, plain equality otherwise.
            if v and '%' in v:
                return c.like(v)
            else:
                return c == v
        has_partition = False  # NOTE(review): set but never used
        has_where = False      # NOTE(review): set but never used
        if isinstance(query_command, Identity):
            raise NotImplementedError()
            # NOTE(review): unreachable — and references self.queryByIdentity,
            # self.cache and os, none of which exist here.
            out = []
            for d in self.queryByIdentity(query_command).all():
                id_ = d.identity
                d.path = os.path.join(self.cache,id_.cache_key)
                out.append(d)
        # Select only the entities that the query actually constrains.
        tables = [Dataset]
        if len(query_command.partition) > 0:
            tables.append(Partition)
        if len(query_command.table) > 0:
            tables.append(Table)
        if len(query_command.column) > 0:
            tables.append(Column)
        tables.append(Dataset.id_) # Dataset.id_ is included to ensure result is always a tuple
        query = self.session.query(*tables) # Dataset.id_ is included to ensure result is always a tuple
        if len(query_command.identity) > 0:
            for k,v in query_command.identity.items():
                if k == 'id':
                    k = 'id_'
                try:
                    query = query.filter( like_or_eq(getattr(Dataset, k),v) )
                except AttributeError as e:
                    # Dataset doesn't have the attribute, so ignore it.
                    pass
        if len(query_command.partition) > 0:
            query = query.join(Partition)
            for k,v in query_command.partition.items():
                if k == 'id':
                    k = 'id_'
                from sqlalchemy.sql import or_
                if k == 'any':
                    continue # Just join the partition
                elif k == 'table':
                    # The 'table' value could be the table id
                    # or a table name
                    query = query.join(Table)
                    query = query.filter( or_(Partition.t_id == v,
                                              like_or_eq(Table.name,v)))
                elif k == 'space':
                    query = query.filter( or_( like_or_eq(Partition.space,v)))
                else:
                    query = query.filter( like_or_eq(getattr(Partition, k),v) )
            if not query_command.partition.format:
                # Exclude CSV if not specified
                query = query.filter( Partition.format != 'csv')
        if len(query_command.table) > 0:
            query = query.join(Table)
            for k,v in query_command.table.items():
                query = query.filter( like_or_eq(getattr(Table, k),v) )
        if len(query_command.column) > 0:
            query = query.join(Table)
            query = query.join(Column)
            for k,v in query_command.column.items():
                query = query.filter( like_or_eq(getattr(Column, k),v) )
        query = query.distinct().order_by(Dataset.revision.desc())
        return query
class RemoteResolver(object):
    '''Find a reference to a dataset or partition based on a string,
    which may be a name or object number.  Tries the local resolver first,
    then falls back to (or augments from) a list of remote library URLs.'''

    def __init__(self, local_resolver, remote_urls):
        self.local_resolver = local_resolver  # Resolver or None
        self.urls = remote_urls               # iterable of remote library URLs

    def resolve_ref_one(self, ref, location = Dataset.LOCATION.LIBRARY):
        """Return (ref-parts, best identity) across local and remote libraries."""
        from requests.exceptions import ConnectionError
        from ambry.client.rest import RemoteLibrary
        import semantic_version
        from ..identity import Identity
        if self.local_resolver:
            ip,ident = self.local_resolver.resolve_ref_one(ref, location)
            if ident:
                idents = [ident]
            else:
                idents = []
        else:
            ip = Identity.classify(ref)
            idents = []
        # If the local resolver returned a result, we only need to go to the
        # remotes when this is a semantic version request, to possibly
        # get a newer version.
        if len(idents) == 0 or isinstance(ip.version, semantic_version.Spec):
            if self.urls:
                for url in self.urls:
                    rl = RemoteLibrary(url)
                    try:
                        ident = rl.resolve(ref, location)
                    except ConnectionError:
                        # Unreachable remote: just try the next one.
                        continue
                    if ident:
                        ident.locations.set(Dataset.LOCATION.REMOTE)
                        ident.url = url
                        idents.append(ident)
        if not idents:
            return ip, None
        idents = sorted(idents, reverse=True, key=lambda x: x.on.revision )
        # Since we sorted by revision, and the individual resolutions take care
        # of semantic versioning, the idents array has the highest revision
        # number for the spec at the top.
        return ip, idents.pop(0)

    def resolve(self, ref, location = Dataset.LOCATION.LIBRARY):
        # Convenience wrapper: just the identity, not the parsed ref parts.
        return self.resolve_ref_one(ref, location)[1]

    def find(self, query_command):
        # Structured queries are not supported against remotes.
        raise NotImplementedError
| {
"repo_name": "kball/ambry",
"path": "ambry/library/query.py",
"copies": "1",
"size": "15936",
"license": "bsd-2-clause",
"hash": 7596141980257792000,
"line_mean": 28.0273224044,
"line_max": 145,
"alpha_frac": 0.5229041165,
"autogenerated": false,
"ratio": 4.376819555067289,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00522577157578339,
"num_lines": 549
} |
"""A Library is a local collection of bundles. It holds a database for the configuration
of the bundles that have been installed into it.
"""
# Copyright (c) 2013 Clarinova. This file is licensed under the terms of the
# Revised BSD License, included in this distribution as LICENSE.txt
import os.path
import ambry
import ambry.util
from ambry.util import temp_file_name
from ambry.bundle import DbBundle
from ..identity import LocationRef, Identity
from ambry.orm import Column, Partition, Table, Dataset, Config, File
from collections import namedtuple
from sqlalchemy.exc import IntegrityError, ProgrammingError, OperationalError
ROOT_CONFIG_NAME = 'd000'
ROOT_CONFIG_NAME_V = 'd000001'
class LibraryDb(object):
    '''Represents the Sqlite database that holds metadata for all installed bundles'''

    # (dsn_template, sql) pair: how to build a connection string for a backend,
    # and which bootstrap SQL script creates its schema.
    Dbci = namedtuple('Dbc', 'dsn_template sql') #Database connection information

    # Connection information per supported driver name.
    DBCI = {
        'postgis':Dbci(dsn_template='postgresql+psycopg2://{user}:{password}@{server}{colon_port}/{name}',sql='support/configuration-pg.sql'),
        'postgres':Dbci(dsn_template='postgresql+psycopg2://{user}:{password}@{server}{colon_port}/{name}',sql='support/configuration-pg.sql'), # Stored in the ambry module.
        'sqlite':Dbci(dsn_template='sqlite:///{name}',sql='support/configuration-sqlite.sql'),
        'spatialite': Dbci(dsn_template='sqlite:///{name}', sql='support/configuration-sqlite.sql'),
        'mysql':Dbci(dsn_template='mysql://{user}:{password}@{server}{colon_port}/{name}',sql='support/configuration-sqlite.sql')
    }
    def __init__(self, driver=None, server=None, dbname = None,
                 username=None, password=None, port=None, **kwargs):
        """Record connection parameters and build the SQLAlchemy DSN.

        :param driver: one of the DBCI keys ('sqlite', 'postgres', ...)
        :param server: database host (unused for sqlite)
        :param dbname: database name, or file path for sqlite
        :param username: database user
        :param password: database password
        :param port: optional TCP port appended to the host
        """
        self.driver = driver
        self.server = server
        self.dbname = dbname
        self.username = username
        self.password = password
        if port:
            self.colon_port = ':'+str(port)
        else:
            self.colon_port = ''
        self.dsn_template = self.DBCI[self.driver].dsn_template
        self.dsn = self.dsn_template.format(user=self.username, password=self.password,
                                            server=self.server, name=self.dbname, colon_port=self.colon_port)
        # Lazily-created SQLAlchemy objects; see the engine/connection/session
        # properties.
        self.Session = None
        self._session = None
        self._engine = None
        self._connection = None
        # Postgres variants keep library tables in a dedicated schema.
        if self.driver in ['postgres','postgis']:
            self._schema = 'library'
        else:
            self._schema = None
        self.logger = ambry.util.get_logger(__name__)
        import logging
        self.logger.setLevel(logging.INFO)
        # Destructive operations are refused until this is set True.
        self.enable_delete = False
##
## Sqlalchemy connection, engine, session, metadata
##
    @property
    def engine(self):
        '''return the SqlAlchemy engine for this database (created lazily)'''
        from sqlalchemy import create_engine
        from ..database.sqlite import _on_connect_update_sqlite_schema
        from sqlalchemy.pool import AssertionPool
        from sqlalchemy.pool import NullPool
        if not self._engine:
            #print "Create Engine",os.getpid(), self.dsn
            # There appears to be a problem related to connection pooling on Linux + Postgres, where
            # multiprocess runs will throw exceptions when the Datasets table record can't be
            # found. It looks like connections are losing the setting for the search path to the
            # library schema.
            # Disabling connection pooling solves the problem.
            self._engine = create_engine(self.dsn,echo=False, poolclass=NullPool)
            self._engine.pool._use_threadlocal = True # Easier than constructing the pool
            from sqlalchemy import event
            if self.driver == 'sqlite':
                # Apply sqlite PRAGMAs on every new connection and migrate
                # the schema in place.
                event.listen(self._engine, 'connect', _pragma_on_connect)
                #event.listen(self._engine, 'connect', _on_connect_update_schema)
                _on_connect_update_sqlite_schema(self.connection, None)
        return self._engine
    @property
    def connection(self):
        '''Return an SqlAlchemy connection (created lazily, reused thereafter)'''
        if not self._connection:
            self._connection = self.engine.connect()
            # Point raw connections at the library schema on Postgres.
            if self.driver in ['postgres', 'postgis']:
                self._connection.execute("SET search_path TO library")
        return self._connection
    @property
    def session(self):
        '''Return a SqlAlchemy session (created lazily, reused thereafter)'''
        from sqlalchemy.orm import sessionmaker
        if not self.Session:
            # expire_on_commit=False prevents DetatchedInstanceErrors when
            # using database object outside the database.
            self.Session = sessionmaker(bind=self.engine, expire_on_commit=False)
        if not self._session:
            self._session = self.Session()
            # set the search path
            if self.driver in ('postgres','postgis') and self._schema:
                self._session.execute("SET search_path TO {}".format(self._schema))
        return self._session
    def close(self):
        """Release the session, connection, and engine, in that order."""
        self.close_session()
        self.close_connection()
        if self._engine:
            self._engine.dispose()
    def close_session(self):
        """Close and forget the cached session, if any."""
        if self._session:
            self._session.close()
            #self._session.bind.dispose()
            self._session = None
    def close_connection(self):
        """Close and forget the cached connection, if any."""
        if self._connection:
            self._connection.close()
            self._connection = None
    def commit(self):
        """Commit the current session and discard it; re-raises on failure."""
        try:
            self.session.commit()
            self.close_session()
        except Exception as e:
            #self.logger.error("Failed to commit in {}; {}".format(self.dsn, e))
            raise
    def rollback(self):
        """Abort the current transaction and discard the session."""
        self.session.rollback()
        self.close_session()
    @property
    def metadata(self):
        '''Return an SqlAlchemy MetaData object, bound to the engine and
        reflected from the live database'''
        from sqlalchemy import MetaData
        metadata = MetaData(bind=self.engine, schema = self._schema)
        metadata.reflect(self.engine)
        return metadata
    @property
    def inspector(self):
        """Return a SQLAlchemy Inspector for schema introspection."""
        from sqlalchemy.engine.reflection import Inspector
        return Inspector.from_engine(self.engine)
##
## Creation and Existence
##
    def exists(self):
        """Return True when the database exists and holds the root config row."""
        from sqlalchemy.exc import ProgrammingError, OperationalError
        # A sqlite database that has no file clearly doesn't exist.
        if self.driver == 'sqlite' and not os.path.exists(self.dbname):
            return False
        self.engine  # force lazy engine creation
        try:
            try:
                # Since we are using the connection, rather than the session, need to
                # explicitly set the search path.
                if self.driver in ('postgres','postgis') and self._schema:
                    self.connection.execute("SET search_path TO {}".format(self._schema))
                rows = self.connection.execute(
                    "SELECT * FROM datasets WHERE d_vid = '{}' "
                    .format(ROOT_CONFIG_NAME_V)).fetchone()
            except ProgrammingError as e:
                # This happens when the datasets table doesnt exist
                rows = False
            if not rows:
                return False
            else:
                return True
        except Exception as e:
            # What is the more specific exception here?
            return False
        finally:
            self.close_connection()
def clean(self, add_config_root=True):
s = self.session
s.query(Config).delete()
s.query(File).delete()
s.query(Column).delete()
s.query(Partition).delete()
s.query(Table).delete()
s.query(Dataset).delete()
if add_config_root:
self._add_config_root()
self.commit()
    def create(self):
        """Create the database from the base SQL.

        Returns True when it had to be created, False when it already existed.
        """
        if not self.exists():
            self._create_path()
            self.enable_delete = True
            self.create_tables()
            self._add_config_root()
            return True
        return False
def _create_path(self):
    """Create the directory that will hold the database, if one was specified."""
    if self.driver != 'sqlite':
        return
    dir_ = os.path.dirname(self.dbname)
    if not dir_ or os.path.exists(dir_):
        return
    try:
        # Multiple processes may race to create this, so it may already exist.
        os.makedirs(dir_)
    except Exception:
        pass
    if not os.path.exists(dir_):
        raise Exception("Couldn't create directory " + dir_)
def _drop(self, s):
    """Drop the library's tables from the database.

    :param s: a session; accepted for call symmetry but not used here.
    :raises Exception: when deleting has not been explicitly enabled.
    """
    if not self.enable_delete:
        raise Exception("Deleting not enabled. Set library.database.enable_delete = True")
    tables = [Config.__tablename__, Column.__tablename__, Partition.__tablename__,
              Table.__tablename__, File.__tablename__, Dataset.__tablename__]
    # sorted_tables is ordered by foreign-key dependency; reversing drops
    # dependents before the tables they reference.
    for table in reversed(self.metadata.sorted_tables): # sorted by foreign key dependency
        if table.name in tables:
            table.drop(self.engine, checkfirst=True)
def drop(self):
    """Drop the library tables and commit the session."""
    session = self.session
    self._drop(session)
    session.commit()
def __del__(self):
    # Finalizer is intentionally a no-op; sessions and connections are closed
    # explicitly via close_session()/close_connection().
    pass # print 'closing LibraryDb'
def clone(self):
    """Return a new instance configured identically to this one."""
    cls = self.__class__
    return cls(self.driver, self.server, self.dbname, self.username, self.password)
def create_tables(self):
    """Create all library tables, temporarily rewriting each table's schema
    when the library is configured with one."""
    tables = [ Dataset, Config, Table, Column, File, Partition]
    self.drop()
    orig_schemas = {}
    for table in tables:
        it = table.__table__
        # These schema shenanigans are almost certainly wrong.
        # But they are expedient
        if self._schema:
            orig_schemas[it] = it.schema
            it.schema = self._schema
        it.create(bind=self.engine)
    self.commit()
    # We have to put the schemas back because when installing to a warehouse.
    # the same library classes can be used to access a Sqlite database, which
    # does not handle schemas.
    if self._schema:
        for it, orig_schema in orig_schemas.items():
            it.schema = orig_schema
def _add_config_root(self):
    """Ensure the root configuration dataset record exists, creating it when
    a lookup by ROOT_CONFIG_NAME finds nothing."""
    from sqlalchemy.orm.exc import NoResultFound
    try:
        self.session.query(Dataset).filter(Dataset.vid==ROOT_CONFIG_NAME).one()
        self.close_session()
    except NoResultFound:
        # No root record yet; build one with placeholder identity values.
        o = Dataset(
            id=ROOT_CONFIG_NAME,
            name=ROOT_CONFIG_NAME,
            vname=ROOT_CONFIG_NAME_V,
            fqname='datasetroot-0.0.0~'+ROOT_CONFIG_NAME_V,
            cache_key=ROOT_CONFIG_NAME,
            version='0.0.0',
            source=ROOT_CONFIG_NAME,
            dataset = ROOT_CONFIG_NAME,
            creator=ROOT_CONFIG_NAME,
            revision=1,
            )
        self.session.add(o)
        self.commit()
def _clean_config_root(self):
    '''Hack need to clean up some installed databases'''
    # Reset every identity field of the root dataset record back to the
    # root configuration constants, then merge and commit.
    ds = self.session.query(Dataset).filter(Dataset.id_==ROOT_CONFIG_NAME).one()
    ds.id_=ROOT_CONFIG_NAME
    ds.name=ROOT_CONFIG_NAME
    ds.vname=ROOT_CONFIG_NAME_V
    ds.source=ROOT_CONFIG_NAME
    ds.dataset = ROOT_CONFIG_NAME
    ds.creator=ROOT_CONFIG_NAME
    ds.revision=1
    self.session.merge(ds)
    self.commit()
def inserter(self, table_name, **kwargs):
    """Return a ValueInserter bound to *table_name*, reflected from the
    live database."""
    from ..database.inserter import ValueInserter
    from sqlalchemy.schema import Table
    reflected = Table(table_name, self.metadata, autoload=True, autoload_with=self.engine)
    return ValueInserter(self, None, reflected, **kwargs)
##
##
##
##
## Configuration values
##
def set_config_value(self, group, key, value):
    '''Set a configuration value in the database'''
    from ambry.orm import Config as SAConfig
    from sqlalchemy.exc import IntegrityError, ProgrammingError
    s = self.session
    # Delete-then-insert. If the insert collides (presumably a concurrent
    # writer re-created the row), fall back to updating in place.
    s.query(SAConfig).filter(SAConfig.group == group,
                             SAConfig.key == key,
                             SAConfig.d_vid == ROOT_CONFIG_NAME_V).delete()
    try:
        o = SAConfig(group=group,key=key,d_vid=ROOT_CONFIG_NAME_V,value = value)
        s.add(o)
        self.commit()
    except IntegrityError:
        self.rollback()
        o = s.query(SAConfig).filter(SAConfig.group == group,
                                     SAConfig.key == key,
                                     SAConfig.d_vid == ROOT_CONFIG_NAME_V).one()
        o.value = value
        s.merge(o)
        self.commit()
def get_config_value(self, group, key):
    """Return the Config record for (group, key) in the root dataset, or
    None when it does not exist or the query fails.

    :param group: configuration group name
    :param key: configuration key within the group
    """
    from ambry.orm import Config as SAConfig
    s = self.session
    try:
        return s.query(SAConfig).filter(SAConfig.group == group,
                                        SAConfig.key == key,
                                        SAConfig.d_vid == ROOT_CONFIG_NAME_V).first()
    except Exception:
        # BUG FIX: was a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit. Query failures still yield None.
        return None
@property
def config_values(self):
    """Dict of all root-dataset config values keyed by (group, key)."""
    from ambry.orm import Config as SAConfig
    rows = self.session.query(SAConfig).filter(SAConfig.d_vid == ROOT_CONFIG_NAME_V).all()
    values = dict(((str(c.group), str(c.key)), c.value) for c in rows)
    self.close_session()
    return values
def _mark_update(self):
    """Record the current UTC time as the library's last-change marker."""
    import datetime
    now = datetime.datetime.utcnow().isoformat()
    self.set_config_value('activity', 'change', now)
##
## Install and remove bundles and partitions
##
def install_dataset_identity(self, identity, location=Dataset.LOCATION.LIBRARY, data=None):
    '''Create the record for the dataset. Does not add any File objects.

    :param identity: the dataset identity to record
    :param location: where the dataset lives; defaults to the library
    :param data: optional dict of extra data stored on the record
    :raises ConflictError: when a dataset with the same identity exists
    '''
    from sqlalchemy.exc import IntegrityError
    from ..dbexceptions import ConflictError
    ds = Dataset(**identity.dict)
    ds.name = identity.sname
    ds.vname = identity.vname
    ds.fqname = identity.fqname
    ds.cache_key = identity.cache_key
    ds.creator = 'N/A'
    ds.location = location
    # BUG FIX: the default was a shared mutable dict (``data={}``); use None
    # as the sentinel and create a fresh dict per call.
    ds.data = data if data is not None else {}
    try:
        try:
            self.session.add(ds)
            self.commit()
        except:
            # Best-effort fallback: retry as a merge when add/commit fails.
            self.session.rollback()
            self.session.merge(ds)
            self.commit()
    except IntegrityError as e:
        raise ConflictError("Can't install dataset vid={} vname={} cache_key={}; \nOne already exists. ('{}')"
                            .format(identity.vid, identity.vname, identity.cache_key, e.message))
def install_bundle_file(self, identity, bundle_file):
    """Install a bundle in the database, starting from a file that may
    be a partition or a bundle; partition identities are ignored."""
    from ..identity import Identity
    if isinstance(identity, dict):
        identity = Identity.from_dict(identity)
    # Only bundle identities trigger an install; partition files are skipped.
    if identity.is_bundle:
        self.install_bundle(DbBundle(bundle_file))
def install_dataset(self, bundle):
    """Install only the most basic parts of the bundle, excluding the
    partitions and tables. Use install_bundle to install everything.

    This will delete all of the tables and partitions associated with the
    bundle, if they already exist, so callers should check that the dataset
    does not already exist before installing again.

    :param bundle: the bundle whose dataset record is merged in
    :return: the merged Dataset record
    """
    # There should be only one dataset record in the bundle.
    db = bundle.database
    db.update_schema()
    bdbs = db.unmanaged_session
    s = self.session
    s.autoflush = False
    dataset = bdbs.query(Dataset).one()
    dataset.location = Dataset.LOCATION.LIBRARY
    s.merge(dataset)
    for config in bdbs.query(Config).all():
        s.merge(config)
    # Clear previously installed partitions, columns and tables for this
    # dataset before the bundle's versions are merged in by callers.
    s.query(Partition).filter(Partition.d_vid == dataset.vid).delete()
    for table in dataset.tables:
        s.query(Column).filter(Column.t_vid == table.vid).delete()
    s.query(Table).filter(Table.d_vid == dataset.vid).delete()
    try:
        self.commit()
    except IntegrityError as e:
        # NOTE(review): IntegrityError is not imported in this method; it
        # presumably comes from a module-level import not visible here — verify.
        self.logger.error("Failed to merge in {}".format(self.dsn))
        self.rollback()
        raise e
    return dataset
def install_bundle(self, bundle, install_partitions = True):
    '''Copy the schema and partitions lists into the library database.

    :param bundle: the Bundle to install; must be a Bundle instance
    :param install_partitions: when False, only tables/columns are merged
    :raises ValueError: when *bundle* is not a Bundle
    :raises DatabaseError: when the dataset cannot be installed
    '''
    from ambry.bundle import Bundle
    if not isinstance(bundle, Bundle):
        raise ValueError("Can only install a Bundle object")
    # The Tables only get installed when the dataset is installed,
    # not for the partition
    self._mark_update()
    try:
        dataset = self.install_dataset(bundle)
    except Exception as e:
        from ..dbexceptions import DatabaseError
        raise DatabaseError("Failed to install {} into {}: {}".format(
            bundle.database.path, self.dsn, e.message
        ))
    s = self.session
    for table in dataset.tables:
        s.merge(table)
        for column in table.columns:
            s.merge(column)
    if install_partitions:
        for partition in dataset.partitions:
            s.merge(partition)
    try:
        self.commit()
    except IntegrityError as e:
        # NOTE(review): IntegrityError is not imported in this scope;
        # presumably a module-level import not visible here — verify.
        self.logger.error("Failed to merge")
        self.rollback()
        raise e
def install_partition(self, bundle, p_id, install_bundle=True, install_tables = True):
    """Install a single partition and its tables. This is mostly
    used for installing into warehouses, where it isn't desirable to install
    the whole bundle.

    NOTE(review): the bare ``return`` before the final try/commit makes the
    commit block unreachable dead code — either the return or the commit is
    a leftover; verify which was intended before removing either.
    """
    from ..dbexceptions import NotFoundError
    from ..identity import PartitionNameQuery
    from sqlalchemy.orm.exc import NoResultFound
    if install_bundle:
        # Install the whole bundle first if the library doesn't have it yet.
        try:
            b = self.get(bundle.identity.vid)
        except NotFoundError:
            b = None
        if not b:
            self.install_bundle(bundle)
    partition = bundle.partitions.get(p_id)
    s = self.session
    if install_tables:
        for table_name in partition.tables:
            table = bundle.schema.table(table_name)
            try:
                s.query(Table).filter(Table.vid == table.vid).one()
                # the library already has the table
            except NoResultFound as e:
                s.merge(table)
                for column in table.columns:
                    s.merge(column)
    s.merge(partition.record)
    return
    try:
        self.commit()
    except IntegrityError as e:
        self.logger.error("Failed to merge")
        self.rollback()
        raise e
def mark_table_installed(self, table_or_vid, name=None):
    """Mark a table record as installed.

    NOTE(review): this definition is immediately shadowed by an identical
    ``mark_table_installed`` defined right after it, so this copy is dead
    code. Also, ``query(...).one()`` raises instead of returning None, so
    the ``if not table`` fallback below is unreachable — and it compares
    ``Table.name`` against ``table.vid`` while ``table`` would be None.
    """
    s = self.session
    table = None
    table = s.query(Table).filter(Table.vid == table_or_vid).one()
    if not table:
        table = s.query(Table).filter(Table.name == table.vid).one()
    if not name:
        name = table.name
    table.installed = name
    s.merge(table)
    s.commit()
def mark_table_installed(self, table_or_vid, name=None):
    """Mark a table record as installed.

    :param table_or_vid: a table vid, or a table name used as fallback
    :param name: the installed name; defaults to the table's own name
    """
    s = self.session
    # BUG FIX: ``.one()`` raises when nothing matches, so the name fallback
    # was unreachable; and it compared Table.name against ``table.vid``
    # while ``table`` was None. Use ``.first()`` and compare against the
    # caller-supplied reference.
    table = s.query(Table).filter(Table.vid == table_or_vid).first()
    if not table:
        table = s.query(Table).filter(Table.name == table_or_vid).one()
    if not name:
        name = table.name
    table.installed = name
    s.merge(table)
    s.commit()
def mark_partition_installed(self, p_vid):
    """Mark a partition record as installed.

    :param p_vid: versioned id of the partition to mark
    """
    # FIX: removed an unused ``table = None`` local and corrected the
    # docstring, which said "table" instead of "partition".
    s = self.session
    p = s.query(Partition).filter(Partition.vid == p_vid).one()
    p.installed = 'y'
    s.merge(p)
    s.commit()
def remove_bundle(self, bundle):
    '''Remove a bundle from the database.

    Accepts a bundle object; when the reference resolves to a partition,
    only that partition is removed, otherwise all partitions and the
    dataset record itself are deleted.
    '''
    from ..orm import Dataset
    from ..bundle import LibraryDbBundle
    try:
        dataset, partition = self.get_id(bundle.identity.vid) #@UnusedVariable
    except AttributeError:
        # ``bundle`` did not look like a bundle; treat it as a dataset.
        dataset, partition = bundle, None
    if not dataset:
        return False
    if partition:
        self.remove_partition(partition)
    else:
        # Remove each of the bundle's partitions, then the dataset itself.
        b = LibraryDbBundle(self, dataset.identity.vid)
        for p in b.partitions:
            self.remove_partition(p)
        dataset = (self.session.query(Dataset)
                   .filter(Dataset.location == Dataset.LOCATION.LIBRARY)
                   .filter(Dataset.vid==dataset.identity.vid).one())
        # Can't use delete() on the query -- bulk delete queries do not
        # trigger in-python cascades!
        self.session.delete(dataset)
    self.commit()
def remove_dataset(self, vid):
    '''Remove all references to a Dataset'''
    from ..orm import Dataset
    query = self.session.query(Dataset).filter(Dataset.vid == vid)
    query.delete()
    self.commit()
def remove_partition(self, partition):
    """Delete a partition record from the library database.

    :param partition: a partition object, or a partition identity
    """
    from ..bundle import LibraryDbBundle
    from ..orm import Partition
    try:
        dataset = self.get(partition.identity.vid) #@UnusedVariable
        p_vid = partition.identity.vid
    except AttributeError:
        # It is actually an identity, we hope
        dataset = partition.as_dataset()
        p_vid = partition.vid
    b = LibraryDbBundle(self, dataset.vid)
    s = self.session
    # BUG FIX: the filter compared the partition vid against Partition.t_vid
    # (the *table* vid), which can never match; filter on Partition.vid.
    s.query(Partition).filter(Partition.vid == p_vid).delete()
    self.commit()
##
## Get objects by reference, or resolve a reference
##
def get(self, vid):
    '''Get an identity by a vid. For partitions, returns a nested Identity.

    :param vid: a vid string or ObjectNumber (dataset or partition)
    :raises NotFoundError: when no matching object exists
    :raises ValueError: when the number is neither dataset nor partition
    '''
    from ..identity import ObjectNumber, DatasetNumber, PartitionNumber
    from ..orm import Dataset, Partition
    from sqlalchemy.orm.exc import NoResultFound
    from ..dbexceptions import NotFoundError
    try:
        # ``basestring``: this module targets Python 2.
        if isinstance(vid, basestring):
            vid = ObjectNumber.parse(vid)
        if isinstance(vid, DatasetNumber):
            d = (self.session.query(Dataset)
                 .filter(Dataset.location == Dataset.LOCATION.LIBRARY )
                 .filter(Dataset.vid == str(vid)).one())
            did = d.identity
        elif isinstance(vid, PartitionNumber):
            # For a partition, return the dataset identity with the
            # partition identity nested inside it.
            d,p = (self.session.query(Dataset, Partition).join(Partition)
                   .filter(Dataset.location == Dataset.LOCATION.LIBRARY)
                   .filter(Partition.vid == str(vid)).one())
            did = d.identity
            did.add_partition(p.identity)
        else:
            raise ValueError('vid was wrong type: {}'.format(type(vid)))
        return did
    except NoResultFound:
        raise NotFoundError("No object found for vid {}".format(vid))
def get_table(self, table_vid):
    """Return the single Table record with the given versioned id."""
    return (self.session.query(Table)
            .filter(Table.vid == table_vid)
            .one())
def list(self, datasets=None, locations = None, key='vid'):
    """
    Return identities of the datasets (and their partitions) in the library.

    :param datasets: If specified, must be a dict, which the internal dataset data will be
        put into.
    :param locations: optional location, or list of locations, to restrict to
    :param key: identity attribute used as the dict key (e.g. 'vid')
    :return: dict mapping *key* values to dataset identities
    """
    from ..orm import Dataset, Partition
    from .files import Files
    from sqlalchemy.sql import or_
    if datasets is None:
        datasets = {}
    # q1 pairs each dataset with its partitions; q2 catches datasets with no
    # partitions, which the join in q1 would drop.
    q1 = (self.session.query(Dataset, Partition).join(Partition)
          .filter(Dataset.vid != ROOT_CONFIG_NAME_V))
    q2 = (self.session.query(Dataset)
          .filter(Dataset.vid != ROOT_CONFIG_NAME_V))
    if locations:
        if not isinstance(locations,(list, tuple)):
            locations=[locations]
        terms = [ Dataset.location == location for location in locations]
        q1 = q1.filter(or_(*terms))
        q2 = q2.filter(or_(*terms))
    for d,p in (q1.all() + [ (d,None) for d in q2.all()]):
        ck = getattr(d.identity, key)
        if ck not in datasets:
            dsid = d.identity
            datasets[ck] = dsid
        else:
            dsid = datasets[ck]
        # The dataset locations are linked to the identity locations
        dsid.locations.set(d.location)
        if p and ( not datasets[ck].partitions or p.vid not in datasets[ck].partitions):
            pident = p.identity
            pident.locations.set(d.location)
            datasets[ck].add_partition(pident)
        # For source datasets, reflect the build state from the Files record.
        if d.location == Files.TYPE.SOURCE:
            files = Files(self)
            f = files.query.type(Files.TYPE.SOURCE).ref(dsid.vid).one_maybe
            if f:
                dsid.bundle_state = f.state
    return datasets
def datasets(self, key='vid'):
    '''List only the dataset records'''
    from ..orm import Dataset
    q = (self.session.query(Dataset)
         .filter(Dataset.location == Dataset.LOCATION.LIBRARY)
         .filter(Dataset.vid != ROOT_CONFIG_NAME_V))
    # Key each identity by the requested identity attribute.
    return dict((getattr(d.identity, key), d.identity) for d in q.all())
@property
def resolver(self):
    """A Resolver bound to this library's session."""
    from .query import Resolver
    return Resolver(self.session)
def find(self, query_command):
    '''Find a bundle or partition record by a QueryCommand or Identity

    Args:
        query_command. QueryCommand or Identity

    returns:
        A list of dicts with 'identity' (and optionally 'partition',
        'table', 'column') entries, for each matching dataset.
    '''
    def like_or_eq(c,v):
        # Use a LIKE match when the value contains a wildcard, equality otherwise.
        if v and '%' in v:
            return c.like(v)
        else:
            return c == v
    s = self.session
    has_partition = False
    has_where = False
    if isinstance(query_command, Identity):
        raise NotImplementedError()
        # NOTE(review): everything below the raise in this branch is dead code.
        out = []
        for d in self.queryByIdentity(query_command).all():
            id_ = d.identity
            d.path = os.path.join(self.cache,id_.cache_key)
            out.append(d)
    # Build the FROM list from whichever sub-commands are populated.
    tables = [Dataset]
    if len(query_command.partition) > 0:
        tables.append(Partition)
    if len(query_command.table) > 0:
        tables.append(Table)
    if len(query_command.column) > 0:
        tables.append(Column)
    tables.append(Dataset.id_) # Dataset.id_ is included to ensure result is always a tuple)
    query = s.query(*tables) # Dataset.id_ is included to ensure result is always a tuple
    if len(query_command.identity) > 0:
        for k,v in query_command.identity.items():
            if k == 'id':
                k = 'id_'
            try:
                query = query.filter( like_or_eq(getattr(Dataset, k),v) )
            except AttributeError as e:
                # Dataset doesn't have the attribute, so ignore it.
                pass
    if len(query_command.partition) > 0:
        query = query.join(Partition)
        for k,v in query_command.partition.items():
            if k == 'id':
                k = 'id_'
            from sqlalchemy.sql import or_
            if k == 'any':
                continue # Just join the partition
            elif k == 'table':
                # The 'table" value could be the table id
                # or a table name
                query = query.join(Table)
                query = query.filter( or_(Partition.t_id == v,
                                          like_or_eq(Table.name,v)))
            elif k == 'space':
                query = query.filter( or_( like_or_eq(Partition.space,v)))
            else:
                query = query.filter( like_or_eq(getattr(Partition, k),v) )
        if not query_command.partition.format:
            # Exclude CSV if not specified
            query = query.filter( Partition.format != 'csv')
    if len(query_command.table) > 0:
        query = query.join(Table)
        for k,v in query_command.table.items():
            query = query.filter( like_or_eq(getattr(Table, k),v) )
    if len(query_command.column) > 0:
        query = query.join(Table)
        query = query.join(Column)
        for k,v in query_command.column.items():
            query = query.filter( like_or_eq(getattr(Column, k),v) )
    query = query.distinct().order_by(Dataset.revision.desc())
    out = []
    try:
        for r in query.all():
            o = {}
            # The row may or may not carry Partition/Table/Column entities,
            # depending on which sub-commands were populated above.
            try:
                o['identity'] = r.Dataset.identity.dict
                o['partition'] = r.Partition.identity.dict
            except:
                o['identity'] = r.Dataset.identity.dict
            try: o['table'] = r.Table.dict
            except: pass
            try:o['column'] = r.Column.dict
            except: pass
            out.append(o)
    except Exception as e:
        self.logger.error("Exception while querrying in {}, schema {}".format(self.dsn, self._schema))
        raise
    self.close_session()
    return out
def queryByIdentity(self, identity):
    """Build a Dataset (or Dataset+Partition) query matching *identity*.

    :param identity: a name/vid string, Identity, PartitionIdentity, or a
        dict of filter values
    :raises ValueError: for unsupported identity types
    :return: an SqlAlchemy query, ordered by descending revision
    """
    from ..orm import Dataset, Partition
    from ..identity import Identity, PartitionIdentity
    from sqlalchemy import desc
    s = self.database.session
    # If it is a string, it is a name or a dataset id
    if isinstance(identity, str) or isinstance(identity, unicode):
        query = (s.query(Dataset)
                 .filter(Dataset.location == Dataset.LOCATION.LIBRARY)
                 .filter((Dataset.id_ == identity) | (Dataset.name == identity)))
    elif isinstance(identity, PartitionIdentity):
        query = s.query(Dataset, Partition)
        for k, v in identity.to_dict().items():
            d = {}
            if k == 'revision':
                v = int(v)
            d[k] = v
            query = query.filter_by(**d)
    elif isinstance(identity, Identity):
        query = s.query(Dataset).filter(Dataset.location == Dataset.LOCATION.LIBRARY)
        for k, v in identity.to_dict().items():
            d = {}
            d[k] = v
            query = query.filter_by(**d)
    elif isinstance(identity, dict):
        query = s.query(Dataset).filter(Dataset.location == Dataset.LOCATION.LIBRARY)
        for k, v in identity.items():
            d = {}
            d[k] = v
            query = query.filter_by(**d)
    else:
        raise ValueError("Invalid type for identity")
    # BUG FIX: Query.order_by returns a *new* query; the original discarded
    # the result, so the revision ordering was silently lost.
    query = query.order_by(desc(Dataset.revision))
    return query
##
## Database backup and restore. Synchronizes the database with
## a remote. This is used when a library is created attached to a remote, and
## needs to get the library database from the remote.
##
def _copy_db(self, src, dst):
    """Replace *dst*'s contents with *src*'s, table by table.

    :param src: source library database
    :param dst: destination library database
    """
    from sqlalchemy.orm.exc import NoResultFound
    try:
        # Best-effort removal of a stray 'a0' dataset record in the destination.
        dst.session.query(Dataset).filter(Dataset.vid=='a0').delete()
    except:
        pass
    for table in self.metadata.sorted_tables: # sorted by foreign key dependency
        rows = src.session.execute(table.select()).fetchall()
        dst.session.execute(table.delete())
        for row in rows:
            dst.session.execute(table.insert(), row)
    dst.session.commit()
def dump(self, path):
    '''Copy the database to a new Sqlite file, as a backup.'''
    import datetime
    backup = LibraryDb(driver='sqlite', dbname=path)
    backup.create()
    # Record the dump time so needs_dump() can compare against changes.
    self.set_config_value('activity', 'dump', datetime.datetime.utcnow().isoformat())
    self._copy_db(self, backup)
def needs_dump(self):
    '''Return true if the last dump date is after the last change date, and
    the last change date is more than 10s in the past'''
    import datetime
    from dateutil import parser
    configs = self.config_values
    epoch = datetime.datetime.fromtimestamp(0).isoformat()
    changed = parser.parse(configs.get(('activity', 'change'), epoch))
    dumped = parser.parse(configs.get(('activity', 'dump'), epoch))
    grace = datetime.timedelta(seconds=10)
    now = datetime.datetime.utcnow()
    return changed > dumped and now > dumped + grace
def restore(self, path):
    '''Restore a sqlite database dump'''
    import datetime
    self.create()
    source = LibraryDb(driver='sqlite', dbname=path)
    self._copy_db(source, self)
    # Record when the restore happened.
    self.set_config_value('activity', 'restore', datetime.datetime.utcnow().isoformat())
def _pragma_on_connect(dbapi_con, con_record):
    '''Issue some Sqlite pragmas when the connection is created.

    Currently a deliberate no-op: the early ``return`` disables all pragmas
    because it was not clear they improve performance. The statements below
    are kept so they can be re-enabled easily.
    '''
    #dbapi_con.execute('PRAGMA foreign_keys = ON;')
    return # Not clear that there is a performance improvement.
    dbapi_con.execute('PRAGMA journal_mode = MEMORY')
    dbapi_con.execute('PRAGMA synchronous = OFF')
    dbapi_con.execute('PRAGMA temp_store = MEMORY')
    dbapi_con.execute('PRAGMA cache_size = 500000')
    dbapi_con.execute('pragma foreign_keys=ON')
| {
"repo_name": "kball/ambry",
"path": "ambry/library/database.py",
"copies": "1",
"size": "34182",
"license": "bsd-2-clause",
"hash": 1080923694879913600,
"line_mean": 29.0633245383,
"line_max": 177,
"alpha_frac": 0.5615821192,
"autogenerated": false,
"ratio": 4.4334630350194555,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00435699713187573,
"num_lines": 1137
} |
"""A Library is a local collection of bundles. It holds a database for the configuration
of the bundles that have been installed into it.
"""
# Copyright (c) 2015 Civic Knowledge. This file is licensed under the terms of the
# Revised BSD License, included in this distribution as LICENSE.txt
import imp
import os
import sys
import tempfile
import traceback
import json
from fs.osfs import OSFS
from fs.opener import fsopendir
from sqlalchemy import or_
from six import text_type
from requests.exceptions import HTTPError
from ambry.bundle import Bundle
from ambry.dbexceptions import ConfigurationError
from ambry.identity import Identity, ObjectNumber, NotObjectNumberError, NumberServer, DatasetNumber
from ambry.library.search import Search
from ambry.orm import Partition, File, Config, Table, Database, Dataset, Account
from ambry.orm.exc import NotFoundError, ConflictError
from ambry.run import get_runconfig
from ambry.util import get_logger, memoize
from .filesystem import LibraryFilesystem
logger = get_logger(__name__)
# debug logging
import logging
#logger = get_logger(__name__, level=logging.DEBUG)
# Module-level handle to the most recently constructed Library; set by new_library().
global_library = None
def new_library(config=None):
    """Construct a Library from *config* (or the run configuration) and
    record it in the module-level ``global_library``."""
    global global_library
    lib = Library(config if config is not None else get_runconfig())
    global_library = lib
    return lib
class LibraryContext(object):
    """Context manager that builds a fresh Library on entry, then commits
    and closes it on exit."""

    def __init__(self, ctor_args):
        # Constructor arguments for the Library built in __enter__.
        self._ctor_args = ctor_args
        self._library = None

    def __enter__(self):
        logger.debug("Entering library context id={}".format(id(self)))
        self._library = Library(**self._ctor_args)
        return self._library

    def __exit__(self, exc_type, exc_val, exc_tb):
        logger.debug("Leaving library context id={}".format(id(self)))
        if self._library:
            self._library.commit()
            self._library.close()
class Library(object):
def __init__(self, config=None, search=None, echo=None, read_only=False):
    """Open the library database and filesystem described by *config*.

    :param config: run configuration; when None, loaded via get_runconfig()
    :param search: search backend spec; when set, a Search is constructed
    :param echo: passed to the database for SQL echoing
    :param read_only: allow optimizations that assume no bundle building
    :raises DatabaseMissingError: when the database cannot be opened
    """
    from sqlalchemy.exc import OperationalError
    from ambry.orm.exc import DatabaseMissingError
    if config:
        self._config = config
    else:
        self._config = get_runconfig()
    self.logger = logger
    self.read_only = read_only  # allow optimizations that assume we aren't building bundles.
    self._echo = echo
    # NOTE(review): LibraryFilesystem receives the raw ``config`` argument,
    # not self._config, so it gets None when config was defaulted — verify.
    self._fs = LibraryFilesystem(config)
    self._db = Database(self._fs.database_dsn, echo=echo)
    self._account_password = self.config.accounts.password
    self._warehouse = None  # Will be populated in the warehouse property.
    try:
        self._db.open()
    except OperationalError as e:
        raise DatabaseMissingError("Failed to open database '{}': {} ".format(self._db.dsn, e))
    self.processes = None  # Number of multiprocessing processors. Default to all of them
    if search:
        self._search = Search(self, search)
    else:
        self._search = None
@property
def ctor_args(self):
    """Return arguments for constructing a copy of this library."""
    return {
        'config': self._config,
        'search': self._search,
        'echo': self._echo,
        'read_only': self.read_only,
    }
def clone(self):
    """Create a new Library configured identically to this one."""
    return Library(**self.ctor_args)
@property
def context(self):
    """Return a new LibraryContext for later use. Entering it constructs a
    fresh Library instance with new database connections; it does not
    operate on this library."""
    return LibraryContext(self.ctor_args)
def sync_config(self, force=False):
    """Sync the file config into the library proxy data in the root dataset."""
    from ambry.library.config import LibraryConfigSyncProxy
    proxy = LibraryConfigSyncProxy(self)
    proxy.sync(force=force)
def init_debug(self):
    """Install a SIGUSR2 handler that logs a stack trace of the running
    process, for interactive debugging."""
    import signal
    def debug_trace(sig, frame):
        """Log the current stack when the trace signal is received."""
        # NOTE(review): uses self.log(), which is not defined in the
        # visible code — verify the Library actually has a log() method.
        self.log('Trace signal received')
        self.log(''.join(traceback.format_stack(frame)))
    signal.signal(signal.SIGUSR2, debug_trace)  # Register handler
def resolve_object_number(self, ref):
    """Resolve a variety of object numbers to a dataset number."""
    on = ref if isinstance(ref, ObjectNumber) else ObjectNumber.parse(ref)
    return on.as_dataset
def drop(self):
    """Drop the library's tables; delegates to the database."""
    return self.database.drop()
def clean(self):
    """Remove all rows from the library's tables; delegates to the database."""
    return self.database.clean()
def close(self):
    """Close the underlying database."""
    return self.database.close()
def exists(self):
    """Whether the library database exists.

    NOTE(review): ``self.database.exists`` is returned without being
    called — verify whether it is a property on this Database class or
    this should be ``self.database.exists()``.
    """
    return self.database.exists
def create(self):
    """Create the library database tables, then sync the file configuration
    into the root dataset."""
    # BUG FIX: was ``from config import LibraryConfigSyncProxy`` — an
    # implicit relative import that breaks under Python 3 and can shadow a
    # top-level ``config`` module. Use the absolute path, matching the
    # import in sync_config().
    from ambry.library.config import LibraryConfigSyncProxy
    self.database.create()
    lcsp = LibraryConfigSyncProxy(self)
    lcsp.sync()
@property
def database(self):
    """The underlying orm Database object."""
    return self._db
@property
def dsn(self):
    """The database's DSN string."""
    return self._db.dsn
@property
def filesystem(self):
    """The LibraryFilesystem for this library."""
    return self._fs
@memoize
def warehouse(self, dsn=None):
    """Return a Warehouse attached to this library.

    For a sqlite library with no explicit *dsn*, the warehouse database is
    placed next to the library database as 'warehouse.db'.

    NOTE(review): @memoize on an instance method presumably caches per
    (self, dsn) and can keep instances alive — confirm the memoize semantics.
    """
    from ambry.library.warehouse import Warehouse
    if self.database.dsn.startswith('sqlite') and dsn is None:
        from ambry.util import parse_url_to_dict
        d = parse_url_to_dict(self.database.dsn)
        dsn = self.database.dsn.replace(os.path.basename(d['path']), 'warehouse.db')
    return Warehouse(self, dsn=dsn)
@property
def config(self):
    """The run configuration this library was built from."""
    return self._config
@property
def download_cache(self):
    """An OSFS filesystem over the library's downloads directory."""
    return OSFS(self._fs.downloads())
def commit(self):
    """Commit the library database session."""
    self._db.commit()
@property
def root(self):
    """Return the root dataset"""
    return self._db.root_dataset
@property
def datasets(self):
    """Return all datasets"""
    return self._db.datasets
def dataset(self, ref, load_all=False, exception=True):
    """Return the single dataset resolved from *ref*, delegating to the
    database. (The original docstring was a copy-paste of "Return all
    datasets".)"""
    return self.database.dataset(ref, load_all=load_all, exception=exception)
def new_bundle(self, assignment_class=None, **kwargs):
    """
    Create a new bundle, with the same arguments as creating a new dataset

    :param assignment_class: String. assignment class to use for fetching a number, if one
        is not specified in kwargs
    :param kwargs: forwarded to the database's new_dataset()
    :return: the newly created Bundle, in state NEW
    """
    # Fetch a new number when no id was given, or when an assignment class
    # explicitly requests one.
    if not ('id' in kwargs and bool(kwargs['id'])) or assignment_class is not None:
        kwargs['id'] = self.number(assignment_class)
    ds = self._db.new_dataset(**kwargs)
    self._db.commit()
    b = self.bundle(ds.vid)
    b.state = Bundle.STATES.NEW
    b.set_last_access(Bundle.STATES.NEW)
    b.set_file_system(source_url=self._fs.source(b.identity.source_path),
                      build_url=self._fs.build(b.identity.source_path))
    # Seed the metadata build-source file and round-trip it through the ORM
    # objects so the defaults are recorded.
    bs_meta = b.build_source_files.file(File.BSFILE.META)
    bs_meta.set_defaults()
    bs_meta.record_to_objects()
    bs_meta.objects_to_record()
    b.commit()
    self._db.commit()
    return b
def new_from_bundle_config(self, config):
    """
    Create a new bundle, or link to an existing one, based on the identity in config data.

    :param config: A Dict form of a bundle.yaml file
    :return: the Bundle, in state NEW
    """
    identity = Identity.from_dict(config['identity'])
    # Reuse the existing dataset when one with this vid is already present.
    ds = self._db.dataset(identity.vid, exception=False)
    if not ds:
        ds = self._db.new_dataset(**identity.dict)
    b = Bundle(ds, self)
    b.commit()
    b.state = Bundle.STATES.NEW
    b.set_last_access(Bundle.STATES.NEW)
    # b.set_file_system(source_url=self._fs.source(ds.name),
    #                   build_url=self._fs.build(ds.name))
    return b
def bundle(self, ref, capture_exceptions=False):
    """Return a bundle built on a dataset, with the given vid or id reference.

    Resolution order: a Dataset object is used directly; otherwise try a
    dataset lookup, then fall back to resolving *ref* as a partition and
    using that partition's bundle's dataset.

    :raises NotFoundError: when no dataset can be resolved from *ref*
    """
    from ..orm.exc import NotFoundError
    if isinstance(ref, Dataset):
        ds = ref
    else:
        try:
            ds = self._db.dataset(ref)
        except NotFoundError:
            ds = None
    if not ds:
        # Maybe the ref names a partition; use its bundle's dataset.
        try:
            p = self.partition(ref)
            ds = p._bundle.dataset
        except NotFoundError:
            ds = None
    if not ds:
        raise NotFoundError('Failed to find dataset for ref: {}'.format(ref))
    b = Bundle(ds, self)
    b.capture_exceptions = capture_exceptions
    return b
def bundle_by_cache_key(self, cache_key):
    """Return the bundle whose dataset has the given cache key."""
    return self.bundle(self._db.dataset_by_cache_key(cache_key))
@property
def bundles(self):
    """ Returns all datasets in the library as bundles. """
    return (self.bundle(ds.vid) for ds in self.datasets)
def partition(self, ref, localize=False):
    """ Finds partition by ref and converts to bundle partition.

    :param ref: A partition reference
    :param localize: If True, copy a remote partition to local filesystem. Defaults to False
    :raises: NotFoundError: if partition with given ref not found.
    :return: orm.Partition: found partition.
    """
    if not ref:
        raise NotFoundError("No partition for empty ref")
    try:
        on = ObjectNumber.parse(ref)
        ds_on = on.as_dataset
        ds = self._db.dataset(ds_on)  # Could do it in on SQL query, but this is easier.
        # The refresh is required because in some places the dataset is loaded without the partitions,
        # and if that persist, we won't have partitions in it until it is refreshed.
        self.database.session.refresh(ds)
        p = ds.partition(ref)
    except NotObjectNumberError:
        # Not a vid/id; fall back to a name or vname lookup, newest first.
        q = (self.database.session.query(Partition)
             .filter(or_(Partition.name == str(ref), Partition.vname == str(ref)))
             .order_by(Partition.vid.desc()))
        p = q.first()
    if not p:
        raise NotFoundError("No partition for ref: '{}'".format(ref))
    b = self.bundle(p.d_vid)
    p = b.wrap_partition(p)
    if localize:
        p.localize()
    return p
def table(self, ref):
    """ Finds table by ref and returns it.

    Args:
        ref (str): id, vid (versioned id) or name of the table

    Raises:
        NotFoundError: if table with given ref not found.

    Returns:
        orm.Table
    """
    try:
        obj_number = ObjectNumber.parse(ref)
        ds_obj_number = obj_number.as_dataset
        dataset = self._db.dataset(ds_obj_number)  # Could do it in on SQL query, but this is easier.
        table = dataset.table(ref)
    except NotObjectNumberError:
        # Not a vid/id; fall back to a name lookup, newest version first.
        q = self.database.session.query(Table)\
            .filter(Table.name == str(ref))\
            .order_by(Table.vid.desc())
        table = q.first()
        if not table:
            raise NotFoundError("No table for ref: '{}'".format(ref))
    return table
def remove(self, bundle):
    """ Removes a bundle from the library and deletes the configuration for
    it from the library database.

    :param bundle: a Bundle object, or a string reference resolvable by
        self.bundle()
    """
    from six import string_types
    if isinstance(bundle, string_types):
        bundle = self.bundle(bundle)
    self.database.remove_dataset(bundle.dataset)
#
# Storing
#
def create_bundle_file(self, b):
    """Package bundle *b* into a standalone Sqlite database file.

    :param b: the bundle to copy
    :return: path of the new Sqlite database file
    """
    fh, path = tempfile.mkstemp()
    os.close(fh)
    # BUG FIX: mkstemp creates an empty placeholder file at ``path`` that
    # was never used (the database lives at path + '.db') and was leaked on
    # disk; remove it.
    os.remove(path)
    db = Database('sqlite:///{}.db'.format(path))
    db.open()
    b.commit()
    ds = db.copy_dataset(b.dataset)
    ds.commit()
    db.close()
    return db.path
def duplicate(self, b):
    """Duplicate a bundle, with a higher version number.

    This only copies the files, under the theory that the bundle can be rebuilt from them.

    :param b: the bundle to duplicate
    :raises ConflictError: when a bundle with the bumped vid already exists
    :return: the new bundle
    """
    on = b.identity.on
    on.revision = on.revision + 1
    try:
        extant = self.bundle(str(on))
        if extant:
            raise ConflictError('Already have a bundle with vid: {}'.format(str(on)))
    except NotFoundError:
        pass
    # Build the new dataset from the old one's dict, dropping the derived
    # naming fields so they are recomputed for the new revision.
    d = b.dataset.dict
    d['revision'] = on.revision
    d['vid'] = str(on)
    del d['name']
    del d['vname']
    del d['version']
    del d['fqname']
    del d['cache_key']
    ds = self.database.new_dataset(**d)
    nb = self.bundle(ds.vid)
    nb.set_file_system(source_url=b.source_fs.getsyspath('/'))
    nb.state = Bundle.STATES.NEW
    nb.commit()
    # Copy all of the files.
    for f in b.dataset.files:
        assert f.major_type == f.MAJOR_TYPE.BUILDSOURCE
        nb.dataset.files.append(nb.dataset.bsfile(f.minor_type, f.path).update(f))
    # Load the metadata in to records, then back out again. The objects_to_record process will set the
    # new identity object numbers in the metadata file
    nb.build_source_files.file(File.BSFILE.META).record_to_objects()
    nb.build_source_files.file(File.BSFILE.META).objects_to_record()
    ds.commit()
    return nb
def checkin_bundle(self, db_path, replace=True, cb=None):
    """Add a bundle, as a Sqlite file, to this library.

    :param db_path: path to the bundle's Sqlite database file
    :param replace: accepted for interface compatibility but not consulted;
        any existing bundle with the same vid is always removed first
    :param cb: progress callback passed to the dataset copy
    :raises NotFoundError: when the file contains no dataset
    :return: the checked-in Bundle
    """
    from ambry.orm.exc import NotFoundError
    db = Database('sqlite:///{}'.format(db_path))
    db.open()
    if len(db.datasets) == 0:
        raise NotFoundError("Did not get a dataset in the {} bundle".format(db_path))
    ds = db.dataset(db.datasets[0].vid)  # There should only be one
    assert ds is not None
    assert ds._database
    # Remove any existing bundle with the same vid before copying this one in.
    try:
        b = self.bundle(ds.vid)
        self.logger.info(
            "Removing old bundle before checking in new one of same number: '{}'"
            .format(ds.vid))
        self.remove(b)
    except NotFoundError:
        pass
    try:
        self.dataset(ds.vid)  # Skip loading bundles we already have
    except NotFoundError:
        self.database.copy_dataset(ds, cb=cb)
    b = self.bundle(ds.vid)  # It had better exist now.
    # b.state = Bundle.STATES.INSTALLED
    b.commit()
    # self.search.index_library_datasets(tick)
    self.search.index_bundle(b)
    return b
def send_to_remote(self, b, no_partitions=False):
    """
    Copy a bundle to a new Sqlite file, then store the file on the remote.

    NOTE(review): the first statement raises DeprecationWarning as an
    exception, so everything below it is unreachable; the body is retained
    for reference only.

    :param b: The bundle
    :param no_partitions: when True, only the bundle file would be uploaded
    :return: (remote_name, db_ck) — never reached while the raise stands
    """
    raise DeprecationWarning("Don't use any more?")
    from ambry.bundle.process import call_interval
    remote_name = self.resolve_remote(b)
    remote = self.remote(remote_name)
    db_path = b.package()
    with b.progress.start('checkin', 0, message='Check in bundle') as ps:
        ps.add(message='Checking in bundle {} to {}'.format(b.identity.vname, remote))
        db_ck = b.identity.cache_key + '.db'
        ps.add(message='Upload bundle file', item_type='bytes', item_count=0)
        total = [0]
        @call_interval(5)
        def upload_cb(n):
            # Accumulate uploaded byte count for progress reporting.
            total[0] += n
            ps.update(message='Upload bundle file', item_count=total[0])
        with open(db_path) as f:
            remote.makedir(os.path.dirname(db_ck), recursive=True, allow_recreate=True)
            self.logger.info('Send bundle file {} '.format(db_path))
            e = remote.setcontents_async(db_ck, f, progress_callback=upload_cb)
            e.wait()
        ps.update(state='done')
        if not no_partitions:
            for p in b.partitions:
                ps.add(message='Upload partition', item_type='bytes', item_count=0, p_vid=p.vid)
                with p.datafile.open(mode='rb') as fin:
                    total = [0]
                    @call_interval(5)
                    def progress(bytes):
                        total[0] += bytes
                        ps.update(
                            message='Upload partition'.format(p.identity.vname),
                            item_count=total[0])
                    remote.makedir(os.path.dirname(p.datafile.path), recursive=True, allow_recreate=True)
                    event = remote.setcontents_async(p.datafile.path, fin, progress_callback=progress)
                    event.wait()
                ps.update(state='done')
        ps.add(message='Setting metadata')
        # Write the identity dict under each of the reference styles.
        ident = json.dumps(b.identity.dict)
        remote.setcontents(os.path.join('_meta', 'vid', b.identity.vid), ident)
        remote.setcontents(os.path.join('_meta', 'id', b.identity.id_), ident)
        remote.setcontents(os.path.join('_meta', 'vname', text_type(b.identity.vname)), ident)
        remote.setcontents(os.path.join('_meta', 'name', text_type(b.identity.name)), ident)
        ps.update(state='done')
    b.dataset.commit()
    return remote_name, db_ck
def _init_git(self, b):
    """If the source directory is configured for git, create a new repo and
    add the bundle to it.

    NOTE(review): stub -- the body is only this docstring; no git
    initialization is implemented yet.
    """
#
# Remotes
#
def sync_remote(self, remote_name):
    """Check in every bundle listed by a remote, then commit.

    :param remote_name: a remote short name (string) or a Remote object
    """
    from ambry.orm import Remote
    # Resolve a short name to a Remote; pass a Remote object straight through.
    rmt = self.remote(remote_name) if isinstance(remote_name, text_type) else remote_name
    assert isinstance(rmt, Remote)
    for entry in rmt.list():
        self._checkin_remote_bundle(rmt, entry)
    self.commit()
def checkin_remote_bundle(self, ref, remote=None):
    """Check in a remote bundle to this library.

    :param ref: Any bundle reference
    :param remote: If specified, use this remote. If not, search for the
        reference in cached directory listings.
    :return: the vid of the checked-in bundle
    """
    if not remote:
        remote, vname = self.find_remote_bundle(ref)
        # Prefer the canonical vname when the search resolved one.
        if vname:
            ref = vname
    if not remote:
        raise NotFoundError("Failed to find bundle ref '{}' in any remote".format(ref))
    self.logger.info("Load '{}' from '{}'".format(ref, remote))
    vid = self._checkin_remote_bundle(remote, ref)
    self.commit()
    return vid
def _checkin_remote_bundle(self, remote, ref):
    """
    Checkin a remote bundle from a remote

    :param remote: a Remote object
    :param ref: Any bundle reference
    :return: The vid of the loaded bundle
    """
    from ambry.bundle.process import call_interval
    from ambry.orm.exc import NotFoundError
    from ambry.orm import Remote
    from ambry.util.flo import copy_file_or_flo
    from tempfile import NamedTemporaryFile  # NOTE(review): imported but unused here
    assert isinstance(remote, Remote)

    # Throttled progress logger for the download.
    @call_interval(5)
    def cb(r, total):
        self.logger.info("{}: Downloaded {} bytes".format(ref, total))

    b = None
    try:
        # Fast path: the bundle is already installed in this library.
        b = self.bundle(ref)
        self.logger.info("{}: Already installed".format(ref))
        vid = b.identity.vid
    except NotFoundError:
        self.logger.info("{}: Syncing".format(ref))
        db_dir = self.filesystem.downloads('bundles')
        db_f = os.path.join(db_dir, ref) #FIXME. Could get multiple versions of same file. ie vid and vname
        # NOTE(review): db_f is already joined with db_dir, so the extra
        # os.path.join(db_dir, db_f) collapses back to db_f.
        if not os.path.exists(os.path.join(db_dir, db_f)):
            self.logger.info("Downloading bundle '{}' to '{}".format(ref, db_f))
            with open(db_f, 'wb') as f_out:
                with remote.checkout(ref) as f:
                    copy_file_or_flo(f, f_out, cb=cb)
                f_out.flush()
        # NOTE(review): indentation reconstructed -- assumed the check-in runs
        # even when the download was skipped (cached file); confirm upstream.
        self.checkin_bundle(db_f)
        b = self.bundle(ref)  # Should exist now.
        # Record where this bundle came from.
        b.dataset.data['remote_name'] = remote.short_name
        b.dataset.upstream = remote.url
        b.dstate = b.STATES.CHECKEDOUT
        b.commit()
    finally:
        if b:
            b.progress.close()
            vid = b.identity.vid
    return vid
@property
def remotes(self):
    """Yield a Remote for each named remote record in the database.

    Records without a short_name are skipped.
    """
    from ambry.orm import Remote
    for row in self.database.session.query(Remote).all():
        if row.short_name:
            yield self.remote(row.short_name)
def _remote(self, name):
    """Return a remote for which 'name' matches the short_name or url """
    from ambry.orm import Remote
    from sqlalchemy import or_  # NOTE(review): imported but not used below
    from ambry.orm.exc import NotFoundError
    from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
    if not name.strip():
        raise NotFoundError("Empty remote name")
    try:
        # First match on short_name; fall back to matching on url.
        try:
            r = self.database.session.query(Remote).filter(Remote.short_name == name).one()
        except NoResultFound as e:
            r = None
        if not r:
            # A NoResultFound raised here is handled by the outer except.
            r = self.database.session.query(Remote).filter(Remote.url == name).one()
    except NoResultFound as e:
        raise NotFoundError(str(e)+'; '+name)
    except MultipleResultsFound as e:
        # NOTE(review): ambiguous matches return None rather than raising,
        # unlike the not-found case above -- callers must handle both.
        self.logger.error("Got multiple results for search for remote '{}': {}".format(name, e))
        return None
    return r
def remote(self, name_or_bundle):
    """Resolve *name_or_bundle* -- a remote short name, a URL, or a Bundle --
    to a Remote, trying several sources in turn.

    :raises NotFoundError: when no source yields a remote
    """
    from ambry.orm.exc import NotFoundError
    r = None
    if not r:  # NOTE(review): always true at this point; kept from original
        # It is the upstream for the dataset -- where it was checked out from
        # This should really only apply to partitions, so they come from the same place as bundle
        try:
            if name_or_bundle.dstate != Bundle.STATES.BUILDING:
                r = self._remote(name_or_bundle.dataset.upstream)
        except NotFoundError as e:
            raise
            r = None  # NOTE(review): unreachable after the raise above
        except (NotFoundError, AttributeError, KeyError) as e:
            # AttributeError covers string arguments that have no .dstate.
            # NOTE(review): NotFoundError here is dead code -- the handler
            # above re-raises it first.
            r = None
    if not isinstance(name_or_bundle, Bundle):  # It is a remote short_name
        try:
            r = self._remote(text_type(name_or_bundle))
        except NotFoundError:
            r = None
    if not r:  # Explicitly named in the metadata
        try:
            r = self._remote(name_or_bundle.metadata.about.remote)
        except (NotFoundError, AttributeError, KeyError):
            r = None
    if not r:  # Inferred from the metadata
        try:
            r = self._remote(name_or_bundle.metadata.about.access)
        except (NotFoundError, AttributeError, KeyError):
            r = None
    if not r:
        raise NotFoundError("Failed to find remote for ref '{}'".format(str(name_or_bundle)))
    # Give the remote access to stored credentials.
    r.account_accessor = self.account_accessor
    return r
def add_remote(self, r):
    """Persist a Remote record and commit the session."""
    session = self.database.session
    session.add(r)
    self.commit()
def find_or_new_remote(self, name, **kwargs):
    """Return the remote named *name*, creating a new record when missing."""
    try:
        return self.remote(name)
    except NotFoundError:
        from ambry.orm import Remote
        # short_name is implied by *name*; don't pass it through twice.
        if 'short_name' in kwargs:
            assert name == kwargs.pop('short_name')
        new_remote = Remote(short_name=name, **kwargs)
        self.database.session.add(new_remote)
        return new_remote
def delete_remote(self, r_or_name):
    """Delete a remote record, given either a Remote object or its name."""
    from ambry.orm import Remote
    target = r_or_name if isinstance(r_or_name, Remote) else self.remote(r_or_name)
    self.database.session.delete(target)
    self.commit()
def _find_remote_bundle(self, ref, remote_service_type='s3'):
    """
    Locate a bundle, by any reference, among the configured remotes.

    Only the cached directory listings stored on the remotes are consulted,
    so they must be current.

    :param ref: any bundle reference
    :param remote_service_type: restrict the search to remotes of this
        service type; falsy to search all
    :return: (remote, vname) or (None, None) if the ref is not found
    """
    for rmt in self.remotes:
        if remote_service_type and rmt.service != remote_service_type:
            continue
        listing = rmt.data.get('list')
        if listing is None:
            continue
        for entry in listing.values():
            # Match the ref against any identity field in the cached entry.
            if ref in entry.values():
                return rmt, entry['vname']
    return None, None
def find_remote_bundle(self, ref, try_harder=None):
    """
    Locate a bundle, by any reference, among the configured remotes. The routine will only look in the cache
    directory lists stored in the remotes, which must be updated to be current.

    :param ref: A bundle or partition reference, vid, or name
    :param try_harder: If the reference isn't found, try parsing for an object id, or subsets of the name
    :return: (remote,vname) or (None,None) if the ref is not found
    """
    from ambry.identity import ObjectNumber
    remote, vid = self._find_remote_bundle(ref)
    if remote:
        return (remote, vid)
    if try_harder:
        # NOTE(review): BUG -- vid is None on this path (the search above
        # returned (None, None)); this most likely should parse `ref`.
        on = ObjectNumber.parse(vid)
        if on:
            raise NotImplementedError()
            # NOTE(review): the rest of this branch is unreachable.
            don = on.as_dataset
            return self._find_remote_bundle(vid)
        # Try subsets of a name, assuming it is a name
        # NOTE(review): indentation reconstructed; the subset fallback is
        # assumed to be part of the try_harder path -- confirm upstream.
        parts = ref.split('-')
        for i in range(len(parts) - 1, 2, -1):
            remote, vid = self._find_remote_bundle('-'.join(parts[:i]))
            if remote:
                return (remote, vid)
    return (None, None)
#
# Accounts
#
@property
def password(self):
    """Password used to decrypt stored account secrets."""
    return self._account_password
@password.setter
def password(self, value):
    # Stored in memory only; used when reading Account secrets.
    self._account_password = value
def account(self, url):
    """Return account credentials for *url*.

    Looks first in the legacy Account table, keyed by the URL's netloc,
    then falls back to a configured remote whose URL prefixes *url*.

    :param url: url identifying the account
    :raises NotFoundError: when neither source matches
    """
    from sqlalchemy.orm.exc import NoResultFound
    from ambry.orm.exc import NotFoundError
    from ambry.util import parse_url_to_dict
    from ambry.orm import Account
    netloc = parse_url_to_dict(url)['netloc']
    # Old method of storing account information.
    try:
        acct = (self.database.session.query(Account)
                .filter(Account.account_id == netloc).one())
    except NoResultFound:
        pass
    else:
        acct.secret_password = self._account_password
        return acct
    # Try the remotes.
    for rmt in self.remotes:
        if url.startswith(rmt.url):
            return rmt
    raise NotFoundError("Did not find account for url: '{}' ".format(url))
@property
def account_accessor(self):
    """Return a callable mapping an account id to its credential dict."""
    return lambda account_id: self.account(account_id).dict
@property
def accounts(self):
    """Return all account records as a dict keyed by account_id.

    When a library password is set, each record's secrets are decrypted
    with it before the dict is built.

    :return: dict mapping account_id -> account dict
    """
    # NOTE: an older guard that refused access without an account password
    # was disabled in the original with `if False and ...`; that dead
    # branch has been removed.
    accounts_by_id = {}
    # Account is expected to be imported at module level, as in the original.
    for act in self.database.session.query(Account).all():
        if self._account_password:
            act.secret_password = self._account_password
        record = act.dict
        accounts_by_id[record['account_id']] = record
    return accounts_by_id
def add_account(self, a):
    """Persist an Account record and commit the session."""
    session = self.database.session
    session.add(a)
    self.commit()
def delete_account(self, a):
    """Delete an account, given an Account object or an account id string."""
    from six import string_types
    target = self.account(a) if isinstance(a, string_types) else a
    self.database.session.delete(target)
    self.commit()
def find_or_new_account(self, name, **kwargs):
    """Return the account *name*, creating a new record when missing."""
    try:
        return self.account(name)
    except NotFoundError:
        from ambry.orm import Account
        acct = Account(account_id=name, **kwargs)
        self.database.session.add(acct)
        # New records get the library password for secret encryption.
        acct.secret_password = self._account_password
        return acct
@property
def services(self):
    """Service configuration from the root dataset's library config."""
    root = self.database.root_dataset
    return root.config.library['services']
@property
def ui_config(self):
    """UI configuration from the root dataset's library config."""
    root = self.database.root_dataset
    return root.config.library['ui']
def number(self, assignment_class=None, namespace='d'):
    """
    Return a new number.

    :param assignment_class: Determines the length of the number. Possible values are 'authority' (3 characters),
        'registered' (5), 'unregistered' (7) and 'self' (9). Self assigned numbers are random and acquired locally,
        while the other assignment classes use the number server defined in the configuration. If None,
        then look in the number server configuration for one of the class keys, starting
        with the longest class and working to the shortest.
    :param namespace: The namespace character, the first character in the number. Can be one of 'd', 'x' or 'b'.
        NOTE: currently unused by this implementation; kept for interface compatibility.
    :return: the new number, as a string
    """
    if assignment_class == 'self':
        # When 'self' is explicit, don't look for number server config
        return str(DatasetNumber())
    elif assignment_class is None:
        try:
            nsconfig = self.services['numbers']
        except ConfigurationError:
            # A missing configuration is equivalent to 'self'
            self.logger.error('No number server configuration; returning self assigned number')
            return str(DatasetNumber())
        # Pick the first configured class key, shortest class first.
        for assignment_class in ('self', 'unregistered', 'registered', 'authority'):
            if assignment_class + '-key' in nsconfig:
                break
        # For the case where the number configuration references a self-assigned key
        if assignment_class == 'self':
            return str(DatasetNumber())
    else:
        try:
            nsconfig = self.services['numbers']
        except ConfigurationError:
            raise ConfigurationError('No number server configuration')
        if assignment_class + '-key' not in nsconfig:
            # BUG FIX: message previously read "not number server config".
            raise ConfigurationError(
                'Assignment class {} not in number server config'.format(assignment_class))
    try:
        key = nsconfig[assignment_class + '-key']
        config = {
            'key': key,
            'host': nsconfig['host'],
            'port': nsconfig.get('port', 80)
        }
        ns = NumberServer(**config)
        n = str(next(ns))
        self.logger.info('Got number from number server: {}'.format(n))
    except HTTPError as e:
        # BUG FIX: the original format string had one placeholder for two
        # arguments, silently dropping the error; also replaced the
        # py2-only e.message with str(e).
        self.logger.error('Failed to get number from number server for key {}: {}'.format(key, e))
        self.logger.error('Using self-generated number. There is no problem with this, '
                          'but they are longer than centrally generated numbers.')
        n = str(DatasetNumber())
    return n
def edit_history(self):
    """Return config record information about the most recent bundle
    accesses and operations, newest first."""
    query = self._db.session.query(Config)
    query = query.filter(Config.type == 'buildstate')
    query = query.filter(Config.group == 'access')
    query = query.filter(Config.key == 'last')
    return query.order_by(Config.modified.desc()).all()
@property
def search(self):
    """Lazily construct and cache the library's Search interface."""
    if not self._search:
        self._search = Search(self)
    return self._search
def install_packages(self, module_name, pip_name, force=False):
    """Install a python requirement into the library's python directory.

    :param module_name: importable module name used to detect presence
    :param pip_name: name handed to pip for installation
    :param force: install/upgrade even when the module already imports
    """
    from ambry.util.packages import install
    target_dir = self._fs.python()
    if not target_dir:
        raise ConfigurationError(
            "Can't install python requirements without a configuration item for filesystems.python")
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    # Make the directory importable for the current process too.
    sys.path.append(target_dir)
    if force:
        self.logger.info('Upgrading required package: {}->{}'.format(module_name, pip_name))
        install(target_dir, module_name, pip_name)
        return
    try:
        imp.find_module(module_name)
        # Already importable; nothing to do.
    except ImportError:
        self.logger.info('Installing required package: {}->{}'.format(module_name, pip_name))
        install(target_dir, module_name, pip_name)
def import_bundles(self, dir, detach=False, force=False):
    """
    Import bundles from a directory tree of bundle.yaml files.

    :param dir: directory (or fs url) to walk for bundle.yaml files
    :param detach: when True, detach each bundle from its source directory after sync
    :param force: when True, sync each bundle back out after importing
    :return: list of imported Bundle objects
    """
    import yaml
    fs = fsopendir(dir)
    bundles = []
    for f in fs.walkfiles(wildcard='bundle.yaml'):
        self.logger.info('Visiting {}'.format(f))
        # SECURITY NOTE: yaml.load without an explicit Loader can construct
        # arbitrary python objects; prefer yaml.safe_load if these configs
        # are plain data.
        config = yaml.load(fs.getcontents(f))
        if not config:
            self.logger.error("Failed to get a valid bundle configuration from '{}'".format(f))
            # BUG FIX: previously fell through and crashed on
            # config['identity'] with a TypeError.
            continue
        bid = config['identity']['id']
        try:
            b = self.bundle(bid)
        except NotFoundError:
            b = None
        if not b:
            b = self.new_from_bundle_config(config)
            self.logger.info('{} Loading New'.format(b.identity.fqname))
        else:
            self.logger.info('{} Loading Existing'.format(b.identity.fqname))
        source_url = os.path.dirname(fs.getsyspath(f))
        b.set_file_system(source_url=source_url)
        self.logger.info('{} Loading from {}'.format(b.identity.fqname, source_url))
        b.sync_in()
        if detach:
            self.logger.info('{} Detaching'.format(b.identity.fqname))
            b.set_file_system(source_url=None)
        if force:
            self.logger.info('{} Sync out'.format(b.identity.fqname))
            # FIXME. It won't actually sync out until re-starting the bundle.
            # The source_file_system is probably cached
            b = self.bundle(bid)
            b.sync_out()
        bundles.append(b)
        b.close()
    return bundles
def process_pool(self, limited_run=False):
    """Return a pool for multiprocess operations, sized either to the
    number of CPUs or to the configured process count."""
    from multiprocessing import cpu_count
    from ambry.bundle.concurrent import Pool, init_library
    # A falsy configured value means "use all CPUs".
    worker_count = self.processes or cpu_count()
    self.logger.info('Starting MP pool with {} processors'.format(worker_count))
    return Pool(
        self,
        processes=worker_count,
        initializer=init_library,
        maxtasksperchild=1,
        initargs=[self.database.dsn, self._account_password, limited_run])
| {
"repo_name": "CivicKnowledge/ambry",
"path": "ambry/library/__init__.py",
"copies": "1",
"size": "35853",
"license": "bsd-2-clause",
"hash": 9155002464403941000,
"line_mean": 29.4613423959,
"line_max": 121,
"alpha_frac": 0.5736479514,
"autogenerated": false,
"ratio": 4.248992652287272,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5322640603687272,
"avg_score": null,
"num_lines": null
} |
"""A library of common shape functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import tensor_shape
def scalar_shape(unused_op):
  """Shape function for ops that output a scalar value."""
  # A scalar's shape does not depend on the op, hence the unused argument.
  return [tensor_shape.scalar()]
def unchanged_shape(op):
  """Shape function for ops whose output is shaped like their first input."""
  first_input = op.inputs[0]
  return [first_input.get_shape()]
def unchanged_shape_with_rank(rank):
  """Returns a shape function for ops that constrain the rank of their input.

  Args:
    rank: The exact rank of the input and output.

  Returns:
    A shape function for ops that output a tensor of the same size as their
    input, with exactly the given rank.
  """
  def _constrained(op):
    input_shape = op.inputs[0].get_shape()
    return [input_shape.with_rank(rank)]
  return _constrained
def unchanged_shape_with_rank_at_least(rank):
  """Returns a shape function for ops that constrain the rank of their input.

  Args:
    rank: A lower bound on the rank of the input and output.

  Returns:
    A shape function for ops that output a tensor of the same size as their
    input, with at least the given rank.
  """
  def _constrained(op):
    input_shape = op.inputs[0].get_shape()
    return [input_shape.with_rank_at_least(rank)]
  return _constrained
def unchanged_shape_with_rank_at_most(rank):
  """Returns a shape function for ops that constrain the rank of their input.

  Args:
    rank: An upper bound on the rank of the input and output.

  Returns:
    A shape function for ops that output a tensor of the same size as their
    input, with at most the given rank.
  """
  def _constrained(op):
    input_shape = op.inputs[0].get_shape()
    return [input_shape.with_rank_at_most(rank)]
  return _constrained
def matmul_shape(op):
  """Shape function for a MatMul op."""
  a_shape = op.inputs[0].get_shape().with_rank(2)
  b_shape = op.inputs[1].get_shape().with_rank(2)
  transpose_a = op.get_attr("transpose_a")
  transpose_b = op.get_attr("transpose_b")
  # Row/column roles swap when an operand is transposed.
  rows = a_shape[1] if transpose_a else a_shape[0]
  cols = b_shape[0] if transpose_b else b_shape[1]
  # The contracted dimensions must agree.
  contract_a = a_shape[0] if transpose_a else a_shape[1]
  contract_b = b_shape[1] if transpose_b else b_shape[0]
  contract_a.assert_is_compatible_with(contract_b)
  return [tensor_shape.TensorShape([rows, cols])]
def bias_add_shape(op):
  """Shape function for a BiasAdd op."""
  input_shape = op.inputs[0].get_shape().with_rank_at_least(2)
  bias_shape = op.inputs[1].get_shape().with_rank(1)
  if input_shape.ndims is None:
    # Unknown rank: nothing further can be inferred.
    return [tensor_shape.unknown_shape()]
  # Output keeps the input shape; the last dim must match the bias length.
  merged_last = input_shape[-1].merge_with(bias_shape[0])
  return [input_shape[0:-1].concatenate(merged_last)]
def get2d_conv_output_size(input_height, input_width, filter_height,
                           filter_width, row_stride, col_stride, padding_type):
  """Returns the number of rows and columns in a convolution/pooling output.

  Args:
    input_height, input_width: input spatial dims (Dimension-compatible).
    filter_height, filter_width: filter spatial dims (Dimension-compatible).
    row_stride, col_stride: strides; coerced to int.
    padding_type: "VALID" or "SAME".

  Returns:
    (out_rows, out_cols); an entry is None when its inputs are unknown.

  Raises:
    ValueError: if the filter is larger than the input, a stride exceeds the
      filter size, or padding_type is invalid.
  """
  input_height = tensor_shape.as_dimension(input_height)
  input_width = tensor_shape.as_dimension(input_width)
  filter_height = tensor_shape.as_dimension(filter_height)
  filter_width = tensor_shape.as_dimension(filter_width)
  row_stride = int(row_stride)
  col_stride = int(col_stride)
  if filter_height.value == 1 and filter_width.value == 1 and (
      row_stride == 1 and col_stride == 1):
    return input_height, input_width
  else:
    if filter_height > input_height or filter_width > input_width:
      raise ValueError("filter must not be larger than the input: ",
                       "Filter: [", filter_height, "x", filter_width, "] ",
                       "Input: [", input_height, "x", input_width, "] ")
    if row_stride > filter_height or col_stride > filter_width:
      raise ValueError("stride must be less than or equal to filter size",
                       "stride: [", row_stride, "x", col_stride, "] ",
                       "filter: [", filter_height, "x", filter_width, "] ")
    # Compute number of rows in the output, based on the padding.
    if input_height.value is None or filter_height.value is None:
      out_rows = None
    elif padding_type == "VALID":
      out_rows = ((input_height.value - filter_height.value + row_stride) //
                  row_stride)
    elif padding_type == "SAME":
      out_rows = (input_height.value + row_stride - 1) // row_stride
    else:
      raise ValueError("Invalid value for padding: %r" % padding_type)
    # Compute number of columns in the output, based on the padding.
    if input_width.value is None or filter_width.value is None:
      out_cols = None
    elif padding_type == "VALID":
      out_cols = ((input_width.value - filter_width.value + col_stride) //
                  col_stride)
    elif padding_type == "SAME":
      out_cols = (input_width.value + col_stride - 1) // col_stride
    else:
      # BUG FIX: previously there was no else here, so an invalid padding
      # combined with an unknown input height left out_cols unbound and
      # raised UnboundLocalError instead of a meaningful error.
      raise ValueError("Invalid value for padding: %r" % padding_type)
    return out_rows, out_cols
def conv2d_shape(op):
  """Shape function for a Conv2D op.

  Inputs:
    * input: 4D, [batch_size, rows, cols, depth_in]
    * filter: 4D, [filter_rows, filter_cols, depth_in, depth_out]

  The output is 4D, [batch_size, out_rows, out_cols, depth_out], with
  out_rows/out_cols determined by the "padding" and "strides" attrs.

  Args:
    op: A Conv2D Operation.

  Returns:
    A list containing the Shape of the Conv2D output.

  Raises:
    ValueError: If the shapes of the input or filter are incompatible.
  """
  input_shape = op.inputs[0].get_shape().with_rank(4)
  filter_shape = op.inputs[1].get_shape().with_rank(4)
  # The input depth must match the filter's input-depth dimension.
  input_shape[3].assert_is_compatible_with(filter_shape[2])
  stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
  if stride_b != 1 or stride_d != 1:
    raise ValueError("Current implementation does not yet support "
                     "strides in the batch and depth dimensions.")
  if stride_r != stride_c:
    # TODO(shlens): Add support for this.
    raise ValueError("Current implementation only supports equal length "
                     "strides in the row and column dimensions.")
  # TODO(mrry,shlens): Raise an error if the stride would cause
  # information in the input to be ignored. This will require a change
  # in the kernel implementation.
  padding = op.get_attr("padding")
  out_rows, out_cols = get2d_conv_output_size(
      input_shape[1], input_shape[2], filter_shape[0], filter_shape[1],
      stride_r, stride_r, padding)
  return [tensor_shape.TensorShape(
      [input_shape[0], out_rows, out_cols, filter_shape[3]])]
def separable_conv2d_shape(op):
  """Shape function for a SeparableConv2D op.

  Inputs:
    * input: 4D, [batch_size, rows, cols, depth_in]
    * depthwise_filter: 4D, [filter_rows, filter_cols, depth_in,
      depth_multiplier]
    * pointwise_filter: 4D, [1, 1, depth_in * depth_multiplier, depth_out]

  The output is 4D, [batch_size, out_rows, out_cols, depth_out], with
  out_rows/out_cols determined by the "padding" and "strides" attrs.

  Args:
    op: A SeparableConv2D Operation.

  Returns:
    A list containing the Shape of the SeparableConv2D output.

  Raises:
    ValueError: If the shapes of the input or filter are incompatible.
  """
  input_shape = op.inputs[0].get_shape().with_rank(4)
  # Constrain the depthwise filter's depth_in to match the input depth.
  depthwise_filter_shape = op.inputs[1].get_shape().merge_with(
      tensor_shape.TensorShape([None, None, input_shape[3], None]))
  pointwise_depth_in = depthwise_filter_shape[2] * depthwise_filter_shape[3]
  # The pointwise filter is 1x1 with depth_in * depth_multiplier channels in.
  pointwise_filter_shape = op.inputs[2].get_shape().merge_with(
      tensor_shape.TensorShape([1, 1, pointwise_depth_in, None]))
  stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
  if stride_b != 1 or stride_d != 1:
    raise ValueError("Current implementation does not yet support "
                     "strides in the batch and depth dimensions.")
  if stride_r != stride_c:
    # TODO(shlens): Add support for this.
    raise ValueError("Current implementation only supports equal length "
                     "strides in the row and column dimensions.")
  # TODO(mrry,shlens): Raise an error if the stride would cause
  # information in the input to be ignored. This will require a change
  # in the kernel implementation.
  padding = op.get_attr("padding")
  out_rows, out_cols = get2d_conv_output_size(
      input_shape[1], input_shape[2], depthwise_filter_shape[0],
      depthwise_filter_shape[1], stride_r, stride_r, padding)
  return [tensor_shape.TensorShape(
      [input_shape[0], out_rows, out_cols, pointwise_filter_shape[3]])]
def avg_pool_shape(op):
  """Shape function for an AvgPool op.

  The single input is 4D, [batch_size, rows, cols, depth]; the output is
  [batch_size, out_rows, out_cols, depth], with out_rows/out_cols
  determined by the "ksize", "strides", and "padding" attrs.

  Args:
    op: An AvgPool Operation.

  Returns:
    A single-element list containing the Shape of the AvgPool output.

  Raises:
    ValueError: If the shape of the input is invalid or incompatible with
      the values of the attrs.
  """
  input_shape = op.inputs[0].get_shape().with_rank(4)
  ksize_b, ksize_r, ksize_c, ksize_d = op.get_attr("ksize")
  stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
  if ksize_b != 1 or ksize_d != 1:
    raise ValueError("Current implementation does not support pooling "
                     "in the batch and depth dimensions.")
  if stride_b != 1 or stride_d != 1:
    raise ValueError("Current implementation does not support strides "
                     "in the batch and depth dimensions.")
  # TODO(mrry,shlens): Raise an error if the stride would cause
  # information in the input to be ignored. This will require a change
  # in the kernel implementation.
  padding = op.get_attr("padding")
  out_rows, out_cols = get2d_conv_output_size(
      input_shape[1], input_shape[2], ksize_r, ksize_c, stride_r, stride_c,
      padding)
  # Average pooling never changes the depth.
  return [tensor_shape.TensorShape(
      [input_shape[0], out_rows, out_cols, input_shape[3]])]
def max_pool_shape(op):
  """Shape function for a MaxPool op.

  The single input is 4D, [batch_size, rows, cols, depth_in]; the output is
  [batch_size, out_rows, out_cols, depth_out], with out_rows/out_cols/
  depth_out determined by the "ksize", "strides", and "padding" attrs.

  Args:
    op: A MaxPool Operation.

  Returns:
    A single-element list containing the Shape of the MaxPool output.

  Raises:
    ValueError: If the shape of the input is invalid or incompatible with
      the values of the attrs.
  """
  input_shape = op.inputs[0].get_shape().with_rank(4)
  ksize_b, ksize_r, ksize_c, ksize_d = op.get_attr("ksize")
  stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
  batch_size = input_shape[0]
  in_rows = input_shape[1]
  in_cols = input_shape[2]
  depth = input_shape[3]
  if ksize_b != 1:
    raise ValueError("Current implementation does not support pooling "
                     "in the batch dimension.")
  if stride_b != 1:
    raise ValueError("Current implementation does not support strides "
                     "in the batch dimension.")
  if not ((ksize_r == 1 and ksize_c == 1) or ksize_d == 1):
    raise ValueError("MaxPooling supports exactly one of pooling across depth "
                     "or pooling across width/height.")
  # TODO(mrry,shlens): Raise an error if the stride would cause
  # information in the input to be ignored. This will require a change
  # in the kernel implementation.
  if ksize_d != 1:
    # Depthwise pooling: spatial dims are unchanged, depth is divided by
    # the depth window.
    if depth % ksize_d > 0:
      raise ValueError("Depthwise max pooling requires the depth window "
                       "to evenly divide the input depth.")
    if stride_d != ksize_d:
      raise ValueError("Depthwise max pooling requires the depth window "
                       "to equal the depth stride.")
    return [tensor_shape.TensorShape(
        [batch_size, in_rows, in_cols, depth // ksize_d])]
  # Spatial pooling: depth is unchanged.
  padding = op.get_attr("padding")
  out_rows, out_cols = get2d_conv_output_size(
      in_rows, in_cols, ksize_r, ksize_c, stride_r, stride_c, padding)
  return [tensor_shape.TensorShape([batch_size, out_rows, out_cols, depth])]
def no_outputs(unused_op):
  """Shape function for use with ops that have no outputs."""
  del unused_op  # No outputs means nothing to inspect.
  return []
def unknown_shape(op):
  """Shape function for use with ops whose output shapes are unknown."""
  # One fresh unknown shape per output.
  return [tensor_shape.unknown_shape() for _unused in op.outputs]
| {
"repo_name": "kcartier/tensorflow-toe-in-the-water",
"path": "tensorflow/python/ops/common_shapes.py",
"copies": "1",
"size": "13282",
"license": "apache-2.0",
"hash": -6323996811829626000,
"line_mean": 34.8972972973,
"line_max": 80,
"alpha_frac": 0.6675199518,
"autogenerated": false,
"ratio": 3.533386538973131,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9652624187664942,
"avg_score": 0.009656460621637691,
"num_lines": 370
} |
"""A library of common shape functions."""
import math
from tensorflow.python.framework import tensor_shape
def scalar_shape(unused_op):
  """Shape function for ops that output a scalar value."""
  del unused_op  # The result shape is fixed; the op is irrelevant.
  return [tensor_shape.scalar()]
def unchanged_shape(op):
  """Shape function for ops whose output matches their first input's shape."""
  shape_of_first = op.inputs[0].get_shape()
  return [shape_of_first]
def unchanged_shape_with_rank(rank):
  """Returns a shape function for ops that constrain the rank of their input.

  Args:
    rank: The exact rank of the input and output.

  Returns:
    A shape function asserting that the input (and therefore the output)
    has exactly the given rank.
  """
  def _checked(op):
    shape = op.inputs[0].get_shape()
    return [shape.with_rank(rank)]
  return _checked
def unchanged_shape_with_rank_at_least(rank):
  """Returns a shape function for ops that constrain the rank of their input.

  Args:
    rank: A lower bound on the rank of the input and output.

  Returns:
    A shape function asserting that the input (and therefore the output)
    has at least the given rank.
  """
  def _checked(op):
    shape = op.inputs[0].get_shape()
    return [shape.with_rank_at_least(rank)]
  return _checked
def unchanged_shape_with_rank_at_most(rank):
  """Returns a shape function for ops that constrain the rank of their input.

  Args:
    rank: An upper bound on the rank of the input and output.

  Returns:
    A shape function asserting that the input (and therefore the output)
    has at most the given rank.
  """
  def _checked(op):
    shape = op.inputs[0].get_shape()
    return [shape.with_rank_at_most(rank)]
  return _checked
def matmul_shape(op):
  """Shape function for a MatMul op."""
  a_shape = op.inputs[0].get_shape().with_rank(2)
  b_shape = op.inputs[1].get_shape().with_rank(2)
  transpose_a = op.get_attr("transpose_a")
  transpose_b = op.get_attr("transpose_b")
  # With transposition, the operand dims swap roles.
  a_rows, a_cols = ((a_shape[1], a_shape[0]) if transpose_a
                    else (a_shape[0], a_shape[1]))
  b_rows, b_cols = ((b_shape[1], b_shape[0]) if transpose_b
                    else (b_shape[0], b_shape[1]))
  # Inner (contracted) dimensions must be compatible.
  a_cols.assert_is_compatible_with(b_rows)
  return [tensor_shape.TensorShape([a_rows, b_cols])]
def bias_add_shape(op):
  """Shape function for a BiasAdd op."""
  input_shape = op.inputs[0].get_shape().with_rank_at_least(2)
  bias_shape = op.inputs[1].get_shape().with_rank(1)
  if input_shape.ndims is None:
    # Rank unknown: nothing more can be inferred.
    return [tensor_shape.unknown_shape()]
  # Same shape as the input, with the last dim merged against the bias.
  merged_last = input_shape[-1].merge_with(bias_shape[0])
  return [input_shape[0:-1].concatenate(merged_last)]
def _Get2DOutputSize(input_height, input_width, filter_height, filter_width,
                     row_stride, col_stride, padding_type):
  """Returns the number of rows and columns in a convolution/pooling output.

  Args:
    input_height, input_width: input spatial dims (Dimension-compatible).
    filter_height, filter_width: filter spatial dims (Dimension-compatible).
    row_stride, col_stride: strides; coerced to int.
    padding_type: "VALID" or "SAME".

  Returns:
    (out_rows, out_cols); an entry is None when its inputs are unknown.

  Raises:
    ValueError: if the filter is larger than the input, a stride exceeds the
      filter size, or padding_type is invalid.
  """
  input_height = tensor_shape.as_dimension(input_height)
  input_width = tensor_shape.as_dimension(input_width)
  filter_height = tensor_shape.as_dimension(filter_height)
  filter_width = tensor_shape.as_dimension(filter_width)
  row_stride = int(row_stride)
  col_stride = int(col_stride)
  if filter_height.value == 1 and filter_width.value == 1 and (
      row_stride == 1 and col_stride == 1):
    return input_height, input_width
  else:
    if filter_height > input_height or filter_width > input_width:
      raise ValueError("filter must not be larger than the input: ",
                       "Filter: [", filter_height, "x", filter_width, "] ",
                       "Input: [", input_height, "x", input_width, "] ")
    if row_stride > filter_height or col_stride > filter_width:
      raise ValueError("stride must be less than or equal to filter size",
                       "stride: [", row_stride, "x", col_stride, "] ",
                       "filter: [", filter_height, "x", filter_width, "] ")
    # Compute number of rows in the output, based on the padding.
    if input_height.value is None or filter_height.value is None:
      out_rows = None
    elif padding_type == "VALID":
      out_rows = int(
          math.ceil((input_height.value - filter_height.value + 1.0)
                    / row_stride))
    elif padding_type == "SAME":
      out_rows = int(math.ceil(input_height.value * 1.0
                               / row_stride))
    else:
      raise ValueError("Invalid value for padding: %r" % padding_type)
    # Compute number of columns in the output, based on the padding.
    if input_width.value is None or filter_width.value is None:
      out_cols = None
    elif padding_type == "VALID":
      out_cols = int(
          math.ceil((input_width.value - filter_width.value + 1.0)
                    / col_stride))
    elif padding_type == "SAME":
      out_cols = int(math.ceil(input_width.value * 1.0 / col_stride))
    else:
      # BUG FIX: previously there was no else here, so an invalid padding
      # combined with an unknown input height left out_cols unbound and
      # raised UnboundLocalError instead of a meaningful error.
      raise ValueError("Invalid value for padding: %r" % padding_type)
    return out_rows, out_cols
def conv2d_shape(op):
    """Shape function for a Conv2D op.

    The op has two inputs:
      * input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
      * filter, a 4D tensor with shape = [filter_rows, filter_cols,
        depth_in, depth_out]

    Args:
      op: A Conv2D Operation.

    Returns:
      A list containing the 4D output shape [batch_size, out_rows,
      out_cols, depth_out], where out_rows and out_cols depend on the
      op's "padding" and "strides" attrs.

    Raises:
      ValueError: If the shapes of the input or filter are incompatible.
    """
    image_shape = op.inputs[0].get_shape().with_rank(4)
    kernel_shape = op.inputs[1].get_shape().with_rank(4)

    # The input depth of the image must agree with the filter's input depth.
    image_shape[3].assert_is_compatible_with(kernel_shape[2])

    stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
    if stride_b != 1 or stride_d != 1:
        raise ValueError("Current implementation does not yet support "
                         "strides in the batch and depth dimensions.")
    if stride_r != stride_c:
        # TODO(shlens): Add support for this.
        raise ValueError("Current implementation only supports equal length "
                         "strides in the row and column dimensions.")
    # TODO(mrry,shlens): Raise an error if the stride would cause
    # information in the input to be ignored. This will require a change
    # in the kernel implementation.
    out_rows, out_cols = _Get2DOutputSize(
        image_shape[1], image_shape[2], kernel_shape[0], kernel_shape[1],
        stride_r, stride_r, op.get_attr("padding"))
    return [tensor_shape.TensorShape(
        [image_shape[0], out_rows, out_cols, kernel_shape[3]])]
def separable_conv2d_shape(op):
    """Shape function for a SeparableConv2D op.

    The op has three inputs:
      * input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
      * depthwise_filter, a 4D tensor with shape = [filter_rows,
        filter_cols, depth_in, depth_multiplier]
      * pointwise_filter, a 4D tensor with shape = [1, 1, depth_in *
        depth_multiplier, depth_out]

    Args:
      op: A SeparableConv2D Operation.

    Returns:
      A list containing the 4D output shape [batch_size, out_rows,
      out_cols, depth_out], where out_rows and out_cols depend on the
      op's "padding" and "strides" attrs.

    Raises:
      ValueError: If the shapes of the input or filter are incompatible.
    """
    image_shape = op.inputs[0].get_shape().with_rank(4)
    # The depthwise filter's input depth must match the image depth.
    dw_filter_shape = op.inputs[1].get_shape().merge_with(
        tensor_shape.TensorShape([None, None, image_shape[3], None]))
    # The pointwise filter consumes depth_in * depth_multiplier channels.
    pw_depth_in = dw_filter_shape[2] * dw_filter_shape[3]
    pw_filter_shape = op.inputs[2].get_shape().merge_with(
        tensor_shape.TensorShape([1, 1, pw_depth_in, None]))

    stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
    if stride_b != 1 or stride_d != 1:
        raise ValueError("Current implementation does not yet support "
                         "strides in the batch and depth dimensions.")
    if stride_r != stride_c:
        # TODO(shlens): Add support for this.
        raise ValueError("Current implementation only supports equal length "
                         "strides in the row and column dimensions.")
    # TODO(mrry,shlens): Raise an error if the stride would cause
    # information in the input to be ignored. This will require a change
    # in the kernel implementation.
    out_rows, out_cols = _Get2DOutputSize(
        image_shape[1], image_shape[2], dw_filter_shape[0], dw_filter_shape[1],
        stride_r, stride_r, op.get_attr("padding"))
    return [tensor_shape.TensorShape(
        [image_shape[0], out_rows, out_cols, pw_filter_shape[3]])]
def avg_pool_shape(op):
    """Shape function for an AvgPool op.

    The single input is a 4D tensor [batch_size, rows, cols, depth].

    Args:
      op: An AvgPool Operation.

    Returns:
      A single-element list containing the 4D output shape [batch_size,
      out_rows, out_cols, depth], where out_rows and out_cols depend on
      the op's "ksize", "strides", and "padding" attrs.

    Raises:
      ValueError: If the shape of the input is invalid or incompatible with
        the values of the attrs.
    """
    image_shape = op.inputs[0].get_shape().with_rank(4)
    ksize_b, ksize_r, ksize_c, ksize_d = op.get_attr("ksize")
    stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")

    if ksize_b != 1 or ksize_d != 1:
        raise ValueError("Current implementation does not support pooling "
                         "in the batch and depth dimensions.")
    if stride_b != 1 or stride_d != 1:
        raise ValueError("Current implementation does not support strides "
                         "in the batch and depth dimensions.")
    # TODO(mrry,shlens): Raise an error if the stride would cause
    # information in the input to be ignored. This will require a change
    # in the kernel implementation.
    out_rows, out_cols = _Get2DOutputSize(
        image_shape[1], image_shape[2], ksize_r, ksize_c,
        stride_r, stride_c, op.get_attr("padding"))
    return [tensor_shape.TensorShape(
        [image_shape[0], out_rows, out_cols, image_shape[3]])]
def max_pool_shape(op):
    """Shape function for a MaxPool op.

    This op has one input:
      * input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]

    The output is a 4D tensor with shape = [batch_size, out_rows,
    out_cols, depth_out], where out_rows, out_cols, and depth_out depend
    on the value of the op's "ksize", "strides", and "padding" attrs.

    Args:
      op: A MaxPool Operation.

    Returns:
      A single-element list containing the Shape of the MaxPool output.

    Raises:
      ValueError: If the shape of the input is invalid or incompatible with
        the values of the attrs.
    """
    input_shape = op.inputs[0].get_shape().with_rank(4)
    ksize_b, ksize_r, ksize_c, ksize_d = op.get_attr("ksize")
    stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
    batch_size = input_shape[0]
    in_rows = input_shape[1]
    in_cols = input_shape[2]
    depth = input_shape[3]

    if ksize_b != 1:
        raise ValueError("Current implementation does not support pooling "
                         "in the batch dimension.")
    if stride_b != 1:
        raise ValueError("Current implementation does not support strides "
                         "in the batch dimension.")

    # Exactly one of spatial (rows/cols) pooling or depth pooling is allowed,
    # never both at once.
    if not ((ksize_r == 1 and ksize_c == 1) or ksize_d == 1):
        raise ValueError("MaxPooling supports exactly one of pooling across depth "
                         "or pooling across width/height.")

    # TODO(mrry,shlens): Raise an error if the stride would cause
    # information in the input to be ignored. This will require a change
    # in the kernel implementation.
    if ksize_d == 1:
        # Spatial pooling: rows/cols shrink per ksize/stride/padding,
        # depth is unchanged.
        padding = op.get_attr("padding")
        out_rows, out_cols = _Get2DOutputSize(
            in_rows, in_cols, ksize_r, ksize_c, stride_r, stride_c, padding)
        return [tensor_shape.TensorShape([batch_size, out_rows, out_cols, depth])]
    else:
        # Depthwise pooling: spatial dims are unchanged, depth is reduced.
        if depth % ksize_d > 0:
            raise ValueError("Depthwise max pooling requires the depth window "
                             "to evenly divide the input depth.")
        if stride_d != ksize_d:
            raise ValueError("Depthwise max pooling requires the depth window "
                             "to equal the depth stride.")
        # NOTE(review): ``depth / ksize_d`` is true division under Python 3;
        # presumably this was written for Python 2 integer division —
        # confirm the Dimension type handles ``/`` as intended here.
        return [tensor_shape.TensorShape(
            [batch_size, in_rows, in_cols, depth / ksize_d])]
def no_outputs(unused_op):
    """Shape function for use with ops that have no outputs."""
    # Nothing to report: an op without outputs has no output shapes.
    return list()
def unknown_shape(op):
    """Shape function for use with ops whose output shapes are unknown."""
    # Produce one fresh unknown shape per output of the op.
    shapes = []
    for _ in op.outputs:
        shapes.append(tensor_shape.unknown_shape())
    return shapes
| {
"repo_name": "liyu1990/tensorflow",
"path": "tensorflow/python/ops/common_shapes.py",
"copies": "5",
"size": "13192",
"license": "apache-2.0",
"hash": 6890098805992269000,
"line_mean": 34.5579514825,
"line_max": 80,
"alpha_frac": 0.6660855064,
"autogenerated": false,
"ratio": 3.5206832132372563,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009837639731090815,
"num_lines": 371
} |
"""A library of components for tk with rx"""
import tkinter as tk
import rx
def create(title):
    """Create a Tk root window with the given title.

    title - a title for the application

    Returns the configured tk.Tk root.
    """
    assert isinstance(title, str)
    root = tk.Tk()
    # BUG FIX: ``root.title = title`` replaced the Tk.title *method* with a
    # plain string, so the window title was never set (and later calls to
    # root.title(...) would fail). Call the method instead.
    root.title(title)
    # Register the Tkinter scheduler so rx work runs on the UI event loop.
    rx.concurrency.TkinterScheduler(root)
    return root
def input_stream(root, prompt, row):
    """Create a labelled text-entry widget and observe what the user types.

    root - a Tk root
    prompt - an observable of string values
    row - the row of the tk window to place the entry

    Returns an observable of input values.
    """
    assert isinstance(root, tk.Tk)
    assert isinstance(prompt, rx.Observable)
    assert isinstance(row, int)
    output_label(root, prompt, row)
    values = rx.subjects.Subject()
    var = tk.StringVar()
    # Push every edit of the entry's contents into the stream.
    var.trace('w', lambda *_: values.on_next(var.get()))
    entry = tk.Entry(root, textvariable=var)
    entry.grid(row=row, column=1)
    return values
def radio_stream(root, options, row, default=''):
    """Produce a stream of user selections on a radio button.

    root - a Tk root
    options - a list of pairs, [(value, text)]
        value - the value that is generated when selected
        text - the text displayed to the user
    row - the row of the tk window to place the radio buttons
    """
    assert isinstance(root, tk.Tk)
    assert isinstance(options, list)
    assert isinstance(row, int)
    var = tk.StringVar()
    var.set(default)
    # Replay so late subscribers still see the initial default.
    selections = rx.subjects.ReplaySubject()
    selections.on_next(default)
    for column, (value, text) in enumerate(options):
        button = tk.Radiobutton(
            root,
            text=text,
            variable=var,
            value=value,
            command=lambda: selections.on_next(var.get()))
        button.grid(row=row, column=column)
    return selections
def scale_stream(root, prompt, row, default='', **kwargs):
    """Produce a stream of slider values on a scale.

    root - a Tk root
    prompt - an observable of string values
    row - the row of the tk window to place the scale
    """
    assert isinstance(root, tk.Tk)
    assert isinstance(prompt, rx.Observable)
    assert isinstance(row, int)
    var = tk.StringVar()
    var.set(default)
    # Replay so late subscribers still see the initial default.
    values = rx.subjects.ReplaySubject()
    values.on_next(default)
    output_label(root, prompt, row)
    slider = tk.Scale(root,
                      variable=var,
                      command=lambda _: values.on_next(var.get()),
                      **kwargs)
    slider.grid(row=row, column=1)
    return values
def select(root, options, prompt, row, default=''):
    """Produce a stream of user selections on a drop down.

    root - a Tk root
    prompt - an observable of string values
    options - a set of options
        value - the value that is generated when selected
    row - the row of the tk window to place the drop down

    Returns a replay observable seeded with ``default`` that emits each
    value the user selects.
    """
    assert isinstance(root, tk.Tk)
    assert isinstance(options, set)
    assert isinstance(row, int)
    string_var = tk.StringVar()
    string_var.set(default)
    subject = rx.subjects.ReplaySubject()
    subject.on_next(default)
    output_label(root, prompt, row)
    # BUG FIX: the subject never received user selections because the
    # variable was not being watched; observe it the same way input_stream
    # does.
    string_var.trace('w', lambda *args: subject.on_next(string_var.get()))
    # BUG FIX: the widget was created but never placed on the grid, so the
    # drop down never appeared in the window.
    tk.OptionMenu(root, string_var, *options).grid(row=row, column=1)
    return subject
def output_label(root, stream, row, **kwargs):
    """Used to display a stream of values to the user.

    root - a Tk root
    stream - a stream of values to be displayed
    row - the row of the tk window to place the output
    """
    assert isinstance(root, tk.Tk)
    assert isinstance(stream, rx.Observable)
    text = tk.StringVar()
    text.set('')
    # Each emitted value replaces the label text.
    stream.subscribe(text.set)
    label = tk.Label(root, textvariable=text, **kwargs)
    label.grid(row=row)
| {
"repo_name": "brad-h/expy",
"path": "ExPy/ExPy/tkcomponents.py",
"copies": "1",
"size": "3787",
"license": "mit",
"hash": 5672266438646759000,
"line_mean": 29.7886178862,
"line_max": 74,
"alpha_frac": 0.6501188276,
"autogenerated": false,
"ratio": 3.880122950819672,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012773587275790113,
"num_lines": 123
} |
"""A library of helper functions for the Cheroot test suite."""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import datetime
import logging
import os
import sys
import time
import threading
import types
from six.moves import http_client
import six
import cheroot.server
import cheroot.wsgi
from cheroot.test import webtest
# Module-level logger for the test helpers.
log = logging.getLogger(__name__)

# Directory containing this file; used to locate test fixtures.
thisdir = os.path.abspath(os.path.dirname(__file__))

# Default server configuration; test classes may override entries via a
# class-level ``config`` attribute (merged in setup_class).
config = {
    'bind_addr': ('127.0.0.1', 54583),
    'server': 'wsgi',
    'wsgi_app': None,
}
class CherootWebCase(webtest.WebCase):
    """Helper class for a web app test suite."""

    script_name = ''
    scheme = 'http'

    # Maps the ``server`` config value to the server class to instantiate.
    available_servers = {
        'wsgi': cheroot.wsgi.Server,
        'native': cheroot.server.HTTPServer,
    }

    @classmethod
    def setup_class(cls):
        """Create and run one HTTP server per class."""
        conf = config.copy()
        # Class-level ``config`` entries override the module defaults.
        conf.update(getattr(cls, 'config', {}))
        s_class = conf.pop('server', 'wsgi')
        server_factory = cls.available_servers.get(s_class)
        if server_factory is None:
            raise RuntimeError('Unknown server in config: %s' % conf['server'])
        cls.httpserver = server_factory(**conf)
        cls.HOST, cls.PORT = cls.httpserver.bind_addr
        if cls.httpserver.ssl_adapter is None:
            ssl = ''
            cls.scheme = 'http'
        else:
            ssl = ' (ssl)'
            # Speak TLS on the client side as well.
            cls.HTTP_CONN = http_client.HTTPSConnection
            cls.scheme = 'https'
        v = sys.version.split()[0]
        log.info('Python version used to run this test script: %s' % v)
        log.info('Cheroot version: %s' % cheroot.__version__)
        log.info('HTTP server version: %s%s' % (cls.httpserver.protocol, ssl))
        log.info('PID: %s' % os.getpid())
        if hasattr(cls, 'setup_server'):
            # Clear the wsgi server so that
            # it can be updated with the new root
            cls.setup_server()
            cls.start()

    @classmethod
    def teardown_class(cls):
        """Cleanup HTTP server."""
        if hasattr(cls, 'setup_server'):
            cls.stop()

    @classmethod
    def start(cls):
        """Load and start the HTTP server."""
        threading.Thread(target=cls.httpserver.safe_start).start()
        # Block until the server thread reports it is accepting connections.
        while not cls.httpserver.ready:
            time.sleep(0.1)

    @classmethod
    def stop(cls):
        """Terminate HTTP server."""
        cls.httpserver.stop()
        # Run any class-level teardown hook after the server has stopped.
        td = getattr(cls, 'teardown', None)
        if td:
            td()

    # Maximum allowed clock difference, in seconds, for assertEqualDates.
    date_tolerance = 2

    def assertEqualDates(self, dt1, dt2, seconds=None):
        """Assert ``abs(dt1 - dt2)`` is within ``Y`` seconds."""
        if seconds is None:
            seconds = self.date_tolerance
        if dt1 > dt2:
            diff = dt1 - dt2
        else:
            diff = dt2 - dt1
        if not diff < datetime.timedelta(seconds=seconds):
            raise AssertionError(
                '%r and %r are not within %r seconds.' %
                (dt1, dt2, seconds),
            )
class Request:
    """HTTP request container."""

    def __init__(self, environ):
        """Capture the WSGI environ for later inspection by handlers."""
        self.environ = environ
class Response:
    """HTTP response container."""

    def __init__(self):
        """Start with a 200 OK status, HTML content type, and no body."""
        self.status = '200 OK'
        self.headers = {'Content-Type': 'text/html'}
        self.body = None

    def output(self):
        """Generate iterable response body object."""
        body = self.body
        if body is None:
            return []
        if isinstance(body, six.text_type):
            return [body.encode('iso-8859-1')]
        if isinstance(body, six.binary_type):
            return [body]
        # Anything else is treated as an iterable of text chunks.
        return [chunk.encode('iso-8859-1') for chunk in body]
class Controller:
    """WSGI app for tests."""

    def __call__(self, environ, start_response):
        """WSGI request handler."""
        request, response = Request(environ), Response()
        try:
            # Python 3 supports unicode attribute names
            # Python 2 encodes them
            handler = self.handlers[environ['PATH_INFO']]
        except KeyError:
            response.status = '404 Not Found'
        else:
            body = handler(request, response)
            # 204/304 responses must not carry a body.
            bodyless = any(
                response.status.startswith(code)
                for code in ('204', '304')
            )
            if body is not None and not bodyless:
                response.body = body
                try:
                    response.headers.setdefault(
                        'Content-Length', str(len(body)))
                except TypeError:
                    # Generators have no len(); any other unsized body
                    # is a real error.
                    if not isinstance(body, types.GeneratorType):
                        raise
        start_response(response.status, response.headers.items())
        return response.output()
| {
"repo_name": "cherrypy/cheroot",
"path": "cheroot/test/helper.py",
"copies": "1",
"size": "4896",
"license": "bsd-3-clause",
"hash": -86871369143762580,
"line_mean": 27.1379310345,
"line_max": 79,
"alpha_frac": 0.5533088235,
"autogenerated": false,
"ratio": 4.097071129707113,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5150379953207114,
"avg_score": null,
"num_lines": null
} |
"""A library of helper functions for the Cheroot test suite."""
import datetime
import io
import logging
import os
import subprocess
import sys
import time
import threading
import types
from six.moves import http_client
import portend
import pytest
import six
import cheroot.server
import cheroot.wsgi
from cheroot.test import webtest
# Module-level logger for the test helpers.
log = logging.getLogger(__name__)

# Directory containing this file; fixtures such as test.pem live alongside.
thisdir = os.path.abspath(os.path.dirname(__file__))
serverpem = os.path.join(os.getcwd(), thisdir, 'test.pem')

# Default server configuration; test classes may override entries via a
# class-level ``config`` attribute (merged in setup_class).
config = {
    'bind_addr': ('127.0.0.1', 54583),
    'server': 'wsgi',
    'wsgi_app': None,
}
class CherootWebCase(webtest.WebCase):
    """Helper class for a web app test suite."""

    script_name = ''
    scheme = 'http'

    # Maps the ``server`` config value to the server class to instantiate.
    available_servers = {
        'wsgi': cheroot.wsgi.Server,
        'native': cheroot.server.HTTPServer,
    }

    @classmethod
    def setup_class(cls):
        """Create and run one HTTP server per class."""
        conf = config.copy()
        # Class-level ``config`` entries override the module defaults.
        conf.update(getattr(cls, 'config', {}))
        s_class = conf.pop('server', 'wsgi')
        server_factory = cls.available_servers.get(s_class)
        if server_factory is None:
            raise RuntimeError('Unknown server in config: %s' % conf['server'])
        cls.httpserver = server_factory(**conf)
        cls.HOST, cls.PORT = cls.httpserver.bind_addr
        if cls.httpserver.ssl_adapter is None:
            ssl = ''
            cls.scheme = 'http'
        else:
            ssl = ' (ssl)'
            # Speak TLS on the client side as well.
            cls.HTTP_CONN = http_client.HTTPSConnection
            cls.scheme = 'https'
        v = sys.version.split()[0]
        log.info('Python version used to run this test script: %s' % v)
        log.info('Cheroot version: %s' % cheroot.__version__)
        log.info('HTTP server version: %s%s' % (cls.httpserver.protocol, ssl))
        log.info('PID: %s' % os.getpid())
        if hasattr(cls, 'setup_server'):
            # Clear the wsgi server so that
            # it can be updated with the new root
            cls.setup_server()
            cls.start()

    @classmethod
    def teardown_class(cls):
        """Stop the HTTP server started by setup_class, if any."""
        if hasattr(cls, 'setup_server'):
            cls.stop()

    @classmethod
    def start(cls):
        """Load and start the HTTP server."""
        threading.Thread(target=cls.httpserver.safe_start).start()
        # Block until the server thread reports it is accepting connections.
        while not cls.httpserver.ready:
            time.sleep(0.1)

    @classmethod
    def stop(cls):
        """Terminate the HTTP server and run any class teardown hook."""
        cls.httpserver.stop()
        td = getattr(cls, 'teardown', None)
        if td:
            td()

    def base(self):
        """Return the server's base URL (scheme://host[:port] + script_name)."""
        # Omit the port when it is the default for the scheme.
        if ((self.scheme == 'http' and self.PORT == 80) or
                (self.scheme == 'https' and self.PORT == 443)):
            port = ''
        else:
            port = ':%s' % self.PORT
        return '%s://%s%s%s' % (self.scheme, self.HOST, port,
                                self.script_name.rstrip('/'))

    def exit(self):
        """Exit the test process."""
        sys.exit()

    def skip(self, msg='skipped '):
        """Skip the current test with ``msg``."""
        pytest.skip(msg)

    # Maximum allowed clock difference, in seconds, for assertEqualDates.
    date_tolerance = 2

    def assertEqualDates(self, dt1, dt2, seconds=None):
        """Assert abs(dt1 - dt2) is within Y seconds."""
        if seconds is None:
            seconds = self.date_tolerance
        if dt1 > dt2:
            diff = dt1 - dt2
        else:
            diff = dt2 - dt1
        if not diff < datetime.timedelta(seconds=seconds):
            raise AssertionError('%r and %r are not within %r seconds.' %
                                 (dt1, dt2, seconds))
class Request(object):
    """Container for a WSGI request environ."""

    def __init__(self, environ):
        self.environ = environ
class Response(object):
    """Container for an HTTP response under construction."""

    def __init__(self):
        # Defaults: success status, HTML content type, empty body.
        self.status = '200 OK'
        self.headers = {'Content-Type': 'text/html'}
        self.body = None

    def output(self):
        """Return the body as a WSGI-style iterable of byte strings."""
        body = self.body
        if body is None:
            return []
        if isinstance(body, six.text_type):
            return [body.encode('iso-8859-1')]
        if isinstance(body, six.binary_type):
            return [body]
        # Anything else is treated as an iterable of text chunks.
        return [chunk.encode('iso-8859-1') for chunk in body]
class Controller(object):
    """Minimal WSGI app dispatching on PATH_INFO via ``self.handlers``."""

    def __call__(self, environ, start_response):
        request, response = Request(environ), Response()
        try:
            # Python 3 supports unicode attribute names
            # Python 2 encodes them
            handler = self.handlers[environ['PATH_INFO']]
        except KeyError:
            response.status = '404 Not Found'
        else:
            body = handler(request, response)
            # 204/304 responses must not carry a body.
            bodyless = any(
                response.status.startswith(code)
                for code in ('204', '304')
            )
            if body is not None and not bodyless:
                response.body = body
                try:
                    response.headers.setdefault(
                        'Content-Length', str(len(body)))
                except TypeError:
                    # Generators have no len(); any other unsized body
                    # is a real error.
                    if not isinstance(body, types.GeneratorType):
                        raise
        start_response(response.status, response.headers.items())
        return response.output()
# --------------------------- Spawning helpers --------------------------- #
class CherootProcess(object):
    """Run cherryd in a subprocess and manage its lifecycle."""

    pid_file = os.path.join(thisdir, 'test.pid')
    config_file = os.path.join(thisdir, 'test.conf')

    # Template for the config file written by write_conf(); the %-keys are
    # filled from the instance attributes.
    config_template = """[global]
server.socket_host: '%(host)s'
server.socket_port: %(port)s
checker.on: False
log.screen: False
log.error_file: r'%(error_log)s'
log.access_file: r'%(access_log)s'
%(ssl)s
%(extra)s
"""
    error_log = os.path.join(thisdir, 'test.error.log')
    access_log = os.path.join(thisdir, 'test.access.log')

    def __init__(self, wait=False, daemonize=False, ssl=False,
                 socket_host=None, socket_port=None):
        # wait: if True, start() blocks until the subprocess exits and
        #     records its exit code.
        # daemonize: run cherryd with -d and track it via the pid file.
        # ssl: serve over HTTPS using the test.pem certificate.
        self.wait = wait
        self.daemonize = daemonize
        self.ssl = ssl
        self.host = socket_host
        self.port = socket_port

    def write_conf(self, extra=''):
        """Write the server config file, optionally appending ``extra``."""
        if self.ssl:
            serverpem = os.path.join(thisdir, 'test.pem')
            ssl = """
server.ssl_certificate: r'%s'
server.ssl_private_key: r'%s'
""" % (serverpem, serverpem)
        else:
            ssl = ''
        conf = self.config_template % {
            'host': self.host,
            'port': self.port,
            'error_log': self.error_log,
            'access_log': self.access_log,
            'ssl': ssl,
            'extra': extra,
        }
        with io.open(self.config_file, 'w', encoding='utf-8') as f:
            f.write(six.text_type(conf))

    def start(self, imports=None):
        """Start cherryd in a subprocess."""
        # Fail early if something is already listening on the target port.
        portend.free(self.host, self.port, timeout=1)

        args = [
            os.path.join(thisdir, '..', 'cherryd'),
            '-c', self.config_file,
            '-p', self.pid_file,
        ]

        if not isinstance(imports, (list, tuple)):
            imports = [imports]
        for i in imports:
            if i:
                args.append('-i')
                args.append(i)

        if self.daemonize:
            args.append('-d')

        env = os.environ.copy()
        # Make sure we import the cheroot package in which this module is
        # defined.
        grandparentdir = os.path.abspath(os.path.join(thisdir, '..', '..'))
        if env.get('PYTHONPATH', ''):
            env['PYTHONPATH'] = os.pathsep.join(
                (grandparentdir, env['PYTHONPATH']))
        else:
            env['PYTHONPATH'] = grandparentdir
        self._proc = subprocess.Popen([sys.executable] + args, env=env)
        if self.wait:
            self.exit_code = self._proc.wait()
        else:
            # Block until the server is actually accepting connections.
            portend.occupied(self.host, self.port, timeout=5)

        # Give the engine a wee bit more time to finish STARTING
        if self.daemonize:
            time.sleep(2)
        else:
            time.sleep(1)

    def get_pid(self):
        """Return the server PID (from the pid file when daemonized)."""
        if self.daemonize:
            return int(open(self.pid_file, 'rb').read())
        return self._proc.pid

    def join(self):
        """Wait for the process to exit."""
        if self.daemonize:
            return self._join_daemon()
        self._proc.wait()

    def _join_daemon(self):
        # Reap the daemonized child; the mechanism is platform-specific.
        try:
            try:
                # Mac, UNIX
                os.wait()
            except AttributeError:
                # Windows
                try:
                    pid = self.get_pid()
                except IOError:
                    # Assume the subprocess deleted the pidfile on shutdown.
                    pass
                else:
                    os.waitpid(pid, 0)
        except OSError as ex:
            if ex.args != (10, 'No child processes'):
                raise
| {
"repo_name": "diegocortassa/TACTIC",
"path": "3rd_party/site-packages/cheroot/test/helper.py",
"copies": "2",
"size": "8516",
"license": "epl-1.0",
"hash": 2308926685222647000,
"line_mean": 27.6734006734,
"line_max": 79,
"alpha_frac": 0.5338186942,
"autogenerated": false,
"ratio": 3.8850364963503647,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5418855190550365,
"avg_score": null,
"num_lines": null
} |
"""A library of helper functions for the CherryPy test suite."""
import datetime
import io
import logging
import os
import re
import subprocess
import sys
import time
import unittest
import warnings
import contextlib
import portend
import pytest
from cheroot.test import webtest
import cherrypy
from cherrypy._cpcompat import text_or_bytes, HTTPSConnection, ntob
from cherrypy.lib import httputil
from cherrypy.lib import gctools
# Module-level logger for the test helpers.
log = logging.getLogger(__name__)

# Directory containing this file; the test TLS certificate lives alongside.
thisdir = os.path.abspath(os.path.dirname(__file__))
serverpem = os.path.join(os.getcwd(), thisdir, 'test.pem')
class Supervisor(object):
    """Base class for modeling and controlling servers during testing."""

    def __init__(self, **kwargs):
        """Copy every keyword argument onto the instance as an attribute.

        The ``port`` value is coerced to int so callers may pass it as a
        string (e.g. read from a config file).
        """
        for k, v in kwargs.items():
            if k == 'port':
                # BUG FIX: previously the int-coerced value was set and then
                # immediately overwritten by the unconditional setattr below,
                # so ``port`` kept its original (possibly string) type.
                v = int(v)
            setattr(self, k, v)
def log_to_stderr(msg, level):
    """Write ``msg`` plus a line separator to stderr; ``level`` is unused."""
    line = msg + os.linesep
    return sys.stderr.write(line)
class LocalSupervisor(Supervisor):
    """Base class for modeling/controlling servers which run in the same
    process.

    When the server side runs in a different process, start/stop can dump all
    state between each test module easily. When the server side runs in the
    same process as the client, however, we have to do a bit more work to
    ensure config and mounted apps are reset between tests.
    """

    using_apache = False
    using_wsgi = False

    def __init__(self, **kwargs):
        for k, v in kwargs.items():
            setattr(self, k, v)

        cherrypy.server.httpserver = self.httpserver_class

        # This is perhaps the wrong place for this call but this is the only
        # place that i've found so far that I KNOW is early enough to set this.
        cherrypy.config.update({'log.screen': False})
        engine = cherrypy.engine
        # Subscribe OS signal handlers when the engine provides them.
        if hasattr(engine, 'signal_handler'):
            engine.signal_handler.subscribe()
        if hasattr(engine, 'console_control_handler'):
            engine.console_control_handler.subscribe()

    def start(self, modulename=None):
        """Load and start the HTTP server."""
        if modulename:
            # Unhook httpserver so cherrypy.server.start() creates a new
            # one (with config from setup_server, if declared).
            cherrypy.server.httpserver = None

        cherrypy.engine.start()

        self.sync_apps()

    def sync_apps(self):
        """Tell the server about any apps which the setup functions mounted."""
        pass

    def stop(self):
        # Run any instance-level teardown hook before stopping the engine.
        td = getattr(self, 'teardown', None)
        if td:
            td()

        cherrypy.engine.exit()

        # Unsubscribe and discard any servers registered during the test.
        for name, server in getattr(cherrypy, 'servers', {}).copy().items():
            server.unsubscribe()
            del cherrypy.servers[name]
class NativeServerSupervisor(LocalSupervisor):
    """Server supervisor for the builtin HTTP server."""

    httpserver_class = 'cherrypy._cpnative_server.CPHTTPServer'
    using_apache = False
    using_wsgi = False

    def __str__(self):
        """Describe the server type and its bind address."""
        return 'Builtin HTTP Server on {0}:{1}'.format(self.host, self.port)
class LocalWSGISupervisor(LocalSupervisor):
    """Server supervisor for the builtin WSGI server."""

    httpserver_class = 'cherrypy._cpwsgi_server.CPWSGIServer'
    using_apache = False
    using_wsgi = True

    def __str__(self):
        """Describe the server type and its bind address."""
        return 'Builtin WSGI Server on {0}:{1}'.format(self.host, self.port)

    def sync_apps(self):
        """Hook a new WSGI app into the origin server."""
        cherrypy.server.httpserver.wsgi_app = self.get_app()

    def get_app(self, app=None):
        """Obtain a new (decorated) WSGI app to hook into the origin server."""
        if app is None:
            app = cherrypy.tree
        if not self.validate:
            return app
        try:
            from wsgiref import validate
        except ImportError:
            warnings.warn(
                'Error importing wsgiref. The validator will not run.')
            return app
        # wraps the app in the validator
        return validate.validator(app)
def get_cpmodpy_supervisor(**options):
    """Build a mod_python supervisor using the cpmodpy config template."""
    from cherrypy.test import modpy
    supervisor = modpy.ModPythonSupervisor(**options)
    supervisor.template = modpy.conf_cpmodpy
    return supervisor
def get_modpygw_supervisor(**options):
    """Build a mod_python gateway supervisor (WSGI via modpython_gateway)."""
    from cherrypy.test import modpy
    supervisor = modpy.ModPythonSupervisor(**options)
    supervisor.template = modpy.conf_modpython_gateway
    supervisor.using_wsgi = True
    return supervisor
def get_modwsgi_supervisor(**options):
    """Build a mod_wsgi supervisor."""
    from cherrypy.test import modwsgi
    supervisor = modwsgi.ModWSGISupervisor(**options)
    return supervisor
def get_modfcgid_supervisor(**options):
    """Build a mod_fcgid supervisor."""
    from cherrypy.test import modfcgid
    supervisor = modfcgid.ModFCGISupervisor(**options)
    return supervisor
def get_modfastcgi_supervisor(**options):
    """Build a mod_fastcgi supervisor."""
    from cherrypy.test import modfastcgi
    supervisor = modfastcgi.ModFCGISupervisor(**options)
    return supervisor
def get_wsgi_u_supervisor(**options):
    """Build a local WSGI supervisor speaking the ('u', 0) wsgi version."""
    cherrypy.server.wsgi_version = ('u', 0)
    return LocalWSGISupervisor(**options)
class CPWebCase(webtest.WebCase):
    """Helper base class for CherryPy web test suites."""

    script_name = ''
    scheme = 'http'

    # Maps the ``server`` config value to a supervisor class or factory.
    available_servers = {'wsgi': LocalWSGISupervisor,
                         'wsgi_u': get_wsgi_u_supervisor,
                         'native': NativeServerSupervisor,
                         'cpmodpy': get_cpmodpy_supervisor,
                         'modpygw': get_modpygw_supervisor,
                         'modwsgi': get_modwsgi_supervisor,
                         'modfcgid': get_modfcgid_supervisor,
                         'modfastcgi': get_modfastcgi_supervisor,
                         }
    default_server = 'wsgi'

    @classmethod
    def _setup_server(cls, supervisor, conf):
        """Configure the global cherrypy server from ``supervisor``/``conf``
        and return the merged base config dict."""
        v = sys.version.split()[0]
        log.info('Python version used to run this test script: %s' % v)
        log.info('CherryPy version: %s' % cherrypy.__version__)
        if supervisor.scheme == 'https':
            ssl = ' (ssl)'
        else:
            ssl = ''
        log.info('HTTP server version: %s%s' % (supervisor.protocol, ssl))
        log.info('PID: %s' % os.getpid())

        cherrypy.server.using_apache = supervisor.using_apache
        cherrypy.server.using_wsgi = supervisor.using_wsgi

        if sys.platform[:4] == 'java':
            # Jython: disable TCP_NODELAY, which it does not support.
            cherrypy.config.update({'server.nodelay': False})

        # ``conf`` may be a path to a config file or an in-memory dict.
        if isinstance(conf, text_or_bytes):
            parser = cherrypy.lib.reprconf.Parser()
            conf = parser.dict_from_file(conf).get('global', {})
        else:
            conf = conf or {}
        baseconf = conf.copy()
        baseconf.update({'server.socket_host': supervisor.host,
                         'server.socket_port': supervisor.port,
                         'server.protocol_version': supervisor.protocol,
                         'environment': 'test_suite',
                         })
        if supervisor.scheme == 'https':
            # baseconf['server.ssl_module'] = 'builtin'
            baseconf['server.ssl_certificate'] = serverpem
            baseconf['server.ssl_private_key'] = serverpem

        # helper must be imported lazily so the coverage tool
        # can run against module-level statements within cherrypy.
        # Also, we have to do "from cherrypy.test import helper",
        # exactly like each test module does, because a relative import
        # would stick a second instance of webtest in sys.modules,
        # and we wouldn't be able to globally override the port anymore.
        if supervisor.scheme == 'https':
            webtest.WebCase.HTTP_CONN = HTTPSConnection
        return baseconf

    @classmethod
    def setup_class(cls):
        """Create and start one server per test class."""
        # Creates a server
        conf = {
            'scheme': 'http',
            'protocol': 'HTTP/1.1',
            'port': 54583,
            'host': '127.0.0.1',
            'validate': False,
            'server': 'wsgi',
        }
        supervisor_factory = cls.available_servers.get(
            conf.get('server', 'wsgi'))
        if supervisor_factory is None:
            raise RuntimeError('Unknown server in config: %s' % conf['server'])
        supervisor = supervisor_factory(**conf)

        # Copied from "run_test_suite"
        cherrypy.config.reset()
        baseconf = cls._setup_server(supervisor, conf)
        cherrypy.config.update(baseconf)
        setup_client()

        if hasattr(cls, 'setup_server'):
            # Clear the cherrypy tree and clear the wsgi server so that
            # it can be updated with the new root
            cherrypy.tree = cherrypy._cptree.Tree()
            cherrypy.server.httpserver = None
            cls.setup_server()
            # Add a resource for verifying there are no refleaks
            # to *every* test class.
            cherrypy.tree.mount(gctools.GCRoot(), '/gc')
            cls.do_gc_test = True
            supervisor.start(cls.__module__)

        cls.supervisor = supervisor

    @classmethod
    def teardown_class(cls):
        """Stop the server started by setup_class, if any."""
        if hasattr(cls, 'setup_server'):
            cls.supervisor.stop()

    # Enabled by setup_class only when a server was actually mounted.
    do_gc_test = False

    def test_gc(self):
        """Check the /gc/stats refleak resource (runs last in each suite)."""
        if not self.do_gc_test:
            return

        self.getPage('/gc/stats')
        try:
            self.assertBody('Statistics:')
        except Exception:
            'Failures occur intermittently. See #1420'

    def prefix(self):
        """Return the script_name without its trailing slash."""
        return self.script_name.rstrip('/')

    def base(self):
        """Return the server's base URL (scheme://host[:port] + script_name)."""
        # Omit the port when it is the default for the scheme.
        if ((self.scheme == 'http' and self.PORT == 80) or
                (self.scheme == 'https' and self.PORT == 443)):
            port = ''
        else:
            port = ':%s' % self.PORT

        return '%s://%s%s%s' % (self.scheme, self.HOST, port,
                                self.script_name.rstrip('/'))

    def exit(self):
        """Exit the test process."""
        sys.exit()

    def getPage(self, url, *args, **kwargs):
        """Open the url.
        """
        # Resolve the url relative to the mounted script_name, if any.
        if self.script_name:
            url = httputil.urljoin(self.script_name, url)
        return webtest.WebCase.getPage(self, url, *args, **kwargs)

    def skip(self, msg='skipped '):
        """Skip the current test with ``msg``."""
        pytest.skip(msg)

    def assertErrorPage(self, status, message=None, pattern=''):
        """Compare the response body with a built in error page.

        The function will optionally look for the regexp pattern,
        within the exception embedded in the error page."""

        # This will never contain a traceback
        page = cherrypy._cperror.get_error_page(status, message=message)

        # First, test the response body without checking the traceback.
        # Stick a match-all group (.*) in to grab the traceback.
        def esc(text):
            return re.escape(ntob(text))
        epage = re.escape(page)
        epage = epage.replace(
            esc('<pre id="traceback"></pre>'),
            esc('<pre id="traceback">') + b'(.*)' + esc('</pre>'))
        m = re.match(epage, self.body, re.DOTALL)
        if not m:
            self._handlewebError(
                'Error page does not match; expected:\n' + page)
            return

        # Now test the pattern against the traceback
        if pattern is None:
            # Special-case None to mean that there should be *no* traceback.
            if m and m.group(1):
                self._handlewebError('Error page contains traceback')
        else:
            if (m is None) or (
                not re.search(ntob(re.escape(pattern), self.encoding),
                              m.group(1))):
                msg = 'Error page does not contain %s in traceback'
                self._handlewebError(msg % repr(pattern))

    # Maximum allowed clock difference, in seconds, for assertEqualDates.
    date_tolerance = 2

    def assertEqualDates(self, dt1, dt2, seconds=None):
        """Assert abs(dt1 - dt2) is within Y seconds."""
        if seconds is None:
            seconds = self.date_tolerance

        if dt1 > dt2:
            diff = dt1 - dt2
        else:
            diff = dt2 - dt1
        if not diff < datetime.timedelta(seconds=seconds):
            raise AssertionError('%r and %r are not within %r seconds.' %
                                 (dt1, dt2, seconds))
def _test_method_sorter(_, x, y):
"""Monkeypatch the test sorter to always run test_gc last in each suite."""
if x == 'test_gc':
return 1
if y == 'test_gc':
return -1
if x > y:
return 1
if x < y:
return -1
return 0
unittest.TestLoader.sortTestMethodsUsing = _test_method_sorter
def setup_client():
    """Set up the WebCase classes to match the server's socket settings."""
    webtest.WebCase.PORT = cherrypy.server.socket_port
    webtest.WebCase.HOST = cherrypy.server.socket_host
    if cherrypy.server.ssl_certificate:
        CPWebCase.scheme = 'https'
# --------------------------- Spawning helpers --------------------------- #


class CPProcess(object):
    """Drive a cherryd server in a child process (optionally daemonized)."""

    pid_file = os.path.join(thisdir, 'test.pid')
    config_file = os.path.join(thisdir, 'test.conf')
    # %(ssl)s and %(extra)s are filled in (possibly empty) by write_conf().
    config_template = """[global]
server.socket_host: '%(host)s'
server.socket_port: %(port)s
checker.on: False
log.screen: False
log.error_file: r'%(error_log)s'
log.access_file: r'%(access_log)s'
%(ssl)s
%(extra)s
"""
    error_log = os.path.join(thisdir, 'test.error.log')
    access_log = os.path.join(thisdir, 'test.access.log')

    def __init__(self, wait=False, daemonize=False, ssl=False,
                 socket_host=None, socket_port=None):
        # wait: if True, start() blocks until the child exits and records
        # its exit code in self.exit_code.
        self.wait = wait
        self.daemonize = daemonize
        self.ssl = ssl
        # Fall back to the current cherrypy server's socket settings.
        self.host = socket_host or cherrypy.server.socket_host
        self.port = socket_port or cherrypy.server.socket_port

    def write_conf(self, extra=''):
        """Render config_template to config_file (UTF-8 text)."""
        if self.ssl:
            serverpem = os.path.join(thisdir, 'test.pem')
            ssl = """
server.ssl_certificate: r'%s'
server.ssl_private_key: r'%s'
""" % (serverpem, serverpem)
        else:
            ssl = ''

        conf = self.config_template % {
            'host': self.host,
            'port': self.port,
            'error_log': self.error_log,
            'access_log': self.access_log,
            'ssl': ssl,
            'extra': extra,
        }
        with io.open(self.config_file, 'w', encoding='utf-8') as f:
            f.write(str(conf))

    def start(self, imports=None):
        """Start cherryd in a subprocess."""
        # Make sure the port is not already in use.
        portend.free(self.host, self.port, timeout=1)

        args = [
            '-m',
            'cherrypy',
            '-c', self.config_file,
            '-p', self.pid_file,
        ]
        r"""
        Command for running cherryd server with autoreload enabled

        Using

        ```
        ['-c',
         "__requires__ = 'CherryPy'; \
         import pkg_resources, re, sys; \
         sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]); \
         sys.exit(\
            pkg_resources.load_entry_point(\
                'CherryPy', 'console_scripts', 'cherryd')())"]
        ```

        doesn't work as it's impossible to reconstruct the `-c`'s contents.
        Ref: https://github.com/cherrypy/cherrypy/issues/1545
        """
        if not isinstance(imports, (list, tuple)):
            imports = [imports]
        for i in imports:
            if i:
                args.append('-i')
                args.append(i)
        if self.daemonize:
            args.append('-d')
        env = os.environ.copy()
        # Make sure we import the cherrypy package in which this module is
        # defined.
        grandparentdir = os.path.abspath(os.path.join(thisdir, '..', '..'))
        if env.get('PYTHONPATH', ''):
            env['PYTHONPATH'] = os.pathsep.join(
                (grandparentdir, env['PYTHONPATH']))
        else:
            env['PYTHONPATH'] = grandparentdir
        self._proc = subprocess.Popen([sys.executable] + args, env=env)
        if self.wait:
            self.exit_code = self._proc.wait()
        else:
            # Wait until the server starts accepting connections.
            portend.occupied(self.host, self.port, timeout=5)

        # Give the engine a wee bit more time to finish STARTING
        if self.daemonize:
            time.sleep(2)
        else:
            time.sleep(1)

    def get_pid(self):
        """Return the server's PID (read from pid_file when daemonized)."""
        if self.daemonize:
            return int(open(self.pid_file, 'rb').read())
        return self._proc.pid

    def join(self):
        """Wait for the process to exit."""
        if self.daemonize:
            return self._join_daemon()
        self._proc.wait()

    def _join_daemon(self):
        # The daemon is not our direct child; reap it by PID and ignore a
        # missing pidfile (the subprocess may delete it on shutdown).
        with contextlib.suppress(IOError):
            os.waitpid(self.get_pid(), 0)
| {
"repo_name": "cherrypy/cherrypy",
"path": "cherrypy/test/helper.py",
"copies": "2",
"size": "16369",
"license": "bsd-3-clause",
"hash": -1430271156406365700,
"line_mean": 30.5394990366,
"line_max": 79,
"alpha_frac": 0.5763944041,
"autogenerated": false,
"ratio": 3.9982901807523206,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5574684584852321,
"avg_score": null,
"num_lines": null
} |
"""A library of helper functions for the CherryPy test suite."""
import datetime
import logging
log = logging.getLogger(__name__)
import os
thisdir = os.path.abspath(os.path.dirname(__file__))
serverpem = os.path.join(os.getcwd(), thisdir, 'test.pem')
import unittest
import re
import sys
import time
import warnings
import cherrypy
from cherrypy._cpcompat import basestring, copyitems, HTTPSConnection, ntob
from cherrypy.lib import httputil
from cherrypy.lib import gctools
from cherrypy.lib.reprconf import unrepr
from cherrypy.test import webtest
# Use subprocess module from Python 2.7 on Python 2.3-2.6
if sys.version_info < (2, 7):
import cherrypy._cpcompat_subprocess as subprocess
else:
import subprocess
import nose
_testconfig = None


def get_tst_config(overconf=None):
    """Return the test-suite config dict.

    The base config is computed once (optionally overlaid with the
    'supervisor' section of an importable ``testconfig`` module) and cached
    in the module-global ``_testconfig``.  *overconf* entries are then
    overlaid on a copy, so the cache itself is never mutated.

    Args:
      overconf: optional dict of overrides.  (Was a mutable default
        argument ``{}``; ``None`` is now the safe default.)
    """
    global _testconfig
    if _testconfig is None:
        conf = {
            'scheme': 'http',
            'protocol': "HTTP/1.1",
            'port': 54583,
            'host': '127.0.0.1',
            'validate': False,
            'conquer': False,
            'server': 'wsgi',
        }
        try:
            import testconfig
            _conf = testconfig.config.get('supervisor', None)
            if _conf is not None:
                # String values in the config file are repr()s; decode them.
                for k, v in _conf.items():
                    if isinstance(v, basestring):
                        _conf[k] = unrepr(v)
                conf.update(_conf)
        except ImportError:
            # No testconfig module available: use the defaults above.
            pass
        _testconfig = conf

    conf = _testconfig.copy()
    conf.update(overconf or {})
    return conf
class Supervisor(object):
    """Base class for modeling and controlling servers during testing.

    Any keyword arguments become instance attributes; 'port' is coerced
    to int.
    """

    def __init__(self, **kwargs):
        for k, v in kwargs.items():
            if k == 'port':
                # Fix: previously the coerced int was immediately
                # overwritten by the raw value on the following line.
                v = int(v)
            setattr(self, k, v)
def log_to_stderr(msg, level):
    """Engine 'log'-channel subscriber: write *msg* plus a newline to stderr.

    *level* is accepted for the channel signature but unused.  (Replaces a
    PEP 8-discouraged ``lambda`` assignment; behavior is identical.)
    """
    return sys.stderr.write(msg + os.linesep)
class LocalSupervisor(Supervisor):
    """Base class for modeling/controlling servers which run in the same
    process.

    When the server side runs in a different process, start/stop can dump all
    state between each test module easily. When the server side runs in the
    same process as the client, however, we have to do a bit more work to
    ensure config and mounted apps are reset between tests.
    """

    using_apache = False
    using_wsgi = False

    def __init__(self, **kwargs):
        for k, v in kwargs.items():
            setattr(self, k, v)

        # httpserver_class is supplied by concrete subclasses.
        cherrypy.server.httpserver = self.httpserver_class

        # This is perhaps the wrong place for this call but this is the only
        # place that i've found so far that I KNOW is early enough to set this.
        cherrypy.config.update({'log.screen': False})
        engine = cherrypy.engine
        if hasattr(engine, "signal_handler"):
            engine.signal_handler.subscribe()
        if hasattr(engine, "console_control_handler"):
            engine.console_control_handler.subscribe()
        #engine.subscribe('log', log_to_stderr)

    def start(self, modulename=None):
        """Load and start the HTTP server."""
        if modulename:
            # Unhook httpserver so cherrypy.server.start() creates a new
            # one (with config from setup_server, if declared).
            cherrypy.server.httpserver = None

        cherrypy.engine.start()

        self.sync_apps()

    def sync_apps(self):
        """Tell the server about any apps which the setup functions mounted."""
        pass

    def stop(self):
        """Run any teardown hook, exit the engine, and drop extra servers."""
        td = getattr(self, 'teardown', None)
        if td:
            td()

        cherrypy.engine.exit()

        # Unsubscribe and forget any extra server adapters.
        for name, server in copyitems(getattr(cherrypy, 'servers', {})):
            server.unsubscribe()
            del cherrypy.servers[name]
class NativeServerSupervisor(LocalSupervisor):
    """Server supervisor for the builtin HTTP server."""

    # Dotted path resolved by cherrypy.server when starting.
    httpserver_class = "cherrypy._cpnative_server.CPHTTPServer"
    using_apache = False
    using_wsgi = False

    def __str__(self):
        # host/port are set from the test config via Supervisor.__init__.
        return "Builtin HTTP Server on %s:%s" % (self.host, self.port)
class LocalWSGISupervisor(LocalSupervisor):
    """Server supervisor for the builtin WSGI server."""

    # Dotted path resolved by cherrypy.server when starting.
    httpserver_class = "cherrypy._cpwsgi_server.CPWSGIServer"
    using_apache = False
    using_wsgi = True

    def __str__(self):
        return "Builtin WSGI Server on %s:%s" % (self.host, self.port)

    def sync_apps(self):
        """Hook a new WSGI app into the origin server."""
        cherrypy.server.httpserver.wsgi_app = self.get_app()

    def get_app(self, app=None):
        """Obtain a new (decorated) WSGI app to hook into the origin server."""
        if app is None:
            app = cherrypy.tree

        # self.conquer / self.validate come from the test config
        # (see get_tst_config) via Supervisor.__init__.
        if self.conquer:
            try:
                import wsgiconq
            except ImportError:
                warnings.warn(
                    "Error importing wsgiconq. pyconquer will not run.")
            else:
                app = wsgiconq.WSGILogger(app, c_calls=True)

        if self.validate:
            try:
                from wsgiref import validate
            except ImportError:
                warnings.warn(
                    "Error importing wsgiref. The validator will not run.")
            else:
                # wraps the app in the validator
                app = validate.validator(app)

        return app
def get_cpmodpy_supervisor(**options):
    """Supervisor running CherryPy through mod_python's CP handler."""
    from cherrypy.test import modpy
    sup = modpy.ModPythonSupervisor(**options)
    sup.template = modpy.conf_cpmodpy
    return sup


def get_modpygw_supervisor(**options):
    """Supervisor running CherryPy through the mod_python WSGI gateway."""
    from cherrypy.test import modpy
    sup = modpy.ModPythonSupervisor(**options)
    sup.template = modpy.conf_modpython_gateway
    sup.using_wsgi = True
    return sup


def get_modwsgi_supervisor(**options):
    """Supervisor running CherryPy under mod_wsgi."""
    from cherrypy.test import modwsgi
    return modwsgi.ModWSGISupervisor(**options)


def get_modfcgid_supervisor(**options):
    """Supervisor running CherryPy under mod_fcgid."""
    from cherrypy.test import modfcgid
    return modfcgid.ModFCGISupervisor(**options)


def get_modfastcgi_supervisor(**options):
    """Supervisor running CherryPy under mod_fastcgi."""
    from cherrypy.test import modfastcgi
    return modfastcgi.ModFCGISupervisor(**options)


def get_wsgi_u_supervisor(**options):
    """Supervisor for the builtin WSGI server in unicode ('u') mode."""
    cherrypy.server.wsgi_version = ('u', 0)
    return LocalWSGISupervisor(**options)
class CPWebCase(webtest.WebCase):
    """WebCase that starts/stops a CherryPy supervisor per test class."""

    script_name = ""
    scheme = "http"

    # Map of config 'server' values to supervisor factories/classes.
    available_servers = {'wsgi': LocalWSGISupervisor,
                         'wsgi_u': get_wsgi_u_supervisor,
                         'native': NativeServerSupervisor,
                         'cpmodpy': get_cpmodpy_supervisor,
                         'modpygw': get_modpygw_supervisor,
                         'modwsgi': get_modwsgi_supervisor,
                         'modfcgid': get_modfcgid_supervisor,
                         'modfastcgi': get_modfastcgi_supervisor,
                         }
    default_server = "wsgi"

    def _setup_server(cls, supervisor, conf):
        """Log run details and build the base server config dict."""
        v = sys.version.split()[0]
        log.info("Python version used to run this test script: %s" % v)
        log.info("CherryPy version: %s" % cherrypy.__version__)
        if supervisor.scheme == "https":
            ssl = " (ssl)"
        else:
            ssl = ""
        log.info("HTTP server version: %s%s" % (supervisor.protocol, ssl))
        log.info("PID: %s" % os.getpid())

        cherrypy.server.using_apache = supervisor.using_apache
        cherrypy.server.using_wsgi = supervisor.using_wsgi

        if sys.platform[:4] == 'java':
            # Jython-specific server tweak.
            cherrypy.config.update({'server.nodelay': False})

        if isinstance(conf, basestring):
            # A string conf is a path to a config file.
            parser = cherrypy.lib.reprconf.Parser()
            conf = parser.dict_from_file(conf).get('global', {})
        else:
            conf = conf or {}
        baseconf = conf.copy()
        baseconf.update({'server.socket_host': supervisor.host,
                         'server.socket_port': supervisor.port,
                         'server.protocol_version': supervisor.protocol,
                         'environment': "test_suite",
                         })
        if supervisor.scheme == "https":
            #baseconf['server.ssl_module'] = 'builtin'
            baseconf['server.ssl_certificate'] = serverpem
            baseconf['server.ssl_private_key'] = serverpem

        # helper must be imported lazily so the coverage tool
        # can run against module-level statements within cherrypy.
        # Also, we have to do "from cherrypy.test import helper",
        # exactly like each test module does, because a relative import
        # would stick a second instance of webtest in sys.modules,
        # and we wouldn't be able to globally override the port anymore.
        if supervisor.scheme == "https":
            webtest.WebCase.HTTP_CONN = HTTPSConnection
        return baseconf
    _setup_server = classmethod(_setup_server)

    def setup_class(cls):
        """Create and start a server for this test class."""
        # Creates a server
        conf = get_tst_config()
        supervisor_factory = cls.available_servers.get(
            conf.get('server', 'wsgi'))
        if supervisor_factory is None:
            raise RuntimeError('Unknown server in config: %s' % conf['server'])
        supervisor = supervisor_factory(**conf)

        # Copied from "run_test_suite"
        cherrypy.config.reset()
        baseconf = cls._setup_server(supervisor, conf)
        cherrypy.config.update(baseconf)
        setup_client()

        if hasattr(cls, 'setup_server'):
            # Clear the cherrypy tree and clear the wsgi server so that
            # it can be updated with the new root
            cherrypy.tree = cherrypy._cptree.Tree()
            cherrypy.server.httpserver = None
            cls.setup_server()
            # Add a resource for verifying there are no refleaks
            # to *every* test class.
            cherrypy.tree.mount(gctools.GCRoot(), '/gc')
            cls.do_gc_test = True
            supervisor.start(cls.__module__)

        cls.supervisor = supervisor
    setup_class = classmethod(setup_class)

    def teardown_class(cls):
        """Stop the supervisor if this class started a server."""
        if hasattr(cls, 'setup_server'):
            cls.supervisor.stop()
    teardown_class = classmethod(teardown_class)

    # Flipped to True by setup_class when the /gc resource is mounted.
    do_gc_test = False

    def test_gc(self):
        """Fetch /gc/stats and require clean refleak statistics."""
        if self.do_gc_test:
            self.getPage("/gc/stats")
            self.assertBody("Statistics:")

    def prefix(self):
        """Return the mounted script name without its trailing slash."""
        return self.script_name.rstrip("/")

    def base(self):
        """Return scheme://host[:port] plus the script-name prefix."""
        if ((self.scheme == "http" and self.PORT == 80) or
                (self.scheme == "https" and self.PORT == 443)):
            port = ""
        else:
            port = ":%s" % self.PORT

        return "%s://%s%s%s" % (self.scheme, self.HOST, port,
                                self.script_name.rstrip("/"))

    def exit(self):
        """Terminate the test process immediately via sys.exit()."""
        sys.exit()

    def getPage(self, url, headers=None, method="GET", body=None,
                protocol=None, raise_subcls=None):
        """Open the url. Return status, headers, body.

        `raise_subcls` must be a tuple with the exceptions classes
        or a single exception class that are not going to be considered
        a socket.error regardless that they were are subclass of a
        socket.error and therefore not considered for a connection retry.
        """
        if self.script_name:
            url = httputil.urljoin(self.script_name, url)
        return webtest.WebCase.getPage(self, url, headers, method, body,
                                       protocol, raise_subcls)

    def skip(self, msg='skipped '):
        """Skip the current test via nose."""
        raise nose.SkipTest(msg)

    def assertErrorPage(self, status, message=None, pattern=''):
        """Compare the response body with a built in error page.

        The function will optionally look for the regexp pattern,
        within the exception embedded in the error page."""
        # This will never contain a traceback
        page = cherrypy._cperror.get_error_page(status, message=message)

        # First, test the response body without checking the traceback.
        # Stick a match-all group (.*) in to grab the traceback.
        def esc(text):
            return re.escape(ntob(text))
        epage = re.escape(page)
        epage = epage.replace(
            esc('<pre id="traceback"></pre>'),
            esc('<pre id="traceback">') + ntob('(.*)') + esc('</pre>'))
        m = re.match(epage, self.body, re.DOTALL)
        if not m:
            self._handlewebError(
                'Error page does not match; expected:\n' + page)
            return

        # Now test the pattern against the traceback
        if pattern is None:
            # Special-case None to mean that there should be *no* traceback.
            if m and m.group(1):
                self._handlewebError('Error page contains traceback')
        else:
            if (m is None) or (
                    not re.search(ntob(re.escape(pattern), self.encoding),
                                  m.group(1))):
                msg = 'Error page does not contain %s in traceback'
                self._handlewebError(msg % repr(pattern))

    # Default allowable difference (seconds) for assertEqualDates.
    date_tolerance = 2

    def assertEqualDates(self, dt1, dt2, seconds=None):
        """Assert abs(dt1 - dt2) is within Y seconds."""
        if seconds is None:
            seconds = self.date_tolerance

        if dt1 > dt2:
            diff = dt1 - dt2
        else:
            diff = dt2 - dt1
        if not diff < datetime.timedelta(seconds=seconds):
            raise AssertionError('%r and %r are not within %r seconds.' %
                                 (dt1, dt2, seconds))
def _test_method_sorter(_, x, y):
"""Monkeypatch the test sorter to always run test_gc last in each suite."""
if x == 'test_gc':
return 1
if y == 'test_gc':
return -1
if x > y:
return 1
if x < y:
return -1
return 0
unittest.TestLoader.sortTestMethodsUsing = _test_method_sorter
def setup_client():
    """Set up the WebCase classes to match the server's socket settings."""
    webtest.WebCase.PORT = cherrypy.server.socket_port
    webtest.WebCase.HOST = cherrypy.server.socket_host
    if cherrypy.server.ssl_certificate:
        CPWebCase.scheme = 'https'
# --------------------------- Spawning helpers --------------------------- #


class CPProcess(object):
    """Drive a cherryd server in a child process (optionally daemonized)."""

    pid_file = os.path.join(thisdir, 'test.pid')
    config_file = os.path.join(thisdir, 'test.conf')
    # %(ssl)s and %(extra)s are filled in (possibly empty) by write_conf().
    config_template = """[global]
server.socket_host: '%(host)s'
server.socket_port: %(port)s
checker.on: False
log.screen: False
log.error_file: r'%(error_log)s'
log.access_file: r'%(access_log)s'
%(ssl)s
%(extra)s
"""
    error_log = os.path.join(thisdir, 'test.error.log')
    access_log = os.path.join(thisdir, 'test.access.log')

    def __init__(self, wait=False, daemonize=False, ssl=False,
                 socket_host=None, socket_port=None):
        # wait: if True, start() blocks until the child exits and records
        # its exit code in self.exit_code.
        self.wait = wait
        self.daemonize = daemonize
        self.ssl = ssl
        # Fall back to the current cherrypy server's socket settings.
        self.host = socket_host or cherrypy.server.socket_host
        self.port = socket_port or cherrypy.server.socket_port

    def write_conf(self, extra=""):
        """Render config_template to config_file (binary, UTF-8)."""
        if self.ssl:
            serverpem = os.path.join(thisdir, 'test.pem')
            ssl = """
server.ssl_certificate: r'%s'
server.ssl_private_key: r'%s'
""" % (serverpem, serverpem)
        else:
            ssl = ""

        conf = self.config_template % {
            'host': self.host,
            'port': self.port,
            'error_log': self.error_log,
            'access_log': self.access_log,
            'ssl': ssl,
            'extra': extra,
        }
        f = open(self.config_file, 'wb')
        f.write(ntob(conf, 'utf-8'))
        f.close()

    def start(self, imports=None):
        """Start cherryd in a subprocess."""
        # Make sure the port is not already in use.
        cherrypy._cpserver.wait_for_free_port(self.host, self.port)

        args = [
            os.path.join(thisdir, '..', 'cherryd'),
            '-c', self.config_file,
            '-p', self.pid_file,
        ]

        if not isinstance(imports, (list, tuple)):
            imports = [imports]
        for i in imports:
            if i:
                args.append('-i')
                args.append(i)

        if self.daemonize:
            args.append('-d')

        env = os.environ.copy()
        # Make sure we import the cherrypy package in which this module is
        # defined.
        grandparentdir = os.path.abspath(os.path.join(thisdir, '..', '..'))
        if env.get('PYTHONPATH', ''):
            env['PYTHONPATH'] = os.pathsep.join(
                (grandparentdir, env['PYTHONPATH']))
        else:
            env['PYTHONPATH'] = grandparentdir
        self._proc = subprocess.Popen([sys.executable] + args, env=env)
        if self.wait:
            self.exit_code = self._proc.wait()
        else:
            # Wait until the server starts accepting connections.
            cherrypy._cpserver.wait_for_occupied_port(self.host, self.port)

        # Give the engine a wee bit more time to finish STARTING
        if self.daemonize:
            time.sleep(2)
        else:
            time.sleep(1)

    def get_pid(self):
        """Return the server's PID (read from pid_file when daemonized)."""
        if self.daemonize:
            return int(open(self.pid_file, 'rb').read())
        return self._proc.pid

    def join(self):
        """Wait for the process to exit."""
        if self.daemonize:
            return self._join_daemon()
        self._proc.wait()

    def _join_daemon(self):
        # The daemon is not our direct child; reap it by PID where possible.
        try:
            try:
                # Mac, UNIX
                os.wait()
            except AttributeError:
                # Windows
                try:
                    pid = self.get_pid()
                except IOError:
                    # Assume the subprocess deleted the pidfile on shutdown.
                    pass
                else:
                    os.waitpid(pid, 0)
        except OSError:
            x = sys.exc_info()[1]
            if x.args != (10, 'No child processes'):
                raise
| {
"repo_name": "CodyKochmann/pi-cluster",
"path": "software/src/cherrypy/test/helper.py",
"copies": "1",
"size": "17824",
"license": "mit",
"hash": 9067739917278950000,
"line_mean": 31.3484573503,
"line_max": 79,
"alpha_frac": 0.5741135548,
"autogenerated": false,
"ratio": 4.0564406008192995,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5130554155619299,
"avg_score": null,
"num_lines": null
} |
""" A library of object-oriented patterns """
from __future__ import division
import new, weakref
class injector:
    """ Injection of new methods into an existing class

    * synopsis *

    class some_descriptive_text(injector, some_existing_class,
                                another_existing_class, ...):
      def new_method(self, ...): ...
      def another_new_method(self, ...): ...

    obj = some_existing_class(...)
    obj.new_method(...)
    obj = another_existing_class(...)
    obj.new_method(...)
    # class 'some_descriptive_text' is an empty shell with no use by itself.

    * motivation *

    The traditional way to add methods to an existing class involves typing
    three times the same information:

    def new_method(self, ...): ...
    existing_class.new_method = new_method

    or to defer the naming to after the definition:

    def foo(self, ...): ...
    existing_class.new_method = foo

    A bit of metaclass trickery results in a cleaner syntax.
    """

    # NOTE(review): Python 2-style metaclass; ``__metaclass__`` is ignored
    # on Python 3, where injection would silently not happen.
    class __metaclass__(type):

        def __init__(cls, classname, bases, classdict):
            # bases[0] is injector itself; the remaining bases are the
            # classes receiving the new attributes.
            for target_class in bases[1:]:
                for name, attribute in classdict.items():
                    # Skip housekeeping attributes added by the class body.
                    if name in ('__module__', '__doc__'): continue
                    # Refuse to clobber an existing attribute.
                    assert not hasattr(target_class, name), (
                        "class %s already has attribute '%s'"
                        % (target_class.__name__, name))
                    setattr(target_class, name, attribute)
class memoize(object):
    """ Memoize the result returned by a function """

    def __init__(self, func):
        self.cached = {}
        self.func = func
        self.__doc__ = self.func.__doc__

    def __call__(self, *args):
        cache = self.cached
        try:
            return cache[args]
        except KeyError:
            # First time with these arguments: compute and remember.
            result = self.func(*args)
            cache[args] = result
            return result
        except TypeError:
            # Unhashable argument(s): skip caching entirely.
            return self.func(*args)
class memoize_method(object):
    """ Memoize the result returned by a bound method.

    This is to be used with immutable objects only.
    """

    def __init__(self, meth):
        # Per-instance attribute name under which the memoizer is cached.
        self.cache = '_memoized_%s' % meth.__name__
        self.meth = meth

    def __get__(self, obj, type=None):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        try:
            return getattr(obj, self.cache)
        except AttributeError:
            # We use weakref.proxy to break the following cycle
            # obj._memoized_xxx.func.im_self is obj
            # It's always better to enable reference counting to collect
            # unreachable object as soon as they become so instead of relying
            # on a later gc collection.
            # NOTE(review): Python 2 only — the 'new' module was removed in
            # Python 3.
            func = new.instancemethod(self.meth, weakref.proxy(obj), type)
            memoized = memoize(func)
            setattr(obj, self.cache, memoized)
            return memoized
class null(object):
    """A do-nothing object: absorbs every operation and yields itself."""

    def __init__(self, *args, **kwds):
        pass

    def __call__(self, *args, **kwds):
        return self

    def __getattr__(self, a):
        return self

    def __setattr__(self, a, v):
        return self

    def __delattr__(self, a):
        return self

    def __getitem__(self, i):
        return self

    def __setitem__(self, i, v):
        return self

    def __delitem__(self, i):
        return self

    def __repr__(self):
        return 'null()'

    def __nonzero__(self):
        # Python 2 truth-value hook: a null() is always falsy there.
        return False
class proxy(object):
    """Forward attribute access to a wrapped subject object."""

    def __init__(self, subject):
        self.subject = subject

    def __getattr__(self, attr):
        # Only called for names not found on the proxy itself.
        target = self.subject
        return getattr(target, attr)
class journal_mixin(object):
    """ An easy way to store the history of an attribute as it changes
    through the course of a routine.
    """

    # Names listed here have their full history recorded under
    # <name> + __journal_suffix__ in the instance dict.
    __journal__ = []
    __journal_suffix__ = "_history"

    def __getattr__(self, name):
        key = name + self.__journal_suffix__ if name in self.__journal__ else name
        try:
            # Journaled attributes read as their most recent value.
            return self.__dict__[key][-1]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        if name in self.__journal__:
            key = name + self.__journal_suffix__
            self.__dict__.setdefault(key, []).append(value)
        else:
            self.__dict__[name] = value

    def __delattr__(self, name):
        key = name + self.__journal_suffix__ if name in self.__journal__ else name
        del self.__dict__[key]
def calculate_state(holder):
    # First access: run the deferred calculation, cache its result on the
    # holder, and switch the holder to the retrieval state.
    holder.data = holder.data()
    holder.state = retrieve_state
    return holder.data


def retrieve_state(holder):
    # Subsequent accesses: just hand back the cached value.
    return holder.data


class lazy_initialization(object):
    """
    Defers initialization until the value is accessed (state pattern)
    """

    def __init__(self, calculation):
        # 'data' initially holds the zero-argument callable itself.
        self.data = calculation
        self.state = calculate_state

    def __call__(self):
        return self.state(holder=self)
| {
"repo_name": "hickerson/bbn",
"path": "fable/fable_sources/libtbx/object_oriented_patterns.py",
"copies": "1",
"size": "4540",
"license": "mit",
"hash": -1590494384021151000,
"line_mean": 23.8087431694,
"line_max": 74,
"alpha_frac": 0.6174008811,
"autogenerated": false,
"ratio": 3.8572642310960066,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4974665112196006,
"avg_score": null,
"num_lines": null
} |
# A library of sport implementations.
import random
# Game outcome codes shared by every simulator.
DRAW = 0
WIN = 1
LOSS = 2


def basic_game(team1, team2, teams, settings):
    """Template for all game simulators.

    A game takes two team names plus the ``teams`` and ``settings`` dicts.
    This template reports a WIN with *team1* beating *team2* and collects
    no extra statistics.

    Returns:
      (result, winner, loser, stats) where result is WIN or DRAW, winner
      and loser are team names (order irrelevant for a draw), and stats
      maps statistic names to values.
    """
    return (WIN, team1, team2, {})
def cricket(team1, team2, teams, settings):
    '''Simulate a game of cricket.'''
    strength1 = teams[team1]['Strength']
    strength2 = teams[team2]['Strength']
    if strength1 == strength2:
        result, winner, loser = DRAW, team1, team2
    elif strength1 > strength2:
        result, winner, loser = WIN, team1, team2
        # Rubber-banding: the loser gains strength for next time.
        teams[team2]['Strength'] += 1
    else:
        result, winner, loser = WIN, team2, team1
        teams[team1]['Strength'] += 1

    # Generate winner statistics (scores formatted as wickets/runs).
    winning_runs = random.randint(settings['min_runs'], settings['max_runs'])
    winning_wickets = random.randint(5, 10)
    winning_score = '{}/{}'.format(winning_wickets, winning_runs)

    if result == DRAW:
        losing_runs = winning_runs
    else:
        losing_runs = random.randint(80, winning_runs)
    losing_wickets = random.randint(5, 10)
    losing_score = '{}/{}'.format(losing_wickets, losing_runs)

    stats = {'Winning Score': winning_score,
             'Losing Score': losing_score}

    # Return results tuple and modified teams dictionary
    return ((result, winner, loser, stats), teams)
def football(team1, team2, teams, settings):
    '''Simulate a game of football (soccer).'''
    strength1 = teams[team1]['Strength']
    strength2 = teams[team2]['Strength']
    if strength1 == strength2:
        result, winner, loser = DRAW, team1, team2
    elif strength1 > strength2:
        result, winner, loser = WIN, team1, team2
    else:
        result, winner, loser = WIN, team2, team1

    # Generate winner statistics
    winning_goals = random.randint(1, 3)
    if result == DRAW:
        losing_goals = winning_goals
    else:
        losing_goals = random.randint(0, winning_goals - 1)

    stats = {'Winning Score': winning_goals,
             'Losing Score': losing_goals}

    return ((result, winner, loser, stats), teams)
# Define default characteristics of each game.
# Maps display name -> {'parameters': per-team attribute names,
# 'function_name': the simulator callable, 'settings': default settings}.
# NOTE(review): settings values are strings here, yet cricket() passes
# min_runs/max_runs straight to random.randint — presumably the consumer
# converts them (e.g. int()) before calling; verify against the caller.
games = {'Cricket': {'parameters': ['Name', 'Strength'],
                     'function_name': cricket,
                     'settings': {'max_runs': '180',
                                  'min_runs': '120'}},
         'Football (soccer)': {'parameters': ['Name', 'Strength', 'Goals'],
                               'function_name': football,
                               'settings': {'max_goals': '3',
                                            'min_goals': '1'}}}
"repo_name": "kdelwat/Ladder",
"path": "sports.py",
"copies": "1",
"size": "3361",
"license": "mit",
"hash": -420477309148616200,
"line_mean": 30.7169811321,
"line_max": 79,
"alpha_frac": 0.5781017554,
"autogenerated": false,
"ratio": 3.772166105499439,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.984869159318613,
"avg_score": 0.00031525354266168565,
"num_lines": 106
} |
# A library of things to help with simple symmetry operation stuff.
#
# FIXED 17/NOV/06 add a method in here to give a list of likely, and then
# less likely, spacegroups based on an input spacegroup.
# For instance, if the input spacegroup is P 41 21 2 then
# another likely spacegroup is P 43 21 2 and less likely
# spacegroups are all those in the same pointgroup with
# different screw axes - e.g. P 41 2 2 (thinking of an Ed
# Mitchell example.) This should also allow in the likely
# case for body centred spacegroups where the screw axes
# are hidden, for example I 2 2 2/I 21 21 21 and I 2 3/I 21 3.
# This is now handled by Pointless in the "likely spacegroups"
# section.
#
# FIXME 06/DEC/06 need a mapping table from "old" spacegroup names to e.g. xHM
# for use with phenix.hyss.
import os
def lattice_to_spacegroup(lattice):
    """Convert a lattice e.g. tP into the minimal spacegroup number
    to represent this."""
    table = {
        "aP": 1, "mP": 3, "mC": 5, "mI": 5,
        "oP": 16, "oC": 20, "oF": 22, "oI": 23,
        "tP": 75, "tI": 79, "hP": 143, "hR": 146,
        "cP": 195, "cF": 196, "cI": 197,
    }
    number = table.get(lattice)
    if number is None:
        raise RuntimeError('lattice "%s" unknown' % lattice)
    return number
def spacegroup_name_xHM_to_old(xHM):
    """Convert an extended Hermann-Mauguin spacegroup symbol to the
    corresponding 'old' symbol, using CCP4's syminfo.lib.

    Requires the CCP4 environment variable to point at a CCP4 install.
    Raises RuntimeError if the symbol is unknown.
    """

    # generate mapping table
    mapping = {}
    current_old = ""
    current_xHM = ""
    old_names = set()
    syminfo = os.path.join(os.environ["CCP4"], "lib", "data", "syminfo.lib")
    with open(syminfo) as fh:
        for line in fh.readlines():
            if line[0] == "#":
                continue
            # Remember the most recent old/xHM symbols; commit the pair
            # when the spacegroup record ends.
            if "symbol old" in line:
                current_old = line.split("'")[1]
            if "symbol xHM" in line:
                current_xHM = line.split("'")[1]
            if "end_spacegroup" in line:
                mapping[current_xHM] = current_old
                old_names.add(current_old)

    xHM = xHM.upper()
    if xHM not in mapping:
        # Perhaps the input was already an old-style name.
        if xHM in old_names:
            return xHM
        raise RuntimeError("spacegroup %s unknown" % xHM)
    return mapping[xHM]
def clean_reindex_operator(symop):
    """Return *symop* as a string with any square brackets removed."""
    text = str(symop)
    for bracket in '[]':
        text = text.replace(bracket, '')
    return text
def lattices_in_order():
    """Return a list of possible crystal lattices (e.g. tP) in order of
    increasing symmetry..."""
    # eliminated this entry ... 'oA': 38,
    candidates = [
        "aP", "mP", "mC",
        "oP", "oC", "oF", "oI",
        "tP", "tI",
        "hP", "hR",
        "cP", "cF", "cI",
    ]
    # Order by each lattice's minimal spacegroup number.
    return sorted(candidates, key=lattice_to_spacegroup)
def sort_lattices(lattices):
    """Return the given lattices sorted into increasing-symmetry order."""
    wanted = set(lattices)
    return [lattice for lattice in lattices_in_order() if lattice in wanted]
def lauegroup_to_lattice(lauegroup):
    """Convert a Laue group representation (from pointless, e.g. I m m m)
    to something useful, like the implied crystal lattice (in this
    case, oI.)"""
    # this has been calculated from the results of Ralf GK's sginfo and a
    # little fiddling...
    #
    # 19/feb/08 added mI record as pointless has started producing this -
    # why??? this is not a "real" spacegroup... may be able to switch this
    # off...
    # 'I2/m': 'mI',
    mapping = {
        "Ammm": "oA",
        "C2/m": "mC",
        "Cmmm": "oC",
        "Fm-3": "cF",
        "Fm-3m": "cF",
        "Fmmm": "oF",
        "H-3": "hR",
        "H-3m": "hR",
        "R-3:H": "hR",
        "R-3m:H": "hR",
        "I4/m": "tI",
        "I4/mmm": "tI",
        "Im-3": "cI",
        "Im-3m": "cI",
        "Immm": "oI",
        "P-1": "aP",
        "P-3": "hP",
        "P-3m": "hP",
        "P2/m": "mP",
        "P4/m": "tP",
        "P4/mmm": "tP",
        "P6/m": "hP",
        "P6/mmm": "hP",
        "Pm-3": "cP",
        "Pm-3m": "cP",
        "Pmmm": "oP",
    }

    # Drop whitespace and any redundant '1' symmetry elements.
    key = ''.join(part for part in lauegroup.split() if part != '1')
    return mapping[key]
| {
"repo_name": "xia2/xia2",
"path": "src/xia2/lib/SymmetryLib.py",
"copies": "1",
"size": "4704",
"license": "bsd-3-clause",
"hash": -3971862335227733000,
"line_mean": 24.7049180328,
"line_max": 78,
"alpha_frac": 0.5106292517,
"autogenerated": false,
"ratio": 3.239669421487603,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9247548351747552,
"avg_score": 0.000550064288010286,
"num_lines": 183
} |
"""A library that holds the bulk of the logic for merging JSON profiles.
Collect duration statistics of events across these profiles.
Duration is measured in milliseconds.
"""
from __future__ import division
import csv
import gzip
import json
import os
def _median(lst):
"""Returns the median of the input list.
Args:
lst: the input list.
Returns:
The median of the list, or None if the list is empty/None.
"""
sorted_lst = sorted(lst)
length = len(sorted_lst)
if length % 2:
return sorted_lst[length // 2]
return (sorted_lst[length // 2 - 1] + sorted_lst[length // 2]) / 2
def write_to_csv(bazel_source, project_source, project_commit, event_list,
                 output_csv_path):
    """Writes the event_list to output_csv_path.

    event_list format:
      [{'cat': <string>, 'name': <string>, 'min': <int>,
        'median': <int>, 'max': <int>, 'count': <int>}, ...]

    Args:
      bazel_source: the bazel commit or path to the bazel binary from which
        these JSON profiles were collected.
      project_source: the project on which the runs that generated these JSON
        projects were performed.
      project_commit: the project commit on which the Bazel runs were
        performed.
      event_list: the list of events, aggregated from the JSON profiles.
      output_csv_path: a path to the output CSV file.
    """
    # Create the destination directory on demand.
    parent_dir = os.path.dirname(output_csv_path)
    if parent_dir and not os.path.exists(parent_dir):
        os.makedirs(parent_dir)

    header = ['bazel_source', 'project_source', 'project_commit', 'cat',
              'name', 'min', 'median', 'max', 'count']
    with open(output_csv_path, 'w') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(header)
        writer.writerows(
            [bazel_source, project_source, project_commit, event['cat'],
             event['name'], event['min'], event['median'], event['max'],
             event['count']]
            for event in event_list)
def _accumulate_event_duration(event_list, accum_dict, only_phases=False):
"""Fill up accum_dict by accummulating durations of each event.
Also create the entries for each phase by subtracting the build phase markers'
ts attribute.
Args:
event_list: the list of event objects.
accum_dict: the dict to be filled up with a mapping of the following format:
{ <name>: { name: ..., cat: ..., dur_list: [...]}, ...}
only_phases: only collect entries from phase markers.
"""
# A list of tuples of the form (marker, occurrence time in micro s)
build_markers_ts_pairs = []
max_ts = 0
# Only collect events with a duration.
# Special case: markers that indicates beginning/end of execution.
for event in event_list:
if 'ts' in event:
max_ts = max(max_ts, event['ts'])
if 'cat' in event and event['cat'] == 'build phase marker':
build_markers_ts_pairs.append((event['name'], event['ts']))
if 'dur' not in event:
continue
if not only_phases:
if event['name'] not in accum_dict:
accum_dict[event['name']] = {
'name': event['name'],
'cat': event['cat'],
'dur_list': []
}
accum_dict[event['name']]['dur_list'].append(event['dur'])
# Append an artificial marker that signifies the end of the run.
# This is to determine the duration from the last marker to the actual end of
# the run and will not end up in the final data.
build_markers_ts_pairs.append((None, max_ts))
# Fill in the markers.
for i, marker_ts_pair in enumerate(build_markers_ts_pairs[:-1]):
marker, ts = marker_ts_pair
_, next_ts = build_markers_ts_pairs[i + 1]
if marker not in accum_dict:
accum_dict[marker] = {
'name': marker,
'cat': 'build phase marker',
'dur_list': []
}
current_phase_duration_millis = (
next_ts - ts) / 1000 # Convert from microseconds to milliseconds
accum_dict[marker]['dur_list'].append(current_phase_duration_millis)
def _aggregate_from_accum_dict(accum_dict):
  """Aggregates the result from the accumulated dict.

  Calculates duration statistics and counts for each event. All measurements
  of time are in milliseconds.

  Args:
    accum_dict: the dict to be filled up with a mapping of the following format:
      { <name>: { name: ..., cat: ..., dur_list: [...]}, ...}

  Returns:
    A list of the following format:
      [{ name: ..., cat: ..., min: ..., median: ..., max: ..., count: ... }]
  """
  return [{
      'name': entry['name'],
      'cat': entry['cat'],
      'median': _median(entry['dur_list']),
      'min': min(entry['dur_list']),
      'max': max(entry['dur_list']),
      'count': len(entry['dur_list'])
  } for entry in accum_dict.values()]
def aggregate_data(input_profiles, only_phases=False):
  """Produces the aggregated data from the JSON profile inputs.

  Collects cat, name and duration statistics for the events found in the
  given JSON profiles.

  Args:
    input_profiles: a list of paths to .profile or .profile.gz files.
    only_phases: only output entries from phase markers.

  Returns:
    The list of objects which contain the info about cat, name and statistics
    on the duration of events.
  """
  # Maps each event name to an object accumulating that event's durations.
  accum_dict = {}
  for profile_path in input_profiles:
    if profile_path.endswith('.gz'):
      with gzip.GzipFile(profile_path, 'r') as compressed_file:
        content = json.loads(compressed_file.read().decode('utf-8'))
    else:
      with open(profile_path, 'r') as plain_file:
        content = json.load(plain_file)
    # A profile either holds the event list directly, or nests it under the
    # 'traceEvents' key.
    if 'traceEvents' in content:
      content = content['traceEvents']
    _accumulate_event_duration(content, accum_dict, only_phases)
  return _aggregate_from_accum_dict(accum_dict)
| {
"repo_name": "bazelbuild/bazel-bench",
"path": "utils/json_profiles_merger_lib.py",
"copies": "1",
"size": "5977",
"license": "apache-2.0",
"hash": -7705798818457562000,
"line_mean": 32.2055555556,
"line_max": 80,
"alpha_frac": 0.6397858457,
"autogenerated": false,
"ratio": 3.669122160834868,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9774909640842683,
"avg_score": 0.006799673138436959,
"num_lines": 180
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.